diff --git a/.circleci/base_config.yml b/.circleci/base_config.yml
index 09674d001b7f..b97973e9ee4c 100644
--- a/.circleci/base_config.yml
+++ b/.circleci/base_config.yml
@@ -79,7 +79,7 @@ jobs:
name: Install eslint
command: |
apk add --no-cache npm
- npm -g install eslint
+ npm -g install eslint@5.16.0
- run:
name: Run eslint
command: |
@@ -94,7 +94,7 @@ jobs:
enterprise:
type: boolean
docker:
- - image: arangodb/build-alpine-x86_64:3.16-gcc11.2-openssl3.0.8
+ - image: arangodb/build-alpine-x86_64:3.16-gcc11.2-openssl3.0.10
resource_class: xlarge
environment:
GIT_SSH_COMMAND: ssh
@@ -126,7 +126,7 @@ jobs:
if [ "$?" == "0" ] ; then
ENTERPRISE_BRANCH=$CIRCLE_BRANCH
else
- ENTERPRISE_BRANCH=devel
+ ENTERPRISE_BRANCH=3.11
fi
set -e
else
@@ -256,6 +256,8 @@ workflows:
community-pr:
jobs:
- compile-linux:
+ context:
+ - sccache-aws-bucket # add the environment variables to setup sccache for the S3 bucket
name: build-ce
preset: community-pr
edition: x64-community
@@ -263,6 +265,8 @@ workflows:
enterprise-pr:
jobs:
- compile-linux:
+ context:
+ - sccache-aws-bucket # add the environment variables to setup sccache for the S3 bucket
name: build-ee
preset: enterprise-pr
edition: x64-enterprise
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
index 63a3eefed081..291df67194a7 100644
--- a/.github/CODEOWNERS
+++ b/.github/CODEOWNERS
@@ -4,8 +4,6 @@
# `documentation-team` ownership
/Documentation/ @arangodb/team-documentation
/utils/*Documentation* @arangodb/team-documentation
-/utils/*Examples* @arangodb/team-documentation
-/utils/*Swagger* @arangodb/team-documentation
# `qa-team` ownership
/Installation/ @arangodb/team-qa
diff --git a/.github/workflows/clang-format.yml b/.github/workflows/clang-format.yml
index eee225d9ce04..0ac83038970a 100644
--- a/.github/workflows/clang-format.yml
+++ b/.github/workflows/clang-format.yml
@@ -2,7 +2,7 @@ name: clang-format
on:
workflow_dispatch:
pull_request:
- branches: [ devel, staging/replication-2.0 ]
+ branches: [ "3.11" ]
paths:
- "arangod/**"
- "client-tools/**"
diff --git a/.gitignore b/.gitignore
index ae267bc4308e..52c74dd0c68d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -108,6 +108,7 @@ js/apps/system/_admin/aardvark/APP/react/node_modules/*
js/node/**/node_modules/.bin/
js/node/node_modules/**/*.md
!js/node/node_modules/**/LICENSE.md
+js/node/node_modules/**/test/
js/node/node_modules/**/*.ts
js/node/node_modules/**/*.yml
js/node/node_modules/**/.babelrc
@@ -128,7 +129,6 @@ npm-debug.log
/log-*
data-*
databases
-!/Documentation/DocuBlocks/Rest/Databases
cluster-init
datafile-*.db
@@ -147,3 +147,7 @@ js/node/**/*.map
js/node/**/*.map.gz
js/apps/**/*.map
js/apps/**/*.map.gz
+
+swagger-ui-es-bundle*
+swagger-ui.js
+swagger-ui.js.map
diff --git a/3rdParty/CMakeLists.txt b/3rdParty/CMakeLists.txt
index 2421eeaa12d8..381b629de4b9 100755
--- a/3rdParty/CMakeLists.txt
+++ b/3rdParty/CMakeLists.txt
@@ -59,6 +59,7 @@ function (add_snappy)
set(SNAPPY_INSTALL OFF CACHE BOOL "disable Snappy installation")
set(SNAPPY_REQUIRE_AVX ON CACHE BOOL "target processors with AVX support" FORCE)
set(SNAPPY_REQUIRE_AVX2 OFF CACHE BOOL "target processors with AVX2 support" FORCE)
+ set(SNAPPY_HAVE_BMI2 OFF CACHE BOOL "target processors with BMI2 support" FORCE)
add_subdirectory(${SNAPPY_SOURCE_DIR})
endfunction ()
add_snappy()
@@ -87,6 +88,9 @@ if (USE_JEMALLOC)
set(JEMALLOC_HOME "${JEMALLOC_HOME}" PARENT_SCOPE)
set(SYS_LIBS ${SYS_LIBS} jemalloc PARENT_SCOPE)
set(JEMALLOC_LIB "${JEMALLOC_LIB}" PARENT_SCOPE)
+ if (USE_JEMALLOC_PROF AND USE_LIBUNWIND)
+ add_dependencies(jemalloc_build libunwind_build)
+ endif()
endif ()
################################################################################
@@ -249,6 +253,12 @@ target_include_directories(llhttp PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/llhttp/inc
add_subdirectory(nghttp2)
+################################################################################
+## syslog-win32
+################################################################################
+
+add_subdirectory(syslog-win32)
+
################################################################################
## IMMER
################################################################################
diff --git a/3rdParty/README_maintainers.md b/3rdParty/README_maintainers.md
index a715445c503b..aa27d76a1bc9 100644
--- a/3rdParty/README_maintainers.md
+++ b/3rdParty/README_maintainers.md
@@ -260,22 +260,32 @@ http://snowball.tartarus.org/ stemming for IResearch. We use the latest provided
https://github.com/swagger-api/swagger-ui/releases
-Our copy of swagger-ui resides at `js/assets/swagger`. The `index.html`
-contains a few tweaks to make swagger-ui work with the web interface.
+Our copy of swagger-ui resides at `js/server/assets/swagger`. The `index.css`
+and `swagger-initializer.js` files contain a few tweaks to make swagger-ui look
+a little nicer and make it work with the web interface.
To upgrade to a newer version:
-1. Copy the file `js/assets/swagger/index.html` to a safe location and open it in an editor
-2. Delete all existing files inside `js/assets/swagger` including `index.html`
+1. Copy the files `js/server/assets/swagger/index.css` and
+ `js/server/assets/swagger/swagger-initializer.js`
+ to a safe location and open them in an editor
+2. Delete all existing files inside `js/server/assets/swagger`
3. Download the release bundle of swagger-ui you want to upgrade to
-4. Copy all files from the bundle's `dist` folder into `js/assets/swagger`
-5. Open the new `js/assets/swagger/index.html` in an editor
-6. Add an HTML comment to the start of the file indicating the release version number,
-   e.g. `<!-- Version: swagger-ui 5.6.7 -->`
-7. Apply all changes from the old copy to the new file,
- these are indicated by code comments in the following format:
+4. Copy all files from the bundle's `dist` folder into `js/server/assets/swagger`,
+ but delete the unnecessary `*es-bundle*` and non-bundle files (`swagger-ui.*`)
+5. Open the new `js/server/assets/swagger/index.css` file in an editor
+6. Apply the style adjustments from the old copy to the new file, indicated by
+ code comments in the following format:
+ `/* #region ArangoDB-specific changes */` and `/* #endregion */`
+7. Open the new `js/server/assets/swagger/swagger-initializer.js` file in an editor
+8. Add a comment to the start of the file indicating the release version number,
+ e.g. `// Version: swagger-ui 5.6.7`
+9. Apply all code changes from the old copy to the new file,
+ indicated by code comments in the following format:
`#region ArangoDB-specific changes` and `#endregion`
-8. Verify the changes were applied correctly and discard the old copy of `index.html`
+10. Verify the changes were applied correctly and discard the old copies of
+ `index.css` and `swagger-initializer.js`
+11. Update the information in `LICENSES-OTHER-COMPONENTS.md` for swagger-ui
To verify the changes were applied correctly, start the ArangoDB server and
open the _Rest API_ documentation (_Support_ tab) in the ArangoDB web interface.
@@ -300,12 +310,34 @@ the _Execute_ button.
user is authorized to execute, the response should not indicate an
ArangoDB authentication error.
- This confirms the `requestInterceptor`-related changes were applied correctly.
+ This confirms the `requestInterceptor`-related changes for authentication
+ were applied correctly.
+
+* When using the `POST /_api/index#persistent` endpoint with any collection name,
+ the response URL should contain `?collection=` but not contain
+ `#persistent` anywhere.
+
+ This confirms the `requestInterceptor`-related changes for removing
+ fragment identifiers used for disambiguation in OpenAPI were applied correctly.
* All text in the API documentation should use readable color combinations.
The API documentation should NOT look obviously "broken" or "ugly".
- This indicates the stylistic CSS changes were applied correctly.
+  Text should consistently have a font size of 14px, NOT partially 12px or smaller.
+
+  Inline code should be black, NOT purple, and its background should have only
+  little padding, so that it only slightly overlaps with the background of
+  inline code in the line above or below.
+
+  Code blocks should have a background, but NOT the individual lines within them.
+  The font weight should be normal, NOT bold.
+
+  Models should NOT have a background, and expandable nested models should only
+  have a slightly larger font size than the properties. Property descriptions
+  should use a sans-serif font, NOT a monospace one.
+
+ This indicates the stylistic CSS changes were applied correctly and that the
+ HTML IDs and classes are unchanged.
* Scroll to the very end of the page and check the bottom right corner.
There should be NO badge reading _INVALID_.
diff --git a/3rdParty/V8/v7.9.317/src/base/logging.h b/3rdParty/V8/v7.9.317/src/base/logging.h
index f2f68725a65b..15d6e0e7e34f 100644
--- a/3rdParty/V8/v7.9.317/src/base/logging.h
+++ b/3rdParty/V8/v7.9.317/src/base/logging.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_LOGGING_H_
#define V8_BASE_LOGGING_H_
+#include <cstdint>
 #include <cstring>
 #include <sstream>
 #include <string>
diff --git a/3rdParty/V8/v7.9.317/src/base/macros.h b/3rdParty/V8/v7.9.317/src/base/macros.h
index 5f52a9893e6a..ac6e10fd8e4a 100644
--- a/3rdParty/V8/v7.9.317/src/base/macros.h
+++ b/3rdParty/V8/v7.9.317/src/base/macros.h
@@ -5,6 +5,7 @@
#ifndef V8_BASE_MACROS_H_
#define V8_BASE_MACROS_H_
+#include <cstdint>
 #include <limits>
 #include <type_traits>
diff --git a/3rdParty/V8/v7.9.317/src/inspector/v8-string-conversions.h b/3rdParty/V8/v7.9.317/src/inspector/v8-string-conversions.h
index c1d69c18f0a8..eb33c6816a58 100644
--- a/3rdParty/V8/v7.9.317/src/inspector/v8-string-conversions.h
+++ b/3rdParty/V8/v7.9.317/src/inspector/v8-string-conversions.h
@@ -5,6 +5,7 @@
#ifndef V8_INSPECTOR_V8_STRING_CONVERSIONS_H_
#define V8_INSPECTOR_V8_STRING_CONVERSIONS_H_
+#include <cstdint>
 #include <string>
// Conversion routines between UT8 and UTF16, used by string-16.{h,cc}. You may
diff --git a/3rdParty/V8/v7.9.317/src/utils/utils.h b/3rdParty/V8/v7.9.317/src/utils/utils.h
index b414a4c52b15..6fbf86d54216 100644
--- a/3rdParty/V8/v7.9.317/src/utils/utils.h
+++ b/3rdParty/V8/v7.9.317/src/utils/utils.h
@@ -329,8 +329,20 @@ class BitField final {
static constexpr int kLastUsedBit = kShift + kSize - 1;
static constexpr U kNumValues = U{1} << kSize;
+ // clang 16 complains here about out of range values
+#if defined(__clang__)
+#if __has_warning("-Wenum-constexpr-conversion")
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wenum-constexpr-conversion"
+#endif
+#endif
// Value for the field with all bits set.
  static constexpr T kMax = static_cast<T>(kNumValues - 1);
+#if defined(__clang__)
+#if __has_warning("-Wenum-constexpr-conversion")
+#pragma clang diagnostic pop
+#endif
+#endif
  template <class T2, int size2>
  using Next = BitField<T2, kShift + kSize, size2, U>;
diff --git a/3rdParty/abseil-cpp b/3rdParty/abseil-cpp
index 6e254b1c18a1..4e312c3a15f6 160000
--- a/3rdParty/abseil-cpp
+++ b/3rdParty/abseil-cpp
@@ -1 +1 @@
-Subproject commit 6e254b1c18a1561b134e4b8c18d236d5b77b8381
+Subproject commit 4e312c3a15f61c62afa543c4ed06128073074f4e
diff --git a/3rdParty/boost/1.78.0/boost/asio/ssl/detail/engine.hpp b/3rdParty/boost/1.78.0/boost/asio/ssl/detail/engine.hpp
index 37161f3ea082..10445dc1172e 100644
--- a/3rdParty/boost/1.78.0/boost/asio/ssl/detail/engine.hpp
+++ b/3rdParty/boost/1.78.0/boost/asio/ssl/detail/engine.hpp
@@ -122,6 +122,8 @@ class engine
engine(const engine&);
engine& operator=(const engine&);
+ BOOST_ASIO_DECL void clear();
+
// Callback used when the SSL implementation wants to verify a certificate.
BOOST_ASIO_DECL static int verify_callback_function(
int preverified, X509_STORE_CTX* ctx);
diff --git a/3rdParty/boost/1.78.0/boost/asio/ssl/detail/impl/engine.ipp b/3rdParty/boost/1.78.0/boost/asio/ssl/detail/impl/engine.ipp
index e4f09de978cd..d7c9d007c836 100644
--- a/3rdParty/boost/1.78.0/boost/asio/ssl/detail/impl/engine.ipp
+++ b/3rdParty/boost/1.78.0/boost/asio/ssl/detail/impl/engine.ipp
@@ -68,17 +68,7 @@ engine::engine(engine&& other) BOOST_ASIO_NOEXCEPT
engine::~engine()
{
- if (ssl_ && SSL_get_app_data(ssl_))
- {
-    delete static_cast<verify_callback_base*>(SSL_get_app_data(ssl_));
- SSL_set_app_data(ssl_, 0);
- }
-
- if (ext_bio_)
- ::BIO_free(ext_bio_);
-
- if (ssl_)
- ::SSL_free(ssl_);
+ clear();
}
#if defined(BOOST_ASIO_HAS_MOVE)
@@ -86,6 +76,7 @@ engine& engine::operator=(engine&& other) BOOST_ASIO_NOEXCEPT
{
if (this != &other)
{
+ clear();
ssl_ = other.ssl_;
ext_bio_ = other.ext_bio_;
other.ssl_ = 0;
@@ -95,6 +86,21 @@ engine& engine::operator=(engine&& other) BOOST_ASIO_NOEXCEPT
}
#endif // defined(BOOST_ASIO_HAS_MOVE)
+void engine::clear()
+{
+ if (ssl_ && SSL_get_app_data(ssl_))
+ {
+    delete static_cast<verify_callback_base*>(SSL_get_app_data(ssl_));
+ SSL_set_app_data(ssl_, 0);
+ }
+
+ if (ext_bio_)
+ ::BIO_free(ext_bio_);
+
+ if (ssl_)
+ ::SSL_free(ssl_);
+}
+
SSL* engine::native_handle()
{
return ssl_;
diff --git a/3rdParty/boost/1.78.0/boost/asio/ssl/detail/stream_core.hpp b/3rdParty/boost/1.78.0/boost/asio/ssl/detail/stream_core.hpp
index 72bb0609b664..1652c7713335 100644
--- a/3rdParty/boost/1.78.0/boost/asio/ssl/detail/stream_core.hpp
+++ b/3rdParty/boost/1.78.0/boost/asio/ssl/detail/stream_core.hpp
@@ -118,6 +118,7 @@ struct stream_core
input_buffer_space_ =
      BOOST_ASIO_MOVE_CAST(std::vector<unsigned char>)(
other.input_buffer_space_);
+ input_buffer_ = other.input_buffer_;
input_ = other.input_;
other.output_buffer_ = boost::asio::mutable_buffer(0, 0);
other.input_buffer_ = boost::asio::mutable_buffer(0, 0);
diff --git a/3rdParty/fuerte/include/fuerte/FuerteLogger.h b/3rdParty/fuerte/include/fuerte/FuerteLogger.h
index 84c217c5aeb1..59dad2cb7567 100644
--- a/3rdParty/fuerte/include/fuerte/FuerteLogger.h
+++ b/3rdParty/fuerte/include/fuerte/FuerteLogger.h
@@ -26,14 +26,15 @@
#if 0
 #include <iostream>
 #include <sstream>
+#include <string_view>
-extern void LogHackWriter(char const* p);
+extern void LogHackWriter(std::string_view p);
class LogHack {
std::stringstream _s;
public:
LogHack() {};
- ~LogHack() { LogHackWriter(_s.str().c_str()); };
+ ~LogHack() { LogHackWriter(_s.str()); };
  template <typename T> LogHack& operator<<(T const& o) { _s << o; return *this; }
  typedef std::basic_ostream<char, std::char_traits<char> > CoutType;
typedef CoutType& (*StandardEndLine)(CoutType&);
@@ -115,11 +116,4 @@ class LogHack {
if (0) std::cout
#endif
-#if ENABLE_FUERTE_LOG_NODE > 0
-#define FUERTE_LOG_NODE std::cout
-#else
-#define FUERTE_LOG_NODE \
- if (0) std::cout
-#endif
-
#endif
diff --git a/3rdParty/fuerte/include/fuerte/connection.h b/3rdParty/fuerte/include/fuerte/connection.h
index 0411436fdb57..d6e9345a4a56 100644
--- a/3rdParty/fuerte/include/fuerte/connection.h
+++ b/3rdParty/fuerte/include/fuerte/connection.h
@@ -175,6 +175,14 @@ class ConnectionBuilder {
return *this;
}
+#ifdef ARANGODB_USE_GOOGLE_TESTS
+ unsigned failConnectAttempts() const { return _conf._failConnectAttempts; }
+ ConnectionBuilder& failConnectAttempts(unsigned f) {
+ _conf._failConnectAttempts = f;
+ return *this;
+ }
+#endif
+
// Set the authentication type of the connection
AuthenticationType authenticationType() const {
return _conf._authenticationType;
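  // Usage sketch (an illustration, not part of this header): a test could
  // combine the new failConnectAttempts() hook with the rest of the builder.
  // EventLoopService, endpoint(), and connect() are assumed from fuerte's
  // public API here, and the endpoint URL is made up:
  //
  //   fuerte::EventLoopService loop;
  //   fuerte::ConnectionBuilder builder;
  //   builder.endpoint("http://localhost:8529");
  //   builder.failConnectAttempts(2);  // first two connect attempts fail
  //   auto conn = builder.connect(loop);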
diff --git a/3rdParty/fuerte/include/fuerte/loop.h b/3rdParty/fuerte/include/fuerte/loop.h
index 07277ea28e7c..92b2b4047c2d 100644
--- a/3rdParty/fuerte/include/fuerte/loop.h
+++ b/3rdParty/fuerte/include/fuerte/loop.h
@@ -27,9 +27,11 @@
#include
+#include
#include
#include
#include
+#include
// run / runWithWork / poll for Loop mapping to ioservice
// free function run with threads / with thread group barrier and work
@@ -38,8 +40,7 @@ namespace arangodb { namespace fuerte { inline namespace v1 {
// need partial rewrite so it can be better integrated in client applications
-typedef asio_ns::executor_work_guard<asio_ns::io_context::executor_type>
-    asio_work_guard;
+using asio_work_guard = asio_ns::executor_work_guard<asio_ns::io_context::executor_type>;
/// @brief EventLoopService implements single-threaded event loops
/// Idea is to shard connections across io context's to avoid
@@ -59,10 +60,7 @@ class EventLoopService {
EventLoopService& operator=(EventLoopService const& other) = delete;
// io_service returns a reference to the boost io_service.
-  std::shared_ptr<asio_ns::io_context>& nextIOContext() {
-    return _ioContexts[_lastUsed.fetch_add(1, std::memory_order_relaxed) %
-                       _ioContexts.size()];
-  }
+  std::shared_ptr<asio_ns::io_context>& nextIOContext();
asio_ns::ssl::context& sslContext();
diff --git a/3rdParty/fuerte/include/fuerte/types.h b/3rdParty/fuerte/include/fuerte/types.h
index ea647889fe7b..391c0a2a7ab5 100644
--- a/3rdParty/fuerte/include/fuerte/types.h
+++ b/3rdParty/fuerte/include/fuerte/types.h
@@ -216,10 +216,13 @@ struct ConnectionConfiguration {
_host("localhost"),
_port("8529"),
_verifyHost(false),
- _connectTimeout(15000),
+ _connectTimeout(60000),
_idleTimeout(300000),
_connectRetryPause(1000),
_maxConnectRetries(3),
+#ifdef ARANGODB_USE_GOOGLE_TESTS
+ _failConnectAttempts(0),
+#endif
_useIdleTimeout(true),
_authenticationType(AuthenticationType::None),
_user(""),
@@ -240,6 +243,9 @@ struct ConnectionConfiguration {
std::chrono::milliseconds _idleTimeout;
std::chrono::milliseconds _connectRetryPause;
unsigned _maxConnectRetries;
+#ifdef ARANGODB_USE_GOOGLE_TESTS
+ unsigned _failConnectAttempts;
+#endif
bool _useIdleTimeout;
AuthenticationType _authenticationType;
diff --git a/3rdParty/fuerte/src/AsioSockets.h b/3rdParty/fuerte/src/AsioSockets.h
index e3cda967bd71..a8da80570146 100644
--- a/3rdParty/fuerte/src/AsioSockets.h
+++ b/3rdParty/fuerte/src/AsioSockets.h
@@ -30,12 +30,29 @@
namespace arangodb { namespace fuerte { inline namespace v1 {
namespace {
-template <typename SocketT, typename F>
+template <typename SocketT, typename F, typename IsAbortedCb>
void resolveConnect(detail::ConnectionConfiguration const& config,
asio_ns::ip::tcp::resolver& resolver, SocketT& socket,
- F&& done) {
-  auto cb = [&socket, done(std::forward<F>(done))](auto ec, auto it) mutable {
+ F&& done, IsAbortedCb&& isAborted) {
+ auto cb = [&socket,
+#ifdef ARANGODB_USE_GOOGLE_TESTS
+ fail = config._failConnectAttempts > 0,
+#endif
+             done = std::forward<F>(done),
+             isAborted = std::forward<IsAbortedCb>(isAborted)](auto ec, auto it) mutable {
+#ifdef ARANGODB_USE_GOOGLE_TESTS
+ if (fail) {
+ // use an error code != operation_aborted
+ ec = boost::system::errc::make_error_code(boost::system::errc::not_enough_memory);
+ }
+#endif
+
+ if (isAborted()) {
+ ec = asio_ns::error::operation_aborted;
+ }
+
if (ec) { // error in address resolver
+ FUERTE_LOG_DEBUG << "received error during address resolving: " << ec.message() << "\n";
done(ec);
return;
}
@@ -44,7 +61,12 @@ void resolveConnect(detail::ConnectionConfiguration const& config,
// A successful resolve operation is guaranteed to pass a
// non-empty range to the handler.
asio_ns::async_connect(socket, it,
- [done(std::move(done))](auto ec, auto it) mutable {
+ [done](auto ec, auto it) mutable {
+ if (ec) {
+ FUERTE_LOG_DEBUG << "executing async connect callback, error: " << ec.message() << "\n";
+ } else {
+ FUERTE_LOG_DEBUG << "executing async connect callback, no error\n";
+ }
          std::forward<F>(done)(ec);
});
} catch (std::bad_alloc const&) {
@@ -63,12 +85,18 @@ void resolveConnect(detail::ConnectionConfiguration const& config,
auto it = resolver.resolve(config._host, config._port, ec);
cb(ec, it);
#else
- // Resolve the host asynchronous into a series of endpoints
+ // Resolve the host asynchronously into a series of endpoints
+ FUERTE_LOG_DEBUG << "scheduled callback to resolve host " << config._host << ":" << config._port << "\n";
resolver.async_resolve(config._host, config._port, std::move(cb));
#endif
}
} // namespace
+enum class ConnectTimerRole {
+ kConnect = 1,
+ kReconnect = 2,
+};
+
template <SocketType T>
struct Socket {};
@@ -77,14 +105,39 @@ struct Socket<SocketType::Tcp> {
Socket(EventLoopService&, asio_ns::io_context& ctx)
: resolver(ctx), socket(ctx), timer(ctx) {}
- ~Socket() { this->cancel(); }
+ ~Socket() {
+ try {
+ this->cancel();
+ } catch (std::exception const& ex) {
+ FUERTE_LOG_ERROR << "caught exception during tcp socket shutdown: " << ex.what() << "\n";
+ }
+ }
  template <typename F>
void connect(detail::ConnectionConfiguration const& config, F&& done) {
- resolveConnect(config, resolver, socket, std::forward(done));
-    resolveConnect(config, resolver, socket, std::forward<F>(done));
+    resolveConnect(config, resolver, socket, [this, done = std::forward<F>(done)](asio_ns::error_code ec) mutable {
+ if (canceled) {
+ // cancel() was already called on this socket
+ FUERTE_ASSERT(socket.is_open() == false);
+ ec = asio_ns::error::operation_aborted;
+ }
+ done(ec);
+ }, [this]() {
+ return canceled;
+ });
+ }
+
+ bool isOpen() const {
+ return socket.is_open();
}
+ void rearm() {
+ canceled = false;
+ }
+
void cancel() {
+ canceled = true;
try {
timer.cancel();
resolver.cancel();
@@ -92,23 +145,28 @@ struct Socket {
asio_ns::error_code ec;
socket.close(ec);
}
- } catch (...) {
+ } catch (std::exception const& ex) {
+ FUERTE_LOG_ERROR << "caught exception during tcp socket cancelation: " << ex.what() << "\n";
}
}
  template <typename F>
void shutdown(F&& cb) {
- asio_ns::error_code ec; // prevents exceptions
+ // ec is an out parameter here that is passed to the methods so they
+ // can fill in whatever error happened. we ignore it here anyway. we
+ // use the ec-variants of the methods here to prevent exceptions.
+ asio_ns::error_code ec;
try {
-#ifndef _WIN32
- socket.cancel(ec);
-#endif
+ timer.cancel(ec);
if (socket.is_open()) {
+ socket.cancel(ec);
socket.shutdown(asio_ns::ip::tcp::socket::shutdown_both, ec);
- ec.clear();
socket.close(ec);
}
- } catch (...) {
+ } catch (std::exception const& ex) {
+ // an exception is unlikely to occur here, as we are using the error-code
+ // variants of cancel/shutdown/close above
+ FUERTE_LOG_ERROR << "caught exception during tcp socket shutdown: " << ex.what() << "\n";
}
    std::forward<F>(cb)(ec);
}
@@ -116,21 +174,36 @@ struct Socket {
asio_ns::ip::tcp::resolver resolver;
asio_ns::ip::tcp::socket socket;
asio_ns::steady_timer timer;
+ ConnectTimerRole connectTimerRole = ConnectTimerRole::kConnect;
+ bool canceled = false;
};
template <>
struct Socket<SocketType::Ssl> {
Socket(EventLoopService& loop, asio_ns::io_context& ctx)
- : resolver(ctx), socket(ctx, loop.sslContext()), timer(ctx), cleanupDone(false) {}
+ : resolver(ctx), socket(ctx, loop.sslContext()), timer(ctx), ctx(ctx),
+ sslContext(loop.sslContext()), cleanupDone(false) {}
- ~Socket() { this->cancel(); }
+ ~Socket() {
+ try {
+ this->cancel();
+ } catch (std::exception const& ex) {
+ FUERTE_LOG_ERROR << "caught exception during ssl socket shutdown: " << ex.what() << "\n";
+ }
+ }
  template <typename F>
void connect(detail::ConnectionConfiguration const& config, F&& done) {
bool verify = config._verifyHost;
resolveConnect(
config, resolver, socket.next_layer(),
-        [=, this, done(std::forward<F>(done))](auto const& ec) mutable {
+ [=, this](asio_ns::error_code ec) mutable {
+ FUERTE_LOG_DEBUG << "executing ssl connect callback, ec: " << ec.message() << ", canceled: " << this->canceled << "\n";
+ if (canceled) {
+ // cancel() was already called on this socket
+ FUERTE_ASSERT(socket.lowest_layer().is_open() == false);
+ ec = asio_ns::error::operation_aborted;
+ }
if (ec) {
done(ec);
return;
@@ -167,20 +240,33 @@ struct Socket {
}
socket.async_handshake(asio_ns::ssl::stream_base::client,
std::move(done));
+ }, [this]() {
+ return canceled;
});
}
+
+ bool isOpen() const {
+ return socket.lowest_layer().is_open();
+ }
+ void rearm() {
+ // create a new socket and declare it ready
+    socket = asio_ns::ssl::stream<asio_ns::ip::tcp::socket>(this->ctx, this->sslContext);
+ canceled = false;
+ }
+
void cancel() {
+ canceled = true;
try {
timer.cancel();
resolver.cancel();
if (socket.lowest_layer().is_open()) { // non-graceful shutdown
asio_ns::error_code ec;
socket.lowest_layer().shutdown(asio_ns::ip::tcp::socket::shutdown_both, ec);
- ec.clear();
socket.lowest_layer().close(ec);
}
- } catch (...) {
+ } catch (std::exception const& ex) {
+ FUERTE_LOG_ERROR << "caught exception during ssl socket cancelation: " << ex.what() << "\n";
}
}
@@ -192,45 +278,51 @@ struct Socket<SocketType::Ssl> {
// socket is a member. This means that the allocation of the connection and
// this of the socket is kept until all asynchronous operations are completed
// (or aborted).
- asio_ns::error_code ec; // prevents exceptions
- socket.lowest_layer().cancel(ec);
+
+ // ec is an out parameter here that is passed to the methods so they
+ // can fill in whatever error happened. we ignore it here anyway. we
+ // use the ec-variants of the methods here to prevent exceptions.
+ asio_ns::error_code ec;
if (!socket.lowest_layer().is_open()) {
timer.cancel(ec);
      std::forward<F>(cb)(ec);
return;
}
+
+ socket.lowest_layer().cancel(ec);
cleanupDone = false;
- timer.expires_from_now(std::chrono::seconds(3));
- timer.async_wait([cb, this](asio_ns::error_code ec) {
- // Copy in callback such that the connection object is kept alive long
- // enough, please do not delete, although it is not used here!
- if (!cleanupDone && !ec) {
+ // implicitly cancels any previous timers
+ timer.expires_after(std::chrono::seconds(3));
+
+ socket.async_shutdown([cb, this](asio_ns::error_code ec) {
+ timer.cancel();
+ if (!cleanupDone) {
socket.lowest_layer().shutdown(asio_ns::ip::tcp::socket::shutdown_both, ec);
- ec.clear();
socket.lowest_layer().close(ec);
cleanupDone = true;
}
+ cb(ec);
});
-  socket.async_shutdown([cb(std::forward<F>(cb)), this](auto const& ec) {
- timer.cancel();
-#ifndef _WIN32
- if (!cleanupDone && (!ec || ec == asio_ns::error::basic_errors::not_connected)) {
- asio_ns::error_code ec2;
- socket.lowest_layer().shutdown(asio_ns::ip::tcp::socket::shutdown_both, ec2);
- ec2.clear();
- socket.lowest_layer().close(ec2);
+  timer.async_wait([cb(std::forward<F>(cb)), this](asio_ns::error_code ec) {
+ // Copy in callback such that the connection object is kept alive long
+ // enough, please do not delete, although it is not used here!
+ if (!ec && !cleanupDone) {
+ socket.lowest_layer().shutdown(asio_ns::ip::tcp::socket::shutdown_both, ec);
+ socket.lowest_layer().close(ec);
cleanupDone = true;
}
-#endif
- cb(ec);
});
}
asio_ns::ip::tcp::resolver resolver;
  asio_ns::ssl::stream<asio_ns::ip::tcp::socket> socket;
asio_ns::steady_timer timer;
+ asio_ns::io_context& ctx;
+ asio_ns::ssl::context& sslContext;
  std::atomic<bool> cleanupDone;
+ ConnectTimerRole connectTimerRole = ConnectTimerRole::kConnect;
+ bool canceled = false;
};
#ifdef ASIO_HAS_LOCAL_SOCKETS
@@ -238,36 +330,74 @@ template <>
struct Socket<SocketType::Unix> {
Socket(EventLoopService&, asio_ns::io_context& ctx)
: socket(ctx), timer(ctx) {}
- ~Socket() { this->cancel(); }
+
+ ~Socket() {
+ canceled = true;
+ try {
+ this->cancel();
+ } catch (std::exception const& ex) {
+ FUERTE_LOG_ERROR << "caught exception during unix socket shutdown: " << ex.what() << "\n";
+ }
+ }
  template <typename F>
void connect(detail::ConnectionConfiguration const& config, F&& done) {
+ if (canceled) {
+ // cancel() was already called on this socket
+ done(asio_ns::error::operation_aborted);
+ return;
+ }
+
asio_ns::local::stream_protocol::endpoint ep(config._host);
    socket.async_connect(ep, std::forward<F>(done));
}
+
+ bool isOpen() const {
+ return socket.is_open();
+ }
+ void rearm() {
+ canceled = false;
+ }
+
void cancel() {
- timer.cancel();
- if (socket.is_open()) { // non-graceful shutdown
- asio_ns::error_code ec;
- socket.close(ec);
+ canceled = true;
+ try {
+ timer.cancel();
+ if (socket.is_open()) { // non-graceful shutdown
+ asio_ns::error_code ec;
+ socket.close(ec);
+ }
+ } catch (std::exception const& ex) {
+ FUERTE_LOG_ERROR << "caught exception during unix socket cancelation: " << ex.what() << "\n";
}
}
  template <typename F>
void shutdown(F&& cb) {
- asio_ns::error_code ec; // prevents exceptions
- timer.cancel(ec);
- if (socket.is_open()) {
- socket.cancel(ec);
- socket.shutdown(asio_ns::ip::tcp::socket::shutdown_both, ec);
- socket.close(ec);
+ // ec is an out parameter here that is passed to the methods so they
+ // can fill in whatever error happened. we ignore it here anyway. we
+ // use the ec-variants of the methods here to prevent exceptions.
+ asio_ns::error_code ec;
+ try {
+ timer.cancel(ec);
+ if (socket.is_open()) {
+ socket.cancel(ec);
+ socket.shutdown(asio_ns::ip::tcp::socket::shutdown_both, ec);
+ socket.close(ec);
+ }
+ } catch (std::exception const& ex) {
+ // an exception is unlikely to occur here, as we are using the error-code
+ // variants of cancel/shutdown/close above
+ FUERTE_LOG_ERROR << "caught exception during unix socket shutdown: " << ex.what() << "\n";
}
    std::forward<F>(cb)(ec);
}
asio_ns::local::stream_protocol::socket socket;
asio_ns::steady_timer timer;
+ ConnectTimerRole connectTimerRole = ConnectTimerRole::kConnect;
+ bool canceled = false;
};
#endif // ASIO_HAS_LOCAL_SOCKETS
diff --git a/3rdParty/fuerte/src/GeneralConnection.h b/3rdParty/fuerte/src/GeneralConnection.h
index a8efe29bab73..f28acb50719e 100644
--- a/3rdParty/fuerte/src/GeneralConnection.h
+++ b/3rdParty/fuerte/src/GeneralConnection.h
@@ -74,6 +74,10 @@
#include "AsioSockets.h"
#include "debugging.h"
+#include
+#include
+#include
diff --git a/3rdParty/tzdata/theory.html b/3rdParty/tzdata/theory.html
--- a/3rdParty/tzdata/theory.html
+++ b/3rdParty/tzdata/theory.html
@@ -159,7 +162,7 @@ Timezone identifiers
-Names normally have the form
+Names normally have the format
AREA/LOCATION, where
AREA is a continent or ocean, and
LOCATION is a specific location within the area.
@@ -187,7 +190,7 @@
A name must not be empty, or contain '//', or
start or end with '/'.
+ Also, a name must not be 'Etc/Unknown', as
+ CLDR uses that string for an unknown or invalid timezone.
Do not use names that differ only in case.
@@ -218,10 +223,18 @@ Timezone identifiers
do not need locations, since local time is not defined there.
- If all the clocks in a timezone have agreed since 1970,
- do not bother to include more than one timezone
- even if some of the clocks disagreed before 1970.
+ If all clocks in a region have agreed since 1970,
+ give them just one name even if some of the clocks disagreed before 1970,
+ or reside in different countries or in notable or faraway locations.
Otherwise these tables would become annoyingly large.
+ For example, do not create a name Indian/Crozet
+ as a near-duplicate or alias of Asia/Dubai
+ merely because they are different countries or territories,
+ or their clocks disagreed before 1970, or the
+ Crozet Islands
+ are notable in their own right,
+ or the Crozet Islands are not adjacent to other locations
+ that use Asia/Dubai.
If boundaries between regions are fluid, such as during a war or
@@ -362,6 +375,11 @@ Timezone identifiers
but conforms to the older-version guidelines related to ISO 3166-1;
it lists only one country code per entry and unlike zone1970.tab
it can list names defined in backward.
+Applications that process only timestamps from now on can instead use the file
+zonenow.tab, which partitions the world more coarsely,
+into regions where clocks agree now and in the predicted future;
+this file is smaller and simpler than zone1970.tab
+and zone.tab.
@@ -373,7 +391,8 @@ Timezone identifiers
and no great weight should be attached to whether a link
is defined in backward or in some other file.
The source file etcetera defines names that may be useful
-on platforms that do not support POSIX-style TZ strings;
+on platforms that do not support proleptic TZ strings
+like <+08>-8;
no other source file other than backward
contains links to its zones.
One of etcetera's names is Etc/UTC,
@@ -420,8 +439,8 @@ Time zone abbreviations
In other words, in the C locale the POSIX extended regular
expression [-+[:alnum:]]{3,6} should match the
abbreviation.
- This guarantees that all abbreviations could have been specified by a
- POSIX TZ string.
+ This guarantees that all abbreviations could have been specified
+ explicitly by a POSIX proleptic TZ string.
@@ -571,8 +590,13 @@ Time zone abbreviations
locations while uninhabited.
The leading '-' is a flag that the UT offset is in
some sense undefined; this notation is derived
- from Internet
+ from Internet
RFC 3339.
+ (The abbreviation 'Z' that
+ Internet
+ RFC 9557 uses for this concept
+ would violate the POSIX requirement
+ of at least three characters in an abbreviation.)
part of many platforms, where the
primary use of this package is to update obsolete time-related files.
To do this, you may need to compile the time zone compiler
-'zic' supplied with this package instead of using the
-system 'zic', since the format of zic's
+zic supplied with this package instead of using the
+system zic, since the format of zic's
input is occasionally extended, and a platform may still be shipping
an older zic.
-
POSIX properties and limitations
+
+In POSIX, time display in a process is controlled by the
+environment variable TZ, which can have two forms:
+
+
+ A proleptic TZ value
+ like CET-1CEST,M3.5.0,M10.5.0/3 uses a complex
+ notation that specifies a single standard time along with daylight
+ saving rules that apply to all years past, present, and future.
+
+
+ A geographical TZ value
+ like Europe/Berlin names a location that stands for
+ civil time near that location, which can have more than
+ one standard time and more than one set of daylight saving rules,
+ to record timekeeping practice more accurately.
+ These names are defined by the tz database.
+
+
+
+
POSIX.1-2017 properties and limitations
+
+Some platforms support only the features required by POSIX.1-2017,
+and have not yet upgraded to POSIX.1-2024.
+Code intended to be portable to these platforms must deal
+with problems that were fixed in later POSIX editions.
+
+
+
+
+ POSIX.1-2017 does not require support for geographical TZ,
+ and there is no convenient and efficient way to determine
+ the UT offset and time zone abbreviation of arbitrary
+ timestamps, particularly for timezones
+ that do not fit into the POSIX model.
+
- In POSIX, time display in a process is controlled by the
- environment variable TZ.
- Unfortunately, the POSIX
- TZ string takes a form that is hard to describe and
- is error-prone in practice.
- Also, POSIX TZ strings cannot deal with daylight
+ The proleptic TZ string,
+ which is all that POSIX.1-2017 requires,
+ has a format that is hard to describe and is error-prone in practice.
+ Also, proleptic TZ strings cannot deal with daylight
saving time rules not based on the Gregorian calendar (as in
Morocco), or with situations where more than two time zone
abbreviations or UT offsets are used in an area.
- The POSIX TZ string takes the following form:
+ A proleptic TZ string has the following format:
@@ -950,7 +1007,7 @@ POSIX properties and limitations
- Here is an example POSIX TZ string for New
+ Here is an example proleptic TZ string for New
Zealand after 2007.
It says that standard time (NZST) is 12 hours ahead
of UT, and that daylight saving time
@@ -961,26 +1018,46 @@ POSIX properties and limitations
TZ='NZST-12NZDT,M9.5.0,M4.1.0/3'
- This POSIX TZ string is hard to remember, and
+ This proleptic TZ string is hard to remember, and
mishandles some timestamps before 2008.
- With this package you can use this instead:
+ With this package you can use a geographical TZ instead:
TZ='Pacific/Auckland'
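
A minimal sketch of the difference, assuming a POSIX C library
(setenv and tzset are POSIX; ctime converts to local time):

    #include <cstdio>
    #include <cstdlib>
    #include <ctime>

    int main() {
      std::time_t now = std::time(nullptr);
      // proleptic form: one standard time plus DST rules for all years
      setenv("TZ", "NZST-12NZDT,M9.5.0,M4.1.0/3", 1);
      tzset();
      std::printf("proleptic:    %s", std::ctime(&now));
      // geographical form: a name defined by the tz database
      setenv("TZ", "Pacific/Auckland", 1);
      tzset();
      std::printf("geographical: %s", std::ctime(&now));
    }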
+
+
+
+POSIX.1-2017 also has the limitations of POSIX.1-2024,
+discussed in the next section.
+
+
+
POSIX.1-2024 properties and limitations
+
+POSIX.1-2024 extends POSIX.1-2017 in the following significant ways:
+
+
+
+ POSIX.1-2024 requires support for geographical TZ.
+ Earlier POSIX editions require support only for proleptic TZ.
+
- POSIX does not define the DST transitions
- for TZ values like
- "EST5EDT".
- Traditionally the current US DST rules
- were used to interpret such values, but this meant that the
- US DST rules were compiled into each
- time conversion package, and when
- US time conversion rules changed (as in the United
- States in 1987 and again in 2007), all packages that
- interpreted TZ values had to be updated
- to ensure proper results.
+ POSIX.1-2024 requires struct tm
+ to have a UT offset member tm_gmtoff
+ and a time zone abbreviation member tm_zone.
+ Earlier POSIX editions lack this requirement.
+
+ DST transition times can range from −167:59:59
+ to 167:59:59 instead of merely from 00:00:00 to 24:59:59.
+ This allows for proleptic TZ strings
+ like "<-02>2<-01>,M3.5.0/-1,M10.5.0/0"
+ where the transition time −1:00 means 23:00 the previous day.
+
+
+
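
A short sketch of the second point, the new struct tm members (assuming a
platform that already provides tm_gmtoff and tm_zone, as glibc does):

    #include <cstdio>
    #include <ctime>

    int main() {
      std::time_t now = std::time(nullptr);
      struct tm tm;
      localtime_r(&now, &tm);  // POSIX thread-safe variant of localtime
      // UT offset in seconds east of UT, plus the time zone abbreviation
      std::printf("offset=%ld abbr=%s\n", (long) tm.tm_gmtoff, tm.tm_zone);
    }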
+However POSIX.1-2024, like earlier POSIX editions, has some limitations:
+
The TZ environment variable is process-global, which
makes it hard to write efficient, thread-safe applications that
@@ -998,16 +1075,34 @@ POSIX properties and limitations
handling daylight saving time shifts – as might be required to
limit phone calls to off-peak hours.
-
- POSIX provides no convenient and efficient way to determine
- the UT offset and time zone abbreviation of arbitrary
- timestamps, particularly for timezones
- that do not fit into the POSIX model.
-
POSIX requires that time_t clock counts exclude leap
seconds.
+
+ POSIX does not define the DST transitions
+ for TZ values like
+ "EST5EDT".
+ Traditionally the current US DST rules
+ were used to interpret such values, but this meant that the
+ US DST rules were compiled into each
+ time conversion package, and when
+ US time conversion rules changed (as in the United
+ States in 1987 and again in 2007), all packages that
+ interpreted TZ values had to be updated
+ to ensure proper results.
+
+
+
+
Extensions to POSIX in the
+tz code
+
+ The tz code defines some properties
+ left unspecified by POSIX, and attempts to support some
+ extensions to POSIX.
+
+
+
The tz code attempts to support all the
time_t implementations allowed by POSIX.
@@ -1021,25 +1116,18 @@ POSIX properties and limitations
and 40-bit integers are also used occasionally.
Although earlier POSIX versions allowed time_t to be a
floating-point type, this was not supported by any practical system,
- and POSIX.1-2013 and the tz code both
+ and POSIX.1-2013+ and the tz code both
require time_t to be an integer type.
-
-
-
Extensions to POSIX in the
-tz code
-
- The TZ environment variable is used in generating
- the name of a file from which time-related information is read
- (or is interpreted à la POSIX); TZ is no longer
- constrained to be a string containing abbreviations
- and numeric data as described above.
+ If the TZ environment variable uses the geographical format,
+ it is used in generating
+ the name of a file from which time-related information is read.
The file's format is TZif,
a timezone information format that contains binary data; see
- Internet
- RFC 8536.
+ Internet
+ RFC 9636.
The daylight saving time rules to be used for a
particular timezone are encoded in the
TZif file; the format of the file allows US,
@@ -1048,10 +1136,11 @@ Extensions to POSIX in the
abbreviations are used.
- It was recognized that allowing the TZ environment
+ When the tz code was developed in the 1980s,
+ it was recognized that allowing the TZ environment
variable to take on values such as 'America/New_York'
might cause "old" programs (that expect TZ to have a
- certain form) to operate incorrectly; consideration was given to using
+ certain format) to operate incorrectly; consideration was given to using
some other environment variable (for example, TIMEZONE)
to hold the string used to generate the TZif file's name.
In the end, however, it was decided to continue using
@@ -1064,15 +1153,6 @@ Extensions to POSIX in the
assume pre-POSIX TZ values.
-
- The code supports platforms with a UT offset member
- in struct tm, e.g., tm_gmtoff,
- or with a time zone abbreviation member in
- struct tm, e.g., tm_zone. As noted
- in Austin
- Group defect 1533, a future version of POSIX is planned to
- require tm_gmtoff and tm_zone.
-
Functions tzalloc, tzfree,
localtime_rz, and mktime_z for
@@ -1083,7 +1163,7 @@ Extensions to POSIX in the
and localtime_rz and mktime_z are
like localtime_r and mktime with an
extra timezone_t argument.
- The functions were inspired by NetBSD.
+ The functions were inspired by NetBSD.
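
For instance, a sketch assuming the NetBSD-style prototypes from the tz code
(timezone_t tzalloc(char const *), struct tm *localtime_rz(timezone_t,
time_t const *, struct tm *), void tzfree(timezone_t)), which not every
platform ships:

    #include <cstdio>
    #include <ctime>

    int main() {
      timezone_t tz = tzalloc("Europe/Berlin");  // no process-global TZ needed
      if (tz != nullptr) {
        std::time_t now = std::time(nullptr);
        struct tm tm;
        if (localtime_rz(tz, &now, &tm) != nullptr)
          std::printf("hour in Berlin: %d\n", tm.tm_hour);
        tzfree(tz);
      }
    }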
Negative time_t values are supported, on systems
@@ -1111,6 +1191,7 @@ POSIX features no longer needed
The POSIX tzname variable does not suffice and is no
longer needed.
+ It is planned to be removed in a future edition of POSIX.
To get a timestamp's time zone abbreviation, consult
the tm_zone member if available; otherwise,
use strftime's "%Z" conversion
@@ -1119,6 +1200,7 @@ POSIX features no longer needed
The POSIX daylight and timezone
variables do not suffice and are no longer needed.
+ They are planned to be removed in a future edition of POSIX.
To get a timestamp's UT offset, consult
the tm_gmtoff member if available; otherwise,
subtract values returned by localtime
@@ -1130,12 +1212,15 @@ POSIX features no longer needed
The tm_isdst member is almost never needed and most of
its uses should be discouraged in favor of the abovementioned
APIs.
+ It was intended as an index into the tzname variable,
+ but as mentioned previously that usage is obsolete.
Although it can still be used in arguments to
mktime to disambiguate timestamps near
a DST transition when the clock jumps back on
platforms lacking tm_gmtoff, this
- disambiguation does not work when standard time itself jumps back,
- which can occur when a location changes to a time zone with a
+ disambiguation works only for proleptic TZ strings;
+ it does not work in general for geographical timezones,
+ such as when a location changes to a time zone with a
lesser UT offset.
@@ -1152,8 +1237,8 @@ Other portability notes
Programs that in the past used the timezone function
may now examine localtime(&clock)->tm_zone
(if TM_ZONE is defined) or
- tzname[localtime(&clock)->tm_isdst]
- (if HAVE_TZNAME is nonzero) to learn the correct time
+ use strftime with a %Z conversion specification
+ to learn the correct time
zone abbreviation to use.
@@ -1273,13 +1358,13 @@ Leap seconds
Leap seconds were introduced in 1972 to accommodate the
difference between atomic time and the less regular rotation of the earth.
-Unfortunately they caused so many problems with civil
-timekeeping that they
-are planned
-to be discontinued by 2035, with some as-yet-undetermined
-mechanism replacing them, perhaps after the year 2135.
-Despite their impending obsolescence, a record of leap seconds is still
-needed to resolve timestamps from 1972 through 2035.
+Unfortunately they have caused so many problems with civil
+timekeeping that there are
+plans
+to discontinue them by 2035.
+Even if these plans come to fruition, a record of leap seconds will still be
+needed to resolve timestamps from 1972 through 2035,
+and there may also be a need to record whatever mechanism replaces them.
@@ -1369,6 +1454,12 @@ Time and time zones off Earth
the establishment of a reference timescale for the Moon, which has
days roughly equivalent to 29.5 Earth days, and where relativistic
effects cause clocks to tick slightly faster than on Earth.
+Also, NASA
+has been ordered
+to consider the establishment of Coordinated Lunar Time (LTC).
+It is not yet known whether the US and European efforts will result in
+multiple timescales on the Moon.
diff --git a/3rdParty/tzdata/version b/3rdParty/tzdata/version
index 41e28b248330..0846b7f265fa 100644
--- a/3rdParty/tzdata/version
+++ b/3rdParty/tzdata/version
@@ -1 +1 @@
-2023a
+2025a
diff --git a/3rdParty/tzdata/windowsZones.xml b/3rdParty/tzdata/windowsZones.xml
index 75b7dff71b08..7ec2ab619ade 100644
--- a/3rdParty/tzdata/windowsZones.xml
+++ b/3rdParty/tzdata/windowsZones.xml
@@ -30,7 +30,6 @@ For terms of use, see http://www.unicode.org/copyright.html
-
@@ -49,7 +48,7 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
@@ -60,7 +59,6 @@ For terms of use, see http://www.unicode.org/copyright.html
-
@@ -70,15 +68,14 @@ For terms of use, see http://www.unicode.org/copyright.html
-
-
+
+
-
-
+
+
-
@@ -97,10 +94,9 @@ For terms of use, see http://www.unicode.org/copyright.html
-
-
+
+
-
@@ -108,7 +104,7 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
@@ -133,9 +129,8 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
-
@@ -421,7 +416,7 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
@@ -538,7 +533,8 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
+
@@ -570,13 +566,12 @@ For terms of use, see http://www.unicode.org/copyright.html
-
-
+
+
-
@@ -653,7 +648,7 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
@@ -710,7 +705,7 @@ For terms of use, see http://www.unicode.org/copyright.html
-
+
diff --git a/3rdParty/tzdata/ziguard.awk b/3rdParty/tzdata/ziguard.awk
index 7a3404fa4fcc..c0acb72a0380 100644
--- a/3rdParty/tzdata/ziguard.awk
+++ b/3rdParty/tzdata/ziguard.awk
@@ -5,14 +5,10 @@
# This is not a general-purpose converter; it is designed for current tzdata.
# It just converts from current source to main, vanguard, and rearguard forms.
# Although it might be nice for it to be idempotent, or to be useful
-# for converting back and forth between vanguard and rearguard formats,
+# for converting back and forth between formats,
# it does not do these nonessential tasks now.
#
-# Although main and vanguard forms are currently equivalent,
-# this need not always be the case. When the two forms differ,
-# this script can convert either from main to vanguard form (needed then),
-# or from vanguard to main form (this conversion would be needed later,
-# after main became rearguard and vanguard became main).
+# This script can convert from main to vanguard form and vice versa.
# There is no need to convert rearguard to other forms.
#
# When converting to vanguard form, the output can use the line
@@ -145,12 +141,12 @@ DATAFORM != "main" {
}
# If this line should differ due to Portugal benefiting from %z if supported,
- # uncomment the desired version and comment out the undesired one.
- if ($0 ~ /^#?[\t ]+-[12]:00[\t ]+Port[\t ]+[%+-]/) {
- if (($0 ~ /%z/) == (DATAFORM == "vanguard")) {
- uncomment = in_comment
- } else {
+ # comment out the undesired version and uncomment the desired one.
+ if ($0 ~ /^#?[\t ]+-[12]:00[\t ]+((Port|W-Eur)[\t ]+[%+-]|-[\t ]+(%z|-01)[\t ]+1982 Mar 28)/) {
+ if (($0 ~ /%z/) == (DATAFORM == "rearguard")) {
comment_out = !in_comment
+ } else {
+ uncomment = in_comment
}
}
@@ -172,13 +168,8 @@ DATAFORM != "main" {
sub(/^/, "#")
}
- # Prefer %z in vanguard form, explicit abbreviations otherwise.
- if (DATAFORM == "vanguard") {
- sub(/^(Zone[\t ]+[^\t ]+)?[\t ]+[^\t ]+[\t ]+[^\t ]+[\t ]+[-+][^\t ]+/, \
- "&CHANGE-TO-%z")
- sub(/-00CHANGE-TO-%z/, "-00")
- sub(/[-+][^\t ]+CHANGE-TO-/, "")
- } else {
+ # Prefer explicit abbreviations in rearguard form, %z otherwise.
+ if (DATAFORM == "rearguard") {
if ($0 ~ /^[^#]*%z/) {
stdoff_column = 2 * ($0 ~ /^Zone/) + 1
rules_column = stdoff_column + 1
@@ -216,6 +207,11 @@ DATAFORM != "main" {
}
sub(/%z/, abbr)
}
+ } else {
+ sub(/^(Zone[\t ]+[^\t ]+)?[\t ]+[^\t ]+[\t ]+[^\t ]+[\t ]+[-+][^\t ]+/, \
+ "&CHANGE-TO-%z")
+ sub(/-00CHANGE-TO-%z/, "-00")
+ sub(/[-+][^\t ]+CHANGE-TO-/, "")
}
# Normally, prefer whole seconds. However, prefer subseconds
diff --git a/3rdParty/tzdata/zishrink.awk b/3rdParty/tzdata/zishrink.awk
index 66968e8648e0..c98dc6ae786d 100644
--- a/3rdParty/tzdata/zishrink.awk
+++ b/3rdParty/tzdata/zishrink.awk
@@ -162,7 +162,7 @@ function make_line(n, field, \
# Process the input line LINE and save it for later output.
function process_input_line(line, \
- f, field, end, i, n, r, startdef, \
+ f, field, end, n, outline, r, \
linkline, ruleline, zoneline)
{
# Remove comments, normalize spaces, and append a space to each line.
@@ -199,8 +199,10 @@ function process_input_line(line, \
}
# Abbreviate "max", "min", "only" and month names.
- gsub(/ max /, " ma ", line)
- gsub(/ min /, " mi ", line)
+ # Although "max" and "min" can both be abbreviated to just "m",
+ # the longer forms "ma" and "mi" are needed with zic 2023d and earlier.
+ gsub(/ max /, dataform == "vanguard" ? " m " : " ma ", line)
+ gsub(/ min /, dataform == "vanguard" ? " m " : " mi ", line)
gsub(/ only /, " o ", line)
gsub(/ Jan /, " Ja ", line)
gsub(/ Feb /, " F ", line)
@@ -234,66 +236,96 @@ function process_input_line(line, \
rule_used[r] = 1
}
- # If this zone supersedes an earlier one, delete the earlier one
- # from the saved output lines.
- startdef = ""
if (zoneline)
zonename = startdef = field[2]
else if (linkline)
zonename = startdef = field[3]
else if (ruleline)
zonename = ""
- if (startdef) {
- i = zonedef[startdef]
- if (i) {
- do
- output_line[i - 1] = ""
- while (output_line[i++] ~ /^[-+0-9]/);
- }
- }
- zonedef[zonename] = nout + 1
- # Save the line for later output.
- output_line[nout++] = make_line(n, field)
+ # Save the information for later output.
+ outline = make_line(n, field)
+ if (ruleline)
+ rule_output_line[nrule_out++] = outline
+ else if (linkline) {
+ # In vanguard format with Gawk, links are output sorted by destination.
+ if (dataform == "vanguard" && PROCINFO["version"])
+ linkdef[zonename] = field[2]
+ else
+ link_output_line[nlink_out++] = outline
+  } else
+ zonedef[zonename] = (zoneline ? "" : zonedef[zonename] "\n") outline
}
function omit_unused_rules( \
i, field)
{
- for (i = 0; i < nout; i++) {
- split(output_line[i], field)
- if (field[1] == "R" && !rule_used[field[2]]) {
- output_line[i] = ""
- }
+ for (i = 0; i < nrule_out; i++) {
+ split(rule_output_line[i], field)
+ if (!rule_used[field[2]])
+ rule_output_line[i] = ""
}
}
function abbreviate_rule_names( \
- abbr, f, field, i, n, r)
+ abbr, f, field, i, n, newdef, newline, r, \
+ zoneline, zonelines, zonename)
{
- for (i = 0; i < nout; i++) {
- n = split(output_line[i], field)
+ for (i = 0; i < nrule_out; i++) {
+ n = split(rule_output_line[i], field)
if (n) {
- f = field[1] == "Z" ? 4 : field[1] == "L" ? 0 : 2
- r = field[f]
+ r = field[2]
if (r ~ /^[^-+0-9]/) {
abbr = rule[r]
if (!abbr) {
rule[r] = abbr = gen_rule_name(r)
}
- field[f] = abbr
- output_line[i] = make_line(n, field)
+ field[2] = abbr
+ rule_output_line[i] = make_line(n, field)
}
}
}
+ for (zonename in zonedef) {
+ zonelines = split(zonedef[zonename], zoneline, /\n/)
+ newdef = ""
+ for (i = 1; i <= zonelines; i++) {
+ newline = zoneline[i]
+ n = split(newline, field)
+ f = i == 1 ? 4 : 2
+ r = rule[field[f]]
+ if (r) {
+ field[f] = r
+ newline = make_line(n, field)
+ }
+ newdef = (newdef ? newdef "\n" : "") newline
+ }
+ zonedef[zonename] = newdef
+ }
}
function output_saved_lines( \
- i)
+ i, zonename)
{
- for (i = 0; i < nout; i++)
- if (output_line[i])
- print output_line[i]
+ for (i = 0; i < nrule_out; i++)
+ if (rule_output_line[i])
+ print rule_output_line[i]
+
+ # When using gawk, output zones sorted by name.
+ # This makes the output a bit more compressible.
+ PROCINFO["sorted_in"] = "@ind_str_asc"
+ for (zonename in zonedef)
+ print zonedef[zonename]
+
+ if (nlink_out)
+ for (i = 0; i < nlink_out; i++)
+ print link_output_line[i]
+ else {
+ # When using gawk, output links sorted by destination.
+ # This also helps compressibility a bit.
+ PROCINFO["sorted_in"] = "@val_type_asc"
+ for (zonename in linkdef)
+ printf "L %s %s\n", linkdef[zonename], zonename
+ }
}
BEGIN {
diff --git a/3rdParty/tzdata/zone.tab b/3rdParty/tzdata/zone.tab
index dbcb61793eeb..d2be66359f3b 100644
--- a/3rdParty/tzdata/zone.tab
+++ b/3rdParty/tzdata/zone.tab
@@ -48,7 +48,7 @@ AR -3124-06411 America/Argentina/Cordoba Argentina (most areas: CB, CC, CN, ER,
AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN)
AR -2411-06518 America/Argentina/Jujuy Jujuy (JY)
AR -2649-06513 America/Argentina/Tucuman Tucuman (TM)
-AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH)
+AR -2828-06547 America/Argentina/Catamarca Catamarca (CT), Chubut (CH)
AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR)
AR -3132-06831 America/Argentina/San_Juan San Juan (SJ)
AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ)
@@ -87,7 +87,7 @@ BN +0456+11455 Asia/Brunei
BO -1630-06809 America/La_Paz
BQ +120903-0681636 America/Kralendijk
BR -0351-03225 America/Noronha Atlantic islands
-BR -0127-04829 America/Belem Para (east); Amapa
+BR -0127-04829 America/Belem Para (east), Amapa
BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB)
BR -0803-03454 America/Recife Pernambuco
BR -0712-04812 America/Araguaina Tocantins
@@ -107,21 +107,21 @@ BT +2728+08939 Asia/Thimphu
BW -2439+02555 Africa/Gaborone
BY +5354+02734 Europe/Minsk
BZ +1730-08812 America/Belize
-CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast)
-CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE
+CA +4734-05243 America/St_Johns Newfoundland, Labrador (SE)
+CA +4439-06336 America/Halifax Atlantic - NS (most areas), PE
CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton)
CA +4606-06447 America/Moncton Atlantic - New Brunswick
CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas)
CA +5125-05707 America/Blanc-Sablon AST - QC (Lower North Shore)
-CA +4339-07923 America/Toronto Eastern - ON, QC (most areas)
+CA +4339-07923 America/Toronto Eastern - ON & QC (most areas)
CA +6344-06828 America/Iqaluit Eastern - NU (most areas)
-CA +484531-0913718 America/Atikokan EST - ON (Atikokan); NU (Coral H)
-CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba
+CA +484531-0913718 America/Atikokan EST - ON (Atikokan), NU (Coral H)
+CA +4953-09709 America/Winnipeg Central - ON (west), Manitoba
CA +744144-0944945 America/Resolute Central - NU (Resolute)
CA +624900-0920459 America/Rankin_Inlet Central - NU (central)
CA +5024-10439 America/Regina CST - SK (most areas)
CA +5017-10750 America/Swift_Current CST - SK (midwest)
-CA +5333-11328 America/Edmonton Mountain - AB; BC (E); NT (E); SK (W)
+CA +5333-11328 America/Edmonton Mountain - AB, BC(E), NT(E), SK(W)
CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west)
CA +682059-1334300 America/Inuvik Mountain - NT (west)
CA +4906-11631 America/Creston MST - BC (Creston)
@@ -207,8 +207,8 @@ HT +1832-07220 America/Port-au-Prince
HU +4730+01905 Europe/Budapest
ID -0610+10648 Asia/Jakarta Java, Sumatra
ID -0002+10920 Asia/Pontianak Borneo (west, central)
-ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west)
-ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas
+ID -0507+11924 Asia/Makassar Borneo (east, south), Sulawesi/Celebes, Bali, Nusa Tengarra, Timor (west)
+ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya), Malukus/Moluccas
IE +5320-00615 Europe/Dublin
IL +314650+0351326 Asia/Jerusalem
IM +5409-00428 Europe/Isle_of_Man
@@ -264,8 +264,7 @@ MK +4159+02126 Europe/Skopje
ML +1239-00800 Africa/Bamako
MM +1647+09610 Asia/Yangon
MN +4755+10653 Asia/Ulaanbaatar most of Mongolia
-MN +4801+09139 Asia/Hovd Bayan-Olgiy, Govi-Altai, Hovd, Uvs, Zavkhan
-MN +4804+11430 Asia/Choibalsan Dornod, Sukhbaatar
+MN +4801+09139 Asia/Hovd Bayan-Olgii, Hovd, Uvs
MO +221150+1133230 Asia/Macau
MP +1512+14545 Pacific/Saipan
MQ +1436-06105 America/Martinique
@@ -311,7 +310,7 @@ PF -0900-13930 Pacific/Marquesas Marquesas Islands
PF -2308-13457 Pacific/Gambier Gambier Islands
PG -0930+14710 Pacific/Port_Moresby most of Papua New Guinea
PG -0613+15534 Pacific/Bougainville Bougainville
-PH +1435+12100 Asia/Manila
+PH +143512+1205804 Asia/Manila
PK +2452+06703 Asia/Karachi
PL +5215+02100 Europe/Warsaw
PM +4703-05620 America/Miquelon
@@ -355,7 +354,7 @@ RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River
RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky
RU +5934+15048 Asia/Magadan MSK+08 - Magadan
RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island
-RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); N Kuril Is
+RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E), N Kuril Is
RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka
RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea
RW -0157+03004 Africa/Kigali
@@ -418,7 +417,7 @@ US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver)
US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural)
US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer)
US +394421-1045903 America/Denver Mountain (most areas)
-US +433649-1161209 America/Boise Mountain - ID (south); OR (east)
+US +433649-1161209 America/Boise Mountain - ID (south), OR (east)
US +332654-1120424 America/Phoenix MST - AZ (except Navajo)
US +340308-1181434 America/Los_Angeles Pacific
US +611305-1495401 America/Anchorage Alaska (most areas)
diff --git a/3rdParty/tzdata/zone1970.tab b/3rdParty/tzdata/zone1970.tab
index 1f1cecb84856..5ded0565ebf3 100644
--- a/3rdParty/tzdata/zone1970.tab
+++ b/3rdParty/tzdata/zone1970.tab
@@ -37,7 +37,7 @@
#country-
#codes coordinates TZ comments
AD +4230+00131 Europe/Andorra
-AE,OM,RE,SC,TF +2518+05518 Asia/Dubai Crozet, Scattered Is
+AE,OM,RE,SC,TF +2518+05518 Asia/Dubai Crozet
AF +3431+06912 Asia/Kabul
AL +4120+01950 Europe/Tirane
AM +4011+04430 Asia/Yerevan
@@ -47,12 +47,13 @@ AQ -6736+06253 Antarctica/Mawson Mawson
AQ -6448-06406 Antarctica/Palmer Palmer
AQ -6734-06808 Antarctica/Rothera Rothera
AQ -720041+0023206 Antarctica/Troll Troll
+AQ -7824+10654 Antarctica/Vostok Vostok
AR -3436-05827 America/Argentina/Buenos_Aires Buenos Aires (BA, CF)
AR -3124-06411 America/Argentina/Cordoba most areas: CB, CC, CN, ER, FM, MN, SE, SF
AR -2447-06525 America/Argentina/Salta Salta (SA, LP, NQ, RN)
AR -2411-06518 America/Argentina/Jujuy Jujuy (JY)
AR -2649-06513 America/Argentina/Tucuman Tucumán (TM)
-AR -2828-06547 America/Argentina/Catamarca Catamarca (CT); Chubut (CH)
+AR -2828-06547 America/Argentina/Catamarca Catamarca (CT), Chubut (CH)
AR -2926-06651 America/Argentina/La_Rioja La Rioja (LR)
AR -3132-06831 America/Argentina/San_Juan San Juan (SJ)
AR -3253-06849 America/Argentina/Mendoza Mendoza (MZ)
@@ -81,7 +82,7 @@ BG +4241+02319 Europe/Sofia
BM +3217-06446 Atlantic/Bermuda
BO -1630-06809 America/La_Paz
BR -0351-03225 America/Noronha Atlantic islands
-BR -0127-04829 America/Belem Pará (east); Amapá
+BR -0127-04829 America/Belem Pará (east), Amapá
BR -0343-03830 America/Fortaleza Brazil (northeast: MA, PI, CE, RN, PB)
BR -0803-03454 America/Recife Pernambuco
BR -0712-04812 America/Araguaina Tocantins
@@ -99,19 +100,19 @@ BR -0958-06748 America/Rio_Branco Acre
BT +2728+08939 Asia/Thimphu
BY +5354+02734 Europe/Minsk
BZ +1730-08812 America/Belize
-CA +4734-05243 America/St_Johns Newfoundland; Labrador (southeast)
-CA +4439-06336 America/Halifax Atlantic - NS (most areas); PE
+CA +4734-05243 America/St_Johns Newfoundland, Labrador (SE)
+CA +4439-06336 America/Halifax Atlantic - NS (most areas), PE
CA +4612-05957 America/Glace_Bay Atlantic - NS (Cape Breton)
CA +4606-06447 America/Moncton Atlantic - New Brunswick
CA +5320-06025 America/Goose_Bay Atlantic - Labrador (most areas)
-CA,BS +4339-07923 America/Toronto Eastern - ON, QC (most areas)
+CA,BS +4339-07923 America/Toronto Eastern - ON & QC (most areas)
CA +6344-06828 America/Iqaluit Eastern - NU (most areas)
-CA +4953-09709 America/Winnipeg Central - ON (west); Manitoba
+CA +4953-09709 America/Winnipeg Central - ON (west), Manitoba
CA +744144-0944945 America/Resolute Central - NU (Resolute)
CA +624900-0920459 America/Rankin_Inlet Central - NU (central)
CA +5024-10439 America/Regina CST - SK (most areas)
CA +5017-10750 America/Swift_Current CST - SK (midwest)
-CA +5333-11328 America/Edmonton Mountain - AB; BC (E); NT (E); SK (W)
+CA +5333-11328 America/Edmonton Mountain - AB, BC(E), NT(E), SK(W)
CA +690650-1050310 America/Cambridge_Bay Mountain - NU (west)
CA +682059-1334300 America/Inuvik Mountain - NT (west)
CA +5546-12014 America/Dawson_Creek MST - BC (Dawson Cr, Ft St John)
@@ -126,7 +127,7 @@ CL -3327-07040 America/Santiago most of Chile
CL -5309-07055 America/Punta_Arenas Region of Magallanes
CL -2709-10926 Pacific/Easter Easter Island
CN +3114+12128 Asia/Shanghai Beijing Time
-CN,AQ +4348+08735 Asia/Urumqi Xinjiang Time, Vostok
+CN +4348+08735 Asia/Urumqi Xinjiang Time
CO +0436-07405 America/Bogota
CR +0956-08405 America/Costa_Rica
CU +2308-08222 America/Havana
@@ -171,8 +172,8 @@ HT +1832-07220 America/Port-au-Prince
HU +4730+01905 Europe/Budapest
ID -0610+10648 Asia/Jakarta Java, Sumatra
ID -0002+10920 Asia/Pontianak Borneo (west, central)
-ID -0507+11924 Asia/Makassar Borneo (east, south); Sulawesi/Celebes, Bali, Nusa Tengarra; Timor (west)
-ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya); Malukus/Moluccas
+ID -0507+11924 Asia/Makassar Borneo (east, south), Sulawesi/Celebes, Bali, Nusa Tengarra, Timor (west)
+ID -0232+14042 Asia/Jayapura New Guinea (West Papua / Irian Jaya), Malukus/Moluccas
IE +5320-00615 Europe/Dublin
IL +314650+0351326 Asia/Jerusalem
IN +2232+08822 Asia/Kolkata
@@ -182,7 +183,7 @@ IR +3540+05126 Asia/Tehran
IT,SM,VA +4154+01229 Europe/Rome
JM +175805-0764736 America/Jamaica
JO +3157+03556 Asia/Amman
-JP +353916+1394441 Asia/Tokyo
+JP,AU +353916+1394441 Asia/Tokyo Eyre Bird Observatory
KE,DJ,ER,ET,KM,MG,SO,TZ,UG,YT -0117+03649 Africa/Nairobi
KG +4254+07436 Asia/Bishkek
KI,MH,TV,UM,WF +0125+17300 Pacific/Tarawa Gilberts, Marshalls, Wake
@@ -208,8 +209,7 @@ MD +4700+02850 Europe/Chisinau
MH +0905+16720 Pacific/Kwajalein Kwajalein
MM,CC +1647+09610 Asia/Yangon
MN +4755+10653 Asia/Ulaanbaatar most of Mongolia
-MN +4801+09139 Asia/Hovd Bayan-Ölgii, Govi-Altai, Hovd, Uvs, Zavkhan
-MN +4804+11430 Asia/Choibalsan Dornod, Sükhbaatar
+MN +4801+09139 Asia/Hovd Bayan-Ölgii, Hovd, Uvs
MO +221150+1133230 Asia/Macau
MQ +1436-06105 America/Martinique
MT +3554+01431 Europe/Malta
@@ -246,12 +246,12 @@ PF -0900-13930 Pacific/Marquesas Marquesas Islands
PF -2308-13457 Pacific/Gambier Gambier Islands
PG,AQ,FM -0930+14710 Pacific/Port_Moresby Papua New Guinea (most areas), Chuuk, Yap, Dumont d'Urville
PG -0613+15534 Pacific/Bougainville Bougainville
-PH +1435+12100 Asia/Manila
+PH +143512+1205804 Asia/Manila
PK +2452+06703 Asia/Karachi
PL +5215+02100 Europe/Warsaw
PM +4703-05620 America/Miquelon
PN -2504-13005 Pacific/Pitcairn
-PR,AG,CA,AI,AW,BL,BQ,CW,DM,GD,GP,KN,LC,MF,MS,SX,TT,VC,VG,VI +182806-0660622 America/Puerto_Rico AST
+PR,AG,CA,AI,AW,BL,BQ,CW,DM,GD,GP,KN,LC,MF,MS,SX,TT,VC,VG,VI +182806-0660622 America/Puerto_Rico AST - QC (Lower North Shore)
PS +3130+03428 Asia/Gaza Gaza Strip
PS +313200+0350542 Asia/Hebron West Bank
PT +3843-00908 Europe/Lisbon Portugal (mainland)
@@ -287,13 +287,13 @@ RU +4310+13156 Asia/Vladivostok MSK+07 - Amur River
RU +643337+1431336 Asia/Ust-Nera MSK+07 - Oymyakonsky
RU +5934+15048 Asia/Magadan MSK+08 - Magadan
RU +4658+14242 Asia/Sakhalin MSK+08 - Sakhalin Island
-RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E); N Kuril Is
+RU +6728+15343 Asia/Srednekolymsk MSK+08 - Sakha (E), N Kuril Is
RU +5301+15839 Asia/Kamchatka MSK+09 - Kamchatka
RU +6445+17729 Asia/Anadyr MSK+09 - Bering Sea
SA,AQ,KW,YE +2438+04643 Asia/Riyadh Syowa
SB,FM -0932+16012 Pacific/Guadalcanal Pohnpei
SD +1536+03232 Africa/Khartoum
-SG,MY +0117+10351 Asia/Singapore peninsular Malaysia
+SG,AQ,MY +0117+10351 Asia/Singapore peninsular Malaysia, Concordia
SR +0550-05510 America/Paramaribo
SS +0451+03137 Africa/Juba
ST +0020+00644 Africa/Sao_Tome
@@ -329,7 +329,7 @@ US +470659-1011757 America/North_Dakota/Center Central - ND (Oliver)
US +465042-1012439 America/North_Dakota/New_Salem Central - ND (Morton rural)
US +471551-1014640 America/North_Dakota/Beulah Central - ND (Mercer)
US +394421-1045903 America/Denver Mountain (most areas)
-US +433649-1161209 America/Boise Mountain - ID (south); OR (east)
+US +433649-1161209 America/Boise Mountain - ID (south), OR (east)
US,CA +332654-1120424 America/Phoenix MST - AZ (most areas), Creston BC
US +340308-1181434 America/Los_Angeles Pacific
US +611305-1495401 America/Anchorage Alaska (most areas)
diff --git a/3rdParty/tzdata/zonenow.tab b/3rdParty/tzdata/zonenow.tab
new file mode 100644
index 000000000000..d2c1e48584f8
--- /dev/null
+++ b/3rdParty/tzdata/zonenow.tab
@@ -0,0 +1,296 @@
+# tzdb timezone descriptions, for users who do not care about old timestamps
+#
+# This file is in the public domain.
+#
+# From Paul Eggert (2023-12-18):
+# This file contains a table where each row stands for a timezone
+# where civil timestamps are predicted to agree from now on.
+# This file is like zone1970.tab (see zone1970.tab's comments),
+# but with the following changes:
+#
+# 1. Each timezone corresponds to a set of clocks that are planned
+# to agree from now on. This is a larger set of clocks than in
+# zone1970.tab, where each timezone's clocks must agree from 1970 on.
+# 2. The first column is irrelevant and ignored.
+# 3. The table is sorted in a different way:
+# first by standard time UTC offset;
+# then, if DST is used, by daylight saving UTC offset;
+# then by time zone abbreviation.
+# 4. Every timezone has a nonempty comments column, with wording
+# distinguishing the timezone only from other timezones with the
+# same UTC offset at some point during the year.
+#
+# The format of this table is experimental, and may change in future versions.
+#
+# This table is intended as an aid for users, to help them select timezones
+# appropriate for their practical needs. It is not intended to take or
+# endorse any position on legal or territorial claims.
+#
+#XX coordinates TZ comments
+#
+# -11 - SST
+XX -1416-17042 Pacific/Pago_Pago Midway; Samoa ("SST")
+#
+# -11
+XX -1901-16955 Pacific/Niue Niue
+#
+# -10 - HST
+XX +211825-1575130 Pacific/Honolulu Hawaii ("HST")
+#
+# -10
+XX -1732-14934 Pacific/Tahiti Tahiti; Cook Islands
+#
+# -10/-09 - HST / HDT (North America DST)
+XX +515248-1763929 America/Adak western Aleutians in Alaska ("HST/HDT")
+#
+# -09:30
+XX -0900-13930 Pacific/Marquesas Marquesas
+#
+# -09
+XX -2308-13457 Pacific/Gambier Gambier
+#
+# -09/-08 - AKST/AKDT (North America DST)
+XX +611305-1495401 America/Anchorage most of Alaska ("AKST/AKDT")
+#
+# -08
+XX -2504-13005 Pacific/Pitcairn Pitcairn
+#
+# -08/-07 - PST/PDT (North America DST)
+XX +340308-1181434 America/Los_Angeles Pacific ("PST/PDT") - US & Canada; Mexico near US border
+#
+# -07 - MST
+XX +332654-1120424 America/Phoenix Mountain Standard ("MST") - Arizona; western Mexico; Yukon
+#
+# -07/-06 - MST/MDT (North America DST)
+XX +394421-1045903 America/Denver Mountain ("MST/MDT") - US & Canada; Mexico near US border
+#
+# -06
+XX -0054-08936 Pacific/Galapagos Galápagos
+#
+# -06 - CST
+XX +1924-09909 America/Mexico_City Central Standard ("CST") - Saskatchewan; central Mexico; Central America
+#
+# -06/-05 (Chile DST)
+XX -2709-10926 Pacific/Easter Easter Island
+#
+# -06/-05 - CST/CDT (North America DST)
+XX +415100-0873900 America/Chicago Central ("CST/CDT") - US & Canada; Mexico near US border
+#
+# -05
+XX -1203-07703 America/Lima eastern South America
+#
+# -05 - EST
+XX +175805-0764736 America/Jamaica Eastern Standard ("EST") - Caymans; Jamaica; eastern Mexico; Panama
+#
+# -05/-04 - CST/CDT (Cuba DST)
+XX +2308-08222 America/Havana Cuba
+#
+# -05/-04 - EST/EDT (North America DST)
+XX +404251-0740023 America/New_York Eastern ("EST/EDT") - US & Canada
+#
+# -04
+XX +1030-06656 America/Caracas western South America
+#
+# -04 - AST
+XX +1828-06954 America/Santo_Domingo Atlantic Standard ("AST") - eastern Caribbean
+#
+# -04/-03 (Chile DST)
+XX -3327-07040 America/Santiago most of Chile
+#
+# -04/-03 - AST/ADT (North America DST)
+XX +4439-06336 America/Halifax Atlantic ("AST/ADT") - Canada; Bermuda
+#
+# -03:30/-02:30 - NST/NDT (North America DST)
+XX +4734-05243 America/St_Johns Newfoundland ("NST/NDT")
+#
+# -03
+XX -2332-04637 America/Sao_Paulo eastern South America
+#
+# -03/-02 (North America DST)
+XX +4703-05620 America/Miquelon St Pierre & Miquelon
+#
+# -02
+XX -0351-03225 America/Noronha Fernando de Noronha; South Georgia
+#
+# -02/-01 (EU DST)
+XX +6411-05144 America/Nuuk most of Greenland
+#
+# -01
+XX +1455-02331 Atlantic/Cape_Verde Cape Verde
+#
+# -01/+00 (EU DST)
+XX +3744-02540 Atlantic/Azores Azores
+#
+# +00 - GMT
+XX +0519-00402 Africa/Abidjan far western Africa; Iceland ("GMT")
+#
+# +00/+01 - GMT/BST (EU DST)
+XX +513030-0000731 Europe/London United Kingdom ("GMT/BST")
+#
+# +00/+01 - WET/WEST (EU DST)
+XX +3843-00908 Europe/Lisbon western Europe ("WET/WEST")
+#
+# +00/+02 - Troll DST
+XX -720041+0023206 Antarctica/Troll Troll Station in Antarctica
+#
+# +01 - CET
+XX +3647+00303 Africa/Algiers Algeria, Tunisia ("CET")
+#
+# +01 - WAT
+XX +0627+00324 Africa/Lagos western Africa ("WAT")
+#
+# +01/+00 - IST/GMT (EU DST in reverse)
+XX +5320-00615 Europe/Dublin Ireland ("IST/GMT")
+#
+# +01/+00 - (Morocco DST)
+XX +3339-00735 Africa/Casablanca Morocco
+#
+# +01/+02 - CET/CEST (EU DST)
+XX +4852+00220 Europe/Paris central Europe ("CET/CEST")
+#
+# +02 - CAT
+XX -2558+03235 Africa/Maputo central Africa ("CAT")
+#
+# +02 - EET
+XX +3254+01311 Africa/Tripoli Libya; Kaliningrad ("EET")
+#
+# +02 - SAST
+XX -2615+02800 Africa/Johannesburg southern Africa ("SAST")
+#
+# +02/+03 - EET/EEST (EU DST)
+XX +3758+02343 Europe/Athens eastern Europe ("EET/EEST")
+#
+# +02/+03 - EET/EEST (Egypt DST)
+XX +3003+03115 Africa/Cairo Egypt
+#
+# +02/+03 - EET/EEST (Lebanon DST)
+XX +3353+03530 Asia/Beirut Lebanon
+#
+# +02/+03 - EET/EEST (Moldova DST)
+XX +4700+02850 Europe/Chisinau Moldova
+#
+# +02/+03 - EET/EEST (Palestine DST)
+XX +3130+03428 Asia/Gaza Palestine
+#
+# +02/+03 - IST/IDT (Israel DST)
+XX +314650+0351326 Asia/Jerusalem Israel
+#
+# +03
+XX +4101+02858 Europe/Istanbul Near East; Belarus
+#
+# +03 - EAT
+XX -0117+03649 Africa/Nairobi eastern Africa ("EAT")
+#
+# +03 - MSK
+XX +554521+0373704 Europe/Moscow Moscow ("MSK")
+#
+# +03:30
+XX +3540+05126 Asia/Tehran Iran
+#
+# +04
+XX +2518+05518 Asia/Dubai Russia; Caucasus; Persian Gulf; Seychelles; Réunion
+#
+# +04:30
+XX +3431+06912 Asia/Kabul Afghanistan
+#
+# +05
+XX +4120+06918 Asia/Tashkent Russia; Kazakhstan; Tajikistan; Turkmenistan; Uzbekistan; Maldives
+#
+# +05 - PKT
+XX +2452+06703 Asia/Karachi Pakistan ("PKT")
+#
+# +05:30
+XX +0656+07951 Asia/Colombo Sri Lanka
+#
+# +05:30 - IST
+XX +2232+08822 Asia/Kolkata India ("IST")
+#
+# +05:45
+XX +2743+08519 Asia/Kathmandu Nepal
+#
+# +06
+XX +2343+09025 Asia/Dhaka Russia; Kyrgyzstan; Bhutan; Bangladesh; Chagos
+#
+# +06:30
+XX +1647+09610 Asia/Yangon Myanmar; Cocos
+#
+# +07
+XX +1345+10031 Asia/Bangkok Russia; Indochina; Christmas Island
+#
+# +07 - WIB
+XX -0610+10648 Asia/Jakarta Indonesia ("WIB")
+#
+# +08
+XX +0117+10351 Asia/Singapore Russia; Brunei; Malaysia; Singapore; Concordia
+#
+# +08 - AWST
+XX -3157+11551 Australia/Perth Western Australia ("AWST")
+#
+# +08 - CST
+XX +3114+12128 Asia/Shanghai China ("CST")
+#
+# +08 - HKT
+XX +2217+11409 Asia/Hong_Kong Hong Kong ("HKT")
+#
+# +08 - PHT
+XX +143512+1205804 Asia/Manila Philippines ("PHT")
+#
+# +08 - WITA
+XX -0507+11924 Asia/Makassar Indonesia ("WITA")
+#
+# +08:45
+XX -3143+12852 Australia/Eucla Eucla
+#
+# +09
+XX +5203+11328 Asia/Chita Russia; Palau; East Timor
+#
+# +09 - JST
+XX +353916+1394441 Asia/Tokyo Japan ("JST"); Eyre Bird Observatory
+#
+# +09 - KST
+XX +3733+12658 Asia/Seoul Korea ("KST")
+#
+# +09 - WIT
+XX -0232+14042 Asia/Jayapura Indonesia ("WIT")
+#
+# +09:30 - ACST
+XX -1228+13050 Australia/Darwin Northern Territory ("ACST")
+#
+# +09:30/+10:30 - ACST/ACDT (Australia DST)
+XX -3455+13835 Australia/Adelaide South Australia ("ACST/ACDT")
+#
+# +10
+XX +4310+13156 Asia/Vladivostok Russia; Yap; Chuuk; Papua New Guinea; Dumont d'Urville
+#
+# +10 - AEST
+XX -2728+15302 Australia/Brisbane Queensland ("AEST")
+#
+# +10 - ChST
+XX +1328+14445 Pacific/Guam Mariana Islands ("ChST")
+#
+# +10/+11 - AEST/AEDT (Australia DST)
+XX -3352+15113 Australia/Sydney southeast Australia ("AEST/AEDT")
+#
+# +10:30/+11
+XX -3133+15905 Australia/Lord_Howe Lord Howe Island
+#
+# +11
+XX -0613+15534 Pacific/Bougainville Russia; Kosrae; Bougainville; Solomons
+#
+# +11/+12 (Australia DST)
+XX -2903+16758 Pacific/Norfolk Norfolk Island
+#
+# +12
+XX +5301+15839 Asia/Kamchatka Russia; Tuvalu; Fiji; etc.
+#
+# +12/+13 (New Zealand DST)
+XX -3652+17446 Pacific/Auckland New Zealand ("NZST/NZDT")
+#
+# +12:45/+13:45 (Chatham DST)
+XX -4357-17633 Pacific/Chatham Chatham Islands
+#
+# +13
+XX -210800-1751200 Pacific/Tongatapu Kanton; Tokelau; Samoa (western); Tonga
+#
+# +14
+XX +0152-15720 Pacific/Kiritimati Kiritimati
diff --git a/3rdParty/velocypack b/3rdParty/velocypack
index 3e3e9416c478..bea8fc3afa7a 160000
--- a/3rdParty/velocypack
+++ b/3rdParty/velocypack
@@ -1 +1 @@
-Subproject commit 3e3e9416c478e2f12282d6dc4a619fd5d5f39918
+Subproject commit bea8fc3afa7a9800a563f71c032519bae9d8477e
diff --git a/ARANGO-VERSION b/ARANGO-VERSION
index 3b55403a0c10..c0fe2930c667 100644
--- a/ARANGO-VERSION
+++ b/ARANGO-VERSION
@@ -1 +1 @@
-3.11.0-devel
+3.11.14.1
diff --git a/CHANGELOG b/CHANGELOG
index 5107f1f967ed..183025ff1ae9 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,4635 +1,7492 @@
-devel
------
+v3.11.14.2 (XXXX-XX-XX)
+-----------------------
-* Fix incompatibility between 3.9 and 3.10 w.r.t. to serialization of AQL array
- filters (i.e. `[* FILTER ...]`). The array filters were serialized in a
- different way in 3.9 than they are serialized in 3.10. 3.10 also expected the
- new serialization format when unserializing a plan.
- The fix now enables support for both formats.
+* Updated ArangoDB Starter to v0.18.17 and arangosync to v2.19.17.
-* Fixed issue #18769: Input validation allows invalid UTF-8 code points.
-
- This change enforces the validation of UTF-8 surrogate pairs in incoming JSON
- data. Previously, the following loopholes existed when validating UTF-8
- surrogate pair data:
- - a high surrogate, followed by something other than a low surrogate (or the
- end of the string)
- - a low surrogate, not preceeded by a high surrogate
- These loopholes are now closed, which means that any JSON inputs with invalid
- surrogate pair data will be rejected by the server.
+* Rebuilt included rclone v1.62.2 with go1.23.12 and non-vulnerable
+ dependencies.
- Note that the extended validation for surrogates can be turned off along with
- other UTF-8 string validation by setting the server startup option
- `--server.validate-utf8-strings` to `false`. This is not recommended though,
- but should only be used in situations when a database is known to contain
- invalid data and must continue supporting it.
+* Upgraded OpenSSL to 3.5.2 and OpenLDAP to 2.6.10.
-* Updated rclone to v1.62.2 custom build with go1.20.3.
+* Fix BTS-2139: GeoJSON coordinate validation now properly accepts both 2D and 3D
+ coordinates for Point geometries.
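+
+  For illustration, both of the following Point geometries (with made-up
+  coordinates; GeoJSON uses [longitude, latitude] order, with an optional
+  third element for the altitude) are now accepted:
+
+    { "type": "Point", "coordinates": [6.537, 50.332] }
+    { "type": "Point", "coordinates": [6.537, 50.332, 245.0] }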
-* Changed return code of APIs that create databases from previously 1229
- (`ERROR_ARANGO_DATABASE_NAME_INVALID`) to 1208 (`ERROR_ARANGO_ILLEGAL_NAME`)
- in case an invalid database name is used.
- This is a downwards-incompatible change, but unifies the behavior for
- database creation with the behavior of collection and view creation,
- which also return error 1208 in case the specified name is invalid.
-* FE-236: bugfix - remove unused files, use new tooltip in views UI.
+v3.11.14.1 (2025-07-13)
+-----------------------
-* FE-238: Added auto-login support in core web UI - disabled logout when
- auto-login is enabled, set sessionStorage "jwtUser" value when login is
- skipped.
+* Fix a bug in the PERCENTILE function in AQL, which gave the incorrect result
+ null for small percentiles. This fixes BTS-2169.
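+
+  For example (a hypothetical query in arangosh; the values are made up):
+
+    // with a percentile as small as 1, this previously returned null
+    db._query('RETURN PERCENTILE([15, 23, 42, 50, 99], 1)').toArray();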
-* FE-233: bugfix - fix query spotlight search not working.
-* FE-349: bugfix - filter out empty primarySort field in UI.
+v3.11.14 (2025-05-14)
+---------------------
-* FE-247: bugfix - missing storedValues field in persistent index form.
+* Optimize wait and detach in futures.
-* FE 242, FE-244: bugfix - add support for cache fields, fix inverted index name
- undefined.
+* Updated ArangoDB Starter to v0.18.15.
-* FE-241: bugfix - filter predefined queries based on search term.
+* Updated arangosync to v2.19.15.
-* Adjusted timeouts for cluster internal commit and abort requests to withstand
- network delays better. This fixes some problems when the networking
- infrastructure delays requests.
+* Switched to a customized fork of rclone v1.62.2 (instead of the official
+  build), built with go1.23.8 to avoid CVEs.

-* Added sent time accounting and some metrics to fuerte and the NetworkFeature.
- This can detect delays in the network infrastructure.
+* Upgraded OpenSSL to 3.5.0.
-* Added startup option `--server.ensure-whitespace-metrics-format`, which
- controls whether additional whitespace is used in the metrics output
- format. If set to `true`, then whitespace is emitted between the exported
- metric value and the preceeding token (metric name or labels).
- Using whitespace may be required to make the metrics output compatible with
- some processing tools, although Prometheus itself doesn't need it.
+* Remove overeager assert, only relevant for testing.
- The option defaults to `true`, which adds additional whitespace by default.
+* Fix TSAN issue in LogicalCollection.
-* Automatically repair revision trees after several failed shard
- synchronization attempts. This can help to get permanently out-of-sync
- shards back into sync.
+* Fix BTS-2100: Due to a priority inversion it was possible that a lot of
+ scheduled SynchronizeShard actions blocked higher priority
+ TakeoverShardLeadership actions in the cluster Maintenance. This
+ could lead to service interruption during upgrades and after failovers.
- The functionality can be turned off by setting the startup option
- `--replication.auto-repair-revision-trees` to `false` on DB-Servers.
+* Fix a bug in the index API /_api/index?withHidden=true, which can lead to
+  two problems: (1) a newly created index is potentially not shown at the very
+  moment it is finished; (2) a newly created index is shown twice in
+ the result, once with `isBuilding: true` and once without.
+ This fixes BTS-2044.
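+
+  A minimal sketch of the affected call in arangosh (the collection name is
+  made up):
+
+    // withHidden=true also returns indexes that are still being built
+    var res = arango.GET("/_api/index?collection=products&withHidden=true");
+    res.indexes.forEach(function (idx) {
+      print(idx.id, idx.isBuilding ? "(building)" : "");
+    });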
-* SEARCH-461: Added option "--arangosearch.columns-cache-only-leader". Used only
- on EE DBServers. Default is false.
- If set to true only leader shards have ArangoSearch caches enabled - this will
- reduce RAM usage. In case of failover happens - in background caches are
- populated for the new leader. Some queries that run at during a failover may
- still run without caches.
+* Fix concurrency bug which can lead to lost threads. See BTS-2087.
-* FE-216: bugfix - make view patches async in the UI.
+* The agency supervision was clearing "finished" jobs too quickly from
+  Target/Finished and Failed. They are now kept for at least 1h.
-* FE-212: bugfix: links not getting removed when copying from another view in
- UI.
-* SEARCH-466 Fix leaking into individual link definition inherited properties
- from view.
+v3.11.13 (2025-02-07)
+---------------------
-* FE-222: Fix - Allow additional properties in arangosearch, allow no fields in
- inverted index when 'includeAllFIelds' is true.
+* Disable cluster overwhelm protection on agents. This fixes a bug which happens
+ if the number of dbservers and coordinators grows beyond 4x the number of
+ threads in the agency leader.
-* APM-183: Support UTF-8 on UI (collection/view/index names).
+* Improve geo index performance in the cluster with multiple shards. This fixes
+ BTS-2046. An unnecessary and bad SORT node is removed from the query plan in
+ the case that a geo index is used.
-* FE-199: Remove URL handling of fields on view screen.
+* BTS-2074: fix installing Foxx service from zip file upload using the web UI
+ with multi-coordinator load balancing.
-* MDS-1098: In 3.10 we have introduced an optimization on Traversals to pull
- post-filter conditions into the traversal-statements, like the following:
+* Updated OpenLDAP to 2.6.9.
- FOR v,e,p IN 10 OUTBOUND @start GRAPH "myGraph"
- FILTER v.isRelevant == true
- RETURN p
+* Swagger UI: Remove fragment identifiers from request URLs. They are merely
+ used to disambiguate polymorphic endpoints in the OpenAPI descriptions of the
+ HTTP API documentation.
- If the comparison side contains a variable and the same variable is used as
- the start vertex e.g. like this:
+* Update tzdata as of 23.01.2025.
- FOR candidate IN ["vertices/1", "vertices/2"]
- FOR v,e,p IN 1 OUTBOUND candidate GRAPH "myGraph"
- FILTER e.MostLikedNeighbor == candidate
- RETURN v
+* Rebuilt included rclone v1.62.2 with go1.22.10 and non-vulnerable
+ dependencies.
- There is a chance that we prematurely discarded this variable (candidate in
- the example) if it is not used later. This has lead to incorrect results.
+* Updated ArangoDB Starter to v0.18.12 and arangosync to v2.19.12.
-* Changed the behavior of the following JavaScript functions in arangosh and
- arangod (e.g. when used from a Foxx service):
+* Introduce expiry time for idle TCP/IP connections in the ConnectionCache (for
+ `SimpleHttpClient`) with a default of 120s. This is to prevent errors in
+ replication caused by cloud environments terminating connections. Also add
+  retries in a few places. Also increase the timeout for transferring up to
+  5000 documents in initial sync from 25s to 900s. This addresses BTS-2011,
+  BTS-2042 and BTS-2035.
- - `db..dropIndex(id)`: this function now throws if no index
- exists with the specified id. Previously the function only returned the
- value `false`.
- - `db._dropIndex(id)`: this function now throws if no index exists with the
- specified id. Previously the function only returned the value `false`.
+* Don't clean up failed agency jobs if they are subjobs of pending jobs.
+ This avoids a bug in CleanOutServer jobs, where such a job could complete
+ seemingly successfully despite the fact that some MoveShard jobs had actually
+ failed. This fixes BTS-2022.
- These changes are not downwards-compatible, but they can be easily worked
- around by wrapping dropIndex calls into a try ... catch.
+* Upgraded OpenSSL to 3.4.0.
- The HTTP API for dropping indexes is not affected by these changes, as it
- previously returned HTTP 404 already when the specified index could not be
- found.
+* Fix shard synchronisation race where, after a shard move, the new leader
+  informs followers before it updates Current in the Agency. In some cases the
+  old leader subsequently fell out of sync.
-* Added `--dump-views` option to arangodump, to control whether arangosearch
- view definitions should be stored as part of the dump. The option defaults
- to `true`.
+* BTS-2017: fix CleanOutServer for satellite collections.
-* APM-183: optionally allow special characters and Unicode characters in
- collection names, view names and index names.
+* BTS-2014: Fix delay if write hits early after a hotbackup restore.
- This feature allows toggling the naming convention for collection names,
- view names and index names from the previous strict mode, which only allowed
- selected ASCII characters, to an extended, more relaxed mode. The extended
- mode allows additional ASCII characters as well as non-ASCII UTF-8 characters
- in database names, collection names, index names and view names.
- The extended mode can be enabled by setting the new startup option
- - `--database.extended-names`
- to true. It is turned off by default and requires an explicit opt-in, simply
- because some drivers and client applications may not be ready for it yet.
- The arangod server, the ArangoDB web interface and the following bundled
- client tools are prepared and ready for using the extended names:
- - arangobench
- - arangodump
- - arangoexport
- - arangoimport
- - arangorestore
- - arangosh
- More tools and the drivers shipped by ArangoDB may be added to the list in
- the future.
+* Fix leader resignation race in coordinator, which led to forgotten collection
+  read locks on dbservers, which in turn could lead to deadlocks in the cluster.
- Please note that the extended names should not be turned on during upgrades
- from previous versions, but only once the upgrade has been completed
- successfully. In addition, the extended names should not be used in
- environments that require extracting data into a previous version of
- ArangoDB, or when database dumps may be restored into a previous version
- of ArangoDB. This is because older versions will not be able to handle the
- extended names.
- Finally, it should not be turned on in environments in which drivers are
- in use that haven't been prepared to work with the extended naming
- convention.
- Warning: turning on the `--database.extended-names` option for a deployment
- requires it to stay enabled permanently, i.e. it can be changed
- from `false` to `true` but not back. When enabling it, it is also required
- to do this consistently on all coordinators and DB servers.
+v3.11.12 (2024-10-31)
+---------------------
- The extended names for databases, collections, views and indexes will be
- enabled by default in one of the future releases of ArangoDB, once enough
- drivers and other client tools have had the chance to adapt.
+* Update tzdata as of 31.10.2024.
-* FE-200: Adds smart & enterprise graph support in the UI.
+* BTS-1792: Call `setLocale` earlier during startup to prevent crashes.
-* Forward the `ttl` cursor option for AQL queries in the JavaScript API
- from the `db._query()` and the `db._createStatement()` methods to the server.
+* Change dumping of VPack to JSON for large double values with absolute
+ value between 2^53 and 2^64. This ensures that dumps to JSON followed
+ by parsing of the JSON back to VPack retain the right numerical values.
+ This fixes a problem in arangodump/arangorestore as well as in replication.
+  Furthermore, JSON output from the API now shows numbers in this range
+  with their correct numerical value.
-* APM-407: add an optimization for inserting multiple documents at the same
- time via an AQL INSERT query.
+* Updated ArangoDB Starter to v0.18.10.
- There is an optimizer rule `optimize-cluster-multiple-document-operations`,
- which fires in case an AQL query has one of the patterns
- - `FOR doc IN @docs INSERT doc INTO ...` (where `@docs` is a bind parameter
- with an array of documents to be inserted),
- - `FOR doc IN [...] INSERT doc INTO ...` (where the FOR loop iterates over
- an array of input documents known at query compile time),
- - `LET docs = [...] FOR doc IN docs INSERT doc INTO ...` (where the documents
- set up by the LET are some static documents known at query compile time),
+* Allow an `arangod` server to start up, even if the option
+  `--database.extended-names` is set to inconsistent values across the
+  cluster. It has been found that bailing out in this case does more
+  harm than good.
- If a query has such pattern, and all the following restrictions are met, then
- the optimization is triggered:
+* Upgraded OpenSSL to 3.3.2.
- - there are no following RETURN nodes (including any RETURN OLD, RETURN NEW)
- - the FOR loop is not contained in another outer FOR loop or subquery
- - there are no other nodes (e.g. LET, FILTER) between the FOR and the INSERT
- - the INSERT is not used on a SmartGraph edge collection
- - the FOR loop is iterating over a constant, deterministic expression
+* Fix data race in cluster selectivity estimates.
- The optimization will then add a `MultipleRemoteExecutionNode` to the query
- execution plan, which will care about inserting all documents into the
- collection in one go. Further optimizer rules are skipped if the optimization
- is triggered.
+* Fix blockage in SynchronizeShard: the agency communication done there now
+  uses skipScheduler to avoid being blocked when all scheduler threads are
+  busy. This solves a problem found in RTA on upgrade with resignLeadership.
- Future versions of ArangoDB may lift some of the restrictions for the query
- pattern, so that the optimization may be triggered in more cases in the
- future.
-* FE-200: Add canvas interactions to Graph Viewer.
+v3.11.11 (2024-09-04)
+---------------------
-* Fixed BTS-1292: Added automatic cleanup of dangling ArangoSearch links.
+* Updated ArangoDB Starter to v0.18.9 and arangosync to v2.19.9.
-* FE-218: Updated WebUI dependencies.
+* Rebuilt included rclone v1.62.2 with go1.22.6 and non-vulnerable dependencies.
-* Fixed statistics values for writes executed and writes ignored when a query
- is executed using the rule `optimize-cluster-single-document-operations`.
- It was always increasing the amount of writes executed, even if the operation
- wasn't successful, and also never increasing the amount of writes ignored
- when needed.
+* FE-380: prevent crash when redirecting from query view after page reload.
-* Make REST API `/_admin/shutdown` sleep for only half a second until it
- initiates the server shutdown. Previously it slept for 2 seconds, but half
- a second should already be enough to send the server's response out.
+* Add API GET /_admin/cluster/vpackSortMigration/status to query the status
+ of the vpack sorting migration on dbservers, single servers and
+ coordinators.
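+
+  For example, in arangosh:
+
+    // query the current state of the VPack sorting migration
+    arango.GET("/_admin/cluster/vpackSortMigration/status");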
-* MDS-1001: Performance improvement in AQL. If you are using a traversal like
- `FOR v, e, p IN <....>` and later in the query access the last vertex on the path e.g.:
- `FILTER p.vertices[-1].name == "ArangoDB"` it will now be transformed to
- `FILTER v.name == "ArangoDB"` which is an equivalent statement. The latter however
- is cheaper to compute, as we do not need to create an in-memory representation
- of the path. Furthermore we can apply additional optimizations on `v` which are not
- possible on `p`. The same optimization holds true for `p.edges[-1]` which is equivalent
- to `e`. The optimization rule for this is called "optimize-traversal-last-element-access".
+* FE-378: fix fields containing spaces during index creation on web UI.
-* Updated arangosync to v2.16.1.
+* Fixed MDS-1225: reloading of AQL user-defined functions inside AQL queries
+  could cause trouble if the `_aqlfunctions` collection is located on a
+  different DB server than the leader shards of other collections used in the
+  same query.
-* FE-142: Updates indices view list & index addition to React.
+* Fixed FE-435: drop collections checkbox in graph settings modal was not
+ aligned.
-* A Pregel execution now stores its state during and after execution
- into a system collection. To read or delete entries the new API
- `/_api/control_pregel/history[/]` has been added. Additionally, the
- Pregel JavaScript module has been extended to support access as well.
- Read history `.history()`.
- Remove history `.removeHistory()`.
+* Fix comparison of numeric values in AQL to bring it in line with the
+ now correct VPack numerical sorting.
-* SEARCH-300: Fixed a rare case when arangosearch data folders might be left on
- disk after database is dropped.
+* Include LANGUAGE file in hotbackups. This is necessary to be able to detect
+ locale changes across a hotbackup create/restore process.
-* Marked all memory-mapping options for Pregel as obsolete;
- The memory mapping code was removed as it did not provide any advantages
- over spilling into system-provided swap space.
+* Fix sorting behaviour of VelocyPack values w.r.t. numbers. This has an
+ impact on indexes indexing VPackValues. Therefore, after an upgrade the
+ old sorting order will be retained to allow smooth upgrades. Newly started
+ instances with a fresh database directory will only use the new sorting
+ method. There is also a migration API under
+ GET /_admin/cluster/vpackSortMigration/check and
+ PUT /_admin/cluster/vpackSortMigration/migrate to check for problematic
+  indexes and, provided there are none, to migrate the instance to
+ the new sorting order.
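+
+  A possible workflow, sketched in arangosh:
+
+    // check for indexes that are affected by the changed sort order
+    arango.GET("/_admin/cluster/vpackSortMigration/check");
+    // if no problematic indexes are reported, switch to the new order
+    arango.PUT("/_admin/cluster/vpackSortMigration/migrate", {});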
-* FE-139 adds new search view type (search-alias).
+* Forcefully kill all running AQL queries on server shutdown.
+ This allows for a faster server shutdown even if there are some long-running
+ AQL queries ongoing.
-* Fixed BTS-902 (clicking on the search icon in the analyzers filter input used
- to take the user to the collections view).
+* Fixed MDS-1216: restoring the previous value of the "padded" key generator
+  could lead to the key generator's sequence being set to too low a value
+  after recovery.
-* Ran automated migrations on all .scss files to remove deprecated division
- operator usage.
+* Add syslog client implementation for Windows. This allows one to configure
+ syslog logging for Windows deployments.
-* Fixed ES-1508: (EE only) when deleting edges in a SmartGraph via
- DELETE /_api/document/{collection} using _key or _id values as document
- selectors, the INBOUND and OUTBOUND entries of the SmartEdges could diverge.
- Using a document like {_key: "xxxx"} as a selector was always correct.
- Now _key and _id variants are supported as intended.
+* Use __nss_configure_lookup to opt out of /etc/nsswitch.conf.
+ Add the startup option --honor-nsswitch to cancel the opt-out.
-* BTS-1272: Fixed metric `arangodb_connection_pool_connections_current`. In
- some cases where multiple connections to a server are canceled the metric
- could miss-count, as for now it only counted individually closed connections.
- The wrong counted situations are: other server crashes, restore of a
- HotBackup, rotation of JWT secret.
+* Added `verifyCertificates` attribute option for the `requests` JavaScript
+ module. This option defaults to false, so that no certificates will be
+ verified in an HTTPS connection made with the `requests` module. If the option
+ is set to true, the server certificate of the remote server will be verified
+ using the default certificate store of the system.
+ There is also a `verifyDepth` attribute to limit the maximum length of the
+ certificate chain that counts as valid.
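+
+  A minimal sketch, assuming the module referred to is `@arangodb/request`
+  (the URL is a placeholder):
+
+    const request = require('@arangodb/request');
+    // verify the remote server's certificate against the system's
+    // default certificate store, following at most 5 chain links
+    const res = request.get('https://example.com/', {
+      verifyCertificates: true,
+      verifyDepth: 5
+    });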
-* SEARCH-279 Fix consistency during update/replace operations
- for arangosearch links and inverted indexes.
-* Updated arangosync to v2.16.1-preview-1.
+v3.11.10 (2024-06-28)
+---------------------
-* APM-294: Added telemetrics API that gathers anonymous feature usage
- statistics from a deployment. The API is accessible via the endpoint
- `/_admin/telemetrics`. The API is enabled by default in release builds, but
- disabled by default in maintainer mode. It can be explicitly turned on/off
- with the server startup parameter `--server.telemetrics-api`.
- The required access privileges to access the telemetrics API can be
- configured via the server startup option `--server.support-info-api`.
- The telemetrics API is used by the arangosh: every time the arangosh is
- started, it will send a request to the connected server to gather telemetrics
- from the `/_admin/telemetrics` endpoint. The telemetrics data are then
- sent to an aggregation service that is run by ArangoDB.
+* Update timezone database as of 25.06.2024.
-* Added the following metrics for WAL file tracking:
- - `rocksdb_live_wal_files_size`: cumulated size of alive WAL files (not
- archived)
- - `rocksdb_archived_wal_files_size`: cumulated size of archive WAL files
+* Rebuilt included rclone v1.62.2 with go1.22.4.
-* By default, start pruning of archived WAL files 60 seconds after server
- start. Previously, pruning of WAL files started 180 seconds after server
- startup.
+* Sort out a thread blockage on AQL upsert waiting for replication.
-* Set default threshold value for automatic column flushing to 20 live WAL
- files (previously: 10 files), and retry flushing every 30 minutes (previous
- interval: every 60 minutes).
+* BTS-1909, MDS-1232: Fixed a bug where COLLECT ... AGGREGATE x = UNIQUE(y)
+ could miss some results when multiple shards were aggregated in a cluster.
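+
+  An example of the affected query shape (collection and attribute names
+  are made up), run from arangosh:
+
+    db._query(`
+      FOR doc IN orders
+        COLLECT region = doc.region
+        AGGREGATE customers = UNIQUE(doc.customer)
+        RETURN { region: region, customers: customers }
+    `).toArray();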
-* APM-283: Use parallel gather in almost all queries. The only case where we
- cannot use parallel gather is when using traversals, although there are some
- exceptions for disjoint SmartGraphs where the traversal can run completely on
- the local DB-server. All other queries should now be able to parallelize the
- gather node. This can not only speed up queries quite significantly, but also
- overcomes issues with the previous serial processing within gather nodes,
- which could lead to high memory usage on coordinators caused by buffering of
- documents other shards, and timeouts on some DB-Servers because query parts
- were idle for too long.
+* FE-454: Fix saving document in "Tree" mode.
-* Added support to log response bodies as well as HTTP headers (incoming
- and outgoing), when the requests log topic is set to TRACE.
+* Fix a potential data corruption in a collection's Merkle tree, in case
+ a write operation in a streaming transaction ran into the transaction's
+ maximum size limit and failed. If the error was ignored and the transaction
+ committed, the leftovers of the failed operation were not removed from
+  the collection's in-memory Merkle tree buffer and were thus committed as
+  well.
+ This could later cause issues with shards not getting properly in sync.
-* Changed path were test scripts locate configuration files from `etc/relative`
- to `etc/testing`. These paths contain `arangosh.conf`, which we were reading
- from `etc/relative` in test environment.
+* Updated OpenSSL to 3.3.1 and OpenLDAP to 2.6.8.
-* Made the return code configurable that is delivered if a write fails because
- the write concern is not fulfilled (not enough in-sync replicas available).
- Previously (and now by default), a code of HTTP 403 is returned and the
- request returns immediately. If the command line option
- --cluster.failed-write-concern-status-code=503
- is set, then HTTP 503 is returned. Note that no cluster-internal retry
- is happening, such that a client is informed right away about the problem.
- Retry loops have to be organized in the client program.
+* Switch production compiler to clang++ (16.0.6) and runtime to glibc (2.39.0).
-* Added support for sending gzip-compressed responses from the server.
- Previously only deflated responses were supported.
+* Updated ArangoDB Starter to v0.18.6.
-* Stabilized resilience tests. The assumption that an AQL query can run
- without error directly after a leader has been stopped, is wrong.
+* Sort out a thread blockage on AQL write waiting for replication.
-* Auto-flush RocksDB WAL files and in-memory column family data if the number
- of live WAL files exceeds a certain threshold. This is to make sure that
- WAL files are moved to the archive when there are a lot of live WAL files
- present (e.g. after a restart; in this case RocksDB does not count any
- previously existing WAL files when calculating the size of WAL files and
- comparing it `max_total_wal_size`.
- The feature can be configured via the following startup options:
- - `--rocksdb.auto-flush-min-live-wal-files`: minimum number of live WAL
- files that triggers an auto-flush. Defaults to `10`.
- - `--rocksdb.auto-flush-check-interval`: interval (in seconds) in which
- auto-flushes are executed. Defaults to `3600`.
- Note that an auto-flush is only executed if the number of live WAL files
- exceeds the configured threshold and the last auto-flush is longer ago than
- the configured auto-flush check interval. That way too frequent auto-flushes
- can be avoided.
+* Reduce the possibility of cache stampedes during collection count cache
+ refilling.
+
+* Move resolution of replication callbacks on leader DB-Servers to the
+ scheduler's HIGH prio lane. The HIGH lane is justified here because the
+ callback resolution must make progress, as it can unblock another thread
+ waiting in an AQL write query for the replication to return.
-* Updated arangosync to v2.16.0-preview-1.
+* Skip the scheduler for AQL query shutdown on coordinators.
+ During query shutdown, we definitely want to skip the scheduler, as in some
+ cases the thread that orders the query shutdown can be blocked and needs to
+ wait synchronously until the shutdown requests have been responded to.
-* Added the following metrics for WAL file tracking:
- - `rocksdb_live_wal_files`: number of alive WAL files (not archived)
- - `rocksdb_wal_released_tick_flush`: lower bound sequence number from which
- onwards WAL files will be kept (i.e. not deleted from the archive) because
- of external flushing needs. Candidates for these are arangosearch links
- and background index creation.
- - `rocksdb_wal_released_tick_replication`: lower bound sequence number from
- which onwards WAL files will be kept because they may be needed by the
- replication.
- - `arangodb_flush_subscriptions`: number of currently active flush
- subscriptions.
+* Avoid dropping of followers in case a leader resigns and then comes back.
-* Updated internal JavaScript dependencies:
+* Move network retry requests to a dedicated thread.
- - @xmldom/xmldom: 0.8.0 -> 0.8.6
- - accepts: 1.3.7 -> 1.3.8
- - ajv: 8.10.0 -> 8.12.0
- - ansi_up: 5.0.1 -> 5.1.0
- - content-disposition: 0.5.3 -> 0.5.4
- - content-type: 1.0.4 -> 1.0.5
- - error-stack-parser: 2.0.6 -> 2.1.4
- - mime-types: 2.1.31 -> 2.1.35
- - semver: 7.3.5 -> 7.3.8
+* Fixed BTS-1813: Fixed an AQL error that could lead to range-check exceptions
+ in certain queries with subqueries.
-* FE-135: Add new Graph Viewer with vis.js and change the UI.
+* Reduce the frequency in which hot backup upload / download progress
+ is reported to the agency. This limits traffic from DB servers to the agency
+ during hot backup uploads and downloads.
-* Updated arangosync to v2.15.0.
+* Fix file counting in hotbackup upload in case of errors in remote-to-remote
+ copies. Also prevent spurious rclone errors in S3.
-* FE-19: Updated ArangoDB logo in web interface.
+* Use posix_spawn instead of fork/exec for subprocesses. This solves a
+ performance issue during hotbackup upload.
-* Make the hashed variant of AQL COLLECT support INTO clauses too.
- Previously only the sorted variant of AQL COLLECT supported INTO clauses.
+* Added new REST API endpoint HTTP DELETE `/_admin/log/level` to reset all log
+ levels to their startup values. The log levels will be reset to their factory
+ defaults unless they were overridden via a configuration file or command-line
+ options. In this case, the log levels will be reset to the value they were
+ configured to at startup.
+ All modifications to the log levels since the instance startup will be lost
+ when calling this API.
-* Upgraded OpenSSL to 3.0.8.
+ This API is useful for tools that temporarily change log levels but do not
+ want to fetch and remember the previous log levels settings. Such tools can
+ now simply call the new API to restore the original log levels.
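+
+  For example, in arangosh (the topic name is just an example):
+
+    // temporarily raise the log level for one topic ...
+    arango.PUT("/_admin/log/level", { requests: "TRACE" });
+    // ... and later restore all levels configured at startup
+    arango.DELETE("/_admin/log/level");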
-* FE-174: Change ViewsUI layout to single-page instead of tabs.
+* Fix a potential race in collectRebalanceInformation.
-* Allow usage of projections and covering indexes in more cases.
- Previously, projections were not used if there were complex filter conditions
- on the index attribute(s) that contained the `[*]` expansion operator
- with inline FILTERs or RETURNs, e.g.
- `FILTER doc.addrs[* FILTER CURRENT.country == 'US'].zip`.
-* PRESUPP-546: make AQL optimizer rule `simplify-conditions` correctly report
- that it was triggered. Previously that rule never reported that it was
- triggered although even though it actually was.
+v3.11.9 (2024-05-18)
+--------------------
-* Added startup option `--rocksdb.auto-refill-index-caches-on-followers` to
- control whether automatic refilling of in-memory caches should happen on
- followers or just leaders. The default value is `true`, i.e. refilling
- happens on followers too.
+* Fixed the listDatabases API: directly after adding a new empty DBServer, or
+  after restoring a Hotbackup, listDatabases() would return an empty list of
+  existing databases, which goes back to normal quickly. This bug only affected
+  the APIs exposing the list of database names; all databases in fact still
+  exist and are fully functional.
-* Added new geo_s2 ArangoSearch analyzer (Enterprise Only).
+* Remove artificial upper bounds for the command-line options regarding the
+ number of IO threads: `--server.io-threads` and `--network.io-threads` were
+  previously limited to 64 and 16, respectively. Now there is no upper bound
+  for these options anymore. The default values remain unchanged.
-* GORDO-1554: Fixes invalid document insertion with invalid user-specified
- keys (e.g. numeric values) into EnterpriseGraph related vertices.
+* Fix potentially hanging threads during index creation if starting one of the
+ parallel index creation threads returned an error.
-* Add peak memory usage to the query object details for queries in the slow
- query history and in the list of currently running queries. The peak memory
- usage is also returned via REST APIs as `peakMemoryUsage`.
+* Fix connection retry attempts for cluster-internal TLS connections that ran
+  into the 15-second timeout during the connection establishment attempt.
+  In this case, the low-level socket was repurposed, but not reset properly.
+  This could leave the connection in an improper state and lead to callbacks
+  for some requests not being invoked as expected.
+ The connection timeout was also increased from 15 seconds to 60 seconds.
-* Provide options for configuring and enabling RocksDB's blob storage (BlobDB)
- for large documents in the documents column family.
- This is currently an experimental feature.
+* FE-448: auto-repair collection document JSON on save.
- The following experimental options are available:
+* Retry cluster query shutdown in case no connection can be made to the
+ DB-Servers for the shutdown request.
- - `--rocksdb.enable-blob-files`: Enable the usage of blob files for the
- documents column family. This option defaults to `false`. All following
- options are only relevant if this option is set to `true`.
- - `--rocksdb.min-blob-size`: Size threshold for storing large documents in
- blob files (in bytes, 0 = store all documents in blob files).
- - `--rocksdb.blob-file-size`: Size limit for blob files in the documents
- column family (in bytes).
- - `--rocksdb.blob-compression-type`: Compression algorithm to use for blob
- data in the documents column family.
- - `--rocksdb.enable-blob-garbage-collection`: Enable blob garbage collection
- during compaction in the documents column family.
- - `--rocksdb.blob-garbage-collection-age-cutoff`: Age cutoff for garbage
- collecting blob files in the documents column family (percentage value from
- 0 to 1 determines how many blob files are garbage collected during
- compaction).
- - `--rocksdb.blob-garbage-collection-force-threshold`: Garbage ratio
- threshold for scheduling targeted compactions for the oldest blob files
- in the documents column family.
+* Improved the time required to create a new collection in a database with
+  hundreds of collections. This also improves the times for creating indexes
+  and dropping collections.
-* FE-132: Added query sorting (in web UI) by modified date, option to sort
- order.
+* Prioritize requests for committing or aborting streaming transactions on
+ leaders and followers, because they can unblock other operations. Also
+ prioritize requests in already started streaming transactions and AQL queries
+ over new requests because it is assumed that already started streaming
+ transactions and AQL queries can block other operations from starting, so
+ these should be completed first.
+
+ The following request priorities have changed:
+ - cluster-internal requests for continuing already started AQL queries have
+    changed their priority from low to medium. This excludes requests that set
+    up AQL queries on DB servers, which still run with low priority.
+ - requests that include the transaction id header are now elevated to run with
+ medium priority if they originally ran with low priority. This excludes
+ requests that begin new transactions or AQL queries.
+ - requests to commit or abort an already running streaming transaction will be
+ elevated from low to medium priority.
+  - requests to HTTP GET `/_api/collection//shards` and GET
+    `/_api/collection//responsibleShard` are now running with high
+ priority instead of low. Such requests are only used for inspection and have
+ no dependencies.
+ - requests to push further an existing query cursor via the `/_api/cursor` are
+ now running with medium priority instead of low priority. Requests to start
+ new queries still run with low priority.
+ - follower requests that acquire the lock on the leader while the follower
+ tries to get in sync are now running with low instead of medium priority.
+
+* Updated ArangoDB Starter to v0.18.5.
+
+* Detach threads in getResponsibleServers if they wait for more than 1s.
+
+* Fix an issue that can cause AQL-related RestHandlers to wind up in a WAITING
+ state and never get woken up. This implies that the associated query snippet
+ and transaction cannot be released. If the query contains modification
+ operations, this also implies that the associated collection write lock is not
+ released and can therefore prevent other threads from acquiring the exclusive
+  lock (which is required e.g. by the replication).
+
+* Fix an issue that can cause some background load in parallel traversals while
+ waiting for data from upstream.
+ For full details see https://github.com/arangodb/arangodb/pull/20768.
+
+* BTS-1856: Fix a data race inside the Query class during accesses to a private
+  property (`execStats`).
+
+* Fixed SEARCH-485 (ES-2010): Search files disappeared during creation of
+ HotBackup.
+
+* BTS-1840: fix a connection/TCP leak in active failover mode in case no leader
+  change happened for a long time.
+
+* Fix potential deadlocks in "fast lock round" when starting transactions in
+ cluster. The "fast lock round" is used to send out transaction begin requests
+ to multiple leaders concurrently, without caring about the order in which
+ requests are send out. This can potentially lead to deadlock with other
+  requests are sent out. This can potentially lead to deadlocks with other
+ a very low timeout for these requests, so that deadlocks would be detected and
+ rolled back quickly. However, there was a code path that triggered the "fast
+ lock round" with long request timeouts, which could lead to long wait times
+ until deadlocks were detected and rolled back.
+
+* Fixed BTS-1489: Race condition in AsioSocket shutdown when using SSL.
+
+* Rebuilt included rclone v1.62.2 with go1.21.8.
+
+* Repair shard rebalancer if some server has no leaders of a collection
+  but only followers. Previously, such a server was not always considered
+  for leader movements.
+
+* Updated ArangoDB Starter to v0.18.4 and arangosync to v2.19.7.
+
+* BTS-1808: fix arangodump format for ZKD indexes. Previously the structural
+  dump of ZKD indexes was missing the "fieldValueTypes" attribute, which is
+  necessary to restore a ZKD index, at least in the cluster.
+
+
+v3.11.8 (2024-02-22)
+--------------------
-* Added metric `arangodb_replication_clients` showing the number of currently
- active/connected replication clients for a server.
+* Updated arangosync to v2.19.6.
-* Partial fix for PRESUPP-539: account for memory used during AQL condition
- transformation to disjunctive normal form (DNF). This transformation can use
- a lot of memory for complex filter conditions, which was previously not
- accounted for. Now, if the transformation uses a lot of memory and hits the
- configured query memory limit, the query will rather be aborted with a
- proper error message than overuse memory.
- For very complex conditions that would use massive amounts of memory when
- transformed into DNF, the DNF conversion is also aborted at some threshold
- complexity value. If the threshold is hit, the query continues with a
- simplified representation of the condition, which will not be usable in
- index lookups. However, this should still be better than overusing memory
- or taking a very long time to compute the DNF version.
- The complexity threshold value can be configured per query by setting the
- new `maxDNFConditionMembers` query option. There is also a new startup
- option `--query.max-dnf-condition-members` for coordinators and single
- servers to adjust the threshold value globally.
+* Updated OpenSSL to 3.0.13 and OpenLDAP to 2.6.7.
-* ES-1428: make the maximum number of V8 contexts depend on the maximum number
- of server threads, if `--javascript.v8-contexts` is not set explicitly.
- Previously the maximum number of V8 contexts was hard-coded to 16 when the
- option `--javascript.v8-contexts` option was not set explicitly.
- Now the maximum number defaults to 7/8 of the value of the startup option
- `--server.maximal-threads`, regardless of if it is explicitly configured or
- the default value is used. Only 7/8 are used to leave some headroom for other
- important maintenance tasks.
- A server with default configuration should now not block waiting for V8
- contexts to become available, but it may use more memory for the additional V8
- contexts if there are many concurrent requests that invoke JavaScript actions
- (e.g. requests using the web UI or Foxx).
+* Updated ArangoDB Starter to v0.18.2.
-* Fixed a bug in the API used by `arangorestore`: On restore, a new _rev value
- is generated for each imported document to avoid clashes with previously
- present data. This must be created on the shard leader rather than the
- coordinator. The bug happened, when two coordinators were creating
- the same _rev value for two different documents concurrently.
+* Rebuilt included rclone v1.62.2 with go1.21.6.
-* BTS-1249: Add startup option `--foxx.enable`.
- This startup option determines whether access to user-defined Foxx services
- is possible for the instance. It defaults to `true`.
- If the option is set to `false`, access to Foxx services is forbidden and
- will be responded with an HTTP 403 Forbidden error. Access to ArangoDB's
- built-in web interface, which is also a Foxx service, is still possible even
- with the option set to `false`.
- When setting the option to `false`, access to the management APIs for Foxx
- services will also be disabled. This is the same as manually setting the
- option `--foxx.api false`.
+* ES-1892: Fix hot restores missing user defined analyzers.
-* Updated arangosync to v2.15.0-preview-1.
+* Fix: we cannot update a link, but have to drop and recreate it. Until now,
+  the new index had the same set of labels as the old one. However, on
+  followers (and with replication2 also on the leader), the DropIndex and
+  EnsureIndex actions could run concurrently, i.e. we could try to create the
+  new index before the old one was fully removed. In this case we could get a
+  duplicate metric exception, preventing the index from being created. Such
+  errors are not really handled at the moment; they are simply logged and
+  otherwise ignored. That means the index will simply not be available on the
+  affected server, since we also do not retry to create it at a later time.
+  To avoid this, we add a new `indexId` label to the metric to make it unique.
-* The internal Graph code is completely converted to the new graph engine.
- The last algorithms added to that list are: ShortestPath, WeightedShortestPath,
- KShortestPaths and WeightedKShortestPaths.
-* Improve memory usage of in-memory edge index cache if most of the edges in an
- index refer to a single or mostly the same collection.
- Previously the full edge ids, consisting of the referred-to collection
- name and the referred-to key of the edge, were stored in full. Now, the first
- edge inserted into an edge index's in-memory cache will determine the
- collection name for which all corresponding edges can be prefix-compressed.
- For example, when inserting an edge pointing to `the-collection/abc` into the
- empty cache, the collection name `the-collection` will be noted for that
- cache as a prefix. The edge will be stored in memory as only `/abc`. Further
- edges that are inserted into the cache and that point to the same collection
- will also be stored prefix-compressed.
- The prefix compression is transparent and does not require configuration or
- setup. Compression is done separately for each cache, i.e. a separate prefix
- can be used for each individual edge index, and separately for the `_from`
- and `_to` parts. Lookups from the in-memory edge cache will not return
- compressed values but the full-length edge ids. The compressed values will
- also be used in memory only and will not be persisted on disk.
+v3.11.7 (2024-01-29)
+--------------------
-* Updated ArangoDB Starter to 0.15.7.
+* BTS-1751: Fixed a strange error message when executing a query while
+ creating an index in the background.
+
+* Reduced the default number of per-collection document removals performed by
+ the TTL index background thread from 1M to 100K in each iteration. This
+ change gives other collections a chance of being cleaned up as well.
+
+* Do not make AQL query runtime timeout affect TTL index background thread
+ removal queries.
+
+* New API to show progress in background index creation.
+
+* Updated ArangoDB Starter to v0.18.0.
+
+* BTS-1741: fix updates of values in unique persistent indexes with stored
+ values defined for them. When such an index value was updated, it was
+ possible that the stored value was not correctly updated, so that subsequent
+ reads of the index value would run into exceptions such as
+ `Expecting type Array or Object`.
+
+* APM-828: Per collection/database/user monitoring.
+
+ This adds optional metrics for tracking per-shard requests on DB-Servers.
+ The exported metrics are:
+ - `arangodb_collection_leader_reads_total`: number of read requests on
+ leaders, per shard, and optionally also split by user.
+ - `arangodb_collection_leader_writes_total`: number of write requests on
+ leaders, per shard, and optionally also split by user.
+
+ The new startup option `--server.export-shard-usage-metrics` can be used to
+ opt in to these metrics. It can be set to one of the following values on
+ DB-Servers:
+ - `disabled`: no shard usage metrics are recorded or exported. This is the
+ default value.
+ - `enabled-per-shard`: this will make DB-Servers collect per-shard usage
+ metrics.
+ - `enabled-per-shard-per-user`: this will make DB-Servers collect per-shard
+ and per-user metrics. This is more granular than `enabled-per-shard` but
+ can produce a lot of metrics.
+
+ If enabled, the metrics are only exposed on DB servers and not on
+ Coordinators or single servers.
+
+ Whenever a shard is accessed in read or write mode by one of the following
+ operations, the metrics are populated dynamically, either with a per-user
+ label or not, depending on the above setting.
+ The metrics are retained in memory on DB-Servers. Removing databases,
+ collections or users that are already included in the metrics won't remove the
+ metrics for these databases, collections or users until the DB-Server is
+ restarted.
+
+ The following operations increase the metrics:
+ - AQL queries: an AQL query will increase the read or write counters exactly
+ once for each involved shard. For shards that are accessed in read/write
+ mode, only the write counter will be increased.
+ - Single-document insert, update, replace, and remove operations: for each
+ such operation, the write counter will be increased once for the affected
+ shard.
+ - Multi-document insert, update, replace, and remove operations: for each such
+ operation, the write counter will be increased once for each shard that is
+ affected by the operation. Note that this includes collection truncate
+ operations.
+ - Single- and multi-document read operations: for each such operation, the
+ read counter will be increased once for each shard that is affected by the
+ operation.
+
+ The metrics are increased when any of the above operations start, and they
+ are not decreased should an operation abort or if an operation does not
+ lead to any actual reads or writes.
+
+ Note that internal operations, such as internal queries executed for
+ statistics gathering, internal garbage collection, and TTL index cleanup are
+ not counted in these metrics. Additionally, all requests that use the
+ superuser JWT for authentication and that do not have a specific user set,
+ are not counted.
+ Requests are also only counted if they have an ArangoDB user associated with
+ them, so that the cluster must also be running with authentication turned on.
+
+ As there can be many of these dynamic metrics, depending on the number of
+ shards and/or users in the deployment, these metrics are turned off by
+ default (see above). If turned on, they are only exposed via the new HTTP
+ REST API endpoint GET `/_admin/usage-metrics`. They are not exposed via the
+ existing metrics API endpoint GET `/_admin/metrics`.
+
+ Add additional metrics for tracking the number of bytes read or written per
+ shard on DB-Servers:
+
+ - `arangodb_collection_requests_bytes_read_total`:
+ This metric exposes the per-shard number of bytes read by read operation
+ requests on DB-Servers.
+ It is increased by AQL queries that read documents or edges and for single-
+ or multi-document read operations.
+ The metric is normally increased only on the leader, but it can also
+ increase on followers if "reads from followers" are enabled.
+
+ For every read operation, the metric will be increased by the approximate
+ number of bytes read to retrieve the underlying document or edge data. This
+ is also true if a document or edge is served from an in-memory cache.
+ If an operation reads multiple documents/edges, it will increase the
+ counter multiple times, each time with the approximate number of bytes read
+ for the particular document/edge.
+
+ The numbers reported by this metric normally relate to the cumulative sizes
+ of documents/edges read.
+ The metric is also increased for transactions that are started but later
+ aborted.
+ Note that the metric is not increased for secondary index point lookups or
+ scans, or for scans in a collection that iterate over documents but do not
+ read them.
+
+ - `arangodb_collection_requests_bytes_written_total`:
+ This metric exposes the per-shard number of bytes written by write operation
+ requests on DB-Servers, on both leaders and followers.
+ It is increased by AQL queries and single-/multi-document write operations.
+ The metric is first increased on the leader, and for every replication
+ request to followers it is also increased on the followers.
+
+ For every write operation, the metric will be increased by the approximate
+ number of bytes written for the document or edge in question.
+ If an operation writes multiple documents/edges, it will increase the
+ counter multiple times, each time with the approximate number of bytes
+ written for the particular document/edge.
+
+ An AQL query will also increase the counter for every document or edge
+ written, each time with the approximate number of bytes written for the
+ document/edge.
+
+ The numbers reported by this metric normally relate to the cumulative sizes
+ of documents/edges written. For remove operations, however, only a fixed
+ number of bytes is counted per removed document/edge. For truncate
+ operations, the metric will be affected differently depending on how the
+ truncate is executed internally.
+ For truncates on smaller shards, the truncate operation will be executed as
+ the removal of the individual documents in the shard. Thus the metric will
+ also be increased as if the documents were removed individually. Truncate
+ operations on larger shards however will be executed via a special
+ operation in the storage engine, which marks a whole range of documents as
+ removed, but defers the actual removal until much later (compaction
+ process). If a truncate is executed like this, the metric will not be
+ increased at all.
+ Writes into secondary indexes are not counted at all.
+
+ The metric is also increased for transactions that are started but later
+ aborted.
+
+ These metrics are not exposed by default. They are only present if the
+ startup option `--server.export-shard-usage-metrics` is set to either
+ `enabled-per-shard` or `enabled-per-shard-per-user`. With the former setting,
+ the metrics will have different labels for each shard that was accessed. With
+ the latter setting, the metrics will have different labels for each
+ combination of shard and user that accessed the shard.
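+
+ As an illustration, a minimal arangosh sketch for reading these metrics,
+ assuming a connection to a DB-Server that was started with
+ `--server.export-shard-usage-metrics enabled-per-shard`:
+
+   // fetch the dynamic usage metrics from the dedicated endpoint
+   // (they are not part of the regular /_admin/metrics output)
+   let res = arango.GET_RAW("/_admin/usage-metrics");
+   // the response body is in Prometheus text format
+   print(res.body);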
+
+* Validate that the attribute stored in the `smartGraphAttribute` of SmartGraph
+ vertex collections exists and is not changed afterwards by update or replace
+ operations. Previously the `smartGraphAttribute` value was checked only when
+ inserting documents into a SmartGraph vertex collection, but not for update or
+ replace operations, although the documentation always stated that the
+ `smartGraphAttribute` value must not be changed after the initial creation of
+ the document.
+ The missing checks on update/replace allowed users to retroactively modify the
+ value of the `smartGraphAttribute` for existing documents, which could have
+ led to problems when the data of such a SmartGraph vertex collection was
+ replicated to a new follower shard. On the new follower shard, the documents
+ went through the full validation, and led to documents with modified
+ `smartGraphAttribute` values being rejected on the follower. This could have
+ led to follower shards not getting in sync.
+ Now the value of the `smartGraphAttribute` will be fully validated with every
+ insert, update or replace operation, and every attempt to modify the value of
+ the `smartGraphAttribute` retroactively will fail with error 4003
+ (`ERROR_KEY_MUST_BE_PREFIXED_WITH_SMART_GRAPH_ATTRIBUTE`, error message "in
+ smart vertex collections _key must be a string and prefixed with the value of
+ the smart graph attribute").
+ Additionally, if upon insert the `smartGraphAttribute` is missing for a
+ SmartGraph vertex, the error code will be error 4001
+ (`ERROR_NO_SMART_GRAPH_ATTRIBUTE`, error message "smart graph attribute not
+ given") instead of error 4003.
+ To retroactively repair the data in any of the affected collections, it is
+ possible to update every (affected) document with the correct value of the
+ `smartGraphAttribute` via an AQL query as follows:
-* Updated OpenSSL to 1.1.1t and OpenLDAP to 2.6.4.
+     FOR doc IN @@collection
+       LET expected = SUBSTRING(doc._key, 0, FIND_FIRST(doc._key, ':'))
+       LET actual = doc.@attr
+       FILTER expected != actual
+       UPDATE doc WITH {@attr: expected} IN @@collection
+       COLLECT WITH COUNT INTO updated
+       RETURN updated
-* Marked all transactions used by the gharial API on coordinators, and a few
- others, as "globally managed". This fixes an issue where transaction
- conflicts could lead to a silent out-of-sync situation between a leader
- shard and its followers.
+ This will update all documents with the correct (expected) value of the
+ `smartGraphAttribute` if it deviates from the expected value. The query will
+ return the number of updated documents as well.
+ The bind parameters necessary to run this query are:
+ - `@@collection`: name of a SmartGraph vertex collection to be updated
+ - `@attr`: attribute name of the `smartGraphAttribute` of the collection
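+
+ As an illustration, a minimal arangosh sketch that runs the repair query
+ (the collection name `vertices` and attribute name `region` are hypothetical
+ placeholders):
+
+   // run the repair query once per affected SmartGraph vertex collection
+   let result = db._query(`
+     FOR doc IN @@collection
+       LET expected = SUBSTRING(doc._key, 0, FIND_FIRST(doc._key, ':'))
+       LET actual = doc.@attr
+       FILTER expected != actual
+       UPDATE doc WITH {@attr: expected} IN @@collection
+       COLLECT WITH COUNT INTO updated
+       RETURN updated`,
+     { "@collection": "vertices", "attr": "region" }).toArray();
+   // number of repaired documents
+   print(result[0]);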
-* BTS-1184: Fixed a forced index hint (`forceIndexHint: true`) not being used
- in a query when a geo index was present, because geo-index-related
- optimizations would override the choice of the index hint.
+* FE-385: fix query import.
-* Updated ArangoDB Starter to 0.15.7-preview-1.
+* BTS-1731: protect streaming transaction garbage-collection from deletion of
+ the transaction's underlying database.
-* BTS-1219: Fix cost estimation for geo index usage and for collection
- enumeration with included filtering. This fixes a regression from 3.9
- where a geo index was no longer used, caused by an optimizer rule that
- had been extended and by wrong cost estimations for execution plans.
+* BTS-1727: Return proper EXIT_UPGRADE_REQUIRED in cluster mode.
-* FE-131: Added search input for query page.
+* Removed the artificial upper bound value of `128` for the startup option
+ `--rocksdb.max-background-jobs`.
-* Return peak memory usage and execution time as part of query explain result.
- This helps finding queries that use a lot of memory to build the execution
- plan.
+* Added stored values support for ZKD indexes.
-* Allow usage of document projections and traversal projections in slightly
- more cases, specifically when the document's or traversal's output
- variables were used in subqueries. Previously the usage of the document or
- traversal output variables in subqueries could lead to projections being
- disabled.
+* Fixed a crash during recursive AstNode creation when an exception was thrown.
+ This can happen on DB servers when a query plan snippet is created from
+ VelocyPack, and the query plan snippet would use more memory than is allowed
+ by the query memory limit.
-* FE-133: Alphabetical sorting for collections on user permissions page.
+* Fix cmake setup for jemalloc library for the case of memory profiling.
-* Fixed EE: Concurrent batch insert/update CRUD operations into
- SmartEdgeCollections on conflicting edge keys could get the
- smart edge caching out-of-sync, which would yield different results
- for OUTBOUND/INBOUND search over edges. This is now fixed; however, there
- is now a slightly higher chance to get a CONFLICT response back on those
- queries.
+* BTS-1714: In writing AQL queries, the lock timeout was accidentally set to
+ 2 seconds for all write operations on followers. This could lead to dropped
+ followers when an index of a large shard was finalized on a follower.
-* Fixed issue #18053: Computed Values become null when Schema is modified.
+* Fixed BTS-1701: Assertion triggered in AQL Traversal on edge PRUNE.
-* BTS-1193: Fix for schema update. Previously, when removing a field and then
- inserting a new field into the schema, both the old and new schema would be
- merged, meaning the old field was maintained and the new one added.
+* Fix an issue when forced index hints were used in a query, but the optimizer
+ selected a query execution plan that would not use the selected index.
+ Previously, the query failed with a "Could not serve index hint" error
+ message. With the fix, the optimizer will select the next best plan(s) until
+ all index hints are either satisfied, or there are no further plans to select
+ from. In the latter case, the query still aborts with said error.
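+
+ As an illustration, a query with a forced index hint looks like this in
+ arangosh (collection, index, and attribute names are hypothetical):
+
+   db._query(`
+     FOR doc IN users OPTIONS { indexHint: "byAge", forceIndexHint: true }
+       FILTER doc.age > 30
+       RETURN doc`).toArray();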
-* Added startup option `--javascript.user-defined-functions`.
- This option controls whether JavaScript user-defined functions (UDFs) can
- be used in AQL queries. The option defaults to `true`. The option can be
- set to `false` to disallow using JavaScript UDFs from inside AQL queries.
- In that case, a parse error will be thrown when trying to run a query that
- invokes a UDF.
-* Updated transitive JS dependency hoek to @hapi/hoek@8.5.1 to resolve
- CVE-2020-36604 in joi.
+v3.11.6 (2023-11-29)
+--------------------
-* Updated JS dependency minimatch to 3.1.2 to resolve CVE-2022-3517.
+* FE-403: Fix loader instantiation.
-* Updated JS dependency qs to 6.11.0 to resolve CVE-2022-24999.
+* Fix display of "unique" column in indexes overview of collections.
-* Allow enabling/disabling the supervision maintenance mode also via followers
- in active failover mode. Previously the supervision maintenance mode could
- only be enabled/disabled by making a call to the active failover leader.
+* Updated OpenSSL to 3.0.12.
-* Activate RDB_CoveringIterator and use it for some geo index queries.
- This speeds up and simplifies geo queries with geo index which do not use
- GEO_DISTANCE.
+* Fixed MDS-1170: Significant performance degradation when queries are executed
+ within transactions that involve edits.
-* Fix bug in hotbackup download/restore to make sure no data is mixed up
- between servers. This fixes a bug introduced in 3.10.
+* Solve a potential blockage during hotbackup by not stopping read-only
+ transactions from committing during hotbackup. Furthermore, improve behavior
+ of authentication in the case that the user cache is outdated.
-* BTS-266: When a cluster was started without `--cluster.force-one-shard`, a
- database was created, and the cluster was then restarted with the startup
- option `--cluster.force-one-shard` set to true, a formerly created database
- with more than one shard could lead arangosearch analyzers to use
- optimizations that must not be used outside of single-shard mode. To prevent
- this, the verification of the parameter being true as a condition to run
- these optimizations was removed.
+* FE-395: Fix query editor map not loading.
-* Removed CMake variable `ARANGODB_BITS`, which was only used in one place.
+* Stabilize detaching of threads test.
-* Log information about follower state/apply progress in supervision job that
- organizes failover in active failover mode.
+* Rebuilt included rclone v1.62.2 with go1.21.4.
-* Updated arangosync to v2.14.0.
+* Updated ArangoDB Starter to v0.17.2 and arangosync to v2.19.5.
-* Updated ArangoDB Starter to 0.15.6.
+* Track memory usage of internal connection statistics and request statistics:
+ - `arangodb_connection_statistics_memory_usage`
+ - `arangodb_requests_statistics_memory_usage`
+ These metrics will remain at 0 if the server is started with the option
+ `--server.statistics false`. Otherwise they will contain the memory used
+ by connection and request statistics. Memory usage should remain pretty
+ constant over time, unless there are bursts of new connections and/or
+ requests.
-* ES-1396: under some rare circumstances it was possible that background index
- creation missed some documents in case the documents were inserted after
- background index creation started and the corresponding WAL files with the
- inserts were already removed before background indexing caught up.
+* Avoid memory leak in case an arangod instance is started with the option
+ `--server.statistics false`. Previously, with that setting the request and
+ connection statistics were built up in memory, but were never released because
+ the statistics background thread was not running.
-* Fixed the issue that the collection view search did not support selecting
- everything using Ctrl + A.
-* BTS-413: Added more explanatory messages for when the user cannot see the
- statistics for a node in the UI when in cluster mode.
+* Remove version check on startup of arangosh. This can speed up arangosh
+ startup considerably because it no longer makes a network request to
+ www.arangodb.com.
-* Updated arangosync to v2.14.0-preview-6.
-* Updated ArangoDB Starter to 0.15.6-preview-2.
+v3.11.5 (2023-11-09)
+--------------------
-* Fix coordinator segfault in AQL queries in which the query is invoked from
- within a JavaScript context (e.g. from Foxx or from the server's console
- mode) **and** the query has multiple coordinator snippets of which one
- other than the outermost invokes a JavaScript function.
- Instead of crashing, coordinators will now respond with the exception
- "no v8 context available to enter for current transaction context".
- For AQL queries that called one of the AQL functions `CALL` or `APPLY` with
- a fixed function name, e.g. `APPLY('CONCAT', ...)`, it is now also assumed
- correctly that no JavaScript is needed, except if the fixed function name
- is the name of a user-defined function.
- This fixes an issue described in OASIS-24962.
+* Fixed a problem in ReadWriteLock which could prevent waiting readers from
+ being woken up when a write lock acquisition timed out.
+
+* Allow a scheduler thread to detach itself from the scheduler if it sees that
+ it has to perform a potentially long running task like waiting for a lock.
+ This allows a new scheduler thread to be started and avoids the situation
+ in which all threads are blocked waiting for a lock, which has in the past
+ led to deadlock situations. The number of detached threads is limited by a
+ configurable option. Currently, only threads waiting for more than 1 second on
+ a collection lock will detach themselves.
+
+* MDS-1164: Added Publisher and Company to the Windows binaries.
+
+* Silence TSAN warnings at shutdown about access to the
+ SchedulerFeature::SCHEDULER pointer by using atomic references.
+
+* Fixed a race in controlled leader change which could lead to a situation in
+ which a shard follower is dropped when the first write operation happens. This
+ fixes BTS-1647.
+
+* Fixed BTS-1273: While queueing an async server log message, we could be
+ blocked by IO on the log-writer thread. This could slow down the main
+ path. In case the log-writer is configured to use a slow device
+ (e.g. using syslog), this could have a significant impact.
+
+* Introduced an upper bound on queued async log messages. If more messages are
+ logged than the background logger thread can actually process, we start to
+ write log messages synchronously. This prevents the queue from growing
+ indefinitely. The upper bound is configurable via the startup option
+ `--log.max-queued-entries`. The default value is 10000.
+
+* Fixed an unnecessary follower drop in controlled leader change, which will
+ speed up leader changes. This fixes BTS-1658.
+
+* Fixed BTS-1669: Transaction manager returned an error when aborting an
+ expired transaction.
+ There was a small time window of around 2 seconds in which aborting expired
+ transactions would return "transaction aborted" instead of returning success.
+ The time window was between when a transaction expired (according to its TTL
+ value) and when the transaction manager's garbage collection aborted the
+ transaction. The issue only happened for transactions which outlived their TTL
+ value and for which an abort operation was attempted in that time window.
+
+* Backport multiple fixes for the scheduler behavior during ArangoDB shutdown.
+
+* FE-374: Query UI - fix switching from table to JSON.
+
+* Make AQL query cursor garbage-collection clean up more expired cursors in a
+ single run than before. This change can help to reclaim memory of expired
+ cursors quicker than in previous versions.
+
+* Updated OpenSSL to 3.0.11.
+
+* Reduce number of atomic shared_ptr copies in in-memory cache subsystem.
+
+* Fixed BTS-1610: In certain situations with at least three levels of nested
+ subqueries, of which some of the outer iterations don't return any results,
+ results of later iterations could be lost.
+
+* Updated arangosync to v2.19.4.
+
+* Add cluster-internal connectivity checks.
+ This makes Coordinators and DB-Servers in a cluster periodically send check
+ requests to each other, in order to check if all servers can connect to each
+ other.
+ If a cluster-internal connection to another Coordinator or DB-Server cannot be
+ established within 10 seconds, a warning will be logged.
+
+ The new startup option `--cluster.connectivity-check-interval` can be used to
+ control the frequency of the connectivity check, in seconds.
+ If set to a value greater than zero, the initial connectivity check is
+ performed approximately 15 seconds after the instance start, and subsequent
+ connectivity checks are executed with the specified frequency.
+ If set to a value of zero, connectivity checks are disabled.
+
+ This change also adds the metrics
+ `arangodb_network_connectivity_failures_coordinators` and
+ `arangodb_network_connectivity_failures_dbservers`, which can be monitored for
+ detecting temporary or permanent connectivity issues.
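+
+ As an illustration, a minimal arangosh sketch that extracts the two new
+ connectivity metrics from the metrics endpoint:
+
+   let body = String(arango.GET_RAW("/_admin/metrics").body);
+   // keep only the connectivity failure counters
+   let lines = body.split("\n").filter(function (l) {
+     return l.indexOf("arangodb_network_connectivity_failures") === 0;
+   });
+   print(lines.join("\n"));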
+
+* Fix ineffective startup option `--rocksdb.partition-files-for-documents`.
+ Setting this option had no effect unless RocksDB version 8 was used.
+
+
+v3.11.4 (2023-10-04)
+--------------------
-* Prevent agency configuration confusion by an agent which comes back without
- its data directory and thus without its UUID.
+* Fix updating of collection properties (schema) when the collection has a view
+ on top.
-* APM-592: In batched query results, when executing requests for `/_api/cursor`,
- there might be a connection error and the user might not be able to retrieve
- the latest batch from the cursor. For that, a query option flag `allowRetry` was
- added. When set to `true`, if the latest batch response object wasn't
- successfully received, the user can send a retry request to receive it with
- a POST request to `/_api/cursor/<cursor-id>/<batch-id>`. Only the latest batch
- is cached, meaning former batches cannot be retrieved again later.
+* FE-323: allow 'nested' property in view JSON UI.
-* Change the request lane for replication catchup requests that leaders in
- active failover receive from their followers from medium to high. This
- will give catchup requests from followers highest priority, so that the
- leader will preferably execute them compared to regular requests.
+* Slightly extended hot backup release lock timeout on coordinators from
+ `timeout + 5` seconds to `timeout + 30` seconds, to prevent premature release
+ of hot backup commit locks on coordinators.
-* Web UI [FE-48]: Additional fix to the previously introduced license
- information usability improvement. In case the server is being started with
- the additional parameter `--server.harden`, the previous fix did not handle
- that specific edge case.
+* Improve logging in case hot backup locks cannot be taken on coordinators.
-* Use more compact and efficient representation for arrays and objects during
- AQL AST serialization and deserialization. This can help to reduce the size
- of messages exchanged between coordinator and database servers during query
- setup, and also reduce the time needed for parsing these messages. This
- especially helps when there are large bind parameter values that are arrays
- or objects.
- The more efficient format is used also inside an AQL query's "explain" and
- "profile" methods, and thus any callers that process the return values of
- explain and profile operations may now receive the new format. All callers
- inside the ArangoDB code have been adjusted, but any external callers that
- process the JSON response values of AQL query explain or profile operations
- may need to be adjusted to handle the new format.
+* BTS-1618: Prevent the problem from ever occurring on 3.11.
-* Allow cluster database servers to start even when there are existing databases
- that would violate the settings `--cluster.min-replication-factor` or
- `--cluster.max-replication-factor`.
- This allows upgrading from older versions in which the replication factor
- validation for databases was not yet present.
+* Speed up incremental hotbackup upload by parallelization of remote-to-remote
+ copies. Tolerate deletion of old backups during incremental upload.
-* Added new stage "instantiating executors" to the query profiling output.
- The time spent in "instantiating executors" is the time needed to create
- the query executors from the final query execution plan. In cluster mode,
- this stage also includes the time needed for physically distributing the
- query snippets to the participating database servers.
- Previously, the time spent for instantiating executors and the physical
- distribution was contained in the "optimizing plan" stage, which was
- misleading.
+* Added the following metrics to improve observability of the in-memory cache
+ subsystem:
+ - `rocksdb_cache_free_memory_tasks_total`: total number of `freeMemory` tasks
+ scheduled
+ - `rocksdb_cache_migrate_tasks_total`: total number of `migrate` tasks
+ scheduled
+ - `rocksdb_cache_free_memory_tasks_duration_total`: total time (microseconds)
+ spent in `freeMemory` tasks
+ - `rocksdb_cache_migrate_tasks_duration_total`: total time (microseconds)
+ spent in `migrate` tasks
-* Removed constant values for query variables from query plan serialization
- in cases they were not needed. Previously, constant values of query variables
- were always serialized for all occurrences of a variable in a query plan.
- If the constant values were large, this contributed to higher serialization
- and thus query setup times. Now the constant values are only serialized
- for relevant parts of query execution plans.
+* Improve performance of the in-memory cache's memory reclamation procedure.
+ The previous implementation acquired too many locks, which could drive system
+ CPU time up.
-* BTS-199: remove check for environment variable `GLIBCXX_FORCE_NEW` from
- server start, and remove setting this variable from startup scripts.
- The reason is that the environment variable only controls the behavior of
- programs linked against glibc, but our release builds are linked to libmusl.
+* Fix MDS-1157: taking hot backups with the --allow-inconsistent=false option
+ always reported a warning that the backup was potentially inconsistent,
+ although taking the backup actually succeeded.
-* Make the cache_oblivious option of jemalloc configurable from the
- environment and set its default to `false`. This helps to save
- 4096 bytes of RAM for every allocation which is at least 16384 bytes
- large. This is particularly beneficial for the RocksDB buffer cache.
+ The warning message was `Failed to get write lock before proceeding with
+ backup. Backup may contain some inconsistencies.`, but it was a false positive
+ and the backups taken were actually consistent.
-* Updated ArangoDB Starter to 0.15.6-preview-1.
+* Fixed issue with `keepNull=false` updates not being properly replicated to
+ followers.
-* Acquire a snapshot of the (list of) indexes when starting document insert,
- update/replace and remove operations, and use that snapshot throughout the
- operation. Previously, the list of indexes was acquired multiple times during
- a write operation, and it was (at least in theory) possible that the list of
- indexes changed between the individual acquisitions.
- The PR also contains an optimization to not fetch the full document from the
- storage engine for remove and replace operations in case the full document is
- not needed to process the operation. This is the case when the collection
- does not contain any secondary indexes and `returnOld` is not used.
+* Updated JavaScript dependencies:
+
+ qs: 6.11.0 -> 6.11.2
+ semver: 7.3.8 -> 7.5.4
+
+ This addresses CVE-2022-25883 in the semver module.
+
+* FE-355: fix ctrl + A for search box.
+
+* FE-365: fix query import breaking due to extra fields.
+
+* Fixed BTS-1613: Processing of analyzers imported from a cluster dump into a
+ single server database.
+
+* Removed DocuBlocks and obsolete documentation tooling.
+ The HTTP API descriptions are now in the `arangodb/docs-hugo` repository.
+
+* Added metric `rocksdb_cache_edge_empty_inserts_total` to count the number
+ of inserts into the edge cache for non-connected edges.
+
+* Renamed two edge-cache related metrics to improve naming consistency:
+ - `rocksdb_cache_edge_effective_entries_size` was renamed to
+ `rocksdb_cache_edge_inserts_effective_entries_size_total` and was changed
+ from a gauge to a counter.
+ - `rocksdb_cache_edge_uncompressed_entries_size` was renamed to
+ `rocksdb_cache_edge_inserts_uncompressed_entries_size_total` and is now
+ also a counter instead of a gauge.
+
+* Do not auto-reload entries into in-memory edge cache for edge index entries
+ that were previously not contained in the in-memory edge cache. This change
+ will help to keep the hot set in memory rather than evicting it in favor of
+ "random" other index entries.
+
+* Properly track memory usage for allocated objects in the in-memory cache
+ (e.g. the edge cache or the in-memory cache for other persistent indexes).
+ Previously the memory used for the underlying hash tables was accounted for
+ correctly, but the sizes of the cache payloads (keys and values) were not
+ accounted for under all circumstances (at least for the initial entries in
+ the caches).
+ This change leads to more accurate memory usage tracking and reporting by the
+ in-memory cache subsystem, and to the cache subsystem not exceeding its
+ configured memory usage limit.
+ The cache subsystem was also changed so that it can use as much memory as
+ configured by the global cache memory limit (configurable via startup options
+ `--cache.size` and `--cache.high-water-multiplier`). Previously the cache
+ subsystem was freeing memory as soon as it hit 56% of the configured limit.
+ Overall, the effective memory usage of the cache subsystem can be different
+ from the cache memory usage in previous versions. In previous versions the
+ configured memory usage limit could be temporarily exceeded, but in most
+ cases the cache used considerably less memory than allowed by the limit.
+ Effectively the memory usage was capped at 56% of the configured limit.
+ Now the cache will try to use up to as much memory as allowed by the
+ configured memory usage limit (i.e. `--cache.size` multiplied by the high
+ water multiplier `--cache.high-water-multiplier`). The default value for
+ the high water multiplier is set to 56% in this version to keep compatibility
+ with previous versions.
+
+* Expose the "high water multiplier" for the in-memory cache subsystem. The
+ high water multiplier is used to calculate the effective memory usage limit
+ for the in-memory cache subsystem. The cache's configured memory usage limit
+ (`--cache.size`) is multiplied by the high water multiplier, and the
+ resulting value is used as the effective memory limit. It defaults to 56%
+ to ensure compatibility with previous versions, in which the threshold was
+ effectively hard-coded to the same value.
+
+* Reduce memory usage for empty in-memory edge caches by ~40%. This is achieved
+ by allocating each cache's statistics objects only lazily, when actually
+ needed.
+
+* Added the following metrics for the in-memory edge cache:
+
+ - `rocksdb_cache_edge_inserts_total`: total number of insertions into the
+ in-memory edge cache.
+ - `rocksdb_cache_edge_compressed_inserts_total`: total number of insertions
+ into the in-memory edge cache that used compression.
+
+* Added the startup option `--cache.max-spare-memory-usage` to control memory
+ usage for spare, unused hash tables in the in-memory caching subsystem. This
+ option can be used to cap the memory used by spare tables. It can be set to
+ a value of 0 to not use any memory except for active hash tables.
+
+* Fixed BTS-1556: Potential shutdown race: when the server was shutting down
+ while agency communication was still ongoing, it could use the scheduler
+ which was already deleted, causing a crash right before a normal shutdown.
+
+ This should not have any negative effect, as all state-preserving operations
+ are done by then; it just wasn't a clean exit.
+
+* Updated ArangoDB Starter to 0.17.1.
+
+* Fixed BTS-1541: Old legacy little endian key format for RocksDB database
+ (created in ArangoDB 3.2 and 3.3) showed wrong behavior on newer versions.
+ This fixes a persistent index corruption bug with the old format.
+
+* Fixed a loophole in COLLECT variable name validation: in COLLECT INTO
+ expressions it was possible to refer to variables that the COLLECT just
+ introduced. This was undefined behavior and not caught by the previous version
+ of COLLECT's variable checking.
+
+* BTS-1598: fix race in agency.
+
+* ES-1727: Fix `UPDATE`, `REPLACE`, and `UPSERT ... UPDATE/REPLACE` failing with
+ "conflict, _rev values do not match" for non-local edges in Smart- and
+ EnterpriseGraphs, when `OPTIONS { ignoreRevs: false }` is supplied.
+
+* Upgraded Swagger-UI to v5.4.1.
+
+* Fixed BTS-1590: Fixed potentially undefined behavior in NetworkFeature, when
+ it was referring to an options object that could have been destroyed already.
+
+* Allow compression content-negotiation in metrics API, so that responses from
+ the metrics API at `/_admin/metrics` can be sent compressed if the client
+ supports it.
+
+
+v3.11.3 (2023-08-17)
+--------------------
-* Added experimental startup option `--rocksdb.block-cache-jemalloc-allocator`.
- This option defaults to `false`. When set to `true`, a jemalloc-based memory
- allocator will be used to allocate memory for the RocksDB block cache.
- This allocator will also mark the memory of the block cache to be excluded
- from coredumps, potentially reducing coredump size a lot.
+* Fixed BTS-1553: A rarely occurring issue during AQL queries where the inner
+ amount of a limit in the LimitExecutor was set to a wrong value, which led
+ to some data rows not being returned to the client.
-* Use intermediate commits in old shard synchronization protocol. This avoids
- overly large RocksDB transactions when syncing large shards, which is a
- remedy for OOM kills during restarts.
+* BTS-1544: The _system database now properly reports its sharding value.
-* Remove async mode from pregel.
+* Fixed BTS-1554: wrong aggregation count when an "in" or "or" condition
+ was executed through an index lookup.
-* Added a configuration option (for the agency):
- --agency.supervision-failed-leader-adds-follower
- with a default of `true` (behavior as before). If set to `false`,
- a `FailedLeader` job does not automatically configure a new shard
- follower, thereby preventing unnecessary network traffic, CPU and IO load
- for the case that the server comes back quickly. If the server is
- permanently failed, an `AddFollower` job will be created anyway eventually.
+* BTS-1549: adjust permission handling in experimental dump to the same behavior
+ as in non-experimental dump.
-* Print the pid of the process which sent a SIGABRT or other fatal
- signal that shuts down ArangoDB ungracefully.
+* Fixed AQL WINDOW statement for OneShard databases: Whenever WINDOW was used
+ in the row-based variant (e.g. WINDOW { preceding: 1, following: 1 }), it
+ errored with: mandatory variable "inVariable" not found. This variable is now
+ correctly treated as optional.
-* Avoid write-write conflicts for single document operations performed via the
- document REST API (i.e., no AQL, but also no streaming transactions). This is
- achieved by locking the key of each document before performing the actual
- modification. This lock acquisition effectively serializes all operations on
- the same document. To avoid starvation, the lock acquisition is limited to
- one second. This lock timeout value is currently hardcoded but will be made
- configurable in the future. If the lock cannot be acquired within this time,
- the operation fails with a write-write conflict error as before.
+* Fixed AQL WINDOW statement for OneShard databases: Whenever WINDOW was used
+ on a OneShard database, or on data from a collection that only has one
+ shard, the preceding and following clauses were flipped.
- Performing changes to a unique index entry also requires us to lock that
- index entry to ensure uniqueness. This lock acquisition is subject to the
- same lock timeout as locking the document key.
+* Updated arangosync to v2.19.3.
- We are planning to generalize this for multi-document operations as well as
- AQL and streaming transactions in the future.
+* Harden HTTP/2 internal callback functions against exceptions.
- In case we cannot acquire the lock on the key of the document we want to
- insert/modify, the error message will be
- `Timeout waiting to lock key - in index primary of type primary over '_key'; conflicting key: <key>`
- where `<key>` corresponds to the key of the document we tried to modify.
- In addition, the error object will contain `_key`, `_id` and `_rev` fields.
- The `_key` and `_id` correspond to the document we tried to insert/modify,
- and `_rev` will correspond to the current revision of the document from the
- DB if available, and otherwise empty.
-
- In case we cannot acquire the lock on a unique index entry, the error
- message will be
- `Timeout waiting to lock key - in index <name> of type persistent over '<fields>'; document key: <key>; indexed values: [<values>]`
- where `<name>` is the name of the index in which we tried to lock the
- entry, `<fields>` is the list of fields included in that index, `<key>`
- corresponds to the key of the document we tried to insert/modify, and
- `<values>` corresponds to the indexed values from our document.
- In addition, the error object will contain `_key`, `_id` and `_rev` fields.
- The `_key` and `_id` correspond to the document we tried to insert/modify,
- and `_rev` will correspond to the current revision of the document from the
- DB if available, and otherwise empty.
+ These callback functions are called from C code which cannot handle exceptions
+ in any way. Instead, we now turn any exception into the return code
+ `HPE_INTERNAL` to signal that an error occurred.
- This addresses GitHub issue #9702 and APM-522.
+* Added better diagnostic messages in case documents<->primary index
+ corruption occurs.
-* Added a feature to the ResignLeadership job. By default, it will now
- undo the leader changes automatically after the server is restarted,
- unless the option `undoMoves` is set to `false`. This will help to
- make rolling upgrades and restarts less troublesome, since the shard
- leaderships will not get unbalanced.
+* Prevent potential buffer overflow in the crash handler.
-* Disallow creating new databases with a `replicationFactor` value set to
- a value lower than `--cluster.min-replication-factor` or higher than
- `--cluster.max-replication-factor`. Previously the `replicationFactor`
- settings for new databases were not bounds-checked, only for new
- collections.
+* Updated OpenSSL to 3.0.10 and OpenLDAP to 2.6.6.
-* BTS-1141: Changed the default value of startup option
- `--rocksdb.enforce-block-cache-size-limit` from `true` to `false`.
- This change prevents RocksDB from going into read-only mode when an internal
- operation tries to insert some value into the block cache, but can't do so
- because the block cache's capacity limit is reached.
+* Updated ArangoDB Starter to 0.17.0.
-* Fixed BTS-418: Suboptimal index range calculation with redundant conditions.
+* ES-1566: instead of potentially accessing a nullptr inside graph traversal
+ setup, throw an exception. This will be handled properly and returned with
+ a proper error message.
-* Don't log Boost ASIO warnings such as `asio IO error: 'stream truncated'`
- when a peer closes an SSL/TLS connection without performing a proper
- connection shutdown.
+* BTS-1511: AQL: Fixed access of integers in the ranges
+ [-36028797018963968, -281474976710657] and
+ [281474976710656, 36028797018963968], i.e. those whose representation
+ requires 7 bytes. These values were misinterpreted as different integers.
+ Simple passing of values (i.e. writing to or reading from documents) was not
+ affected; only accesses of those values as numbers by AQL were, e.g.
+ arithmetic (addition, multiplication, ...), certain AQL functions, and
+ comparing/sorting - the latter only if the numbers were compared directly,
+ but not as part of a value. For example, `SORT x` led to an unexpected order
+ if x was such a number. `SORT [x]` however worked as expected.
-* Added new per-operation option `refillIndexCaches` to write operations,
- namely:
+* Added experimental startup options for RocksDB .sst file partitioning:
- - AQL INSERT/UPDATE/REPLACE/REMOVE modification operations
- - single-document insert, update, replace and remove operations
- - multi-document insert, update, replace and remove operations
+ - `--rocksdb.partition-files-for-documents`
+ - `--rocksdb.partition-files-for-primary-index`
+ - `--rocksdb.partition-files-for-edge-index`
+ - `--rocksdb.partition-files-for-persistent-index`
- If the option is set to `true` every currently running transaction will
- keep track of which in-memory index cache entries were invalidated by
- the transaction, and will try to (re-)fill them later.
- Currently edge indexes and velocypack-based indexes (persistent, hash,
- skiplist index) are supported. For velocypack-based indexes, the refilling
- will only happen if the index was set up with an in-memory cache (i.e. the
- `cacheEnabled` flag was set during index creation).
+ Enabling any of these options will make RocksDB's compaction write the data
+ for different collections/shards/indexes into different .sst files.
+ Otherwise the document data from different collections/shards/indexes can be
+ mixed and written into the same .sst files.
- Example usages:
- - `db.<collection>.insert({ _from: ..., _to: ..., ... }, { refillIndexCaches: true });`
- - `db.<collection>.update(key, { _from: ..., _to: ..., ... }, { refillIndexCaches: true });`
- - `db.<collection>.replace(key, { _from: ..., _to: ..., ... }, { refillIndexCaches: true });`
- - `db.<collection>.remove(key, { refillIndexCaches: true });`
- - `INSERT { ... } INTO <collection> OPTIONS { refillIndexCaches: true }`
- - `UPDATE { ... } WITH { ... } INTO <collection> OPTIONS { refillIndexCaches: true }`
- - `REPLACE { ... } WITH { ... } INTO <collection> OPTIONS { refillIndexCaches: true }`
- - `REMOVE { ... } IN <collection> OPTIONS { refillIndexCaches: true }`
+ Enabling these options usually has the benefit of making the RocksDB
+ compaction more efficient when a lot of different collections/shards/indexes
+ are written to in parallel.
+ The disadvantage of enabling this option is that there can be more .sst
+ files than when the option is turned off, and the disk space used by these
+ .sst files can be higher than if there are fewer .sst files (this is because
+ there is some per-.sst-file overhead).
+ In particular on deployments with many collections/shards/indexes this can
+ lead to a very high number of .sst files, with the potential of outgrowing the
+ maximum number of file descriptors the ArangoDB process can open.
+ Thus the option should only be enabled on deployments with a limited number of
+ collections/shards/indexes.
- The refilling of the in-memory caches for indexes is performed by a
- background thread, so that the foreground write operation shouldn't be
- slowed down a lot. The background thread may however cause additional
- I/O for looking up the data in RocksDB and for repopulating the caches.
+* Fixed a potential deadlock in cache::Manager::unprepareTask() in case a
+ MigrateTask could not be scheduled successfully (in case of a full scheduler
+ queue on DB servers).
- The background refilling is done in a best-effort way and is not guaranteed
- to always succeed, e.g. if there is no memory available for the cache
- subsystem, or when an in-memory cache table is currently in a migration phase
- (grow/shrink operation).
- There is a new startup option `--rocksdb.auto-refill-index-caches-on-modify`
- for DB-Servers and single servers, which currently defaults to `false`. If
- it is set to `true`, the cache refilling will be turned on automatically for
- all insert/update/replace/remove operations, so that it doesn't need to be
- specified on the per-operation/per-query level.
+v3.11.2 (2023-07-21)
+--------------------
- The new startup option `--rocksdb.auto-refill-index-caches-queue-capacity`
- can be used to limit the number of index cache entries that the background
- thread will queue. This is a safeguard to keep the memory usage at bay in
- case the background thread is slower than concurrent threads that perform
- ingestions.
+* Fix SEARCH-465: Truncate on ArangoSearch index could release a WAL if commit
+ passed.
- There are also new startup options to control whether or not the in-memory
- caches should automatically be seeded upon server restart.
- The option `--rocksdb.auto-fill-index-caches-on-startup` for DB-Servers and
- single servers enables this functionality. It currently defaults to `false`.
- If it is set to `true`, the in-memory caches of all eligible indexes will be
- automatically pre-seeded after the server startup. Note that this may cause
- additional CPU and I/O load.
- The option `--rocksdb.max-concurrent-index-fill-tasks` is available to limit
- the impact of the automatic index filling at startup. It controls how many
- full index filling operations can execute concurrently. The lower this number
- is, the lower the impact of cache filling, but the longer it will take.
- The default value for this option depends on the number of available cores,
- and is at least `1`. A value of `0` cannot be used.
- This option is only relevant if `--rocksdb.auto-fill-index-caches-on-startup`
- is set to `true`.
+* Updated ArangoDB Starter to 0.16.0.
- The PR also adds the following metrics:
- - `rocksdb_cache_auto_refill_loaded_total`: Total number of queued items for
- in-memory index caches refilling. It will always report a value of zero on
- coordinators.
- - `rocksdb_cache_auto_refill_dropped_total`: Total number of dropped items
- for in-memory index caches refilling (because number of queued items would
- exceed the value of `--rocksdb.auto-refill-index-caches-queue-capacity`).
- It will always report a value of zero on coordinators.
- - `rocksdb_cache_full_index_refills_total`: Total number of in-memory index
- caches refill operations for entire indexes. The counter gets increased for
- every index automatically loaded (because startup option
- `--rocksdb.auto-fill-index-caches-on-startup` is set to `true`) or when
- full indexes are loaded into memory manually.
- In cluster deployments the counter will be increased once per eligible
- index per shard. It will always report a value of zero on coordinators.
+* Avoid recursive lock during agency startup.
-* Add missing histogram metrics for user traffic:
- `arangodb_client_user_connection_statistics_bytes_received`
- `arangodb_client_user_connection_statistics_bytes_sent`
- These numbers were so far only published via the statistics API.
- This is needed for Oasis traffic accounting.
+* Fixed issue with lock starvation when an AQL insert operation with multiple
+ static documents was executed as part of a streaming transaction.
-* BTS-128: Fixed http request not working when content-type is velocypack.
+* Added startup option `--database.max-databases` to limit the maximum number of
+ databases that can exist in parallel on a deployment. This option can be used
+ to limit resources used by database objects. If the option is used and there
+ already exist as many databases as configured by this option, any attempt to
+ create an additional database will fail with error 32
+ (`ERROR_RESOURCE_LIMIT`). Additional databases can then only be created if
+ other databases are dropped first.
+ The default value for this option is unlimited, so technically an arbitrary
+ amount of databases can be created (although effectively the number of
+ databases is limited by memory and processing resources).
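+
+ As an illustration, a minimal arangosh sketch of the resulting behavior (the
+ database name `extraDb` is a hypothetical placeholder):
+
+   try {
+     db._createDatabase("extraDb");
+   } catch (err) {
+     // error 32 (ERROR_RESOURCE_LIMIT) signals that the configured
+     // --database.max-databases limit has been reached
+     if (err.errorNum === 32) {
+       print("database limit reached - drop another database first");
+     } else {
+       throw err;
+     }
+   }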
-* Fixed GitHub issue #16451: In certain situations, a LIMIT inside a subquery
- could erroneously reduce the number of results of the containing (sub)query.
+* FE-304, FE-305: use navigator.onLine for checking internet connection, correct
+ path for running/slow queries.
-* Added agency options
- --agency.supervision-delay-add-follower
- and
- --agency.supervision-delay-failed-follower
- to delay supervision actions for a configurable amount of seconds. This is
- desirable in case a DBServer fails and comes back quickly, because it gives
- the cluster a chance to get in sync and fully resilient without deploying
- additional shard replicas and thus without causing any data imbalance.
+* Enforce that the server always returns an empty body if the return code
+ is 204.
-* Deleted customizable Pregel (AIR) and Greenspun library.
+* Arangodump retries dump requests in more cases: read, write and connection
+ errors.
-* Add support for terabyte units (t, tb, T, TB, tib, TiB, TIB) in startup
- options.
+* Whenever there is a query request timeout in AQL (e.g., a server died) which
+ causes the query to fail, the DBServers will now all directly kill their
+ potentially ongoing parts of the query and not wait for garbage collection.
-* Fix HTTP/VST traffic accounting in internal statistics / metrics.
+* Use libunwind in jemalloc profiling to make it available with libmusl and
+ static binaries.
-* Updated arangosync to v2.13.0.
+* BTS-1331: Reduced the number of required network calls when using traversals
+ combined with FILTER and/or PRUNE statements on a GeneralGraph in a
+ clustered environment.
-* Make the deprecated `--server.disable-authentication-unix-sockets` and
- `--server.disable-authentication` startup options obsolete. They were
- deprecated in v3.0 and mapped to `--server.authentication` and
- `--server.authentication-unix-sockets`, which made them do the opposite
- of what their names suggest.
+* Added transparent LZ4 compression for values in the in-memory edge cache if
+ their size exceeds a configurable threshold. This is an opt-in
+ functionality.
-* Removed more assertions from the cluster rebalance js test that required
- the rebalance plan to always contain moves, although there are cases in
- which it contains none.
+ LZ4 compression of edge index cache values allows storing more data in main
+ memory than without compression, so the available memory can be used more
+ efficiently. The compression is transparent and does not require any change to
+ queries or applications.
+ The compression can add CPU overhead for compressing values when storing them
+ in the cache, and for decompressing values when fetching them from the cache.
+
+ The new startup option `--cache.min-value-size-for-edge-compression` can be
+ used to set a threshold value size for compressing edge index cache payload
+ values. The default value is `1GB`, which will effectively turn compression
+ off. Setting the option to a lower value (e.g. `100`) will turn on the
+ compression for any payloads whose size exceeds this value.
+
+ The new startup option `--cache.acceleration-factor-for-edge-compression` can
+ be used to fine-tune the compression. It controls the LZ4-internal
+ "acceleration" factor used for the compression. The default value is `1`.
+ Higher values typically mean less compression but faster speeds.
+
+ The following new metrics can be used to determine the usefulness of
+ compression:
+
+ - `rocksdb_cache_edge_effective_entries_size`: will return the total number of
+ bytes of all entries that were stored in the in-memory edge cache, after
+ compression was attempted/applied. This metric will be populated regardless
+ of whether compression is used or not.
+ - `rocksdb_cache_edge_uncompressed_entries_size`: will return the total number
+ of bytes of all entries that were ever stored in the in-memory edge cache,
+ before compression was applied. This metric will be populated regardless of
+ whether compression is used or not.
+ - `rocksdb_cache_edge_compression_ratio`: will return the effective
+ compression ratio for all edge cache entries ever stored in the cache.
+
+ Note that these metrics will be increased upon every insertion into the edge
+ cache, but not decreased when data gets evicted from the cache.
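+
+  As an illustrative sketch (the values shown here are examples, not
+  recommendations), compression for payloads larger than 100 bytes could be
+  enabled like this:
+
+    arangod --cache.min-value-size-for-edge-compression 100 \
+            --cache.acceleration-factor-for-edge-compression 1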
+
+* Optimize runtime performance of exclusive locks.
+
+* Added startup option `--replication.active-failover-leader-grace-period`.
+ This startup option can be used to set the amount of time (in seconds) for
+ which the current leader in an active failover setup will continue to assume
+ its leadership even if it lost connection to the agency.
+
+ In case the leader cannot contact the agency anymore, the agency will elect a
+ new leader after the supervision grace period has elapsed.
+ In order to avoid a split-brain situation with multiple servers assuming
+ leadership, this option can be used to make a disconnected leader refuse any
+  incoming write operations after the grace period controlled by this option has
+ elapsed.
+ Ideally the startup option should be given a value greater than the value of
+ the supervision grace period, in order to avoid a temporarily disconnected
+ leader giving up leadership too early and unnecessarily.
+
+ The default value is 120 seconds. Setting the option to a value of 0 will keep
+ the existing behavior, in which a disconnected leader will not refuse incoming
+ write operations.
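+
+  For example, assuming the supervision grace period is configured below three
+  minutes, a disconnected leader could be allowed to keep accepting writes for
+  up to three minutes like this (illustrative value):
+
+    arangod --replication.active-failover-leader-grace-period 180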
+
+* Added new startup options `--cache.ideal-lower-fill-ratio` and
+ `--cache.ideal-upper-fill-ratio` to control the minimum and maximum fill
+ ratios for cache tables that trigger shrinking and growing of the table by the
+ cache rebalancer. The default values are:
+
+ - `0.04` (i.e. 4%) for the lower bound that triggers shrinking
+ - `0.25` (i.e. 25%) for the upper bound that triggers growing
+
+ These values were hard-coded in previous versions of ArangoDB.
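+
+  For illustration, the previous hard-coded behavior corresponds to starting
+  the server with:
+
+    arangod --cache.ideal-lower-fill-ratio 0.04 \
+            --cache.ideal-upper-fill-ratio 0.25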
+
+* Remove temporary `CREATING_{number}` directories from hot backup in case a hot
+ backup runs into an error.
+
+* BTS-1490: Allow performing AQL updates locally without using DISTRIBUTE in
+ case the update AQL is of the pattern
-* Log startup warnings for any experimental, deprecated, obsolete or renamed
- options at startup of arangod or any of the client tools.
+ FOR doc IN collection
+    UPDATE <key> IN collection
-* Fixed issue #17291: Server crash on error in the PRUNE expression.
- Traversal PRUNE expressions containing JavaScript user-defined functions
- (UDFs) are now properly rejected in single server and cluster mode.
- PRUNE expressions that use UDFs require a V8 context for execution,
- which is not available on DB-servers in a cluster, and also isn't
- necessarily available for regular queries on single servers (a V8 context
- is only available if a query was executed inside Foxx or from inside a JS
- transaction, but not otherwise).
+ Previously the optimization was only possible if the update AQL was of the
+ pattern
-* Fix setting query memory limit to 0 for certain queries if a global memory
- limit is set, but overriding the memory limit is allowed.
+ FOR doc IN collection
+    UPDATE <key> WITH <update> IN collection
-* BTS-1075: AQL: RETURN DOCUMENT ("") inconsistent - single server vs cluster.
+  Both `<key>` and `<update>` refer to data from the collection enumeration
+  variable `doc` here.
-* Updated arangosync to v2.13.0-preview-5.
+ Also fix the optimization in case a shard key attribute is updated with a
+ value from a different attribute, e.g.
-* Added option to exclude system collection from rebalance shards plan.
+ FOR doc IN collection
+ UPDATE { _key: doc.abc, abc: doc._key } IN collection
-* Updated arangosync to v2.13.0-preview-4.
+  In this case the optimization was previously applied although it shouldn't
+  have been.
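+
+  As an illustrative example (collection and attribute names assumed), a query
+  of the newly covered pattern looks like this:
+
+    FOR doc IN collection
+      UPDATE { _key: doc._key, counter: doc.counter + 1 } IN collection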
-* Delay a MoveShard operation for leader change, until the old leader has
- actually assumed its leadership and until the new leader is actually in
- sync. This fixes a bug which could block a shard under certain circumstances.
- This fixes BTS-1110.
+* BTS-1490: a query could use a lot more memory when using COLLECT WITH COUNT.
-* Fixed issue #17367: FILTER fails when using negation (!) on variable whose
- name starts with "in". Add trailing context to NOT IN token.
+* Attempt to avoid busy looping when locking collections with a timeout.
-* Do not query vertex data in K_PATHS queries if vertex data is not needed.
+* Add a `description` field to OptimizerRule and dump the explanations via the
+ `GET /_api/query/rules` endpoint.
-* Removed assertions from cluster rebalance js test that obligated the rebalance
- plan to always have moves, but there were cases in which all there are none.
+* FE-20: fix UI placeholder format for locale when creating analyzers.
-* Show number of HTTP requests in cluster query profiles.
+* FE-240: disable JSON editor when viewing inverted index.
-* Improved the syntax highlighter for AQL queries in the web interface
- with support for multi-line strings, multi-line identifiers in forward
- and backticks, colorization of escape sequences, separate tokens for
- pseudo-keywords and pseudo-variables, an updated regex for numbers, the
- addition of the AT LEAST and WITH COUNT INTO constructs, and the
- SHA256() function.
+* FE-287: fix number validation for replicationFactor and writeConcern.
-* Enable "collect-in-cluster" optimizer rule for SmartGraph edge collections.
+* FE-285: Fix query download - use post request for query.
-* Improve performance and memory usage of IN list lookups for hash, skiplist
- and persistent indexes.
+* Fixed GitHub issue #19175.
+  This fixes a problem in traversal query optimization that was introduced in
+  3.11 and that could lead to traversal queries being aborted with the error
+  `AQL: cannot and-combine normalized condition`.
-* Improve memory usage tracking for IN list lookups and other RocksDB-based
- lookups.
+* Fixed two possible deadlocks which could occur if all medium priority threads
+  are busy: the AgencyCache could no longer receive updates from the agency,
+  and queries could no longer be finished.
+  This fixes BTS-1475 and BTS-1486.
-* Remove inactive query plan cache code (was only a stub and never enabled
- before).
-* Fixed BTS-441: Honor read only mode with disabled authentication
+v3.11.1 (2023-06-12)
+--------------------
-* Obsolete startup option `--database.force-sync-properties`. This option
- was useful with the MMFiles storage engine, but didn't have any useful
- effect when used with the RocksDB engine.
+* Updated arangosync to v2.18.1.
-* Added detailed explanations for some startup options.
- They are only exposed via `--dump-options` under the `longDescription` key.
+* SEARCH-480: Speedup ArangoSearch recovery.
-* Updated OpenSSL to 1.1.1s.
+* SEARCH-476: Fix bug in fst builder.
-* BTS-483: Added restriction for usage of query cache for streaming and JS
- transactions when they are not read-only.
+* BTS-1325: AQL: Fixed a possible deadlock with multiple parallel traversals.
-* Remove map and map.gz files from repository and add them to gitignore.
- These files are only used for debugging and therefore should not be
- included in any release. This also reduces the size of release packages.
+* Updated OpenSSL to 3.0.9.
-* Repair "load indexes into memory" function in the web UI.
+* BTS-1435: fixed invalid AQL optimization and added a safeguard.
-* Improved help texts for the collection type and satellite collection
- options in the web UI.
+* APM-766, SEARCH-479: Reduce memory overhead for ArangoSearch removes.
-* APM-517: Add tooltips with values of the displayed properties after
- clicking a node or an edge in the graph viewer.
+* Improve precision for ArangoSearch GEO_IN_RANGE function.
-* Deprecate the startup option `--agency.pool-size`. This option was never
- properly supported for any values other than the value of `--agency.size`.
- Now any value set for `--agency.pool-size` other than the value set for
- `--agency.size` will now produce a fatal error on startup.
+* Updated ArangoDB Starter to 0.15.8.
-* BTS-1082: Updating properties of a satellite collection breaks
- replicationFactor.
+* OASIS-25262: Fixed undefined behavior in IN lookup in unique indexes when the
+ lookup array had to be rebuilt in memory.
-* FE-159: When creating a database in cluster mode, there are several parameters
- required. However they are invisible (nothing shown) if I open DB settings
- after creation. Those settings should be visible in readonly mode (grey out).
+* Invalid keys are now reported as individual errors for batch insert operations
+ and no longer abort the whole batch.
-* BTS-209: Fixed requests to `_admin/execute` treating every payload as plain
- text when they're in JSON or velocypack format, but will only treat the
- payload as velocypack if specified in the header's `content-type`.
+* BTS-1255: Fix sporadic memory usage accounting underflows in in-memory cache
+ subsystem.
+ Also turn the algorithm for freeing memory from a cache's buckets from a
+ non-deterministic one that did not guarantee progress into a bounded algorithm
+ with guaranteed progress.
-* Fixed issue #17394: Unnecessary document-lookup instead of Index-Only query.
- This change improves projection handling so that more projections can be
- served from indexes.
+* ECONNABORTED is treated as a ConnectionClosed error in fuerte.
-* Updated arangosync to v2.13.0-preview-2.
+* Database drop operation no longer fails if we cannot remove the corresponding
+ permissions from the _users collection.
-* BTS-1070: Fixed query explain not dealing with an aggregate function without
- arguments and the WINDOW node not being defined as an Ast node type name.
+* Added startup option `--query.max-collections-per-query` to adjust the limit
+ for the maximum number of collections/shards per query. The option defaults to
+ `2048`, which is equivalent to the previous hardcoded value.
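+
+  For example, to raise the limit to 4096 (an illustrative value):
+
+    arangod --query.max-collections-per-query 4096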
-* Solve a case of excessive memory consumption in certain AQL queries with
- IN filters with very long lists. Free sub-iterators as soon as they are
- exhausted.
+* BTS-1261: For some named graphs in a cluster, when creating a debugDump for
+  a traversal query that used the graph's name, the graph could not be
+  recreated because the information gathered while creating the debugDump was
+  broken. The debugDump uses the result of an AQL explain to obtain the graph
+  info, but instead of the graph's name being used as the key in the explain's
+  graph object, there was an array of edge collection names. This is now fixed
+  for the case of a named graph, while the previous behavior is kept when the
+  graph's name is not available.
-* Improved shard distribution during collection creation.
+* When trying to read multiple documents, the coordinator will now handle empty
+ lists gracefully and return an empty result set instead of an error.
-* Change default output format of arangoexport from `json` to `jsonl`.
+* Added metric "rocksdb_total_sst_files" to count the number of sst files,
+ aggregated over all levels of the LSM tree.
-* Added startup option `--query.log-failed` to optionally log all failed AQL
- queries to the server log. The option is turned off by default.
+* Increase too short timeouts for requests made from coordinators to DB-servers
+ when retrieving the number of documents or the index selectivity estimates for
+ SmartGraph edge collection parts. These parts were treated like system
+ collections because of their naming convention, and the requests were run with
+ a timeout of only 5s.
-* Added startup option `--query.log-memory-usage-threshold` to optionally log
- all AQL queries that have a peak memory usage larger than the configured
- value. The default value is 4GB.
+* In batched query results, when executing requests for
+  `/_api/cursor/<cursor-id>/<batch-id>`, removed the restriction that the user
+  would only be able to fetch the next batch using the next batch id in
+  `<batch-id>` if the query option `allowRetry` was set to true, but maintained
+  the restriction that the user can only retrieve the latest batch if the
+  query option `allowRetry` is set to true.
-* Added startup option `--query.max-artifact-log-length` to control the
- maximum length of logged query strings and bind parameter values.
- This allows truncating overly long query strings and bind parameter values
- to a reasonable length. Previously the cutoff length was hard-coded.
+* Add a startup parameter `--rclone.argument` which can be used to enable
+  debugging with logfiles in hot backup rclone upload operations:
+  `--rclone.argument=--log-level=DEBUG`
+  `--rclone.argument=--log-file=/tmp/rclone.log`
-* Fixed GitHub issue #17291: Fixed a server crash which could occur in case an
- AQL query using a PRUNE or FILTER statement, combined with UDFs (user defined
- functions), got executed.
+* Fix issue #18982: query editor null/undefined check filters out bindParams
+ with value 0.
-* Improve cardinality estimate for AQL EnumerateCollectionNode in case a
- `SORT RAND() LIMIT 1` is used. Here, the estimated number of items is at
- most 1.
-* ES-1312: fix handling of reaching the WAL archive capacity limit.
+v3.11.0 (2023-05-23)
+--------------------
-* BTS-941: The HTTP API now delivers the correct list of the collection's
- shards in case a collection from an EnterpriseGraph, SmartGraph, Disjoint
- EnterpriseGraph, Disjoint SmartGraph or SatelliteGraph is being used.
+* Convert v3.11.0-rc.2 into v3.11.0.
-* Log the documents counts on leader and follower shards at the end of each
- successful shard synchronization.
+* SEARCH-477: stabilize ArangoSearch (stage of BTS-1416).
-* Changed the encoding of revision ids returned by the following REST APIs:
-  - GET /_api/collection/<collection-name>/revision: the revision id was
- previously returned as numeric value, and now it will be returned as
- a string value with either numeric encoding or HLC-encoding inside.
-  - GET /_api/collection/<collection-name>/checksum: the revision id in
- the "revision" attribute was previously encoded as a numeric value
- in single server, and as a string in cluster. This is now unified so
- that the "revision" attribute always contains a string value with
- either numeric encoding or HLC-encoding inside.
-* Fixed handling of empty URL parameters in HTTP request handling.
+v3.11.0-rc.2 (2023-05-17)
+-------------------------
-* Fixed diffing of completely non-overlapping revision trees, which could
- lead to out-of-bounds reads at the right end of the first (smaller) tree.
+* Internal bug-fixes and stabilization improvements.
-* Fixed aborting the server process if an exception was thrown in C++ code
- that was invoked from the llhttp C code dispatcher. That dispatcher code
- couldn't handle C++ exceptions properly.
-* Fixed BTS-1073: Fix encoding and decoding of revision ids in replication
- incremental sync protocol. Previously, the encoding of revision ids could
- be ambiguous under some circumstances, which could prevent shards from
- getting into sync.
+v3.11.0-rc.1 (2023-05-13)
+-------------------------
-* Log better diagnosis information in case multiple servers in a cluster are
- configured to use the same endpoint.
+* FE-211: Allow admins to edit the gravatar email.
-* Fixed BTS-852 (user's saved queries used to disappear after updating user profile).
+* Fixed BTS-1398: GEO_DISTANCE() for ArangoSearch geo_s2 analyzer.
+
+* Improved error reporting for system calls on Windows and fixed a
+ Windows-specific issue of the IOHeartbeatThread.
+
+* FE-256: Minor Graph Viewer improvements:
+  - re-order the right-click menu
+  - make the right-click menu and toolbar work in full screen
+  - remove editable attributes in the edit and delete (nodes and edges) modals
+  - add a "tree" mode to the editor in the edit (nodes and edges) modals
+
+* FE-266: Use the method 'fromGeoJson()' instead of 'geometry.coordinates'
+ for GeoJSON.
+
+* Fixed issue #18942: arangorestore ignores the HTTP status code.
+
+* FE-267: fix custom analyzer & features not showing up in inverted index view,
+ and support for basic fields definition.
+
+* FE-265: Fix user permissions radio button shadow.
+
+* FE-268: remove 'switch to new graph' when no defined graph is present.
-* MDS-1016: When creating a new collection the fields "Number of Shards" and
- "Replication factor" are greyed out now when the field "Distribute shards
- like" is not empty.
+* FE-258: allow specifying optimizeTopK during arangosearch view creation.
-* MDS-1019: Make user search case-insensitive and allow search by name.
+* FE-257: allow additional properties in inverted index creation.
-* BTS-465: Added tests for RandomGenerator and warning that other options
- for creating random values that are not Mersenne are deprecated.
+* UI Fix - allow empty keys in document key validation.
-* BTS-1008: Update react-autocomplete-input to fix single letter collection bug
- when creating a link in the views in the WebUI.
+* BTS-1181: fix a data race on the collection list in TransactionState.
+ This race could happen in the cluster when a collection is sharded by a
+  different attribute than `_key` and a document lookup by `_key` is performed.
+ This requires the lookup to be sent to each shard, since we cannot deduce the
+ shard based on the `_key` value. If this lookup is done as part of a stream
+ transaction, and if the collection has not been declared in the transaction's
+ `read` list, then the collection is added lazily. Previously this would result
+ in a race if multiple shards are located on the same server.
+  For transactions with `allowDirtyReads` set to true, read collections are
+ always added lazily, which made this race more likely.
-* Improved optimization of functions to be covered by Traversals. Now more functions
- should be optimized into the traversal, and some that are not valid should not be optimized
- anymore. Fixes #16589.
+* BTS-1340: Fixed the telemetrics API making the shell hang when logging into
+  it and leaving it too quickly, before the telemetrics API had time to send
+  the data to the warehouse. The thread could hang in SSL_connect() until the
+  connection timeout if the socket is blocking, so the socket was made
+  non-blocking for the thread to leave SSL_connect() after the connection is
+  interrupted when leaving the shell. Also created the startup parameter
+  `--client.failure-points` for arangosh, which enables failure points whose
+  names are provided in an array of strings, just like `--server.failure-point`
+  for arangod.
-* BTS-908: Fixed WebUI GraphViewer not being able to create a new edge relation
- between two nodes in cases where only one edge definition has been defined
- inside the graph definition.
+* BTS-1350: Fixed imprecision in index info in the telemetrics object for
+  clusters. When a collection also had an arangosearch view, telemetrics
+  showed imprecise index values because the view was counted as an index of
+  type arangosearch, and its object did not contain the fields expected in
+  other indexes' objects.
-* Fixed BTS-850: Fixed the removal of already deleted orphan collections out
- of a graph definition. The removal of an already deleted orphan collection
- out of a graph definition failed and has been rejected in case the
- collection got dropped already.
+* Add metric `arangodb_file_descriptors_current` to expose the number of file
+ descriptors currently opened by the arangod process. This metric is available
+ on Linux only.
+ As counting the number of open file descriptors can be expensive, this metric
+  is only updated at a configurable interval. The new startup option
+ `--server.descriptors-count-interval` can be used to specify the update
+ interval (in milliseconds). It defaults to 60,000 (i.e. once per minute).
+ The startup option can be set to a value of `0` to disable the counting of
+ open file descriptors for performance reasons.
+ If counting is turned off, the metric will report a value of `0`.
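+
+  For example, to update the metric every 10 seconds instead (illustrative
+  value):
+
+    arangod --server.descriptors-count-interval 10000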
-* BTS-1061: ARM was not recognized on Apple M1.
+* ES-1566: fix an issue when trying to restrict traversals to non-existing
+ collections with `edgeCollections` traversal option.
-* BTS-977: Added an error message for when an unauthorized user makes an
- HTTP GET request to current database from a database name that exists which
- the user can't access and from a database name that doesn't exist, so both
-  requests have the same error message (`_db/<database-name>/_api/database/current`).
+ If a non-existing collection is specified in the `edgeCollections` or
+ `vertexCollections` options of an AQL traversal, the query will now fail
+ with a `collection or view not found` error. Also, if wrong collection
+ types are used (e.g. a document collection or a view for `edgeCollections`),
+ then an error is raised about an invalid collection type being used.
-* BTS-325: Changed the HTTP status code from `400` to `404` of the ArangoDB
- error code `ERROR_GRAPH_REFERENCED_VERTEX_COLLECTION_NOT_USED` to handle
- this error in accordance to our edge errors.
+ This is a behavior change compared to previous versions, which ignored
+ specifying non-existing vertex collections and had undefined behavior when
+ specifying non-existing edge collections or using a vertex collection
+ instead of an edge collection.
-* Added new AQL function SHA256(value).
+* Added metric `rocksdb_cache_peak_allocated` to store the peak memory
+ allocation value for in-memory caches.
-* Adjust permissions for "search-alias" views.
+* Remove leftover in-memory cache tables after dropping collections that
+ had their `cacheEnabled` flag set to `true`. Previously some memory
+ could remain allocated in the in-memory cache even after such collections
+ were dropped.
- Previously, "search-alias" views were visible to users that didn't have read
- permissions on the underlying referenced collections. This was inconsistent,
- because "arangosearch" views weren't shown to users that didn't have read
- permissions on the underlying links.
- Now, the behavior for "search-alias" views is the same as for "arangosearch"
- views, i.e. "search-alias" views are not shown and are not accessible for
- users that don't have at least read permissions on the underlying collections.
+* BTS-1315: Fixed spuriously occurring failures during Foxx application
+  installation in case a load balancer is being used in front of the
+  coordinators.
-* BTS-969: Added restriction for HTTP request `/cluster/rebalance`not to
- consider servers that have failed status as a possible target for rebalancing
- shards in its execution plan.
+* Updated arangosync to v2.17.0.
-* Added index cleanup in Supervision. If an index was not created successfully
- and the coordinator which initiated the creation was rebooted or is dead,
- then the agency Supervision will drop the index again. If it was created
- successfully, the agency Supervision will finalize it.
+* BTS-1343: Removed an error message about an invalid endpoint that appeared
+  when attempting to fetch telemetrics from the servers with the invalid
+  endpoint as a starting point. As telemetrics is transparent to the end user,
+  the error message interferes with the user experience if it keeps appearing.
-* BTS-742: Added restriction for, when in smart graph, not accepting satellites
- in invalid format when storing a graph (like `{satellites: null}`).
+* FE-255: add validation for collection, document, graph, view and database
+ names.
-* Temporary fix for BTS-1006 (hides new view types).
+* Add metric `arangodb_file_descriptors_limit` to expose the system limit for
+ the number of open files for the arangod process.
-* Updated arangosync to v2.12.0.
+* Fix an issue that causes followers to be dropped due to premature transaction
+ abortions as a result of query failures.
+  When a query results in a failure, this will cause the leaders to
+  abort the transaction on the followers. However, if the followers have
+ transactions that are shared with leaders of other shards, and if one of those
+ leaders has not yet seen the error, then it will happily continue to replicate
+ to that follower. But if the follower has already aborted the transaction,
+ then it will reject the replication request. Previously this would cause the
+ follower to be dropped, but now this should be handled gracefully.
-* Improve upload and download speed of hotbackup by changing the way we use
- rclone. Empty hash files are now uploaded or downloaded by pattern, and
- all other files are done in batches without remote directory listing,
- which allows rclone to parallelize and avoid a lot of unnecessary network
- traffic. The format of hotbackups does not change at all.
-* Fixed issue BTS-1018: Improve logging of binary velocypack request data.
+v3.11.0-beta.1 (2023-05-07)
+---------------------------
-* BTS-477: added integration tests for covering log parameters.
+* Updated arangosync to v2.17.0-preview-1.
-* Moved the handling of escaping control and unicode chars in the log to
- the Logger instead of LogAppenderFile.
+* FE-243: add support for geo_s2 analyzer.
-* Updated ArangoDB Starter to 0.15.5.
+* Reduce memory usage for incremental sync replication.
-* Updated arangosync to v2.12.0-preview-14.
+ The incremental sync protocol that was used for collections created with 3.8
+ or higher had memory usage issues in case the follower already had some local
+ data and its dataset was much larger than the leader's. For example, this can
+ happen if a follower gets disconnected, then a lot of document removals happen
+ on the leader and afterwards the follower tries to get back into sync.
-* Fixed BTS-1017: Fixed a graph search issue, where subqueries lead to
- incorrect results when they have been pushed down fully onto a DBServer
- when they are in a Hybrid Disjoint SmartGraph context and
- SatelliteCollections were part of it.
+ In this case, the follower buffered the ids of documents it had to remove
+ locally in a vector, which could grow arbitrarily large. The removal of the
+ documents contained in this vector would only happen at the end, potentially
+ even without performing intermediate commits.
-* Fixed issue BTS-1023:
- Added Linux-specific startup option `--use-splice-syscall` to control
- whether the Linux-specific splice() syscall should be used for copying
- file contents. While the syscall is generally available since Linux 2.6.x,
- it is also required that the underlying filesystem supports the splice
- operation. This is not true for some encrypted filesystems, on which
- splice() calls thus fail.
- By setting the startup option `--use-splice-syscall` to `false`, a less
- efficient, but more portable user-space file copying method will be
- used instead, which should work on all filesystems.
- The startup option is not available on other operating systems than Linux.
+  The change is to trigger the document removal earlier, once the
+ vector has reached some size threshold, and also to use intermediate commits
+ during the removals.
-* Added authenticate header to the HTTP response when status code is 401
- for HTTP/2.
-* Best quality spam pushed down to DEBUG.
+v3.11.0-alpha.1 (2023-05-03)
+----------------------------
-* Updated arangosync to v2.12.0-preview-13.
+* FE-22: Don't display collection content by default when clicking on a
+ collection in the Web UI.
-* Implement prefetch for revision trees, in case a batch is created with
- a distinguished collection as for `SynchronizeShard`. This ensures that
- the revision tree for the batch will be available when needed, even though
- the revision tree for the collection might already have advanced beyond
- the sequence number of the snapshot in the batch. This ensures that
- shards can get in sync more reliably and more quickly.
+* Fixed a race in a test for aborting long running operations.
-* Fixed log with json format not respecting the value of parameter
- `--log.shorten-filenames`.
+* FE-251: bugfix - prohibit multiple expansions of the same node in the graph
+ viewer.
-* Updated arangosync to v2.12.0-preview-12.
+* FE-162: Fix display of geodesic lines.
-* Added "intermediateCommits" statistics return value for AQL queries, to
- relay the number of intermediate commits back that a write query performed.
+* FE-263: Improve forceAtlas layout in the Graph Viewer.
-* Updated ArangoDB Starter to 0.15.5-preview-3.
+* Fixed a race in the Maintenance, impacting leaders that have just been
+ resigned. During a MoveShard job, the old leader could sabotage the whole
+ operation by removing the newly added follower from Current. The solution
+ is to update Current using local collection information, instead of the
+ potentially outdated velocypack slice.
-* Fixed a rare occurring issue where paths inside a DisjointSmart traversal
- containing only satellite relevant nodes were not returned properly
- (ES-1265).
+* Fix unstable test setting the desired number of dbservers.
-* Fixed BTS-926: UI showing the "create index" form to non-admin users.
+* FE-253: bugfix - validate JSON, show errors & disable save when error.
-* Updated Views UI with all changes necessary for the 3.10.0 launch.
+* FE-254: bugfix - filter out empty storedValues for persistent index.
-* Added message on the UI view of Logs when the user has restricted access,
- either because cannot access `_system`, or because is currently in
- another database.
+* FE-252: bugfix - canvas image screenshot breaks graph colors.
-* Fix for the Pregel's HITS algorithm using a fixed value instead of the
- passed "threshold" parameter. The same applied to the new HITSKleinberg.
+* Fix incompatibility between 3.9 and 3.10 w.r.t. serialization of AQL array
+ filters (i.e. `[* FILTER ...]`). The array filters were serialized in a
+ different way in 3.9 than they are serialized in 3.10. 3.10 also expected the
+ new serialization format when unserializing a plan.
+ The fix now enables support for both formats.
-* Do not drop follower shard after too many failed shard synchronization
- attempts.
+* Fixed issue #18769: Input validation allows invalid UTF-8 code points.
-* Added startup option `--arangosearch.skip-recovery` to skip the recovery
- of arangosearch view links or inverted indexes.
- The startup option can be specified multiple times and is expected to either
- contain the string `all` (will skip the recovery for all view links and
- inverted indexes) or a collection name + link id/name pair (e.g.
- `testCollection/123456`, where `123456` is a link/index id or an index name).
- This new startup option is an emergency means to speed up lengthy recovery
- procedures when there is a large WAL backlog to replay. The normal recovery
- will still take place even with the option set, but recovery data for
- links/indexes can be skipped. This can improve the recovery speed and reduce
- memory usage during the recovery process.
- All links or inverted indexes that are marked as to-be-skipped via the
- option, but for which there is recovery data, will be marked as "out of sync"
- at the end of the recovery.
- The recovery procedure will also print a list of links/indexes which it has
- marked as out-of-sync.
- Additionally, if committing data for a link/index fails for whatever reason,
- the link/index is also marked as being out-of-sync.
+ This change enforces the validation of UTF-8 surrogate pairs in incoming JSON
+ data. Previously, the following loopholes existed when validating UTF-8
+ surrogate pair data:
+ - a high surrogate, followed by something other than a low surrogate (or the
+ end of the string)
+    - a low surrogate, not preceded by a high surrogate
+ These loopholes are now closed, which means that any JSON inputs with invalid
+ surrogate pair data will be rejected by the server.
- If an out-of-sync link or index can be used in queries depends on another new
- startup option `--arangosearch.fail-queries-on-out-of-sync`. It defaults to
-  `false`, meaning that out-of-sync links/indexes can still be queried. If the
- option is set to `true`, queries on such links/indexes will fail with error
- "collection/view is out of sync" (error code 1481).
+ Note that the extended validation for surrogates can be turned off along with
+ other UTF-8 string validation by setting the server startup option
+  `--server.validate-utf8-strings` to `false`. This is not recommended,
+  though, and should only be used in situations when a database is known to
+  contain invalid data that must continue to be supported.
- Links/indexes that are marked out-of-sync will keep the out-of-sync flag
- until they are dropped. To get rid of an out-of-sync link/index it is
- recommended to manually drop and recreate it. As recreating a link/index may
- cause high load, this is not done automatically but requires explicit user
- opt-in.
+* Updated rclone to v1.62.2 custom build with go1.20.3.
- The number of out-of-sync links/indexes is also observable via a new metric
- `arangodb_search_num_out_of_sync_links`.
+* Changed return code of APIs that create databases from previously 1229
+ (`ERROR_ARANGO_DATABASE_NAME_INVALID`) to 1208 (`ERROR_ARANGO_ILLEGAL_NAME`)
+ in case an invalid database name is used.
+ This is a downwards-incompatible change, but unifies the behavior for
+ database creation with the behavior of collection and view creation,
+ which also return error 1208 in case the specified name is invalid.
-* Added startup option `--rocksdb.periodic-compaction-ttl`.
- This option controls the TTL (in seconds) for periodic compaction of
- .sst files in RocksDB, based on the .sst file age. The default value
- from RocksDB is ~30 days. To avoid periodic auto-compaction, the option
- can be set to 0.
+* FE-236: bugfix - remove unused files, use new tooltip in views UI.
-* Now the Pregel API returns `{... algorithm: "pagerank", ...}` instead of
-`{... algorithm: "PageRank", ...}` when the Page Rank algorithm is run
-(in accordance to the documentation).
+* FE-238: Added auto-login support in core web UI - disabled logout when
+ auto-login is enabled, set sessionStorage "jwtUser" value when login is
+ skipped.
-* Updated arangosync to v2.12.0-preview-11.
+* FE-233: bugfix - fix query spotlight search not working.
-* Added integration tests for `--log.escape-control-chars` and
- `--log.escape-unicode-chars`.
+* FE-349: bugfix - filter out empty primarySort field in UI.
-* Fix SEARCH-350: Crash during consolidation.
+* FE-247: bugfix - missing storedValues field in persistent index form.
-* SEARCH-357: Added SUBSTRING_BYTES function.
+* FE-242, FE-244: bugfix - add support for cache fields, fix inverted index name
+ undefined.
-* A new Pregel algorithm: the version of Hypertext-Induced Topic Search (HITS)
- as described in the original paper.
+* FE-241: bugfix - filter predefined queries based on search term.
-* Web UI: Reduce size and initial render height of a modal (fixes BTS-940).
+* FE-216: bugfix - make view patches async in the UI.
-* Disable optimization rule to avoid crash (BTS-951).
+* FE-212: bugfix: links not getting removed when copying from another view in
+ UI.
-* Fix comparison of JSON schemas on DB servers after there was a schema change
- via a coordinator: the schema comparison previously did not take into account
- that some ArangoDB versions store an internal `{"type":"json"}` attribute in
- the schema, and some don't. Thus two identical schemas could compare
- differently.
- The correct schema version was always applied and used, and validation of
- documents against the schema was also not affected. However, because two
- schemas could compare unequal, this could have caused unnecessary repeated
- work for background maintenance threads.
+* FE-222: Fix - Allow additional properties in arangosearch, allow no fields in
+  inverted index when 'includeAllFields' is true.
-* Removed transitive node dependencies.
+* APM-183: Support UTF-8 on UI (collection/view/index names).
-* Web UI: Now correctly handles the server error response when an error occurred
- during the modification of a document or an edge (BTS-934).
+* FE-199: Remove URL handling of fields on view screen.
-* Make graph search case-insensitive (fixes BTS-882).
+* Changed the behavior of the following JavaScript functions in arangosh and
+ arangod (e.g. when used from a Foxx service):
-* BTS-428: Added function DATE_ISOWEEKYEAR that retrieves the number of the
- week counting from when the year started in ISO calendar and also the year
- it's in.
+  - `db.<collection>.dropIndex(id)`: this function now throws if no index exists
+ with the specified id. Previously the function only returned the value
+ `false`.
+ - `db._dropIndex(id)`: this function now throws if no index exists with the
+ specified id. Previously the function only returned the value `false`.
-* Added handling of requests with Transfer-Encoding chunked, which is
- not implemented, so returns code HTTP code 501.
+ These changes are not downwards-compatible, but they can be easily worked
+ around by wrapping dropIndex calls into a try ... catch.
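+
+  A minimal arangosh sketch of such a workaround (collection name and index id
+  are hypothetical):
+
+    try {
+      db.myCollection.dropIndex("myCollection/123456");
+    } catch (err) {
+      // ignore the error to restore the old, non-throwing behavior
+    }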
-* Add progress reporting to RocksDB WAL recovery, in case there are many WAL
- files to recover.
+ The HTTP API for dropping indexes is not affected by these changes, as it
+ previously returned HTTP 404 already when the specified index could not be
+ found.
-* Updated ArangoDB Starter to 0.15.5-preview-2.
+* Added `--dump-views` option to arangodump, to control whether arangosearch
+ view definitions should be stored as part of the dump. The option defaults to
+ `true`.
-* Fixed BTS-918 (incorrectly navigating back 1 level in history when a modal-dialog element is present).
+* APM-183: optionally allow special characters and Unicode characters in
+ collection names, view names and index names.
-* Updated arangosync to v2.12.0-preview-9.
+ This feature allows toggling the naming convention for collection names, view
+ names and index names from the previous strict mode, which only allowed
+ selected ASCII characters, to an extended, more relaxed mode. The extended
+ mode allows additional ASCII characters as well as non-ASCII UTF-8 characters
+ in database names, collection names, index names and view names.
+  The extended mode can be enabled by setting the new startup option
+  `--database.extended-names` to `true`.
+  It is turned off by default and requires an explicit opt-in, simply
+ because some drivers and client applications may not be ready for it yet.
+ The arangod server, the ArangoDB web interface and the following bundled
+ client tools are prepared and ready for using the extended names:
+ - arangobench
+ - arangodump
+ - arangoexport
+ - arangoimport
+ - arangorestore
+ - arangosh
+ More tools and the drivers shipped by ArangoDB may be added to the list in the
+ future.
-* Disallowed index creation that covers fields in which the field's name starts
- or ends with `:` for single server or cluster when the instance is a
- coordinator or single server. This validation only happens for index creation,
- so already existing indexes that might use such field names will remain as they are.
+ Please note that the extended names should not be turned on during upgrades
+ from previous versions, but only once the upgrade has been completed
+ successfully. In addition, the extended names should not be used in
+ environments that require extracting data into a previous version of ArangoDB,
+ or when database dumps may be restored into a previous version of ArangoDB.
+ This is because older versions will not be able to handle the extended names.
+ Finally, it should not be turned on in environments in which drivers are in
+ use that haven't been prepared to work with the extended naming convention.
-* Updated arangosync to v2.12.0-preview-6.
+ Warning: turning on the `--database.extended-names` option for a deployment
+ requires it to stay enabled permanently, i.e. it can be changed
+ from `false` to `true` but not back. When enabling it, it is also required
+ to do this consistently on all coordinators and DB servers.
-* When using `SHORTEST_PATH`, `K_SHORTEST_PATHS`, `ALL_SHORTEST_PATHS`, or
- `K_PATHS` in an AQL Query and the query itself produced warnings during
- execution, the type has been wrongly reported. It reported always with
- `SHORTEST_PATH` and not the specific used one.
+ The extended names for databases, collections, views and indexes will be
+ enabled by default in one of the future releases of ArangoDB, once enough
+ drivers and other client tools have had the chance to adapt.
-* Updated warning messages raised for non accepted query OPTIONS,
- distinguishing between when the OPTIONS attribute is correct, but the value
- is in incorrect format, and when the OPTIONS attribute itself is incorrect.
+* FE-200: Adds smart & enterprise graph support in the UI.
-* Since ArangoDB 3.8 there was a loophole for creating duplicate keys in the
- same collection. The requirements were:
- - cluster deployment
- - needs at least two collections (source and target), and the target
- collection must have more than one shard and must use a custom shard key.
- - inserting documents into the target collection must have happened via an
- AQL query like `FOR doc IN source INSERT doc INTO target`.
- In this particular combination, the document keys (`_key` attribute) from
- the source collection were used as-is for insertion into the target
- collection. However, as the target collection is not sharded by `_key` and
- uses a custom shard key, it is actually not allowed to specify user-defined
- values for `_key`. That check was missing since 3.8 in this particular
- combination and has now been added back. AQL queries attempting to insert
- documents into a collection like this will now fail with the error "must not
- specify _key for this collection", as they used to do before 3.8.
+* Forward the `ttl` cursor option for AQL queries in the JavaScript API
+ from the `db._query()` and the `db._createStatement()` methods to the server.
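+
+  For example, in arangosh (the TTL value of 120 seconds is illustrative):
+
+    db._query("FOR i IN 1..10 RETURN i", {}, { ttl: 120 });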
-* Updated ArangoDB Starter to 0.15.5-preview-1.
+* APM-407: add an optimization for inserting multiple documents at the same time
+ via an AQL INSERT query.
-* Updated arangosync to v2.12.0-preview-4.
+ There is an optimizer rule `optimize-cluster-multiple-document-operations`,
+ which fires in case an AQL query has one of the patterns
+ - `FOR doc IN @docs INSERT doc INTO ...` (where `@docs` is a bind parameter
+ with an array of documents to be inserted),
+ - `FOR doc IN [...] INSERT doc INTO ...` (where the FOR loop iterates over an
+ array of input documents known at query compile time),
+ - `LET docs = [...] FOR doc IN docs INSERT doc INTO ...` (where the documents
+ set up by the LET are some static documents known at query compile time)
-* Improve error handling for passing wrong transaction ids / cursor ids / pregel
- job ids to request forwarding. Also prevent the error "transaction id not
- found" in cases when request forwarding was tried to a coordinator that was
- recently restarted.
-
-* Added startup option `--rocksdb.verify-sst` to validate sst files already
- present in the database directory on startup. Default: false.
-
-* BTS-907: Fixed some rare SortNode related optimizer issues, when at least two
- or more SortNodes appeared in the AQL execution plan.
-
-* Updated arangosync to v2.12.0-preview-3.
-
-* Added new AQL function `VALUE` capable of accessing object attribute by a
- specified path.
-
-* Added OFFSET_INFO function (Enterprise Edition only) to support search results
- highlighting.
-
-* Updated Rclone to v1.59.0.
+ If a query has such pattern, and all the following restrictions are met, then
+ the optimization is triggered:
-* Add serverId parameter to _admin/log/level. Allows you to forward the request to
- other servers.
+ - there are no following RETURN nodes (including any RETURN OLD, RETURN NEW)
+ - the FOR loop is not contained in another outer FOR loop or subquery
+ - there are no other nodes (e.g. LET, FILTER) between the FOR and the INSERT
+ - the INSERT is not used on a SmartGraph edge collection
+ - the FOR loop is iterating over a constant, deterministic expression
-* Updated OpenSSL to 1.1.1q and OpenLDAP to 2.6.3.
+ The optimization will then add a `MultipleRemoteExecutionNode` to the query
+ execution plan, which will care about inserting all documents into the
+ collection in one go. Further optimizer rules are skipped if the optimization
+ is triggered.
-* Updated arangosync to v2.12.0-preview-2.
+ Future versions of ArangoDB may lift some of the restrictions for the query
+ pattern, so that the optimization may be triggered in more cases in the
+ future.
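+
+  An illustrative arangosh example that matches the first pattern (the
+  collection name `users` is assumed):
+
+    db._query("FOR doc IN @docs INSERT doc INTO users",
+              { docs: [ { name: "a" }, { name: "b" } ] });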
-* ArangoSearch nested search feature. (Enterprise Edition): Added ability to index
- and search nested documents with ArangoSearch views.
+* FE-200: Add canvas interactions to Graph Viewer.
-* Fixed handling of illegal edges in Enterprise Graphs. Adding an edge to a SmartGraph
- vertex collection through document API caused incorrect sharding of the edge. Now
- this edge is rejected as invalid. (BTS-906)
+* FE-218: Updated WebUI dependencies.
-* Removed unused log topics "CLUSTERCOMM", "COLLECTOR" and "PERFORMANCE" from
- the code.
+* Make REST API `/_admin/shutdown` sleep for only half a second until it
+ initiates the server shutdown. Previously it slept for 2 seconds, but half a
+ second should already be enough to send the server's response out.
-* Added ALL_SHORTEST_PATHS functionality to find all shortest paths between two
- given documents.
+* MDS-1001: Performance improvement in AQL. If you are using a traversal like
+ `FOR v, e, p IN <....>` and later in the query access the last vertex on the
+ path e.g.:
+ `FILTER p.vertices[-1].name == "ArangoDB"` it will now be transformed to
+ `FILTER v.name == "ArangoDB"` which is an equivalent statement. The latter
+ however is cheaper to compute, as we do not need to create an in-memory
+ representation of the path. Furthermore we can apply additional optimizations
+ on `v` which are not possible on `p`. The same optimization holds true for
+ `p.edges[-1]` which is equivalent to `e`. The optimization rule for this is
+ called "optimize-traversal-last-element-access".
-* Added another test for computedValues attribute keepNull.
+* FE-142: Updates indices view list & index addition to React.
-* BTS-913: check for proper timezone setup of the system on startup.
- This will then log errors that else would only occur in AQL-Functions at
- runtime.
+* A Pregel execution now stores its state during and after execution into a
+ system collection. To read or delete entries the new API
+  `/_api/control_pregel/history[/<id>]` has been added. Additionally, the Pregel
+ JavaScript module has been extended to support access as well.
+  Read history: `.history()`.
+  Remove history: `.removeHistory()`.
-* Changed rocksdb default compression type from snappy to lz4.
+* Marked all memory-mapping options for Pregel as obsolete.
+ The memory mapping code was removed as it did not provide any advantages over
+ spilling into system-provided swap space.
-* Fixed a potential deadlock in RocksDB compaction.
- For details see https://github.com/facebook/rocksdb/pull/10355
-
-* Added more specific process exit codes for arangod and all client tools,
- and changed the executables' exit code for the following situations:
- - an unknown startup option name is used: previously the exit code was 1.
- Now the exit code when using an invalid option is 3 (symbolic exit code
- name EXIT_INVALID_OPTION_NAME).
- - an invalid value is used for a startup option (e.g. a number that is
- outside the allowed range for the option's underlying value type, or a
- string value is used for a numeric option): previously the exit code was
- 1. Now the exit code for these case is 4 (symbolic exit code name
- EXIT_INVALID_OPTION_VALUE).
- - a config file is specified that does not exist: previously the exit code
- was either 1 or 6 (symbolic exit code name EXIT_CONFIG_NOT_FOUND). Now
- the exit code in this case is always 6 (EXIT_CONFIG_NOT_FOUND).
- - a structurally invalid config file is used, e.g. the config file contains
- a line that cannot be parsed: previously the exit code in this situation
- was 1. Now it is always 6 (symbolic exit code name EXIT_CONFIG_NOT_FOUND).
+* FE-139 adds new search view type (search-alias).
- Note that this change can affect any custom scripts that check for startup
- failures using the specific exit code 1. These scripts should be adjusted so
- that they check for a non-zero exit code. They can opt-in to more specific
- error handling using the additional exit codes mentioned above, in order to
- distinguish between different kinds of startup errors.
+* Ran automated migrations on all .scss files to remove deprecated division
+ operator usage.
-* arangoimport now supports the option --remove-attribute on type JSON as well.
- Before it was restricted to TSV and CSV only.
+* SEARCH-279: Fix consistency during update/replace operations for arangosearch
+ links and inverted indexes.
-* Added CSP recommended headers to Aardvark app for better security.
+* APM-294: Added telemetrics API that gathers anonymous feature usage statistics
+ from a deployment. The API is accessible via the endpoint
+ `/_admin/telemetrics`. The API is enabled by default in release builds, but
+ disabled by default in maintainer mode. It can be explicitly turned on/off
+ with the server startup parameter `--server.telemetrics-api`.
+ The required access privileges to access the telemetrics API can be configured
+ via the server startup option `--server.support-info-api`.
+ The telemetrics API is used by the arangosh: every time the arangosh is
+ started, it will send a request to the connected server to gather telemetrics
+ from the `/_admin/telemetrics` endpoint. The telemetrics data are then sent to
+ an aggregation service that is run by ArangoDB.
-* Fixed BTS-851: "Could not fetch the applier state of: undefined".
+* APM-283: Use parallel gather in almost all queries. The only case where we
+ cannot use parallel gather is when using traversals, although there are some
+ exceptions for disjoint SmartGraphs where the traversal can run completely on
+ the local DB-server. All other queries should now be able to parallelize the
+  gather node. This can not only speed up queries quite significantly, but
+  also overcome issues with the previous serial processing within gather
+  nodes, which could lead to high memory usage on coordinators caused by
+  buffering of documents from other shards, and timeouts on some DB-Servers
+  because query parts
+ were idle for too long.
-* Removed internal JavaScript dependencies "expect.js", "media-typer" and
- "underscore". We recommend always bundling your own copy of third-party
- modules as all previously included third-party modules are now considered
-  deprecated and may be removed in future versions of ArangoDB.
+* Changed the path where test scripts locate configuration files from
+  `etc/relative` to `etc/testing`. These paths contain `arangosh.conf`, which
+  was previously read from `etc/relative` in the test environment.
-* APM-84: Added option to spill intermediate AQL query results from RAM to
- disk when their size exceeds certain thresholds. Currently the only AQL
- operation that can make use of this is the SortExecutor (AQL SORT operation
- without using a LIMIT). Further AQL executor types will be supported in
- future releases.
+* Made configurable the return code that is delivered if a write fails because
+  the write concern is not fulfilled (not enough in-sync replicas available).
+ Previously (and now by default), a code of HTTP 403 is returned and the
+ request returns immediately. If the command line option
+ --cluster.failed-write-concern-status-code=503
+ is set, then HTTP 503 is returned. Note that no cluster-internal retry is
+ happening, such that a client is informed right away about the problem.
+ Retry loops have to be organized in the client program.
- Spilling over query results from RAM to disk is off by default and currently
- in an experimental stage. In order to opt-in to the feature, it is required
- to set the following startup option `--temp.intermediate-results-path`.
- The directory specified here must not be located underneath the instance's
- database directory.
- When this startup option is specified, ArangoDB assumes ownership of that
- directory and will wipe its contents on startup and shutdown. The directory
- can be placed on ephemeral storage, as the data stored inside it is there
- only temporarily, while the instance is running. It does not need to be
- persisted across instance restarts and does not need to be backed up.
-
- When a directory is specified via the startup option, the following
- additional configuration options can be used to control the threshold
- values for spilling over data:
-
- * `--temp.intermediate-results-capacity`: maximum on-disk size (in bytes)
- for intermediate results. If set to 0, it means that the on-disk size
- is not constrained. It can be set to a value other than 0 to restrict the
- size of the temporary directory. Once the cumulated on-disk size of
- intermediate results reaches the configured maximum capacity, the
- query will be aborted with failure "disk capacity limit for intermediate
- results exceeded".
- * `--temp.intermediate-results-spillover-threshold-num-rows`: number of
- result rows from which on a spillover from RAM to disk will happen.
- * `--temp.intermediate-results-spillover-threshold-memory-usage`: memory
- usage (in bytes) after which a spillover from RAM to disk will happen.
- * `--temp.intermediate-results-encryption`: whether or not the on-disk
- data should be encrypted. This option is only available in the Enterprise
- Edition.
-  * `--temp.intermediate-results-encryption-hardware-acceleration`: whether
- or not to use hardware acceleration for the on-disk encryption. This
- option is only available in the Enterprise Edition.
+* Added support for sending gzip-compressed responses from the server.
+ Previously only deflated responses were supported.
- Please note that the feature is currently still experimental and may slightly
- change in future releases. As mentioned, the only Executor that can make
- use of spilling data to disk is the SortExecutor (SORT without LIMIT).
- Also note that the query results will still be built up entirely in RAM
- on coordinators and single servers for non-streaming queries. In order to
- avoid the buildup of the entire query result in RAM, a streaming query
- should be used.
+* FE-135: Add new Graph Viewer with vis.js and change the UI.
-* Enterprise only: Added `MINHASH`, `MINHASH_MATCH`, `MINHASH_ERROR`,
- `MINHASH_COUNT` AQL functions.
+* FE-19: Updated ArangoDB logo in web interface.
-* Enterprise only: Added `minhash` analyzer.
+* Make the hashed variant of AQL COLLECT support INTO clauses too.
+ Previously only the sorted variant of AQL COLLECT supported INTO clauses.
-* BugFix in Pregel's status: When loading the graph into memory,
- Pregel's state is now 'loading' instead of 'running'. When loading is finished,
- Pregel's state changes to the 'running' state.
+* Upgraded OpenSSL to 3.0.8.
-* arangoimport now supports an additional option "--overwrite-collection-prefix".
- This option will only help while importing edge collections, and if it is used
- together with "--to-collection-prefix" or "--from-collection-prefix". If there
- are vertex collection prefixes in the file you want to import (e.g. you just
- exported an edge collection from ArangoDB) you allow arangoimport to overwrite
- those with the commandline prefixes. If the option is false (default value)
- only _from and _to values without a prefix will be prefixed by the handed in
- values.
+* FE-174: Change ViewsUI layout to single-page instead of tabs.
-* Added startup option `--rocksdb.compaction-style` to configure the compaction
- style which is used to pick the next file(s) to be compacted.
+* Add peak memory usage to the query object details for queries in the slow
+ query history and in the list of currently running queries. The peak memory
+ usage is also returned via REST APIs as `peakMemoryUsage`.
-* BugFix in Pregel's Label Propagation: the union of three undirected cliques
- of size at least three connected by an undirected triangle now returns
- three communities (each clique is a community) instead of two.
+* Provide options for configuring and enabling RocksDB's blob storage (BlobDB)
+ for large documents in the documents column family.
+ This is currently an experimental feature.
-* Pregel now reports correct and ongoing runtimes for loading, running, and
- storing as well as runtimes for the separate global supersteps.
+ The following experimental options are available:
-* Fixed parsing of K_SHORTEST_PATHS queries to not allow ranges anymore.
+ - `--rocksdb.enable-blob-files`: Enable the usage of blob files for the
+ documents column family. This option defaults to `false`. All following
+ options are only relevant if this option is set to `true`.
+ - `--rocksdb.min-blob-size`: Size threshold for storing large documents in
+ blob files (in bytes, 0 = store all documents in blob files).
+ - `--rocksdb.blob-file-size`: Size limit for blob files in the documents
+ column family (in bytes).
+ - `--rocksdb.blob-compression-type`: Compression algorithm to use for blob
+ data in the documents column family.
+ - `--rocksdb.enable-blob-garbage-collection`: Enable blob garbage collection
+ during compaction in the documents column family.
+ - `--rocksdb.blob-garbage-collection-age-cutoff`: Age cutoff for garbage
+ collecting blob files in the documents column family (percentage value from
+ 0 to 1 determines how many blob files are garbage collected during
+ compaction).
+ - `--rocksdb.blob-garbage-collection-force-threshold`: Garbage ratio threshold
+ for scheduling targeted compactions for the oldest blob files in the
+ documents column family.
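+
+  For example, a server could be started with blob storage enabled like this
+  (a sketch; the size values are arbitrary examples):
+
+    arangod --rocksdb.enable-blob-files true \
+            --rocksdb.min-blob-size 4096 \
+            --rocksdb.blob-file-size 268435456 \
+            --rocksdb.enable-blob-garbage-collection true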
-* Updated arangosync to v2.11.0.
+* FE-132: Added query sorting (in web UI) by modified date, and an option to
+  change the sort order.
-* Add log.time-format utc-datestring-micros to make debugging of concurrency
- bugs easier.
+* Partial fix for PRESUPP-539: account for memory used during AQL condition
+ transformation to disjunctive normal form (DNF). This transformation can use
+ a lot of memory for complex filter conditions, which was previously not
+ accounted for. Now, if the transformation uses a lot of memory and hits the
+  configured query memory limit, the query will be aborted with a proper
+  error message rather than overusing memory.
+ For very complex conditions that would use massive amounts of memory when
+ transformed into DNF, the DNF conversion is also aborted at some threshold
+ complexity value. If the threshold is hit, the query continues with a
+ simplified representation of the condition, which will not be usable in index
+ lookups. However, this should still be better than overusing memory or taking
+ a very long time to compute the DNF version.
+ The complexity threshold value can be configured per query by setting the new
+ `maxDNFConditionMembers` query option. There is also a new startup option
+ `--query.max-dnf-condition-members` for coordinators and single servers to
+ adjust the threshold value globally.
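+
+  A sketch of setting the threshold for an individual query from arangosh (the
+  query, the collection `docs` and the value 512 are arbitrary examples):
+
+    db._query({
+      query: "FOR doc IN docs FILTER doc.a == 1 OR doc.b == 2 RETURN doc",
+      options: { maxDNFConditionMembers: 512 }
+    });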
-* Renamed KShortestPathsNode to EnumeratePathsNote; this is visible in
- explain outputs for AQL queries.
+* The internal graph code has been completely converted to the new graph
+  engine. The last algorithms added to that list are: ShortestPath,
+  WeightedShortestPath, KShortestPaths and WeightedKShortestPaths.
-* Pregel SSSP now supports `resultField` as well as `_resultField` as
- parameter name to specify the field into which results are stored.
- The name `_resultField` will be deprecated in future.
+* FE-131: Added search input for query page.
-* Update Windows CI compiler to Visual Studio 2022.
+* FE-133: Alphabetical sorting for collections on user permissions page.
-* Web UI: Fixes a GraphViewer issue related to display issues with node
- and edge labels. Boolean node or edge values could not be used as label
- values (ES-1084).
+* Removed CMake variable `ARANGODB_BITS`, which was only used in one place.
-* Made the SortExecutor receive its input incrementally, instead of receiving
- a whole matrix containing all input at once.
+* Fixed the issue that the collection view search did not support selecting
+ everything using Ctrl + A.
-* Optimization for index post-filtering (early pruning): in case an index
- is used for lookups, and the index covers the IndexNode's post-filter
- condition, then loading the full document from the storage engine is
- now deferred until the filter condition is evaluated and it is established
- that the document matches the filter condition.
+* APM-592: In batched query results, when executing requests for `/_api/cursor`,
+  there might be a connection error and the user might not be able to retrieve
+  the latest batch from the cursor. For that, a query option flag `allowRetry`
+  was added. When set to `true`, if the latest batch response object wasn't
+  successfully received, the user can send a retry request to receive it again
+  via a POST request to `/_api/cursor/<cursor-id>/<batch-id>`. Only the latest
+  batch is cached, meaning former batches cannot be retrieved again later.
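+
+  A sketch of the retry workflow via the raw HTTP API from arangosh (`docs`
+  and the batch size are arbitrary examples):
+
+    // create a cursor whose latest batch may be requested again
+    var cursor = arango.POST("/_api/cursor", {
+      query: "FOR doc IN docs RETURN doc",
+      batchSize: 100,
+      options: { allowRetry: true }
+    });
+    // if fetching a batch failed, the same batch can be requested once more
+    // using the batch id announced in the previous response
+    var batch = arango.POST("/_api/cursor/" + cursor.id + "/" + cursor.nextBatchId, {});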
-* Added a fully functional UI for Views that lets users view, modify mutable
- properties and delete views from the web UI.
+* Use more compact and efficient representation for arrays and objects during
+ AQL AST serialization and deserialization. This can help to reduce the size
+ of messages exchanged between coordinator and database servers during query
+ setup, and also reduce the time needed for parsing these messages. This
+ especially helps when there are large bind parameter values that are arrays or
+ objects.
+  The more efficient format is also used inside an AQL query's "explain" and
+ "profile" methods, and thus any callers that process the return values of
+ explain and profile operations may now receive the new format. All callers
+ inside the ArangoDB code have been adjusted, but any external callers that
+ process the JSON response values of AQL query explain or profile operations
+ may need to be adjusted to handle the new format.
-* Fix thread ids and thread names in log output for threads that are not
- started directly by ArangoDB code, but indirectly via library code.
- Previously, the ids of these threads were always reported as "1", and
- the thread name was "main". Now return proper thread ids and names.
+* Added new stage "instantiating executors" to the query profiling output.
+ The time spent in "instantiating executors" is the time needed to create the
+  query executors from the final query execution plan. In cluster mode, this
+ stage also includes the time needed for physically distributing the query
+ snippets to the participating database servers.
+ Previously, the time spent for instantiating executors and the physical
+ distribution was contained in the "optimizing plan" stage, which was
+ misleading.
-* Changed default Linux CI compiler to gcc-11.
+* Removed constant values for query variables from query plan serialization in
+  cases where they were not needed. Previously, constant values of query
+  variables
+ were always serialized for all occurrences of a variable in a query plan.
+ If the constant values were large, this contributed to higher serialization
+ and thus query setup times. Now the constant values are only serialized for
+ relevant parts of query execution plans.
-* Updated arangosync to v2.11.0-preview-2.
+* BTS-199: remove check for environment variable `GLIBCXX_FORCE_NEW` from server
+ start, and remove setting this variable from startup scripts.
+ The reason is that the environment variable only controls the behavior of
+ programs linked against glibc, but our release builds are linked to libmusl.
-* Add "AT LEAST" quantifier for array filters in AQL:
+* Acquire a snapshot of the (list of) indexes when starting document insert,
+ update/replace and remove operations, and use that snapshot throughout the
+ operation. Previously, the list of indexes was acquired multiple times during
+ a write operation, and it was (at least in theory) possible that the list of
+ indexes changed between the individual acquisitions.
+ The PR also contains an optimization to not fetch the full document from the
+ storage engine for remove and replace operations in case the full document is
+ not needed to process the operation. This is the case when the collection does
+ not contain any secondary indexes and `returnOld` is not used.
- `RETURN [1,2,3][? AT LEAST (3) FILTER CURRENT > 42]`
- `RETURN [1,2,3] AT LEAST (2) IN [1,2,3,4,5]`
+* Added experimental startup option `--rocksdb.block-cache-jemalloc-allocator`.
+ This option defaults to `false`. When set to `true`, a jemalloc-based memory
+ allocator will be used to allocate memory for the RocksDB block cache.
+ This allocator will also mark the memory of the block cache to be excluded
+ from coredumps, potentially reducing coredump size a lot.
-* Changed default macOS CI compiler to LLVM clang-14.
+* Remove async mode from Pregel.
-* Added an automatic cluster rebalance api. Use `GET _admin/cluster/rebalance`
- to receive an analysis of how imbalanced the cluster is. Calling it with
- `POST _admin/cluster/rebalance` computes a plan of move shard operations to
- rebalance the cluster. Options are passed via the request body. After
- reviewing the plan, one can use `POST _admin/cluster/rebalance/execute` to
- put that plan into action.
-
-* Introduce reading from followers in clusters. This works by offering
- an additional HTTP header "x-arango-allow-dirty-read" for certain
- read-only APIs. This header has already been used for active failover
- deployments to allow reading from followers. Using this header leads
- to the fact that coordinators are allowed to read from follower shards
- instead only from leader shards. This can help to spread the read load
- better across the cluster. Obviously, using this header can result in
- "dirty reads", which are read results returning stale data or even
- not-yet-officially committed data. Use at your own risk if performance
- is more important than correctness or if you know that data does not
- change.
- The responses which can contain dirty reads will have set the HTTP header
- "x-arango-potential-dirty-read" set to "true".
- There are the following new metrics showing the use of this feature:
- - `arangodb_dirty_read_transactions_total`
- - `arangodb_potentially_dirty_document_reads_total`
- - `arangodb_dirty_read_queries_total`
+* Print the pid of the process which sent a SIGABRT or other fatal signal that
+ shuts down ArangoDB ungracefully.
-* Changed HTTP response code for error number 1521 from 500 to 400.
+* Avoid write-write conflicts for single document operations performed via the
+ document REST API (i.e., no AQL, but also no streaming transactions). This is
+ achieved by locking the key of each document before performing the actual
+ modification. This lock acquisition effectively serializes all operations on
+ the same document. To avoid starvation, the lock acquisition is limited to
+ one second. This lock timeout value is currently hardcoded but will be made
+ configurable in the future. If the lock cannot be acquired within this time,
+ the operation fails with a write-write conflict error as before.
- Error 1521 (query collection lock failed) is nowadays only emitted by
- traversals, when a collection is accessed during the traversal that has
- not been specified in the WITH statement of the query.
- Thus returning HTTP 500 is not a good idea, as it is clearly a user error
- that triggered the problem.
+ Performing changes to a unique index entry also requires us to lock that index
+ entry to ensure uniqueness. This lock acquisition is subject to the same lock
+ timeout as locking the document key.
-* Renamed the `--frontend.*` startup options to `--web-interface.*`:
+ We are planning to generalize this for multi-document operations as well as
+ AQL and streaming transactions in the future.
- - `--frontend.proxy-request.check` -> `--web-interface.proxy-request.check`
- - `--frontend.trusted-proxy` -> `--web-interface.trusted-proxy`
- - `--frontend.version-check` -> `--web-interface.version-check`
+  In case we cannot acquire the lock on the key of the document we want to
+  insert/modify, the error message will be
+  `Timeout waiting to lock key - in index primary of type primary over '_key';
+  conflicting key: <key>` where `<key>` corresponds to the key of the document
+  we tried to modify.
+  In addition, the error object will contain `_key`, `_id` and `_rev` fields.
+  The `_key` and `_id` correspond to the document we tried to insert/modify,
+  and `_rev` will correspond to the current revision of the document from the
+  DB if available, and is empty otherwise.
+
+  In case we cannot acquire the lock on a unique index entry, the error message
+  will be `Timeout waiting to lock key - in index <index-name> of type
+  persistent over '<fields>'; document key: <key>; indexed values: [<values>]`
+  where `<index-name>` is the name of the index in which we tried to lock the
+  entry, `<fields>` is the list of fields included in that index, `<key>`
+  corresponds to the key of the document we tried to insert/modify, and
+  `<values>` corresponds to the indexed values from our document.
+  In addition, the error object will contain `_key`, `_id` and `_rev` fields.
+  The `_key` and `_id` correspond to the document we tried to insert/modify,
+  and `_rev` will correspond to the current revision of the document from the
+  DB if available, and is empty otherwise.
- The former startup options are still supported.
+ This addresses GitHub issue #9702 and APM-522.
-* Added Enterprise Graph feature to enterprise version of ArangoDB.
- The enterprise graph is another graph sharding model that we introduced,
- it is less strict, and therefore easier to start with, then SmartGraphs,
- as it does not require a smartGraphAttribute, and allows free choice of
- vertex _key values. But still maintains performance gains as compared to
- general-graphs. For more details please check documentation.
+* Fixed BTS-418: Suboptimal index range calculation with redundant conditions.
-* APM-135: Added multithreading to assigning non-unique indexes to documents,
- in foreground or background mode. The number of index creation threads
- is hardcoded to 2 for now. Improvements for higher parallelism are expected
- for future versions.
+* Added new per-operation option `refillIndexCaches` to write operations,
+ namely:
-* Issue 15592: Permit `MERGE_RECURSIVE()` to be called with a single argument.
+ - AQL INSERT/UPDATE/REPLACE/REMOVE modification operations
+ - single-document insert, update, replace and remove operations
+ - multi-document insert, update, replace and remove operations
-* Fixed issue 16337: arangoimport with `--headers-file` and `--merge-attributes`
- merges column names instead of row values on the first line of a CSV file.
+  If the option is set to `true`, every currently running transaction will keep
+ track of which in-memory index cache entries were invalidated by the
+ transaction, and will try to (re-)fill them later.
+  Currently, edge indexes and velocypack-based indexes (persistent, hash,
+  skiplist) are supported. For velocypack-based indexes, the refilling
+ will only happen if the index was set up with an in-memory cache (i.e. the
+ `cacheEnabled` flag was set during index creation).
- Additionally, floating-point numbers are now merged using their standard
- string representation instead of with a fixed precision of 6 decimal places.
+ Example usages:
+  - `db.<collection>.insert({ _from: ..., _to: ..., ... },
+    { refillIndexCaches: true });`
+  - `db.<collection>.update(key, { _from: ..., _to: ..., ... },
+    { refillIndexCaches: true });`
+  - `db.<collection>.replace(key, { _from: ..., _to: ..., ... },
+    { refillIndexCaches: true });`
+  - `db.<collection>.remove(key, { refillIndexCaches: true });`
+  - `INSERT { ... } INTO <collection> OPTIONS { refillIndexCaches: true }`
+  - `UPDATE { ... } WITH { ... } INTO <collection> OPTIONS
+    { refillIndexCaches: true }`
+  - `REPLACE { ... } WITH { ... } INTO <collection> OPTIONS
+    { refillIndexCaches: true }`
+  - `REMOVE { ... } IN <collection> OPTIONS { refillIndexCaches: true }`
-* Now supporting projections on traversals. In AQL Traversal statements like
- FOR v,e,p IN 1..3 OUTBOUND @start GRAPH @graph RETURN v.name
- we will now detect attribute accesses on the data, in above example "v.name"
- and use it to optimize data-loading, e.g. we will only extract the "name" attribute.
- This optimization will help if you have large document sizes, but only access small
- parts of the documents. By default we will only project up to 5 attributes on each
- vertex, and edge. This limit can be modified by adding OPTIONS {maxProjections: 42}.
- To identify if your query is using projections the explain output will now contain a
- hint like `/* vertex (projections: `name`) */`
- For now only attribute accesses are detected, functions like `KEEP` will not be projected.
+ The refilling of the in-memory caches for indexes is performed by a background
+  thread, so that the foreground write operation should not be slowed down much.
+ The background thread may however cause additional I/O for looking up the data
+ in RocksDB and for repopulating the caches.
-* Updated arangosync to v2.11.0-preview-1.
+ The background refilling is done in a best-effort way and is not guaranteed to
+ always succeed, e.g. if there is no memory available for the cache subsystem,
+ or when an in-memory cache table is currently in a migration phase
+ (grow/shrink operation).
-* Change default `format_version` for RocksDB .sst files from 3 to 5.
+ There is a new startup option `--rocksdb.auto-refill-index-caches-on-modify`
+ for DB-Servers and single servers, which currently defaults to `false`. If it
+ is set to `true`, the cache refilling will be turned on automatically for all
+ insert/update/replace/remove operations, so that it doesn't need to be
+ specified on the per-operation/per-query level.
-* Added support for creating autoincrement keys on cluster mode, but only for
- single sharded collections.
+ The new startup option `--rocksdb.auto-refill-index-caches-queue-capacity` can
+ be used to limit the number of index cache entries that the background thread
+ will queue. This is a safeguard to keep the memory usage at bay in case the
+ background thread is slower than concurrent threads that perform ingestions.
-* Add support for LZ4 and LZ4HC compression support for RocksDB.
-
-* Allow parallel access to the shards of smart edge collections in AQL via
- parallel GatherNodes.
+ There are also new startup options to control whether or not the in-memory
+ caches should automatically be seeded upon server restart.
+ The option `--rocksdb.auto-fill-index-caches-on-startup` for DB-Servers and
+ single servers enables this functionality. It currently defaults to `false`.
+ If it is set to `true`, the in-memory caches of all eligible indexes will be
+ automatically pre-seeded after the server startup. Note that this may cause
+ additional CPU and I/O load.
+ The option `--rocksdb.max-concurrent-index-fill-tasks` is available to limit
+ the impact of the automatic index filling at startup. It controls how many
+ full index filling operations can execute concurrently. The lower this number
+ is, the lower the impact of cache filling, but the longer it will take.
+ The default value for this option depends on the number of available cores,
+ and is at least `1`. A value of `0` cannot be used.
+ This option is only relevant if `--rocksdb.auto-fill-index-caches-on-startup`
+ is set to `true`.
-* Update RocksDB internal table checksum type to xxHash64.
+ The PR also adds the following metrics:
+ - `rocksdb_cache_auto_refill_loaded_total`: Total number of queued items for
+ in-memory index caches refilling. It will always report a value of zero on
+ coordinators.
+ - `rocksdb_cache_auto_refill_dropped_total`: Total number of dropped items for
+    in-memory index caches refilling (because the number of queued items would
+ exceed the value of `--rocksdb.auto-refill-index-caches-queue-capacity`).
+ It will always report a value of zero on coordinators.
+ - `rocksdb_cache_full_index_refills_total`: Total number of in-memory index
+ caches refill operations for entire indexes. The counter gets increased for
+ every index automatically loaded (because startup option
+ `--rocksdb.auto-fill-index-caches-on-startup` is set to `true`) or when full
+ indexes are loaded into memory manually.
+ In cluster deployments the counter will be increased once per eligible index
+ per shard. It will always report a value of zero on coordinators.
-* Updated arangosync to v2.10.0.
+* BTS-128: Fixed HTTP requests not working when the content type is velocypack.
-* Added several startup option to configure parallelism for individual Pregel
- jobs:
+* Deleted customizable Pregel (AIR) and Greenspun library.
- - `--pregel.min-parallelism`: minimum parallelism usable in Pregel jobs.
- - `--pregel.max-parallelism`: maximum parallelism usable in Pregel jobs.
- - `--pregel.parallelism`: default parallelism to use in Pregel jobs.
+* Add support for terabyte units (t, tb, T, TB, tib, TiB, TIB) in startup
+ options.
- These parallelism options can be used by administrators to set concurrency
- defaults and bounds for Pregel jobs. Each individual Pregel job can set
- its own parallelism value using the job's `parallelism` option, but the
- job's parallelism value will be clamped to the bounds defined by
- `--pregel.min-parallelism` and `--pregel.max-parallelism`. If a job does
- not set its `parallelism` value, it will default to the parallelism value
- configured via `--pregel.parallelism`.
+* Make the deprecated `--server.disable-authentication-unix-sockets` and
+ `--server.disable-authentication` startup options obsolete. They were
+ deprecated in v3.0 and mapped to `--server.authentication` and
+ `--server.authentication-unix-sockets`, which made them do the opposite of
+ what their names suggest.
-* Added startup options to configure the usage of memory-mapped files for
- Pregel temporary data:
+* Log warnings for any experimental, deprecated, obsolete or renamed options
+  at startup of arangod or any of the client tools.
- - `--pregel.memory-mapped-files`: if set to `true`, Pregel jobs will by
- default store their temporary data in disk-backed memory-mapped files.
- If set to `false`, the temporary data of Pregel jobs will be buffered in
- RAM. The default value is `true`, meaning that memory-mapped files will
- be used. The option can be overridden for each Pregel job by setting the
- `useMemoryMaps` option of the job.
+* Added an option to exclude system collections from the rebalance shards plan.
- - `--pregel.memory-mapped-files-location-type`: location for memory-mapped
- files written by Pregel. This option is only meaningful if memory-mapped
- files are actually used. The option can have one of the following values:
- - `temp-directory`: store memory-mapped files in the temporary directory,
- as configured via `--temp.path`. If `--temp.path` is not set, the
- system's temporary directory will be used.
- - `database-directory`: store memory-mapped files in a separate directory
- underneath the database directory.
- - `custom`: use a custom directory location for memory-mapped files. The
- exact location must be set via the configuration parameter
- `--pregel.memory-mapped-files-custom-path`.
+* Improve performance and memory usage of IN list lookups for hash, skiplist and
+ persistent indexes.
- The default value for this option is `temp-directory`.
+* Improve memory usage tracking for IN list lookups and other RocksDB-based
+ lookups.
- - `--pregel.memory-mapped-files-custom-path`: custom directory location for
- Pregel's memory-mapped files. This setting can only be used if the option
- `--pregel.memory-mapped-files-location-type` is set to `custom`.
+* Remove inactive query plan cache code (it was only a stub and was never
+  enabled before).
- The default location for Pregel's memory-mapped files is the temporary
- directory (`temp-directory`), which may not provide enough capacity for
- larger Pregel jobs.
- It may be more sensible to configure a custom directory for memory-mapped
- files and provide the necessary disk space there (`custom`). Such custom
- directory can be mounted on ephemeral storage, as the files are only needed
- temporarily.
- There is also the option to use a subdirectory of the database directory
- as the storage location for the memory-mapped files (`database-directory`).
- The database directory often provides a lot of disk space capacity, but
- when it is used for both the regular database data and Pregel's memory-mapped
- files, it has to provide enough capacity to store both.
+* Fixed BTS-441: Honor read-only mode when authentication is disabled.
-* Pregel status now reports whether memory mapped files are used in a job.
+* Obsolete startup option `--database.force-sync-properties`. This option was
+ useful with the MMFiles storage engine, but didn't have any useful effect when
+ used with the RocksDB engine.
-* Fixed issue BTS-875.
+* BTS-483: Added a restriction so that the query cache is not used for
+  streaming and JS transactions when they are not read-only.
-* Updated arangosync to v2.10.0-preview-1.
+* Remove map and map.gz files from repository and add them to gitignore.
+ These files are only used for debugging and therefore should not be included
+ in any release. This also reduces the size of release packages.
-* Enterprise only: Restricted behavior of Hybrid Disjoint Smart Graphs. Within
- a single traversal or path query we now restrict that you can only switch
- between Smart and Satellite sharding once, all queries where more than one
- switch is (in theory) possible will be rejected. e.g:
- ```
- FOR v IN 2 OUTBOUND @start smartToSatEdges, satToSmartEdges
- ```
- will be rejected (we can go smart -> sat -> smart, so two switches)
- ```
- FOR v1 IN 1 OUTBOUND @start smartToSatEdges
- FOR v2 IN 1 OUTBOUND v1 satToSmartEdges
- ```
- will still be allowed, as each statement only switches once.
- We have decided to take this restrictions as especially for ShortestPath
- queries the results are not well-defined. If you have a use-case where
- this restriction hits you, please contact us.
+* Improved help texts for the collection type and satellite collection options
+ in the web UI.
-* Change default value of `--rocksdb.block-cache-shard-bits` to an automatic
- default value that allows data blocks of at least 128MiB to be stored in each
- cache shard if the block cache's strict capacity limit is used. The strict
- capacity limit for the block cache is enabled by default in 3.10, but can be
- turned off by setting the option `--rocksdb.enforce-block-cache-size-limit`
- to `false`. Also log a startup warning if the resulting cache shard size
- would be smaller than is potentially safe when the strict capacity limit is
- set.
- Enforcing the block cache's capacity limit has the consequence that data
- reads by RocksDB must fit into the block cache or the read operation will
- fail with an "Incomplete" error.
+* Deprecate the startup option `--agency.pool-size`. This option was never
+ properly supported for any values other than the value of `--agency.size`.
+  Any value set for `--agency.pool-size` other than the value set for
+  `--agency.size` will now produce a fatal error on startup.
-* The API `/_admin/status` now returns a progress attribute that shows the
- server's current state (starting, stopping, etc.), with details about which
- feature is currently started, stopped etc. During recovery, the current WAL
- recovery sequence number is also reported in a sub-attribute of the
- `progress` attribute. Clients can query this attribute to track the
- progress of the WAL recovery.
- The additional progress attribute returned by `/_admin/status` is most
- useful when using the `--server.early-connections true` setting. With that
- setting, the server will respond to incoming requests to a limited set of
- APIs already during server startup. When the setting is not used, the REST
- interface will be opened relatively late during the startup sequence, so
- that the progress attribute will likely not be very useful anymore.
+* Fixed BTS-1082: Updating the properties of a satellite collection broke its
+  `replicationFactor`.
-* Optionally start up HTTP interface of servers earlier, so that ping probes
- from tools can already be responded to when the server is not fully started.
- By default, the HTTP interface is opened at the same point during the startup
- sequence as before, but it can optionally be opened earlier by setting the
- new startup option `--server.early-connections` to `true`. This will
- open the HTTP interface early in the startup, so that the server can respond
- to a limited set of REST APIs even during recovery. This can be useful
- because the recovery procedure can take time proportional to the amount of
- data to recover.
- When the `--server.early-connections` option is set to `true`, the
- server will respond to requests to the following APIs during the startup
- already:
- - `/_api/version`
- - `/_admin/version`
- - `/_admin/status`
- All other APIs will be responded to with an HTTP response code 503, so that
- callers can see that the server is not fully ready.
- If authentication is used, then only JWT authentication can be used during
- the early startup phase. Incoming requests relying on other authentication
- mechanisms that require access to the database data will also be responded to
- with HTTP 503 errors, even if correct credentials are used.
+* BTS-209: Fixed requests to `_admin/execute` treating every payload as plain
+  text, even when it was in JSON or velocypack format. The payload is now only
+  treated as velocypack if the request's `content-type` header specifies it.
-* Fix behavior when accessing a view instead of a collection by name in a REST
- document operation. Now return a proper error.
+* Fixed issue #17394: Unnecessary document-lookup instead of Index-Only query.
+ This change improves projection handling so that more projections can be
+ served from indexes.
-* Upgraded bundled version of RocksDB to 7.2.
+* Change default output format of arangoexport from `json` to `jsonl`.
-* Fix documentation of collection's `cacheEnabled` property default.
+* BTS-941: The HTTP API now delivers the correct list of the collection's shards
+ in case a collection from an EnterpriseGraph, SmartGraph, Disjoint
+ EnterpriseGraph, Disjoint SmartGraph or SatelliteGraph is being used.
-* Added `[?]` array operator to AQL, which works as follows:
- - `nonArray[?]`: returns `false`
- - `nonArray[? FILTER CURRENT ...]`: returns `false`
- - `array[?]`: returns `false` if array is empty, `true` otherwise
- - `array[? FILTER CURRENT ...]`: returns `false` if no array member
- satisfies the filter condition, returns `true` if at least one member
- satisfies it.
+* BTS-465: Added tests for RandomGenerator and a warning that options for
+  creating random values other than Mersenne are deprecated.
-* Fixed GitHub issue #16279: assertion failure/crash in AQL query optimizer when
- permuting adjacent FOR loops that depended on each other.
+* BTS-977: Unified the error message that an unauthorized user receives when
+  making an HTTP GET request to `_db/<database-name>/_api/database/current`:
+  a database that exists but that the user cannot access and a database that
+  does not exist now both produce the same error message.
-* No good reason to fatal error in agency state, when local database entries
- lack local timestamp (legacy). In that situation, we will record epoch begin
- as local time.
+* Added the new AQL function `SHA256(value)`.
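+
+  For example (a sketch):
+
+    RETURN SHA256("ArangoDB") // hex-encoded SHA-256 hash of the input string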
-* Very verbose warning from failing to parse GEO JSON in search. Has lead to
- billions of log lines on deployed services.
+* Added index cleanup in Supervision. If an index was not created successfully
+ and the coordinator which initiated the creation was rebooted or is dead, then
+ the agency Supervision will drop the index again. If it was created
+ successfully, the agency Supervision will finalize it.
-* Put hotbackup requests on the HIGH priority queue to make hotbackups work
- under high load (BTS-865).
+* BTS-742: Added a restriction so that, when storing a SmartGraph, a
+  `satellites` option in an invalid format (like `{satellites: null}`) is no
+  longer accepted.
-* Removed separate FlushThread (for views syncing) and merged it with the
- RocksDBBackgroundThread.
+* BTS-477: Added integration tests covering log parameters.
-* Fix some issues with WAL recovery for views. Previously it was possible that
- changes to a view/link were already recovered and persisted, but that the
- lower bound WAL tick was not moved forward. This could lead to already fully
- recovered views/links being recovered again on the next restart.
+* Moved the handling of escaping control and Unicode characters in the log to
+  the Logger instead of LogAppenderFile.
-* Updated OpenSSL to 1.1.1o and OpenLDAP to 2.6.2.
+* Added the authenticate header to HTTP/2 responses with status code 401.
-* Upgrade jemalloc to version 5.3.0.
+* Pushed particularly noisy log messages down to the DEBUG level.
-* Fixed BTS-860. Changed ArangoSearch index recovery procedure to
- remove necessity to always fully recreate index if IndexCreation marker
- encountered.
+* Fixed the JSON log output format not respecting the value of the
+  `--log.shorten-filenames` parameter.
-* Updated arangosync to v2.9.1.
+* Added "intermediateCommits" statistics return value for AQL queries, to
+  relay back the number of intermediate commits that a write query performed.
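+
+  A sketch of reading the new statistics value from arangosh (`docs` and the
+  commit threshold are arbitrary examples):
+
+    var cursor = db._query({
+      query: "FOR doc IN docs REMOVE doc IN docs",
+      options: { intermediateCommitCount: 10000 }
+    });
+    // number of intermediate commits the write query performed
+    print(cursor.getExtra().stats.intermediateCommits);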
-* Added option `--enable-revision-trees` to arangorestore, which will add the
- attributes `syncByRevision` and `usesRevisionsAsDocumentIds` to the collection
- structure if they are missing. As a consequence, these collections created by
- arangorestore will be able to use revision trees and a faster getting-in-sync
- procedure after a restart. The option defaults to `true`, meaning the
- attributes will be added if they are missing. If the option is set to `false`,
- the attributes will not be added to the collection structure.
- If the attributes are already present in the dump data, they will not be
- modified by arangorestore irrespective of the setting of this option.
+* Added a message on the UI view of Logs when the user has restricted access,
+  either because they cannot access `_system`, or because they are currently
+  in another database.
-* Set "useRevisionsAsDocumentIds" to true when restoring collection data
- via arangorestore in case it is not set in the collection structure input
- data. This allows using revision trees for restored collections.
+* Fixed Pregel's HITS algorithm using a fixed value instead of the passed
+  "threshold" parameter. The same applied to the new HITSKleinberg.
-* Fix: Highly unlikely race in cluster maintenance. For every shard only
- one operation (change attribute, change leadership) should be performed
- at the same time. However if two changes are detected in the same heartbeat
- it could lead to both operations to be executed in parallel. In most cases
- this is also fine, but could lead to races on the same attribute, however
- the race will be sorted out in the next heartbeat interval.
+* Now the Pregel API returns `{... algorithm: "pagerank", ...}` instead of
+  `{... algorithm: "PageRank", ...}` when the PageRank algorithm is run (in
+  accordance with the documentation).
-* Added new optimization rule "arangosearch-constrained-sort" to perform
- sorting & limiting inside ArangoSearch View enumeration node in case of
- using just scoring for sort.
+* Added integration tests for `--log.escape-control-chars` and
+ `--log.escape-unicode-chars`.
-* Improve log output for WAL recovery, by providing more information and
- making the wording more clear.
+* Added a new Pregel algorithm: the version of Hypertext-Induced Topic Search
+  (HITS) as described in the original paper.
-* Updated lz4 to version 1.9.3.
+* BTS-428: Added the function DATE_ISOWEEKYEAR, which returns the week number
+  of a date in the ISO calendar together with the year the week belongs to.
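+
+  For example (a sketch; January 1st 2017 falls into ISO week 52 of the ISO
+  week-date year 2016, so both values are reported for that date):
+
+    RETURN DATE_ISOWEEKYEAR("2017-01-01")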
-* Added option `--custom-query-file` to arangoexport, so that a custom query
- string can also be read from an input file.
+* Added handling of requests with chunked Transfer-Encoding, which is not
+  implemented, so such requests are now answered with HTTP status code 501.
-* Added startup option `--cluster.shard-synchronization-attempt-timeout` to
- limit the amount of time to spend in shard synchronization attempts. The
- default timeout value is 20 minutes.
- Running into the timeout will not lead to a synchronization failure, but
- will continue the synchronization shortly after. Setting a timeout can
- help to split the synchronization of large shards into smaller chunks and
- release snapshots and archived WAL files on the leader earlier.
- This change also introduces a new metric `arangodb_sync_timeouts_total`
- that counts the number of timed-out shard synchronization attempts.
+* Disallowed the creation of indexes that cover fields whose names start or
+  end with `:`. This validation happens on coordinators and single servers,
+  and only applies to index creation, so already existing indexes that might
+  use such field names remain as they are.
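+
+  For example, an index creation like the following is now rejected (a sketch;
+  `docs` is a hypothetical collection):
+
+    // fails because the field name ends with ":"
+    db.docs.ensureIndex({ type: "persistent", fields: ["value:"] });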
-* Updated arangosync to v2.9.1-preview-1.
-* Make sure that newly created TTL indexes do not use index estimates, which
- wouldn't be used for TTL indexes anyway.
+v3.10.6 (2023-04-27)
+--------------------
-* Fix: for the Windows build, the new Snappy version, which was introduced in
- 3.9, generated code that contained BMI2 instructions which where introduced
- with the Intel Haswell architecture. However, our target architecture for 3.9
- is actually Sandy Bridge, which predates Haswell. Running the build on these
- older CPUs thus resulted in illegal instruction exceptions.
+* Fixed BTS-1292: Added automatic cleanup of dangling ArangoSearch links.
-* FE-46: UI improvement on the view UI pages as well as adding tooltips to
- options where necessary. The affected pages are mostly the Info and
- Consolidation Policy pages.
+* Automatically repair revision trees after several failed shard synchronization
+ attempts. This can help to get permanently out-of-sync shards back into sync.
-* FE-44: Moved the Info page to before JSON, making the settings page the
- default page in the view web UI.
+ The functionality can be turned off by setting the startup option
+ `--replication.auto-repair-revision-trees` to `false` on DB-Servers.
-* Refactor internal code paths responsible for `_key` generation. For
- collections with only a single shard, we can now always let the leader
- DB server generate the keys locally. For collections with multiple shards,
- the coordinators are now always responsible for key generation.
- Previously the responsibility was mixed and depended on the type of
- operation executed (document insert API vs. AQL query, single operation
- vs. batch).
+* SEARCH-466: Fixed inherited view properties leaking into individual link
+  definitions.
-* Make web UI show the following information for collections:
- * key generator type
- * whether or not the document and primary index cache is enabled
- * if cache is enabled, show cache usage and allocation size in figures
- The `cacheEnabled` property of collections is now also changeable via the
- UI for existing collections.
+* Fix race condition in invalidation of token cache on coordinators.
-* FE-45: Added tooltips with helpful information to the options on the View UI
- settings page.
+* Adjusted timeouts for cluster internal commit and abort requests to withstand
+ network delays better. This fixes some problems when the networking
+ infrastructure delays requests.
-* FE-43: Simplify the workflow on the web view UI (Links page): allow for users
- to view a single link or field with their properties at a time.
+* Added sent time accounting and some metrics to fuerte and the NetworkFeature.
+ This can detect delays in the network infrastructure.
-* Improve validation for variables used in the `KEEP` part of AQL COLLECT
- operations. Previously referring to a variable that was introduced by the
- COLLECT itself from out of the KEEP part triggered an internal error. The
- case is detected properly now and handled with a descriptive error message.
+* Added startup option `--server.ensure-whitespace-metrics-format`, which
+ controls whether additional whitespace is used in the metrics output format.
+ If set to `true`, then whitespace is emitted between the exported metric value
+  and the preceding token (metric name or labels).
+ Using whitespace may be required to make the metrics output compatible with
+ some processing tools, although Prometheus itself doesn't need it.
-* Updated arangosync to v2.9.0.
+ The option defaults to `true`, which adds additional whitespace by default.
-* Updated arangosync to v2.9.0-preview-6.
+* SEARCH-461: Added the option `--arangosearch.columns-cache-only-leader`,
+  which is only used on Enterprise Edition DB-Servers and defaults to `false`.
+  If set to `true`, only leader shards have ArangoSearch caches enabled, which
+  reduces RAM usage. If a failover happens, the caches for the new leader are
+  populated in the background. Some queries that run during a failover may
+  still run without caches.
-* Fixed BTS-811 in which there was an incongruence between data being
- checksummed and data being written to `.sst` files, because checksumming
- should have been made after the encryption of the data, not before it.
+* BTS-1148: Fix a race when aborting/finishing a currently active query on a
+ DB-Server. This race could cause the query to remain in the server's query
+ registry longer than intended, potentially holding some locks. Such queries
+ were garbage collected eventually, but this could take a while, depending on
+ the specified TTL (10min per default).
+ This has now been fixed so that aborted/finished queries are cleaned up in a
+ timely manner.
-* Increase internal transaction lock timeout on followers during cluster
- write operations. Although writes to the same keys on followers should be
- serialized by the key locks held on the leader, it is still possible that
- the global transaction lock striped mutex is a source of contention and
- that concurrent write operations time out while waiting to acquire this
- global mutex. The lock timeout on followers is now significantly increased
- to make this very unlikely.
+* MDS-1098: In 3.10 we introduced an optimization on traversals that pulls
+  post-filter conditions into the traversal statements, like the following:
-* Added startup option `--rocksdb.transaction-lock-stripes` to configure the
- number of lock stripes to be used by RocksDB transactions. The option
- defaults to the number of available cores, but is bumped to a value of
- 16 if the number of cores is lower.
+ FOR v,e,p IN 10 OUTBOUND @start GRAPH "myGraph"
+ FILTER v.isRelevant == true
+ RETURN p
-* Make all requests which are needed for shard resync at least medium
- priority to improve getting-in-sync under load.
+ If the comparison side contains a variable and the same variable is used as
+ the start vertex e.g. like this:
-* Added command line option to arangobench to disable implicit collection
- creation. This allows one to run tests against a manually created and
- configured collection.
+ FOR candidate IN ["vertices/1", "vertices/2"]
+ FOR v,e,p IN 1 OUTBOUND candidate GRAPH "myGraph"
+ FILTER e.MostLikedNeighbor == candidate
+ RETURN v
-* Added an IO heartbeat which checks that the underlying volume is writable
- with reasonable performance. The test is done every 15 seconds and can
- explicitly switched off. New metrics to give visibility if the test fails:
- - `arangodb_ioheartbeat_delays_total`: total number of delayed io heartbeats
- - `arangodb_ioheartbeat_duration`: histogram of execution times [us]
- - `arangodb_ioheartbeat_failures_total`: total number of failures
- These metrics are only populated, if `--database.io-heartbeat` is set to
- `true` (which is currently the default).
+  There is a chance that we prematurely discarded this variable (`candidate`
+  in the example) if it is not used later. This has led to incorrect results.
-* Fix lock order in Agent::advanceCommitIndex for State's _logLock and
- Agent's _waitForCV.
+* Fixed statistics values for writes executed and writes ignored when a query is
+ executed using the rule `optimize-cluster-single-document-operations`.
+  It was always increasing the number of writes executed, even if the operation
+  wasn't successful, and also never increasing the number of writes ignored
+  when needed.
-* Fix deadlocked shard synchronizations when planned shard leader has
- not yet taken over leadership.
+* Updated arangosync to v2.16.1.
-* Resync follower shard after a follower restart immediately and not lazily.
+* Fix potential thread starvation in in-memory edge cache.
-* Unify the creation of normal and SmartGraph collections.
+* SEARCH-300: Fixed a rare case when arangosearch data folders might be left on
+  disk after a database is dropped.
- This unifies the code paths for creating collections for normal
- collections and SmartGraph collections, so that the functionality is
- centralized in one place. SmartGraph-specific code for validation and
- collection creation has been moved to enterprise as well.
+* SEARCH-459: Fixed reporting of ArangoSearch inverted index properties from
+  ensureIndex requests.
-* Make followers respond to synchronous replication requests with less data.
- Specifically, followers will not build detailed results with _id, _key and
- _rev for the inserted/modified/removed documents, which would be ignored
- by the leader anyway.
+* Changed path where test scripts locate configuration files from `etc/relative`
+ to `etc/testing`. These paths contain `arangosh.conf`, which we were reading
+  from `etc/relative` in the test environment.
-* Updated arangosync to v2.9.0-preview-5.
+* Fix issues with deferred database creation:
-* Auto-regenerate exit code and error code files in non-maintainer mode, too.
+ When a database has made it into the Plan part of the agency with some
+ settings, e.g. `replicationFactor` that would violate the current settings for
+ databases (e.g. `--cluster.min-replication-factor` and
+ `--cluster.max-replication-factor`), and then a new DB server is added, it
+ will try to create the database locally with the settings from the Plan.
+  However, as these settings violate the min/max replication factor values, the
+ database is not created on the new DB server and an error is written into
+ Current instead.
+ This can cause follow-up errors and the PlanSyncer complaining about missing
+ databases for analyzers etc.
-* Only show slowest optimizer rules in explain output for optimizer rules
- that took a considerable amount of time (>= 0.0002 seconds). Previously
- the slowest 5 optimizer rules were shown, regardless of how long they
- took to execute and even if they executed sufficiently fast.
+* Fixed ES-1508: (EE only) when deleting edges in a SmartGraph via
+ DELETE /_api/document/{collection} using _key or _id values as document
+ selectors, the INBOUND and OUTBOUND entries of the SmartEdges could diverge.
+ Using a document like {_key: "xxxx"} as a selector was always correct. Now
+ _key and _id variants are supported as intended.
+
+* Fixed single-to-single replication that used HTTP authentication to
+ authenticate requests on the leader. This could be broken if the collections
+ on the leader were created with 3.8 or later, and thus used the Merkle tree
+ protocol to exchange different document revisions.
+ When using HTTP authentication, the prefetching code for document revisions
+ did not pass on the authentication credentials, so the leader could reject
+ requests with HTTP 401 or HTTP 403, and replication failed.
+ Replication in the cluster and replication using JWT authentication were not
+ affected.
-* Updated arangosync to v2.9.0-preview-4.
+* Added the following metrics for WAL file tracking:
+ - `rocksdb_live_wal_files_size`: cumulated size of alive WAL files (not
+ archived)
+  - `rocksdb_archived_wal_files_size`: cumulated size of archived WAL files
-* Make the StatisticsFeature start after the NetworkFeature, so that any
- network request issues by cluster statistics gathering can rely on the
- networking functionality being available until shutdown.
+* By default, start pruning of archived WAL files 60 seconds after server
+ start. Previously, pruning of WAL files started 180 seconds after server
+ startup.
-* BugFix (enterprise-only): (BTS-787) In a hybrid disjoint SmartGraph, having
- more than one relation, if you add a new vertex collection to a Smart ->
- Smart edge relation this vertex collection was rejected with "has to be
- satellite" error.
- Now the collection is created as a SmartVertexCollection as desired.
+* Set default threshold value for automatic column flushing to 20 live WAL
+ files (previously: 10 files), and retry flushing every 30 minutes (previous
+ interval: every 60 minutes).
-* Rework internal queues for connection and request statistics. The previous
- implementation allocated a lot of memory at program start for initializing
- fixed-sized queues for the statistics objects.
- The problem with using fixed-sized queues is that they will mostly require
- too much memory for almost all cases, but still do not protect from the
- queues becoming full and not being able to hold more items.
- Now we go with a variable length queue instead, which only requires a
- small amount of memory initially, and allocate more memory only when needed.
- Freelists for reusing statistics items are still present to avoid lots of
- reallocations.
- The change also reduces the size of the executable's .bss section by more
- than 10MB.
+* BTS-1272: Fixed metric `arangodb_connection_pool_connections_current`. In
+  some cases where multiple connections to a server are canceled, the metric
+  could miscount, as it only counted individually closed connections.
+  The affected situations are: crashes of other servers, restore of a
+  HotBackup, and rotation of the JWT secret.
-* Updated ArangoDB Starter to 0.15.4.
+* Added support to log response bodies as well as HTTP headers (incoming
+ and outgoing), when the requests log topic is set to TRACE.
-* Always open a new, working connection before HTTP request-fuzzing during
- testing. Otherwise the fuzzing results are not 100% comparable from run to
- run.
-* Remove error handling fetching license information to improve user
- experience. To display the license information in the UI is only
- informational. It disturbs the user experience to know something went wrong
- and doesn't provide any important information for the user.
+v3.10.5 (2023-03-16)
+--------------------
-* Updated bundled version of zlib library to 1.2.12.
+* Stabilized resilience tests. The assumption that an AQL query can run
+  without error directly after a leader has been stopped is wrong.
-* Improve parallelism in arangorestore in case new data format is used.
+* Auto-flush RocksDB WAL files and in-memory column family data if the number of
+ live WAL files exceeds a certain threshold. This is to make sure that WAL
+ files are moved to the archive when there are a lot of live WAL files present
+ (e.g. after a restart; in this case RocksDB does not count any previously
+  (e.g. after a restart; in this case RocksDB does not count any previously
+  existing WAL files when calculating the size of WAL files and comparing it
+  against `max_total_wal_size`).
+ The feature can be configured via the following startup options:
+ - `--rocksdb.auto-flush-min-live-wal-files`: minimum number of live WAL files
+ that triggers an auto-flush. Defaults to `10`.
+ - `--rocksdb.auto-flush-check-interval`: interval (in seconds) in which
+ auto-flushes are executed. Defaults to `3600`.
+ Note that an auto-flush is only executed if the number of live WAL files
+  exceeds the configured threshold and the last auto-flush happened longer ago
+  than the configured auto-flush check interval. That way, too frequent
+  auto-flushes can be avoided.
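+
+  A sketch of adjusting both options at startup (the values shown are
+  arbitrary examples):
+
+    arangod --rocksdb.auto-flush-min-live-wal-files 20 \
+            --rocksdb.auto-flush-check-interval 1800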
-* Updated OpenSSL to 1.1.1n and OpenLDAP to 2.6.1.
+* Fix potential memory under-accounting on cache shutdown for in-memory caches
+ for edge indexes.
-* Updated arangosync to v2.9.0-preview-2.
+* Added the following metrics for WAL file tracking:
+ - `rocksdb_live_wal_files`: number of alive WAL files (not archived)
+ - `rocksdb_wal_released_tick_flush`: lower bound sequence number from which
+ onwards WAL files will be kept (i.e. not deleted from the archive) because
+ of external flushing needs. Candidates for these are arangosearch links and
+ background index creation.
+ - `rocksdb_wal_released_tick_replication`: lower bound sequence number from
+ which onwards WAL files will be kept because they may be needed by the
+ replication.
+ - `arangodb_flush_subscriptions`: number of currently active flush
+ subscriptions.
-* Added AQL hint "useCache" for FOR loops, to explicitly disable the usage of
- in-memory caches for lookups.
+* Updated internal JavaScript dependencies:
-* When profiling an AQL query via `db._profileQuery(...)` or via the web UI,
- the query profile will now contain the number of index entries read from
- in-memory caches (usable for edge indexes and indexes of type "persistent",
- "hash" or "skiplist") plus the number of cache misses.
+ - @xmldom/xmldom: 0.8.0 -> 0.8.6
+ - accepts: 1.3.7 -> 1.3.8
+ - ajv: 8.10.0 -> 8.12.0
+ - ansi_up: 5.0.1 -> 5.1.0
+ - content-disposition: 0.5.3 -> 0.5.4
+ - content-type: 1.0.4 -> 1.0.5
+ - error-stack-parser: 2.0.6 -> 2.1.4
+ - mime-types: 2.1.31 -> 2.1.35
+ - semver: 7.3.5 -> 7.3.8
-* The caching subsystem now provides the following 3 additional metrics:
- - `rocksdb_cache_active_tables`: total number of active hash tables used for
- caching index values. There should be 1 table per shard per index for which
- the in-memory cache is enabled. The number also includes temporary tables
- that are built when migrating existing tables to larger equivalents.
- - `rocksdb_cache_unused_memory`: total amount of memory used for inactive
- hash tables used for caching index values. Some inactive tables can be kept
- around after use, so they can be recycled quickly. The overall amount of
- inactive tables is limited, so not much memory will be used here.
- - `rocksdb_cache_unused_tables`: total number of inactive hash tables used
- for caching index values. Some inactive tables are kept around after use,
- so they can be recycled quickly. The overall amount of inactive tables is
- limited, so not much memory will be used here.
+* Updated transitive JS dependency hoek to @hapi/hoek@8.5.1 to resolve
+ CVE-2020-36604 in joi.
-* Added optional in-memory caching for index entries when doing point lookups
- in indexes of type "persistent", "hash" or "skiplist".
- The caching is turned off by default, but can be enabled when creating an
- index of type "persistent", "hash" or "skiplist" by setting the
- "cacheEnabled" flag for the index upon index creation.
- The cache will be initially empty, but will be populated lazily upon querying
- data from the index using equality lookups on all index attributes.
- As the cache is hash-based and unsorted, it cannot be used for full or
- partial range scans, for sorting, or for lookups that do not include all
- index attributes.
- The maximum size of index entries that can be stored is currently 4 MB, i.e.
- the cumulated size of all index entries for any index lookup value must be
- less than 4 MB. This limitation is there to avoid storing the index entries
- of "super nodes" in the cache.
+* Updated JS dependency minimatch to 3.1.2 to resolve CVE-2022-3517.
- The maximum combined memory usage of all in-memory caches can be controlled
- via the existing `--cache.size` startup option, which now not only contains
- the maximum memory usage for edge caches, but also for index caches added
- here.
+* Updated JS dependency qs to 6.11.0 to resolve CVE-2022-24999.
-* Updated arangosync to v2.9.0-preview-1.
+* Updated arangosync to v2.15.0.
-* Updated ArangoDB Starter to 0.15.4-preview-1.
+* Allow usage of projections and covering indexes in more cases.
+ Previously, projections were not used if there were complex filter conditions
+ on the index attribute(s) that contained the `[*]` expansion operator with
+ inline FILTERs or RETURNs, e.g. `FILTER doc.addrs[* FILTER CURRENT.country ==
+ 'US'].zip`.
-* Added API method to query a list of available optimizer rules from the
- arangod process.
+* PRESUPP-546: make AQL optimizer rule `simplify-conditions` correctly report
+  that it was triggered. Previously that rule never reported that it was
+  triggered even though it actually was.
-* Added new server option: --icu-language. Used instead of --default-language
- to set pure ICU collator.
+* Added startup option `--rocksdb.auto-refill-index-caches-on-followers` to
+ control whether automatic refilling of in-memory caches should happen on
+ followers or just leaders. The default value is `true`, i.e. refilling happens
+ on followers too.
- For example, in Sweden language("sv") lowercase letters should precede
- uppercase ones. You can achieve it using following options when server starts
+* Added new geo_s2 ArangoSearch analyzer (Enterprise Only).
- --icu-language sv
+* GORDO-1554: Fixes invalid document insertion with invalid user-specified keys
+ (e.g. numeric values) into EnterpriseGraph related vertices.
-* Added new AQL function `KEEP_RECURSIVE` to recursively keep attributes from
- objects/documents, as a counterpart to `UNSET_RECURSIVE`.
+* Added metric `arangodb_replication_clients` showing the number of currently
+ active/connected replication clients for a server.
-* No longer put document writes from replication into the audit log by
- default. Same with low priority authentication like internal UI requests
- to .html files for the UI. This solves a performance problem for
- shards getting in sync with audit log switched on.
+* BTS-1249: Add startup option `--foxx.enable`.
+ This startup option determines whether access to user-defined Foxx services is
+ possible for the instance. It defaults to `true`.
+ If the option is set to `false`, access to Foxx services is forbidden and will
+ be responded with an HTTP 403 Forbidden error. Access to ArangoDB's built-in
+ web interface, which is also a Foxx service, is still possible even with the
+ option set to `false`.
+ When setting the option to `false`, access to the management APIs for Foxx
+ services will also be disabled. This is the same as manually setting the
+ option `--foxx.api false`.
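+
+ For example (illustrative), access to user-defined Foxx services can be
+ turned off by starting the server with:
+
+   arangod --foxx.enable false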
-* Added an HTTP fuzzer to arangosh that can send fuzzed requests to the server.
- The amount of requests sent is provided by one of the parameters of the
- new arangosh function `fuzzRequests()`.
- The optional parameters that can be supplied are:
- `fuzzRequests(, , )`
- The parameter numIterations is the amount of times the fuzzer is going to
- perform its random actions on the header, and seed is for the seed that is
- used for randomizing.
- The fuzzer is available only when building with failure points.
+* Fixed a bug in the API used by `arangorestore`: On restore, a new _rev value
+ is generated for each imported document to avoid clashes with previously
+ present data. This must be created on the shard leader rather than the
+ coordinator. The bug occurred when two coordinators created the same _rev
+ value for two different documents concurrently.
-* Fixed ES-1078: The REST API endpoint for handling `/_api/user/${user}/config`
- did not work properly. The supplied data by sending a PUT request has not been
- stored to the correct location. The Web UI uses this endpoint to store its
- graph properties for storing the visualization properties. As this endpoint
- did not work as expected, the graph visualization properties did not get
- persisted as well. This is now resolved.
+* ES-1428: make the maximum number of V8 contexts depend on the maximum number
+ of server threads, if `--javascript.v8-contexts` is not set explicitly.
+ Previously the maximum number of V8 contexts was hard-coded to 16 when the
+ option `--javascript.v8-contexts` was not set explicitly.
+ Now the maximum number defaults to 7/8 of the value of the startup option
+ `--server.maximal-threads`, regardless of whether that option is explicitly
+ configured or its default value is used. Only 7/8 are used to leave some
+ headroom for other important maintenance tasks.
+ A server with default configuration should now not block waiting for V8
+ contexts to become available, but it may use more memory for the additional V8
+ contexts if there are many concurrent requests that invoke JavaScript actions
+ (e.g. requests using the web UI or Foxx).
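+
+ As a worked example: with `--server.maximal-threads 64`, the maximum number
+ of V8 contexts now defaults to 64 * 7 / 8 = 56, unless
+ `--javascript.v8-contexts` is set explicitly.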
-* Speed up initial sync (in case there is already data present) by prefetching
- data from leader.
+* Improve memory usage of in-memory edge index cache if most of the edges in an
+ index refer to a single or mostly the same collection.
+ Previously the full edge ids, consisting of the referred-to collection name
+ and the referred-to key of the edge, were stored in full. Now, the first
+ edge inserted into an edge index's in-memory cache will determine the
+ collection name for which all corresponding edges can be prefix-compressed.
+ For example, when inserting an edge pointing to `the-collection/abc` into the
+ empty cache, the collection name `the-collection` will be noted for that cache
+ as a prefix. The edge will be stored in memory as only `/abc`. Further edges
+ that are inserted into the cache and that point to the same collection will
+ also be stored prefix-compressed.
+ The prefix compression is transparent and does not require configuration or
+ setup. Compression is done separately for each cache, i.e. a separate prefix
+ can be used for each individual edge index, and separately for the `_from` and
+ `_to` parts. Lookups from the in-memory edge cache will not return compressed
+ values but the full-length edge ids. The compressed values will also be used
+ in memory only and will not be persisted on disk.
-* Escape each key in attribute paths of nested attributes in the query explain
- output for SEARCH queries that utilize the primary sort order.
-* Turn off sending "Server" HTTP response header on DB servers if not
- explicitly requested. This saves a tiny bit of traffic on each response
- from a DB server.
+v3.10.4 (2023-02-19)
+--------------------
-* Fix null pointer access when using WINDOW operation with a COUNT/LENGTH
- aggregate function without any arguments.
+* Updated ArangoDB Starter to 0.15.7.
-* Enable range deletions in the WAL for truncate operations in the cluster,
- too. This can speed up truncate operations for large collections/shards.
+* Updated OpenSSL to 1.1.1t and OpenLDAP to 2.6.4.
-* Set max recursion depth for VelocyPack, JSON and JavaScript arrays and
- objects to about 200.
+* BTS-1184: Fixed an issue where an index hint with `forceIndexHint` set to
+ `true` was not used in a query when a geo index was present, because
+ geo-related optimizations would override the choice of the index hint.
-* Updated snowball to version 2.2.0
+* Fixed EE: Concurrent batch insert/update CRUD operations into
+ SmartEdgeCollections on conflicting edge keys could get the smart edge caching
+ out-of-sync, which would yield different results for OUTBOUND/INBOUND search
+ over edges. This is now fixed; however, there is now a slightly higher chance
+ to get a CONFLICT response back on those queries.
-* Fixed: Deadlock created by high load and a follower trying to get into
- sync.
- In the final synchronization phase the follower needs to temporarily block
- writes on the leader so we have a reliable point in time where we can prove
- that the data is consistent.
- If the leader at this point is flooded with write requests to that shard
- there is a chance that all worker threads only pick up those writes, which
- cannot make any progress until the lock is cleared. However, the process to
- clear the lock was on the same priority as those writes.
- Hence this lock clear operations could not bypass the writes. Now we moved
- every follow up request after the lock to HIGH lanes, which will allow them
- to bypass all non-internal operations.
+* Return peak memory usage and execution time as part of the query explain
+ result. This helps to find queries that use a lot of memory to build the
+ execution plan.
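+
+ A sketch of how to inspect these values from arangosh (assuming they are
+ reported in the `stats` attribute of the explain result; `coll` is a
+ placeholder):
+
+   var stmt = db._createStatement({ query: "FOR doc IN coll RETURN doc" });
+   // stats is assumed to include peakMemoryUsage and executionTime
+   print(stmt.explain().stats);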
-* arangosh now uses the same header the UI uses to gain higher priority on
- initial connection.
- This will increase the chance for an arangosh to connect to a server under
- very high load.
+* Made all transactions used by the gharial API on coordinators and a few others
+ marked "globally managed". This fixes an issue where transaction conflicts
+ could lead to a silent out of sync situation between a leader shard and its
+ followers.
-* Removed internal JavaScript dependencies "mocha" and "chalk". We recommend
- always bundling your own copy of third-party modules, even ones listed as
- public.
+* BTS-1219: Fix cost estimation for geo index usage and for collection
+ enumeration with included filtering. This fixes a regression from 3.9 where a
+ geo index was no longer used, caused by an optimizer rule that gained new
+ powers and by wrong cost estimations for execution plans.
-* Bugfix: DC2DC Disjoint-SmartGraphs and Hybrid-SmartGraphs are now
- replicated to the follower data-center keeping their sharding intact.
+* Allow usage of document projections and traversal projections in slightly more
+ cases, specifically when the document's or traversal's output variables were
+ used in subqueries. Previously the usage of the document or traversal output
+ variables in subqueries could lead to projections being disabled.
-* Optimize further RocksDB throttle to allow for no change on any
- given calculation cycle.
+* Improved optimization of functions to be covered by Traversals. Now more
+ functions should be optimized into the traversal, and some that are not valid
+ should not be optimized anymore. Fixes #16589.
-* Added a log message that appears upon starting arangod that shows the number
- of the parent process id and, if able to acknowledge it, the name of the
- parent process.
+* BTS-1193: Fix for schema updates. Previously, when removing a field and then
+ inserting a new field into the schema, the old and new schemas would be
+ merged, meaning the old field was kept in addition to the new one.
-* Avoid multiple parallel SIGHUP requests to be handled at the same time.
- Now collapse multiple incoming SIGHUP requests into a single one, which can
- be executed race-free.
+* Fixed issue #18053: Computed Values become null when Schema is modified.
-* Parallelize applying of revision tree changes with fetching next revision
- tree range in incremental collection replication for collections created
- with ArangoDB 3.8 and higher.
+* Set the cache_oblivious option of jemalloc to `false` by default. This helps
+ to save 4096 bytes of RAM for every allocation which is at least 16384 bytes
+ large. This is particularly beneficial for the RocksDB buffer cache.
-* Support JSON schema objects for documenting Foxx endpoints.
+* Added startup option `--javascript.user-defined-functions`.
+ This option controls whether JavaScript user-defined functions (UDFs) can be
+ used in AQL queries. The option defaults to `true`. The option can be set to
+ `false` to disallow using JavaScript UDFs from inside AQL queries.
+ In that case, a parse error will be thrown when trying to run a query that
+ invokes a UDF.
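+
+ For example (illustrative; `MYFUNCS::MYFUNC` is a placeholder for a
+ user-defined function), with UDFs disallowed via:
+
+   arangod --javascript.user-defined-functions false
+
+ a query such as `RETURN MYFUNCS::MYFUNC()` will fail with a parse error.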
-* Internal refactoring of IndexIterator APIs.
+* Allow enabling/disabling supervision maintenance mode also via followers in
+ active failover mode. Previously the supervision maintenance mode could only
+ be enabled/disabled by making a call to the active failover leader.
-* Sorted out various geo problems:
+* BTS-266: When a cluster was started without `--cluster.force-one-shard`, a
+ database with more than one shard was created, and the cluster was then
+ restarted with `--cluster.force-one-shard` set to `true`, ArangoSearch
+ analyzers could apply optimizations that are only valid in single-shard mode.
+ To prevent this, the option being set to `true` is no longer used as a
+ condition to run these optimizations.
- - No more special detection of "latitude-longitude rectangles" is done,
- since this is in conflict with the definition of polygon boundaries to
- be geodesics.
- - Linear rings in polygons are no longer automatically "normalized", so
- now it is possible to have polygons which cover more than half of the
- Earth.
- - Rules for polygons and multigons have been clarified and are now properly
- enforced for the `GEO_POLYGON` and `GEO_MULTIPOLYGON` AQL functions.
- - Introduced `legacyPolygon` flag for geo indexes to continue to support
- the old behavior in existing geo indexes.
- - Added lots of additional tests, thereby fixing several bugs in geo index
- lookup.
- - Use a faster algorithm for pure `GEO_CONTAINS` and `GEO_INTERSECTS`
- queries.
+* Activate RDB_CoveringIterator and use it for some geo index queries.
+ This speeds up and simplifies geo queries with geo index which do not use
+ GEO_DISTANCE.
-* Fix counts and file size sum in hotbackup META files. Do no longer count
- directories.
-* Fixed an assertion failure which could occur when there was an error in
- the HTTP header, so that the message body was not actually read.
+v3.10.3 (2023-01-23)
+--------------------
-* Fixed a crash which could occur when there was an error in the HTTP
- header parsing).
+* Log information about follower state/apply progress in supervision job that
+ organizes failover in active failover mode.
-* Added back the optimization for empty document update operations (i.e. update
- requests in which no attributes were specified to be updated), handling them
- in a special way without performing any writes, also excluding such special
- cases of operation from replication to followers.
+* Updated arangosync to v2.14.0.
-* Upgraded JavaScript "i" module from 0.3.6 to 0.3.7.
+* Updated ArangoDB Starter to 0.15.6.
-* Bug-Fix: Resolve BTS-673/Issue #15107, a spliced subquery could return
- too few results
+* Fix bug in hotbackup download/restore to make sure no data is mixed up between
+ servers. This fixes a bug introduced in 3.10. Note that previous 3.10 versions
+ may not correctly restore a hotbackup which was uploaded from one cluster and
+ downloaded into another.
-* Changed Foxx service generator output to use static variable names
+* ES-1396: under some rare circumstances it was possible that background index
+ creation missed some documents in case the documents were inserted after
+ background index creation started and the corresponding WAL files with the
+ inserts were already removed before background indexing caught up.
-* As we are now in constant stall regime, stall onset and warnings are
- demoted to DEBUG.
+* Web UI [FE-48]: Additional fix to the previously introduced license
+ information usability improvement. The previous fix did not handle the edge
+ case of the server being started with the additional parameter
+ `--server.harden`.
-* Allow early pruning (moving a FILTER condition into an IndexNode or
- EnumerateCollectionNode) in more cases than before. Previously, early
- pruning was only possible if the FILTER condition referred to exactly one
- variable, which had to be the FOR loop's own variable. Now, early
- pruning is possible with arbitrary variables that are accessible by the
- FOR loop.
+* BTS-413: Added more explanatory messages for when the user cannot see the
+ statistics for a node in the UI when in cluster mode.
-* Fixed a race detected with chaos tests, where a db server could have
- momentarily lost leadership, just when it was about to drop a
- follower to that shard.
+* Fix coordinator segfault in AQL queries in which the query is invoked from
+ within a JavaScript context (e.g. from Foxx or from the server's console mode)
+ **and** the query has multiple coordinator snippets, of which a snippet other
+ than the outermost one invokes a JavaScript function.
+ Instead of crashing, coordinators will now respond with the exception "no v8
+ context available to enter for current transaction context".
+ For AQL queries that called one of the AQL functions `CALL` or `APPLY` with a
+ fixed function name, e.g. `APPLY('CONCAT', ...)`, it is now also assumed
+ correctly that no JavaScript is needed, except if the fixed function name is
+ the name of a user-defined function.
+ This fixes an issue described in OASIS-24962.
-* In an attempt to make the performance of the RocksDB throttle much
- more consistent and predictable the default compaction slow down
- trigger is lowered to 128kB.
+* BTS-1192: fix a potential race during hot backup creation, which could result
+ in error messages such as `{backup} Source file engine_rocksdb/002516.sst does
+ not have a hash file.` during hot backup creation. However, despite the error
+ message being logged, the hot backup was still complete.
-* Fixed BTS-750: Fixed the issue restricted to cluster mode in which queries
- containing the keywords UPDATE or REPLACE together with the keyword WITH and
- the same key value would result in an error. For example:
- `UPDATE 'key1' WITH {_key: 'key1'} IN Collection`
- because the same key used to update was provided in the object to update the
- document with.
+* Prevent agency configuration confusion by an agent which comes back without
+ its data directory and thus without its UUID.
-* The multi-dimensional index type `zkd` now supports an optional index hint for
- tweaking performance by prefetching documents:
+* Change the request lane for replication catchup requests that leaders in
+ active failover receive from their followers from medium to high. This gives
+ catchup requests from followers the highest priority, so that the leader
+ executes them in preference to regular requests.
- ```
- FOR app IN appointments OPTIONS { lookahead: 32 }
- FILTER @to <= app.to
- FILTER app.from <= @from
- RETURN app
- ```
+* Allow cluster database servers to start even when there are existing databases
+ that would violate the settings `--cluster.min-replication-factor` or
+ `--cluster.max-replication-factor`.
+ This allows upgrading from older versions in which the replication factor
+ validation for databases was not yet present.
- Specifying a lookahead value greater than zero makes the index fetch more documents
- that are no longer in the search box, before seeking to the next lookup position.
- Because the seek operation is computationally expensive, probing more documents
- before seeking may reduce the number of seeks, if matching documents are found.
- Please keep in mind that it might also affect performance negatively if documents
- are fetched unnecessarily.
+* Remove constant values for query variables from query plan serialization in
+ cases they were not needed. Previously, constant values of query variables
+ were always serialized for all occurrences of a variable in a query plan.
+ If the constant values were large, this contributed to higher serialization
+ and thus query setup times. Now the constant values are only serialized for
+ relevant parts of query execution plans.
-* Replaced internal JS dependency xmldom with @xmldom/xmldom.
+* Added startup option `--rocksdb.bloom-filter-bits-per-key` to configure the
+ average number of bits to use per key in a Bloom filter.
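+
+ Example (illustrative value; 10 bits per key corresponds to RocksDB's
+ default Bloom filter configuration):
+
+   arangod --rocksdb.bloom-filter-bits-per-key 10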
-* Enabled new internal graph refactored code for depth-first, breadth-first and
- weighted traversals by default.
+* Make the cache_oblivious option of jemalloc configurable from the environment.
+ This helps to save 4096 bytes of RAM for every allocation which is at least
+ 16384 bytes large. This is particularly beneficial for the RocksDB buffer
+ cache.
-* Replaced internal JS dependency ansi-html with ansi-html-community.
+* Improve performance of RocksDB's transaction lock manager by using different
+ container types for the locked keys maps.
+ This can improve performance of write-heavy operations that are not I/O-bound
+ by up to 10%.
-* Improved performance of inner joins with dynamic lookup conditions being
- injected from an outer loop, for indexes of type "persistent", "hash" and
- "skiplist". Performance improvements can be expected if the inner join is
- invoked a lot of times with many different values fed in by the outer loop.
- The performance improvements are due to some improved handling of index
- lookup conditions in the internals of the VelocyPack-based index.
-* Upgrade bundled version of jemalloc to 5.3rc (upstream commit a4e8122).
+v3.10.2 (2022-12-16)
+--------------------
-* Improve usefulness of `storedValues` together with late materialization.
+* Added experimental per-operation option `refillIndexCaches` to write
+ operations, namely:
-* Reintroduce shard synchronization cancellation check that was disabled
- before.
+ - AQL INSERT/UPDATE/REPLACE/REMOVE modification operations
+ - single-document insert, update, replace and remove operations
+ - multi-document insert, update, replace and remove operations
-* Bug-Fix: AQL WINDOW statement if applied within a subquery could accidentally
- skip over some subquery results. This did only show up if the subquery fills
- exactly one internal batch before it is completed, so it is rather unlikely.
+ If the option is set to `true`, every currently running transaction will keep
+ track of which in-memory edge index cache entries were invalidated by the
+ transaction, and will try to (re-)fill them later.
+ Currently only edge indexes are supported. ArangoDB 3.11 will add support for
+ velocypack-based indexes (persistent, hash, skiplist index).
-* Fix potential access to dangling reference in cancellation of shard
- synchronization.
+ Example usages (`<collection>` is a placeholder for a collection name):
+ - `db.<collection>.insert({ _from: ..., _to: ..., ... },
+   { refillIndexCaches: true });`
+ - `db.<collection>.update(key, { _from: ..., _to: ..., ... },
+   { refillIndexCaches: true });`
+ - `db.<collection>.replace(key, { _from: ..., _to: ..., ... },
+   { refillIndexCaches: true });`
+ - `db.<collection>.remove(key, { refillIndexCaches: true });`
+ - `INSERT { ... } INTO <collection> OPTIONS { refillIndexCaches: true }`
+ - `UPDATE { ... } WITH { ... } INTO <collection> OPTIONS
+   { refillIndexCaches: true }`
+ - `REPLACE { ... } WITH { ... } INTO <collection> OPTIONS
+   { refillIndexCaches: true }`
+ - `REMOVE { ... } IN <collection> OPTIONS { refillIndexCaches: true }`
-* Limited module resolution in arangosh to the path from which arangosh is
- invoked.
+ The refilling of the in-memory caches for indexes is performed by a background
+ thread, so that the foreground write operation should not be slowed down
+ significantly. The background thread may, however, cause additional I/O for
+ looking up the data in RocksDB and for repopulating the caches.
+
+ The background refilling is done in a best-effort way and is not guaranteed to
+ always succeed, e.g. if there is no memory available for the cache subsystem,
+ or when an in-memory cache table is currently in a migration phase
+ (grow/shrink operation).
+
+ There is a new experimental startup option
+ `--rocksdb.auto-refill-index-caches-on-modify` for DB-Servers and single
+ servers, which currently defaults to `false`. If it is set to `true`, the
+ cache refilling will be turned on automatically for all
+ insert/update/replace/remove operations, so that it doesn't need to be
+ specified on the per-operation/per-query level.
-* Fixed BTS-621 Fixed rare case of segfault in cluster during database recovery
- if DBServer is in upgrade mode in the same time.
+ The experimental option `--rocksdb.auto-refill-index-caches-queue-capacity`
+ can be used to limit the number of index cache entries that the background
+ thread will queue. This is a safeguard to keep the memory usage at bay in case
+ the background thread is slower than concurrent threads that perform
+ ingestions.
-* Changed default value of startup option
- `--rocksdb.cache-index-and-filter-blocks` from `false` to `true`.
- This makes RocksDB track all loaded index and filter blocks in the block
- cache, so they are accounted for in RocksDB's block cache. Also the default
- value for the startup option `--rocksdb.enforce-block-cache-size-limit`
- was flipped from `false` to `true` to make the RocksDB block cache not
- temporarily exceed the configured memory limit (`--rocksdb.block-cache-size`).
+ There are also new experimental startup options to control whether or not the
+ in-memory caches should automatically be seeded upon server restart.
+ The option `--rocksdb.auto-fill-index-caches-on-startup` for DB-Servers and
+ single servers enables this functionality. It currently defaults to `false`.
+ If it is set to `true`, the in-memory caches of all eligible indexes will be
+ automatically pre-seeded after the server startup. Note that this may cause
+ additional CPU and I/O load.
+ The option `--rocksdb.max-concurrent-index-fill-tasks` is available to limit
+ the impact of the automatic index filling at startup. It controls how many
+ full index filling operations can execute concurrently. The lower this number
+ is, the lower the impact of cache filling, but the longer it will take.
+ The default value for this option depends on the number of available cores,
+ and is at least `1`. A value of `0` cannot be used.
+ This option is only relevant if `--rocksdb.auto-fill-index-caches-on-startup`
+ is set to `true`.
- These default value changes will make RocksDB adhere much better to the
- configured memory limit. This is a trade-off between memory usage stability
- and performance. These change may have a small negative impact on performance
- because if the block cache is not large enough to hold the data plus the
- index and filter blocks, additional disk I/O may be performed compared to
- previous versions. In case there is still unused RAM capacity available, it
- may be sensible to increase the total size of the RocksDB block cache.
+ This change also adds the following metrics:
+ - `rocksdb_cache_auto_refill_loaded_total`: Total number of queued items for
+ in-memory index caches refilling. It will always report a value of zero on
+ coordinators.
+ - `rocksdb_cache_auto_refill_dropped_total`: Total number of dropped items for
+ in-memory index caches refilling (because number of queued items would
+ exceed the value of `--rocksdb.auto-refill-index-caches-queue-capacity`).
+ It will always report a value of zero on coordinators.
+ - `rocksdb_cache_full_index_refills_total`: Total number of in-memory index
+ caches refill operations for entire indexes. The counter gets increased for
+ every index automatically loaded (because startup option
+ `--rocksdb.auto-fill-index-caches-on-startup` is set to `true`) or when full
+ indexes are loaded into memory manually.
+ In cluster deployments the counter will be increased once per eligible index
+ per shard. It will always report a value of zero on coordinators.
-* Add "filtered" column to AQL query profiling output.
- This column shows how many documents were filtered by the node and thus
- provides insights into if additional indexes could help.
+* Use intermediate commits in old shard synchronization protocol. This avoids
+ overly large RocksDB transactions when syncing large shards, which is a remedy
+ for OOM kills during restarts.
-* Reuse ExecutorExpressionContext inside IndexExecutor, so that repeated
- setup and teardown of expression contexts can be avoided.
+* Added a configuration option (for the agency)
+ `--agency.supervision-failed-leader-adds-follower` with a default of `true`
+ (behavior as before). If set to `false`, a `FailedLeader` job does not
+ automatically configure a new shard follower, thereby preventing unnecessary
+ network traffic, CPU and IO load for the case that the server comes back
+ quickly. If the server is permanently failed, an `AddFollower` job will be
+ created anyway eventually.
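+
+ Example (illustrative):
+
+   arangod --agency.supervision-failed-leader-adds-follower false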
-* Reduce memory usage of inner joins if they were performed by the
- IndexExecutor with dynamic index lookup expressions that needed to be
- recomputed for input from the outer loop.
+* The maximum value of `minhash_value` was set to 2^53 - 1 (9007199254740991)
+ to stay within the safe integer limits of JavaScript.
- For example, in the query
- ```
- FOR i IN 1..1000
- FOR doc IN collection
- FILTER doc.indexAttribute == i
- RETURN doc
- ```
- the inner loop will be executed 1000 times. The IndexExecutor in the inner
- loop needed to rebuild the index lookup attribute from the value of `i` 1000
- times as well. The memory for index lookup attributes came from the Ast's
- memory allocator and was not freed until the end of the query. In this
- query, it would mean that up to 1000 lookup values were held in memory. With
- larger inputs even more memory would be used.
+* SEARCH-433 Fix Inverted index fields presence checks for IN clauses.
- Now the memory for index lookup values is freed when a new lookup value is
- computed, i.e. only a single lookup value is held in memory.
- This drastically reduces peak memory usage for queries that use index lookups
- in inner loops and that get lots of different inputs from outer loops.
+* BTS-1141: Changed the default value of startup option
+ `--rocksdb.enforce-block-cache-size-limit` from `true` to `false`.
+ This change prevents RocksDB from going into read-only mode when an internal
+ operation tries to insert some value into the block cache, but can't do so
+ because the block cache's capacity limit is reached.
-* Adjust internal RocksDB setting `optimize_filters_for_hits` for Documents
- column family, setting it from `false` to `true`. This should reduce memory
- and disk space requirements for the bottom-most .sst files of the documents
- column family.
+* Don't log Boost ASIO warnings such as `asio IO error: 'stream truncated'` when
+ a peer closes an SSL/TLS connection without performing a proper connection
+ shutdown.
-* Upgrade VelocyPack library to latest version.
+* Disallow creating new databases with a `replicationFactor` value set to a
+ value lower than `--cluster.min-replication-factor` or higher than
+ `--cluster.max-replication-factor`. Previously the `replicationFactor`
+ settings for new databases were not bounds-checked, only for new collections.
-* Slightly improve the explain output of SingleRemoteOperationNodes.
+* Fixed Github issue #16451: In certain situations, a LIMIT inside a subquery
+ could erroneously reduce the number of results of the containing (sub)query.
-* Added more detail to the log messages that display the total time consumption
- and total amount of data parsed for the client tools arangodump and
- arangorestore.
+* Added a feature to the ResignLeadership job. By default, it will now
+ undo the leader changes automatically after the server is restarted,
+ unless the option `undoMoves` is set to `false`. This will help to
+ make rolling upgrades and restarts less troublesome, since the shard
+ leaderships will not get unbalanced.
-* Fixed minDepth handling of weighted traversals. When using a minDepth of 3,
- also paths of length 2 have been returned, on all locally executed variants
- (SingleServer, OneShard, DisjointSmart).
+* Add missing metrics (histograms) for user traffic:
+ - `arangodb_client_user_connection_statistics_bytes_received`
+ - `arangodb_client_user_connection_statistics_bytes_sent`
+ These numbers were so far only published via the statistics API.
+ This is needed for Oasis traffic accounting.
-* Fixed BTS-728 (no released version infected) fixed: for DisjointSmartGraphs,
- that include a satellite vertex collection, valid disjoint path were not always
- followed, if one of the satellites has a connection to two (or more) vertices
- that have different shardValues that by chance are routed to the same shard.
+* Added agency options `--agency.supervision-delay-add-follower` and
+ `--agency.supervision-delay-failed-follower` to delay supervision actions for
+ a configurable number of seconds. This is desirable in case a DBServer fails
+ and comes back quickly, because it gives the cluster a chance to get in sync
+ and fully resilient without deploying additional shard replicas and thus
+ without causing any data imbalance.
-* Fixed PRESUPP-445: Foxx queues: Some jobs are never run in case of
- multiple Coordinators.
+* Enable "collect-in-cluster" optimizer rule for SmartGraph edge collections.
-* Fixed BTS-740 (no released version infected) fixed Smart<->Sat
- SmartEdgeCollections determining the shard in SingleRemoteModification nodes
- was incorrect. E.g. this could be triggered, by viewing the details of an edge
- in the UI. Only alpha/beta of 3.9.0 contained this bug.
+* Fixed SEARCH-408 Added "cache" columns feature for ArangoSearch.
-* Fixed BTS-729 (no released version affected): Some conditions in a Hybrid
- Smart Graph could led to wrong shard location calculation and therefore to
- wrong graph query results. Only alpha/beta of 3.9.0 contained this bug.
+* Fixed SEARCH-427 Fixed Inverted index usage of field with trackListPositions
+ enabled.
-* Upgraded bundled version of RocksDB to 6.29.
+* Fix HTTP/VST traffic accounting in internal statistics / metrics.
-* Fix creation of satellite graphs with a `numberOfShards` value != 1.
+* Add `serverId` parameter to `/_admin/log/level`. This allows forwarding the
+ request to other servers.
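+
+ For example (illustrative; the server id is a placeholder):
+
+   curl "http://localhost:8529/_admin/log/level?serverId=PRMR-1234"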
-* Disable optimizer rule "optimize-cluster-single-document-operations" when a
- collection is accessed in exclusive mode, because the optimized query would
- use a slightly different mode of locking then.
+* Delay a MoveShard operation for leader change, until the old leader has
+ actually assumed its leadership and until the new leader is actually in sync.
+ This fixes a bug which could block a shard under certain circumstances. This
+ fixes BTS-1110.
-* Upgraded boost to 1.78.0.
+* Updated arangosync to v2.13.0.
-* Fixed issue #15501: Regression when using "exclusive" query option?
- This fixes a regression in AQL query execution when exclusive locks are used
- for a query and the query also uses the DOCUMENT() AQL function. In this
- case, when there were more concurrent requests to the underlying collection
- than available scheduler threads for the low priority queue, the query
- start successfully acquired the exclusive lock, but could not get its
- follow-up requests through and starve while holding the exclusive lock.
+* Fixed issue #17291: Server crash on error in the PRUNE expression.
+ Traversal PRUNE expressions containing JavaScript user-defined functions
+ (UDFs) are now properly rejected in single server and cluster mode.
+ PRUNE expressions that use UDFs require a V8 context for execution, which is
+ not available on DB-servers in a cluster, and also isn't necessarily available
+ for regular queries on single servers (a V8 context is only available if a
+ query was executed inside Foxx or from inside a JS transaction, but not
+ otherwise).
-* Improve performance of `db._explain()` for very large query execution plans.
- Higher performance is achieved by not serializing some internal data
- structures when serializing execution plans. Serializing internal data is
- now opt-in and turned off if not needed. Apart from performance, there should
- be no end user visible changes.
+* Removed more assertions from the cluster rebalance JS test that obligated the
+ rebalance plan to always have moves, although there are cases in which there
+ are none.
-* APM-24: Log messages can be displayed together with some other useful
- parameters, e.g., the name of the database, username, query id, and so on.
- There are some predefined parameters that we consider displaying, but,
- for the moment, only database, username and url are being displayed.
- The usage upon starting the server is, for example:
- `arangod --log.structured-param database --log.structured-param username`
+* Fix setting query memory limit to 0 for certain queries if a global memory
+ limit is set, but overriding the memory limit is allowed.
-* Fixed BTS-712: Collation analyzer now always produces valid UTF-8 sequence.
+* Do not query vertex data in K_PATHS queries if vertex data is not needed.
-* Add optional "storedValues" attribute for persistent indexes.
+* BTS-1075: AQL: RETURN DOCUMENT ("") inconsistent - single server vs cluster.
- This will add the specified extra fields to the index, so that they can be
- used for projections, but not for lookups or sorting.
+* Repair "load indexes into memory" function in the web UI.
- Example:
+* Fixed issue #17367: FILTER fails when using negation (!) on variable whose
+ name starts with "in". Add trailing context to NOT IN token.
- db..ensureIndex({
- type: "persistent",
- fields: ["value1"],
- storedValues: ["value2"]
- });
+* Fix disk space metrics `rocksdb_free_disk_space` and
+ `rocksdb_total_disk_space` on macOS. Previously, they could report wrong
+ values.
- This will index `value1` in the traditional sense, so the index can be
- used for looking up by `value1` or for sorting by `value1`. The index also
- supports projections on `value1` as usual.
- In addition, due to `storedValues` being used here, the index can now also
- supply the values for the `value2` attribute for projections.
+* Show number of HTTP requests in cluster query profiles.
- This allows covering index scans in more cases and helps to avoid making
- extra document lookups in the documents column family. This can have a
- great positive effect on index scan performance if the number of scanned
- index entries is large.
+* Removed assertions from the cluster rebalance JS test that obligated the
+ rebalance plan to always have moves, although there are cases in which there
+ are none.
- The maximum number of attributes to store in `storedValues` is 32.
+* Improved the syntax highlighter for AQL queries in the web interface
+ with support for multi-line strings, multi-line identifiers in forward
+ and backticks, colorization of escape sequences, separate tokens for
+ pseudo-keywords and pseudo-variables, an updated regex for numbers, and
+ the addition of the AT LEAST and WITH COUNT INTO constructs.
-* Fixed a bug that hotbackup upload could miss files (fixes BTS-734).
-* Updated Enterprise license behavior: now there will be a 48 hour period for a
- new deployment and upgrade decision to provide the license. After that period,
- the read-only mode will be enforced.
- Upgrade procedure for a deployment without license will not take upgrade
- period into account for the read-only mode will enforcement.
+v3.10.1 (2022-11-04)
+--------------------
-* Added agency push-queue operation.
+* Added detailed explanations for some startup options.
+ They are only exposed via `--dump-options` under the `longDescription` key.
-* Fixed issue #15476: FATAL {crash} occurs on a simple query.
+* Updated OpenSSL to 1.1.1s.
-* Added `disableIndex` index hint for AQL FOR loops. This index hint disables
- the usage of any index (except geo or full text indexes) and will cause a
- full scan over the collection.
- In some circumstances a full scan can be more efficient than an index scan,
- for example if the index scan produces many matches (close to the number of
- documents in the collection) and the index is not fully covering the query.
- The `disableIndex` hint can be given per FOR loop in the query, e.g.:
+* Solve a case of excessive memory consumption in certain AQL queries with IN
+ filters with very long lists. Free sub-iterators as soon as they are
+ exhausted.
- FOR doc IN collection OPTIONS { disableIndex: true }
- RETURN doc.value
+* BTS-1070: Fixed query explain not handling aggregate functions without
+ arguments, and the WINDOW node not being defined as an AST node type name.
- The default value of `disableIndex` is `false`.
- In case a different index hint is provided, `disableIndex: true` takes
- precedence and produces a warning about the ambiguous settings.
+* Fixed: updating properties of a satellite collection broke its
+ `replicationFactor` setting.
-* Added `maxProjections` hint for AQL FOR loops. This hint can be used to set
- the maximum number of document attributes that are taken into account for
- using projections.
+* Log the document counts on leader and follower shards at the end of each
+ successful shard synchronization.
- For example, in the following query, no projections will be used because the
- number of potential projection attributes (`value1`, value2`, `value3`) is
- higher than the maximum number of projection attributes set via the
- `maxProjections` option:
+* Remove superfluous dash character from startup option name
+ `--temp.-intermediate-results-encryption-hardware-acceleration`.
- FOR doc IN collection OPTIONS { maxProjections: 2 }
- RETURN [ doc.value1, doc.value2, doc.value3 ]
+* FE-159: When creating a database in cluster mode, several parameters are
+ required, but they were not shown when opening the database settings after
+ creation. These settings are now visible in read-only mode (greyed out).
- The default value for `maxProjections` is `5`, which is compatible with the
- previous hard-coded default value.
+* Added startup option `--query.log-failed` to optionally log all failed AQL
+ queries to the server log. The option is turned off by default.
-* Fixed potentially undefined behavior for exit code of arangodump.
+* Added startup option `--query.log-memory-usage-threshold` to optionally log
+ all AQL queries that have a peak memory usage larger than the configured
+ value. The default value is 4GB.
-* Ignore signals such as SIGPIPE in client tools.
+* Added startup option `--query.max-artifact-log-length` to control the maximum
+ length of logged query strings and bind parameter values.
+ This allows truncating overly long query strings and bind parameter values to
+ a reasonable length. Previously the cutoff length was hard-coded.
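+
+ A combined usage sketch for the three new query logging options (values are
+ illustrative; the threshold is given in bytes):
+
+   arangod --query.log-failed true \
+           --query.log-memory-usage-threshold 4294967296 \
+           --query.max-artifact-log-length 4096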
-* Fixed ES-1025: fixed a performance regression caused by different hash
- calculation for primitive types like `uint64_t` and new the `Identifier`
- wrapper and derived types.
+* Improve cardinality estimate for AQL EnumerateCollectionNode in case a `SORT
+ RAND() LIMIT 1` is used. Here, the estimated number of items is at most 1.
-* APM-292: Added new AQL function SHARD_ID.
+* Improved shard distribution during collection creation.
-* BTS-707: rename "hardened" option value for `--server.support-info-api`
- startup option to "admin".
+* Changed the encoding of revision ids returned by the following REST APIs:
+ - GET /_api/collection/<collection-name>/revision: the revision id was
+   previously returned as numeric value, and now it will be returned as
+   a string value with either numeric encoding or HLC-encoding inside.
+ - GET /_api/collection/<collection-name>/checksum: the revision id in the
+   "revision" attribute was previously encoded as a numeric value in single
+   server, and as a string in cluster. This is now unified so that the
+   "revision" attribute always contains a string value with either numeric
+   encoding or HLC-encoding inside.
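+
+ For example (illustrative; `myCollection` is a placeholder), the response of
+
+   curl http://localhost:8529/_api/collection/myCollection/revision
+
+ will now contain the revision id as a string value in its "revision"
+ attribute.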
-* Extend timeouts for caching collection counts and index selectivity estimates
- on coordinators from 15s/90s to 180s. This change will cause less requests to
- be made from coordinators to DB servers to refresh info about collection
- counts and index estimates as part of AQL queries. The cached info is used in
- cluster query execution plans only and is not required to be fully up-to-date.
+* Fixed handling of empty URL parameters in HTTP request handling.
-* BTS-590: When creating a new database in Web UI the value of the write concern
- has to be smaller or equal to the replication factor. Otherwise an error
- message will be displayed and no database will be created.
+* Fixed diffing of completely non-overlapping revision trees, which could lead
+ to out-of-bounds reads at the right end of the first (smaller) tree.
-* Fixed BTS-693: Sort-limit rule now always ensures proper LIMIT node placement
- to avoid possible invalid results in the fullCount data.
+* Fixed aborting the server process if an exception was thrown in C++ code that
+ was invoked from the llhttp C code dispatcher. That dispatcher code couldn't
+ handle C++ exceptions properly.
-* Updated OpenSSL to 1.1.1m and OpenLDAP to 2.6.0.
+* Fixed BTS-1073: Fix encoding and decoding of revision ids in replication
+ incremental sync protocol. Previously, the encoding of revision ids could be
+ ambiguous under some circumstances, which could prevent shards from getting
+ into sync.
-* Improved performance in replication dump protocol by inserting arrays of
- documents instead of one document at a time and also not retrieving the
- document revision field when not needed.
+* Fixed BTS-852 (user's saved queries used to disappear after updating user
+ profile).
-* Changed various default values for RocksDB to tune operations for different
- typical scenarios like gp2 type volumes and gp3 type volumes and locally
- attached SSDs with RAID0:
- - `--rocksdb.level0-slowdown-trigger` has been decreased from 20 to 16
- - `--rocksdb.level0-stop-trigger` has been increased from 36 to 256
- - `--rocksdb.max-background-jobs` has been increased to the number of cores
- and is no longer limited to 8
- - `--rocksdb.enabled-pipelined-write` is now `true` by default instead of `false`
- - `--rocksdb.throttle-frequency` has been decreased from 60000ms down to
- 1000ms per iteration, which makes the RocksDB throttle react much quicker
- - `--rocksdb.pending-compactions-slowdown-trigger` has been decreased from
- 64 GB down to 8 GB
- - `--rocksdb.pending-compactions-stop-trigger` has been decreased from
- 256 GB down to 16 GB
- - `--rocksdb.throttle-slots` has been increased from 63 to 120
- - `--rocksdb.encryption-hardware-acceleration` is now `true` by default,
- which helps performance and should not create any problems, since we
- require sandybridge anyway.
- Combined, these changes help ArangoDB/RocksDB to react quicker to a backlog
- of background jobs and thus to prevent catastrophic stops which abort
- data ingestion or lead to cluster internal timeouts.
+* ES-1312: fix handling of reaching the WAL archive capacity limit.
-* Added startup options to adjust previously hard-coded parameters for RocksDB's
- behavior:
+* Log better diagnosis information in case multiple servers in a cluster are
+ configured to use the same endpoint.
- - `--rocksdb.pending-compactions-bytes-slowdown-trigger` controls RocksDB's
- setting `soft_pending_compaction_bytes_limit`, which controls how many
- pending compaction bytes RocksDB tolerates before it slows down writes.
- - `--rocksdb.pending-compactions-bytes-stop-trigger` controls RocksDB's
- setting `hard_pending_compaction_bytes_limit`, which controls how many
- pending compaction bytes RocksDB tolerates before it stops writes entirely.
- - `--rocksdb.throttle-lower-bound-bps`, which controls a lower bound for the
+* BTS-908: Fixed WebUI GraphViewer not being able to create a new edge relation
+ between two nodes in cases where only one edge definition has been defined
+ inside the graph definition.
+
+* MDS-1019: Make user search case-insensitive and allow search by name.
+
+* MDS-1016: When creating a new collection the fields "Number of Shards" and
+ "Replication factor" are greyed out now when the field "Distribute shards
+ like" is not empty.
+
+* Fixed BTS-850: Fixed the removal of already deleted orphan collections from
+ a graph definition. Previously, removing an orphan collection from a graph
+ definition failed and was rejected in case the collection had already been
+ dropped.
+
+* BTS-1008: Update react-autocomplete-input to fix single letter collection bug
+ when creating a link in the views in the WebUI.
+
+* BTS-1061: ARM was not recognized on Apple M1.
+
+* BTS-325: Changed the HTTP status code from `400` to `404` of the ArangoDB
+ error code `ERROR_GRAPH_REFERENCED_VERTEX_COLLECTION_NOT_USED` to handle this
+ error in accordance with our edge errors.
+
+* Adjust permissions for "search-alias" views.
+
+ Previously, "search-alias" views were visible to users that didn't have read
+ permissions on the underlying referenced collections. This was inconsistent,
+ because "arangosearch" views weren't shown to users that didn't have read
+ permissions on the underlying links.
+ Now, the behavior for "search-alias" views is the same as for "arangosearch"
+ views, i.e. "search-alias" views are not shown and are not accessible for
+ users that don't have at least read permissions on the underlying collections.
+
+* BTS-969: Added a restriction so that the HTTP request `/cluster/rebalance`
+ does not consider servers with failed status as possible targets for
+ rebalancing shards in its execution plan.
+
+* Fix an issue with replication of arangosearch view change entries in single
+ server replication and active failover. Previously, when changing the
+ properties of existing views, the changes were not properly picked up by
+ followers in these setups. Cluster setups were not affected.
+
+
+v3.10.0 (2022-09-29)
+--------------------
+
+* Convert v3.10.0-rc.1 into v3.10.0.
+
+
+v3.10.0-rc.1 (2022-09-25)
+-------------------------
+
+* Temporary fix for BTS-1006 (hides new view types).
+
+* Fixed SEARCH-399 Rule `restrict-to-single-shard` now works properly with
+ inverted index.
+
+* Fixed SEARCH-393 fixed analyzer setting for nested fields.
+
+* APM-517: Add tooltips with values of the displayed properties after clicking a
+ node or an edge in the graph viewer.
+
+* Updated arangosync to v2.12.0.
+
+* Improve upload and download speed of hotbackup by changing the way we use
+ rclone. Empty hash files are now uploaded or downloaded by pattern, and
+ all other files are transferred in batches without remote directory listing,
+ which allows rclone to parallelize and avoid a lot of unnecessary network
+ traffic. The format of hotbackups does not change at all.
+
+* Fixed SEARCH-392 Fixed field features propagation.
+
+* Fixed SEARCH-388 Fixed handling nested subfields.
+
+* Fixed SEARCH-379 Transaction is properly set to index during optimization.
+
+* Fixed issue BTS-1018: Improve logging of binary velocypack request data.
+
+* Updated ArangoDB Starter to 0.15.5.
+
+* Fixed BTS-1017: Fixed a graph search issue where subqueries led to incorrect
+ results when they had been pushed down fully onto a DBServer in a Hybrid
+ Disjoint SmartGraph context with SatelliteCollections involved.
+
+* Fixed issue BTS-1023:
+ Added Linux-specific startup option `--use-splice-syscall` to control whether
+ the Linux-specific splice() syscall should be used for copying file contents.
+ While the syscall is generally available since Linux 2.6.x, it is also
+ required that the underlying filesystem supports the splice operation. This is
+ not true for some encrypted filesystems, on which splice() calls thus fail.
+ By setting the startup option `--use-splice-syscall` to `false`, a less
+ efficient, but more portable user-space file copying method will be used
+ instead, which should work on all filesystems.
+ The startup option is not available on other operating systems than Linux.
+
+* Fixed SEARCH-386 Fixed disjunction coverage in the inverted index with
+ non-default analyzers.
+
+* Fixed SEARCH-373 Fixed indexing same field as nested and as non-nested.
+
+
+v3.10.0-beta.1 (2022-09-14)
+---------------------------
+
+* Display progress during Arangosearch link and inverted index recovery.
+
+* Fixed SEARCH-374 and SEARCH-358 Fixed a rare case of wrong seek operation in
+ the sparse_bitset during ArangoSearch query execution.
+
+* Updated arangosync to v2.12.0-preview-12.
+
+* Updated ArangoDB Starter to 0.15.5-preview-3.
+
+* Fixed a rarely occurring issue where paths inside a DisjointSmart traversal
+ containing only satellite-relevant nodes were not returned properly (ES-1265).
+
+* Implement prefetch for revision trees, in case a batch is created with a
+ distinguished collection as for `SynchronizeShard`. This ensures that the
+ revision tree for the batch will be available when needed, even though the
+ revision tree for the collection might already have advanced beyond the
+ sequence number of the snapshot in the batch. This ensures that shards can get
+ in sync more reliably and more quickly.
+
+* Added startup option `--rocksdb.periodic-compaction-ttl`.
+ This option controls the TTL (in seconds) for periodic compaction of .sst
+ files in RocksDB, based on the .sst file age. The default value from RocksDB
+ is ~30 days. To avoid periodic auto-compaction, the option can be set to 0.
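+
+ For example, periodic auto-compaction can be turned off entirely via:
+
+   arangod --rocksdb.periodic-compaction-ttl 0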
+
+* Fixed SEARCH-366: Fixed extracting sub-attributes using projections for the
+ `_id` attribute (e.g. `RETURN doc.sub._id`).
+
+* Fixed SEARCH-368: Fixed handling of array comparison for inverted index.
+
+* Fix getting a snapshot on a single server for a search-alias view when an
+ index was removed.
+
+* Fix waitForSync options for SEARCH in non-maintainer build.
+
+* Fix SEARCH-340 and SEARCH-341: add stats and metrics for inverted index.
+
+* Fixed SEARCH-334 Added searchField option for inverted index.
+
+* Fixed SEARCH-376 Assertion fixed for non-array value in array comparison.
+
+* Fixed BTS-926: UI showing the "create index" form to non-admin users.
+
+* Added startup option `--arangosearch.skip-recovery` to skip the recovery of
+ arangosearch view links or inverted indexes.
+ The startup option can be specified multiple times and is expected to either
+ contain the string `all` (will skip the recovery for all view links and
+ inverted indexes) or a collection name + link id/name pair (e.g.
+ `testCollection/123456`, where `123456` is a link/index id or an index name).
+ This new startup option is an emergency means to speed up lengthy recovery
+ procedures when there is a large WAL backlog to replay. The normal recovery
+ will still take place even with the option set, but recovery data for
+ links/indexes can be skipped. This can improve the recovery speed and reduce
+ memory usage during the recovery process.
+ All links or inverted indexes that are marked as to-be-skipped via the option,
+ but for which there is recovery data, will be marked as "out of sync" at the
+ end of the recovery.
+ The recovery procedure will also print a list of links/indexes which it has
+ marked as out-of-sync.
+ Additionally, if committing data for a link/index fails for whatever reason,
+ the link/index is also marked as being out-of-sync.
+
+ If an out-of-sync link or index can be used in queries depends on another new
+ startup option `--arangosearch.fail-queries-on-out-of-sync`. It defaults to
+ `false`, meaning that out-of-sync links/indexes can still be queried. If the
+ option is set to `true`, queries on such links/indexes will fail with error
+ "collection/view is out of sync" (error code 1481).
+
+ Links/indexes that are marked out-of-sync will keep the out-of-sync flag until
+ they are dropped. To get rid of an out-of-sync link/index, it is recommended
+ to manually drop and recreate it. As recreating a link/index may cause high
+ load, this is not done automatically but requires explicit user opt-in.
+
+ The number of out-of-sync links/indexes is also observable via a new metric
+ `arangodb_search_num_out_of_sync_links`.
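+
+ Usage sketch, following the formats described above (collection name and
+ link/index id are placeholders):
+
+   arangod --arangosearch.skip-recovery all
+   arangod --arangosearch.skip-recovery testCollection/123456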
+
+* Moved extensive log message down to DEBUG level.
+
+* Updated Views UI with all changes necessary for the 3.10.0 launch.
+
+* Fixed SEARCH-343 Fixed iterating all documents with nested fields.
+
+* Fixed SEARCH-322 Fixed executing empty nested condition.
+
+* Do not drop follower shard after too many failed shard synchronization
+ attempts.
+
+* Disable optimization rule to avoid crash (BTS-951).
+
+* Fixed SEARCH-369: add a compatibility check to the search-alias view for
+ indexes from the same collection.
+
+* Fixed BTS-959: shutdown and datastore deletion were never invoked for the
+ inverted index in some C++ tests.
+
+* Fix SEARCH-350: Crash during consolidation.
+
+* When using `SHORTEST_PATH`, `K_SHORTEST_PATHS`, `ALL_SHORTEST_PATHS`, or
+ `K_PATHS` in an AQL query and the query itself produced warnings during
+ execution, the wrong type was reported: it was always reported as
+ `SHORTEST_PATH` instead of the specific type used.
+
+* Fixed SEARCH-368 Fixed handling of array comparison for inverted index.
+
+* SEARCH-357: Added SUBSTRING_BYTES function.
+
+* Fixed SEARCH-347 Cycle variable reference in nested search query is properly
+ detected and rejected.
+
+* Fixed SEARCH-364 Fixed index fields match check for inverted index.
+
+* Web UI: Reduce size and initial render height of a modal (fixes BTS-940).
+
+* Fix comparison of JSON schemas on DB servers after there was a schema change
+ via a coordinator: the schema comparison previously did not take into account
+ that some ArangoDB versions store an internal `{"type":"json"}` attribute in
+ the schema, and some don't. Thus two identical schemas could compare
+ differently.
+ The correct schema version was always applied and used, and validation of
+ documents against the schema was also not affected. However, because two
+ schemas could compare unequal, this could have caused unnecessary repeated
+ work for background maintenance threads.
+
+* Removed transitive node dependencies is-wsl and media-typer.
+
+* Web UI: Now correctly handles the server error response when an error occurred
+ during the modification of a document or an edge (BTS-934).
+
+* Fixed SEARCH-328 fixed cookie key.
+
+* Make graph search case-insensitive (fixes BTS-882).
+
+* Fixed SEARCH-329 fixed removes with nested documents.
+
+* Fixed parsing of the NOT operator in nested query filters.
+
+* Fixed SEARCH-346: If index creation is aborted because the same index already
+ exists, the new index is properly dropped if it was already instantiated.
+
+* Add progress reporting to RocksDB WAL recovery, in case there are many WAL
+ files to recover.
+
+* Updated arangosync to v2.12.0-preview-9.
+
+
+v3.10.0-alpha.1 (2022-08-17)
+----------------------------
+
+* Updated arangosync to v2.12.0-preview-6.
+
+* Updated warning messages raised for non-accepted query OPTIONS,
+ distinguishing between the case when the OPTIONS attribute is correct but the
+ value is in an incorrect format, and the case when the OPTIONS attribute
+ itself is incorrect.
+
+* Since ArangoDB 3.8 there was a loophole for creating duplicate keys in the
+ same collection. The requirements were:
+ - cluster deployment
+ - needs at least two collections (source and target), and the target
+ collection must have more than one shard and must use a custom shard key.
+ - inserting documents into the target collection must have happened via an AQL
+ query like `FOR doc IN source INSERT doc INTO target`.
+ In this particular combination, the document keys (`_key` attribute) from the
+ source collection were used as-is for insertion into the target collection.
+ However, as the target collection is not sharded by `_key` and uses a custom
+ shard key, it is actually not allowed to specify user-defined values for
+ `_key`. That check was missing since 3.8 in this particular combination and
+ has now been added back. AQL queries attempting to insert documents into a
+ collection like this will now fail with the error "must not specify _key for
+ this collection", as they used to do before 3.8.
+
+* Updated ArangoDB Starter to 0.15.5-preview-1.
+
+* Improve error handling for passing wrong transaction ids / cursor ids / pregel
+ job ids to request forwarding. Also prevent the error "transaction id not
+ found" in cases when request forwarding was tried to a coordinator that was
+
+* Fixed an invalid attribute access in AQL query optimization.
+ Without the fix, a query such as
+
+ LET data = {
+ "a": [
+ ...
+ ],
+ }
+ FOR d IN data["a"]
+ RETURN d
+
+ could fail with error "invalid operand to FOR loop, expecting Array".
+
+* Added support for AT LEAST quantifier for SEARCH.
+
+* Fixed BTS-335: fixed ranges parsing for nested queries.
+
+* BTS-907: Fixed some rare SortNode-related optimizer issues that occurred
+  when two or more SortNodes appeared in the AQL execution plan.
+
+* Added the new AQL function `VALUE`, capable of accessing an object attribute
+  via a specified path.
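+
+  A minimal sketch of how this might be called, assuming the path is given as
+  an array of attribute names:
+
+  ```
+  RETURN VALUE({ a: { b: 42 } }, ["a", "b"])  // would evaluate to 42
+  ```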
+
+* Fixed BTS-918 (incorrectly navigating back 1 level in history when a
+ modal-dialog element is present).
+
+* Added OFFSET_INFO function (Enterprise Edition only) to support search results
+ highlighting.
+
+* Updated Rclone to v1.59.0.
+
+* Fixed BTS-902 (clicking on the search icon in the analyzers filter input used
+ to take the user to the collections view).
+
+* Fixed BTS-852 (user's saved queries used to disappear after updating user
+ profile).
+
+* ArangoSearch nested search feature (Enterprise Edition): Added ability to
+ index and search nested documents with ArangoSearch views.
+
+* Updated OpenSSL to 1.1.1q and OpenLDAP to 2.6.3.
+
+* Fixed handling of illegal edges in Enterprise Graphs. Adding an edge to a
+  SmartGraph vertex collection through the document API caused incorrect
+  sharding of the edge. Such an edge is now rejected as invalid. (BTS-906)
+
+* Added CSP recommended headers to the Aardvark app for better security.
+
+* Added more specific process exit codes for arangod and all client tools, and
+ changed the executables' exit code for the following situations:
+
+ - an unknown startup option name is used: previously the exit code was 1. Now
+ the exit code when using an invalid option is 3 (symbolic exit code name
+ EXIT_INVALID_OPTION_NAME).
+ - an invalid value is used for a startup option (e.g. a number that is outside
+ the allowed range for the option's underlying value type, or a string value
+ is used for a numeric option): previously the exit code was 1. Now the exit
+    code for these cases is 4 (symbolic exit code name
+ EXIT_INVALID_OPTION_VALUE).
+ - a config file is specified that does not exist: previously the exit code was
+ either 1 or 6 (symbolic exit code name EXIT_CONFIG_NOT_FOUND). Now the exit
+ code in this case is always 6 (EXIT_CONFIG_NOT_FOUND).
+ - a structurally invalid config file is used, e.g. the config file contains a
+ line that cannot be parsed: previously the exit code in this situation was
+ 1. Now it is always 6 (symbolic exit code name EXIT_CONFIG_NOT_FOUND).
+
+ Note that this change can affect any custom scripts that check for startup
+ failures using the specific exit code 1. These scripts should be adjusted so
+ that they check for a non-zero exit code. They can opt-in to more specific
+ error handling using the additional exit codes mentioned above, in order to
+ distinguish between different kinds of startup errors.
+
+* BTS-913: check for proper timezone setup of the system on startup.
+  This logs errors at startup that would otherwise only occur in AQL functions
+  at runtime.
+
+* Added ALL_SHORTEST_PATHS functionality to find all shortest paths between two
+ given documents.
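+
+  A sketch of the intended usage (the graph and vertex names below are just
+  placeholders):
+
+  ```
+  FOR path IN OUTBOUND ALL_SHORTEST_PATHS "places/start" TO "places/end"
+    GRAPH "myGraph"
+    RETURN path.vertices[*]._key
+  ```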
+
+* Fixed a potential deadlock in RocksDB compaction.
+ For details see https://github.com/facebook/rocksdb/pull/10355.
+
+* Changed rocksdb default compression type from snappy to lz4.
+
+* arangoimport now supports the option --remove-attribute on type JSON as well.
+ Before it was restricted to TSV and CSV only.
+
+* Fixed BTS-851: "Could not fetch the applier state of: undefined".
+
+* Removed internal JavaScript dependencies "expect.js", "media-typer" and
+ "underscore". We recommend always bundling your own copy of third-party
+ modules as all previously included third-party modules are now considered
+ deprecated and may be removed in future versions of ArangoD
+
+* APM-84: Added option to spill intermediate AQL query results from RAM to disk
+ when their size exceeds certain thresholds. Currently the only AQL operation
+ that can make use of this is the SortExecutor (AQL SORT operation without
+ using a LIMIT). Further AQL executor types will be supported in future
+ releases.
+
+ Spilling over query results from RAM to disk is off by default and currently
+  in an experimental stage. In order to opt in to the feature, the startup
+  option `--temp.intermediate-results-path` must be set.
+ The directory specified here must not be located underneath the instance's
+ database directory.
+ When this startup option is specified, ArangoDB assumes ownership of that
+ directory and will wipe its contents on startup and shutdown. The directory
+ can be placed on ephemeral storage, as the data stored inside it is there only
+ temporarily, while the instance is running. It does not need to be persisted
+ across instance restarts and does not need to be backed up.
+
+ When a directory is specified via the startup option, the following additional
+ configuration options can be used to control the threshold values for spilling
+ over data:
+
+ * `--temp.intermediate-results-capacity`: maximum on-disk size (in bytes) for
+ intermediate results. If set to 0, it means that the on-disk size is not
+ constrained. It can be set to a value other than 0 to restrict the size of
+ the temporary directory. Once the cumulated on-disk size of intermediate
+ results reaches the configured maximum capacity, the query will be aborted
+ with failure "disk capacity limit for intermediate results exceeded".
+ * `--temp.intermediate-results-spillover-threshold-num-rows`: number of result
+ rows from which on a spillover from RAM to disk will happen.
+ * `--temp.intermediate-results-spillover-threshold-memory-usage`: memory usage
+ (in bytes) after which a spillover from RAM to disk will happen.
+ * `--temp.intermediate-results-encryption`: whether or not the on-disk data
+ should be encrypted. This option is only available in the Enterprise
+ Edition.
+  * `--temp.intermediate-results-encryption-hardware-acceleration`: whether or
+ not to use hardware acceleration for the on-disk encryption. This option is
+ only available in the Enterprise Edition.
+
+ Please note that the feature is currently still experimental and may slightly
+ change in future releases. As mentioned, the only Executor that can make use
+ of spilling data to disk is the SortExecutor (SORT without LIMIT).
+ Also note that the query results will still be built up entirely in RAM on
+ coordinators and single servers for non-streaming queries. In order to avoid
+ the buildup of the entire query result in RAM, a streaming query should be
+ used.
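+
+  For example, a minimal opt-in could look like this (the path shown is only a
+  placeholder):
+
+  `arangod --temp.intermediate-results-path /mnt/ephemeral/arangodb-intermediate`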
+
+* Enterprise only: Added `MINHASH`, `MINHASH_MATCH`, `MINHASH_ERROR`,
+ `MINHASH_COUNT` AQL functions.
+
+* Enterprise only: Added `minhash` analyzer.
+
+* BugFix in Pregel's status: When loading the graph into memory, Pregel's state
+ is now 'loading' instead of 'running'. When loading is finished, Pregel's
+ state changes to the 'running' state.
+
+* arangoimport now supports an additional option
+  "--overwrite-collection-prefix".
+  This option only has an effect when importing edge collections, and when it
+  is used together with "--to-collection-prefix" or "--from-collection-prefix".
+  If the file you want to import already contains vertex collection prefixes
+  (e.g. because you just exported an edge collection from ArangoDB), this
+  option allows arangoimport to overwrite those prefixes with the command-line
+  prefixes. If the option is false (the default value), only _from and _to
+  values without a prefix are prefixed with the handed-in values.
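+
+  A hypothetical invocation (the file, collection and prefix names are
+  placeholders):
+
+  `arangoimport --collection edges --file edges.jsonl --type jsonl --from-collection-prefix verts --to-collection-prefix verts --overwrite-collection-prefix true`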
+
+* Added startup option `--rocksdb.compaction-style` to configure the compaction
+ style which is used to pick the next file(s) to be compacted.
+
+* BugFix in Pregel's Label Propagation: the union of three undirected cliques of
+ size at least three connected by an undirected triangle now returns three
+ communities (each clique is a community) instead of two.
+
+* Pregel now reports correct and ongoing runtimes for loading, running, and
+ storing as well as runtimes for the separate global supersteps.
+
+* Fixed parsing of K_SHORTEST_PATHS queries to not allow ranges anymore.
+
+* Added the `--log.time-format` option value `utc-datestring-micros` to make
+  debugging of concurrency bugs easier.
+
+* Renamed KShortestPathsNode to EnumeratePathsNode; this is visible in explain
+  outputs for AQL queries.
+
+* Pregel SSSP now supports `resultField` as well as `_resultField` as parameter
+ name to specify the field into which results are stored. The name
+  `_resultField` will be deprecated in the future.
+
+* Update Windows CI compiler to Visual Studio 2022.
+
+* Web UI: Fixed a GraphViewer issue with the display of node and edge labels.
+  Boolean node or edge values could not be used as label values
+ (ES-1084).
+
+* Made the SortExecutor receive its input incrementally, instead of receiving a
+ whole matrix containing all input at once.
+
+* Optimization for index post-filtering (early pruning): in case an index is
+ used for lookups, and the index covers the IndexNode's post-filter condition,
+ then loading the full document from the storage engine is now deferred until
+ the filter condition is evaluated and it is established that the document
+ matches the filter condition.
+
+* Added a fully functional UI for Views that lets users inspect views, modify
+  their mutable properties, and delete them from the web UI.
+
+* Fix thread ids and thread names in log output for threads that are not started
+ directly by ArangoDB code, but indirectly via library code.
+ Previously, the ids of these threads were always reported as "1", and the
+  thread name was "main". Proper thread ids and names are now returned.
+
+* Changed default Linux CI compiler to gcc-11.
+
+* Add "AT LEAST" quantifier for array filters in AQL:
+
+ `RETURN [1,2,3][? AT LEAST (3) FILTER CURRENT > 42]`
+ `RETURN [1,2,3] AT LEAST (2) IN [1,2,3,4,5]`
+
+* Changed default macOS CI compiler to LLVM clang-14.
+
+* Added an automatic cluster rebalance API. Use `GET _admin/cluster/rebalance`
+ to receive an analysis of how imbalanced the cluster is. Calling it with
+ `POST _admin/cluster/rebalance` computes a plan of move shard operations to
+ rebalance the cluster. Options are passed via the request body. After
+ reviewing the plan, one can use `POST _admin/cluster/rebalance/execute` to put
+ that plan into action.
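+
+  For example, the imbalance analysis could be fetched like this (host, port
+  and credentials are placeholders):
+
+  `curl -u root: http://localhost:8529/_admin/cluster/rebalance`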
+
+* Introduce reading from followers in clusters. This works by offering an
+  additional HTTP header "x-arango-allow-dirty-read" for certain read-only
+  APIs. This header has already been used for active failover deployments to
+  allow reading from followers. Using this header allows coordinators to read
+  from follower shards instead of only from leader shards. This can help to
+  spread the read load better across the cluster. Obviously, using this header
+  can result in "dirty reads", which are read results returning stale data or
+  even not-yet-officially committed data. Use at your own risk if performance
+  is more important than correctness, or if you know that the data does not
+  change. A request example is shown after the metrics list below.
+  Responses that can contain dirty reads have the HTTP header
+  "x-arango-potential-dirty-read" set to "true".
+ There are the following new metrics showing the use of this feature:
+ - `arangodb_dirty_read_transactions_total`
+ - `arangodb_potentially_dirty_document_reads_total`
+ - `arangodb_dirty_read_queries_total`
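+
+  A sketch of a dirty read against the document API (the collection and key
+  are placeholders):
+
+  `curl -H "x-arango-allow-dirty-read: true" http://localhost:8529/_api/document/products/123`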
+
+* Changed HTTP response code for error number 1521 from 500 to 400.
+
+ Error 1521 (query collection lock failed) is nowadays only emitted by
+ traversals, when a collection is accessed during the traversal that has not
+ been specified in the WITH statement of the query.
+ Thus returning HTTP 500 is not a good idea, as it is clearly a user error that
+ triggered the problem.
+
+* Renamed the `--frontend.*` startup options to `--web-interface.*`:
+
+ - `--frontend.proxy-request.check` -> `--web-interface.proxy-request.check`
+ - `--frontend.trusted-proxy` -> `--web-interface.trusted-proxy`
+ - `--frontend.version-check` -> `--web-interface.version-check`
+
+ The former startup options are still supported.
+
+* Added the Enterprise Graph feature to the Enterprise Edition of ArangoDB.
+  The enterprise graph is another graph sharding model that we introduced. It
+  is less strict, and therefore easier to start with, than SmartGraphs, as it
+  does not require a smartGraphAttribute and allows free choice of vertex
+  _key values, but it still maintains performance gains compared to general
+  graphs. For more details, please check the documentation.
+
+* APM-135: Added multithreading to assigning non-unique indexes to documents, in
+ foreground or background mode. The number of index creation threads is
+ hardcoded to 2 for now. Improvements for higher parallelism are expected for
+ future versions.
+
+* Issue 15592: Permit `MERGE_RECURSIVE()` to be called with a single argument.
+
+* Fixed issue 16337: arangoimport with `--headers-file` and `--merge-attributes`
+ merges column names instead of row values on the first line of a CSV file.
+
+ Additionally, floating-point numbers are now merged using their standard
+ string representation instead of with a fixed precision of 6 decimal places.
+
+* Now supporting projections on traversals. In AQL traversal statements like
+  FOR v, e, p IN 1..3 OUTBOUND @start GRAPH @graph RETURN v.name
+  we now detect attribute accesses on the data (in the above example "v.name")
+  and use them to optimize data loading, e.g. we only extract the "name"
+  attribute.
+  This optimization helps if you have large documents but only access small
+  parts of them. By default, we only project up to 5 attributes on each vertex
+  and edge. This limit can be modified by adding OPTIONS {maxProjections: 42}.
+  To identify whether your query is using projections, the explain output now
+  contains a hint like `/* vertex (projections: `name`) */`.
+  For now, only attribute accesses are detected; functions like `KEEP` will
+  not be projected.
+
+* Change default `format_version` for RocksDB .sst files from 3 to 5.
+
+* Added support for creating autoincrement keys in cluster mode, but only for
+  single-sharded collections.
+
+* Added support for LZ4 and LZ4HC compression for RocksDB.
+
+* Allow parallel access to the shards of smart edge collections in AQL via
+ parallel GatherNodes.
+
+* Update RocksDB internal table checksum type to xxHash64.
+
+* Added several startup options to configure parallelism for individual Pregel
+  jobs:
+
+ - `--pregel.min-parallelism`: minimum parallelism usable in Pregel jobs.
+ - `--pregel.max-parallelism`: maximum parallelism usable in Pregel jobs.
+ - `--pregel.parallelism`: default parallelism to use in Pregel jobs.
+
+ These parallelism options can be used by administrators to set concurrency
+ defaults and bounds for Pregel jobs. Each individual Pregel job can set its
+ own parallelism value using the job's `parallelism` option, but the job's
+ parallelism value will be clamped to the bounds defined by
+ `--pregel.min-parallelism` and `--pregel.max-parallelism`. If a job does not
+ set its `parallelism` value, it will default to the parallelism value
+ configured via `--pregel.parallelism`.
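+
+  A sketch of setting a per-job value from arangosh (the algorithm and graph
+  name are placeholders):
+
+  ```
+  var pregel = require("@arangodb/pregel");
+  pregel.start("pagerank", "myGraph", { parallelism: 4 });
+  ```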
+
+* Added startup options to configure the usage of memory-mapped files for Pregel
+ temporary data:
+
+ - `--pregel.memory-mapped-files`: if set to `true`, Pregel jobs will by
+ default store their temporary data in disk-backed memory-mapped files.
+ If set to `false`, the temporary data of Pregel jobs will be buffered in
+ RAM. The default value is `true`, meaning that memory-mapped files will be
+ used. The option can be overridden for each Pregel job by setting the
+ `useMemoryMaps` option of the job.
+
+ - `--pregel.memory-mapped-files-location-type`: location for memory-mapped
+ files written by Pregel. This option is only meaningful if memory-mapped
+ files are actually used. The option can have one of the following values:
+ - `temp-directory`: store memory-mapped files in the temporary directory, as
+ configured via `--temp.path`. If `--temp.path` is not set, the system's
+ temporary directory will be used.
+ - `database-directory`: store memory-mapped files in a separate directory
+ underneath the database directory.
+ - `custom`: use a custom directory location for memory-mapped files. The
+ exact location must be set via the configuration parameter
+ `--pregel.memory-mapped-files-custom-path`.
+
+ The default value for this option is `temp-directory`.
+
+ - `--pregel.memory-mapped-files-custom-path`: custom directory location for
+ Pregel's memory-mapped files. This setting can only be used if the option
+ `--pregel.memory-mapped-files-location-type` is set to `custom`.
+
+ The default location for Pregel's memory-mapped files is the temporary
+ directory (`temp-directory`), which may not provide enough capacity for larger
+ Pregel jobs.
+ It may be more sensible to configure a custom directory for memory-mapped
+  files and provide the necessary disk space there (`custom`). Such a custom
+ directory can be mounted on ephemeral storage, as the files are only needed
+ temporarily.
+ There is also the option to use a subdirectory of the database directory as
+ the storage location for the memory-mapped files (`database-directory`).
+ The database directory often provides a lot of disk space capacity, but when
+ it is used for both the regular database data and Pregel's memory-mapped
+ files, it has to provide enough capacity to store both.
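+
+  For example, a custom location could be configured like this (the path is
+  only a placeholder):
+
+  `arangod --pregel.memory-mapped-files true --pregel.memory-mapped-files-location-type custom --pregel.memory-mapped-files-custom-path /mnt/scratch/pregel`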
+
+* Pregel status now reports whether memory-mapped files are used in a job.
+
+* Change default value of `--rocksdb.block-cache-shard-bits` to an automatic
+ default value that allows data blocks of at least 128MiB to be stored in each
+ cache shard if the block cache's strict capacity limit is used. The strict
+ capacity limit for the block cache is enabled by default in 3.10, but can be
+ turned off by setting the option `--rocksdb.enforce-block-cache-size-limit` to
+ `false`. Also log a startup warning if the resulting cache shard size would be
+ smaller than is potentially safe when the strict capacity limit is set.
+ Enforcing the block cache's capacity limit has the consequence that data reads
+ by RocksDB must fit into the block cache or the read operation will fail with
+ an "Incomplete" error.
+
+* The API `/_admin/status` now returns a progress attribute that shows the
+ server's current state (starting, stopping, etc.), with details about which
+ feature is currently started, stopped etc. During recovery, the current WAL
+ recovery sequence number is also reported in a sub-attribute of the `progress`
+ attribute. Clients can query this attribute to track the progress of the WAL
+ recovery.
+ The additional progress attribute returned by `/_admin/status` is most useful
+ when using the `--server.early-connections true` setting. With that setting,
+ the server will respond to incoming requests to a limited set of APIs already
+ during server startup. When the setting is not used, the REST interface will
+ be opened relatively late during the startup sequence, so that the progress
+ attribute will likely not be very useful anymore.
+
+* Optionally start up HTTP interface of servers earlier, so that ping probes
+ from tools can already be responded to when the server is not fully started.
+ By default, the HTTP interface is opened at the same point during the startup
+ sequence as before, but it can optionally be opened earlier by setting the new
+ startup option `--server.early-connections` to `true`. This will open the HTTP
+ interface early in the startup, so that the server can respond to a limited
+ set of REST APIs even during recovery. This can be useful because the recovery
+ procedure can take time proportional to the amount of data to recover.
+  When the `--server.early-connections` option is set to `true`, the server
+  will respond to requests to the following APIs already during startup:
+ - `/_api/version`
+ - `/_admin/version`
+ - `/_admin/status`
+ All other APIs will be responded to with an HTTP response code 503, so that
+ callers can see that the server is not fully ready.
+ If authentication is used, then only JWT authentication can be used during the
+ early startup phase. Incoming requests relying on other authentication
+ mechanisms that require access to the database data will also be responded to
+ with HTTP 503 errors, even if correct credentials are used.
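+
+  A sketch of probing a starting server (host and port are placeholders):
+
+  `arangod --server.early-connections true`
+  `curl http://localhost:8529/_admin/status`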
+
+* Upgraded bundled version of RocksDB to 7.2.
+
+* Added `[?]` array operator to AQL, which works as follows:
+ - `nonArray[?]`: returns `false`
+ - `nonArray[? FILTER CURRENT ...]`: returns `false`
+ - `array[?]`: returns `false` if array is empty, `true` otherwise
+ - `array[? FILTER CURRENT ...]`: returns `false` if no array member
+ satisfies the filter condition, returns `true` if at least one member
+ satisfies it.
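+
+  For example, based on the rules above:
+
+  `RETURN [1, 2, 3][? FILTER CURRENT > 2]` (true: at least one member matches)
+  `RETURN [1, 2, 3][? FILTER CURRENT > 9]` (false: no member matches)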
+
+* Upgrade jemalloc to version 5.3.0.
+
+* Set "useRevisionsAsDocumentIds" to true when restoring collection data via
+ arangorestore in case it is not set in the collection structure input data.
+ This allows using revision trees for restored collections.
+
+* Added new optimization rule "arangosearch-constrained-sort" to perform sorting
+ & limiting inside ArangoSearch View enumeration node in case of using just
+ scoring for sort.
+
+* Updated lz4 to version 1.9.3.
+
+* Added option `--custom-query-file` to arangoexport, so that a custom query
+ string can also be read from an input file.
+
+* FE-46: UI improvements on the View UI pages, as well as tooltips added to
+  options where necessary. The affected pages are mostly the Info and
+ Consolidation Policy pages.
+
+* FE-44: Moved the Info page to before JSON, making the settings page the
+ default page in the view web UI.
+
+* Refactor internal code paths responsible for `_key` generation. For
+ collections with only a single shard, we can now always let the leader DB
+ server generate the keys locally. For collections with multiple shards, the
+ coordinators are now always responsible for key generation.
+ Previously the responsibility was mixed and depended on the type of operation
+ executed (document insert API vs. AQL query, single operation vs. batch).
+
+* Make web UI show the following information for collections:
+ - key generator type
+ - whether or not the document and primary index cache is enabled
+ - if cache is enabled, show cache usage and allocation size in figures
+ The `cacheEnabled` property of collections is now also changeable via the UI
+ for existing collections.
+
+* FE-45: Added tooltips with helpful information to the options on the View UI
+ settings page.
+
+* FE-43: Simplify the workflow on the web view UI (Links page): allow for users
+ to view a single link or field with their properties at a time.
+
+* Fixed BTS-811 in which there was an incongruence between data being
+  checksummed and the data being written to `.sst` files, because checksumming
+  should have happened after the encryption of the data, not before it.
+
+* Added command line option to arangobench to disable implicit collection
+ creation. This allows one to run tests against a manually created and
+ configured collection.
+
+* Fix deadlocked shard synchronizations when planned shard leader has not yet
+ taken over leadership.
+
+* Unify the creation of normal and SmartGraph collections.
+
+ This unifies the code paths for creating collections for normal collections
+ and SmartGraph collections, so that the functionality is centralized in one
+ place. SmartGraph-specific code for validation and collection creation has
+ been moved to enterprise as well.
+
+* Auto-regenerate exit code and error code files in non-maintainer mode, too.
+
+* Only show slowest optimizer rules in explain output for optimizer rules that
+ took a considerable amount of time (>= 0.0002 seconds). Previously the slowest
+ 5 optimizer rules were shown, regardless of how long they took to execute and
+ even if they executed sufficiently fast.
+
+* Make the StatisticsFeature start after the NetworkFeature, so that any network
+  request issued by cluster statistics gathering can rely on the networking
+ functionality being available until shutdown.
+
+* Rework internal queues for connection and request statistics. The previous
+ implementation allocated a lot of memory at program start for initializing
+ fixed-sized queues for the statistics objects.
+  The problem with using fixed-sized queues is that they require too much
+  memory in almost all cases, but still do not protect from the queues
+ becoming full and not being able to hold more items.
+ Now we go with a variable length queue instead, which only requires a small
+ amount of memory initially, and allocate more memory only when needed.
+ Freelists for reusing statistics items are still present to avoid lots of
+ reallocations.
+ The change also reduces the size of the executable's .bss section by more than
+ 10MB.
+
+* Always open a new, working connection before HTTP request-fuzzing during
+ testing. Otherwise the fuzzing results are not 100% comparable from run to
+ run.
+
+* Updated bundled version of zlib library to 1.2.12.
+
+* Added the AQL hint "useCache" for FOR loops, which allows explicitly
+  disabling the usage of in-memory caches for lookups.
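+
+  A sketch of opting out of the caches for a single loop (the collection and
+  attribute names are placeholders):
+
+  ```
+  FOR doc IN collection OPTIONS { useCache: false }
+    FILTER doc.value == 42
+    RETURN doc
+  ```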
+
+* When profiling an AQL query via `db._profileQuery(...)` or via the web UI, the
+ query profile will now contain the number of index entries read from in-memory
+ caches (usable for edge indexes and indexes of type "persistent", "hash" or
+ "skiplist") plus the number of cache misses.
+
+* The caching subsystem now provides the following 3 additional metrics:
+ - `rocksdb_cache_active_tables`: total number of active hash tables used for
+ caching index values. There should be 1 table per shard per index for which
+ the in-memory cache is enabled. The number also includes temporary tables
+ that are built when migrating existing tables to larger equivalents.
+ - `rocksdb_cache_unused_memory`: total amount of memory used for inactive hash
+ tables used for caching index values. Some inactive tables can be kept
+ around after use, so they can be recycled quickly. The overall amount of
+ inactive tables is limited, so not much memory will be used here.
+ - `rocksdb_cache_unused_tables`: total number of inactive hash tables used for
+ caching index values. Some inactive tables are kept around after use, so
+ they can be recycled quickly. The overall amount of inactive tables is
+ limited, so not much memory will be used here.
+
+* Added optional in-memory caching for index entries when doing point lookups in
+ indexes of type "persistent", "hash" or "skiplist".
+ The caching is turned off by default, but can be enabled when creating an
+ index of type "persistent", "hash" or "skiplist" by setting the "cacheEnabled"
+ flag for the index upon index creation.
+ The cache will be initially empty, but will be populated lazily upon querying
+ data from the index using equality lookups on all index attributes.
+ As the cache is hash-based and unsorted, it cannot be used for full or partial
+ range scans, for sorting, or for lookups that do not include all index
+ attributes.
+ The maximum size of index entries that can be stored is currently 4 MB, i.e.
+ the cumulated size of all index entries for any index lookup value must be
+ less than 4 MB. This limitation is there to avoid storing the index entries of
+ "super nodes" in the cache.
+
+ The maximum combined memory usage of all in-memory caches can be controlled
+ via the existing `--cache.size` startup option, which now not only contains
+ the maximum memory usage for edge caches, but also for index caches added
+ here.
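+
+  A sketch of opting in at index creation time (the collection and field names
+  are placeholders):
+
+  ```
+  db.collection.ensureIndex({
+    type: "persistent",
+    fields: ["value"],
+    cacheEnabled: true
+  });
+  ```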
+
+* Added new AQL function `KEEP_RECURSIVE` to recursively keep attributes from
+ objects/documents, as a counterpart to `UNSET_RECURSIVE`.
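+
+  A sketch of the intended usage, assuming the same calling convention as
+  `UNSET_RECURSIVE` (attribute names following the document):
+
+  ```
+  RETURN KEEP_RECURSIVE({ a: { b: 1, c: 2 }, b: 3 }, "a", "b")
+  // would keep only attributes named `a` or `b` at any nesting level
+  ```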
+
+* Added an HTTP fuzzer to arangosh that can send fuzzed requests to the server.
+  The number of requests sent is provided by one of the parameters of the new
+  arangosh function `fuzzRequests()`.
+  The optional parameters that can be supplied are:
+  `fuzzRequests(<numRequests>, <numIterations>, <seed>)`
+  The parameter numIterations is the number of times the fuzzer is going to
+  perform its random actions on the header, and seed is the seed that is used
+  for randomizing.
+  The fuzzer is available only when building with failure points.
+
+* Escape each key in attribute paths of nested attributes in the query explain
+ output for SEARCH queries that utilize the primary sort order.
+
+* Turn off sending "Server" HTTP response header on DB servers if not explicitly
+ requested. This saves a tiny bit of traffic on each response from a DB server.
+
+* Enable range deletions in the WAL for truncate operations in the cluster, too.
+ This can speed up truncate operations for large collections/shards.
+
+* Set max recursion depth for VelocyPack, JSON and JavaScript arrays and objects
+ to about 200.
+
+* Updated snowball to version 2.2.0.
+
+* Fixed: Deadlock created by high load and a follower trying to get into sync.
+ In the final synchronization phase the follower needs to temporarily block
+ writes on the leader so we have a reliable point in time where we can prove
+ that the data is consistent.
+ If the leader at this point is flooded with write requests to that shard there
+ is a chance that all worker threads only pick up those writes, which cannot
+ make any progress until the lock is cleared. However, the process to clear the
+ lock was on the same priority as those writes.
+  Hence the lock-clearing operation could not bypass the writes. We now move
+  every follow-up request after the lock to HIGH priority lanes, which allows
+  them to bypass all non-internal operations.
+
+* arangosh now uses the same header the UI uses to gain higher priority on
+ initial connection.
+  This increases the chance for arangosh to connect to a server under very
+  high load.
+
+* Bugfix: DC2DC Disjoint-SmartGraphs and Hybrid-SmartGraphs are now replicated
+ to the follower data-center keeping their sharding intact.
+
+* Added a log message that appears upon starting arangod and shows the parent
+  process id and, if it can be determined, the name of the parent process.
+
+* Parallelize applying of revision tree changes with fetching next revision tree
+ range in incremental collection replication for collections created with
+ ArangoDB 3.8 and higher.
+
+* Support JSON schema objects for documenting Foxx endpoints.
+
+* Internal refactoring of IndexIterator APIs.
+
+* Sorted out various geo problems:
+
+ - No more special detection of "latitude-longitude rectangles" is done, since
+ this is in conflict with the definition of polygon boundaries to be
+ geodesics.
+ - Linear rings in polygons are no longer automatically "normalized", so now it
+ is possible to have polygons which cover more than half of the Earth.
+  - Rules for polygons and multi-polygons have been clarified and are now
+    properly enforced for the `GEO_POLYGON` and `GEO_MULTIPOLYGON` AQL
+    functions.
+ - Introduced `legacyPolygon` flag for geo indexes to continue to support the
+ old behavior in existing geo indexes.
+ - Added lots of additional tests, thereby fixing several bugs in geo index
+ lookup.
+ - Use a faster algorithm for pure `GEO_CONTAINS` and `GEO_INTERSECTS` queries.
+
+* Added back the optimization for empty document update operations (i.e. update
+ requests in which no attributes were specified to be updated), handling them
+  in a special way without performing any writes, and also excluding such
+  special operations from replication to followers.
+
+* Changed Foxx service generator output to use static variable names.
+
+* Allow early pruning (moving a FILTER condition into an IndexNode or
+ EnumerateCollectionNode) in more cases than before. Previously, early pruning
+ was only possible if the FILTER condition referred to exactly one variable,
+ which had to be the FOR loop's own variable. Now, early pruning is possible
+ with arbitrary variables that are accessible by the FOR loop.
+
+* In an attempt to make the performance of the RocksDB throttle much more
+  consistent and predictable, the default compaction slowdown trigger is
+  lowered to 128kB.
+
+* The multi-dimensional index type `zkd` now supports an optional index hint for
+ tweaking performance by prefetching documents:
+
+ ```
+ FOR app IN appointments OPTIONS { lookahead: 32 }
+ FILTER @to <= app.to
+ FILTER app.from <= @from
+ RETURN app
+ ```
+
+ Specifying a lookahead value greater than zero makes the index fetch more
+ documents that are no longer in the search box, before seeking to the next
+ lookup position.
+ Because the seek operation is computationally expensive, probing more
+ documents before seeking may reduce the number of seeks, if matching documents
+ are found.
+ Please keep in mind that it might also affect performance negatively if
+ documents are fetched unnecessarily.
+
+* Enabled new internal graph refactored code for depth-first, breadth-first and
+ weighted traversals by default.
+
+* Improved performance of inner joins with dynamic lookup conditions being
+ injected from an outer loop, for indexes of type "persistent", "hash" and
+ "skiplist". Performance improvements can be expected if the inner join is
+ invoked a lot of times with many different values fed in by the outer loop.
+ The performance improvements are due to some improved handling of index
+ lookup conditions in the internals of the VelocyPack-based index.
+
+* Improve usefulness of `storedValues` together with late materialization.
+
+* Limited module resolution in arangosh to the path from which arangosh is
+ invoked.
+
+* Changed default value of startup option
+ `--rocksdb.cache-index-and-filter-blocks` from `false` to `true`.
+ This makes RocksDB track all loaded index and filter blocks in the block
+ cache, so they are accounted for in RocksDB's block cache. Also the default
+ value for the startup option `--rocksdb.enforce-block-cache-size-limit` was
+ flipped from `false` to `true` to make the RocksDB block cache not
+ temporarily exceed the configured memory limit (`--rocksdb.block-cache-size`).
+
+ These default value changes will make RocksDB adhere much better to the
+ configured memory limit. This is a trade-off between memory usage stability
+  and performance. These changes may have a small negative impact on
+  performance
+ because if the block cache is not large enough to hold the data plus the index
+ and filter blocks, additional disk I/O may be performed compared to previous
+ versions. In case there is still unused RAM capacity available, it may be
+ sensible to increase the total size of the RocksDB block cache.
+
+* Add "filtered" column to AQL query profiling output.
+  This column shows how many documents were filtered by each node and thus
+  provides insights into whether additional indexes could help.
+
+* Reuse ExecutorExpressionContext inside IndexExecutor, so that repeated setup
+ and teardown of expression contexts can be avoided.
+
+* Adjust internal RocksDB setting `optimize_filters_for_hits` for Documents
+ column family, setting it from `false` to `true`. This should reduce memory
+ and disk space requirements for the bottom-most .sst files of the documents
+ column family.
+
+* Upgrade VelocyPack library to latest version.
+
+* Slightly improve the explain output of SingleRemoteOperationNodes.
+
+* Added more detail to the log messages that display the total time consumption
+ and total amount of data parsed for the client tools arangodump and
+ arangorestore.
+
+* Upgraded boost to 1.78.0.
+
+* Fixed issue #15501: Regression when using "exclusive" query option?
+ This fixes a regression in AQL query execution when exclusive locks are used
+ for a query and the query also uses the DOCUMENT() AQL function. In this
+ case, when there were more concurrent requests to the underlying collection
+  than available scheduler threads for the low priority queue, the query
+  could successfully acquire the exclusive lock at its start, but could not
+  get its follow-up requests through and would starve while holding the
+  exclusive lock.
+
+* Improve performance of `db._explain()` for very large query execution plans.
+ Higher performance is achieved by not serializing some internal data
+ structures when serializing execution plans. Serializing internal data is now
+ opt-in and turned off if not needed. Apart from performance, there should be
+ no end user visible changes.
+
+* APM-24: Log messages can be displayed together with some other useful
+ parameters, e.g., the name of the database, username, query id, and so on.
+  There are some predefined parameters that can be displayed, but, for the
+  moment, only database, username and url are supported.
+ The usage upon starting the server is, for example:
+ `arangod --log.structured-param database --log.structured-param username`
+
+* Add optional "storedValues" attribute for persistent indexes.
+
+ This will add the specified extra fields to the index, so that they can be
+ used for projections, but not for lookups or sorting.
+
+ Example:
+
+    db.<collection>.ensureIndex({
+      type: "persistent",
+      fields: ["value1"],
+      storedValues: ["value2"]
+    });
+
+ This will index `value1` in the traditional sense, so the index can be used
+ for looking up by `value1` or for sorting by `value1`. The index also supports
+ projections on `value1` as usual.
+ In addition, due to `storedValues` being used here, the index can now also
+ supply the values for the `value2` attribute for projections.
+
+ This allows covering index scans in more cases and helps to avoid making extra
+ document lookups in the documents column family. This can have a great
+ positive effect on index scan performance if the number of scanned index
+ entries is large.
+
+ The maximum number of attributes to store in `storedValues` is 32.
+
+* Added agency push-queue operation.
+
+* Make the per-server values "numberOfCores" and "physicalMemory" available to
+  the agency, to improve the quality of potential future shard rebalancing
+  algorithms.
+
+* Unify the result structure of `db._version(true)` calls for arangosh and
+ server console. Previously such a call in the server console would return a
+ different structure that only consisted of the `details` subobject.
+ This is now unified so that the result structure in the server console is
+ consistent with arangosh, but strictly speaking this is a breaking change.
+
+* Added new option `--custom-query-bindvars` to arangoexport, so queries given
+  via option `--custom-query` can have bind variables in them. Also renamed
+  the flags `--query` to `--custom-query` and `--query-max-runtime` to
+  `--custom-query-max-runtime`, to match the other client tools.
+
+* Improve some RocksDB-related error messages during server startup.
+
+* Raised minimal macOS supported version to 10.15 (Catalina).
+
+* Remove background thread `RocksDBShaThread` for background SHA256 checksum
+ calculation for .sst files in the Enterprise Edition. The checksums are now
+ calculated incrementally while writing into the .sst files, and the checksum
+ files will be stored on disk as soon as an .sst file is made durable by
+ RocksDB. There is no more need to periodically scan the database directory and
+ look for any additional .sst files.
+
+* Fix BTS-580: Trimmed the password field from the payload in the client
+  requests when displaying error messages in arangorestore, because the
+  messages displayed the password in plain text.
+
+* Refactored unit tests with the `grey` keyword, which is for skipping certain
+ tests. A test file that did not perform any test, but only had a function to
+ sleep, was removed. Two test files were renamed so they would not be skipped.
+
+* Defer intermediate commits in the middle of a multi-document (array)
+ operation. This is to ensure that the RocksDB key locks for all participating
+ document keys are still held while the operations are replicating via the
+ synchronous replication.
+
+* Changed arangobench concurrency flag name from `--concurrency` to `--threads`.
+
+* APM-217: deprecate the usage of fulltext indexes.
+
+* Changed "arangosh" directory name to "client-tools", because the directory
+ contains the code for all client tools and not just arangosh.
+
+* Updated immer to version 0.7.0.
+
+* Made the `--version` and `--version-json` commands usable in arangobackup
+ when no positional argument (operation type) was specified. Previously,
+ arangobackup insisted on specifying the operation type alongside the
+ `--version` or `--version-json` commands.
+
+* Removed the following deprecated arangobench testcases:
+ - aqltrx
+ - aqlv8
+ - counttrx
+ - deadlocktrx
+ - multi-collection
+ - multitrx
+ - random-shapes
+ - shapes
+ - shapes-append
+ - skiplist
+ - stream-cursor
+
+* Renamed arangobench testcase "hash" to "persistent-index".
+
+* Add option to content-transfer encode gzip Foxx replies.
+
+* Simplify internal request compression/decompression handling code.
+
+* Added Enterprise Sharded Graphs Simulation: Now it is possible to test
+ SmartGraphs and SatelliteGraphs on a single server instance and then to port
+ them to a cluster with multiple servers. All existing types of SmartGraphs are
+  eligible for this procedure: SmartGraphs themselves, Disjoint SmartGraphs,
+ Hybrid SmartGraphs and Hybrid Disjoint SmartGraphs. One can create a graph of
+ any of those types in the usual way, e.g., using `arangosh`, but on a single
+ server, then dump it, start a cluster (with multiple servers) and restore the
+ graph in the cluster. The graph and the collections will keep all properties
+ that are kept when the graph is already created in a cluster. This feature is
+ only available in the Enterprise Edition.
+
+* Remove unsupported `--server.default-api-compatibility` startup option.
+
+
+v3.9.2 (2022-06-07)
+-------------------
+
+* Enterprise only: Restricted behavior of Hybrid Disjoint SmartGraphs. Within
+  a single traversal or path query, you can now only switch between Smart and
+  Satellite sharding once; all queries where more than one switch is (in
+  theory) possible will be rejected, e.g.:
+ ```
+ FOR v IN 2 OUTBOUND @start smartToSatEdges, satToSmartEdges
+ ```
+ will be rejected (we can go smart -> sat -> smart, so two switches)
+ ```
+ FOR v1 IN 1 OUTBOUND @start smartToSatEdges
+ FOR v2 IN 1 OUTBOUND v1 satToSmartEdges
+ ```
+ will still be allowed, as each statement only switches once.
+  We decided to impose these restrictions because, especially for ShortestPath
+  queries, the results are not well-defined. If you have a use case where this
+  restriction hits you, please contact us.
+
+* Fixed issue BTS-875.
+
+* Updated arangosync to v2.10.0.
+
+* Make all requests which are needed for shard resync at least medium priority
+ to improve getting-in-sync under load.
+
+* Fix behavior when accessing a view instead of a collection by name in a REST
+  document operation. A proper error is now returned.
+
+* Fix documentation of collection's `cacheEnabled` property default.
+
+* Added startup option `--cluster.shard-synchronization-attempt-timeout` to
+ limit the amount of time to spend in shard synchronization attempts. The
+ default timeout value is 20 minutes.
+  Running into the timeout will not lead to a synchronization failure; the
+  synchronization will continue shortly after. Setting a timeout can help to
+ split the synchronization of large shards into smaller chunks and release
+ snapshots and archived WAL files on the leader earlier.
+ This change also introduces a new metric `arangodb_sync_timeouts_total` that
+ counts the number of timed-out shard synchronization attempts.
+
+* Make followers respond to synchronous replication requests with less data.
+ Specifically, followers will not build detailed results with _id, _key and
+ _rev for the inserted/modified/removed documents, which would be ignored by
+ the leader anyway.
+
+* Fixed a very verbose warning that was emitted when failing to parse GeoJSON
+  in search; it had led to billions of log lines on deployed services.
+
+* Fixed Github issue #16279: assertion failure/crash in AQL query optimizer when
+ permuting adjacent FOR loops that depended on each other.
+
+* Fixed a potential hang on shutdown, when there were still document operations
+ queued.
+
+* There is no good reason to raise a fatal error in the agency state when
+  local database entries lack a local timestamp (legacy). In that situation,
+  we now record the epoch begin as the local time.
+
+* Fixed BTS-860: changed the ArangoSearch index recovery procedure to remove
+  the necessity of always fully recreating the index if an IndexCreation
+  marker is encountered.
+
+* Removed separate FlushThread (for views syncing) and merged it with the
+ RocksDBBackgroundThread.
+
+* Fix some issues with WAL recovery for views. Previously it was possible that
+ changes to a view/link were already recovered and persisted, but that the
+ lower bound WAL tick was not moved forward. This could lead to already fully
+ recovered views/links being recovered again on the next restart.
+
+* Put hotbackup requests on the HIGH priority queue to make hotbackups work
+ under high load (BTS-865).
+
+* Allow starting with option value `--cache.size 0`, to turn off in-memory
+ caches for indexes entirely (for performance testing or limiting memory
+ usage).
+
+* Updated OpenSSL to 1.1.1o and OpenLDAP to 2.6.2.
+
+* Added option `--enable-revision-trees` to arangorestore, which will add the
+ attributes `syncByRevision` and `usesRevisionsAsDocumentIds` to the
+ collection structure if they are missing. As a consequence, these collections
+ created by arangorestore will be able to use revision trees and a faster
+ getting-in-sync procedure after a restart. The option defaults to `true`,
+ meaning the attributes will be added if they are missing. If the option is
+ set to `false`, the attributes will not be added to the collection structure.
+ If the attributes are already present in the dump data, they will not be
+ modified by arangorestore irrespective of the setting of this option.
+
+* Fix agency inception when one of the gossip peers responds with HTTP 503 or
+ another unexpected error.
+
+* Make sure that newly created TTL indexes do not use index estimates, which
+ wouldn't be used for TTL indexes anyway.
+
+* Improve log output for WAL recovery, by providing more information and making
+ the wording more clear.
+
+* Fix: Highly unlikely race in cluster maintenance. For every shard, only one
+  operation (change attribute, change leadership) should be performed at the
+  same time. However, if two changes were detected in the same heartbeat, this
+  could lead to both operations being executed in parallel. In most cases this
+  is fine, but it could lead to races on the same attribute; however, the race
+  will be sorted out in the next heartbeat interval.
+
+* Fix: for the Windows build, the new Snappy version, which was introduced in
+  3.9, generated code that contained BMI2 instructions, which were introduced
+  with the Intel Haswell architecture. However, our target architecture for
+  3.9 is actually Sandy Bridge, which predates Haswell. Running the build on
+  these older CPUs thus resulted in illegal instruction exceptions.
+
+* Increase internal transaction lock timeout on followers during cluster write
+ operations. Although writes to the same keys on followers should be serialized
+ by the key locks held on the leader, it is still possible that the global
+ transaction lock striped mutex is a source of contention and that concurrent
+ write operations time out while waiting to acquire this global mutex. The lock
+ timeout on followers is now significantly increased to make this very
+ unlikely.
+
+* Improve validation for variables used in the `KEEP` part of AQL COLLECT
+  operations. Previously, referring from the KEEP part to a variable that was
+  introduced by the COLLECT itself triggered an internal error. The case is
+  now detected properly and handled with a descriptive error message.
+
+* Added startup option `--rocksdb.transaction-lock-stripes` to configure the
+ number of lock stripes to be used by RocksDB transactions. The option defaults
+ to the number of available cores, but is bumped to a value of 16 if the number
+ of cores is lower.
+
+* Fix deadlocked shard synchronisations when planned shard leader has not yet
+ taken over leadership.
+
+* Added an IO heartbeat which checks that the underlying volume is writable with
+ reasonable performance. The test is done every 15 seconds and can be
+  explicitly switched off. New metrics give visibility if the test fails:
+ - `arangodb_ioheartbeat_delays_total`: total number of delayed io heartbeats
+ - `arangodb_ioheartbeat_duration`: histogram of execution times [us]
+ - `arangodb_ioheartbeat_failures_total`: total number of failures
+  These metrics are only populated if `--database.io-heartbeat` is set to
+ `true` (which is currently the default).
+
+* Fix lock order in Agent::advanceCommitIndex for State's _logLock and Agent's
+ _waitForCV.
+
+* BugFix (enterprise-only): (BTS-787) In a hybrid disjoint SmartGraph having
+  more than one relation, if you added a new vertex collection to a
+  Smart -> Smart edge relation, this vertex collection was rejected with a
+  "has to be satellite" error.
+ Now the collection is created as a SmartVertexCollection as desired.
+
+* Resync follower shard after a follower restart immediately and not lazily.
+
+
+v3.9.1 (2022-04-04)
+-------------------
+
+* Updated ArangoDB Starter to 0.15.4.
+
+* Improve parallelism in arangorestore in case the new data format is used.
+
+* Removed error handling when fetching license information, to improve the
+  user experience. Displaying the license information in the UI is only
+  informational; it disturbs the user experience to be told something went
+  wrong, and it doesn't provide any important information for the user.
+
+* Added new server option `--icu-language`. It is used instead of
+  `--default-language` to set a pure ICU collator.
+  For example, in the Swedish language ("sv"), lowercase letters should
+  precede uppercase ones. You can achieve this by using the following option
+  when the server starts:
+
+ `--icu-language sv`
+
+* No longer put document writes from replication into the audit log by
+  default. The same applies to low-priority authentication, like internal UI
+  requests to .html files. This solves a performance problem for shards
+  getting in sync with the audit log switched on.
+
+* Updated OpenSSL to 1.1.1n and OpenLDAP to 2.6.1.
+
+* Fixed an assertion failure which could occur when there was an error in the
+ HTTP header, so that the message body was not actually read.
+
+* Fixed a crash which could occur when there was an error in the HTTP header
+ parsing.
+
+* Bug-Fix: Resolve BTS-673/Issue #15107, a spliced subquery could return too few
+ results.
+
+* Speed up initial sync (in case there is already data present) by prefetching
+ data from leader.
+
+* Fixed ES-1078: The REST API endpoint for handling `/_api/user/${user}/config`
+  did not work properly. Data supplied by sending a PUT request was not stored
+  to the correct location. The Web UI uses this endpoint to store its graph
+  visualization properties. As this endpoint did not work as expected, the
+  graph visualization properties did not get persisted either. This is now
+  resolved.
+
+* Fix counts and file size sums in hotbackup META files. Directories are no
+  longer counted.
+
+* Further optimize the RocksDB throttle to allow for no change on any given
+  calculation cycle.
+
+* Fix UI to only fetch license info as admin user.
+
+* Added `disableIndex` index hint for AQL FOR loops. This index hint disables
+ the usage of any index (except geo or full text indexes) and will cause a full
+ scan over the collection.
+ In some circumstances a full scan can be more efficient than an index scan,
+ for example if the index scan produces many matches (close to the number of
+ documents in the collection) and the index is not fully covering the query.
+ The `disableIndex` hint can be given per FOR loop in the query, e.g.:
+
+ FOR doc IN collection OPTIONS { disableIndex: true }
+ RETURN doc.value
+
+ The default value of `disableIndex` is `false`.
+  In case a different index hint is provided, `disableIndex: true` takes
+  precedence and produces a warning about the ambiguous settings.
+
+* Added `maxProjections` hint for AQL FOR loops. This hint can be used to set
+ the maximum number of document attributes that are taken into account for
+ using projections.
+
+ For example, in the following query, no projections will be used because the
+  number of potential projection attributes (`value1`, `value2`, `value3`) is
+ higher than the maximum number of projection attributes set via the
+ `maxProjections` option:
+
+ FOR doc IN collection OPTIONS { maxProjections: 2 }
+ RETURN [ doc.value1, doc.value2, doc.value3 ]
+
+ The default value for `maxProjections` is `5`, which is compatible with the
+ previous hard-coded default value.
+
+* Avoid a deadlock in the agency, where Inception::gossip() acquired a mutex,
+  could then call Agent under the mutex, and Agent could finally call
+  Inception::signalConditionVar(), which would try to acquire the mutex again.
+
+* Avoid multiple parallel SIGHUP requests being handled at the same time.
+  Multiple incoming SIGHUP requests are now collapsed into a single one, which
+  can be executed race-free.
+
+* Upgraded JavaScript "i" module from 0.3.6 to 0.3.7.
+
+* Removed internal JavaScript dependencies "mocha" and "chalk". We recommend
+ always bundling your own copy of third-party modules, even ones listed as
+ public.
+
+* Reduce memory usage of inner joins if they were performed by the IndexExecutor
+ with dynamic index lookup expressions that needed to be recomputed for input
+ from the outer loop.
+
+ For example, in the query
+ ```
+ FOR i IN 1..1000
+ FOR doc IN collection
+ FILTER doc.indexAttribute == i
+ RETURN doc
+ ```
+ the inner loop will be executed 1000 times. The IndexExecutor in the inner
+ loop needed to rebuild the index lookup attribute from the value of `i` 1000
+ times as well. The memory for index lookup attributes came from the Ast's
+ memory allocator and was not freed until the end of the query. In this query,
+ it would mean that up to 1000 lookup values were held in memory. With larger
+ inputs even more memory would be used.
+
+ Now the memory for index lookup values is freed when a new lookup value is
+ computed, i.e. only a single lookup value is held in memory.
+ This drastically reduces peak memory usage for queries that use index lookups
+ in inner loops and that get lots of different inputs from outer loops.
+
+* Harden validator for binary VelocyPack against additional types of malicious
+ inputs.
+
+* Bugfix: DC-2-DC Disjoint-SmartGraphs and Hybrid-SmartGraphs are now replicated
+ to the follower data-center keeping their sharding intact.
+
+* As we are now in a constant stall regime, stall onset and warning messages
+  are demoted to DEBUG.
+
+* Shorten the license grace period to the documented 3 hours.
+
+* Replaced internal JS dependency xmldom with @xmldom/xmldom.
+
+* Fixed BTS-750: Fixed an issue, restricted to cluster mode, in which queries
+  containing the keywords UPDATE or REPLACE together with the keyword WITH and
+  the same key value would result in an error. For example:
+  `UPDATE 'key1' WITH {_key: 'key1'} IN Collection`
+  failed because the same key that was being updated was also provided in the
+  object to update the document with.
+
+* Replaced internal JS dependency ansi-html with ansi-html-community.
+
+* In an attempt to make the performance of the RocksDB throttle much more
+  consistent and predictable, the default compaction slow-down trigger is
+  lowered to 128 kB.
+
+* Bug-fix: an AQL WINDOW statement, if applied within a subquery, could
+  accidentally skip over some subquery results. This only showed up if the
+  subquery filled exactly one internal batch before it completed, so it was
+  rather unlikely to occur.
+
+* Fix null pointer access when using WINDOW operation with a COUNT/LENGTH
+ aggregate function without any arguments.
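+
+  For illustration, a query of the following shape (collection and attribute
+  names are placeholders) could previously trigger the crash:
+
+    FOR doc IN collection
+      WINDOW { preceding: 1, following: 1 }
+      AGGREGATE numRows = COUNT()
+      RETURN numRows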
+
+* Reintroduce shard synchronization cancellation check that was disabled before.
+
+* Fixed BTS-621: Fixed a rare segfault in the cluster during database recovery
+  if a DB server is in upgrade mode at the same time.
+
+* Fixed PRESUPP-445: Foxx queues: some jobs were never run when multiple
+  coordinators were in use.
+
+* Fixed a race detected with chaos tests, where a DB server could momentarily
+  lose leadership just when it was about to drop a follower for a shard.
+
+* Fixed issue #15501: Regression when using "exclusive" query option?
+ This fixes a regression in AQL query execution when exclusive locks are used
+ for a query and the query also uses the DOCUMENT() AQL function. In this case,
+  when there were more concurrent requests to the underlying collection than
+  available scheduler threads for the low priority queue, the query start
+  successfully acquired the exclusive lock, but could not get its follow-up
+  requests through and starved while holding the exclusive lock.
+
+* Disable optimizer rule "optimize-cluster-single-document-operations" when a
+  collection is accessed in exclusive mode, because the optimized query would
+  otherwise use a slightly different locking mode.
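+
+  A sketch of an affected query shape, using a hypothetical collection `users`:
+
+    UPDATE 'test' WITH { value: 1 } IN users
+    OPTIONS { exclusive: true }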
+
+
+v3.9.0 (2022-02-07)
+-------------------
+
+* Convert v3.9.0-rc.1 into v3.9.0 (GA).
+
+
+v3.9.0-rc.1 (2022-02-03)
+------------------------
+
+* Fix potential access to dangling reference in cancellation of shard
+ synchronization.
+
+* Fixed BTS-740 (no released version affected): determining the shard for
+  Smart<->Satellite SmartEdgeCollections in SingleRemoteModification nodes was
+  incorrect. This could e.g. be triggered by viewing the details of an edge in
+  the UI. Only the alpha/beta releases of 3.9.0 contained this bug.
+
+* Fixed BTS-729 (no released version affected): Some conditions in a Hybrid
+  SmartGraph could lead to a wrong shard location calculation and therefore to
+  wrong graph query results. Only the alpha/beta releases of 3.9.0 contained
+  this bug.
+
+* Fixed minDepth handling of weighted traversals. When using a minDepth of 3,
+  paths of length 2 were also returned on all locally executed variants
+  (SingleServer, OneShard, DisjointSmart).
+
+* Fixed BTS-728 (no released version affected): for DisjointSmartGraphs that
+  include a satellite vertex collection, valid disjoint paths were not always
+  followed if one of the satellites has a connection to two (or more) vertices
+  with different shardValues that by chance are routed to the same shard.
+
+* Fix creation of satellite graphs with a `numberOfShards` value != 1.
+
+* Fixed BTS-712: Collation analyzer now always produces valid UTF-8 sequence.
+
+* Updated Enterprise license behavior: now there will be a 48 hour period for a
+ new deployment and upgrade decision to provide the license. After that period,
+ the read-only mode will be enforced.
+  The upgrade procedure for a deployment without a license will not take the
+  upgrade period into account for enforcing the read-only mode.
+
+* Fixed a bug that hotbackup upload could miss files (fixes BTS-734).
+
+* BTS-590: When creating a new database in the Web UI, the value of the write
+  concern has to be smaller than or equal to the replication factor. Otherwise,
+  an error message is displayed and no database is created.
+
+* Fixed potentially undefined behavior for exit code of arangodump.
+
+* Ignore signals such as SIGPIPE in client tools.
+
+* Validate that the selected `writeConcern` value is less than or equal to the
+  selected `replicationFactor` value when creating new databases.
+
+* Fixed issue #15476: FATAL {crash} occurs on a simple query.
+
+
+v3.9.0-beta.1 (2022-01-06)
+--------------------------
+
+* Fixed ES-1025: fixed a performance regression caused by different hash
+  calculation for primitive types like `uint64_t` and the new `Identifier`
+  wrapper and derived types.
+
+* BTS-707: rename "hardened" option value for `--server.support-info-api`
+ startup option to "admin".
+
+* APM-292: Added new AQL function SHARD_ID.
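+
+  For example, the shard responsible for a given shard key value can be
+  determined like this (the collection name and key are placeholders):
+
+    RETURN SHARD_ID("collection", { "_key": "abc" })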
+
+* Extend timeouts for caching collection counts and index selectivity estimates
+  on coordinators from 15s/90s to 180s. This change will cause fewer requests to
+ be made from coordinators to DB servers to refresh info about collection
+ counts and index estimates as part of AQL queries. The cached info is used in
+ cluster query execution plans only and is not required to be fully up-to-date.
+
+* Improved performance in replication dump protocol by inserting arrays of
+ documents instead of one document at a time and also not retrieving the
+ document revision field when not needed.
+
+* APM-78: Added startup security option `--foxx.allow-install-from-remote` to
+  allow installing Foxx apps from remote URLs other than GitHub. The option is
+ turned off by default.
+
+* Fixed BTS-693: Sort-limit rule now always ensures proper LIMIT node placement
+  to avoid possible invalid results in the fullCount data.
+
+* Updated OpenSSL to 1.1.1m and OpenLDAP to 2.6.0.
+
+* Updated arangosync to 2.7.0.
+
+* Fixed PRESUPP-439: In arangoimport, for CSV and TSV files, it could happen
+  that a buffer containing only the header would be sent to the server, and
+  that batches would contain the documents corresponding to the CSV rows, but
+  not the header, which should be sent together with the documents.
+
+* Changed various default values for RocksDB to tune operations for different
+ typical scenarios like gp2 type volumes and gp3 type volumes and locally
+ attached SSDs with RAID0:
+ - `--rocksdb.level0-slowdown-trigger` has been decreased from 20 to 16
+ - `--rocksdb.level0-stop-trigger` has been increased from 36 to 256
+ - `--rocksdb.max-background-jobs` has been increased to the number of cores
+ and is no longer limited to 8
+  - `--rocksdb.enable-pipelined-write` is now `true` by default instead of
+    `false`
+ - `--rocksdb.throttle-frequency` has been decreased from 60000ms down to
+ 1000ms per iteration, which makes the RocksDB throttle react much quicker
+ - `--rocksdb.pending-compactions-slowdown-trigger` has been decreased from 64
+ GB down to 8 GB
+ - `--rocksdb.pending-compactions-stop-trigger` has been decreased from 256 GB
+ down to 16 GB
+ - `--rocksdb.throttle-slots` has been increased from 63 to 120
+  - `--rocksdb.encryption-hardware-acceleration` is now `true` by default,
+    which helps performance and should not create any problems, since we
+    require Sandy Bridge CPUs anyway.
+ Combined, these changes help ArangoDB/RocksDB to react quicker to a backlog of
+ background jobs and thus to prevent catastrophic stops which abort data
+ ingestion or lead to cluster internal timeouts.
+
+* Adjust default value for startup option `--rocksdb.max-subcompactions` from 1
+  to 2. This allows compaction jobs to be broken up into disjoint ranges which
+ can be processed in parallel.
+
+* Added startup options to adjust previously hard-coded parameters for RocksDB's
+ behavior:
+
+ - `--rocksdb.pending-compactions-bytes-slowdown-trigger` controls RocksDB's
+ setting `soft_pending_compaction_bytes_limit`, which controls how many
+ pending compaction bytes RocksDB tolerates before it slows down writes.
+ - `--rocksdb.pending-compactions-bytes-stop-trigger` controls RocksDB's
+ setting `hard_pending_compaction_bytes_limit`, which controls how many
+ pending compaction bytes RocksDB tolerates before it stops writes entirely.
+ - `--rocksdb.throttle-lower-bound-bps`, which controls a lower bound for the
bandwidth restriction on RocksDB writes the throttle imposes.
-* Fixed PRESUPP-439: In arangoimport, for CSV and TSV files, it could happen
- that a buffer containing only the header would be sent to the server, and
- also batches would contain the documents equivalent to the csv rows in them,
- but not the header, which should be sent together with the documents.
+* Allow initial, full dump shard synchronization to abort prematurely if it
+ turns out that the follower was removed from the plan as a follower (e.g. if
+ there are enough other in-sync followers).
+
+* Set the limit for ArangoSearch segment size to 256MB during recovery to avoid
+ OOM kill in rare cases.
+
+* Cancel ongoing RocksDB compactions on server shutdown.
+
+* Updated Enterprise license behavior: now there will be a one hour period for
+ a new deployment to provide the license. After that period, the read-only mode
+ will be enforced.
+
+* Added startup options to adjust previously hard-coded parameters for the
+ RocksDB throttle:
+ - `--rocksdb.throttle-frequency`: frequency for write-throttle calculations
+ (in milliseconds, default value is 60000, i.e. 60 seconds).
+ - `--rocksdb.throttle-slots`: number of historic measures to use for throttle
+ value calculation (default value is 63).
+ - `--rocksdb.throttle-scaling-factor`: adaptiveness scaling factor for write-
+ throttle calculations (default value is 17). There is normally no need to
+ change this value.
+ - `--rocksdb.throttle-max-write-rate`: maximum write rate enforced by the
+ throttle (in bytes per second, default value is 0, meaning "unlimited").
+ The actual write rate will be the minimum of this value and the value the
+ throttle calculation produces.
+ - `--rocksdb.throttle-slow-down-writes-trigger`: number of level 0 files whose
+ payload is not considered in throttle calculations when penalizing the
+ presence of L0 files. There is normally no need to change this value.
+
+ All these options will only have an effect if `--rocksdb.throttle` is enabled
+ (which is the default). The configuration options introduced here use the
+ previously hard-coded settings as their default values, so there should not be
+ a change in behavior if the options are not adjusted.
+
+* Improve visibility in case of potential data corruption between primary index
+ and actual document store in documents column family.
+
+* Fixed BTS-611: In some cases AQL queries, in particular in a cluster, reported
+  the wrong fullCount when the optimizer rule(s) `late-document-materialization`
+  and/or `sort-limit` were active.
+
+* Fix BTS-535: TakeoverShardLeadership waits properly for Current data in
+ ClusterInfo. This avoids a fake warning and fake test failure.
+
+* Fix potential read inconsistency for single document operations.
+ When reading a single document that is concurrently being updated or replaced,
+ the read operation could erroneously return a "document not found" error
+ although the document actually existed. This only happened for single document
+ operations, i.e., no transactions or AQL queries.
+
+* APM-256: make arangoexport escape potential formulae in CSV exports.
+ This addresses a potential security issue when exporting specially crafted
+ documents to CSV, opening the CSV file in MS Excel or OpenOffice and then
+ clicking links in any of the tainted cells.
+
+ This change also adds a new option `--escape-csv-formulae` to toggle the
+ escaping behavior for potential formulae values. The option is turned on by
+ default.
+
+* Second step of hotbackup transfer job cleanup. Now locks are also cleaned up
+ as well as old, seemingly unfinished jobs.
+
+* Fix GitHub issue #15084. Fixed a potential use-after-free on Windows for
+ queries that used the NeighborsEnumerator (though other PathEnumerators
+ might have been affected as well).
+
+* BTS-624: The `move-calculations-up` optimization rule is now also applied to
+ subqueries, when they don't have dependencies on the outer nodes, don't have
+ modification nodes and don't read their own writes. This fixed the execution
+ of a query without the splicing-subqueries option being faster than the
+ execution of a query with this option (after version 3.8, this option cannot
+ be switched off).
+
+* Added the following metrics for revision trees:
+ - `arangodb_revision_tree_hibernations_total`: number of times a revision tree
+ was compressed in RAM and set to hibernation
+ - `arangodb_revision_tree_resurrections_total`: number of times a revision
+ tree was resurrected from hibernation and uncompressed in RAM
+ - `arangodb_revision_tree_memory_usage`: total memory usage (in bytes) by all
+ active revision trees
+
+* Added metric `rocksdb_wal_sequence` to track the current tip of the WAL's
+ sequence number.
+
+
+v3.9.0-alpha.1 (2021-11-30)
+---------------------------
+
+* Cleanup and properly fail hotbackup upload and download jobs if a dbserver
+ fails or is restarted during the transfer. This gets rid of upload and
+ download blockages in these circumstances.
+
+* Make `db.<collection>.figures(true)` operate on the same snapshot when
+  counting the number of documents in the documents column family and the
+  indexes. This ensures consistency within the results of a single figures
+  call.
+
+* Upgraded bundled version of RocksDB to 6.27.
+
+* Improved sync protocol to commit after each chunk and get rid of potentially
+ dangerous NO_INDEXING optimization.
+
+* Removed an invalid assertion that could be triggered during chaos testing in
+ maintainer mode.
+
+* Simplify the tagging of EnumerateCollectionNodes and IndexNodes with the
+ "read-own-writes" flag. Previously the tagging only happened after all query
+ optimizations were completed, making the tag unavailable to the optimizer.
+ Now the tag is set early on, so it is accessible by the query optimizer.
+
+* APM-187: The "Rebalance Shards" button is now displayed in a new tab, and it
+  is displayed for any database in cluster mode. There is also a new flag for
+  arangod, `--cluster.max-number-of-move-shards` (default = 10), which limits
+  the number of move-shard operations scheduled each time the button is clicked
+  to rebalance shards. When the button is clicked, the number of scheduled
+  move-shard operations is shown, or a note that no operation was scheduled if
+  the flag `--cluster.max-number-of-move-shards` has a value of 0.
+
+* Make the `--version` and `--version-json` commands usable in arangobackup when
+ no positional argument (operation type) was specified. Previously,
+ arangobackup insisted on specifying the operation type alongside the
+ `--version` or `--version-json` commands.
+
+* Fixed an issue in old incremental sync protocol with document keys that
+  contained special characters (`%`). These keys could be sent unencoded in the
+ incremental sync protocol, leading to wrong key ranges being transferred
+ between leader and follower, and thus causing follow-up errors and preventing
+ getting in sync.
+
+* APM-209: Histogram display is now switched off by default. To display it, the
+  new flag `--histogram.generate` must be set to true. Its default value is
+  false for compatibility with other versions and also for complying with the
+  histogram not being displayed by default. If this flag is not set to true, but
+  other histogram flags are used, e.g. `--histogram.interval-size 500`,
+  everything will still run normally, but a warning message will be displayed
+  saying that the histogram is switched off and setting these flags would not be
+  of use. When the flag is set to true, the histogram is displayed before the
+  summary in the output.
+
+* In the shards overview, the list of servers to move the leader shard to now
+  also contains the current followers. This means that from now on, active
+  follower servers can also be nominated as the leading server for a specific
+  shard.
+
+* Extend Windows minidumps with memory regions referenced from CPU registers or
+ the stack to provide more contextual information in case of crashes.
+
+* Fix issues during rolling upgrades from 3.8.0 to 3.8.x (x >= 1) and from 3.7.x
+ (x <= 12) to 3.8.3. The problem was that older versions did not handle
+ following term ids that are sent from newer versions during synchronous
+ replication operations.
+
+* Increase default stack size on Windows from 1MB to 4MB. This should allow
+ execution of larger queries without overflowing the stack.
+
+* Make background calculation of SHA hashes for RocksDB .sst files less
+ intrusive. The previous implementation frequently iterated over all files in
+ the database directory to check if it needed to ad-hoc calculate the SHA
+ hashes for .sst files it previously missed. The procedure it used was to
+ iterate over all files in the database directory and check if there were
+ matching pairs of .sst files and .sha files. This was expensive, because a
+ full directory iteration was performed and a lot of temporary strings were
+ created for filenames and used in comparisons. This was especially expensive
+ for larger deployments with lots of .sst files.
+ The expensive iteration of files in the directory is now happening less
+ frequently, and will not be as expensive as before if it runs.
+
+* Close a potential gap during shard synchronization when moving from the
+ initial sync step to the WAL tailing step. In this small gap the leader could
+ purge some of the WAL files that would be required by the following WAL
+ tailing step. This was possible because at the end of the initial sync step,
+ the snapshot on the leader is released, and there is a small window of time
+ before the follower will issue its first WAL tailing request.
+
+* Improve Shards overview in web UI: the number of currently syncing shards is
+ now displayed per collection. Additionally, shards on failed servers are now
+ displayed in a different color.
+
+* Fixed BTS-637: Slow SynchronizeShard jobs which need to copy data could block
+ quick SynchronizeShard jobs which have the data and only need to resync.
+
+* DEVSUP-899: Fixed subquery execution: in a very rare case, a subquery nested
+  in another subquery was not executed.
+  Technical details:
+  If we have two subqueries `Outer` and `Nested`, then Outer defines the input
+  for Nested. Outer has the pattern: 1 input, subqueryDone, 1 input,
+  subqueryDone [...]. If our internal batching cut a batch like this:
+  [<...>, input (A)] | [subqueryDone, input (B), subqueryDone, <...>], then
+  Nested was not executed on input (B). As soon as there was more than 1 input
+  per Outer, or a different cutting position, all was good.
+
+* When enabling the cluster supervision maintenance mode via the web UI, there
+ is now the possibility to select a duration for the maintenance mode.
+ Previous versions of ArangoDB always enabled the maintenance mode for one
+ hour, without allowing any choice here.
+
+* Stop calling the unnecessary `/_api/wal/open-transactions` REST API before
+  starting the continuous synchronization in active failover and single server
+  replication. This request is unnecessary with the RocksDB storage engine.
+
+* Fixed potential undefined behavior in edge cache during cache migration tasks.
+ There was a short window of time in which an already freed Table could be used
+ by concurrently running edge lookups.
+
+* BTS-623: The audit log messages, when written, were not showing the log level
+ of the message, as in the example:
+ `2021-10-21T02:28:42Z | hostname | audit-authentication | n/a | _system |
+ 127.0.0.1:52490 | n/a | credentials missing | /_admin/aardvark/favicon.ico`
+ With the new flag `--audit.display-log-level`, the level of the audit log
+ message can be displayed in the log text. When set to true, this behavior is
+ expected, as in the example:
+ `2021-10-21T02:28:42Z | DEBUG | hostname | audit-authentication | n/a |
+ _system | 127.0.0.1:52490 | n/a | credentials missing |
+ /_admin/aardvark/favicon.ico`
+ The default value for the flag is false for compatibility with former
+ versions. When this flag is not used, it is considered to have the default
+ behavior (that is, set to false).
+
+* Fixed SEARCH-261: Fix possible race between file creation and directory
+ cleaner (ArangoSearch).
+
+* Fixed SEARCH-260: Fix invalid sorting order of stored features in presence of
+ primary sort (ArangoSearch).
+
+* Change error message for queries that use too much memory from "resource limit
+ exceeded" to "query would use more memory than allowed".
+
+* When using indexes within traversals (e.g. on [_from, date]) and filtering
+  based on a function (e.g. FILTER path.edges[0].date <= DATE_ADD(@now, 5,
+  "day")), this function was passed through to the index. The index cannot
+  evaluate this function and returned incorrect results. Now all functions are
+  evaluated before looking into the index. (Fixes BTS-407)
+
+* Removed the old license mechanism for Docker containers.
+
+* arangorestore: Fix the order (regarding distributeShardsLike) in which
+ collections are being created during restore, which could result in an error
+ and make manual intervention necessary.
+
+* Fixed license output checking on single servers.
+
+* Updated ArangoDB Starter to 0.15.3.
+
+* Fix caching of collection counts and index selectivity estimates in cluster.
+ The cache values expired too early in previous versions, making the cache
+ ineffective.
+
+* Add better error message for replication request failures in case requests are
+ retried.
+
+* Make background statistics gathering more efficient by avoiding one AQL query
+  every 10 seconds that fetched the most recent stats entry. Instead, buffer the
+  entry value after we have written it. Also spread out the statistics calls by
+  different servers more randomly, so that request spikes are avoided for
+  clusters with many coordinators that used to run their statistics queries at
+  about the same time when the instances were started simultaneously.
+
+* Fix a potential overwhelm situation on DB servers that could lead to no
+  further tasks being pulled from a DB server's queue even though there would
+  still be processing capacity and idle threads available.
+
+* Fixed compilation and linking when using glibc 2.34.
+
+* Fuerte: don't fall back to identity encoding in case of unknown encoding.
+
+* Fixed ES-881: Fixed LDAP global options. These need to use the first active
+  provider, not just the first provider, and it should be globally disabled.
+
+* Web UI: Fixes the loading of map tiles which are used to display the query
+  output on a world map when using SSL encryption. This led to some world map
+  tiles not being displayed correctly (OASIS-590).
+
+* Web UI: Added missing HTML escaping inside the file upload plugin used when
+  deploying a new Foxx application from an uploaded zip file.
+
+* Now, arangoimport supports merging of attributes. When importing data from a
+  file into a collection, a document attribute can be composed by merging
+  attributes from the file, together with separators and other literal strings.
+  The new document attribute will be the concatenation of the literal strings,
+  the attribute values, and the separators, as in the example:
+
+ arangoimport --merge-attributes fullName=[firstName]:[lastName]
+
+* Do not use an edge index for range queries, i.e. with the comparison operators
+ `>`, `>=`, `<` or `<=`, but only for equality lookups using the `==` and `IN`
+ comparison operators.
+ The edge index is not fully ordered, so while using it for range queries may
+ produce _some_ documents, it is possible that other documents from the range
+ would be skipped.
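+
+  For illustration, with a hypothetical edge collection `edges`:
+
+    FOR e IN edges FILTER e._from == 'v/1' RETURN e   /* can use the edge index */
+    FOR e IN edges FILTER e._from >= 'v/1' RETURN e   /* no longer uses it */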
+
+* Do not rename the arangod process to "arangod [shutting down]" during the
+ server shutdown. The renaming can cause issues with tools that look for
+ the exact process name "arangod".
+
+* Remove internal AQL query option `readCompleteInput` that controlled whether
+  all input for a modification operation (UPDATE / REPLACE / REMOVE) is read
+  into memory first. This was a necessity with the MMFiles storage engine in
+  cases when a query read from a collection and afterwards wrote into the same
+  collection in the same query. With the RocksDB engine and its snapshots, we
+  never need to read the entire input into memory first.
+
+* Fix Windows installer PATH manipulation issue by replacing the NSIS plugin
+  (BTS-176).
+
+* Fixed all read transactions being counted as aborted. Added a new metric to
+  count read transactions.
+
+* Fixed potential issues with revision trees and document counters getting out
+ of sync with the underlying collection data.
+
+* Fix race in RocksDB throttle listener, when it was getting started lazily
+ during server shutdown.
+
+* Extended the Views web UI by letting it capture View properties that are
+ immutable once created.
+
+* Fixed BTS-602 by not starting the license feature in upgrade mode.
+
+* APM-173: Now, arangobench, arangodump and arangorestore support multiple
+ coordinators, so the flag `--server.endpoint` can be used multiple times, as
+ in the example below:
+
+ arangobench \
+      --server.endpoint tcp://[::1]:8529 \
+      --server.endpoint tcp://[::1]:8530 \
+      --server.endpoint tcp://[::1]:8531
+
+ This does not compromise the use of the other client tools, which preserve the
+ behavior of having one coordinator.
+
+* The server now has two flags to control the escaping of control characters
+  and Unicode characters in the log. The flag `--log.escape` is now deprecated;
+  instead, the new flags `--log.escape-control-chars` and
+  `--log.escape-unicode-chars` should be used (see the example after the list).
+
+ - `--log.escape-control-chars`: this flag applies to the control characters,
+ which have hex code below `\x20`, and also the character DEL, with hex code
+ of `\x7f`. When its value is set to false, the control character will be
+ retained, and its actual value will be displayed when it is a visible
+ character, or a space ` ` character will be displayed if it is not a visible
+    character. The same will happen to the `DEL` character (code `\x7f`), even
+ though it is not a control character, because it is not visible. For
+ example, control character `\n` is visible, so a `\n` will be displayed in
+ the log, and control character `BEL` is not visible, so a space ` ` would be
+ displayed. When its value is set to true, the hex code for the character is
+ displayed, for example, `BEL` character would be displayed as its hex code,
+ `\x07`.
+ The default value for this flag is `true` for compatibility with previous
+ versions.
+
+ - `--log.escape-unicode-chars`: when its value is set to false, the unicode
+ character will be retained, and its actual value will be displayed. For
+ example, `犬` will be displayed as `犬`. When its value is set to true, the
+ character is escaped, and the hex code for the character is displayed. For
+ example, `犬` would be displayed as its hex code, `\u72AC`.
+ The default value for this flag is `false` for compatibility with previous
+ versions.
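+
+  A minimal sketch of combining the two new flags (the values shown are just an
+  example):
+
+    arangod --log.escape-control-chars false --log.escape-unicode-chars true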
+
+* Fixed BTS-582: ArangoDB client EXE package for Windows has incorrect metadata.
+
+* Fixed BTS-575: Windows EXE installer doesn't replace service during upgrade in
+ silent (non-UI) mode.
+
+* APM-121: allow UPSERT queries to have `indexHint` as an extra parameter in
+  OPTIONS. It is used as a hint by the inner FOR loop that is performed as part
+  of the UPSERT query, and helps in cases where UPSERT does not automatically
+  pick the best index for the lookup.
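+
+  A sketch of such a query, with hypothetical collection and index names:
+
+    UPSERT { value: 1234 }
+      INSERT { value: 1234 }
+      UPDATE { updated: true }
+      IN collection
+      OPTIONS { indexHint: 'myIndex' }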
+
+* Fix issue #14819: Query: AQL: missing variable # for node #... location
+ RestCursorHandler.cpp.
+
+* Added enterprise licensing support including (only for Enterprise version):
+  - additional API endpoint `/_admin/license` (GET/PUT, with optional
+    `?force=true`)
+  - arangosh functions: `setLicense()`, `getLicense()`
+  - new error codes and metrics support
+
+* Fix issue #14807: Fix crash during optimization of certain AQL queries during
+ the remove-collect-variables optimizer rule, when a COLLECT node without
+ output variables (this includes RETURN DISTINCT) occurred in the plan.
+
+* Update iresearch library to the upstream. Fixed TSan/ASan detected issues.
+
+* Added new ArangoSearch analyzer type 'collation'.
+
+* Add basic overload control to arangod.
+ This change adds the `x-arango-queue-time-seconds` header to all responses
+ sent by arangod. This header contains the most recent request dequeuing time
+ (in seconds) as tracked by the scheduler. This value can be used by client
+ applications and drivers to detect server overload and react on it.
+ The new startup option `--http.return-queue-time-header` can be set to `false`
+ to suppress these headers in responses sent by arangod.
+
+ In addition, client applications and drivers can optionally augment their
+ requests sent to arangod with a header of the same name. If set, the value of
+ the header should contain the maximum queuing time (in seconds) that the
+ client is willing to accept. If the header is set in an incoming request,
+ arangod will compare the current dequeuing time from its scheduler with the
+ maximum queue time value contained in the request. If the current dequeuing
+ time exceeds the value set in the header, arangod will reject the request and
+ return HTTP 412 (precondition failed) with the new error code 21004 (queue
+ time violated).
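+
+  For illustration, a client could cap the acceptable queue time like this
+  (endpoint and value are just an example):
+
+    curl -H "x-arango-queue-time-seconds: 2.0" http://localhost:8529/_api/version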
+
+ There is also a new metric `arangodb_scheduler_queue_time_violations_total`
+  that is increased whenever a request is dropped because the requested queue
+  time could not be satisfied.
+
+* Fixed a bug for array indexes on update of documents (BTS-548).
+
+* Prevent some possible deadlocks under high load regarding transactions and
+ document operations, and also improve performance slightly.
+
+* Hide help text fragment about VST connection strings in client tools that do
+ not support VST.
+
+* Added REST API endpoint `/_admin/debug/failat/all` to retrieve the list of
+  currently enabled failure points. This API is only available if failure
+  testing is enabled; it is not available in production builds.
+
+* APM-60: optionally allow special characters and Unicode characters in database
+ names.
+
+ This feature allows toggling the naming convention for database names from the
+ previous strict mode, which only allowed selected ASCII characters in database
+ names, to an extended, more relaxed mode. The extended mode allows additional
+ ASCII characters in database names as well as non-ASCII UTF-8 characters.
+ The extended mode can be enabled by setting the new startup option
+ `--database.extended-names-databases` to true. It is turned off by default and
+ requires an explicit opt-in, simply because some drivers and client
+ applications may not be ready for it yet. The arangod server, the ArangoDB web
+ interface and the following bundled client tools are prepared and ready for
+ using the extended database names:
+ - arangobench
+ - arangodump
+ - arangoexport
+ - arangoimport
+ - arangorestore
+ - arangosh
+ More tools and the drivers shipped by ArangoDB will be added to the list in
+ the future.
+
+ Please note that the extended names for databases should not be turned on
+ during upgrades from previous versions, but only once the upgrade has been
+ completed successfully. In addition, the extended names should not be used in
+ environments that require extracting data into a previous version of ArangoDB,
+ or when database dumps may be restored into a previous version of ArangoDB.
+ This is because older versions may not be able to handle the extended database
+ names. Finally, it should not be turned on in environments in which drivers
+ are in use that haven't been prepared to work with the extended naming
+ convention.
+
+ Warning: turning on the `--database.extended-names-databases` option for a
+ deployment requires it to stay enabled permanently, i.e. it can be changed
+ from `false` to `true` but not back. When enabling it, it is also required to
+ do this consistently on all coordinators and DB servers.
+
+ The extended names for databases will be enabled by default in one of the
+ future releases of ArangoDB, once enough drivers and other client tools have
+ had the chance to adapt.
+
+ Naming conventions for collections, views, analyzers, and document keys
+ (`_key` values) are not affected by this feature and will remain as in
+ previous versions of ArangoDB.
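+
+  Once the option is enabled, a database with a Unicode name could be created
+  e.g. from arangosh (the name is just an example):
+
+    db._createDatabase("ドキュメント")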
+
+* Prevent stealing of values from AQL const value registers. This fixes an issue
+ for queries that produce constant results (known at query compile time) when
+  the queries are executed directly on a DB server in a cluster (which is not
+ supported, but may happen for troubleshooting).
+
+* Fixed BTS-562: reduce-extraction-to-projection optimization returns null for
+ one attribute if nested attributes are named the same.
+
+* Add `--datatype` startup option to arangoimport, in order to hard-code the
+ datatype (null/boolean/number/string) for certain attributes in the CSV/TSV
+ import.
+ For example, given the following input file:
+
+ key,price,weight,fk
+ 123456,200,5,585852
+ 864924,120,10,9998242
+ 9949,70,11.5,499494
+ 6939926,2130,5,96962612
+
+ When invoking arangoimport with the startup options
+
+ --datatype key=string
+ --datatype price=number
+ --datatype weight=number
+ --datatype fk=string
+
+ it will turn the numeric-looking values in "key" into strings (so that they
+ can be used in the `_key` attribute), but treat the attributes "price" and
+ "weight" as numbers. The values in attribute "fk" finally will be treated as
+ strings again (potentially because they are used for linking to other "_key"
+ values).
+
+* Avoid the acquisition of a recursive read lock on server shutdown, which could
+ in theory lead to shutdown hangs at least if a concurrent thread is trying to
+ modify the list of collections (very unlikely and never observed until now).
+
+* Fixed display of Unicode characters in the Windows console.
+
+* Fixed issue BTS-531 "Error happens during EXE package installation if
+ non-ASCII characters are present in target path".
+
+* Fix active failover, so that the new host actually has working Foxx services
+ (BTS-558).
+
+* Fixed issue #14720: Bulk import ignores onDuplicate in 3.8.0.
+ The "onDuplicate" attribute was ignored by the `/_api/import` REST API when
+ not specifying the "type" URL parameter.
+
+* Updated OpenSSL to 1.1.1l and OpenLDAP to 2.4.59.
+
+* APM-70: allow PRUNE condition to be stored in a variable.
+
+ This feature allows the PRUNE condition to be stored in a variable, and this
+ variable can be used as a condition for some other statement, such as FILTER.
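+
+  A sketch of the new syntax, with hypothetical graph and attribute names:
+
+    FOR v, e, p IN 1..5 OUTBOUND 'vertices/start' GRAPH 'myGraph'
+      PRUNE isFound = v.value == 'target'
+      FILTER isFound
+      RETURN p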
+
+* Allow startup of arangod with an existing database directory that was missing
+ the ZkdIndex column family.
+
+* Truncate must not trigger intermediate commits while in a streaming
+ transaction, because that would be against the assumption that streaming
+ transactions never do intermediate commits.
+
+* Added ArangoSearch condition optimization: STARTS_WITH is merged with
+  LEVENSHTEIN_MATCH if both are used in the same AND node and the field name
+  and prefix match.
+
+* Hybrid (Disjoint) SmartGraphs (Enterprise Edition):
+ SmartGraphs have been extended with a new option to create Hybrid SmartGraphs.
+ Hybrid SmartGraphs are capable of using SatelliteCollections within their
+  graph definition. You can now select some VertexCollections to be satellites,
+  and therefore available on all DBServers. The SmartGraph can make use of
+  those collections to increase traversal performance via larger local
+  components.
+
+* Added multidimensional indexes which can be used to efficiently intersect
+ multiple range queries. They are currently limited to IEEE-754 double values.
+ Given documents of the form {x: 12.9, y: -284.0, z: 0.02} one can define a
+ multidimensional index using the new type 'zkd' on the fields ["x", "y", "z"].
+
+ The AQL optimizer will then consider this index when doing queries on multiple
+ ranges, for example:
+
+ FOR p IN points
+ FILTER x0 <= p.x && p.x <= x1
+ FILTER y0 <= p.y && p.y <= y1
+ FILTER z0 <= p.z && p.z <= z1
+ RETURN p
+
+  The index implements the relations <=, == and >= natively. Strict relations are
+ emulated using post filtering. Ranges can be unbounded on one or both sides.
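+
+  Such an index could be created from arangosh like this (the collection name is
+  a placeholder; `fieldValueTypes: "double"` reflects the IEEE-754 double
+  limitation mentioned above):
+
+    db.points.ensureIndex({
+      type: "zkd",
+      fields: ["x", "y", "z"],
+      fieldValueTypes: "double"
+    });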
+
+* Shard move and server cleanout jobs no longer have runtime limits; instead,
+  it is now possible to cancel them.
+
+* Pin the cluster-internal network protocol to HTTP/1 for now. Any other protocol
+ selected via the startup option `--network.protocol` will automatically be
+ switched to HTTP/1. The startup option `--network.protocol` is now deprecated
+ and hidden by default. It will be removed in a future version of arangod.
+ The rationale for this change is to move towards a single protocol for
+ cluster-internal communication instead of 3 different ones.
+
+* Disable RTTI when compiling Snappy. RTTI used to be disabled previously, up
+ until some Merkle tree improvement PR was merged about one month ago, which
+ turned on RTTI for compiling Snappy.
+
+* (EE only) Bug-fix: If you created an ArangoSearch view on satellite
+  collections only and then joined with a collection having only a single shard,
+  the cluster-one-shard-rule was falsely applied and could lead to empty view
+  results. The rule will now detect the situation properly and not trigger.
+
+* (EE only) If you have a query using only satellite collections, the
+  cluster-one-shard-rule can now be applied to improve query performance.
+
+* (Enterprise Edition only): added query option `forceOneShardAttributeValue`
+ to explicitly set a shard key value that will be used during query snippet
+ distribution to limit the query to a specific server in the cluster.
+
+ This query option can be used in complex queries in case the query optimizer
+ cannot automatically detect that the query can be limited to only a single
+ server (e.g. in a disjoint smart graph case).
+ When the option is set to the correct shard key value, the query will be
+ limited to the target server determined by the shard key value. It thus
+ requires that all collections in the query use the same distribution (i.e.
+ `distributeShardsLike` attribute via disjoint SmartGraphs).
+
+ Limiting the query to a single DB server is a performance optimization and may
+ make complex queries run a lot faster because of the reduced setup and
+ teardown costs and the reduced cluster-internal traffic during query
+ execution.
+
+ If the option is set incorrectly, i.e. to a wrong shard key value, then the
+ query may be shipped to a wrong DB server and may not return results (i.e.
+ empty result set). It is thus the caller's responsibility to set the
+ `forceOneShardAttributeValue` correctly or not use it.
+
+ The `forceOneShardAttributeValue` option will only honor string values.
+ All other values as well as the empty string will be ignored and treated as if
+ the option is not set.
+
+ If the option is set and the query satisfies the requirements for using the
+ option, the query's execution plan will contain the "cluster-one-shard"
+ optimizer rule.
+
+* SEARCH-238: Improved SortNode placement optimization in cluster, so that late
+  materialization can cover more cases.
+
+* Fix some memory leaks after adding optimization rule for AqlAnalyzer.
+
+* Fix internal iterator states after intermediate commits in write transactions.
+ Iterators could point to invalid data after an intermediate commit, producing
+ undefined behavior.
+
+* Fix read-own-write behavior in different scenarios:
+ - in some cases writes performed by an AQL query could be observed within
+ the same query. This was not intended and is fixed now.
+ - AQL queries in streaming transactions could observe their own writes in
+ even more cases, which could potentially result in an endless loop when
+ the query iterates over the same collection that it is inserting documents
+ into.
+ - UPSERT did not find documents inserted by a previous iteration if the
+ subquery relied on a non-unique secondary index.
+ - disabled intermediate commits for queries with UPSERTs, because
+ intermediate commits can invalidate the internal read-own-write iterator
+ required by UPSERT. Previously, UPSERTs that triggered intermediate
+ commits could have produced unexpected results (e.g., previous inserts
+ that have been committed might not be visible) or even crashes.
+ To achieve the correct read-own-write behavior in streaming transactions, we
+ sometimes have to copy the internal WriteBatch from the underlying RocksDB
+ transaction. In particular, the copy is created whenever an AQL query with
+ modification operations (INSERT/REMOVE/UPDATE/UPSERT/REPLACE) is executed in
+ the streaming transaction. If there have not been any other modifications so
+ far (queries/document operations), then the WriteBatch is empty and creating
+ the copy is essentially a no-op. However, if the transaction already contains
+ a lot of modifications, creating the WriteBatch copy might incur some
+ overhead that can now lead to decreased performance.
+
+* Fix rare case of invalid data that could be inserted into the ArangoSearch
+  index if several clients concurrently insert data and use a custom analyzer
+  with a non-string return type.
+
+* Fix a rare shutdown race in RocksDBShaCalculatorThread.
+
+* Added an "Analyzers" view to the web UI to allow managing the creation of
+  ArangoSearch analyzers.
+
+* Add pseudo log topic "all" to set the log levels for all log topics at once.
+ For example, this can be used when starting a server with trace or debug
+ logging enabled for all log topics, e.g.
+
+ `--log.level all=debug`
+ `--log.level all=trace`
+
+ This is very coarse and should only be used for such use cases.
+
+* Change the default value for the `--threads` startup parameter of the
+ following client tools from previously 2 to the maximum of 2 and the number of
+ available CPU cores:
+ - arangodump
+ - arangoimport
+ - arangorestore
+
+* Remove old fixPrototypeChain agency migration, which was introduced in 3.2 and
+  is no longer necessary. This will make it impossible to upgrade directly from
+  a version < 3.2 to a version >= 3.9 if one has a chain of
+  `distributeShardsLike` collections.
+
+* Added metrics for the number of errors and warnings logged:
+ - `arangodb_logger_warnings_total`: total number of warnings (WARN messages)
+ logged since server start
+ - `arangodb_logger_errors_total`: total number of errors (ERR messages)
+ logged since server start
+
+* Added REST API `/_admin/support-info` to retrieve deployment information.
+ As this API may reveal sensitive data about the deployment, it can only be
+ accessed from inside the system database. In addition, there is a policy
+ control startup option `--server.support-info-api` that determines if and to
+ whom the API is made available. This option can have the following values:
+ - `disabled`: support info API is disabled.
+ - `jwt`: support info API can only be accessed via superuser JWT.
+ - `hardened`: if `--server.harden` is set, the support info API can only be
+ accessed via superuser JWT. Otherwise it can be accessed by admin users
+ only.
+ - `public`: everyone with access to `_system` database can access the support
+ info API.
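+
+  For example, with a superuser JWT the API could be queried like this (the
+  endpoint and token are placeholders):
+
+    curl -H "Authorization: bearer $JWT" \
+      http://localhost:8529/_db/_system/_admin/support-info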
+
+* Send a keystroke to arangod's stdin when a shutdown command is received via
+ the REST API `/_admin/shutdown` and the server is started with the `--console`
+ argument. The keystroke will exit the blocking read loop that is waiting on
+ console input and that otherwise blocks the shutdown.
+ The implementation is based on ioctl and is thus only present on Linux and
+ macOS.
+
+* Some AQL queries erroneously reported the "access after data-modification"
+  error for queries in which there was a read attempt from a collection _before_
+  a data-modification operation. Such access is legal and should not trigger
+  said error anymore. Accessing a collection _after_ a data-modification
+  operation in the same query is still disallowed.
+
+* Make AQL modification operations in a cluster asynchronous. This allows
+  freeing the thread for other work until both the write and the synchronous
+  replication are complete.
+
+* Fixed: /_api/transaction/begin called on edge collections of disjoint
+ SmartGraphs falsely returned CollectionNotFound errors.
+
+* Bugfix: In more complex queries there was a code path where a
+  (Disjoint-)SmartGraph access was not properly optimized.
+
+* Add ReplicatedLogs column family.
+
+* Add optimization rule for AqlAnalyzer.
+
+* Change optimization level for debug builds back to `-O0` (from `-Og`) because
+ `-Og` seems to cause debuggability issues in some environments.
+
+* Automatically extend web UI sessions while they are still active.
+ The web UI can now call a backend route to renew its JWT, so there will not be
+ any rude logouts in the middle of an active session.
+
+ Active web UI sessions (here: sessions with user activity within the last 90
+ minutes) will automatically renew their JWT if they get close to the JWT
+ expiry date.
+
+* Reduce memory usage for in-memory revision trees. Previously, a revision tree
+ instance for a non-empty collection/shard was using 4 MB of memory when
+ uncompressed. Trees that were unused for a while were compressed on the fly to
+ use less memory, and later uncompressed again when needed.
+ Now the uncompressed in-memory version of the revision tree will dynamically
+ allocate memory as needed. This allows the initial version of the trees to get
+ away with just 64 KB of memory. Memory usage will grow lazily when more parts
+ of the trees get populated. The compression of unused in-memory tree data is
+ still in place.
+
+* Refactored arangobench:
+  - Updated testcases to show their description when execution begins
+  - Fixed testcase histogram with time measures when batch size > 0
+  - Integrated testcases with VelocyPack for simplification
+  - Deprecated some testcases
+  - Internal changes for performance optimization
-* Adjust default value for startup option `--rocksdb.max-subcompactions` from 1
- to 2. This allows compactions jobs to be broken up into disjoint ranges which
- can be processed in parallel.
+* Add 3 AQL functions: COSINE_SIMILARITY, L1_DISTANCE and L2_DISTANCE.
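+
+  For example (the input vectors are just an illustration):
+
+    RETURN COSINE_SIMILARITY([0, 1], [1, 1])   /* ~0.707 */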
-* Allow initial, full dump shard synchronization to abort prematurely if it
- turns out that the follower was removed from the plan as a follower (e.g. if
- there are enough other in-sync followers).
+* Honor the value of startup option `--rocksdb.sync-interval` on Windows, too.
+ Previously, the value was ignored and WAL syncing on Windows was using a
+ different code paths than on the other supported platforms. Now syncing is
+ unified across all platforms, and they all call RocksDB's `SyncWAL()`.
-* Make per-server values "numberOfCores" and "physicalMemory" available to
- agency to improve quality of potential future shard rebalancing algorithms.
+* APM-132: Clean up collection statuses.
+ Removes collection statuses "new born", "loading", "unloading" and "unloaded".
+ These statuses were last relevant with the MMFiles storage engine, when it was
+ important to differentiate which collections are present in main memory and
+ which aren't. With the RocksDB storage engine, all that was automatically
+ handled anyway, and the statuses were not important anymore.
-* Unify the result structure of `db._version(true)` calls for arangosh and
- server console. Previously such a call in the server console would return
- a different structure that only consisted of the `details` subobject.
- This is now unified so that the result structure in the server console is
- consistent with arangosh, but strictly speaking this is a breaking change.
+ The change removes the "Load" and "Unload" buttons for collections from the
+ web interface. All collections in the web interface will be marked as "loaded"
+ permanently.
-* Set the limit for ArangoSearch segment size to 256MB during recovery to avoid
- OOM kill in rare cases.
+ This change also obsoletes the `load()` and `unload()` calls for collections
+ as well as their HTTP API equivalents. The APIs will remain in place for now
+  but are changed to no-ops. They will be removed eventually in a future version of
+ ArangoDB. This will be announced separately.
-* Updated arangosync to 2.7.0.
+* Reduce default value for max-nodes-per-callstack to 200 for macOS, because on
+  macOS worker threads have a stack size of only 512 KB.
-* Updated Enterprise license behavior: now there will be a one hour period for
- a new deployment to provide the license. After that period, the read-only mode
- will be enforced.
+* Slightly increase internal AQL query and transaction timeout on DB servers
+ from 3 to 5 minutes.
+ Previously, queries and transactions on DB servers could expire quicker, which
+ led to spurious "query ID not found" or "transaction ID not found" errors on
+ DB servers for multi-server queries/transactions with unbalanced access
+ patterns for the different participating DB servers.
+ The timeouts on coordinators remain unchanged, so any queries/transactions
+ that are abandoned will be aborted there, which will also be propagated to DB
+ servers. In addition, if a participating server in an AQL query becomes
+ unavailable, the coordinator is now notified of that and will terminate the
+ query more eagerly.
-* APM-78: Added startup security option `--foxx.allow-install-from-remote` to
- allow installing Foxx apps from remote URLs other than GitHub. The option is
- turned off by default.
+* Add hard-coded complexity limits for AQL queries, in order to prevent
+ programmatically generated large queries from causing trouble (too deep
+ recursion, enormous memory usage, long query optimization and distribution
+ passes etc.).
+ This change introduces 2 limits:
+  - a recursion limit for AQL query expressions. An expression can now be up to
+    500 levels deep. An example expression is `1 + 2 + 3 + 4`, which is 3 levels
+    deep (`1 + (2 + (3 + 4))`).
+ - a limit for the number of execution nodes in the initial query execution
+ plan.
+ The number of execution nodes is limited to 4,000.
-* Cancel ongoing RocksDB compactions on server shutdown.
+* Remove _msg/please-upgrade handler.
-* Added new option `--custom-query-bindvars` to arangoexport, so queries given
- via option `--custom-query` can have bind variables in them. Also changed the
- flag names `--query` to `--custom-query` and `--query-max-runtime` to
- `--custom-query-max-runtime` to be like in the other client-tools.
+* Adapt various places related to the handling of execution plans to be
+  non-recursive, in order to avoid stack overflows. This now allows executing
+  much larger queries.
-* Improve some RocksDB-related error messages during server startup.
+* Fix locking of AQL write queries on DB servers.
-* Added startup options to adjust previously hard-coded parameters for the
- RocksDB throttle:
- - `--rocksdb.throttle-frequency`: frequency for write-throttle calculations
- (in milliseconds, default value is 60000, i.e. 60 seconds).
- - `--rocksdb.throttle-slots`: number of historic measures to use for throttle
- value calculation (default value is 63).
- - `--rocksdb.throttle-scaling-factor`: adaptiveness scaling factor for write-
- throttle calculations (default value is 17). There is normally no need to
- change this value.
- - `--rocksdb.throttle-max-write-rate`: maximum write rate enforced by the
- throttle (in bytes per second, default value is 0, meaning "unlimited").
- The actual write rate will be the minimum of this value and the value the
- throttle calculation produces.
- - `--rocksdb.throttle-slow-down-writes-trigger`: number of level 0 files whose
- payload is not considered in throttle calculations when penalizing the
- presence of L0 files. There is normally no need to change this value.
- All these options will only have an effect if `--rocksdb.throttle` is enabled
- (which is the default). The configuration options introduced here use the
- previously hard-coded settings as their default values, so there should not be
- a change in behavior if the options are not adjusted.
+* APM-112: invalid use of OPTIONS in AQL queries will now raise a warning in the
+ query.
+ The feature is useful to detect misspelled attribute names in OPTIONS, e.g.
-* Raised minimal macOS supported version to 10.15 (Catalina).
+ INSERT ... INTO collection
+ OPTIONS { overwrightMode: 'ignore' } /* should be 'overwriteMode' */
-* Remove background thread `RocksDBShaThread` for background SHA256 checksum
- calculation for .sst files in the Enterprise Edition. The checksums are now
- calculated incrementally while writing into the .sst files, and the checksum
- files will be stored on disk as soon as an .sst file is made durable by
- RocksDB. There is no more need to periodically scan the database directory
- and look for any additional .sst files.
+ It is also useful to detect the usage of valid OPTIONS attribute names that
+ are used for a wrong query part, e.g.
-* Fix BTS-580: Trimmed the password field from the payload in the client
- requests when displaying error messages in arangorestore, because they
- displayed the password as plain text.
+ FOR doc IN collection
+ FILTER doc.value == 1234
+ INSERT doc INTO other
+    OPTIONS { indexHint: 'myIndex' } /* should be used above, at the FOR */
-* Refactored unit tests with the `grey` keyword, which is for skipping certain
- tests. A test file that did not perform any test, but only had a function to
- sleep, was removed. Two test files were renamed so they would not be skipped.
+ In case a wrong option attribute is used, a warning with code 1575 will be
+ raised.
+ By default, warnings are reported but do not lead to the query being aborted.
+ This can be toggled by the startup option `--query.fail-on-warnings` or the
+ per-query runtime option `failOnWarnings`.
-* Fixed BTS-611: In some cases AQL queries, in particular in a cluster,
- reported the wrong fullCount when the optimizer rule(s)
- `late-document-materialization and/or `sort-limit` were active.
+* Added new command-line option `--version-json`. This will return the version
+  information as a JSON object.
-* Fix BTS-535: TakeoverShardLeadership waits properly for Current data in
- ClusterInfo. This avoids a fake warning and fake test failure.
+* Fix ArangoAgency::version(), which always returned an empty string instead of
+ the agency's correctly reported version. This also fixes the agency version in
+ the startup log messages of the cluster.
-* APM-256: make arangoexport escape potential formulae in CSV exports.
- This addresses a potential security issue when exporting specially
- crafted documents to CSV, opening the CSV file in MS Excel or OpenOffice
- and then clicking links in any of the tainted cells.
+* Fix potential memleak in Pregel conductor garbage collection.
- This change also adds a new option `--escape-csv-formulae` to
- toggle the escaping behavior for potential formulae values. The
- option is turned on by default.
+* Added garbage collection for finished and failed Pregel conductors.
+ Previously, Pregel executions that finished successfully or unsuccessfully
+ remained in memory until being explicitly canceled. This prevented a cleanup
+ of abandoned jobs. Such jobs are now automatically cleaned about 10 minutes
+  after finalization. The time-to-live values can be overridden per Pregel job
+  by passing a "ttl" value.
-* Second step of hotbackup transfer job cleanup. Now locks are also cleaned up
- as well as old, seemingly unfinished jobs.
+* Revive startup parameter `--server.session-timeout` to control the timeout for
+ web interface sessions and other sessions that are based on JWTs created by
+ the `/_open/auth` API.
-* Defer intermediate commits in the middle of a multi-document (array)
- operation. This is to ensure that the RocksDB key locks for all participating
- document keys are still held while the operations are replicating via the
- synchronous replication.
+  This change also sets the default session timeout for web interface sessions
+  to one hour. Older versions of ArangoDB had longer session timeouts.
-* Fix potential read inconsistency for single document operations.
- Wen reading a single document that is concurrently being updated or replaced,
- the read operation could erroneously return a "document not found" error
- although the document actually existed. This only happened for single
- document operations, i.e., no transactions or AQL queries.
+* Removed redirects from /_admin/cluster* to /_admin/cluster/*. Adjusted
+  internal requests to use the new URLs.
-* Fix GitHub issue #15084. Fixed a potential use-after-free on Windows for
- queries that used the NeighborsEnumerator (though other PathEnumerators
- might have been affected as well).
+* Fix potential stack overflow when executing large queries. This is achieved by
+ splitting the callstack and moving part of the execution to a separate thread.
+ The number of execution nodes after which such a callstack split should be
+ performed can be configured via the query option `maxNodesPerCallstack` and
+ the command line option `--query.max-nodes-per-callstack`; the default is 250.
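+
+ For example, to split the callstack more aggressively (illustrative value):
+
+ arangod --query.max-nodes-per-callstack 100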
-* Cleanup and properly fail hotbackup upload and download jobs if a dbserver
- fails or is restarted during the transfer. This gets rid of upload and
- download blockages in these circumstances.
+* Fixed invalid shard synchronization for documents not added via INSERT with
+ `overwriteMode` set to `ignore`. In this case, if a document with the given
+ key already exists, it is not changed on the leader (i.e. no write happens on
+ the leader). However, a write was replicated to the follower, which was wrong.
+ This write is now suppressed, which can only make such insert operations
+ faster.
-* Added new option `--custom-query-bindvars` to arangobench, so queries given via
- option `--custom-query` can have bind variables in them.
+* Web UI: Disables the hover tooltip within the statistics view of the memory
+ consumption chart.
-* BTS-624: The `move-calculations-up` optimization rule is now also applied to
- subqueries, when they don't have dependencies on the outer nodes, don't have
- modification nodes and don't read their own writes. This fixed the execution
- of a query without the splicing-subqueries option being faster than the
- execution of a query with this option (after version 3.8, this option cannot
- be switched off).
+* Add 3 AQL functions: DECAY_GAUSS, DECAY_EXP and DECAY_LINEAR.
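+
+ A minimal sketch, assuming the argument order (value, origin, scale, offset,
+ decay) and purely illustrative numbers:
+
+ RETURN DECAY_GAUSS(41, 40, 5, 5, 0.5)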
+
+* Implemented an optimization for traversals. If you apply a post-filter on the
+ vertex and/or edge result, this filter is now applied during the traversal
+ to avoid generating the full output for AQL. This has a positive effect if
+ you filter on the vertex/edge but return the path: the system then only
+ needs to produce paths that are allowed to pass through. E.g.
+
+ FOR v,e,p IN 10 OUTBOUND @start GRAPH "myGraph"
+ FILTER v.isRelevant == true
+ RETURN p
+
+ can now be optimized, and the traversal statement will only produce paths
+ where the last vertex has `isRelevant == true`.
+
+* Fix BTS-446: When finding a not yet fully initialized agency, do not
+ exit immediately with a fatal error. Keep trying for (very generous) 5 minutes.
+
+* Reduced the agency store's public members, for simpler long-term support.
+
+* Added a number of tests for the Agency Store public members.
+
+* Updated bundled version of Snappy library to 1.1.9.
+
+* Introduce a new internal error code for cases where a call cannot succeed
+ because the server startup phase is still in progress. This error will be
+ mapped to the HTTP status code 503 (service unavailable).
+ One example where this can happen is when trying to authenticate a request,
+ but the _users collection is not yet available in the cluster.
+
+* Fix DEVSUP-749: Fix potential deadlock when executing concurrent view/link
+ DDL operations and index DDL operations on the same collection.
+
+* Fixed issue #14122: when the optimizer rule "inline-subqueries" is applied,
+ it may rename some variables in the query. The variable renaming was however
+ not carried out for traversal PRUNE conditions, so the PRUNE conditions
+ could still refer to obsolete variables, which would make the query fail with
+ errors such as
+
+ Query: AQL: missing variable ... for node ... while planning registers
+
+* Fixed the error response if the HTTP version is not 1.0 or 1.1 and if
+ the Content-Length is too large (> 1 GB).
+
+* Add a connection cache for internal replication requests.
+
+* Improve legibility of size values (by adding KB, MB, GB, TB suffixes) in the
+ output generated by client tools.
+
+* Timely updates of rebootId / cluster membership of DB servers and
+ coordinators in ClusterInfo. Fixes BTS-368 detected in chaos tests.
+
+* Allow only guarded access to ActionBase::_result.
+
+* Fixed the return value of sendRequestRetry when the server is shutting down.
+
+* Fixed internal issue #798: In a rare case, when a remove request completely
+ cleans a just-consolidated segment, the commit could be cancelled, and
+ documents removed from the collection could be left dangling in the
+ ArangoSearch index. Also fixes ES-810 and BTS-279.
+
+* Retry if an ex-leader can no longer drop a follower because it is no longer
+ leading.
+
+* Fixed a small problem in fuerte which could lead to an assertion failure.
+
+* Upgrade jemalloc version to latest stable dev.
+
+* Fixed issue BTS-373: ASan detected possible heap-buffer-overflow at
+ arangodb::transaction::V8Context::exitV8Context().
+
+* Allow to specify a fail-over LDAP server. Instead of "--ldap.OPTION" you need
+ to specify "--ldap2.OPTION". Authentication / Authorization will first check
+ the primary LDAP server. If this server cannot authenticate a user, it will
+ try the secondary one. It is possible to specify a file containing all users
+ that the primary (or secondary) LDAP server is handling by specifying the
+ option "--ldap.responsible-for". This file must contain the usernames
+ line-by-line.
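+
+ A sketch of such a fail-over setup (server names and file path are
+ illustrative; every `--ldap.OPTION` has an `--ldap2.OPTION` counterpart):
+
+ arangod \
+ --ldap.enabled true --ldap.server primary.example.com \
+ --ldap2.enabled true --ldap2.server secondary.example.com \
+ --ldap.responsible-for /etc/arangodb3/primary-users.txt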
+
+* Make the time-to-live (TTL) value of a streaming cursor only count after
+ the response has been sent to the client.
+
+* Improve performance of batch CRUD operations (insert, update, replace,
+ remove) if some of the documents in the batch run into write-write conflicts.
+ Rolling back partial operations in case of a failure is very expensive
+ because it requires rebuilding RocksDB write batches for the transaction
+ from scratch. Rebuilding write batches takes time proportional to the number
+ of operations in the batch, and for larger batches the cost can be
+ prohibitive.
+ Now we are not rolling back write batches in some situations when this is
+ not required, so that in many cases running into a conflict does not have
+ that high overhead. There can still be issues when conflicts happen for index
+ entries, but a lot of previously problematic cases should now work better.
+
+* Allow AQL variable names starting with an underscore, as stated in the docs.
+
+* Fix crashes during arangorestore operations due to usage of wrong pointer
+ value for updating user permissions.
+
+* Added option `--query-max-runtime` to arangoexport, in order to control
+ maximum query runtime.
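+
+ For example (query and runtime limit, in seconds, are illustrative):
+
+ arangoexport --query "FOR d IN mycol RETURN d" --query-max-runtime 30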
+
+* Fix BTS-340: AQL expressions similar to `x < 3 || x` are no longer erroneously
+ reduced to `x < 3` by the optimizer rule remove-redundant-or.
+
+* Changed default value of arangodump's `--envelope` option from `true` to
+ `false`. This allows using higher parallelism in arangorestore when
+ restoring large collection dumps. As a side-effect, this will also decrease
+ the size of dumps taken with arangodump, and should slightly improve dump
+ speed.
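+
+ To still produce a dump in the old enveloped format (a sketch; the output
+ directory is illustrative):
+
+ arangodump --output-directory dump --envelope true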
+
+* Improve parallelism capabilities of arangorestore.
+
+ arangorestore can now dispatch restoring data chunks of a collection to idle
+ background threads, so that multiple restore requests can be in flight for
+ the same collection concurrently.
+
+ This can improve restore speed in situations when there are idle threads
+ left (number of threads can be configured via arangorestore's `--threads`
+ option) and the dump file for the collection is large.
+
+ The improved parallelism is only used when restoring dumps that are in the
+ non-enveloped format. This format has been introduced with ArangoDB 3.8.
+ The reason is that dumps in the non-enveloped format only contain the raw
+ documents, which can be restored independent of each other, i.e. in any
+ order. However, the enveloped format may contain documents and remove
+ operations, which need to be restored in the original order.
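+
+ For example, restoring with a larger thread pool (directory and thread
+ count are illustrative):
+
+ arangorestore --input-directory dump --threads 16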
+
+* Fix BTS-374: thread race between ArangoSearch link unloading and storage
+ engine WAL flushing.
-* Changed arangobench concurrency flag name from `--concurrency` to `--threads`.
+* Add value of `_key` to more insert/update/replace/remove error messages so it
+ is easier to figure out which document caused unique constraint violations
+ and/or write-write conflicts during a multi-document write operation.
-* Added the following metrics for revision trees:
- - `arangodb_revision_tree_hibernations_total`: number of times a revision
- tree was compressed in RAM and set to hibernation
- - `arangodb_revision_tree_resurrections_total`: number of times a revision
- tree was resurrected from hibernation and uncompressed in RAM
- - `arangodb_revision_tree_memory_usage`: total memory usage (in bytes) by
- all active revision trees
+* Don't display obsoleted startup options and sections in the `--help` and
+ `--help-.` commands. Also renamed the "global" options section to "general".
-* Added metric `rocksdb_wal_sequence` to track the current tip of the
- WAL's sequence number.
+* Removed assertion for success of a RocksDB function. Throw a proper exception
+ instead.
-* APM-217: deprecate the usage of fulltext indexes.
+* Experimentally switch to wyhash (from xxhash) for velocypack. This is an
+ experiment in devel to check if it produces any observable speedups.
-* Improve visibility in case of potential data corruption between primary
- index and actual document store in documents column family.
+* Remove deprecated HTTP REST API `/_api/export`. This API was deprecated in a
+ previous version because it was not supported in clusters and was also covered
+ completely by streaming AQL queries for the RocksDB storage engine.
-* Changed "arangosh" directory name to "client-tools", because the directory
- contains the code for all client tools and not just arangosh.
+* Added the values `enterprise-build-repository` and `oskar-build-repository`
+ to the `--version` output.
-* Make `db..figures(true)` operate on the same snapshot when
- counting the number of documents in the documents column family and the
- indexes. This ensures consistency for the results of a single figures result.
+* Clean up replication code and remove a 3.2-compatibility mode that was only
+ useful when replicating from a leader < ArangoDB version 3.3.
-* Upgraded bundled version of RocksDB to 6.27.
+* Obsolete option `--database.old-system-collections`. This option has no
+ meaning in ArangoDB 3.9, as old system collections will not be created anymore
+ in this version. The option was deprecated in 3.8 and announced to be
+ obsoleted.
-* Updated immer to version 0.7.0.
+* Upgrade velocypack to latest, C++17-only version.
-* Improved sync protocol to commit after each chunk and get rid of
- potentially dangerous NO_INDEXING optimization.
+* Make arangovpack more powerful, by supporting different input and output
+ formats (json and vpack, plain or hex-encoded).
+ The arangovpack options `--json` and `--pretty` have been removed and have
+ been replaced with separate options for specifying the input and output types:
+ - `--input-type` ("json", "json-hex", "vpack", "vpack-hex")
+ - `--output-type` ("json", "json-pretty", "vpack", "vpack-hex")
+ The previous option `--print-non-json` has been replaced with the option
+ `--fail-on-non-json` which makes arangovpack fail when trying to emit non-JSON
+ types to JSON output.
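+
+ For example, to pretty-print a hex-encoded velocypack value (a sketch; how
+ input and output files are passed is omitted here):
+
+ arangovpack --input-type vpack-hex --output-type json-pretty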
-* Removed an invalid assertion that could be triggered during chaos testing in
- maintainer mode.
+* Remove obsolete API endpoint `/_admin/repair/distributeShardsLike`. This API
+ was intended to correct some bad state introduced before 3.2.12 or 3.3.4,
+ respectively. It had to be invoked manually by callers and there was never any
+ driver support for it.
-* Simplify the tagging of EnumerateCollectionNodes and IndexNodes with the
- "read-own-writes" flag. Previously the tagging only happened after all query
- optimizations were completed, making the tag unavailable to the optimizer.
- Now the tag is set early on, so it is accessible by the query optimizer.
+* Remove now-unused SubqueryExecutor. This is an internal change only and should
+ not have any effect on queries, as from 3.8 onwards only spliced subqueries
+ should be used in query execution plans and during query execution.
-* Made the `--version` and `--version-json` commands usable in arangobackup
- when no positional argument (operation type) was specified. Previously,
- arangobackup insisted on specifying the operation type alongside the
- `--version` or `--version-json` commands.
+* Switched to GCC 10 as the default compiler and to Sandy Bridge as the default
+ required architecture (Linux, macOS binaries).
-* Updated boost to version 1.77.0.
+* Removed obsolete metrics in new v2 metric API. Those metrics' values were
+ identical to the sum value of histograms.
-* Removed the following deprecated arangobench testcases:
- * aqltrx
- * aqlv8
- * counttrx
- * deadlocktrx
- * multi-collection
- * multitrx
- * random-shapes
- * shapes
- * shapes-append
- * skiplist
- * stream-cursor
+* Fix potentially undefined behavior when creating a
+ CalculationTransactionContext for an ArangoSearch analyzer. An uninitialized
+ struct member was passed as an argument to its base class. This likely had
+ no observable effects, but has been fixed nonetheless.
-* Renamed arangobench testcase "hash" to "persistent-index".
-* Fixed an issue in old incremental sync protocol with document keys that
- contained special characters (`%`). These keys could be send unencoded in
- the incremental sync protocol, leading to wrong key ranges being transferred
- between leader and follower, and thus causing follow-up errors and preventing
- getting in sync.
+v3.8.1 (2021-08-27)
+-------------------
-* APM-209: Histogram displaying is now switched off by default. For displaying
- it, the new flag `histogram.generate` must be set to true. Its default value
- is false for compatibility with other versions and also for complying with the
- histogram not being displayed by default. If this flag is not set to true, but
- other histogram flags are addressed, e.g. `--histogram.interval-size 500`,
- everything will still run normally, but a warning message will be displayed
- saying that the histogram is switched off and setting this flag would not be
- of use. When the flag is set to true, the histogram is displayed before the
- summary in the output.
+* Reduce internal priority of AQL execution. This prevents possible deadlocks
+ with modification operations in a cluster and replicationFactor >= 2, and can
+ also improve responsiveness under high load of AQL queries.
-* Extend Windows minidumps with memory regions referenced from CPU registers or
- the stack to provide more contextual information in case of crashes.
+* Updated arangosync to 2.6.0.
-* Add option to content-transfer encode gzip Foxx replies.
+* Added protocol specific metrics: histogram about request body size, total
+ number of HTTP/2 connections and total number of VST connections.
-* Simplify internal request compression/decompression handling code.
+* Fix a potential multi-threading issue in index creation on coordinators, when
+ an agency callback was triggered at the same time as the method
+ `ensureIndexCoordinatorInner` was being left.
-* In the shards overview the list of servers to move the leader shard to, now
- also contains the current followers. This means that from now on also active
- follower servers can be nominated as the leading server for that specific
- shard.
+* Append physical compaction of the log collection to every Raft log compaction
+ (BTS-542).
-* Fix issues during rolling upgrades from 3.8.0 to 3.8.x (x >= 1) and from
- 3.7.x (x <= 12) to 3.8.3. The problem was that older versions did not handle
- following term ids that are sent from newer versions during synchronous
- replication operations.
+* Preselect "create index in background" option when creating indexes in the web
+ UI. The "create index in background" option can be less intrusive because it
+ allows other write operations on the collection to proceed.
-* Added Enterprise Sharded Graphs Simulation: Now it is possible to test
- SmartGraphs and SatelliteGraphs on a single server instance and then to port
- them to a cluster with multiple servers. All existing types of SmartGraphs
- are eligible to this procedure: SmartGraphs themselves, Disjoint SmartGraphs,
- Hybrid SmartGraphs and Hybrid Disjoint SmartGraphs. One can create a graph of
- any of those types in the usual way, e.g., using `arangosh`, but on a single
- server, then dump it, start a cluster (with multiple servers) and restore the
- graph in the cluster. The graph and the collections will keep all properties
- that are kept when the graph is already created in a cluster. This feature is
- only available in the Enterprise Edition.
+* Do not block a scheduler thread on the coordinator while an index is being
+ created. Instead, start a background thread for the actual index fill-up work.
+ The original thread can then be relinquished until the index is completely
+ filled or index creation has failed.
+ The default index creation timeout on coordinators has also been extended from
+ 1 hour to 4 days, but it is still configurable via the startup parameter
+ `--cluster.index-create-timeout` in case this is necessary.
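+
+ For example, to restore the previous one-hour timeout (the value is assumed
+ to be in seconds):
+
+ arangod --cluster.index-create-timeout 3600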
+
+* Fixed: getResponsibleShard calls on disjoint SmartGraphs. If you asked for
+ the responsible shard on a disjoint edge collection where the _from and _to
+ differ (which is invalid), the server responded with "DATASOURCE_NOT_FOUND".
+ This is now fixed to "BAD_PARAMETER" to emphasize that the collection is fine
+ but the input is invalid.
+
+* Fixed: _api/transaction/begin called on edge collections of disjoint
+ SmartGraphs falsely returned CollectionNotFound errors.
-* Close a potential gap during shard synchronization when moving from the
- initial sync step to the WAL tailing step. In this small gap the leader
- could purge some of the WAL files that would be required by the following
- WAL tailing step. This was possible because at the end of the initial sync
- step, the snapshot on the leader is released, and there is a small window
- of time before the follower will issue its first WAL tailing request.
+* Bug-fix: In more complex queries there was a code path where a (Disjoint)
+ SmartGraph access was not properly optimized.
-* Improve Shards overview in web UI: the number of currently syncing shards is
- now displayed per collection. Additionally, shards on failed servers are now
- displayed in a different color.
+* Fix wrong assertion in fuerte and move it to where the TLA+ model says it
+ should be. This fixes a unit test failure occurring on newer Macs with a
+ certain clang version.
-* Increase default stack size on Windows from 1MB to 4MB. This should allow
- execution of larger queries without overflowing the stack.
+* When creating Pregel memory-mapped files, create them with O_TMPFILE attribute
+ on Linux so that files are guaranteed to vanish even if a process dies.
-* Fixed BTS-637: Slow SynchronizeShard jobs which need to copy data could
- block quick SynchronizeShard jobs which have the data and only need to
- resync.
+* Improve log messages for Pregel runs by giving them more context.
-* When enabling the cluster supervision maintenance mode via the web UI, there
- is now the possibility to select a duration for the maintenance mode.
- Previous versions of ArangoDB always enabled the maintenance mode for one
- hour, without allowing any choice here.
+* Fixed issue BTS-536 "Upgrading without rest-server is aborted by error".
+ Now stating `--server.rest-server false` does not require the additional
+ `--console` argument for upgrading a server.
-* Remove unsupported `--server.default-api-compatibility` startup option.
+* Fixed issue #14592: IS_NULL(@x) isn't recognized as a constant expression.
-* Fixed potential undefined behavior in edge cache during cache migration tasks.
- There was a short window of time in which an already freed Table could be
- used by concurrently running edge lookups.
-
-* DEVSUP-899: Fixed Subquery execution
- in a very rare case a subquery, nested in another subquery, was not
- executed, which is fixed now. Technical details:
- If we have two subqueries: `Outer` and `Nested` the Outer will define
- the input for Nested. And Outer has the pattern: 1 input, subqueryDone, 1
- input, subqueryDone [...] and our internal batching did cut a batch like
- this: [<...>, input (A)] | [subqueryDone, input (B), subqueryDone, <...>]
- than Nested on input (B) was not executed. As soon as we have more than 1
- input per Outer, or a different cutting position, all was good.
+* Fixed issue BTS-539 "Unsynchronized query kill while it's being finalized in
+ another thread was uncovered through `test-kill.js` of `communication_ssl`
+ suite". Fixed possible (but unlikely) crash when killing an AQL query.
-* Fixed SEARCH-261: Fix possible race between file creation and directory
- cleaner (ArangoSearch).
+* Fixed various problems in GEO_INTERSECTS: wrong results, not implemented cases
+ and numerically unstable behaviour. In particular, the case of the
+ intersection of two polygons in which one is an S2LngLatRect is fixed
+ (BTS-475).
-* Fixed SEARCH-260: Fix invalid sorting order of stored features in presence of
- primary sort (ArangoSearch).
+* Fixed ES-867 and ES-922: removed eslint from NPM packages descriptions and
+ updated netmask package to non-vulnerable version.
-* APM-187: The "Rebalance Shards" button now is displayed in a new tab, and it
- is displayed for any database in cluster mode. There is also a new flag for
- arangod, `--cluster.max-number-of-move-shards` (default = 10), which limits
- the amount of move shards operations each time the button is clicked to
- rebalance shards. When the button is clicked, the number of move shards
- operations scheduled is shown, or that no operation was scheduled if the flag
- `--cluster.max-number-of-move-shards` has a value of 0.
+* Web UI: Fixes the loading of map tiles which are being used to display the
+ query output based on a world map when using SSL encryption. This led to some
+ world map tiles not being displayed correctly (OASIS-590).
-* BTS-623: The audit log messages, when written, were not showing the log level
- of the message, as in the example:
- `2021-10-21T02:28:42Z | hostname | audit-authentication | n/a | _system |
- 127.0.0.1:52490 | n/a | credentials missing | /_admin/aardvark/favicon.ico`
- With the new flag `--audit.display-log-level`, the level of the audit log
- message can be displayed in the log text. When set to true, this behavior is
- expected, as in the example:
- `2021-10-21T02:28:42Z | DEBUG | hostname | audit-authentication | n/a |
- _system | 127.0.0.1:52490 | n/a | credentials missing |
- /_admin/aardvark/favicon.ico`
- The default value for the flag is false for compatibility with former
- versions. When this flag is not used, it is considered to have the default
- behavior (that is, set to false).
+* Timely update of the database server list on health checks. Fixes BTS-505.
-* Upgrade bundled version of RocksDB to 6.26.
+* Updated JavaScript dependencies, including breaking changes to non-public
+ modules. We recommend always bundling your own copy of third-party modules,
+ even ones listed as public.
-* Change error message for queries that use too much memory from "resource
- limit exceeded" to "query would use more memory than allowed".
+ - accepts: 1.3.5 -> 1.3.7
+ - ansi_up: 4.0.3 -> 5.0.1
+ - content-type: (added) -> 1.0.4
+ - error-stack-parser: 2.0.2 -> 2.0.6
+ - highlight.js: 9.15.6 -> 10.7.3
+ - http-errors: 1.7.2 -> 1.8.0
+ - iconv-lite: 0.4.24 -> 0.6.3
+ - js-yaml: 3.13.1 -> 3.14.1
+ - lodash: 4.17.13 -> 4.17.21
+ - marked: 0.6.2 -> removed
+ - mime-types: 2.1.22 -> 2.1.31
+ - mocha: 6.1.3 -> 6.2.3
+ - netmask: 1.0.6 -> 2.0.2
+ - qs: 6.7.0 -> 6.10.1
+ - range-parser: 1.2.0 -> 1.2.1
+ - semver: 6.0.0 -> 7.3.5
+ - sinon: 1.17.6 -> 1.17.7
+ - timezone: 1.0.22 -> 1.0.23
+ - type-is: 1.6.16 -> 1.6.18
+ - underscore: 1.9.1 -> 1.13.1
+ - xmldom: 0.1.27 -> 0.6.0
-* Single server license output checking fixed.
+* Updated ArangoDB Starter to 0.15.1.
-* Added enterprise license feature visibility for arangosh.
+* Fix BTS-453: Download of a HotBackup from remote source doesn't work on macOS.
-* When using Indexes within traversals (e.g. [_from, date]) and filter based
- on a function (e.g. FILTER path.edges[0].date <= DATE_ADD(@now, 5, "day"))
- this function was passed through to the index. The index cannot evaluate this
- function and returned incorrect results. Now all functions are evaluated
- before looking into the index. (Fixes BTS-407)
+* Web UI: Fixes a logical error which occurred after re-visiting the logs view,
+ which led to not displaying the logs view and its entries correctly
+ (BTS-507).
-* arangorestore: Fix the order (regarding distributeShardsLike) in which
- collections are being created during restore, which could result in an error
- and make manual intervention necessary.
+* Raised the versions of the node modules `node-sass` and `sass-loader` to be
+ able to build the Web UI with Node v16+.
-* Updated ArangoDB Starter to 0.15.3.
+* Suppress repeated warnings when setting LDAP options which turn out to be
+ unsupported on the target system. This avoids logging the same warnings
+ repeatedly.
-* Old license mechanism for docker containers removed.
+* Make `--javascript.copy-installation` also copy the `node_modules`
+ subdirectory. This is required so we have a full copy of the JavaScript
+ dependencies and not one that excludes some infrequently changed modules.
+ In addition, file copying now intentionally excludes .map files as they are
+ not needed.
-* Add license information to the web UI.
+* Fixed a bug where coordinators handled plan changes for databases in the
+ heartbeat thread in the wrong order. Databases could be listed, but not used.
-* Fix local cluster script for deprecated authentication option.
+* Include K_SHORTEST_PATHS and SHORTEST_PATH execution nodes in AQL query memory
+ usage accounting. The memory used by these execution node types was previously
+ not tracked against the configured query memory limit (BTS-411).
-* Fix caching of collection counts and index selectivity estimates in cluster.
- The cache values expired too early in previous versions, making the cache
- ineffective.
+* Lower log level to warning when taking over shard leadership finds that the
+ agency Current entry is missing the server taking over.
-* Add better error message for replication request failures in case requests
- are retried.
+* Fixed BTS-408: treat positive or negative signed numbers as constants
+ immediately during AQL query parsing.
+ Previously, a value of `-1` was parsed initially as `unary minus(value(1))`,
+ which was not treated in the same way as a constant value `value(-1)`.
+ The value was later optimized to just `value(-1)`, but this only happened
+ during constant-folding after parsing. Any operations that referred to the
+ unfolded values during parsing thus did not treat such values as constants.
-* Make background statistics gathering more efficient by avoiding one AQL query
- every 10 seconds that fetched the most recent stats entry. Instead, buffer
- the entry in value after we have written it. Also spread out the statistics
- calls by different servers more randomly, so that request spikes are avoided
- for cluster with many coordinators that used to run their statistics queries
- at about the same time when the instances were started simultaneously.
+* Fix startup issues with encryption-at-rest enabled when there were empty (0
+ byte) RocksDB WAL files present. Such empty files caused RocksDB to abort the
+ startup, reporting corruption. However, empty WAL files are possible in case
+ of server crashes etc. Now, if a WAL file is completely empty, there will be
+ no attempt to read the encryption meta data from it, so the startup succeeds
+ (BTS-392).
-* Fixed compilation and linking when using glibc 2.34.
+* Fixes a bug in the maintenance's error-handling code. A shard error would
+ result in log messages like
+ ```
+ WARNING [ceb1a] {maintenance} caught exception in Maintenance shards error
+ reporting: Expecting Object
+ ERROR [c9a75] {maintenance} Error reporting in current: Expecting Object
+ ```
+ and also prevent the maintenance from reporting the current state to the
+ agency, which in turn can prevent cluster-wide progress of various actions.
-* Fuerte: don't fall back to identity encoding in case of unknown encoding.
+* APM-107: Added metric "rocksdb_read_only" to determine whether RocksDB is
+ currently in read-only mode due to a background error. The metric will have a
+ value of "1" if RocksDB is in read-only mode and "0" if RocksDB is in normal
+ operations mode. If the metric value is "1" it means all writes into RocksDB
+ will fail, so inspecting the logfiles and acting on the actual error situation
+ is required.
+
+* Fix numeric overflow in AQL WINDOW node cost estimation if the number of
+ preceding rows was set to `unbounded`.
-* BTS-616: Added support for client tools arangoimport, arangodump,
- arangorestore, arangobench and arangoexport to handle HTTP responses when
- they are not in JSON format (e.g. text/html).
+* Added a retry loop for arangorestore during the initial connection phase. The
+ number of retries defaults to 3 and can be configured using
+ `--initial-connect-retries` (BTS-491).
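+
+ For example (illustrative retry count):
+
+ arangorestore --input-directory dump --initial-connect-retries 10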
-* Updated ArangoDB Starter to 0.15.3-preview-1.
+* Add following term ids, which prevents old synchronous replication requests
+ from being accepted after a follower was dropped and has gotten in sync again.
+ This makes the chaos tests which delay synchronous replication requests more
+ reliable and prevents inconsistent shard replicas under bad network conditions.
-* Remove dysfunctional CMake variable `USE_OPTIMIZE_FOR_ARCHITECTURE`.
- Architecture detection and architecture-specific optimization is now
- performed unconditionally. It is still possible to inject a certain
- target architecture by setting the CMake variable `TARGET_ARCHITECTURE`.
+* Enable process metrics on agent instances by default. Previously, some metrics
+ (including the metrics starting with `arangodb_process` prefix) were not
+ returned by agent instances.
-* Stop calling unnecessary `/_api/wal/open-transactions` REST API before
- starting the continuous synchronization in active fail and single server
- replication. This request is unnecessary with the RocksDB storage
- engine.
+* Add prefix parameter to LEVENSHTEIN_MATCH function in ArangoSearch
+ (DEVSUP-753).
+
+* Bug-fix: Pregel WCC algorithm could yield incorrect results if a part of the
+ connected component was only attached via OUTBOUND edges.
+ The underlying algorithm is now modified to properly retain INBOUND edges for
+ the runtime of the execution. This uses more RAM for the algorithm but
+ guarantees correctness.
+
+* Fix serialization of query shutdown error code when sending it to DB servers.
+ The error code is numeric, but it was sent to the `/_api/aql/finish` API as a
+ string. This led to the DB servers always assuming the default error code
+ TRI_ERROR_INTERNAL (error code 4). This was not a problem for normal query
+ operations, but it could have led to warnings being logged stating "please
+ contact ArangoDB support". Now the actual error code is used during query
+ shutdown.
-* Make background calculation of SHA hashes for RocksDB .sst files less
- intrusive. The previous implementation frequently iterated over all files
- in the database directory to check if it needed to ad-hoc calculate the
- SHA hashes for .sst files it previously missed. The procedure it used
- was to iterate over all files in the database directory and check if there
- were matching pairs of .sst files and .sha files. This was expensive,
- because a full directory iteration was performed and a lot of temporary
- strings were created for filenames and used in comparisons. This was
- especially expensive for larger deployments with lots of .sst files.
- The expensive iteration of files in the directory is now happening less
- frequently, and will not be as expensive as before if it runs.
+* Fix display of running and slow queries in web UI when there are multiple
+ coordinators. Previously, the display order of queries was undefined, which
+ could lead to queries from one coordinator being displayed on top once and
+ then the queries from another. That made using this UI harder than necessary.
-* Fix a potential overwhelm situation on DB servers that can lead to no further
- tasks being pulled from a DB servers queue even though there would still be
- processing capacity and idle threads available.
+ Now queries are sorted for display, according to their query IDs.
-* Fixed ES-881: Fixed LDAP global options. This needs to use the first active
- provider, not just the first provider and it should be globally disabled.
+* Fixed an issue in index selection, when the selectivity estimate of another
+ prefix index was used without checking if the other index covered the FILTER
+ condition.
-* Web UI: Fixes the loading of map tiles which are being used to display the
- query output based on a world map when using SSL encryption. This lead to not
- displaying some world map tiles correctly (OASIS-590).
+ For example, given the following indexes:
-* Web UI - Added missing HTML escaping inside the file upload plugin used in the
- section of deploying a new Foxx application when uploading a zip file.
+ - index 1: ["e", "a", "b", "c"]
+ - index 2: ["e", "a", "b"]
+ - index 3: ["d", "e", "f", "g"]
-* Now, arangoimport supports merging of attributes. When importing data from a
- file into a collection, a document attribute can be comprised of merging
- attributes from the file into it, with separators and other literal strings.
- The new document attribute will result in the concatenation of the literal
- strings, the values of the attributes and the separators, as in the example:
+ and the FILTER condition `d == 1 && e == 2 && f == 3`, then the best index to
+ pick would be index 3. However, the optimizer may have picked index 1 here.
+ All indexes are valid candidates for this FILTER condition, but none of the
+ indexes covered all attributes of the FILTER condition. So the index
+ selectivity estimates were (correctly) not used directly to determine the best
+ index.
+ The actual bug happened when comparing the usefulness of the candidate
+ indexes: even though the selectivity estimate for index 1 could not be used
+ directly, there existed a prefix index of index 1 (index 2), and the
+ selectivity estimate of that prefix index was taken _without_ checking that
+ the prefix index actually satisfied the FILTER condition fully.
+ The prefix index's selectivity estimate must only be used if it fully
+ satisfies the FILTER condition, which was not the case here.
- arangoimport --merge-attributes fullName=[firstName]:[lastName]
+* Fixed DEVSUP-799: unique vertex getter may point to invalid memory after being
+ reset, resulting in undefined behavior for traversals returning unique
+ vertices from inner FOR loops.
-* Do not use an edge index for range queries, i.e. with the comparison
- operators `>`, `>=`, `<` or `<=`, but only for equality lookups using the
- `==` and `IN` comparison operators.
- The edge index is not fully ordered, so while using it for range queries
- may produce _some_ documents, it is possible that other documents from the
- range would be skipped.
+* Improve usability of hidden options: `--help` mentions that these exist and
+ how to display them.
-* Do not rename the arangod process to "arangod [shutting down]" during the
- server shutdown. The renaming can cause issues with tools that look for
- the exact process name "arangod".
+* Fixed ES-863: reloading of users within the Cluster.
+ If a Coordinator is asked to reload its users (e.g. by the UserManager in
+ Foxx; this is also possible via an API, but that API is internal and on
+ purpose not documented, so it is unlikely to be used), then, concurrently
+ with user management updates, there is a chance that the reload is not
+ correctly performed on this coordinator. It may have missed the last update
+ locally, causing one user to have an older state. It will be fixed on the
+ next modification of any other users/permissions. Unfortunately this bug can
+ cascade, and when hit again, the coordinator can be off by two updates.
+ In DC2DC this situation is more likely to happen on the target datacenter,
+ causing this datacenter to have other users/permissions than the source one.
-* Remove internal AQL query option `readCompleteInput` that controled if
- all input for a modification operation (UPDATE / REPLACE / REMOVE) are
- read into memory first. This was a necessity with the MMFiles storage
- engine in cases when a query read from a collection and wrote into it
- in the same query afterwards. With the RocksDB engine and its snapshots,
- we never need to read the entire input into memory first.
+* Fix BTS-446: When finding a not yet fully initialized agency, do not
+ exit immediately with a fatal error. Keep trying for (very generous) 5 minutes.
-* Fix windows installer PATH manipulation issue by replacing the NSIS plugin
- (BTS-176).
+* Only build actually used subattributes of traversal paths, i.e. "vertices",
+ "edges" or "weights". If any of the path's subcomponents is not used, the
+ optimizer will try to save these components from being built for each result
+ item.
-* Fixed counting of all read transaction as aborted. Added a new
- metric to count read transactions.
+* Backport bugfix from the upstream RocksDB repository for calculating the free
+ disk space of the database directory. Before the bugfix, RocksDB could
+ overestimate the amount of free space when the arangod process was run as a
+ non-privileged user.
-* Fixed potential issues with revision trees and document counters getting out
- of sync with the underlying collection data.
+* Fixed a problem with active failover, where a failover could take 5 mins
+ because the follower was caught in a bad state during replication. This fixes
+ BTS-425.
-* Fix race in RocksDB throttle listener, when it was getting started lazily
- during server shutdown.
+* Add soft coordinator shutdown: This is a new option `soft=true` for the
+ DELETE /_admin/shutdown API. It only has a meaning for coordinators and is
+ otherwise ignored. A number of things are allowed to finish but no new things
+ are allowed when in soft coordinator shutdown:
+ - AQL cursors
+ - transactions
+ - asynchronous operations
+ - Pregel runs
+ Once all of the ongoing operations of these have finished and all requests on
+ the low priority queue have been executed, the coordinator shuts down the
+ normal way. This is supposed to make a coordinator restart less intrusive for
+ clients.
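+
+ A sketch of initiating a soft shutdown (endpoint and authentication are
+ illustrative/omitted):
+
+ curl -X DELETE "http://localhost:8529/_admin/shutdown?soft=true"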
-* Extended the Views web UI by letting it capture View properties that are
- immutable once created.
+* Fix BTS-398: Cannot force index hint for primary index if FILTER has multiple
+ OR conditions that require different indexes.
-* Fixed BTS-602 by not starting license feature is upgrade mode.
+* Bug-fix (macOS): on macOS there is an upper bound for file descriptors
+ defined by the system, which is independent of the settings in `ulimit -n`.
+ If the hard limit is set above this upper bound value, ArangoDB tries to
+ raise the soft limit to the hard limit on boot. This will fail due to the
+ system limit, which could cause ArangoDB to not start, asking you to lower
+ the minimum of required file descriptors. The system-set upper bound is now
+ honored, and the soft limit will be set to either the hard limit or the
+ system limit, whichever is lower.
-* APM-173: Now, arangobench, arangodump and arangorestore support multiple
- coordinators, so the flag `--server.endpoint` can be used multiple times,
- as in the example below:
+* Implemented APM-86: add query option `fillBlockCache` to control population of
+ the RocksDB block cache with data read by the query. The default value for
+ this per-query option is `true`, which mimics the previous behavior.
+ Setting the option to `false` avoids storing the data read by the query in
+ RocksDB's block cache, for queries that are known to read only semi-relevant
+ or unimportant data.
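+
+ For example, via the HTTP cursor API (POST /_api/cursor; the query is
+ illustrative):
+
+ { "query": "FOR d IN coll RETURN d", "options": { "fillBlockCache": false } }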
- arangobench \
- --server.endpoint tcp://[::1]::8529 \
- --server.endpoint tcp://[::1]::8530 \
- --server.endpoint tcp://[::1]::8531
- This does not compromise the use of the other client tools, which preserve
- the behavior of having one coordinator.
+v3.8.0 (2021-07-14)
+-------------------
-* The server now has two flags to control the escaping control and Unicode
- characters in the log. The flag `--log.escape` is now deprecated and, instead,
- the new flags `--log.escape-control-chars` and `--log.escape-unicode-chars`
- should be used.
+* Always remove blocker object for revision trees in case of replication
+ failures.
- - `--log.escape-control-chars`: this flag applies to the control characters,
- which have hex code below `\x20`, and also the character DEL, with hex code
- of `\x7f`. When its value is set to false, the control character will be
- retained, and its actual value will be displayed when it is a visible
- character, or a space ` ` character will be displayed if it is not a
- visible character. The same will happen to `DEL` character (code `\xF7`),
- even though it is not a control character, because it is not visible. For
- example, control character `\n` is visible, so a `\n` will be displayed in
- the log, and control character `BEL` is not visible, so a space ` ` would
- be displayed. When its value is set to true, the hex code for the character
- is displayed, for example, `BEL` character would be displayed as its hex
- code, `\x07`.
- The default value for this flag is `true` for compatibility with
- previous versions.
+* Fix invalid assertion for insert/removal buffers positioning and internals of
+ `hasBlockerUpTo` function.
- - `--log.escape-unicode-chars`: when its value is set to false, the unicode
- character will be retained, and its actual value will be displayed. For
- example, `犬` will be displayed as `犬`. When its value is set to true,
- the character is escaped, and the hex code for the character is displayed.
- For example, `犬` would be displayed as its hex code, `\u72AC`.
- The default value for this flag is `false` for compatibility with
- previous versions.
+* Updated ArangoDB Starter to 0.15.0-1.
-* Fixed BTS-582: ArangoDB client EXE package for Windows has incorrect metadata.
+* Updated arangosync to 2.4.0.
-* Fixed BTS-575: Windows EXE installer doesn't replace service during upgrade in
- silent (non-UI) mode.
+* For cluster AQL queries, let the coordinator determine the query id to be used
+ on DB servers. This allows the coordinator to roll back AQL query setup
+ requests via the query id. Previously, the DB servers each generated a local
+ query id and returned it to the coordinator, who would then keep track of them
+ for later use. The problem with this was that if an AQL query setup request
+ timed out, the coordinator had no way to roll it back.
-* APM-121: allow the UPSERT query to have indexHint as an extra parameter for
- OPTIONS. It will be used as a hint by the inner FOR loop that is performed
- as part of the UPSERT query, and would help in cases such as UPSERT not
- picking the best index automatically for lookup.
+ In addition, if setting up a query takes long on a DB server so that the
+ coordinator sends a rollback request, there are some measures in place for the
+ unlikely case in which the rollback request overtakes the setup request. In
+ this case, the rollback request will not find a query yet, but will register a
+ tombstone for it. Once the query gets registered by the delayed request, it
+ will (correctly) fail because of the tombstone.
-* Fix issue #14819: Query: AQL: missing variable # for node #... location
- RestCursorHandler.cpp.
+* Removed a special case for empty document update operations (i.e. update
+ requests in which no attributes were specified to be updated), which were
+ handled in a special way without performing any writes. The problem was that
+ such updates did not update the local state, but could have been replicated to
+ followers. This special empty update case is now removed, and update
+ operations that do not update any attributes are treated as normal write
+ operations both locally and in the replication.
-* Added enterprise licensing support including (only for Enterprise version):
- - additional API endpoint `_admin/license(GET/PUT)?force=true `
- - arangosh functions: `setLicense()`, `getLicense()`
- - new error codes and metrics support
+* Fix partial cleanup of internal write batches for multi-document operations of
+ which one or multiple failed. The previous implementation had an unreleased
+ performance optimization that wouldn't clean up the write batch completely.
+ That could have led to a wrong sequence of events being accumulated in the
+ write batch, which may have confused the WAL tailing API later. This bug was
+ only present in 3.8 RCs.
-* Fix issue #14807: Fix crash during optimization of certain AQL queries during
- the remove-collect-variables optimizer rule, when a COLLECT node without output
- variables (this includes RETURN DISTINCT) occurred in the plan.
+* Fix some occurrences in which Merkle trees could silently apply the same
+ change multiple times, which led to data drift between the Merkle tree and the
+ underlying collection's data. This bug was only present in 3.8, not in any
+ earlier versions.
-* Update iresearch library to the upstream. Fixed TSan/ASan detected issues.
+* On a failure during synchronous replication, do not remove the failed follower
+ from the list of known servers in the transaction.
+ If we do, we would not be able to send the commit/abort to the follower later.
+ However, we still need to send the commit/abort to the follower at transaction
+ end, because the follower may be responsible for _other_ shards as well.
-* Added new ArangoSearch analyzer type 'collation'.
+ This change also removes dangling transactions that could stay around on
+ followers until they expired after the transaction idle timeout (180 seconds),
+ and that could prevent a follower from getting back in sync during this
+ period.
-* Add basic overload control to arangod.
- This change adds the `x-arango-queue-time-seconds` header to all responses
- sent by arangod. This header contains the most recent request dequeuing time
- (in seconds) as tracked by the scheduler. This value can be used by client
- applications and drivers to detect server overload and react on it.
- The new startup option `--http.return-queue-time-header` can be set to
- `false` to suppress these headers in responses sent by arangod.
+* Added more context to "dropping follower" messages, so it is easier to analyze
+ what exactly went wrong.
- In addition, client applications and drivers can optionally augment their
- requests sent to arangod with a header of the same name. If set, the
- value of the header should contain the maximum queuing time (in seconds)
- that the client is willing to accept. If the header is set in an incoming
- request, arangod will compare the current dequeuing time from its scheduler
- with the maximum queue time value contained in the request. If the current
- dequeuing time exceeds the value set in the header, arangod will reject the
- request and return HTTP 412 (precondition failed) with the new error code
- 21004 (queue time violated).
+* Fixed invalid shard synchronization for documents not added via INSERT with
+ `overwriteMode` set to `ignore`. In this case, if a document with the given
+ key already exists, it is not changed on the leader (i.e. no write happens on
+ the leader). However, a write was replicated to the follower, which was wrong.
+ This write is now suppressed, which can only make such insert operations
+ faster.
- There is also a new metric `arangodb_scheduler_queue_time_violations_total`
- that is increased whenever a request is dropped because of the requested
- queue time not satisfiable.
+* Fix DEVSUP-753: now it is safe to call visit on an exhausted disjunction
+ iterator.
-* Fixed a bug for array indexes on update of documents. See BTS-548.
+* Slightly improve specific warning messages for better readability.
-* Prevent some possible deadlocks under high load regarding transactions and
- document operations, and also improve performance slightly.
+* Fix URL request parsing in case data is handed in in small chunks.
+ Previously the URL could be cut off if the chunk size was smaller than the URL
+ size.
-* Hide help text fragment about VST connection strings in client tools that
- do not support VST.
+* Fix BTS-430: Added missing explain output about indexes for SHORTEST_PATH,
+ K_SHORTEST_PATHS and K_PATHS.
-* Added REST API endpoint `/_admin/debug/failat/all` to retrieve the list
- of currently enabled failure points. This API is available only if
- failure testing is enabled, but not in production.
+* Added a check to utils/generateAllMetricsDocumentation.py verifying that the
+ file name and the value of the name attribute are the same in the metrics
+ documentation snippets. Corrected a few such names.
-* APM-60: optionally allow special characters and Unicode characters in
- database names.
+v3.8.0-rc.2 (2021-06-07)
+------------------------
- This feature allows toggling the naming convention for database names
- from the previous strict mode, which only allowed selected ASCII characters
- in database names, to an extended, more relaxed mode. The extended mode
- allows additional ASCII characters in database names as well as non-ASCII
- UTF-8 characters.
- The extended mode can be enabled by setting the new startup option
- `--database.extended-names-databases` to true. It is turned off by default
- and requires an explicit opt-in, simply because some drivers and client
- applications may not be ready for it yet. The arangod server, the
- ArangoDB web interface and the following bundled client tools are prepared
- and ready for using the extended database names:
- - arangobench
- - arangodump
- - arangoexport
- - arangoimport
- - arangorestore
- - arangosh
- More tools and the drivers shipped by ArangoDB will be added to the list in
- the future.
+* Updated arangosync to 2.3.0.
- Please note that the extended names for databases should not be turned on
- during upgrades from previous versions, but only once the upgrade has been
- completed successfully. In addition, the extended names should not be used
- in environments that require extracting data into a previous version of
- ArangoDB, or when database dumps may be restored into a previous version of
- ArangoDB. This is because older versions may not be able to handle the
- extended database names. Finally, it should not be turned on in environments
- in which drivers are in use that haven't been prepared to work with the
- extended naming convention.
+* Fix BTS-456, BTS-457: Make geo intersection between point and rectangle
+ symmetrical.
- Warning: turning on the `--database.extended-names-databases` option for a
- deployment requires it to stay enabled permanently, i.e. it can be changed
- from `false` to `true` but not back. When enabling it, it is also required
- to do this consistently on all coordinators and DB servers.
+* Fix BTS-450: RandomGenerator caught an assertion during value generation
+ within the `dump_maskings` test suite. Ensure correct conversion between
+ 64-bit and 32-bit values.
- The extended names for databases will be enabled by default in one of the
- future releases of ArangoDB, once enough drivers and other client tools
- have had the chance to adapt.
+* Added check for data type compatibility between members of a pipeline
+ ArangoSearch analyzer.
- Naming conventions for collections, views, analyzers, and document keys
- (`_key` values) are not affected by this feature and will remain as in
- previous versions of ArangoDB.
-* Prevent stealing of values from AQL const value registers. This fixes an
- issue for queries that produce constant results (known at query compile time)
- when the queries are executed directly on a DB server in a cluster (which is
- not supported, but may happen for troubleshooting).
+v3.8.0-rc.1 (2021-05-26)
+------------------------
-* Fixed BTS-562: reduce-extraction-to-projection optimization returns null for
- one attribute if nested attributes are named the same.
+* Fix BTS-442: a query with fullCount on a sharded collection hangs indefinitely
+ when LIMIT is less than the number of available documents.
-* Add `--datatype` startup option to arangoimport, in order to hard-code the
- datatype (null/boolean/number/string) for certain attributes in the CSV/TSV import.
- For example, given the following input file:
+* Removed unused documentation snippets (non-Rest DocuBlocks) as well as the
+ documentation about the long deprecated features Simple Queries and
+ JavaScript-based graph traversal. Also removed the descriptions of the JS API
+ methods `collection.range()`, `collection.closedRange()`,
+ `cursor.setBatchSize()` and `cursor.getBatchSize()`. All the functionality is
+ superseded by AQL.
- key,price,weight,fk
- 123456,200,5,585852
- 864924,120,10,9998242
- 9949,70,11.5,499494
- 6939926,2130,5,96962612
+* Fixed ES-881: ensure that LDAP options for async, referrals and restart set
+ the off value correctly. Otherwise, this can result in an "operations error".
- When invoking arangoimport with the startup options
+* Improve Merkle tree memory usage and allow left-growth of trees, too. This can
+ help with insertions of arbitrarily old data.
- --datatype key=string
- --datatype price=number
- --datatype weight=number
- --datatype fk=string
+* Added metric `arangodb_sync_rebuilds_total` to track the full rebuild of a
+ shard follower after too many subsequent shard synchronization failures. This
+ metric should always have a value of 0. Everything else indicates a serious
+ problem.
- it will turn the numeric-looking values in "key" into strings (so that they
- can be used in the `_key` attribute), but treat the attributes "price" and
- "weight" as numbers. The values in attribute "fk" finally will be treated as
- strings again (potentially because they are used for linking to other "_key"
- values).
+* Fixed BTS-422: SingleRemoteModification in AQL behaves different.
-* Avoid the acquisition of a recursive read lock on server shutdown, which
- could in theory lead to shutdown hangs at least if a concurrent thread is
- trying to modify the list of collections (very unlikely and never observed
- until now).
+ This disables the optimizer rule `optimize-cluster-single-document-operations`
+ for array inputs, e.g.
-* Fixed display of unicode characters in Windows console.
+ INSERT [...] INTO collection
+ REMOVE [...] IN collection
-* Fixed issue BTS-531 "Error happens during EXE package installation if
- non-ASCII characters are present in target path".
+ In these cases, the optimization is not applied, and the normal insert/
+ update/replace/remove behavior is executed, which will fail because of an
+ array being used as input.
-* Fix active failover, so that the new host actually has working
- Foxx services. (BTS-558).
+* Fix BTS-409: return error 1948 when a negative edge weight was detected or
+ used as the default weight in a SHORTEST_PATH or K_SHORTEST_PATHS traversal.
-* Fixed issue #14720: Bulk import ignores onDuplicate in 3.8.0.
- The "onDuplicate" attribute was ignored by the `/_api/import` REST API when
- not specifying the "type" URL parameter.
+* Fixed issue BTS-424: fix invalid input row handling in WINDOW execution.
-* Updated OpenSSL to 1.1.1l and OpenLDAP to 2.4.59.
+* Added 2 options to allow HTTP redirection customization for root ("/") call of
+ HTTP API:
-* APM-70: allow PRUNE condition to be stored in a variable.
+ `--http.permanently-redirect-root`: if true (default), use a permanent
+ redirection (use HTTP 301 code), if false fall back to temporary redirection
+ (use HTTP 302 code);
+ `--http.redirect-root-to`: redirect of root URL to a specified path (redirects
+ to "/_admin/aardvark/index.html" if not set (default)).
- This feature allows the PRUNE condition to be stored in a variable, and
- this variable can be used as a condition for some other statement, such
- as FILTER.
+* Fixed DEVSUP-764 (SEARCH-7): inconsistent BM25 scoring for LEVENSHTEIN_MATCH
+ function.
-* Allow startup of arangod with an existing database directory that was missing
- the ZkdIndex column family.
+* Rename two metrics with previously Prometheus-incompatible names:
+ - `arangodb_aql_global_query_memory_limit_reached` was renamed to
+ `arangodb_aql_global_query_memory_limit_reached_total`
+ - `arangodb_aql_local_query_memory_limit_reached` was renamed to
+ `arangodb_aql_local_query_memory_limit_reached_total`
-* Truncate must not trigger intermediate commits while in a streaming
- transaction, because that would be against the assumption that
- streaming transactions never do intermediate commits.
+ These metrics were introduced in 3.8, so there is no migration needed for
+ them.
-* Added ArangoSearch condition optimization: STARTS_WITH is merged
- with LEVENSHTEIN_MATCH if used in the same AND node and field name and prefix
- matches.
+* Return error 1948 when a negative edge was detected during a weighted
+ traversal or was used as default weight.
-* Hybrid (Disjoint) SmartGraphs (Enterprise Edition):
- SmartGraphs have been extended with a new option to create Hybrid SmartGraphs.
- Hybrid SmartGraphs are capable of using SatelliteCollections within their
- graph definition. You can now select some VertexCollections to be satellites,
- and therefore available on all DBServers. The SmartGraph can make use of those
- to collections to increase the traversal performance by larger local
- components.
+* Fixes BTS-417. In some cases an index did not consider both bounds (lower and
+ upper) for a closed range scan if both bounds were expressed using the same
+ operator, e.g., `FILTER doc.beginDate >= lb AND ub >= doc.beginDate`.
-* Added multidimensional indexes which can be used to efficiently intersect
- multiple range queries. They are currently limited to IEEE-754 double values.
- Given documents of the form {x: 12.9, y: -284.0, z: 0.02} one can define a
- multidimensional index using the new type 'zkd' on the fields ["x", "y", "z"].
+* Fix various issues related to the new WINDOW operation (see BTS-402)
+ - Improved explain output for ISO 8601 duration strings and fixed missing week
+ component.
+ - Improved validation of input data and error messages.
+ - Prevent FILTERs from being moved beyond a WINDOW.
- The AQL optimizer will then consider this index when doing queries on multiple
- ranges, for example:
+* Fixes BTS-416. During shutdown, a shard leader wrongly reported that it could
+ not drop a shard follower instead of correctly indicating the shutdown as the
+ reason.
- FOR p IN points
- FILTER x0 <= p.x && p.x <= x1
- FILTER y0 <= p.y && p.y <= y1
- FILTER z0 <= p.z && p.z <= z1
- RETURN p
+* Fixes Pregel lifetime management. Previously, shutting down the server while
+ a Pregel job was still running could result in a segfault or a hang during
+ shutdown.
- The index implements the relation <=, == and >= natively. Strict relations are
- emulated using post filtering. Ranges can be unbounded on one or both sides.
+* Improve error reporting for Merkle tree operations and improve memory usage
+ for unused trees by hibernating them. In addition, add some backoff to shard
+ synchronization in case there are repeated sync failures for the same shard.
-* No runtime limits for shard move and server cleanout jobs, instead
- possibility to cancel them.
+* Fixed BTS-403: Hot restores must also clear relevant `Current` keys. The
+ overriding of the `Plan` entries needs to be reflected in `Current` to avoid
+ conflicts in maintenance jobs.
-* Fix cluster-internal network protocol to HTTP/1 for now. Any other protocol
- selected via the startup option `--network.protocol` will automatically be
- switched to HTTP/1. The startup option `--network.protocol` is now deprecated
- and hidden by default. It will be removed in a future version of arangod.
- The rationale for this change is to move towards a single protocol for
- cluster-internal communication instead of 3 different ones.
+* Log a proper message if an unexpected state is encountered when taking over
+ shard leadership. In addition, make the change to the internal followerinfo
+ state atomic so that it cannot be changed only partially.
-* Disable RTTI when compiling Snappy. RTTI used to be disabled previously,
- up until some Merkle tree improvement PR was merged about one month ago,
- which turned on RTTI for compiling Snappy.
+* Fixed two bugs in fuerte with HTTP/2 and VST connections.
+ One could lead to ordered timeouts not being honoured. The other could lead to
+ an ordered callback being called multiple times.
-* (EE only) Bug-fix: If you created a ArangoSearch view on Satellite-
- Collections only and then join with a collection only having a single
- shard the cluster-one-shard-rule was falsely applied and could lead to
- empty view results. The Rule will now detect the situation properly,
- and not trigger.
+* Improve "Shards" view in web UI so that the shards of individual collections
+ can be expanded and collapsed without affecting the display of any other
+ shards. Also added a "Toggle all" button to the web UI to expand/collapse the
+ shards for all collections.
-* (EE only) If you have a query using only satellite collections,
- now the cluster-one-shard-rule can be applied to improve
- query performance.
+* Improve exception safety for maintenance thread and shard unlock operations.
-* (Enterprise Edition only): added query option `forceOneShardAttributeValue`
- to explicitly set a shard key value that will be used during query snippet
- distribution to limit the query to a specific server in the cluster.
+* Fixed issue #14122: when the optimizer rule "inline-subqueries" is applied, it
+ may rename some variables in the query. The variable renaming was however not
+ carried out for traversal PRUNE conditions, so the PRUNE conditions could
+ still refer to obsolete variables, which would make the query fail with errors
+ such as
- This query option can be used in complex queries in case the query optimizer
- cannot automatically detect that the query can be limited to only a single
- server (e.g. in a disjoint smart graph case).
- When the option is set to the correct shard key value, the query will be
- limited to the target server determined by the shard key value. It thus
- requires that all collections in the query use the same distribution
- (i.e. `distributeShardsLike` attribute via disjoint SmartGraphs).
+ Query: AQL: missing variable ... for node ... while planning registers
+
+* Improve performance of batch CRUD operations (insert, update, replace, remove)
+ if some of the documents in the batch run into write-write conflicts.
+ Rolling back partial operations in case of a failure is very expensive because
+ it requires rebuilding RocksDB write batches for the transaction from scratch.
+ Rebuilding write batches takes time proportional to the number of operations
+ in the batch, and for larger batches the cost can be prohibitive.
+ Now write batches are not rolled back in some situations when this is not
+ required, so that in many cases running into a conflict no longer incurs such
+ a high overhead. There can still be issues when conflicts happen for index
+ entries, but a lot of previously problematic cases should now work better.
- Limiting the query to a single DB server is a performance optimization
- and may make complex queries run a lot faster because of the reduced
- setup and teardown costs and the reduced cluster-internal traffic during
- query execution.
+ This change also reduces the RocksDB-internal lock timeout for writing to keys
+ locked by another transaction from 1s to 1ms. This means that operations that
+ run into a write-write conflict may fail more quickly than before, instead of
+ waiting and retrying to acquire the locked key(s).
- If the option is set incorrectly, i.e. to a wrong shard key value, then
- the query may be shipped to a wrong DB server and may not return results
- (i.e. empty result set). It is thus the caller's responsibility to set
- the `forceOneShardAttributeValue` correctly or not use it.
+* Fix the response when the `isBuilding` flag could not be removed from a newly
+ created collection because an agency precondition failed. This can happen
+ when the server's own rebootId increment has triggered the Plan entry to be
+ removed.
- The `forceOneShardAttributeValue` option will only honor string values.
- All other values as well as the empty string will be ignored and treated
- as if the option is not set.
+* Fixed issue BTS-354: Assertion related to getCollection.
- If the option is set and the query satisfies the requirements for using
- the option, the query's execution plan will contain the "cluster-one-shard"
- optimizer rule.
+* Fix DEVSUP-749: Fix potential deadlock when executing concurrent view/link DDL
+ operations and index DDL operations on the same collection.
-* Updated ArangoDB Starter to 0.15.2.
+* When writing to a starting shard leader, respond with a specific 503 error.
+ Fixes BTS-390.
-* SEARCH-238: Improved SortNodes placement optimization in cluster so
- late materialization could cover more cases
+* Fixed a use-after-free bug in the connection pool.
-* Fix some memory leaks after adding optimization rule for AqlAnalyzer.
+* Show peak memory usage in AQL query profiling output.
-* Fix internal iterator states after intermediate commits in write
- transactions. Iterators could point to invalid data after an
- intermediate commit, producing undefined behavior.
+* Fixed various issues (mainly data races) reported by ThreadSanitizer.
-* Fix read-own-write behavior in different scenarios:
- - in some cases writes performed by an AQL query could be observed within
- the same query. This was not intended and is fixed now.
- - AQL queries in streaming transactions could observe their own writes in
- even more cases, which could potentially result in an endless loop when
- the query iterates over the same collection that it is inserting documents
- into.
- - UPSERT did not find documents inserted by a previous iteration if the
- subquery relied on a non-unique secondary index.
- - disabled intermediate commits for queries with UPSERTs, because
- intermediate commits can invalidate the internal read-own-write iterator
- required by UPSERT. Previously, UPSERTs that triggered intermediate
- commits could have produced unexpected results (e.g., previous inserts
- that have been committed might not be visible) or even crashes.
- To achieve the correct read-own-write behavior in streaming transactions, we
- sometimes have to copy the internal WriteBatch from the underlying RocksDB
- transaction. In particular, the copy is created whenever an AQL query with
- modification operations (INSERT/REMOVE/UPDATE/UPSERT/REPLACE) is executed in
- the streaming transaction. If there have not been any other modifications so
- far (queries/document operations), then the WriteBatch is empty and creating
- the copy is essentially a no-op. However, if the transaction already contains
- a lot of modifications, creating the WriteBatch copy might incur some
- overhead that can now lead to decreased performance.
+* Fixed a bug in error reporting when creating a database failed, which led to
+ a busy loop reporting this error to the agency.
-* Fix rare case of invalid data that could be inserted into the ArangoSearch
- index if several clients concurrently insert data and use custom analyzer
- with non-string return type.
+* Fixed the error response if the HTTP version is not 1.0 or 1.1 and if the
+ Content-Length is too large (> 1 GB).
-* Fix a rare shutdown race in RocksDBShaCalculatorThread.
+* Allow only guarded access to ActionBase::_result.
-* Added "Analyzers" view to web UI to let manage ArangoSearch analyzers
- creation.
+* Updated arangosync to 2.2.0.
-* Updated ArangoDB Starter to 0.15.2-preview-1.
+* Fixed the return value of sendRequestRetry if the server is shutting down.
-* Reduce internal priority of AQL execution. This prevents possible deadlocks
- with modification operations in a cluster and replicationFactor >= 2, and can
- also improve responsiveness under high load of AQL queries.
+* Fixed internal issue #798: in a rare case, when a remove request completely
+ cleaned a just-consolidated segment, the commit could be cancelled, and
+ documents removed from the collection could be left dangling in the
+ ArangoSearch index.
+ Also fixes ES-810 and BTS-279.
-* Updated arangosync to 2.6.0.
+* Fixed a small problem in fuerte which could lead to an assertion failure.
-* Fix a potential multi-threading issue in index creation on coordinators,
- when an agency callback was triggered at the same time the method
- `ensureIndexCoordinatorInner` was left.
+* Retry if an ex-leader can no longer drop a follower because it is no longer
+ leading.
-* Added protocol specific metrics: histogram about request body size, total
- number of HTTP/2 connections and total number of VST connections.
+* Fixed issue BTS-373: ASan detected possible heap-buffer-overflow at
+ arangodb::transaction::V8Context::exitV8Context().
-* Add pseudo log topic "all" to set the log levels for all log topics at
- once. For example, this can be used when starting a server with trace or
- debug logging enabled for all log topics, e.g.
+* Fix a potential buffer overflow in RestReplicationHandler.
- `--log.level all=debug`.
- `--log.level all=trace`.
+* Make the time-to-live (TTL) value of a streaming cursor only start counting
+ after the response has been sent to the client.
- This is very coarse and should only be used for such use cases.
+* Added option `--query-max-runtime` to arangoexport, in order to control the
+ maximum query runtime.
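+
+ For example, a sketch that caps the server-side query runtime at 30 seconds
+ (collection and directory names are made up):
+
+   arangoexport --collection mycol --output-directory export \
+     --query-max-runtime 30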
-* Change the default value for the `--threads` startup parameter of the
- following client tools from previously 2 to the maximum of 2 and the
- number of available CPU cores:
- - arangodump
- - arangoimport
- - arangorestore
+* Fix BTS-340: AQL expressions similar to `x < 3 || x` are no longer
+ erroneously reduced to `x < 3` by the optimizer rule remove-redundant-or.
-* Preselect "create index in background" option when creating indexes in
- the web UI. The "create index in background" option can be less intrusive
- because it allows other write operations on the collection to proceed.
+* Change arangosh client behavior:
+ - *_RAW methods will never add a `body` to HEAD responses
+ - *_RAW methods will now always return velocypack-typed responses in Buffers
+ - `--server.force-json` will now be applied as default, overrideable
+ by user code
-* Do not block a scheduler thread on the coordinator while an index is being
- created. Instead, start a background thread for the actual index
- fill-up work. The original thread can then be relinquished until the index
- is completely filled or index creation has failed.
- The default index creation timeout on coordinators has also been
- extended from 1 hour to 4 days, but it is still configurable via the
- startup parameter `--cluster.index-create-timeout` in case this is
- necessary.
-
-* Fix wrong assertion in fuerte and move it to where the TLA+ model says
- it should be. This fixes a unit test failure occurring on newer Macs
- with a certain clang version.
-
-* Remove old fixPrototypeChain agency migration, which was introduced in 3.2
- and is no longer necessary. This will make it impossible to upgrade
- directly from a version < 3.2 to a version >= 3.9, provided one has
- a chain of `distributeShardsLike` collections.
-* Added metrics for the number of errors and warnings logged:
- - `arangodb_logger_warnings_total`: total number of warnings (WARN messages)
- logged since server start.
- - `arangodb_logger_errors_total`: total number of errors (ERR messages)
- logged since server start.
+v3.8.0-beta.1 (2021-04-20)
+--------------------------
-* Added REST API `/_admin/support-info` to retrieve deployment information.
- As this API may reveal sensitive data about the deployment, it can only
- be accessed from inside the system database. In addition, there is a
- policy control startup option `--server.support-info-api` that
- determines if and to whom the API is made available. This option can
- have the following values:
- - `disabled`: support info API is disabled.
- - `jwt`: support info API can only be accessed via superuser JWT.
- - `hardened`: if `--server.harden` is set, the support info API can
- only be accessed via superuser JWT. Otherwise it can be accessed
- by admin users only.
- - `public`: everyone with access to `_system` database can access the
- support info API.
+* Fix BTS-374: thread race between ArangoSearch link unloading and storage
+ engine WAL flushing.
-* Fixes a bug in the maintenance's error-handling code. A shard error would
- result in log messages like
+* Improve parallelism capabilities of arangorestore.
- WARNING [ceb1a] {maintenance} caught exception in Maintenance shards
- error reporting: Expecting Object
- ERROR [c9a75] {maintenance} Error reporting in current: Expecting Object
+ arangorestore can now dispatch restoring data chunks of a collection to idle
+ background threads, so that multiple restore requests can be in flight for the
+ same collection concurrently.
- and also prevent the maintenance from reporting the current state to the
- agency, which in turn can prevent cluster-wide progress of various actions.
+ This can improve restore speed in situations when there are idle threads left
+ (number of threads can be configured via arangorestore's `--threads` option)
+ and the dump file for the collection is large.
-* Send a keystroke to arangod's stdin when a shutdown command is received via
- the REST API `/_admin/shutdown` and the server is started with the
- `--console` argument. The keystroke will exit the blocking read loop that
- is waiting on console input and that otherwise blocks the shutdown.
- The implementation is based on ioctl and is thus only present on Linux and
- MacOS.
+ The improved parallelism is only used when restoring dumps that are in the
+ non-enveloped format. This format has been introduced with ArangoDB 3.8.
+ The reason is that dumps in the non-enveloped format only contain the raw
+ documents, which can be restored independently of each other, i.e. in any
+ order.
+ However, the enveloped format may contain documents and remove operations,
+ which need to be restored in the original order.
-* Some AQL queries erroneously reported the "access after data-modification"
- error for queries in which there was a read attempt from a collection
- _before_ a data-modification operation. Such access is legal and should not
- trigger said error anymore. Accessing a collection _after_ in a query a
- data-modification in the same query is still disallowed.
+* Fix crashes during arangorestore operations due to usage of wrong pointer
+ value for updating user permissions.
-* Make AQL modification operations in a cluster asynchronous. This allows to
- free the thread for other work until both the write and synchronous
- replication are complete.
+* Fixed BTS-360 and ES-826: sporadic ArangoSearch error `Invalid RL encoding in
+ 'dense_fixed_offset_column_key'`.
-* When creating Pregel memory-mapped files, create them with O_TMPFILE
- attribute on Linux so that files are guaranteed to vanish even if a
- process dies.
+* Add HTTP REST API endpoint POST `/_api/cursor/` as a drop-in
+ replacement for PUT `/_api/cursor/`. The POST API is functionally
+ equivalent to the existing PUT API. The benefit of using the POST API is that
+ HTTP POST requests are not considered idempotent, so proxies will not retry
+ them if they fail. This was a problem with the existing PUT API, as HTTP PUT
+ requests can be considered idempotent according to the HTTP specification.
-* Fixed: getResponsibleShard call on disjoint Smart Graphs
- if you asked for the responsible shard on a disjoint edge collection
- where the _from and _to differ (invalid), the server would respond with
- "DATASOURCE_NOT_FOUND". This is now fixed to "BAD_PARAMETER"
- to emphasize that the collection is fine but the input is invalid.
+ The POST API is not used internally by ArangoDB's own requests in this
+ version. This means that compatibility with older versions of ArangoDB that do
+ not provide the new API is ensured.
-* Fixed: /_api/transaction/begin called on edge collections of disjoint
- SmartGraphs falsely returned CollectionNotFound errors.
+* Timely updates of rebootId / cluster membership of DB servers and coordinators
+ in ClusterInfo. Fixes BTS-368 detected in chaos tests.
-* Bugfix: In more complex queries there was a code-path where a (Disjoint-)
- Smart graph access was not properly optimized.
+* Fix cluster-internal retry behavior for network communications. In
+ particular, retry on HTTP 421 (leader refuses operation). This leads to the
+ cluster letting fewer internal errors out to clients.
-* Improve log messages for Pregel runs by giving them more context.
+* Fixed a CPPCHECK warning or added a suppression.
-* Add ReplicatedLogs column family.
+* Web UI - Added missing HTML escaping in the file upload plugin used when
+ deploying a new Foxx application from a zip file.
-* Add optimization rule for AqlAnalyzer.
+* Allow specifying a fail-over LDAP server. Instead of "--ldap.OPTION" you need
+ to specify "--ldap2.OPTION". Authentication / authorization will first check
+ the primary LDAP server. If this server cannot authenticate a user, it will
+ try the secondary one. It is possible to specify a file containing all users
+ that the primary (or secondary) LDAP server is handling via the option
+ "--ldap.responsible-for". This file must contain the usernames line by line.
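+
+ A minimal sketch of such a setup (host names and the file path are made up,
+ and further LDAP connection options will typically be required):
+
+   arangod --ldap.enabled true --ldap.server primary.example.com \
+     --ldap.responsible-for /etc/arangodb3/primary-users \
+     --ldap2.enabled true --ldap2.server secondary.example.com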
-* Fixed issue #14592: IS_NULL(@x) isn't recognized as a constant expression.
+* Fix BTS-352: removed assertion for success of a RocksDB function and throw a
+ proper exception instead.
-* Fixed issue BTS-539 "Unsynchronized query kill while it's being finalized in
- another thread was uncovered through `test-kill.js` of `communication_ssl`
- suite". Fixed possible (but unlikely) crash when killing an AQL query.
+* Added option `--query.require-with` to make AQL in single server mode also
+ require `WITH` clauses where the cluster would need them.
+ The option is turned off by default, but can be turned on in single servers
+ to remove this behavior difference between single servers and clusters,
+ making a later transition from single server to cluster easier.
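+
+ For example, with the option enabled, a single-server traversal must declare
+ the collections it touches, just like in a cluster (collection names are
+ made up):
+
+   WITH users
+   FOR v IN 1..2 OUTBOUND "users/alice" knows
+     RETURN v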
-* Append physical compaction of log collection to every Raft log
- compaction (BTS-542).
+* Fixed a problem in document batch operations where errors from one shard were
+ reported multiple times if the shard was completely offline.
-* Change optimization level for debug builds back to `-O0` (from `-Og`)
- because `-Og` seems to cause debuggability issues in some environments.
+* Fixed issue #13169: arangoimport TSV conversion of booleans and null values,
+ although conversion was switched off by `--convert false`.
-* Fixed issue BTS-536 "Upgrading without rest-server is aborted by error".
- Now stating `--server.rest-server false` does not require the additional
- `--console` argument for upgrading a server.
+ Unquoted `null`, `false` and `true` literals from delimited files are now
+ imported as strings if `convert` is explicitly turned off. Previously, the
+ option affected unquoted numbers only.
-* Fixed various problems in GEO_INTERSECTS: wrong results, not implemented
- cases and numerically unstable behavior. In particular, the case of
- the intersection of two polygons in which one is an S2LngLatRect
- is fixed (BTS-475).
+* Web UI: Highlight binary and hexadecimal integer literals in AQL queries.
-* Fixed ES-867 and ES-922: removed eslint from NPM packages descriptions and
- updated netmask package to non-vulnerable version.
+* Prevent arangod from terminating with "terminate called without an active
+ exception" (SIGABRT) in case an out-of-memory exception occurs during creating
+ an ASIO socket connection.
-* Web UI: Fixes a logical error which occurred after re-visiting the logs view
- which lead to not displaying the logs view and its entries correctly
- (BTS-507).
+* Micro improvements for Pregel job API and documentation:
+ - Added a few useful attributes to Pregel HTTP API docs.
+ - Added "parallelism" attribute to the result of Pregel job status responses,
+ so that the effective parallelism is reported back.
+ - Make sure "computationTime" in Pregel job status response does not underflow
+ in case of errors.
-* Fixed a bug, where Coordinators handled plan changes for databases
- in heartbeat thread in wrong order. Databases could be listed, but
- not used.
+* Fix BTS-350, BTS-358: Fixed potential startup errors due to global replication
+ applier being started before end of database recovery procedure.
+ Also fixed potential shutdown errors due to the global replication applier
+ being shut down in parallel with a concurrent shutdown attempt.
-* Automatically extend web UI sessions while they are still active.
- The web UI can now call a backend route to renew its JWT, so there will not
- be any rude logouts in the middle of an active session.
-
- Active web UI sessions (here: sessions with user activity within the last
- 90 minutes) will automatically renew their JWT if they get close to the
- JWT expiry date.
-
-* Reduce memory usage for in-memory revision trees. Previously, a revision
- tree instance for a non-empty collection/shard was using 4 MB of memory
- when uncompressed. Trees that were unused for a while were compressed on
- the fly to use less memory, and later uncompressed again when needed.
- Now the uncompressed in-memory version of the revision tree will
- dynamically allocate memory as needed. This allows the initial version
- of the trees to get away with just 64 KB of memory. Memory usage will
- grow lazily when more parts of the trees get populated. The compression
- of unused in-memory tree data is still in place.
+* Fix BTS-357: Fix processing of analyzer with return type by TOKENS function.
-* Refactored arangobench:
- - Updated testcases to show description of them when beginning execution
- - Fixed testcase histogram with time measures when batch size > 0
- - Integrated testcases with Velocypack for simplification
- - Deprecated some testcases
- - Internal changes for performance optimization
+* Fix BTS-346: Improved handling of AQL query kill command in unlikely places,
+ before the query starts to execute and after the query is done but the result
+ is still being written. Now the cleanup of queries works more reliably. These
+ unreliable kill time windows were very short and unlikely to be hit, but if
+ one was hit, transactions were not aborted and collection locks could linger
+ until the query timed out.
-* Timely update of database server list on health check fixes BTS-505.
+* Updated ArangoDB Starter to 0.15.0.
-* Add 3 AQL functions: COSINE_SIMILARITY, L1_DISTANCE and L2_DISTANCE.
-* Updated ArangoDB Starter to 0.15.1.
+* Added error handling for figures command in cluster. Previously errors
+ returned by shards were ignored when aggregating the individual responses.
-* Updated arangosync to 2.5.0.
+* Remove CMake control variable `UNCONDITIONALLY_BUILD_LOG_MESSAGES`.
-* Fix BTS-453: Download of a HotBackup from remote source doesn't work on macOS
+* Fix undefined behavior in dynarray constructor when running into
+ an out-of-memory exception during construction. In arangod, this can only
+ happen during metrics objects construction at program start.
-* Honor the value of startup option `--rocksdb.sync-interval` on Windows, too.
- Previously, the value was ignored and WAL syncing on Windows was using a
- different code paths than on the other supported platforms. Now syncing is
- unified across all platforms, and they all call RocksDB's `SyncWAL()`.
+* Added option `--headers-file` to arangoimport, to optionally read CSV/TSV
+ headers from a separate file.
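+
+ For example (file and collection names are made up):
+
+   arangoimport --file data.csv --type csv \
+     --headers-file headers.csv --collection mycol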
-* Updated ArangoDB Starter to 0.15.1-preview-4.
+* Fixed issue BTS-353: memleak when running into an out-of-memory situation
+ while repurposing an existing AqlItemBlock.
-* APM-132: Clean up collection statuses
- Removes collection statuses "new born", "loading", "unloading" and "unloaded".
- These statuses were last relevant with the MMFiles storage engine, when it
- was important to differentiate which collections are present in main memory
- memory and which aren't. With the RocksDB storage engine, all that was
- automatically handled anyway, and the statuses were not important anymore.
+* Change metrics' internal `low()` and `high()` methods so that they return by
+ value, not by reference.
- The change removes the "Load" and "Unload" buttons for collections from the
- web interface. All collections in the web interface will be marked as
- "loaded" permanently.
+* Fix logging of URLs when using `--log.level requests=debug`. There was an
+ issue since v3.7.7 with the wrong URL being logged in request logging if
+ multiple requests were sent over the same connection. In this case, the
+ request logging only reported the first URL requested in the connection, even
+ for all subsequent requests.
- This change also obsoletes the `load()` and `unload()` calls for collections
- as well as their HTTP API equivalents. The APIs will remain in place for now
- but are changed to no-ops. They will removed eventually in a future version
- of ArangoDB. This will be announced separately.
+* Added startup option `--query.allow-collections-in-expressions` to control
+ whether collection names can be used in arbitrary places in AQL expressions,
+ e.g. `collection + 1`. This was allowed before, as a collection can be seen as
+ an array of documents. However, referring to a collection like this in a query
+ would materialize all the collection's documents in RAM, making such
+ constructs prohibitively expensive for medium-size to large-size collections.
-* Include K_SHORTEST_PATHS and SHORTEST_PATH execution nodes in AQL query
- memory usage accounting. The memory used by these execution node types was
- previously not tracked against the configured query memory limit.
+ The option can now be set to `false` to prohibit accidental usage of
+ collection names in AQL expressions. With that setting, using a collection
+ inside an arbitrary expression will trigger the error `collection used as
+ expression operand` and make the query fail.
+ Even with the option being set to `false`, it is still possible to use
+ collection names in AQL queries where they are expected, e.g. `FOR doc IN
+ collection RETURN doc`.
-* Reduce default value for max-nodes-per-callstack to 200 for OSX, because on
- OSX worker threads have a stack size of only 512kb.
+ The option `--query.allow-collections-in-expressions` is introduced with a
+ default value of `true` in 3.8 to ensure downwards-compatibility, but the
+ default value will change to `false` in 3.9. Furthermore, the option will be
+ deprecated in 3.9 and removed in later versions, in addition to making
+ unintended usage of collection names always an error.
-* Make `--javascript.copy-installation` also copy the `node_modules` sub
- directory. This is required so we have a full copy of the JavaScript
- dependencies and not one that excludes some infrequently changed modules.
- In addition, file copying now intentionally excludes .map files as they
- are not needed.
+* Deprecate option `--rocksdb.exclusive-writes`, which was meant to serve only
+ as a stopgap measure while porting applications from the MMFiles storage
+ engine to RocksDB.
-* Fixed BTS-408: treat positive or negative signed numbers as constants
- immediately during AQL query parsing.
- Previously, a value of `-1` was parsed initially as `unary minus(value(1))`,
- which was not treated in the same way as a constant value `value(-1)`.
- The value was later optimized to just `value(-1)`, but this only happened
- during constant-folding after parsing. Any operations that referred to
- the unfolded values during parsing thus did not treat such values as
- constants.
+* Fix errors caused by creating some log messages in log level DEBUG in log
+ topics PREGEL and GRAPHS. Setting the log level to DEBUG for these topics
+ could lead to errors when running some Pregel jobs or SmartGraph traversals.
-* Slightly increase internal AQL query and transaction timeout on DB servers
- from 3 to 5 minutes.
- Previously, queries and transactions on DB servers could expire quicker,
- which led to spurious "query ID not found" or "transaction ID not found"
- errors on DB servers for multi-server queries/transactions with unbalanced
- access patterns for the different participating DB servers.
- The timeouts on coordinators remain unchanged, so any queries/transactions
- that are abandoned will be aborted there, which will also be propagated to
- DB servers. In addition, if a participating server in an AQL query becomes
- unavailable, the coordinator is now notified of that and will terminate the
- query more eagerly.
-* Add hard-coded complexity limits for AQL queries, in order to prevent
- programmatically generated large queries from causing trouble (too deep
- recursion, enormous memory usage, long query optimization and distribution
- passes etc.).
- This change introduces 2 limits:
- - a recursion limit for AQL query expressions. An expression can now be
- up to 500 levels deep. An example expression is `1 + 2 + 3 + 4`, which
- is 3 levels deep `1 + (2 + (3 + 4))`.
- The expression recursion is limited to 500 levels.
- - a limit for the number of execution nodes in the initial query
- execution plan.
- The number of execution nodes is limited to 4,000.
+v3.8.0-alpha.1 (2021-03-29)
+---------------------------
-* Always remove blocker object for revision trees in case of replication
- failures.
+* Updated ArangoDB Starter to 0.15.0-preview-2.
-* Fix invalid assertion for insert/removal buffers positioning and internals of
- `hasBlockerUpTo` function.
+* Updated OpenSSL to 1.1.1k and OpenLDAP to 2.4.58.
-* Fix startup issues with encryption-at-rest enabled when there were empty (0
- byte) RocksDB WAL files present. Such empty files caused RocksDB to abort the
- startup, reporting corruption. However, empty WAL files are possible in case
- of server crashes etc. Now, if a WAL file is completely empty, there will be
- no attempt to read the encryption meta data from it, so the startup succeeds
- (BTS-392).
+* Updated arangosync to 2.0.1.
-* Remove _msg/please-upgrade handler.
+* Fix connectionTime statistic. This statistic should provide the distribution
+ of the connection lifetimes, but in previous versions the tracking was broken
+ and no values were reported.
-* Updated JavaScript dependencies, including breaking changes to non-public
- modules. We recommend always bundling your own copy of third-party modules,
- even ones listed as public.
+* When using connections to multiple endpoints and switching between them,
+ arangosh can now reuse existing connections by referring to an internal
+ connection cache. This helps for arangosh scripts that repeatedly connect to
+ multiple endpoints, and avoids wasting lots of ephemeral TCP ports remaining
+ in CLOSE_WAIT state.
+ This change is transparent to any arangosh scripts or commands that do not
+ reconnect to other endpoints than the one specified at arangosh start.
- - accepts: 1.3.5 -> 1.3.7
- - ansi_up: 4.0.3 -> 5.0.1
- - content-type: (added) -> 1.0.4
- - error-stack-parser: 2.0.2 -> 2.0.6
- - highlight.js: 9.15.6 -> 10.7.3
- - http-errors: 1.7.2 -> 1.8.0
- - iconv-lite: 0.4.24 -> 0.6.3
- - js-yaml: 3.13.1 -> 3.14.1
- - lodash: 4.17.13 -> 4.17.21
- - marked: 0.6.2 -> removed
- - mime-types: 2.1.22 -> 2.1.31
- - mocha: 6.1.3 -> 6.2.3
- - netmask: 1.0.6 -> 2.0.2
- - qs: 6.7.0 -> 6.10.1
- - range-parser: 1.2.0 -> 1.2.1
- - semver: 6.0.0 -> 7.3.5
- - sinon: 1.17.6 -> 1.17.7
- - timezone: 1.0.22 -> 1.0.23
- - type-is: 1.6.16 -> 1.6.18
- - underscore: 1.9.1 -> 1.13.1
- - xmldom: 0.1.27 -> 0.6.0
+* Add an option for locking down all endpoints in the `/_admin/cluster` REST API
+ for callers without a proper JWT set in the request. There is a new startup
+ option `--cluster.api-jwt-policy` that allows *additional* checks for a valid
+ JWT in requests to sub-routes of `/_admin/cluster`. The possible values for
+ the startup option are:
+
+ - "jwt-all": requires a valid JWT for all accesses to `/_admin/cluster` and
+ its sub-routes. If this configuration is used, the "Cluster" and "Nodes"
+ sections of the web interface will be disabled, as they are relying on the
+ ability to read data from several cluster APIs.
+ - "jwt-write": requires a valid JWT for write accesses (all HTTP methods
+ except HTTP GET) to `/_admin/cluster`. This setting can be used to allow
+ privileged users to read data from the cluster APIs, but not to do any
+ modifications. All existing permissions checks for the cluster API routes
+ are still in effect with this setting, meaning that read operations without
+ a valid JWT may still require dedicated other permissions (as in 3.7).
+ - "jwt-compat": no *additional* access checks are in place for the cluster
+ APIs. However, all existing permissions checks for the cluster API routes
+ are still in effect with this setting, meaning that all operations may still
+ require dedicated other permissions (as in 3.7).
+
+ The default value for the option is `jwt-compat`, which means this option will
+ not cause any extra JWT checks compared to 3.7.
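+
+ For example, to allow privileged users to read from the cluster APIs but
+ require a superuser JWT for modifications:
+
+   arangod --cluster.api-jwt-policy jwt-write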
+
+* UI builds are now using the yarn package manager instead of the previously
+ used node package manager.
+
+* Fix shortName labels in metrics, in particular for agents.
+
+* The old metrics API contains the following gauges which should actually be
+ counters:
+ - arangodb_scheduler_jobs_dequeued
+ - arangodb_scheduler_jobs_submitted
+ - arangodb_scheduler_jobs_done
+ Therefore, the new v2 metrics API adds the following counters:
+ - arangodb_scheduler_jobs_dequeued_total
+ - arangodb_scheduler_jobs_submitted_total
+ - arangodb_scheduler_jobs_done_total
+ These counters are only visible in the new v2 metrics API and replace the old
+ metrics, which are suppressed for v2.
+
+* Fix implicit capture of views in the context of a JS transaction.
+
+* Introduce metrics for AQL query memory limit violations:
+ - `arangodb_aql_global_query_memory_limit_reached`: Total number of times the
+ global query memory limit was violated.
+ - `arangodb_aql_local_query_memory_limit_reached`: Total number of times a
+ local query memory limit was violated.
+
+* Set the default value for `--query.global-memory-limit` to around 90% of RAM,
+ so that a global memory limit is now effective by default.
+
+ The default global memory limit value is calculated by a formula depending on
+ the amount of available RAM and will result in the following values for
+ common RAM sizes:
+
+ RAM: 0 (0MiB) Limit: 0 unlimited, %mem: n/a
+ RAM: 134217728 (128MiB) Limit: 33554432 (32MiB), %mem: 25.0
+ RAM: 268435456 (256MiB) Limit: 67108864 (64MiB), %mem: 25.0
+ RAM: 536870912 (512MiB) Limit: 255013683 (243MiB), %mem: 47.5
+ RAM: 805306368 (768MiB) Limit: 510027366 (486MiB), %mem: 63.3
+ RAM: 1073741824 (1024MiB) Limit: 765041049 (729MiB), %mem: 71.2
+ RAM: 2147483648 (2048MiB) Limit: 1785095782 (1702MiB), %mem: 83.1
+ RAM: 4294967296 (4096MiB) Limit: 3825205248 (3648MiB), %mem: 89.0
+ RAM: 8589934592 (8192MiB) Limit: 7752415969 (7393MiB), %mem: 90.2
+ RAM: 17179869184 (16384MiB) Limit: 15504831938 (14786MiB), %mem: 90.2
+ RAM: 25769803776 (24576MiB) Limit: 23257247908 (22179MiB), %mem: 90.2
+ RAM: 34359738368 (32768MiB) Limit: 31009663877 (29573MiB), %mem: 90.2
+ RAM: 42949672960 (40960MiB) Limit: 38762079846 (36966MiB), %mem: 90.2
+ RAM: 68719476736 (65536MiB) Limit: 62019327755 (59146MiB), %mem: 90.2
+ RAM: 103079215104 (98304MiB) Limit: 93028991631 (88719MiB), %mem: 90.2
+ RAM: 137438953472 (131072MiB) Limit: 124038655509 (118292MiB), %mem: 90.2
+ RAM: 274877906944 (262144MiB) Limit: 248077311017 (236584MiB), %mem: 90.2
+ RAM: 549755813888 (524288MiB) Limit: 496154622034 (473169MiB), %mem: 90.2
-* Adapt various places related to handling of execution plans non-recursive
- in order to avoid stack overflows. This allows us now to execute much larger
- queries.
+* Increase default idle timeout in streaming transactions from 10 seconds to
+ 60 seconds, and make the timeout configurable via a startup parameter
+ `--transaction.streaming-idle-timeout`.
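+
+ For example, to raise the idle timeout to two minutes (value in seconds):
+
+   arangod --transaction.streaming-idle-timeout 120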
-* Lower log level to warning, when take over shard leadership finds
- an agency Current entry is missing the server taking over.
+* Use RebootTracker to abort cluster transactions on DB servers should the
+ originating coordinator die or be rebooted. The previous implementation left
+ the coordinator's transactions open on DB servers until they timed out there.
+ Now, the coordinator's unavailability or reboot will be detected as early as
+ it is reported by the agency, and all open transactions from that coordinator
+ will be auto-aborted on DB servers.
-* Fix locking of AQL queries write queries on DB servers.
+* Update the Web UI's list of built-in AQL functions for proper syntax
+ highlighting in the query editor.
-* APM-112: invalid use of OPTIONS in AQL queries will now raise a warning in
- the query.
- The feature is useful to detect misspelled attribute names in OPTIONS, e.g.
+* Fix a crash caused by returning a result produced by ANALYZER function.
- INSERT ... INTO collection
- OPTIONS { overwrightMode: 'ignore' } /* should be 'overwriteMode' */
+* Fix a race in LogAppender::haveAppenders.
+ `haveAppenders` is called as part of audit logging. It accesses internal maps
+ but previously did not hold a lock while doing so.
- It is also useful to detect the usage of valid OPTIONS attribute names that
- are used for a wrong query part, e.g.
+* Bug-fix: in the case of very rare network issues, there was a chance that an
+ AQL query could get stuck during cleanup after a commit. This would cause the
+ client to receive a timeout and the Coordinator to block a Scheduler thread.
+ This situation is now sorted out and the thread will not be blocked anymore.
+ We also added logging for the case that the query could not successfully be
+ cleaned up, which would leave locks on shards behind.
- FOR doc IN collection
- FILTER doc.value == 1234
- INSERT doc INTO other
- OPTIONS { indexHint: 'myIndex' } /* should be used above for FOR */
+* Fix an assertion failure that occurred when restoring view definitions from a
+ cluster into a single server.
- In case a wrong option attribute is used, a warning with code 1575 will be
- raised.
- By default, warnings are reported but do not lead to the query being aborted.
- This can be toggled by the startup option `--query.fail-on-warnings` or the
- per-query runtime option `failOnWarnings`.
+* Added new ArangoSearch analyzer type "stopwords".
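+
+ A minimal arangosh sketch (analyzer name, stopword list and feature set are
+ made up):
+
+   var analyzers = require("@arangodb/analyzers");
+   analyzers.save("demo_stop", "stopwords",
+     { stopwords: ["and", "the"] }, ["frequency", "norm"]);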
-* Added new command line-option `--version-json`. This will return the
- version information as json object.
+* Fix error message in case of index unique constraint violations. They were
+ lacking the actual error message (i.e. "unique constraint violated") and only
+ showed the index details. The issue was only introduced in devel in February.
-* Fix ArangoAgency::version(), which always returned an empty string instead
- of the agency's correctly reported version. This also fixes the agency
- version in the startup log messages of the cluster.
+* Removed old metrics from the new v2 metrics API. Those metric endpoints were
+ identical to the sum values of histograms.
-* Fixed an issue in index selection, when the selectivity estimate of another
- prefix index was used without checking if the other index covered the
- FILTER condition.
+* Allow process-specific logfile names.
- For example, given the following indexes:
+ This change allows replacing '$PID' with the current process id in the
+ `--log.output` and `--audit.output` startup parameters.
+ This way it is easier to write process-specific logfiles.
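+
+ For example (the output path is made up; the single quotes prevent the shell
+ from expanding `$PID` itself):
+
+   arangod --log.output 'file:///var/log/arangodb3/arangod-$PID.log'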
- - index 1: ["e", "a", "b", "c"]
- - index 2: ["e", "a", "b"]
- - index 3: ["d", "e", "f", "g"]
+* Backport a bugfix from upstream RocksDB for opening encrypted files with small
+ sizes. Without the bugfix, the server may run into assertion failures during
+ recovery.
- and the FILTER condition `d == 1 && e == 2 && f == 3`, then the best index
- to pick would be index 3. However, the optimizer may have picked index 1
- here.
- All indexes are valid candidates for this FILTER condition, but none of the
- indexes covered all attributes of the FILTER condition. So the index
- selectivity estimates were (correctly) not used directly to determine the
- best index.
- The actual bug happened when comparing the usefulness of the candidate
- indexes, when figuring out that even though the selectivity estimate for
- index 1 could not be used, but that there existed a prefix index of index
- 1 (index 2). The selectivity estimate of this index was taken _without_
- checking that prefix index actually satisfied the FILTER condition fully.
- The prefix index' selectivity estimate must only be used if it fully
- satisfies the FILTER condition, which was not the case here.
+* Fix duplicate leaving of V8 contexts when returning streaming cursors.
+ The `exitContext` call done on query shutdown could previously try to exit the
+ V8 context multiple times, which would cause undefined behavior. Now we are
+ tracking if we already left the context to prevent duplicate invocation.
-* Add following term ids, which prevents old synchronous replication requests
- to be accepted after a follower was dropped and has gotten in sync again.
- This makes the chaos tests which delay synchronous replication requests
- more reliable and prevent inconsistent shard replicas under bad network
- conditions.
+* In a cluster, do not create the collections `_statistics`, `_statistics15`
+ and `_statisticsRaw` on DB servers. These collections should only be created
+ by the
+ coordinator, and should translate into 2 shards each on DB servers. But there
+ shouldn't be shards named `_statistics*` on DB servers.
+
+* Fixed two bogus messages about hotbackup restore:
+ - Coordinators unconditionally logged the message "Got a hotbackup restore
+ event, getting new cluster-wide unique IDs..." on shutdown. This was not
+ necessarily related to a hotbackup restore.
+ - DB servers unconditionally logged the message "Strange, we could not
+ unregister the hotbackup restore callback." on shutdown, although this was
+ meaningless.
-* Enable process metrics on agent instances by default. Previously, some
- metrics (including the metrics starting with `arangodb_process` prefix) were
- not returned by agent instances.
+* Rename "save" return attribute to "dst" in AQL functions `DATE_UTCTOLOCAL` and
+ `DATE_LOCALTOUTC`.
-* APM-107: Added metric "rocksdb_read_only" to determine whether RocksDB is
- currently in read-only mode due to a background error. The metric will have
- a value of "1" if RocksDB is in read-only mode and "0" if RocksDB is in
- normal operations mode. If the metric value is "1" it means all writes into
- RocksDB will fail, so inspecting the logfiles and acting on the actual error
- situation is required.
+* Fix potentially undefined behavior when creating a
+ CalculationTransactionContext for an arangosearch analyzer. An uninitialized
+ struct member was passed as an argument to its base class. This potentially
+ had no observable effects, but should be fixed.
-* Fix potential memleak in Pregel conductor garbage collection.
+* Retry a cluster internal network request if the connection comes from the pool
+ and turns out to be stale (connection immediately closed). This fixes some
+ spurious errors after a hotbackup restore.
-* Added a retry loop for arangorestore during the initial connection phase. The
- number of retries defaults to 3 and can be configured using
- --initial-connect-retries (BTS-491).
+* Fix progress reporting for arangoimport with large files on Windows.
+ Previously, progress was only reported for the first 2GB of data due to an int
+ overflow.
-* Fix numeric overflow in AQL WINDOW node cost estimation if the number of
- preceding rows was set to `unbounded`.
+* Log the actual signal instead of "control-c" and also include the process id
+ of the process that sent the signal.
-* Updated ArangoDB Starter to 0.15.1-preview-3.
+* Fixed GitHub issue #13665: Improve index selection when there are multiple
+ candidate indexes.
-* Added garbage collection for finished and failed Pregel conductors.
- Previously, Pregel executions that finished successfully or unsuccessfully
- remained in memory until being explicitly canceled. This prevented a
- cleanup of abandoned jobs. Such jobs are now automatically cleaned
- about 10 minutes after finalization. The time-to-live values can be
- overriden per Pregel job by passing a "ttl" value.
+* When dropping a collection or an index with a larger amount of documents, the
+ key range for the collection/index in RocksDB gets compacted. Previously, the
+ compaction was running in foreground and thus would block the deletion
+ operations.
+ Now, the compaction is running in background, so that the deletion operations
+ can return earlier.
+ The maximum number of compaction jobs that are executed in background can be
+ configured using the new startup parameter
+ `--rocksdb.max-parallel-compactions`, which defaults to 2.
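+
+ For example, to allow up to 4 parallel background compactions:
+
+   arangod --rocksdb.max-parallel-compactions 4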
-* Revive startup parameter `--server.session-timeout` to control the timeout
- for web interface sessions and other sessions that are based on JWTs created
- by the `/_open/auth` API.
+* Put Sync/LatestID into hotbackup and restore it on hotbackup restore if it is
+ in the backup. This helps with unique key generation after a hotbackup is
+ restored to a young cluster.
- This PR also changes the default session timeout for web interface sessions
- to one hour. Older versions of ArangoDB had longer session timeouts.
+* Fixed a bug in the index count optimization that double-counted documents
+ when using array expansions in the fields definition.
-* Add prefix parameter to LEVENSHTEIN_MATCH function in ArangoSearch
- (DEVSUP-753).
+* Don't store selectivity estimate values for newly created system collections.
-* Removed redirects from /_admin/cluster* to /_admin/cluster/*. Adjusted
- internal requests to use the new url.
+ Not storing the estimates has a benefit especially for the `_statistics`
+ system collections, which are written to periodically even on otherwise idle
+ servers. In this particular case, the actual statistics data was way smaller
+ than the writes caused by the index estimate values, causing a disproportional
+ overhead just for maintaining the selectivity estimates.
+ The change now turns off the selectivity estimates for indexes in all newly
+ created system collections, and for new user-defined indexes of type
+ "persistent", "hash" or "skiplist", there is now an attribute "estimates"
+ which can be set to `false` to disable the selectivity estimates for the
+ index.
+ The attribute is optional. Not setting it will lead to the index being created
+ with selectivity estimates, so this is a downwards-compatible change for
+ user-defined indexes.
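+
+ A minimal arangosh sketch (collection and field names are made up):
+
+   db.mycol.ensureIndex({ type: "persistent", fields: ["value"],
+                          estimates: false });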
-* Fix display of running and slow queries in web UI when there are multiple
- coordinators. Previously, the display order of queries was undefined, which
- could lead to queries from one coordinator being display on top once and
- then the queries from another. That made using this UI harder than necessary.
+* Added startup option `--query.global-memory-limit` to set a limit on the
+ combined estimated memory usage of all AQL queries (in bytes).
+ If this option has a value of `0`, then no memory limit is in place.
+ This is also the default value and the same behavior as in previous versions
+ of ArangoDB.
+ Setting the option to a value greater than zero will mean that the total
+ memory usage of all AQL queries will be limited approximately to the
+ configured value.
+ The limit is enforced by each server in a cluster independently, i.e. it can
+ be set separately for coordinators, DB servers etc. The memory usage of a
+ query that runs on multiple servers in parallel is not summed up, but tracked
+ separately on each server.
+ If a memory allocation in a query would lead to the violation of the
+ configured global memory limit, then the query is aborted with error code 32
+ ("resource limit exceeded").
+ The global memory limit is approximate, in the same fashion as the per-query
+ limit provided by the option `--query.memory-limit` is. Some operations,
+ namely calls to AQL functions and their intermediate results, are currently
+ not properly tracked.
+ If both `--query.global-memory-limit` and `--query.memory-limit` are set, the
+ former must be set at least as high as the latter.
- Now queries are sorted for display, according to their query IDs.
+ To reduce the cost of globally tracking the memory usage of AQL queries, the
+ global memory usage counter is only updated in steps of 32 kb, making this
+ also the minimum granularity of the global memory usage figure.
+ In the same fashion, the granularity of the peak memory usage counter inside
+ each query was also adjusted to steps of 32 kb.
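+
+ For example, to cap the combined memory usage of all AQL queries at roughly
+ 2 GiB while keeping a lower per-query limit (values are made up):
+
+   arangod --query.global-memory-limit 2147483648 \
+     --query.memory-limit 268435456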
-* Updated ArangoDB Starter to 0.15.1-preview-2.
+* Added startup option `--query.memory-limit-override` to control whether
+ individual AQL queries can increase their memory limit via the `memoryLimit`
+ query option. This is the default, so a query that increases its memory limit
+ is allowed to use more memory.
+ The new option `--query.memory-limit-override` allows turning this behavior
+ off, so that individual queries can only lower their maximum allowed memory
+ usage.
-* Fix potential stack overflow when executing large queries. This is
- achieved by splitting the callstack and moving part of the execution
- to a separate thread. The number of execution nodes after which such
- a callstack split should be performed can be configured via the query
- option `maxNodesPerCallstack` and the command line option
- `--query.max-nodes-per-callstack`; the default is 250.
+* Added metric `arangodb_aql_global_memory_usage` to expose the total amount of
+ memory (in steps of 32 kb) that is currently in use by all AQL queries.
-* Bug-Fix: Pregel WCC algorithm could yield incorrect results if a
- part of the connected component was only attached via OUTBOUND edges.
- The underlying algorithm is now modified to properly retain INBOUND
- edges for the runtime of the execution. This uses more RAM for the
- algorithm but guarantees correctness.
+* Added metric `arangodb_aql_global_memory_limit` to expose the memory limit
+ from startup option `--query.global-memory-limit`.
-* Updated ArangoDB Starter to 0.15.1-preview-1.
+* Allow setting path to the timezone information via the `TZ_DATA` environment
+ variable, in the same fashion as the currently existing `ICU_DATA` environment
+ variable. The `TZ_DATA` variable is useful in environments that start arangod
+ from some unusual locations, where it can't find its `tzdata` directory
+ automatically.
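+
+ For example (the path is made up):
+
+   TZ_DATA=/usr/share/arangodb3/tzdata arangod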
-* Updated arangosync to 2.4.0.
+* Fixed a bug in query cost estimation when a NoResults node occurred in a
+ spliced subquery. This could lead to a server crash.
-* Fixed DEVSUP-799: unique vertex getter may point to invalid memory after being
- reset, resulting in undefined behavior for traversals returning unique
- vertices from inner FOR loops.
+* Fix slower-than-necessary arangoimport behavior:
+ arangoimport has a built-in rate limiter, which can be useful for importing
+ data with a somewhat constant rate. However, it was enabled by default and
+ limited imports to 1MB per second, which are not useful defaults.
-* For cluster AQL queries, let the coordinator determine the query id to be
- used on DB servers. This allows the coordinator to roll back AQL query setup
- requests via the query id. Previously, the DB servers each generated a local
- query id and returned it to the coordinator, who would then keep track of
- them for later use. The problem with this was that if an AQL query setup
- request timed out, the coordinator had no way to roll it back.
+ This change turns the rate limiting off by default, and sets the default chunk
+ size to 8MB (up from 1MB) as well. This means that arangoimport will send
+ larger batches to the server by default. The already existing `--batch-size`
+ option can be used to control the maximum size of each batch.
- In addition, if setting up a query takes long on a DB server so that the
- coordinator sends a rollback request, there are some measures in place for
- the unlikely case in which the rollback request overtakes the setup request.
- In this case, the rollback request will not find a query yet, but will
- register a tombstone for it. Once the query gets registered by the delayed
- request, it will (correctly) fail because of the tombstone.
+ The new parameter `--auto-rate-limit` can now be used to toggle rate limiting.
+ It defaults to off, whereas previously rate limiting was enabled by default
+ unless `--batch-size` was specified when arangoimport was invoked.
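+
+ For example, to re-enable rate limiting while also using a larger batch size
+ (file and collection names are made up):
+
+   arangoimport --file data.jsonl --collection mycol \
+     --batch-size 16777216 --auto-rate-limit true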
-* Removed a special case for empty document update operations (i.e. update
- requests in which no attributes were specified to be updated) were handled
- in a special way without performing any writes. The problem was that such
- updates did not update the local state, but could have been replicated to
- followers.
- This special empty update case is now removed and update operations that do
- not update any attributes are treated as normal write operations both
- locally and in the replication.
+* The cluster dashboard charts in the web UI are now more readable during the
+  initialization phase. Additionally, the number of agents is now displayed
+  there as well, and agent failures are shown there if they occur.
-* Fix partial cleanup of internal write batches for multi-document operations of
- which one or multiple failed. The previous implementation had an unreleased
- performance optimization that wouldn't clean up the write batch completely.
- That could have led to a wrong sequence of events being accumulated in the
- write batch, which may have confused the WAL tailing API later.
+* Added more useful information during SmartGraph creation in the web UI
+ in case the current database is a OneShard database.
-* Fix some occurrences in which Merkle trees could silently apply the same change
- multiple times, which led to data drift between the Merkle tree and the
- underlying collection's data.
+* Add support for building on Zen 3 CPUs when optimizing for the local
+ architecture.
-* On a failure during synchronous replication, do not remove the failed follower
- from the list of known servers in the transaction.
- If we do, we would not be able to send the commit/abort to the follower later.
- However, we still need to send the commit/abort to the follower at transaction
- end, because the follower may be responsible for _other_ shards as well.
+* The web UI's node overview now also displays agent information (cluster only).
- This change also removes dangling transactions that could stay around on
- followers until they expired after the transaction idle timeout (180 seconds),
- and that could prevent a follower from getting back in sync during this period.
+* The statistics view in the web UI now provides more system-specific
+  information in case the Metrics API is enabled. Different statistics may be
+ visible depending on the operating system.
-* Added more context to "dropping follower" messages, so it is easier to analyze
- what exactly went wrong.
+* Added metrics documentation snippets and the infrastructure for them.
-* Fixed invalid shard synchronization for documents not added via INSERT with
- `overwriteMode` set to `ignore`. In this case, if a document with the given key
- already exists, it is not changed on the leader (i.e. no write happens on the
- leader). However, a write was replicated to the follower, which was wrong.
- This write is now suppressed, which can only make such insert operations faster.
+* Added a new cluster distribution view to the web UI. The view includes
+  general details about the cluster-wide distribution as well as more detailed,
+  shard-specific distribution information.
-* Web UI: Disables the hover tooltip within the statistics view of the
- memory consumption chart.
+* Reasonably harden MoveShard against invalid VelocyPack input.
-* Raised the versions of the node modules `node-sass` and `sass-loader`
- to be able to build the Web UI with Node v16+.
+* Removed older reference to VelocyPackDumper.
-* Improve usability of hidden options: `--help` mentions that these exist
- and how to display them.
+* Added `--documents-per-batch` option to arangoexport.
+  This option allows controlling the number of documents returned by each
+ server-side batch. It can be used to limit the number of documents per batch
+ when exporting collections with large documents.
-* Fix DEVSUP-753: now it is safe to call visit on exhausted disjunction
- iterator.
+* Added a new metrics view to the web UI. This view can be used in a clustered
+ environment as well as in a single instance. Metrics are displayed either in
+ a tabular format or as plain text (Prometheus Text-based format).
+ Additionally, the metrics can be downloaded there.
-* Fixed ES-863: reloading of users within the Cluster.
- If a Coordinator is asked to reload its users (e.g. by the UserManager in
- Foxx, it is also possible to do via API, but this is internal and on purpose
- not documented, so unlikely that it is used), in concurrency with user
- management updates there is a chance that the reload is not correctly
- performed on this coordinator. It may have missed the last update locally,
- causing one user to have an older state. It will be fixed on the next
- modification of any other users/permissions. Unfortunately this bug can
- cascade and when hit again, the coordinator can now be off by two updates.
- In DC2DC this situation is more likely to happen on the target datacenter,
- causing this datacenter to have other users/permissions than the source one.
+* Added a new maintenance mode tab to the web UI in cluster mode.
+ The new tab shows the current state of the cluster supervision maintenance and
+  allows enabling/disabling the maintenance mode from there. The tab will only
+  be visible in the `_system` database. The required privileges for displaying
+  the maintenance mode status and/or changing it are the same as for using the
+  REST APIs for the maintenance mode.
-* Slightly improve specific warning messages for better readability.
+* Added ability to display Coordinator and DBServer logs from inside the Web UI
+ in a clustered environment when privileges are sufficient.
+ Additionally, displayed log entries can now be downloaded from the web UI in
+ single server and in cluster mode.
-* Add 3 AQL functions: DECAY_GAUSS, DECAY_EXP and DECAY_LINEAR.
+* The Web UI's info view of a collection now displays additional properties and
+ statistics (e.g. RocksDB related figures, sharding information and more).
-* Fix URL request parsing in case data is handed in in small chunks.
- Previously the URL could be cut off if the chunk size was smaller than
- the URL size.
+* Improve progress reporting for shard synchronization in the web UI.
+ The UI will now show how many shards are actively syncing data, and will
+ provide a better progress indicator, especially if there is more than one
+ follower for a shard.
-* Backport bugfix from upstream rocksdb repository for calculating the
- free disk space for the database directory. Before the bugfix, rocksdb
- could overestimate the amount of free space when the arangod process
- was run as non-privileged users.
+* Added `--shard` option to arangodump, so that dumps can be restricted to one
+ or multiple shards only.
-* Add soft coordinator shutdown: This is a new option `soft=true` for the
- DELETE /_admin/shutdown API. Has only meaning for coordinators, otherwise
- ignored. A number of things are allowed to finish but no new things are
- allowed when in soft coordinator shutdown:
- - AQL cursors
- - transactions
- - asynchronous operations
- - Pregel runs
- Once all of the ongoing operations of these have finished and all requests
- on the low priority queue have been executed, the coordinator shuts down
- the normal way. This is supposed to make a coordinator restart less
- intrusive for clients.
+* Add optional hostname logging to log messages.
+ Whether or not the hostname is added to each log message can be controlled via
+ the new startup option `--log.hostname`. Its default value is the empty
+ string, meaning no hostname will be added to log messages.
+  Setting the option to an arbitrary string value will make this string appear
+  in front of each regular log message, and inside the `hostname`
+ attribute in case of JSON-based logging. Setting the option to a value of
+ `auto` will use the hostname as returned by `gethostbyname`.
-* Fix BTS-398: Cannot force index hint for primary index if FILTER has multiple
- OR conditions that require different indexes.
+* Added list-repeat AIR primitive that creates a list containing n copies of the
+ input value.
-* Fixed a problem with active failover, where a failover could take 5 mins
- because the follower was caught in a bad state during replication. This
- fixes BTS-425.
+* Prevent arangosh from trying to connect after every executed command.
+ This fixes the case when arangosh is started with default options, but no
+  server is running on localhost:8529. Previously, arangosh would try to
+  connect on startup and after every executed shell command, with each attempt
+  failing and timing out after 300ms.
+  arangosh now no longer tries to reconnect after every command in this case.
-* Added check to utils/generateAllMetricsDocumentation.py to check that
- the file name and the value of the name attribute are the same in the
- metrics documentation snippets. Correct a few such names.
+* Added 'custom-query' testcase to arangobench to allow execution of custom
+ queries.
+ This also adds the options `--custom-query` and `--custom-query-file` for
+ arangobench.
-* Fix BTS-456, BTS-457: Make geo intersection between point and rectangle
- symmetrical.
+* Addition to the internal refactoring of the K_PATHS feature: K_PATHS queries
+  are now being executed on the new refactored graph engine in a clustered
+ environment. This change should not have any visible effect on users.
-* Fix BTS-430: Added missing explain output about indexes for SHORTEST_PATH,
- K_SHORTEST_PATHS and K_PATHS.
+* Reduce memory footprint of agency Store in Node class.
-* Updated arangosync to 2.3.0.
+* On Windows create a minidump in case of an unhandled SEH exception for
+ post-mortem debugging.
-* Added check for data type compatibility between members of pipeline
- ArangoSearch analyzer.
+* Add JWT secret support for arangodump and arangorestore, i.e. they now also
+ provide the command-line options `--server.ask-jwt-secret` and
+ `--server.jwt-secret-keyfile` with the same meanings as in arangosh.
-* Implemented an optimization for Traversals. If you apply a POST filter on
- the vertex and/or edge result this filter will now be applied during the
- traversal to avoid generating the full output for AQL. This will have
- positive effect if you filter on the vertex/edge but return the path,
- this way the system does only need to produce a path that is allowed to
- be passed through.
- e.g.
+* Add optional hyperlink to program option sections for information purposes,
+ and add optional sub-headlines to program options for better grouping.
+ These changes will be visible only when using `--help`.
- FOR v,e,p IN 10 OUTBOUND @start GRAPH "myGraph"
- FILTER v.isRelevant == true
- RETURN p
+* For Windows builds, remove the defines
+ `_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS` and `_ENABLE_ATOMIC_ALIGNMENT_FIX`
+ that were needed to build Boost components with MSVC in older versions of
+ Boost and MSVC.
+ Both of these defines are obsolete nowadays.
- can now be optimized, and the traversal statement will only produce
- paths where the last vertex has `isRelevant == true`.
+* The initial database sync now considers the document count on the leader for
+  estimating timeouts when the leader holds more than 1 million documents.
-* Fix BTS-450: RandomGenerator caught assertion during a value generation within
- `dump_maskings` testsuite. Ensure correct conversion between 64 and 32bit.
+* Fixed issue #13117: Aardvark: Weird cursor offsets in query editor.
-* Fix BTS-442: a query with fullCount on a sharded collection hangs
- indefinitely when LIMIT is less than number of available documents.
+  Disabled font ligatures for the Ace editor in the web UI to avoid a rare
+  display issue.
-* Bug-Fix (MacOs): In MacOs there is an upper bound for descriptors defined by
- the system, which is independent of the settings in `ulimit -n`. If the
- hard limit is set above this upper bound value ArangoDB tries to raise the
- soft limit to the hard limit on boot. This will fail due to the system
- limit. This could cause ArangoDB to not start, asking you to lower the
- minimum of required file descriptors. The system-set upper bound is now
- honored and the soft limit will be set to either hard limit or system limit,
- whichever is lower.
+* Make all AQL cursors return compact result arrays.
-* Fix BTS-409: return error 1948 when a negative edge was detected during or was
- used as default weight in a SHORTEST_PATH or a K_SHORTEST_PATHS traversal.
+  As a side effect, this change makes profiling (i.e. using
+  `db._profileQuery(...)`) work for streaming queries as well. Previously,
+  profiling a streaming query could lead to internal errors, and even to query
+  results being returned, although profiling a query should not return any
+  query results.
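+
+  For example, profiling a streaming query from arangosh now works (the query
+  and collection name are hypothetical):
+
+  ```
+  // profile a streaming query; previously this could trigger internal errors
+  db._profileQuery("FOR doc IN mycollection RETURN doc", {}, { stream: true });
+  ```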
-* Fix BTS-446: When finding a not yet fully initialized agency, do not
- immediately fatal exit. Keep trying for (very generous) 5
- minutes.
+* Try to raise the file descriptor limit in local start scripts (in `scripts/`
+ directory - used for development only).
-* Removed unused documentation snippets (non-Rest DocuBlocks) as well as the
- documentation about the long deprecated features Simple Queries and
- JavaScript-based graph traversal. Also removed the descriptions of the
- JS API methods `collection.range()`, `collection.closedRange()`,
- `cursor.setBatchSize()` and `cursor.getBatchSize()`. All the functionality
- is superseded by AQL.
+* Fixed replication bug in MerkleTree sync protocol, which could lead to data
+ corruption. The visible effect was that shards could no longer get in sync
+ since the counts would not match after sync, even after a recount.
+ This corruption only happened if there were large amounts of differences (at
+ least 65537) and the destination side had newer revisions for some keys than
+ the source side.
-* Implemented APM-86: add query option `fillBlockCache` to control population
- of RocksDB block cache with data read by the query. The default value for
- this per-query option is `true`, which mimics the previous behavior.
- Setting the option to off allows not storing data in RocksDB's block cache
- for queries that are known to read only semi-relevant or unimportant data.
+* Simplify the DistributeExecutor and avoid implicit modification of its input
+ variable. Previously the DistributeExecutor could update the input variable
+ in-place, leading to unexpected results (see #13509).
+ The modification logic has now been moved into three new _internal_ AQL
+ functions (MAKE_DISTRIBUTE_INPUT, MAKE_DISTRIBUTE_INPUT_WITH_KEY_CREATION, and
+  MAKE_DISTRIBUTE_GRAPH_INPUT), and an additional calculation node with a
+  corresponding function call is introduced if we need to prepare the input
+  data for the distribute node.
-* Improve Merkle tree memory usage and allow left-growth of trees, too. This
- can help with insertions of arbitrarily old data.
+* Added new REST APIs for retrieving the sharding distribution (see the
+  arangosh sketch after this list):
-* Added metric `arangodb_sync_rebuilds_total` to track the full rebuild of a
- shard follower after too many subsequent shard synchronization failures. This
- metric should always have a value of 0. Everything else indicates a serious
- problem.
+ - GET `/_api/database/shardDistribution` will return the number of
+ collections, shards, leaders and followers for the database it is run
+ inside. The request can optionally be restricted to include data from only a
+ single DB server, by passing the `DBserver` URL parameter.
-* Fixed BTS-422: SingleRemoteModification in AQL behaves different.
+ This API can only be used on coordinators.
- This disables the optimizer rule `optimize-cluster-single-document-operations`
- for array inputs, e.g.
+ - GET `/_admin/cluster/shardDistribution` will return global statistics on the
+ current shard distribution, showing the total number of databases,
+ collections, shards, leaders and followers for the entire cluster.
+ The results can optionally be restricted to include data from only a single
+ DB server, by passing the `DBserver` URL parameter.
+  If the `details` URL parameter is set, the response will not contain
+  aggregates; instead, one entry per available database will be returned.
- INSERT [...] INTO collection
- REMOVE [...] IN collection
+ This API can only be used in the `_system` database of coordinators, and
+ requires admin user privileges.
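+
+  For illustration, both endpoints can be called from arangosh (the `DBserver`
+  value below is hypothetical):
+
+  ```
+  // per-database overview (coordinators only)
+  arango.GET("/_api/database/shardDistribution");
+  // cluster-wide statistics, optionally restricted to a single DB server
+  arango.GET("/_admin/cluster/shardDistribution?DBserver=PRMR-1234");
+  ```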
- For the cases, the optimization is not pulled off, and the normal insert/
- update/replace/remove behavior is executed, which will fail because of an
- array being used as input.
+* Decrease the size of serialized index estimates, by introducing a compressed
+ serialization format. The compressed format uses the previous uncompressed
+ format internally, compresses it, and stores the compressed data instead. This
+ makes serialized index estimates a lot smaller, which in turn decreases the
+ size of I/O operations for index maintenance.
-* Fixed issue BTS-424: fix invalid input row handling in WINDOW execution.
+* More improvements for logging:
-* Fixed ES-881: ensure that LDAP options for async, referrals and restart set
- the off value correctly. Otherwise, this can result in an "operations error".
+ - Added new REST API endpoint GET `/_admin/log/entries` to return log entries
+ in a more intuitive format, putting each log entry with all its properties
+ into an object. The API response is an array with all log message objects
+ that match the search criteria.
+    This is an extension to the already existing API endpoint GET `/_admin/log`,
+    which returns log messages fragmented into 5 separate arrays (a usage
+    sketch of the new endpoint follows this list).
-* Fixed DEVSUP-764 (SEARCH-7): inconsistent BM25 scoring for LEVENSHTEIN_MATCH
- function.
+ The already existing API endpoint GET `/_admin/log` for retrieving log
+ messages is now deprecated, although it will stay available for some time.
-* Return error 1948 when a negative edge was detected during a
- weighted traversal or was used as default weight.
+  - Truncation of log messages now takes the JSON format into account, so that
+    oversized JSON log messages still keep a valid JSON structure even after
+    being truncated.
-* Added 2 options to allow HTTP redirection customization for root ("/") call of
- HTTP API:
+ - The maximum size of in-memory log messages was doubled from 256 to 512
+    chars, so that longer parts of each log message can now be preserved.
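+
+  For illustration, the new endpoint can be queried from arangosh (the `size`
+  parameter is an assumption carried over from the existing `/_admin/log` API):
+
+  ```
+  // fetch log entries as an array of objects
+  arango.GET("/_admin/log/entries?size=10");
+  ```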
- `--http.permanently-redirect-root`: if true (default), use a permanent
- redirection (use HTTP 301 code), if false fall back to temporary redirection
- (use HTTP 302 code);
- `--http.redirect-root-to`: redirect of root URL to a specified path (redirects
- to "/_admin/aardvark/index.html" if not set (default)).
+* Fix `/_admin/cluster/removeServer` API.
+ This often returned HTTP 500 with an error message "Need open Array" due to an
+ internal error when setting up agency preconditions.
-* Fixes BTS-416. During shutdown, a shard leader wrongly reported that
- it could not drop a shard follower instead of correctly indicating
- the shutdown as reason.
+* Remove logging startup options `--log.api-enabled` and `--log.keep-logrotate`
+ for all client tools (arangosh, arangodump, arangorestore etc.), as these
+ options are only meaningful for arangod.
-* Fix various issues related to the new WINDOW operation (see BTS-402)
- - Improved explain output for ISO 8601 duration strings and fixed missing week
- component.
- - Improved validation of input data and error messages.
- - Prevent FILTERs from being moved beyond a WINDOW.
+* Extend the "move-calculations-up" optimizer rule so that it can move
+ calculations out of subqueries into the outer query.
-* Fixes BTS-417. In some cases an index did not consider both bounds (lower and
- upper) for a close range scan if both bounds are expressed using the same
- operator, e.g., `FILTER doc.beginDate >= lb AND ub >= doc.beginDate`.
+* Don't allocate ahead-of-time memory for striped PRNG array in arangod, but
+ instead use thread-local PRNG instances. Not only does this save a few
+ megabytes of memory, but it also avoids potential (but unlikely) sharing of
+ the same PRNG instance by multiple threads.
-* When writing to starting shard leader respond with specific
- 503. Fixes BTS-390.
+* Remove undocumented CMake variable `USE_BACKTRACE`, and remove define
+  `ARANGODB_ENABLE_BACKTRACE`. Both were turned off by default before. When
+  turned on, they allowed producing backtraces from within the executable,
+  provided debug symbols were available and working and the build was also
+  compiled with `USE_MAINTAINER_MODE=On`. Some code in this context was
+  obviously unreachable, so it has all been removed.
+ To log a backtrace from within arangod, it is now possible to call
+ `CrashHandler::logBacktrace()`, which will log a backtrace of the calling
+ thread to the arangod log. This is restricted to Linux builds only.
-* Reduced the agency store public members, for simpler support long-term.
+* Fix warnings about suggest-override which can break builds when warnings are
+  treated as errors.
-* Fixed bug in error reporting when a database create did not work, which
- lead to a busy loop reporting this error to the agency.
+* Turn off option `--server.export-read-write-metrics` for now, until there is
+ certainty about the runtime overhead it introduces.
-* Added a number of tests for the Agency Store public members.
+* Remove unsafe query option `inspectSimplePlans`. This option previously
+ defaulted to `true`, and turning it off could make particular queries fail.
+  The option was ignored in the cluster previously, and turning it off only had
+  an effect on single servers, where it made very simple queries (queries not
+  containing any FOR loops) skip the optimizer's complete pipeline as a
+  performance optimization. However, the optimization was only possible for
+ a very small number of queries and even had adverse effects, so it is now
+ removed entirely.
-* Improve error reporting for Merkle tree operations and improve memory usage
- for unused trees by hibernating them. In addition, add some backoff to shard
- synchronization in case there are repeated sync failures for the same shard.
+* On Linux and MacOS, require at least 8192 usable file descriptors at startup.
+  If fewer file descriptors are available to the arangod process, then the
+ startup is automatically aborted.
-* Fixes pregel lifetime management. Previously shutting down the server while a
- pregel job was still running could result in a segfault or a shutdown hanger.
+ Even the chosen minimum value of 8192 will often not be high enough to store
+ considerable amounts of data. However, no higher value was chosen in order to
+ not make too many existing small installations fail at startup after
+ upgrading.
-* Updated bundled version of Snappy library to 1.1.9.
+ The required number of file descriptors can be configured using the startup
+ option `--server.descriptors-minimum`. It defaults to 8192, but it can be
+ increased to ensure that arangod can make use of a sufficiently high number of
+ files. Setting `--server.descriptors-minimum` to a value of `0` will make the
+ startup require only an absolute minimum limit of 1024 file descriptors,
+ effectively disabling the change.
+ Such low values should only be used to bypass the file descriptors check in
+ case of an emergency, but this is not recommended for production.
-* Fixed various issues (mainly data races) reported by ThreadSanitizer.
+* Added metric `arangodb_transactions_expired` to track the total number of
+ expired and then garbage-collected transactions.
-* Improve "Shards" view in web UI so that the shards of individual collections
- can be expanded and collapsed without affecting the display of any other
- shards. Also added a "Toggle all" button the web UI to expand/collapse the
- shards for all collections.
+* Allow toggling the document read/write counters and histograms via the new
+ startup option `--server.export-read-write-metrics false`. This option
+ defaults to `true`, so these metrics will be exposed by default.
-* Fixed BTS-403: Hot restores must also clear relevant `Current` keys. The
- overriding of the `Plan` entries needs to be reflected in `Current` to avoid
- conflicts in maintenance jobs.
+* Upgraded bundled version of libunwind to v1.5.
-* Log a proper message if an unexpected state is encountered when taking over
- shard leadership. In addition, make the change to the internal followerinfo
- state atomic so that it cannot be semi-changed.
+* Added startup option `--javascript.tasks` to allow turning off JavaScript
+ tasks if not needed. The default value for this option is `true`, meaning
+ JavaScript tasks are available as before.
+ However, with this option they can be turned off by admins to limit the amount
+ of JavaScript user code that is executed.
-* Improve exception safety for maintenance thread and shard unlock
- operations.
+* Only instantiate a striped PRNG instance for the arangod server, but not for
+ any of the client tools (e.g. arangosh, arangodump, arangorestore).
+ The client tools do not use the striped PRNG, so we can save a few MBs of
+ memory for allocating the striped PRNG instance there, plus some CPU time for
+ initializing it.
-* Fixed two bugs in fuerte with HTTP/2 and VST connections.
- One could lead to ordered timeouts not being honoured. The other could
- lead to an ordered callback be called multiple times.
+* Improve shard synchronization protocol by only transferring the required parts
+ of the inventory from leader to follower. Previously, for each shard the
+ entire inventory was exchanged, which included all shards of the respective
+ database with all their details.
+ In addition, save 3 cluster-internal requests per shard in the initial shard
+ synchronization protocol by reusing already existing information in the
+ different steps of the replication process.
-* Fix response when isBuilding could not be removed from newly created
- collection, when agency precondition fails. This can happen, when own
- rebootId increment has triggered plan entry to be removed.
+* Added metric `arangodb_scheduler_low_prio_queue_last_dequeue_time` that
+ provides the time (in milliseconds) it took for the most recent low priority
+ scheduler queue item to bubble up to the queue's head. This metric can be used
+ to estimate the queuing time for incoming requests.
+ The metric will be updated probabilistically when a request is pulled from the
+  scheduler queue, and may remain at its previous value for a while if only a
+  few requests are coming in, or remain at its previous value permanently if no
+  further requests come in at all.
-* When writing to starting shard leader respond with specific
- 503. Fixes BTS-390.
+* Allow {USER} placeholder string also in `--ldap.search-filter`.
-* Introduce a new internal error code for cases where a call cannot succeed
- because the server startup phase is still in progress. This error will be
- mapped to the HTTP status code 503 (service unavailable).
- One example where this can happen is when trying to authenticate a request,
- but the _users collection is not yet available in the cluster.
+* Fixed some wrong behavior in single document updates. If the option
+  `ignoreRevs=false` was given and the precondition `_rev` was given in the body
+  while the `_key` was given in the URL path, then the revision was wrongly
+  taken as 0 rather than being read from the document body.
-* Fixed issue BTS-354: Assertion related to getCollection.
+* Improved logging for error 1489 ("a shard leader refuses to perform a
+ replication operation"). The log message will now provide the database and
+ shard name plus the differing information about the shard leader.
-* Fixed a use after free bug in the connection pool.
+* Add shard-parallelism to arangodump when dumping collections with multiple
+ shards.
+ Previously, arangodump could execute a dump concurrently on different
+ collections, but it did not parallelize the dump for multiple shards of the
+ same collection.
+ This change should speed up dumping of collections with multiple shards.
+ When dumping multiple shards of the same collection concurrently, parallelism
+ is still limited by all these threads needing to serialize their chunks into
+ the same (shared) output file.
-* Fix DEVSUP-749: Fix potential deadlock when executing concurrent view/link
- DDL operations and index DDL operations on the same collection.
+* Add option `--envelope` for arangodump, to control if each dumped document
+ should be wrapped into a small JSON envelope (e.g.
+ `{"type":2300,"data":{...}}`). This JSON envelope is not necessary anymore
+ since ArangoDB 3.8, so omitting it can produce smaller (and slightly faster)
+ dumps.
+  Restoring a dump without these JSON envelopes is handled automatically by
+ ArangoDB 3.8 and higher. Restoring a dump without these JSON envelopes into
+ previous versions (pre 3.8) however is not supported. Thus the option should
+ only be used if the client tools (arangodump, arangorestore) and the arangod
+ server are all using v3.8 or higher, and the dumps will never be stored into
+ earlier versions.
+ The default value for this option is `true`, meaning the JSON wrappers will be
+ stored as part of the dump. This is compatible with all previous versions.
-* Fixed issue #14122: when the optimizer rule "inline-subqueries" is applied,
- it may rename some variables in the query. The variable renaming was however
- not carried out for traversal PRUNE conditions, so the PRUNE conditions
- could still refer to obsolete variables, which would make the query fail with
- errors such as
+* Make AQL optimizer rule "splice-subqueries" mandatory, in the sense that it
+  cannot be disabled anymore. As a side effect of this change, there will be no
+  query execution plans created by 3.8 that contain execution nodes of type
+ `SubqueryNode`. `SubqueryNode`s will only be used during query planning and
+ optimization, but at the end of the query optimization phase will all have
+ been replaced with nodes of types `SubqueryStartNode` and `SubqueryEndNode`.
+ The code to execute non-spliced subqueries remains in place so that 3.8 can
+ still execute queries planned on a 3.7 instance with the "splice-subqueries"
+ optimizer rule intentionally turned off. The code for executing non-spliced
+ subqueries can be removed in 3.9.
- Query: AQL: missing variable ... for node ... while planning registers
+* AQL query execution plan register usage optimization.
-* Fixed bug in error reporting when a database create did not work, which led
- to a busy loop reporting this error to the agency.
+ This is a performance optimization that may positively affect some AQL queries
+ that use a lot of variables that are only needed in certain parts of the
+ query.
+ The positive effect will come from saving registers, which directly translates
+ to saving columns in AqlItemBlocks.
+
+ Previously, the number of registers that were planned for each depth level of
+ the query never decreased when going from one level to the next. Even though
+ unused registers were recycled since 3.7, this did not lead to unused
+ registers being completely dismantled.
+
+ Now there is an extra step at the end of the register planning that keeps
+  track of the registers actually used on each depth, and that shrinks the
+  number of registers for that depth to the id of the highest used register.
+  This is done for each depth separately.
+ Unneeded registers on the right hand side of the maximum used register are now
+ discarded. Unused registers on the left hand side of the maximum used register
+ id are not discarded, because we still need to guarantee that registers from
+ depths above stay in the same slot when starting a new depth.
-* Fixed the error response if the HTTP version is not 1.0 or 1.1 and if
- the Content-Length is too large (> 1 GB).
+* Added metric `arangodb_aql_current_query` to track the number of currently
+ executing AQL queries.
-* Add a connection cache for internal replication requests.
+* Internal refactoring of K_PATH feature, with the goal to have all graph
+ algorithms on the same framework. This change should not have any visible
+ effect on users.
-* Improve legibility of size values (by adding KB, MB, GB, TB suffixes) to
- output generated by client tools.
+* Removed server-side JavaScript object `ArangoClusterComm`, so it cannot be
+ used from inside JavaScript operations or Foxx.
+ The `ArangoClusterComm` object was previously used inside a few internal
+ JavaScript operations, but was not part of the public APIs.
-* Timely updates of rebootId / cluster membership of DB servers and
- coordinators in ClusterInfo. Fixes BTS-368 detected in chaos tests.
+* Restrict access to functions inside JavaScript objects `ArangoAgency` and
+ `ArangoAgent` to JavaScript code that is running in privileged mode, i.e. via
+ the server's emergency console, the `/_admin/execute` API (if turned on) or
+ internal bootstrap scripts.
-* Guarded access only to ActionBase::_result.
+* Added startup option `--javascript.transactions` to allow turning off
+ JavaScript transactions if not needed. The default value for this option is
+ `true`, meaning JavaScript transactions are available as before.
+ However, with this option they can be turned off by admins to limit the amount
+ of JavaScript user code that is executed.
-* Fixed proper return value in sendRequestRetry if server is shutting down.
+* Introduce a default memory limit for AQL queries, to prevent rogue queries
+ from consuming the entire memory available to an arangod instance.
-* Updated arangosync to 2.2.0.
+ The limit is introduced via changing the default value of the option
+ `--query.memory-limit` from previously `0` (meaning: no limit) to a
+ dynamically calculated value.
+  The per-query memory limit defaults are now:
-* Fixed internal issue #798: In rare case when remove request
- completely cleans just consolidated segment commit could be cancelled
- and documents removed from collection may be left dangling in the ArangoSearch index.
- Also fixes ES-810 and BTS-279.
+ Available memory: 0 (0MiB) Limit: 0 unlimited, %mem: n/a
+ Available memory: 134217728 (128MiB) Limit: 33554432 (32MiB), %mem: 25.0
+ Available memory: 268435456 (256MiB) Limit: 67108864 (64MiB), %mem: 25.0
+ Available memory: 536870912 (512MiB) Limit: 201326592 (192MiB), %mem: 37.5
+ Available memory: 805306368 (768MiB) Limit: 402653184 (384MiB), %mem: 50.0
+ Available memory: 1073741824 (1024MiB) Limit: 603979776 (576MiB), %mem: 56.2
+ Available memory: 2147483648 (2048MiB) Limit: 1288490189 (1228MiB), %mem: 60.0
+ Available memory: 4294967296 (4096MiB) Limit: 2576980377 (2457MiB), %mem: 60.0
+ Available memory: 8589934592 (8192MiB) Limit: 5153960755 (4915MiB), %mem: 60.0
+ Available memory: 17179869184 (16384MiB) Limit: 10307921511 (9830MiB), %mem: 60.0
+ Available memory: 25769803776 (24576MiB) Limit: 15461882265 (14745MiB), %mem: 60.0
+ Available memory: 34359738368 (32768MiB) Limit: 20615843021 (19660MiB), %mem: 60.0
+ Available memory: 42949672960 (40960MiB) Limit: 25769803776 (24576MiB), %mem: 60.0
+ Available memory: 68719476736 (65536MiB) Limit: 41231686041 (39321MiB), %mem: 60.0
+ Available memory: 103079215104 (98304MiB) Limit: 61847529063 (58982MiB), %mem: 60.0
+ Available memory: 137438953472 (131072MiB) Limit: 82463372083 (78643MiB), %mem: 60.0
+ Available memory: 274877906944 (262144MiB) Limit: 164926744167 (157286MiB), %mem: 60.0
+ Available memory: 549755813888 (524288MiB) Limit: 329853488333 (314572MiB), %mem: 60.0
-* Retry if an ex-leader can no longer drop a follower because it is no longer
- leading.
+  As before, a memory limit value of `0` means no limitation.
+ The limit values are per AQL query, so they may still be too high in case
+ queries run in parallel. The defaults are intentionally high in order to not
+ stop any valid, previously working queries from succeeding.
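+
+  A minimal arangosh sketch for overriding the default limit per query, using
+  the existing per-query `memoryLimit` option (collection name hypothetical):
+
+  ```
+  // allow this query to use up to 512 MiB, regardless of the default
+  db._query("FOR doc IN mycollection RETURN doc", {},
+            { memoryLimit: 536870912 });
+  ```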
-* Fixed a small problem in fuerte which could lead to an assertion failure.
+* Added startup option `--audit.queue` to control audit logging queuing behavior
+ (Enterprise Edition only):
-* Upgrade jemalloc version to latest stable dev.
+ The option controls whether audit log messages are submitted to a queue and
+ written to disk in batches or if they should be written to disk directly
+ without being queued.
+ Queueing audit log entries may be beneficial for latency, but can lead to
+  queued but not yet written messages being lost in case of a power loss or
+  crash. Setting this
+ option to `false` mimics the behavior from 3.7 and before, where audit log
+ messages were not queued but written in a blocking fashion.
-* Fixed issue BTS-373: ASan detected possible heap-buffer-overflow at
- arangodb::transaction::V8Context::exitV8Context().
+* Added metric `arangodb_server_statistics_cpu_cores` to provide the number of
+ CPU cores visible to the arangod process. This is the number of CPU cores
+ reported by the operating system to the process.
+ If the environment variable `ARANGODB_OVERRIDE_DETECTED_NUMBER_OF_CORES` is
+ set to a positive value at instance startup, this value will be returned
+ instead.
-* Allow to specify a fail-over LDAP server. Instead of "--ldap.OPTION" you need
- to specify "--ldap2.OPTION". Authentication / Authorization will first check
- the primary LDAP server. If this server cannot authenticate a user, it will
- try the secondary one. It is possible to specify a file containing all users
- that the primary (or secondary) LDAP server is handling by specifying the
- option "--ldap.responsible-for". This file must contain the usernames
- line-by-line.
+* `COLLECT WITH COUNT INTO x` and `COLLECT var = expr WITH COUNT INTO x` are now
+ internally transformed into `COLLECT AGGREGATE x = LENGTH()` and
+  `COLLECT var = expr AGGREGATE x = LENGTH()` respectively. In addition, any
+  arguments passed to the `COUNT`/`LENGTH` aggregator functions are now
+  optimized away. This not only simplifies the code, but also allows more query
+  optimizations (see the sketch after this list):
+ - If the variable in `COLLECT WITH COUNT INTO var` is not used, the implicit
+ aggregator is now removed.
+ - All queries of the form `COLLECT AGGREGATE x = LENGTH()` are now executed
+ using the count executor, which can result in significantly improved
+ performance.
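+
+  For example, the two query forms below should now both be executed using the
+  count executor (collection name is hypothetical):
+
+  ```
+  db._query("FOR doc IN mycollection COLLECT WITH COUNT INTO c RETURN c");
+  db._query("FOR doc IN mycollection COLLECT AGGREGATE c = COUNT(doc) RETURN c");
+  ```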
-* Make the time-to-live (TTL) value of a streaming cursor only count after
- the response has been sent to the client.
+* Added AQL timezone functions `DATE_TIMEZONE` and `DATE_TIMEZONES`.
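+
+  A minimal arangosh sketch, assuming the zero-argument forms of the new
+  functions:
+
+  ```
+  db._query("RETURN DATE_TIMEZONE()").toArray();   // timezone of the server
+  db._query("RETURN DATE_TIMEZONES()").toArray();  // known timezone names
+  ```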
-* Improve performance of batch CRUD operations (insert, update, replace,
- remove) if some of the documents in the batch run into write-write conflicts.
- Rolling back partial operations in case of a failure is very expensive
- because it requires rebuilding RocksDB write batches for the transaction
- from scratch. Rebuilding write batches takes time proportional to the number
- of operations in the batch, and for larger batches the cost can be
- prohibitive.
- Now we are not rolling back write batches in some situations when this is
- not required, so that in many cases running into a conflict does not have
- that high overhead. There can still be issues when conflicts happen for index
- entries, but a lot of previously problematic cases should now work better.
+* Make DB servers report storage engine health to the agency, via a new "health"
+ attribute in requests sent to Sync/ServerStates/.
+ The supervision can in the future check this attribute if it is posted, and
+ mark servers as BAD or FAILED in case an unhealthy status is reported.
+ DB server health is currently determined by whether or not the storage engine
+ (RocksDB) has reported a background error, and by whether or not the free disk
+ space has reached a critical low amount. The current threshold for free disk
+ space is set at 1% of the disk capacity (only the disk is considered that
+ contains the RocksDB database directory).
+ The minimum required free disk space percentage can be configured using the
+ new startup option `--rocksdb.minimum-disk-free-percent`, which needs to be
+  between 0 and 1 (inclusive). A value of 0 disables the check.
+ The minimum required free disk space can also be configured in bytes using the
+ new startup option `--rocksdb.minimum-disk-free-bytes`. A value of 0 disables
+ this check, too.
-* Allow AQL variable names starting with an underscore, as stated in the docs.
+* Failed servers are now reported consistently in the web interface, at
+ approximately the same time in the navigation bar and in the nodes view.
+ Previously these two places had their own, independent poll mechanism for the
+ nodes' health, and they were updated independently, which could cause an
+ inconsistent view of the nodes' availability.
+  Using only one poll mechanism instead also saves some periodic background
+ requests for the second availability check.
-* Fix crashes during arangorestore operations due to usage of wrong pointer
- value for updating user permissions.
+* Stabilize a Foxx cleanup test.
-* Added option `--query-max-runtime` to arangoexport, in order to control
- maximum query runtime.
+* Drop a pair of braces {} in /_admin/metrics in case of empty labels, which
+ makes the API adhere better to the official Prometheus syntax.
-* Fix BTS-340: AQL expressions similar to `x < 3 || x` are no longer erroneously
- be reduced to `x < 3` by the optimizer rule remove-redundant-or.
+* Add some more metrics to the ConnectionPool.
-* Changed default value of arangodump's `--envelope` option from `true` to
- `false`. This allows using higher parallelism in arangorestore when
- restoring large collection dumps. As a side-effect, this will also decrease
- the size of dumps taken with arangodump, and should slightly improve dump
- speed.
+* Reduce overhead of audit logging functionality if audit logging is turned off.
-* Improve parallelism capabilities of arangorestore.
+* Add several more attributes to audit-logged queries, namely query execution
+ time and exit code (0 = no error, other values correspond to general ArangoDB
+ error codes).
- arangorestore can now dispatch restoring data chunks of a collection to idle
- background threads, so that multiple restore requests can be in flight for
- the same collection concurrently.
+* Fixed a bug in maintainer mode that sorted followerinfo lists the wrong way.
- This can improve restore speed in situations when there are idle threads
- left (number of threads can be configured via arangorestore's `--threads`
- option) and the dump file for the collection is large.
+* Limit value of `--rocksdb.block-cache-size` to 1 GB for agent instances to
+ reduce agency RAM usage, unless configured otherwise. In addition, limit the
+ value of `--rocksdb.total-write-buffer-size` to 512 MB on agent instances for
+ the same reason.
- The improved parallelism is only used when restoring dumps that are in the
- non-enveloped format. This format has been introduced with ArangoDB 3.8.
- The reason is that dumps in the non-enveloped format only contain the raw
- documents, which can be restored independent of each other, i.e. in any
- order. However, the enveloped format may contain documents and remove
- operations, which need to be restored in the original order.
+* Added new `rocksdb_write_stalls` and `rocksdb_write_stops` counter metrics,
+ which should be more accurate than existing metrics related to the underlying
+ conditions.
-* Fix BTS-374: thread race between ArangoSearch link unloading and storage
- engine WAL flushing.
+* Increased the default value of `--rocksdb.min-write-buffer-number-to-merge` in
+ some cases when we have allocated a sufficient amount of memory to the write
+ buffers for this to make sense. The increased value should help prevent
+  compaction-induced write stalls/stops, and is only applied under conditions
+  where it should not greatly increase the chance of flush-induced write
+  stalls/stops.
-* Fix thread race between ArangoSearch link unloading and storage engine
- WAL flushing.
+* Changed the default values for `--rocksdb.cache-index-and-filter-blocks` and
+ `--rocksdb.cache-index-and-filter-blocks-with-high-priority` to true to
+ improve control over memory usage.
-* change arangosh client behavior:
- - *_RAW methods will never add a `body` to HEAD responses
- - *_RAW methods will now always return velocypack-typed responses in Buffers
- - `--server.force-json` will now be applied as default, overridable
- by user code
+* Lowered the minimum allowed value for `--rocksdb.max-write-buffer-number` from
+ 9 to 4 to allow more fine-grained memory usage control.
-* Add HTTP REST API endpoint POST `/_api/cursor/` as a drop-in
- replacement for PUT `/_api/cursor/`. The POST API is functionally
- equivalent to the existing PUT API. The benefit of using the POST API is
- that HTTP POST requests will not be considered as idempotent, so proxies
- may not retry them if they fail. This was the case with the existing PUT
- API, as HTTP PUT requests can be considered idempotent according to the
- HTTP specification.
-
- The POST API is now used internally by ArangoDB's own requests, including
- the web UI and the client tools. That means the web UI and client tools
- will only work with ArangoDB versions that have support for the new POST
- API. This is true for recent 3.7 and 3.8 versions, as the POST API will be
- backported there as well.
+* Added new ArangoSearch view option 'countApproximate' for customizing the
+  view count strategy.
-* Fixed BTS-360 and ES-826: sporadic ArangoSearch error `Invalid RL encoding in
- 'dense_fixed_offset_column_key'`.
+* Views on SmartGraph edge collections no longer contain some documents twice.
-* Add value of `_key` to more insert/update/replace/remove error messages
- so it is easier to figure out which document caused unique constraint
- violations and/or write-write conflict during a multi-document write
- operation.
+* Fixed issue #12248: Web UI - Added missing HTML escaping in the setup script
+ section of a Foxx app.
-* Fix cluster internal retry behavior for network communications. In particular
- retry on 421 (leader refuses operation). This leads to the cluster letting
- less internal errors out to clients.
+* The scheduler will now run a minimum of 4 threads at all times, and the
+ default and minimal value for `--server.maximal-threads` has been lowered from
+ 64 to the greater of 32 and twice the number of detected cores.
-* Don't display obsoleted startup options and sections in `--help` and
- `--help-.` commands. Also rename "global" to "general" options.
+* Throttle work coming from the low priority queue, according to a constant and
+ an estimate taking into account fanout for multi-shard operations.
-* Added option `--query.require-with` to make AQL in single server mode also
- require `WITH` clauses where the cluster would need them.
- The option is turned off by default, but can be turned on in single servers
- to remove this behavior difference between single servers and clusters,
- making later a transition from single server to cluster easier.
+* Move to 4 priority levels "low", "medium", "high" and "maintenance" in
+  scheduler to ensure that maintenance work and diagnostics are always possible,
+ even in the case of RocksDB throttles. Do not allow any RocksDB work on
+ "maintenance".
-* Fixed a problem in document batch operations, where errors from one shard
- were reported multiple times, if the shard is completely off line.
+* Commit replications on the high priority queue.
-* Removed assertion for success of a RocksDB function. Throw a proper
- exception instead.
+* Essentially get rid of the timeout in replication after which followers are
+  dropped. This is now entirely handled via reboot and failure tracking. The
+  timeout now has a default minimum of 15 minutes but can still be configured
+  via options.
-* Show peak memory usage in AQL query profiling output.
+* Additional metrics for all queue lengths and ongoing low priority work.
-* Micro improvements for Pregel job API and documentation:
- - Added a few useful attributes to Pregel HTTP API docs.
- - Added "parallelism" attribute to the result of Pregel job status responses,
- so that the effective parallelism is reported back.
- - Make sure "computationTime" in Pregel job status response does not
- underflow in case of errors.
+* New metrics for the number and total time of replication operations.
-* Prevent arangod from terminating with "terminate called without an active
- exception" (SIGABRT) in case an out-of-memory exception occurs during
- creating an ASIO socket connection.
+* New metrics for number of internal requests in flight, internal request
+  duration, and internal request timeouts.
-* UI builds are now using the yarn package manager instead of the previously
- used node package manager.
+* Fix `Gauge` assignment operators.
-* Fixed issue #13169: arangoimport tsv conversion of bools and null, although
- switched off by `--convert false`.
+* Add cluster support for collection.checksum() method to calculate CRC
+ checksums for collections.
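+
+  For example, in arangosh (collection name is hypothetical):
+
+  ```
+  // now also computes a CRC checksum when run against a coordinator
+  db.mycollection.checksum();
+  ```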
- Importing unquoted `null`, `false` and `true` literals from delimited files
- get imported as strings now if `convert` is explicitly turned off. It
- previously affected unquoted numbers only.
+* Make all Pregel HTTP and JavaScript APIs also accept stringified execution
+ number values, in addition to numeric ones.
-* Web UI: Highlight binary and hexadecimal integer literals in AQL queries.
+ This allows passing larger execution numbers as strings, so that any data loss
+ due to numeric data type conversion (uint32 => double) can be avoided.
-* Fix BTS-350, BTS-358: Fixed potential startup errors due to global
- replication applier being started before end of database recovery procedure.
- Also fixed potential shut down errors due to global replication applier
- being shut down in parallel to a concurrent shut down attempt.
+ The change also makes the Pregel HTTP and JavaScript APIs for starting a run
+ return a stringified execution number, e.g. "12345" instead of 12345.
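+
+  A minimal arangosh sketch (graph name and algorithm parameters are
+  hypothetical):
+
+  ```
+  var pregel = require("@arangodb/pregel");
+  // start() now returns a stringified execution number, e.g. "12345"
+  var execNum = pregel.start("pagerank", "myGraph", { maxGSS: 10 });
+  // status() accepts the stringified as well as the numeric form
+  pregel.status(execNum);
+  ```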
-* Experimentally switch to wyhash (from xxhash) for velocypack. This is an
- experiment in devel to check if it produces any observable speedups.
+* Turn off `StatisticsWorker` thread on DB servers.
+ This thread was previously only running queries on the local RocksDB instance,
+ but using the cluster-wide collection names. So effectively it did nothing
+ except use a bit of background CPU. In this case it is better to turn off the
+ background thread entirely on the DB servers.
+
+* Avoid the usage of std::regex when constructing date/time string values for
+ log messages. This is a performance optimization only.
+
+* Increase background garbage-collection interval for cluster transactions from
+ 1 second to 2 seconds. This change should reduce the amount of background task
+ activity a tiny bit (though hardly measurable on an otherwise idle server).
+
+* Make the audit log honor the configured logging date/time output format (i.e.
+ `--log.time-format` option). Previously the audit logging always created a
+ time value in the server's local time, and logged it in format
+ YYYY-MM-DDTHH:MM:SS.
+
+ From 3.8 onwards, the audit logger will honor the date/time format specified
+ via the `--log.time-format` option, which defaults to `utc-datestring`. This
+ means the audit logging will by default log all dates/times in UTC time. To
+ restore the pre-3.8 behavior, please set the option `--log.time-format` to
+ `local-datestring`, which will make the audit logger (and all other server log
+ messages) use the server's local time.
-* Updated ArangoDB Starter to 0.15.0.
+* Added metrics for the system CPU usage:
+ - `arangodb_server_statistics_user_percent`: Percentage of time that the
+ system CPUs have spent in user mode
+ - `arangodb_server_statistics_system_percent`: Percentage of time that the
+ system CPUs have spent in kernel mode
+ - `arangodb_server_statistics_idle_percent`: Percentage of time that the
+ system CPUs have been idle
+ - `arangodb_server_statistics_iowait_percent`: Percentage of time that the
+ system CPUs have been waiting for I/O
-* Remove deprecated HTTP REST API `/_api/export`. This API was deprecated
- in a previous version because it was not supported in clusters and was
- also covered completely by streaming AQL queries for the RocksDB storage
- engine.
+ These metrics resemble the overall CPU usage metrics in `top`. They are
+ available on Linux only.
-* Added error handling for figures command in cluster. Previously errors
- returned by shards were ignored when aggregating the individual responses.
+* Fix log topic of general shutdown message from "cluster" to general.
-* Updated ArangoDB Starter to 0.15.0-preview-4.
+* Automatically add "www-authenticate" headers to server HTTP 401 responses, as
+ required by the HTTP specification.
-* Fixed CPPCHECK warning or added suppression.
+* Enable HTTP request statistics and provide metrics even in case
+ `--server.statistics-history` is set to `false` (this option will set itself
+ to off automatically on agency instances on startup if not explicitly set).
+ This change provides more metrics on all server instances, without the need to
+ persist them in the instance's RocksDB storage engine.
-* Added enterprise-build-repository and oskar-build-repository to `--version`
- as `enterprise-build-repository` and `oskar-build-repository`.
+* Remove extra CMake option `DEBUG_SYNC_REPLICATION` and use the already
+  existing `USE_FAILURE_TESTS` option for its purpose.
-* Clean up replication code and remove a 3.2-compatibility mode that was
- only useful when replicating from a leader < ArangoDB version 3.3.
+* Updated bundled version of Snappy compression/decompression library to 1.1.8.
-* Obsolete option `--database.old-system-collections`. This option has no
- meaning in ArangoDB 3.9, as old system collections will not be created
- anymore in this version. The option was deprecated in 3.8 and announced
- to be obsoleted.
+* Added support for `GEO_DISTANCE`, `GEO_CONTAINS`, `GEO_INTERSECTS`,
+ `GEO_IN_RANGE` to ArangoSearch.
-* Upgrade velocypack to latest, C++17-only version.
+* Added new `GeoJSON` ArangoSearch analyzer.
-* Make arangovpack more powerful, by supporting different input and output
- formats (json and vpack, plain or hex-encoded).
- The arangovpack options `--json` and `--pretty` have been removed and have
- been replaced with separate options for specifying the input and output
- types:
- * `--input-type` ("json", "json-hex", "vpack", "vpack-hex")
- * `--output-type` ("json", "json-pretty", "vpack", "vpack-hex")
- The previous option `--print-non-json` has been replaced with the option
- `--fail-on-non-json` which makes arangovpack fail when trying to emit
- non-JSON types to JSON output.
+* Added new `GeoPoint` ArangoSearch analyzer.
-* Fix undefined behavior in dynarray constructor when running into
- an out-of-memory exception during construction. In arangod, this can only
- happen during metrics objects construction at program start.
+* Added new `GEO_IN_RANGE` AQL function.
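+
+  A minimal arangosh sketch; the argument order shown (two points, then the
+  minimum and maximum distance in meters) is an assumption:
+
+  ```
+  db._query("RETURN GEO_IN_RANGE([6.96, 50.94], [7.0, 51.0], 0, 10000)")
+    .toArray();
+  ```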
-* Added option `--headers-file` to arangoimport, to optionally read CSV/TSV
- headers from a separate file.
+* Added new 'aql' type for ArangoSearch analyzers.
-* Updated ArangoDB Starter to 0.15.0-preview-3.
+* Obsoleted the startup options `--database.throw-collection-not-loaded-error`
+ and `--ttl.only-loaded-collection`.
-* Fixed issue BTS-353: memleak when running into an out-of-memory situation
- while repurposing an existing AqlItemBlock.
+ These options were meaningful for the MMFiles storage engine only, but for the
+ RocksDB storage engine they did not make any difference. Using these startup
+ options is still possible, but will have no effect other than generating a
+ warning at server startup.
-* Fix logging of urls when using `--log.level requests=debug`. There was an
- issue since v3.7.7 with the wrong URL being logged in request logging if
- multiple requests were sent over the same connection. In this case, the
- request logging only reported the first URL requested in the connection,
- even for all subsequent requests.
+* Added CMake option `USE_MINIMAL_DEBUGINFO`.
+ This option is turned off by default. If turned on, the created binaries
+ will contain only a minimum amount of debug symbols, reducing the size of the
+ executables. If turned off (which is the default), the binaries will contain
+ full debug information, which will make them bigger in size unless the debug
+ information is later stripped again.
-* Deprecate option `--rocksdb.exclusive-writes`, which was meant to serve
- only as a stopgap measure while porting applications from the MMFiles
- storage engine to RocksDB.
+* Modified the returned error code for calling the `shards()` function on a
+ collection in single-server from "internal error" (error number 4) to "shards
+ API is only available in cluster" and error number 9, HTTP status code 501.
-* Added startup option `--query.allow-collections-in-expressions` to control
- whether collection names can be used in arbitrary places in AQL expressions,
- e.g. `collection + 1`. This was allowed before, as a collection can be seen
- as an array of documents. However, referring to a collection like this in a
- query would materialize all the collection's documents in RAM, making such
- constructs prohibitively expensive for medium-size to large-size collections.
+* Added WINDOW keyword to AQL to allow aggregations on related rows.
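+
+  As a sketch (collection and attribute names are illustrative), a sliding
+  window over the current row and its direct neighbors could look like this:
+
+    FOR t IN observations
+      SORT t.time
+      WINDOW { preceding: 1, following: 1 }
+      AGGREGATE rollingAverage = AVG(t.value)
+      RETURN { time: t.time, avg: rollingAverage }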
- The option can now be set to `false` to prohibit accidental usage of
- collection names in AQL expressions. With that setting, using a collection
- inside an arbitrary expression will trigger the error `collection used as
- expression operand` and make the query fail.
- Even with the option being set to `false`, it is still possible to use
- collection names in AQL queries where they are expected, e.g. `FOR doc IN
- collection RETURN doc`.
+* Added new graph method K_PATHS to AQL. This will enumerate all paths between a
+ source and a target vertex that match the given length.
+ For example, the query
+ ```
+ FOR path IN 2..4 OUTBOUND K_PATHS "v/source" TO "v/target" GRAPH "g"
+ RETURN path
+ ```
+  will yield all paths in the format
+  {
+    vertices: [v/source, ... , v/target],
+    edges: [v/source -> v/1, ..., v/n -> v/target]
+  }
+ that have length exactly 2 or 3 or 4, start at v/source and end at v/target.
+ The order of those paths in the result set is not guaranteed.
-* Remove obsolete API endpoint /_admin/repair/distributeShardsLike`. This
- API was intended to correct some bad state introduced before 3.2.12 or 3.3.4,
- respectively. It had to be invoked manually by callers and there was never
- any driver support for it.
+* Fixed issue BTS-195: AQL update queries using the `keepNull` option set to
+  `false` had inconsistent behavior. For example, given a collection `test`
+  containing an empty document with just key `testDoc`, the following query
+  would return different results when run for the first time versus the
+  second time:
-* Remove now-unused SubqueryExecutor. This is an internal change only and
- should not have any effect on queries, as from 3.8 onwards only spliced
- subqueries should be used in query execution plans and during query
- execution.
+ UPDATE 'testDoc'
+ WITH {test: {sub1: true, sub2: null}} IN test
+ OPTIONS { keepNull: false, mergeObjects: true }
-* Remove CMake control variable `UNCONDITIONALLY_BUILD_LOG_MESSAGES`.
- Now, any maintainer mode build will build all log messages automatically,
- so we will have full coverage of log message construction during our
- tests. In non-maintainer mode, log messages are still only built when
- actually required. This simplifies the build and increases coverage.
+ For its first run, the query would return
-* Updated ArangoDB Starter to 0.15.0-preview-2.
+ {
+ "_key": "testDoc",
+ "test": {
+ "sub1": true,
+ "sub2": null
+ }
+ }
-* Updated OpenSSL to 1.1.1k and OpenLDAP to 2.4.58.
+ (with the `null` attribute value not being removed). For all subsequent runs,
+ the same query would return
-* Updated arangosync to 2.0.1.
+ {
+ "_key": "testDoc",
+ "test": {
+ "sub1": true,
+ }
+ }
-* Introduce metrics for AQL query memory limit violations:
- - `arangodb_aql_global_query_memory_limit_reached`: Total number of times the
- global query memory limit was violated.
- - `arangodb_aql_local_query_memory_limit_reached`: Total number of times a
- local query memory limit was violated.
+ (with the `null` value removed as requested).
-* Set the default value for `--query.global-memory-limit` to around 90% of RAM,
- so that a global memory limit is now effective by default.
+  This inconsistency was due to the `keepNull` option being handled
+  differently depending on whether the target attribute already existed in the
+  to-be-updated document. The behavior is now consistent, so `null` values are
+  now properly removed from sub-attributes even if the target attribute did
+  not yet exist in the to-be-updated document. This makes such updates
+  idempotent again.
- The default global memory limit value is calculated by a formula depending on
- the amount of available RAM and will result in the following values for
- common RAM sizes:
+  This is a behavior change compared to previous versions, but it will only
+  have an effect when `keepNull` is set to `false` (the default value is
+  `true`, however), and only when just-inserted object sub-attributes contain
+  `null` values.
- RAM: 0 (0MiB) Limit: 0 unlimited, %mem: n/a
- RAM: 134217728 (128MiB) Limit: 33554432 (32MiB), %mem: 25.0
- RAM: 268435456 (256MiB) Limit: 67108864 (64MiB), %mem: 25.0
- RAM: 536870912 (512MiB) Limit: 255013683 (243MiB), %mem: 47.5
- RAM: 805306368 (768MiB) Limit: 510027366 (486MiB), %mem: 63.3
- RAM: 1073741824 (1024MiB) Limit: 765041049 (729MiB), %mem: 71.2
- RAM: 2147483648 (2048MiB) Limit: 1785095782 (1702MiB), %mem: 83.1
- RAM: 4294967296 (4096MiB) Limit: 3825205248 (3648MiB), %mem: 89.0
- RAM: 8589934592 (8192MiB) Limit: 7752415969 (7393MiB), %mem: 90.2
- RAM: 17179869184 (16384MiB) Limit: 15504831938 (14786MiB), %mem: 90.2
- RAM: 25769803776 (24576MiB) Limit: 23257247908 (22179MiB), %mem: 90.2
- RAM: 34359738368 (32768MiB) Limit: 31009663877 (29573MiB), %mem: 90.2
- RAM: 42949672960 (40960MiB) Limit: 38762079846 (36966MiB), %mem: 90.2
- RAM: 68719476736 (65536MiB) Limit: 62019327755 (59146MiB), %mem: 90.2
- RAM: 103079215104 (98304MiB) Limit: 93028991631 (88719MiB), %mem: 90.2
- RAM: 137438953472 (131072MiB) Limit: 124038655509 (118292MiB), %mem: 90.2
- RAM: 274877906944 (262144MiB) Limit: 248077311017 (236584MiB), %mem: 90.2
- RAM: 549755813888 (524288MiB) Limit: 496154622034 (473169MiB), %mem: 90.2
+* Optimized the handling of empty append entries.
+
+* Remove any special handling for the obsoleted collection attributes
+  `indexBuckets`, `journalSize`, `doCompact` and `isVolatile`. These attributes
+  were meaningful only with the MMFiles storage engine and have no meaning with
+  the RocksDB storage engine, so their special handling could be removed from
+  the internal code.
+  Client applications and tests that rely on setting any of these attributes
+  producing an error when using the RocksDB engine may need adjustment now.
-* The old metrics API contains the following gauges which should actually be
- counters:
- * arangodb_scheduler_jobs_dequeued
- * arangodb_scheduler_jobs_submitted
- * arangodb_scheduler_jobs_done
- Therefore the new v2 metric api adds the following counters:
- * arangodb_scheduler_jobs_dequeued_total
- * arangodb_scheduler_jobs_submitted_total
- * arangodb_scheduler_jobs_done_total
- These counters are only visible in the new v2 metrics API and replace the old
- metrics which are suppressed for v2.
+* Added a `--continue` option to arangorestore. arangorestore now keeps track
+  of its progress and can continue the restore operation after an error has
+  occurred.
-* Fix connectionTime statistic. This statistic should provide the distribution
- of the connection lifetimes, but in previous versions the tracking was broken
- and no values were reported.
+* Don't respond with a misleading error in smart vertex collections.
-* Add an option for locking down all endpoints in the `/_admin/cluster` REST
- API for callers without a proper JWT set in the request. There is a new
- startup option `--cluster.api-jwt-policy` that allows *additional* checks
- for a valid JWT in requests to sub-routes of `/_admin/cluster`. The
- possible values for the startup option are:
+  When inserting a document with a non-conforming key pattern into a smart
+  vertex collection, the response error code and message were 1466
+  (ERROR_CLUSTER_MUST_NOT_SPECIFY_KEY) and "must not specify _key for this
+  collection".
+  This was misleading, because it is actually allowed to specify a key value
+  for documents in such collections. However, there are some restrictions for
+  valid key values (e.g. the key must be a string and contain the smart graph
+  attribute value at the front, followed by a colon).
+  If any of these restrictions are not met, the server previously responded
+  with "must not specify _key for this collection", which is misleading. This
+  change rectifies it so that the server responds with error 4003
+  (ERROR_KEY_MUST_BE_PREFIXED_WITH_SMART_GRAPH_ATTRIBUTE) and message "in smart
+  vertex collections _key must be a string and prefixed with the value of the
+  smart graph attribute". This should make it much easier to understand what
+  the actual problem is.
- - "jwt-all": requires a valid JWT for all accesses to `/_admin/cluster` and
- its sub-routes. If this configuration is used, the "Cluster" and "Nodes"
- sections of the web interface will be disabled, as they are relying on the
- ability to read data from several cluster APIs.
- - "jwt-write": requires a valid JWT for write accesses (all HTTP methods
- except HTTP GET) to `/_admin/cluster`. This setting can be used to allow
- privileged users to read data from the cluster APIs, but not to do any
- modifications. All existing permissions checks for the cluster API routes
- are still in effect with this setting, meaning that read operations without
- a valid JWT may still require dedicated other permissions (as in 3.7).
- - "jwt-compat": no *additional* access checks are in place for the cluster
- APIs. However, all existing permissions checks for the cluster API routes
- are still in effect with this setting, meaning that all operations may
- still require dedicated other permissions (as in 3.7).
+* Fix an issue in arangoimport with improper handling of filenames with fewer
+  than 3 characters. The specified input filename was checked for a potential
+  ".gz" ending, but the check required the filename to be at least 3 characters
+  long. This is now fixed.
- The default value for the option is `jwt-compat`, which means this option
- will not cause any extra JWT checks compared to 3.7.
+* Fix for BTS-191: Made transaction API database-aware.
-* Increase default idle timeout in streaming transactions from 10 seconds to
- 60 seconds, and make the timeout configurable via a startup parameter
- `--transaction.streaming-idle-timeout`.
+* Minor cleanup of agent callbacks and reduced verbosity in their logging.
-* Use RebootTracker to abort cluster transactions on DB servers should the
- originating coordinator die or be rebooted. The previous implementation left
- the coordinator's transactions open on DB servers until they timed out there.
- Now, the coordinator's unavailability or reboot will be detected as early as
- it is reported by the agency, and all open transactions from that coordinator
- will be auto-aborted on DB servers.
+* Speed up initial replication of collections/shards data by not wrapping each
+ document in a separate `{"type":2300,"data":...}` envelope. In addition, the
+ follower side of the replication will request data from leaders in VelocyPack
+ format if the leader is running at least version 3.8.
+ Stripping the envelopes and using VelocyPack for transfer allows for smaller
+ data sizes when exchanging the documents and faster processing, and thus can
+ lead to time savings in document packing and unpacking as well as reduce the
+ number of required HTTP requests.
-* Fix shortName labels in metrics, in particular for agents.
+* Added counter metric `arangodb_agency_callback_registered` for tracking the
+  total number of agency callbacks that were registered.
-* Fix a race in LogAppender::haveAppenders.
- `haveAppenders` is called as part of audit logging. It accesses internal maps
- but previously did not hold a lock while doing so.
+* Added weighted traversal. Use `mode: "weighted"` as an option to enumerate
+  paths by increasing weight. The cost of an edge can be read from an edge
+  attribute, which can be specified via the `weightAttribute` option.
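+
+  A sketch, using the option names from this entry (graph, collection and
+  attribute names are illustrative):
+
+    FOR v, e, p IN 1..10 OUTBOUND "places/start" GRAPH "routes"
+      OPTIONS { mode: "weighted", weightAttribute: "distance" }
+      RETURN p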
-* Fix implicit capture of views in a context of JS transaction.
+* Fixed issue ES-696: SEARCH vs FILTER lookup performance.
+  The consolidation functionality for ArangoSearch view links could pile up an
+  enormous amount of non-mergeable segments due to improper scheduling logic.
-* Fix a crash caused by returning a result produced by ANALYZER function.
+* Make the scheduler react and start new threads slightly faster when a lot of
+  new work arrives.
-* Update the Web UI's list of built-in AQL functions for proper syntax
- highlighting in the query editor.
+* Added new ArangoSearch "pipeline" analyzer type.
-* Bug-fix in the case of very rare network issues there was a chance that
- an AQL query could get stuck during a cleanup and after a commit.
- This would cause the client to receive a timeout, and the Coordinator
- blocking a Scheduler thread. This situation is sorted out and the thread
- will not be blocked anymore. We also added logs in case the query
- could not successfully be cleaned up, which would leave locks on shards
- behind.
+* Added replication metrics `arangodb_replication_initial_sync_bytes_received`
+ for the number of bytes received during replication initial sync operations
+ and `arangodb_replication_tailing_bytes_received` for the number of bytes
+ received for replication tailing requests.
+  Also added `arangodb_replication_failed_connects` to track the number of
+  connection failures or non-OK responses during replication.
-* Switched to GCC 10 as the default compiler and use Sandy Bridge as the
- default required architecture (Linux, macOS binaries).
+* Added metrics `rocksdb_free_inodes` and `rocksdb_total_inodes` to track the
+ number of free inodes and the total/maximum number of inodes for the file
+ system the RocksDB database directory is located in. These metrics will always
+ be 0 on Windows.
-* Fix an assertion failure that occurred when restoring view definitions from
- a cluster into a single server.
+* Fixed a slightly wrong log level for authentication messages and also added
+  login events to the standard log.
-* Added new ArangoSearch analyzer type "stopwords".
+* Added new metrics for the total and the free disk space for the mount used for
+ the RocksDB database directory:
-* Fix error message in case of index unique constraint violations. They were
- lacking the actual error message (i.e. "unique constraint violated") and
- only showed the index details. The issue was introduced only in devel in Feb.
+ - `arangodb_rocksdb_free_disk_space`: provides the free disk space for the
+ mount, in bytes
+ - `arangodb_rocksdb_total_disk_space`: provides the total disk space of the
+ mount, in bytes
-* Removed obsolete metrics in new v2 metric API. Those metrics' values were
- identical to the sum value of histograms.
+* Apply user-defined idle connection timeouts for HTTP/2 and VST connections.
+ The timeout value for idle HTTP/2 and VST connections can now be configured
+ via the configuration option `--http.keep-alive-timeout` in the same way as
+ for HTTP/1 connections.
+ HTTP/2 and VST connections that are sending data back to the client are now
+ closed after 300 seconds or the configured idle timeout (the higher of both
+ values is used here).
+ Before this change, the timeouts for HTTP/2 and VST connections were
+ hardcoded to 120 seconds, and even non-idle connections were closed after this
+ timeout.
-* Allow process-specific logfile names.
+* Added new metrics for replication:
+ - `arangodb_replication_dump_requests`: number of replication dump requests
+ made.
+ - `arangodb_replication_dump_bytes_received`: number of bytes received in
+ replication dump requests.
+ - `arangodb_replication_dump_documents`: number of documents received in
+ replication dump requests.
+ - `arangodb_replication_dump_request_time`: wait time for replication dump
+ requests.
+ - `arangodb_replication_dump_apply_time`: time required for applying data from
+ replication dump responses.
+ - `arangodb_replication_initial_sync_keys_requests`: number of replication
+ initial sync keys requests made.
+ - `arangodb_replication_initial_sync_docs_requests`: number of replication
+ initial sync docs requests made.
+ - `arangodb_replication_initial_sync_docs_requested`: number of documents
+ requested via replication initial sync requests.
+ - `arangodb_replication_initial_sync_docs_inserted`: number of documents
+ inserted by replication initial sync.
+  - `arangodb_replication_initial_sync_docs_removed`: number of documents
+    removed by replication initial sync.
+ - `arangodb_replication_initial_chunks_requests_time`: wait time histogram for
+ replication key chunks determination requests.
+ - `arangodb_replication_initial_keys_requests_time`: wait time for replication
+ keys requests.
+ - `arangodb_replication_initial_docs_requests_time`: time needed to apply
+ replication docs data.
+ - `arangodb_replication_initial_insert_apply_time`: time needed to apply
+ replication initial sync insertions.
+ - `arangodb_replication_initial_remove_apply_time`: time needed to apply
+ replication initial sync removals.
+ - `arangodb_replication_initial_lookup_time`: time needed for replication
+ initial sync key lookups.
+ - `arangodb_replication_tailing_requests`: number of replication tailing
+ requests.
+ - `arangodb_replication_tailing_follow_tick_failures`: number of replication
+ tailing failures due to missing tick on leader.
+ - `arangodb_replication_tailing_markers`: number of replication tailing
+ markers processed.
+ - `arangodb_replication_tailing_documents`: number of replication tailing
+ document inserts/replaces processed.
+ - `arangodb_replication_tailing_removals`: number of replication tailing
+ document removals processed.
+ - `arangodb_replication_tailing_bytes_received`: number of bytes received for
+ replication tailing requests.
+ - `arangodb_replication_tailing_request_time`: wait time for replication
+ tailing requests.
+ - `arangodb_replication_tailing_apply_time`: time needed to apply replication
+ tailing markers.
- This change allows replacing '$PID' with the current process id in the
- `--log.output` and `--audit.output` startup parameters.
- This way it is easier to write process-specific logfiles.
+* Allow calling the REST APIs GET `/_api/engine/stats`, GET `/_api/collection`,
+  GET `/_api/database/current` and GET `/_admin/metrics` on followers in active
+  failover deployments. This can help with debugging and inspecting followers.
-* Backport a bugfix from upstream RocksDB for opening encrypted files with
- small sizes. Without the bugfix, the server may run into assertion failures
- during recovery.
+* Support projections on sub-attributes (e.g. `a.b.c`).
-* Fix duplicate leaving of V8 contexts when returning streaming cursors.
- The `exitContext` call done on query shutdown could previously try to exit
- the V8 context multiple times, which would cause undefined behavior. Now
- we are tracking if we already left the context to prevent duplicate invocation.
+ In previous versions of ArangoDB, projections were only supported on top-level
+ attributes. For example, in the query
-* In a cluster, do not create the collections `_statistics`, `_statistics15` and
- `statisticsRaw` on DB servers. These collections should only be created by the
- coordinator, and should translate into 2 shards each on DB servers. But there
- shouldn't be shards named `_statistics*` on DB servers.
+ FOR doc IN collection
+ RETURN doc.a.b
-* Fixed two bogus messages about hotbackup restore:
- - Coordinators unconditionally logged the message "Got a hotbackup restore
- event, getting new cluster-wide unique IDs..." on shutdown. This was not
- necessarily related to a hotbackup restore.
- - DB servers unconditionally logged the message "Strange, we could not
- unregister the hotbackup restore callback." on shutdown, although this was
- meaningless.
+  the projection that was used was just `a`. Now the projection will be `a.b`,
+  which can help reduce the amount of data to be extracted from documents when
+  only some sub-attributes are accessed.
-* Rename "save" return attribute to "dst" in AQL functions `DATE_UTCTOLOCAL` and
- `DATE_LOCALTOUTC`.
+ In addition, indexes can now be used to extract the data of sub-attributes for
+ projections. If for the above example query an index on `a.b` exists, it will
+ be used now. Previously, no index could be used for this projection.
-* Fix potentially undefined behavior when creating a CalculationTransactionContext
- for an arangosearch analyzer. An uninitialized struct member was passed as an
- argument to its base class. This potentially had no observable effects, but
- should be fixed.
+  Projections can now also be fed by any attribute of a combined index. For
+  example, in the query
-* Retry a cluster internal network request if the connection comes from the
- pool and turns out to be stale (connection immediately closed). This fixes
- some spurious errors after a hotbackup restore.
+ FOR doc IN collection
+ RETURN doc.b
-* Fix progress reporting for arangoimport with large files on Windows.
- Previously, progress was only reported for the first 2GB of data due to an
- int overflow.
+ the projection can be satisfied by a single-attribute index on attribute `b`,
+ but now also by a combined index on attributes `a` and `b` (or `b` and `a`).
-* Log the actual signal instead of "control-c" and also include the process id
- of the process that sent the signal.
+* Remove some JavaScript files containing testsuites and test utilities from our
+ official release packages.
-* Fixed GitHub issue #13665: Improve index selection when there are multiple
- candidate indexes.
+* Show the optimizer rules with the highest execution times in explain output.
-* When dropping a collection or an index with a larger amount of documents, the
- key range for the collection/index in RocksDB gets compacted. Previously, the
- compaction was running in foreground and thus would block the deletion operations.
- Now, the compaction is running in background, so that the deletion operations
- can return earlier.
- The maximum number of compaction jobs that are executed in background can be
- configured using the new startup parameter `--rocksdb.max-parallel-compactions`,
- which defaults to 2.
+* Renamed "master" to "leader" and "slave" to "follower" in replication
+ messages.
+  This will change the contents of replication log messages as well as the
+  string contents of replication-related error messages.
-* Put Sync/LatestID into hotbackup and restore it on hotbackup restore
- if it is in the backup. This helps with unique key generation after
- a hotbackup is restored to a young cluster.
+ The messages of the error codes 1402, 1403 and 1404 were also changed
+ accordingly, as well as the identifiers:
+ - `TRI_ERROR_REPLICATION_MASTER_ERROR` renamed to
+ `TRI_ERROR_REPLICATION_LEADER_ERROR`
+ - `TRI_ERROR_REPLICATION_MASTER_INCOMPATIBLE` renamed to
+ `TRI_ERROR_REPLICATION_LEADER_INCOMPATIBLE`
+ - `TRI_ERROR_REPLICATION_MASTER_CHANGE` renamed to
+ `TRI_ERROR_REPLICATION_LEADER_CHANGE`
-* Fixed a bug in the index count optimization that doubled counted documents
- when using array expansions in the fields definition.
+ This change also renames the API endpoint `/_api/replication/make-slave` to
+ `/_api/replication/make-follower`. The API is still available under the old
+ name, but using it is deprecated.
-* Don't store selectivity estimate values for newly created system collections.
+* Make optimizer rule "remove-filters-covered-by-index" remove FILTERs that were
+ referring to aliases of the collection variable, e.g.
- Not storing the estimates has a benefit especially for the `_statistics`
- system collections, which are written to periodically even on otherwise
- idle servers. In this particular case, the actual statistics data was way
- smaller than the writes caused by the index estimate values, causing a
- disproportional overhead just for maintaining the selectivity estimates.
- The change now turns off the selectivity estimates for indexes in all newly
- created system collections, and for new user-defined indexes of type
- "persistent", "hash" or "skiplist", there is now an attribute "estimates"
- which can be set to `false` to disable the selectivity estimates for the index.
- The attribute is optional. Not setting it will lead to the index being
- created with selectivity estimates, so this is a downwards-compatible change
- for user-defined indexes.
+ FOR doc IN collection
+ LET value = doc.indexedAttribute
+ FILTER value == ...
-* Added startup option `--query.global-memory-limit` to set a limit on the
- combined estimated memory usage of all AQL queries (in bytes).
- If this option has a value of `0`, then no memory limit is in place.
- This is also the default value and the same behavior as in previous versions
- of ArangoDB.
- Setting the option to a value greater than zero will mean that the total memory
- usage of all AQL queries will be limited approximately to the configured value.
- The limit is enforced by each server in a cluster independently, i.e. it can
- be set separately for coordinators, DB servers etc. The memory usage of a
- query that runs on multiple servers in parallel is not summed up, but tracked
- separately on each server.
- If a memory allocation in a query would lead to the violation of the configured
- global memory limit, then the query is aborted with error code 32 ("resource
- limit exceeded").
- The global memory limit is approximate, in the same fashion as the per-query
- limit provided by the option `--query.memory-limit` is. Some operations,
- namely calls to AQL functions and their intermediate results, are currently
- not properly tracked.
- If both `--query.global-memory-limit` and `--query.memory-limit` are set,
- the former must be set at least as high as the latter.
+ Previously, FILTERs that were using aliases were not removed by that optimizer
+ rule.
+ In addition, the optimizer rule "remove-unnecessary-calculations" will now run
+ again in case it successfully removed variables. This can unlock further
+ removal of unused variables in sequences such as
- To reduce the cost of globally tracking the memory usage of AQL queries, the
- global memory usage counter is only updated in steps of 32 kb, making
- this also the minimum granularity of the global memory usage figure.
- In the same fashion, the granularity of the peak memory usage counter inside
- each query was also adjusted to steps of 32 kb.
+ FOR doc IN collection
+ LET value = doc.indexedAttribute
+ LET tmp1 = value > ...
+ LET tmp2 = value < ...
-* Added startup option `--query.memory-limit-override` to control whether
- individual AQL queries can increase their memory limit via the `memoryLimit`
- query option. This is the default, so a query that increases its memory limit
- is allowed to use more memory.
- The new option `--query.memory-limit-override` allows turning this behavior
- off, so that individual queries can only lower their maximum allowed memory
- usage.
+ when the removal of `tmp1` and `tmp2` makes it possible to also remove the
+ calculation of `value`.
-* Added metric `arangodb_aql_global_memory_usage` to expose the total amount
- of memory (in steps of 32 kb) that is currently in use by all AQL queries.
+* Fixed issue BTS-168: Fixed undefined behavior that triggered segfaults on
+  cluster startup. It was only witnessed in macOS-based builds. The issue could
+  be triggered by any network connection.
+  This behavior was not part of any released version.
-* Added metric `arangodb_aql_global_memory_limit` to expose the memory limit
- from startup option `--query.global-memory-limit`.
+* Hard-code the returned "planVersion" attribute of collections to a value
+  of 1. Before 3.7, the most recent Plan version from the agency was returned
+  inside "planVersion".
+ In 3.7, the attribute contained the Plan version that was in use when the
+ in-memory LogicalCollection object was last constructed. The object was always
+ reconstructed in case the underlying Plan data for the collection changed or
+ when a collection contained links to arangosearch views.
+ This made the attribute relatively useless for any real-world use cases, and
+ so we are now hard-coding it to simplify the internal code. Using the
+ attribute in client applications is also deprecated.
-* Allow setting path to the timezone information via the `TZ_DATA` environment
- variable, in the same fashion as the currently existing `ICU_DATA` environment
- variable. The `TZ_DATA` variable is useful in environments` that start arangod
- from some unusual locations, when it can't find its `tzdata` directory
- automatically.
+* Don't prevent concurrent synchronization of different shards from the same
+ database. Previously only one shard was synchronized at a time per database.
-* Fixed a bug in query cost estimation when a NoResults node occurred in a spliced
- subquery. This could lead to a server crash.
+* Wait until restore task queue is idle before shutting down.
-* Fix slower-than-necessary arangoimport behavior:
- arangoimport has a built-in rate limiter, which can be useful for importing
- data with a somewhat constant rate. However, it is enabled by default and
- limits imports to 1MB per second. These settings are not useful.
+* Fix a race problem in the unit tests w.r.t. PlanSyncer.
- This change turns the rate limiting off by default, and sets the default
- chunk size to 8MB (up from 1MB) as well. This means that arangoimport will
- send larger batches to the server by default. The already existing `--batch-size`
- option can be used to control the maximum size of each batch.
+* Errors with error code 1200 (Arango conflict) will now get the HTTP response
+  code 409 (Conflict) instead of 412 (Precondition failed), unless the
+  "if-match" header was used in `_api/document` or `_api/gharial`.
- The new parameter `--auto-rate-limit` can now be used to toggle rate limiting.
- It defaults to off, whereas previously rate limiting was enabled by default
- unless `--batch-size` was specified when arangoimport was invoked.
+* Keep the list of last-acknowledged entries in the Agency more consistent.
+  During leadership take-over it was possible to get into a situation in which
+  the new leader did not successfully report the agency configuration; this was
+  eventually fixed by the Agent itself. Now this situation can no longer occur.
-* The cluster dashboard charts in the web UI are now more readable during the
- initialization phase. Additionally, the amount of agents are now displayed
- there as well. An agent failure will also appear here in case it exists.
+* Added support for the `db._engineStats()` API on coordinators. Previously,
+  calling this API always produced an empty result. Now it returns the engine
+  statistics as an object, with an entry for each individual DB-Server.
-* Added more useful information during the SmartGraph creation in the web UI
- in case the current database is a OneShard database.
+* Added option `--log.use-json-format` to switch log output to JSON format.
+  Each log message then produces a separate line with JSON-encoded log data,
+ which can be consumed by applications.
-* Add support for building with Zen 3 CPU when optimizing for the local
- architecture.
+* Added option `--log.process` to toggle the logging of the process id (pid) in
+ log messages. Logging the process id is useless when running arangod in Docker
+ containers, as the pid will always be 1. So one may as well turn it off in
+ these contexts.
-* The web UI's node overview now displays also agent information (cluster only).
+* Added option `--log.in-memory` to toggle storing log messages in memory, from
+  which they can be consumed via the `/_admin/log` API and by the web UI. By
+ default, this option is turned on, so log messages are consumable via API and
+ the web UI. Turning this option off will disable that functionality and save a
+ tiny bit of memory for the in-memory log buffers.
-* The statistics view in the web UI does now provide more system specific
- information in case the Metrics API is enabled. Different statistics may
- be visible depending on the operating system.
-* Added metrics documentation snippets and infrastructure for that.
+v3.7.10 (2021-03-14)
+--------------------
-* Added a new cluster distribution view to the web UI. The view includes general
- details about cluster-wide distribution in general as well as more detailed
- shard distribution specific information.
+* Reasonably harden MoveShard against unexpected VelocyPack input.
-* Follower primaries respond with
- TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION
- to any read request. Fixes a wrongly responded 404 from chaos
- tests.
+* Follower DB servers will now respond with error code
+ `TRI_ERROR_CLUSTER_SHARD_FOLLOWER_REFUSES_OPERATION`
+ to any read request. This fixes inadequate HTTP 404 responses from followers,
+ e.g. during chaos tests.
-* Fixed GitHub issue #13632: Query Fails on Upsert with Replace_nth.
+* Fixed GitHub issue #13632: Query Fails on Upsert with Replace_nth.
-* Reasonably harden MoveShard against invalid VelocyPack input.
+* Updated arangosync to 1.2.3.
-* Removed older reference to VelocyPackDumper.
+* Backported AQL sort performance improvements from devel.
+  This change can improve the performance of local sort operations, e.g.
-* Added `--documents-per-batch` option to arangoexport.
- This option allows to control the number of documents to be returned by each
- server-side batch. It can be used to limit the number of documents per batch
- when exporting collections with large documents.
+ Baseline (3.7.9):
-* Added a new metrics view to the web UI. This view can be used in a clustered
- environment as well as in a single instance. Metrics are displayed either in
- a tabular format or as plain text (Prometheus Text-based format).
- Additionally, the metrics can be downloaded there.
+ Query String (94 chars, cacheable: false):
+ FOR i IN 1..500000 LET value = CONCAT('testvalue-to-be-sorted', i) SORT value ASC RETURN value
-* Added a new maintenance mode tab to the web UI in cluster mode.
- The new tab shows the current state of the cluster supervision maintenance
- and allows to enable/disable the maintenance mode from there. The tab will
- only be visible in the `_system` database. The required privileges for
- displaying the maintenance mode status and/or changing it are the as for
- using the REST APIs for the maintenance mode.
+ Execution plan:
+ Id NodeType Calls Items Runtime [s] Comment
+ 1 SingletonNode 1 1 0.00003 * ROOT
+ 2 CalculationNode 1 1 0.00003 - LET #2 = 1 .. 500000 /* range */ /* simple expression */
+ 3 EnumerateListNode 500 500000 0.08725 - FOR i IN #2 /* list iteration */
+ 4 CalculationNode 500 500000 0.22722 - LET value = CONCAT("testvalue-to-be-sorted", i) /* simple expression */
+ 5 SortNode 500 500000 2.05180 - SORT value ASC /* sorting strategy: standard */
+ 6 ReturnNode 500 500000 0.02911 - RETURN value
+
+ Query Statistics:
+ Writes Exec Writes Ign Scan Full Scan Index Filtered Exec Time [s]
+ 0 0 0 0 0 2.39644
+
+ With sort optimization (3.7.10):
+
+ Query String (94 chars, cacheable: false):
+ FOR i IN 1..500000 LET value = CONCAT('testvalue-to-be-sorted', i) SORT value ASC RETURN value
-* Fixed a problem that coordinators would vanish from the UI and the Health
- API if one switched the agency Supervision into maintenance mode and kept
- left that maintenance mode on for more than 24h.
+ Execution plan:
+ Id NodeType Calls Items Runtime [s] Comment
+ 1 SingletonNode 1 1 0.00002 * ROOT
+ 2 CalculationNode 1 1 0.00003 - LET #2 = 1 .. 500000 /* range */ /* simple expression */
+ 3 EnumerateListNode 500 500000 0.08755 - FOR i IN #2 /* list iteration */
+ 4 CalculationNode 500 500000 0.26161 - LET value = CONCAT("testvalue-to-be-sorted", i) /* simple expression */
+ 5 SortNode 500 500000 1.36070 - SORT value ASC /* sorting strategy: standard */
+ 6 ReturnNode 500 500000 0.02864 - RETURN value
+
+ Query Statistics:
+ Writes Exec Writes Ign Scan Full Scan Index Filtered Exec Time [s]
+ 0 0 0 0 0 1.73940
+
+* Fixed a problem that coordinators would vanish from the UI and the Health API
+  if one switched the agency Supervision into maintenance mode and left that
+  maintenance mode on for more than 24h.
* Fixed a bug in the web interface that displayed the error "Not authorized to
execute this request" when trying to create an index in the web interface in a
@@ -4637,102 +7494,78 @@ devel
permissions for the `_system` database.
The error message previously displayed error actually came from an internal
request made by the web interface, but it did not affect the actual index
- creation.
-
-* Added ability to display Coordinator and DBServer logs from inside the Web UI
- in a clustered environment when privileges are sufficient.
- Additionally, displayed log entries can now be downloaded from the web UI in
- single server and in cluster mode.
-
-* The Web UI's info view of a collection now displays additional properties and
- statistics (e.g. RocksDB related figures, sharding information and more).
-
-* Improve progress reporting for shard synchronization in the web UI.
- The UI will now show how many shards are actively syncing data, and will
- provide a better progress indicator, especially if there is more than one
- follower for a shard.
+ creation.
* Fixed issue BTS-309: The Graph API (Gharial) did not respond with the correct
HTTP status code when validating edges. It now responds with 400 (Bad Request)
- as documented and a new, more precise error code (1947) and message if a vertex
- collection referenced in the _from or _to attribute is not part of the graph.
+ as documented and a new, more precise error code (1947) and message if a
+ vertex collection referenced in the _from or _to attribute is not part of the
+ graph.
+
+
+v3.7.9 (2021-03-01)
+-------------------
-* Added `--shard` option to arangodump, so that dumps can be restricted to one or
- multiple shards only.
+* Fix issue #13476: The Java driver v6.9.0 (and older) has bad performance when
+  iterating over AQL cursor results in certain cases. This change works around
+  the problem. The workaround will no longer be available in 3.8.
* Enable statistics in web UI in non-`_system` databases in cluster mode.
- In cluster mode, the web UI dashboard did not display statistics properly
- when not being logged into the `_system` database. For all other databases
- than `_system`, no statistics were displayed but just some "No data..."
+ In cluster mode, the web UI dashboard did not display statistics properly when
+ not being logged in to the `_system` database. For all other databases than
+ `_system`, no statistics were displayed but just some "No data..."
placeholders.
Statistics for non-`_system` databases were not properly displayed since
3.7.6 due to an internal change in the statistics processing.
- In addition, a new startup option `--server.statistics-all-databases`
- controls whether cluster statistics are displayed in the web interface for
- all databases (if the option is set to `true`) or just for the system
- database (if the option is set to `false`).
+ In addition, a new startup option `--server.statistics-all-databases` controls
+ whether cluster statistics are displayed in the web interface for all
+ databases (if the option is set to `true`) or just for the system database (if
+ the option is set to `false`).
The default value for the option is `true`, meaning statistics will be
displayed in the web interface for all databases.
-* Add optional hostname logging to log messages.
- Whether or not the hostname is added to each log message can be controlled via
- the new startup option `--log.hostname`. Its default value is the empty string,
- meaning no hostname will be added to log messages.
- Setting the option to an arbitrary string value will make this string be logged
- in front of each regular log message, and inside the `hostname` attribute in
- case of JSON-based logging. Setting the option to a value of `auto` will use
- the hostname as returned by `gethostbyname`.
-
-* Added logging of elapsed time of ArangoSearch commit/consolidation/cleanup
- jobs.
-
-* Added list-repeat AIR primitive that creates a list containing n copies of the input value.
-
* Updated OpenSSL to 1.1.1j and OpenLDAP to 2.4.57.
-* Prevent arangosh from trying to connect after every executed command.
- This fixes the case when arangosh is started with default options, but no
- server is running on localhost:8529. In this particular case, arangosh will
- try to connect on startup and after every executed shell command. The
- connect attempts all fail and time out after 300ms.
- In this case we now don't try to reconnect after every command.
-
-* Added 'custom-query' testcase to arangobench to allow execution of custom
- queries.
- This also adds the options `--custom-query` and `--custom-query-file` for
- arangobench.
+* Cleanup old HotBackup transfer jobs in agency.
-* Addition to the internal Refactoring of K_PATHS feature: K_PATHS queries are
- now being executed on the new refactored graph engine in a clustered
- environment. This change should not have any visible effect on users.
+* Added logging of elapsed time of ArangoSearch commit/consolidation/cleanup
+ jobs.
-* Reduce memory footprint of agency Store in Node class.
+* Fix a too-early stop of replication when waiting for keys in large
+  collections/shards.
-* Cleanup old hotbackup transfer jobs in agency.
+* Fixed issue BTS-268: fix a flaky Foxx self-heal procedure.
-* On Windows create a minidump in case of an unhandled SEH exception for
- post-mortem debugging.
+* Fixed issue DEVSUP-720: Within an AQL query, the "COLLECT WITH COUNT INTO"
+ statement could lead to a wrong count output when used in combination with an
+ index which has been created with an array index attribute.
-* Add JWT secret support for arangodump and arangorestore, i.e. they now also
- provide the command-line options `--server.ask-jwt-secret` and
- `--server.jwt-secret-keyfile` with the same meanings as in arangosh.
+* Fix profiling of AQL queries with the `silent` and `stream` options set in
+ combination. Using the `silent` option makes a query execute, but discard all
+ its results instantly. This led to some confusion in streaming queries, which
+ can return the first query results once they are available, but don't
+ necessarily execute the full query.
+ Now, `silent` correctly discards all results even in streaming queries, but
+ this has the effect that a streaming query will likely be executed completely
+ when the `silent` option is set. This is not the default however, and the
+ `silent` option is normally not set. There is no change for streaming queries
+ if the `silent` option is not set.
-* Add optional hyperlink to program option sections for information purposes,
- and add optional sub-headlines to program options for better grouping.
- These changes will be visible only when using `--help`.
+  As a side-effect of this change, this makes profiling (i.e. using
+  `db._profileQuery(...)`) work for streaming queries as well. Previously,
+  profiling a streaming query could have led to some internal errors, and even
+  query results being returned, even though profiling a query should not return
+  any query results.
-* For Windows builds, remove the defines `_SILENCE_ALL_CXX17_DEPRECATION_WARNINGS`
- and `_ENABLE_ATOMIC_ALIGNMENT_FIX` that were needed to build Boost components
- with MSVC in older versions of Boost and MSVC.
- Both of these defines are obsolete nowadays.
+* Improved the wording for sharding options displayed in the web interface.
-* Database initial sync considers document count on leader for
- estimating timeouts when over 1 million docs on leader.
+ Instead of offering `flexible` and `single`, now use the more intuitive
+ `Sharded` and `OneShard` options, and update the help text for them.
-* EE only bugfix: On DisjointSmartGraphs that are used in anonymous way,
- there was a chance that the query could fail, if non-disjoint collections
- were part of the query. Named DisjointSmartGraphs have been save to this bug.
+* EE only bugfix: On DisjointSmartGraphs that are used in an anonymous way,
+  there was a chance that the query could fail if non-disjoint collections were
+  part of the query. Named DisjointSmartGraphs were safe from this bug.
Example:
DisjointSmartGraph (graph) on vertices -edges-> vertices
Query:
@@ -4742,8 +7575,8 @@ devel
FOR u IN unrelated
RETURN [out, u]
- The "unrelated" collection was pulled into the DisjointSmartGraph, causing
- the AQL setup to create erroneous state.
+ The "unrelated" collection was pulled into the DisjointSmartGraph, causing the
+ AQL setup to create erroneous state.
This is now fixed and the above query works.
This query:
@@ -4754,59 +7587,33 @@ devel
was not affected by this bug.
-* Fixed issue BTS-268: fix a flaky Foxx self-heal procedure.
-
-* Fixed issue DEVSUP-720: Within an AQL query, the "COLLECT WITH COUNT INTO"
- statement could lead to a wrong count output when used in combination with
- an index which has been created with an array index attribute.
-
-* Fixed issue #13117: Aardvark: Weird cursor offsets in query editor.
-
- Disabled font ligatures for Ace editor in Web UI to avoid rare display issue.
-
-* Fixed ES-784 regression related to encryption cipher propagation to
- ArangoSearch data.
-
-* Improved the wording for sharding options displayed in the web interface.
-
- Instead of offering `flexible` and `single`, now use the more intuitive
- `Sharded` and `OneShard` options, and update the help text for them.
-
-* Make all AQL cursors return compact result arrays.
-
-* Fix profiling of AQL queries with the `silent` and `stream` options sets in
- combination. Using the `silent` option makes a query execute, but discard all
- its results instantly. This led to some confusion in streaming queries, which
- can return the first query results once they are available, but don't
- necessarily execute the full query.
- Now, `silent` correctly discards all results even in streaming queries, but
- this has the effect that a streaming query will likely be executed completely
- when the `silent` option is set. This is not the default however, and the
- `silent` option is normally not set. There is no change for streaming queries
- if the `silent` option is not set.
+* Avoid a potential deadlock when dropping indexes.
- As a side-effect of this change, this makes profiling (i.e. using
- `db._profileQuery(...)` work for streaming queries as well. Previously,
- profiling a streaming query could have led to some internal errors, and even
- query results being returned, even though profiling a query should not return
- any query results.
+ A deadlock could theoretically happen for a thread that is attempting to drop
+ an index in case there was another thread that tried to create or drop an
+ index in the very same collection at the very same time. We haven't managed to
+ trigger the deadlock with concurrency tests, so it may have been a theoretical
+ issue only. The underlying code was changed anyway to make sure this will not
+ cause problems in reality.
* Make dropping of indexes in cluster retry in case of precondition failed.
When dropping an indexes of a collection in the cluster, the operation could
- fail with a "precondition failed" error in case there were simultaneous
- index creation or drop actions running for the same collection. The error
- was returned properly internally, but got lost at the point when
+ fail with a "precondition failed" error in case there were simultaneous index
+ creation or drop actions running for the same collection. The error was
+ returned properly internally, but got lost at the point when
`.dropIndex()` simply converted any error to just `false`.
We can't make `dropIndex()` throw an exception for any error, because that
would affect downwards-compatibility. But in case there is a simultaneous
change to the collection indexes, we can just retry our own operation and
check if it succeeds then. This is what `dropIndex()` will do now.
-* Try to raise file descriptors limit in local start scripts (in `scripts/`
- directory - used for development only).
-
-* Fix error reporting in the reloadTLS route.
+* Improve incremental sync replication for single server and cluster to cope
+ with multiple secondary index unique constraint violations (before this was
+ limited to a failure in a single unique secondary index). This allows
+ replicating the leader state to the follower in basically any order, as any
+ *other* conflicting documents in unique secondary indexes will be detected
+ and removed on the follower.
* Fix potential undefined behavior when iterating over connected nodes in an
execution plan and calling callbacks for each of the nodes: if the callbacks
@@ -4814,13 +7621,6 @@ devel
from, this could lead to potentially undefined behavior due to iterator
invalidation. The issue occurred when using a debug STL via `_GLIBCXX_DEBUG`.
-* Fixed replication bug in MerkleTree sync protocol, which could lead to
- data corruption. The visible effect was that shards could no longer get
- in sync since the counts would not match after sync, even after a recount.
- This corruption only happened if there were large amounts of differences
- (at least 65537) and the destination side had newer revisions for some
- keys than the source side.
-
* Fixed a RocksDB bug which could lead to an assertion failure when compiling
with STL debug mode -D_GLIBCXX_DEBUG.
@@ -4831,67 +7631,13 @@ devel
checks for the query being killed during the filtering of documents, resulting
in the maxRuntime option and manual kill of a query not working timely.
-* Simplify the DistributeExecutor and avoid implicit modification of its input
- variable. Previously the DistributeExecutor could update the input variable
- in-place, leading to unexpected results (see #13509).
- The modification logic has now been moved into three new _internal_ AQL
- functions (MAKE_DISTRIBUTE_INPUT, MAKE_DISTRIBUTE_INPUT_WITH_KEY_CREATION,
- and MAKE_DISTRIBUTE_GRAPH_INPUT) and an additional calculation node with an
- according function call will be introduced if we need to prepare the input
- data for the distribute node.
-
-* Added new REST APIs for retrieving the sharding distribution:
-
- - GET `/_api/database/shardDistribution` will return the number of
- collections, shards, leaders and followers for the database it is run
- inside. The request can optionally be restricted to include data from
- only a single DB server, by passing the `DBserver` URL parameter.
-
- This API can only be used on coordinators.
-
- - GET `/_admin/cluster/shardDistribution` will return global statistics
- on the current shard distribution, showing the total number of databases,
- collections, shards, leaders and followers for the entire cluster.
- The results can optionally be restricted to include data from only a
- single DB server, by passing the `DBserver` URL parameter.
- By setting the `details` URL parameter, the response will not contain
- aggregates, but instead one entry per available database will be returned.
-
- This API can only be used in the `_system` database of coordinators, and
- requires admin user privileges.
-
-* Decrease the size of serialized index estimates, by introducing a
- compressed serialization format. The compressed format uses the previous
- uncompressed format internally, compresses it, and stores the compressed
- data instead. This makes serialized index estimates a lot smaller, which
- in turn decreases the size of I/O operations for index maintenance.
-
* Do not create index estimator objects for proxy collection objects on
- coordinators and DB servers. Proxy objects are created on coordinators and
- DB servers for all shards, and they also make index objects available. In
- order to reduce the memory usage by these objects, we don't create any
- index estimator objects for indexes in those proxy objects. Index estimators
- usually take several KB of memory each, so not creating them will pay out
- for higher numbers of collections/shards.
-
-* More improvements for logging:
-
- * Added new REST API endpoint GET `/_admin/log/entries` to return log entries
- in a more intuitive format, putting each log entry with all its properties
- into an object. The API response is an array with all log message objects
- that match the search criteria.
- This is an extension to the already existing API endpoint GET `/_admin/log`,
- which returned log messages fragmented into 5 separate arrays.
-
- The already existing API endpoint GET `/_admin/log` for retrieving log
- messages is now deprecated, although it will stay available for some time.
-
- * Truncation of log messages now takes JSON format into account, so that
- the truncation of oversized JSON log messages still keeps a valid JSON
- structure even after the truncation.
-
- * The maximum size of in-memory log messages was doubled from 256 to 512
- chars, so that longer parts of each log message can be preserved now.
+ coordinators and DB servers. Proxy objects are created on coordinators and DB
+ servers for all shards, and they also make index objects available. In order
+ to reduce the memory usage by these objects, we don't create any index
+ estimator objects for indexes in those proxy objects. Index estimators usually
+ take several KB of memory each, so not creating them will pay out for higher
+ numbers of collections/shards.
* Improvements for logging. This adds the following startup options to arangod:
@@ -4902,152 +7648,54 @@ devel
suffix '...' will be added to them. The purpose of this parameter is to
shorten long log messages in case there is not a lot of space for logfiles,
and to keep rogue log messages from overusing resources.
- The default value is 128 MB, which is very high and should effectively
- mean downwards-compatibility with previous arangod versions, which did not
+ The default value is 128 MB, which is very high and should effectively mean
+  downwards-compatibility with previous arangod versions, which did not
restrict the maximum size of log messages.
- `--audit.max-entry-length`: controls the maximum line length for individual
audit log messages that are written into audit logs by arangod. Any audit
log messages longer than the specified value will be truncated and the
suffix '...' will be added to them.
- The default value is 128 MB, which is very high and should effectively
- mean downwards-compatibility with previous arangod versions, which did not
+ The default value is 128 MB, which is very high and should effectively mean
+  downwards-compatibility with previous arangod versions, which did not
restrict the maximum size of log messages.
- `--log.in-memory-level`: controls which log messages are preserved in
- memory (in case `--log.in-memory` is set to `true`). The default value is
- `info`, meaning all log messages of types `info`, `warning`, `error` and
- `fatal` will be stored by an instance in memory (this was also the behavior
- in previous versions of ArangoDB). By setting this option to `warning`,
- only `warning`, `error` and `fatal` log messages will be preserved in memory,
- and by setting the option to `error` only error and fatal messages will be kept.
+ memory. The default value is `info`, meaning all log messages of types
+ `info`, `warning`, `error` and `fatal` will be stored by an instance in
+ memory (this was also the behavior in previous versions of ArangoDB).
+  By setting this option to `warning`, only `warning`, `error` and `fatal`
+  log messages will be preserved in memory, and by setting the option to
+  `error` only `error` and `fatal` messages will be kept.
This option is useful because the number of in-memory log messages is
limited to the latest 2048 messages, and these slots are by default shared
between informational, warning and error messages.
* Honor the value of startup option `--log.api-enabled` when set to `false`.
- The desired behavior in this case is to turn off the REST API for logging,
- but was not implemented. The default value for the option is `true`, so the
- REST API is enabled. This behavior did not change, and neither did the
- behavior when setting the option to a value of `jwt` (meaning the REST API
- for logging is only available for superusers with a valid JWT token).
-
-* Split the update operations for the _fishbowl system collection with Foxx
- apps into separate insert/replace and remove operations. This makes the
- overall update not atomic, but as removes are unlikely here, we can now get
- away with a simple multi-document insert-replace operation instead of a
- truncate and an exclusive transaction, which was used before.
-
-* Fix `/_admin/cluster/removeServer` API.
- This often returned HTTP 500 with an error message "Need open Array" due to
- an internal error when setting up agency preconditions.
-
-* Remove logging startup options `--log.api-enabled` and `--log.keep-logrotate`
- for all client tools (arangosh, arangodump, arangorestore etc.), as these
- options are only meaningful for arangod.
-
-* Fixed BTS-284: upgrading from 3.6 to 3.7 in cluster enviroment.
- Moved upgrade ArangoSearch links task to later step as it needs cluster
- connection. Removed misleading error log records for failed ArangoSearch index
- creation during upgrade phase.
-
-* Extend the "move-calculations-up" optimizer rule so that it can move
- calculations out of subqueries into the outer query.
-
-* Don't allocate ahead-of-time memory for striped PRNG array in arangod,
- but instead use thread-local PRNG instances. Not only does this save a
- few megabytes of memory, but it also avoids potential (but unlikely)
- sharing of the same PRNG instance by multiple threads.
-
-* Remove undocumented CMake variable `USE_BACKTRACE`, and remove define
- `ARANGODB_ENABLE_BACKTRACE`. Both were turned off by default before, and
- when turned on allow to produce backtraces from within the executable in
- case debug symbols were available, working and the build was also compiled
- with `USE_MAINTAINER_MODE=On`. Some code in this context was obviously
- unreachable, so now it has all been removed.
- To log a backtrace from within arangod, it is now possible to call
- `CrashHandler::logBacktrace()`, which will log a backtrace of the calling
- thread to the arangod log. This is restricted to Linux builds only.
-
-* Fix warnings about suggest-override which can break builds when warnings
- are treated as errors.
-
-* Turn off option `--server.export-read-write-metrics` for now, until there
- is certainty about the runtime overhead it introduces.
-
-* Fixed issue #12543: Unused Foxx service config can not be discarded.
-
-* Fixed issue #12363: Foxx HTTP API upgrade/replace always enables
- development mode.
-
-* Remove unsafe query option `inspectSimplePlans`. This option previously
- defaulted to `true`, and turning it off could make particular queries fail.
- The option was ignored in the cluster previously, and turning it off only
- had an effect in single server, there making very simple queries (queries
- not containing any FOR loops) not going through the optimizer's complete
- pipeline as a performance optimization. However, the optimization was only
- possible for a very small number of queries and even had adverse effects,
- so it is now removed entirely.
-
-* On Linux and MacOS, require at least 8192 usable file descriptors at startup.
- If less file descriptors are available to the arangod process, then the
- startup is automatically aborted.
-
- Even the chosen minimum value of 8192 will often not be high enough to
- store considerable amounts of data. However, no higher value was chosen
- in order to not make too many existing small installations fail at startup
- after upgrading.
-
- The required number of file descriptors can be configured using the startup
- option `--server.descriptors-minimum`. It defaults to 8192, but it can be
- increased to ensure that arangod can make use of a sufficiently high number
- of files. Setting `--server.descriptors-minimum` to a value of `0` will
- make the startup require only an absolute minimum limit of 1024 file
- descriptors, effectively disabling the change.
- Such low values should only be used to bypass the file descriptors check
- in case of an emergency, but this is not recommended for production.
-
-* Added metric `arangodb_transactions_expired` to track the total number
- of expired and then garbage-collected transactions.
-
-* Allow toggling the document read/write counters and histograms via the
- new startup option `--server.export-read-write-metrics false`. This
- option defaults to `true`, so these metrics will be exposed by default.
+ The desired behavior in this case is to turn off the REST API for logging, but
+ this was not implemented. The default value for the option is `true`, so the
+ REST API is enabled. This behavior did not change, and neither did the behavior
+ when setting the option to a value of `jwt` (meaning the REST API for logging
+ is only available for superusers with a valid JWT token).
-* Upgraded bundled version of libunwind to v1.5.
+* Fix error reporting in the reloadTLS route.
-* Added startup option `--javascript.tasks` to allow turning off JavaScript
- tasks if not needed. The default value for this option is `true`, meaning
- JavaScript tasks are available as before.
- However, with this option they can be turned off by admins to limit the
- amount of JavaScript user code that is executed.
+* Split the update operations for the _fishbowl system collection with Foxx apps
+ into separate insert/replace and remove operations. This makes the overall
+ update not atomic, but as removes are unlikely here, we can now get away with
+ a simple multi-document insert-replace operation instead of a truncate and an
+ exclusive transaction, which was used before.
-* Only instantiate a striped PRNG instance for the arangod server, but not
- for any of the client tools (e.g. arangosh, arangodump, arangorestore).
- The client tools do not use the striped PRNG, so we can save a few MBs of
- memory for allocating the striped PRNG instance there, plus some CPU time
- for initializing it.
-* Improve shard synchronization protocol by only transferring the required
- parts of the inventory from leader to follower. Previously, for each shard
- the entire inventory was exchanged, which included all shards of the
- respective database with all their details.
- In addition, save 3 cluster-internal requests per shard in the initial shard
- synchronization protocol by reusing already existing information in the
- different steps of the replication process.
+v3.7.8 (2021-02-16)
+-------------------
-* Added metric `arangodb_scheduler_low_prio_queue_last_dequeue_time` that
- provides the time (in milliseconds) it took for the most recent low priority
- scheduler queue item to bubble up to the queue's head. This metric can be
- used to estimate the queuing time for incoming requests.
- The metric will be updated probabilistically when a request is pulled from
- the scheduler queue, and may remain at its previous value for a while if
- only few requests are coming in or remain permanently at its previous value
- if no further requests are incoming at all.
+* Fixed ES-784 regression related to encryption cipher propagation to
+ ArangoSearch data.
-* Allow {USER} placeholder string also in `--ldap.search-filter`.
-* Fix agency restart with mismatching compaction and log indexes.
+v3.7.7 (2021-02-05)
+-------------------
* Added metrics for document read and write operations:
@@ -5061,35 +7709,47 @@ devel
- `arangodb_collection_truncates_replication`: Total number of collection
truncate operations (successful and failed) by synchronous replication.
- `arangodb_document_read_time`: Execution time histogram of all document
- primary key read operations (successful and failed) [s]. Note: this
- does not include secondary index lookups, range scans and full collection
- scans.
- - `arangodb_document_insert_time`: Execution time histogram of all
- document insert operations (successful and failed) [s].
- - `arangodb_document_replace_time`: Execution time histogram of all
- document replace operations (successful and failed) [s].
- - `arangodb_document_remove_time`: Execution time histogram of all
- document remove operations (successful and failed) [s].
- - `arangodb_document_update_time`: Execution time histogram of all
- document update operations (successful and failed) [s].
+ primary key read operations (successful and failed) [s]. Note: this does not
+ include secondary index lookups, range scans and full collection scans.
+ - `arangodb_document_insert_time`: Execution time histogram of all document
+ insert operations (successful and failed) [s].
+ - `arangodb_document_replace_time`: Execution time histogram of all document
+ replace operations (successful and failed) [s].
+ - `arangodb_document_remove_time`: Execution time histogram of all document
+ remove operations (successful and failed) [s].
+ - `arangodb_document_update_time`: Execution time histogram of all document
+ update operations (successful and failed) [s].
- `arangodb_collection_truncate_time`: Execution time histogram of all
collection truncate operations (successful and failed) [s].
- The timer metrics are turned off by default, and can be enabled by setting
- the startup option `--server.export-read-write-metrics true`.
+ The timer metrics are turned off by default, and can be enabled by setting the
+ startup option `--server.export-read-write-metrics true`.
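+
+ As a quick check (a sketch; run from arangosh against a server started with
+ the option enabled), the new timers can be filtered out of the metrics
+ endpoint:
+ ```
+ // scrape the Prometheus-style metrics text and keep only the document timers
+ var body = arango.GET_RAW("/_admin/metrics").body.toString();
+ print(body.split("\n").filter(function (line) {
+   return line.indexOf("arangodb_document_") === 0;
+ }));
+ ```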
-* Fixed some wrong behavior in single document updates. If the option
- ignoreRevs=false was given and the precondition _rev was given in the body
- but the _key was given in the URL path, then the rev was wrongly taken
- as 0, rather than using the one from the document body.
+* Fixed issue #12543: Unused Foxx service config can not be discarded.
-* Improved logging for error 1489 ("a shard leader refuses to perform a
- replication operation"). The log message will now provide the database and
- shard name plus the differing information about the shard leader.
+* Fixed issue #12363: Foxx HTTP API upgrade/replace always enables development
+ mode.
+
+* Fixed BTS-284: upgrading from 3.6 to 3.7 in cluster environment.
+ Moved upgrade ArangoSearch links task to later step as it needs cluster
+ connection. Removed misleading error log records for failed ArangoSearch index
+ creation during upgrade phase.
+
+* Normalize user-provided input/output directory names in arangoimport,
+ arangoexport, arangodump and arangorestore before splitting them into path
+ components, in the sense that now both forward and backward slashes can be
+ used on Windows, even interchangeably.
+
+* Fixed some wrong behavior in single document updates. If the option
+ ignoreRevs=false was given and the precondition _rev was given in the body but
+ the _key was given in the URL path, then the rev was wrongly taken as 0,
+ rather than using the one from the document body.
+
+* Allow {USER} placeholder string also in `--ldap.search-filter`.
* Make `padded` and `autoincrement` key generators export their `lastValue`
- values, so that they are available in dumps and can be restored elsewhere
- from a dump.
+ values, so that they are available in dumps and can be restored elsewhere from
+ a dump.
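+
+ A minimal arangosh sketch (collection name hypothetical) showing where the
+ exported value surfaces:
+ ```
+ // create a collection with a padded key generator and insert a document
+ db._create("paddedTest", { keyOptions: { type: "padded" } });
+ db.paddedTest.insert({});
+ // the generator state is now part of the collection properties (and dumps)
+ print(db.paddedTest.properties().keyOptions.lastValue);
+ ```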
* Fix decoding of values in `padded` key generator when restoring from a dump.
@@ -5097,65 +7757,55 @@ devel
coordinators. This could for example swallow out-of-disk errors during
hotbackup restore.
-* Fixed rare objectId conflict for indexes.
-
-* Fix for OASIS-409. Fixed indexing _id attribute at recovery.
+* Fix decoding of values in `padded` key generator when restoring from a dump.
-* Add shard-parallelism to arangodump when dumping collections with multiple
- shards.
- Previously, arangodump could execute a dump concurrently on different
- collections, but it did not parallelize the dump for multiple shards of the
- same collection.
- This change should speed up dumping of collections with multiple shards.
- When dumping multiple shards of the same collection concurrently, parallelism
- is still limited by all these threads needing to serialize their chunks into
- the same (shared) output file.
+* Fixed some situations of
+ [...]
+ SUBQUERY
+ FILTER
+ LIMIT
+ [...]
+ in AQL queries, yielding incorrect responses. A distributed state within the
+ subquery was not reset correctly. This could also lead to "shrink" errors
+ of AQL item blocks, or much higher query runtimes.
+ Fixes:
+ - BTS-252
+ - ES-687
+ - github issue: #13099
+ - github issue: #13124
+ - github issue: #13147
+ - github issue: #13305
+ - DEVSUP-665
-* Add option `--envelope` for arangodump, to control if each dumped document
- should be wrapped into a small JSON envelope (e.g.
- `{"type":2300,"data":{...}}`). This JSON envelope is not necessary anymore
- since ArangoDB 3.8, so omitting it can produce smaller (and slightly faster)
- dumps.
- Restoring a dump without these JSON envelopers is handled automatically by
- ArangoDB 3.8 and higher. Restoring a dump without these JSON envelopes into
- previous versions (pre 3.8) however is not supported. Thus the option should
- only be used if the client tools (arangodump, arangorestore) and the arangod
- server are all using v3.8 or higher, and the dumps will never be stored into
- earlier versions.
- The default value for this option is `true`, meaning the JSON wrappers will
- be stored as part of the dump. This is compatible with all previous versions.
+* Fix a bug in the agency Supervision which could lead to removeFollower
+ jobs constantly being created and immediately stopped again.
-* Fix some issues with key generators not properly taking into account the
- `allowUserKeys` attribute when in a cluster.
+* Limit additional replicas in failover cases to +2.
-* Make AQL optimizer rule "splice-subqueries" mandatory, in the sense that it
- cannot be disabled anymore. As a side effect of this change, there will no
- query execution plans created by 3.8 that contain execution nodes of type
- `SubqueryNode`. `SubqueryNode`s will only be used during query planning and
- optimization, but at the end of the query optimization phase will all have
- been replaced with nodes of types `SubqueryStartNode` and `SubqueryEndNode`.
- The code to execute non-spliced subqueries remains in place so that 3.8 can
- still execute queries planned on a 3.7 instance with the "splice-subqueries"
- optimizer rule intentionally turned off. The code for executing non-spliced
- subqueries can be removed in 3.9.
+* Prepare register planning for rolling upgrades. Previously, changes in
+ register planning from 3.7 to a future minor version (e.g. 3.8) could cause
+ queries executed by a 3.7 coordinator in combination with a future minor
+ version (e.g. 3.8) DBServer to fail during a rolling upgrade.
-* Normalize user-provided input/output directory names in arangoimport,
- arangoexport, arangodump and arangorestore before splitting them into path
- components, in the sense that now both forward and backward slashes can be
- used on Windows, even interchangeably.
+* Fixed rare objectId conflict for indexes.
+
+* Fix for OASIS-409: fixed indexing _id attribute at recovery.
+
+* Fix some issues with key generators not properly taking into account the
+ `allowUserKeys` attribute when in a cluster.
* Added the following bit handling functions to AQL:
- BIT_AND(array): and-combined result
- BIT_OR(array): or-combined result
- BIT_XOR(array): xor-combined result
- - BIT_NEGATE(value, bits): bitwise negation of `value`, with a mask of
- `bits` length
+ - BIT_NEGATE(value, bits): bitwise negation of `value`, with a mask of `bits`
+ length
- BIT_TEST(value, index): test if bit at position `index` is set in `value`
(indexes are 0-based)
- BIT_POPCOUNT(value): return number of bits set in `value`
- - BIT_SHIFT_LEFT(value, shift, bits): bitwise shift-left of `value` by
- `shift` bits, with a mask of `bits` length
+ - BIT_SHIFT_LEFT(value, shift, bits): bitwise shift-left of `value` by `shift`
+ bits, with a mask of `bits` length
- BIT_SHIFT_RIGHT(value, shift, bits): bitwise shift-right of `value` by
`shift` bits, with a mask of `bits` length
- BIT_CONSTRUCT(array): construct a number with bits set at the positions
@@ -5177,372 +7827,249 @@ devel
The prefix for binary integer literals is `0b`, e.g. `0b10101110`.
The prefix for hexadecimal integer literals is `0x`, e.g. `0xabcdef02`.
- Binary and hexadecimal integer literals can only be used for unsigned integers.
+ Binary and hexadecimal integer literals can only be used for unsigned
+ integers.
The maximum supported value is `(2 ^ 32) - 1`, i.e. `0xffffffff` (hexadecimal)
or `0b11111111111111111111111111111111` (binary).
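  A short arangosh sketch combining the new functions with the new literal
  notations (expected results in comments):
  ```
  db._query("RETURN BIT_AND([0b1100, 0xa])").toArray();   // [ 8 ]
  db._query("RETURN BIT_POPCOUNT(0xff)").toArray();       // [ 8 ]
  db._query("RETURN BIT_SHIFT_LEFT(1, 3, 8)").toArray();  // [ 8 ]
  ```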
-* AQL query execution plan register usage optimization.
-
- This is a performance optimization that may positively affect some AQL
- queries that use a lot of variables that are only needed in certain
- parts of the query.
- The positive effect will come from saving registers, which directly
- translates to saving columns in AqlItemBlocks.
-
- Previously, the number of registers that were planned for each depth
- level of the query never decreased when going from one level to the
- next. Even though unused registers were recycled since 3.7, this did
- not lead to unused registers being completely dismantled.
-
- Now there is an extra step at the end of the register planning that
- keeps track of the actually used registers on each depth, and that
- will shrink the number of registers for the depth to the id of the
- maximum register. This is done for each depth separately.
- Unneeded registers on the right hand side of the maximum used register
- are now discarded. Unused registers on the left hand side of the maximum
- used register id are not discarded, because we still need to guarantee
- that registers from depths above stay in the same slot when starting
- a new depth.
+* Print a warning at arangosh startup in case of a version mismatch (major or
+ minor version difference) between the arangosh version and the remote arangod
+ version.
-* Added metric `arangodb_aql_current_query` to track the number of currently
- executing AQL queries.
+* Fix a potential shutdown deadlock in AgencyCache.
* Updated arangosync to 1.2.2.
-* Fix a bug in the agency Supervision which could lead to removeFollower
- jobs constantly being created and immediately stopped again.
-
-* Limit additional replicas in failover cases to +2.
-
-* Print a version mismatch (major/minor version difference) between the
- arangosh version and the remote arangod version at arangosh startup.
-
-* Internal refactoring of K_PATH feature, with the goal to have all graph
- algorithms on the same framework. This change should not have any visible
- effect on users.
-
-* Fixed an endless busy loop which could happen if a coordinator tries to
- roll back a database creation, but the database has already been dropped
- by other means.
-
-* Removed server-side JavaScript object `ArangoClusterComm`, so it cannot be
- used from inside JavaScript operations or Foxx.
- The `ArangoClusterComm` object was previously used inside a few internal
- JavaScript operations, but was not part of the public APIs.
-
-* Restrict access to functions inside JavaScript objects `ArangoAgency` and
- `ArangoAgent` to JavaScript code that is running in privileged mode, i.e.
- via the server's emergency console, the `/_admin/execute` API (if turned on)
- or internal bootstrap scripts.
-
-* Added startup option `--javascript.transactions` to allow turning off JavaScript
- transactions if not needed. The default value for this option is `true`, meaning
- JavaScript transactions are available as before.
- However, with this option they can be turned off by admins to limit the amount
- of JavaScript user code that is executed.
-
-* Introduce a default memory limit for AQL queries, to prevent rogue queries from
- consuming the entire memory available to an arangod instance.
-
- The limit is introduced via changing the default value of the option `--query.memory-limit`
- from previously `0` (meaning: no limit) to a dynamically calculated value.
- The per-query memory limits defaults are now:
-
- Available memory: 0 (0MiB) Limit: 0 unlimited, %mem: n/a
- Available memory: 134217728 (128MiB) Limit: 33554432 (32MiB), %mem: 25.0
- Available memory: 268435456 (256MiB) Limit: 67108864 (64MiB), %mem: 25.0
- Available memory: 536870912 (512MiB) Limit: 201326592 (192MiB), %mem: 37.5
- Available memory: 805306368 (768MiB) Limit: 402653184 (384MiB), %mem: 50.0
- Available memory: 1073741824 (1024MiB) Limit: 603979776 (576MiB), %mem: 56.2
- Available memory: 2147483648 (2048MiB) Limit: 1288490189 (1228MiB), %mem: 60.0
- Available memory: 4294967296 (4096MiB) Limit: 2576980377 (2457MiB), %mem: 60.0
- Available memory: 8589934592 (8192MiB) Limit: 5153960755 (4915MiB), %mem: 60.0
- Available memory: 17179869184 (16384MiB) Limit: 10307921511 (9830MiB), %mem: 60.0
- Available memory: 25769803776 (24576MiB) Limit: 15461882265 (14745MiB), %mem: 60.0
- Available memory: 34359738368 (32768MiB) Limit: 20615843021 (19660MiB), %mem: 60.0
- Available memory: 42949672960 (40960MiB) Limit: 25769803776 (24576MiB), %mem: 60.0
- Available memory: 68719476736 (65536MiB) Limit: 41231686041 (39321MiB), %mem: 60.0
- Available memory: 103079215104 (98304MiB) Limit: 61847529063 (58982MiB), %mem: 60.0
- Available memory: 137438953472 (131072MiB) Limit: 82463372083 (78643MiB), %mem: 60.0
- Available memory: 274877906944 (262144MiB) Limit: 164926744167 (157286MiB), %mem: 60.0
- Available memory: 549755813888 (524288MiB) Limit: 329853488333 (314572MiB), %mem: 60.0
-
- As previously, a memory limit value of `0` means no limitation.
- The limit values are per AQL query, so they may still be too high in case queries
- run in parallel. The defaults are intentionally high in order to not stop any valid,
- previously working queries from succeeding.
-
-* Added startup option `--audit.queue` to control audit logging queuing
- behavior (Enterprise Edition only):
-
- The option controls whether audit log messages are submitted to a queue
- and written to disk in batches or if they should be written to disk directly
- without being queued.
- Queueing audit log entries may be beneficial for latency, but can lead to
- unqueued messages being lost in case of a power loss or crash. Setting
- this option to `false` mimics the behavior from 3.7 and before, where
- audit log messages were not queued but written in a blocking fashion.
-
-* Fixed some situations of
- [...]
- SUBQUERY
- FILTER
- LIMIT
- [...]
- in AQL queries, yielding incorrect responses. A distributed
- state within the subquery was not reset correctly.
- This could also lead into "shrink" errors of AQL item blocks,
- or much higher query runtimes.
- Fixes:
- - BTS-252
- - ES-687
- - github issue: #13099
- - github issue: #13124
- - github issue: #13147
- - github issue: #13305
- - DEVSUP-665
-
-* Added metric `arangodb_server_statistics_cpu_cores` to provide the number of
- CPU cores visible to the arangod process. This is the number of CPU cores
- reported by the operating system to the process.
- If the environment variable `ARANGODB_OVERRIDE_DETECTED_NUMBER_OF_CORES` is
- set to a positive value at instance startup, this value will be returned
- instead.
-
-* `COLLECT WITH COUNT INTO x` and `COLLECT var = expr WITH COUNT INTO x` are now
- internally transformed into `COLLECT AGGREGATE x = LENGTH()` and
- `COLLECT var = expr AGGREGATE x = LENGTH()` respectively. In addition, any
- argument passed to the `COUNT`/`LENGTH` aggregator functions are now optimized
- away. This not only simplified the code, but also allows more query optimizations:
- - If the variable in `COLLECT WITH COUNT INTO var` is not used, the implicit
- aggregator is now removed.
- - All queries of the form `COLLECT AGGREGATE x = LENGTH()` are now executed
- using the count executor, which can result in significantly improved
- performance.
-
* Minor and rare AQL performance improvement in nested subqueries:
  LET sq1 = ([..] FILTER false == true LET sq2 = () [..])
- where sq1 produces no data (e.g. by the above filter) for sq2,
- the part have been asked two times (second returns empty result),
- instead of one, if and only if the main query executes sq1 exactly one time.
+ where sq1 produces no data (e.g. due to the above filter) for sq2, that part
+ was asked twice (the second call returning an empty result) instead of once,
+ if and only if the main query executes sq1 exactly one time.
Now we get away with one call only.
- In the case sq1 has data, or sq1 is executed more often, only one call was needed
- (assuming the data fits in one batch).
+ In the case sq1 has data, or sq1 is executed more often, only one call was
+ needed (assuming the data fits in one batch).
-* Updated OpenSSL to 1.1.1i and OpenLDAP to 2.4.56.
+* Improve internal error reporting by cluster maintenance.
* Bug-Fix: In one-shard-database setups that were created in 3.6.* and then
upgraded to 3.7.5 the DOCUMENT method in AQL will now return documents again.
-* Make internal ClusterInfo::getPlan() wait for initial plan load from agency.
-
-* Added AQL timezone functions `DATE_TIMEZONE` and `DATE_TIMEZONES`.
-
-* Make DB servers report storage engine health to the agency, via a new "health"
- attribute in requests sent to Sync/ServerStates/.
- The supervision can in the future check this attribute if it is posted,
- and mark servers as BAD or FAILED in case an unhealthy status is reported.
- DB server health is currently determined by whether or not the storage engine
- (RocksDB) has reported a background error, and by whether or not the free disk
- space has reached a critical low amount. The current threshold for free disk
- space is set at 1% of the disk capacity (only the disk is considered that
- contains the RocksDB database directory).
- The minimum required free disk space percentage can be configured using the new
- startup option `--rocksdb.minimum-disk-free-percent`, which needs to be between
- 0 and 1 (including). A value of 0 disables the check.
- The minimum required free disk space can also be configured in bytes using the
- new startup option `--rocksdb.minimum-disk-free-bytes`. A value of 0 disables
- this check, too.
-
-* Failed servers are now reported consistently in the web interface, at
- approximately the same time in the navigation bar and in the nodes view.
- Previously these two places had their own, independent poll mechanism for the
- nodes' health, and they were updated independently, which could cause an
- inconsistent view of the nodes' availability.
- Using only one poll mechanism instead also saves some period background requests
- for the second availability check.
-
-* Updated arangosync to 1.2.1.
-
-* Clean up callback bin and empty promises in single-host-agency.
-
-* Stabilize a Foxx cleanup test.
-
-* Drop a pair of braces {} in /_admin/metrics in case of empty labels, which
- makes the API adhere better to the official Prometheus syntax.
-* Add some more metrics to the ConnectionPool.
+v3.7.6 (2021-01-04)
+-------------------
-* Remove HTTP "Connection" header when forwarding requests in the cluster
- from one coordinator to another, and let the internal network layer
- handle closing of connections and keep-alive.
+* Updated OpenSSL to 1.1.1i and OpenLDAP to 2.4.56.
* Added new metric "arangodb_collection_lock_sequential_mode". This counts how
  many times we need to do sequential locking of collections. If this metric
  increases, it indicates lock contention in transaction setup.
- Most likely this is caused by exclusive locks used on collections with
- more than one shard.
+ Most likely this is caused by exclusive locks used on collections with more
+ than one shard.
* Fix for BTS-213
Changed the transaction locking mechanism in the cluster case.
For all installations that do not use "exclusive" collection locks this change
will not be noticeable. In case of "exclusive" locks, and collections with more
than one shard, it is now less likely to get a LOCK_TIMEOUT (ErrorNum 18).
- It is still possible to get into the LOCK_TIMEOUT case, especially if
- the "exclusive" operation(s) are long-running.
+ It is still possible to get into the LOCK_TIMEOUT case, especially if the
+ "exclusive" operation(s) are long-running.
-* Reduce overhead of audit logging functionality if audit logging is turned
- off.
+* Fixed an endless busy loop which could happen if a coordinator tries to roll
+ back a database creation, but the database has already been dropped by other
+ means.
-* Add several more attributes to audit-logged queries, namely query execution
- time and exit code (0 = no error, other values correspond to general ArangoDB
- error codes).
+* Make internal ClusterInfo::getPlan() wait for initial plan load from agency.
+
+* Remove HTTP "Connection" header when forwarding requests in the cluster from
+ one coordinator to another, and let the internal network layer handle closing
+ of connections and keep-alive.
+
+* Prevent a write to RocksDB during recovery in the case that the database
+ already exists. The write at startup is potentially blocking, and will delay
+ the startup for servers that were shut down while in a write-stopped state.
+
+* Fix recovery of "clientId" values in Agency when restarting an agent from
+ persistence.
* Added "startupTime", "computationTime" and "storageTime" to Pregel result
statistics.
-* Fixed a bug in maintainer mode sorting followerinfo lists the wrong way.
-
-* Limit value of `--rocksdb.block-cache-size` to 1 GB for agent instances to
- reduce agency RAM usage, unless configured otherwise. In addition, limit the
- value of `--rocksdb.total-write-buffer-size` to 512 MB on agent instances for
- the same reason.
+* Add query execution time and query id to audit log query messages.
* Fixed issue #13238: the thread naming API on Windows is now used only if
  available in KERNEL32.DLL.
-* When querying the list of currently running or slow AQL queries, ignore not-yet
- created databases on other coordinators.
+* Fix for issue #772: Optimized document counting for ArangoSearch views.
+ Added new ArangoSearch view option 'countApproximate' for customizing view
+ count strategy.
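+
+ As a sketch (view name hypothetical), the new option can be set through the
+ view properties API:
+ ```
+ // use the cheaper approximate count strategy for this view
+ db._view("myView").properties({ countApproximate: "cost" }, true);
+ ```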
-* Added support for fetching the list of currently running and slow AQL queries
- from all databases at once, by adding an `all` parameter to the following
- query APIs:
+* Fix ordering of FollowerInfo lists in maintainer mode.
- * `require("@arangodb/aql/queries").current({ all: true })`: will return the
- currently running queries from all databases, not just the currently
- selected database.
- * HTTP GET `/_api/query/current?all=true`: same, but for the HTTP REST API.
- * `require("@arangodb/aql/queries").slow({ all: true })`: will return the
- slow query history from all databases, not just the currently selected
- database.
- * HTTP GET `/_api/query/slow?all=true`: same, but for the HTTP REST API.
- * `require("@arangodb/aql/queries").clearSlow({ all: true })`: will clear
- the slow query history for all databases, not just the currently selected
- database.
- * HTTP DELETE `/_api/query/slow?all=true`: same, but for the HTTP REST API.
+* Fix AR-113. Disallow non-values in the AQL geo-index-optimizer rule.
- Using the `all` parameter is only allowed when making the call inside the
- `_system` database and with superuser privileges.
+* Added SNI support for arangosh.
+
+* Fix agency restart with mismatching compaction and log indexes.
* Improve performance and memory efficiency of agency restart from persisted
database directory.
* Added the following agency-related metrics:
- - `arangodb_agency_client_lookup_table_size`: current number of entries
- in agency client id lookup table. This gauge is available only on
- agent instances.
- - `arangodb_agency_cache_callback_count`: current number of entries in
- agency cache callbacks table. This gauge will be effective on coordinators
- and DB servers.
+ - `arangodb_agency_client_lookup_table_size`: current number of entries in
+ agency client id lookup table. This gauge is available only on agent
+ instances.
+ - `arangodb_agency_cache_callback_count`: current number of entries in agency
+ cache callbacks table. This gauge will be effective on coordinators and DB
+ servers.
- `arangodb_agency_callback_count`: current number of agency callbacks
registered. This gauge will be effective on coordinators and DB servers.
-* Clean up agency change log, cluster info caches.
-
-* Added SNI support for arangosh.
-
-* Added new `rocksdb_write_stalls` and `rocksdb_write_stops` counter metrics,
- which should be more accurate than existing metrics related to the underlying
- conditions.
-
-* Increased the default value of `--rocksdb.min-write-buffer-number-to-merge` in
- some cases when we have allocated a sufficient amount of memory to the write
- buffers for this to make sense. The increased value should help prevent
- compaction-induced write stalls/stops, and should only be enabled when under
- conditions such that it shouldn't greatly increase the chance of flush-induced
- write stalls/stops.
-
-* Changed the default values for `--rocksdb.cache-index-and-filter-blocks` and
- `--rocksdb.cache-index-and-filter-blocks-with-high-priority` to true to
- improve control over memory usage.
-
-* Lowered the minimum allowed value for `--rocksdb.max-write-buffer-number` from
- 9 to 4 to allow more fine-grained memory usage control.
-
-* Fix for issue #772: Optimized document counting for ArangoSearch views.
- Added new ArangoSearch view option 'countApproximate' for customizing
- view count strategy.
-
-* Fix AR-113. Disallow non-values in the AQL geo-index-optimizer rule.
+* Fix cluster-internal replication of documents with special keys (percent
+ character, which has a special meaning when used inside URLs).
-* Views on SmartGraph Edge collections do not contain some documents
- twice.
+* Improvements for the Pregel distributed graph processing feature:
+ - during the loading/startup phase, the in-memory edge cache is now
+ intentionally bypassed. The reason for this is that any edges are looked up
+ exactly once, so caching them is not beneficial, but would only lead to
+ cache pollution.
+ - the loading/startup phase can now load multiple collections in parallel,
+ whereas previously it was only loading multiple shards of the same
+ collection in parallel. This change helps to reduce load times in case there
+ are many collections with few shards, and on single servers.
+ - the loading and result storage phases code has been overhauled so that it
+ runs slightly faster.
+ - for Pregel runs that are based on named graphs (in contrast to explicit
+ naming of the to-be-used vertex and edge collections), only those edge
+ collections are considered that, according to the graph definition, can have
+ connections with the vertex. This change can reduce the loading time
+ substantially in case the graph contains many edge definitions.
+ - the number of executed rounds for the underlying Pregel algorithm now does
+ not vary for different `parallelism` values.
-* Fixed agency redirect in poll api.
+* Reimplement coordshort request handler. The new implementation only runs two
+ DB queries without any additional requests to other coordinators, resulting in
+ reduced load on the cluster. Previously this involved requests to all
+ coordinators, where each of them ran two DB queries.
-* Fixed issue #12248: Web UI - Added missing HTML escaping in the setup script
- section of a Foxx app.
+* When querying the list of currently running or slow AQL queries, ignore
+ not-yet-created databases on other coordinators.
-* The scheduler will now run a minimum of 4 threads at all times, and the
- default and minimal value for `--server.maximal-threads` has been lowered from
- 64 to the greater of 32 and twice the number of detected cores.
+* Fix AQL cost estimate of spliced subqueries which could lead to overly large
+ numbers in the explain output of such queries.
-* Throttle work coming from low priority queue, according to a constant
- and to an estimate taking into account fanout for multi-shard operations.
+* Add an AQL query kill check during early pruning. Fixes issue #13141.
-* Move to 4 priority levels "low", "medium", "high" and "maintenance" in
- scheduler to ensure that maintenance work and diagnostics is always
- possible, even in the case of RocksDB throttles. Do not allow any
- RocksDB work on "maintenance".
+* Fix Windows directory creation error handling.
-* Commit replications on high priority queue.
+* Added new metrics for tracking AQL queries and slow queries:
+ * `arangodb_aql_query_time`: histogram with AQL query times distribution.
+ * `arangodb_aql_slow_query_time`: histogram with AQL slow query times
+ distribution.
-* Essentially get rid of timeout in replication to drop followers. This
- is now entirely handled via reboot and failure tracking. The timeout
- has now a default minimum of 15 minutes but can still be configured via
- options.
+* Reduce the number of dropped followers when running larger (>= 128 MB) write
+ transactions.
-* Additional metrics for all queue lengths and low prio ongoing work.
+* Remove a case in which followers were dropped unnecessarily in streaming
+ transactions that replicated to the same follower.
-* New metric for number and total time of replication operations.
+* Added metrics for collection locks:
+ - `arangodb_collection_lock_timeouts_exclusive`: Number of lock timeouts when
+ trying to acquire collection exclusive locks
+ - `arangodb_collection_lock_timeouts_write`: Number of lock timeouts when
+ trying to acquire collection write locks
+ - `arangodb_collection_lock_acquisition_micros`: Total amount of collection
+ lock acquisition time [μs]
+ - `arangodb_collection_lock_acquisition_time`: Total collection lock
+ acquisition time histogram [s]
-* New metrics for number of internal requests in flight, internal request
- duration, and internal request timeouts
+* Reduce lock timeout on followers to 15 seconds.
+ Rationale: we should not have any locking conflicts on followers, generally.
+ Any shard locking should be performed on leaders first, which will then
+ eventually replicate changes to followers. Replication to followers is only
+ done once the locks have been acquired on the leader(s).
-* Fix `Gauge` assignment operators.
+* Better tracking of memory used in AQL graph traversals, COLLECT and SORT
+ operations. From this version onwards, certain AQL queries can report a higher
+ memory usage than in previous versions of ArangoDB. This is not because the
+ queries use more memory than before, but because the memory usage tracking has
+ been improved.
+ A side effect of this change is that queries with a memory limit set may now
+ be aborted whereas in previous versions they ran through successfully (but
+ actually violated the limit). In this case it may be necessary to adjust (i.e.
+ raise) query memory limits accordingly.
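+
+ For example (a sketch; query and limit are illustrative), a per-query memory
+ limit can be set via the cursor API, and will now account for this memory:
+ ```
+ // abort the query if it tries to use more than ~10 MB of memory
+ arango.POST("/_api/cursor", {
+   query: "FOR i IN 1..1000000 SORT i RETURN i",
+   memoryLimit: 10485760
+ });
+ ```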
+
+* Added startup option `--foxx.force-update-on-startup` to toggle waiting for
+ all Foxx services in all databases to be propagated to a coordinator before it
+ completes the boot sequence.
+ In case the option is set to `false` (i.e. no waiting), the coordinator will
+ complete the boot sequence faster, and the Foxx services will be propagated
+ lazily. Until the initialization procedure has completed for the local Foxx
+ apps, any request to a Foxx app will be responded to with an HTTP 503 error
+ and the message
-* Fixed and extended LDAP log messages.
+ waiting for initialization of Foxx services in this database
-* Added LDAP_OFF if referrals and restart are false.
+ This can cause an unavailability window for Foxx services on coordinator
+ startup for the initial requests to Foxx apps until the app propagation has
+ completed.
+
+ When not using Foxx, this option should be set to `false` to benefit from a
+ faster coordinator startup.
+ Deployments relying on Foxx apps being available as soon as a coordinator is
+ integrated or responding should set this option to `true` (which is the
+ default value).
+ The option only has an effect for cluster setups.
+ On single servers and in active failover mode, all Foxx apps will be available
+ from the very beginning.
+ Note: ArangoDB 3.6 and 3.7 introduce this option with a default value of
+ `true`. ArangoDB 3.8 changes the default value to `false`.
-* If LDAP search fails, also retry (update to given number of retries).
+* Changed the server-side implementation of the following internal JavaScript
+ APIs to no-ops:
+ * `internal.reloadAqlFunctions()`: this is a no-op function now
+ * `@arangodb/actions.buildRouting()`: this is a no-op function now
+ * `@arangodb/actions.routingTree`: will return an empty object
+ * `@arangodb/actions.routingList`: will return an empty object
-* Add cluster support for collection.checksum() method to calculate CRC
- checksums for collections.
+ All the above APIs were intended to be used for internal means only. These
+ APIs are deprecated now and will be removed in ArangoDB v3.9.
-* Fix cluster-internal replication of documents with special keys (percent
- character, which has a special meaning when used inside URLs).
+* Fix HTTP/1.1 status response header in fuerte responses
-* Fix AQL cost estimate of spliced subqueries which could lead to overly large
- numbers in the explain output of such queries.
+ This change makes fuerte return the full status header, including the numeric
+ status code and the status string in the `http/1.1` header of fuerte
+ responses.
-* Make all Pregel HTTP and JavaScript APIs also accept stringified execution
- number values, in addition to numeric ones.
+ Previously, the return header lacked the numeric status code, so it looked
+ like
+ ```
+ "http/1.1" : "Ok"
+ ```
+ Now, with the numeric status code, the response header will look like
+ ```
+ "http/1.1" : "200 Ok"
+ ```
+ This PR also adds a protocol() method for arango client connections in order
+ to check the protocol in use. The possible return values are
+ - "http" for HTTP/1.1 connections
+ - "http2" for HTTP/2 connections
+ - "vst" for VST connections
+ - "unknown" for everyhting else
+ This is needed during testing, but can also be used for other purposes.
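+
+ A sketch of checking the protocol from arangosh (assuming the usual `arango`
+ connection object):
+ ```
+ // returns "http", "http2", "vst" or "unknown"
+ print(arango.protocol());
+ ```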
- This allows passing larger execution numbers as strings, so that any data
- loss due to numeric data type conversion (uint32 => double) can be avoided.
+* Fixed a bug in the connection pool which could prevent connection reuse under
+ high load and lead to lots of new connection creations, in particular with
+ TLS.
- The change also makes the Pregel HTTP and JavaScript APIs for starting a
- run return a stringified execution number, e.g. "12345" instead of 12345.
+* Added more metrics around the connection pool.
-* Updated ArangoDB Starter to 0.14.15-1.
+* Fix a potential nullptr access in AsyncAgencyComm in case there was a specific
+ error when sending an agency request.
-* Fix Windows directory creation error handling.
+* Clean up agency change log, cluster info caches.
-* Add an AQL query kill check during early pruning. Fixes issue #13141.
-* Remove a case in which followers were dropped unnecessarily in streaming
- transactions that replicated to the same follower.
+v3.7.5 (2020-12-09)
+-------------------
* Fixed ES-662 by introducing refactored thread pool to make more efficient
consolidation and commit routines for links of ArangoSearch views.
@@ -5564,63 +8091,85 @@ devel
if set to non-autodetect value > 0
- `--arangosearch.threads-limit`
-* Turn off `StatisticsWorker` thread on DB servers.
- This thread was previously only running queries on the local RocksDB
- instance, but using the cluster-wide collection names. So effectively it
- did nothing except use a bit of background CPU. In this case it is better
- to turn off the background thread entirely on the DB servers.
+* Updated ArangoDB Starter to 0.14.15-1.
-* Avoid the usage of std::regex when constructing date/time string values
- for log messages. This is a performance optimization only.
+* Fixed agency redirect in poll API.
-* Reimplement coordshort request handler. The new implementation only runs
- two DB queries without any additional requests to other coordinators,
- resulting in reduced load on the cluster. Previously this involved
- requests to all coordinators, where each of them ran two DB queries.
+* Updated arangosync to 1.2.1.
-* Fixed initial population of local AgencyCache values after a server restart.
- Previously the local cache was populated from the agency using a commit index
- value of 1, whereas it should have been 0 to get the full agency snapshot.
+* Added support for fetching the list of currently running and slow AQL queries
+ from all databases at once, by adding an `all` parameter to the following
+ query APIs:
-* Better tracking of memory used in AQL graph traversals, COLLECT and SORT
- operations. From this version onwards, certain AQL queries can report a
- higher memory usage than in previous versions of ArangoDB. This is not
- because the queries use more memory than before, but because the memory
- usage tracking has been improved.
- A side effect of this change is that queries with a memory limit set may
- now be aborted whereas in previous versions they ran through successfully
- (but actually violated the limit). In this case it may be necessary to
- adjust (i.e. raise) query memory limits accordingly.
-
-* Increase background garbage-collection interval for cluster transactions
- from 1 second to 2 seconds. This change should reduce the amount of
- background task activity a tiny bit (though hardly measurable on an
- otherwise idle server).
-
-* Make the audit log honor the configured logging date/time output format
- (i.e. `--log.time-format` option). Previously the audit logging always
- created a time value in the server's local time, and logged it in
- format YYYY-MM-DDTHH:MM:SS.
-
- From 3.8 onwards, the audit logger will honor the date/time format
- specified via the `--log.time-format` option, which defaults to
- `utc-datestring`. The means the audit logging will by default log all
- dates/times in UTC time. To restore the pre-3.8 behavior, please set
- the option `--log.time-format` to `local-datestring`, which will make
- the audit logger (and all other server log messages) use the server's
- local time.
+ * `require("@arangodb/aql/queries").current({ all: true })`: will return the
+ currently running queries from all databases, not just the currently
+ selected database.
+ * HTTP GET `/_api/query/current?all=true`: same, but for the HTTP REST API.
+ * `require("@arangodb/aql/queries").slow({ all: true })`: will return the slow
+ query history from all databases, not just the currently selected database.
+ * HTTP GET `/_api/query/slow?all=true`: same, but for the HTTP REST API.
+ * `require("@arangodb/aql/queries").clearSlow({ all: true })`: will clear the
+ slow query history for all databases, not just the currently selected
+ database.
+ * HTTP DELETE `/_api/query/slow?all=true`: same, but for the HTTP REST API.
+
+ Using the `all` parameter is only allowed when making the call inside the
+ `_system` database and with superuser privileges.
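+
+ For example, from arangosh (a sketch; requires a superuser connection to the
+ `_system` database):
+ ```
+ var queries = require("@arangodb/aql/queries");
+ // currently running queries across all databases
+ print(queries.current({ all: true }));
+ // clear the slow query history everywhere
+ queries.clearSlow({ all: true });
+ ```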
+
+* Fixed issue #12734: Accept HTTP headers into Foxx framework.
+
+* Fix Gauge class' assignment operators.
+
+* Clean up callback bin and empty promises in single-host-agency.
+
+* Fix an issue where a query would not return a result when the geo index was
+ used.
* Fix the activation of the agency supervision maintenance via the REST API
- `/_admin/cluster/maintenance`. This API stored a boolean value instead of
- an (expected) maintenance period end date/time string.
+ `/_admin/cluster/maintenance`. This API stored a boolean value instead of an
+ (expected) maintenance period end date/time string.
-* Make the cancel operation safe for asynchronously started JavaScript
+* Make the cancel operation safe for asynchronously started JavaScript
transactions (via HTTP POST to `/_api/transaction` with the `x-arango-async`
header set).
+* Fixed initial population of local AgencyCache values after a server restart.
+ Previously the local cache was populated from the agency using a commit index
+ value of 1, whereas it should have been 0 to get the full agency snapshot.
+
* Updated OpenSSL to 1.1.1h.
-* Added new scheduler metrics:
+* Make the number of network I/O threads properly configurable via the startup
+ option `--network.io-threads`. This option existed before, but its configured
+ value was effectively clamped to a value of `1`. ArangoDB 3.7.5 thus also uses
+ a default value of `1` for this option to remain compatible in terms of
+ default option values.
+
+* Fix internal issue #777: Fixed memory access while substituting stored values
+ for ArangoSearch view optimization.
+
+* Added new metric `arangodb_network_forwarded_requests` to track the number
+ of requests forwarded from one coordinator to another in a load-balancing
+ context.
+
+* Added new metric `arangodb_replication_cluster_inventory_requests` to track
+ the number of requests received for cluster inventories. The cluster
+ inventory API is called at the beginning of a dump process or by arangosync.
+
+* Added new AQL metrics:
+ - `arangodb_aql_total_query_time_msec`: Total execution time of all AQL
+ queries (ms)
+ - `arangodb_aql_all_query`: Total number of all AQL queries
+
+* Added more scheduler metrics:
+
+ - `arangodb_scheduler_threads_started`: Total number of scheduler threads
+ started
+ - `arangodb_scheduler_threads_stopped`: Total number of scheduler threads
+ stopped
- `arangodb_scheduler_jobs_done`: Total number of scheduler queue jobs done
- `arangodb_scheduler_jobs_submitted`: Total number of jobs submitted to the
scheduler queue
@@ -5629,10 +8178,6 @@ devel
- `arangodb_scheduler_num_working_threads`: Number of currently working
scheduler threads
-* Added new metric `arangodb_replication_cluster_inventory_requests` to track
- the number of requests received for cluster inventories. The cluster
- inventory API is called at the beginning of a dump process or by arangosync.
-
* Added startup option `--server.unavailability-queue-fill-grade`. This option
has a consequence for the `/_admin/server/availability` API only, which is
often called by load-balancers and other availability probing systems.
@@ -5640,129 +8185,43 @@ devel
grade of the scheduler's queue is below the configured value, or HTTP 503 if
the fill grade is above it. This can be used to flag a server as unavailable
in case it is already highly loaded.
-
- The default value for this option is `0.75`, i.e. 75%. This is a change
- compared to previous versions of ArangoDB, where the default value was `1`.
+ The default value for this option is `1`, which means that the availability
+ API will start returning HTTP 503 responses in case the scheduler
+ queue is completely full. This is mostly compatible with previous versions of
+ ArangoDB.
+ Previously the availability API still returned HTTP 200 in this situation, but
+ this can be considered a bug, because the server was effectively totally
+ overloaded.
+ To restore 100% compatible behavior with previous versions, it is possible to
+ set the option to a value of `0`, which is a special value indicating that the
+ queue fill grade will not be honored.
To prevent sending more traffic to an already overloaded server, it can be
- sensible to reduce the default value to even `0.5`.
- This would mean that instances with a queue longer than 50% of their
- maximum queue capacity would return HTTP 503 instead of HTTP 200 when their
- availability API is probed.
+ sensible to reduce the default value to `0.75` or even `0.5`.
+ This would mean that instances with a queue longer than 75% (or 50%, resp.) of
+ their maximum queue capacity would return HTTP 503 instead of HTTP 200 when
+ their availability API is probed.
Note: the default value for the scheduler queue length is 4096.
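  A minimal availability probe from arangosh (a sketch) that distinguishes the
  two cases:
  ```
  // 200 = available, 503 = queue fill grade above the configured threshold
  var res = arango.GET_RAW("/_admin/server/availability");
  print(res.code);
  ```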
-* Added metrics for the system CPU usage:
- - `arangodb_server_statistics_user_percent`: Percentage of time that the
- system CPUs have spent in user mode
- - `arangodb_server_statistics_system_percent`: Percentage of time that
- the system CPUs have spent in kernel mode
- - `arangodb_server_statistics_idle_percent`: Percentage of time that the
- system CPUs have been idle
- - `arangodb_server_statistics_iowait_percent`: Percentage of time that
- the system CPUs have been waiting for I/O
-
- These metrics resemble the overall CPU usage metrics in `top`.
- They are available on Linux only.
-
-* Fix internal issue #777: Fixed memory access while substituting
- stored values for ArangoSearch view optimization
-
-* Make the number of network I/O threads properly configurable via the
- startup option `--network.io-threads`. This option existed before, but its
- configured value was effectively clamped to a value of `1`.
-
-* Improvements for the Pregel distributed graph processing feature:
- - during the loading/startup phase, the in-memory edge cache is now
- intentionally bypassed. The reason for this is that any edges are
- looked up exactly once, so caching them is not beneficial, but would
- only lead to cache pollution.
- - the loading/startup phase can now load multiple collections in parallel,
- whereas previously it was only loading multiple shards of the same
- collection in parallel. This change helps to reduce load times in case
- there are many collections with few shards, and on single server.
- - the loading and result storage phases code has been overhauled so that
- it runs slightly faster.
- - for Pregel runs that are based on named graphs (in contrast to explicit
- naming of the to-be-used vertex and edge collections), only those edge
- collections are considered that, according to the graph definition, can
- have connections with the vertex. This change can reduce the loading
- time substantially in case the graph contains many edge definitions.
- - the number of executed rounds for the underlying Pregel algorithm now
- does not vary for different `parallelism` values.
-
-* Fix HTTP/1.1 status response header in fuerte responses
-
- This change makes fuerte return the full status header, including the
- numeric status code and the status string in the `http/1.1` header of
- fuerte responses.
-
- Previously, the return header lacked the numeric status code, so it
- looked like
- ```
- "http/1.1" : "Ok"
- ```
- Now, with the numeric status code, the response header will look like
- ```
- "http/1.1" : "200 Ok"
- ```
- This PR also adds a protocol() method for arango client connections in
- order to check the protocol in use. The possible return values are
- - "http" for HTTP/1.1 connections
- - "http2" for HTTP/2 connections
- - "vst" for VST connections
- - "unknown" for everything else
- This is needed during testing, but can also be used for other purposes.
-
-* Updated arangosync to 0.7.12.
-
-* Fix log topic of general shutdown message from "cluster" to general.
-
-* Automatically add "www-authenticate" headers to server HTTP 401 responses,
- as required by the HTTP specification.
+* Fixed a bug with ArangoSearch views on SmartGraph edge collections which could
+ contain some documents twice.
+ This change removes `_to_*` local auxiliary link creation and existence within
+ a view linked with a SmartGraph edge collection.
* Fixed an AQL bug that ignored PRUNE statements in OneShard setups.
-* Make the DOCUMENT AQL function eligible for running on DB servers in
- OneShard deployment mode. This allows pushing more query parts to DB servers
- for execution.
-
-* Enable HTTP request statistics and provide metrics even in case
- `--server.statistics-history` is set to `false` (this option will set
- itself to off automatically on agency instances on startup if not
- explicitly set).
- This change provides more metrics on all server instances, without the
- need to persist them in the instance's RocksDB storage engine.
-
-* Fixed a deadlock between AQL write transactions and hotbackup, since
- in AQL write transactions follower transactions did not know they are
- follower transactions.
-
-* Added metrics for collection locks:
- - `arangodb_collection_lock_timeouts_exclusive`: Number of lock timeouts
- when trying to acquire collection exclusive locks
- - `arangodb_collection_lock_timeouts_write`: Number of lock timeouts when
- trying to acquire collection write locks
- - `arangodb_collection_lock_acquisition_micros`: Total amount of collection
- lock acquisition time [μs]
- - `arangodb_collection_lock_acquisition_time`: Total collection lock
- acquisition time histogram [s]
-
-* Reduce lock timeout on followers to 15 seconds.
- Rationale: we should not have any locking conflicts on followers, generally.
- Any shard locking should be performed on leaders first, which will then,
- eventually replicate changes to followers. replication to followers is only
- done once the locks have been acquired on the leader(s).
-
-* Fix a memory leak because cluster internal connections were not cleaned
- up for agency communication.
-
-* Added compile option USE_JEMALLOC_PROF to enable memory profiling.
+* Added arangobench options:
+ - `--create-database`: create the test database on startup
+ - `--duration`: run the test for a given duration rather than a defined count
-* Remove extra CMake option `DEBUG_SYNC_REPLICATION` and use the already
- existing `USE_FAILURE_TESTS` options for its purpose.
+* Fixed a deadlock between AQL write transactions and hotbackup, since in AQL
+ write transactions follower transactions did not know they are follower
+ transactions.
-* Updated bundled version of Snappy compression/decompression library to 1.1.8.
+* Make the DOCUMENT AQL function eligible for running on DB servers in OneShard
+ deployment mode. This allows pushing more query parts to DB servers for
+ execution.
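+
+  A minimal arangosh sketch (collection name and document keys are
+  hypothetical):
+  ```
+  // with this change, the DOCUMENT() lookups below can run directly on
+  // the DB server that holds the data in a OneShard deployment
+  db._query(`
+    FOR id IN ["docs/1", "docs/2"]
+      RETURN DOCUMENT(id)
+  `).toArray();
+  ```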
* Fix REST API endpoint PUT `/_api/collection/<collection>/recalculateCount` on
coordinators. Coordinators sent a wrong message body to DB servers here, so
@@ -5770,176 +8229,68 @@ devel
* Fixed issue #12778: fails validation if additionalProperties: false.
-* Fixed BTS-233 issue: Fixed invalid IndexId comparator.
+* Added missing exception catch clauses for some parts of the supervision and
+  heartbeat threads.
-* Fixed potential deadlock in cluster transactions if a transaction is
- returned that was soft-aborted by transaction garbage collection before.
+* Fixed potential deadlock in cluster transactions if a transaction is returned
+ that was soft-aborted by transaction garbage collection before.
This deadlock should rarely ever occur in practice, as it can only be
triggered once during the server shutdown sequence.
-* Added support of `GEO_DISTANCE`, `GEO_CONTAINS`, `GEO_INTERSECTS`,
- `GEO_IN_RANGE` to ArangoSearch.
-
-* Added new `GeoJSON` ArangoSearch analyzer.
-
-* Added new `GeoPoint` ArangoSearch analyzer.
+* Fixed a memory leak: server-internal connections used for agency
+  communication were not cleaned up.
-* Added new `GEO_IN_RANGE` AQL function.
+* Added compile option USE_JEMALLOC_PROF to enable memory profiling.
-* Fixed handling of failedLeaderJob. In case of a plan modification, that
- removes a server from the plan, e.g. reduce replication factor. Directly
- followed by a failure of the current shard leader, would reinsert
- the just removed server in the plan, which is undesired, we first need
- to have a full "desync" cycle on this server to be reusable in the plan
- again.
+* Fixed BTS-233 issue: Fixed invalid IndexId comparator.
* Fixed very spurious errors if the `holdReadLockCollection` replication API for
the getting-in-sync procedure of shards was called during server shutdown.
In this case that method could ask the transaction manager for a specific
transaction, but wasn't returning one due to the server shutdown.
-* Added new 'aql' type for ArangoSearch analyzers.
-
-* Obsoleted the startup options `--database.throw-collection-not-loaded-error`
- and `--ttl.only-loaded-collection`.
-
- These options were meaningful for the MMFiles storage engine only, but for
- the RocksDB storage engine they did not make any difference. Using these startup
- options is still possible, but will have no effect other than generating a
- warning at server startup.
-
-* Added CMake option `USE_MINIMAL_DEBUGINFO`.
- This option is turned off by default. If turned on, the created binaries
- will contain only a minimum amount of debug symbols, reducing the size
- of the executables. If turned off (which is the default), the binaries
- will contain full debug information, which will make them bigger in size
- unless the debug information is later stripped again.
-
-* Modified the returned error code for calling the `shards()` function on a
- collection in single-server from "internal error" (error number 4) to "shards
- API is only available in cluster" and error number 9, HTTP status code 501.
+* The agency cache now clears its change history. This keeps the change
+  history, introduced in v3.7.4, from growing too large.
* Bug-fix: Allow unlinking a view created on a SmartGraphEdge collection.
-* If a collection (or database) is dropped during the instantiation of an AQL query,
- the setup code now aborts with an ERROR_QUERY_COLLECTION_LOCK_FAILED and earlier.
+* If a collection (or database) is dropped during the instantiation of an AQL
+  query, the setup code now aborts earlier, with
+  ERROR_QUERY_COLLECTION_LOCK_FAILED.
Previously, the setup code could abort with TRI_ERROR_INTERNAL in the same case.
-* Added WINDOW keyword to AQL to allow aggregations on related rows.
-
-* Added new graph method K_PATHS to AQL. This will enumerate all paths between a
- source and a target vertex that match the given length.
- For example, the query
- ```
- FOR path IN 2..4 OUTBOUND K_PATHS "v/source" TO "v/target" GRAPH "g"
- RETURN path
- ```
- will yield all paths in format
- {
- vertices: [v/source, ... , v/target],
- edges: [v/source -> v/1, ..., v/n -> v/target
- }
- that have length exactly 2 or 3 or 4, start at v/source and end at v/target.
- The order of those paths in the result set is not guaranteed.
-
-* Fixed issue BTS-195: AQL update queries using the `keepNull` option set to
- false had an inconsistent behavior. For example, given a collection `test`
- with an empty document with just key `testDoc`, the following query
- would return different results when running for the first time or the second
- time:
-
- UPDATE 'testDoc'
- WITH {test: {sub1: true, sub2: null}} IN test
- OPTIONS { keepNull: false, mergeObjects: true }
-
- For its first run, the query would return
-
- {
- "_key": "testDoc",
- "test": {
- "sub1": true,
- "sub2": null
- }
- }
-
- (with the `null` attribute value not being removed). For all subsequent runs,
- the same query would return
-
- {
- "_key": "testDoc",
- "test": {
- "sub1": true,
- }
- }
-
- (with the `null` value removed as requested).
-
- This inconsistency was due to how the `keepNull` attribute was handled if
- the attribute already existed in the to-be-updated document or not. The
- behavior is now consistent, so `null` values are now properly removed from
- sub-attributes even if in the to-be-updated document the target attribute
- did not yet exist. This makes such updates idempotent again.
-
- This a behavior change compared previous versions, but it will only have
- effect when `keepNull` is set to `false` (the default value is `true` however),
- and only when just-inserted object sub-attributes contained `null` values.
-
-* Optimization of empty append entries.
-
* Bug-fix: Creating an additional index on the edge collection of a disjoint
- SmartGraph could falsely result into an error:
+ SmartGraph could falsely result in an error:
`Could not find all smart collections ...`
This is now ruled out and indexes can be created as expected.
-* Added startup option `--foxx.force-update-on-startup` to toggle waiting
- for all Foxx services in all databases to be propagated to a coordinator
- before it completes the boot sequence.
- In case the option is set to `false` (i.e. no waiting), the coordinator
- will complete the boot sequence faster, and the Foxx services will be
- propagated lazily. Until the initialization procedure has completed for
- the local Foxx apps, any request to a Foxx app will be responded to with
- an HTTP 503 error and message
+* Fixed issue #12248: Web UI - Added missing HTML escaping in the setup script
+  section of a Foxx app.
- waiting for initialization of Foxx services in this database
+* Added a parameter so that `db.collection.truncate({compact: false})` stops
+  compaction from happening. Compaction may have a performance impact even if
+  the truncate is invoked on nearly empty collections.
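+
+  For example, in arangosh (collection name `docs` is hypothetical):
+  ```
+  // truncate without triggering a follow-up compaction
+  db.docs.truncate({ compact: false });
+  ```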
- This can cause an unavailability window for Foxx services on coordinator
- startup for the initial requests to Foxx apps until the app propagation
- has completed.
+* Instead of failing to connect to INADDR_ANY, refuse it as a parameter with a
+  descriptive error message for novice users (issue #12871).
- When not using Foxx, this option should be set to `false` (default) to
- benefit from a faster coordinator startup.
- Deployments relying on Foxx apps being available as soon as a coordinator
- is integrated or responding should set this option to `true`.
- The option only has an effect for cluster setups.
- On single servers and in active failover mode, all Foxx apps will be
- available from the very beginning.
- Note: ArangoDB 3.6 and 3.7 also introduced this option, but with a default
- value of `true`. ArangoDB 3.8 changes the default to `false`.
+* Fixed collection count which could be off after a server crash.
-* Changed the server-side implementation of the following internal JavaScript
- APIs to no-ops:
- * `internal.reloadAqlFunctions()`: this is a no-op function now
- * `@arangodb/actions.buildRouting()`: this is a no-op function now
- * `@arangodb/actions.routingTree`: will return an empty object
- * `@arangodb/actions.routingList`: will return an empty object
- All the above APIs were intended to be used for internal means only. These
- APIs are deprecated now and will be removed in ArangoDB v3.9.
+v3.7.4 (2020-10-16)
+-------------------
-* Instead of failing to connect to INADDR_ANY refuse it as a parameter, with a
- descriptive error message for novice users (issue #12871).
+* Data definition reconciliation in the cluster has been modified extensively
+  to greatly accelerate the creation of thousands of databases, through the
+  following means:
+  - AgencyCache offers a change sets API based on the Raft index.
+  - ClusterInfo caches are only updated using change sets.
+  - Maintenance uses local as well as agency change sets to limit the scope of
+    every runtime to these change sets.
-* Remove any special handling for obsoleted collection attributes
- `indexBuckets`, `journalSize`, `doCompact` and `isVolatile`. These
- attributes were meaningful only with the MMFiles storage engine and have
- no meaning with the RocksDB storage engine. Thus any special handling
- for these attributes can be removed in the internal code.
- Client applications and tests that rely on the behavior that setting
- any of these attributes produces an error when using the RocksDB engine
- may need adjustment now.
-* Added a --continue option to arangorestore. arangorestore now keeps track of the progress
- and can continue the restore operation when some error occurred.
+v3.7.3 (2020-10-14)
+-------------------
* Added the following metrics for synchronous replication in the cluster:
@@ -5949,29 +8300,31 @@ devel
checksum was detected when syncing shards. In case this happens, a resync
will be triggered for the shard.
-* Don't respond with misleading error in smart vertex collections.
+* Fixed handling of failedLeaderJob. A plan modification that removes a server
+  from the plan (e.g. reducing the replication factor), directly followed by a
+  failure of the current shard leader, would reinsert the just-removed server
+  into the plan. This is undesired: the server first needs to go through a
+  full "desync" cycle before it can be reused in the plan.
- When inserting a document with a non-conforming key pattern into
- a smart vertex collection, the response error code and message are
- 1466 (ERROR_CLUSTER_MUST_NOT_SPECIFY_KEY) and "must not specify _key
- for this collection".
- This is misleading, because it is actually allowed to specify a key
- value for documents in such collection. However, there are some
- restrictions for valid key values (e.g. the key must be a string and
- contain the smart graph attribute value at the front, followed by a
- colon.
- If any of these restrictions are not met, the server currently
- responds with "must not specify key for this collection", which is
- misleading. This change rectifies it so that the server responds with
- error 4003 (ERROR_KEY_MUST_BE_PREFIXED_WITH_SMART_GRAPH_ATTRIBUTE)
- and message "in smart vertex collections _key must be a string and
- prefixed with the value of the smart graph attribute". This should
- make it a lot easier to understand what the actual problem is.
+* Make sure the optimizer doesn't pick an index other than the TTL index
+  itself when handling the expiry of TTL documents.
-* Fixed handling of failoverCandidates. Sometimes, a server can still be a
- failoverCandidate even though it has been taken out of the Plan. With this
- fix, such a server is quickly taken out of failoverCandidates and it can never
- be re-added to the Plan before this has happened.
+* Added optional verbose logging for agency write operations. This logging is
+ configurable by using the new log topic "agencystore".
+
+  The following log levels can be used for the "agencystore" log topic to log
+  writes to the agency:
+ - DEBUG: will log all writes on the leader
+ - TRACE: will log all writes on both leaders and followers
+ The default log level for the "agencystore" log topic is WARN, meaning no
+ agency writes will be logged.
+ Turning on this logging can be used for auditing and debugging, but it is not
+ recommended in the general case, as it can lead to large amounts of data being
+ logged, which can have a performance impact and will lead to higher disk space
+ usage.
+
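+  A minimal sketch for raising the log level at runtime from arangosh,
+  assuming the standard `/_admin/log/level` API:
+  ```
+  // log all agency writes on the leader from now on
+  arango.PUT("/_admin/log/level", { agencystore: "DEBUG" });
+  ```
+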
+* Print image base address and CPU context (if available) in crash handler
+ messages.
* Added configuration option `--query.tracking-slow-queries` to decide whether
slow queries are tracked extra.
@@ -5979,6 +8332,7 @@ devel
* Added configuration option `--query.tracking-with-querystring` to decide
whether the query string is shown in the slow query log and the list of
currently running queries. The option is true by default.
+
When turned off, query strings in the slow query log and the list of currently
running queries are just shown as "<hidden>".
@@ -5988,47 +8342,24 @@ devel
When turned on, the names of data sources used by the query will be shown in
the slow query log and the list of currently running queries.
-* Fix an issue in arangoimport improperly handling filenames with less than 3
- characters. The specified input filename was checked for a potential ".gz"
- ending, but the check required the filename to have at least 3 characters.
- This is now fixed.
-
-* Added optional verbose logging for agency write operations. This logging
- is configurable by using the new log topic "agencystore".
-
- The following log levels can be used for the "agencystore" log topic
- to log writes to the agency:
- - DEBUG: will log all writes on the leader
- - TRACE: will log all writes on both leaders and followers
- The default log level for the "agencystore" log topic is WARN, meaning no
- agency writes will be logged.
- Turning on this logging can be used for auditing and debugging, but it is
- not recommended in the general case, as it can lead to large amounts of
- data being logged, which can have a performance impact and will lead to
- higher disk space usage.
+* Fixed handling of failoverCandidates. Sometimes, a server can still be a
+ failoverCandidate even though it has been taken out of the Plan. With this
+ fix, such a server is quickly taken out of failoverCandidates and it can never
+ be re-added to the Plan before this has happened.
* Fix #12693: SORT inside a subquery could sometimes swallow part of its input
when it crossed boundaries of internal row batches.
+* Fixed issue BTS-212: the Web UI did not allow partial view updates, and
+  partial view updates should be audited (also reported as ES-700).
+  Fixed the link definition comparison logic: equality wasn't properly
+  detected, which led to link recreation.
+
* Added configuration option `--rocksdb.sync-delay-threshold`.
This option can be used to track if any RocksDB WAL sync operation is
delayed by more than the configured value (in milliseconds). The intention
is to become aware of severely delayed WAL sync operations.
-* Fix for BTS-191: Made transaction API database-aware.
-
-* Minor clean up of and less verbosity in agent callbacks.
-
-* Speed up initial replication of collections/shards data by not wrapping
- each document in a separate `{"type":2300,"data":...}` envelope. In
- addition, the follower side of the replication will request data from
- leaders in VelocyPack format if the leader is running at least version
- 3.8.
- Stripping the envelopes and using VelocyPack for transfer allows for
- smaller data sizes when exchanging the documents and faster processing,
- and thus can lead to time savings in document packing and unpacking as
- well as reduce the number of required HTTP requests.
-
* Add database, shard name and error information to several shard-related log
messages.
@@ -6043,37 +8374,24 @@ devel
- `arangodb_http_request_statistics_user_requests`: Total number of HTTP
requests executed by clients
-* Added metric `arangodb_agency_callback_registered counter` for tracking the
- total number of agency callbacks that were registered.
-
* Fixed a bug in handling of followers which refuse to replicate operations.
In the case that the follower has simply been dropped in the meantime, we now
avoid an error reported by the shard leader.
-* Added weighted traversal. Use `mode: "weighted"` as option to enumerate
- paths by increasing weights. The cost of an edge can be read from an
- attribute which can be specified using `weightAttribute` option.
-
* Fix a performance regression when a LIMIT is combined with a COLLECT WITH
COUNT INTO. Reported in ES-692.
-* Fixed issue ES-696: SEARCH vs FILTER lookup performance.
- Consolidation functionality for ArangoSearch view links was able to hit non-
- mergeable enormous amount of segments due to improper scheduling logic.
-
-* Data definition reconciliation in cluster has been modified
- extensively to greatly accelerate the creation of 1000s of
- databases through following means:
- - AgencyCache offers change sets API based on Raft index.
- - ClusterInfo caches are only updated using change sets.
- - Maintenance uses local as well as agency change sets to limit
- the scope of every runtime to these change sets.
+* Fix REST handler GET /_admin/status when called with URL parameter value
+ `overview=true`. For generating the `hash` attribute in the response, the
+ current Plan was retrieved and analyzed. Due to a change in the internal Plan
+  format, the REST handler code failed to pick up the number of servers, which
+ resulted in the REST handler returning HTTP 500 in cluster mode.
-* Make scheduler react and start new threads slightly faster in case a lot
- of new work arrives.
+* Use rclone built from v1.51.0 source with go1.15.2 instead of prebuilt
+ v1.51.0 release.
-* Make scheduler properly count down the number of working threads in case
- an exception happens in a worker thread.
+* Fixed a bug in AQL COLLECT with OPTIONS { "hash" } that led to a quadratic
+ runtime in the number of output rows.
* Added startup option `--database.old-system-collections` to toggle automatic
creation of system collections `_modules` and `_fishbowl`, along with their
@@ -6082,108 +8400,103 @@ devel
The `_modules` collection is only used to register custom JavaScript modules,
for which there exists no API, and `_fishbowl` is used to store the temporary
list of Foxx apps retrieved from the GitHub Foxx store.
- If the option value is `false` (which is the default from v3.8 onwards), the
- two collections will not be created for any new database. The `_fishbowl`
+ If the option value is `false` (which is the default from v3.8 onwards, but
+  for v3.7 the default value is `true` for backwards compatibility), the two
+ collections will not be created for any new database. The `_fishbowl`
collection will still be created dynamically when needed. If the option value
- is `true`, the collections will be created regularly as before.
- The option will also be introduced to v3.7, where it will have a default
- value of `true`, meaning the collections will still be created there.
+ is `true` (the default value in v3.7), the collections will be created
+ regularly as before.
+ The default value for the option is going to change to `false` in v3.8,
+  meaning the collections will no longer be created there by default.
Any functionality related to the `_modules` system collection is deprecated
and will be removed in ArangoDB v3.9.
- Two side effects of turning this option off (which is the default) are:
- * there will be no iteration over all databases at server startup just to check
- the contents of all `_modules` collections.
+ Two side effects of turning this option off are:
+  * there will be no iteration over all databases at server startup just to
+ check the contents of all `_modules` collections.
* fewer collections/shards will be around for deployments that create a large
number of databases.
Already existing `_modules` and `_fishbowl` system collections will not be
- modified by this change, even though they will likely be empty and unused.
+  modified by this change, even though they will likely be empty and unused.
* Don't iterate over all databases at server startup in order to initialize the
routing information. This is not necessary, as the routing information is
global and not tied to a specific database.
- Any functionality related to the `_modules` system collection is deprecated
- and will be removed in ArangoDB v3.9.
-
-* Use rclone built from v1.51.0 source with go1.15.2 instead of prebuilt
- v1.53.0 release.
-
* Fixed a possible crash during instantiation of an AQL graph traversal.
Reported in #12597.
-* Added new ArangoSearch "pipeline" analyzer type
+* Added safeguards against using V8 internally in environments that have
+ JavaScript turned off via the `--javascript.enabled false` option.
-* Reduce the number of dropped followers when running larger (>= 128 MB)
- write transactions.
+* Make scheduler properly count down the number of working threads in case an
+ exception happens in a worker thread.
-* Fixed a bug in AQL COLLECT with OPTIONS { "hash" } that led to a quadratic
- runtime in the number of output rows.
+* Turn off upgrade checks in arangod in alpha/beta/preview Enterprise builds,
+  too.
+  Previously, upgrade checks were already turned off in arangod for stable
+  Enterprise releases, but not for preview releases.
-* Make the reboot tracker catch failed coordinators, too. Previously the
- reboot tracker was invoked only when a DB server failed or was restarted,
- and when a coordinator was restarted. Now it will also act if a coordinator
- just fails (without restart).
+* Fixed and extended LDAP log messages.
-* Added scheduler thread creation/destruction metrics:
+* Added LDAP_OFF if referrals and restart are false.
- - `arangodb_scheduler_threads_started`: Number of scheduler threads started
- - `arangodb_scheduler_threads_stopped`: Number of scheduler threads stopped
+* If an LDAP search fails, retry it (up to the given number of retries).
-* Added replication metrics `arangodb_replication_initial_sync_bytes_received`
- for the number of bytes received during replication initial sync operations
- and `arangodb_replication_tailing_bytes_received` for the number of bytes
- received for replication tailing requests.
- Also added `arangodb_replication_failed_connects` to track the number of
- connection failures or non-OK response during replication.
+* Fixed infinite reload of the login window after logout of an LDAP user.
-* Added metrics `rocksdb_free_inodes` and `rocksdb_total_inodes` to track the
- number of free inodes and the total/maximum number of inodes for the file
- system the RocksDB database directory is located in. These metrics will
- always be 0 on Windows.
+* Make the reboot tracker catch failed coordinators, too. Previously the reboot
+ tracker was invoked only when a DB server failed or was restarted, and when a
+ coordinator was restarted. Now it will also act if a coordinator just fails
+ (without restart).
-* Fixed infinite reload of the login window after logout of an LDAP user.
+* Added scheduler thread creation/destruction metrics:
+
+ - `arangodb_scheduler_threads_started`: Number of scheduler threads started
+ - `arangodb_scheduler_threads_stopped`: Number of scheduler threads stopped
-* Added startup option `--query.max-runtime` to limit the maximum runtime of
- all AQL queries to a specified threshold value (in seconds). By default,
- the threshold is 0, meaning that the runtime of AQL queries is not limited.
- Setting it to any positive value will restrict the runtime of all AQL
- queries unless it is overwritten in the per-query "maxRuntime" query option.
+* Added startup option `--query.max-runtime` to limit the maximum runtime of all
+ AQL queries to a specified threshold value (in seconds). By default, the
+ threshold is 0, meaning that the runtime of AQL queries is not limited.
+ Setting it to any positive value will restrict the runtime of all AQL queries
+ unless it is overwritten in the per-query "maxRuntime" query option.
Please note that setting this option will affect *all* queries in all
databases, and also queries issued for administration and database-internal
purposes.
If a query exceeds the configured runtime, it will be killed on the next
- occasion when the query checks its own status. Killing is best effort,
- so it is not guaranteed that a query will no longer than exactly the
- configured amount of time.
-
-* Updated rclone to 1.53.0.
-
-* Fixed slightly wrong log level for authentication and also added login event
- to the standard log.
+  occasion when the query checks its own status. Killing is best effort, so it
+  is not guaranteed that a query will run no longer than exactly the
+  configured amount of time.
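+
+  A minimal arangosh sketch of the per-query override (the query itself is
+  just an example):
+  ```
+  // cap this one query at 5 seconds, regardless of the global setting
+  db._query("RETURN SLEEP(1)", {}, { maxRuntime: 5 });
+  ```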
-* Ensure that the argument to an AQL OPTIONS clause is always an object
- which does not contain any dynamic (run-time) values. Previously, this
- was only enforced for traversal options and options for data-modification
- queries. This change extends the check to all occurrences of OPTIONS.
+* Ensure that the argument to an AQL OPTIONS clause is always an object which
+ does not contain any dynamic (run-time) values. Previously, this was only
+ enforced for traversal options and options for data-modification queries. This
+ change extends the check to all occurrences of OPTIONS.
* Added `details` option to figures command of a collection:
`collection.figures(details)`
Setting `details` to `true` will return extended storage engine-specific
- details to the figures. The details are intended for debugging ArangoDB
- itself and their format is subject to change. There is not much use in using
- the details from a client application.
+ details to the figures. The details are intended for debugging ArangoDB itself
+ and their format is subject to change. There is not much use in using the
+ details from a client application.
By default, `details` is set to `false`, so no details are returned and the
behavior is identical to previous versions of ArangoDB.
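+
+  For example, in arangosh (collection name `docs` is hypothetical):
+  ```
+  db.docs.figures();      // regular figures, as in previous versions
+  db.docs.figures(true);  // extended, storage engine-specific details
+  ```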
-* Enforce a maximum result register usage limit in AQL queries. In an AQL
- query, every user-defined or internal (unnamed) variable will need a
- register to store results in.
+* Implement RebootTracker usage for AQL queries in case of coordinator restarts
+  or failures. This will clean up the rest of an AQL query on DB servers more
+ quickly and in particular release locks faster.
+
+* Serialize maintenance actions for each shard. This addresses lost document
+ problems found in chaos testing.
+
+* Enforce a maximum result register usage limit in AQL queries. In an AQL query,
+ every user-defined or internal (unnamed) variable will need a register to
+ store results in.
- AQL queries that use more result registers than allowed (currently 1000)
- will now abort deterministically during the planning stage with error 32
+ AQL queries that use more result registers than allowed (currently 1000) will
+ now abort deterministically during the planning stage with error 32
(`resource limit exceeded`) and the error message
"too many registers (1000) needed for AQL query".
@@ -6191,125 +8504,27 @@ devel
crashed the server when assertions were turned on, and the behavior was
undefined when assertions were turned off.
-* Implement RebootTracker usage for AQL queries in case of coordinator
- restarts or failures. This will clean up the rest of an AQL query
- on dbservers more quickly and in particular release locks faster.
-
-* Serialize maintenance actions for each shard. This addresses lost document
- problems found in chaos testing.
-
-* Fixed an issue with audit logging misreporting some document requests as
- internal instead of logging the proper request information
-
-* Add option `--rocksdb.max-write-buffer-size-to-maintain` with default of 0.
- This configures how much memory RocksDB is allowed to use for immutable
- flushed memtables/write-buffers. The default of 0 will usually be good
- for all purposes and restores the 3.6 memory usage for write-buffers.
-
-* Updated arangosync to 0.7.10.
-
-* Make followers in active failover run a compaction after they process a
- truncate operation and the truncate removed more than 4k documents. This
- can help to reclaim disk space on the follower earlier than without running
- the truncate.
-
-* Added REST API PUT `/_admin/compact` for compacting the entire database
- data. This endpoint can be used to reclaim disk space after substantial data
- deletions have taken place. The command is also exposed via the JavaScript
- API as `db._compact();`.
-
- This command can cause a full rewrite of all data in all databases, which
- may take very long for large databases. It should thus only be used with care
- and only when additional I/O load can be tolerated for a prolonged time.
-
- This command requires superuser access.
-
-* Added new metrics for the total and the free disk space for the mount
- used for the RocksDB database directory:
-
- * `arangodb_rocksdb_free_disk_space`: provides the free disk space for
- the mount, in bytes
- * `arangodb_rocksdb_total_disk_space`: provides the total disk space of
- the mount, in bytes
-
* Fixed some cases where subqueries in PRUNE did not result in a parse error,
but either in an incomprehensible error (in 3.7), or undefined behavior
during execution (pre 3.7).
-* Apply user-defined idle connection timeouts for HTTP/2 and VST connections.
- The timeout value for idle HTTP/2 and VST connections can now be configured
- via the configuration option `--http.keep-alive-timeout` in the same way
- as for HTTP/1 connections.
- HTTP/2 and VST connections that are sending data back to the client are now
- closed after 300 seconds or the configured idle timeout (the higher of both
- values is used here).
- Before this change, the timeouts for HTTP/2 and VST connections were hard-
- coded to 120 seconds, and even non-idle connections were closed after this
- timeout.
-
-* Added new metric `arangodb_network_forwarded_requests` to track the number
- of requests forwarded from one coordinator to another in a load-balancing
- context.
+* Fixed an issue with audit logging misreporting some document requests as
+ internal instead of logging the proper request information.
-* Added new metrics for tracking AQL queries and slow queries:
- * `arangodb_aql_query_time`: histogram with AQL query times distribution.
- * `arangodb_aql_slow_query_time`: histogram with AQL slow query times
- distribution.
- * `arangodb_aql_all_query`: total number of all AQL queries.
+* Add attributes `database` and `user` when tracking current and slow AQL
+ queries.
+ `database` contains the name of the database the query is/was running in,
+ `user` contains the name of the user that started the query.
+ These attributes will be returned in addition when calling the APIs for
+ current and slow query inspection:
+  * GET `/_api/query/current` and `require("@arangodb/aql/queries").current()`
+  * GET `/_api/query/slow` and `require("@arangodb/aql/queries").slow()`
-* Added new metrics for replication:
- * `arangodb_replication_dump_requests`: number of replication dump requests
- made.
- * `arangodb_replication_dump_bytes_received`: number of bytes received in
- replication dump requests.
- * `arangodb_replication_dump_documents`: number of documents received in
- replication dump requests.
- * `arangodb_replication_dump_request_time`: wait time for replication dump
- requests.
- * `arangodb_replication_dump_apply_time`: time required for applying data
- from replication dump responses.
- * `arangodb_replication_initial_sync_keys_requests`: number of replication
- initial sync keys requests made.
- * `arangodb_replication_initial_sync_docs_requests`: number of replication
- initial sync docs requests made.
- * `arangodb_replication_initial_sync_docs_requested`: number of documents
- requested via replication initial sync requests.
- * `arangodb_replication_initial_sync_docs_inserted`: number of documents
- inserted by replication initial sync.
- * `arangodb_replication_initial_sync_docs_removed`: number of documents
- inserted by replication initial sync.
- * `arangodb_replication_initial_chunks_requests_time`: wait time histogram
- for replication key chunks determination requests.
- * `arangodb_replication_initial_keys_requests_time`: wait time for replication
- keys requests.
- * `arangodb_replication_initial_docs_requests_time`: time needed to apply
- replication docs data.
- * `arangodb_replication_initial_insert_apply_time`: time needed to apply
- replication initial sync insertions.
- * `arangodb_replication_initial_remove_apply_time`: time needed to apply
- replication initial sync removals.
- * `arangodb_replication_initial_lookup_time`: time needed for replication
- initial sync key lookups.
- * `arangodb_replication_tailing_requests`: number of replication tailing
- requests.
- * `arangodb_replication_tailing_follow_tick_failures`: number of replication
- tailing failures due to missing tick on leader.
- * `arangodb_replication_tailing_markers`: number of replication tailing
- markers processed.
- * `arangodb_replication_tailing_documents`: number of replication tailing
- document inserts/replaces processed.
- * `arangodb_replication_tailing_removals`: number of replication tailing
- document removals processed.
- * `arangodb_replication_tailing_bytes_received`: number of bytes received
- for replication tailing requests.
- * `arangodb_replication_tailing_request_time`: wait time for replication
- tailing requests.
- * `arangodb_replication_tailing_apply_time`: time needed to apply replication
- tailing markers.
+ The "slow query" log message has also been augmented to contain the database
+ name and the user name.
-* Allow calling of REST APIs `/_api/engine/stats`, GET `/_api/collection`,
- GET `/_api/database/current` and GET `/_admin/metrics` on followers in active
- failover deployments. This can help debugging and inspecting the follower.
+ The `user` attribute is now also displayed in the web interface in the
+ "Running queries" and "Slow queries" views.
* Added metrics for V8 contexts usage:
* `arangodb_v8_context_alive`: number of V8 contexts currently alive.
@@ -6319,105 +8534,116 @@ devel
* `arangodb_v8_context_max`: maximum number of concurrent V8 contexts.
* `arangodb_v8_context_min`: minimum number of concurrent V8 contexts.
-* Fix for issue BTS-183: added pending operations purging before ArangoSearch
- index truncation
-
-* Don't allow creation of smart satellite graphs or collections (i.e. using
- `"isSmart":true` together with `"replicationFactor":"satellite"` when creating
- graphs or collections. This combination of parameters makes no sense, so that
- the server will now respond with "bad parameter" and an HTTP status code of
- HTTP 400 ("Bad request").
-
-* Fixed: More cases in AQL can now react to a query being killed, so reaction
- time to query abortion is now shortened. This was a regression in comparison
- to 3.6 series.
-
-* Support projections on sub-attributes (e.g. `a.b.c`).
-
- In previous versions of ArangoDB, projections were only supported on
- top-level attributes. For example, in the query
-
- FOR doc IN collection
- RETURN doc.a.b
-
- the projection that was used was just `a`. Now the projection will be `a.b`,
- which can help reduce the amount of data to be extracted from documents,
- when only some sub-attributes are accessed.
+* Updated arangosync to 0.7.11.
- In addition, indexes can now be used to extract the data of sub-attributes
- for projections. If for the above example query an index on `a.b` exists,
- it will be used now. Previously, no index could be used for this projection.
+* Make followers in active failover run a compaction after they process a
+ truncate operation and the truncate removed more than 4k documents. This can
+ help to reclaim disk space on the follower earlier than without running the
+ truncate.
- Projections now can also be fed by any attribute in a combined index. For
- example, in the query
+* The REST API PUT `/_api/collection/<collection>/truncate` will now also run a
+ compaction if the truncation affected more than 4k documents. This may add
+ extra latency to the truncate operation, but can help to reclaim disk space
+ earlier.
- FOR doc IN collection
- RETURN doc.b
+* Added REST API PUT `/_admin/compact` for compacting the entire database data.
+ This endpoint can be used to reclaim disk space after substantial data
+ deletions have taken place. The command is also exposed via the JavaScript API
+ as `db._compact();`.
- the projection can be satisfied by a single-attribute index on attribute `b`,
- but now also by a combined index on attributes `a` and `b` (or `b` and `a`).
+ This command can cause a full rewrite of all data in all databases, which may
+ take very long for large databases. It should thus only be used with care and
+ only when additional I/O load can be tolerated for a prolonged time.
-* Remove some JavaScript files containing testsuites and test utilities from our
- official release packages.
+ This command requires superuser access and is only available for the RocksDB
+ storage engine.
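+
+  For example, from arangosh (requires superuser access):
+  ```
+  // rewrite and compact all data in all databases; may take a long time
+  db._compact();
+  ```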
-* Fixed internal issue #741: STARTS_WITH fails to accept 'array' as variable.
+* Don't allow creation of smart satellite graphs or collections (i.e. using
+  `"isSmart":true` together with `"replicationFactor":"satellite"`) when
+  creating graphs or collections. This combination of parameters makes no
+  sense, so the server will now respond with "bad parameter" and HTTP status
+  code 400 ("Bad request").
-* Fixed internal issue #738: PHRASE doesn't accept a reference to an array of
- arguments.
+* Add exit code for ICU database loading startup errors.
-* Fixed internal issue #747: fixed possible dangling open files in ArangoSearch
- index after remove operations.
+* Fixed issue #12507: SegFault when using an AQL for loop through edges.
-* Make the `IS_IPV4` AQL function behave identical on MacOS as on other
- platforms. It previously allowed leading zeros in octets on MacOS,
- whereas on other platforms they were disallowed.
- Now this is disallowed on MacOS as well.
+* Make the `IS_IPV4` AQL function behave identically on macOS and on other
+  platforms. It previously allowed leading zeros in octets on macOS, whereas on
+ other platforms they were disallowed.
+ Now this is disallowed on macOS as well.
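+
+  For example (results shown as comments):
+  ```
+  db._query('RETURN IS_IPV4("127.0.0.1")').toArray();   // [ true ]
+  db._query('RETURN IS_IPV4("127.0.00.1")').toArray();  // [ false ]
+  ```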
* Added new metric "arangodb_aql_slow_query" for slow AQL queries, so this can
be monitored more easily.
+* Added new metric "arangodb_scheduler_queue_length" for the scheduler's
+ internal queue length.
+
* Added new metric "arangodb_scheduler_queue_full_failures" for tracking cases
of a full scheduler queue and dropping requests.
-* Added new metrics for the number of V8 contexts dynamically created and destroyed
- ("arangodb_v8_context_created" and "arangodb_v8_context_destroyed") and for the
- number of times a V8 context was entered and left ("arangodb_v8_context_entered"
- and "arangodb_v8_context_exited"). There is also a new metric for tracking the
- cases when a V8 context cannot be successfully acquired and an operation is not
- performed ("arangodb_v8_context_enter_failures").
+* Added new metrics for the number of V8 contexts dynamically created and
+ destroyed ("arangodb_v8_context_created" and "arangodb_v8_context_destroyed")
+ and for the number of times a V8 context was entered and left
+ ("arangodb_v8_context_entered" and "arangodb_v8_context_exited"). There is
+ also a new metric for tracking the cases when a V8 context cannot be
+ successfully acquired and an operation is not performed
+ ("arangodb_v8_context_enter_failures").
* Added extra info to "queue full" and "giving up waiting for unused v8 context"
log messages.
-* Request to the `/_admin/statistics` API now processed via the CLIENT_FAST lane.
+* Requests to the `/_admin/statistics` API are now processed via the
+  CLIENT_FAST lane.
Previously they were handled in the CLIENT_SLOW lane, meaning that monitoring
requests using that API didn't get through when the queue was rather full.
+* Introduce an internal high-water mark for the maximum row number that was
+  written to in an AqlItemBlock. Using this number, several operations on the
+  whole block, such as cleaning up or copying, can be made more efficient when
+  run on only partially filled blocks.
+
* Fixed issue BTS-169: cost estimation for LIMIT nodes showed wrong number of
estimated items.
-* Fixed issue #12507: SegFault when using an AQL for loop through edges.
-* Add attributes `database` and `user` when tracking current and slow AQL queries.
- `database` contains the name of the database the query is/was running in, `user`
- contains the name of the user that started the query.
- These attributes will be returned in addition when calling the APIs for current
- and slow query inspection:
- * GET `/_api/query/current` and `require("arangodb/aql/queries").current()`
- * GET `/_api/query/slow` and `require("arangodb/aql/queries").slow()`
+v3.7.2.2 (2020-10-07)
+---------------------
- The "slow query" log message has also been augmented to contain the database
- name and the user name.
+* Fixed issue ES-664: SEARCH vs FILTER lookup performance.
+  The consolidation functionality for ArangoSearch view links could hit an
+  enormous amount of non-mergeable segments due to improper scheduling logic.
- The `user` attribute is now also displayed in the web interface in the "Running
- queries" and "Slow queries" views.
+* Fix for issue BTS-183: added pending operations purging before ArangoSearch
+ index truncation.
-* Introduce an internal high-water mark for the maximum row number that was
- written to in an AqlItemBlock. Using this number several operations on the
- whole block, such as cleaning up or copying can be made more efficient when
- run on only partially filled blocks.
+* Fixed: More cases in AQL can now react to a query being killed, so reaction
+ time to query abortion is now shortened. This was a regression in comparison
+ to 3.6 series.
-* Updated arangosync to 0.7.10.
+* Fixed internal issue #741: STARTS_WITH fails to accept 'array' as variable.
+
+* Fixed internal issue #738: PHRASE doesn't accept a reference to an array of
+ arguments.
+
+* Fixed internal issue #747: fixed possible dangling open files in ArangoSearch
+ index after remove operations.
+
+
+v3.7.2.1 (2020-09-02)
+---------------------
+
+* Add option `--rocksdb.max-write-buffer-size-to-maintain` with default of 0.
+ This configures how much memory RocksDB is allowed to use for immutable
+ flushed memtables/write-buffers. The default of 0 will usually be good for all
+ purposes and restores the 3.6 memory usage for write-buffers.
+
+
+v3.7.2 (2020-08-21)
+-------------------
+
+* Fixed internal issue #744: LIMIT with only offset and constrained heap
+ optimization will use estimation value for ArangoSearch views.
* Make UPSERT statement with collection bind parameter behave identical to its
non-bind parameter counterpart.
@@ -6436,11 +8662,19 @@ devel
with a hard-coded collection name would succeed. This is now fixed so both
queries have the same behavior (no failure) in single server.
-* Fixed internal issue #744: LIMIT with only offset and constrained heap
- optimization will use estimation value for ArangoSearch views.
+* Updated arangosync to 0.7.10.
+
+* Fixed internal issue #742: Add tick storage in index meta payload for
+ ArangoSearch view links after collection truncate operation.
-* Fix internal issue #742: Add tick storage in index meta payload after
- truncate operation
+* Fixed issue #12304: insert in transaction causing
+ com.arangodb.ArangoDBException: Response: 500, Error: 4 - Builder value not
+ yet sealed.
+
+ This happened when too deeply-nested documents (more than 63 levels of
+ nesting) were inserted. While indefinite nesting is still not supported, the
+ error message has been corrected from the internal HTTP 500 error "Builder
+ value not yet sealed" to the correct HTTP 400 "Bad parameter".
* Fixed: During a move-shard job which moves the leader there is a
situation in which the old owner of a shard can reclaim ownership
@@ -6450,120 +8684,70 @@ devel
Supervision job would then leave the shard in a bad configuration
with a resigned leader permanently in charge.
-* Fixed issue #12304: insert in transaction causing com.arangodb.ArangoDBException:
- Response: 500, Error: 4 - Builder value not yet sealed.
-
- This happened when too deeply-nested documents (more than 63 levels of nesting)
- were inserted. While indefinite nesting is still not supported, the error message
- has been corrected from the internal HTTP 500 error "Builder value not yet sealed"
- to the correct HTTP 400 "Bad parameter".
-
-* Show optimizer rules with highest execution times in explain output.
-
-* Renamed "master" to "leader" and "slave" to "follower" in replication messages.
- This will change the contents of replication log messages as well the string
- contents of replication-related error messages.
-
- The messages of the error codes 1402, 1403 and 1404 were also changed accordingly,
- as well as the identifiers:
- - `TRI_ERROR_REPLICATION_MASTER_ERROR` renamed to `TRI_ERROR_REPLICATION_LEADER_ERROR`
- - `TRI_ERROR_REPLICATION_MASTER_INCOMPATIBLE` renamed to `TRI_ERROR_REPLICATION_LEADER_INCOMPATIBLE`
- - `TRI_ERROR_REPLICATION_MASTER_CHANGE` renamed to `TRI_ERROR_REPLICATION_LEADER_CHANGE`
-
- This change also renames the API endpoint `/_api/replication/make-slave` to
- `/_api/replication/make-follower`. The API is still available under the old
- name, but using it is deprecated.
-
-* Fixed that dropping a vanished follower works again. An exception response
- to the replication request is now handled properly.
-
-* Make optimizer rule "remove-filters-covered-by-index" remove FILTERs that were
- referring to aliases of the collection variable, e.g.
-
- FOR doc IN collection
- LET value = doc.indexedAttribute
- FILTER value == ...
-
- Previously, FILTERs that were using aliases were not removed by that optimizer
- rule.
- In addition, the optimizer rule "remove-unnecessary-calculations" will now run
- again in case it successfully removed variables. This can unlock further removal
- of unused variables in sequences such as
-
- FOR doc IN collection
- LET value = doc.indexedAttribute
- LET tmp1 = value > ...
- LET tmp2 = value < ...
+* Fixed a problem with potentially lost updates because a failover could happen
+ at a wrong time or a restarted leader could come back at an unlucky time.
- when the removal of `tmp1` and `tmp2` makes it possible to also remove the
- calculation of `value`.
+* Fixed BTS-167: Lingering Queries - Canceled from the UI.
+ This fixes queries not vanishing from the list of running queries in the web
+  UI in case the query was canceled using the "Cancel" button in the web UI's
+  query editor.
* Fixed bad behavior in agency supervision in some corner cases involving
already resigned leaders in Current.
-* Fixed a problem with potentially lost updates because a failover could
- happen at a wrong time or a restarted leader could come back at an
- unlucky time.
-
-* Fixed issue BTS-168: Fixed undefined behavior that did trigger
- segfaults on cluster startups. It is only witnessed for
- MacOS based builds. The issue could be triggered by any network connection.
- This behavior is not part of any released version.
-
* Fixed issue ES-664: the optimizer rule `inline-subqueries` must not pull out
- subqueries that contains a COLLECT statement if the subquery is itself called
+ subqueries that contain a COLLECT statement if the subquery is itself called
from within a loop. Otherwise the COLLECT will be applied to the values in the
outer FOR loop, which can produce a different result.
-* Fixed a blockage on hotbackup when writes are happening concurrently, since
- followers could no longer replicate leader transactions.
-
-* Updated arangosync to 0.7.9.
-
-* Fixed hotbackup S3 credentials validation and error reporting for upload
- and download.
+* Fixed that dropping a vanished follower works again. An exception response
+ to the replication request is now handled properly.
* Make AQL user-defined functions (UDFs) work in a cluster in case the UDF runs
an AQL query inside its own function code (BTS-159).
-* Fix: writeConcern is now honored correctly (ES-655).
+* Fixed hotbackup S3 credentials validation and error reporting for upload and
+ download.
+
+* Fixed a blockage on hotbackup when writes are happening concurrently, since
+ followers could no longer replicate leader transactions.
* Fix: The 'sorted' COLLECT variant would return undefined instead of null when
grouping by a null value.
-* Hard-code returned "planVersion" attribute of collections to a value of 1.
- Before 3.7, the most recent Plan version from the agency was returned inside
- "planVersion".
- In 3.7, the attribute contained the Plan version that was in use when the
- in-memory LogicalCollection object was last constructed. The object was
- always reconstructed in case the underlying Plan data for the collection
- changed or when a collection contained links to arangosearch views.
- This made the attribute relatively useless for any real-world use cases, and
- so we are now hard-coding it to simplify the internal code. Using the attribute
- in client applications is also deprecated.
+* Fix: writeConcern is now honored correctly (ES-655).
-* Slightly improve the performance of cluster DDL maintenance operations.
+* Fixed internal issue #739: ArangoSearch filter volatility now takes the
+  dependencies of calculation nodes into account.
-* Don't prevent concurrent synchronization of different shards from the same
- database. Previously only one shard was synchronized at a time per database.
+* Fixed OASIS-278 issue: Added proper sort/calc node cleanup for late
+  materialization after OneShard optimization.
+
+* Slightly improve the performance of cluster DDL maintenance operations.
-* Fixed OASIS-278 issue: Added proper sort/calc nodes cleanup for late materialization
- after OneShard optimization
+* Added AQL functions `IS_IPV4`, `IPV4_TO_NUMBER`, `IPV4_FROM_NUMBER` for IPv4
+ address checks and conversions.
-* Improve performance of many non-subquery AQL queries, by optimizing away
- some storage overhead for subquery context data.
+* Added AQL function `PRODUCT` to calculate the product of an array.
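+
+  A short sketch of the new functions (results shown as comments):
+  ```
+  db._query('RETURN PRODUCT([1, 2, 3, 4])').toArray();        // [ 24 ]
+  db._query('RETURN IPV4_TO_NUMBER("127.0.0.1")').toArray();  // [ 2130706433 ]
+  db._query('RETURN IPV4_FROM_NUMBER(2130706433)').toArray(); // [ "127.0.0.1" ]
+  ```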
* Improve performance of internal cluster Plan and Current reload operations.
* Fixed issue #12349: arangosh compact Arangoerror 404.
-* Wait until restore task queue is idle before shutting down.
-
-* Fix a race problem in the unit tests w.r.t. PlanSyncer.
+* Improve performance of many non-subquery AQL queries, by optimizing away some
+ storage overhead for subquery context data.
* Always fetch data for /_api/cluster/agency-dump from leader of the agency.
Add option "redirectToLeader=true" to internal /_api/agency/state API.
+* Fixed regression with view use-after-create in cluster (BTS-137).
+
+* Slightly improved the performance of some k-shortest-path queries.
+
+
+v3.7.1 (2020-08-07)
+-------------------
+
* Fixed issue #12297: ArangoDB 3.6.5 Swagger Error?
This issue caused the Swagger UI for displaying the APIs of user-defined Foxx
services to remain invisible in the web UI, because of a JavaScript exception.
@@ -6575,16 +8759,23 @@ devel
UI. The indexes were created successfully despite the error message popping
up. This fix removes the misleading unconditional error message.
-* Slightly improved the performance of some k-shortest-path queries.
-* Added startup option `--rocksdb.encryption-key-rotation` to activate/deactivate
- the encryption key rotation REST API. The API is disabled by default.
+v3.7.1-rc.1 (2020-07-24)
+------------------------
+
+* Added startup option `--rocksdb.encryption-key-rotation` to
+ activate/deactivate the encryption key rotation REST API. The API is disabled
+ by default.
+
+* Add internal caching for LogicalCollection objects inside
+ ClusterInfo::loadPlan.
-* Add internal caching for LogicalCollection objects inside ClusterInfo::loadPlan.
This allows avoiding the recreation of LogicalCollection objects that did not
change from one loadPlan run to the next. It reduces CPU usage considerably on
both Coordinators and DB-servers.
+* Fixed reading analyzer revisions for a freshly updated cluster.
+
* Fixed undefined behavior in AQL COLLECT with multiple group variables (issue
#12267).
If you are grouping on "large" values that occur multiple times in different
@@ -6601,35 +8792,35 @@ devel
* Revive faster out-of-range comparator for secondary index scans that do a full
collection index scan for index types "hash", "skiplist", "persistent".
-* Fixed internal issue #733: Primary sort compression in views now used properly.
-
-* Errors with error code 1200 (Arango conflict) will now get the HTTP response
- code 409 (Conflict) instead of 412 (Precondition failed), unless "if-match" header
- was used in `_api/document` or `_api/gharial`.
-
-* Fix spurious lock timeout errors when restoring collections.
+* Conflict error codes (1200) will now use the proper error message instead of a
+ generic and misleading "precondition failed".
* Improve performance of agency cache by not copying the hierarchical Node
tree result for serialization, but serializing it directly.
-* Make sure cluster statistics in web UI work in case a coordinator is down.
+* Turn off maintenance threads on Coordinators, as they are not needed there.
+
+* Fixed internal issue #733: Primary sort compression in ArangoSearch views now
+ used properly.
* Change HTTP response code for error 1450 ("too many shards") from HTTP 500 to
HTTP 400, as this is clearly a client error.
-* Turn off maintenance threads on Coordinators, as they are not needed there.
+* Fix spurious lock timeout errors when restoring collections.
-* Fixed crash in cleanup of parallel traversal queries.
+* Make sure cluster statistics in web UI work in case a coordinator is down.
* Updated arangosync to 0.7.8.
+* Fixed a race between a new request and the keepAlive timeout.
+
* Fixed hotbackup upload and download with encryption at rest key indirection.
-* Fixed a race between a new request and the keepAlive timeout.
+* Fixed crash in cleanup of parallel traversal queries.
* Added cluster metrics `arangodb_load_plan_accum_runtime_msec` and
- `arangodb_load_current_accum_runtime_msec` to track the total time spent
- in `loadPlan()` and `loadCurrent()` operations.
+ `arangodb_load_current_accum_runtime_msec` to track the total time spent in
+ `loadPlan()` and `loadCurrent()` operations.
* Fixed wrong reporting of failures in all maintenance failure counter metrics
(`arangodb_maintenance_action_failure_counter`). Previously, each successful
@@ -6639,7 +8830,8 @@ devel
* Adjusted the scale of the `arangodb_maintenance_action_queue_time_msec` to
cover a more useful range.
-* The filter executor will now overfetch data again if followed by a limit, same as in 3.6 series.
+* The filter executor will now overfetch data again if followed by a limit, as
+  in the 3.6 series.
The following queries are affected:
```
something
@@ -6648,42 +8840,53 @@ devel
```
`something` will now be asked for a full batch instead of only 10 documents.
-* In rare cases SmartBFS could use a wrong index for looking up edges. This is fixed now.
+* In rare cases SmartBFS could use a wrong index for looking up edges. This is
+ fixed now.
-* The internally used JS-based ClusterComm send request function can now again use JSON, and does
- not require VelocyPack anymore. This fixes an issue with Foxx-App management (install, updated, remove)
- got delayed in a sharded environment, all servers do get all apps eventually, now the fast-path
- will work again.
+* The internally used JS-based ClusterComm send request function can now again
+  use JSON, and does not require VelocyPack anymore. This fixes an issue where
+  Foxx app management (install, update, remove) got delayed in a sharded
+  environment. All servers eventually get all apps; now the fast path works
+  again.
-* Fixed a rare race in Agents, if the leader is rebooted quickly there is a chance
- that it is still assumed to be the leader, but delivers a state shortly in the
- past.
+* Fixed a rare race in Agents: if the leader is rebooted quickly, there is a
+  chance that it is still assumed to be the leader, but delivers a state from
+  shortly in the past.
-* Fixed a race in the ConnectionPool which could lease out a connection
- that got its idle timeout after the lease was completed. This could lead
- to sporadic network failures in TLS and to inefficiencies with TCP.
+* Keep the list of last-acknowledged entries in the Agency more consistent.
+  During leadership take-over it was possible to get into a situation where
+  the new leader does not successfully report the agency config, which was
+  eventually fixed by the Agent itself. Now this situation is impossible.
-* Fixed restoring a SmartGraph into a database that already contains that same graph.
- The use case is restoring a SmartGraph from backup, apply some modifications, which are
- undesired, and then resetting it to the restored state, without dropping the database.
- One way to achieve this is to use arangorestore with the `overwrite` option on the same dataset,
- effectively resetting the SmartGraph to the original state.
- Without this fix, the workaround for is to either drop the graph (or the database) before the
- restore call, yielding an identical result.
+* Fixed a race in the ConnectionPool which could lease out a connection that got
+ its idle timeout after the lease was completed. This could lead to sporadic
+ network failures in TLS and to inefficiencies with TCP.
-* Keep the list of last-acknowledged entries in Agency more consistent.
- During leadership take-over it was possible to get into a situation that
- the new leader does not successfully report the agency config, which
- was eventually fixed by the Agent itself. Now this situation is impossible.
+* Fixed restoring a SmartGraph into a database that already contains that same
+  graph.
+  The use case is restoring a SmartGraph from backup, applying some
+  modifications which turn out to be undesired, and then resetting it to the
+  restored state, without dropping the database. One way to achieve this is to
+  use arangorestore with the `overwrite` option on the same dataset,
+  effectively resetting the SmartGraph to the original state. Without this fix,
+  the workaround is to drop the graph (or the database) before the restore
+  call, yielding an identical result.
-* Allow changing collection properties for smart edge collections as well.
+* Fixed potential garbled output in syslog log output for the program name.
-* Fixed that the hotbackup agency lock is released under all circumstances
- using scope guards. This addresses a rare case in which the lock was left
- behind.
+* Fixed infinite recursion when printing error messages about invalid logger
+ configuration on startup.
+
+* Fixed sporadic use-after-free ASan issue in logger shutdown.
-* Privatized load plan / current in cluster info and cleanup following
- agency cache implementation.
+* Added missing state rollback for failed attempt-based write locking of spin
+ locks.
+
+* Disable internal network protocol switch for cluster-internal communication,
+ and hard-code the internal communication protocol to HTTP.
+
+* Added vertex collection validation in case of a SmartGraph edge definition
+ update.
* Fix cluster-internal request forwarding for VST requests that do not have any
Content-Type header set. Such requests could have been caused by the Java
@@ -6691,49 +8894,18 @@ devel
* Fixed issue OASIS-252: Hotbackup agency locks without clientId.
-* The `_from` and `_to` attributes of an edge document can now be edited from
- within the UI.
-
-* Added vertex collection validation in case of a SmartGraph edge definition
- update.
+* Fixed internal issue #726: added restore handling for custom analyzers.
-* Updated arangosync to 0.7.7.
+* Fixed a potential agency crash if trace logging is on.
-* Added support `db._engineStats()` API in coordinator. Previously calling this
- API always produced an empty result. Now it will return the engine statistics
- as an object, with an entry for each individual DB-Server.
+* Network layer now reports connection setup issues in more cases. This
+  replaces some INTERNAL_ERROR reports with more precise errors; those are only
+  reached during failover scenarios.
* Fixed a document parsing bug in the Web UI. This issue occurred in the
document list view in case a document had an attribute called `length`.
The result was an incorrect representation of the document preview.
-* Improve readability of running and slow queries in web UI by properly left-
- aligning the query strings.
-
-* The Web UI is not disabling the query import button after file upload takes
- place.
-
-* The Web UI is now reporting errors properly in case of editing ArangoSearch
- Views with invalid properties.
-
-* In case of a graph deletion failure, the Web UI displays now the correct
- error message.
-
-* In case a document got requested via the UI of a collection which does not
- exist, the UI now properly displays an error view instead of having a bad
- display state.
-
-* Removed the edge id's hover styling in case of embedded document editor in
- the Web UI as this functionality is disabled. This was misleading because
- the elements are not clickable.
-
-* The Web UI now displays an error message inside the node information view in
- case the user has no access to retrieve the necessary information.
-
-* Web UI: Removed unnecessary menubar entry in case of database node inspection.
-
-* Fixed a potential agency crash if trace logging is on.
-
* Re-enable access to GET `/_admin/cluster/numberOfServers` for all users by
default. Requests to PUT `/_admin/cluster/numberOfServers` require admin
user privileges. This restores the pre-3.7 behavior.
@@ -6742,40 +8914,45 @@ devel
users, too. This can be used to lock down this API for non-admin users
entirely.
-* Network layer now reports connection setup issues in more cases
- this replaces some INTERNAL_ERROR reports by more precise errors,
- those are only reached during failover scenarios.
+* Fixed crash in HTTP2 implementation.
* Improve readability of running and slow queries in web UI by properly left-
aligning the query strings.
+* ClusterInfo will wait for syncer threads to shut down.
+
+* Correct some log entries.
+
* Allow changing collection properties for smart edge collections as well.
Previously, collection property changes for smart edge collections were not
propagated.
+* Fixed BTS-110: Fulltext index with minLength <= 0 not allowed.
+
+* Web UI: Removed unnecessary menubar entry in case of database node inspection.
+
* Adjust arangodump integration test to desired behavior, and make sure
arangodump behaves as specified when invoking it with non-existing
collections.
-* Fixed BTS-110: Fulltext index with minLength <= 0 not allowed.
-
* Disallow using V8 dependent functions in SEARCH statement.
* Remove superfluous `%>` output in the UI modal dialog in case the JSON editor
was embedded.
-* Fixed a misleading error message in AQL.
+* The Web UI now displays an error message inside the node information view in
+  case the user has no access to retrieve the necessary information.
+
+* The `_from` and `_to` attributes of an edge document can now be edited from
+ within the UI.
-* Fix undistribute-remove-after-enum-coll which would allow calculations
- to be pushed to a DBServer which are not allowed to run there.
+* Fixed a misleading error message in AQL.
* Fixed issue ES-609: "Transaction already in use" error when running
transaction.
Added option `--transaction.streaming-lock-timeout` to control the timeout in
seconds in case of parallel access to a streaming transaction.
-* Returned `AQL_WARNING()` to emit warnings from UDFs.
-
* Fixed internal issue BTS-107, offset over the main query passed through a
subquery which has modification access to shards could yield incorrect
results, if shards are large enough and skipping was large enough, both to
@@ -6797,59 +8974,84 @@ devel
queries, but reports their results in addition. This has the negative side
effect that merging the subqueries back together was off.
-* Correct some log entries.
-
-* Allow removal of existing schemas by saving a schema of either `null` or `{}`
- (empty object). Using an empty string as schema will produce an error in the
- web interface and will not remove the schema.
+* Fix undistribute-remove-after-enum-coll, which would allow calculations that
+  are not allowed to run on a DBServer to be pushed there.
- The change also adjusts the behavior for the SCHEMA_VALIDATE AQL function in
- case the first parameter was no document/object. In this case, the function
- will now return null and register a warning in the query, so the user can
- handle it.
+* Removed the edge id's hover styling in case of embedded document editor in the
+ Web UI as this functionality is disabled. This was misleading because the
+ elements are not clickable.
-* Internal issue BTS-71: Added a precondition to prevent creating a collection
- with an invalid `distributeShardsLike` property.
+* Internal issue BTS-71: Fixed error handling regarding communication with the
+  agency. This could in a specific case cause collection creation in a cluster
+  to report success when it actually failed.
* Internal issue BTS-71: In a cluster, for collections in creation, suspend
supervision jobs concerning replication factor until creation is completed.
Previously, it could cause collection creation to fail (e.g. when a server
failed during creation), even when it didn't have to.
-* Internal issue BTS-71: Fixed error handling regarding communication with the
- agency. This could in a specific case cause collection creation in a cluster
- report success when it failed.
+* Internal issue BTS-71: Added a precondition to prevent creating a collection
+ with an invalid `distributeShardsLike` property.
+
+* In case a document of a non-existing collection got requested via the UI, the
+  UI now properly displays an error view instead of a bad display state.
+
+* Returned `AQL_WARNING()` to emit warnings from UDFs.
* Fixed internal issue #725: Added analyzers revision for _system database in
queries.
-* Allow restoring collections from v3.3.0 with their all-numeric collection
- GUID values, by creating a new, unambiguous collection GUID for them.
- v3.3.0 had a bug because it created all-numeric GUID values, which can be
- confused with numeric collection ids in lookups. v3.3.1 already changed the
- GUID routine to produce something non-numeric already, but collections
- created with v3.3.0 can still have an ambiguous GUID. This fix adjusts
- the restore routine to drop such GUID values, so it only changes something
- if v3.3.0 collections are dumped, dropped on the server and then restored
- with the flawed GUIDs.
+* The Web UI no longer disables the query import button after a file upload
+  takes place.
-* Fixed bug in IResearchViewExecutor that lead to only up to 1000 rows being
- produced.
+* In case of a graph deletion failure, the Web UI now displays the correct
+  error message.
-* Changing the current users profile icon in the Web UI now renders the new
- icon directly without the need of a full UI browser reload.
+* Privatized load plan / current in cluster info and cleanup following agency
+ cache implementation.
-* The Web UI's replication view is now checking the replication state
- automatically without the need of a manual reload.
+* Allow removal of existing schemas by saving a schema of either `null` or `{}`
+  (empty object). Using an empty string as schema will produce an error in the
+  web interface and will not remove the schema.
+
+  The change also adjusts the behavior for the SCHEMA_VALIDATE AQL function in
+  case the first parameter was no document/object. In this case, the function
+  will now return null and register a warning in the query, so the user can
+  handle it.
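+
+  As a hedged illustration of this SCHEMA_VALIDATE behavior (the rule shown is
+  arbitrary), passing a non-object first parameter now yields `null` plus a
+  query warning instead of a hard failure:
+
+    RETURN SCHEMA_VALIDATE(
+      "not a document",
+      { rule: { properties: { nums: { type: "array" } } } }
+    )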
+
+* The Web UI is now reporting errors properly in case of editing ArangoSearch
+ Views with invalid properties.
+
+* Fixed bug in IResearchViewExecutor that led to only up to 1000 rows being
+ produced.
* Fixed an error scenario where a call could miss count skip.
It was triggered in the case of Gather in Cluster, if we skipped over a full
shard, and the shard did actually skip, but there are more documents to skip
on another shard.
+* Fixed that the hotbackup agency lock is released under all circumstances
+ using scope guards. This addresses a rare case in which the lock was left
+ behind.
+
+* Allow restoring collections from v3.3.0 with their all-numeric collection GUID
+  values, by creating a new, unambiguous collection GUID for them.
+  v3.3.0 had a bug because it created all-numeric GUID values, which can be
+  confused with numeric collection ids in lookups. v3.3.1 already changed the
+  GUID routine to produce something non-numeric, but collections created with
+  v3.3.0 can still have an ambiguous GUID. This fix adjusts the restore routine
+  to drop such GUID values, so it only changes something if v3.3.0 collections
+  are dumped, dropped on the server and then restored with the flawed GUIDs.
+
* Fixed hotbackup agency lock cleanup procedure.
-* Only advance shard version after follower is reported in-sync in agency.
+* The Web UI's replication view is now checking the replication state
+ automatically without the need of a manual reload.
+
+* Changing the current user's profile icon in the Web UI now renders the new
+  icon and data directly without the need of a full UI browser reload.
* Fixed cluster behavior with HotBackup and non-existing backups on DB-Servers.
@@ -6860,20 +9062,12 @@ devel
the graph lookup code due to a wrong error code being used from Fuerte.
We now generate a more appropriate 503 - Service Unavailable error.
-* added option `--log.use-json-format` to switch log output to JSON format.
- Each log message then produces a separate line with JSON-encoded log data,
- which can be consumed by applications.
+* Fixed bad behavior that led to unnecessary additional revision tree rebuilding
+ on server restart.
-* added option `--log.process` to toggle the logging of the process id
- (pid) in log messages. Logging the process id is useless when running
- arangod in Docker containers, as the pid will always be 1. So one may
- as well turn it off in these contexts.
+* Only advance shard version after follower is reported in sync in agency.
-* added option `--log.in-memory` to toggle storing log messages in memory,
- from which they can be consumed via the `/_admin/log` and by the web UI. By
- default, this option is turned on, so log messages are consumable via API
- and the web UI. Turning this option off will disable that functionality and
- save a tiny bit of memory for the in-memory log buffers.
+* Disabled new, potentially unsafe revision-based storage format.
* Allow for faster cluster shutdown. This should reduce the number of shutdown
hangers in case the agents are stopped already and then coordinators or
@@ -6884,14 +9078,14 @@ devel
* Fixed non-deterministic test failure in Pregel WCC test.
+* Add a recovery test to check that there are no warnings at server start after
+ a graceful shutdown.
+
* Fixed unintentional connection re-use for cluster-internal communications.
* Fixed problem with newer replication protocol and ArangoSearch which could
lead to server crashes during normal operation.
-* Fixed bad behavior that led to unnecessary additional revision tree rebuilding
- on server restart.
-
* Allow AQL queries on DB-Servers again. This is not an officially supported
feature, but is sometimes used for debugging. Previous changes made it
impossible to run a query on a local shard.
@@ -6899,33 +9093,31 @@ devel
* Fix restoring old arangodumps from ArangoDB 3.3 and before, which had index
information stored in slightly different places in the dump.
+* Fix internal test helper function `removeCost` to really remove costs.
+
* Fix invalid calls to `AgencyCommResult::slice()` method, which must only be
  made in case an agency result was retrieved successfully. In case the call
to the agency was not successful, `slice()` must not be called on it. This
change makes sure this invariant holds true.
-* Fix internal test helper function `removeCost` to really remove costs.
-
* Fix potential AQL query shutdown hanger.
-* Use smarter default value preset in web UI for replication factor in case
- there are constraints established for the replication factor by the
- startup options `--cluster.min-replication-factor` and
- `--cluster.max-replication-factor`.
-
* Modified the exception handling in the RestHandler. Asynchronous communication
could lead to less detailed failure information.
+* Use smarter default value preset in web UI for replication factor in case
+ there are constraints established for the replication factor by the startup
+ options `--cluster.min-replication-factor` and
+ `--cluster.max-replication-factor`.
+
* Added permissions check before trying to read data from `_analyzers`
collection.
If these permissions are not there, no load is performed (user can not use
analyzers from database anyway).
-* Updated arangosync to 0.7.6.
-
-* Make the reboot tracker catch a failed server and permanently removed
- servers. This allows other servers in the cluster to move on more quickly,
- when a server fails and does not immediately come back.
+* Make the reboot tracker catch failed servers and permanently removed servers.
+  This allows other servers in the cluster to move on more quickly when a
+  server fails and does not immediately come back.
* Added WCC pregel algorithm for weakly connected components.
@@ -6959,9 +9151,15 @@ devel
distribution on the DB-Servers.
This change transparently handles missing "shardingStrategy" entries.
-* Taken collection dropping from fast track in maintenance. This avoids
- blocking fast track maintenance threads when a shard cannot immediately
- be dropped because of some pending lock.
+
+v3.7.1-beta.1 (2020-06-07)
+--------------------------
+
+* Fixed issue #8941: `frontendConfig.basePath` is duplicated for the users API
+  call.
+
+* Taken collection dropping from fast track in maintenance. This avoids blocking
+ fast track maintenance threads when a shard cannot immediately be dropped
+ because of some pending lock.
* Updated ArangoDB Starter to 0.14.15.
@@ -6971,8 +9169,8 @@ devel
* Fixed `"Plan" is not an object in agency` error messages when restarting
DB-Servers that contained ArangoSearch Views with links.
-* Fixed misordering of skipped number of rows report. Only triggered if you do
- a modification on a subquery nesting level 2 or more, e.g.:
+* Fixed misordering of skipped number of rows report. Only triggered if you do a
+ modification on a subquery nesting level 2 or more, e.g.:
```
LET s1 = (
LET s2 = (
@@ -6984,16 +9182,13 @@ devel
```
  Here the c would be off, or a count on the main query would be off.
-* Fix crash in execution of non-spliced subqueries if remainder of subquery is
+* Fixed crash in execution of non-spliced subqueries if remainder of subquery is
skipped.
* Added missing mutex to ConnectionPool::cancelConnections().
* Foxx API now respects "idiomatic" flag being explicitly set to false.
-* Fixed crash in execution of non-spliced subqueries if remainder of subquery is
- skipped.
-
* Made modification subqueries non-passthrough. The passthrough logic only works
if exactly as many output rows are produced as input rows are injected.
If the subquery with modification is skipped however this API is violated, we
@@ -7012,23 +9207,23 @@ devel
* arangodump and arangorestore will now fail when using the `--collection`
option and none of the specified collections actually exist in the database
- (on dump) or in the dump to restore (on restore). In case some of the specified
- collections exist, arangodump/restore will issue warnings about the invalid
- collections, but will continue to work for the valid collections.
+ (on dump) or in the dump to restore (on restore). In case some of the
+ specified collections exist, arangodump/restore will issue warnings about the
+ invalid collections, but will continue to work for the valid collections.
* Improved network send request for more robustness.
* Added multiple RocksDB configuration options to arangod:
- * `--rocksdb.cache-index-and-filter-blocks` to make the RocksDB block cache quota
- also include RocksDB memtable sizes
- * `--rocksdb.cache-index-and-filter-blocks-with-high-priority` to use cache index
- and filter blocks with high priority making index and filter blocks be less
- likely to be evicted than data blocks
- * `--rocksdb.pin-l0-filter-and-index-blocks-in-cache` make filter and index blocks
- be pinned and only evicted from cache when the table reader is freed
- * `--rocksdb.pin-top-level-index-and-filter` make the top-level index of partitioned
- filter and index blocks pinned and only be evicted from cache when the table reader
- is freed
+ * `--rocksdb.cache-index-and-filter-blocks` to make the RocksDB block cache
+ quota also include RocksDB memtable sizes.
+  * `--rocksdb.cache-index-and-filter-blocks-with-high-priority` to use cache
+    index and filter blocks with high priority, making index and filter blocks
+    less likely to be evicted than data blocks.
+  * `--rocksdb.pin-l0-filter-and-index-blocks-in-cache` to make filter and
+    index blocks pinned so they are only evicted from cache when the table
+    reader is freed.
+  * `--rocksdb.pin-top-level-index-and-filter` to make the top-level index of
+    partitioned filter and index blocks pinned so it is only evicted from cache
+    when the table reader is freed.
* Don't move potentially expensive AQL function calls into loops in the
`remove-unnecessary-calculations-rule`.
@@ -7047,14 +9242,15 @@ devel
inner loop, which could be a pessimization.
Now the optimizer will not move the calculation of values into the loop when
- it merges calculations in the `remove-unnecessary-calculations` optimizer rule.
+ it merges calculations in the `remove-unnecessary-calculations` optimizer
+ rule.
-* Fixed modification executors inside of a subquery, where the subquery decided to
- fetch all rows from upstream first and the amount of rows is higher then the
- batch size.
+* Fixed modification executors inside of a subquery, where the subquery decided
+  to fetch all rows from upstream first and the number of rows is higher than
+  the batch size.
-* Fixed reporting of skipped number of documents if we have a LIMIT x, 0
- right after the modification.
+* Fixed reporting of skipped number of documents if we have a LIMIT x, 0 right
+ after the modification.
* Added exceptions catching in agency callbacks.
@@ -7064,9 +9260,9 @@ devel
* Fixed bad behavior when dropping followers. A follower should be dropped
immediately when it is officially FAILED, not only after a longish timeout.
-* Fixed a bug in CollectionNameResolver which could lead to an extended
- busy spin on a core when a collection was dropped, but documents of it
- still remained in the WAL.
+* Fixed a bug in CollectionNameResolver which could lead to an extended busy
+ spin on a core when a collection was dropped, but documents of it still
+ remained in the WAL.
* Fixed return value of fuerte::H1Connection in case of timeout.
@@ -7077,37 +9273,37 @@ devel
no warnings, so missing a collection by misspelling its name could easily
go unnoticed.
- when a restore is restricted to one or multiple collections using the
- `--collection` option of arangorestore, warnings are issued for all specified
- collections that are not present in the dump. Previously there were
- no warnings, so missing a collection by misspelling its name could easily
- go unnoticed.
+ `--collection` option of arangorestore, warnings are issued for all
+ specified collections that are not present in the dump. Previously there
+ were no warnings, so missing a collection by misspelling its name could
+ easily go unnoticed.
- when a dump was taken using the `--overwrite` option, there was no check
that validated whether the encryption mode used in the existing dump
- directory was the same as the requested encryption mode. This could have
- led to dump directories with both encrypted and unencrypted files. This
- was only the case when using `--overwrite true`, which is not the default.
- - when a restore was performed using the `--encryption.keyfile` option,
- there was no check whether the to-be-restored files were actually encrypted.
- Now this check is enforced and arangorestore will bail out with an error
- if the requested encryption mode for restore is not the same as for the
- stored dump files.
-
-* Fixed traversal issue: If you have a traversal with different minDepth and maxDepth
- values and filter on path elements that are larger then minDepth, in a way that a
- shorter path would match the condition, the shorter paths would in some cases
- not be returned, even if they are valid. e.g.
+ directory was the same as the requested encryption mode. This could have led
+ to dump directories with both encrypted and unencrypted files. This was only
+ the case when using `--overwrite true`, which is not the default.
+ - when a restore was performed using the `--encryption.keyfile` option, there
+ was no check whether the to-be-restored files were actually encrypted.
+ Now this check is enforced and arangorestore will bail out with an error if
+ the requested encryption mode for restore is not the same as for the stored
+ dump files.
+
+* Fixed traversal issue: If you have a traversal with different minDepth and
+  maxDepth values and filter on path elements that are larger than minDepth, in
+  a way that a shorter path would match the condition, the shorter paths would
+  in some cases not be returned, even if they are valid. e.g.
FOR v, e, p IN 1..3 OUTBOUND @start Graph "myGraph"
FILTER p.vertices[2].label != "foo"
RETURN p
In the above query, a path of length 1 would be valid. There p.vertices[2]
- does not exist => Evaluated to `null`. `null`.label is again evaluated to `null`
- => `null != "foo"` is true, so the path is valid.
+ does not exist => Evaluated to `null`. `null`.label is again evaluated to
+ `null` => `null != "foo"` is true, so the path is valid.
* Fixed traversal issue: If you have a filter on the path that is based on a
- variable value, which could not be deduced as constant during runtime, in
- a sharded GeneralGraph the filter was not applied correctly.
+ variable value, which could not be deduced as constant during runtime, in a
+ sharded GeneralGraph the filter was not applied correctly.
  SmartGraphs and SingleServer traversals are not affected by this issue.
  Also OneShard traversals are not affected.
@@ -7117,11 +9313,12 @@ devel
* Foxx routes now always have a Swagger `operationId`. If the route is unnamed,
a distinct operationId will be generated based on the HTTP method and URL.
-* Fixed, if you have a collection access within a Subquery, where the main query is fully skipped
- and the "splice-subqueries" rule is active. The information of the skip was not transported
- correctly. This could cause incorrect counting reports.
- If splice-subqueries are disabled, or the main-query is only partly skipped, everything worked
- as expected.
+* Fixed an issue with collection access within a subquery, where the main query
+  is fully skipped and the "splice-subqueries" rule is active. The information
+  of the skip was not transported correctly. This could cause incorrect
+  counting reports.
+  If splice-subqueries are disabled, or the main query is only partly skipped,
+  everything worked as expected.
* Expanded -std=c++17 flag to all compilers.
@@ -7136,6 +9333,10 @@ devel
were excluded from the request statistics if and only if the requested
database was the `_system` database.
+
+v3.7.1-alpha.2 (2020-05-27)
+---------------------------
+
* Fixed an issue with truncate of a collection after a dbserver was restarted
very quickly. This could block arangosync from making progress because the
_jobs collection could no longer be truncated.
@@ -7165,6 +9366,10 @@ devel
* Updated arangosync to 0.7.5.
+
+v3.7.1-alpha.1 (2020-05-15)
+---------------------------
+
* Fixed ability to edit graph edge in Graph Viewer of web UI.
* Fixed issue #10371: For k-shortest-paths queries on certain graphs a condition
@@ -7173,7 +9378,7 @@ devel
* Added feature: Disjoint SmartGraphs
- SmartGraphs have been extended to a new subtype, called **Disjoint SmartGraphs**.
+ SmartGraphs have been extended to a new subtype, called `Disjoint SmartGraphs`.
A Disjoint SmartGraph prohibits edges between different SmartGraph components.
In case the graph schema can be represented without the need of connected
SmartGraph components, a Disjoint SmartGraph should be used as this knowledge
@@ -7184,12 +9389,12 @@ devel
* Fixed a lockup in dropCollection due to a mutex being held for too long.
-* Add an optimizer rule that enables execution of certain subqueries on a
- DB Server. For this optimization to work, the subquery must contain exactly
- one DISTRIBUTE/GATHER pair and only access at most one collection.
+* Add an optimizer rule that enables execution of certain subqueries on a DB
+ Server. For this optimization to work, the subquery must contain exactly one
+ DISTRIBUTE/GATHER pair and only access at most one collection.
This proves particularly useful for traversals, shortest path, and k-shortest
- paths queries on Disjoint SmartGraphs where the entire traversal is executed
+  paths queries on Disjoint SmartGraphs where the entire traversal is executed
on the DB Server without involvement of a coordinator.
* ClusterInfo does its own updating of plan / current caches.
@@ -7206,17 +9411,17 @@ devel
* Fixed issue #11590: Querying for document by _key returning only a single
seemingly random property on entity ("old", in this case).
- This fixes single-key document lookups in the cluster for simple by-key
- AQL queries, such as `FOR doc IN collection FILTER doc._key == @key RETURN
- doc` in case the document has either an "old" or a "new" attribute.
+ This fixes single-key document lookups in the cluster for simple by-key AQL
+ queries, such as `FOR doc IN collection FILTER doc._key == @key RETURN doc`
+ in case the document has either an "old" or a "new" attribute.
* Restored behavior of Foxx API documentation being expanded to show all routes
rather than collapsing all sections by default.
-* Add optimization for subqueries for which only the number of results
- matters. The optimization will be triggered for read-only subqueries that
- use a full collection scan or an index scan, without any additional filtering
- (early pruning or document post-filtering) and without LIMIT.
+* Add optimization for subqueries for which only the number of results matters.
+ The optimization will be triggered for read-only subqueries that use a full
+ collection scan or an index scan, without any additional filtering (early
+ pruning or document post-filtering) and without LIMIT.
It will help in the following situation:
@@ -7229,14 +9434,14 @@ devel
...
The restrictions are that the subquery result must only be used with the
- COUNT/LENGTH function and not for anything else. The subquery itself must
- be read-only (no data-modification subquery), not use nested FOR loops nor
- LIMIT, nor a FILTER condition or calculation that requires accessing the
- document data. Accessing index data is supported for filtering, but not
- for further calculations.
+ COUNT/LENGTH function and not for anything else. The subquery itself must be
+ read-only (no data-modification subquery), not use nested FOR loops nor LIMIT,
+ nor a FILTER condition or calculation that requires accessing the document
+ data. Accessing index data is supported for filtering, but not for further
+ calculations.
- If the optimization is triggered, it will show up in the query execution
- plan under the name `optimize-count`.
+ If the optimization is triggered, it will show up in the query execution plan
+ under the name `optimize-count`.
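+
+  A hedged sketch of a query shape that may qualify (collection name `orders`
+  is hypothetical):
+
+    LET docs = (FOR doc IN orders RETURN doc)
+    RETURN LENGTH(docs)
+
+  The subquery result is only consumed by LENGTH, so the optimizer can count
+  the documents without materializing them.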
* Integrated libiresearch log topic properly into ArangoDB logging system.
@@ -7244,8 +9449,8 @@ devel
priority.
* Allow specifying graph names as unquoted string in an AQL graph traversal
- query, e.g. `FOR ... IN ... GRAPH abc`. Previously, the graph name had
- to be a bind parameter or a string enclosed in quotes.
+ query, e.g. `FOR ... IN ... GRAPH abc`. Previously, the graph name had to be a
+ bind parameter or a string enclosed in quotes.
* loadPlan and loadCurrent have been fixed to not miss out on increments.
@@ -7255,17 +9460,18 @@ devel
* Agency offers the new poll API to subscribe to the Raft log stream.
-* Added option `--rocksdb.edge-cache` to toggle in-memory caching for
- edges. The option is turned on by default. This normally helps with
- performance in read-only and read-mostly workloads.
+* Added option `--rocksdb.edge-cache` to toggle in-memory caching for edges. The
+ option is turned on by default. This normally helps with performance in
+ read-only and read-mostly workloads.
* Fixed a bug in Maintenance which could prevent collection creation from
working (made CreateCollection action idempotent).
-* Fix potential undefined behavior in some operations issued to the REST
- handler at `/_api/collection` in cluster mode.
+* Fix potential undefined behavior in some operations issued to the REST handler
+ at `/_api/collection` in cluster mode.
-* `--cluster.agency-prefix` marked as obsolete. Did never work and is not supported.
+* `--cluster.agency-prefix` marked as obsolete. It never worked and is not
+  supported.
* Removed old AgencyComm.
@@ -7273,8 +9479,8 @@ devel
on lane CLUSTER_AQL instead of CLIENT_AQL. This leads to MEDIUM prio instead
of LOW.
-* Fixed a sleeping barber in fuerte. Added TLA+ model to prove that there
- is not another one hiding somewhere.
+* Fixed a sleeping barber in fuerte. Added TLA+ model to prove that there is not
+ another one hiding somewhere.
* Fix spurious bugs in `resilience_move` tests due to replication context of
to-be-dropped collections lingering around until timeout.
@@ -7326,12 +9532,9 @@ devel
deployments that are known to already contain invalid UTF-8 data and to keep
them operational until the wrong string encoding is fixed in the data.
-* Fixed a bug which occurred if a DB-Server was shut down exactly
- when it was supposed to resign from its leadership for a shard.
-
-* Improve continuation behavior of AQL queries. We post the continuation
- handler on lane CLUSTER_AQL instead of CLIENT_AQL. This leads to MEDIUM
- prio instead of LOW.
+* Improve continuation behavior of AQL queries. We post the continuation handler
+ on lane CLUSTER_AQL instead of CLIENT_AQL. This leads to MEDIUM prio instead
+ of LOW.
* When relaying requests to other coordinators in a load-balanced setup, don't
forward the "http/1.1" HTTP header from the remote response. Forwarding that
@@ -7344,101 +9547,92 @@ devel
* Obsoleted startup option `--database.maximal-journal-size`. This option was
useful for the MMFiles storage engine only, but did not have an effect with
- the RocksDB engine. Using this startup option is not an error, but has
- no effect anymore.
+ the RocksDB engine. Using this startup option is not an error, but has no
+ effect anymore.
* Added `JACCARD` AQL function.
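+
+  A brief usage sketch:
+
+    RETURN JACCARD([1, 2, 3, 4], [3, 4, 5, 6])
+    // intersection has 2 elements, union has 6 -> returns 1/3
+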
* storedValues property is removed from ArangoSearch link properties output.
-* Added primarySortCompression property to ArangoSearch Views.
+* Added primarySortCompression property to ArangoSearch views.
-* Added compression property to ArangoSearch View storedValues.
+* Added compression property to ArangoSearch view storedValues.
-* Added overwrite mode "ignore" for document inserts. This mode allows
- ignoring primary key conflicts on insert when the target document already
- exists.
+* Removed deprecated MMFiles storage engine and also the `arango-dfdb`
+  (datafile debugger) executable that could be used to validate MMFiles
+  datafiles.
- The following overwrite modes now exist:
+ This change also obsoletes all MMFiles-specific startup options in the
+ `--wal.*` section. Using these startup options is not an error, but has no
+ effect anymore.
- * "ignore": if a document with the specified `_key` value exists already,
- nothing will be done and no write operation will be carried out. The
- insert operation will return success in this case. This mode does not
- support returning the old document version using the `returnOld`
- attribute. `returnNew` will only set the `new` attribute in the response
- if a new document was inserted.
- * "replace": if a document with the specified `_key` value exists already,
- it will be overwritten with the specified document value. This mode will
- also be used when no overwrite mode is specified but the `overwrite`
- flag is set to `true`.
- * "update": if a document with the specified `_key` value exists already,
- it will be patched (partially updated) with the specified document value.
- * "conflict": if a document with the specified `_key` value exists already,
- return a unique constraint violation error so that the insert operation
- fails. This is also the default behavior in case the overwrite mode is
- not set, and the *overwrite* flag is *false* or not set either.
+* Fixed a bug in the agency supervision, which ignored the `failoverCandidates`
+ field.
- The overwrite mode "ignore" can also be used from AQL INSERT operations
- by specifying it in the INSERT's `OPTIONS`, e.g.
+* Added `INTERLEAVE` AQL function.
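+
+  A short sketch of the expected result:
+
+    RETURN INTERLEAVE([1, 2], [3, 4], [5, 6])
+    // -> [1, 3, 5, 2, 4, 6]
+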
- INSERT { _key: ..., .... } INTO collection OPTIONS { overwriteMode: "ignore" }
+* Upgraded bundled RocksDB library to version 6.8.0.
- Again, when the overwrite mode "ignore" is used from AQL, it does not
- support returning the old document version. Using "RETURN OLD" in
- an INSERT operation that uses the "ignore" overwrite mode will trigger
- a parse error, as there will be no old version to return. "RETURN NEW"
- will only return the document in case it was inserted. In case the
- document already existed, "RETURN NEW" will return "null".
- The main use case of inserting documents with overwrite mode "ignore" is
- to make sure that certain documents exist in the cheapest possible way.
- In case the target document already exists, the "ignore" mode is most
- efficient, as it will not retrieve the existing document from storage and
- not write any updates to it.
+v3.7.0 (2020-04-11)
+-------------------
-* Added feature: SatelliteGraphs
+* Updated OpenSSL to 1.1.1f.
- SatelliteGraphs are a new type of graph, added in addition to the existing
- ones, General Graphs and SmartGraphs.
+* Fixed a bug which occurred if a DB-Server was shut down exactly when it was
+ supposed to resign from its leadership for a shard.
- When doing joins involving graph traversals, shortest path or k-shortest paths
- computation in an ArangoDB cluster, data has to be exchanged between different
- servers. In particular graph traversals are usually executed on a Coordinator,
- because they need global information. This results in a lot of network traffic
- and potentially slow query execution.
+* Fix a bug in the agency supervision, which could declare an already FAILED
+  DB-Server temporarily as GOOD again after an agency leader change.
- SatelliteGraphs are the natural extension of the concept of
- SatelliteCollections to graphs. All of the usual benefits and caveats apply.
- SatelliteGraphs are synchronously replicated to all DB-Servers that are part of
- a cluster, which enables DB-Servers to execute graph traversals locally. This
- includes (k-)shortest path(s) computation and possibly joins with traversals
- and greatly improves performance for such queries.
+* Added overwrite mode "ignore" for document inserts. This mode allows ignoring
+ primary key conflicts on insert when the target document already exists.
-* Removed deprecated MMFiles storage engine and also the `arango-dfdb`
- (datafile debugger) executable that could be used to validate MMFiles
- datafiles.
+ The following overwrite modes now exist:
- This change also obsoletes all MMFiles-specific startup options in the
- `--wal.*` section. Using these startup options is not an error, but has
- no effect anymore.
+ - "ignore": if a document with the specified `_key` value already exists,
+ nothing will be done, and no write operation will be carried out. The
+ insert operation will return success in this case. This mode does not
+ support returning the old or new document versions using the `returnOld`
+ and `returnNew` attributes.
+ - "replace": if a document with the specified `_key` value already exists,
+ it will be overwritten with the specified document value. This mode will
+ also be used when no overwrite mode is specified but the `overwrite`
+ flag is set to `true`.
+ - "update": if a document with the specified `_key` value already exists,
+ it will be patched (partially updated) with the specified document value.
-* Fixed a bug in the agency supervision, which ignored the `failoverCandidates`
- field.
+ The overwrite mode "ignore" can also be used from AQL INSERT operations by
+ specifying it in the INSERT's `OPTIONS`, e.g.
-* Fixed a bug in the agency supervision, which could declare an already FAILED
- DB-Server temporarily as GOOD again after an agency leader change.
+ INSERT { _key: ..., .... } INTO collection OPTIONS { overwriteMode: "ignore" }
-* Added `INTERLEAVE` AQL function.
+ Again, when the overwrite mode "ignore" is used from AQL, it does not support
+ returning the old or new document versions. Using "RETURN OLD" in an INSERT
+ operation that uses the "ignore" overwrite mode will trigger a parse error, as
+ there will be no old version returned, and "RETURN NEW" will only return the
+ document in case it was inserted. In case the document already existed,
+ "RETURN NEW" will return "null".
-* Upgraded bundled RocksDB library to version 6.8.0.
+ The main use case of inserting documents with overwrite mode "ignore" is to
+ make sure that certain documents exist in the cheapest possible way.
+ In case the target document already exists, the "ignore" mode is most
+ efficient, as it will not retrieve the existing document from storage and not
+ write any updates to it.
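+
+  A minimal sketch (collection name `users` is hypothetical):
+
+    INSERT { _key: "alice", status: "active" } INTO users
+      OPTIONS { overwriteMode: "ignore" }
+      RETURN NEW
+
+  This returns the inserted document on the first run and `null` on subsequent
+  runs, because the existing document is left untouched.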
+
+
+v3.7.0-preview.1 (2020-03-27)
+-----------------------------
* Added AQL function `IN_RANGE`.
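+
+  A hedged sketch of its use inside a SEARCH clause (view name `myView` and
+  attribute `value` are hypothetical):
+
+    FOR doc IN myView
+      SEARCH IN_RANGE(doc.value, 3, 5, true, false)
+      RETURN doc
+
+  This matches documents where `3 <= doc.value < 5`.
+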
-* Added startup option `--ssl.prefer-http1-in-alpn` to optionally let the
- server prefer HTTP/1.1 over HTTP/2 in ALPN protocol negotiations.
+* Added startup option `--ssl.prefer-http1-in-alpn` to optionally let the server
+ prefer HTTP/1.1 over HTTP/2 in ALPN protocol negotiations.
+
+* Fixed compilation issues with wrong cv-qualifiers and unnecessary temporary
+  copying.
+
-* Compilation issues with wrong cv-qualifiers and unnecessary
- temporary copying.
+v3.7.0-alpha.2 (2020-03-20)
+---------------------------
* Add DTRACE points to track a request through the infrastructure.
@@ -7449,8 +9643,8 @@ devel
* Reactivated REST API endpoint at `/_admin/auth/reload`, as it is called by
  DC2DC.
-* Fix an endless loop in FollowerInfo::persistInAgency which could trigger
- a hanger if a collection was dropped at the wrong time.
+* Fix an endless loop in FollowerInfo::persistInAgency which could trigger a
+ hanger if a collection was dropped at the wrong time.
* Updated LZ4 dependency to version 1.9.2.
@@ -7463,7 +9657,7 @@ devel
"numberOfShards" for each new collection. The default values were "_graphs"
and 1 and were not modified if the user did not alter them, but it was still
possible to alter them.
- This is now (silently) ignored. Any attempt to set any value for
+  This is now (silently) ignored. Any attempt to set any value for
"distributeShardsLike" or "numberOfShards" for new collections in a OneShard
database will silently be ignored. The collection will automatically be
sharded like the sharding prototype and will have a single shard.
@@ -7479,8 +9673,8 @@ devel
The web interface now also hides the "Distribute shards like" settings in this
case, and makes the "Number of shards" input box read-only.
-* Fix premature access to temporary path before a user-specified path was
- read from the config options.
+* Fix premature access to temporary path before a user-specified path was read
+ from the config options.
* Rebuild UI and update swagger.
@@ -7497,18 +9691,20 @@ devel
* Allow to override the detected total amount of memory via an environment
variable ARANGODB_OVERRIDE_DETECTED_TOTAL_MEMORY.
-* `splice-subqueries` optimization is not limited by any type of operation within the
- subquery any more. It can now be applied on every subquery and will be by default.
- However they may be a performance impact on some queries where splice-subqueries
- are not as performant as non-spliced subqueries. This is due to internal memory
- management right now and will be addressed in future versions. Spliced subqueries
- can be less performant if the query around the subquery is complex and requires
- lots of variables, or variables with large content, but the subquery itself
- does not require a lot of variables and produces many intermediate results
- s.t. good batching within the query does not pay off against memory overhead.
-
-* Supervision to clean up zombie servers after 24h, if no
- responsibility for shards.
+* `splice-subqueries` optimization is not limited by any type of operation
+  within the subquery any more. It can now be applied on every subquery and
+  will be applied by default.
+  However, there may be a performance impact on some queries where
+  splice-subqueries are not as performant as non-spliced subqueries. This is
+  due to internal memory management right now and will be addressed in future
+  versions. Spliced subqueries can be less performant if the query around the
+  subquery is complex and requires lots of variables, or variables with large
+  content, but the subquery itself does not require a lot of variables and
+  produces many intermediate results, such that good batching within the query
+  does not pay off against memory overhead.
+
+* Supervision to clean up zombie servers after 24h if they have no
+  responsibility for shards.
* Fix SORT RAND() LIMIT 1 optimization for RocksDB when only a projection of the
attributes was used. When a projection was used and that projection was
@@ -7517,32 +9713,31 @@ devel
which always resulted in the same index entry to be returned and not a random
one.
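+
+  A sketch of an affected query shape (collection name `coll` is hypothetical):
+
+    FOR doc IN coll SORT RAND() LIMIT 1 RETURN doc._key
+
+  Only `_key` is used here (a projection), which previously caused the
+  optimization to return the same index entry instead of a random one.
+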
-* Mark server startup options `--foxx.*`, `--frontend.*` and `--javascript.*`
- as single server and Coordinator only for documentation (`--dump-options`).
+* Mark server startup options `--foxx.*`, `--frontend.*` and `--javascript.*` as
+ single server and Coordinator only for documentation (`--dump-options`).
* Supervision hot backup and supervision maintenance modes ttl fix.
-* Fix a bug that leads to graph traversals yielding empty output when none of the
- output variables (vertex, edge, path) are used. This is relevant when a query
- is only interested in a COUNT of the outputs, for example.
+* Fix a bug that leads to graph traversals yielding empty output when none of
+ the output variables (vertex, edge, path) are used. This is relevant when a
+ query is only interested in a COUNT of the outputs, for example.
-* MoveShard to check, if target is in sync follower before promotion
- to leader.
+* MoveShard now checks if the target is an in-sync follower before promotion to
+  leader.
-* Agency ttl bug fix
+* Agency ttl bug fix.
* Added the SNI feature for TLS. This means that one can configure multiple
- server keys and certificate chains and the system dynamically uses
- the right one depending on the value of the TLS servername extension.
- This allows to use different TLS setups for the same server which is
- reachable behind different DNS names, for example (Enterprise Edition only).
+  server keys and certificate chains and the system dynamically uses the right
+  one depending on the value of the TLS servername extension.
+  This allows using different TLS setups for the same server, which is
+  reachable behind different DNS names, for example (Enterprise Edition only).
* Do not create a reboot tracker for an empty serverId in sync repl.
* Fix the usage of the AQL functions `CALL` and `APPLY` for calling user-defined
- AQL functions when invoking an AQL query from the arangosh or a client application.
- Previously, invoking an AQL query and using the `CALL` or `APPLY` AQL functions
- to call user-defined AQL function caused undefined behavior.
+ AQL functions when invoking an AQL query from the arangosh or a client
+ application. Previously, invoking an AQL query and using the `CALL` or `APPLY`
+  AQL functions to call user-defined AQL functions caused undefined behavior.
* Improved graph traversal performance via some internal code refactoring:
@@ -7552,54 +9747,57 @@ devel
invariants.
- Each vertex lookup needs to perform slightly less work.
- The traversal speedups observed by these changes alone were around 8 to 10% for
- single-server traversals and traversals in OneShard setups. Cluster traversals
- will also benefit from these changes, but to a lesser extent. This is because the
- network roundtrips have a higher share of the total query execution times there.
+ The traversal speedups observed by these changes alone were around 8 to 10%
+ for single-server traversals and traversals in OneShard setups. Cluster
+ traversals will also benefit from these changes, but to a lesser extent. This
+ is because the network roundtrips have a higher share of the total query
+ execution times there.
-* Traversal performance can also be improved by not fetching the visited vertices
- from the storage engine in case the traversal query does not refer to them.
+* Traversal performance can also be improved by not fetching the visited
+ vertices from the storage engine in case the traversal query does not refer to
+ them.
For example, in the query
FOR v, e, p IN 1..3 OUTBOUND 'collection/startVertex' edges
RETURN e
- the vertex variable (`v`) is never accessed, making it unnecessary to fetch the
- vertices from storage. If this optimization is applied, the traversal node will be
- marked with `/* vertex optimized away */` in the query's execution plan output.
+ the vertex variable (`v`) is never accessed, making it unnecessary to fetch
+ the vertices from storage. If this optimization is applied, the traversal node
+ will be marked with `/* vertex optimized away */` in the query's execution
+ plan output.
* The existing optimizer rule "move-calculations-down" is now able to also move
- unrelated subqueries beyond SORT and LIMIT instructions, which can help avoid the
- execution of subqueries for which the results are later discarded.
+ unrelated subqueries beyond SORT and LIMIT instructions, which can help avoid
+ the execution of subqueries for which the results are later discarded.
For example, in the query
- FOR doc IN collection1
- LET sub1 = FIRST(FOR sub IN collection2 FILTER sub.ref == doc._key RETURN sub)
- LET sub2 = FIRST(FOR sub IN collection3 FILTER sub.ref == doc._key RETURN sub)
- SORT sub1
- LIMIT 10
- RETURN { doc, sub1, sub2 }
+ FOR doc IN collection1
+ LET sub1 = FIRST(FOR sub IN coll2 FILTER sub.ref == doc._key RETURN sub)
+ LET sub2 = FIRST(FOR sub IN coll3 FILTER sub.ref == doc._key RETURN sub)
+ SORT sub1
+ LIMIT 10
+ RETURN { doc, sub1, sub2 }
- the execution of the `sub2` subquery can be delayed to after the SORT and LIMIT,
- turning it into
+ the execution of the `sub2` subquery can be delayed to after the SORT and
+ LIMIT, turning it into
- FOR doc IN collection1
- LET sub1 = FIRST(FOR sub IN collection2 FILTER sub.ref == doc._key RETURN sub)
- SORT sub1
- LIMIT 10
- LET sub2 = FIRST(FOR sub IN collection3 FILTER sub.ref == doc._key RETURN sub)
- RETURN { doc, sub1, sub2 }
+ FOR doc IN collection1
+ LET sub1 = FIRST(FOR sub IN coll2 FILTER sub.ref == doc._key RETURN sub)
+ SORT sub1
+ LIMIT 10
+ LET sub2 = FIRST(FOR sub IN coll3 FILTER sub.ref == doc._key RETURN sub)
+ RETURN { doc, sub1, sub2 }
* Added JSON-Schema (draft-4) document validation. The validation can be
- specified by providing the new `schema` collection property when creating a
- new collection or when updating the properties of an existing collection:
+ specified by providing the new `schema` collection property when creating
+ a new collection or when updating the properties of an existing collection.
- db.mycollection.properties({
- schema: {
- rule : { nums : { type : "array", items : { type : "number", maximum : 6 }}},
- message : "Json-Schema validation failed"
- }
- });
+ db.mycollection.properties({
+ schema: {
+ rule : { a : { type : "array", items : { type : "number", maximum : 6 }}},
+ message : "Json-Schema validation failed"
+ }
+ });
* Fix supervision mode detection when unlocking agency in hot backup.
@@ -7615,8 +9813,8 @@ devel
* Added traversal options `vertexCollections` and `edgeCollections` to restrict
traversal to certain vertex or edge collections.
- The use case for `vertexCollections` is to not follow any edges that will point
- to other than the specified vertex collections, e.g.
+ The use case for `vertexCollections` is to not follow any edges that will
+ point to other than the specified vertex collections, e.g.
FOR v, e, p IN 1..3 OUTBOUND 'products/123' components
OPTIONS { vertexCollections: [ "bolts", "screws" ] }
@@ -7640,12 +9838,12 @@ devel
* Make arangobench return a proper error message when its initial attempt to
create the test collection fails.
-* In some cases with a COLLECT LIMIT situation on a small limit the collect
- does more calls to upstream than without a limit to provide the same
- result. We improved this situation and made sure that LIMIT does
- not cause the COLLECT to fetch too few input rows. There is a chance
- that queries with a very small amount of data might suffer from this
- modification, most queries will benefit however.
+* In some cases with a COLLECT LIMIT situation on a small limit, the COLLECT
+  makes more calls to upstream than without a limit to provide the same result.
+  We improved this situation and made sure that LIMIT does not cause the
+  COLLECT to fetch too few input rows. There is a chance that queries with a
+  very small amount of data might suffer from this modification; most queries
+  will benefit, however.
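+
+  A sketch of the affected pattern (all names hypothetical):
+
+    FOR doc IN coll
+      COLLECT category = doc.category
+      LIMIT 3
+      RETURN category
+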
* Use OpenSSL's EVP interface for SHA256 instead of the deprecated low-level
message digest APIs.
@@ -7664,6 +9862,10 @@ devel
between the COLLECT and the source data. In this case it is not safe to apply
the distributed collect, as it may alter the results.
+
+v3.7.0-alpha.1 (2020-02-19)
+---------------------------
+
* Rebuild UI.
* Fix arangorestore:
@@ -7677,7 +9879,7 @@ devel
would not be inserted. This is fixed by ignoring this inconsistency in
the case of restore.
-* Updated rclone to 1.51.1.
+* Updated rclone to 1.51.0.
* Fixed a memory leak in ModificationExecutors.
@@ -7692,25 +9894,26 @@ devel
`geometry` attribute that had a non-object value (e.g. `null`) the web UI
threw a JavaScript exception and would not display AQL query results properly.
-* Disable "collect-in-cluster" AQL optimizer rule in case a LIMIT node is between
- the COLLECT and the source data. In this case it is not safe to apply the
- distributed collect, as it may alter the results.
+* Disable "collect-in-cluster" AQL optimizer rule in case a LIMIT node is
+ between the COLLECT and the source data. In this case it is not safe to apply
+ the distributed collect, as it may alter the results.
* Fixed internal issue #4932: COLLECT WITH COUNT together with FILTER yields
zero.
This bugfix fixes an issue when skipping over documents in an index scan
- using a covering index and also at the same time using an early-pruning filter.
+ using a covering index and also at the same time using an early-pruning
+ filter.
In this case wrong document data may have been injected into the filter
condition for filtering, with the filter wrongfully deciding which documents
to filter out.
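+
+  A hedged sketch of the affected shape, assuming a persistent index on `value`
+  so the filter can be evaluated with early pruning on a covering index (all
+  names hypothetical):
+
+    FOR doc IN coll
+      FILTER doc.value > 10
+      COLLECT WITH COUNT INTO cnt
+      RETURN cnt
+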
* Added crash handler for Linux builds that taps into the following signals:
- * SIGSEGV (segmentation violation)
- * SIGBUS (bus error)
- * SIGILL (illegal instruction)
- * SIGFPE (floating point exception)
+ - SIGSEGV (segmentation violation)
+ - SIGBUS (bus error)
+ - SIGILL (illegal instruction)
+ - SIGFPE (floating point exception)
  In case the arangod process catches one of these signals, the crash handler
tries to log a message and a backtrace into the installation's logfile before
@@ -7726,10 +9929,11 @@ devel
case of a database upgrade. These commands are highly platform-dependent and
also depend on whether ArangoDB is started manually, via the ArangoDB starter
or as a service.
- In order to not confuse end users, remove the potentially misleading instructions.
+ In order to not confuse end users, remove the potentially misleading
+ instructions.
-* Clear backups from DB servers and agency, when plan unchanged not
- met and not allowing for inconsistency.
+* Clear backups from DB servers and agency when the "plan unchanged"
+  precondition is not met and inconsistency is not allowed.
* V8 version upgrade to 7.9.317; ICU version upgrade to 64.2.
- JSON parsing is roughly 60% faster than in V8 7.1; you should prefer
@@ -7738,8 +9942,8 @@ devel
* Clean out server job checks preconditions plan version unchanged for start.
-* Cluster collection creation preconditioned on involved db servers not
- in process of being cleaned and Fixed.
+* Cluster collection creation is preconditioned on the involved DB servers not
+  being in the process of being cleaned out or failed.
* Fixed a bug where a single host agency logged too early.
@@ -7751,18 +9955,18 @@ devel
This change prevents the "late-document-materialization" optimizer rule from
kicking in in cases when an index scan also uses early pruning of index
entries. In this case, the late document materialization will lead to the
- filter condition used in early pruning not being applied correctly, potentially
- producing wrong results.
+ filter condition used in early pruning not being applied correctly,
+ potentially producing wrong results.
* Fixed internal issue #596: Added ability to disable DNF conversion in SEARCH
conditions for ArangoSearch views, as this conversion might result in an
overcomplicated disjunction and heavily slow down execution.
* Implement a new API to reload TLS server key and certificates as well as
- client certificate CA. This allows to rotate TLS certificates without
- a restart. One can also query the currently loaded state of the TLS
- data to decide if a reload has actually happened. The new certificates
- will only be used for new TLS-based connections.
+  client certificate CA. This allows rotating TLS certificates without a
+ restart. One can also query the currently loaded state of the TLS data to
+ decide if a reload has actually happened. The new certificates will only be
+ used for new TLS-based connections.
* Add acquisition of system report to arangod instances.
@@ -7770,19 +9974,21 @@ devel
* Fix a bug in collection creation with `distributeShardsLike`.
-* Added load-balancing support for listing currently running and slow AQL queries,
- killing running AQL queries and clearing the list of slow AQL queries.
+* Added load-balancing support for listing currently running and slow AQL
+ queries, killing running AQL queries and clearing the list of slow AQL
+ queries.
- This change also will also modify the values returned in the "id" attribute of AQL
- query objects. While the values of the "id" attribute remain strings with numeric
- content, the ids will now start at arbitrary offsets after server start and are
- supposed to have much higher numeric values than in previous ArangoDB versions.
- In previous ArangoDB versions, query ids always started at value 1 after a server
- start/restart and were increased in increments of 1.
+  This change will also modify the values returned in the "id" attribute of
+ AQL query objects. While the values of the "id" attribute remain strings with
+ numeric content, the ids will now start at arbitrary offsets after server
+ start and are supposed to have much higher numeric values than in previous
+ ArangoDB versions.
+ In previous ArangoDB versions, query ids always started at value 1 after a
+ server start/restart and were increased in increments of 1.
This change may lead to query ids being greater than what a 4 byte integer can
- hold, which may affect client applications that treat the ids as numeric values
- and do not have proper support for integer numbers requiring more than 4 byte
- storage.
+ hold, which may affect client applications that treat the ids as numeric
+ values and do not have proper support for integer numbers requiring more than
+ 4 byte storage.
* Remove unused REST API /_admin/aql/reload, and make the JavaScript bindings
for `internal.reloadAqlFunctions()` do nothing. The reason for this is that
@@ -7791,102 +9997,135 @@ devel
* Fixed issue #10949: k shortest paths behavior wrong with zero weights.
-* added `REPLACE_NTH` AQL function to replace a member inside an array.
+* Added `REPLACE_NTH` AQL function to replace a member inside an array.
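+
+  A minimal example (assuming the documented signature
+  REPLACE_NTH(array, position, value) with a 0-based position):
+
+    RETURN REPLACE_NTH(["a", "b", "c"], 1, "B")
+    // expected result: ["a", "B", "c"]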
* Added JWT secret rotation (Enterprise Edition only).
* The `--dump-options` command for arangod now also provides the "components"
attribute that shows for which components (agent, coordinator, db server,
- single server) an option is relevant. Additionally, each option provides an "os"
- attribute that indicates on which operating systems the option is supported.
+ single server) an option is relevant. Additionally, each option provides an
+ "os" attribute that indicates on which operating systems the option is
+ supported.
This can be used when reading the options descriptions programmatically, e.g.
for auto-generating the documentation.
-* Add update-insert operation similar to existing replace-insert functionality of insert.
+* Add update-insert operation similar to the existing replace-insert
+  functionality of insert.
As with the existing variant, the `overwrite` flag has to be set to true.
- Then the update version can be selected by setting the `overwriteMode` to `"update"`.
+ Then the update version can be selected by setting the `overwriteMode` to
+ `"update"`.
+
+
+v3.6.1 (2020-01-29)
+-------------------
* Updated ArangoDB Starter to 0.14.13.
* Added HotBackup events into auditing.
+* Internal statistics API now uses integer timestamps instead of doubles. The
+  old behavior sometimes led to failed requests because of parse errors in the
+  internally used JavaScript joi library.
+
+* Fixed issue #10896: Variables defined inside spliced subqueries would leak
+ into following COLLECT ... INTO var operations.
+
+* Fixed COLLECT not invalidating variables for following COLLECT ... INTO var
+ operations.
+
* Removed an unnecessary and wrong request from within the Web UI to the
`/_admin/cluster/health` API. This led to unauthorized network calls.
* Allowed PHRASE function to process empty arrays without generating an error.
+* Add acquisition of system report to arangod instances.
+
+* React dev mode now supports hot reload combined with proxy for development.
+
* Fix issue #10897: using COLLECT in a subquery could lead to unexpected and
confusing error messages.
-* Internal statistics API now uses integer timestamps instead of doubles. The
- old behavior sometimes leads to failed requests because of parse errors
- which occurred in the internal used JavaScript joi library.
-
-* Support trailing commas in AQL object and array definitions as follows:
+* Fix potential nullptr dereference in view optimizer rule, when there was a
+  LIMIT outside of a FOR loop.
- [ 1, 2, 3, ]
- { a: 1, b: 2, }
+* Added missing "global" parameter in Swagger REST API documentation for some
+ replication endpoints.
- In previous versions of ArangoDB, such trailing commas resulted in a query
- parse error.
+* Fixed an edge case in VelocyPack when the padding of a 1-byte offsetSize array
+ is removed but the first few entries of the array contain a Slice of type
+ None.
-* Make sure heartbeats are actually sent out and received every second.
+* Fix Foxxmaster failover retry loop to not spin forever and give up after a
+ while.
-* React dev mode now supports hot reload combined with proxy for development.
+* Fix "random" Valgrind "invalid free/delete" errors caused by usage of
+ `alignas(64)` for shared_ptrs in the SupervisedScheduler.
-* Added read-write-locks for parallel move shard operations.
+* MoveShard to check if the target is an in-sync follower before promotion to
+  leader.
-* Fixed issue #10896: Variables defined inside spliced subqueries would leak
- into following COLLECT ... INTO var operations.
+* Fixed issue #10867: arangod based binaries lack product version info and icon
+  on Windows.
-* Fixed COLLECT not invalidating variables for following COLLECT ... INTO var
- operations.
+* Fixed internal traversal edge collection cache being filled up correctly.
+  Edges are able to point to other edges, but those were not applied to the
+ cache.
-* Fixed issue #10867: arangod based binaries lack product version info and icon
- on Windows.
+* Fixed issue #10852: Nested spliced subqueries could return wrong results in
+ some cases when some of the concerned subqueries did not return any values.
* Fix string comparison bug that led to traversal queries accepting prefixes of
"edges" and "vertices" to be used as object accessors for the path object.
-* Fixed internal traversal edge collection cache being filled up correctly.
- Edges are able to point to other edges, but those we're not applied to the
- cache.
+* Fix bug affecting spliced subqueries when memory blocks are reused.
+
+* Now clearing an internal map inside the traverser engine correctly.
+
+* Add ability to choose logging to file in Windows installer of ArangoDB server
+ (enabled by default).
+
+ ArangoDB-logs folder with arangod.log should be stored in %PROGRAMDATA% and
+  %LOCALAPPDATA% for all-users and single-user installations, respectively.
+
+* Fix installation of arangoinspect and libraries in Windows client installer.
-* Fix string comparison bug that lead to traversal queries accepting
- prefixes of "edges" and "vertices" to be used as object accessors
- for the path object.
+* Disable cluster AQL parallelization for queries that contain traversal,
+ shortest path or k_shortest_path nodes.
+ This avoids potential undefined behavior in case a parallel GatherNode is used
+ in graph queries.
-* Fix #10852. Nested spliced subqueries could return wrong results in some cases
- when some of the concerned subqueries did not return any values.
+* Fixed internal issue #656: While executing a large amount of concurrent
+  inserts/removes, IResearch seemed to leak open file handles. This resulted
+  in error 32 during cleanup (observed only on Windows, as Linux doesn't have
+  file sharing locks).
-* Add ability to choose logging to file in Windows installer of ArangoDB server
- (enabled by default).
+* Updated arangosync to 0.7.2.
- ArangoDB-logs folder with arangod.log should be stored in %PROGRAMDATA% and
- %LOCALAPPDATA% for all users and single user installation respectively.
+* Fix Windows client package JS installation paths.
-* Fixed issue #10725: Wrong document count shown inside an empty collection in
- the web UI.
+* Added option that makes ArangoDB write logfiles in the Windows installer.
-* Fix installation of arangoinspect and libraries in Windows client installer.
-* Now clearing an internal map inside the traverser engine correctly.
+v3.6.0 (2020-01-08)
+-------------------
-* Ported `/_admin/cluster*` API to C++.
+* Do not create a reboot tracker for an empty serverId in synchronous
+  replication.
-* Introduced AsyncAgencyComm for asynchronous internal agency requests.
+* Update swagger.
-* Fix bug affecting spliced subqueries when memory blocks are reused.
-* Hide swagger api calls in list by default
+v3.6.0-rc.2 (2019-12-23)
+------------------------
-* Fix Windows client package JS installation paths.
+* Fixed issue #10725: Wrong document count shown inside an empty collection in
+ the web UI.
-* Updated arangosync to 0.7.2.
+* Fixed bug that made AQL queries eligible for parallelization even in case
+  they couldn't be parallelized, leading to undefined behavior due to thread
+  races.
-* Renamed document / index / vertex "handle" to "identifier" / "id" for
- consistency in documentation and error messages.
+* Fixed bug in the `--query.parallelize-gather-writes` behavior which led to
+ the option not working correctly.
* Agency relational operators TTL fix.
@@ -7895,37 +10134,43 @@ devel
* Fixed internal issue #4748: Editing a single edgeDefinition using the graph
API failed if it was not shared between all available graphs.
-* Changed HTTP return code for an error case in /_api/cluster/endpoints REST API.
- Now, if the API is called on a single server, it will return HTTP 501 instead
- of HTTP 403.
-
-* Changed HTTP return code for an error case in /_api/cluster/agency-dump REST API.
- Now, if the API is called on a server type other than coordinator, it will return
- HTTP 501 instead of HTTP 403.
-
* Fixed undefined behavior on node delete.
-* Fixed agency invalid operation
+* Fixed agency invalid operation.
+
+* Added google tests for Node.
* Bugfix: An AQL ternary expression with the condition being true at query
compile time would not execute its false branch.
-* Fixed uptime in _admin/statistics.
+* When starting the RocksDB engine, first create small-sized WAL files.
-* Fixed a bug in SmartGraph bfs traversals that might violate path uniqueness
- requirements in rare cases.
+ This is a precaution so that when repeatedly trying to start an arangod
+ instance, an instant instance startup failure will not lead to the disk
+ filling up so quickly with WAL file data.
-* Add acquisition of system report to arangod instances.
+ The WAL file size is increased later on in the startup sequence, so
+ everything should be fine if the startup works.
-* Fix execution ability in CentOS 6 regarding newer boost.
+  This fixes a problem with the disk filling up quickly when the ArangoDB
+ starter tries to start an instance 100 times in a row but instantaneously
+ gets a failure back from it.
-* Fixed permissions for dump/restore.
+* Fixed a permissions bug for /_admin/cluster/rebalanceShards.
-* The _users collection is now properly restored when using arangorestore.
-* Updated arangosync to 0.7.1.
+v3.6.0-rc.1 (2019-12-10)
+------------------------
+
+* Renamed document / index / vertex "handle" to "identifier" / "id" for
+ consistency in documentation and error messages.
+
+* Fixed a bug in smart graph bfs traversals that might violate path uniqueness
+ requirements in rare cases.
+
+* Fixed permissions for dump/restore.
-* rename `minReplicationFactor` into `writeConcern` to make it consistent with
+* Rename `minReplicationFactor` into `writeConcern` to make it consistent with
`--cluster.write-concern` and avoid confusion with
`--cluster.min-replication-factor`
@@ -7933,7 +10178,6 @@ devel
It supports React now, but the previous framework is still in use.
* Enable the `parallelize-gather` AQL optimizer rule for certain write queries.
-
The optimization is turned on by default and can be disabled by setting the
startup option `--query.parallelize-gather-writes` to `false`.
@@ -7944,10 +10188,6 @@ devel
of documents in the first shards consumed, the rest of the documents were not
returned.
-* Fixed GET _api/gharial to also include the name property in every returned graph.
- This is a consistency fix within the API as all other APIs include the name.
- As a workaround the returned _key can be used, which is identical to the name.
-
* REMOTE and GATHER no longer make subqueries unsuitable for the
`splice-subqueries` optimization.
@@ -7956,8 +10196,9 @@ devel
* Add a Prometheus endpoint for metrics, expose new metrics, old statistics
and RocksDB metrics.
-* Fixed known issue #509: ArangoSearch index consolidation does not work during creation of a link
- on existing collection which may lead to massive file descriptors consumption.
+* Fixed known issue #509: ArangoSearch index consolidation does not work during
+  creation of a link on an existing collection, which may lead to massive file
+  descriptor consumption.
* Added support of array comparison operators to ArangoSearch.
@@ -7965,11 +10206,11 @@ devel
* Added support of arrays to PHRASE function.
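
  For example, a sketch assuming the array form takes the phrase tokens as
  separate members (view name `v` and attribute `text` are made up):

    FOR doc IN v
      SEARCH PHRASE(doc.text, ["quick", "brown"], "text_en")
      RETURN doc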
-* Added a new optimization called `late-document-materialization`, `late-document-materialization`
- for indexes and views correspondingly.
+* Added new optimizations called `late-document-materialization` and
+  `late-document-materialization-arangosearch` for indexes and views
+  respectively.
- This optimization reduces amount of documents to read from storage engine to the limit explicitly
- stated in LIMIT node.
+  This optimization reduces the amount of documents read from the storage
+  engine to the limit explicitly stated in the LIMIT node.
* Added support of "Edge N-grams" to `text` analyzer.
@@ -7994,55 +10235,214 @@ devel
The implementation of this feature required reworking the dataflow
query execution.
-* Fixed issue #10470: The WebUI now shows potential errors and details which occurred using _api/import (e.g.
- unique constraint violated).
-
-* Added startup option `--query.optimizer-rules` to selectively enable or disable
- optimizer rules by default. The option can be specified multiple times, and takes
- the same input as the query option of the same name.
+* Added startup option `--query.optimizer-rules` to selectively enable or
+ disable optimizer rules by default. The option can be specified multiple
+ times, and takes the same input as the query option of the same name.
For example, to turn off the rule _use-indexes-for-sort_, use
--query.optimizer-rules "-use-indexes-for-sort"
- The purpose of this option is to be able to enable potential future experimental
- optimizer rules, which may be shipped in a disabled-by-default state.
+ The purpose of this option is to be able to enable potential future
+ experimental optimizer rules, which may be shipped in a disabled-by-default
+ state.
-* Allow the AQL query optimizer to remove the DistributeNodes for several data-modification
- queries. So far, only REMOVE queries benefitted. Now the optimization can also be
- applied for REPLACE and UPDATE queries in case the query does not use LIMIT and
- there is no further cluster-internal communication after the REMOVE, REPLACE or UPDATE
- node.
+* Allow the AQL query optimizer to remove the DistributeNodes for several
+ data-modification queries. So far, only REMOVE queries benefitted. Now the
+ optimization can also be applied for REPLACE and UPDATE queries in case the
+ query does not use LIMIT and there is no further cluster-internal
+ communication after the REMOVE, REPLACE or UPDATE node.
* Include ArangoSearch data in HotBackups.
* Allow to restore 3.5 HotBackups in 3.6.
-* Fixed ArangoSearch index removes being discarded on committing consolidation results with
- pending removes after some segments under consolidation were already committed
-
* Fixed an issue where removeServer left behind current coordinators.
* Allow usage of AQL function `RANDOM_TOKEN` with an argument value of `0`. This
now produces an empty string, whereas in older versions this threw an invalid
value exception.
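
  For example, the following now succeeds and returns an empty string:

    RETURN RANDOM_TOKEN(0)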
-* Add startup option `--rocksdb.exclusive-writes` to avoid write-write conflicts.
+* Add startup option `--rocksdb.exclusive-writes` to avoid write-write
+ conflicts.
- This options allows for an easier transition from MMFiles to the RocksDB storage
- engine, but comes with a big performance penalty as all collections will be locked
- exclusively for writes.
+  This option allows for an easier transition from MMFiles to the RocksDB
+ storage engine, but comes with a big performance penalty as all collections
+ will be locked exclusively for writes.
-* Added new maxRuntime option for queries. If a query does not finish execution within
- the given time (in seconds) it will be killed.
+* Added new maxRuntime option for queries. If a query does not finish execution
+ within the given time (in seconds) it will be killed.
-* Fixed undefined behavior with creation of ArangoSearch links with custom
- analyzers in cluster environment.
+* Added limit for AQL range materialization to prevent out-of-memory errors.
-* Fixed internal issue #651: analyzer duplication in _analyzers collection.
+ When materializing ranges created by either the AQL `RANGE` function or by
+ using the built-in `..` operator (e.g. `1 .. 1000000`), a check is now
+ performed if the range is too big to be materialized. The threshold value is
+ set to 10 million members. Ranges with at most that many members can be
+ materialized, ranges with more members will fail to materialize and abort the
+ query with the exception `number out of range` (error code 1504).
-* Fixed internal issue #4597: rebalanceShards API cannot work on any database
- other than the _system database.
+ It is still possible to create ranges with more than 10 million members as
+ long as they are not materialized. For example, the following is still valid:
+
+ FOR i IN 1 .. 1000000000 INSERT {_key: CONCAT('test', i)} INTO collection
+
+* No longer put system services into `_apps` on single server. On cluster, this
+ has never worked. This was unnecessary.
+
+* Added AQL optimizer rule "move-filters-into-enumerate", to allow for early
+ pruning of non-matching documents while full-scanning or index-scanning
+ documents. This optimization can help to avoid a lot of temporary document
+ copies.
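+
+  A sketch of a query that can benefit from the new rule (collection name
+  `users` is made up; with the rule active, the filter condition is evaluated
+  while scanning, before the document is copied):
+
+    FOR doc IN users
+      FILTER doc.age > 42
+      RETURN doc.name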
+
+* Added "SmartJoins for Views" to the ArangoDB Enterprise Edition that allows
+ running cluster joins between two certain sharded collections or views with
+ performance close to that of a local join operation.
+
+* Allow collection names to be at most 256 characters long, instead of 64
+ characters in previous versions.
+
+* Upgraded bundled Boost library version to 1.71.
+
+* Use `-std=c++17` for ArangoDB compilation.
+
+* Made the mechanism in the Web UI for replacing and upgrading a Foxx app
+  clearer.
+
+* Show shards of all collections (including system collections) in the web UI's
+ shard distribution view.
+
+ This is necessary to access the prototype collections of a collection sharded
+ via `distributeShardsLike` in case the prototype is a system collection, and
+ the prototype should be moved to another server.
+
+* Rclone URL normalization.
+
+* Disallow using `_id` or `_rev` as shard keys in clustered collections.
+
+ Using these attributes for sharding was not supported before, but didn't
+ trigger any errors. Instead, collections were created and silently using
+  `_key` as the shard key, without making the caller aware that an
+ unsupported shard key was used.
+
+* Use execvp instead of execv in HotBackup restore.
+
+* Re-enabled the AQL sort-limit optimization rule in conjunction with fullCount
+ in the cluster. It now also may speed up fullCount with sorted indexes and a
+ limit.
+
+* Make the scheduler enforce the configured queue lengths. The values of the
+ options `--server.scheduler-queue-size`, `--server.prio1-size` and
+ `--server.maximal-queue-size` will now be honored and not exceeded.
+
+  The default queue sizes in the scheduler for request buffering have
+ also been changed as follows:
+
+ request type before now
+ -----------------------------------
+ high priority 128 4096
+ medium priority 1048576 4096
+ low priority 4096 4096
+
+ The queue sizes can still be adjusted at server start using the above-
+ mentioned startup options.
+
+* Add replicationFactor, minReplicationFactor and sharding strategy to database
+ creation dialog in web UI. Preselect database default values for collection
+ creation in web UI.
+
+* Add new JavaScript function `db._properties()` that provides information about
+ the current database's properties.
+
+* Add new options `sharding` and `replicationFactor` for database creation
+ methods. The specified values will provide the defaults for all collections
+ created in a database.
+
+  Valid values for `sharding` are `""`, `"flexible"` and `"single"`. The first
+  two values are treated equally. Values for `replicationFactor` are natural
+  numbers or the string `satellite`.
+
+* Add new server option `--cluster.default-replication-factor` that allows
+  setting the default replication factor for non-system collections
+  (default: 1).
+
+* Enabled IPO with CMake as an option; the default is on for release builds
+  without googletest.
+
+* Bugfix: The AQL sort-limit optimization was applied in some cases where it
+  shouldn't be, resulting in undefined behavior.
+
+* Remove operations for documents in the cluster will now use an optimization
+  if all sharding keys are specified. Should the sharding keys not match the
+  values in the actual document, a "not found" error will be returned.
+
+* Retry hot backup list in cluster for 2 minutes before reporting error.
+
+* Allowing inconsistent rather than forcing hot backups.
+
+* Geo functions will now have better error reporting on invalid input.
+
+* Upgraded bundled jemalloc library to version 5.2.1.
+
+* Added TransactionStatistics to ServerStatistics (transactions started /
+ aborted / committed and number of intermediate commits).
+
+* Added AQL function DATE_ROUND to bin a date/time into a set of equal-distance
+ buckets.
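+
+  For example, a sketch assuming the documented signature
+  DATE_ROUND(date, amount, unit):
+
+    RETURN DATE_ROUND("2000-04-28T11:11:11.111Z", 1, "day")
+    // expected result: "2000-04-28T00:00:00.000Z"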
+
+* Enforced the valid date range for working with date/time in AQL. The valid
+ date ranges for any AQL date/time function are:
+
+ - for string date/time values: `"0000-01-01T00:00:00.000Z"` (including) up to
+ `"9999-12-31T23:59:59.999Z"` (including)
+ - for numeric date/time values: -62167219200000 (including) up to
+ 253402300799999 (including). These values are the numeric equivalents of
+ `"0000-01-01T00:00:00.000Z"` and `"9999-12-31T23:59:59.999Z"`.
+
+ Any date/time values outside the given range that are passed into an AQL date
+ function will make the function return `null` and trigger a warning in the
+ query, which can optionally be escalated to an error and stop the query.
+
+ Any date/time operations that produce date/time outside the valid ranges
+ stated above will make the function return `null` and trigger a warning too.
+  An example for this is:
+
+ DATE_SUBTRACT("2018-08-22T10:49:00+02:00", 100000, "years")
+
+* Fixed bug in MoveShard::abort which causes a duplicate entry in the follower
+ list. (Internal Bug #4378)
+
+* Updated TOKENS function to deal with primitive types and arrays.
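+
+  For example, a sketch of the array form (assuming the built-in `text_en`
+  analyzer; exact tokens depend on the analyzer configuration):
+
+    RETURN TOKENS(["a quick fox", "dog"], "text_en")
+    // expected shape: [ ["a", "quick", "fox"], ["dog"] ]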
+
+
+v3.5.3 (2019-11-28)
+-------------------
+
+* Fixed GET _api/gharial to also include the name property in every returned
+ graph. This is a consistency fix within the API as all other APIs include the
+  name. As a workaround, the returned _key can be used, which is identical to
+ the name.
+
+* The _users collection is now properly restored when using arangorestore.
+
+* Allow the optimizer to use indexes when a collection attribute is compared to
+ an expansion followed by an attribute name, e.g.
+ `doc.value IN something[*].name`.
+
+* Updated arangosync to 0.7.0.
+
+* Fixed issue #10470: The WebUI now shows potential errors and details which
+  occurred using _api/import (e.g. unique constraint violated).
+
+* Fixed issue #10440: Incorrect sorting with sort criteria partially covered
+ by index.
+
+* Make the timeouts for replication requests (for active failover and
+  master-slave replication) configurable via startup options:
+
+ --replication.connect-timeout
+ --replication.request-timeout
+
+* Fixed internal issue #4647: dead Coordinators are not removed from the agency.
* Fixed UPSERT matching.
@@ -8058,174 +10458,276 @@ devel
This will now correctly insert a document instead of updating an existing one
that only partially matches the upsert expression.
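
  A sketch of the corrected behavior (collection name `users` and the
  attributes are made up):

    UPSERT { name: "jane", active: true }
      INSERT { name: "jane", active: true, logins: 1 }
      UPDATE { logins: OLD.logins + 1 }
      IN users

  A document with `name: "jane"` but without `active: true` no longer gets
  updated; the INSERT branch runs instead.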
+* Fixed undefined behavior with creation of ArangoSearch links with custom
+ analyzers in cluster environment.
+
+* Fixed internal issue #651: analyzer duplication in _analyzers collection.
+
+* Fixed internal issue #4597: rebalanceShards API cannot work on any database
+ other than the _system database.
+
+* Stop putting system services in _apps on single server; this has never
+  worked on cluster and was not needed.
+
* Fixed issue #10371: K_SHORTEST_PATHS LIMIT 1 can not return the shortest path.
Now the shortest path is returned as the first one in such queries.
-* Added limit for AQL range materialization to prevent out-of-memory errors.
+* Improve killability of some types of cluster AQL queries. Previously, several
+ cluster queries, especially those containing a `DistributeNode` in their
+ execution plans, did not respond to a kill instruction.
- When materializing ranges created by either the AQL `RANGE` function or by using
- the built-in `..` operator (e.g. `1 .. 1000000`), a check is now performed if
- the range is too big to be materialized. The threshold value is set to 10 million
- members. Ranges with at most that many members can be materialized, ranges with
- more members will fail to materialize and abort the query with the exception
- `number out of range` (error code 1504).
+ This change also introduces a new query status "killed", which may now be
+ returned by the REST APIs at `/_api/query/current` and `/_api/query/slow` in
+ the `state` attribute of each query.
- It is still possible to create ranges with more than 10 million members as long
- as they are not materialized. For example, the following is still valid:
+* Improve shutdown of some cluster AQL queries on the coordinator in case the
+ query has multiple coordinator snippets (true for queries involving more than
+ one collection) and the database server(s) cannot be reached on query
+ shutdown. In this case the proper shutdown of the coordinator parts of the
+ query previously was deferred until the coordinator snippets were removed by
+ the automatic garbage collection. Now, the cleanup of the coordinator snippets
+ will happen much more quickly, which reduces the chances of the queries
+ blocking resources.
- FOR i IN 1 .. 1000000000 INSERT {_key: CONCAT('test', i)} INTO collection
+* Fixed ArangoSearch index removes being discarded on committing consolidation
+ results with pending removes after some segments under consolidation were
+ already committed.
-* Separately account for superuser and user request traffic. This is
- needed for Oasis.
+* Fixed assertion failure when no timestamp is present in the agency's
+  persistence.
-* No longer put system services into `_apps` on single server. On cluster, this
- has never worked. This was unnecessary.
+* Fixed internal issue #647: custom analyzer provokes errors on Active Failover
+ deployment.
-* Fixed available flag for HotBackup.
+* Upgraded bundled version of libcurl to 7.66.0.
+* When starting a coordinator, wait up to 15 seconds for it to appear
+ in the agency under key `Supervision/Health` before reporting as "ready".
+ This is necessary because if the coordinator reports ready beforehand
+ and is used to create databases etc., the supervision may remove all
+ of the jobs started by non-ready coordinators, considering them to be
+ from a failed coordinator.
+ To avoid huge startup delays, the startup will proceed after waiting
+ futilely for 15 seconds and log a message.
-* Fixed list with id for partially available HotBackups.
+* Fixed issue #10270: Query: Expecting type Array or Object (while executing).
-* Added AQL optimizer rule "move-filters-into-enumerate", to allow for
- early pruning of non-matching documents while full-scanning or index-
- scanning documents. This optimization can help to avoid a lot of
- temporary document copies.
+* Fix a problem with AQL constrained sort in the cluster, which might abort
+ queries. The AQL sort-limit optimization rule may now also speed up fullCount
+ with sorted indexes and a limit in the cluster.
-* Added "SmartJoins for Views" to the ArangoDB Enterprise Edition that allows running
- cluster joins between two certain sharded collections or views with performance close
- to that of a local join operation.
+* Prevent spurious log message "Scheduler queue is filled more than 50% in last
+ x s" from occurring when this is not the case. Due to a data race, the
+ message could previously also occur if the queue was empty.
-* Allow collection names to be at most 256 characters long, instead of 64 characters
- in previous versions.
+* The General Graph document API is now consistent with the document API in
+  its error messages. When attempting to create or modify edges pointing to
+  non-existing vertex collections, HTTP 400 is returned instead of 404.
-* Upgraded bundled Boost library version to 1.71.
+* Disallow the usage of subqueries inside AQL traversal PRUNE conditions.
+ Using subqueries inside PRUNE conditions causes undefined behavior,
+ so such queries will now be aborted early on with a parse error
+ instead of running into undefined behavior.
-* The General Graph document API is now consistent with the document API in its error messages.
- When attempting to create / modify edges pointing to non-existent vertex collections
- HTTP 400 is returned instead of 404.
+* Fixed available flag for hotbackup.
-* Use `-std=c++17` for ArangoDB compilation.
+* Fixed list with id for partially available hotbackups.
-* Made the mechanism in the Web UI of replacing and upgrading a foxx app more clear.
+* Fixed agency TTL bug happening under certain rare conditions.
+
+* Improved performance of some agency helper functions.
* Fixed search not working in document view while in code mode.
-* Show shards of all collections (including system collections) in the web UI's shard
- distribution view.
+* Fixed issue #10090: fix repeatable seek to the same document in
+ SEARCH operations for ArangoSearch views.
- This is necessary to access the prototype collections of a collection sharded via
- `distributeShardsLike` in case the prototype is a system collection, and the prototype
- should be moved to another server.
+* Fixed issue #10193: Arangoexport does not handle line feeds when exporting as
+ csv.
-* Rclone URL normalization.
+* Removed debug log messages "found comm task ..." that could be logged
+ on server shutdown.
-* Fixed unintended multiple unlock commands from coordinator to
- transaction locked db servers.
+* Fixed issue #10183: Arangoimport imports on _system when you try to
+ create a new database.
-* Disallow using `_id` or `_rev` as shard keys in clustered collections.
+  This fixes the output of arangoimport, which could display a
+ wrong target database for the import if the option `--create-database`
+ was used.
- Using these attributes for sharding was not supported before, but didn't trigger
- any errors. Instead, collections were created and silently using `_key` as
- the shard key, without making the caller aware of that an unsupported shard
- key was used.
+* Fixed issue #10158: Invalid Query Crashes ArangoDB.
-* DB server locking / unlocking for hot backup revisited and enhanced.
+ This fixes traversal queries that are run on a static empty start vertex
+ string.
-* Use execvp instead of execv in HotBackup restore.
-* Re-enabled the AQL sort-limit optimization rule in conjunction with fullCount
- in the cluster. It now also may speed up fullCount with sorted indexes and a
- limit.
+v3.5.2 (2019-11-06)
+-------------------
-* Fix config directory handling, so we don't trap into UNC path lookups on Windows.
+* Fixed ArangoSearch upgrade procedure from previous minor version and
+ patches.
-* Prevent spurious log message "Scheduler queue is filled more than 50% in last x s"
- from occurring when this is not the case. Due to a data race, the message could
- previously also occur if the queue was empty.
+* Separately account for superuser and user request traffic.
-* Make the scheduler enforce the configured queue lengths. The values of the options
- `--server.scheduler-queue-size`, `--server.prio1-size` and `--server.maximal-queue-size`
- will now be honored and not exceeded.
- The default queue sizes in the scheduler for requests buffering have
- also been changed as follows:
+v3.5.1 (2019-10-07)
+-------------------
- request type before now
- -----------------------------------
- high priority 128 4096
- medium priority 1048576 4096
- low priority 4096 4096
+* Properly report parse errors for extraneous unterminated string literals
+ at the end of AQL query strings. For example, in the query `RETURN 1 "abc`,
+ the `RETURN 1` part was parsed fully, and the `"abc` part at the end was
+ parsed until the EOF and then forgotten. But as the fully parsed tokens
+ `RETURN 1` already form a proper query, the unterminated string literal
+ at the end was not reported as a parse error.
+ This is now fixed for unterminated string literals in double and single
+ quotes as well as unterminated multi-line comments at the end of the query
+ string.
- The queue sizes can still be adjusted at server start using the above-
- mentioned startup options.
+* Fix config directory handling, so we don't trap into UNC path lookups on Windows.
-* Fix compilation issue with clang 10.
+* Ignore symlinks when copying JavaScript files at startup via the option
+ `--javascript.copy-installation`. This potentially fixes the following
+ error message at startup:
+
+ Error copying JS installation files to '...':
+ failed to open source file ...: No such file or directory
+
+* Added startup option `--cluster.max-number-of-shards` for restricting the
+ maximum number of shards when creating new collections. The default
+ value for this setting is `1000`, which is also the previously hard-coded
+ built-in limit. A value of `0` for this option means "unrestricted".
+ When the setting is adjusted, it will not affect already existing
+ collections, but only collections that are created or restored
+ afterwards.
+
+* Added startup options for managing the replication factor for newly
+ created collections:
+
+  - `--cluster.min-replication-factor`: this setting controls the minimum
+ replication factor value that is permitted when creating new collections.
+ No collections can be created which have a replication factor value
+ below this setting's value. The default value is 1.
+  - `--cluster.max-replication-factor`: this setting controls the maximum
+ replication factor value that is permitted when creating new collections.
+ No collections can be created which have a replication factor value
+ above this setting's value. The default value is 10.
+  - `--cluster.default-replication-factor`: this setting controls the default
+ replication factor value that is used when creating new collections and
+ no value of replication factor has been specified.
+ If no value is set for this option, the value of the option
+ `--cluster.min-replication-factor` will be used.
-* Fixed issue #10062: AQL: Could not extract custom attribute.
+* Fixed unintended multiple unlock commands from coordinator to
+  transaction-locked DB servers.
-* Add replicationFactor, minReplicationFactor and sharding strategy to database creation
- dialog in web UI. Preselect database default values for collection creation in web UI.
+* DB server locking / unlocking for hot backup revisited and enhanced.
-* Add new JavaScript function `db._properties()` that provides information about
- the current database's properties.
+* Rely on reboot ids for declaring end of cluster hot restore on coordinators.
-* Add new options `sharding` and `replicationFactor` for database creation methods. The
- specified values will provide the defaults for all collections created in a database.
+* Obtain new unique IDs via a background thread.
- Valid values for `sharding` are `""`, "flexible", "single". The first 2 values are
- treated equally. Values for `replicationFactor` are natural numbers or the string
- `satellite`.
+* Fixed issue #10078: FULLTEXT with sort on same field not working.
-* Add new server option `--cluster.default-replication-factor` that allows to set the
- default replication factor for non-system collections (default: 1).
+* Fixed issue #10062: AQL: could not extract custom attribute.
-* Made the mechanism in the Web UI of replacing and upgrading a foxx app more clear.
+* Fix compilation issue with clang 10.
-* Fix a problem with AQL constrained sort in the cluster, which might abort
- queries.
+* Fixed error message for error 1928 ("not in orphan") to "collection is
+ not in list of orphan collections".
* Fix strange shutdown hang which came from the fact that currently
libgcc/libmusl wrongly detect multi-threadedness in statically linked
executables.
-* Enabled IPO with cmake as an option, default is on for release builds without
- google tests.
+* Fixed a shutdown bug coming from a read/write lock race.
-* Bugfix: The AQL sort-limit optimization was applied in some cases it shouldn't,
- resulting in undefined behavior.
+* Fixed a bug in the edge cache's internal memory accounting, which led
+ to the edge cache underreporting its current memory usage.
-* Remove operations for documents in the cluster will now use an optimization,
- if all sharding keys are specified. Should the sharding keys not match the values in
- the actual document, a not found error will be returned.
+* Fixed "ArangoDB is not running in cluster mode" errors in active failover setups.
+ This affected at least /_admin/cluster/health.
+
+* Made the mechanism in the Web UI for replacing and upgrading a Foxx app clearer.
+
+* Fixed the AQL sort-limit optimization which was applied in some cases where
+  it should not be, resulting in undefined behavior.
+
+* Add --server.statistics-history flag to allow disabling of only the historical
+ statistics. Also added rocksdbengine.write.amplification.x100 statistics
+ for measurement of compaction option impact. Enabled non-historical
+ statistics for agents.
* Fixed AQL constrained-heap sort in conjunction with fullCount.
-* Fixed "ArangoDB is not running in cluster mode" errors in active failover setups.
- This affected at least /_admin/cluster/health.
+* Added support for AQL expressions such as `a NOT LIKE b`, `a NOT =~ b` and
+ `a NOT !~ b`. Previous versions of ArangoDB did not support these expressions,
+ and using them in an AQL query resulted in a parse error.
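+
+  For example (collection name `users` is made up):
+
+    FOR doc IN users
+      FILTER doc.name NOT LIKE "ja%"
+      RETURN doc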
-* Fixed the removal (including a collection drop) of an orphanCollection from a
- graph definition when using the ArangoShell. The boolean
- flag whether to drop the collection or not was not transferred properly.
+* Disallow creation of TTL indexes on sub-attributes.
-* Retry hot backup list in cluster for 2 minutes before reporting error.
+ Creation of such indexes was not caught before, but the resulting
+ indexes were defunct. From now on the creation of TTL indexes on sub-
+ attributes is disallowed.
+
+* Added HotBackup feature.
-* Improved database creation within the cluster. In the case of coordinator outages
- during the creation of the database there was a chance that not all relevant
- system collections had been created. Although this database was accessible now
- some features did not work as expected (e.g. creation of graphs). We modified
- creation of a new database as an all or nothing operation and only allow access
- to the database after all system collections are properly prepared to avoid the
- above inconsistencies. Also creation of databases are now secured against
- coordinator outages, they will either be fully created or not visible and
- eventually dropped. This does not require any change on the client code.
+* Improved database creation within the cluster. In the case of
+ coordinator outages during the creation of the database there was a
+ chance that not all relevant system collections had been created.
+ Although this database was accessible now some features did not work
+ as expected (e.g. creation of graphs). We modified creation of a new
+ database as an all or nothing operation and only allow access to the
+  database after all system collections are properly prepared to avoid
+  the above inconsistencies. Also, creation of databases is now secured
+ against coordinator outages, they will either be fully created or not
+ visible and eventually dropped. This does not require any change on the
+ client code.
* Added UI support to create documents in a collection using smartGraphAttribute
and/or smartJoinAttribute.
-* Allowing inconsistent rather than forcing hot backups.
+* Add count of objects to latency reporting in arangoimport.
+
+* Harden database creation against spurious "duplicate name" errors that
+ were caused by other parallel operations lazily creating required
+ system collections in the same database.
+
+* Fixed internal issue #633: made ArangoSearch functions BOOST, ANALYZER, MIN_MATCH
+ callable with constant arguments. This will allow running queries where all arguments
+ for these functions are deterministic and do not depend on loop variables.
+
+* Automatically turn values for deprecated startup option parameters
+ `--log.use-microtime` and `--log.use-local-time` into equivalent option values
+ of the new, preferred option `--log.time-format`.
+
+* Drop collection action to timeout more quickly to stay on fast lane.
+
+* Make arangorestore restore data into the `_users` collection last. This is to
+ ensure that arangorestore does not overwrite the credentials of its invoker while
+ the restore is running, but only at the very end of the process.
+ This change also makes arangorestore restore the `_system` database last if it
+ is started with the `--all-databases` option.
+
+* Fixed the removal (including a collection drop) of an orphanCollection from a
+ graph definition when using the arango shell. The boolean flag whether to drop
+ the collection or not was not transferred properly.
+
+* Check for duplicate server endpoints registered in the agency in sub-keys of
+ `/Current/ServersRegistered`.
+
+ Duplicate endpoints will be registered if more than one arangod instance is
+ started with the same value for startup option `--cluster.my-address`. This can
+  happen unintentionally due to typos in the configuration, copy&paste leftovers, etc.
+
+ In case a duplicate endpoint is detected on startup, a warning will be written
+ to the log, indicating which other server has already "acquired" the same endpoint.
+
+* Make graph operations in general-graph transaction-aware.
* Fixed adding an orphan collection as the first collection in a SmartGraph.
-* Fixed issue #9862: ServerException: RestHandler/RestCursorHandler.cpp:279.
+* Fixed non-deterministic occurrences of "document not found" errors in sharded
+ collections with custom shard keys (i.e. non-`_key`) and multi-document lookups.
+
+* Fixed issue #9862: ServerException: RestHandler/RestCursorHandler.cpp:279
This fixes an issue with the RocksDB primary index IN iterator not resetting its
internal iterator after being rearmed with new lookup values (which only happens
@@ -8233,27 +10735,23 @@ devel
* Geo functions will now have better error reporting on invalid input.
-* Fixed issue #9795. Fixed NOT IN clause in ArangoSearch.
-
* The graph viewer of the web interface now tries to find a vertex document of
all available vertex collections before it aborts.
-* Upgraded bundled jemalloc library to version 5.2.1.
+* Fixed issue #9795: fix AQL `NOT IN` clause in SEARCH operations for ArangoSearch
+ views.
-* Fixed internal issue #4407: remove storage engine warning.
+* Make minimum timeout for synchronous replication configurable via parameter
+ (--cluster.synchronous-replication-timeout-minimum) and increase default value
+ to prevent dropping followers unnecessarily.
* Added support for TLS 1.3 for the arangod server and the client tools.
- The arangod server can be started with option `--ssl.protocol 6` to make it require
- TLS 1.3 for incoming client connections. The server can be started with option
- `--ssl.protocol 5` to make it require TLS 1.2, as in previous versions of arangod.
+  The default TLS protocol for the arangod server is still TLS 1.2, however, in order
+ to keep compatibility with previous versions of ArangoDB.
- The default TLS protocol for the arangod server is now generic TLS, which will allow
- the negotiation of the TLS version between the client and the server.
-
- All client tools also support TLS 1.3, by using the `--ssl.protocol 6` option when
- invoking them. The client tools will use TLS 1.2 by default, in order to be
- compatible with older versions of ArangoDB that may be contacted by these tools.
+ The arangod server and any of the client tools can be started with option
+ `--ssl.protocol 6` to make use of TLS 1.3.
To configure the TLS version for arangod instances started by the ArangoDB starter,
one can use the `--all.ssl.protocol=VALUE` startup option for the ArangoDB starter,
@@ -8262,59 +10760,130 @@ devel
- 4 = TLSv1
- 5 = TLSv1.2
- 6 = TLSv1.3
- - 9 = generic TLS
-* Added TransactionStatistics to ServerStatistics (transactions started /
+* Fixed parsing of "NOT IN" in AQL, which previously didn't correctly parse
+ "NOT IN_RANGE(...)" because it checked if the "NOT" token was followed by
+ whitespace and then the two letters "IN".
+
+* Changed log level for message "keep alive timeout - closing stream!" from INFO to
+ DEBUG.
+
+* Don't create temporary directories named "arangosh_XXXXXX" anymore.
+
+* Add TransactionStatistics to ServerStatistics (transactions started /
aborted / committed and number of intermediate commits).
+* Upgraded version of bundled curl library to 7.65.3.
+
+* Don't retry persisting follower information for collections/shards already
+ dropped. The previous implementation retried (unsuccessfully in this case)
+ for up to 2 hours, occupying one scheduler thread.
+
+* Fixed internal issue #4407: remove storage engine warning.
+
* Agents to remove callback entries when responded to with code 404.
-* Added AQL function DATE_ROUND to bin a date/time into a set of equal-distance
- buckets.
+* Fixed internal issue #622: Analyzer cache is now invalidated for dropped database.
-* Enforced the valid date range for working with date/time in AQL. The valid date
- ranges for any AQL date/time function are:
+* Show query string length and cacheability information in query explain output.
- - for string date/time values: `"0000-01-01T00:00:00.000Z"` (including) up to
- `"9999-12-31T23:59:59.999Z"` (including)
- - for numeric date/time values: -62167219200000 (including) up to 253402300799999
- (including). These values are the numeric equivalents of
- `"0000-01-01T00:00:00.000Z"` and `"9999-12-31T23:59:59.999Z"`.
+* The AQL functions `FULLTEXT`, `NEAR`, `WITHIN` and `WITHIN_RECTANGLE` are now
+ marked as cacheable, so they can be used in conjunction with the AQL query
+ results cache on a single server.
- Any date/time values outside the given range that are passed into an AQL date
- function will make the function return `null` and trigger a warning in the query,
- which can optionally be escalated to an error and stop the query.
+* Fixed issue #9612: fix ArangoSearch views getting out of sync with collection.
- Any date/time operations that produce date/time outside the valid ranges stated
- above will make the function return `null` and trigger a warning too. An example
- for this is
+* Fix an issue with potential spurious wakeups in the internal scheduler code.
- DATE_SUBTRACT("2018-08-22T10:49:00+02:00", 100000, "years")
+* Changes the _idle_ timeout of stream transactions to 10 seconds and the total
+ per DB server size of stream transaction data to 128 MB. The idle timer is
+ restarted after every operation in a stream transaction, so it is not the
+ total timeout for the transaction.
-* Fixed bug in MoveShard::abort which causes a duplicate entry in the follower list. (Internal Bug #4378)
+ These limits were documented in the manual for stream transactions since 3.5.0,
+ but are enforced only as of 3.5.1. Enforcing the limits is useful to free up
+ resources from abandoned transactions.
+
+* Consistently honor the return value of all attempts to queue tasks in the
+ internal scheduler.
+
+ Previously some call sites did not check the return value of internal queueing
+ operations, and if the scheduler queue was full, operations that were thought
+  to be requeued were silently dropped. Now, such failures are reacted upon.
+  Requeuing an important task with a time offset (Scheduler::queueDelay) is
+  now also retried on failure (queue full) for up to five minutes. If after
+ five minutes such a task still cannot be queued, a fatal error will be logged
+ and the server process will be aborted.
+
+* Made index selection much more deterministic in case there are
+ multiple competing indexes.
+
+* Fixed issue #9654: honor value of `--rocksdb.max-write-buffer-number` if it
+ is set to at least 9 (which is the recommended value). Ignore it if it is
+ set to a lower value than 9, and warn the end user about it.
+
+ Previous versions of ArangoDB always silently ignored the value of this setting
+ and effectively hard-coded the value to 9.
+
+* Fixed internal issue #4378: fix bug in MoveShard::abort which causes a
+ duplicate entry in the follower list.
* Fixed cut'n'pasting code from the documentation into arangosh.
-* Added initial support for wgs84 reference ellipsoid in GEO_DISTANCE through third
- optional parameter to AQL function.
+* Fixed issue #9652: fixed a wrong ArangoSearch name collision that raised a
+  "Name collision detected" error during creation of a custom analyzer with
+  stopwords.
+
+* Fixed an agency bug found in Windows tests.
+
+* Added initial support for wgs84 reference ellipsoid in GEO_DISTANCE through
+ third optional parameter to AQL function.
* Added support for area calculations with GEO_AREA AQL function.
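
  A sketch of both calls (coordinates made up; GEO_DISTANCE is assumed to take
  the reference ellipsoid as an optional third parameter, GEO_AREA as an
  optional second one):

    RETURN GEO_DISTANCE([6.96, 50.94], [13.40, 52.52], "wgs84")

    RETURN GEO_AREA({
      type: "Polygon",
      coordinates: [[[0, 0], [1, 0], [1, 1], [0, 0]]]
    }, "wgs84")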
+* Correct RocksDB statistics to report sums from column families instead of a
+  single value from the default column family.
+
+* Fixed agency nodes to not create bogus keys on delete / observe / unobserve.
+
+* Fixed issue #9660: fix multiple plans processing during optimization of AQL
+ query which uses ArangoSearch scorers.
+
+* Fixed issue #9679: improved error message for FULLTEXT function invocation
+ failures.
+
+* Fixed error message "Invalid argument: assume_tracked is set but it is not
+ tracked yet" when trying to write to the same keys in concurrent RocksDB
+ transactions. This error will now be reported as a "Lock timeout" error,
+ with error code 1200.
+
* Added resign leadership job to supervision.
+* Hide MMFiles-specific information in web UI (in detail view of a collection)
+ when the storage engine is not MMFiles or when the information is not
+ available.
+
* Keep followers in sync if the old leader resigned and stopped writes.
* Abort a FailedLeader job when its _to_ server fails.
-* Removed content from Documentation/Books, but keeping the subfolders.
- The documentation is in a separate repository (except DocuBlocks and Scripts):
- https://github.com/arangodb/docs.git
+* Decreased unnecessary wait times for agency callbacks in case they were
+  called earlier than expected by the main thread.
-* Updated TOKENS function to deal with primitive types and arrays.
+* Make arangosh not close the connection after the user aborts an operation.
+ This restores the same behavior as in previous versions of ArangoDB, which
+ also left the connection open.
-* Fixed agency nodes to not create bogus keys on delete / observe / unobserve.
+* Refactor maintenance to use a TakeoverShardLeadership job. This fixes a bug
+  where a shard follower could have set the wrong leader for the shard locally.
-* Fixed an agency bug found in Windows tests.
+
+v3.5.0 (2019-08-20)
+-------------------
+
+* Fix web UI link to ArangoSearch views documentation.
+
+* Rebuild swagger for web UI and documentation.
v3.5.0-rc.7 (2019-08-01)
@@ -8471,7 +11040,7 @@ v3.5.0-rc.3 (2019-05-31)
* The system collection '_jobs' will from now on be created with non-unique, non-sparse indexes.
-* Bugfix for SmartGraph traversals with uniqueVertices: path, which could
+* Bugfix for smart graph traversals with uniqueVertices: path, which could
sometimes lead to erroneous traversal results.
* Pregel algorithms can be run with the option "useMemoryMaps: true" to be
@@ -8626,7 +11195,7 @@ v3.5.0-rc.1 (2019-05-14)
local join operation.
* Fixed internal issue #3815: fixed the removal of connected edges when
- removing a vertex graph node in a SmartGraph environment.
+ removing a vertex graph node in a smart graph environment.
* Show startup warning in case kernel setting `vm.overcommit_memory` is set
to a value of 2 and the jemalloc memory allocator is in use. This combination
@@ -8742,7 +11311,7 @@ v3.5.0-rc.1 (2019-05-14)
* Allowed MoveShard from leader to a follower, thus swapping the two.
-* Supervision fix: SatelliteCollections, various fixes.
+* Supervision fix: Satellite collections, various fixes.
* Added coordinator route for agency dump.
@@ -9118,7 +11687,7 @@ v3.4.7 (2019-07-02)
* Pregel algorithms can be run with the option "useMemoryMaps: true" to be able to run algorithms
on data that is bigger than the available RAM.
-* Bugfix for SmartGraph traversals with uniqueVertices: path, which could
+* Bugfix for smart graph traversals with uniqueVertices: path, which could
sometimes lead to erroneous traversal results
* The system-collection '_jobs' will from now on use non-unique, non-sparse indexes.
@@ -9202,7 +11771,7 @@ v3.4.6 (2019-05-21)
* removed bug during start up with a single agent that led to a dbserver crash.
* fix the creation of debug packages (in the web interface) for queries that
- involve SmartGraphs and/or multiple edge collections from a traversal
+ involve smart graphs and/or multiple edge collections from a traversal
* add --compress-output flag to arangodump. Activates gzip compression for
collection data. Metadata files, such as .structure.json and .view.json,
@@ -9239,7 +11808,7 @@ v3.4.5 (2019-03-27)
active Pregel jobs executing
* fixed internal issue #3815: fixed the removal of connected edges when
- removing a vertex graph node in a SmartGraph environment.
+ removing a vertex graph node in a smart graph environment.
* added AQL functions CRC32 and FNV64 for hashing data
@@ -9268,7 +11837,7 @@ v3.4.5 (2019-03-27)
double resync was made
* don't check for the presence of ArangoDB upgrades available when firing up an
- arangosh Enterprise Edition build
+ arangosh enterprise edition build
* added startup option `--rocksdb.allow-fallocate`
@@ -9696,7 +12265,7 @@ v3.4.1 (2018-12-19)
v3.4.0 (2018-12-06)
-------------------
-* Add license key checking to Enterprise Edition in Docker containers.
+* Add license key checking to enterprise version in Docker containers.
v3.4.0-rc.5 (2018-11-29)
@@ -9911,7 +12480,7 @@ v3.4.0-rc.3 (2018-10-23)
* fix internal issue #2785: web ui's sort dialog sometimes got rendered, even
if it should not.
-* fix internal issue #2764: the waitForSync property of a SatelliteCollection
+* fix internal issue #2764: the waitForSync property of a satellite collection
could not be changed via the Web UI
* dynamically manage libcurl's number of open connections to increase performance
@@ -10326,7 +12895,7 @@ v3.4.0-rc.1 (2018-09-06)
The `padded` key generator generates keys of a fixed length (16 bytes) in
ascending lexicographical sort order.
-* The REST API of `/_admin/status` added: "operationMode" field with same meaning as
+* The REST API of `/_admin/status` added: "operationMode" field with same meaning as
the "mode" field and field "readOnly" that has the inverted meaning of the field
"writeOpsEnabled". The old field names will be deprecated in upcoming versions.
@@ -10796,7 +13365,7 @@ v3.3.19 (2018-10-20)
* fix internal issue #2785: web ui's sort dialog sometimes got rendered, even
if it should not.
-* fix internal issue #2764: the waitForSync property of a SatelliteCollection
+* fix internal issue #2764: the waitForSync property of a satellite collection
could not be changed via the Web UI
* improved logging in case of replication errors
@@ -11365,7 +13934,7 @@ v3.3.8 (2018-04-24)
are still existing and are supposed to be dropped on restore we ended up in
duplicate name error. This is now gone and the SmartGraph is correctly restored.
-* fix lookups by `_id` in SmartGraph edge collections
+* fix lookups by `_id` in smart graph edge collections
* improve startup resilience in case there are datafile errors (MMFiles)
@@ -11406,10 +13975,10 @@ v3.3.7 (2018-04-11)
* fixed internal issue #2237: AQL queries on collections with replicationFactor:
"satellite" crashed arangod in single server mode
-* fixed restore of SatelliteCollections: replicationFactor was set to 1 during
+* fixed restore of satellite collections: replicationFactor was set to 1 during
restore
-* fixed dump and restore of SmartGraphs:
+* fixed dump and restore of smart graphs:
a) The dump will not include the hidden shadow collections anymore, they were dumped
accidentally and only contain duplicated data.
b) Restore will now ignore hidden shadow collections as all data is contained
@@ -11865,7 +14434,7 @@ v3.3.rc1 (2017-11-17)
* performance improvements for full collection scans and a few other operations
in MMFiles engine
-* added `--rocksdb.encryption-key-generator` to Enterprise Edition
+* added `--rocksdb.encryption-key-generator` for enterprise
* removed `--compat28` parameter from arangodump and replication API
@@ -12230,7 +14799,7 @@ v3.2.7 (2017-11-13)
these changes speed up arangodump in cluster context
-* SmartGraphs now return a proper inventory in response to replication inventory
+* smart graphs now return a proper inventory in response to replication inventory
requests
* fixed issue #3618: Inconsistent behavior of OR statement with object bind parameters
@@ -13292,7 +15861,7 @@ v3.1.17 (2017-04-04)
* fixed issue #2397
-* ui - fixed SmartGraph option not appearing
+* ui - fixed smart graph option not appearing
* fixed issue #2389
@@ -13417,7 +15986,7 @@ v3.1.11 (2017-02-17)
* fixed a race between connection closing and sending out last chunks of data to clients
when the "Connection: close" HTTP header was set in requests
-* ui: optimized SmartGraph creation usability
+* ui: optimized smart graph creation usability
* ui: fixed #2308
@@ -15305,7 +17874,7 @@ v2.8.0-alpha1 (2015-12-03)
the HTTP response header `"Server: ArangoDB"` in its HTTP responses. By default,
the option is turned off so the header is still sent as usual.
-* added new AQL function `UNSET_RECURSIVE` to recursively unset attributes from
+* added new AQL function `UNSET_RECURSIVE` to recursively unset attributes from
objects/documents
* switched command-line editor in ArangoShell and arangod to linenoise-ng
diff --git a/CMakeLists.txt b/CMakeLists.txt
index aaf543798d4b..b4d23ae1a4e8 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -82,8 +82,8 @@ set(ARANGODB_VERSION_MINOR "11")
# when building the nightly ARANGODB_VERSION_PATCH will be set
if (NOT DEFINED ARANGODB_VERSION_PATCH)
- set(ARANGODB_VERSION_PATCH "0")
- set(ARANGODB_VERSION_RELEASE_TYPE "devel")
+ set(ARANGODB_VERSION_PATCH "14")
+ set(ARANGODB_VERSION_RELEASE_TYPE "1")
set(ARANGODB_VERSION_RELEASE_NUMBER "")
else()
unset (ARANGODB_VERSION_RELEASE_TYPE) # do not remove space
@@ -223,7 +223,7 @@ set(ARANGODB_PACKAGE_VENDOR "ArangoDB GmbH")
set(ARANGODB_PACKAGE_CONTACT "info@arangodb.com")
set(ARANGODB_DISPLAY_NAME "ArangoDB")
set(ARANGODB_URL_INFO_ABOUT "https://www.arangodb.com")
-set(ARANGODB_HELP_LINK "https://www.arangodb.com/docs/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/")
+set(ARANGODB_HELP_LINK "https://docs.arangodb.com/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR}/")
set(ARANGODB_CONTACT "hackers@arangodb.com")
set(ARANGODB_FRIENDLY_STRING "ArangoDB - the native multi-model NoSQL database")
@@ -260,14 +260,27 @@ set(INSTALL_CONFIGFILES_LIST)
# update files containing VERSION information
# ------------------------------------------------------------------------------
-string(TIMESTAMP ARANGODB_BUILD_DATE "%Y-%m-%d %H:%M:%S")
-
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/lib/Basics/build.h.in"
"${CMAKE_CURRENT_BINARY_DIR}/lib/Basics/build.h"
NEWLINE_STYLE UNIX
)
+option(ARANGODB_BUILD_DATE "Specific build date set from the outside (leave empty to auto-generate)" "")
+if (ARANGODB_BUILD_DATE STREQUAL "" OR ARANGODB_BUILD_DATE STREQUAL "OFF")
+ if (NOT EXISTS "${CMAKE_CURRENT_BINARY_DIR}/lib/Basics/build-date.h")
+ # auto-generate build date
+ string(TIMESTAMP ARANGODB_BUILD_DATE "%Y-%m-%d %H:%M:%S")
+ set(GENERATE_BUILD_DATE ON)
+ else ()
+ # build-date.h file already exists. whatever is in there will be kept
+ set(GENERATE_BUILD_DATE OFF)
+ endif ()
+else ()
+ # forcefully recreate build-date.h file from provided date
+ set(GENERATE_BUILD_DATE ON)
+endif ()
+
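+# A sketch of pinning the build date at configure time (assumption: the option is
+# passed on the command line like any other cache variable, using the
+# "%Y-%m-%d %H:%M:%S" format from above), e.g. for reproducible builds:
+#   cmake -DARANGODB_BUILD_DATE="2023-08-01 12:00:00" <src>
+# Leaving it empty or OFF keeps the auto-generation behavior above.
+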
if (NOT DEFINED GENERATE_BUILD_DATE OR GENERATE_BUILD_DATE)
set(GENERATE_BUILD_DATE ON CACHE INTERNAL "whether we should generate the build date")
configure_file(
@@ -275,9 +288,9 @@ if (NOT DEFINED GENERATE_BUILD_DATE OR GENERATE_BUILD_DATE)
"${CMAKE_CURRENT_BINARY_DIR}/lib/Basics/build-date.h"
NEWLINE_STYLE UNIX
)
-else()
+else ()
set(GENERATE_BUILD_DATE OFF CACHE INTERNAL "whether we should generate the build date")
-endif()
+endif ()
configure_file(
"${CMAKE_CURRENT_SOURCE_DIR}/lib/Basics/VERSION.in"
@@ -289,46 +302,70 @@ configure_file(
## Find the git revision
################################################################################
-find_program (GIT_EXE git)
-if (DEFINED GIT_EXE AND IS_DIRECTORY "${CMAKE_SOURCE_DIR}/.git")
+function(determine_repository_version source_dir build_repository have_build_repository)
+ # Get commit hash
execute_process(
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
-
- COMMAND ${GIT_EXE} describe --all --tags --long --dirty=-dirty
- OUTPUT_VARIABLE GIT_OUTPUT)
+ WORKING_DIRECTORY ${source_dir}
+ COMMAND ${GIT_EXE} rev-parse --short HEAD
+ OUTPUT_VARIABLE COMMIT_RAW
+ )
+ if (NOT COMMIT_RAW)
+ message(FATAL_ERROR "Can't extract current commit with the command: 'git rev-parse --short HEAD'")
+ endif()
- # this may fail on shallow clones that only knows about a limited number of commits.
- # if there is an older merged revision the head, it may not be available to git.
- if (NOT GIT_OUTPUT)
- set(ARANGODB_BUILD_REPOSITORY "GIT FAILED TO RETRIEVE THE VERSION - SHALLOW CLONE?")
- set(HAVE_ARANGODB_BUILD_REPOSITORY "1")
+ string(STRIP ${COMMIT_RAW} COMMIT_SHORT)
+
+ if (NOT DEFINED BUILD_REPO_INFO OR BUILD_REPO_INFO STREQUAL "default")
+ execute_process(
+ WORKING_DIRECTORY ${source_dir}
+ COMMAND ${GIT_EXE} branch --show-current
+ OUTPUT_VARIABLE BRANCH_NAME_RAW)
+ if (NOT BRANCH_NAME_RAW)
+      # For example, in Docker builds we check out a detached commit, so no branch can be detected
+ set(${build_repository} "${COMMIT_SHORT}" PARENT_SCOPE)
+ set(${have_build_repository} "1" PARENT_SCOPE)
else()
- string(STRIP ${GIT_OUTPUT} REPOSITORY_VERSION)
- set(ARANGODB_BUILD_REPOSITORY ${REPOSITORY_VERSION})
- set(HAVE_ARANGODB_BUILD_REPOSITORY "1")
+ string(STRIP ${BRANCH_NAME_RAW} BRANCH_NAME)
+ set(${build_repository} "refs/${BRANCH_NAME} ${COMMIT_SHORT}" PARENT_SCOPE)
+ set(${have_build_repository} "1" PARENT_SCOPE)
endif()
-else ()
+ elseif(BUILD_REPO_INFO STREQUAL "release")
+ if ("${ARANGODB_VERSION_RELEASE_NUMBER}" STREQUAL "" AND ARANGODB_VERSION_RELEASE_TYPE MATCHES "^[1-9][0-9]*$")
+ string(REPLACE "-" "." RELEASE_TAG ${ARANGODB_VERSION})
+ else()
+ set(RELEASE_TAG ${ARANGODB_VERSION})
+ endif()
+ set(RELEASE_TAG "v${RELEASE_TAG}")
+ execute_process(
+ WORKING_DIRECTORY ${source_dir}
+ COMMAND ${GIT_EXE} describe --all --tags --match ${RELEASE_TAG}
+ OUTPUT_VARIABLE TAG_RAW)
+ if (NOT TAG_RAW)
+    message(FATAL_ERROR "Can't extract tag using the command: 'git describe --all --tags --match ${RELEASE_TAG}'")
+ else()
+ string(STRIP ${TAG_RAW} TAG)
+ set(${build_repository} "refs/${TAG} ${COMMIT_SHORT}" PARENT_SCOPE)
+ set(${have_build_repository} "1" PARENT_SCOPE)
+ endif()
+ elseif(BUILD_REPO_INFO STREQUAL "nightly")
+ set(${build_repository} "refs/head/${ARANGODB_VERSION_MAJOR}.${ARANGODB_VERSION_MINOR} ${COMMIT_SHORT}" PARENT_SCOPE)
+ set(${have_build_repository} "1" PARENT_SCOPE)
+ else ()
+ set(${build_repository} "GIT FAILED TO RETRIEVE THE VERSION - UNSUPPORTED BUILD MODE" PARENT_SCOPE)
+ set(${have_build_repository} "1" PARENT_SCOPE)
+ endif()
+endfunction()
+
+find_program (GIT_EXE git)
+if (DEFINED GIT_EXE AND IS_DIRECTORY "${CMAKE_SOURCE_DIR}/.git")
+ determine_repository_version(${CMAKE_SOURCE_DIR} ARANGODB_BUILD_REPOSITORY HAVE_ARANGODB_BUILD_REPOSITORY)
+else()
set(ARANGODB_BUILD_REPOSITORY "")
set(HAVE_ARANGODB_BUILD_REPOSITORY "0")
endif()
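+
+# A sketch of the configure-time values understood by determine_repository_version()
+# above (assumption: BUILD_REPO_INFO is passed like any other cache variable):
+#   cmake -DBUILD_REPO_INFO=default <src>   # "refs/<branch> <short-commit>" (or just the commit)
+#   cmake -DBUILD_REPO_INFO=release <src>   # "refs/<matching-release-tag> <short-commit>"
+#   cmake -DBUILD_REPO_INFO=nightly <src>   # "refs/head/<major>.<minor> <short-commit>"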
if (DEFINED GIT_EXE AND USE_ENTERPRISE AND IS_DIRECTORY "${CMAKE_SOURCE_DIR}/enterprise/.git")
- execute_process(
- WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}/enterprise
-
- COMMAND ${GIT_EXE} describe --all --tags --long --dirty=-dirty
- OUTPUT_VARIABLE GIT_OUTPUT)
-
- # this may fail on shallow clones that only knows about a limited number of commits.
- # if there is an older merged revision the head, it may not be available to git.
- if (NOT GIT_OUTPUT)
- set(ENTERPRISE_BUILD_REPOSITORY "GIT FAILED TO RETRIEVE THE VERSION - SHALLOW CLONE?")
- set(HAVE_ENTERPRISE_BUILD_REPOSITORY "1")
- else()
- string(STRIP ${GIT_OUTPUT} REPOSITORY_VERSION)
- set(ENTERPRISE_BUILD_REPOSITORY ${REPOSITORY_VERSION})
- set(HAVE_ENTERPRISE_BUILD_REPOSITORY "1")
- endif()
+ determine_repository_version(${CMAKE_SOURCE_DIR}/enterprise ENTERPRISE_BUILD_REPOSITORY HAVE_ENTERPRISE_BUILD_REPOSITORY)
else ()
set(ENTERPRISE_BUILD_REPOSITORY "")
set(HAVE_ENTERPRISE_BUILD_REPOSITORY "0")
@@ -349,6 +386,7 @@ configure_file(
if (VERBOSE)
message(STATUS "ARANGODB_BUILD_REPOSITORY=\"${ARANGODB_BUILD_REPOSITORY}\"")
+ message(STATUS "ENTERPRISE_BUILD_REPOSITORY=\"${ENTERPRISE_BUILD_REPOSITORY}\"")
endif ()
################################################################################
@@ -601,51 +639,8 @@ endif ()
## TARGET ARCHITECTURE
################################################################################
-set(ARANGODB_SSE42_FLAGS "")
-set(BUILDING_FOR_ARM64 OFF)
-if (NOT WINDOWS)
- include(TargetArch)
-
- target_architecture(CMAKE_TARGET_ARCHITECTURES)
- list(LENGTH CMAKE_TARGET_ARCHITECTURES cmake_target_arch_len)
-
- if (NOT "${cmake_target_arch_len}" EQUAL "1")
- set(CMAKE_TARGET_ARCHITECTURE_UNIVERSAL TRUE)
- set(CMAKE_TARGET_ARCHITECTURE_CODE "universal")
- else ()
- set(CMAKE_TARGET_ARCHITECTURE_UNIVERSAL FALSE)
- set(CMAKE_TARGET_ARCHITECTURE_CODE "${CMAKE_TARGET_ARCHITECTURES}")
- endif ()
-
- if (NOT CMAKE_SYSTEM_PROCESSOR STREQUAL CMAKE_TARGET_ARCHITECTURE_CODE)
- # Probably we are cross-compiling or CMAKE_SYSTEM_PROCESSOR is an empty string.
- message(WARNING "Changing CMAKE_SYSTEM_PROCESSOR from ${CMAKE_SYSTEM_PROCESSOR} to ${CMAKE_TARGET_ARCHITECTURE_CODE}")
- set(CMAKE_SYSTEM_PROCESSOR "${CMAKE_TARGET_ARCHITECTURE_CODE}")
- endif()
-
- include(VcMacros)
-
- # Detect target architecture properties. This honors
- # any target architecture set via -DTARGET_ARCHITECTURE=...
- include(OptimizeForArchitecture)
- OptimizeForArchitecture()
-
- if (USE_SSE4_2)
- set(ARANGODB_SSE42_FLAGS "-msse4.2")
- endif ()
-
- set(BASE_FLAGS "${Vc_ARCHITECTURE_FLAGS} ${BASE_FLAGS}")
-endif ()
-
-if(CMAKE_SYSTEM_PROCESSOR MATCHES "(aarch64.*|AARCH64.*|arm64.*|ARM64.*)")
- set(BUILDING_FOR_ARM64 ON)
-else()
- set(BUILDING_FOR_ARM64 OFF)
-endif()
-
-message(STATUS "Building for processor ${CMAKE_SYSTEM_PROCESSOR}")
-
-set(ARCHITECTURE_OPTIMIZATIONS "\"${Vc_ARCHITECTURE_FLAGS}\"")
+include(OptimizeForArchitecture)
+set(BASE_FLAGS "${BASE_FLAGS} ${ARCHITECTURE_OPTIMIZATIONS}")
################################################################################
## BACKTRACE
@@ -668,11 +663,11 @@ endif()
################################################################################
# Allow to prohibit assembler optimization code explicitly
-if (BUILDING_FOR_ARM64)
- SET(ASM_OPTIMIZATIONS_DEFAULT OFF)
-else (BUILDING_FOR_ARM64)
+if (ARCH_AMD64)
SET(ASM_OPTIMIZATIONS_DEFAULT ON)
-endif (BUILDING_FOR_ARM64)
+else (ARCH_AMD64)
+ SET(ASM_OPTIMIZATIONS_DEFAULT OFF)
+endif (ARCH_AMD64)
option(ASM_OPTIMIZATIONS "whether hand-optimized assembler code should be used"
${ASM_OPTIMIZATIONS_DEFAULT})
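+# A sketch of overriding the default from the command line (assumption:
+# option() variables can be pre-set like any other cache variable):
+#   cmake -DASM_OPTIMIZATIONS=OFF <src>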
@@ -800,10 +795,13 @@ endfunction()
# JEMALLOC
# ------------------------------------------------------------------------------
-option(USE_JEMALLOC_PROF "use jemalloc profiler" OFF)
+option(USE_JEMALLOC_PROF "use jemalloc profiler" ON)
if (USE_JEMALLOC)
add_definitions("-DARANGODB_HAVE_JEMALLOC=1")
+else ()
+ # Must not compile in profiling stuff if we are not using JEMALLOC
+ set(USE_JEMALLOC_PROF OFF)
endif ()
if (USE_JEMALLOC_PROF)
@@ -1368,13 +1366,11 @@ add_custom_target(clean_autogenerated_files
message(STATUS "building for git revision: ${ARANGODB_BUILD_REPOSITORY}")
if (USE_ENTERPRISE)
- add_custom_target(arangodb
- DEPENDS arangod arangosh arangodump arangoexport arangoimport arangorestore arangobench arangobackup)
add_definitions("-DUSE_ENTERPRISE=1")
add_subdirectory(enterprise)
-else ()
- add_custom_target(arangodb
- DEPENDS arangod arangosh arangodump arangoexport arangoimport arangorestore arangobench)
endif ()
+add_custom_target(arangodb
+ DEPENDS arangod client-tools)
+
add_subdirectory(utils/gdb-pretty-printers/immer/test)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 727bc1c28907..25b013421620 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -30,7 +30,7 @@ yet.
- If the modifications change any documented behavior or add new features,
document the changes. It should be written in American English.
- The documentation can be found in [docs repository](https://github.com/arangodb/docs#readme).
+  The documentation can be found in the [`docs-hugo` repository](https://github.com/arangodb/docs-hugo#readme).
- When done, run the complete test suite and make sure all tests pass.
@@ -287,10 +287,15 @@ favorite browser and open the web interface.
All changes to any source will automatically re-build and reload your browser.
Enjoy :)
-### Cross Origin Policy (CORS) ERROR
+#### Cross Origin Policy (CORS) ERROR
-Our front-end development server currently runs on port:`3000`, while the backend runs on port:`8529` respectively. This implies that when the front-end sends a request to the backend would result in Cross-Origin-Policy security checks which recently got enforced by some browsers for security reasons. Until recently, we never had reports of CORS errors when running both the backend and front-end dev servers independently, however,
-we recently confirmed that this error occurs in ( Chrome v: 98.0.4758.102 and Firefox v: 96.0.1 ).
+Our front-end development server currently runs on port `3000`, while the backend
+runs on port `8529`. This implies that when the front-end sends a request to the
+backend, it triggers Cross-Origin-Policy security checks, which some browsers
+recently started enforcing for security reasons. Until recently, we never had
+reports of CORS errors when running the backend and front-end dev servers
+independently; however, we have now confirmed that this error occurs in
+Chrome version 98.0.4758.102 and Firefox version 96.0.1.
In case you run into CORS errors while running the development server, here is a quick fix:
@@ -346,6 +351,58 @@ For example to commit a patch for the transitive dependency `is-wsl` of the depe
and then run `npx patch-package node-netstat/is-wsl` in `js/node` and commit the resulting
patch file in `js/node/patches`.
+#### Build the HTTP API documentation for Swagger-UI
+
+The REST HTTP API of the ArangoDB server is described using the OpenAPI
+specification (formerly Swagger). The source code is in the documentation
+repository at https://github.com/arangodb/docs-hugo.
+
+To build the `api-docs.json` file for viewing the API documentation in the
+Swagger-UI of the web interface (**SUPPORT** section, **Rest API** tab), run
+the following commands in a terminal:
+
+1. Get a working copy of the documentation content with Git:
+
+ `git clone https://github.com/arangodb/docs-hugo`
+
+2. Enter the `docs-hugo` folder:
+
+ `cd docs-hugo`
+
+3. Optional: Switch to a tag, branch, or commit if you want to build the
+ API documentation for a specific version of the docs:
+
+   `git checkout <tag-or-branch-or-commit>`
+
+4. Enter the folder of the Docker toolchain, `amd64` on the x86-64 architecture
+ and `arm64` on ARM CPUs:
+
+ ```shell
+ cd toolchain/docker/amd64 # x86-64
+ cd toolchain/docker/arm64 # ARM 64-bit
+ ```
+
+5. Set the environment variable `ENV` to any value other than `local` to make
+ the documentation tooling not start a live server in watch mode but rather
+   create a static build and exit:
+
+ ```shell
+ export ENV=static # Bash
+ set -xg ENV static # Fish
+ $Env:ENV='static' # PowerShell
+ ```
+
+6. Run Docker Compose using the plain build configuration for the documentation:
+
+ `docker compose -f docker-compose.plain-build.yml up --abort-on-container-exit`
+
+7. When the docs building finishes successfully, you can find the `api-docs.json`
+   files in `site/data/<version>/`.
+
+8. Copy the respective `api-docs.json` file into the ArangoDB working copy or
+ installation folder under `js/apps/system/_admin/aardvark/APP/api-docs.json`
+ and refresh the web interface.
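+
+As a convenience, the steps above can be condensed into a single shell session
+(a sketch; assumes Bash, the `amd64` toolchain, and that `<version>` and
+`<arangodb>` are replaced with the docs version you built and your ArangoDB
+working copy, respectively):
+
+```shell
+git clone https://github.com/arangodb/docs-hugo
+cd docs-hugo/toolchain/docker/amd64
+export ENV=static
+docker compose -f docker-compose.plain-build.yml up --abort-on-container-exit
+cp ../../../site/data/<version>/api-docs.json \
+  <arangodb>/js/apps/system/_admin/aardvark/APP/api-docs.json
+```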
+
---
## Running
@@ -358,6 +415,12 @@ Depending on the platform, ArangoDB tries to locate the temporary directory:
- Windows: the [W32 API function GetTempPath()](https://msdn.microsoft.com/en-us/library/windows/desktop/aa364992%28v=vs.85%29.aspx) is called
- all platforms: `--temp.path` overrules the above system provided settings.
+Our testing framework uses this path in the cluster test cases to set an
+environment variable `ARANGOTEST_ROOT_DIR` which is global to the running
+cluster, but specific to the current test suite. You can access this as
+`global.instanceManager.rootDir` in JavaScript client tests and via the
+environment variable at the C++ level.
+
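+A minimal sketch of inspecting it on the environment-variable level (assuming a
+Bash-like shell in a process spawned by the test framework):
+
+```shell
+echo "ARANGOTEST_ROOT_DIR=${ARANGOTEST_ROOT_DIR:-<unset>}"
+```
+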
### Local Cluster Startup
The script `scripts/startLocalCluster` helps you to quickly fire up a testing
@@ -801,17 +864,19 @@ There are several major places where unittests live:
Special patterns in the test filenames are used to select tests to be executed
or skipped depending on parameters:
-| Substring | Description |
-| :-------------- | :-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
-| `-cluster` | These tests will only run if clustering is tested (option 'cluster' needs to be true). |
-| `-noncluster` | These tests will only run if no cluster is used (option 'cluster' needs to be false) |
-| `-noasan` | These tests will not be ran if *san instrumented binaries are used |
-| `-noinstr` | These tests will not be ran if instrumented binaries are used, be it *san or gcov |
-| `-nocov` | These tests will not be ran if gcov instrumented binaries are used. |
-| `-timecritical` | These tests are critical to execution time - and thus may fail if arangod is to slow. This may happen i.e. if you run the tests in valgrind, so you want to avoid them since they will fail anyways. To skip them, set the option `skipTimeCritical` to _true_. |
-| `-spec` | These tests are run using the mocha framework instead of jsunity. |
-| `-nightly` | These tests produce a certain thread on infrastructure or the test system, and therefore should only be executed once per day. |
-| `-grey` | These tests are currently listed as "grey", which means that they are known to be unstable or broken. These tests will not be executed by the testing framework if the option `--skipGrey` is given. If `--onlyGrey` option is given then non-"grey" tests are skipped. See `tests/Greylist.txt` for up-to-date information about greylisted tests. Please help to keep this file up to date. |
+| Substring                | Description |
+| :----------------------- | :---------- |
+| `-cluster`               | These tests will only run if clustering is tested (option 'cluster' needs to be true). |
+| `-noncluster`            | These tests will only run if no cluster is used (option 'cluster' needs to be false). |
+| `-noinstr_or_noncluster` | These tests will not be run if instrumented binaries are used and we are running in cluster mode. |
+| `-noasan`                | These tests will not be run if *san-instrumented binaries are used. |
+| `-noinstr`               | These tests will not be run if instrumented binaries are used, be it *san or gcov. |
+| `-nocov`                 | These tests will not be run if gcov-instrumented binaries are used. |
+| `-fp`                    | These tests will only be run if failure points are enabled when building the binaries used in the tests. |
+| `-timecritical`          | These tests are critical to execution time - and thus may fail if arangod is too slow. This may happen e.g. if you run the tests in valgrind, so you want to avoid them since they will fail anyway. To skip them, set the option `skipTimeCritical` to _true_. |
+| `-spec`                  | These tests are run using the mocha framework instead of jsunity. |
+| `-nightly`               | These tests produce a certain load on infrastructure or the test system, and therefore should only be executed once per day. |
+| `-grey`                  | These tests are currently listed as "grey", which means that they are known to be unstable or broken. These tests will not be executed by the testing framework if the option `--skipGrey` is given. If the `--onlyGrey` option is given, then non-"grey" tests are skipped. See `tests/Greylist.txt` for up-to-date information about greylisted tests. Please help to keep this file up to date. |
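+
+For example, a (hypothetical) test file named `shell-foo-cluster-noasan.js`
+would only be scheduled for cluster runs and would be skipped whenever
+*san-instrumented binaries are used.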
### JavaScript Framework
@@ -1199,6 +1264,12 @@ Debugging a storage engine:
(gdb) r
arangod> require("jsunity").runTest("tests/js/client/shell/shell-client.js");
+### Filtering GDB stacktraces
+
+`scripts/filter_stacktraces.js [list of gdb output files] --extremeVerbosity true`
+- reads `js/client/modules/@arangodb/testutils/filter_gdb_stacks.json`
+- applies the filter to all given files containing gdb stacktrace output,
+  filtering out the threads listed in the JSON file.
+- `--extremeVerbosity` prints unfiltered stacks in order to ease adding them to
+  `filter_gdb_stacks.json`.
+
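+A (hypothetical) invocation over two gdb output files could look like this:
+
+```shell
+scripts/filter_stacktraces.js /tmp/gdb-crash-1.log /tmp/gdb-crash-2.log --extremeVerbosity true
+```
+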
### Forcing downgrade from VPack to JSON
While velocypack is better for the machine to machine communication, JSON does a better job
diff --git a/Documentation/Books/AQL/.gitkeep b/Documentation/Books/AQL/.gitkeep
deleted file mode 100644
index 936ca3adc4e3..000000000000
--- a/Documentation/Books/AQL/.gitkeep
+++ /dev/null
@@ -1,5 +0,0 @@
-Git can not track empty repositories.
-This file ensures that the directory is kept.
-
-Some of the old documentation building scripts are still
-used by the new system which copy files into this folder.
\ No newline at end of file
diff --git a/Documentation/Books/Drivers/.gitkeep b/Documentation/Books/Drivers/.gitkeep
deleted file mode 100644
index 37f8db1fe281..000000000000
--- a/Documentation/Books/Drivers/.gitkeep
+++ /dev/null
@@ -1,5 +0,0 @@
-Git can not track empty repositories.
-This file ensures that the directory is kept.
-
-Some of the old documentation building scripts are still
-used by the new system which copy files into this folder.
diff --git a/Documentation/Books/HTTP/.gitkeep b/Documentation/Books/HTTP/.gitkeep
deleted file mode 100644
index 936ca3adc4e3..000000000000
--- a/Documentation/Books/HTTP/.gitkeep
+++ /dev/null
@@ -1,5 +0,0 @@
-Git can not track empty repositories.
-This file ensures that the directory is kept.
-
-Some of the old documentation building scripts are still
-used by the new system which copy files into this folder.
\ No newline at end of file
diff --git a/Documentation/Books/Manual/.gitkeep b/Documentation/Books/Manual/.gitkeep
deleted file mode 100644
index 936ca3adc4e3..000000000000
--- a/Documentation/Books/Manual/.gitkeep
+++ /dev/null
@@ -1,5 +0,0 @@
-Git can not track empty repositories.
-This file ensures that the directory is kept.
-
-Some of the old documentation building scripts are still
-used by the new system which copy files into this folder.
\ No newline at end of file
diff --git a/Documentation/CMakeLists.txt b/Documentation/CMakeLists.txt
index 7f669934ab14..fb80ca73367f 100644
--- a/Documentation/CMakeLists.txt
+++ b/Documentation/CMakeLists.txt
@@ -1,15 +1,5 @@
# -*- mode: CMAKE; -*-
-# swagger
-add_custom_target(swagger
- COMMAND ${PROJECT_SOURCE_DIR}/utils/generateSwagger.sh
- WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
-
-# swagger
-add_custom_target(examples
- COMMAND ${PROJECT_SOURCE_DIR}/utils/generateExamples.sh
- WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
-
# manual pages
set(MAN_NAMES
man1/arangobench.1
@@ -66,4 +56,3 @@ add_custom_target(clean_man_autogenerated
list(APPEND CLEAN_AUTOGENERATED_FILES clean_man_autogenerated)
set(CLEAN_AUTOGENERATED_FILES ${CLEAN_AUTOGENERATED_FILES} PARENT_SCOPE)
-
diff --git a/Documentation/DocuBlocks/Rest/Administration/delete_api_shutdown.md b/Documentation/DocuBlocks/Rest/Administration/delete_api_shutdown.md
deleted file mode 100644
index 2273116e0755..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/delete_api_shutdown.md
+++ /dev/null
@@ -1,43 +0,0 @@
-@startDocuBlock delete_api_shutdown
-@brief initiates the shutdown sequence
-
-@RESTHEADER{DELETE /_admin/shutdown, Initiate shutdown sequence, startShutdown}
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{soft,boolean,optional}
-Introduced in: v3.7.12, v3.8.1, v3.9.0
-
-If set to `true`, this initiates a soft shutdown. This is only available
-on Coordinators. When issued, the Coordinator tracks a number of ongoing
-operations, waits until all have finished, and then shuts itself down
-normally. It will still accept new operations.
-
-This feature can be used to make restart operations of Coordinators less
-intrusive for clients. It is designed for setups with a load balancer in front
-of Coordinators. Remove the designated Coordinator from the load balancer before
-issuing the soft-shutdown. The remaining Coordinators will internally forward
-requests that need to be handled by the designated Coordinator. All other
-requests will be handled by the remaining Coordinators, reducing the designated
-Coordinator's load.
-
-The following types of operations are tracked:
-
- - AQL cursors (in particular streaming cursors)
- - Transactions (in particular stream transactions)
- - Pregel runs (conducted by this Coordinator)
- - Ongoing asynchronous requests (using the `x-arango-async: store` HTTP header)
- - Finished asynchronous requests, whose result has not yet been
- collected
- - Queued low priority requests (most normal requests)
- - Ongoing low priority requests
-
-@RESTDESCRIPTION
-This call initiates a clean shutdown sequence. Requires administrative privileges.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned in all cases, `OK` will be returned in the result buffer on success.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_admin_database_target_version.md b/Documentation/DocuBlocks/Rest/Administration/get_admin_database_target_version.md
index 88f55d29c4b6..94a6c63ba6e4 100644
--- a/Documentation/DocuBlocks/Rest/Administration/get_admin_database_target_version.md
+++ b/Documentation/DocuBlocks/Rest/Administration/get_admin_database_target_version.md
@@ -1,11 +1,16 @@
@startDocuBlock get_admin_database_target_version
-@brief returns the version of the database.
-@RESTHEADER{GET /_admin/database/target-version, Return the required version of the database, getDatabaseVersion}
+@RESTHEADER{GET /_admin/database/target-version, Get the required database version (deprecated), getDatabaseVersion}
+
+@HINTS
+{% hint 'warning' %}
+This endpoint is deprecated and should no longer be used. It will be removed from version 3.12.0 onward.
+Use `GET /_api/version` instead.
+{% endhint %}
@RESTDESCRIPTION
Returns the database version that this server requires.
-The version is returned in the *version* attribute of the result.
+The version is returned in the `version` attribute of the result.
@RESTRETURNCODES
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_admin_license.md b/Documentation/DocuBlocks/Rest/Administration/get_admin_license.md
deleted file mode 100644
index 66c936cc709c..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_admin_license.md
+++ /dev/null
@@ -1,52 +0,0 @@
-
-@startDocuBlock get_admin_license
-@brief Get license information
-
-@RESTHEADER{GET /_admin/license, Return information about the current license, getLicense}
-
-@RESTDESCRIPTION
-View the license information and status of an Enterprise Edition instance.
-Can be called on single servers, Coordinators, and DB-Servers.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-
-@RESTREPLYBODY{features,object,required,license_features}
-The properties of the license.
-
-@RESTSTRUCT{expires,license_features,number,required,}
-The `expires` key lists the expiry date as Unix timestamp (seconds since
-January 1st, 1970 UTC).
-
-@RESTREPLYBODY{license,string,required,}
-The encrypted license key in Base64 encoding.
-
-@RESTREPLYBODY{version,number,required,}
-The license version number.
-
-@RESTREPLYBODY{status,string,required,}
-The `status` key allows you to confirm the state of the installed license on a
-glance. The possible values are as follows:
-
-- `good`: The license is valid for more than 2 weeks.
-- `expiring`: The license is valid for less than 2 weeks.
-- `expired`: The license has expired. In this situation, no new
- Enterprise Edition features can be utilized.
-- `read-only`: The license is expired over 2 weeks. The instance is now
- restricted to read-only mode.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestAdminLicenseGet_cluster}
- var assertTypeOf = require("jsunity").jsUnity.assertions.assertTypeOf;
- var url = "/_admin/license";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
- assertTypeOf("string", response.parsedBody.license);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_admin_server_availability.md b/Documentation/DocuBlocks/Rest/Administration/get_admin_server_availability.md
deleted file mode 100644
index 1b6b1f03dda7..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_admin_server_availability.md
+++ /dev/null
@@ -1,28 +0,0 @@
-@startDocuBlock get_admin_server_availability
-@brief Return whether or not a server is available
-
-@RESTHEADER{GET /_admin/server/availability, Return whether or not a server is available, getServerAvailability}
-
-@RESTDESCRIPTION
-Return availability information about a server.
-
-This is a public API so it does *not* require authentication. It is meant to be
-used only in the context of server monitoring.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-This API will return HTTP 200 in case the server is up and running and usable for
-arbitrary operations, is not set to read-only mode and is currently not a follower
-in case of an Active Failover deployment setup.
-
-@RESTRETURNCODE{503}
-HTTP 503 will be returned in case the server is during startup or during shutdown,
-is set to read-only mode or is currently a follower in an Active Failover deployment setup.
-
-In addition, HTTP 503 will be returned in case the fill grade of the scheduler
-queue exceeds the configured high-water mark (adjustable via startup option
-`--server.unavailability-queue-fill-grade`), which by default is set to 75 % of
-the maximum queue length.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_admin_server_mode.md b/Documentation/DocuBlocks/Rest/Administration/get_admin_server_mode.md
deleted file mode 100644
index b9e3fabf0321..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_admin_server_mode.md
+++ /dev/null
@@ -1,19 +0,0 @@
-@startDocuBlock get_admin_server_mode
-@brief Return the mode of this server (read-only or default)
-
-@RESTHEADER{GET /_admin/server/mode, Return whether or not a server is in read-only mode, getServerMode}
-
-@RESTDESCRIPTION
-Return mode information about a server. The json response will contain
-a field `mode` with the value `readonly` or `default`. In a read-only server
-all write operations will fail with an error code of `1004` (_ERROR_READ_ONLY_).
-Creating or dropping of databases and collections will also fail with error code `11` (_ERROR_FORBIDDEN_).
-
-This API requires authentication.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-This API will return HTTP 200 if everything is ok
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_admin_status.md b/Documentation/DocuBlocks/Rest/Administration/get_admin_status.md
deleted file mode 100644
index 0b44d431e1b7..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_admin_status.md
+++ /dev/null
@@ -1,156 +0,0 @@
-
-@startDocuBlock get_admin_status
-@brief returns Status information of the server.
-
-@RESTHEADER{GET /_admin/status, Return status information, getStatus}
-
-@RESTDESCRIPTION
-Returns status information about the server.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Status information was returned successfully.
-
-@RESTREPLYBODY{server,string,required,}
-Always `"arango"`.
-
-@RESTREPLYBODY{license,string,required,}
-ArangoDB Edition, either `"community"` or `"enterprise"`.
-
-@RESTREPLYBODY{version,string,required,}
-The server version as a string.
-
-@RESTREPLYBODY{mode,string,required,}
-Either `"server"` or `"console"`. **Deprecated**, use `operationMode` instead.
-
-@RESTREPLYBODY{operationMode,string,required,}
-Either `"server"` or `"console"`.
-
-@RESTREPLYBODY{foxxApi,boolean,required,}
-Whether the Foxx API is enabled.
-
-@RESTREPLYBODY{host,string,required,}
-A host identifier defined by the `HOST` or `NODE_NAME` environment variable,
-or a fallback value using a machine identifier or the cluster/Agency address.
-
-@RESTREPLYBODY{hostname,string,optional,}
-A hostname defined by the `HOSTNAME` environment variable.
-
-@RESTREPLYBODY{pid,number,required,}
-The process ID of _arangod_.
-
-@RESTREPLYBODY{serverInfo,object,required,get_admin_status_server_info}
-Information about the server status.
-
-@RESTSTRUCT{progress,get_admin_status_server_info,object,required,get_admin_status_server_info_progress}
-Startup and recovery information.
-
-You can check for changes to determine whether progress was made between two
-calls, but you should not rely on specific values as they may change between
-ArangoDB versions. The values are only expected to change during the startup and
-shutdown, i.e. while `maintenance` is `true`.
-
-You need to start _arangod_ with the `--server.early-connections` startup option
-enabled to be able to query the endpoint during the startup process.
-If authentication is enabled, then you need to use the super-user JWT for the
-request because the user management is not available during the startup.
-
-@RESTSTRUCT{phase,get_admin_status_server_info_progress,string,required,}
-Name of the lifecycle phase the instance is currently in. Normally one of
-`"in prepare"`, `"in start"`, `"in wait"`, `"in shutdown"`, `"in stop"`,
-or `"in unprepare"`.
-
-@RESTSTRUCT{feature,get_admin_status_server_info_progress,string,required,}
-Internal name of the feature that is currently being prepared, started,
-stopped or unprepared.
-
-@RESTSTRUCT{recoveryTick,get_admin_status_server_info_progress,number,required,}
-Current recovery sequence number value, if the instance is currently recovering.
-If the instance is already past the recovery, this attribute will contain the
-last handled recovery sequence number.
-
-@RESTSTRUCT{role,get_admin_status_server_info,string,required,}
-Either `"SINGLE"`, `"COORDINATOR"`, `"PRIMARY"` (DB-Server), or `"AGENT"`.
-
-@RESTSTRUCT{writeOpsEnabled,get_admin_status_server_info,boolean,required,}
-Whether writes are enabled. **Deprecated**, use `readOnly` instead.
-
-@RESTSTRUCT{readOnly,get_admin_status_server_info,boolean,required,}
-Whether writes are disabled.
-
-@RESTSTRUCT{maintenance,get_admin_status_server_info,boolean,required,}
-Whether the maintenance mode is enabled.
-
-@RESTSTRUCT{persistedId,get_admin_status_server_info,string,optional,}
-The persisted ID, e. g. `"CRDN-e427b441-5087-4a9a-9983-2fb1682f3e2a"`.
-*Cluster only* (Agents, Coordinators, and DB-Servers).
-
-@RESTSTRUCT{rebootId,get_admin_status_server_info,number,optional,}
-The reboot ID. Changes on every restart.
-*Cluster only* (Agents, Coordinators, and DB-Servers).
-
-@RESTSTRUCT{state,get_admin_status_server_info,string,optional,}
-Either `"STARTUP"`, `"SERVING"`, or `"SHUTDOWN"`.
-*Cluster only* (Coordinators and DB-Servers).
-
-@RESTSTRUCT{address,get_admin_status_server_info,string,optional,}
-The address of the server, e.g. `tcp://[::1]:8530`.
-*Cluster only* (Coordinators and DB-Servers).
-
-@RESTSTRUCT{serverId,get_admin_status_server_info,string,optional,}
-The server ID, e.g. `"CRDN-e427b441-5087-4a9a-9983-2fb1682f3e2a"`.
-*Cluster only* (Coordinators and DB-Servers).
-
-@RESTREPLYBODY{agency,object,optional,get_admin_status_agency}
-Information about the Agency.
-*Cluster only* (Coordinators and DB-Servers).
-
-@RESTSTRUCT{agencyComm,get_admin_status_agency,object,optional,get_admin_status_agency_comm}
-Information about the communication with the Agency.
-*Cluster only* (Coordinators and DB-Servers).
-
-@RESTSTRUCT{endpoints,get_admin_status_agency_comm,array,optional,string}
-A list of possible Agency endpoints.
-
-@RESTREPLYBODY{coordinator,object,optional,get_admin_status_coordinator}
-Information about the Coordinators.
-*Cluster only* (Coordinators)
-
-@RESTSTRUCT{foxxmaster,get_admin_status_coordinator,array,optional,string}
-The server ID of the Coordinator that is the Foxx master.
-
-@RESTSTRUCT{isFoxxmaster,get_admin_status_coordinator,array,optional,string}
-Whether the queried Coordinator is the Foxx master.
-
-@RESTREPLYBODY{agent,object,optional,get_admin_status_agent}
-Information about the Agents.
-*Cluster only* (Agents)
-
-@RESTSTRUCT{id,get_admin_status_agent,string,optional,}
-Server ID of the queried Agent.
-
-@RESTSTRUCT{leaderId,get_admin_status_agent,string,optional,}
-Server ID of the leading Agent.
-
-@RESTSTRUCT{leading,get_admin_status_agent,boolean,optional,}
-Whether the queried Agent is the leader.
-
-@RESTSTRUCT{endpoint,get_admin_status_agent,string,optional,}
-The endpoint of the queried Agent.
-
-@RESTSTRUCT{term,get_admin_status_agent,number,optional,}
-The current term number.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestAdminStatus_cluster}
- var url = "/_admin/status";
- var response = logCurlRequest("GET", url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_admin_support_info.md b/Documentation/DocuBlocks/Rest/Administration/get_admin_support_info.md
deleted file mode 100644
index ecbababe5fb3..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_admin_support_info.md
+++ /dev/null
@@ -1,65 +0,0 @@
-@startDocuBlock get_admin_support_info
-@brief Get deployment information
-
-@RESTHEADER{GET /_admin/support-info, Get information about the deployment, getSupportInfo}
-
-@RESTDESCRIPTION
-Retrieves deployment information for support purposes. The endpoint returns data
-about the ArangoDB version used, the host (operating system, server ID, CPU and
-storage capacity, current utilization, a few metrics) and the other servers in
-the deployment (in case of Active Failover or cluster deployments).
-
-As this API may reveal sensitive data about the deployment, it can only be
-accessed from inside the `_system` database. In addition, there is a policy
-control startup option `--server.support-info-api` that controls if and to whom
-the API is made available.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-
-@RESTREPLYBODY{date,string,required,}
-ISO 8601 datetime string of when the information was requested.
-
-@RESTREPLYBODY{deployment,object,required,}
-An object with at least a `type` attribute, indicating the deployment type.
-
-In case of a `"single"` server, additional information is provided in the
-top-level `host` attribute.
-
-In case of a `"cluster"`, there is a `servers` object that contains a nested
-object for each Coordinator and DB-Server, using the server ID as key. Each
-object holds information about the ArangoDB instance as well as the host machine.
-There are additional attributes for the number of `agents`, `coordinators`,
-`dbServers`, and `shards`.
-
-@RESTREPLYBODY{host,object,optional,}
-An object that holds information about the ArangoDB instance as well as the
-host machine. Only set in case of single servers.
-
-@RESTRETURNCODE{404}
-The support info API is turned off.
-
-@EXAMPLES
-
-Query support information from a single server
-
-@EXAMPLE_ARANGOSH_RUN{RestAdminSupportInfo}
- var url = "/_admin/support-info";
- var response = logCurlRequest("GET", url);
- assert(response.code === 200);
- assert(response.parsedBody.host !== undefined);
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Query support information from a cluster
-
-@EXAMPLE_ARANGOSH_RUN{RestAdminSupportInfo_cluster}
- var url = "/_admin/support-info";
- var response = logCurlRequest("GET", url);
- assert(response.code === 200);
- assert(response.parsedBody.deployment.servers !== undefined);
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_admin_time.md b/Documentation/DocuBlocks/Rest/Administration/get_admin_time.md
deleted file mode 100644
index c79f364f9722..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_admin_time.md
+++ /dev/null
@@ -1,25 +0,0 @@
-
-@startDocuBlock get_admin_time
-@brief Get the current time of the system
-
-@RESTHEADER{GET /_admin/time, Return system time, getTime}
-
-@RESTDESCRIPTION
-The call returns an object with the attribute *time*. This contains the
-current system time as a Unix timestamp with microsecond precision.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Time was returned successfully.
-
-@RESTREPLYBODY{error,boolean,required,}
-boolean flag to indicate whether an error occurred (*false* in this case)
-
-@RESTREPLYBODY{code,integer,required,int64}
-the HTTP status code
-
-@RESTREPLYBODY{time,number,required,float}
-The current system time as a Unix timestamp with microsecond precision of the server
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_api_endpoint.md b/Documentation/DocuBlocks/Rest/Administration/get_api_endpoint.md
deleted file mode 100644
index 02ce62552e56..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_api_endpoint.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-@startDocuBlock get_api_endpoint
-@brief This API call returns the list of all endpoints (single server).
-
-@RESTHEADER{GET /_api/endpoint, Return list of all endpoints, listEndpoints}
-
-@HINTS
-{% hint 'warning' %}
-This route should no longer be used.
-It is considered as deprecated from version 3.4.0 on.
-{% endhint %}
-
-@RESTDESCRIPTION
-Returns an array of all configured endpoints the server is listening on.
-
-The result is a JSON array of JSON objects, each with `"entrypoint"` as
-the only attribute, and with the value being a string describing the
-endpoint.
-
-**Note**: retrieving the array of all endpoints is allowed in the system database
-only. Calling this action in any other database will make the server return
-an error.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned when the array of endpoints can be determined successfully.
-
-@RESTRETURNCODE{400}
-is returned if the action is not carried out in the system database.
-
-@RESTRETURNCODE{405}
-The server will respond with *HTTP 405* if an unsupported HTTP method is used.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestEndpointGet}
- var url = "/_api/endpoint";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_api_engine.md b/Documentation/DocuBlocks/Rest/Administration/get_api_engine.md
deleted file mode 100644
index 718d2255586b..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_api_engine.md
+++ /dev/null
@@ -1,31 +0,0 @@
-
-@startDocuBlock get_api_engine
-@brief returns the engine the type the server is running with
-
-@RESTHEADER{GET /_api/engine, Return server database engine type, getEngine}
-
-@RESTDESCRIPTION
-Returns the storage engine the server is configured to use.
-The response is a JSON object with the following attributes:
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned in all cases.
-
-@RESTREPLYBODY{name,string,required,string}
-will be *rocksdb*
-
-@EXAMPLES
-
-Return the active storage engine with the RocksDB storage engine in use:
-
-@EXAMPLE_ARANGOSH_RUN{RestEngine}
- var response = logCurlRequest('GET', '/_api/engine');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_api_shutdown.md b/Documentation/DocuBlocks/Rest/Administration/get_api_shutdown.md
deleted file mode 100644
index d938ecbeebd3..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_api_shutdown.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-@startDocuBlock get_api_shutdown
-@brief query progress of soft shutdown process
-
-@RESTHEADER{GET /_admin/shutdown, Query progress of soft shutdown process, getShutdownProgress}
-
-@RESTDESCRIPTION
-Introduced in: v3.7.12, v3.8.1, v3.9.0
-
-This call reports progress about a soft Coordinator shutdown (see
-documentation of `DELETE /_admin/shutdown?soft=true`).
-In this case, the following types of operations are tracked:
-
- - AQL cursors (in particular streaming cursors)
- - Transactions (in particular stream transactions)
- - Pregel runs (conducted by this Coordinator)
- - Ongoing asynchronous requests (using the `x-arango-async: store` HTTP header
- - Finished asynchronous requests, whose result has not yet been
- collected
- - Queued low priority requests (most normal requests)
- - Ongoing low priority requests
-
-This API is only available on Coordinators.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-The response indicates the fact that a soft shutdown is ongoing and the
-number of active operations of the various types. Once all numbers have gone
-to 0, the flag `allClear` is set and the Coordinator shuts down automatically.
-
-@RESTREPLYBODY{softShutdownOngoing,boolean,required,}
-Whether a soft shutdown of the Coordinator is in progress.
-
-@RESTREPLYBODY{AQLcursors,number,required,}
-Number of AQL cursors that are still active.
-
-@RESTREPLYBODY{transactions,number,required,}
-Number of ongoing transactions.
-
-@RESTREPLYBODY{pendingJobs,number,required,}
-Number of ongoing asynchronous requests.
-
-@RESTREPLYBODY{doneJobs,number,required,}
-Number of finished asynchronous requests, whose result has not yet been collected.
-
-@RESTREPLYBODY{pregelConductors,number,required,}
-Number of ongoing Pregel jobs.
-
-@RESTREPLYBODY{lowPrioOngoingRequests,number,required,}
-Number of queued low priority requests.
-
-@RESTREPLYBODY{lowPrioQueuedRequests,number,required,}
-Number of ongoing low priority requests.
-
-@RESTREPLYBODY{allClear,boolean,required,}
-Whether all active operations finished.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/get_api_version.md b/Documentation/DocuBlocks/Rest/Administration/get_api_version.md
deleted file mode 100644
index e16c023f4fb6..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/get_api_version.md
+++ /dev/null
@@ -1,154 +0,0 @@
-
-@startDocuBlock get_api_version
-@brief returns the server version number
-
-@RESTHEADER{GET /_api/version, Return server version, getVersion}
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{details,boolean,optional}
-If set to *true*, the response will contain a *details* attribute with
-additional information about included components and their versions. The
-attribute names and internals of the *details* object may vary depending on
-platform and ArangoDB version.
-
-@RESTDESCRIPTION
-Returns the server name and version number. The response is a JSON object
-with the following attributes:
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned in all cases.
-
-@RESTREPLYBODY{server,string,required,string}
-will always contain *arango*
-
-@RESTREPLYBODY{version,string,required,string}
-the server version string. The string has the format
-"*major*.*minor*.*sub*". *major* and *minor* will be numeric, and *sub*
-may contain a number or a textual version.
-
-@RESTREPLYBODY{details,object,optional,version_details_struct}
-an optional JSON object with additional details. This is
-returned only if the *details* query parameter is set to *true* in the
-request.
-
-@RESTSTRUCT{architecture,version_details_struct,string,optional,}
-The CPU architecture, i.e. *64bit*
-
-@RESTSTRUCT{arm,version_details_struct,string,optional,}
-*false* - this is not running on an ARM cpu
-
-@RESTSTRUCT{asan,version_details_struct,string,optional,}
-has this been compiled with the asan address sanitizer turned on? (should be false)
-
-@RESTSTRUCT{assertions,version_details_struct,string,optional,}
-do we have assertions compiled in (=> developer version)
-
-@RESTSTRUCT{boost-version,version_details_struct,string,optional,}
-which boost version do we bind
-
-@RESTSTRUCT{build-date,version_details_struct,string,optional,}
-the date when this binary was created
-
-@RESTSTRUCT{build-repository,version_details_struct,string,optional,}
-reference to the git-ID this was compiled from
-
-@RESTSTRUCT{compiler,version_details_struct,string,optional,}
-which compiler did we use
-
-@RESTSTRUCT{cplusplus,version_details_struct,string,optional,}
-C++ standards version
-
-@RESTSTRUCT{debug,version_details_struct,string,optional,}
-*false* for production binaries
-
-@RESTSTRUCT{endianness,version_details_struct,string,optional,}
-currently only *little* is supported
-
-@RESTSTRUCT{failure-tests,version_details_struct,string,optional,}
-*false* for production binaries (the facility to invoke fatal errors is disabled)
-
-@RESTSTRUCT{fd-client-event-handler,version_details_struct,string,optional,}
-which method do we use to handle fd-sets, *poll* should be here on linux.
-
-@RESTSTRUCT{fd-setsize,version_details_struct,string,optional,}
-if not *poll* the fd setsize is valid for the maximum number of file descriptors
-
-@RESTSTRUCT{full-version-string,version_details_struct,string,optional,}
-The full version string
-
-@RESTSTRUCT{icu-version,version_details_struct,string,optional,}
-Which version of ICU do we bundle
-
-@RESTSTRUCT{jemalloc,version_details_struct,string,optional,}
-*true* if we use jemalloc
-
-@RESTSTRUCT{maintainer-mode,version_details_struct,string,optional,}
-*false* if this is a production binary
-
-@RESTSTRUCT{openssl-version,version_details_struct,string,optional,}
-which openssl version do we link?
-
-@RESTSTRUCT{platform,version_details_struct,string,optional,}
-the host os - *linux*, *windows* or *darwin*
-
-@RESTSTRUCT{reactor-type,version_details_struct,string,optional,}
-*epoll* TODO
-
-@RESTSTRUCT{rocksdb-version,version_details_struct,string,optional,}
-the rocksdb version this release bundles
-
-@RESTSTRUCT{server-version,version_details_struct,string,optional,}
-the ArangoDB release version
-
-@RESTSTRUCT{sizeof int,version_details_struct,string,optional,}
-number of bytes for *integers*
-
-@RESTSTRUCT{sizeof void*,version_details_struct,string,optional,}
-number of bytes for *void pointers*
-
-@RESTSTRUCT{sse42,version_details_struct,string,optional,}
-do we have a SSE 4.2 enabled cpu?
-
-@RESTSTRUCT{unaligned-access,version_details_struct,string,optional,}
-does this system support unaligned memory access?
-
-@RESTSTRUCT{v8-version,version_details_struct,string,optional,}
-the bundled V8 javascript engine version
-
-@RESTSTRUCT{vpack-version,version_details_struct,string,optional,}
-the version of the used velocypack implementation
-
-@RESTSTRUCT{zlib-version,version_details_struct,string,optional,}
-the version of the bundled zlib
-
-@RESTSTRUCT{mode,version_details_struct,string,optional,}
-the mode we're running as - one of [*server*, *console*, *script*]
-
-@RESTSTRUCT{host,version_details_struct,string,optional,}
-the host ID
-
-@EXAMPLES
-
-Return the version information
-
-@EXAMPLE_ARANGOSH_RUN{RestVersion}
- var response = logCurlRequest('GET', '/_api/version');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Return the version information with details
-
-@EXAMPLE_ARANGOSH_RUN{RestVersionDetails}
- var response = logCurlRequest('GET', '/_api/version?details=true');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md b/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md
deleted file mode 100644
index 9ce5ddc19cd5..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/post_admin_echo.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-@startDocuBlock post_admin_echo
-@brief Send back what was sent in, headers, post body etc.
-
-@RESTHEADER{POST /_admin/echo, Return current request, echoRequest}
-
-@RESTALLBODYPARAM{body,string,required}
-The request body can be of any type and is simply forwarded.
-
-@RESTDESCRIPTION
-The call returns an object with the servers request information
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Echo was returned successfully.
-
-@RESTREPLYBODY{authorized,boolean,required,}
-Whether the session is authorized
-
-@RESTREPLYBODY{user,string,required,}
-The name of the current user that sent this request
-
-@RESTREPLYBODY{isAdminUser,boolean,required,}
-Whether the current user is an administrator
-
-@RESTREPLYBODY{database,string,required,}
-The name of the database this request was executed on
-
-@RESTREPLYBODY{url,string,required,}
-The raw request URL
-
-@RESTREPLYBODY{protocol,string,required,}
-The transport protocol, one of `"http"`, `"https"`, `"velocystream"`
-
-@RESTREPLYBODY{portType,string,required,}
-The type of the socket, one of `"tcp/ip"`, `"unix"`, `"unknown"`
-
-@RESTREPLYBODY{server,object,required,admin_echo_server_struct}
-Attributes of the server connection
-
-@RESTSTRUCT{address,admin_echo_server_struct,string,required,}
-The bind address of the endpoint this request was sent to
-
-@RESTSTRUCT{port,admin_echo_server_struct,integer,required,}
-The port this request was sent to
-
-@RESTSTRUCT{endpoint,admin_echo_server_struct,string,required,}
-The endpoint this request was sent to
-
-@RESTREPLYBODY{client,object,required,admin_echo_client_struct}
-Attributes of the client connection
-
-@RESTSTRUCT{address,admin_echo_client_struct,string,required,}
-The IP address of the client
-
-@RESTSTRUCT{port,admin_echo_client_struct,integer,required,}
-The port of the TCP connection on the client-side
-
-@RESTSTRUCT{id,admin_echo_client_struct,string,required,}
-A server generated ID
-
-@RESTREPLYBODY{internals,object,required,}
-Contents of the server internals struct
-
-@RESTREPLYBODY{prefix,object,required,}
-The prefix of the database
-
-@RESTREPLYBODY{headers,object,required,}
-The list of the HTTP headers you sent
-
-@RESTREPLYBODY{requestType,string,required,}
-The HTTP method that was used for the request (`"POST"`). The endpoint can be
-queried using other verbs, too (`"GET"`, `"PUT"`, `"PATCH"`, `"DELETE"`).
-
-@RESTREPLYBODY{requestBody,string,required,}
-Stringified version of the request body you sent
-
-@RESTREPLYBODY{rawRequestBody,object,required,}
-The sent payload as a JSON-encoded Buffer object
-
-@RESTREPLYBODY{parameters,object,required,}
-An object containing the query parameters
-
-@RESTREPLYBODY{cookies,object,required,}
-A list of the cookies you sent
-
-@RESTREPLYBODY{suffix,array,required,string}
-A list of the decoded URL path suffixes. You can query the endpoint with
-arbitrary suffixes, e.g. `/_admin/echo/foo/123`
-
-@RESTREPLYBODY{rawSuffix,array,required,string}
-A list of the percent-encoded URL path suffixes
-
-@RESTREPLYBODY{path,string,required,}
-The relative path of this request (decoded, excluding `/_admin/echo`)
-
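-A minimal invocation could look like the following sketch (assuming a server on
-the default endpoint `http://localhost:8529`; authentication omitted for brevity):
-
-```
-shell> curl -X POST http://localhost:8529/_admin/echo -d '{"hello": "world"}'
-```
-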
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/post_admin_execute.md b/Documentation/DocuBlocks/Rest/Administration/post_admin_execute.md
deleted file mode 100644
index 46189eb8677a..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/post_admin_execute.md
+++ /dev/null
@@ -1,37 +0,0 @@
-
-@startDocuBlock post_admin_execute
-@brief Execute a script on the server.
-
-@RESTHEADER{POST /_admin/execute, Execute program, executeCode}
-
-@RESTALLBODYPARAM{body,string,required}
-The request body is the JavaScript code to be executed.
-
-@RESTDESCRIPTION
-Executes the JavaScript code in the body on the server as the body
-of a function with no arguments. If you have a *return* statement
-then the return value you produce will be returned as content type
-*application/json*. If the parameter *returnAsJSON* is set to
-*true*, the result will be a JSON object describing the return value
-directly, otherwise a string produced by JSON.stringify will be
-returned.
-
-Note that this API endpoint will only be present if the server was
-started with the option `--javascript.allow-admin-execute true`.
-
-The default value of this option is `false`, which disables the execution of
-user-defined code and disables this API endpoint entirely.
-This is also the recommended setting for production.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned when everything went well, or if a timeout occurred. In the
-latter case a body of type application/json indicating the timeout
-is returned. Depending on *returnAsJSON*, this is a JSON object or a plain string.
-
-@RESTRETURNCODE{403}
-is returned if ArangoDB is not running in cluster mode.
-
-@RESTRETURNCODE{404}
-is returned if ArangoDB was not compiled for cluster operation.
-
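-For illustration, a request might look like this sketch (assuming the server was
-started with `--javascript.allow-admin-execute true`; host is a placeholder):
-
-```
-shell> curl -X POST "http://localhost:8529/_admin/execute?returnAsJSON=true" -d 'return "hello"'
-```
-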
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/post_admin_routing_reload.md b/Documentation/DocuBlocks/Rest/Administration/post_admin_routing_reload.md
deleted file mode 100644
index 86f71bfc7cbc..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/post_admin_routing_reload.md
+++ /dev/null
@@ -1,14 +0,0 @@
-
-@startDocuBlock post_admin_routing_reload
-@brief Reload the routing table.
-
-@RESTHEADER{POST /_admin/routing/reload, Reloads the routing information, reloadRouting}
-
-@RESTDESCRIPTION
-Reloads the routing information from the collection *routing*.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Routing information was reloaded successfully.
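-
-For illustration, a call could look like this (placeholder host; no payload is required):
-
-```
-shell> curl -X POST http://localhost:8529/_admin/routing/reload
-```
-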
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/put_admin_compact.md b/Documentation/DocuBlocks/Rest/Administration/put_admin_compact.md
deleted file mode 100644
index ac27042be1ad..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/put_admin_compact.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-@startDocuBlock put_admin_compact
-@brief compact all databases
-
-@RESTHEADER{PUT /_admin/compact, Compact the entire database system data, compactAllDatabases}
-
-@HINTS
-{% hint 'warning' %}
-This command can cause a full rewrite of all data in all databases, which may
-take a long time for large databases. It should thus only be used with care and
-only when additional I/O load can be tolerated for a prolonged time.
-{% endhint %}
-
-@RESTDESCRIPTION
-This endpoint can be used to reclaim disk space after substantial data
-deletions have taken place. It requires superuser access.
-
-@RESTBODYPARAM{changeLevel,boolean,optional,}
-Whether or not compacted data should be moved to the minimum possible level.
-The default value is *false*.
-
-@RESTBODYPARAM{compactBottomMostLevel,boolean,optional,}
-Whether or not to compact the bottommost level of data.
-The default value is *false*.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Compaction started successfully
-
-@RESTRETURNCODE{401}
-if the request was not authenticated as a user with sufficient rights
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestAdminCompact}
- var response = logCurlRequest('PUT', '/_admin/compact', '');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/put_admin_license.md b/Documentation/DocuBlocks/Rest/Administration/put_admin_license.md
deleted file mode 100644
index 400a4e142991..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/put_admin_license.md
+++ /dev/null
@@ -1,59 +0,0 @@
-
-@startDocuBlock put_admin_license
-@brief Set a new license
-
-@RESTHEADER{PUT /_admin/license, Set a new license, setLicense}
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{force,boolean,optional}
-Set to `true` to change the license even if it expires sooner than the current one.
-
-@RESTALLBODYPARAM{license,string,required}
-The request body has to contain the Base64-encoded string wrapped in double quotes.
-
-@RESTDESCRIPTION
-Set a new license for an Enterprise Edition instance.
-Can be called on single servers, Coordinators, and DB-Servers.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the license expires earlier than the previously installed one.
-
-@RESTRETURNCODE{201}
-License successfully deployed.
-
-@EXAMPLES
-
-```
-shell> curl -XPUT http://localhost:8529/_admin/license -d '""'
-```
-
-Server response in case of success:
-
-```json
-{
- "result": {
- "error": false,
- "code": 201
- }
-}
-```
-
-Server response if the new license expires sooner than the current one (requires
-`?force=true` to update the license anyway):
-
-```json
-{
- "code": 400,
- "error": true,
- "errorMessage": "This license expires sooner than the existing. You may override this by specifying force=true with invocation.",
- "errorNum": 9007
-}
-```
-
-In case of a different error related to an expired or invalid license, please
-contact ArangoDB sales.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Administration/put_admin_server_mode.md b/Documentation/DocuBlocks/Rest/Administration/put_admin_server_mode.md
deleted file mode 100644
index 2fb376e47b7e..000000000000
--- a/Documentation/DocuBlocks/Rest/Administration/put_admin_server_mode.md
+++ /dev/null
@@ -1,28 +0,0 @@
-
-@startDocuBlock put_admin_server_mode
-@brief Update the mode of this server (read-only or default)
-
-@RESTHEADER{PUT /_admin/server/mode, Update whether or not a server is in read-only mode, setServerMode}
-
-@RESTBODYPARAM{mode,string,required,string}
-The mode of the server: either `readonly` or `default`.
-
-@RESTDESCRIPTION
-Update mode information about a server. The JSON response will contain
-a field `mode` with the value `readonly` or `default`. In a read-only server
-all write operations will fail with an error code of `1004` (_ERROR_READ_ONLY_).
-Creating or dropping databases and collections will also fail with error
-code `11` (_ERROR_FORBIDDEN_).
-
-This is a protected API. It requires authentication and administrative
-server rights.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-This API will return HTTP 200 if everything is ok
-
-@RESTRETURNCODE{401}
-if the request was not authenticated as a user with sufficient rights
-
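-For illustration, switching to read-only mode could look like this sketch
-(placeholder host; administrative credentials required):
-
-```
-shell> curl -X PUT http://localhost:8529/_admin/server/mode -d '{"mode": "readonly"}'
-```
-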
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Analyzers/delete_api_analyzer_analyzer.md b/Documentation/DocuBlocks/Rest/Analyzers/delete_api_analyzer_analyzer.md
deleted file mode 100644
index 1b506298e371..000000000000
--- a/Documentation/DocuBlocks/Rest/Analyzers/delete_api_analyzer_analyzer.md
+++ /dev/null
@@ -1,100 +0,0 @@
-@startDocuBlock delete_api_analyzer_analyzer
-@brief removes an Analyzer configuration
-
-@RESTHEADER{DELETE /_api/analyzer/{analyzer-name}, Remove an Analyzer, deleteAnalyzer}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{analyzer-name,string,required}
-The name of the Analyzer to remove.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{force,boolean,optional}
-Whether the Analyzer configuration should be removed even if it is in use.
-The default value is *false*.
-
-@RESTDESCRIPTION
-Removes an Analyzer configuration identified by *analyzer-name*.
-
-If the Analyzer definition was successfully dropped, an object is returned with
-the following attributes:
-- *error*: *false*
-- *name*: The name of the removed Analyzer
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-The Analyzer configuration was removed successfully.
-
-@RESTRETURNCODE{400}
-The *analyzer-name* was not supplied or another request parameter was not
-valid.
-
-@RESTRETURNCODE{403}
-The user does not have permission to remove this Analyzer configuration.
-
-@RESTRETURNCODE{404}
-Such an Analyzer configuration does not exist.
-
-@RESTRETURNCODE{409}
-The specified Analyzer configuration is still in use and *force* was omitted or
-set to *false*.
-
-@EXAMPLES
-
-Removing without *force*:
-
-@EXAMPLE_ARANGOSH_RUN{RestAnalyzerDelete}
- var analyzers = require("@arangodb/analyzers");
- var db = require("@arangodb").db;
- var analyzerName = "testAnalyzer";
- analyzers.save(analyzerName, "identity");
-
- // removal
- var url = "/_api/analyzer/" + encodeURIComponent(analyzerName);
- var response = logCurlRequest('DELETE', url);
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Removing with *force*:
-
-@EXAMPLE_ARANGOSH_RUN{RestAnalyzerDeleteForce}
- var analyzers = require("@arangodb/analyzers");
- var db = require("@arangodb").db;
- var analyzerName = "testAnalyzer";
- analyzers.save(analyzerName, "identity");
-
- // create Analyzer reference
- var url = "/_api/collection";
- var body = { name: "testCollection" };
- var response = logCurlRequest('POST', url, body);
- assert(response.code === 200);
- var url = "/_api/view";
- var body = {
- name: "testView",
- type: "arangosearch",
- links: { testCollection: { analyzers: [ analyzerName ] } }
- };
- var response = logCurlRequest('POST', url, body);
-
- // removal (fail)
- var url = "/_api/analyzer/" + encodeURIComponent(analyzerName) + "?force=false";
- var response = logCurlRequest('DELETE', url);
- assert(response.code === 409);
-
- // removal
- var url = "/_api/analyzer/" + encodeURIComponent(analyzerName) + "?force=true";
- var response = logCurlRequest('DELETE', url);
- assert(response.code === 200);
-
- logJsonResponse(response);
-
- db._dropView("testView");
- db._drop("testCollection");
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Analyzers/get_api_analyzer_analyzer.md b/Documentation/DocuBlocks/Rest/Analyzers/get_api_analyzer_analyzer.md
deleted file mode 100644
index 80d536b6ebed..000000000000
--- a/Documentation/DocuBlocks/Rest/Analyzers/get_api_analyzer_analyzer.md
+++ /dev/null
@@ -1,47 +0,0 @@
-@startDocuBlock get_api_analyzer_analyzer
-@brief returns an Analyzer definition
-
-@RESTHEADER{GET /_api/analyzer/{analyzer-name}, Return the Analyzer definition, getAnalyzer}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{analyzer-name,string,required}
-The name of the Analyzer to retrieve.
-
-@RESTDESCRIPTION
-Retrieves the full definition for the specified Analyzer name.
-The resulting object contains the following attributes:
-- *name*: the Analyzer name
-- *type*: the Analyzer type
-- *properties*: the properties used to configure the specified type
-- *features*: the set of features to set on the Analyzer generated fields
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-The Analyzer definition was retrieved successfully.
-
-@RESTRETURNCODE{404}
-Such an Analyzer configuration does not exist.
-
-@EXAMPLES
-
-Retrieve an Analyzer definition:
-
-@EXAMPLE_ARANGOSH_RUN{RestAnalyzerGet}
- var analyzers = require("@arangodb/analyzers");
- var db = require("@arangodb").db;
- var analyzerName = "testAnalyzer";
- analyzers.save(analyzerName, "identity");
-
- // retrieval
- var url = "/_api/analyzer/" + encodeURIComponent(analyzerName);
- var response = logCurlRequest('GET', url);
- assert(response.code === 200);
-
- logJsonResponse(response);
-
- analyzers.remove(analyzerName, true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Analyzers/get_api_analyzers.md b/Documentation/DocuBlocks/Rest/Analyzers/get_api_analyzers.md
deleted file mode 100644
index 37d9ce119545..000000000000
--- a/Documentation/DocuBlocks/Rest/Analyzers/get_api_analyzers.md
+++ /dev/null
@@ -1,32 +0,0 @@
-@startDocuBlock get_api_analyzer
-@brief returns a listing of available Analyzer definitions
-
-@RESTHEADER{GET /_api/analyzer, List all Analyzers, listAnalyzers}
-
-@RESTDESCRIPTION
-Retrieves an array of all Analyzer definitions.
-The resulting array contains objects with the following attributes:
-- *name*: the Analyzer name
-- *type*: the Analyzer type
-- *properties*: the properties used to configure the specified type
-- *features*: the set of features to set on the Analyzer generated fields
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-The Analyzer definitions were retrieved successfully.
-
-@EXAMPLES
-
-Retrieve all Analyzer definitions:
-
-@EXAMPLE_ARANGOSH_RUN{RestAnalyzersGet}
- // retrieval
- var url = "/_api/analyzer";
- var response = logCurlRequest('GET', url);
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Analyzers/post_api_analyzer.md b/Documentation/DocuBlocks/Rest/Analyzers/post_api_analyzer.md
deleted file mode 100644
index ad1136dfb8e9..000000000000
--- a/Documentation/DocuBlocks/Rest/Analyzers/post_api_analyzer.md
+++ /dev/null
@@ -1,58 +0,0 @@
-@startDocuBlock post_api_analyzer
-@brief creates a new Analyzer based on the provided definition
-
-@RESTHEADER{POST /_api/analyzer, Create an Analyzer with the supplied definition, createAnalyzer}
-
-@RESTBODYPARAM{name,string,required,string}
-The Analyzer name.
-
-@RESTBODYPARAM{type,string,required,string}
-The Analyzer type.
-
-@RESTBODYPARAM{properties,object,optional,}
-The properties used to configure the specified Analyzer type.
-
-@RESTBODYPARAM{features,array,optional,string}
-The set of features to set on the Analyzer generated fields.
-The default value is an empty array.
-
-@RESTDESCRIPTION
-Creates a new Analyzer based on the provided configuration.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-An Analyzer with a matching name and definition already exists.
-
-@RESTRETURNCODE{201}
-A new Analyzer definition was successfully created.
-
-@RESTRETURNCODE{400}
-One or more of the required parameters is missing or one or more of the parameters
-is not valid.
-
-@RESTRETURNCODE{403}
-The user does not have permission to create an Analyzer with this configuration.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestAnalyzerPost}
- var analyzers = require("@arangodb/analyzers");
- var db = require("@arangodb").db;
- var analyzerName = "testAnalyzer";
-
- // creation
- var url = "/_api/analyzer";
- var body = {
- name: "testAnalyzer",
- type: "identity"
- };
- var response = logCurlRequest('POST', url, body);
- assert(response.code === 201);
-
- logJsonResponse(response);
-
- analyzers.remove(analyzerName, true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Authentication/1_structs.md b/Documentation/DocuBlocks/Rest/Authentication/1_structs.md
deleted file mode 100644
index 620aaa378ae5..000000000000
--- a/Documentation/DocuBlocks/Rest/Authentication/1_structs.md
+++ /dev/null
@@ -1,15 +0,0 @@
-@RESTSTRUCT{error,admin_server_jwt,boolean,required,}
-boolean flag to indicate whether an error occurred (*false* in this case)
-
-@RESTSTRUCT{code,admin_server_jwt,integer,required,int64}
-the HTTP status code - 200 in this case
-
-@RESTSTRUCT{result,admin_server_jwt,object,required,jwt_secret_struct}
-The result object.
-
-@RESTSTRUCT{active,jwt_secret_struct,object,required,}
-An object with the SHA-256 hash of the active secret.
-
-@RESTSTRUCT{passive,jwt_secret_struct,array,required,object}
-An array of objects with the SHA-256 hashes of the passive secrets.
-Can be empty.
diff --git a/Documentation/DocuBlocks/Rest/Authentication/get_admin_server_jwt.md b/Documentation/DocuBlocks/Rest/Authentication/get_admin_server_jwt.md
deleted file mode 100644
index 5039a076e2e2..000000000000
--- a/Documentation/DocuBlocks/Rest/Authentication/get_admin_server_jwt.md
+++ /dev/null
@@ -1,22 +0,0 @@
-@startDocuBlock get_admin_server_jwt
-@brief Retrieve JWT secrets info
-
-@RESTHEADER{GET /_admin/server/jwt, Fetch information about the currently loaded secrets, getServerJwtSecrets}
-
-@RESTDESCRIPTION
-Get information about the currently loaded secrets.
-
-To utilize the API a superuser JWT token is necessary, otherwise the response
-will be _HTTP 403 Forbidden_.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-
-@RESTREPLYBODY{,object,required,admin_server_jwt}
-The reply with the JWT secrets information.
-
-@RESTRETURNCODE{403}
-if the request was not authenticated as a user with sufficient rights
-
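-For illustration, a call might look like this sketch (assuming `$JWT` holds a
-superuser JWT token; host is a placeholder):
-
-```
-shell> curl -H "Authorization: bearer $JWT" http://localhost:8529/_admin/server/jwt
-```
-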
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Authentication/post_admin_server_jwt.md b/Documentation/DocuBlocks/Rest/Authentication/post_admin_server_jwt.md
deleted file mode 100644
index c3c2a430fd37..000000000000
--- a/Documentation/DocuBlocks/Rest/Authentication/post_admin_server_jwt.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-@startDocuBlock post_admin_server_jwt
-@brief Hot-reload JWT secrets
-
-@RESTHEADER{POST /_admin/server/jwt, Hot-reload the JWT secret(s) from disk, reloadServerJwtSecrets}
-
-@RESTDESCRIPTION
-Sending a request without payload to this endpoint reloads the JWT secret(s)
-from disk. Only the files specified via the arangod startup option
-`--server.jwt-secret-keyfile` or `--server.jwt-secret-folder` are used.
-It is not possible to change the locations where files are loaded from
-without restarting the process.
-
-To utilize the API a superuser JWT token is necessary, otherwise the response
-will be _HTTP 403 Forbidden_.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-
-@RESTREPLYBODY{,object,required,admin_server_jwt}
-The reply with the JWT secrets information.
-
-@RESTRETURNCODE{403}
-if the request was not authenticated as a user with sufficient rights
-
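-For illustration, a reload request might look like this sketch (assuming `$JWT`
-holds a superuser JWT token; host is a placeholder, the body stays empty):
-
-```
-shell> curl -X POST -H "Authorization: bearer $JWT" http://localhost:8529/_admin/server/jwt -d ''
-```
-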
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Authentication/post_open_auth.md b/Documentation/DocuBlocks/Rest/Authentication/post_open_auth.md
deleted file mode 100644
index 368aa7f22250..000000000000
--- a/Documentation/DocuBlocks/Rest/Authentication/post_open_auth.md
+++ /dev/null
@@ -1,39 +0,0 @@
-@startDocuBlock post_open_auth
-@brief Create a JWT access token
-
-@RESTHEADER{POST /_open/auth, Create a JWT session token, createSessionToken}
-
-@RESTDESCRIPTION
-Obtain a JSON Web Token (JWT) from the credentials of an ArangoDB user account.
-You can use the JWT in the `Authorization` HTTP header as a `Bearer` token to
-authenticate requests.
-
-The lifetime for the token is controlled by the `--server.session-timeout`
-startup option.
-
-@RESTBODYPARAM{username,string,required,}
-The name of an ArangoDB user.
-
-@RESTBODYPARAM{password,string,required,}
-The password of the ArangoDB user.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-
-@RESTREPLYBODY{jwt,string,required,}
-The encoded JWT session token.
-
-@RESTRETURNCODE{400}
-An HTTP `400 Bad Request` status code is returned if the request misses required
-attributes or if it is otherwise malformed.
-
-@RESTRETURNCODE{401}
-An HTTP `401 Unauthorized` status code is returned if the user credentials are
-incorrect.
-
-@RESTRETURNCODE{404}
-An HTTP `404 Not Found` status code is returned if the server has authentication
-disabled and the endpoint is thus not available.
-
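-For illustration, obtaining a token could look like this sketch (host and
-credentials are placeholders):
-
-```
-shell> curl -X POST http://localhost:8529/_open/auth -d '{"username": "root", "password": "myPassword"}'
-```
-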
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Batch Requests/post_api_batch.md b/Documentation/DocuBlocks/Rest/Batch Requests/post_api_batch.md
deleted file mode 100644
index e36657b30d3e..000000000000
--- a/Documentation/DocuBlocks/Rest/Batch Requests/post_api_batch.md
+++ /dev/null
@@ -1,141 +0,0 @@
-
-@startDocuBlock post_api_batch
-@brief executes a batch request
-
-@RESTHEADER{POST /_api/batch, Executes a batch request, executeBatchRequest}
-
-@RESTALLBODYPARAM{body,string,required}
-The multipart batch request, consisting of the envelope and the individual
-batch parts.
-
-@RESTDESCRIPTION
-Executes a batch request. A batch request can contain any number of
-other requests that can be sent to ArangoDB in isolation. The benefit of
-using batch requests is that batching requests requires fewer client/server
-roundtrips than sending isolated requests.
-
-All parts of a batch request are executed serially on the server. The
-server will return the results of all parts in a single response when all
-parts are finished.
-
-Technically, a batch request is a multipart HTTP request, with
-content-type `multipart/form-data`. A batch request consists of an
-envelope and the individual batch part actions. Batch part actions
-are "regular" HTTP requests, including full header and an optional body.
-Multiple batch parts are separated by a boundary identifier. The
-boundary identifier is declared in the batch envelope. The MIME content-type
-for each individual batch part must be `application/x-arango-batchpart`.
-
-Please note that when constructing the individual batch parts, you must
-use CRLF (`\r\n`) as the line terminator as in regular HTTP messages.
-
-The response sent by the server will be an `HTTP 200` response, with an
-optional error summary header `x-arango-errors`. This header contains the
-number of batch part operations that failed with an HTTP error code of at
-least 400. This header is only present in the response if the number of
-errors is greater than zero.
-
-The response sent by the server is a multipart response, too. It contains
-the individual HTTP responses for all batch parts, including the full HTTP
-result header (with status code and other potential headers) and an
-optional result body. The individual batch parts in the result are
-separated using the same boundary value as specified in the request.
-
-The order of batch parts in the response will be the same as in the
-original client request. Clients can additionally use the `Content-Id`
-MIME header in a batch part to define an individual id for each batch part.
-The server will return this id in the batch part responses, too.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the batch was received successfully. HTTP 200 is returned
-even if one or multiple batch part actions failed.
-
-@RESTRETURNCODE{400}
-is returned if the batch envelope is malformed or incorrectly formatted.
-This code will also be returned if the content-type of the overall batch
-request or the individual MIME parts is not as expected.
-
-@RESTRETURNCODE{405}
-is returned when an invalid HTTP method is used.
-
-@EXAMPLES
-
-Sending a batch request with five batch parts:
-
-- GET /_api/version
-
-- DELETE /_api/collection/products
-
-- POST /_api/collection/products
-
-- GET /_api/collection/products/figures
-
-- DELETE /_api/collection/products
-
-The boundary (`SomeBoundaryValue`) is passed to the server in the HTTP
-`Content-Type` HTTP header.
-*Please note that the reply is not displayed fully accurately.*
-
-@EXAMPLE_ARANGOSH_RUN{RestBatchMultipartHeader}
- var parts = [
- "Content-Type: application/x-arango-batchpart\r\n" +
- "Content-Id: myId1\r\n\r\n" +
- "GET /_api/version HTTP/1.1\r\n",
-
- "Content-Type: application/x-arango-batchpart\r\n" +
- "Content-Id: myId2\r\n\r\n" +
- "DELETE /_api/collection/products HTTP/1.1\r\n",
-
- "Content-Type: application/x-arango-batchpart\r\n" +
- "Content-Id: someId\r\n\r\n" +
- "POST /_api/collection/products HTTP/1.1\r\n\r\n" +
- "{\"name\": \"products\" }\r\n",
-
- "Content-Type: application/x-arango-batchpart\r\n" +
- "Content-Id: nextId\r\n\r\n" +
- "GET /_api/collection/products/figures HTTP/1.1\r\n",
-
- "Content-Type: application/x-arango-batchpart\r\n" +
- "Content-Id: otherId\r\n\r\n" +
- "DELETE /_api/collection/products HTTP/1.1\r\n"
- ];
- var boundary = "SomeBoundaryValue";
- var headers = { "Content-Type" : "multipart/form-data; boundary=" +
- boundary };
- var body = "--" + boundary + "\r\n" +
- parts.join("\r\n" + "--" + boundary + "\r\n") +
- "--" + boundary + "--\r\n";
-
- var response = logCurlRequestPlain('POST', '/_api/batch', body, headers);
-
- assert(response.code === 200);
-
- logPlainResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Sending a batch request, setting the boundary implicitly (the server will
-in this case try to find the boundary at the beginning of the request body).
-
-@EXAMPLE_ARANGOSH_RUN{RestBatchImplicitBoundary}
- var parts = [
- "Content-Type: application/x-arango-batchpart\r\n\r\n" +
- "DELETE /_api/collection/nonexistent1 HTTP/1.1\r\n",
- "Content-Type: application/x-arango-batchpart\r\n\r\n" +
- "DELETE _api/collection/nonexistent2 HTTP/1.1\r\n"
- ];
- var boundary = "SomeBoundaryValue";
- var body = "--" + boundary + "\r\n" +
- parts.join("\r\n" + "--" + boundary + "\r\n") +
- "--" + boundary + "--\r\n";
-
- var response = logCurlRequestPlain('POST', '/_api/batch', body);
-
- assert(response.code === 200);
- assert(response.headers['x-arango-errors'] == 2);
-
- logPlainResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/1_structs.md b/Documentation/DocuBlocks/Rest/Cluster/1_structs.md
deleted file mode 100644
index ca5465a5d6f3..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/1_structs.md
+++ /dev/null
@@ -1,104 +0,0 @@
-@RESTSTRUCT{version,rebalance_compute,number,required,}
-Must be set to `1`.
-
-@RESTSTRUCT{maximumNumberOfMoves,rebalance_compute,number,optional,}
-Maximum number of moves to be computed. (Default: `1000`)
-
-@RESTSTRUCT{leaderChanges,rebalance_compute,boolean,optional,}
-Allow leader changes without moving data. (Default: `true`)
-
-@RESTSTRUCT{moveLeaders,rebalance_compute,boolean,optional,}
-Allow moving leaders. (Default: `false`)
-
-@RESTSTRUCT{moveFollowers,rebalance_compute,boolean,optional,}
-Allow moving followers. (Default: `false`)
-
-@RESTSTRUCT{excludeSystemCollections,rebalance_compute,boolean,optional,}
-Remove system collections from the rebalance plan. (Default: `false`)
-
-@RESTSTRUCT{piFactor,rebalance_compute,number,optional,}
-(Default: `256e6`)
-
-@RESTSTRUCT{databasesExcluded,rebalance_compute,array,optional,string}
-A list of database names to exclude from the analysis. (Default: `[]`)
-
-@RESTSTRUCT{leader,rebalance_imbalance,object,required,leader_imbalance_struct}
-Information about the leader imbalance.
-
-@RESTSTRUCT{weightUsed,leader_imbalance_struct,array,required,integer}
-The weight of leader shards per DB-Server. A leader has a weight of 1 by default
-but it is higher if collections can only be moved together because of
-`distributeShardsLike`.
-
-@RESTSTRUCT{targetWeight,leader_imbalance_struct,array,required,integer}
-The ideal weight of leader shards per DB-Server.
-
-@RESTSTRUCT{numberShards,leader_imbalance_struct,array,required,integer}
-The number of leader shards per DB-Server.
-
-@RESTSTRUCT{leaderDupl,leader_imbalance_struct,array,required,integer}
-The measure of the leader shard distribution. The higher the number, the worse
-the distribution.
-
-@RESTSTRUCT{totalWeight,leader_imbalance_struct,integer,required,}
-The sum of all weights.
-
-@RESTSTRUCT{imbalance,leader_imbalance_struct,integer,required,}
-The measure of the total imbalance. A high value indicates a high imbalance.
-
-@RESTSTRUCT{totalShards,leader_imbalance_struct,integer,required,}
-The sum of shards, counting leader shards only.
-
-@RESTSTRUCT{shards,rebalance_imbalance,object,required,shard_imbalance_struct}
-Information about the shard imbalance.
-
-@RESTSTRUCT{sizeUsed,shard_imbalance_struct,array,required,integer}
-The size of shards per DB-Server.
-
-@RESTSTRUCT{targetSize,shard_imbalance_struct,array,required,integer}
-The ideal size of shards per DB-Server.
-
-@RESTSTRUCT{numberShards,shard_imbalance_struct,array,required,integer}
-The number of leader and follower shards per DB-Server.
-
-@RESTSTRUCT{totalUsed,shard_imbalance_struct,integer,required,}
-The sum of the sizes.
-
-@RESTSTRUCT{totalShards,shard_imbalance_struct,integer,required,}
-The sum of shards, counting leader and follower shards.
-
-@RESTSTRUCT{imbalance,shard_imbalance_struct,integer,required,}
-The measure of the total imbalance. A high value indicates a high imbalance.
-
-@RESTSTRUCT{from,move_shard_operation,string,required,}
-The server name from which to move.
-
-@RESTSTRUCT{to,move_shard_operation,string,required,}
-The ID of the destination server.
-
-@RESTSTRUCT{shard,move_shard_operation,string,required,}
-Shard ID of the shard to be moved.
-
-@RESTSTRUCT{collection,move_shard_operation,number,required,}
-Collection ID of the collection the shard belongs to.
-
-@RESTSTRUCT{isLeader,move_shard_operation,boolean,required,}
-True if this is a leader move shard operation.
-
-@RESTSTRUCT{code,rebalance_moves,number,required,}
-The status code.
-
-@RESTSTRUCT{error,rebalance_moves,boolean,required,}
-Whether an error occurred. `false` in this case.
-
-@RESTSTRUCT{result,rebalance_moves,object,required,rebalance_result}
-The result object.
-
-@RESTSTRUCT{imbalanceBefore,rebalance_result,object,required,rebalance_imbalance}
-Imbalance before the suggested move shard operations are applied.
-
-@RESTSTRUCT{imbalanceAfter,rebalance_result,object,required,rebalance_imbalance}
-Expected imbalance after the suggested move shard operations are applied.
-
-@RESTSTRUCT{moves,rebalance_result,array,required,move_shard_operation}
-The suggested move shard operations.
diff --git a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_health.md b/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_health.md
deleted file mode 100644
index b7a4d22943cc..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_health.md
+++ /dev/null
@@ -1,42 +0,0 @@
-
-@startDocuBlock get_admin_cluster_health
-@brief Returns the health of the cluster as assessed by the supervision (Agency)
-
-@RESTHEADER{GET /_admin/cluster/health, Queries the health of cluster for monitoring, getClusterHealth}
-
-@RESTDESCRIPTION
-Queries the health of the cluster for monitoring purposes. The response is a JSON object, containing the standard `code`, `error`, `errorNum`, and `errorMessage` fields as appropriate. The endpoint-specific fields are as follows:
-
-- `ClusterId`: A UUID string identifying the cluster
-- `Health`: An object containing a descriptive sub-object for each node in the cluster.
- - Each entry in `Health` is keyed by the node ID and contains the following attributes:
- - `Endpoint`: A string representing the network endpoint of the server.
- - `Role`: The role the server plays. Possible values are `"AGENT"`, `"COORDINATOR"`, and `"DBSERVER"`.
- - `CanBeDeleted`: Boolean representing whether the node can safely be removed from the cluster.
- - `Version`: Version String of ArangoDB used by that node.
- - `Engine`: Storage Engine used by that node.
- - `Status`: A string indicating the health of the node as assessed by the supervision (Agency). This should be considered primary source of truth for Coordinator and DB-Servers node health. If the node is responding normally to requests, it is `"GOOD"`. If it has missed one heartbeat, it is `"BAD"`. If it has been declared failed by the supervision, which occurs after missing heartbeats for about 15 seconds, it will be marked `"FAILED"`.
-
- Additionally, each entry has the following attributes, depending on the node's role:
-
- **Coordinators** and **DB-Servers**
- - `SyncStatus`: The last sync status reported by the node. This value is primarily used to determine the value of `Status`. Possible values include `"UNKNOWN"`, `"UNDEFINED"`, `"STARTUP"`, `"STOPPING"`, `"STOPPED"`, `"SERVING"`, `"SHUTDOWN"`.
- - `LastAckedTime`: ISO 8601 timestamp specifying the last heartbeat received.
- - `ShortName`: A string representing the shortname of the server, e.g. `"Coordinator0001"`.
- - `Timestamp`: ISO 8601 timestamp specifying the last heartbeat received. (deprecated)
- - `Host`: An optional string, specifying the host machine if known.
-
- **Coordinators** only
- - `AdvertisedEndpoint`: A string representing the advertised endpoint, if set. (e.g. external IP address or load balancer, optional)
-
- **Agents**
- - `Leader`: ID of the Agent this node regards as leader.
- - `Leading`: Whether this Agent is the leader (true) or not (false).
- - `LastAckedTime`: Time since last `acked` in seconds.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned when everything went well.
-
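-For illustration, a monitoring query could look like this (placeholder host;
-authenticate as required by your setup):
-
-```
-shell> curl http://localhost:8529/_admin/cluster/health
-```
-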
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_maintenance_dbserver.md b/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_maintenance_dbserver.md
deleted file mode 100644
index fabaa24711bf..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_maintenance_dbserver.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-@startDocuBlock get_admin_cluster_maintenance_dbserver
-@brief Check what the maintenance status of a DB-Server is
-
-@RESTHEADER{GET /_admin/cluster/maintenance/{DB-Server-ID}, Query the maintenance status of a DB-Server, getDbserverMaintenance}
-
-@RESTDESCRIPTION
-Check whether the specified DB-Server is in maintenance mode and until when.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{DB-Server-ID,string,required}
-The ID of a DB-Server.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-The request was successful.
-
-@RESTREPLYBODY{error,boolean,required,}
-Whether an error occurred. `false` in this case.
-
-@RESTREPLYBODY{code,integer,required,}
-The status code. `200` in this case.
-
-@RESTREPLYBODY{result,object,optional,get_cluster_maintenance_dbserver_result}
-The result object with the status. This attribute is omitted if the DB-Server
-is in normal mode.
-
-@RESTSTRUCT{Mode,get_cluster_maintenance_dbserver_result,string,required,}
-The mode of the DB-Server. The value is `"maintenance"`.
-
-@RESTSTRUCT{Until,get_cluster_maintenance_dbserver_result,string,required,dateTime}
-Until what date and time the maintenance mode currently lasts, in the
-ISO 8601 date/time format.
-
-@RESTRETURNCODE{400}
-if the request contained an invalid body
-
-@RESTRETURNCODE{412}
-if the request was sent to an Agent node
-
-@RESTRETURNCODE{504}
-if the request timed out while enabling the maintenance mode
-
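-For illustration, with a hypothetical DB-Server ID `PRMR-0001`, a status query
-could look like this (placeholder host):
-
-```
-shell> curl http://localhost:8529/_admin/cluster/maintenance/PRMR-0001
-```
-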
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_rebalance.md b/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_rebalance.md
deleted file mode 100644
index 136d0cfc00e7..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_rebalance.md
+++ /dev/null
@@ -1,36 +0,0 @@
-@startDocuBlock get_admin_cluster_rebalance
-@brief Computes the current cluster imbalance.
-
-@RESTHEADER{GET /_admin/cluster/rebalance, Compute the current cluster imbalance, getClusterImbalance}
-
-@RESTDESCRIPTION
-Computes the current cluster imbalance and returns the result.
-It additionally shows the amount of ongoing and pending move shard operations.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-This API returns HTTP 200.
-
-@RESTREPLYBODY{code,number,required,}
-The status code.
-
-@RESTREPLYBODY{error,boolean,required,}
-Whether an error occurred. `false` in this case.
-
-@RESTREPLYBODY{result,object,required,get_admin_cluster_rebalance_result}
-The result object.
-
-@RESTSTRUCT{leader,get_admin_cluster_rebalance_result,object,required,leader_imbalance_struct}
-Information about the leader imbalance.
-
-@RESTSTRUCT{shards,get_admin_cluster_rebalance_result,object,required,shard_imbalance_struct}
-Information about the shard imbalance.
-
-@RESTREPLYBODY{pendingMoveShards,number,required,}
-The number of pending move shard operations.
-
-@RESTREPLYBODY{todoMoveShards,number,required,}
-The number of planned move shard operations.
-
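-For illustration, a query could look like this (placeholder host):
-
-```
-shell> curl http://localhost:8529/_admin/cluster/rebalance
-```
-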
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_statistics.md b/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_statistics.md
deleted file mode 100644
index 4a3be93aa384..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/get_admin_cluster_statistics.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-@startDocuBlock get_admin_cluster_statistics
-@brief allows querying the statistics of a DB-Server in the cluster
-
-@RESTHEADER{GET /_admin/cluster/statistics, Queries statistics of a DB-Server, getClusterStatistics}
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{DBserver,string,required}
-The ID of a DB-Server.
-
-@RESTDESCRIPTION
-Queries the statistics of the given DB-Server.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned when everything went well.
-
-@RESTRETURNCODE{400}
-The `DBserver` parameter was not specified or is not the ID of a DB-Server.
-
-@RESTRETURNCODE{403}
-The specified server is not a DB-Server.
-
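-For illustration, with a hypothetical DB-Server ID `PRMR-0001`, a request could
-look like this (placeholder host):
-
-```
-shell> curl "http://localhost:8529/_admin/cluster/statistics?DBserver=PRMR-0001"
-```
-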
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/get_admin_server_id.md b/Documentation/DocuBlocks/Rest/Cluster/get_admin_server_id.md
deleted file mode 100644
index 0a01a8eea8eb..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/get_admin_server_id.md
+++ /dev/null
@@ -1,18 +0,0 @@
-@startDocuBlock get_admin_server_id
-@brief Get the internal ID of the server
-
-@RESTHEADER{GET /_admin/server/id, Return the ID of a server in a cluster, getServerId}
-
-@RESTDESCRIPTION
-Returns the ID of a server in a cluster. The request will fail if the
-server is not running in cluster mode.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Is returned when the server is running in cluster mode.
-
-@RESTRETURNCODE{500}
-Is returned when the server is not running in cluster mode.
-
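-For illustration, a request could look like this (placeholder host):
-
-```
-shell> curl http://localhost:8529/_admin/server/id
-```
-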
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/get_admin_server_role.md b/Documentation/DocuBlocks/Rest/Cluster/get_admin_server_role.md
deleted file mode 100644
index 230f2e9eaa79..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/get_admin_server_role.md
+++ /dev/null
@@ -1,36 +0,0 @@
-
-@startDocuBlock get_admin_server_role
-@brief Return the role of a server in a cluster
-
-@RESTHEADER{GET /_admin/server/role, Return the role of a server in a cluster, getServerRole}
-
-@RESTDESCRIPTION
-Returns the role of a server in a cluster.
-The role is returned in the *role* attribute of the result.
-Possible return values for *role* are:
-- *SINGLE*: the server is a standalone server without clustering
-- *COORDINATOR*: the server is a Coordinator in a cluster
-- *PRIMARY*: the server is a DB-Server in a cluster
-- *SECONDARY*: this role is not used anymore
-- *AGENT*: the server is an Agency node in a cluster
-- *UNDEFINED*: in a cluster, *UNDEFINED* is returned if the server role cannot be
- determined.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Is returned in all cases.
-
-@RESTREPLYBODY{error,boolean,required,}
-always *false*
-
-@RESTREPLYBODY{code,integer,required,int64}
-the HTTP status code, always 200
-
-@RESTREPLYBODY{errorNum,integer,required,int64}
-the server error number
-
-@RESTREPLYBODY{role,string,required,string}
-one of [ *SINGLE*, *COORDINATOR*, *PRIMARY*, *SECONDARY*, *AGENT*, *UNDEFINED*]
-
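-For illustration, a request could look like this (placeholder host):
-
-```
-shell> curl http://localhost:8529/_admin/server/role
-```
-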
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/get_api_cluster_endpoints.md b/Documentation/DocuBlocks/Rest/Cluster/get_api_cluster_endpoints.md
deleted file mode 100644
index 6fecd052803e..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/get_api_cluster_endpoints.md
+++ /dev/null
@@ -1,34 +0,0 @@
-@startDocuBlock get_api_cluster_endpoints
-@brief This API call returns information about all Coordinator endpoints (cluster only).
-
-@RESTHEADER{GET /_api/cluster/endpoints, Get information about all Coordinator endpoints, listClusterEndpoints}
-
-@RESTDESCRIPTION
-Returns an object with an attribute `endpoints`, which contains an
-array of objects, each of which has the attribute `endpoint`, whose value
-is a string with the endpoint description. There is an entry for each
-Coordinator in the cluster. This method only works on Coordinators in
-cluster mode. In case of an error the `error` attribute is set to
-`true`.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned when everything went well.
-
-@RESTREPLYBODY{error,boolean,required,}
-boolean flag to indicate whether an error occurred (*false* in this case)
-
-@RESTREPLYBODY{code,integer,required,int64}
-the HTTP status code - 200
-
-@RESTREPLYBODY{endpoints,array,required,cluster_endpoints_struct}
-A list of active cluster endpoints.
-
-@RESTSTRUCT{endpoint,cluster_endpoints_struct,string,required,}
-The bind address of the Coordinator, like `tcp://[::1]:8530`
-
-@RESTRETURNCODE{501}
-server is not a Coordinator or method was not GET.
-
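-For illustration, a request against a Coordinator could look like this
-(placeholder host):
-
-```
-shell> curl http://localhost:8529/_api/cluster/endpoints
-```
-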
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/post_admin_cluster_rebalance.md b/Documentation/DocuBlocks/Rest/Cluster/post_admin_cluster_rebalance.md
deleted file mode 100644
index 5c70f9c8957d..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/post_admin_cluster_rebalance.md
+++ /dev/null
@@ -1,21 +0,0 @@
-
-@startDocuBlock post_admin_cluster_rebalance
-@brief Compute a set of move shard operations to improve balance.
-
-@RESTHEADER{POST /_admin/cluster/rebalance, Compute a set of move shard operations to improve balance, computeClusterRebalancePlan}
-
-@RESTBODYPARAM{,object,required,rebalance_compute}
-The options for the rebalance plan.
-
-@RESTDESCRIPTION
-Compute a set of move shard operations to improve balance.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-This API returns HTTP 200.
-
-@RESTREPLYBODY{,object,required,rebalance_moves}
-The rebalance plan.
-
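-For illustration, a request computing up to 100 moves could look like this
-sketch (placeholder host; the option values are examples only):
-
-```
-shell> curl -X POST http://localhost:8529/_admin/cluster/rebalance -d '{"version": 1, "maximumNumberOfMoves": 100, "moveLeaders": true}'
-```
-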
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/post_admin_cluster_rebalance_execute.md b/Documentation/DocuBlocks/Rest/Cluster/post_admin_cluster_rebalance_execute.md
deleted file mode 100644
index 0bcdeb9ba720..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/post_admin_cluster_rebalance_execute.md
+++ /dev/null
@@ -1,26 +0,0 @@
-
-@startDocuBlock post_admin_cluster_rebalance_execute
-@brief Executes the given set of move shard operations.
-
-@RESTHEADER{POST /_admin/cluster/rebalance/execute, Execute a set of move shard operations, executeClusterRebalancePlan}
-
-@RESTDESCRIPTION
-Execute the given set of move shard operations. You can use the
-`POST /_admin/cluster/rebalance` endpoint to calculate these operations to improve
-the balance of shards, leader shards, and follower shards.
-
-@RESTBODYPARAM{version,number,required,}
-Must be set to `1`.
-
-@RESTBODYPARAM{moves,array,required,move_shard_operation}
-A set of move shard operations to execute.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-This API returns HTTP 200 if no operations are provided.
-
-@RESTRETURNCODE{202}
-This API returns HTTP 202 if the operations have been accepted and scheduled for execution.
-
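-For illustration, executing a single move operation could look like this sketch
-(placeholder host; the server, shard, and collection IDs are made up):
-
-```
-shell> curl -X POST http://localhost:8529/_admin/cluster/rebalance/execute \
-  -d '{"version": 1, "moves": [{"from": "PRMR-0001", "to": "PRMR-0002", "shard": "s1234", "collection": 9876, "isLeader": true}]}'
-```
-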
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_maintenance.md b/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_maintenance.md
deleted file mode 100644
index 2cd9b06d1ba1..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_maintenance.md
+++ /dev/null
@@ -1,39 +0,0 @@
-
-@startDocuBlock put_admin_cluster_maintenance
-@brief Enable or disable the cluster supervision (Agency) maintenance mode
-
-@RESTHEADER{PUT /_admin/cluster/maintenance, Enable or disable the supervision maintenance mode, setClusterMaintenance}
-
-@RESTDESCRIPTION
-This API allows you to temporarily enable the supervision maintenance mode. Please be
-aware that no automatic failovers of any kind will take place while the maintenance
-mode is enabled. The cluster supervision reactivates itself automatically once the
-maintenance period expires.
-
-To enable the maintenance mode the request body must contain the string `"on"`
-(Please note it _must_ be lowercase as well as include the quotes). This will enable the
-maintenance mode for 60 minutes, i.e. the supervision maintenance will reactivate itself
-after 60 minutes.
-
-Since ArangoDB 3.8.3, it is possible to enable the maintenance mode for a different
-duration than 60 minutes. To do so, send the desired duration value (in seconds)
-as a string in the request body. For example, sending `"7200"`
-(including the quotes) enables the maintenance mode for 7200 seconds, i.e. 2 hours.
-
-To disable the maintenance mode the request body must contain the string `"off"`
-(Please note it _must_ be lowercase as well as include the quotes).
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned when everything went well.
-
-@RESTRETURNCODE{400}
-if the request contained an invalid body
-
-@RESTRETURNCODE{501}
-if the request was sent to a node other than a Coordinator or single-server
-
-@RESTRETURNCODE{504}
-if the request timed out while enabling the maintenance mode
-
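-For illustration, enabling the maintenance mode for two hours could look like
-this sketch (placeholder host):
-
-```
-shell> curl -X PUT http://localhost:8529/_admin/cluster/maintenance -d '"7200"'
-```
-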
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_maintenance_dbserver.md b/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_maintenance_dbserver.md
deleted file mode 100644
index 18c0824671f0..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_maintenance_dbserver.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-@startDocuBlock put_admin_cluster_maintenance_dbserver
-@brief Enable or disable the maintenance mode of a DB-Server
-
-@RESTHEADER{PUT /_admin/cluster/maintenance/{DB-Server-ID}, Enable or disable the DB-Server maintenance mode, setDbserverMaintenance}
-
-@RESTDESCRIPTION
-For rolling upgrades or rolling restarts, DB-Servers can be put into
-maintenance mode, so that no attempts are made to re-distribute the data in a
-cluster for such planned events. DB-Servers in maintenance mode are not
-considered viable failover targets because they are likely to be restarted soon.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{DB-Server-ID,string,required}
-The ID of a DB-Server.
-
-@RESTBODYPARAM{mode,string,required,}
-The mode to put the DB-Server in. Possible values:
-- `"maintenance"`
-- `"normal"`
-
-@RESTBODYPARAM{timeout,integer,optional,}
-After how many seconds the maintenance mode shall automatically end.
-You can send another request when the DB-Server is already in maintenance mode
-to extend the timeout.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-The request was successful.
-
-@RESTREPLYBODY{error,boolean,required,}
-Whether an error occurred. `false` in this case.
-
-@RESTREPLYBODY{code,integer,required,}
-The status code. `200` in this case.
-
-@RESTRETURNCODE{400}
-if the request contained an invalid body
-
-@RESTRETURNCODE{412}
-if the request was sent to an Agency node
-
-@RESTRETURNCODE{504}
-if the request timed out while enabling the maintenance mode
-
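-For illustration, with a hypothetical DB-Server ID `PRMR-0001`, putting the
-server into maintenance mode for six minutes could look like this sketch
-(placeholder host):
-
-```
-shell> curl -X PUT http://localhost:8529/_admin/cluster/maintenance/PRMR-0001 -d '{"mode": "maintenance", "timeout": 360}'
-```
-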
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_rebalance.md b/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_rebalance.md
deleted file mode 100644
index e48d24fb7c4c..000000000000
--- a/Documentation/DocuBlocks/Rest/Cluster/put_admin_cluster_rebalance.md
+++ /dev/null
@@ -1,22 +0,0 @@
-
-@startDocuBlock put_admin_cluster_rebalance
-@brief Computes and executes a set of move shard operations to improve balance.
-
-@RESTHEADER{PUT /_admin/cluster/rebalance, Compute and execute a set of move shard operations to improve balance, startClusterRebalance}
-
-@RESTBODYPARAM{,object,required,rebalance_compute}
-The options for the rebalancing.
-
-@RESTDESCRIPTION
-Compute a set of move shard operations to improve balance.
-These moves are then immediately executed.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-This API returns HTTP 200.
-
-@RESTREPLYBODY{,object,required,rebalance_moves}
-The executed move shard operations.
-
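-For illustration, computing and immediately executing moves could look like
-this sketch (placeholder host; the option values are examples only):
-
-```
-shell> curl -X PUT http://localhost:8529/_admin/cluster/rebalance -d '{"version": 1, "moveLeaders": true, "moveFollowers": true}'
-```
-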
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/1_structs.md b/Documentation/DocuBlocks/Rest/Collections/1_structs.md
deleted file mode 100644
index 2da79911c8aa..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/1_structs.md
+++ /dev/null
@@ -1,141 +0,0 @@
-@RESTSTRUCT{waitForSync,collection_info,boolean,required,}
-If `true`, creating, changing, or removing
-documents waits until the data has been synchronized to disk.
-
-@RESTSTRUCT{schema,collection_info,object,optional,}
-An object that specifies the collection-level schema for documents.
-
-@RESTSTRUCT{computedValues,collection_info,array,optional,computed_field}
-A list of objects, each representing a computed value.
-
-@RESTSTRUCT{name,computed_field,string,required,}
-The name of the target attribute.
-
-@RESTSTRUCT{expression,computed_field,string,required,}
-An AQL `RETURN` operation with an expression that computes the desired value.
-
-@RESTSTRUCT{overwrite,computed_field,boolean,required,}
-Whether the computed value takes precedence over a user-provided or
-existing attribute.
-
-@RESTSTRUCT{computeOn,computed_field,array,optional,string}
-An array of strings that defines on which write operations the value is
-computed. The possible values are `"insert"`, `"update"`, and `"replace"`.
-
-@RESTSTRUCT{keepNull,computed_field,boolean,optional,}
-Whether the target attribute is set if the expression evaluates to `null`.
-
-@RESTSTRUCT{failOnWarning,computed_field,boolean,optional,}
-Whether the write operation fails if the expression produces a warning.
-
-@RESTSTRUCT{keyOptions,collection_info,object,required,key_generator_type}
-An object which contains key generation options.
-
-@RESTSTRUCT{type,key_generator_type,string,required,}
-Specifies the type of the key generator. Possible values:
-- `"traditional"`
-- `"autoincrement"`
-- `"uuid"`
-- `"padded"`
-
-@RESTSTRUCT{allowUserKeys,key_generator_type,boolean,required,}
-If set to `true`, then you are allowed to supply
-your own key values in the `_key` attribute of a document. If set to
-`false`, then the key generator is solely responsible for
-generating keys and an error is raised if you supply your own key values in the
-`_key` attribute of documents.
-
-@RESTSTRUCT{increment,key_generator_type,integer,optional,}
-The increment value for the `autoincrement` key generator.
-Not used for other key generator types.
-
-@RESTSTRUCT{offset,key_generator_type,integer,optional,}
-The initial offset value for the `autoincrement` key generator.
-Not used for other key generator types.
-
-@RESTSTRUCT{lastValue,key_generator_type,integer,required,}
-The current offset value of the `autoincrement` or `padded` key generator.
-This is an internal property for restoring dumps properly.
-
-@RESTSTRUCT{cacheEnabled,collection_info,boolean,required,}
-Whether the in-memory hash cache for documents is enabled for this
-collection.
-
-@RESTSTRUCT{numberOfShards,collection_info,integer,optional,}
-The number of shards of the collection. _(cluster only)_
-
-@RESTSTRUCT{shardKeys,collection_info,array,optional,string}
-Contains the names of document attributes that are used to
-determine the target shard for documents. _(cluster only)_
-
-@RESTSTRUCT{replicationFactor,collection_info,integer,optional,}
-Contains how many copies of each shard are kept on different DB-Servers.
-It is an integer number in the range of 1-10 or the string `"satellite"`
-for SatelliteCollections (Enterprise Edition only). _(cluster only)_
-
-@RESTSTRUCT{writeConcern,collection_info,integer,optional,}
-Determines how many copies of each shard are required to be
-in-sync on the different DB-Servers. If there are fewer than this many copies
-in the cluster, a shard refuses to write. Writes to shards with enough
-up-to-date copies succeed at the same time, however. The value of
-`writeConcern` cannot be greater than `replicationFactor`.
-For SatelliteCollections, the `writeConcern` is automatically controlled to
-equal the number of DB-Servers and has a value of `0`. _(cluster only)_
-
-@RESTSTRUCT{shardingStrategy,collection_info,string,optional,}
-The sharding strategy selected for the collection. _(cluster only)_
-
-Possible values:
-- `"community-compat"`
-- `"enterprise-compat"`
-- `"enterprise-smart-edge-compat"`
-- `"hash"`
-- `"enterprise-hash-smart-edge"`
-- `"enterprise-hex-smart-vertex"`
-
-@RESTSTRUCT{distributeShardsLike,collection_info,string,optional,string}
-The name of another collection. This collection uses the `replicationFactor`,
-`numberOfShards` and `shardingStrategy` properties of the other collection and
-the shards of this collection are distributed in the same way as the shards of
-the other collection.
-
-@RESTSTRUCT{isSmart,collection_info,boolean,optional,}
-Whether the collection is used in a SmartGraph or EnterpriseGraph (Enterprise Edition only).
-This is an internal property. _(cluster only)_
-
-@RESTSTRUCT{isDisjoint,collection_info,boolean,optional,}
-Whether the SmartGraph or EnterpriseGraph this collection belongs to is disjoint
-(Enterprise Edition only). This is an internal property. _(cluster only)_
-
-@RESTSTRUCT{smartGraphAttribute,collection_info,string,optional,}
-The attribute that is used for sharding: vertices with the same value of
-this attribute are placed in the same shard. All vertices are required to
-have this attribute set and it has to be a string. Edges derive the
-attribute from their connected vertices (Enterprise Edition only). _(cluster only)_
-
-@RESTSTRUCT{smartJoinAttribute,collection_info,string,optional,}
-Determines an attribute of the collection that must contain the shard key value
-of the referred-to SmartJoin collection (Enterprise Edition only). _(cluster only)_
-
-@RESTSTRUCT{name,collection_info,string,optional,}
-The name of this collection.
-
-@RESTSTRUCT{id,collection_info,string,optional,}
-A unique identifier of the collection (deprecated).
-
-@RESTSTRUCT{type,collection_info,integer,optional,}
-The type of the collection:
- - `0`: "unknown"
- - `2`: regular document collection
- - `3`: edge collection
-
-@RESTSTRUCT{isSystem,collection_info,boolean,optional,}
-Whether the collection is a system collection. Collection names that start with
-an underscore are usually system collections.
-
-@RESTSTRUCT{syncByRevision,collection_info,boolean,required,}
-Whether the newer revision-based replication protocol is
-enabled for this collection. This is an internal property.
-
-@RESTSTRUCT{globallyUniqueId,collection_info,string,optional,}
-A unique identifier of the collection. This is an internal property.
diff --git a/Documentation/DocuBlocks/Rest/Collections/delete_api_collection_collection.md b/Documentation/DocuBlocks/Rest/Collections/delete_api_collection_collection.md
deleted file mode 100644
index 853cbe0f2eb9..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/delete_api_collection_collection.md
+++ /dev/null
@@ -1,92 +0,0 @@
-
-@startDocuBlock delete_api_collection_collection
-@brief drops a collection
-
-@RESTHEADER{DELETE /_api/collection/{collection-name}, Drops a collection, deleteCollection}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection to drop.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{isSystem,boolean,optional}
-Whether or not the collection to drop is a system collection. This parameter
-must be set to *true* in order to drop a system collection.
-
-@RESTDESCRIPTION
-Drops the collection identified by *collection-name*.
-
-If the collection was successfully dropped, an object is returned with
-the following attributes:
-
-- *error*: *false*
-
-- *id*: The identifier of the dropped collection.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404* is returned.
-
-@EXAMPLES
-
-Using an identifier:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionDeleteCollectionIdentifier}
- var cn = "products1";
- var coll = db._create(cn, { waitForSync: true });
- var url = "/_api/collection/"+ coll._id;
-
- var response = logCurlRequest('DELETE', url);
- db[cn] = undefined;
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Using a name:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionDeleteCollectionName}
- var cn = "products1";
- db._drop(cn);
- db._create(cn);
- var url = "/_api/collection/products1";
-
- var response = logCurlRequest('DELETE', url);
- db[cn] = undefined;
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Dropping a system collection:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionDeleteCollectionSystem}
- var cn = "_example";
- db._drop(cn, { isSystem: true });
- db._create(cn, { isSystem: true });
- var url = "/_api/collection/_example?isSystem=true";
-
- var response = logCurlRequest('DELETE', url);
- db[cn] = undefined;
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection.md
deleted file mode 100644
index 75721d0ac2a9..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection.md
+++ /dev/null
@@ -1,38 +0,0 @@
-
-@startDocuBlock get_api_collection
-@brief returns all collections
-
-@RESTHEADER{GET /_api/collection, Reads all collections, listCollections}
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{excludeSystem,boolean,optional}
-Whether or not system collections should be excluded from the result.
-
-@RESTDESCRIPTION
-Returns an object with an attribute *result* containing an
-array of all collection descriptions.
-
-If you provide the optional query parameter *excludeSystem* with a value of
-*true*, all system collections are excluded from the response.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-The list of collections
-
-@EXAMPLES
-
-Return information about all collections:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetAllCollections}
- var url = "/_api/collection";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
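-Listing collections while excluding system collections (a minimal sketch, not
-one of the official examples):
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetAllCollectionsNoSystemSketch}
-  var url = "/_api/collection?excludeSystem=true";
-
-  var response = logCurlRequest('GET', url);
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-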
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection.md
deleted file mode 100644
index 5810f9774ec8..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection.md
+++ /dev/null
@@ -1,43 +0,0 @@
-
-@startDocuBlock get_api_collection_collection
-@brief returns a collection
-
-@RESTHEADER{GET /_api/collection/{collection-name}, Return information about a collection, getCollection}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-The result is an object describing the collection with the following
-attributes:
-
-- *id*: The identifier of the collection.
-
-- *name*: The name of the collection.
-
-- *status*: The status of the collection as number.
- - 3: loaded
- - 5: deleted
-
-Every other status indicates a corrupted collection.
-
-- *type*: The type of the collection as number.
- - 2: document collection (normal case)
- - 3: edge collection
-
-- *isSystem*: If *true* then the collection is a system collection.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404* is
-returned.
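-
-@EXAMPLES
-
-Fetching the basic information about a collection (a minimal sketch, assuming
-a collection named `products`; this is not one of the official examples):
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionSketch}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn);
-  var url = "/_api/collection/" + cn;
-
-  var response = logCurlRequest('GET', url);
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-  db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-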
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_checksum.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_checksum.md
deleted file mode 100644
index d37e98118b36..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_checksum.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-@startDocuBlock get_api_collection_collection_checksum
-@brief returns a checksum for the specified collection
-
-@RESTHEADER{GET /_api/collection/{collection-name}/checksum, Return checksum for the collection, getCollectionChecksum}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{withRevisions,boolean,optional}
-Whether or not to include document revision ids in the checksum calculation.
-
-@RESTQUERYPARAM{withData,boolean,optional}
-Whether or not to include document body data in the checksum calculation.
-
-@RESTDESCRIPTION
-Calculates a checksum of the meta-data (keys and optionally revision ids) and
-optionally the document data in the collection.
-
-The checksum can be used to check whether two collections on different ArangoDB
-instances contain the same contents. The current revision of the collection is
-returned, too, so one can make sure the checksums are calculated for the same
-state of data.
-
-By default, the checksum will only be calculated on the *_key* system attribute
-of the documents contained in the collection. For edge collections, the system
-attributes *_from* and *_to* will also be included in the calculation.
-
-If you set the optional query parameter *withRevisions* to *true*, revision
-ids (*_rev* system attributes) are included in the checksum calculation.
-
-By providing the optional query parameter *withData* with a value of *true*,
-the user-defined document attributes will be included in the calculation too.
-**Note**: Including user-defined attributes will make the checksumming slower.
-
-The response is a JSON object with the following attributes:
-
-- *checksum*: The calculated checksum as a number.
-
-- *revision*: The collection revision id as a string.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@EXAMPLES
-
-Retrieving the checksum of a collection:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionChecksum}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn);
- coll.save({ foo: "bar" });
- var url = "/_api/collection/" + coll.name() + "/checksum";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Retrieving the checksum of a collection including the collection data,
-but not the revisions:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionChecksumNoRev}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn);
- coll.save({ foo: "bar" });
- var url = "/_api/collection/" + coll.name() + "/checksum?withRevisions=false&withData=true";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_count.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_count.md
deleted file mode 100644
index afc9a05dd262..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_count.md
+++ /dev/null
@@ -1,54 +0,0 @@
-
-@startDocuBlock get_api_collection_collection_count
-@brief Counts the documents in a collection
-
-@RESTHEADER{GET /_api/collection/{collection-name}/count, Return number of documents in a collection, getCollectionCount}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-In addition to the basic collection information, the result also contains the
-number of documents. **Note** that this always loads the collection into memory.
-
-- *count*: The number of documents inside the collection.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@EXAMPLES
-
-Requesting the number of documents:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionCount}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn, { waitForSync: true });
-  for (var i = 0; i < 100; i++) {
- coll.save({"count" : i });
- }
- var url = "/_api/collection/"+ coll.name() + "/count";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_figures.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_figures.md
deleted file mode 100644
index 5537e7f7f464..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_figures.md
+++ /dev/null
@@ -1,98 +0,0 @@
-
-@startDocuBlock get_api_collection_collection_figures
-@brief Fetch the statistics of a collection
-
-@RESTHEADER{GET /_api/collection/{collection-name}/figures, Return statistics for a collection, getCollectionFigures}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{details,boolean,optional}
-Setting `details` to `true` adds extended storage engine-specific
-details to the figures. The details are intended for debugging ArangoDB itself
-and their format is subject to change. By default, `details` is set to `false`,
-so no details are returned and the behavior is identical to previous versions
-of ArangoDB.
-Please note that requesting `details` may cause additional load and thus have
-an impact on performance.
-
-@RESTDESCRIPTION
-In addition to the basic collection information, the result also contains the
-number of documents and additional statistical information about the collection.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returns information about the collection:
-
-@RESTREPLYBODY{count,integer,required,int64}
-The number of documents currently present in the collection.
-
-@RESTREPLYBODY{figures,object,required,collection_figures}
-The metrics of the collection.
-
-@RESTSTRUCT{indexes,collection_figures,object,required,collection_figures_indexes}
-The index metrics.
-
-@RESTSTRUCT{count,collection_figures_indexes,integer,required,int64}
-The total number of indexes defined for the collection, including the pre-defined
-indexes (e.g. primary index).
-
-@RESTSTRUCT{size,collection_figures_indexes,integer,required,int64}
-The total memory allocated for indexes in bytes.
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@EXAMPLES
-
-Using an identifier and requesting the figures of the collection:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionFigures}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn);
- coll.save({"test":"hello"});
- require("internal").wal.flush(true, true);
- var url = "/_api/collection/"+ coll.name() + "/figures";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionFiguresDetails}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn);
- coll.save({"test":"hello"});
- require("internal").wal.flush(true, true);
- var url = "/_api/collection/"+ coll.name() + "/figures?details=true";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_properties.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_properties.md
deleted file mode 100644
index e4f658413cca..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_properties.md
+++ /dev/null
@@ -1,68 +0,0 @@
-
-@startDocuBlock get_api_collection_collection_properties
-@brief reads the properties of the specified collection
-
-@RESTHEADER{GET /_api/collection/{collection-name}/properties, Read properties of a collection, getCollectionProperties}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@RESTRETURNCODE{200}
-
-@RESTREPLYBODY{,object,required,collection_info}
-
-@RESTDESCRIPTION
-Returns all properties of the specified collection.
-
-@EXAMPLES
-
-Using an identifier:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionIdentifier}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn, { waitForSync: true });
- var url = "/_api/collection/"+ coll._id + "/properties";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Using a name:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionName}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
- var url = "/_api/collection/products/properties";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_revision.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_revision.md
deleted file mode 100644
index 6483830dad88..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_revision.md
+++ /dev/null
@@ -1,52 +0,0 @@
-
-@startDocuBlock get_api_collection_collection_revision
-@brief Retrieve the collection's revision id
-
-@RESTHEADER{GET /_api/collection/{collection-name}/revision, Return collection revision id, getCollectionRevision}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-The response will contain the collection's latest used revision id.
-The revision id is a server-generated string that clients can use to
-check whether data in a collection has changed since the last revision check.
-
-- *revision*: The collection revision id as a string.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@EXAMPLES
-
-Retrieving the revision of a collection:
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionGetCollectionRevision}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn, { waitForSync: false });
- var url = "/_api/collection/"+ coll.name() + "/revision";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_shards.md b/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_shards.md
deleted file mode 100644
index 12a9a6c5b786..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/get_api_collection_collection_shards.md
+++ /dev/null
@@ -1,72 +0,0 @@
-
-@startDocuBlock get_api_collection_collection_shards
-@brief Return the shard ids of a collection
-
-@RESTHEADER{GET /_api/collection/{collection-name}/shards, Return the shard ids of a collection, getCollectionShards}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{details,boolean,optional}
-If set to `true`, the return value also contains the responsible servers for the collection's shards.
-
-@RESTDESCRIPTION
-By default, returns a JSON array with the shard IDs of the collection.
-
-If the `details` parameter is set to `true`, it will return a JSON object with the
-shard IDs as object attribute keys, and the responsible servers for each shard mapped to them.
-In the detailed response, the leader shards will be first in the arrays.
-
-**Note**: This method is only available on a cluster Coordinator.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returns the collection's shards.
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then an *HTTP 404*
-is returned.
-
-@RESTRETURNCODE{501}
-*HTTP 501* is returned if the method is called on a single server.
-
-@EXAMPLES
-
-Retrieves the list of shards:
-
-@EXAMPLE_ARANGOSH_RUN{RestGetShards_cluster}
- var cn = "testCollection";
- db._drop(cn);
- db._create(cn, { numberOfShards: 3 });
-
- var response = logCurlRequest('GET', "/_api/collection/" + cn + "/shards");
-
- assert(response.code === 200);
- logRawResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Retrieves the list of shards with the responsible servers:
-
-@EXAMPLE_ARANGOSH_RUN{RestGetShardsWithDetails_cluster}
- var cn = "testCollection";
- db._drop(cn);
- db._create(cn, { numberOfShards: 3 });
-
- var response = logCurlRequest('GET', "/_api/collection/" + cn + "/shards?details=true");
-
- assert(response.code === 200);
- logRawResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md b/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md
deleted file mode 100644
index 2bf3fc0401ad..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/post_api_collection.md
+++ /dev/null
@@ -1,324 +0,0 @@
-
-@startDocuBlock post_api_collection
-@brief creates a collection
-
-@RESTHEADER{POST /_api/collection, Create collection, createCollection}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTDESCRIPTION
-Creates a new collection with a given name. The request must contain an
-object with the following attributes.
-
-@RESTBODYPARAM{name,string,required,string}
-The name of the collection.
-
-@RESTBODYPARAM{waitForSync,boolean,optional,}
-If `true` then the data is synchronized to disk before returning from a
-document create, update, replace or removal operation. (Default: `false`)
-
-@RESTBODYPARAM{isSystem,boolean,optional,}
-If `true`, create a system collection. In this case, the `collection-name`
-should start with an underscore. End-users should normally create non-system
-collections only. API implementors may be required to create system
-collections on very special occasions, but normally a regular collection will do.
-(The default is `false`)
-
-@RESTBODYPARAM{schema,object,optional,}
-Optional object that specifies the collection level schema for
-documents. The attribute keys `rule`, `level` and `message` must follow the
-rules documented in [Document Schema Validation](https://www.arangodb.com/docs/stable/data-modeling-documents-schema-validation.html)
-
-@RESTBODYPARAM{computedValues,array,optional,post_api_collection_computed_field}
-An optional list of objects, each representing a computed value.
-
-@RESTSTRUCT{name,post_api_collection_computed_field,string,required,}
-The name of the target attribute. Can only be a top-level attribute, but you
-may return a nested object. Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`,
-or a shard key attribute.
-
-@RESTSTRUCT{expression,post_api_collection_computed_field,string,required,}
-An AQL `RETURN` operation with an expression that computes the desired value.
-See [Computed Value Expressions](https://www.arangodb.com/docs/stable/data-modeling-documents-computed-values.html#computed-value-expressions) for details.
-
-@RESTSTRUCT{overwrite,post_api_collection_computed_field,boolean,required,}
-Whether the computed value shall take precedence over a user-provided or
-existing attribute.
-
-@RESTSTRUCT{computeOn,post_api_collection_computed_field,array,optional,string}
-An array of strings to define on which write operations the value shall be
-computed. The possible values are `"insert"`, `"update"`, and `"replace"`.
-The default is `["insert", "update", "replace"]`.
-
-@RESTSTRUCT{keepNull,post_api_collection_computed_field,boolean,optional,}
-Whether the target attribute shall be set if the expression evaluates to `null`.
-You can set the option to `false` to not set (or unset) the target attribute if
-the expression returns `null`. The default is `true`.
-
-@RESTSTRUCT{failOnWarning,post_api_collection_computed_field,boolean,optional,}
-Whether to let the write operation fail if the expression produces a warning.
-The default is `false`.
-
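-A minimal sketch of creating a collection with a computed value (the
-collection and attribute names here are hypothetical and this is not one of
-the official examples):
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionCreateComputedValuesSketch}
-  var url = "/_api/collection";
-  var body = {
-    name: "testComputedValues",
-    computedValues: [ {
-      name: "createdAt",
-      expression: "RETURN DATE_ISO8601(DATE_NOW())",
-      overwrite: false,
-      computeOn: ["insert"]
-    } ]
-  };
-
-  var response = logCurlRequest('POST', url, body);
-
-  assert(response.code === 200);
-  logJsonResponse(response);
-
-  db._drop("testComputedValues");
-@END_EXAMPLE_ARANGOSH_RUN
-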
-@RESTBODYPARAM{keyOptions,object,optional,post_api_collection_opts}
-Additional options for key generation. If specified, then `keyOptions`
-should be a JSON object containing the following attributes:
-
-@RESTSTRUCT{type,post_api_collection_opts,string,required,string}
-Specifies the type of the key generator. The currently available generators are
-`traditional`, `autoincrement`, `uuid` and `padded`.
-
-- The `traditional` key generator generates numerical keys in ascending order.
- The sequence of keys is not guaranteed to be gap-free.
-
-- The `autoincrement` key generator generates numerical keys in ascending order,
- the initial offset and the spacing can be configured (**note**: `autoincrement`
- is currently only supported for non-sharded collections).
- The sequence of generated keys is not guaranteed to be gap-free, because a new key
- will be generated on every document insert attempt, not just for successful
- inserts.
-
-- The `padded` key generator generates keys of a fixed length (16 bytes) in
- ascending lexicographical sort order. This is ideal for usage with the _RocksDB_
- engine, which will slightly benefit keys that are inserted in lexicographically
- ascending order. The key generator can be used in a single-server or cluster.
- The sequence of generated keys is not guaranteed to be gap-free.
-
-- The `uuid` key generator generates universally unique 128 bit keys, which
- are stored in hexadecimal human-readable format. This key generator can be used
- in a single-server or cluster to generate "seemingly random" keys. The keys
- produced by this key generator are not lexicographically sorted.
-
-Please note that keys are only guaranteed to be truly ascending in single
-server deployments and for collections that only have a single shard (that includes
-collections in a OneShard database).
-The reason is that for collections with more than a single shard, document keys
-are generated on Coordinator(s). For collections with a single shard, the document
-keys are generated on the leader DB-Server, which has full control over the key
-sequence.
-
-@RESTSTRUCT{allowUserKeys,post_api_collection_opts,boolean,required,}
-If set to `true`, then you are allowed to supply your own key values in the
-`_key` attribute of documents. If set to `false`, then the key generator
-is solely responsible for generating keys and an error is raised if you
-supply your own key values in the `_key` attribute of documents.
-
-@RESTSTRUCT{increment,post_api_collection_opts,integer,required,int64}
-The increment value for the `autoincrement` key generator. Not used for other
-key generator types.
-
-@RESTSTRUCT{offset,post_api_collection_opts,integer,required,int64}
-Initial offset value for `autoincrement` key generator.
-Not used for other key generator types.
-
-@RESTBODYPARAM{type,integer,optional,int64}
-(The default is `2`): the type of the collection to create.
-The following values for `type` are valid:
-
-- `2`: document collection
-- `3`: edge collection
-
-@RESTBODYPARAM{cacheEnabled,boolean,optional,}
-Whether the in-memory hash cache for documents should be enabled for this
-collection (default: `false`). Can be controlled globally with the `--cache.size`
-startup option. The cache can speed up repeated reads of the same documents via
-their document keys. If the same documents are not fetched often or are
-modified frequently, then you may disable the cache to avoid the maintenance
-costs.
-
-@RESTBODYPARAM{numberOfShards,integer,optional,int64}
-(The default is `1`): in a cluster, this value determines the
-number of shards to create for the collection. In a single
-server setup, this option is meaningless.
-
-@RESTBODYPARAM{shardKeys,string,optional,string}
-(The default is `[ "_key" ]`): in a cluster, this attribute specifies
-which document attributes are used to determine the target shard for documents.
-Documents are sent to shards based on the values of their shard key attributes.
-The values of all shard key attributes in a document are hashed,
-and the hash value is used to determine the target shard.
-**Note**: Values of shard key attributes cannot be changed once set.
- This option is meaningless in a single server setup.
-
-@RESTBODYPARAM{replicationFactor,integer,optional,int64}
-(The default is `1`): in a cluster, this attribute determines how many copies
-of each shard are kept on different DB-Servers. The value 1 means that only one
-copy (no synchronous replication) is kept. A value of k means that k-1 replicas
-are kept. For SatelliteCollections, it needs to be the string `"satellite"`,
-which matches the replication factor to the number of DB-Servers
-(Enterprise Edition only).
-
-Any two copies reside on different DB-Servers. Replication between them is
-synchronous, that is, every write operation to the "leader" copy will be replicated
-to all "follower" replicas, before the write operation is reported successful.
-
-If a server fails, this is detected automatically and one of the servers holding
-copies takes over, usually without an error being reported.
-
-@RESTBODYPARAM{writeConcern,integer,optional,int64}
-Write concern for this collection (default: 1).
-It determines how many copies of each shard are required to be
-in sync on the different DB-Servers. If there are fewer than this many copies
-in the cluster, a shard refuses to write. Writes to shards with enough
-up-to-date copies succeed at the same time, however. The value of
-`writeConcern` cannot be greater than `replicationFactor`.
-For SatelliteCollections, the `writeConcern` is automatically controlled to
-equal the number of DB-Servers and has a value of `0`. _(cluster only)_
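-
-A minimal sketch of creating a collection with synchronous replication (this
-assumes a cluster with at least two DB-Servers; the collection name is
-hypothetical and this is not one of the official examples):
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionCreateReplicationSketch_cluster}
-  var url = "/_api/collection";
-  var body = {
-    name: "testReplication",
-    numberOfShards: 3,
-    replicationFactor: 2,
-    writeConcern: 2
-  };
-
-  var response = logCurlRequest('POST', url, body);
-
-  assert(response.code === 200);
-  logJsonResponse(response);
-
-  db._drop("testReplication");
-@END_EXAMPLE_ARANGOSH_RUN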
-
-@RESTBODYPARAM{shardingStrategy,string,optional,string}
-This attribute specifies the name of the sharding strategy to use for
-the collection. Since ArangoDB 3.4 there are different sharding strategies
-to select from when creating a new collection. The selected `shardingStrategy`
-value remains fixed for the collection and cannot be changed afterwards.
-This is important to make the collection keep its sharding settings and
-always find documents already distributed to shards using the same
-initial sharding algorithm.
-
-The available sharding strategies are:
-- `community-compat`: default sharding used by ArangoDB
- Community Edition before version 3.4
-- `enterprise-compat`: default sharding used by ArangoDB
- Enterprise Edition before version 3.4
-- `enterprise-smart-edge-compat`: default sharding used by smart edge
- collections in ArangoDB Enterprise Edition before version 3.4
-- `hash`: default sharding used for new collections starting from version 3.4
- (excluding smart edge collections)
-- `enterprise-hash-smart-edge`: default sharding used for new
- smart edge collections starting from version 3.4
-- `enterprise-hex-smart-vertex`: sharding used for vertex collections of
- EnterpriseGraphs
-
-If no sharding strategy is specified, the default is `hash` for
-all normal collections, `enterprise-hash-smart-edge` for all smart edge
-collections, and `enterprise-hex-smart-vertex` for EnterpriseGraph
-vertex collections (the latter two require the *Enterprise Edition* of ArangoDB).
-Manually overriding the sharding strategy does not yet provide a
-benefit, but it may later in case other sharding strategies are added.
-
-@RESTBODYPARAM{distributeShardsLike,string,optional,string}
-The name of another collection. If this property is set in a cluster, the
-collection copies the `replicationFactor`, `numberOfShards` and `shardingStrategy`
-properties from the specified collection (referred to as the _prototype collection_)
-and distributes the shards of this collection in the same way as the shards of
-the other collection. In an Enterprise Edition cluster, this data co-location is
-utilized to optimize queries.
-
-You need to use the same number of `shardKeys` as the prototype collection, but
-you can use different attributes.
-
-The default is `""`.
-
-**Note**: Using this parameter has consequences for the prototype
-collection. It can no longer be dropped before the sharding-imitating
-collections are dropped. Equally, backups and restores of imitating
-collections alone generate warnings (which can be overridden)
-about a missing sharding prototype.
-
-@RESTBODYPARAM{isSmart,boolean,optional,}
-Whether the collection is for a SmartGraph or EnterpriseGraph
-(Enterprise Edition only). This is an internal property.
-
-@RESTBODYPARAM{isDisjoint,boolean,optional,}
-Whether the collection is for a Disjoint SmartGraph
-(Enterprise Edition only). This is an internal property.
-
-@RESTBODYPARAM{smartGraphAttribute,string,optional,string}
-The attribute that is used for sharding: vertices with the same value of
-this attribute are placed in the same shard. All vertices are required to
-have this attribute set and it has to be a string. Edges derive the
-attribute from their connected vertices.
-
-This feature can only be used in the *Enterprise Edition*.
-
-@RESTBODYPARAM{smartJoinAttribute,string,optional,string}
-In an *Enterprise Edition* cluster, this attribute determines an attribute
-of the collection that must contain the shard key value of the referred-to
-SmartJoin collection. Additionally, the shard key for a document in this
-collection must contain the value of this attribute, followed by a colon,
-followed by the actual primary key of the document.
-
-This feature can only be used in the *Enterprise Edition* and requires the
-`distributeShardsLike` attribute of the collection to be set to the name
-of another collection. It also requires the `shardKeys` attribute of the
-collection to be set to a single shard key attribute, with an additional ':'
-at the end.
-A further restriction is that whenever documents are stored or updated in the
-collection, the value stored in the `smartJoinAttribute` must be a string.
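-
-For illustration, a hedged sketch with hypothetical names: given
-`smartJoinAttribute: "customerId"`, a document with `customerId: "c1"` must
-use a document key of the form `"c1:order42"`, i.e. the attribute value,
-followed by a colon, followed by the actual primary key.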
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSyncReplication,boolean,optional}
-The default is `true`, which means the server only reports success back to the
-client when all replicas have created the collection. Set it to `false` if you want
-faster server responses and don't care about full replication.
-
-@RESTQUERYPARAM{enforceReplicationFactor,boolean,optional}
-The default is `true`, which means the server checks if there are enough replicas
-available at creation time and bails out otherwise. Set it to `false` to disable
-this extra check.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the `collection-name` is missing, then an *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the `collection-name` is unknown, then an *HTTP 404* is returned.
-
-@RESTRETURNCODE{200}
-
-@RESTREPLYBODY{,object,required,collection_info}
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionCreateCollection}
- var url = "/_api/collection";
- var body = {
- name: "testCollectionBasics"
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- body = {
- name: "testCollectionEdges",
- type : 3
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 200);
- logJsonResponse(response);
-
- db._flushCache();
- db._drop("testCollectionBasics");
- db._drop("testCollectionEdges");
-@END_EXAMPLE_ARANGOSH_RUN
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionCreateKeyopt}
- var url = "/_api/collection";
- var body = {
- name: "testCollectionUsers",
- keyOptions : {
- type : "autoincrement",
- increment : 5,
- allowUserKeys : true
- }
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 200);
- logJsonResponse(response);
-
- db._flushCache();
- db._drop("testCollectionUsers");
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_compact.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_compact.md
deleted file mode 100644
index 4f19fc4e3ef8..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_compact.md
+++ /dev/null
@@ -1,47 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_compact
-@brief compact collection
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/compact, Compact the data of a collection, compactCollection}
-
-@RESTDESCRIPTION
-Compacts the data of a collection in order to reclaim disk space.
-The operation will compact the document and index data by rewriting the
-underlying .sst files and only keeping the relevant entries.
-
-Under normal circumstances, running a compact operation is not necessary, as
-the collection data will eventually get compacted anyway. However, in some
-situations, e.g. after running lots of update/replace or remove operations,
-the disk data for a collection may contain a lot of outdated data whose
-space can be reclaimed. In this case, the compaction operation can be used.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection to compact.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Compaction started successfully
-
-@RESTRETURNCODE{401}
-if the request was not authenticated as a user with sufficient rights
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestApiCollectionCompact}
- var cn = "testCollection";
- db._drop(cn);
- db._create(cn);
-
- var response = logCurlRequest('PUT', '/_api/collection/' + cn + '/compact', '');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_load.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_load.md
deleted file mode 100644
index 45f9b0ec6d74..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_load.md
+++ /dev/null
@@ -1,79 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_load
-@brief loads a collection
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/load, Load collection, loadCollection}
-
-@HINTS
-{% hint 'warning' %}
-The load function is deprecated from version 3.8.0 onwards and is a no-op
-from version 3.9.0 onwards. It should no longer be used, as it may be removed
-in a future version of ArangoDB.
-{% endhint %}
-
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-Since ArangoDB version 3.9.0, this API does nothing. Previously, it used to
-load a collection into memory.
-
-The request body object might optionally contain the following attribute:
-
-- *count*: If set, this controls whether the return value should include
- the number of documents in the collection. Setting *count* to
- *false* may speed up loading a collection. The default value for
- *count* is *true*.
-
-A call to this API returns an object with the following attributes for
-compatibility reasons:
-
-- *id*: The identifier of the collection.
-
-- *name*: The name of the collection.
-
-- *count*: The number of documents inside the collection. This is only
-  returned if the *count* input parameter is set to *true* or has
- not been specified.
-
-- *status*: The status of the collection as number.
-
-- *type*: The collection type. Valid types are:
- - 2: document collection
- - 3: edge collection
-
-- *isSystem*: If *true* then the collection is a system collection.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionIdentifierLoad}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn, { waitForSync: true });
- var url = "/_api/collection/"+ coll.name() + "/load";
-
- var response = logCurlRequest('PUT', url, '');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_loadIndexesIntoMemory.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_loadIndexesIntoMemory.md
deleted file mode 100644
index 25a963672ff3..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_loadIndexesIntoMemory.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_loadIndexesIntoMemory
-@brief Load Indexes into Memory
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/loadIndexesIntoMemory, Load Indexes into Memory, loadCollectionIndexes}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-You can call this endpoint to try to cache this collection's index entries in
-the main memory. Index lookups served from the memory cache can be much faster
-than lookups that cannot be served from the cache, resulting in a performance boost.
-
-The endpoint iterates over suitable indexes of the collection and stores the
-indexed values (not the entire document data) in memory. This is implemented for
-edge indexes only.
-
-The endpoint returns as soon as the index warmup has been scheduled. The index
-warmup may still be ongoing in the background, even after the return value has
-already been sent. As all suitable indexes are scanned, it may cause significant
-I/O activity and background load.
-
-This feature honors memory limits. If the indexes you want to load are smaller
-than your memory limit, this feature guarantees that most index values are
-cached. If the index is greater than your memory limit, this feature caches
-index values up to this limit. You cannot control which indexes of the collection
-should have priority over others.
-
-It is guaranteed that the in-memory cache data is consistent with the stored
-index data at all times.
-
-On success, this endpoint returns an object with attribute `result` set to `true`.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-If the index loading has been scheduled for all suitable indexes.
-
-@RESTRETURNCODE{400}
-If the `collection-name` is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the `collection-name` is unknown, then a *HTTP 404* is returned.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionIdentifierLoadIndexesIntoMemory}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn);
- var url = "/_api/collection/"+ coll.name() + "/loadIndexesIntoMemory";
-
- var response = logCurlRequest('PUT', url, '');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_properties.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_properties.md
deleted file mode 100644
index a0b4bdee7b06..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_properties.md
+++ /dev/null
@@ -1,121 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_properties
-@brief changes a collection
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/properties, Change properties of a collection, updateCollectionProperties}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-Changes the properties of a collection. Only the provided attributes are
-updated. Collection properties **cannot be changed** once a collection is
-created except for the listed properties, as well as the collection name via
-the rename endpoint (but not in clusters).
-
-@RESTBODYPARAM{waitForSync,boolean,optional,}
-If *true* then the data is synchronized to disk before returning from a
-document create, update, replace or removal operation. (default: false)
-
-@RESTBODYPARAM{cacheEnabled,boolean,optional,}
-Whether the in-memory hash cache for documents should be enabled for this
-collection (default: *false*). Can be controlled globally with the `--cache.size`
-startup option. The cache can speed up repeated reads of the same documents via
-their document keys. If the same documents are not fetched often or are
-modified frequently, then you may disable the cache to avoid the maintenance
-costs.
-
-@RESTBODYPARAM{schema,object,optional,}
-Optional object that specifies the collection level schema for
-documents. The attribute keys `rule`, `level` and `message` must follow the
-rules documented in [Document Schema Validation](https://www.arangodb.com/docs/stable/data-modeling-documents-schema-validation.html)
-
-@RESTBODYPARAM{computedValues,array,optional,put_api_collection_properties_computed_field}
-An optional list of objects, each representing a computed value.
-
-@RESTSTRUCT{name,put_api_collection_properties_computed_field,string,required,}
-The name of the target attribute. Can only be a top-level attribute, but you
-may return a nested object. Cannot be `_key`, `_id`, `_rev`, `_from`, `_to`,
-or a shard key attribute.
-
-@RESTSTRUCT{expression,put_api_collection_properties_computed_field,string,required,}
-An AQL `RETURN` operation with an expression that computes the desired value.
-See [Computed Value Expressions](https://www.arangodb.com/docs/stable/data-modeling-documents-computed-values.html#computed-value-expressions) for details.
-
-@RESTSTRUCT{overwrite,put_api_collection_properties_computed_field,boolean,required,}
-Whether the computed value shall take precedence over a user-provided or
-existing attribute.
-
-@RESTSTRUCT{computeOn,put_api_collection_properties_computed_field,array,optional,string}
-An array of strings to define on which write operations the value shall be
-computed. The possible values are `"insert"`, `"update"`, and `"replace"`.
-The default is `["insert", "update", "replace"]`.
-
-@RESTSTRUCT{keepNull,put_api_collection_properties_computed_field,boolean,optional,}
-Whether the target attribute shall be set if the expression evaluates to `null`.
-You can set the option to `false` to not set (or unset) the target attribute if
-the expression returns `null`. The default is `true`.
-
-@RESTSTRUCT{failOnWarning,put_api_collection_properties_computed_field,boolean,optional,}
-Whether to let the write operation fail if the expression produces a warning.
-The default is `false`.
-
-@RESTBODYPARAM{replicationFactor,integer,optional,int64}
-(The default is *1*): in a cluster, this attribute determines how many copies
-of each shard are kept on different DB-Servers. The value 1 means that only one
-copy (no synchronous replication) is kept. A value of k means that k-1 replicas
-are kept. For SatelliteCollections, it needs to be the string `"satellite"`,
-which matches the replication factor to the number of DB-Servers
-(Enterprise Edition only).
-
-Any two copies reside on different DB-Servers. Replication between them is
-synchronous, that is, every write operation to the "leader" copy will be replicated
-to all "follower" replicas, before the write operation is reported successful.
-
-If a server fails, this is detected automatically and one of the servers holding
-copies takes over, usually without an error being reported.
-
-@RESTBODYPARAM{writeConcern,integer,optional,int64}
-Write concern for this collection (default: 1).
-It determines how many copies of each shard are required to be
-in sync on the different DB-Servers. If there are fewer than this many copies
-in the cluster, a shard refuses to write. Writes to shards with enough
-up-to-date copies succeed at the same time, however. The value of
-`writeConcern` cannot be greater than `replicationFactor`.
-For SatelliteCollections, the `writeConcern` is automatically controlled to
-equal the number of DB-Servers and has a value of `0`. _(cluster only)_
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionIdentifierPropertiesSync}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn, { waitForSync: true });
- var url = "/_api/collection/"+ coll.name() + "/properties";
-
- var response = logCurlRequest('PUT', url, {"waitForSync" : true });
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
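-
-Adding a computed value via the properties endpoint (a minimal sketch with
-hypothetical attribute names; this is not one of the official examples):
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionPropertiesComputedValuesSketch}
-  var cn = "products";
-  db._drop(cn);
-  var coll = db._create(cn);
-  var url = "/_api/collection/" + coll.name() + "/properties";
-  var body = {
-    computedValues: [ {
-      name: "updatedAt",
-      expression: "RETURN DATE_ISO8601(DATE_NOW())",
-      overwrite: true,
-      computeOn: ["update", "replace"]
-    } ]
-  };
-
-  var response = logCurlRequest('PUT', url, body);
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-  db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN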
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_recalculateCount.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_recalculateCount.md
deleted file mode 100644
index 4edcc198e63a..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_recalculateCount.md
+++ /dev/null
@@ -1,27 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_recalculateCount
-@brief recalculates the document count of a collection
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/recalculateCount, Recalculate count of a collection, recalculateCollectionCount}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-Recalculates the document count of a collection, if it ever becomes inconsistent.
-
-It returns an object with the attributes
-
-- *result*: will be *true* if recalculating the document count succeeded.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-If the document count was recalculated successfully, *HTTP 200* is returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404* is returned.
-
-@endDocuBlock
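-@EXAMPLES
-
-Recalculating the document count (a minimal sketch, not one of the official
-examples):
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionRecalculateCountSketch}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn);
-
-  var response = logCurlRequest('PUT', '/_api/collection/' + cn + '/recalculateCount', '');
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-  db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-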
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_rename.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_rename.md
deleted file mode 100644
index 1a5d1d9db139..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_rename.md
+++ /dev/null
@@ -1,69 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_rename
-@brief renames a collection
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/rename, Rename collection, renameCollection}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection to rename.
-
-@RESTDESCRIPTION
-Renames a collection. Expects an object with the attribute(s)
-
-- *name*: The new name.
-
-It returns an object with the attributes
-
-- *id*: The identifier of the collection.
-
-- *name*: The new name of the collection.
-
-- *status*: The status of the collection as number.
-
-- *type*: The collection type. Valid types are:
- - 2: document collection
- - 3: edge collection
-
-- *isSystem*: If *true* then the collection is a system collection.
-
-If renaming the collection succeeds, then the collection is also renamed in
-all graph definitions inside the `_graphs` collection in the current database.
-
-**Note**: this method is not available in a cluster.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionIdentifierRename}
- var cn = "products1";
- var cnn = "newname";
- db._drop(cn);
- db._drop(cnn);
- var coll = db._create(cn);
- var url = "/_api/collection/" + coll.name() + "/rename";
-
- var response = logCurlRequest('PUT', url, { name: cnn });
-
- assert(response.code === 200);
- db._flushCache();
- db._drop(cnn);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_responsibleShard.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_responsibleShard.md
deleted file mode 100644
index 3e9240bb7ed8..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_responsibleShard.md
+++ /dev/null
@@ -1,65 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_responsibleShard
-@brief Return the responsible shard for a document
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/responsibleShard, Return responsible shard for a document, getResponsibleShard}
-
-@RESTALLBODYPARAM{document,object,required}
-The request body must be a JSON object with at least the shard key
-attributes set to some values, but it may also be a full document.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-Returns the ID of the shard that is responsible for the given document
-(if the document exists) or that would be responsible if such a document
-existed.
-
-The request body must contain a JSON document with at least the
-collection's shard key attributes set to some values.
-
-The response is a JSON object with a *shardId* attribute, which will
-contain the ID of the responsible shard.
-
-**Note**: This method is only available on a cluster Coordinator.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returns the ID of the responsible shard.
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-Additionally, if not all of the collection's shard key
-attributes are present in the input document, then a
-*HTTP 400* is returned as well.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then an *HTTP 404*
-is returned.
-
-@RESTRETURNCODE{501}
-*HTTP 501* is returned if the method is called on a single server.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestGetResponsibleShardExample_cluster}
- var cn = "testCollection";
- db._drop(cn);
- db._create(cn, { numberOfShards: 3, shardKeys: ["_key"] });
-
- var body = JSON.stringify({ _key: "testkey", value: 23 });
- var response = logCurlRequestRaw('PUT', "/_api/collection/" + cn + "/responsibleShard", body);
-
- assert(response.code === 200);
- assert(response.parsedBody.hasOwnProperty("shardId"));
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_truncate.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_truncate.md
deleted file mode 100644
index 1cbd6eda8714..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_truncate.md
+++ /dev/null
@@ -1,58 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_truncate
-@brief truncates a collection
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/truncate, Truncate collection, truncateCollection}
-
-@HINTS
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-If *true* then the data is synchronized to disk before returning from the
-truncate operation (default: *false*)
-
-@RESTQUERYPARAM{compact,boolean,optional}
-If *true* (default) then the storage engine is told to start a compaction
-in order to free up disk space. This can be resource intensive. If the only
-intention is to start over with an empty collection, specify *false*.
-
-@RESTDESCRIPTION
-Removes all documents from the collection, but leaves the indexes intact.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404*
-is returned.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionIdentifierTruncate}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn, { waitForSync: true });
- var url = "/_api/collection/"+ coll.name() + "/truncate";
-
- var response = logCurlRequest('PUT', url, '');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_unload.md b/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_unload.md
deleted file mode 100644
index 0999baff55c7..000000000000
--- a/Documentation/DocuBlocks/Rest/Collections/put_api_collection_collection_unload.md
+++ /dev/null
@@ -1,66 +0,0 @@
-
-@startDocuBlock put_api_collection_collection_unload
-@brief unloads a collection
-
-@RESTHEADER{PUT /_api/collection/{collection-name}/unload, Unload collection, unloadCollection}
-
-@HINTS
-{% hint 'warning' %}
-The unload function is deprecated from version 3.8.0 onwards and is a no-op
-from version 3.9.0 onwards. It should no longer be used, as it may be removed
-in a future version of ArangoDB.
-{% endhint %}
-
-{% hint 'warning' %}
-Accessing collections by their numeric ID is deprecated from version 3.4.0 on.
-You should reference them via their names instead.
-{% endhint %}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-name,string,required}
-The name of the collection.
-
-@RESTDESCRIPTION
-Since ArangoDB version 3.9.0, this API does nothing. Previously, it used to
-unload a collection from memory while preserving all documents.
-When calling the API, an object with the following attributes is
-returned for compatibility reasons:
-
-- *id*: The identifier of the collection.
-
-- *name*: The name of the collection.
-
-- *status*: The status of the collection as number.
-
-- *type*: The collection type. Valid types are:
- - 2: document collection
- - 3: edge collection
-
-- *isSystem*: If *true* then the collection is a system collection.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{400}
-If the *collection-name* is missing, then a *HTTP 400* is
-returned.
-
-@RESTRETURNCODE{404}
-If the *collection-name* is unknown, then a *HTTP 404* is returned.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestCollectionIdentifierUnload}
- var cn = "products";
- db._drop(cn);
- var coll = db._create(cn, { waitForSync: true });
- var url = "/_api/collection/"+ coll.name() + "/unload";
-
- var response = logCurlRequest('PUT', url, '');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Databases/delete_api_database_database.md b/Documentation/DocuBlocks/Rest/Databases/delete_api_database_database.md
deleted file mode 100644
index 2296a7029b7e..000000000000
--- a/Documentation/DocuBlocks/Rest/Databases/delete_api_database_database.md
+++ /dev/null
@@ -1,46 +0,0 @@
-
-@startDocuBlock delete_api_database_database
-@brief drop an existing database
-
-@RESTHEADER{DELETE /_api/database/{database-name}, Drop database, deleteDatabase}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{database-name,string,required}
-The name of the database
-
-@RESTDESCRIPTION
-Drops the database along with all data stored in it.
-
-**Note**: dropping a database is only possible from within the *_system* database.
-The *_system* database itself cannot be dropped.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the database was dropped successfully.
-
-@RESTRETURNCODE{400}
-is returned if the request is malformed.
-
-@RESTRETURNCODE{403}
-is returned if the request was not executed in the *_system* database.
-
-@RESTRETURNCODE{404}
-is returned if the database could not be found.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestDatabaseDrop}
- var url = "/_api/database";
- var name = "example";
-
- db._createDatabase(name);
- var response = logCurlRequest('DELETE', url + '/' + name);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Databases/get_api_database.md b/Documentation/DocuBlocks/Rest/Databases/get_api_database.md
deleted file mode 100644
index 48cc427d63ed..000000000000
--- a/Documentation/DocuBlocks/Rest/Databases/get_api_database.md
+++ /dev/null
@@ -1,35 +0,0 @@
-
-@startDocuBlock get_api_database
-@brief retrieves a list of all existing databases
-
-@RESTHEADER{GET /_api/database, List of databases, listDatabases}
-
-@RESTDESCRIPTION
-Retrieves the list of all existing databases.
-
-**Note**: retrieving the list of databases is only possible from within the *_system* database.
-
-**Note**: You should now use the *GET user API* to fetch the list of available databases.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the list of databases was compiled successfully.
-
-@RESTRETURNCODE{400}
-is returned if the request is invalid.
-
-@RESTRETURNCODE{403}
-is returned if the request was not executed in the *_system* database.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestDatabaseGet}
- var url = "/_api/database";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Databases/get_api_database_current.md b/Documentation/DocuBlocks/Rest/Databases/get_api_database_current.md
deleted file mode 100644
index 5f0273cd37f7..000000000000
--- a/Documentation/DocuBlocks/Rest/Databases/get_api_database_current.md
+++ /dev/null
@@ -1,48 +0,0 @@
-
-@startDocuBlock get_api_database_current
-@brief retrieves information about the current database
-
-@RESTHEADER{GET /_api/database/current, Information of the database, getCurrentDatabase}
-
-@RESTDESCRIPTION
-Retrieves the properties of the current database.
-
-The response is a JSON object with the following attributes:
-
-- *name*: the name of the current database
-
-- *id*: the id of the current database
-
-- *path*: the filesystem path of the current database
-
-- *isSystem*: whether or not the current database is the *_system* database
-
-- *sharding*: the default sharding method for collections created in this database
-
-- *replicationFactor*: the default replication factor for collections in this database
-
-- *writeConcern*: the default write concern for collections in this database
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the information was retrieved successfully.
-
-@RESTRETURNCODE{400}
-is returned if the request is invalid.
-
-@RESTRETURNCODE{404}
-is returned if the database could not be found.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestDatabaseGetInfo}
- var url = "/_api/database/current";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Databases/get_api_database_user.md b/Documentation/DocuBlocks/Rest/Databases/get_api_database_user.md
deleted file mode 100644
index 440c3f8d845f..000000000000
--- a/Documentation/DocuBlocks/Rest/Databases/get_api_database_user.md
+++ /dev/null
@@ -1,30 +0,0 @@
-
-@startDocuBlock get_api_database_user
-@brief retrieves a list of all databases the current user can access
-
-@RESTHEADER{GET /_api/database/user, List of accessible databases, listUserAccessibleDatabases}
-
-@RESTDESCRIPTION
-Retrieves the list of all databases the current user can access without
-specifying a different username or password.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the list of databases was compiled successfully.
-
-@RESTRETURNCODE{400}
-is returned if the request is invalid.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestDatabaseGetUser}
- var url = "/_api/database/user";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Databases/post_api_database.md b/Documentation/DocuBlocks/Rest/Databases/post_api_database.md
deleted file mode 100644
index 001b4646998d..000000000000
--- a/Documentation/DocuBlocks/Rest/Databases/post_api_database.md
+++ /dev/null
@@ -1,147 +0,0 @@
-
-@startDocuBlock post_api_database
-@brief creates a new database
-
-@RESTHEADER{POST /_api/database, Create database, createDatabase}
-
-@RESTBODYPARAM{name,string,required,string}
-Has to contain a valid database name. The name must conform to the selected
-naming convention for databases. If the name contains Unicode characters, the
-name must be [NFC-normalized](https://en.wikipedia.org/wiki/Unicode_equivalence#Normal_forms).
-Non-normalized names will be rejected by arangod.
-
-@RESTBODYPARAM{options,object,optional,get_api_database_new_OPTIONS}
-Optional object which can contain the following attributes:
-
-@RESTSTRUCT{sharding,get_api_database_new_OPTIONS,string,optional,}
-The sharding method to use for new collections in this database. Valid values
-are: "", "flexible", or "single". The first two are equivalent. _(cluster only)_
-
-@RESTSTRUCT{replicationFactor,get_api_database_new_OPTIONS,integer,optional,}
-Default replication factor for new collections created in this database.
-Special values include "satellite", which will replicate the collection to
-every DB-Server (Enterprise Edition only), and 1, which disables replication.
-_(cluster only)_
-
-@RESTSTRUCT{writeConcern,get_api_database_new_OPTIONS,number,optional,}
-Default write concern for new collections created in this database.
-It determines how many copies of each shard are required to be
-in sync on the different DB-Servers. If there are fewer than this many copies
-in the cluster, a shard refuses to write. Writes to shards with enough
-up-to-date copies still succeed, however. The value of
-`writeConcern` cannot be greater than `replicationFactor`.
-For SatelliteCollections, the `writeConcern` is automatically controlled to
-equal the number of DB-Servers and has a value of `0`. _(cluster only)_
-
-@RESTBODYPARAM{users,array,optional,get_api_database_new_USERS}
-An array of user objects. The users will be granted *Administrate* permissions
-for the new database. Users that do not exist yet will be created.
-If *users* is not specified or does not contain any users, the default user
-*root* will be used to ensure that the new database will be accessible after it
-is created. The *root* user is created with an empty password should it not
-exist. Each user object can contain the following attributes:
-
-@RESTSTRUCT{username,get_api_database_new_USERS,string,required,}
-Login name of an existing user or one to be created.
-
-@RESTSTRUCT{passwd,get_api_database_new_USERS,string,optional,password}
-The user password as a string. If not specified, it will default to an empty
-string. The attribute is ignored for users that already exist.
-
-@RESTSTRUCT{active,get_api_database_new_USERS,boolean,optional,}
-A flag indicating whether the user account should be activated or not.
-The default value is *true*. If set to *false*, then the user won't be able to
-log into the database. The attribute is ignored for users
-that already exist.
-
-@RESTSTRUCT{extra,get_api_database_new_USERS,object,optional,}
-A JSON object with extra user information. It is used by the web interface
-to store graph viewer settings and saved queries. Should not be set or
-modified by end users, as custom attributes will not be preserved.
-
-@RESTDESCRIPTION
-Creates a new database.
-
-The response is a JSON object with the attribute *result* set to *true*.
-
-**Note**: creating a new database is only possible from within the *_system* database.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-is returned if the database was created successfully.
-
-@RESTRETURNCODE{400}
-is returned if the request parameters are invalid or if a database with the
-specified name already exists.
-
-@RESTRETURNCODE{403}
-is returned if the request was not executed in the *_system* database.
-
-@RESTRETURNCODE{409}
-is returned if a database with the specified name already exists.
-
-@EXAMPLES
-
-Creating a database named *example*.
-
-@EXAMPLE_ARANGOSH_RUN{RestDatabaseCreate}
- var url = "/_api/database";
- var name = "example";
- try {
- db._dropDatabase(name);
- }
- catch (err) {
- }
-
- var data = {
- name: name,
- options: {
- sharding: "flexible",
- replicationFactor: 3
- }
- };
- var response = logCurlRequest('POST', url, data);
-
- db._dropDatabase(name);
- assert(response.code === 201);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Creating a database named *mydb* with two users who are granted
-*Administrate* permissions for the newly created database.
-
-@EXAMPLE_ARANGOSH_RUN{RestDatabaseCreateUsers}
- var url = "/_api/database";
- var name = "mydb";
- try {
- db._dropDatabase(name);
- }
- catch (err) {
- }
-
- var data = {
- name: name,
- users: [
- {
- username: "admin",
- passwd: "secret",
- active: true
- },
- {
- username: "tester",
- passwd: "test001",
- active: false
- }
- ]
- };
- var response = logCurlRequest('POST', url, data);
-
- db._dropDatabase(name);
- assert(response.code === 201);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
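-
-Creating a database with an explicit default `writeConcern` of 2 next to a
-`replicationFactor` of 3 (cluster only). This is a minimal sketch; the
-example name is hypothetical:
-
-@EXAMPLE_ARANGOSH_RUN{RestDatabaseCreateWriteConcern}
-  var url = "/_api/database";
-  var name = "example2";
-  try {
-    db._dropDatabase(name);
-  }
-  catch (err) {
-  }
-
-  var data = {
-    name: name,
-    options: {
-      replicationFactor: 3,
-      writeConcern: 2  // must not exceed replicationFactor
-    }
-  };
-  var response = logCurlRequest('POST', url, data);
-
-  db._dropDatabase(name);
-  assert(response.code === 201);
-
-  logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN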
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/delete_api_document_collection.md b/Documentation/DocuBlocks/Rest/Documents/delete_api_document_collection.md
deleted file mode 100644
index 0b3fa21e5b41..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/delete_api_document_collection.md
+++ /dev/null
@@ -1,285 +0,0 @@
-@startDocuBlock delete_api_document_collection
-@brief removes multiple documents
-
-@RESTHEADER{DELETE /_api/document/{collection},Removes multiple documents,deleteDocuments}
-
-@RESTALLBODYPARAM{documents,json,required}
-A JSON array of strings or documents.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Collection from which documents are removed.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until deletion operation has been synced to disk.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Return additionally the complete previous revision of the changed
-document under the attribute `old` in the result.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if all document operations
-succeed. No meta-data is returned for the deleted documents. If at least one of
-the operations raises an error, an array with the error object(s) is returned.
-
-You can use this option to save network traffic but you cannot map any errors
-to the inputs of your request.
-
-@RESTQUERYPARAM{ignoreRevs,boolean,optional}
-If set to `true`, ignore any `_rev` attribute in the selectors. No
-revision check is performed. If set to `false` then revisions are checked.
-The default is `true`.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to delete existing entries from in-memory index caches and refill them
-if document removals affect the edge index or cache-enabled persistent indexes.
-
-@RESTDESCRIPTION
-The body of the request is an array consisting of selectors for
-documents. A selector can either be a string with a document key, a string
-with a document identifier, or an object with a `_key` attribute. This
-API call removes all specified documents from `collection`.
-If the `ignoreRevs` query parameter is `false` and the
-selector is an object and has a `_rev` attribute, it is a
-precondition that the actual revision of the removed document in the
-collection is the specified one.
-
-The body of the response is an array of the same length as the input
-array. For each input selector, the output contains a JSON object
-with the information about the outcome of the operation. If no error
-occurred, an object is built in which the attribute `_id` contains
-the known *document ID* of the removed document, `_key` contains
-the key which uniquely identifies a document in a given collection,
-and the attribute `_rev` contains the document revision. In case of
-an error, an object with the attribute `error` set to `true` and
-`errorCode` set to the error code is built.
-
-If the `waitForSync` parameter is not specified or set to `false`,
-then the collection's default `waitForSync` behavior is applied.
-The `waitForSync` query parameter cannot be used to disable
-synchronization for collections that have a default `waitForSync`
-value of `true`.
-
-If the query parameter `returnOld` is `true`, then
-the complete previous revision of the document
-is returned under the `old` attribute in the result.
-
-Note that if any precondition is violated or an error occurred with
-some of the documents, the return code is still 200 or 202, but
-the additional HTTP header `X-Arango-Error-Codes` is set, which
-contains a map of the error codes that occurred together with their
-multiplicities, as in `1200:17,1205:10`, which means that the error
-1200 ("revision conflict") occurred in 17 cases and the error
-1205 ("illegal document handle") in 10 cases.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if `waitForSync` was `true`.
-
-@RESTRETURNCODE{202}
-is returned if `waitForSync` was `false`.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the specified
-number of replicas for a shard are currently in sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection was not found.
-The response body contains an error document in this case.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the specified number of replicas for
-a shard are currently in sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@EXAMPLES
-
-Using document keys:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentKeyMulti}
- ~ var assertEqual = require("jsunity").jsUnity.assertions.assertEqual;
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
-
- | var documents = db.products.save( [
- | { "_key": "1", "type": "tv" },
- | { "_key": "2", "type": "cookbook" }
- ] );
-
- var url = "/_api/document/" + cn;
-
- var body = [ "1", "2" ];
- var response = logCurlRequest('DELETE', url, body);
-
- assert(response.code === 200);
- assertEqual(response.parsedBody, documents);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Using document identifiers:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentIdentifierMulti}
- ~ var assertEqual = require("jsunity").jsUnity.assertions.assertEqual;
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
-
- | var documents = db.products.save( [
- | { "_key": "1", "type": "tv" },
- | { "_key": "2", "type": "cookbook" }
- ] );
-
- var url = "/_api/document/" + cn;
-
- var body = [ "products/1", "products/2" ];
- var response = logCurlRequest('DELETE', url, body);
-
- assert(response.code === 200);
- assertEqual(response.parsedBody, documents);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Using objects with document keys:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentObjectMulti}
- ~ var assertEqual = require("jsunity").jsUnity.assertions.assertEqual;
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
-
- | var documents = db.products.save( [
- | { "_key": "1", "type": "tv" },
- | { "_key": "2", "type": "cookbook" }
- ] );
-
- var url = "/_api/document/" + cn;
-
- var body = [ { "_key": "1" }, { "_key": "2" } ];
- var response = logCurlRequest('DELETE', url, body);
-
- assert(response.code === 200);
- assertEqual(response.parsedBody, documents);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Unknown documents:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentUnknownMulti}
- var cn = "products";
- db._drop(cn);
- db._drop("other");
- db._create(cn, { waitForSync: true });
- db._create("other", { waitForSync: true });
-
- | var documents = db.products.save( [
- | { "_key": "1", "type": "tv" },
- | { "_key": "2", "type": "cookbook" }
- ] );
- db.products.remove(documents);
- db.other.save( { "_key": "2" } );
-
- var url = "/_api/document/" + cn;
-
- var body = [ "1", "other/2" ];
- var response = logCurlRequest('DELETE', url, body);
-
- assert(response.code === 202);
- | response.parsedBody.forEach(function(doc) {
- | assert(doc.error === true);
- | assert(doc.errorNum === 1202);
- });
-
- logJsonResponse(response);
- ~ db._drop(cn);
- ~ db._drop("other");
-@END_EXAMPLE_ARANGOSH_RUN
-
-Check revisions:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentRevMulti}
- ~ var assertEqual = require("jsunity").jsUnity.assertions.assertEqual;
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
-
- | var documents = db.products.save( [
- | { "_key": "1", "type": "tv" },
- | { "_key": "2", "type": "cookbook" }
- ] );
-
- var url = "/_api/document/" + cn + "?ignoreRevs=false";
- | var body = [
- | { "_key": "1", "_rev": documents[0]._rev },
- | { "_key": "2", "_rev": documents[1]._rev }
- ];
-
- var response = logCurlRequest('DELETE', url, body);
-
- assert(response.code === 200);
- assertEqual(response.parsedBody, documents);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Revision conflict:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentRevConflictMulti}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
-
- | var documents = db.products.save( [
- | { "_key": "1", "type": "tv" },
- | { "_key": "2", "type": "cookbook" }
- ] );
-
- var url = "/_api/document/" + cn + "?ignoreRevs=false";
- | var body = [
- | { "_key": "1", "_rev": "non-matching revision" },
- | { "_key": "2", "_rev": "non-matching revision" }
- ];
-
- var response = logCurlRequest('DELETE', url, body);
-
- assert(response.code === 202);
- | response.parsedBody.forEach(function(doc) {
- | assert(doc.error === true);
- | assert(doc.errorNum === 1200);
- });
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
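-Returning the removed documents via `returnOld`, so that the complete
-previous revision appears under the `old` attribute of each result entry.
-A minimal sketch; the example name is hypothetical:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentReturnOldMulti}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn, { waitForSync: true });
-
-  | db.products.save( [
-  |   { "_key": "1", "type": "tv" },
-  |   { "_key": "2", "type": "cookbook" }
-  ] );
-
-  // returnOld=true adds the removed document under the "old" attribute
-  var url = "/_api/document/" + cn + "?returnOld=true";
-
-  var body = [ "1", "2" ];
-  var response = logCurlRequest('DELETE', url, body);
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-  ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-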
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/delete_api_document_collection_key.md b/Documentation/DocuBlocks/Rest/Documents/delete_api_document_collection_key.md
deleted file mode 100644
index f518a9affb98..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/delete_api_document_collection_key.md
+++ /dev/null
@@ -1,170 +0,0 @@
-@startDocuBlock delete_api_document_collection_key
-@brief removes a document
-
-@RESTHEADER{DELETE /_api/document/{collection}/{key},Removes a document,deleteDocument}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the `collection` in which the document is to be deleted.
-
-@RESTURLPARAM{key,string,required}
-The document key.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until deletion operation has been synced to disk.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Return additionally the complete previous revision of the changed
-document under the attribute `old` in the result.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if the document operation
-succeeds. No meta-data is returned for the deleted document. If the
-operation raises an error, an error object is returned.
-
-You can use this option to save network traffic.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to delete existing entries from in-memory index caches and refill them
-if document removals affect the edge index or cache-enabled persistent indexes.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{If-Match,string,optional}
-You can conditionally remove a document based on a target revision id by
-using the `if-match` HTTP header.
-
-@RESTDESCRIPTION
-If `silent` is not set to `true`, the body of the response contains a JSON
-object with the information about the identifier and the revision. The attribute
-`_id` contains the known *document ID* of the removed document, `_key`
-contains the key which uniquely identifies a document in a given collection,
-and the attribute `_rev` contains the document revision.
-
-If the `waitForSync` parameter is not specified or set to `false`,
-then the collection's default `waitForSync` behavior is applied.
-The `waitForSync` query parameter cannot be used to disable
-synchronization for collections that have a default `waitForSync`
-value of `true`.
-
-If the query parameter `returnOld` is `true`, then
-the complete previous revision of the document
-is returned under the `old` attribute in the result.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the document was removed successfully and
-`waitForSync` was `true`.
-
-@RESTRETURNCODE{202}
-is returned if the document was removed successfully and
-`waitForSync` was `false`.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the specified
-number of replicas for a shard are currently in sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection or the document was not found.
-The response body contains an error document in this case.
-
-@RESTRETURNCODE{409}
-is returned if locking the document key failed due to another
-concurrent operation that operates on the same document.
-This is also referred to as a _write-write conflict_.
-The response body contains an error document with the
-`errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.
-
-@RESTRETURNCODE{412}
-is returned if a "If-Match" header or `rev` is given and the found
-document has a different version. The response also contain the found
-document's current revision in the `_rev` attribute. Additionally, the
-attributes `_id` and `_key` are returned.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the specified number of replicas for
-a shard are currently in sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@EXAMPLES
-
-Using document identifier:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocument}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
- var document = db.products.save({"hello":"world"});
-
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Unknown document identifier:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentUnknownHandle}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
- var document = db.products.save({"hello":"world"});
- db.products.remove(document._id);
-
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 404);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Revision conflict:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentIfMatchOther}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- var document2 = db.products.save({"hello2":"world"});
- var url = "/_api/document/" + document._id;
- var headers = {"If-Match": "\"" + document2._rev + "\""};
-
- var response = logCurlRequest('DELETE', url, "", headers);
-
- assert(response.code === 412);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
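-
-Returning the removed document via `returnOld` (a minimal sketch; the
-example name is hypothetical):
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerDeleteDocumentReturnOld}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn, { waitForSync: true });
-  var document = db.products.save({"hello":"world"});
-
-  // the removed document is returned under the "old" attribute
-  var url = "/_api/document/" + document._id + "?returnOld=true";
-
-  var response = logCurlRequest('DELETE', url);
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-  ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN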
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/get_api_document_collection.md b/Documentation/DocuBlocks/Rest/Documents/get_api_document_collection.md
deleted file mode 100644
index 89cde60b8fe6..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/get_api_document_collection.md
+++ /dev/null
@@ -1,94 +0,0 @@
-
-@startDocuBlock get_api_document_collection
-@brief reads multiple documents
-
-@RESTHEADER{PUT /_api/document/{collection}#get,Read multiple documents, getDocuments}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the *collection* from which the documents are to be read.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{onlyget,boolean,required}
-This parameter is required to be **true**, otherwise a replace
-operation is executed!
-
-@RESTQUERYPARAM{ignoreRevs,string,optional}
-Should the value be *true* (the default):
-If a search document contains a value for the *_rev* field,
-then the document is only returned if it has the same revision value.
-Otherwise a precondition failed error is returned.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{x-arango-allow-dirty-read,boolean,optional}
-Set this header to `true` to allow the Coordinator to ask any shard replica for
-the data, not only the shard leader. This may result in "dirty reads".
-
-The header is ignored if this operation is part of a Stream Transaction
-(`x-arango-trx-id` header). The header set when creating the transaction decides
-about dirty reads for the entire transaction, not the individual read operations.
-
-@RESTHEADERPARAM{x-arango-trx-id,string,optional}
-To make this operation a part of a Stream Transaction, set this header to the
-transaction ID returned by the `POST /_api/transaction/begin` call.
-
-@RESTALLBODYPARAM{documents,json,required}
-An array of documents to retrieve.
-
-@RESTDESCRIPTION
-Returns the documents identified by their *_key* in the body objects.
-The body of the request _must_ contain a JSON array of either
-strings (the *_key* values to look up) or search documents.
-
-A search document _must_ contain at least a value for the *_key* field.
-A value for `_rev` _may_ be specified to verify whether the document
-has the same revision value, unless _ignoreRevs_ is set to false.
-
-Cluster only: The search document _may_ contain
-values for the collection's pre-defined shard keys. Values for the shard keys
-are treated as hints to improve performance. Should the shard key values
-be incorrect, ArangoDB may answer with a *not found* error.
-
-The returned array of documents contains three special attributes: *_id* containing the document
-identifier, *_key* containing the key that uniquely identifies a document
-in a given collection, and *_rev* containing the revision.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if no error happened
-
-@RESTRETURNCODE{400}
-is returned if the body does not contain a valid JSON representation
-of an array of documents. The response body contains
-an error document in this case.
-
-@RESTRETURNCODE{404}
-is returned if the collection was not found.
-
-@EXAMPLES
-
-Reading multiple documents:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadMultiDocument}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- db.products.save({"_key":"doc1", "hello":"world"});
- db.products.save({"_key":"doc2", "say":"hi to mom"});
- var url = "/_api/document/products?onlyget=true";
- var body = '["doc1", {"_key":"doc2"}]';
-
- var response = logCurlRequest('PUT', url, body);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
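-Reading multiple documents while checking revisions, using
-`ignoreRevs=false` and search documents that carry a `_rev` value
-(a minimal sketch; the example name is hypothetical):
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadMultiDocumentRev}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn);
-
-  var doc = db.products.save({"_key":"doc1", "hello":"world"});
-  // with ignoreRevs=false, a non-matching _rev yields a precondition error
-  var url = "/_api/document/products?onlyget=true&ignoreRevs=false";
-  var body = [ { "_key": "doc1", "_rev": doc._rev } ];
-
-  var response = logCurlRequest('PUT', url, body);
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-  ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-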
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/get_api_document_collection_key.md b/Documentation/DocuBlocks/Rest/Documents/get_api_document_collection_key.md
deleted file mode 100644
index 6e0f3e668143..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/get_api_document_collection_key.md
+++ /dev/null
@@ -1,111 +0,0 @@
-
-@startDocuBlock get_api_document_collection_key
-@brief reads a single document
-
-@RESTHEADER{GET /_api/document/{collection}/{key},Read document,getDocument}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the *collection* from which the document is to be read.
-
-@RESTURLPARAM{key,string,required}
-The document key.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{If-None-Match,string,optional}
-If the "If-None-Match" header is given, then it must contain exactly one
-Etag. The document is returned, if it has a different revision than the
-given Etag. Otherwise an *HTTP 304* is returned.
-
-@RESTHEADERPARAM{If-Match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one
-Etag. The document is returned, if it has the same revision as the
-given Etag. Otherwise a *HTTP 412* is returned.
-
-@RESTHEADERPARAM{x-arango-allow-dirty-read,boolean,optional}
-Set this header to `true` to allow the Coordinator to ask any shard replica for
-the data, not only the shard leader. This may result in "dirty reads".
-
-The header is ignored if this operation is part of a Stream Transaction
-(`x-arango-trx-id` header). The header set when creating the transaction decides
-about dirty reads for the entire transaction, not the individual read operations.
-
-@RESTHEADERPARAM{x-arango-trx-id,string,optional}
-To make this operation a part of a Stream Transaction, set this header to the
-transaction ID returned by the `POST /_api/transaction/begin` call.
-
-@RESTDESCRIPTION
-Returns the document identified by *document-id*. The returned
-document contains three special attributes: *_id* containing the document
-identifier, *_key* containing the key that uniquely identifies a document
-in a given collection, and *_rev* containing the revision.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the document was found
-
-@RESTRETURNCODE{304}
-is returned if the "If-None-Match" header is given and the document has
-the same version
-
-@RESTRETURNCODE{404}
-is returned if the document or collection was not found
-
-@RESTRETURNCODE{412}
-is returned if an "If-Match" header is given and the found
-document has a different version. The response will also contain the found
-document's current revision in the *_rev* attribute. Additionally, the
-attributes *_id* and *_key* will be returned.
-
-@EXAMPLES
-
-Use a document identifier:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadDocument}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Use a document identifier and an Etag:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadDocumentIfNoneMatch}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- var url = "/_api/document/" + document._id;
- var headers = {"If-None-Match": "\"" + document._rev + "\""};
-
- var response = logCurlRequest('GET', url, "", headers);
-
- assert(response.code === 304);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Unknown document identifier:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadDocumentUnknownHandle}
- var url = "/_api/document/products/unknown-identifier";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 404);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
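-
-Use a document identifier and a matching Etag in the "If-Match" header
-(a minimal sketch; the example name is hypothetical):
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadDocumentIfMatch}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn);
-
-  var document = db.products.save({"hello":"world"});
-  var url = "/_api/document/" + document._id;
-  // a matching revision returns the document with HTTP 200
-  var headers = {"If-Match": "\"" + document._rev + "\""};
-
-  var response = logCurlRequest('GET', url, "", headers);
-
-  assert(response.code === 200);
-
-  logJsonResponse(response);
-  ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN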
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/head_api_document_collection_key.md b/Documentation/DocuBlocks/Rest/Documents/head_api_document_collection_key.md
deleted file mode 100644
index ae61bc4c05b4..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/head_api_document_collection_key.md
+++ /dev/null
@@ -1,77 +0,0 @@
-
-@startDocuBlock head_api_document_collection_key
-@brief reads a single document head
-
-@RESTHEADER{HEAD /_api/document/{collection}/{key},Read document header,getDocumentHeader}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the *collection* from which the document is to be read.
-
-@RESTURLPARAM{key,string,required}
-The document key.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{If-None-Match,string,optional}
-If the "If-None-Match" header is given, then it must contain exactly one
-Etag. If the current document revision is not equal to the specified Etag,
-an *HTTP 200* response is returned. If the current document revision is
-identical to the specified Etag, then an *HTTP 304* is returned.
-
-@RESTHEADERPARAM{If-Match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one
-Etag. The document is returned, if it has the same revision as the
-given Etag. Otherwise a *HTTP 412* is returned.
-
-@RESTHEADERPARAM{x-arango-allow-dirty-read,boolean,optional}
-Set this header to `true` to allow the Coordinator to ask any shard replica for
-the data, not only the shard leader. This may result in "dirty reads".
-
-The header is ignored if this operation is part of a Stream Transaction
-(`x-arango-trx-id` header). The header set when creating the transaction decides
-about dirty reads for the entire transaction, not the individual read operations.
-
-@RESTHEADERPARAM{x-arango-trx-id,string,optional}
-To make this operation a part of a Stream Transaction, set this header to the
-transaction ID returned by the `POST /_api/transaction/begin` call.
-
-@RESTDESCRIPTION
-Like *GET*, but only returns the header fields and not the body. You
-can use this call to get the current revision of a document or check if
-the document was deleted.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the document was found
-
-@RESTRETURNCODE{304}
-is returned if the "If-None-Match" header is given and the document has
-the same version
-
-@RESTRETURNCODE{404}
-is returned if the document or collection was not found
-
-@RESTRETURNCODE{412}
-is returned if an "If-Match" header is given and the found
-document has a different version. The response will also contain the found
-document's current revision in the *Etag* header.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadDocumentHead}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest('HEAD', url);
-
- assert(response.code === 200);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
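-
-Checking the current revision with the "If-None-Match" header; an
-identical revision yields an *HTTP 304* (a minimal sketch; the example
-name is hypothetical):
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReadDocumentHeadIfNoneMatch}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn);
-
-  var document = db.products.save({"hello":"world"});
-  var url = "/_api/document/" + document._id;
-  // the revision matches, so no document body needs to be transferred
-  var headers = {"If-None-Match": "\"" + document._rev + "\""};
-
-  var response = logCurlRequest('HEAD', url, "", headers);
-
-  assert(response.code === 304);
-  ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN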
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/patch_api_document_collection.md b/Documentation/DocuBlocks/Rest/Documents/patch_api_document_collection.md
deleted file mode 100644
index 849e2ea0a7cf..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/patch_api_document_collection.md
+++ /dev/null
@@ -1,167 +0,0 @@
-
-@startDocuBlock patch_api_document_collection
-@brief updates multiple documents
-
-@RESTHEADER{PATCH /_api/document/{collection},Update documents,updateDocuments}
-
-@RESTALLBODYPARAM{documents,json,required}
-A JSON representation of an array of document updates as objects.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the `collection` in which the documents are to be updated.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-If the intention is to delete existing attributes with the patch
-command, the URL query parameter `keepNull` can be used with a value
-of `false`. This modifies the behavior of the patch command to
-remove any attributes from the existing document that are contained
-in the patch document with an attribute value of `null`.
-
-@RESTQUERYPARAM{mergeObjects,boolean,optional}
-Controls whether objects (not arrays) are merged if present in
-both the existing and the patch document. If set to `false`, the
-value in the patch document overwrites the existing document's
-value. If set to `true`, objects are merged. The default is
-`true`.
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until the new documents have been synced to disk.
-
-@RESTQUERYPARAM{ignoreRevs,boolean,optional}
-By default, or if this is set to `true`, the `_rev` attributes in
-the given documents are ignored. If this is set to `false`, then
-any `_rev` attribute given in a body document is taken as a
-precondition. The document is only updated if the current revision
-is the one specified.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Return additionally the complete previous revision of the changed
-documents under the attribute `old` in the result.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Return additionally the complete new documents under the attribute `new`
-in the result.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if all document operations
-succeed. No meta-data is returned for the updated documents. If at least one
-operation raises an error, an array with the error object(s) is returned.
-
-You can use this option to save network traffic but you cannot map any errors
-to the inputs of your request.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to update existing entries in in-memory index caches if document updates
-affect the edge index or cache-enabled persistent indexes.
-
-@RESTDESCRIPTION
-Partially updates documents. The documents to update are specified
-by the `_key` attributes in the body objects. The body of the
-request must contain a JSON array of document updates with the
-attributes to patch (the patch documents). All attributes from the
-patch documents are added to the existing documents if they do
-not yet exist, and overwritten in the existing documents if they do
-exist there.
-
-The value of the `_key` attribute as well as attributes
-used as sharding keys may not be changed.
-
-Setting an attribute value to `null` in the patch documents causes a
-value of `null` to be saved for the attribute by default.
-
-If `ignoreRevs` is `false` and there is a `_rev` attribute in a
-document in the body and its value does not match the revision of
-the corresponding document in the database, the precondition is
-violated.
-
-Cluster only: The patch document _may_ contain
-values for the collection's pre-defined shard keys. Values for the shard keys
-are treated as hints to improve performance. Should the shard key values
-be incorrect, ArangoDB may answer with a *not found* error.
-
-Optionally, the query parameter `waitForSync` can be used to force
-synchronization of the document replacement operation to disk even in case
-that the `waitForSync` flag had been disabled for the entire collection.
-Thus, the `waitForSync` query parameter can be used to force synchronization
-of just specific operations. To use this, set the `waitForSync` parameter
-to `true`. If the `waitForSync` parameter is not specified or set to
-`false`, then the collection's default `waitForSync` behavior is
-applied. The `waitForSync` query parameter cannot be used to disable
-synchronization for collections that have a default `waitForSync` value
-of `true`.
-
-The body of the response contains a JSON array of the same length
-as the input array with the information about the identifier and the
-revision of the updated documents. In each entry, the attribute
-`_id` contains the known *document ID* of each updated document,
-`_key` contains the key which uniquely identifies a document in a
-given collection, and the attribute `_rev` contains the new document
-revision. In case of an error or violated precondition, an error
-object with the attribute `error` set to `true` and the attribute
-`errorCode` set to the error code is built.
-
-If the query parameter `returnOld` is `true`, then, for each
-generated document, the complete previous revision of the document
-is returned under the `old` attribute in the result.
-
-If the query parameter `returnNew` is `true`, then, for each
-generated document, the complete new document is returned under
-the `new` attribute in the result.
-
-Note that if any precondition is violated or an error occurred with
-some of the documents, the return code is still 201 or 202, but
-the additional HTTP header `X-Arango-Error-Codes` is set, which
-contains a map of the error codes that occurred together with their
-multiplicities, as in `1200:17,1205:10`, which means that the error
-1200 ("revision conflict") occurred in 17 cases and the error
-1205 ("illegal document handle") in 10 cases.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-is returned if `waitForSync` was `true` and operations were processed.
-
-@RESTRETURNCODE{202}
-is returned if `waitForSync` was `false` and operations were processed.
-
-@RESTRETURNCODE{400}
-is returned if the body does not contain a valid JSON representation
-of an array of documents. The response body contains
-an error document in this case.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the specified
-number of replicas for a shard are currently in sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection was not found.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the specified number of replicas for
-a shard are currently in sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
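-@EXAMPLES
-
-Partially updating two documents identified by their `_key` values. This
-is a minimal sketch; the example name is hypothetical:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPatchMultiDocument}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn);
-
-  | db.products.save( [
-  |   { "_key": "1", "type": "tv" },
-  |   { "_key": "2", "type": "cookbook" }
-  ] );
-
-  var url = "/_api/document/" + cn;
-  // each patch document selects its target via _key
-  | var body = [
-  |   { "_key": "1", "onSale": true },
-  |   { "_key": "2", "onSale": false }
-  ];
-
-  var response = logCurlRequest('PATCH', url, body);
-
-  assert(response.code === 202);
-
-  logJsonResponse(response);
-  ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-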
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/patch_api_document_collection_key.md b/Documentation/DocuBlocks/Rest/Documents/patch_api_document_collection_key.md
deleted file mode 100644
index 562e4a39f3a0..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/patch_api_document_collection_key.md
+++ /dev/null
@@ -1,257 +0,0 @@
-
-@startDocuBlock patch_api_document_collection_key
-@brief updates a document
-
-@RESTHEADER{PATCH /_api/document/{collection}/{key},Update document,updateDocument}
-
-@RESTALLBODYPARAM{document,object,required}
-A JSON representation of a document update as an object.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the `collection` in which the document is to be updated.
-
-@RESTURLPARAM{key,string,required}
-The document key.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-If the intention is to delete existing attributes with the patch
-command, the URL query parameter `keepNull` can be used with a value
-of `false`. This modifies the behavior of the patch command to
-remove any attributes from the existing document that are contained
-in the patch document with an attribute value of `null`.
-
-@RESTQUERYPARAM{mergeObjects,boolean,optional}
-Controls whether objects (not arrays) are merged if present in
-both the existing and the patch document. If set to `false`, the
-value in the patch document overwrites the existing document's
-value. If set to `true`, objects are merged. The default is
-`true`.
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until document has been synced to disk.
-
-@RESTQUERYPARAM{ignoreRevs,boolean,optional}
-By default, or if this is set to `true`, the `_rev` attribute in
-the given document is ignored. If this is set to `false`, then
-the `_rev` attribute given in the body document is taken as a
-precondition. The document is only updated if the current revision
-is the one specified.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Return additionally the complete previous revision of the changed
-document under the attribute `old` in the result.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Return additionally the complete new document under the attribute `new`
-in the result.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if the document operation
-succeeds. No meta-data is returned for the updated document. If the
-operation raises an error, an error object is returned.
-
-You can use this option to save network traffic.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to update existing entries in in-memory index caches if document updates
-affect the edge index or cache-enabled persistent indexes.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{If-Match,string,optional}
-You can conditionally update a document based on a target revision id by
-using the `if-match` HTTP header.
-
-@RESTDESCRIPTION
-Partially updates the document identified by the *document ID*.
-The body of the request must contain a JSON document with the
-attributes to patch (the patch document). All attributes from the
-patch document are added to the existing document if they do not
-yet exist, and overwritten in the existing document if they do exist
-there.
-
-The value of the `_key` attribute as well as attributes
-used as sharding keys may not be changed.
-
-Setting an attribute value to `null` in the patch document causes a
-value of `null` to be saved for the attribute by default.
-
-If the `If-Match` header is specified and the revision of the
-document in the database is unequal to the given revision, the
-precondition is violated.
-
-If `If-Match` is not given and `ignoreRevs` is `false` and there
-is a `_rev` attribute in the body and its value does not match
-the revision of the document in the database, the precondition is
-violated.
-
-If a precondition is violated, an *HTTP 412* is returned.
-
-If the document exists and can be updated, then an *HTTP 201* or
-an *HTTP 202* is returned (depending on `waitForSync`, see below),
-the `Etag` header field contains the new revision of the document
-(in double quotes) and the `Location` header contains a complete URL
-under which the document can be queried.
-
-Cluster only: The patch document _may_ contain
-values for the collection's pre-defined shard keys. Values for the shard keys
-are treated as hints to improve performance. Should the shard key values
-be incorrect, ArangoDB may answer with a `not found` error.
-
-Optionally, the query parameter `waitForSync` can be used to force
-synchronization of the updated document operation to disk even in case
-that the `waitForSync` flag had been disabled for the entire collection.
-Thus, the `waitForSync` query parameter can be used to force synchronization
-of just specific operations. To use this, set the `waitForSync` parameter
-to `true`. If the `waitForSync` parameter is not specified or set to
-`false`, then the collection's default `waitForSync` behavior is
-applied. The `waitForSync` query parameter cannot be used to disable
-synchronization for collections that have a default `waitForSync` value
-of `true`.
-
-If `silent` is not set to `true`, the body of the response contains a JSON
-object with the information about the identifier and the revision. The attribute
-`_id` contains the known *document ID* of the updated document, `_key`
-contains the key which uniquely identifies a document in a given collection,
-and the attribute `_rev` contains the new document revision.
-
-If the query parameter `returnOld` is `true`, then
-the complete previous revision of the document
-is returned under the `old` attribute in the result.
-
-If the query parameter `returnNew` is `true`, then
-the complete new document is returned under
-the `new` attribute in the result.
-
-If the document does not exist, then an *HTTP 404* is returned and the
-body of the response contains an error document.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-is returned if the document was updated successfully and
-`waitForSync` was `true`.
-
-@RESTRETURNCODE{202}
-is returned if the document was updated successfully and
-`waitForSync` was `false`.
-
-@RESTRETURNCODE{400}
-is returned if the body does not contain a valid JSON representation
-of a document. The response body contains
-an error document in this case.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the specified
-number of replicas for a shard are currently in sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection or the document was not found.
-
-@RESTRETURNCODE{409}
-There are two possible reasons for this error:
-
-- The update causes a unique constraint violation in a secondary index.
- The response body contains an error document with the `errorNum` set to
- `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.
-- Locking the document key or some unique index entry failed due to another
- concurrent operation that operates on the same document. This is also referred
- to as a _write-write conflict_. The response body contains an error document
- with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.
-
-@RESTRETURNCODE{412}
-is returned if the precondition was violated. The response also contains
-the found documents' current revisions in the `_rev` attributes.
-Additionally, the attributes `_id` and `_key` are returned.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the specified number of replicas for
-a shard are currently in sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@EXAMPLES
-
-Patches an existing document with new content.
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPatchDocument}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"one":"world"});
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest("PATCH", url, { "hello": "world" });
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- var response2 = logCurlRequest("PATCH", url, { "numbers": { "one": 1, "two": 2, "three": 3, "empty": null } });
- assert(response2.code === 202);
- logJsonResponse(response2);
- var response3 = logCurlRequest("GET", url);
- assert(response3.code === 200);
- logJsonResponse(response3);
- var response4 = logCurlRequest("PATCH", url + "?keepNull=false", { "hello": null, "numbers": { "four": 4 } });
- assert(response4.code === 202);
- logJsonResponse(response4);
- var response5 = logCurlRequest("GET", url);
- assert(response5.code === 200);
- logJsonResponse(response5);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Merging attributes of an object using `mergeObjects`:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPatchDocumentMerge}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"inhabitants":{"china":1366980000,"india":1263590000,"usa":319220000}});
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest("GET", url);
- assert(response.code === 200);
- logJsonResponse(response);
-
- var response = logCurlRequest("PATCH", url + "?mergeObjects=true", { "inhabitants": {"indonesia":252164800,"brazil":203553000 }});
- assert(response.code === 202);
-
- var response2 = logCurlRequest("GET", url);
- assert(response2.code === 200);
- logJsonResponse(response2);
-
- var response3 = logCurlRequest("PATCH", url + "?mergeObjects=false", { "inhabitants": { "pakistan":188346000 }});
- assert(response3.code === 202);
- logJsonResponse(response3);
-
- var response4 = logCurlRequest("GET", url);
- assert(response4.code === 200);
- logJsonResponse(response4);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
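-
-Revision conflict caused by a non-matching "If-Match" header, yielding an
-*HTTP 412* (a minimal sketch; the example name is hypothetical):
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPatchDocumentIfMatchOther}
-  var cn = "products";
-  db._drop(cn);
-  db._create(cn);
-
-  var document = db.products.save({"hello":"world"});
-  var document2 = db.products.save({"hello2":"world"});
-  var url = "/_api/document/" + document._id;
-  // deliberately use the other document's revision as the precondition
-  var headers = {"If-Match": "\"" + document2._rev + "\""};
-
-  var response = logCurlRequest('PATCH', url, { "hello": "again" }, headers);
-
-  assert(response.code === 412);
-
-  logJsonResponse(response);
-  ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN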
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/post_api_document_collection.md b/Documentation/DocuBlocks/Rest/Documents/post_api_document_collection.md
deleted file mode 100644
index fdace01129e6..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/post_api_document_collection.md
+++ /dev/null
@@ -1,317 +0,0 @@
-@startDocuBlock post_api_document_collection
-@brief creates documents
-
-@RESTHEADER{POST /_api/document/{collection},Create document,createDocument}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the `collection` in which the document is to be created.
-
-@RESTALLBODYPARAM{document,object,required}
-A JSON representation of a single document.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{collection,string,optional}
-The name of the collection. This query parameter is only for backward compatibility.
-In ArangoDB versions < 3.0, the URL path was `/_api/document` and
-this query parameter was required. This combination still works, but
-the recommended way is to specify the collection in the URL path.
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until document has been synced to disk.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Additionally return the complete new document under the attribute `new`
-in the result.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Additionally return the complete old document under the attribute `old`
-in the result. Only available if the overwrite option is used.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if the document operation
-succeeds. No meta-data is returned for the created document. If the
-operation raises an error, an error object is returned.
-
-You can use this option to save network traffic.
-
-@RESTQUERYPARAM{overwrite,boolean,optional}
-If set to `true`, the insert becomes a replace-insert. If a document with the
-same `_key` already exists, the new document is not rejected with a unique
-constraint violation error but replaces the old document. Note that operations
-with `overwrite` parameter require a `_key` attribute in the request payload,
-therefore they can only be performed on collections sharded by `_key`.
-
-@RESTQUERYPARAM{overwriteMode,string,optional}
-This option supersedes `overwrite` and offers the following modes:
-- `"ignore"`: if a document with the specified `_key` value exists already,
- nothing is done and no write operation is carried out. The
- insert operation returns success in this case. This mode does not
- support returning the old document version using `RETURN OLD`. When using
- `RETURN NEW`, `null` is returned in case the document already existed.
-- `"replace"`: if a document with the specified `_key` value exists already,
- it is overwritten with the specified document value. This mode is
- also used when no overwrite mode is specified but the `overwrite`
- flag is set to `true`.
-- `"update"`: if a document with the specified `_key` value exists already,
- it is patched (partially updated) with the specified document value.
- The overwrite mode can be further controlled via the `keepNull` and
- `mergeObjects` parameters.
-- `"conflict"`: if a document with the specified `_key` value exists already,
- return a unique constraint violation error so that the insert operation
- fails. This is also the default behavior in case the overwrite mode is
- not set, and the `overwrite` flag is `false` or not set either.
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-If the intention is to delete existing attributes with the update-insert
-command, the URL query parameter `keepNull` can be used with a value of
-`false`. This modifies the behavior of the patch command to remove any
-attributes from the existing document that are contained in the patch document
-with an attribute value of `null`.
-This option controls the update-insert behavior only.
-
-@RESTQUERYPARAM{mergeObjects,boolean,optional}
-Controls whether objects (not arrays) are merged if present in both the
-existing and the update-insert document. If set to `false`, the value in the
-patch document overwrites the existing document's value. If set to `true`,
-objects are merged. The default is `true`.
-This option controls the update-insert behavior only.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to add new entries to in-memory index caches if document insertions
-affect the edge index or cache-enabled persistent indexes.
-
-@RESTDESCRIPTION
-Creates a new document from the document given in the body, unless there
-is already a document with the `_key` given. If no `_key` is given, a new
-unique `_key` is generated automatically.
-
-Any `_id` and `_rev` attributes specified in the body are always ignored;
-the collection specified in the URL path (or the `collection` query
-parameter, respectively) is authoritative.
-
-If the document was created successfully, then the `Location` header
-contains the path to the newly created document. The `Etag` header field
-contains the revision of the document. Both are only set in the single
-document case.
-
-If `silent` is not set to `true`, the body of the response contains a
-JSON object with the following attributes:
-
- - `_id` contains the document identifier of the newly created document
- - `_key` contains the document key
- - `_rev` contains the document revision
-
-If the collection parameter `waitForSync` is `false`, then the call
-returns as soon as the document has been accepted. It does not wait
-until the document has been synced to disk.
-
-Optionally, the query parameter `waitForSync` can be used to force
-synchronization of the document creation operation to disk even in
-case that the `waitForSync` flag had been disabled for the entire
-collection. Thus, the `waitForSync` query parameter can be used to
-force synchronization of just this specific operation. To use this,
-set the `waitForSync` parameter to `true`. If the `waitForSync`
-parameter is not specified or set to `false`, then the collection's
-default `waitForSync` behavior is applied. The `waitForSync` query
-parameter cannot be used to disable synchronization for collections
-that have a default `waitForSync` value of `true`.
-
-If the query parameter `returnNew` is `true`, then the complete new
-document is returned under the `new` attribute in the result.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-is returned if the document was created successfully and
-`waitForSync` was `true`.
-
-@RESTRETURNCODE{202}
-is returned if the document was created successfully and
-`waitForSync` was `false`.
-
-@RESTRETURNCODE{400}
-is returned if the body does not contain a valid JSON representation
-of one document. The response body contains
-an error document in this case.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the required
-number of replicas for a shard are currently in-sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection specified by `collection` is unknown.
-The response body contains an error document in this case.
-
-@RESTRETURNCODE{409}
-There are two possible reasons for this error in the single document case:
-
-- A document with the same qualifiers in an indexed attribute conflicts with an
- already existing document and thus violates the unique constraint.
- The response body contains an error document with the `errorNum` set to
- `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.
-- Locking the document key or some unique index entry failed due to another
- concurrent operation that operates on the same document. This is also referred
- to as a _write-write conflict_. The response body contains an error document
- with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the required number of replicas for
-a shard are currently in-sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@EXAMPLES
-
-Create a document in a collection named `products`. Note that the
-revision identifier might or might not be equal to the auto-generated
-key.
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostCreate1}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
-
- var url = "/_api/document/" + cn;
- var body = '{ "Hello": "World" }';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 201);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Create a document in a collection named `products` with a collection-level
-`waitForSync` value of `false`.
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostAccept1}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: false });
-
- var url = "/_api/document/" + cn;
- var body = '{ "Hello": "World" }';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Create a document in a collection with a collection-level `waitForSync`
-value of `false`, but using the `waitForSync` query parameter.
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostWait1}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: false });
-
- var url = "/_api/document/" + cn + "?waitForSync=true";
- var body = '{ "Hello": "World" }';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 201);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Unknown collection name
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostUnknownCollection1}
- var cn = "products";
-
- var url = "/_api/document/" + cn;
- var body = '{ "Hello": "World" }';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 404);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Illegal document
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostBadJson1}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var url = "/_api/document/" + cn;
- var body = '{ 1: "World" }';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 400);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Use of returnNew:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostReturnNew}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var url = "/_api/document/" + cn + "?returnNew=true";
- var body = '{"Hello":"World"}';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostOverwrite}
- var cn = "products";
- db._drop(cn);
- db._create(cn, { waitForSync: true });
-
- var url = "/_api/document/" + cn;
- var body = '{ "Hello": "World", "_key" : "lock" }';
- var response = logCurlRequest('POST', url, body);
- // insert
- assert(response.code === 201);
- logJsonResponse(response);
-
- body = '{ "Hello": "Universe", "_key" : "lock" }';
- url = "/_api/document/" + cn + "?overwrite=true";
- response = logCurlRequest('POST', url, body);
- // insert same key
- assert(response.code === 201);
- logJsonResponse(response);
-
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
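-
-Use of `overwriteMode`. This is a sketch in the style of the examples above;
-the example name is illustrative and not part of the original test suite:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostOverwriteMode}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var url = "/_api/document/" + cn;
- var body = '{ "Hello": "World", "_key" : "lock" }';
- var response = logCurlRequest('POST', url, body);
- // insert
- assert(response.code === 202);
- logJsonResponse(response);
-
- body = '{ "Hello": "Universe", "_key" : "lock" }';
- url = "/_api/document/" + cn + "?overwriteMode=update&returnNew=true";
- response = logCurlRequest('POST', url, body);
- // update-insert on the same key
- assert(response.code === 202);
- logJsonResponse(response);
-
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN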
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/post_api_document_collection_multiple.md b/Documentation/DocuBlocks/Rest/Documents/post_api_document_collection_multiple.md
deleted file mode 100644
index beb75e97ada0..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/post_api_document_collection_multiple.md
+++ /dev/null
@@ -1,235 +0,0 @@
-@startDocuBlock post_api_document_collection_multiple
-@brief creates multiple documents
-
-@RESTHEADER{POST /_api/document/{collection}#multiple,Create multiple documents,createDocuments}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the `collection` in which the documents are to be created.
-
-@RESTALLBODYPARAM{documents,json,required}
-An array of documents to create.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{collection,string,optional}
-The name of the collection. This is only for backward compatibility.
-In ArangoDB versions < 3.0, the URL path was `/_api/document` and
-this query parameter was required. This combination still works, but
-the recommended way is to specify the collection in the URL path.
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until document has been synced to disk.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Additionally return the complete new document under the attribute `new`
-in the result.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Additionally return the complete old document under the attribute `old`
-in the result. Only available if the overwrite option is used.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if all document operations
-succeed. No meta-data is returned for the created documents. If any of the
-operations raises an error, an array with the error object(s) is returned.
-
-You can use this option to save network traffic but you cannot map any errors
-to the inputs of your request.
-
-@RESTQUERYPARAM{overwrite,boolean,optional}
-If set to `true`, the insert becomes a replace-insert. If a document with the
-same `_key` already exists, the new document is not rejected with a unique
-constraint violation error but replaces the old document. Note that operations
-with the `overwrite` parameter require a `_key` attribute in the request payload,
-therefore they can only be performed on collections sharded by `_key`.
-
-@RESTQUERYPARAM{overwriteMode,string,optional}
-This option supersedes `overwrite` and offers the following modes:
-- `"ignore"`: if a document with the specified `_key` value exists already,
- nothing is done and no write operation is carried out. The
- insert operation returns success in this case. This mode does not
- support returning the old document version using `RETURN OLD`. When using
- `RETURN NEW`, `null` is returned in case the document already existed.
-- `"replace"`: if a document with the specified `_key` value exists already,
- it is overwritten with the specified document value. This mode is
- also used when no overwrite mode is specified but the `overwrite`
- flag is set to `true`.
-- `"update"`: if a document with the specified `_key` value exists already,
- it is patched (partially updated) with the specified document value.
- The overwrite mode can be further controlled via the `keepNull` and
- `mergeObjects` parameters.
-- `"conflict"`: if a document with the specified `_key` value exists already,
- return a unique constraint violation error so that the insert operation
- fails. This is also the default behavior in case the overwrite mode is
- not set, and the `overwrite` flag is `false` or not set either.
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-If the intention is to delete existing attributes with the update-insert
-command, the URL query parameter `keepNull` can be used with a value of
-`false`. This modifies the behavior of the patch command to remove any
-attributes from the existing document that are contained in the patch document
-with an attribute value of `null`.
-This option controls the update-insert behavior only.
-
-@RESTQUERYPARAM{mergeObjects,boolean,optional}
-Controls whether objects (not arrays) are merged if present in both the
-existing and the update-insert document. If set to `false`, the value in the
-patch document overwrites the existing document's value. If set to `true`,
-objects are merged. The default is `true`.
-This option controls the update-insert behavior only.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to add new entries to in-memory index caches if document insertions
-affect the edge index or cache-enabled persistent indexes.
-
-@RESTDESCRIPTION
-Creates new documents from the documents given in the body, unless there
-is already a document with the `_key` given. If no `_key` is given, a new
-unique `_key` is generated automatically.
-
-The result body contains a JSON array of the
-same length as the input array, and each entry contains the result
-of the operation for the corresponding input. In case of an error,
-the entry is a document with the attribute `error` set to `true` and
-`errorCode` set to the error code that occurred.
-
-Any `_id` and `_rev` attributes specified in the body are always ignored;
-the collection specified in the URL path (or the `collection` query
-parameter, respectively) is authoritative.
-
-If `silent` is not set to `true`, the body of the response contains an
-array of JSON objects with the following attributes:
-
- - `_id` contains the document identifier of the newly created document
- - `_key` contains the document key
- - `_rev` contains the document revision
-
-If the collection parameter `waitForSync` is `false`, then the call
-returns as soon as the documents have been accepted. It does not wait
-until the documents have been synced to disk.
-
-Optionally, the query parameter `waitForSync` can be used to force
-synchronization of the document creation operation to disk even in
-case that the `waitForSync` flag had been disabled for the entire
-collection. Thus, the `waitForSync` query parameter can be used to
-force synchronization of just this specific operation. To use this,
-set the `waitForSync` parameter to `true`. If the `waitForSync`
-parameter is not specified or set to `false`, then the collection's
-default `waitForSync` behavior is applied. The `waitForSync` query
-parameter cannot be used to disable synchronization for collections
-that have a default `waitForSync` value of `true`.
-
-If the query parameter `returnNew` is `true`, then, for each
-generated document, the complete new document is returned under
-the `new` attribute in the result.
-
-Should an error have occurred with some of the documents,
-the additional HTTP header `X-Arango-Error-Codes` is set. It
-contains a map of the error codes that occurred together with their
-multiplicities, as in `1205:10,1210:17`, which means that in 10
-cases the error 1205 "illegal document handle" and in 17 cases the
-error 1210 "unique constraint violated" occurred.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-is returned if `waitForSync` was `true` and operations were processed.
-
-@RESTRETURNCODE{202}
-is returned if `waitForSync` was `false` and operations were processed.
-
-@RESTRETURNCODE{400}
-is returned if the body does not contain a valid JSON representation
-of an array of documents. The response body contains
-an error document in this case.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the required
-number of replicas for a shard are currently in-sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection specified by `collection` is unknown.
-The response body contains an error document in this case.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the required number of replicas for
-a shard are currently in-sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@EXAMPLES
-
-Insert multiple documents:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostMulti1}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var url = "/_api/document/" + cn;
- var body = '[{"Hello":"Earth"}, {"Hello":"Venus"}, {"Hello":"Mars"}]';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Use of returnNew:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostMulti2}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var url = "/_api/document/" + cn + "?returnNew=true";
- var body = '[{"Hello":"Earth"}, {"Hello":"Venus"}, {"Hello":"Mars"}]';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Partially illegal documents:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostBadJsonMulti}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var url = "/_api/document/" + cn;
- var body = '[{ "_key": 111 }, {"_key":"abc"}]';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
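-
-Use of `silent` to suppress the per-document meta-data. This is a sketch in
-the style of the examples above; the example name is illustrative only:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerPostMultiSilent}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var url = "/_api/document/" + cn + "?silent=true";
- var body = '[{"Hello":"Earth"}, {"Hello":"Venus"}]';
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN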
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/put_api_document_collection.md b/Documentation/DocuBlocks/Rest/Documents/put_api_document_collection.md
deleted file mode 100644
index fb6f20c9fa18..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/put_api_document_collection.md
+++ /dev/null
@@ -1,147 +0,0 @@
-
-@startDocuBlock put_api_document_collection
-@brief replaces multiple documents
-
-@RESTHEADER{PUT /_api/document/{collection},Replace documents,replaceDocuments}
-
-@RESTALLBODYPARAM{documents,json,required}
-A JSON representation of an array of documents.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-This URL parameter is the name of the collection in which the
-documents are replaced.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until the new documents have been synced to disk.
-
-@RESTQUERYPARAM{ignoreRevs,boolean,optional}
-By default, or if this is set to `true`, the `_rev` attributes in
-the given documents are ignored. If this is set to `false`, then
-any `_rev` attribute given in a body document is taken as a
-precondition. The document is only replaced if the current revision
-is the one specified.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Return additionally the complete previous revision of the changed
-documents under the attribute `old` in the result.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Return additionally the complete new documents under the attribute `new`
-in the result.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if all document operations
-succeed. No meta-data is returned for the replaced documents. If at least one
-operation raises an error, an array with the error object(s) is returned.
-
-You can use this option to save network traffic but you cannot map any errors
-to the inputs of your request.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to update existing entries in in-memory index caches if document
-replacements affect the edge index or cache-enabled persistent indexes.
-
-@RESTDESCRIPTION
-Replaces multiple documents in the specified collection with the
-ones in the body, the replaced documents are specified by the `_key`
-attributes in the body documents.
-
-The value of the `_key` attribute as well as attributes
-used as sharding keys may not be changed.
-
-If `ignoreRevs` is `false` and there is a `_rev` attribute in a
-document in the body and its value does not match the revision of
-the corresponding document in the database, the precondition is
-violated.
-
-Cluster only: The replace documents _may_ contain
-values for the collection's pre-defined shard keys. Values for the shard keys
-are treated as hints to improve performance. Should the shard key
-values be incorrect, ArangoDB may answer with a `not found` error.
-
-Optionally, the query parameter `waitForSync` can be used to force
-synchronization of the document replacement operation to disk even in case
-that the `waitForSync` flag had been disabled for the entire collection.
-Thus, the `waitForSync` query parameter can be used to force synchronization
-of just specific operations. To use this, set the `waitForSync` parameter
-to `true`. If the `waitForSync` parameter is not specified or set to
-`false`, then the collection's default `waitForSync` behavior is
-applied. The `waitForSync` query parameter cannot be used to disable
-synchronization for collections that have a default `waitForSync` value
-of `true`.
-
-The body of the response contains a JSON array of the same length
-as the input array with the information about the identifier and the
-revision of the replaced documents. In each entry, the attribute
-`_id` contains the known `document-id` of each replaced document,
-`_key` contains the key which uniquely identifies a document in a
-given collection, and the attribute `_rev` contains the new document
-revision. In case of an error or violated precondition, an error
-object with the attribute `error` set to `true` and the attribute
-`errorCode` set to the error code is returned instead.
-
-If the query parameter `returnOld` is `true`, then, for each
-generated document, the complete previous revision of the document
-is returned under the `old` attribute in the result.
-
-If the query parameter `returnNew` is `true`, then, for each
-generated document, the complete new document is returned under
-the `new` attribute in the result.
-
-Note that if any precondition is violated or an error occurred with
-some of the documents, the return code is still 201 or 202, but
-the additional HTTP header `X-Arango-Error-Codes` is set, which
-contains a map of the error codes that occurred together with their
-multiplicities, as in `1200:17,1205:10`, which means that in 17
-cases the error 1200 "revision conflict" and in 10 cases the error
-1205 "illegal document handle" occurred.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-is returned if `waitForSync` was `true` and operations were processed.
-
-@RESTRETURNCODE{202}
-is returned if `waitForSync` was `false` and operations were processed.
-
-@RESTRETURNCODE{400}
-is returned if the body does not contain a valid JSON representation
-of an array of documents. The response body contains
-an error document in this case.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the required
-number of replicas for a shard are currently in-sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection was not found.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the required number of replicas for
-a shard are currently in-sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
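-@EXAMPLES
-
-Replacing multiple documents identified by the `_key` attributes in the body.
-This is a sketch in the style of the other document API examples; the example
-name is illustrative only:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerReplaceMultiDocuments}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- db.products.save({"_key":"a", "hello":"world"});
- db.products.save({"_key":"b", "hello":"world"});
-
- var url = "/_api/document/" + cn;
- var body = '[{"_key":"a", "hello":"you"}, {"_key":"b", "hello":"there"}]';
-
- var response = logCurlRequest('PUT', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-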
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Documents/put_api_document_collection_key.md b/Documentation/DocuBlocks/Rest/Documents/put_api_document_collection_key.md
deleted file mode 100644
index 7f7e84ed0182..000000000000
--- a/Documentation/DocuBlocks/Rest/Documents/put_api_document_collection_key.md
+++ /dev/null
@@ -1,232 +0,0 @@
-
-@startDocuBlock put_api_document_collection_key
-@brief replaces a document
-
-@RESTHEADER{PUT /_api/document/{collection}/{key},Replace document,replaceDocument}
-
-@RESTALLBODYPARAM{document,object,required}
-A JSON representation of a single document.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection,string,required}
-Name of the `collection` in which the document is to be replaced.
-
-@RESTURLPARAM{key,string,required}
-The document key.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Wait until document has been synced to disk.
-
-@RESTQUERYPARAM{ignoreRevs,boolean,optional}
-By default, or if this is set to `true`, the `_rev` attribute in
-the given document is ignored. If this is set to `false`, then
-the `_rev` attribute given in the body document is taken as a
-precondition. The document is only replaced if the current revision
-is the one specified.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Return additionally the complete previous revision of the changed
-document under the attribute `old` in the result.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Return additionally the complete new document under the attribute `new`
-in the result.
-
-@RESTQUERYPARAM{silent,boolean,optional}
-If set to `true`, an empty object is returned as response if the document operation
-succeeds. No meta-data is returned for the replaced document. If the
-operation raises an error, an error object is returned.
-
-You can use this option to save network traffic.
-
-@RESTQUERYPARAM{refillIndexCaches,boolean,optional}
-Whether to update existing entries in in-memory index caches if document
-replacements affect the edge index or cache-enabled persistent indexes.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{If-Match,string,optional}
-You can conditionally replace a document based on a target revision id by
-using the `if-match` HTTP header.
-
-@RESTDESCRIPTION
-Replaces the specified document with the one in the body, provided there is
-such a document and no precondition is violated.
-
-The value of the `_key` attribute as well as attributes
-used as sharding keys may not be changed.
-
-If the `If-Match` header is specified and the revision of the
-document in the database is unequal to the given revision, the
-precondition is violated.
-
-If `If-Match` is not given and `ignoreRevs` is `false` and there
-is a `_rev` attribute in the body and its value does not match
-the revision of the document in the database, the precondition is
-violated.
-
-If a precondition is violated, an *HTTP 412* is returned.
-
-If the document exists and can be updated, then an *HTTP 201* or
-an *HTTP 202* is returned (depending on `waitForSync`, see below),
-the `Etag` header field contains the new revision of the document
-and the `Location` header contains a complete URL under which the
-document can be queried.
-
-Cluster only: The replace documents _may_ contain
-values for the collection's pre-defined shard keys. Values for the shard keys
-are treated as hints to improve performance. Should the shard key
-values be incorrect, ArangoDB may answer with a *not found* error.
-
-Optionally, the query parameter `waitForSync` can be used to force
-synchronization of the document replacement operation to disk even in case
-that the `waitForSync` flag had been disabled for the entire collection.
-Thus, the `waitForSync` query parameter can be used to force synchronization
-of just specific operations. To use this, set the `waitForSync` parameter
-to `true`. If the `waitForSync` parameter is not specified or set to
-`false`, then the collection's default `waitForSync` behavior is
-applied. The `waitForSync` query parameter cannot be used to disable
-synchronization for collections that have a default `waitForSync` value
-of `true`.
-
-If `silent` is not set to `true`, the body of the response contains a JSON
-object with the information about the identifier and the revision. The attribute
-`_id` contains the known *document ID* of the updated document, `_key`
-contains the key which uniquely identifies a document in a given collection,
-and the attribute `_rev` contains the new document revision.
-
-If the query parameter `returnOld` is `true`, then
-the complete previous revision of the document
-is returned under the `old` attribute in the result.
-
-If the query parameter `returnNew` is `true`, then
-the complete new document is returned under
-the `new` attribute in the result.
-
-If the document does not exist, then an *HTTP 404* is returned and the
-body of the response contains an error document.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-is returned if the document was replaced successfully and
-`waitForSync` was `true`.
-
-@RESTRETURNCODE{202}
-is returned if the document was replaced successfully and
-`waitForSync` was `false`.
-
-@RESTRETURNCODE{400}
-is returned if the body does not contain a valid JSON representation
-of a document. The response body contains
-an error document in this case.
-
-@RESTRETURNCODE{403}
-with the error code `1004` is returned if the specified write concern for the
-collection cannot be fulfilled. This can happen if fewer than the required
-number of replicas for a shard are currently in-sync with the leader. For example,
-if the write concern is `2` and the replication factor is `3`, then the
-write concern is not fulfilled if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@RESTRETURNCODE{404}
-is returned if the collection or the document was not found.
-
-@RESTRETURNCODE{409}
-There are two possible reasons for this error:
-
-- The replace operation causes a unique constraint violation in a secondary
- index. The response body contains an error document with the `errorNum` set to
- `1210` (`ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED`) in this case.
-- Locking the document key or some unique index entry failed due to another
- concurrent operation that operates on the same document. This is also referred
- to as a _write-write conflict_. The response body contains an error document
- with the `errorNum` set to `1200` (`ERROR_ARANGO_CONFLICT`) in this case.
-
-@RESTRETURNCODE{412}
-is returned if the precondition is violated. The response also contains
-the found document's current revision in the `_rev` attribute.
-Additionally, the attributes `_id` and `_key` are returned.
-
-@RESTRETURNCODE{503}
-is returned if the system is temporarily not available. This can be a system
-overload or temporary failure. In this case it makes sense to retry the request
-later.
-
-If the error code is `1429`, then the write concern for the collection cannot be
-fulfilled. This can happen if fewer than the required number of replicas for
-a shard are currently in-sync with the leader. For example, if the write concern
-is `2` and the replication factor is `3`, then the write concern is not fulfilled
-if two replicas are not in-sync.
-
-Note that the HTTP status code is configurable via the
-`--cluster.failed-write-concern-status-code` startup option. It defaults to `403`
-but can be changed to `503` to signal client applications that it is a
-temporary error.
-
-@EXAMPLES
-
-Using a document identifier
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerUpdateDocument}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest('PUT', url, '{"Hello": "you"}');
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Unknown document identifier
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerUpdateDocumentUnknownHandle}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- db.products.remove(document._id);
- var url = "/_api/document/" + document._id;
-
- var response = logCurlRequest('PUT', url, "{}");
-
- assert(response.code === 404);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Produce a revision conflict
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerUpdateDocumentIfMatchOther}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- var document2 = db.products.save({"hello2":"world"});
- var url = "/_api/document/" + document._id;
- var headers = {"If-Match": "\"" + document2._rev + "\""};
-
- var response = logCurlRequest('PUT', url, '{"other":"content"}', headers);
-
- assert(response.code === 412);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN
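-
-Replacing with a `_rev` precondition in the body via `ignoreRevs=false`.
-This is a sketch in the style of the examples above; the example name is
-illustrative only:
-
-@EXAMPLE_ARANGOSH_RUN{RestDocumentHandlerUpdateDocumentIgnoreRevs}
- var cn = "products";
- db._drop(cn);
- db._create(cn);
-
- var document = db.products.save({"hello":"world"});
- var url = "/_api/document/" + document._id + "?ignoreRevs=false";
-
- var response = logCurlRequest('PUT', url, {"hello": "you", "_rev": document._rev});
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- ~ db._drop(cn);
-@END_EXAMPLE_ARANGOSH_RUN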
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/delete_api_foxx_development.md b/Documentation/DocuBlocks/Rest/Foxx/delete_api_foxx_development.md
deleted file mode 100644
index 0ac5e4368664..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/delete_api_foxx_development.md
+++ /dev/null
@@ -1,23 +0,0 @@
-@startDocuBlock delete_api_foxx_development
-@brief disable development mode
-
-@RESTHEADER{DELETE /_api/foxx/development, Disable development mode, disableFoxxDevelopmentMode}
-
-@RESTDESCRIPTION
-Puts the service at the given mount path into production mode.
-
-When running ArangoDB in a cluster with multiple Coordinators, this
-replaces the service on all other Coordinators with the version on this
-Coordinator.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/delete_api_foxx_service.md b/Documentation/DocuBlocks/Rest/Foxx/delete_api_foxx_service.md
deleted file mode 100644
index a8cb31186fc8..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/delete_api_foxx_service.md
+++ /dev/null
@@ -1,24 +0,0 @@
-@startDocuBlock delete_api_foxx_service
-@brief uninstall service
-
-@RESTHEADER{DELETE /_api/foxx/service, Uninstall service, deleteFoxxService}
-
-@RESTDESCRIPTION
-Removes the service at the given mount path from the database and file system.
-
-Returns an empty response on success.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTQUERYPARAM{teardown,boolean,optional}
-Set to `false` to not run the service's teardown script.
-
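-For example, to uninstall a service without running its teardown script
-(the `/my-service` mount path is hypothetical):
-
-    DELETE /_api/foxx/service?mount=/my-service&teardown=false
-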
-@RESTRETURNCODES
-
-@RESTRETURNCODE{204}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx.md b/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx.md
deleted file mode 100644
index 861b58fc48f2..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx.md
+++ /dev/null
@@ -1,31 +0,0 @@
-@startDocuBlock get_api_foxx
-@brief list installed services
-
-@RESTHEADER{GET /_api/foxx, List installed services, listFoxxServices}
-
-@RESTDESCRIPTION
-Fetches a list of services installed in the current database.
-
-Returns a list of objects with the following attributes:
-
-- *mount*: the mount path of the service
-- *development*: *true* if the service is running in development mode
-- *legacy*: *true* if the service is running in 2.8 legacy compatibility mode
-- *provides*: the service manifest's *provides* value or an empty object
-
-Additionally, the object may contain the following attributes if they have been set on the manifest:
-
-- *name*: a string identifying the service type
-- *version*: a semver-compatible version string
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{excludeSystem,boolean,optional}
-Whether or not system services should be excluded from the result.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
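-@EXAMPLES
-
-Listing the installed services, excluding system services. This is a sketch
-reusing the arangosh helpers of the document API examples; the example name
-is illustrative only:
-
-@EXAMPLE_ARANGOSH_RUN{RestFoxxServiceList}
- var response = logCurlRequest('GET', '/_api/foxx?excludeSystem=true');
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN
-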
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_configuration.md b/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_configuration.md
deleted file mode 100644
index e8ad1baf238c..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_configuration.md
+++ /dev/null
@@ -1,22 +0,0 @@
-@startDocuBlock get_api_foxx_configuration
-@brief get configuration options
-
-@RESTHEADER{GET /_api/foxx/configuration, Get configuration options, getFoxxConfiguration}
-
-@RESTDESCRIPTION
-Fetches the current configuration for the service at the given mount path.
-
-Returns an object mapping the configuration option names to their definitions
-including a human-friendly *title* and the *current* value (if any).
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_dependencies.md b/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_dependencies.md
deleted file mode 100644
index bf5f47170f68..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_dependencies.md
+++ /dev/null
@@ -1,22 +0,0 @@
-@startDocuBlock get_api_foxx_dependencies
-@brief get dependency options
-
-@RESTHEADER{GET /_api/foxx/dependencies, Get dependency options, getFoxxDependencies}
-
-@RESTDESCRIPTION
-Fetches the current dependencies for the service at the given mount path.
-
-Returns an object mapping the dependency names to their definitions
-including a human-friendly *title* and the *current* mount path (if any).
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_readme.md b/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_readme.md
deleted file mode 100644
index 72a70d1ed9e1..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_readme.md
+++ /dev/null
@@ -1,22 +0,0 @@
-@startDocuBlock get_api_foxx_readme
-@brief service README
-
-@RESTHEADER{GET /_api/foxx/readme, Service README, getFoxxReadme}
-
-@RESTDESCRIPTION
-Fetches the contents of the service's README or README.md file, if any.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@RESTRETURNCODE{204}
-Returned if no README file was found.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_scripts.md b/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_scripts.md
deleted file mode 100644
index de8979d316ca..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_scripts.md
+++ /dev/null
@@ -1,21 +0,0 @@
-@startDocuBlock get_api_foxx_scripts
-@brief list service scripts
-
-@RESTHEADER{GET /_api/foxx/scripts, List service scripts, listFoxxScripts}
-
-@RESTDESCRIPTION
-Fetches a list of the scripts defined by the service.
-
-Returns an object mapping the raw script names to human-friendly names.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_service.md b/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_service.md
deleted file mode 100644
index b6cc5b9408c7..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_service.md
+++ /dev/null
@@ -1,35 +0,0 @@
-@startDocuBlock get_api_foxx_service
-@brief service metadata
-
-@RESTHEADER{GET /_api/foxx/service, Service description, getFoxxServiceDescription}
-
-@RESTDESCRIPTION
-Fetches detailed information for the service at the given mount path.
-
-Returns an object with the following attributes:
-
-- *mount*: the mount path of the service
-- *path*: the local file system path of the service
-- *development*: *true* if the service is running in development mode
-- *legacy*: *true* if the service is running in 2.8 legacy compatibility mode
-- *manifest*: the normalized JSON manifest of the service
-
-Additionally, the object may contain the following attributes if they have been set on the manifest:
-
-- *name*: a string identifying the service type
-- *version*: a semver-compatible version string
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@RESTRETURNCODE{400}
-Returned if the mount path is unknown.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_swagger.md b/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_swagger.md
deleted file mode 100644
index 9100c73478b1..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/get_api_foxx_swagger.md
+++ /dev/null
@@ -1,21 +0,0 @@
-@startDocuBlock get_api_foxx_swagger
-@brief swagger description
-
-@RESTHEADER{GET /_api/foxx/swagger, Swagger description, getFoxxSwaggerDescription}
-
-@RESTDESCRIPTION
-Fetches the Swagger API description for the service at the given mount path.
-
-The response body will be an OpenAPI 2.0 compatible JSON description of the service API.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_configuration.md b/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_configuration.md
deleted file mode 100644
index a0d3a8fc5165..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_configuration.md
+++ /dev/null
@@ -1,23 +0,0 @@
-@startDocuBlock patch_api_foxx_configuration
-@brief update configuration options
-
-@RESTHEADER{PATCH /_api/foxx/configuration, Update configuration options, updateFoxxConfiguration}
-
-@RESTDESCRIPTION
-Replaces the given service's configuration.
-
-Returns an object mapping all configuration option names to their new values.
-
-@RESTALLBODYPARAM{options,object,required}
-A JSON object mapping configuration option names to their new values.
-Any omitted options will be ignored.
-
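-For example (the option name and value are hypothetical and depend on the
-service's manifest):
-
-    { "currency": "USD" }
-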
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_dependencies.md b/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_dependencies.md
deleted file mode 100644
index ba90ce206b45..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_dependencies.md
+++ /dev/null
@@ -1,23 +0,0 @@
-@startDocuBlock patch_api_foxx_dependencies
-@brief update dependency options
-
-@RESTHEADER{PATCH /_api/foxx/dependencies, Update dependency options, updateFoxxDependencies}
-
-@RESTDESCRIPTION
-Replaces the given service's dependencies.
-
-Returns an object mapping all dependency names to their new mount paths.
-
-@RESTALLBODYPARAM{options,object,required}
-A JSON object mapping dependency names to their new mount paths.
-Any omitted dependencies will be ignored.
-
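-For example (the dependency name and mount path are hypothetical and depend
-on the service's manifest):
-
-    { "myAuth": "/auth" }
-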
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_service.md b/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_service.md
deleted file mode 100644
index 772fcd19d602..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/patch_api_foxx_service.md
+++ /dev/null
@@ -1,64 +0,0 @@
-@startDocuBlock patch_api_foxx_service
-@brief upgrade a service
-
-@RESTHEADER{PATCH /_api/foxx/service, Upgrade service, upgradeFoxxService}
-
-@RESTDESCRIPTION
-Installs the given new service on top of the service currently installed at the given mount path.
-This is only recommended for switching between different versions of the same service.
-
-Unlike replacing a service, upgrading a service retains the old service's configuration
-and dependencies (if any) and should therefore only be used to migrate an existing service
-to a newer or equivalent service.
-
-The request body can be any of the following formats:
-
-- `application/zip`: a raw zip bundle containing a service
-- `application/javascript`: a standalone JavaScript file
-- `application/json`: a service definition as JSON
-- `multipart/form-data`: a service definition as a multipart form
-
-A service definition is an object or form with the following properties or fields:
-
-- *configuration*: a JSON object describing configuration values
-- *dependencies*: a JSON object describing dependency settings
-- *source*: a fully qualified URL or an absolute path on the server's file system
-
-When using multipart data, the *source* field can also alternatively be a file field
-containing either a zip bundle or a standalone JavaScript file.
-
-When using a standalone JavaScript file, the given file is executed
-to define the service's HTTP endpoints. It is equivalent to the file
-specified in the `main` field of the service manifest.
-
-If *source* is a URL, the URL must be reachable from the server.
-If *source* is a file system path, the path will be resolved on the server.
-In either case the path or URL is expected to resolve to a zip bundle,
-JavaScript file or (in case of a file system path) directory.
-
-Note that when using file system paths in a cluster with multiple Coordinators
-the file system path must resolve to equivalent files on every Coordinator.
-
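-For example, a minimal `application/json` service definition might look like
-this (the `source` URL is hypothetical):
-
-    { "source": "https://example.com/my-service-2.0.0.zip" }
-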
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTQUERYPARAM{teardown,boolean,optional}
-Set to `true` to run the old service's teardown script.
-
-@RESTQUERYPARAM{setup,boolean,optional}
-Set to `false` to not run the new service's setup script.
-
-@RESTQUERYPARAM{legacy,boolean,optional}
-Set to `true` to install the new service in 2.8 legacy compatibility mode.
-
-@RESTQUERYPARAM{force,boolean,optional}
-Set to `true` to force installing the service even if no service is installed under the given mount path.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx.md b/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx.md
deleted file mode 100644
index 61fa9321be32..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx.md
+++ /dev/null
@@ -1,56 +0,0 @@
-@startDocuBlock post_api_foxx
-@brief install new service
-
-@RESTHEADER{POST /_api/foxx, Install new service, createFoxxService}
-
-@RESTDESCRIPTION
-Installs the given new service at the given mount path.
-
-The request body can be any of the following formats:
-
-- `application/zip`: a raw zip bundle containing a service
-- `application/javascript`: a standalone JavaScript file
-- `application/json`: a service definition as JSON
-- `multipart/form-data`: a service definition as a multipart form
-
-A service definition is an object or form with the following properties or fields:
-
-- *configuration*: a JSON object describing configuration values
-- *dependencies*: a JSON object describing dependency settings
-- *source*: a fully qualified URL or an absolute path on the server's file system
-
-When using multipart data, the *source* field can also alternatively be a file field
-containing either a zip bundle or a standalone JavaScript file.
-
-When using a standalone JavaScript file, the given file is executed
-to define the service's HTTP endpoints. It is equivalent to the file
-specified in the `main` field of the service manifest.
-
-If *source* is a URL, the URL must be reachable from the server.
-If *source* is a file system path, the path will be resolved on the server.
-In either case the path or URL is expected to resolve to a zip bundle,
-JavaScript file or (in case of a file system path) directory.
-
-Note that when using file system paths in a cluster with multiple Coordinators
-the file system path must resolve to equivalent files on every Coordinator.
-
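-For example, a minimal `application/json` service definition might look like
-this (the `source` URL and configuration values are hypothetical):
-
-    {
-      "source": "https://example.com/my-service.zip",
-      "configuration": { "currency": "EUR" },
-      "dependencies": {}
-    }
-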
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path the service should be installed at.
-
-@RESTQUERYPARAM{development,boolean,optional}
-Set to `true` to enable development mode.
-
-@RESTQUERYPARAM{setup,boolean,optional}
-Set to `false` to not run the service's setup script.
-
-@RESTQUERYPARAM{legacy,boolean,optional}
-Set to `true` to install the service in 2.8 legacy compatibility mode.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_commit.md b/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_commit.md
deleted file mode 100644
index 26af6a255006..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_commit.md
+++ /dev/null
@@ -1,19 +0,0 @@
-@startDocuBlock post_api_foxx_commit
-@brief commit local service state
-
-@RESTHEADER{POST /_api/foxx/commit, Commit local service state, commitFoxxServiceState}
-
-@RESTDESCRIPTION
-Commits the local service state of the Coordinator to the database.
-
-This can be used to resolve service conflicts between Coordinators that cannot be fixed automatically due to missing data.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{replace,boolean,optional}
-Overwrite existing service files in database even if they already exist.
-
-@RESTRETURNCODE{204}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_development.md b/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_development.md
deleted file mode 100644
index d72a2133e267..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_development.md
+++ /dev/null
@@ -1,28 +0,0 @@
-@startDocuBlock post_api_foxx_development
-@brief enable development mode
-
-@RESTHEADER{POST /_api/foxx/development, Enable development mode, enableFoxxDevelopmentMode}
-
-@RESTDESCRIPTION
-Puts the service into development mode.
-
-While the service is running in development mode, the service is reloaded
-from the file system and its setup script (if any) is re-executed every
-time the service handles a request.
-
-When running ArangoDB in a cluster with multiple Coordinators, note that changes
-to the file system on one Coordinator are not reflected across the other
-Coordinators. This means you should treat your Coordinators as inconsistent
-as long as any service is running in development mode.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_download.md b/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_download.md
deleted file mode 100644
index 0815290da7c6..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_download.md
+++ /dev/null
@@ -1,27 +0,0 @@
-@startDocuBlock post_api_foxx_download
-@brief download service bundle
-
-@RESTHEADER{POST /_api/foxx/download, Download service bundle, downloadFoxxService}
-
-@RESTDESCRIPTION
-Downloads a zip bundle of the service directory.
-
-When development mode is enabled, this always creates a new bundle.
-
-Otherwise, the bundle represents the version of the service that
-is installed on that ArangoDB instance.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
-@RESTRETURNCODE{400}
-Returned if the mount path is unknown.
-
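-@EXAMPLES
-
-A minimal sketch, assuming a service is installed at the hypothetical mount
-path `/my-service`:
-
-@EXAMPLE_ARANGOSH_RUN{HttpFoxxDownload}
-  var url = "/_api/foxx/download?mount=/my-service";
-  // the response body is the raw zip bundle, so only the status is checked
-  var response = logCurlRequest('POST', url, '');
-
-  assert(response.code === 200);
-@END_EXAMPLE_ARANGOSH_RUN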
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_scripts_script.md b/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_scripts_script.md
deleted file mode 100644
index 5af89bc9d294..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_scripts_script.md
+++ /dev/null
@@ -1,30 +0,0 @@
-@startDocuBlock post_api_foxx_scripts_script
-@brief run service script
-
-@RESTHEADER{POST /_api/foxx/scripts/{name}, Run service script, runFoxxScript}
-
-@RESTALLBODYPARAM{data,json,optional}
-An arbitrary JSON value that will be parsed and passed to the
-script as its first argument.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{name,string,required}
-Name of the script to run.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTDESCRIPTION
-Runs the given script for the service at the given mount path.
-
-Returns the exports of the script, if any.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
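-@EXAMPLES
-
-A minimal sketch, assuming a service installed at the hypothetical mount path
-`/my-service` defines a script named `send-mail` (all names are illustrative):
-
-@EXAMPLE_ARANGOSH_RUN{HttpFoxxRunScript}
-  var url = "/_api/foxx/scripts/send-mail?mount=/my-service";
-  // the request body is parsed and passed to the script as its first argument
-  var body = { to: "admin@example.com" };
-  var response = logCurlRequest('POST', url, body);
-
-  assert(response.code === 200);
-  logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN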
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_tests.md b/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_tests.md
deleted file mode 100644
index 730c9f1a4275..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/post_api_foxx_tests.md
+++ /dev/null
@@ -1,50 +0,0 @@
-@startDocuBlock post_api_foxx_tests
-@brief run service tests
-
-@RESTHEADER{POST /_api/foxx/tests, Run service tests, runFoxxTests}
-
-@RESTDESCRIPTION
-Runs the tests for the service at the given mount path and returns the results.
-
-Supported test reporters are:
-
-- *default*: a simple list of test cases
-- *suite*: an object of test cases nested in suites
-- *stream*: a raw stream of test results
-- *xunit*: an XUnit/JUnit compatible structure
-- *tap*: a raw TAP compatible stream
-
-The *Accept* request header can be used to further control the response format:
-
-When using the *stream* reporter `application/x-ldjson` will result
-in the response body being formatted as a newline-delimited JSON stream.
-
-When using the *tap* reporter `text/plain` or `text/*` will result
-in the response body being formatted as a plain text TAP report.
-
-When using the *xunit* reporter `application/xml` or `text/xml` will result
-in the response body being formatted as XML instead of JSONML.
-
-Otherwise, the response body is formatted as non-pretty-printed JSON.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTQUERYPARAM{reporter,string,optional}
-Test reporter to use.
-
-@RESTQUERYPARAM{idiomatic,boolean,optional}
-Use the matching format for the reporter, regardless of the *Accept* header.
-
-@RESTQUERYPARAM{filter,string,optional}
-Only run tests whose full name (including the full test suite path and
-test case name) matches this string.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
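-@EXAMPLES
-
-A minimal sketch of running a service's tests with the *xunit* reporter,
-assuming a service with tests is installed at the hypothetical mount path
-`/my-service`:
-
-@EXAMPLE_ARANGOSH_RUN{HttpFoxxRunTests}
-  // reporter picks the result structure; see the reporter list above
-  var url = "/_api/foxx/tests?mount=/my-service&reporter=xunit";
-  var response = logCurlRequest('POST', url, '');
-
-  assert(response.code === 200);
-  logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN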
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_configuration.md b/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_configuration.md
deleted file mode 100644
index c1b372283754..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_configuration.md
+++ /dev/null
@@ -1,23 +0,0 @@
-@startDocuBlock put_api_foxx_configuration
-@brief replace configuration options
-
-@RESTHEADER{PUT /_api/foxx/configuration, Replace configuration options, replaceFoxxConfiguration}
-
-@RESTDESCRIPTION
-Replaces the given service's configuration completely.
-
-Returns an object mapping all configuration option names to their new values.
-
-@RESTALLBODYPARAM{options,object,required}
-A JSON object mapping configuration option names to their new values.
-Any omitted options will be reset to their default values or marked as unconfigured.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
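-@EXAMPLES
-
-A minimal sketch, assuming a service at the hypothetical mount path
-`/my-service` defines a configuration option named `currency`:
-
-@EXAMPLE_ARANGOSH_RUN{HttpFoxxReplaceConfiguration}
-  var url = "/_api/foxx/configuration?mount=/my-service";
-  // options omitted from the body are reset to their defaults
-  var body = { currency: "EUR" };
-  var response = logCurlRequest('PUT', url, body);
-
-  assert(response.code === 200);
-  logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN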
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_dependencies.md b/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_dependencies.md
deleted file mode 100644
index 52b6807acb89..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_dependencies.md
+++ /dev/null
@@ -1,23 +0,0 @@
-@startDocuBlock put_api_foxx_dependencies
-@brief replace dependencies options
-
-@RESTHEADER{PUT /_api/foxx/dependencies, Replace dependencies options, replaceFoxxDependencies}
-
-@RESTDESCRIPTION
-Replaces the given service's dependencies completely.
-
-Returns an object mapping all dependency names to their new mount paths.
-
-@RESTALLBODYPARAM{options,object,required}
-A JSON object mapping dependency names to their new mount paths.
-Any omitted dependencies will be disabled.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
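-@EXAMPLES
-
-A minimal sketch, assuming a service at the hypothetical mount path
-`/my-service` declares a dependency named `mailer` provided by a service
-mounted at `/my-mailer`:
-
-@EXAMPLE_ARANGOSH_RUN{HttpFoxxReplaceDependencies}
-  var url = "/_api/foxx/dependencies?mount=/my-service";
-  // dependencies omitted from the body are disabled
-  var body = { mailer: "/my-mailer" };
-  var response = logCurlRequest('PUT', url, body);
-
-  assert(response.code === 200);
-  logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN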
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_service.md b/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_service.md
deleted file mode 100644
index 0c6088edb497..000000000000
--- a/Documentation/DocuBlocks/Rest/Foxx/put_api_foxx_service.md
+++ /dev/null
@@ -1,64 +0,0 @@
-@startDocuBlock put_api_foxx_service
-@brief replace a service
-
-@RESTHEADER{PUT /_api/foxx/service, Replace service, replaceFoxxService}
-
-@RESTDESCRIPTION
-Removes the service at the given mount path from the database and file system.
-Then installs the given new service at the same mount path.
-
-This is a slightly safer equivalent to performing an uninstall of the old service
-followed by installing the new service. The new service's main and script files
-(if any) will be checked for basic syntax errors before the old service is removed.
-
-The request body can be any of the following formats:
-
-- `application/zip`: a raw zip bundle containing a service
-- `application/javascript`: a standalone JavaScript file
-- `application/json`: a service definition as JSON
-- `multipart/form-data`: a service definition as a multipart form
-
-A service definition is an object or form with the following properties or fields:
-
-- *configuration*: a JSON object describing configuration values
-- *dependencies*: a JSON object describing dependency settings
-- *source*: a fully qualified URL or an absolute path on the server's file system
-
-When using multipart data, the *source* field can also alternatively be a file field
-containing either a zip bundle or a standalone JavaScript file.
-
-When using a standalone JavaScript file, the given file is executed to
-define the service's HTTP endpoints. It is equivalent to the file referenced
-by the `main` field of the service manifest.
-
-If *source* is a URL, the URL must be reachable from the server.
-If *source* is a file system path, the path will be resolved on the server.
-In either case the path or URL is expected to resolve to a zip bundle,
-JavaScript file or (in case of a file system path) directory.
-
-Note that when using file system paths in a cluster with multiple Coordinators,
-the file system path must resolve to equivalent files on every Coordinator.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{mount,string,required}
-Mount path of the installed service.
-
-@RESTQUERYPARAM{teardown,boolean,optional}
-Set to `false` to not run the old service's teardown script.
-
-@RESTQUERYPARAM{setup,boolean,optional}
-Set to `false` to not run the new service's setup script.
-
-@RESTQUERYPARAM{legacy,boolean,optional}
-Set to `true` to install the new service in 2.8 legacy compatibility mode.
-
-@RESTQUERYPARAM{force,boolean,optional}
-Set to `true` to force service installation even if no service is installed under the given mount path.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the request was successful.
-
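-@EXAMPLES
-
-A minimal sketch of replacing a service using a JSON service definition,
-assuming a service is installed at the hypothetical mount path `/my-service`
-and the bundle URL is reachable from the server:
-
-@EXAMPLE_ARANGOSH_RUN{HttpFoxxReplaceService}
-  var url = "/_api/foxx/service?mount=/my-service";
-  // source may alternatively be an absolute path on the server's file system
-  var body = { source: "https://example.com/my-service.zip" };
-  var response = logCurlRequest('PUT', url, body);
-
-  assert(response.code === 200);
-  logJsonResponse(response);
-@END_EXAMPLE_ARANGOSH_RUN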
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/1_structs.md b/Documentation/DocuBlocks/Rest/Graphs/1_structs.md
deleted file mode 100644
index f7cedab15060..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/1_structs.md
+++ /dev/null
@@ -1,81 +0,0 @@
-@RESTSTRUCT{name,graph_representation,string,required,}
-The name of the graph.
-
-@RESTSTRUCT{edgeDefinitions,graph_representation,array,required,graph_edge_definition}
-An array of definitions for the relations of the graph.
-Each has the following type:
-
-@RESTSTRUCT{orphanCollections,graph_representation,array,required,string}
-An array of additional vertex collections.
-Documents within these collections do not have edges within this graph.
-
-@RESTSTRUCT{numberOfShards,graph_representation,integer,required,}
-Number of shards created for every new collection in the graph.
-
-@RESTSTRUCT{_id,graph_representation,string,required,}
-The internal id value of this graph.
-
-@RESTSTRUCT{_rev,graph_representation,string,required,}
-The revision of this graph. Can be used to make sure not to override
-concurrent modifications to this graph.
-
-@RESTSTRUCT{replicationFactor,graph_representation,integer,required,}
-The replication factor used for every new collection in the graph.
-For SatelliteGraphs, it is the string `"satellite"` (Enterprise Edition only).
-
-@RESTSTRUCT{writeConcern,graph_representation,integer,optional,}
-The default write concern for new collections in the graph.
-It determines how many copies of each shard are required to be
-in sync on the different DB-Servers. If there are fewer copies than this
-in the cluster, a shard refuses to write. Writes to shards with enough
-up-to-date copies succeed at the same time, however. The value of
-`writeConcern` cannot be greater than `replicationFactor`.
-For SatelliteGraphs, the `writeConcern` is automatically controlled to equal the
-number of DB-Servers and the attribute is not available. _(cluster only)_
-
-@RESTSTRUCT{isSmart,graph_representation,boolean,required,}
-Whether the graph is a SmartGraph (Enterprise Edition only).
-
-@RESTSTRUCT{isDisjoint,graph_representation,boolean,required,}
-Whether the graph is a Disjoint SmartGraph (Enterprise Edition only).
-
-@RESTSTRUCT{smartGraphAttribute,graph_representation,string,optional,}
-Name of the sharding attribute in the SmartGraph case (Enterprise Edition only).
-
-@RESTSTRUCT{isSatellite,graph_representation,boolean,required,}
-Whether the graph is a SatelliteGraph (Enterprise Edition only).
-
-@RESTSTRUCT{_id,vertex_representation,string,required,}
-The _id value of the stored data.
-
-@RESTSTRUCT{_key,vertex_representation,string,required,}
-The _key value of the stored data.
-
-@RESTSTRUCT{_rev,vertex_representation,string,required,}
-The _rev value of the stored data.
-
-@RESTSTRUCT{_id,edge_representation,string,required,}
-The _id value of the stored data.
-
-@RESTSTRUCT{_key,edge_representation,string,required,}
-The _key value of the stored data.
-
-@RESTSTRUCT{_rev,edge_representation,string,required,}
-The _rev value of the stored data.
-
-@RESTSTRUCT{_from,edge_representation,string,required,}
-The _from value of the stored data.
-
-@RESTSTRUCT{_to,edge_representation,string,required,}
-The _to value of the stored data.
-
-@RESTSTRUCT{collection,graph_edge_definition,string,required,}
-Name of the edge collection the edges are stored in.
-
-@RESTSTRUCT{from,graph_edge_definition,array,required,string}
-List of vertex collection names.
-Edges in this collection can only be inserted if their _from is in any of the collections here.
-
-@RESTSTRUCT{to,graph_edge_definition,array,required,string}
-List of vertex collection names.
-Edges in this collection can only be inserted if their _to is in any of the collections here.
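-
-A hedged sketch of how these structs combine into a single
-`graph_representation` object; all values are illustrative and borrow the
-`social` example graph used elsewhere in this documentation:
-
-    {
-      "name": "social",
-      "edgeDefinitions": [
-        {
-          "collection": "relation",
-          "from": ["female", "male"],
-          "to": ["female", "male"]
-        }
-      ],
-      "orphanCollections": [],
-      "numberOfShards": 1,
-      "_id": "_graphs/social",
-      "_rev": "_exampleRev--_",
-      "replicationFactor": 1,
-      "writeConcern": 1,
-      "isSmart": false,
-      "isDisjoint": false,
-      "isSatellite": false
-    }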
diff --git a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph.md b/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph.md
deleted file mode 100644
index 0c1a16b77691..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph.md
+++ /dev/null
@@ -1,80 +0,0 @@
-@startDocuBlock delete_api_gharial_graph
-@brief delete an existing graph
-
-@RESTHEADER{DELETE /_api/gharial/{graph}, Drop a graph, deleteGraph}
-
-@RESTDESCRIPTION
-Drops an existing graph object by name.
-Optionally all collections not used by other graphs
-can be dropped as well.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{dropCollections,boolean,optional}
-Drop collections of this graph as well. Collections will only be
-dropped if they are not used in other graphs.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Is returned if the graph could be dropped and waitForSync is enabled
-for the `_graphs` collection, or given in the request.
-
-@RESTRETURNCODE{202}
-Is returned if the graph could be dropped and waitForSync is disabled
-for the `_graphs` collection and not given in the request.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to drop a graph you at least need to have the following privileges:
- 1. `Administrate` access on the Database.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialDrop}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social?dropCollections=true";
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-~ examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_edge_collection_edge.md b/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_edge_collection_edge.md
deleted file mode 100644
index ac2ead698dfb..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_edge_collection_edge.md
+++ /dev/null
@@ -1,144 +0,0 @@
-@startDocuBlock delete_api_gharial_graph_edge_collection_edge
-@brief removes an edge from a graph
-
-@RESTHEADER{DELETE /_api/gharial/{graph}/edge/{collection}/{edge}, Remove an edge, deleteEdge}
-
-@RESTDESCRIPTION
-Removes an edge from the collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the edge collection the edge belongs to.
-
-@RESTURLPARAM{edge,string,required}
-The *_key* attribute of the edge.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Define if a representation of the deleted document should
-be returned within the response object.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is removed
-if it has the same revision as the given Etag. Otherwise an HTTP 412 is returned. As an alternative,
-you can supply the Etag in the query parameter *rev* in the URL.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the edge could be removed.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{removed,boolean,required,}
-Is set to true if the remove was successful.
-
-@RESTREPLYBODY{old,object,optional,edge_representation}
-The complete deleted edge document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{removed,boolean,required,}
-Is set to true if the remove was successful.
-
-@RESTREPLYBODY{old,object,optional,edge_representation}
-The complete deleted edge document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to delete edges in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The edge to remove does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialDeleteEdge}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var any = require("@arangodb").db.relation.any();
- var url = "/_api/gharial/social/edge/relation/" + any._key;
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_edge_definition.md b/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_edge_definition.md
deleted file mode 100644
index d76a344d342f..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_edge_definition.md
+++ /dev/null
@@ -1,108 +0,0 @@
-@startDocuBlock delete_api_gharial_graph_edge_definition
-@brief Remove an edge definition from the graph
-
-@RESTHEADER{DELETE /_api/gharial/{graph}/edge/{collection}, Remove an edge definition from the graph, deleteEdgeDefinition}
-
-@RESTDESCRIPTION
-Remove one edge definition from the graph. This only removes the
-edge collection; the vertex collections remain untouched and can still
-be used in your queries.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the edge collection used in the edge definition.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{dropCollections,boolean,optional}
-Drop the collection as well.
-Collection will only be dropped if it is not used in other graphs.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Returned if the edge definition could be removed from the graph
-and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{202}
-Returned if the edge definition could be removed from the graph and
-waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to remove an edge definition you at least need to have the following privileges:
- 1. `Administrate` access on the Database.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found,
-or if no edge definition with this name is found in the graph.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialEdgeDefinitionRemove}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/edge/relation";
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- db._drop("relation");
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_vertex_collection.md b/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_vertex_collection.md
deleted file mode 100644
index 45e60d16cf43..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_vertex_collection.md
+++ /dev/null
@@ -1,144 +0,0 @@
-@startDocuBlock delete_api_gharial_graph_vertex_collection
-@brief Remove a vertex collection from the graph.
-
-@RESTHEADER{DELETE /_api/gharial/{graph}/vertex/{collection}, Remove vertex collection, deleteVertexCollection}
-
-@RESTDESCRIPTION
-Removes a vertex collection from the graph and optionally deletes the
-collection, if it is not used in any other graph.
-It can only remove vertex collections that are no longer part of edge
-definitions; if they are used in edge definitions, you have to modify those first.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the vertex collection.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{dropCollection,boolean,optional}
-Drop the collection as well.
-Collection will only be dropped if it is not used in other graphs.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the vertex collection was removed from the graph successfully
-and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{400}
-Returned if the vertex collection is still used in an edge definition.
-In this case it cannot be removed from the graph yet; it has to be
-removed from the edge definition first.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to drop a vertex collection you at least need to have the following privileges:
- 1. `Administrate` access on the Database.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-You can remove vertex collections that are not used in any edge collection:
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialRemoveVertexCollection}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- var g = examples.loadGraph("social");
- g._addVertexCollection("otherVertices");
- var url = "/_api/gharial/social/vertex/otherVertices";
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-~ examples.dropGraph("social");
-~ db._drop("otherVertices");
-@END_EXAMPLE_ARANGOSH_RUN
-
-You cannot remove vertex collections that are used in edge collections:
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialRemoveVertexCollectionFailed}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- var g = examples.loadGraph("social");
- var url = "/_api/gharial/social/vertex/male";
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 400);
-
- logJsonResponse(response);
- db._drop("male");
- db._drop("female");
- db._drop("relation");
-~ examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_vertex_collection_vertex.md b/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_vertex_collection_vertex.md
deleted file mode 100644
index 79925b1eea35..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/delete_api_gharial_graph_vertex_collection_vertex.md
+++ /dev/null
@@ -1,143 +0,0 @@
-@startDocuBlock delete_api_gharial_graph_vertex_collection_vertex
-@brief removes a vertex from a graph
-
-@RESTHEADER{DELETE /_api/gharial/{graph}/vertex/{collection}/{vertex}, Remove a vertex, deleteVertex}
-
-@RESTDESCRIPTION
-Removes a vertex from the collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the vertex collection the vertex belongs to.
-
-@RESTURLPARAM{vertex,string,required}
-The *_key* attribute of the vertex.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Define if a representation of the deleted document should
-be returned within the response object.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is removed
-if it has the same revision as the given Etag. Otherwise an HTTP 412 is returned. As an alternative,
-you can supply the Etag in the query parameter *rev* in the URL.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the vertex could be removed.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{removed,boolean,required,}
-Is set to true if the remove was successful.
-
-@RESTREPLYBODY{old,object,optional,vertex_representation}
-The complete deleted vertex document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{removed,boolean,required,}
-Is set to true if the remove was successful.
-
-@RESTREPLYBODY{old,object,optional,vertex_representation}
-The complete deleted vertex document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to delete vertices in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The vertex to remove does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialDeleteVertex}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/vertex/female/alice";
- var response = logCurlRequest('DELETE', url);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/get_api_edges_collection.md b/Documentation/DocuBlocks/Rest/Graphs/get_api_edges_collection.md
deleted file mode 100644
index 2aada2489c5e..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/get_api_edges_collection.md
+++ /dev/null
@@ -1,120 +0,0 @@
-@startDocuBlock get_api_edges_collection
-@brief get edges
-
-@RESTHEADER{GET /_api/edges/{collection-id}, Read in- or outbound edges, getVertexEdges}
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{collection-id,string,required}
-The id of the collection.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{vertex,string,required}
-The id of the start vertex.
-
-@RESTQUERYPARAM{direction,string,optional}
-Selects *in* or *out* direction for edges. If not set, edges in any
-direction are returned.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{x-arango-allow-dirty-read,boolean,optional}
-Set this header to `true` to allow the Coordinator to ask any shard replica for
-the data, not only the shard leader. This may result in "dirty reads".
-
-@RESTDESCRIPTION
-Returns an array of edges starting or ending in the vertex identified by
-*vertex*.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-is returned if the edge collection was found and edges were retrieved.
-
-@RESTRETURNCODE{400}
-is returned if the request contains invalid parameters.
-
-@RESTRETURNCODE{404}
-is returned if the edge collection was not found.
-
-@EXAMPLES
-
-Any direction
-
-@EXAMPLE_ARANGOSH_RUN{RestEdgesReadEdgesAny}
- var db = require("@arangodb").db;
- db._create("vertices");
- db._createEdgeCollection("edges");
-
- db.vertices.save({_key: "1"});
- db.vertices.save({_key: "2"});
- db.vertices.save({_key: "3"});
- db.vertices.save({_key: "4"});
-
- db.edges.save({_from: "vertices/1", _to: "vertices/3", _key: "5", "$label": "v1 -> v3"});
- db.edges.save({_from: "vertices/2", _to: "vertices/1", _key: "6", "$label": "v2 -> v1"});
- db.edges.save({_from: "vertices/4", _to: "vertices/1", _key: "7", "$label": "v4 -> v1"});
-
- var url = "/_api/edges/edges?vertex=vertices/1";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop("edges");
- db._drop("vertices");
-@END_EXAMPLE_ARANGOSH_RUN
-
-In edges
-
-@EXAMPLE_ARANGOSH_RUN{RestEdgesReadEdgesIn}
- var db = require("@arangodb").db;
- db._create("vertices");
- db._createEdgeCollection("edges");
-
- db.vertices.save({_key: "1"});
- db.vertices.save({_key: "2"});
- db.vertices.save({_key: "3"});
- db.vertices.save({_key: "4"});
-
- db.edges.save({_from: "vertices/1", _to: "vertices/3", _key: "5", "$label": "v1 -> v3"});
- db.edges.save({_from: "vertices/2", _to: "vertices/1", _key: "6", "$label": "v2 -> v1"});
- db.edges.save({_from: "vertices/4", _to: "vertices/1", _key: "7", "$label": "v4 -> v1"});
-
- var url = "/_api/edges/edges?vertex=vertices/1&direction=in";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop("edges");
- db._drop("vertices");
-@END_EXAMPLE_ARANGOSH_RUN
-
-Out edges
-
-@EXAMPLE_ARANGOSH_RUN{RestEdgesReadEdgesOut}
- var db = require("@arangodb").db;
- db._create("vertices");
- db._createEdgeCollection("edges");
-
- db.vertices.save({_key: "1"});
- db.vertices.save({_key: "2"});
- db.vertices.save({_key: "3"});
- db.vertices.save({_key: "4"});
-
- db.edges.save({_from: "vertices/1", _to: "vertices/3", _key: "5", "$label": "v1 -> v3"});
- db.edges.save({_from: "vertices/2", _to: "vertices/1", _key: "6", "$label": "v2 -> v1"});
- db.edges.save({_from: "vertices/4", _to: "vertices/1", _key: "7", "$label": "v4 -> v1"});
-
- var url = "/_api/edges/edges?vertex=vertices/1&direction=out";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- db._drop("edges");
- db._drop("vertices");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial.md b/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial.md
deleted file mode 100644
index c3d8c7614208..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial.md
+++ /dev/null
@@ -1,44 +0,0 @@
-
-@startDocuBlock get_api_gharial
-@brief Lists all graphs known to the graph module.
-
-@RESTHEADER{GET /_api/gharial, List all graphs, listGraphs}
-
-@RESTDESCRIPTION
-Lists all graphs stored in this database.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Is returned if the module is available and the graphs could be listed.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graphs,array,required,graph_list}
-A list of all named graphs.
-
-@RESTSTRUCT{graph,graph_list,object,optional,graph_representation}
-The properties of the named graph.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialList}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- examples.loadGraph("routeplanner");
- var url = "/_api/gharial";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-~ examples.dropGraph("social");
-~ examples.dropGraph("routeplanner");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph.md b/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph.md
deleted file mode 100644
index 56628bc81ffe..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph.md
+++ /dev/null
@@ -1,71 +0,0 @@
-
-@startDocuBlock get_api_gharial_graph
-@brief Get a graph from the graph module.
-
-@RESTHEADER{GET /_api/gharial/{graph}, Get a graph, getGraph}
-
-@RESTDESCRIPTION
-Selects information for a given graph.
-Returns the edge definitions as well as the orphan collections,
-or a 404 if the graph does not exist.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returns the graph if it could be found.
-The result will have the following format:
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the graph.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialGetGraph}
- var graph = require("@arangodb/general-graph");
-| if (graph._exists("myGraph")) {
-| graph._drop("myGraph", true);
- }
- graph._create("myGraph", [{
- collection: "edges",
- from: [ "startVertices" ],
- to: [ "endVertices" ]
- }]);
- var url = "/_api/gharial/myGraph";
-
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-
- graph._drop("myGraph", true);
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_edge.md b/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_edge.md
deleted file mode 100644
index df4c38607dea..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_edge.md
+++ /dev/null
@@ -1,61 +0,0 @@
-
-@startDocuBlock get_api_gharial_graph_edge
-@brief Lists all edge definitions
-
-@RESTHEADER{GET /_api/gharial/{graph}/edge, List edge definitions, listEdgeDefinitions}
-
-@RESTDESCRIPTION
-Lists all edge collections within this graph.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Is returned if the edge definitions could be listed.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{collections,array,required,string}
-The list of all edge collections within this graph,
-i.e. the collections used in its edge definitions.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialListEdge}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/edge";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-~ examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_edge_collection_edge.md b/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_edge_collection_edge.md
deleted file mode 100644
index c7b02fac09eb..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_edge_collection_edge.md
+++ /dev/null
@@ -1,143 +0,0 @@
-@startDocuBlock get_api_gharial_graph_edge_collection_edge
-@brief fetch an edge
-
-@RESTHEADER{GET /_api/gharial/{graph}/edge/{collection}/{edge}, Get an edge, getEdge}
-
-@RESTDESCRIPTION
-Gets an edge from the given collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the edge collection the edge belongs to.
-
-@RESTURLPARAM{edge,string,required}
-The *_key* attribute of the edge.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{rev,string,optional}
-Must contain a revision.
-If this is set, a document is only returned if
-it has exactly this revision.
-Also see the if-match header as an alternative to this.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is returned
-if it has the same revision as the given Etag. Otherwise an HTTP 412 is returned. As an alternative,
-you can supply the Etag in the query parameter *rev* in the URL.
-
-@RESTHEADERPARAM{if-none-match,string,optional}
-If the "If-None-Match" header is given, then it must contain exactly one Etag. The document is returned,
-only if it has a different revision than the given Etag. Otherwise an HTTP 304 is returned.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the edge could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{edge,object,required,edge_representation}
-The complete edge.
-
-@RESTRETURNCODE{304}
-Returned if the if-none-match header is given and the
-currently stored edge still has this revision value,
-i.e. the edge has not been updated since it was
-last fetched by the caller.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to read edges in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Read Only` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The edge does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialGetEdge}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var any = require("@arangodb").db.relation.any();
- var url = "/_api/gharial/social/edge/relation/" + any._key;
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_vertex.md b/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_vertex.md
deleted file mode 100644
index d4d46c42f1dd..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_vertex.md
+++ /dev/null
@@ -1,60 +0,0 @@
-@startDocuBlock get_api_gharial_graph_vertex
-@brief Lists all vertex collections used in this graph.
-
-@RESTHEADER{GET /_api/gharial/{graph}/vertex, List vertex collections, listVertexCollections}
-
-@RESTDESCRIPTION
-Lists all vertex collections within this graph.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Is returned if the collections could be listed.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{collections,array,required,string}
-The list of all vertex collections within this graph.
-Includes collections in edgeDefinitions as well as orphans.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialListVertex}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/vertex";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
-~ examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_vertex_collection_vertex.md b/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_vertex_collection_vertex.md
deleted file mode 100644
index 325e3baf950c..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/get_api_gharial_graph_vertex_collection_vertex.md
+++ /dev/null
@@ -1,142 +0,0 @@
-@startDocuBlock get_api_gharial_graph_vertex_collection_vertex
-@brief fetches an existing vertex
-
-@RESTHEADER{GET /_api/gharial/{graph}/vertex/{collection}/{vertex}, Get a vertex, getVertex}
-
-@RESTDESCRIPTION
-Gets a vertex from the given collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the vertex collection the vertex belongs to.
-
-@RESTURLPARAM{vertex,string,required}
-The *_key* attribute of the vertex.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{rev,string,optional}
-Must contain a revision.
-If this is set, a document is only returned if
-it has exactly this revision.
-Also see the if-match header as an alternative to this.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is returned
-if it has the same revision as the given Etag. Otherwise an HTTP 412 is returned. As an alternative,
-you can supply the Etag in the query parameter *rev*.
-
-@RESTHEADERPARAM{if-none-match,string,optional}
-If the "If-None-Match" header is given, then it must contain exactly one Etag. The document is returned,
-only if it has a different revision than the given Etag. Otherwise an HTTP 304 is returned.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the vertex could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{vertex,object,required,vertex_representation}
-The complete vertex.
-
-@RESTRETURNCODE{304}
-Returned if the if-none-match header is given and the
-currently stored vertex still has this revision value,
-i.e. the vertex has not been updated since it was
-last fetched by the caller.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to read vertices in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Read Only` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The vertex does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialGetVertex}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/vertex/female/alice";
- var response = logCurlRequest('GET', url);
-
- assert(response.code === 200);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/patch_api_gharial_graph_edge_collection_edge.md b/Documentation/DocuBlocks/Rest/Graphs/patch_api_gharial_graph_edge_collection_edge.md
deleted file mode 100644
index cb71b5bb8834..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/patch_api_gharial_graph_edge_collection_edge.md
+++ /dev/null
@@ -1,172 +0,0 @@
-@startDocuBlock patch_api_gharial_graph_edge_collection_edge
-@brief modify an existing edge
-
-@RESTHEADER{PATCH /_api/gharial/{graph}/edge/{collection}/{edge}, Modify an edge, updateEdge}
-
-@RESTDESCRIPTION
-Updates the data of the specific edge in the collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the edge collection the edge belongs to.
-
-@RESTURLPARAM{edge,string,required}
-The *_key* attribute of the edge.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-Define if values set to null should be stored.
-By default (true), the given document attribute(s) are set to null.
-If this parameter is false, the attribute(s) are instead deleted from the
-document.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Define if a representation of the old document should
-be returned within the response object.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Define if a representation of the new document should
-be returned within the response object.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is updated
-if it has the same revision as the given Etag. Otherwise an HTTP 412 is returned. As an alternative,
-you can supply the Etag in the query parameter *rev* in the URL.
-
-@RESTALLBODYPARAM{edge,object,required}
-The body has to contain a JSON object with exactly the attributes that should be overwritten; all other attributes remain unchanged.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the edge could be updated, and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{edge,object,required,edge_representation}
-The internal attributes for the edge.
-
-@RESTREPLYBODY{new,object,optional,edge_representation}
-The complete newly written edge document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,edge_representation}
-The complete overwritten edge document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{edge,object,required,edge_representation}
-The internal attributes for the edge.
-
-@RESTREPLYBODY{new,object,optional,edge_representation}
-The complete newly written edge document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,edge_representation}
-The complete overwritten edge document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to update edges in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The edge to update does not exist.
-* Either the `_from` or `_to` vertex does not exist (if updated).
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialPatchEdge}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var any = require("@arangodb").db.relation.any();
- var url = "/_api/gharial/social/edge/relation/" + any._key;
- body = {
- since: "01.01.2001"
- }
- var response = logCurlRequest('PATCH', url, body);
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/patch_api_gharial_graph_vertex_collection_vertex.md b/Documentation/DocuBlocks/Rest/Graphs/patch_api_gharial_graph_vertex_collection_vertex.md
deleted file mode 100644
index e0006061ff30..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/patch_api_gharial_graph_vertex_collection_vertex.md
+++ /dev/null
@@ -1,171 +0,0 @@
-@startDocuBlock patch_api_gharial_graph_vertex_collection_vertex
-@brief update an existing vertex
-
-@RESTHEADER{PATCH /_api/gharial/{graph}/vertex/{collection}/{vertex}, Update a vertex, updateVertex}
-
-@RESTDESCRIPTION
-Updates the data of the specific vertex in the collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the vertex collection the vertex belongs to.
-
-@RESTURLPARAM{vertex,string,required}
-The *_key* attribute of the vertex.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-Define if values set to null should be stored.
-By default (true) the given document's attribute(s) will be set to null.
-If this parameter is false, the attribute(s) will instead be deleted from the
-document.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Define if a presentation of the old document should
-be returned within the response object.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Define if a presentation of the new document should
-be returned within the response object.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is updated,
-if it has the same revision as the given Etag. Otherwise a HTTP 412 is returned. As an alternative
-you can supply the Etag in an attribute rev in the URL.
-
-@RESTALLBODYPARAM{vertex,object,required}
-The body has to contain a JSON object containing exactly the attributes that should be overwritten; all other attributes remain unchanged.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the vertex could be updated, and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{vertex,object,required,vertex_representation}
-The internal attributes for the vertex.
-
-@RESTREPLYBODY{new,object,optional,vertex_representation}
-The complete newly written vertex document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,vertex_representation}
-The complete overwritten vertex document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful, and waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{vertex,object,required,vertex_representation}
-The internal attributes for the vertex.
-
-@RESTREPLYBODY{new,object,optional,vertex_representation}
-The complete newly written vertex document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,vertex_representation}
-The complete overwritten vertex document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to update vertices in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The vertex to update does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialModifyVertex}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- body = {
- age: 26
- }
- var url = "/_api/gharial/social/vertex/female/alice";
- var response = logCurlRequest('PATCH', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial.md b/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial.md
deleted file mode 100644
index 6d0b71b9cf07..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial.md
+++ /dev/null
@@ -1,368 +0,0 @@
-@startDocuBlock post_api_gharial
-@brief Create a new graph in the graph module.
-
-@RESTHEADER{POST /_api/gharial, Create a graph, createGraph}
-
-@RESTDESCRIPTION
-The creation of a graph requires the name of the graph and a
-definition of its edges.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until everything is synced to disk.
-Will change the success response code.
-
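-A minimal sketch of how this parameter changes the response code (run in
-arangosh; the graph name is illustrative):
-
-```js
-var res = arango.POST("/_api/gharial?waitForSync=true", {
-  name: "myGraph",
-  edgeDefinitions: [
-    { collection: "edges", from: [ "startVertices" ], to: [ "endVertices" ] }
-  ]
-});
-// res.code is expected to be 201 here; without waitForSync=true,
-// a 202 is returned as soon as the request is accepted
-```
-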
-@RESTBODYPARAM{name,string,required,string}
-Name of the graph.
-
-@RESTBODYPARAM{edgeDefinitions,array,optional,graph_edge_definition}
-An array of definitions for the relations of the graph.
-Each has the following type:
-
-@RESTBODYPARAM{orphanCollections,array,optional,string}
-An array of additional vertex collections.
-Documents within these collections do not have edges within this graph.
-
-@RESTBODYPARAM{isSmart,boolean,optional,}
-Define if the created graph should be smart (Enterprise Edition only).
-
-@RESTBODYPARAM{isDisjoint,boolean,optional,}
-Whether to create a Disjoint SmartGraph instead of a regular SmartGraph
-(Enterprise Edition only).
-
-@RESTBODYPARAM{options,object,optional,post_api_gharial_create_opts}
-a JSON object to define options for creating collections within this graph.
-It can contain the following attributes:
-
-@RESTSTRUCT{smartGraphAttribute,post_api_gharial_create_opts,string,optional,}
-Only has an effect in the Enterprise Edition and is required if isSmart is true.
-The attribute name that is used to smartly shard the vertices of a graph.
-Every vertex in this SmartGraph has to have this attribute.
-Cannot be modified later.
-
-@RESTSTRUCT{satellites,post_api_gharial_create_opts,array,optional,string}
-An array of collection names that is used to create SatelliteCollections
-for a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).
-Each array element must be a string and a valid collection name.
-The collection type cannot be modified later.
-
-@RESTSTRUCT{numberOfShards,post_api_gharial_create_opts,integer,required,}
-The number of shards that is used for every collection within this graph.
-Cannot be modified later.
-
-@RESTSTRUCT{replicationFactor,post_api_gharial_create_opts,integer,required,}
-The replication factor used when initially creating collections for this graph.
-Can be set to `"satellite"` to create a SatelliteGraph, which then ignores
-`numberOfShards`, `minReplicationFactor`, and `writeConcern`
-(Enterprise Edition only).
-
-@RESTSTRUCT{writeConcern,post_api_gharial_create_opts,integer,optional,}
-Write concern for new collections in the graph.
-It determines how many copies of each shard are required to be
-in sync on the different DB-Servers. If there are fewer than this many copies
-in the cluster, a shard refuses to write. Writes to shards with enough
-up-to-date copies succeed at the same time, however. The value of
-`writeConcern` cannot be greater than `replicationFactor`.
-For SatelliteGraphs, the `writeConcern` is automatically controlled to equal the
-number of DB-Servers and the attribute is not available. _(cluster only)_
-
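-A sketch of a typical `options` object for a cluster graph (values are
-illustrative; note that `writeConcern` must not exceed `replicationFactor`):
-
-```js
-var options = {
-  numberOfShards: 9,      // shards per collection, fixed after creation
-  replicationFactor: 3,   // copies of each shard
-  writeConcern: 2         // copies that must be in sync for writes
-};
-```
-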
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Is returned if the graph could be created and waitForSync is enabled
-for the `_graphs` collection, or given in the request.
-The response body contains the graph configuration that has been stored.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the newly created graph.
-
-@RESTRETURNCODE{202}
-Is returned if the graph could be created and waitForSync is disabled
-for the `_graphs` collection and not given in the request.
-The response body contains the graph configuration that has been stored.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the newly created graph.
-
-@RESTRETURNCODE{400}
-Returned if the request is in an invalid format.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to create a graph you at least need to have the following privileges:
-
-1. `Administrate` access on the Database.
-2. `Read Only` access on every collection used within this graph.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{409}
-Returned if there is a conflict storing the graph. This can occur
-either if a graph with this name is already stored, or if there is one
-edge definition with the same edge collection but a different signature
-used in any other graph.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-Create a General Graph. This graph type does not make use of any sharding
-strategy and is useful on a single server.
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialCreate}
- var graph = require("@arangodb/general-graph");
-| if (graph._exists("myGraph")) {
-| graph._drop("myGraph", true);
- }
- var url = "/_api/gharial";
- body = {
- name: "myGraph",
- edgeDefinitions: [{
- collection: "edges",
- from: [ "startVertices" ],
- to: [ "endVertices" ]
- }]
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-
- graph._drop("myGraph", true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Create a SmartGraph. This graph uses 9 shards and
-is sharded by the "region" attribute.
-Available in the Enterprise Edition only.
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialCreateSmart}
- var graph = require("@arangodb/general-graph");
-| if (graph._exists("smartGraph")) {
-| graph._drop("smartGraph", true);
- }
- var url = "/_api/gharial";
- body = {
- name: "smartGraph",
- edgeDefinitions: [{
- collection: "edges",
- from: [ "startVertices" ],
- to: [ "endVertices" ]
- }],
- orphanCollections: [ "orphanVertices" ],
- isSmart: true,
- options: {
- replicationFactor: 2,
- numberOfShards: 9,
- smartGraphAttribute: "region"
- }
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-
- graph._drop("smartGraph", true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Create a disjoint SmartGraph. This graph uses 9 shards and
-is sharded by the "region" attribute.
-Available in the Enterprise Edition only.
-Note that as you are using a disjoint version, you can only
-create edges between vertices sharing the same region.
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialCreateDisjointSmart}
- var graph = require("@arangodb/general-graph");
-| if (graph._exists("disjointSmartGraph")) {
-| graph._drop("disjointSmartGraph", true);
- }
- var url = "/_api/gharial";
- body = {
- name: "disjointSmartGraph",
- edgeDefinitions: [{
- collection: "edges",
- from: [ "startVertices" ],
- to: [ "endVertices" ]
- }],
- orphanCollections: [ "orphanVertices" ],
- isSmart: true,
- options: {
- isDisjoint: true,
- replicationFactor: 2,
- numberOfShards: 9,
- smartGraphAttribute: "region"
- }
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-
- graph._drop("disjointSmartGraph", true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Create a SmartGraph with a satellite vertex collection.
-It uses the collection "endVertices" as a satellite collection.
-This collection is cloned to all servers, all other vertex
-collections are split into 9 shards
-and are sharded by the "region" attribute.
-Available in the Enterprise Edition only.
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialCreateSmartWithSatellites}
- var graph = require("@arangodb/general-graph");
-| if (graph._exists("smartGraph")) {
-| graph._drop("smartGraph", true);
- }
- var url = "/_api/gharial";
- body = {
- name: "smartGraph",
- edgeDefinitions: [{
- collection: "edges",
- from: [ "startVertices" ],
- to: [ "endVertices" ]
- }],
- orphanCollections: [ "orphanVertices" ],
- isSmart: true,
- options: {
- replicationFactor: 2,
- numberOfShards: 9,
- smartGraphAttribute: "region",
- satellites: [ "endVertices" ]
- }
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-
- graph._drop("smartGraph", true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-Create an EnterpriseGraph. This graph uses 9 shards,
-it does not make use of specific sharding attributes.
-Available in the Enterprise Edition only.
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialCreateEnterprise}
- var graph = require("@arangodb/general-graph");
-| if (graph._exists("enterpriseGraph")) {
-| graph._drop("enterpriseGraph", true);
- }
- var url = "/_api/gharial";
- body = {
- name: "enterpriseGraph",
- edgeDefinitions: [{
- collection: "edges",
- from: [ "startVertices" ],
- to: [ "endVertices" ]
- }],
- orphanCollections: [ ],
- isSmart: true,
- options: {
- replicationFactor: 2,
- numberOfShards: 9
- }
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-
- graph._drop("enterpriseGraph", true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-
-Create a SatelliteGraph. A SatelliteGraph does not use
-shards, but uses "satellite" as replicationFactor.
-Make sure to keep this graph small as it is cloned
-to every server.
-Available in the Enterprise Edition only.
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialCreateSatellite}
- var graph = require("@arangodb/general-graph");
-| if (graph._exists("satelliteGraph")) {
-| graph._drop("satelliteGraph", true);
- }
- var url = "/_api/gharial";
- body = {
- name: "satelliteGraph",
- edgeDefinitions: [{
- collection: "edges",
- from: [ "startVertices" ],
- to: [ "endVertices" ]
- }],
- orphanCollections: [ ],
- options: {
- replicationFactor: "satellite"
- }
- };
-
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
-
- graph._drop("satelliteGraph", true);
-@END_EXAMPLE_ARANGOSH_RUN
-
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_edge.md b/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_edge.md
deleted file mode 100644
index 45cbae31afb3..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_edge.md
+++ /dev/null
@@ -1,146 +0,0 @@
-@startDocuBlock post_api_gharial_graph_edge
-@brief Add a new edge definition to the graph
-
-@RESTHEADER{POST /_api/gharial/{graph}/edge, Add edge definition, createEdgeDefinition}
-
-@RESTDESCRIPTION
-Adds an additional edge definition to the graph.
-
-This edge definition has to contain a *collection* and arrays of
-*from* and *to* vertex collections. An edge definition can only
-be added if this definition is either not used in any other graph, or
-it is used with exactly the same definition. For example, it is not
-possible to store a definition "e" from "v1" to "v2" in one graph, and
-"e" from "v2" to "v1" in another graph.
-
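-A sketch of the constraint, with hypothetical collection names:
-
-```js
-// if some graph already uses this definition ...
-var usedElsewhere = { collection: "e", from: [ "v1" ], to: [ "v2" ] };
-// ... then adding "e" with a different signature is rejected
-var conflicting   = { collection: "e", from: [ "v2" ], to: [ "v1" ] };
-```
-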
-Additionally, collection creation options can be set.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTBODYPARAM{collection,string,required,string}
-The name of the edge collection to be used.
-
-@RESTBODYPARAM{from,array,required,string}
-One or many vertex collections that can contain source vertices.
-
-@RESTBODYPARAM{to,array,required,string}
-One or many vertex collections that can contain target vertices.
-
-@RESTBODYPARAM{options,object,optional,post_api_edgedef_create_opts}
-A JSON object to set options for creating collections within this
-edge definition.
-
-@RESTSTRUCT{satellites,post_api_edgedef_create_opts,array,optional,string}
-An array of collection names that is used to create SatelliteCollections
-for a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).
-Each array element must be a string and a valid collection name.
-The collection type cannot be modified later.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Returned if the definition could be added successfully and
-waitForSync is enabled for the `_graphs` collection.
-The response body contains the graph configuration that has been stored.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{202}
-Returned if the definition could be added successfully and
-waitForSync is disabled for the `_graphs` collection.
-The response body contains the graph configuration that has been stored.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{400}
-Returned if the definition could not be added,
-either because it is ill-formed, or
-because the definition is used in another graph with a different signature.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to modify a graph you at least need to have the following privileges:
-
-1. `Administrate` access on the Database.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialAddEdgeCol}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/edge";
- body = {
- collection: "works_in",
- from: ["female", "male"],
- to: ["city"]
- };
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_edge_collection.md b/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_edge_collection.md
deleted file mode 100644
index 7643fa03764a..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_edge_collection.md
+++ /dev/null
@@ -1,157 +0,0 @@
-@startDocuBlock post_api_gharial_graph_edge_collection
-@brief Creates an edge in an existing graph
-
-@RESTHEADER{POST /_api/gharial/{graph}/edge/{collection}, Create an edge, createEdge}
-
-@RESTDESCRIPTION
-Creates a new edge in the collection.
-Within the body, the edge has to contain a *_from* and a *_to* value referencing valid vertices in the graph.
-Furthermore, the edge has to be valid according to the definition of the used edge collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the edge collection the edge belongs to.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Define if the response should contain the complete
-new version of the document.
-
-@RESTBODYPARAM{_from,string,required,}
-The source vertex of this edge. Has to be valid within
-the used edge definition.
-
-@RESTBODYPARAM{_to,string,required,}
-The target vertex of this edge. Has to be valid within
-the used edge definition.
-
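-A hedged sketch of a request body that satisfies these constraints (the
-vertex IDs are illustrative and assume the "social" example graph):
-
-```js
-var res = arango.POST("/_api/gharial/social/edge/relation", {
-  type: "friend",
-  _from: "female/alice",  // must belong to a `from` collection of the definition
-  _to: "male/bob"         // must belong to a `to` collection of the definition
-});
-```
-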
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Returned if the edge could be created and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{edge,object,required,edge_representation}
-The internal attributes for the edge.
-
-@RESTREPLYBODY{new,object,optional,edge_representation}
-The complete newly written edge document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{edge,object,required,edge_representation}
-The internal attributes for the edge.
-
-@RESTREPLYBODY{new,object,optional,edge_representation}
-The complete newly written edge document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTRETURNCODE{400}
-Returned if the input document is invalid.
-This can for instance be the case if the `_from` or `_to` attribute is missing
-or malformed.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to insert edges into the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in any of the following cases:
-* No graph with this name could be found.
-* The edge collection is not part of the graph.
-* The vertex collection referenced in the `_from` or `_to` attribute is not part of the graph.
-* The vertex collection is part of the graph, but does not exist.
-* The `_from` or `_to` vertex does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialAddEdge}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
-~ require("internal").db._drop("relation");
-~ require("internal").db._drop("female");
-~ require("internal").db._drop("male");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/edge/relation";
- body = {
- type: "friend",
- _from: "female/alice",
- _to: "female/diana"
- };
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_vertex.md b/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_vertex.md
deleted file mode 100644
index f109dfe61f50..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_vertex.md
+++ /dev/null
@@ -1,125 +0,0 @@
-@startDocuBlock post_api_gharial_graph_vertex
-@brief Add an additional vertex collection to the graph.
-
-@RESTHEADER{POST /_api/gharial/{graph}/vertex, Add vertex collection, addVertexCollection}
-
-@RESTDESCRIPTION
-Adds a vertex collection to the set of orphan collections of the graph.
-If the collection does not exist, it will be created.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTBODYPARAM{options,object,optional,post_api_vertex_create_opts}
-A JSON object to set options for creating vertex collections.
-
-@RESTSTRUCT{satellites,post_api_vertex_create_opts,array,optional,string}
-An array of collection names that is used to create SatelliteCollections
-for a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).
-Each array element must be a string and a valid collection name.
-The collection type cannot be modified later.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Is returned if the collection could be created and waitForSync is enabled
-for the `_graphs` collection, or given in the request.
-The response body contains the graph configuration that has been stored.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{202}
-Is returned if the collection could be created and waitForSync is disabled
-for the `_graphs` collection and not given in the request.
-The response body contains the graph configuration that has been stored.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{400}
-Returned if the request is in an invalid format.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to modify a graph you at least need to have the following privileges:
-
-1. `Administrate` access on the Database.
-2. `Read Only` access on every collection used within this graph.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialAddVertexCol}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/vertex";
- body = {
- collection: "otherVertices"
- };
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_vertex_collection.md b/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_vertex_collection.md
deleted file mode 100644
index 5ebcf6dda4d7..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/post_api_gharial_graph_vertex_collection.md
+++ /dev/null
@@ -1,124 +0,0 @@
-@startDocuBlock post_api_gharial_graph_vertex_collection
-@brief create a new vertex
-
-@RESTHEADER{POST /_api/gharial/{graph}/vertex/{collection}, Create a vertex, createVertex}
-
-@RESTDESCRIPTION
-Adds a vertex to the given collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the vertex collection the vertex should be inserted into.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Define if the response should contain the complete
-new version of the document.
-
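-A minimal sketch of `returnNew` in action (names are illustrative):
-
-```js
-var res = arango.POST(
-  "/_api/gharial/social/vertex/male?returnNew=true",
-  { name: "Francis" });
-// res["new"] is expected to hold the stored document,
-// including the generated _id, _key, and _rev attributes
-```
-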
-@RESTALLBODYPARAM{vertex,object,required}
-The body has to be the JSON object to be stored.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Returned if the vertex could be added and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{vertex,object,required,vertex_representation}
-The internal attributes for the vertex.
-
-@RESTREPLYBODY{new,object,optional,vertex_representation}
-The complete newly written vertex document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{vertex,object,required,vertex_representation}
-The internal attributes generated while storing the vertex.
-Does not include any attribute given in the request body.
-
-@RESTREPLYBODY{new,object,optional,vertex_representation}
-The complete newly written vertex document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to insert vertices into the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found.
-Or if a graph is found but this collection is not part of the graph.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialAddVertex}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/vertex/male";
- body = {
- name: "Francis"
- }
- var response = logCurlRequest('POST', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_edge_collection_edge.md b/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_edge_collection_edge.md
deleted file mode 100644
index 1f555e89dc0b..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_edge_collection_edge.md
+++ /dev/null
@@ -1,177 +0,0 @@
-@startDocuBlock put_api_gharial_graph_edge_collection_edge
-@brief replace the content of an existing edge
-
-@RESTHEADER{PUT /_api/gharial/{graph}/edge/{collection}/{edge}, Replace an edge, replaceEdge}
-
-@RESTDESCRIPTION
-Replaces the data of an edge in the collection.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the edge collection the edge belongs to.
-
-@RESTURLPARAM{edge,string,required}
-The *_key* attribute of the edge.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-Define if values set to null should be stored. By default, the attribute is not removed from the document.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Define if a presentation of the old document should
-be returned within the response object.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Define if a presentation of the new document should
-be returned within the response object.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is updated,
-if it has the same revision as the given Etag. Otherwise a HTTP 412 is returned. As an alternative
-you can supply the Etag in an attribute rev in the URL.
-
-@RESTBODYPARAM{_from,string,required,}
-The source vertex of this edge. Has to be valid within
-the used edge definition.
-
-@RESTBODYPARAM{_to,string,required,}
-The target vertex of this edge. Has to be valid within
-the used edge definition.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Returned if the request was successful and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{edge,object,required,edge_representation}
-The internal attributes for the edge.
-
-@RESTREPLYBODY{new,object,optional,edge_representation}
-The complete newly written edge document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,edge_representation}
-The complete overwritten edge document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{edge,object,required,edge_representation}
-The internal attributes for the edge.
-
-@RESTREPLYBODY{new,object,optional,edge_representation}
-The complete newly written edge document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,edge_representation}
-The complete overwritten edge document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to replace edges in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The edge to replace does not exist.
-* Either the `_from` or `_to` vertex does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialPutEdge}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var any = require("@arangodb").db.relation.any();
- var url = "/_api/gharial/social/edge/relation/" + any._key;
- body = {
- type: "divorced",
- _from: "female/alice",
- _to: "male/bob"
- }
- var response = logCurlRequest('PUT', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_edge_definition.md b/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_edge_definition.md
deleted file mode 100644
index 8835e7ed387f..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_edge_definition.md
+++ /dev/null
@@ -1,144 +0,0 @@
-@startDocuBlock put_api_gharial_graph_edge_definition
-@brief Replace an existing edge definition
-
-@RESTHEADER{PUT /_api/gharial/{graph}/edge/{collection}, Replace an edge definition, replaceEdgeDefinition}
-
-@RESTDESCRIPTION
-Change one specific edge definition.
-This will modify all occurrences of this definition in all graphs known to your database.
-
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the edge collection used in the edge definition.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{dropCollections,boolean,optional}
-Drop the collection as well.
-The collection is only dropped if it is not used in other graphs.
-
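-A hedged sketch of a replacement that also drops collections no longer used
-by any graph (names are illustrative):
-
-```js
-var res = arango.PUT(
-  "/_api/gharial/social/edge/relation?dropCollections=true",
-  { collection: "relation", from: [ "female" ], to: [ "male" ] });
-```
-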
-@RESTBODYPARAM{collection,string,required,string}
-The name of the edge collection to be used.
-
-@RESTBODYPARAM{from,array,required,string}
-One or many vertex collections that can contain source vertices.
-
-@RESTBODYPARAM{to,array,required,string}
-One or many vertex collections that can contain target vertices.
-
-@RESTBODYPARAM{options,object,optional,post_api_edgedef_modify_opts}
-A JSON object to set options for modifying collections within this
-edge definition.
-
-@RESTSTRUCT{satellites,post_api_edgedef_modify_opts,array,optional,string}
-An array of collection names that is used to create SatelliteCollections
-for a (Disjoint) SmartGraph using SatelliteCollections (Enterprise Edition only).
-Each array element must be a string and a valid collection name.
-The collection type cannot be modified later.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{201}
-Returned if the request was successful and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{202}
-Returned if the request was successful but waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{graph,object,required,graph_representation}
-The information about the modified graph.
-
-@RESTRETURNCODE{400}
-Returned if the new edge definition is ill-formed and cannot be used.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to modify a graph you at least need to have the following privileges:
-
-1. `Administrate` access on the Database.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned if no graph with this name could be found, or if no edge definition
-with this name is found in the graph.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialReplaceEdgeCol}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- var url = "/_api/gharial/social/edge/relation";
- body = {
- collection: "relation",
- from: ["female", "male", "animal"],
- to: ["female", "male", "animal"]
- };
- var response = logCurlRequest('PUT', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_vertex_collection_vertex.md b/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_vertex_collection_vertex.md
deleted file mode 100644
index f6c7b406e523..000000000000
--- a/Documentation/DocuBlocks/Rest/Graphs/put_api_gharial_graph_vertex_collection_vertex.md
+++ /dev/null
@@ -1,169 +0,0 @@
-@startDocuBlock put_api_gharial_graph_vertex_collection_vertex
-@brief replaces an existing vertex
-
-@RESTHEADER{PUT /_api/gharial/{graph}/vertex/{collection}/{vertex}, Replace a vertex, replaceVertex}
-
-@RESTDESCRIPTION
-Replaces the data of a vertex in the collection.
-
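-Unlike an update via PATCH, a replacement discards all attributes that are
-not present in the request body. A minimal sketch (names are illustrative):
-
-```js
-var res = arango.PUT(
-  "/_api/gharial/social/vertex/female/alice",
-  { name: "Alice Cooper", age: 26 });
-// any attribute the stored vertex had besides these is gone afterwards
-```
-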
-@RESTURLPARAMETERS
-
-@RESTURLPARAM{graph,string,required}
-The name of the graph.
-
-@RESTURLPARAM{collection,string,required}
-The name of the vertex collection the vertex belongs to.
-
-@RESTURLPARAM{vertex,string,required}
-The *_key* attribute of the vertex.
-
-@RESTQUERYPARAMETERS
-
-@RESTQUERYPARAM{waitForSync,boolean,optional}
-Define if the request should wait until synced to disk.
-
-@RESTQUERYPARAM{keepNull,boolean,optional}
-Define if values set to null should be stored. By default, the attribute is not removed from the document.
-
-@RESTQUERYPARAM{returnOld,boolean,optional}
-Define if a presentation of the old document should
-be returned within the response object.
-
-@RESTQUERYPARAM{returnNew,boolean,optional}
-Define if a presentation of the new document should
-be returned within the response object.
-
-@RESTHEADERPARAMETERS
-
-@RESTHEADERPARAM{if-match,string,optional}
-If the "If-Match" header is given, then it must contain exactly one Etag. The document is updated,
-if it has the same revision as the given Etag. Otherwise a HTTP 412 is returned. As an alternative
-you can supply the Etag in an attribute rev in the URL.
-
-@RESTALLBODYPARAM{vertex,object,required}
-The body has to be the JSON object to be stored.
-
-@RESTRETURNCODES
-
-@RESTRETURNCODE{200}
-Returned if the vertex could be replaced, and waitForSync is true.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{vertex,object,required,vertex_representation}
-The internal attributes for the vertex.
-
-@RESTREPLYBODY{new,object,optional,vertex_representation}
-The complete newly written vertex document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,vertex_representation}
-The complete overwritten vertex document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{202}
-Returned if the vertex could be replaced, and waitForSync is false.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is false in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{vertex,object,required,vertex_representation}
-The internal attributes for the vertex.
-
-@RESTREPLYBODY{new,object,optional,vertex_representation}
-The complete newly written vertex document.
-Includes all written attributes in the request body
-and all internal attributes generated by ArangoDB.
-Will only be present if returnNew is true.
-
-@RESTREPLYBODY{old,object,optional,vertex_representation}
-The complete overwritten vertex document.
-Includes all attributes stored before this operation.
-Will only be present if returnOld is true.
-
-@RESTRETURNCODE{403}
-Returned if your user has insufficient rights.
-In order to replace vertices in the graph you at least need to have the following privileges:
-
-1. `Read Only` access on the Database.
-2. `Write` access on the given collection.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{404}
-Returned in the following cases:
-* No graph with this name could be found.
-* This collection is not part of the graph.
-* The vertex to replace does not exist.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@RESTRETURNCODE{412}
-Returned if the if-match header is given, but the stored document's revision is different.
-
-@RESTREPLYBODY{error,boolean,required,}
-Flag if there was an error (true) or not (false).
-It is true in this response.
-
-@RESTREPLYBODY{code,integer,required,}
-The response code.
-
-@RESTREPLYBODY{errorNum,integer,required,}
-ArangoDB error number for the error that occurred.
-
-@RESTREPLYBODY{errorMessage,string,required,}
-A message created for this error.
-
-@EXAMPLES
-
-@EXAMPLE_ARANGOSH_RUN{HttpGharialReplaceVertex}
- var examples = require("@arangodb/graph-examples/example-graph.js");
-~ examples.dropGraph("social");
- examples.loadGraph("social");
- body = {
- name: "Alice Cooper",
- age: 26
- }
- var url = "/_api/gharial/social/vertex/female/alice";
- var response = logCurlRequest('PUT', url, body);
-
- assert(response.code === 202);
-
- logJsonResponse(response);
- examples.dropGraph("social");
-@END_EXAMPLE_ARANGOSH_RUN
-@endDocuBlock
diff --git a/Documentation/DocuBlocks/Rest/Hot Backups/post_admin_backup_create.md b/Documentation/DocuBlocks/Rest/Hot Backups/post_admin_backup_create.md
deleted file mode 100644
index f9aa389691e5..000000000000
--- a/Documentation/DocuBlocks/Rest/Hot Backups/post_admin_backup_create.md
+++ /dev/null
@@ -1,81 +0,0 @@
-@startDocuBlock post_admin_backup_create
-@brief creates a local backup
-
-@RESTHEADER{POST /_admin/backup/create, Create backup, createBackup}
-
-@RESTDESCRIPTION
-Creates a consistent backup "as soon as possible", very much
-like a snapshot in time, with a given label. The ambiguity in the
-phrase "as soon as possible" refers to the next window during which a
-global write lock across all databases can be obtained in order to
-guarantee consistency. Note that the backup at first resides on the
-same machine and hard drive as the original data. Make sure to upload
-it to a remote site for an actual backup.
-
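-A minimal sketch of triggering a backup from arangosh (the label is
-illustrative):
-
-```js
-var res = arango.POST("/_admin/backup/create", { label: "nightly" });
-// res.result.id is expected to contain the new backup's identifier
-```
-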
-@RESTBODYPARAM{label,string,optional,string}
-The label for this backup. The label is used together with a
-timestamp string to create a unique backup identifier, `_