diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 00000000..0248ade1
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @aviau @sebito91 @xginn8
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 00000000..7a7927c1
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,4 @@
+- **InfluxDB version:** e.g. 1.7.7 (output of the `influx version` command)
+- **InfluxDB-python version:** e.g. 5.2.2 (output of the `python -c "from __future__ import print_function; import influxdb; print(influxdb.__version__)"` command)
+- **Python version:** e.g. 3.7.4 (output of the `python --version` command)
+- **Operating system version:** e.g. Windows 10, Ubuntu 18.04, macOS 10.14.5
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..84729d17
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,5 @@
+---
+##### Contributor checklist
+
+- [ ] Builds are passing
+- [ ] New tests have been added (for feature additions)
diff --git a/.gitignore b/.gitignore
index d88a1c42..d970c44c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,6 +7,7 @@ build/
mock*/
nose*/
.pybuild/
+.mypy_cache/
debian/files
debian/python-influxdb.debhelper.log
debian/python-influxdb.postinst.debhelper
@@ -20,3 +21,4 @@ debian/python3-influxdb/
docs/build/
.coverage
cover
+env
diff --git a/.travis.yml b/.travis.yml
index 485c0c0f..9d45f19b 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,28 +1,62 @@
language: python
+
+python:
+ - "2.7"
+ - "3.5"
+ - "3.6"
+ - "3.7"
+ - "pypy"
+ - "pypy3"
+
env:
- - TOX_ENV=py27
- - TOX_ENV=py32
- - TOX_ENV=py33
- - TOX_ENV=py34
- - TOX_ENV=pypy
- - TOX_ENV=pypy3
- - TOX_ENV=docs
- - TOX_ENV=flake8
- - TOX_ENV=coverage
+ - INFLUXDB_VER=1.2.4 # 2017-05-08
+ - INFLUXDB_VER=1.3.9 # 2018-01-19
+ - INFLUXDB_VER=1.4.3 # 2018-01-30
+ - INFLUXDB_VER=1.5.4 # 2018-06-22
+ - INFLUXDB_VER=1.6.4 # 2018-10-24
+ - INFLUXDB_VER=1.7.4 # 2019-02-14
+
+addons:
+ apt:
+ packages:
+ - wget
+
+matrix:
+ include:
+ - python: 3.7
+ env: TOX_ENV=pep257
+ - python: 3.7
+ env: TOX_ENV=docs
+ - python: 3.7
+ env: TOX_ENV=flake8
+ - python: 3.7
+ env: TOX_ENV=coverage
+ - python: 3.7
+ env: TOX_ENV=mypy
+
install:
- - sudo pip install tox
- - sudo pip install coveralls
+ - pip install tox-travis
+ - pip install setuptools
+ - pip install coveralls
+ - mkdir -p "influxdb_install/${INFLUXDB_VER}"
+ - if [ -n "${INFLUXDB_VER}" ] ; then wget "https://dl.influxdata.com/influxdb/releases/influxdb_${INFLUXDB_VER}_amd64.deb" ; fi
+ - if [ -n "${INFLUXDB_VER}" ] ; then dpkg -x influxdb*.deb "influxdb_install/${INFLUXDB_VER}" ; fi
+
script:
- - tox -e $TOX_ENV
+ - export "INFLUXDB_PYTHON_INFLUXD_PATH=$(pwd)/influxdb_install/${INFLUXDB_VER}/usr/bin/influxd"
+ - if [ -n "${TOX_ENV}" ]; then tox -e "${TOX_ENV}"; else tox; fi
+
after_success:
- - if [ "$TOX_ENV" == "coverage" ] ; then coveralls; fi
-deploy:
- provider: pypi
- user: errplane
- password:
- secure: C20cSqrCtd7Ng2oxSy9YSQS72aeqMjrRaZTYKIIF4eSR4JzCawasFhof6Pq/mUqx6fJCBTZ7yMUqfK22JAQ2iUoUnBF04IHASR3iwqjdCRbXGtzX1J9Bw//6iCHBE5fgGEHQc8Mw5wKDIy5RvbjiR9ADCW/cIlpVSF9QzH/RA24=
- on:
- tags: true
- repo: influxdb/influxdb-python
+ - if [ "${TOX_ENV}" == "coverage" ] ; then coveralls; fi
+
notifications:
email: false
+
+sudo: false
+
+# Travis caching
+cache: false
+# directories:
+# - $HOME/.cache/pip
+#before_cache:
+# - rm -f $HOME/.cache/pip/log/debug.log
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 00000000..bfd27d38
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,375 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/)
+and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html).
+
+## [v5.3.2] - 2024-04-17
+
+### Changed
+- Correctly serialize nanosecond dataframe timestamps (#926)
+
+## [v5.3.1] - 2022-11-14
+
+### Added
+- Add support for custom headers in the InfluxDBClient (#710 thx @nathanielatom)
+- Add support for custom indexes for query in the DataFrameClient (#785)
+
+### Changed
+- Amend retry to avoid sleep after last retry before raising exception (#790 thx @krzysbaranski)
+- Remove msgpack pinning for requirements (#818 thx @prometheanfire)
+- Update support for HTTP headers in the InfluxDBClient (#851 thx @bednar)
+
+### Removed
+
+## [v5.3.0] - 2020-04-10
+
+### Added
+- Add mypy testing framework (#756)
+- Add support for messagepack (#734 thx @lovasoa)
+- Add support for 'show series' (#357 thx @gaker)
+- Add support for custom request session in InfluxDBClient (#360 thx @dschien)
+- Add support for handling np.nan and np.inf values in DataFrameClient (#436 thx @nmerket)
+- Add support for optional `time_precision` in the SeriesHelper (#502 && #719 thx @appunni-dishq && @klDen)
+- Add ability to specify retention policy in SeriesHelper (#723 thx @csanz91)
+- Add gzip compression for post and response data (#732 thx @KEClaytor)
+- Add support for chunked responses in ResultSet (#753 and #538 thx @hrbonz && @psy0rz)
+- Add support for empty string fields (#766 thx @gregschrock)
+- Add support for context managers to InfluxDBClient (#721 thx @JustusAdam)
+
+### Changed
+- Clean up stale CI config (#755)
+- Add legacy client test (#752 & #318 thx @oldmantaiter & @sebito91)
+- Update make_lines section in line_protocol.py to split out core function (#375 thx @aisbaa)
+- Fix nanosecond time resolution for points (#407 thx @AndreCAndersen && @clslgrnc)
+- Fix import of distutils.spawn (#805 thx @Hawk777)
+- Update repr of float values including properly handling of boolean (#488 thx @ghost)
+- Update DataFrameClient to fix faulty empty tags (#770 thx @michelfripiat)
+- Update DataFrameClient to properly return `dropna` values (#778 thx @jgspiro)
+- Update DataFrameClient to test for pd.DataTimeIndex before blind conversion (#623 thx @testforvin)
+- Update client to type-set UDP port to int (#651 thx @yifeikong)
+- Update batched writing support for all iterables (#746 thx @JayH5)
+- Update SeriesHelper to enable class instantiation when not initialized (#772 thx @ocworld)
+- Update UDP test case to add proper timestamp to datapoints (#808 thx @shantanoo-desai)
+
+### Removed
+
+## [v5.2.3] - 2019-08-19
+
+### Added
+- Add consistency param to InfluxDBClient.write_points (#643 thx @RonRothman)
+- Add UDP example (#648 thx @shantanoo-desai)
+- Add consistency parameter to `write_points` (#664 thx @RonRothman)
+- The query() function now accepts a bind_params argument for parameter binding (#678 thx @clslgrnc)
+- Add `get_list_continuous_queries`, `drop_continuous_query`, and `create_continuous_query` management methods for
+ continuous queries (#681 thx @lukaszdudek-silvair && @smolse)
+- Mutual TLS authentication (#702 thx @LloydW93)
+
+### Changed
+- Update test suite to add support for Python 3.7 and InfluxDB v1.6.4 and 1.7.4 (#692 thx @clslgrnc)
+- Update supported versions of influxdb + python (#693 thx @clslgrnc)
+- Fix for the line protocol issue with leading comma (#694 thx @d3banjan)
+- Update classifiers tuple to list in setup.py (#697 thx @Hanaasagi)
+- Update documentation for empty `delete_series` confusion (#699 thx @xginn8)
+- Fix newline character issue in tag value (#716 thx @syhan)
+- Update tests/tutorials_pandas.py to reference `line` protocol, bug in `json` (#737 thx @Aeium)
+
+### Removed
+
+## [v5.2.2] - 2019-03-14
+### Added
+
+### Changed
+- Fix 'TypeError: Already tz-aware' introduced with recent versions of pandas (#671, #676, thx @f4bsch @clslgrnc)
+
+## [v5.2.1] - 2018-12-07
+### Added
+
+### Changed
+- Pass through the "method" kwarg to DataFrameClient queries
+
+### Removed
+
+## [v5.2.0] - 2018-07-10
+### Added
+- Finally add a CHANGELOG.md to communicate breaking changes (#598)
+- Test multiple versions of InfluxDB in travis
+- Add SHARD DURATION parameter to retention policy create/alter
+### Changed
+- Update POST/GET requests to follow verb guidelines from InfluxDB documentation
+- Update test suite to support InfluxDB v1.3.9, v1.4.2, and v1.5.4
+- Fix performance degradation when removing NaN values via line protocol (#592)
+### Removed
+- Dropped support for Python3.4
+
+## [v5.1.0] - 2018-06-26
+### Added
+- Connect to InfluxDB path running on server (#556 thx @gladhorn)
+- Escape measurement names in DataFrameClient (#542 thx @tzonghao)
+- Escape tags that end with a backslash (#537 thx @vaniakov)
+- Add back mistakenly-dropped database parameter (#540)
+- Add PyPI status to README.md
+### Changed
+- Fix bad session mount scheme (#571 thx @vaniakov)
+- Fixed issue with DataFrameClient calling to_datetime function (#593 thx @dragoshenron)
+- Escape columns in DataFrameClient for line protocol (#584 thx @dmuiruri)
+- Convert DataFrameClient times from int to np.int64 (#495 thx patrickhoebeke)
+- Updated pandas tutorial (#547 thx @techaddicted)
+- Explicitly set numpy version for tox (#563)
+### Removed
+- Removed UDP precision restrictions on timestamp (#557 thx @mdhausman)
+
+## [v5.0.0] - 2017-11-20
+### Added
+- Add pool size parameter to client constructor (#534 thx @vaniakov)
+- Add ping method to client for checking connectivity (#409 thx @pmenglund)
+- Add retry logic & exponential backoff when a connection fails (#508)
+- Declare which setuptools version is required in PyPy env
+- Functions for drop_measurement and get_list_measurements in InfluxDBClient (#402 thx @Vic020)
+- Allow single string as data argument in write (#492 thx @baftek)
+- Support chunked queries in DataFrameClient (#439 thx @gusutabopb)
+- Add close method to InfluxDBClient (#465 thx @Linux-oiD)
+- PEP257 linting & code compliance (#473)
+### Changed
+- Fix broken tags filtering on a ResultSet (#511)
+- Improve retry codepath for connecting to InfluxDB (#536 thx @swails)
+- Clean up imports using six instead of sys.version (#536 thx @swails)
+- Replace references to dataframe.ix with dataframe.iloc (#528)
+- Improve performance of tag processing when converting DataFrameClient to line protocol (#503 thx @tzonghao)
+- Typo in Content-Type header (#513 thx @milancermak)
+- Clean up README.md formatting
+- Catch TypeError when casting to float to return False with objects (#475 thx @BenHewins)
+- Improve efficiency of tag appending in DataFrameClient when converting to line protocol (#486 thx @maxdolle)
+### Removed
+- Drop requirement for all fields in SeriesHelper (#518 thx @spott)
+- use_udp and udp_port are now private properties in InfluxDBClient
+
+## [v4.1.1] - 2017-06-06
+### Added
+### Changed
+### Removed
+
+## [v4.1.0] - 2017-04-12
+### Added
+### Changed
+### Removed
+
+## [v4.0.0] - 2016-12-07
+### Added
+### Changed
+### Removed
+
+## [v3.0.0] - 2016-06-26
+### Added
+### Changed
+### Removed
+
+## [v2.12.0] - 2016-01-29
+### Added
+### Changed
+### Removed
+
+## [v2.11.0] - 2016-01-11
+### Added
+### Changed
+### Removed
+
+## [v2.10.0] - 2015-11-13
+### Added
+### Changed
+### Removed
+
+## [v2.9.3] - 2015-10-30
+### Added
+### Changed
+### Removed
+
+## [v2.9.2] - 2015-10-07
+### Added
+### Changed
+### Removed
+
+## [v2.9.1] - 2015-08-30
+### Added
+### Changed
+### Removed
+
+## [v2.9.0] - 2015-08-28
+### Added
+### Changed
+### Removed
+
+## [v2.8.0] - 2015-08-06
+### Added
+### Changed
+### Removed
+
+## [v2.7.3] - 2015-07-31
+### Added
+### Changed
+### Removed
+
+## [v2.7.2] - 2015-07-31
+### Added
+### Changed
+### Removed
+
+## [v2.7.1] - 2015-07-26
+### Added
+### Changed
+### Removed
+
+## [v2.7.0] - 2015-07-23
+### Added
+### Changed
+### Removed
+
+## [v2.6.0] - 2015-06-16
+### Added
+### Changed
+### Removed
+
+## [v2.5.1] - 2015-06-15
+### Added
+### Changed
+### Removed
+
+## [v2.5.0] - 2015-06-15
+### Added
+### Changed
+### Removed
+
+## [v2.4.0] - 2015-06-12
+### Added
+### Changed
+### Removed
+
+## [v2.3.0] - 2015-05-13
+### Added
+### Changed
+### Removed
+
+## [v2.2.0] - 2015-05-05
+### Added
+### Changed
+### Removed
+
+## [v2.1.0] - 2015-04-24
+### Added
+### Changed
+### Removed
+
+## [v2.0.2] - 2015-04-22
+### Added
+### Changed
+### Removed
+
+## [v2.0.1] - 2015-04-17
+### Added
+### Changed
+### Removed
+
+## [v2.0.0] - 2015-04-17
+### Added
+### Changed
+### Removed
+
+## [v1.0.1] - 2015-03-30
+### Added
+### Changed
+### Removed
+
+## [v1.0.0] - 2015-03-20
+### Added
+### Changed
+### Removed
+
+## [v0.4.1] - 2015-03-18
+### Added
+### Changed
+### Removed
+
+## [v0.4.0] - 2015-03-17
+### Added
+### Changed
+### Removed
+
+## [v0.3.1] - 2015-02-23
+### Added
+### Changed
+### Removed
+
+## [v0.3.0] - 2015-02-17
+### Added
+### Changed
+### Removed
+
+## [v0.2.0] - 2015-01-23
+### Added
+### Changed
+### Removed
+
+## [v0.1.13] - 2014-11-12
+### Added
+### Changed
+### Removed
+
+## [v0.1.12] - 2014-08-22
+### Added
+### Changed
+### Removed
+
+## [v0.1.11] - 2014-06-20
+### Added
+### Changed
+### Removed
+
+## [v0.1.10] - 2014-06-09
+### Added
+### Changed
+### Removed
+
+## [v0.1.9] - 2014-06-06
+### Added
+### Changed
+### Removed
+
+## [v0.1.8] - 2014-06-06
+### Added
+### Changed
+### Removed
+
+## [v0.1.7] - 2014-05-21
+### Added
+### Changed
+### Removed
+
+## [v0.1.6] - 2014-04-02
+### Added
+### Changed
+### Removed
+
+## [v0.1.5] - 2014-03-25
+### Added
+### Changed
+### Removed
+
+## [v0.1.4] - 2014-03-03
+### Added
+### Changed
+### Removed
+
+## [v0.1.3] - 2014-02-11
+### Added
+### Changed
+### Removed
+
+## [v0.1.2] - 2013-12-09
+### Added
+### Changed
+### Removed
+
+## [v0.1.1] - 2013-11-14
+### Added
+### Changed
+### Removed
diff --git a/LICENSE b/LICENSE
index 38ee2491..a49a5410 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
The MIT License (MIT)
-Copyright (c) 2013 InfluxDB
+Copyright (c) 2020 InfluxDB
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
diff --git a/MANIFEST.in b/MANIFEST.in
index 1395a993..76466bf2 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,5 @@
include requirements.txt
include test-requirements.txt
include dev-requirements.txt
+include LICENSE
+include README.rst
diff --git a/README.rst b/README.rst
index 64009c7c..048db045 100644
--- a/README.rst
+++ b/README.rst
@@ -1,45 +1,61 @@
+InfluxDB-Python
+===============
-
-InfluxDB-Python is a client for interacting with InfluxDB_.
-
-.. image:: https://travis-ci.org/influxdb/influxdb-python.svg?branch=master
- :target: https://travis-ci.org/influxdb/influxdb-python
-
+.. image:: https://travis-ci.org/influxdata/influxdb-python.svg?branch=master
+ :target: https://travis-ci.org/influxdata/influxdb-python
.. image:: https://readthedocs.org/projects/influxdb-python/badge/?version=latest&style
- :target: https://readthedocs.org/projects/influxdb-python/?badge=latest
+ :target: http://influxdb-python.readthedocs.org/
:alt: Documentation Status
-.. image:: https://img.shields.io/coveralls/influxdb/influxdb-python.svg
- :target: https://coveralls.io/r/influxdb/influxdb-python
+.. image:: https://img.shields.io/coveralls/influxdata/influxdb-python.svg
+ :target: https://coveralls.io/r/influxdata/influxdb-python
:alt: Coverage
-.. image:: https://pypip.in/download/influxdb/badge.svg
- :target: https://pypi.python.org/pypi//influxdb/
- :alt: Downloads
+.. image:: https://img.shields.io/pypi/v/influxdb.svg
+ :target: https://pypi.python.org/pypi/influxdb
+ :alt: PyPI Status
+
+
+.. important::
+
+ **This project is no longer in development**
+
+ This v1 client library is for interacting with `InfluxDB 1.x `_ and 1.x-compatible endpoints in `InfluxDB 2.x `_.
+ Use it to:
+
+ - Write data in line protocol.
+ - Query data with `InfluxQL `_.
-.. image:: https://pypip.in/version/influxdb/badge.svg
- :target: https://pypi.python.org/pypi/influxdb/
- :alt: Latest Version
+ If you use `InfluxDB 2.x (TSM storage engine) `_ and `Flux `_, see the `v2 client library `_.
-.. image:: https://pypip.in/py_versions/influxdb/badge.svg
- :target: https://pypi.python.org/pypi/influxdb/
- :alt: Supported Python versions
+ If you use `InfluxDB 3.0 `_, see the `v3 client library `_.
-.. image:: https://pypip.in/license/influxdb/badge.svg
- :target: https://pypi.python.org/pypi/influxdb/
- :alt: License
+ For new projects, consider using InfluxDB 3.0 and v3 client libraries.
+
+Description
+===========
+
+InfluxDB-python, the InfluxDB Python Client (1.x), is a client library for interacting with `InfluxDB 1.x `_ instances.
.. _readme-about:
-InfluxDB is an open-source distributed time series database, find more about InfluxDB_ at http://influxdb.com/
+`InfluxDB`_ is the time series platform designed to handle high write and query loads.
.. _installation:
-Installation
-============
-Install, upgrade and uninstall InfluxDB-Python with these commands::
+For InfluxDB pre-v1.1.0 users
+-----------------------------
+
+This module is tested with InfluxDB versions v1.2.4, v1.3.9, v1.4.3, v1.5.4, v1.6.4, and v1.7.4.
+
+Users on InfluxDB v0.8.x may still use the legacy client by importing ``from influxdb.influxdb08 import InfluxDBClient``.
+
+For InfluxDB v1.1+ users
+------------------------
+
+Install, upgrade and uninstall influxdb-python with these commands::
$ pip install influxdb
$ pip install --upgrade influxdb
@@ -50,26 +66,29 @@ On Debian/Ubuntu, you can install it with this command::
$ sudo apt-get install python-influxdb
Dependencies
-============
+------------
+
+The influxdb-python distribution is supported and tested on Python 2.7, 3.5, 3.6, 3.7, PyPy and PyPy3.
-The InfluxDB-Python distribution is supported and tested on Python 2.7 and Python 3.3.
+**Note:** Python versions <3.5 are currently untested. See ``.travis.yml``.
-Main dependencie is:
+Main dependency is:
- Requests: HTTP library for human beings (http://docs.python-requests.org/)
Additional dependencies are:
+- pandas: for writing from and reading into DataFrames (http://pandas.pydata.org/)
- Sphinx: Tool to create and manage the documentation (http://sphinx-doc.org/)
- Nose: to auto-discover tests (http://nose.readthedocs.org/en/latest/)
- Mock: to mock tests (https://pypi.python.org/pypi/mock)
Documentation
-=============
+-------------
-InfluxDB-Python documentation is available at http://influxdb-python.readthedocs.org
+Documentation is available at https://influxdb-python.readthedocs.io/en/latest/.
You will need Sphinx_ installed to generate the documentation.
@@ -82,7 +101,7 @@ Generated documentation can be found in the *docs/build/html/* directory.
Examples
-========
+--------
Here's a basic example (for more see the examples directory)::
@@ -90,14 +109,19 @@ Here's a basic example (for more see the examples directory)::
>>> from influxdb import InfluxDBClient
- >>> json_body = [{
- "points": [
- ["1", 1, 1.0],
- ["2", 2, 2.0]
- ],
- "name": "foo",
- "columns": ["column_one", "column_two", "column_three"]
- }]
+ >>> json_body = [
+ {
+ "measurement": "cpu_load_short",
+ "tags": {
+ "host": "server01",
+ "region": "us-west"
+ },
+ "time": "2009-11-10T23:00:00Z",
+ "fields": {
+ "value": 0.64
+ }
+ }
+ ]
>>> client = InfluxDBClient('localhost', 8086, 'root', 'root', 'example')
@@ -105,13 +129,13 @@ Here's a basic example (for more see the examples directory)::
>>> client.write_points(json_body)
- >>> result = client.query('select column_one from foo;')
+ >>> result = client.query('select value from cpu_load_short;')
>>> print("Result: {0}".format(result))
Testing
-=======
+-------
Make sure you have tox by running the following::
@@ -123,14 +147,22 @@ To test influxdb-python with multiple version of Python, you can use Tox_::
Support
-=======
+-------
For issues with, questions about, or feedback for InfluxDB_, please look into
our community page: http://influxdb.com/community/.
+We are also lurking on the following:
+
+- #influxdb on irc.freenode.net
+- #influxdb on gophers.slack.com
+
Development
-===========
+-----------
+
+The v1 client libraries for InfluxDB 1.x were typically developed and maintained by InfluxDB community members. If you are an InfluxDB v1 user interested in maintaining this client library (at a minimum, keeping it updated with security patches), please contact the InfluxDB team on the `Community Forums `_ or
+`InfluxData Slack `_.
All development is done on Github_. Use Issues_ to report
problems or submit contributions.
@@ -138,13 +170,32 @@ problems or submit contributions.
.. _Github: https://github.com/influxdb/influxdb-python/
.. _Issues: https://github.com/influxdb/influxdb-python/issues
+Please note that we will answer your questions as quickly as possible.
+
+Maintainers:
+
++-----------+-------------------------------+
+| Github ID | URL |
++===========+===============================+
+| @aviau | (https://github.com/aviau) |
++-----------+-------------------------------+
+| @xginn8 | (https://github.com/xginn8) |
++-----------+-------------------------------+
+| @sebito91 | (https://github.com/sebito91) |
++-----------+-------------------------------+
Source code
-===========
+-----------
+
+The source code for the InfluxDB Python Client (1.x) is currently available on Github: https://github.com/influxdata/influxdb-python
+
+
+TODO
+----
-The source code is currently available on Github: https://github.com/influxdb/influxdb-python
+The TODO/Roadmap can be found in the Github bug tracker: https://github.com/influxdata/influxdb-python/issues
-.. _InfluxDB: http://influxdb.com/
+.. _InfluxDB: https://influxdata.com/
.. _Sphinx: http://sphinx.pocoo.org/
.. _Tox: https://tox.readthedocs.org
diff --git a/dev-requirements.txt b/dev-requirements.txt
index 1e78633e..bc7b4c87 100644
--- a/dev-requirements.txt
+++ b/dev-requirements.txt
@@ -1,5 +1,8 @@
-requests
+requests>=2.17.0
nose
mock
-Sphinx==1.2.3
-sphinx_rtd_theme
\ No newline at end of file
+pandas==0.20.1
+Sphinx==1.5.5
+sphinx_rtd_theme
+wheel
+twine
diff --git a/docs/source/api-documentation.rst b/docs/source/api-documentation.rst
index 5ebff397..35fdb291 100644
--- a/docs/source/api-documentation.rst
+++ b/docs/source/api-documentation.rst
@@ -13,16 +13,24 @@ ports. The below instantiation statements are all equivalent::
from influxdb import InfluxDBClient
# using Http
- client = InfluxDBClient(dbname='dbname')
- client = InfluxDBClient(host='127.0.0.1', port=8086, dbname='dbname')
- client = InfluxDBClient(host='127.0.0.1', port=8086, username='root', password='root', dbname='dbname')
+ client = InfluxDBClient(database='dbname')
+ client = InfluxDBClient(host='127.0.0.1', port=8086, database='dbname')
+ client = InfluxDBClient(host='127.0.0.1', port=8086, username='root', password='root', database='dbname')
# using UDP
- client = InfluxDBClient(host='127.0.0.1', dbname='dbname', use_udp=True, udp_port=4444)
+ client = InfluxDBClient(host='127.0.0.1', database='dbname', use_udp=True, udp_port=4444)
+To write pandas DataFrames or to read data into a
+pandas DataFrame, use a :py:class:`~influxdb.DataFrameClient` object.
+These clients are instantiated in the same way as the
+:py:class:`~influxdb.InfluxDBClient`::
-.. note:: Only when using UDP (use_udp=True) the connections is established.
+ from influxdb import DataFrameClient
+ client = DataFrameClient(host='127.0.0.1', port=8086, username='root', password='root', database='dbname')
+
+
+.. note:: Only when using UDP (``use_udp=True``) is the connection established.
.. _InfluxDBClient-api:
@@ -36,3 +44,34 @@ ports. The below instantiation statements are all equivalent::
.. autoclass:: influxdb.InfluxDBClient
:members:
:undoc-members:
+
+------------------------
+:class:`DataFrameClient`
+------------------------
+
+
+.. currentmodule:: influxdb.DataFrameClient
+.. autoclass:: influxdb.DataFrameClient
+ :members:
+ :undoc-members:
+
+-----------------------
+:class:`SeriesHelper`
+-----------------------
+
+
+.. currentmodule:: influxdb.SeriesHelper
+.. autoclass:: influxdb.SeriesHelper
+ :members:
+ :undoc-members:
+
+-----------------------
+:class:`ResultSet`
+-----------------------
+
+See the :ref:`resultset` page for more information.
+
+.. currentmodule:: influxdb.ResultSet
+.. autoclass:: influxdb.resultset.ResultSet
+ :members:
+ :undoc-members:
diff --git a/docs/source/conf.py b/docs/source/conf.py
index f55684d5..efc22f88 100644
--- a/docs/source/conf.py
+++ b/docs/source/conf.py
@@ -1,5 +1,7 @@
# -*- coding: utf-8 -*-
#
+"""InfluxDB documentation build configuration file."""
+
# InfluxDB documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 16 00:33:06 2014.
#
@@ -115,7 +117,8 @@
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
-html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
+# Calling get_html_theme_path is deprecated.
+# html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
diff --git a/docs/source/examples.rst b/docs/source/examples.rst
index 967b2c00..841ad8b1 100644
--- a/docs/source/examples.rst
+++ b/docs/source/examples.rst
@@ -13,3 +13,27 @@ Tutorials - Basic
.. literalinclude:: ../../examples/tutorial.py
:language: python
:linenos:
+
+Tutorials - pandas
+==================
+
+.. literalinclude:: ../../examples/tutorial_pandas.py
+ :language: python
+
+Tutorials - SeriesHelper
+========================
+
+.. literalinclude:: ../../examples/tutorial_serieshelper.py
+ :language: python
+
+Tutorials - UDP
+===============
+
+.. literalinclude:: ../../examples/tutorial_udp.py
+ :language: python
+
+Tutorials - Authorization by Token
+==================================
+
+.. literalinclude:: ../../examples/tutorial_authorization.py
+ :language: python
diff --git a/docs/source/exceptions.rst b/docs/source/exceptions.rst
new file mode 100644
index 00000000..178255b8
--- /dev/null
+++ b/docs/source/exceptions.rst
@@ -0,0 +1,12 @@
+
+.. _exceptions:
+
+==========
+Exceptions
+==========
+
+
+.. currentmodule:: influxdb.exceptions
+
+.. autoclass:: InfluxDBClientError
+.. autoclass:: InfluxDBServerError
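+
+For example, to recreate a database that may already exist (a sketch adapted
+from examples/tutorial_server_data.py in this repository)::
+
+    from influxdb import InfluxDBClient
+    from influxdb.exceptions import InfluxDBClientError
+
+    client = InfluxDBClient(database='example')
+    try:
+        client.create_database('example')
+    except InfluxDBClientError:
+        # Drop and re-create on error
+        client.drop_database('example')
+        client.create_database('example')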
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 077f681b..6e5b2ef3 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -19,6 +19,8 @@ Contents
include-readme
api-documentation
+ exceptions
+ resultset
examples
diff --git a/docs/source/resultset.rst b/docs/source/resultset.rst
new file mode 100644
index 00000000..b1c3206f
--- /dev/null
+++ b/docs/source/resultset.rst
@@ -0,0 +1,45 @@
+
+.. _resultset:
+
+================================
+Query response object: ResultSet
+================================
+
+The ``InfluxDBClient.query()`` function returns a ``ResultSet`` object.
+
+A ResultSet can be browsed in several ways. Its ``get_points`` method returns point generators that filter by measurement, by tags, or by both.
+
+Getting all points
+------------------
+
+Using ``rs.get_points()`` will return a generator for all the points in the ResultSet.
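+For example (``cli`` being an ``InfluxDBClient``, as in the sections below).
+::
+
+    rs = cli.query("SELECT * from cpu")
+    all_points = list(rs.get_points())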
+
+
+Filtering by measurement
+------------------------
+
+Using ``rs.get_points('cpu')`` will return a generator for all the points that are in a series with measurement name ``cpu``, no matter the tags.
+::
+
+ rs = cli.query("SELECT * from cpu")
+ cpu_points = list(rs.get_points(measurement='cpu'))
+
+Filtering by tags
+-----------------
+
+Using ``rs.get_points(tags={'host_name': 'influxdb.com'})`` will return a generator for all the points that are tagged with the specified tags, no matter the measurement name.
+::
+
+ rs = cli.query("SELECT * from cpu")
+ cpu_influxdb_com_points = list(rs.get_points(tags={"host_name": "influxdb.com"}))
+
+Filtering by measurement and tags
+---------------------------------
+
+Using measurement name and tags will return a generator for all the points that are in a series with the specified measurement name AND whose tags match the given tags.
+::
+
+ rs = cli.query("SELECT * from cpu")
+ points = list(rs.get_points(measurement='cpu', tags={'host_name': 'influxdb.com'}))
+
+See the :ref:`api-documentation` page for more information.
diff --git a/examples/tutorial.py b/examples/tutorial.py
index 442d2242..12cd49c1 100644
--- a/examples/tutorial.py
+++ b/examples/tutorial.py
@@ -1,43 +1,45 @@
+# -*- coding: utf-8 -*-
+"""Tutorial on using the InfluxDB client."""
+
import argparse
from influxdb import InfluxDBClient
def main(host='localhost', port=8086):
+ """Instantiate a connection to the InfluxDB."""
user = 'root'
password = 'root'
dbname = 'example'
dbuser = 'smly'
dbuser_password = 'my_secret_password'
- query = 'select column_one from foo;'
- json_body = [{
- "points": [
- ["1", 1, 1.0],
- ["2", 2, 2.0]
- ],
- "name": "foo",
- "columns": ["column_one", "column_two", "column_three"]
- }]
+ query = 'select Float_value from cpu_load_short;'
+ query_where = 'select Int_value from cpu_load_short where host=$host;'
+ bind_params = {'host': 'server01'}
+ json_body = [
+ {
+ "measurement": "cpu_load_short",
+ "tags": {
+ "host": "server01",
+ "region": "us-west"
+ },
+ "time": "2009-11-10T23:00:00Z",
+ "fields": {
+ "Float_value": 0.64,
+ "Int_value": 3,
+ "String_value": "Text",
+ "Bool_value": True
+ }
+ }
+ ]
client = InfluxDBClient(host, port, user, password, dbname)
print("Create database: " + dbname)
client.create_database(dbname)
- dbusers = client.get_database_users()
- print("Get list of database users: {0}".format(dbusers))
-
- print("Add database user: " + dbuser)
- client.add_database_user(dbuser, dbuser_password)
-
- print("Make user a database admin")
- client.set_database_admin(dbuser)
-
- print("Remove admin privilege from user")
- client.unset_database_admin(dbuser)
-
- dbusers = client.get_database_users()
- print("Get list of database users again: {0}".format(dbusers))
+ print("Create a retention policy")
+ client.create_retention_policy('awesome_policy', '3d', 3, default=True)
print("Switch user: " + dbuser)
client.switch_user(dbuser, dbuser_password)
@@ -45,22 +47,29 @@ def main(host='localhost', port=8086):
print("Write points: {0}".format(json_body))
client.write_points(json_body)
- print("Queying data: " + query)
+ print("Querying data: " + query)
result = client.query(query)
print("Result: {0}".format(result))
+ print("Querying data: " + query_where)
+ result = client.query(query_where, bind_params=bind_params)
+
+ print("Result: {0}".format(result))
+
print("Switch user: " + user)
client.switch_user(user, password)
- print("Delete database: " + dbname)
- client.delete_database(dbname)
+ print("Drop database: " + dbname)
+ client.drop_database(dbname)
def parse_args():
+ """Parse the args."""
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB')
- parser.add_argument('--host', type=str, required=False, default='localhost',
+ parser.add_argument('--host', type=str, required=False,
+ default='localhost',
help='hostname of InfluxDB http API')
parser.add_argument('--port', type=int, required=False, default=8086,
help='port of InfluxDB http API')
diff --git a/examples/tutorial_authorization.py b/examples/tutorial_authorization.py
new file mode 100644
index 00000000..9d9a800f
--- /dev/null
+++ b/examples/tutorial_authorization.py
@@ -0,0 +1,32 @@
+# -*- coding: utf-8 -*-
+"""Tutorial how to authorize InfluxDB client by custom Authorization token."""
+
+import argparse
+from influxdb import InfluxDBClient
+
+
+def main(token='my-token'):
+ """Instantiate a connection to the InfluxDB."""
+ client = InfluxDBClient(username=None, password=None,
+ headers={"Authorization": token})
+
+ print("Use authorization token: " + token)
+
+ version = client.ping()
+ print("Successfully connected to InfluxDB: " + version)
+
+
+def parse_args():
+ """Parse the args from main."""
+ parser = argparse.ArgumentParser(
+ description='example code to play with InfluxDB')
+ parser.add_argument('--token', type=str, required=False,
+ default='my-token',
+                        help='Authorization token for the proxy that is in front of InfluxDB.')
+ return parser.parse_args()
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ main(token=args.token)
diff --git a/examples/tutorial_pandas.py b/examples/tutorial_pandas.py
new file mode 100644
index 00000000..13e72f8c
--- /dev/null
+++ b/examples/tutorial_pandas.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+"""Tutorial for using pandas and the InfluxDB client."""
+
+import argparse
+import pandas as pd
+
+from influxdb import DataFrameClient
+
+
+def main(host='localhost', port=8086):
+ """Instantiate the connection to the InfluxDB client."""
+ user = 'root'
+ password = 'root'
+ dbname = 'demo'
+ protocol = 'line'
+
+ client = DataFrameClient(host, port, user, password, dbname)
+
+ print("Create pandas DataFrame")
+ df = pd.DataFrame(data=list(range(30)),
+ index=pd.date_range(start='2014-11-16',
+ periods=30, freq='H'), columns=['0'])
+
+ print("Create database: " + dbname)
+ client.create_database(dbname)
+
+ print("Write DataFrame")
+ client.write_points(df, 'demo', protocol=protocol)
+
+ print("Write DataFrame with Tags")
+ client.write_points(df, 'demo',
+ {'k1': 'v1', 'k2': 'v2'}, protocol=protocol)
+
+ print("Read DataFrame")
+ client.query("select * from demo")
+
+ print("Delete database: " + dbname)
+ client.drop_database(dbname)
+
+
+def parse_args():
+ """Parse the args from main."""
+ parser = argparse.ArgumentParser(
+ description='example code to play with InfluxDB')
+ parser.add_argument('--host', type=str, required=False,
+ default='localhost',
+ help='hostname of InfluxDB http API')
+ parser.add_argument('--port', type=int, required=False, default=8086,
+ help='port of InfluxDB http API')
+ return parser.parse_args()
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ main(host=args.host, port=args.port)
diff --git a/examples/tutorial_serieshelper.py b/examples/tutorial_serieshelper.py
new file mode 100644
index 00000000..72b80bb5
--- /dev/null
+++ b/examples/tutorial_serieshelper.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+"""Tutorial how to use the class helper `SeriesHelper`."""
+
+from influxdb import InfluxDBClient
+from influxdb import SeriesHelper
+
+# InfluxDB connections settings
+host = 'localhost'
+port = 8086
+user = 'root'
+password = 'root'
+dbname = 'mydb'
+
+myclient = InfluxDBClient(host, port, user, password, dbname)
+
+# Uncomment the following code if the database is not yet created
+# myclient.create_database(dbname)
+# myclient.create_retention_policy('awesome_policy', '3d', 3, default=True)
+
+
+class MySeriesHelper(SeriesHelper):
+ """Instantiate SeriesHelper to write points to the backend."""
+
+ class Meta:
+ """Meta class stores time series helper configuration."""
+
+ # The client should be an instance of InfluxDBClient.
+ client = myclient
+
+ # The series name must be a string. Add dependent fields/tags
+ # in curly brackets.
+ series_name = 'events.stats.{server_name}'
+
+ # Defines all the fields in this time series.
+ fields = ['some_stat', 'other_stat']
+
+ # Defines all the tags for the series.
+ tags = ['server_name']
+
+ # Defines the number of data points to store prior to writing
+ # on the wire.
+ bulk_size = 5
+
+ # autocommit must be set to True when using bulk_size
+ autocommit = True
+
+
+# The following will create *five* (immutable) data points.
+# Since bulk_size is set to 5, upon the fifth construction call, *all* data
+# points will be written on the wire via MySeriesHelper.Meta.client.
+MySeriesHelper(server_name='us.east-1', some_stat=159, other_stat=10)
+MySeriesHelper(server_name='us.east-1', some_stat=158, other_stat=20)
+MySeriesHelper(server_name='us.east-1', some_stat=157, other_stat=30)
+MySeriesHelper(server_name='us.east-1', some_stat=156, other_stat=30)
+MySeriesHelper(server_name='us.east-1', some_stat=156)
+MySeriesHelper(server_name='us.east-1', some_stat=155, other_stat=50)
+
+# To manually submit data points which are not yet written, call commit:
+MySeriesHelper.commit()
+
+# To inspect the JSON which will be written, call _json_body_():
+MySeriesHelper._json_body_()
diff --git a/examples/tutorial_server_data.py b/examples/tutorial_server_data.py
index 2e9bba15..cb903fad 100644
--- a/examples/tutorial_server_data.py
+++ b/examples/tutorial_server_data.py
@@ -1,9 +1,15 @@
+# -*- coding: utf-8 -*-
+"""Tutorial on using the server functions."""
+
+from __future__ import print_function
import argparse
-from influxdb import InfluxDBClient
import datetime
import random
+import time
+from influxdb import InfluxDBClient
+from influxdb.client import InfluxDBClientError
USER = 'root'
PASSWORD = 'root'
@@ -11,46 +17,69 @@
def main(host='localhost', port=8086, nb_day=15):
-
+ """Instantiate a connection to the backend."""
nb_day = 15 # number of day to generate time series
timeinterval_min = 5 # create an event every x minutes
total_minutes = 1440 * nb_day
total_records = int(total_minutes / timeinterval_min)
now = datetime.datetime.today()
- cpu_series = [{
- 'name': "server_data.cpu_idle",
- 'columns': ["time", "value", "hostName"],
- 'points': []
- }]
+ metric = "server_data.cpu_idle"
+ series = []
for i in range(0, total_records):
past_date = now - datetime.timedelta(minutes=i * timeinterval_min)
value = random.randint(0, 200)
hostName = "server-%d" % random.randint(1, 5)
- pointValues = [int(past_date.strftime('%s')), value, hostName]
- cpu_series[0]['points'].append(pointValues)
+ # pointValues = [int(past_date.strftime('%s')), value, hostName]
+ pointValues = {
+ "time": int(past_date.strftime('%s')),
+ "measurement": metric,
+ "fields": {
+ "value": value,
+ },
+ "tags": {
+ "hostName": hostName,
+ },
+ }
+ series.append(pointValues)
+
+ print(series)
client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)
print("Create database: " + DBNAME)
- client.create_database(DBNAME)
+ try:
+ client.create_database(DBNAME)
+ except InfluxDBClientError:
+ # Drop and create
+ client.drop_database(DBNAME)
+ client.create_database(DBNAME)
+
+ print("Create a retention policy")
+ retention_policy = 'server_data'
+ client.create_retention_policy(retention_policy, '3d', 3, default=True)
print("Write points #: {0}".format(total_records))
- client.write_points(cpu_series)
+ client.write_points(series, retention_policy=retention_policy)
+
+ time.sleep(2)
- query = 'SELECT MEAN(value) FROM server_data.cpu_idle GROUP BY time(30m) WHERE time > now() - 1d;'
- print("Queying data: " + query)
- result = client.query(query)
+ query = "SELECT MEAN(value) FROM {} WHERE \
+ time > now() - 10d GROUP BY time(500m)".format(metric)
+ result = client.query(query, database=DBNAME)
+ print(result)
print("Result: {0}".format(result))
- print("Delete database: " + DBNAME)
- client.delete_database(DBNAME)
+ print("Drop database: {}".format(DBNAME))
+ client.drop_database(DBNAME)
def parse_args():
+ """Parse the args."""
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB')
- parser.add_argument('--host', type=str, required=False, default='localhost',
+ parser.add_argument('--host', type=str, required=False,
+ default='localhost',
help='hostname influxdb http API')
parser.add_argument('--port', type=int, required=False, default=8086,
help='port influxdb http API')
diff --git a/examples/tutorial_sine_wave.py b/examples/tutorial_sine_wave.py
index ea8aee63..5dfebf3c 100644
--- a/examples/tutorial_sine_wave.py
+++ b/examples/tutorial_sine_wave.py
@@ -1,9 +1,13 @@
+# -*- coding: utf-8 -*-
+"""Tutorial using all elements to define a sine wave."""
+
import argparse
-from influxdb import InfluxDBClient
import math
import datetime
+import time
+from influxdb import InfluxDBClient
USER = 'root'
PASSWORD = 'root'
@@ -11,52 +15,58 @@
def main(host='localhost', port=8086):
- """
- main function to generate the sin wave
- """
+ """Define function to generate the sin wave."""
now = datetime.datetime.today()
- data = [{
- 'name': "foobar",
- 'columns': ["time", "value"],
- 'points': []
- }]
+ points = []
for angle in range(0, 360):
y = 10 + math.sin(math.radians(angle)) * 10
- point = [int(now.strftime('%s')) + angle, y]
- data[0]['points'].append(point)
+
+ point = {
+ "measurement": 'foobar',
+ "time": int(now.strftime('%s')) + angle,
+ "fields": {
+ "value": y
+ }
+ }
+ points.append(point)
client = InfluxDBClient(host, port, USER, PASSWORD, DBNAME)
print("Create database: " + DBNAME)
client.create_database(DBNAME)
+ client.switch_database(DBNAME)
+
+ # Write points
+ client.write_points(points)
- #Write points
- client.write_points(data)
+ time.sleep(3)
- query = 'SELECT time, value FROM foobar GROUP BY value, time(1s)'
- print("Queying data: " + query)
- result = client.query(query)
+ query = 'SELECT * FROM foobar'
+ print("Querying data: " + query)
+ result = client.query(query, database=DBNAME)
print("Result: {0}".format(result))
"""
- You might want to comment the delete and plot the result on InfluxDB Interface
- Connect on InfluxDB Interface at http://127.0.0.1:8083/
- Select the database tutorial -> Explore Data
+    You might want to comment out the delete and plot the result in the
+    InfluxDB interface. Connect to the InfluxDB interface at
+    http://127.0.0.1:8083/ and select the database tutorial -> Explore Data.
Then run the following query:
- SELECT time, value FROM foobar GROUP BY value, time(1s)
+ SELECT * from foobar
"""
print("Delete database: " + DBNAME)
- client.delete_database(DBNAME)
+ client.drop_database(DBNAME)
def parse_args():
+ """Parse the args."""
parser = argparse.ArgumentParser(
description='example code to play with InfluxDB')
- parser.add_argument('--host', type=str, required=False, default='localhost',
+ parser.add_argument('--host', type=str, required=False,
+ default='localhost',
help='hostname influxdb http API')
parser.add_argument('--port', type=int, required=False, default=8086,
help='port influxdb http API')
diff --git a/examples/tutorial_udp.py b/examples/tutorial_udp.py
new file mode 100644
index 00000000..93b923d7
--- /dev/null
+++ b/examples/tutorial_udp.py
@@ -0,0 +1,67 @@
+# -*- coding: utf-8 -*-
+"""Example for sending batch information to InfluxDB via UDP."""
+
+"""
+INFO: In order to use UDP, one should enable the UDP service from the
+`influxdb.conf` under section
+ [[udp]]
+ enabled = true
+ bind-address = ":8089" # port number for sending data via UDP
+ database = "udp1" # name of database to be stored
+ [[udp]]
+ enabled = true
+ bind-address = ":8090"
+ database = "udp2"
+"""
+
+
+import argparse
+
+from influxdb import InfluxDBClient
+
+
+def main(uport):
+ """Instantiate connection to the InfluxDB."""
+ # NOTE: structure of the UDP packet is different than that of information
+ # sent via HTTP
+ json_body = {
+ "tags": {
+ "host": "server01",
+ "region": "us-west"
+ },
+ "points": [{
+ "measurement": "cpu_load_short",
+ "fields": {
+ "value": 0.64
+ },
+ "time": "2009-11-10T23:00:00Z",
+ },
+ {
+ "measurement": "cpu_load_short",
+ "fields": {
+ "value": 0.67
+ },
+ "time": "2009-11-10T23:05:00Z"
+ }]
+ }
+
+ # make `use_udp` True and add `udp_port` number from `influxdb.conf` file
+ # no need to mention the database name since it is already configured
+ client = InfluxDBClient(use_udp=True, udp_port=uport)
+
+ # Instead of `write_points` use `send_packet`
+ client.send_packet(json_body)
+
+
+def parse_args():
+ """Parse the args."""
+ parser = argparse.ArgumentParser(
+ description='example code to play with InfluxDB along with UDP Port')
+ parser.add_argument('--uport', type=int, required=True,
+                        help='UDP port of InfluxDB')
+ return parser.parse_args()
+
+
+if __name__ == '__main__':
+ args = parse_args()
+ main(uport=args.uport)
diff --git a/influxdb/__init__.py b/influxdb/__init__.py
index d8871cb2..e66f80ea 100644
--- a/influxdb/__init__.py
+++ b/influxdb/__init__.py
@@ -1,7 +1,21 @@
# -*- coding: utf-8 -*-
-from influxdb.client import InfluxDBClient
+"""Initialize the influxdb package."""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
-__all__ = ['InfluxDBClient']
+from .client import InfluxDBClient
+from .dataframe_client import DataFrameClient
+from .helper import SeriesHelper
-__version__ = '0.1.12'
+
+__all__ = [
+ 'InfluxDBClient',
+ 'DataFrameClient',
+ 'SeriesHelper',
+]
+
+
+__version__ = '5.3.2'
diff --git a/influxdb/_dataframe_client.py b/influxdb/_dataframe_client.py
new file mode 100644
index 00000000..907db2cb
--- /dev/null
+++ b/influxdb/_dataframe_client.py
@@ -0,0 +1,497 @@
+# -*- coding: utf-8 -*-
+"""DataFrame client for InfluxDB."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import math
+from collections import defaultdict
+
+import pandas as pd
+import numpy as np
+
+from .client import InfluxDBClient
+from .line_protocol import _escape_tag
+
+
+def _pandas_time_unit(time_precision):
+ unit = time_precision
+ if time_precision == 'm':
+ unit = 'ms'
+ elif time_precision == 'u':
+ unit = 'us'
+ elif time_precision == 'n':
+ unit = 'ns'
+ assert unit in ('s', 'ms', 'us', 'ns')
+ return unit
+
+
+def _escape_pandas_series(s):
+ return s.apply(lambda v: _escape_tag(v))
+
+
+class DataFrameClient(InfluxDBClient):
+ """DataFrameClient instantiates InfluxDBClient to connect to the backend.
+
+ The ``DataFrameClient`` object holds information necessary to connect
+ to InfluxDB. Requests can be made to InfluxDB directly through the client.
+ The client reads and writes from pandas DataFrames.
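+
+    A minimal usage sketch (assuming a local InfluxDB instance and a
+    DataFrame ``df`` with a ``DatetimeIndex``, as in
+    examples/tutorial_pandas.py)::
+
+        client = DataFrameClient(database='dbname')
+        client.write_points(df, 'demo', protocol='line')
+        result = client.query('select * from demo')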
+ """
+
+ EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
+
+ def write_points(self,
+ dataframe,
+ measurement,
+ tags=None,
+ tag_columns=None,
+ field_columns=None,
+ time_precision=None,
+ database=None,
+ retention_policy=None,
+ batch_size=None,
+ protocol='line',
+ numeric_precision=None):
+ """Write to multiple time series names.
+
+ :param dataframe: data points in a DataFrame
+ :param measurement: name of measurement
+ :param tags: dictionary of tags, with string key-values
+ :param tag_columns: [Optional, default None] List of data tag names
+        :param field_columns: [Optional, default None] List of data field names
+ :param time_precision: [Optional, default None] Either 's', 'ms', 'u'
+ or 'n'.
+ :param batch_size: [Optional] Value to write the points in batches
+ instead of all at one time. Useful for when doing data dumps from
+ one database to another or when doing a massive write operation
+ :type batch_size: int
+ :param protocol: Protocol for writing data. Either 'line' or 'json'.
+ :param numeric_precision: Precision for floating point values.
+ Either None, 'full' or some int, where int is the desired decimal
+ precision. 'full' preserves full precision for int and float
+ datatypes. Defaults to None, which preserves 14-15 significant
+ figures for float and all significant figures for int datatypes.
+ """
+ if tag_columns is None:
+ tag_columns = []
+
+ if field_columns is None:
+ field_columns = []
+
+ if batch_size:
+ number_batches = int(math.ceil(len(dataframe) / float(batch_size)))
+
+ for batch in range(number_batches):
+ start_index = batch * batch_size
+ end_index = (batch + 1) * batch_size
+
+ if protocol == 'line':
+ points = self._convert_dataframe_to_lines(
+ dataframe.iloc[start_index:end_index].copy(),
+ measurement=measurement,
+ global_tags=tags,
+ time_precision=time_precision,
+ tag_columns=tag_columns,
+ field_columns=field_columns,
+ numeric_precision=numeric_precision)
+ else:
+ points = self._convert_dataframe_to_json(
+ dataframe.iloc[start_index:end_index].copy(),
+ measurement=measurement,
+ tags=tags,
+ time_precision=time_precision,
+ tag_columns=tag_columns,
+ field_columns=field_columns)
+
+ super(DataFrameClient, self).write_points(
+ points,
+ time_precision,
+ database,
+ retention_policy,
+ protocol=protocol)
+
+ return True
+
+ if protocol == 'line':
+ points = self._convert_dataframe_to_lines(
+ dataframe,
+ measurement=measurement,
+ global_tags=tags,
+ tag_columns=tag_columns,
+ field_columns=field_columns,
+ time_precision=time_precision,
+ numeric_precision=numeric_precision)
+ else:
+ points = self._convert_dataframe_to_json(
+ dataframe,
+ measurement=measurement,
+ tags=tags,
+ time_precision=time_precision,
+ tag_columns=tag_columns,
+ field_columns=field_columns)
+
+ super(DataFrameClient, self).write_points(
+ points,
+ time_precision,
+ database,
+ retention_policy,
+ protocol=protocol)
+
+ return True
+
+ def query(self,
+ query,
+ params=None,
+ bind_params=None,
+ epoch=None,
+ expected_response_code=200,
+ database=None,
+ raise_errors=True,
+ chunked=False,
+ chunk_size=0,
+ method="GET",
+ dropna=True,
+ data_frame_index=None):
+ """
+ Query data into a DataFrame.
+
+ .. danger::
+ In order to avoid injection vulnerabilities (similar to `SQL
+ injection `_
+ vulnerabilities), do not directly include untrusted data into the
+ ``query`` parameter, use ``bind_params`` instead.
+
+ :param query: the actual query string
+ :param params: additional parameters for the request, defaults to {}
+ :param bind_params: bind parameters for the query:
+ any variable in the query written as ``'$var_name'`` will be
+ replaced with ``bind_params['var_name']``. Only works in the
+ ``WHERE`` clause and takes precedence over ``params['params']``
+ :param epoch: response timestamps to be in epoch format either 'h',
+            'm', 's', 'ms', 'u', or 'ns', defaults to `None` which is
+ RFC3339 UTC format with nanosecond precision
+ :param expected_response_code: the expected status code of response,
+ defaults to 200
+ :param database: database to query, defaults to None
+ :param raise_errors: Whether or not to raise exceptions when InfluxDB
+ returns errors, defaults to True
+ :param chunked: Enable to use chunked responses from InfluxDB.
+ With ``chunked`` enabled, one ResultSet is returned per chunk
+ containing all results within that chunk
+ :param chunk_size: Size of each chunk to tell InfluxDB to use.
+ :param dropna: drop columns where all values are missing
+ :param data_frame_index: the list of columns that
+ are used as DataFrame index
+ :returns: the queried data
+ :rtype: :class:`~.ResultSet`
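+
+        A short parameter-binding sketch (mirroring examples/tutorial.py)::
+
+            client.query('select Int_value from cpu_load_short '
+                         'where host=$host;',
+                         bind_params={'host': 'server01'})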
+ """
+ query_args = dict(params=params,
+ bind_params=bind_params,
+ epoch=epoch,
+ expected_response_code=expected_response_code,
+ raise_errors=raise_errors,
+ chunked=chunked,
+ database=database,
+ method=method,
+ chunk_size=chunk_size)
+ results = super(DataFrameClient, self).query(query, **query_args)
+ if query.strip().upper().startswith("SELECT"):
+ if len(results) > 0:
+ return self._to_dataframe(results, dropna,
+ data_frame_index=data_frame_index)
+ else:
+ return {}
+ else:
+ return results
+
+ def _to_dataframe(self, rs, dropna=True, data_frame_index=None):
+ result = defaultdict(list)
+ if isinstance(rs, list):
+ return map(self._to_dataframe, rs,
+ [dropna for _ in range(len(rs))])
+
+ for key, data in rs.items():
+ name, tags = key
+ if tags is None:
+ key = name
+ else:
+ key = (name, tuple(sorted(tags.items())))
+ df = pd.DataFrame(data)
+ df.time = pd.to_datetime(df.time)
+
+ if data_frame_index:
+ df.set_index(data_frame_index, inplace=True)
+ else:
+ df.set_index('time', inplace=True)
+ if df.index.tzinfo is None:
+ df.index = df.index.tz_localize('UTC')
+ df.index.name = None
+
+ result[key].append(df)
+ for key, data in result.items():
+ df = pd.concat(data).sort_index()
+ if dropna:
+ df.dropna(how='all', axis=1, inplace=True)
+ result[key] = df
+
+ return result
+
+ @staticmethod
+ def _convert_dataframe_to_json(dataframe,
+ measurement,
+ tags=None,
+ tag_columns=None,
+ field_columns=None,
+ time_precision=None):
+
+ if not isinstance(dataframe, pd.DataFrame):
+ raise TypeError('Must be DataFrame, but type was: {0}.'
+ .format(type(dataframe)))
+ if not (isinstance(dataframe.index, pd.PeriodIndex) or
+ isinstance(dataframe.index, pd.DatetimeIndex)):
+ raise TypeError('Must be DataFrame with DatetimeIndex or '
+ 'PeriodIndex.')
+
+ # Make sure tags and tag columns are correctly typed
+ tag_columns = tag_columns if tag_columns is not None else []
+ field_columns = field_columns if field_columns is not None else []
+ tags = tags if tags is not None else {}
+ # Assume field columns are all columns not included in tag columns
+ if not field_columns:
+ field_columns = list(
+ set(dataframe.columns).difference(set(tag_columns)))
+
+ if not isinstance(dataframe.index, pd.DatetimeIndex):
+ dataframe.index = pd.to_datetime(dataframe.index)
+ if dataframe.index.tzinfo is None:
+ dataframe.index = dataframe.index.tz_localize('UTC')
+
+ # Convert column to strings
+ dataframe.columns = dataframe.columns.astype('str')
+
+ # Convert dtype for json serialization
+ dataframe = dataframe.astype('object')
+
+ precision_factor = {
+ "n": 1,
+ "u": 1e3,
+ "ms": 1e6,
+ "s": 1e9,
+ "m": 1e9 * 60,
+ "h": 1e9 * 3600,
+ }.get(time_precision, 1)
+
+ if not tag_columns:
+ points = [
+ {'measurement': measurement,
+ 'fields':
+ rec.replace([np.inf, -np.inf], np.nan).dropna().to_dict(),
+ 'time': np.int64(ts.value / precision_factor)}
+ for ts, (_, rec) in zip(
+ dataframe.index,
+ dataframe[field_columns].iterrows()
+ )
+ ]
+
+ return points
+
+ points = [
+ {'measurement': measurement,
+ 'tags': dict(list(tag.items()) + list(tags.items())),
+ 'fields':
+ rec.replace([np.inf, -np.inf], np.nan).dropna().to_dict(),
+ 'time': np.int64(ts.value / precision_factor)}
+ for ts, tag, (_, rec) in zip(
+ dataframe.index,
+ dataframe[tag_columns].to_dict('record'),
+ dataframe[field_columns].iterrows()
+ )
+ ]
+
+ return points
+
+ def _convert_dataframe_to_lines(self,
+ dataframe,
+ measurement,
+ field_columns=None,
+ tag_columns=None,
+ global_tags=None,
+ time_precision=None,
+ numeric_precision=None):
+
+ dataframe = dataframe.dropna(how='all').copy()
+ if len(dataframe) == 0:
+ return []
+
+ if not isinstance(dataframe, pd.DataFrame):
+ raise TypeError('Must be DataFrame, but type was: {0}.'
+ .format(type(dataframe)))
+ if not (isinstance(dataframe.index, pd.PeriodIndex) or
+ isinstance(dataframe.index, pd.DatetimeIndex)):
+ raise TypeError('Must be DataFrame with DatetimeIndex or '
+ 'PeriodIndex.')
+
+ dataframe = dataframe.rename(
+ columns={item: _escape_tag(item) for item in dataframe.columns})
+ # Create a Series of columns for easier indexing
+ column_series = pd.Series(dataframe.columns)
+
+ if field_columns is None:
+ field_columns = []
+
+ if tag_columns is None:
+ tag_columns = []
+
+ if global_tags is None:
+ global_tags = {}
+
+ # Make sure field_columns and tag_columns are lists
+ field_columns = list(field_columns) if list(field_columns) else []
+ tag_columns = list(tag_columns) if list(tag_columns) else []
+
+ # If field columns but no tag columns, assume rest of columns are tags
+ if field_columns and (not tag_columns):
+ tag_columns = list(column_series[~column_series.isin(
+ field_columns)])
+
+ # If no field columns, assume non-tag columns are fields
+ if not field_columns:
+ field_columns = list(column_series[~column_series.isin(
+ tag_columns)])
+
+ precision_factor = {
+ "n": 1,
+ "u": 1e3,
+ "ms": 1e6,
+ "s": 1e9,
+ "m": 1e9 * 60,
+ "h": 1e9 * 3600,
+ }.get(time_precision, 1)
+
+ # Make array of timestamp ints
+ if isinstance(dataframe.index, pd.PeriodIndex):
+ time = ((dataframe.index.to_timestamp().values.astype(np.int64) //
+ precision_factor).astype(np.int64).astype(str))
+ else:
+ time = ((pd.to_datetime(dataframe.index).values.astype(np.int64) //
+ precision_factor).astype(np.int64).astype(str))
+
+ # If tag columns exist, make an array of formatted tag keys and values
+ if tag_columns:
+
+            # Add global_tags as additional tag columns
+ if global_tags:
+ for tag in global_tags:
+ dataframe[tag] = global_tags[tag]
+ tag_columns.append(tag)
+
+ tag_df = dataframe[tag_columns]
+ tag_df = tag_df.fillna('') # replace NA with empty string
+ tag_df = tag_df.sort_index(axis=1)
+ tag_df = self._stringify_dataframe(
+ tag_df, numeric_precision, datatype='tag')
+
+ # join prepended tags, leaving None values out
+ tags = tag_df.apply(
+ lambda s: [',' + s.name + '=' + v if v else '' for v in s])
+ tags = tags.sum(axis=1)
+
+ del tag_df
+ elif global_tags:
+ tag_string = ''.join(
+ [",{}={}".format(k, _escape_tag(v))
+ if v not in [None, ''] else ""
+ for k, v in sorted(global_tags.items())]
+ )
+ tags = pd.Series(tag_string, index=dataframe.index)
+ else:
+ tags = ''
+
+ # Make an array of formatted field keys and values
+ field_df = dataframe[field_columns].replace([np.inf, -np.inf], np.nan)
+ nans = pd.isnull(field_df)
+
+ field_df = self._stringify_dataframe(field_df,
+ numeric_precision,
+ datatype='field')
+
+ field_df = (field_df.columns.values + '=').tolist() + field_df
+ field_df[field_df.columns[1:]] = ',' + field_df[field_df.columns[1:]]
+ field_df[nans] = ''
+
+ fields = field_df.sum(axis=1).map(lambda x: x.lstrip(','))
+ del field_df
+
+ # Generate line protocol string
+ measurement = _escape_tag(measurement)
+ points = (measurement + tags + ' ' + fields + ' ' + time).tolist()
+ return points
+
+ @staticmethod
+ def _stringify_dataframe(dframe, numeric_precision, datatype='field'):
+
+ # Prevent modification of input dataframe
+ dframe = dframe.copy()
+
+ # Find int and string columns for field-type data
+ int_columns = dframe.select_dtypes(include=['integer']).columns
+ string_columns = dframe.select_dtypes(include=['object']).columns
+
+ # Convert dframe to string
+ if numeric_precision is None:
+ # If no precision specified, convert directly to string (fast)
+ dframe = dframe.astype(str)
+ elif numeric_precision == 'full':
+ # If full precision, use repr to get full float precision
+ float_columns = (dframe.select_dtypes(
+ include=['floating']).columns)
+ nonfloat_columns = dframe.columns[~dframe.columns.isin(
+ float_columns)]
+ dframe[float_columns] = dframe[float_columns].applymap(repr)
+ dframe[nonfloat_columns] = (dframe[nonfloat_columns].astype(str))
+ elif isinstance(numeric_precision, int):
+ # If precision is specified, round to appropriate precision
+ float_columns = (dframe.select_dtypes(
+ include=['floating']).columns)
+ nonfloat_columns = dframe.columns[~dframe.columns.isin(
+ float_columns)]
+ dframe[float_columns] = (dframe[float_columns].round(
+ numeric_precision))
+
+ # If desired precision is > 10 decimal places, need to use repr
+ if numeric_precision > 10:
+ dframe[float_columns] = (dframe[float_columns].applymap(repr))
+ dframe[nonfloat_columns] = (dframe[nonfloat_columns]
+ .astype(str))
+ else:
+ dframe = dframe.astype(str)
+ else:
+ raise ValueError('Invalid numeric precision.')
+
+ if datatype == 'field':
+ # If dealing with fields, format ints and strings correctly
+ dframe[int_columns] += 'i'
+ dframe[string_columns] = '"' + dframe[string_columns] + '"'
+ elif datatype == 'tag':
+ dframe = dframe.apply(_escape_pandas_series)
+
+ dframe.columns = dframe.columns.astype(str)
+
+ return dframe
+
+ def _datetime_to_epoch(self, datetime, time_precision='s'):
+ seconds = (datetime - self.EPOCH).total_seconds()
+ if time_precision == 'h':
+ return seconds / 3600
+ elif time_precision == 'm':
+ return seconds / 60
+ elif time_precision == 's':
+ return seconds
+ elif time_precision == 'ms':
+ return seconds * 1e3
+ elif time_precision == 'u':
+ return seconds * 1e6
+ elif time_precision == 'n':
+ return seconds * 1e9
+
+ raise ValueError('Invalid time precision: {0!r}.'.format(time_precision))
diff --git a/influxdb/chunked_json.py b/influxdb/chunked_json.py
new file mode 100644
index 00000000..4e40f01a
--- /dev/null
+++ b/influxdb/chunked_json.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+"""Module to generate chunked JSON replies."""
+
+#
+# Author: Adrian Sampson
+# Source: https://gist.github.com/sampsyo/920215
+#
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import json
+
+
+def loads(s):
+ """Generate a sequence of JSON values from a string."""
+ _decoder = json.JSONDecoder()
+
+ while s:
+ s = s.strip()
+ obj, pos = _decoder.raw_decode(s)
+ if not pos:
+ raise ValueError('no JSON object found at %i' % pos)
+ yield obj
+ s = s[pos:]
diff --git a/influxdb/client.py b/influxdb/client.py
index dfae9613..c535a3f1 100644
--- a/influxdb/client.py
+++ b/influxdb/client.py
@@ -1,57 +1,99 @@
# -*- coding: utf-8 -*-
-"""
-Python client for InfluxDB
-"""
-import json
-import socket
-import requests
+"""Python client for InfluxDB."""
-try:
- xrange
-except NameError:
- xrange = range
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
-session = requests.Session()
+import datetime
+import gzip
+import itertools
+import io
+import json
+import random
+import socket
+import struct
+import time
+from itertools import chain, islice
+import msgpack
+import requests
+import requests.exceptions
+from requests.adapters import HTTPAdapter
+from six.moves.urllib.parse import urlparse
-class InfluxDBClientError(Exception):
- "Raised when an error occurs in the request"
- def __init__(self, content, code):
- super(InfluxDBClientError, self).__init__(
- "{0}: {1}".format(code, content))
- self.content = content
- self.code = code
+from influxdb.line_protocol import make_lines, quote_ident, quote_literal
+from influxdb.resultset import ResultSet
+from .exceptions import InfluxDBClientError
+from .exceptions import InfluxDBServerError
class InfluxDBClient(object):
+ """InfluxDBClient primary client object to connect InfluxDB.
- """
- The ``InfluxDBClient`` object holds information necessary to connect
- to InfluxDB. Requests can be made to InfluxDB directly through the client.
+ The :class:`~.InfluxDBClient` object holds information necessary to
+ connect to InfluxDB. Requests can be made to InfluxDB directly through
+ the client.
+
+ The client supports use as a `context manager
+ <https://docs.python.org/3/reference/datamodel.html#context-managers>`_.
:param host: hostname to connect to InfluxDB, defaults to 'localhost'
- :type host: string
- :param port: port to connect to InfluxDB, defaults to 'localhost'
+ :type host: str
+ :param port: port to connect to InfluxDB, defaults to 8086
:type port: int
:param username: user to connect, defaults to 'root'
- :type username: string
+ :type username: str
:param password: password of the user, defaults to 'root'
- :type password: string
- :param database: database name to connect to, defaults is None
- :type database: string
- :param ssl: use https instead of http to connect to InfluxDB, defaults is
+ :type password: str
+ :param pool_size: urllib3 connection pool size, defaults to 10.
+ :type pool_size: int
+ :param database: database name to connect to, defaults to None
+ :type database: str
+ :param ssl: use https instead of http to connect to InfluxDB, defaults to
False
- :type ssl: boolean
- :param verify_ssl: verify SSL certificates for HTTPS requests, defaults is
+ :type ssl: bool
+ :param verify_ssl: verify SSL certificates for HTTPS requests, defaults to
False
- :type verify_ssl: boolean
+ :type verify_ssl: bool
:param timeout: number of seconds Requests will wait for your client to
establish a connection, defaults to None
:type timeout: int
- :param use_udp: use UDP to connect to InfluxDB, defaults is False
- :type use_udp: int
- :param udp_port: UDP port to connect to InfluxDB, defaults is 4444
+ :param retries: number of attempts your client will make before aborting,
+ defaults to 3
+ 0 - try until success
+ 1 - attempt only once (without retry)
+ 2 - maximum two attempts (including one retry)
+ 3 - maximum three attempts (default option)
+ :type retries: int
+ :param use_udp: use UDP to connect to InfluxDB, defaults to False
+ :type use_udp: bool
+ :param udp_port: UDP port to connect to InfluxDB, defaults to 4444
:type udp_port: int
+ :param proxies: HTTP(S) proxy to use for Requests, defaults to {}
+ :type proxies: dict
+ :param path: path of InfluxDB on the server to connect, defaults to ''
+ :type path: str
+ :param cert: Path to client certificate information to use for mutual TLS
+ authentication. You can specify a local cert to use
+ as a single file containing the private key and the certificate, or as
+ a tuple of both files’ paths, defaults to None
+ :type cert: str
+ :param gzip: use gzip content encoding to compress requests
+ :type gzip: bool
+ :param session: allow the client to reuse an existing requests
+ Session, defaults to None
+ :type session: requests.Session
+ :param headers: headers to add to Requests, will add 'Content-Type'
+ and 'Accept' unless these are already present, defaults to {}
+ :type headers: dict
+ :param socket_options: use custom TCP socket options.
+ If not specified, defaults are loaded from
+ ``HTTPConnection.default_socket_options``
+ :type socket_options: list
+
+ :raises ValueError: if cert is provided but ssl is disabled (set to False)
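+
+ :Example:
+
+ ::
+
+ >> # hypothetical connection values, shown for illustration
+ >> client = InfluxDBClient(host='localhost', port=8086,
+ ... database='example_db')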
"""
def __init__(self,
@@ -63,654 +105,1172 @@ def __init__(self,
ssl=False,
verify_ssl=False,
timeout=None,
+ retries=3,
use_udp=False,
- udp_port=4444):
- """
- Construct a new InfluxDBClient object.
- """
- self._host = host
- self._port = port
+ udp_port=4444,
+ proxies=None,
+ pool_size=10,
+ path='',
+ cert=None,
+ gzip=False,
+ session=None,
+ headers=None,
+ socket_options=None,
+ ):
+ """Construct a new InfluxDBClient object."""
+ self.__host = host
+ self.__port = int(port)
self._username = username
self._password = password
self._database = database
self._timeout = timeout
+ self._retries = retries
self._verify_ssl = verify_ssl
- self.use_udp = use_udp
- self.udp_port = udp_port
+ self.__use_udp = use_udp
+ self.__udp_port = int(udp_port)
+
+ if not session:
+ session = requests.Session()
+
+ self._session = session
+ adapter = _SocketOptionsAdapter(
+ pool_connections=int(pool_size),
+ pool_maxsize=int(pool_size),
+ socket_options=socket_options
+ )
+
if use_udp:
self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ if not path:
+ self.__path = ''
+ elif path[0] == '/':
+ self.__path = path
+ else:
+ self.__path = '/' + path
+
self._scheme = "http"
if ssl is True:
self._scheme = "https"
- self._baseurl = "{0}://{1}:{2}".format(
- self._scheme,
- self._host,
- self._port)
-
- self._headers = {
- 'Content-type': 'application/json',
- 'Accept': 'text/plain'}
-
- # Change member variables
-
- def switch_db(self, database):
- """
- switch_db()
-
- Change client database.
-
- :param database: the new database name to switch to
- :type database: string
- """
- self._database = database
-
- def switch_user(self, username, password):
- """
- switch_user()
-
- Change client username.
-
- :param username: the new username to switch to
- :type username: string
- :param password: the new password to switch to
- :type password: string
- """
- self._username = username
- self._password = password
-
- def request(self, url, method='GET', params=None, data=None,
- expected_response_code=200):
- """
- Make a http request to API
- """
- url = "{0}/{1}".format(self._baseurl, url)
-
- if params is None:
- params = {}
-
- auth = {
- 'u': self._username,
- 'p': self._password
- }
-
- params.update(auth)
+ self._session.mount(self._scheme + '://', adapter)
- if data is not None and not isinstance(data, str):
- data = json.dumps(data)
-
- response = session.request(
- method=method,
- url=url,
- params=params,
- data=data,
- headers=self._headers,
- verify=self._verify_ssl,
- timeout=self._timeout
- )
-
- if response.status_code == expected_response_code:
- return response
+ if proxies is None:
+ self._proxies = {}
else:
- raise InfluxDBClientError(response.content, response.status_code)
-
- # Writing Data
- #
- # Assuming you have a database named foo_production you can write data
- # by doing a POST to /db/foo_production/series?u=some_user&p=some_password
- # with a JSON body of points.
-
- def write_points(self, *args, **kwargs):
- """
- write_points()
+ self._proxies = proxies
- Write to multiple time series names.
-
- :param batch_size: [Optional] Value to write the points in batches
- instead of all at one time. Useful for when doing data dumps from
- one database to another or when doing a massive write operation
- :type batch_size: int
- """
+ if cert:
+ if not ssl:
+ raise ValueError(
+ "Client certificate provided but ssl is disabled."
+ )
+ else:
+ self._session.cert = cert
- def list_chunks(l, n):
- """ Yield successive n-sized chunks from l.
- """
- for i in xrange(0, len(l), n):
- yield l[i:i + n]
+ self.__baseurl = "{0}://{1}:{2}{3}".format(
+ self._scheme,
+ self._host,
+ self._port,
+ self._path)
- batch_size = kwargs.get('batch_size')
- if batch_size:
- for data in kwargs.get('data'):
- name = data.get('name')
- columns = data.get('columns')
- point_list = data.get('points')
+ if headers is None:
+ headers = {}
+ headers.setdefault('Content-Type', 'application/json')
+ headers.setdefault('Accept', 'application/x-msgpack')
+ self._headers = headers
- for batch in list_chunks(point_list, batch_size):
- data = [{
- "points": batch,
- "name": name,
- "columns": columns
- }]
- time_precision = kwargs.get('time_precision', 's')
- self.write_points_with_precision(
- data=data,
- time_precision=time_precision)
+ self._gzip = gzip
- return True
+ def __enter__(self):
+ """Enter function as used by context manager."""
+ return self
- return self.write_points_with_precision(*args, **kwargs)
+ def __exit__(self, _exc_type, _exc_value, _traceback):
+ """Exit function as used by context manager."""
+ self.close()
- def write_points_with_precision(self, data, time_precision='s'):
- """
- Write to multiple time series names
- """
- if time_precision not in ['s', 'm', 'ms', 'u']:
- raise Exception(
- "Invalid time precision is given. (use 's', 'm', 'ms' or 'u')")
+ @property
+ def _baseurl(self):
+ return self.__baseurl
- if self.use_udp and time_precision != 's':
- raise Exception(
- "InfluxDB only supports seconds precision for udp writes"
- )
+ @property
+ def _host(self):
+ return self.__host
- url = "db/{0}/series".format(self._database)
+ @property
+ def _port(self):
+ return self.__port
- params = {
- 'time_precision': time_precision
- }
+ @property
+ def _path(self):
+ return self.__path
- if self.use_udp:
- self.send_packet(data)
- else:
- self.request(
- url=url,
- method='POST',
- params=params,
- data=data,
- expected_response_code=200
- )
+ @property
+ def _udp_port(self):
+ return self.__udp_port
- return True
+ @property
+ def _use_udp(self):
+ return self.__use_udp
- # One Time Deletes
+ @classmethod
+ def from_dsn(cls, dsn, **kwargs):
+ r"""Generate an instance of InfluxDBClient from given data source name.
- def delete_points(self, name):
- """
- Delete an entire series
- """
- url = "db/{0}/series/{1}".format(self._database, name)
+ Return an instance of :class:`~.InfluxDBClient` from the provided
+ data source name. Supported schemes are "influxdb", "https+influxdb"
+ and "udp+influxdb". Parameters for the :class:`~.InfluxDBClient`
+ constructor may also be passed to this method.
- self.request(
- url=url,
- method='DELETE',
- expected_response_code=204
- )
+ :param dsn: data source name
+ :type dsn: string
+ :param kwargs: additional parameters for `InfluxDBClient`
+ :type kwargs: dict
+ :raises ValueError: if the provided DSN has any unexpected values
- return True
+ :Example:
- # Regularly Scheduled Deletes
+ ::
- def create_scheduled_delete(self, json_body):
- """
- TODO: Create scheduled delete
+ >> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
+ localhost:8086/databasename', timeout=5)
+ >> type(cli)
+ <class 'influxdb.client.InfluxDBClient'>
+
+ >> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
+ localhost:8086/databasename', timeout=5, udp_port=159)
+ >> print('{0._baseurl} - {0._use_udp} {0._udp_port}'.format(cli))
+ http://localhost:8086 - True 159
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
+ .. note:: parameters provided in `**kwargs` may override dsn parameters
+ .. note:: when using "udp+influxdb" the specified port (if any) will
+ be used for the TCP connection; specify the UDP port with the
+ additional `udp_port` parameter (cf. examples).
"""
- raise NotImplementedError()
+ init_args = _parse_dsn(dsn)
+ host, port = init_args.pop('hosts')[0]
+ init_args['host'] = host
+ init_args['port'] = port
+ init_args.update(kwargs)
- # get list of deletes
- # curl http://localhost:8086/db/site_dev/scheduled_deletes
- #
- # remove a regularly scheduled delete
- # curl -X DELETE http://localhost:8086/db/site_dev/scheduled_deletes/:id
+ return cls(**init_args)
- def get_list_scheduled_delete(self):
- """
- TODO: Get list of scheduled deletes
+ def switch_database(self, database):
+ """Change the client's database.
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
+ :param database: the name of the database to switch to
+ :type database: str
"""
- raise NotImplementedError()
+ self._database = database
- def remove_scheduled_delete(self, delete_id):
- """
- TODO: Remove scheduled delete
+ def switch_user(self, username, password):
+ """Change the client's username.
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
+ :param username: the username to switch to
+ :type username: str
+ :param password: the password for the username
+ :type password: str
"""
- raise NotImplementedError()
+ self._username = username
+ self._password = password
- # Querying Data
- #
- # GET db/:name/series. It takes five parameters
- def query(self, query, time_precision='s', chunked=False):
- """
- Quering data
+ def request(self, url, method='GET', params=None, data=None, stream=False,
+ expected_response_code=200, headers=None):
+ """Make a HTTP request to the InfluxDB API.
+
+ :param url: the path of the HTTP request, e.g. write, query, etc.
+ :type url: str
+ :param method: the HTTP method for the request, defaults to GET
+ :type method: str
+ :param params: additional parameters for the request, defaults to None
+ :type params: dict
+ :param data: the data of the request, defaults to None
+ :type data: str
+ :param stream: True if a query uses chunked responses
+ :type stream: bool
+ :param expected_response_code: the expected response code of
+ the request, defaults to 200
+ :type expected_response_code: int
+ :param headers: headers to add to the request
+ :type headers: dict
+ :returns: the response from the request
+ :rtype: :class:`requests.Response`
+ :raises InfluxDBServerError: if the response code is any server error
+ code (5xx)
+ :raises InfluxDBClientError: if the response code is not the
+ same as `expected_response_code` and is not a server error code
"""
- if time_precision not in ['s', 'm', 'ms', 'u']:
- raise Exception(
- "Invalid time precision is given. (use 's', 'm', 'ms' or 'u')")
-
- if chunked is True:
- chunked_param = 'true'
- else:
- chunked_param = 'false'
-
- # Build the URL of the serie to query
- url = "db/{0}/series".format(self._database)
-
- params = {
- 'q': query,
- 'time_precision': time_precision,
- 'chunked': chunked_param
- }
-
- response = self.request(
- url=url,
- method='GET',
- params=params,
- expected_response_code=200
- )
-
- return response.json()
+ url = "{0}/{1}".format(self._baseurl, url)
- # Creating and Dropping Databases
- #
- # ### create a database
- # curl -X POST http://localhost:8086/db -d '{"name": "site_development"}'
- #
- # ### drop a database
- # curl -X DELETE http://localhost:8086/db/site_development
+ if headers is None:
+ headers = self._headers
- def create_database(self, database):
- """
- create_database()
+ if params is None:
+ params = {}
- Create a database on the InfluxDB server.
+ if isinstance(data, (dict, list)):
+ data = json.dumps(data)
- :param database: the name of the database to create
- :type database: string
- :rtype: boolean
- """
- url = "db"
+ if self._gzip:
+ # Receive and send compressed data
+ headers.update({
+ 'Accept-Encoding': 'gzip',
+ 'Content-Encoding': 'gzip',
+ })
+ if data is not None:
+ # For Py 2.7 compatibility use GzipFile
+ compressed = io.BytesIO()
+ with gzip.GzipFile(
+ compresslevel=9,
+ fileobj=compressed,
+ mode='w'
+ ) as f:
+ f.write(data)
+ data = compressed.getvalue()
+
+ # Try to send the request more than once by default (see #103)
+ retry = True
+ _try = 0
+ while retry:
+ try:
+ if "Authorization" in headers:
+ auth = (None, None)
+ else:
+ auth = (self._username, self._password)
+ response = self._session.request(
+ method=method,
+ url=url,
+ auth=auth if None not in auth else None,
+ params=params,
+ data=data,
+ stream=stream,
+ headers=headers,
+ proxies=self._proxies,
+ verify=self._verify_ssl,
+ timeout=self._timeout
+ )
+ break
+ except (requests.exceptions.ConnectionError,
+ requests.exceptions.HTTPError,
+ requests.exceptions.Timeout):
+ _try += 1
+ if self._retries != 0:
+ retry = _try < self._retries
+ if not retry:
+ raise
+ if method == "POST":
+ time.sleep((2 ** _try) * random.random() / 100.0)
+
+ type_header = response.headers and response.headers.get("Content-Type")
+ if type_header == "application/x-msgpack" and response.content:
+ response._msgpack = msgpack.unpackb(
+ packed=response.content,
+ ext_hook=_msgpack_parse_hook,
+ raw=False)
+ else:
+ response._msgpack = None
+
+ def reformat_error(response):
+ if response._msgpack:
+ return json.dumps(response._msgpack, separators=(',', ':'))
+ else:
+ return response.content
+
+ # Raise on server errors (5xx); otherwise the status code must match
+ # the expected one for the response to count as successful
+ if 500 <= response.status_code < 600:
+ raise InfluxDBServerError(reformat_error(response))
+ elif response.status_code == expected_response_code:
+ return response
+ else:
+ err_msg = reformat_error(response)
+ raise InfluxDBClientError(err_msg, response.status_code)
+
+ def write(self, data, params=None, expected_response_code=204,
+ protocol='json'):
+ """Write data to InfluxDB.
+
+ :param data: the data to be written
+ :type data: (if protocol is 'json') dict
+ (if protocol is 'line') sequence of line protocol strings
+ or single string
+ :param params: additional parameters for the request, defaults to None
+ :type params: dict
+ :param expected_response_code: the expected response code of the write
+ operation, defaults to 204
+ :type expected_response_code: int
+ :param protocol: protocol of input data, either 'json' or 'line'
+ :type protocol: str
+ :returns: True, if the write operation is successful
+ :rtype: bool
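+
+ :Example:
+
+ ::
+
+ >> # hypothetical line-protocol point, shown for illustration
+ >> client.write('cpu,host=server01 value=0.64',
+ ... params={'db': 'example_db'}, protocol='line')
+ True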
+ """
+ headers = self._headers.copy()
+ headers['Content-Type'] = 'application/octet-stream'
+
+ if params:
+ precision = params.get('precision')
+ else:
+ precision = None
- data = {'name': database}
+ if protocol == 'json':
+ data = make_lines(data, precision).encode('utf-8')
+ elif protocol == 'line':
+ if isinstance(data, str):
+ data = [data]
+ data = ('\n'.join(data) + '\n').encode('utf-8')
self.request(
- url=url,
+ url="write",
method='POST',
+ params=params,
data=data,
- expected_response_code=201
+ expected_response_code=expected_response_code,
+ headers=headers
)
-
return True
- def delete_database(self, database):
+ @staticmethod
+ def _read_chunked_response(response, raise_errors=True):
+ for line in response.iter_lines():
+ if isinstance(line, bytes):
+ line = line.decode('utf-8')
+ data = json.loads(line)
+ result_set = {}
+ for result in data.get('results', []):
+ for _key in result:
+ if isinstance(result[_key], list):
+ result_set.setdefault(
+ _key, []).extend(result[_key])
+ yield ResultSet(result_set, raise_errors=raise_errors)
+
+ def query(self,
+ query,
+ params=None,
+ bind_params=None,
+ epoch=None,
+ expected_response_code=200,
+ database=None,
+ raise_errors=True,
+ chunked=False,
+ chunk_size=0,
+ method="GET"):
+ """Send a query to InfluxDB.
+
+ .. danger::
+ In order to avoid injection vulnerabilities (similar to `SQL
+ injection `_
+ vulnerabilities), do not directly include untrusted data into the
+ ``query`` parameter, use ``bind_params`` instead.
+
+ :param query: the actual query string
+ :type query: str
+
+ :param params: additional parameters for the request,
+ defaults to {}
+ :type params: dict
+
+ :param bind_params: bind parameters for the query:
+ any variable in the query written as ``'$var_name'`` will be
+ replaced with ``bind_params['var_name']``. Only works in the
+ ``WHERE`` clause and takes precedence over ``params['params']``
+ :type bind_params: dict
+
+ :param epoch: response timestamps to be in epoch format either 'h',
+ 'm', 's', 'ms', 'u', or 'ns', defaults to `None` which is
+ RFC3339 UTC format with nanosecond precision
+ :type epoch: str
+
+ :param expected_response_code: the expected status code of response,
+ defaults to 200
+ :type expected_response_code: int
+
+ :param database: database to query, defaults to None
+ :type database: str
+
+ :param raise_errors: Whether or not to raise exceptions when InfluxDB
+ returns errors, defaults to True
+ :type raise_errors: bool
+
+ :param chunked: Enable to use chunked responses from InfluxDB.
+ With ``chunked`` enabled, one ResultSet is returned per chunk
+ containing all results within that chunk
+ :type chunked: bool
+
+ :param chunk_size: Size of each chunk to tell InfluxDB to use.
+ :type chunk_size: int
+
+ :param method: the HTTP method for the request, defaults to GET
+ :type method: str
+
+ :returns: the queried data
+ :rtype: :class:`~.ResultSet`
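+
+ :Example:
+
+ ::
+
+ >> # hypothetical measurement and tag, shown for illustration
+ >> client.query('SELECT * FROM "cpu" WHERE "host" = $host',
+ ... bind_params={'host': 'server01'})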
"""
- delete_database()
+ if params is None:
+ params = {}
- Drop a database on the InfluxDB server.
+ if bind_params is not None:
+ params_dict = json.loads(params.get('params', '{}'))
+ params_dict.update(bind_params)
+ params['params'] = json.dumps(params_dict)
- :param database: the name of the database to delete
- :type database: string
- :rtype: boolean
- """
- url = "db/{0}".format(database)
+ params['q'] = query
+ params['db'] = database or self._database
- self.request(
- url=url,
- method='DELETE',
- expected_response_code=204
- )
+ if epoch is not None:
+ params['epoch'] = epoch
- return True
+ if chunked:
+ params['chunked'] = 'true'
+ if chunk_size > 0:
+ params['chunk_size'] = chunk_size
- # ### get list of databases
- # curl -X GET http://localhost:8086/db
+ if query.lower().startswith("select ") and " into " in query.lower():
+ method = "POST"
- def get_database_list(self):
- """
- Get the list of databases
- """
- url = "db"
+ response = self.request(
+ url="query",
+ method=method,
+ params=params,
+ data=None,
+ stream=chunked,
+ expected_response_code=expected_response_code
+ )
+ data = response._msgpack
+ if not data:
+ if chunked:
+ return self._read_chunked_response(response)
+ data = response.json()
+
+ results = [
+ ResultSet(result, raise_errors=raise_errors)
+ for result
+ in data.get('results', [])
+ ]
+
+ # TODO(aviau): Always return a list. (This would be a breaking change)
+ if len(results) == 1:
+ return results[0]
+
+ return results
+
+ def write_points(self,
+ points,
+ time_precision=None,
+ database=None,
+ retention_policy=None,
+ tags=None,
+ batch_size=None,
+ protocol='json',
+ consistency=None
+ ):
+ """Write to multiple time series names.
+
+ :param points: the list of points to be written in the database
+ :type points: (if protocol is 'json') list of dicts, where each dict
+ represents a point.
+ (if protocol is 'line') sequence of line protocol strings.
+
+ :param time_precision: Either 'n', 'u', 'ms', 's', 'm' or 'h',
+ defaults to None
+ :type time_precision: str
+ :param database: the database to write the points to. Defaults to
+ the client's current database
+ :type database: str
+ :param tags: a set of key-value pairs associated with each point. Both
+ keys and values must be strings. These are shared tags and will be
+ merged with point-specific tags, defaults to None
+ :type tags: dict
+ :param retention_policy: the retention policy for the points. Defaults
+ to None
+ :type retention_policy: str
+ :param batch_size: value to write the points in batches
+ instead of all at one time. Useful for when doing data dumps from
+ one database to another or when doing a massive write operation,
+ defaults to None
+ :type batch_size: int
+ :param protocol: Protocol for writing data. Either 'line' or 'json'.
+ :type protocol: str
+ :param consistency: Consistency for the points.
+ One of {'any','one','quorum','all'}.
+ :type consistency: str
+ :returns: True, if the operation is successful
+ :rtype: bool
+
+ .. note:: if no retention policy is specified, the default retention
+ policy for the database is used
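+
+ :Example:
+
+ ::
+
+ >> # hypothetical point, shown for illustration
+ >> point = {
+ ... "measurement": "cpu",
+ ... "tags": {"host": "server01"},
+ ... "fields": {"value": 0.64}
+ ... }
+ >> client.write_points([point])
+ True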
+ """
+ if batch_size and batch_size > 0:
+ for batch in self._batches(points, batch_size):
+ self._write_points(points=batch,
+ time_precision=time_precision,
+ database=database,
+ retention_policy=retention_policy,
+ tags=tags, protocol=protocol,
+ consistency=consistency)
+ return True
+
+ return self._write_points(points=points,
+ time_precision=time_precision,
+ database=database,
+ retention_policy=retention_policy,
+ tags=tags, protocol=protocol,
+ consistency=consistency)
+
+ def ping(self):
+ """Check connectivity to InfluxDB.
+
+ :returns: the version of the InfluxDB server the client is connected to
+ """
response = self.request(
- url=url,
+ url="ping",
method='GET',
- expected_response_code=200
+ expected_response_code=204
)
- return response.json()
+ return response.headers['X-Influxdb-Version']
+
+ @staticmethod
+ def _batches(iterable, size):
+ # Iterate over an iterable producing iterables of batches. Based on:
+ # http://code.activestate.com/recipes/303279-getting-items-in-batches/
+ iterator = iter(iterable)
+ while True:
+ try: # Try to get the first element in the iterator...
+ head = (next(iterator),)
+ except StopIteration:
+ return # ...so that we can stop if there isn't one
+ # Otherwise, lazily slice the rest of the batch
+ rest = islice(iterator, size - 1)
+ yield chain(head, rest)
+
+ def _write_points(self,
+ points,
+ time_precision,
+ database,
+ retention_policy,
+ tags,
+ protocol='json',
+ consistency=None):
+ if time_precision not in ['n', 'u', 'ms', 's', 'm', 'h', None]:
+ raise ValueError(
+ "Invalid time precision is given. "
+ "(use 'n', 'u', 'ms', 's', 'm' or 'h')")
+
+ if consistency not in ['any', 'one', 'quorum', 'all', None]:
+ raise ValueError('Invalid consistency: {}'.format(consistency))
+
+ if protocol == 'json':
+ data = {
+ 'points': points
+ }
+
+ if tags is not None:
+ data['tags'] = tags
+ else:
+ data = points
- def delete_series(self, series):
- """
- delete_series()
+ params = {
+ 'db': database or self._database
+ }
- Drop a series on the InfluxDB server.
+ if consistency is not None:
+ params['consistency'] = consistency
- :param series: the name of the series to delete
- :type series: string
- :rtype: boolean
- """
- url = "db/{0}/series/{1}".format(
- self._database,
- series
- )
+ if time_precision is not None:
+ params['precision'] = time_precision
- self.request(
- url=url,
- method='DELETE',
- expected_response_code=204
- )
+ if retention_policy is not None:
+ params['rp'] = retention_policy
+
+ if self._use_udp:
+ self.send_packet(
+ data, protocol=protocol, time_precision=time_precision
+ )
+ else:
+ self.write(
+ data=data,
+ params=params,
+ expected_response_code=204,
+ protocol=protocol
+ )
return True
- def get_list_series(self):
- """
- Get a list of all time series in a database
- """
+ def get_list_database(self):
+ """Get the list of databases in InfluxDB.
- response = self.query('list series')
+ :returns: all databases in InfluxDB
+ :rtype: list of dictionaries
- series_list = []
- for series in response[0]['points']:
- series_list.append(series[1])
+ :Example:
- return series_list
+ ::
- def get_list_continuous_queries(self):
+ >> dbs = client.get_list_database()
+ >> dbs
+ [{u'name': u'db1'}, {u'name': u'db2'}, {u'name': u'db3'}]
"""
- Get a list of continuous queries
+ return list(self.query("SHOW DATABASES").get_points())
+
+ def get_list_series(self, database=None, measurement=None, tags=None):
"""
+ Query SHOW SERIES returns the distinct series in your database.
- response = self.query('list continuous queries')
- queries_list = []
- for query in response[0]['points']:
- queries_list.append(query[2])
+ FROM and WHERE clauses are optional.
- return queries_list
+ :param measurement: show all series from a measurement
+ :type measurement: str
+ :param tags: show all series that match given tags
+ :type tags: dict
+ :param database: the database from which the series should be
+ shown, defaults to client's current database
+ :type database: str
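+
+ :Example:
+
+ ::
+
+ >> # hypothetical series, shown for illustration
+ >> client.get_list_series(measurement='cpu',
+ ... tags={'host': 'server01'})
+ [u'cpu,host=server01']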
+ """
+ database = database or self._database
+ query_str = 'SHOW SERIES'
- # Security
- # get list of cluster admins
- # curl http://localhost:8086/cluster_admins?u=root&p=root
+ if measurement:
+ query_str += ' FROM "{0}"'.format(measurement)
- # add cluster admin
- # curl -X POST http://localhost:8086/cluster_admins?u=root&p=root \
- # -d '{"name": "paul", "password": "i write teh docz"}'
+ if tags:
+ query_str += ' WHERE ' + ' and '.join(["{0}='{1}'".format(k, v)
+ for k, v in tags.items()])
- # update cluster admin password
- # curl -X POST http://localhost:8086/cluster_admins/paul?u=root&p=root \
- # -d '{"password": "new pass"}'
+ return list(
+ itertools.chain.from_iterable(
+ [
+ x.values()
+ for x in (self.query(query_str, database=database)
+ .get_points())
+ ]
+ )
+ )
- # delete cluster admin
- # curl -X DELETE http://localhost:8086/cluster_admins/paul?u=root&p=root
+ def create_database(self, dbname):
+ """Create a new database in InfluxDB.
+
+ :param dbname: the name of the database to create
+ :type dbname: str
+ """
+ self.query("CREATE DATABASE {0}".format(quote_ident(dbname)),
+ method="POST")
+
+ def drop_database(self, dbname):
+ """Drop a database from InfluxDB.
+
+ :param dbname: the name of the database to drop
+ :type dbname: str
+ """
+ self.query("DROP DATABASE {0}".format(quote_ident(dbname)),
+ method="POST")
+
+ def get_list_measurements(self):
+ """Get the list of measurements in InfluxDB.
+
+ :returns: all measurements in InfluxDB
+ :rtype: list of dictionaries
+
+ :Example:
+
+ ::
+
+ >> dbs = client.get_list_measurements()
+ >> dbs
+ [{u'name': u'measurements1'},
+ {u'name': u'measurements2'},
+ {u'name': u'measurements3'}]
+ """
+ return list(self.query("SHOW MEASUREMENTS").get_points())
+
+ def drop_measurement(self, measurement):
+ """Drop a measurement from InfluxDB.
+
+ :param measurement: the name of the measurement to drop
+ :type measurement: str
+ """
+ self.query("DROP MEASUREMENT {0}".format(quote_ident(measurement)),
+ method="POST")
+
+ def create_retention_policy(self, name, duration, replication,
+ database=None,
+ default=False, shard_duration="0s"):
+ """Create a retention policy for a database.
+
+ :param name: the name of the new retention policy
+ :type name: str
+ :param duration: the duration of the new retention policy.
+ Durations such as 1h, 90m, 12h, 7d, and 4w are all supported
+ and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
+ respectively. For infinite retention - meaning the data will
+ never be deleted - use 'INF' for duration.
+ The minimum retention period is 1 hour.
+ :type duration: str
+ :param replication: the replication of the retention policy
+ :type replication: str
+ :param database: the database for which the retention policy is
+ created. Defaults to current client's database
+ :type database: str
+ :param default: whether or not to set the policy as default
+ :type default: bool
+ :param shard_duration: the shard duration of the retention policy.
+ Durations such as 1h, 90m, 12h, 7d, and 4w are all supported and
+ mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
+ respectively. Infinite retention is not supported. As a workaround,
+ specify a "1000w" duration to achieve an extremely long shard group
+ duration. Defaults to "0s", which is interpreted by the database
+ to mean the default value given the duration.
+ The minimum shard group duration is 1 hour.
+ :type shard_duration: str
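+
+ :Example:
+
+ ::
+
+ >> # hypothetical policy, shown for illustration
+ >> client.create_retention_policy('one_week', '1w', 1,
+ ... database='example_db')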
+ """
+ query_string = \
+ "CREATE RETENTION POLICY {0} ON {1} " \
+ "DURATION {2} REPLICATION {3} SHARD DURATION {4}".format(
+ quote_ident(name), quote_ident(database or self._database),
+ duration, replication, shard_duration)
+
+ if default is True:
+ query_string += " DEFAULT"
+
+ self.query(query_string, method="POST")
+
+ def alter_retention_policy(self, name, database=None,
+ duration=None, replication=None,
+ default=None, shard_duration=None):
+ """Modify an existing retention policy for a database.
+
+ :param name: the name of the retention policy to modify
+ :type name: str
+ :param database: the database for which the retention policy is
+ modified. Defaults to current client's database
+ :type database: str
+ :param duration: the new duration of the existing retention policy.
+ Durations such as 1h, 90m, 12h, 7d, and 4w are all supported
+ and mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
+ respectively. For infinite retention, meaning the data will
+ never be deleted, use 'INF' for duration.
+ The minimum retention period is 1 hour.
+ :type duration: str
+ :param replication: the new replication of the existing
+ retention policy
+ :type replication: int
+ :param default: whether or not to set the modified policy as default
+ :type default: bool
+ :param shard_duration: the shard duration of the retention policy.
+ Durations such as 1h, 90m, 12h, 7d, and 4w are all supported and
+ mean 1 hour, 90 minutes, 12 hours, 7 days, and 4 weeks,
+ respectively. Infinite retention is not supported. As a workaround,
+ specify a "1000w" duration to achieve an extremely long shard group
+ duration.
+ The minimum shard group duration is 1 hour.
+ :type shard_duration: str
+
+ .. note:: at least one of duration, replication, or default flag
+ should be set. Otherwise the operation will fail.
+ """
+ query_string = (
+ "ALTER RETENTION POLICY {0} ON {1}"
+ ).format(quote_ident(name),
+ quote_ident(database or self._database))
+ if duration:
+ query_string += " DURATION {0}".format(duration)
+ if shard_duration:
+ query_string += " SHARD DURATION {0}".format(shard_duration)
+ if replication:
+ query_string += " REPLICATION {0}".format(replication)
+ if default is True:
+ query_string += " DEFAULT"
+
+ self.query(query_string, method="POST")
+
+ def drop_retention_policy(self, name, database=None):
+ """Drop an existing retention policy for a database.
+
+ :param name: the name of the retention policy to drop
+ :type name: str
+ :param database: the database for which the retention policy is
+ dropped. Defaults to current client's database
+ :type database: str
+ """
+ query_string = (
+ "DROP RETENTION POLICY {0} ON {1}"
+ ).format(quote_ident(name), quote_ident(database or self._database))
+ self.query(query_string, method="POST")
+
+ def get_list_retention_policies(self, database=None):
+ """Get the list of retention policies for a database.
+
+ :param database: the name of the database, defaults to the client's
+ current database
+ :type database: str
+ :returns: all retention policies for the database
+ :rtype: list of dictionaries
+
+ :Example:
+
+ ::
+
+ >> ret_policies = client.get_list_retention_policies('my_db')
+ >> ret_policies
+ [{u'default': True,
+ u'duration': u'0',
+ u'name': u'default',
+ u'replicaN': 1}]
+ """
+ if not (database or self._database):
+ raise InfluxDBClientError(
+ "get_list_retention_policies() requires a database as a "
+ "parameter or the client to be using a database")
+
+ rsp = self.query(
+ "SHOW RETENTION POLICIES ON {0}".format(
+ quote_ident(database or self._database))
+ )
+ return list(rsp.get_points())
- # Database admins, with a database name of site_dev
- # get list of database admins
- # curl http://localhost:8086/db/site_dev/admins?u=root&p=root
+ def get_list_users(self):
+ """Get the list of all users in InfluxDB.
- # add database admin
- # curl -X POST http://localhost:8086/db/site_dev/admins?u=root&p=root \
- # -d '{"name": "paul", "password": "i write teh docz"}'
+ :returns: all users in InfluxDB
+ :rtype: list of dictionaries
- # update database admin password
- # curl -X POST http://localhost:8086/db/site_dev/admins/paul?u=root&p=root\
- # -d '{"password": "new pass"}'
+ :Example:
- # delete database admin
- # curl -X DELETE \
- # http://localhost:8086/db/site_dev/admins/paul?u=root&p=root
+ ::
- def get_list_cluster_admins(self):
+ >> users = client.get_list_users()
+ >> users
+ [{u'admin': True, u'user': u'user1'},
+ {u'admin': False, u'user': u'user2'},
+ {u'admin': False, u'user': u'user3'}]
"""
- Get list of cluster admins
- """
- response = self.request(
- url="cluster_admins",
- method='GET',
- expected_response_code=200
- )
+ return list(self.query("SHOW USERS").get_points())
- return response.json()
+ def create_user(self, username, password, admin=False):
+ """Create a new user in InfluxDB.
- def add_cluster_admin(self, new_username, new_password):
+ :param username: the new username to create
+ :type username: str
+ :param password: the password for the new user
+ :type password: str
+ :param admin: whether the user should have cluster administration
+ privileges or not
+ :type admin: boolean
"""
- Add cluster admin
- """
- data = {
- 'name': new_username,
- 'password': new_password
- }
+ text = "CREATE USER {0} WITH PASSWORD {1}".format(
+ quote_ident(username), quote_literal(password))
+ if admin:
+ text += ' WITH ALL PRIVILEGES'
+ self.query(text, method="POST")
- self.request(
- url="cluster_admins",
- method='POST',
- data=data,
- expected_response_code=200
- )
+ def drop_user(self, username):
+ """Drop a user from InfluxDB.
- return True
-
- def update_cluster_admin_password(self, username, new_password):
- """
- Update cluster admin password
+ :param username: the username to drop
+ :type username: str
"""
- url = "cluster_admins/{0}".format(username)
-
- data = {
- 'password': new_password
- }
+ text = "DROP USER {0}".format(quote_ident(username))
+ self.query(text, method="POST")
- self.request(
- url=url,
- method='POST',
- data=data,
- expected_response_code=200
- )
-
- return True
+ def set_user_password(self, username, password):
+ """Change the password of an existing user.
- def delete_cluster_admin(self, username):
+ :param username: the username whose password is being changed
+ :type username: str
+ :param password: the new password for the user
+ :type password: str
"""
- Delete cluster admin
- """
- url = "cluster_admins/{0}".format(username)
+ text = "SET PASSWORD FOR {0} = {1}".format(
+ quote_ident(username), quote_literal(password))
+ self.query(text)
- self.request(
- url=url,
- method='DELETE',
- expected_response_code=200
- )
+ def delete_series(self, database=None, measurement=None, tags=None):
+ """Delete series from a database.
- return True
+ Series must be filtered by measurement, tags, or both.
+ This method cannot be used to delete all series, use
+ `drop_database` instead.
- def set_database_admin(self, username):
+ :param database: the database from which the series should be
+ deleted, defaults to client's current database
+ :type database: str
+ :param measurement: Delete all series from a measurement
+ :type measurement: str
+ :param tags: Delete all series that match given tags
+ :type tags: dict
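+
+ :Example:
+
+ ::
+
+ >> # hypothetical filter, shown for illustration
+ >> client.delete_series(measurement='cpu',
+ ... tags={'host': 'server01'})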
"""
- Set user as database admin
- """
- return self.alter_database_admin(username, True)
+ database = database or self._database
+ query_str = 'DROP SERIES'
+ if measurement:
+ query_str += ' FROM {0}'.format(quote_ident(measurement))
- def unset_database_admin(self, username):
- """
- Unset user as database admin
- """
- return self.alter_database_admin(username, False)
+ if tags:
+ tag_eq_list = ["{0}={1}".format(quote_ident(k), quote_literal(v))
+ for k, v in tags.items()]
+ query_str += ' WHERE ' + ' AND '.join(tag_eq_list)
+ self.query(query_str, database=database, method="POST")
- def alter_database_admin(self, username, is_admin):
- url = "db/{0}/users/{1}".format(self._database, username)
+ def grant_admin_privileges(self, username):
+ """Grant cluster administration privileges to a user.
- data = {'admin': is_admin}
+ :param username: the username to grant privileges to
+ :type username: str
- self.request(
- url=url,
- method='POST',
- data=data,
- expected_response_code=200
- )
-
- return True
-
- def get_list_database_admins(self):
+ .. note:: Only a cluster administrator can create/drop databases
+ and manage users.
"""
- TODO: Get list of database admins
+ text = "GRANT ALL PRIVILEGES TO {0}".format(quote_ident(username))
+ self.query(text, method="POST")
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
- """
- raise NotImplementedError()
+ def revoke_admin_privileges(self, username):
+ """Revoke cluster administration privileges from a user.
- def add_database_admin(self, new_username, new_password):
- """
- TODO: Add cluster admin
+ :param username: the username to revoke privileges from
+ :type username: str
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
+ .. note:: Only a cluster administrator can create/drop databases
+ and manage users.
"""
- raise NotImplementedError()
+ text = "REVOKE ALL PRIVILEGES FROM {0}".format(quote_ident(username))
+ self.query(text, method="POST")
- def update_database_admin_password(self, username, new_password):
- """
- TODO: Update database admin password
+ def grant_privilege(self, privilege, database, username):
+ """Grant a privilege on a database to a user.
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
+ :param privilege: the privilege to grant, one of 'read', 'write'
+ or 'all'. The string is case-insensitive
+ :type privilege: str
+ :param database: the database to grant the privilege on
+ :type database: str
+ :param username: the username to grant the privilege to
+ :type username: str
"""
- raise NotImplementedError()
+ text = "GRANT {0} ON {1} TO {2}".format(privilege,
+ quote_ident(database),
+ quote_ident(username))
+ self.query(text, method="POST")
- def delete_database_admin(self, username):
- """
- TODO: Delete database admin
+ def revoke_privilege(self, privilege, database, username):
+ """Revoke a privilege on a database from a user.
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
+ :param privilege: the privilege to revoke, one of 'read', 'write'
+ or 'all'. The string is case-insensitive
+ :type privilege: str
+ :param database: the database to revoke the privilege on
+ :type database: str
+ :param username: the username to revoke the privilege from
+ :type username: str
"""
- raise NotImplementedError()
-
- ###
- # Limiting User Access
-
- # Database users
- # get list of database users
- # curl http://localhost:8086/db/site_dev/users?u=root&p=root
-
- # add database user
- # curl -X POST http://localhost:8086/db/site_dev/users?u=root&p=root \
- # -d '{"name": "paul", "password": "i write teh docz"}'
-
- # update database user password
- # curl -X POST http://localhost:8086/db/site_dev/users/paul?u=root&p=root \
- # -d '{"password": "new pass"}'
+ text = "REVOKE {0} ON {1} FROM {2}".format(privilege,
+ quote_ident(database),
+ quote_ident(username))
+ self.query(text, method="POST")
- # delete database user
- # curl -X DELETE http://localhost:8086/db/site_dev/users/paul?u=root&p=root
+ def get_list_privileges(self, username):
+ """Get the list of all privileges granted to given user.
- def get_database_users(self):
- """
- Get list of database users
- """
- url = "db/{0}/users".format(self._database)
+ :param username: the username to get privileges of
+ :type username: str
- response = self.request(
- url=url,
- method='GET',
- expected_response_code=200
- )
+ :returns: all privileges granted to given user
+ :rtype: list of dictionaries
- return response.json()
+ :Example:
- def add_database_user(self, new_username, new_password, permissions=None):
- """
- Add database user
+ ::
- :param permissions: A ``(readFrom, writeTo)`` tuple
+ >> privileges = client.get_list_privileges('user1')
+ >> privileges
+ [{u'privilege': u'WRITE', u'database': u'db1'},
+ {u'privilege': u'ALL PRIVILEGES', u'database': u'db2'},
+ {u'privilege': u'NO PRIVILEGES', u'database': u'db3'}]
"""
- url = "db/{0}/users".format(self._database)
+ text = "SHOW GRANTS FOR {0}".format(quote_ident(username))
+ return list(self.query(text).get_points())
- data = {
- 'name': new_username,
- 'password': new_password
- }
-
- if permissions:
- try:
- data['readFrom'], data['writeTo'] = permissions
- except (ValueError, TypeError):
- raise TypeError(
- "'permissions' must be (readFrom, writeTo) tuple"
- )
-
- self.request(
- url=url,
- method='POST',
- data=data,
- expected_response_code=200
- )
-
- return True
+ def get_list_continuous_queries(self):
+ """Get the list of continuous queries in InfluxDB.
+
+ :return: all CQs in InfluxDB
+ :rtype: list of dictionaries
+
+ :Example:
+
+ ::
+
+ >> cqs = client.get_list_continuous_queries()
+ >> cqs
+ [
+ {
+ u'db1': []
+ },
+ {
+ u'db2': [
+ {
+ u'name': u'vampire',
+ u'query': u'CREATE CONTINUOUS QUERY vampire ON '
+ 'mydb BEGIN SELECT count(dracula) INTO '
+ 'mydb.autogen.all_of_them FROM '
+ 'mydb.autogen.one GROUP BY time(5m) END'
+ }
+ ]
+ }
+ ]
+ """
+ query_string = "SHOW CONTINUOUS QUERIES"
+ return [{sk[0]: list(p)} for sk, p in self.query(query_string).items()]
+
+ def create_continuous_query(self, name, select, database=None,
+ resample_opts=None):
+ r"""Create a continuous query for a database.
+
+ :param name: the name of continuous query to create
+ :type name: str
+ :param select: select statement for the continuous query
+ :type select: str
+ :param database: the database for which the continuous query is
+ created. Defaults to current client's database
+ :type database: str
+ :param resample_opts: resample options
+ :type resample_opts: str
+
+ :Example:
+
+ ::
+
+ >> select_clause = 'SELECT mean("value") INTO "cpu_mean" ' \
+ ... 'FROM "cpu" GROUP BY time(1m)'
+ >> client.create_continuous_query(
+ ... 'cpu_mean', select_clause, 'db_name', 'EVERY 10s FOR 2m'
+ ... )
+ >> client.get_list_continuous_queries()
+ [
+ {
+ 'db_name': [
+ {
+ 'name': 'cpu_mean',
+ 'query': 'CREATE CONTINUOUS QUERY "cpu_mean" '
+ 'ON "db_name" '
+ 'RESAMPLE EVERY 10s FOR 2m '
+ 'BEGIN SELECT mean("value") '
+ 'INTO "cpu_mean" FROM "cpu" '
+ 'GROUP BY time(1m) END'
+ }
+ ]
+ }
+ ]
+ """
+ query_string = (
+ "CREATE CONTINUOUS QUERY {0} ON {1}{2} BEGIN {3} END"
+ ).format(quote_ident(name), quote_ident(database or self._database),
+ ' RESAMPLE ' + resample_opts if resample_opts else '', select)
+ self.query(query_string)
+
+ def drop_continuous_query(self, name, database=None):
+ """Drop an existing continuous query for a database.
+
+ :param name: the name of continuous query to drop
+ :type name: str
+ :param database: the database for which the continuous query is
+ dropped. Defaults to current client's database
+ :type database: str
+ """
+ query_string = (
+ "DROP CONTINUOUS QUERY {0} ON {1}"
+ ).format(quote_ident(name), quote_ident(database or self._database))
+ self.query(query_string)
+
+ def send_packet(self, packet, protocol='json', time_precision=None):
+ """Send an UDP packet.
+
+ :param packet: the packet to be sent
+ :type packet: (if protocol is 'json') dict
+ (if protocol is 'line') list of line protocol strings
+ :param protocol: protocol of input data, either 'json' or 'line'
+ :type protocol: str
+ :param time_precision: Either 's', 'm', 'ms' or 'u', defaults to None
+ :type time_precision: str
+ """
+ if protocol == 'json':
+ data = make_lines(packet, time_precision).encode('utf-8')
+ elif protocol == 'line':
+ data = ('\n'.join(packet) + '\n').encode('utf-8')
+ self.udp_socket.sendto(data, (self._host, self._udp_port))
+
+ def close(self):
+ """Close http session."""
+ if isinstance(self._session, requests.Session):
+ self._session.close()
+
+
+def _parse_dsn(dsn):
+ """Parse data source name.
+
+ This is a helper function to split the data source name provided in
+ the from_dsn classmethod.
+ """
+ conn_params = urlparse(dsn)
+ init_args = {}
+ scheme_info = conn_params.scheme.split('+')
+ if len(scheme_info) == 1:
+ scheme = scheme_info[0]
+ modifier = None
+ else:
+ modifier, scheme = scheme_info
+
+ if scheme != 'influxdb':
+ raise ValueError('Unknown scheme "{0}".'.format(scheme))
+
+ if modifier:
+ if modifier == 'udp':
+ init_args['use_udp'] = True
+ elif modifier == 'https':
+ init_args['ssl'] = True
+ else:
+ raise ValueError('Unknown modifier "{0}".'.format(modifier))
- def update_database_user_password(self, username, new_password):
- """
- Update password
- """
- url = "db/{0}/users/{1}".format(self._database, username)
+ netlocs = conn_params.netloc.split(',')
- data = {
- 'password': new_password
- }
+ init_args['hosts'] = []
+ for netloc in netlocs:
+ parsed = _parse_netloc(netloc)
+ init_args['hosts'].append((parsed['host'], int(parsed['port'])))
+ init_args['username'] = parsed['username']
+ init_args['password'] = parsed['password']
- self.request(
- url=url,
- method='POST',
- data=data,
- expected_response_code=200
- )
+ if conn_params.path and len(conn_params.path) > 1:
+ init_args['database'] = conn_params.path[1:]
- if username == self._username:
- self._password = new_password
+ return init_args
- return True
- def delete_database_user(self, username):
- """
- Delete database user
- """
- url = "db/{0}/users/{1}".format(self._database, username)
+def _parse_netloc(netloc):
+ info = urlparse("http://{0}".format(netloc))
+ return {'username': info.username or None,
+ 'password': info.password or None,
+ 'host': info.hostname or 'localhost',
+ 'port': info.port or 8086}
- self.request(
- url=url,
- method='DELETE',
- expected_response_code=200
- )
- return True
+def _msgpack_parse_hook(code, data):
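+ # InfluxDB's msgpack responses encode timestamps as ext type 5: an
+ # unsigned 64-bit seconds value followed by a 32-bit nanoseconds
+ # remainder (">QI"), reassembled into an RFC3339-style string below.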
+ if code == 5:
+ (epoch_s, epoch_ns) = struct.unpack(">QI", data)
+ timestamp = datetime.datetime.utcfromtimestamp(epoch_s)
+ timestamp += datetime.timedelta(microseconds=(epoch_ns / 1000))
+ return timestamp.isoformat() + 'Z'
+ return msgpack.ExtType(code, data)
- # update the user by POSTing to db/site_dev/users/paul
- def update_permission(self, username, json_body):
- """
- TODO: Update read/write permission
+class _SocketOptionsAdapter(HTTPAdapter):
+ """_SocketOptionsAdapter injects socket_options into HTTP Adapter."""
- 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
- but it is documented in http://influxdb.org/docs/api/http.html.
- See also: src/api/http/api.go:l57
- """
- raise NotImplementedError()
+ def __init__(self, *args, **kwargs):
+ self.socket_options = kwargs.pop("socket_options", None)
+ super(_SocketOptionsAdapter, self).__init__(*args, **kwargs)
- def send_packet(self, packet):
- data = json.dumps(packet)
- byte = data.encode('utf-8')
- self.udp_socket.sendto(byte, (self._host, self.udp_port))
+ def init_poolmanager(self, *args, **kwargs):
+ if self.socket_options is not None:
+ kwargs["socket_options"] = self.socket_options
+ super(_SocketOptionsAdapter, self).init_poolmanager(*args, **kwargs)
diff --git a/influxdb/dataframe_client.py b/influxdb/dataframe_client.py
new file mode 100644
index 00000000..babfe0dd
--- /dev/null
+++ b/influxdb/dataframe_client.py
@@ -0,0 +1,28 @@
+# -*- coding: utf-8 -*-
+"""DataFrame client for InfluxDB."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+__all__ = ['DataFrameClient']
+
+try:
+ import pandas
+ del pandas
+except ImportError as err:
+ from .client import InfluxDBClient
+
+ class DataFrameClient(InfluxDBClient):
+ """DataFrameClient default class instantiation."""
+
+ err = err
+
+ def __init__(self, *a, **kw):
+ """Initialize the default DataFrameClient."""
+ super(DataFrameClient, self).__init__()
+ raise ImportError("DataFrameClient requires Pandas "
+ "which couldn't be imported: %s" % self.err)
+else:
+ from ._dataframe_client import DataFrameClient # type: ignore
diff --git a/influxdb/exceptions.py b/influxdb/exceptions.py
new file mode 100644
index 00000000..bd71d301
--- /dev/null
+++ b/influxdb/exceptions.py
@@ -0,0 +1,35 @@
+# -*- coding: utf-8 -*-
+"""Exception handler for InfluxDBClient."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+
+class InfluxDBClientError(Exception):
+ """Raised when an error occurs in the request."""
+
+ def __init__(self, content, code=None):
+ """Initialize the InfluxDBClientError handler."""
+ if isinstance(content, type(b'')):
+ content = content.decode('UTF-8', 'replace')
+
+ if code is not None:
+ message = "%s: %s" % (code, content)
+ else:
+ message = content
+
+ super(InfluxDBClientError, self).__init__(
+ message
+ )
+ self.content = content
+ self.code = code
+
+
+class InfluxDBServerError(Exception):
+ """Raised when a server error occurs."""
+
+ def __init__(self, content):
+ """Initialize the InfluxDBServerError handler."""
+ super(InfluxDBServerError, self).__init__(content)
diff --git a/influxdb/helper.py b/influxdb/helper.py
new file mode 100644
index 00000000..138cf6e8
--- /dev/null
+++ b/influxdb/helper.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+"""Helper class for InfluxDB."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from collections import namedtuple, defaultdict
+from datetime import datetime
+from warnings import warn
+
+import six
+
+
+class SeriesHelper(object):
+ """Subclass this helper eases writing data points in bulk.
+
+ All data points are immutable, ensuring they do not get overwritten.
+ Each subclass can write to its own database.
+ The time series names can also be based on one or more defined fields.
+ The field "time" can be specified when creating a point, and may be any of
+ the time types supported by the client (i.e. str, datetime, int).
+ If the time is not specified, the current system time (utc) will be used.
+
+ Annotated example::
+
+ class MySeriesHelper(SeriesHelper):
+ class Meta:
+ # Meta class stores time series helper configuration.
+ series_name = 'events.stats.{server_name}'
+ # Series name must be a string, curly brackets for dynamic use.
+ fields = ['time', 'server_name']
+ # Defines all the fields in this time series.
+ ### Following attributes are optional. ###
+ client = TestSeriesHelper.client
+ # Client should be an instance of InfluxDBClient.
+ :warning: Only used if autocommit is True.
+ bulk_size = 5
+ # Defines the number of data points to write simultaneously.
+ # Only applicable if autocommit is True.
+ autocommit = True
+ # If True and no bulk_size, then will set bulk_size to 1.
+ retention_policy = 'your_retention_policy'
+ # Specify the retention policy for the data points
+ time_precision = "h"|"m"|s"|"ms"|"u"|"ns"
+ # Default is ns (nanoseconds)
+ # Setting time precision while writing point
+ # You should also make sure time is set in the given precision
+
+ """
+
+ __initialized__ = False
+
+ def __new__(cls, *args, **kwargs):
+ """Initialize class attributes for subsequent constructor calls.
+
+ :note: *args and **kwargs are not explicitly used in this function,
+ but needed for Python 2 compatibility.
+ """
+ if not cls.__initialized__:
+ cls.__initialized__ = True
+ try:
+ _meta = getattr(cls, 'Meta')
+ except AttributeError:
+ raise AttributeError(
+ 'Missing Meta class in {0}.'.format(
+ cls.__name__))
+
+ for attr in ['series_name', 'fields', 'tags']:
+ try:
+ setattr(cls, '_' + attr, getattr(_meta, attr))
+ except AttributeError:
+ raise AttributeError(
+ 'Missing {0} in {1} Meta class.'.format(
+ attr,
+ cls.__name__))
+
+ cls._autocommit = getattr(_meta, 'autocommit', False)
+ cls._time_precision = getattr(_meta, 'time_precision', None)
+
+ allowed_time_precisions = ['h', 'm', 's', 'ms', 'u', 'ns', None]
+ if cls._time_precision not in allowed_time_precisions:
+                raise AttributeError(
+                    'In {0}, time_precision is set but invalid; use any '
+                    'of {1}.'.format(
+                        cls.__name__,
+                        ', '.join(p for p in allowed_time_precisions if p)))
+
+ cls._retention_policy = getattr(_meta, 'retention_policy', None)
+
+ cls._client = getattr(_meta, 'client', None)
+ if cls._autocommit and not cls._client:
+ raise AttributeError(
+ 'In {0}, autocommit is set to True, but no client is set.'
+ .format(cls.__name__))
+
+ try:
+ cls._bulk_size = getattr(_meta, 'bulk_size')
+ if cls._bulk_size < 1 and cls._autocommit:
+ warn(
+ 'Definition of bulk_size in {0} forced to 1, '
+ 'was less than 1.'.format(cls.__name__))
+ cls._bulk_size = 1
+ except AttributeError:
+ cls._bulk_size = -1
+ else:
+ if not cls._autocommit:
+ warn(
+                        'Definition of bulk_size in {0} has no effect because'
+ ' autocommit is false.'.format(cls.__name__))
+
+ cls._datapoints = defaultdict(list)
+
+ if 'time' in cls._fields:
+ cls._fields.remove('time')
+ cls._type = namedtuple(cls.__name__,
+ ['time'] + cls._tags + cls._fields)
+ cls._type.__new__.__defaults__ = (None,) * len(cls._fields)
+
+ return super(SeriesHelper, cls).__new__(cls)
+
+ def __init__(self, **kw):
+ """Call to constructor creates a new data point.
+
+ :note: Data points written when `bulk_size` is reached per Helper.
+ :warning: Data points are *immutable* (`namedtuples`).
+ """
+ cls = self.__class__
+ timestamp = kw.pop('time', self._current_timestamp())
+ tags = set(cls._tags)
+ fields = set(cls._fields)
+ keys = set(kw.keys())
+
+        # all tags must be passed; keys - tags must be a subset of fields
+ if not (tags <= keys):
+ raise NameError(
+ 'Expected arguments to contain all tags {0}, instead got {1}.'
+ .format(cls._tags, kw.keys()))
+ if not (keys - tags <= fields):
+ raise NameError('Got arguments not in tags or fields: {0}'
+ .format(keys - tags - fields))
+
+ cls._datapoints[cls._series_name.format(**kw)].append(
+ cls._type(time=timestamp, **kw)
+ )
+
+ if cls._autocommit and \
+ sum(len(series) for series in cls._datapoints.values()) \
+ >= cls._bulk_size:
+ cls.commit()
+
+ @classmethod
+ def commit(cls, client=None):
+ """Commit everything from datapoints via the client.
+
+ :param client: InfluxDBClient instance for writing points to InfluxDB.
+ :attention: any provided client will supersede the class client.
+ :return: result of client.write_points.
+ """
+ if not client:
+ client = cls._client
+
+ rtn = client.write_points(
+ cls._json_body_(),
+ time_precision=cls._time_precision,
+ retention_policy=cls._retention_policy)
+ # will be None if not set and will default to ns
+ cls._reset_()
+ return rtn
+
+ @classmethod
+ def _json_body_(cls):
+ """Return the JSON body of given datapoints.
+
+ :return: JSON body of these datapoints.
+ """
+ json = []
+ if not cls.__initialized__:
+ cls._reset_()
+ for series_name, data in six.iteritems(cls._datapoints):
+ for point in data:
+ json_point = {
+ "measurement": series_name,
+ "fields": {},
+ "tags": {},
+ "time": getattr(point, "time")
+ }
+
+ for field in cls._fields:
+ value = getattr(point, field)
+ if value is not None:
+ json_point['fields'][field] = value
+
+ for tag in cls._tags:
+ json_point['tags'][tag] = getattr(point, tag)
+
+ json.append(json_point)
+ return json
+
+ @classmethod
+ def _reset_(cls):
+ """Reset data storage."""
+ cls._datapoints = defaultdict(list)
+
+ @staticmethod
+ def _current_timestamp():
+ return datetime.utcnow()
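+
+
+# Illustrative sketch, not part of this commit: a minimal subclass, assuming
+# an InfluxDBClient instance named ``client`` already exists:
+#
+#     class CPULoad(SeriesHelper):
+#         class Meta:
+#             client = client
+#             series_name = 'cpu.load.{host}'
+#             fields = ['value']
+#             tags = ['host']
+#             bulk_size = 5
+#             autocommit = True
+#
+#     CPULoad(host='server01', value=0.64)
+#     # points accumulate and are flushed once five of them are queued;
+#     # CPULoad.commit() forces an immediate write.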
diff --git a/influxdb/influxdb08/__init__.py b/influxdb/influxdb08/__init__.py
new file mode 100644
index 00000000..f4e6c082
--- /dev/null
+++ b/influxdb/influxdb08/__init__.py
@@ -0,0 +1,18 @@
+# -*- coding: utf-8 -*-
+"""Define the influxdb08 package."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from .client import InfluxDBClient
+from .dataframe_client import DataFrameClient
+from .helper import SeriesHelper
+
+
+__all__ = [
+ 'InfluxDBClient',
+ 'DataFrameClient',
+ 'SeriesHelper',
+]
diff --git a/influxdb/influxdb08/chunked_json.py b/influxdb/influxdb08/chunked_json.py
new file mode 100644
index 00000000..d6847de1
--- /dev/null
+++ b/influxdb/influxdb08/chunked_json.py
@@ -0,0 +1,27 @@
+# -*- coding: utf-8 -*-
+"""Module to generate chunked JSON replies for influxdb08."""
+
+#
+# Author: Adrian Sampson
+# Source: https://gist.github.com/sampsyo/920215
+#
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import json
+
+
+def loads(s):
+ """Generate a sequence of JSON values from a string."""
+ _decoder = json.JSONDecoder()
+
+ while s:
+ s = s.strip()
+ obj, pos = _decoder.raw_decode(s)
+ if not pos:
+ raise ValueError('no JSON object found at %i' % pos)
+ yield obj
+ s = s[pos:]
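+
+
+# Illustrative sketch, not part of this commit: two concatenated JSON
+# documents decode into a sequence of Python objects:
+#
+#     >>> list(loads('{"a": 1} {"b": 2}'))
+#     [{'a': 1}, {'b': 2}]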
diff --git a/influxdb/influxdb08/client.py b/influxdb/influxdb08/client.py
new file mode 100644
index 00000000..40c58145
--- /dev/null
+++ b/influxdb/influxdb08/client.py
@@ -0,0 +1,843 @@
+# -*- coding: utf-8 -*-
+"""Python client for InfluxDB v0.8."""
+
+import warnings
+
+import json
+import socket
+import requests
+import requests.exceptions
+from six.moves import xrange
+from six.moves.urllib.parse import urlparse
+
+from influxdb import chunked_json
+
+session = requests.Session()
+
+
+class InfluxDBClientError(Exception):
+ """Raised when an error occurs in the request."""
+
+ def __init__(self, content, code=-1):
+ """Initialize an InfluxDBClientError handler."""
+ super(InfluxDBClientError, self).__init__(
+ "{0}: {1}".format(code, content))
+ self.content = content
+ self.code = code
+
+
+class InfluxDBClient(object):
+ """Define the standard InfluxDBClient for influxdb v0.8.
+
+ The ``InfluxDBClient`` object holds information necessary to connect
+ to InfluxDB. Requests can be made to InfluxDB directly through the client.
+
+    :param host: hostname to connect to InfluxDB, defaults to 'localhost'
+    :type host: string
+    :param port: port to connect to InfluxDB, defaults to 8086
+    :type port: int
+    :param username: user to connect, defaults to 'root'
+    :type username: string
+    :param password: password of the user, defaults to 'root'
+    :type password: string
+    :param database: database name to connect to, defaults to None
+    :type database: string
+    :param ssl: use https instead of http to connect to InfluxDB, defaults
+        to False
+    :type ssl: boolean
+    :param verify_ssl: verify SSL certificates for HTTPS requests, defaults
+        to False
+    :type verify_ssl: boolean
+    :param retries: number of retries your client will try before aborting,
+        defaults to 3. 0 indicates try until success
+    :type retries: int
+    :param timeout: number of seconds Requests will wait for your client to
+        establish a connection, defaults to None
+    :type timeout: int
+    :param use_udp: use UDP to connect to InfluxDB, defaults to False
+    :type use_udp: boolean
+    :param udp_port: UDP port to connect to InfluxDB, defaults to 4444
+    :type udp_port: int
+ """
+
+ def __init__(self,
+ host='localhost',
+ port=8086,
+ username='root',
+ password='root',
+ database=None,
+ ssl=False,
+ verify_ssl=False,
+ timeout=None,
+ retries=3,
+ use_udp=False,
+ udp_port=4444):
+ """Construct a new InfluxDBClient object."""
+ self._host = host
+ self._port = port
+ self._username = username
+ self._password = password
+ self._database = database
+ self._timeout = timeout
+ self._retries = retries
+
+ self._verify_ssl = verify_ssl
+
+ self._use_udp = use_udp
+ self._udp_port = udp_port
+ if use_udp:
+ self.udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+
+ self._scheme = "http"
+
+ if ssl is True:
+ self._scheme = "https"
+
+ self._baseurl = "{0}://{1}:{2}".format(
+ self._scheme,
+ self._host,
+ self._port)
+
+ self._headers = {
+ 'Content-type': 'application/json',
+ 'Accept': 'text/plain'}
+
+ @staticmethod
+ def from_dsn(dsn, **kwargs):
+ r"""Return an instaance of InfluxDBClient from given data source name.
+
+ Returns an instance of InfluxDBClient from the provided data source
+ name. Supported schemes are "influxdb", "https+influxdb",
+ "udp+influxdb". Parameters for the InfluxDBClient constructor may be
+ also be passed to this function.
+
+ Examples:
+ >> cli = InfluxDBClient.from_dsn('influxdb://username:password@\
+ ... localhost:8086/databasename', timeout=5)
+ >> type(cli)
+        <class 'influxdb.influxdb08.client.InfluxDBClient'>
+
+ >> cli = InfluxDBClient.from_dsn('udp+influxdb://username:pass@\
+ ... localhost:8086/databasename', timeout=5, udp_port=159)
+        >> print('{0._baseurl} - {0._use_udp} {0._udp_port}'.format(cli))
+ http://localhost:8086 - True 159
+
+ :param dsn: data source name
+ :type dsn: string
+ :param **kwargs: additional parameters for InfluxDBClient.
+ :type **kwargs: dict
+ :note: parameters provided in **kwargs may override dsn parameters.
+ :note: when using "udp+influxdb" the specified port (if any) will be
+ used for the TCP connection; specify the udp port with the additional
+ udp_port parameter (cf. examples).
+ :raise ValueError: if the provided DSN has any unexpected value.
+
+ """
+ init_args = {}
+ conn_params = urlparse(dsn)
+ scheme_info = conn_params.scheme.split('+')
+
+ if len(scheme_info) == 1:
+ scheme = scheme_info[0]
+ modifier = None
+ else:
+ modifier, scheme = scheme_info
+
+ if scheme != 'influxdb':
+ raise ValueError('Unknown scheme "{0}".'.format(scheme))
+
+ if modifier:
+ if modifier == 'udp':
+ init_args['use_udp'] = True
+ elif modifier == 'https':
+ init_args['ssl'] = True
+ else:
+ raise ValueError('Unknown modifier "{0}".'.format(modifier))
+
+ if conn_params.hostname:
+ init_args['host'] = conn_params.hostname
+ if conn_params.port:
+ init_args['port'] = conn_params.port
+ if conn_params.username:
+ init_args['username'] = conn_params.username
+ if conn_params.password:
+ init_args['password'] = conn_params.password
+ if conn_params.path and len(conn_params.path) > 1:
+ init_args['database'] = conn_params.path[1:]
+
+ init_args.update(kwargs)
+
+ return InfluxDBClient(**init_args)
+
+ # Change member variables
+
+ def switch_database(self, database):
+ """Change client database.
+
+ :param database: the new database name to switch to
+ :type database: string
+ """
+ self._database = database
+
+ def switch_db(self, database):
+ """Change client database.
+
+ DEPRECATED.
+ """
+ warnings.warn(
+ "switch_db is deprecated, and will be removed "
+ "in future versions. Please use "
+ "``InfluxDBClient.switch_database(database)`` instead.",
+ FutureWarning)
+ return self.switch_database(database)
+
+ def switch_user(self, username, password):
+ """Change client username.
+
+ :param username: the new username to switch to
+ :type username: string
+ :param password: the new password to switch to
+ :type password: string
+ """
+ self._username = username
+ self._password = password
+
+ def request(self, url, method='GET', params=None, data=None,
+ expected_response_code=200):
+ """Make a http request to API."""
+ url = "{0}/{1}".format(self._baseurl, url)
+
+ if params is None:
+ params = {}
+
+ auth = {
+ 'u': self._username,
+ 'p': self._password
+ }
+
+ params.update(auth)
+
+ if data is not None and not isinstance(data, str):
+ data = json.dumps(data)
+
+ retry = True
+ _try = 0
+ # Try to send the request more than once by default (see #103)
+ while retry:
+ try:
+ response = session.request(
+ method=method,
+ url=url,
+ params=params,
+ data=data,
+ headers=self._headers,
+ verify=self._verify_ssl,
+ timeout=self._timeout
+ )
+ break
+ except (requests.exceptions.ConnectionError,
+ requests.exceptions.Timeout):
+ _try += 1
+ if self._retries != 0:
+ retry = _try < self._retries
+        else:
+            # while-else: every retry failed, so give up with an error
+            raise requests.exceptions.ConnectionError
+
+ if response.status_code == expected_response_code:
+ return response
+ else:
+ raise InfluxDBClientError(response.content, response.status_code)
+
+ def write(self, data):
+ """Provide as convenience for influxdb v0.9.0, this may change."""
+ self.request(
+ url="write",
+ method='POST',
+ params=None,
+ data=data,
+ expected_response_code=200
+ )
+ return True
+
+ # Writing Data
+ #
+ # Assuming you have a database named foo_production you can write data
+ # by doing a POST to /db/foo_production/series?u=some_user&p=some_password
+ # with a JSON body of points.
+
+ def write_points(self, data, time_precision='s', *args, **kwargs):
+ """Write to multiple time series names.
+
+ An example data blob is:
+
+ data = [
+ {
+ "points": [
+ [
+ 12
+ ]
+ ],
+ "name": "cpu_load_short",
+ "columns": [
+ "value"
+ ]
+ }
+ ]
+
+ :param data: A list of dicts in InfluxDB 0.8.x data format.
+ :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
+ or 'u'.
+ :param batch_size: [Optional] Value to write the points in batches
+ instead of all at one time. Useful for when doing data dumps from
+ one database to another or when doing a massive write operation
+ :type batch_size: int
+
+ """
+ def list_chunks(data_list, n):
+ """Yield successive n-sized chunks from l."""
+ for i in xrange(0, len(data_list), n):
+ yield data_list[i:i + n]
+
+ batch_size = kwargs.get('batch_size')
+ if batch_size and batch_size > 0:
+ for item in data:
+ name = item.get('name')
+ columns = item.get('columns')
+ point_list = item.get('points', [])
+
+ for batch in list_chunks(point_list, batch_size):
+ item = [{
+ "points": batch,
+ "name": name,
+ "columns": columns
+ }]
+ self._write_points(
+ data=item,
+ time_precision=time_precision)
+ return True
+
+ return self._write_points(data=data,
+ time_precision=time_precision)
+
+ def write_points_with_precision(self, data, time_precision='s'):
+ """Write to multiple time series names.
+
+ DEPRECATED.
+ """
+ warnings.warn(
+ "write_points_with_precision is deprecated, and will be removed "
+ "in future versions. Please use "
+ "``InfluxDBClient.write_points(time_precision='..')`` instead.",
+ FutureWarning)
+ return self._write_points(data=data, time_precision=time_precision)
+
+ def _write_points(self, data, time_precision):
+ if time_precision not in ['s', 'm', 'ms', 'u']:
+            raise Exception(
+                "Invalid time precision given. (use 's', 'm', 'ms' or 'u')")
+
+ if self._use_udp and time_precision != 's':
+ raise Exception(
+ "InfluxDB only supports seconds precision for udp writes"
+ )
+
+ url = "db/{0}/series".format(self._database)
+
+ params = {
+ 'time_precision': time_precision
+ }
+
+ if self._use_udp:
+ self.send_packet(data)
+ else:
+ self.request(
+ url=url,
+ method='POST',
+ params=params,
+ data=data,
+ expected_response_code=200
+ )
+
+ return True
+
+ # One Time Deletes
+
+ def delete_points(self, name):
+ """Delete an entire series."""
+ url = "db/{0}/series/{1}".format(self._database, name)
+
+ self.request(
+ url=url,
+ method='DELETE',
+ expected_response_code=204
+ )
+
+ return True
+
+ # Regularly Scheduled Deletes
+
+ def create_scheduled_delete(self, json_body):
+ """Create schedule delete from database.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ # get list of deletes
+ # curl http://localhost:8086/db/site_dev/scheduled_deletes
+ #
+ # remove a regularly scheduled delete
+ # curl -X DELETE http://localhost:8086/db/site_dev/scheduled_deletes/:id
+
+ def get_list_scheduled_delete(self):
+ """Get list of scheduled deletes.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ def remove_scheduled_delete(self, delete_id):
+ """Remove scheduled delete.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ def query(self, query, time_precision='s', chunked=False):
+ """Query data from the influxdb v0.8 database.
+
+ :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
+ or 'u'.
+ :param chunked: [Optional, default=False] True if the data shall be
+ retrieved in chunks, False otherwise.
+ """
+ return self._query(query, time_precision=time_precision,
+ chunked=chunked)
+
+ # Querying Data
+ #
+ # GET db/:name/series. It takes five parameters
+ def _query(self, query, time_precision='s', chunked=False):
+ if time_precision not in ['s', 'm', 'ms', 'u']:
+            raise Exception(
+                "Invalid time precision given. (use 's', 'm', 'ms' or 'u')")
+
+ if chunked is True:
+ chunked_param = 'true'
+ else:
+ chunked_param = 'false'
+
+ # Build the URL of the series to query
+ url = "db/{0}/series".format(self._database)
+
+ params = {
+ 'q': query,
+ 'time_precision': time_precision,
+ 'chunked': chunked_param
+ }
+
+ response = self.request(
+ url=url,
+ method='GET',
+ params=params,
+ expected_response_code=200
+ )
+
+ if chunked:
+ try:
+ decoded = chunked_json.loads(response.content.decode())
+ except UnicodeDecodeError:
+ decoded = chunked_json.loads(response.content.decode('utf-8'))
+
+ return list(decoded)
+
+ return response.json()
+
+ # Creating and Dropping Databases
+ #
+ # ### create a database
+ # curl -X POST http://localhost:8086/db -d '{"name": "site_development"}'
+ #
+ # ### drop a database
+ # curl -X DELETE http://localhost:8086/db/site_development
+
+ def create_database(self, database):
+ """Create a database on the InfluxDB server.
+
+ :param database: the name of the database to create
+ :type database: string
+ :rtype: boolean
+ """
+ url = "db"
+
+ data = {'name': database}
+
+ self.request(
+ url=url,
+ method='POST',
+ data=data,
+ expected_response_code=201
+ )
+
+ return True
+
+ def delete_database(self, database):
+ """Drop a database on the InfluxDB server.
+
+ :param database: the name of the database to delete
+ :type database: string
+ :rtype: boolean
+ """
+ url = "db/{0}".format(database)
+
+ self.request(
+ url=url,
+ method='DELETE',
+ expected_response_code=204
+ )
+
+ return True
+
+ # ### get list of databases
+ # curl -X GET http://localhost:8086/db
+
+ def get_list_database(self):
+ """Get the list of databases."""
+ url = "db"
+
+ response = self.request(
+ url=url,
+ method='GET',
+ expected_response_code=200
+ )
+
+ return response.json()
+
+ def get_database_list(self):
+ """Get the list of databases.
+
+ DEPRECATED.
+ """
+ warnings.warn(
+ "get_database_list is deprecated, and will be removed "
+ "in future versions. Please use "
+ "``InfluxDBClient.get_list_database`` instead.",
+ FutureWarning)
+ return self.get_list_database()
+
+ def delete_series(self, series):
+ """Drop a series on the InfluxDB server.
+
+ :param series: the name of the series to delete
+ :type series: string
+ :rtype: boolean
+ """
+ url = "db/{0}/series/{1}".format(
+ self._database,
+ series
+ )
+
+ self.request(
+ url=url,
+ method='DELETE',
+ expected_response_code=204
+ )
+
+ return True
+
+ def get_list_series(self):
+ """Get a list of all time series in a database."""
+ response = self._query('list series')
+ return [series[1] for series in response[0]['points']]
+
+ def get_list_continuous_queries(self):
+ """Get a list of continuous queries."""
+ response = self._query('list continuous queries')
+ return [query[2] for query in response[0]['points']]
+
+ # Security
+ # get list of cluster admins
+ # curl http://localhost:8086/cluster_admins?u=root&p=root
+
+ # add cluster admin
+ # curl -X POST http://localhost:8086/cluster_admins?u=root&p=root \
+ # -d '{"name": "paul", "password": "i write teh docz"}'
+
+ # update cluster admin password
+ # curl -X POST http://localhost:8086/cluster_admins/paul?u=root&p=root \
+ # -d '{"password": "new pass"}'
+
+ # delete cluster admin
+ # curl -X DELETE http://localhost:8086/cluster_admins/paul?u=root&p=root
+
+ # Database admins, with a database name of site_dev
+ # get list of database admins
+ # curl http://localhost:8086/db/site_dev/admins?u=root&p=root
+
+ # add database admin
+ # curl -X POST http://localhost:8086/db/site_dev/admins?u=root&p=root \
+ # -d '{"name": "paul", "password": "i write teh docz"}'
+
+ # update database admin password
+ # curl -X POST http://localhost:8086/db/site_dev/admins/paul?u=root&p=root\
+ # -d '{"password": "new pass"}'
+
+ # delete database admin
+ # curl -X DELETE \
+ # http://localhost:8086/db/site_dev/admins/paul?u=root&p=root
+
+ def get_list_cluster_admins(self):
+ """Get list of cluster admins."""
+ response = self.request(
+ url="cluster_admins",
+ method='GET',
+ expected_response_code=200
+ )
+
+ return response.json()
+
+ def add_cluster_admin(self, new_username, new_password):
+ """Add cluster admin."""
+ data = {
+ 'name': new_username,
+ 'password': new_password
+ }
+
+ self.request(
+ url="cluster_admins",
+ method='POST',
+ data=data,
+ expected_response_code=200
+ )
+
+ return True
+
+ def update_cluster_admin_password(self, username, new_password):
+ """Update cluster admin password."""
+ url = "cluster_admins/{0}".format(username)
+
+ data = {
+ 'password': new_password
+ }
+
+ self.request(
+ url=url,
+ method='POST',
+ data=data,
+ expected_response_code=200
+ )
+
+ return True
+
+ def delete_cluster_admin(self, username):
+ """Delete cluster admin."""
+ url = "cluster_admins/{0}".format(username)
+
+ self.request(
+ url=url,
+ method='DELETE',
+ expected_response_code=200
+ )
+
+ return True
+
+ def set_database_admin(self, username):
+ """Set user as database admin."""
+ return self.alter_database_admin(username, True)
+
+ def unset_database_admin(self, username):
+ """Unset user as database admin."""
+ return self.alter_database_admin(username, False)
+
+ def alter_database_admin(self, username, is_admin):
+ """Alter the database admin."""
+ url = "db/{0}/users/{1}".format(self._database, username)
+
+ data = {'admin': is_admin}
+
+ self.request(
+ url=url,
+ method='POST',
+ data=data,
+ expected_response_code=200
+ )
+
+ return True
+
+ def get_list_database_admins(self):
+ """Get list of database admins.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ def add_database_admin(self, new_username, new_password):
+ """Add cluster admin.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ def update_database_admin_password(self, username, new_password):
+ """Update database admin password.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ def delete_database_admin(self, username):
+ """Delete database admin.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ ###
+ # Limiting User Access
+
+ # Database users
+ # get list of database users
+ # curl http://localhost:8086/db/site_dev/users?u=root&p=root
+
+ # add database user
+ # curl -X POST http://localhost:8086/db/site_dev/users?u=root&p=root \
+ # -d '{"name": "paul", "password": "i write teh docz"}'
+
+ # update database user password
+ # curl -X POST http://localhost:8086/db/site_dev/users/paul?u=root&p=root \
+ # -d '{"password": "new pass"}'
+
+ # delete database user
+ # curl -X DELETE http://localhost:8086/db/site_dev/users/paul?u=root&p=root
+
+ def get_database_users(self):
+ """Get list of database users."""
+ url = "db/{0}/users".format(self._database)
+
+ response = self.request(
+ url=url,
+ method='GET',
+ expected_response_code=200
+ )
+
+ return response.json()
+
+ def add_database_user(self, new_username, new_password, permissions=None):
+ """Add database user.
+
+ :param permissions: A ``(readFrom, writeTo)`` tuple
+ """
+ url = "db/{0}/users".format(self._database)
+
+ data = {
+ 'name': new_username,
+ 'password': new_password
+ }
+
+ if permissions:
+ try:
+ data['readFrom'], data['writeTo'] = permissions
+ except (ValueError, TypeError):
+ raise TypeError(
+ "'permissions' must be (readFrom, writeTo) tuple"
+ )
+
+ self.request(
+ url=url,
+ method='POST',
+ data=data,
+ expected_response_code=200
+ )
+
+ return True
+
+ def update_database_user_password(self, username, new_password):
+ """Update password."""
+ return self.alter_database_user(username, new_password)
+
+ def alter_database_user(self, username, password=None, permissions=None):
+ """Alter a database user and/or their permissions.
+
+ :param permissions: A ``(readFrom, writeTo)`` tuple
+ :raise TypeError: if permissions cannot be read.
+ :raise ValueError: if neither password nor permissions provided.
+ """
+ url = "db/{0}/users/{1}".format(self._database, username)
+
+ if not password and not permissions:
+ raise ValueError("Nothing to alter for user {0}.".format(username))
+
+ data = {}
+
+ if password:
+ data['password'] = password
+
+ if permissions:
+ try:
+ data['readFrom'], data['writeTo'] = permissions
+ except (ValueError, TypeError):
+ raise TypeError(
+ "'permissions' must be (readFrom, writeTo) tuple"
+ )
+
+ self.request(
+ url=url,
+ method='POST',
+ data=data,
+ expected_response_code=200
+ )
+
+        # only track the new password if one was actually provided
+        if username == self._username and password:
+            self._password = password
+
+ return True
+
+ def delete_database_user(self, username):
+ """Delete database user."""
+ url = "db/{0}/users/{1}".format(self._database, username)
+
+ self.request(
+ url=url,
+ method='DELETE',
+ expected_response_code=200
+ )
+
+ return True
+
+ # update the user by POSTing to db/site_dev/users/paul
+
+ def update_permission(self, username, json_body):
+ """Update read/write permission.
+
+ 2013-11-08: This endpoint has not been implemented yet in ver0.0.8,
+ but it is documented in http://influxdb.org/docs/api/http.html.
+ See also: src/api/http/api.go:l57
+
+ """
+ raise NotImplementedError()
+
+ def send_packet(self, packet):
+ """Send a UDP packet along the wire."""
+ data = json.dumps(packet)
+ byte = data.encode('utf-8')
+ self.udp_socket.sendto(byte, (self._host, self._udp_port))
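+
+
+# Illustrative sketch, not part of this commit: basic v0.8 usage against a
+# local server with the default root/root credentials:
+#
+#     cli = InfluxDBClient(database='site_dev')
+#     cli.write_points([{'name': 'cpu_load_short',
+#                        'columns': ['value'],
+#                        'points': [[0.64]]}])
+#     print(cli.query('select * from cpu_load_short'))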
diff --git a/influxdb/influxdb08/dataframe_client.py b/influxdb/influxdb08/dataframe_client.py
new file mode 100644
index 00000000..2867125d
--- /dev/null
+++ b/influxdb/influxdb08/dataframe_client.py
@@ -0,0 +1,177 @@
+# -*- coding: utf-8 -*-
+"""DataFrame client for InfluxDB v0.8."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import math
+import warnings
+
+from .client import InfluxDBClient
+
+
+class DataFrameClient(InfluxDBClient):
+ """Primary defintion of the DataFrameClient for v0.8.
+
+ The ``DataFrameClient`` object holds information necessary to connect
+ to InfluxDB. Requests can be made to InfluxDB directly through the client.
+ The client reads and writes from pandas DataFrames.
+ """
+
+ def __init__(self, ignore_nan=True, *args, **kwargs):
+ """Initialize an instance of the DataFrameClient."""
+ super(DataFrameClient, self).__init__(*args, **kwargs)
+
+ try:
+ global pd
+ import pandas as pd
+ except ImportError as ex:
+ raise ImportError('DataFrameClient requires Pandas, '
+ '"{ex}" problem importing'.format(ex=str(ex)))
+
+ self.EPOCH = pd.Timestamp('1970-01-01 00:00:00.000+00:00')
+ self.ignore_nan = ignore_nan
+
+ def write_points(self, data, *args, **kwargs):
+ """Write to multiple time series names.
+
+ :param data: A dictionary mapping series names to pandas DataFrames
+ :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
+ or 'u'.
+ :param batch_size: [Optional] Value to write the points in batches
+ instead of all at one time. Useful for when doing data dumps from
+ one database to another or when doing a massive write operation
+ :type batch_size: int
+ """
+ batch_size = kwargs.get('batch_size')
+ time_precision = kwargs.get('time_precision', 's')
+ if batch_size:
+ kwargs.pop('batch_size') # don't hand over to InfluxDBClient
+ for key, data_frame in data.items():
+ number_batches = int(math.ceil(
+ len(data_frame) / float(batch_size)))
+ for batch in range(number_batches):
+ start_index = batch * batch_size
+ end_index = (batch + 1) * batch_size
+ outdata = [
+ self._convert_dataframe_to_json(
+ name=key,
+ dataframe=data_frame
+ .iloc[start_index:end_index].copy(),
+ time_precision=time_precision)]
+ InfluxDBClient.write_points(self, outdata, *args, **kwargs)
+ return True
+
+ outdata = [
+ self._convert_dataframe_to_json(name=key, dataframe=dataframe,
+ time_precision=time_precision)
+ for key, dataframe in data.items()]
+ return InfluxDBClient.write_points(self, outdata, *args, **kwargs)
+
+ def write_points_with_precision(self, data, time_precision='s'):
+ """Write to multiple time series names.
+
+ DEPRECATED
+ """
+ warnings.warn(
+ "write_points_with_precision is deprecated, and will be removed "
+ "in future versions. Please use "
+ "``DataFrameClient.write_points(time_precision='..')`` instead.",
+ FutureWarning)
+        return self.write_points(data, time_precision=time_precision)
+
+ def query(self, query, time_precision='s', chunked=False):
+ """Query data into DataFrames.
+
+ Returns a DataFrame for a single time series and a map for multiple
+ time series with the time series as value and its name as key.
+
+ :param time_precision: [Optional, default 's'] Either 's', 'm', 'ms'
+ or 'u'.
+ :param chunked: [Optional, default=False] True if the data shall be
+ retrieved in chunks, False otherwise.
+ """
+ result = InfluxDBClient.query(self, query=query,
+ time_precision=time_precision,
+ chunked=chunked)
+ if len(result) == 0:
+ return result
+ elif len(result) == 1:
+ return self._to_dataframe(result[0], time_precision)
+ else:
+ ret = {}
+ for time_series in result:
+ ret[time_series['name']] = self._to_dataframe(time_series,
+ time_precision)
+ return ret
+
+ @staticmethod
+ def _to_dataframe(json_result, time_precision):
+ dataframe = pd.DataFrame(data=json_result['points'],
+ columns=json_result['columns'])
+ if 'sequence_number' in dataframe.keys():
+ dataframe.sort_values(['time', 'sequence_number'], inplace=True)
+ else:
+ dataframe.sort_values(['time'], inplace=True)
+
+ pandas_time_unit = time_precision
+ if time_precision == 'm':
+ pandas_time_unit = 'ms'
+ elif time_precision == 'u':
+ pandas_time_unit = 'us'
+
+ dataframe.index = pd.to_datetime(list(dataframe['time']),
+ unit=pandas_time_unit,
+ utc=True)
+ del dataframe['time']
+ return dataframe
+
+ def _convert_dataframe_to_json(self, dataframe, name, time_precision='s'):
+ if not isinstance(dataframe, pd.DataFrame):
+ raise TypeError('Must be DataFrame, but type was: {0}.'
+ .format(type(dataframe)))
+ if not (isinstance(dataframe.index, pd.PeriodIndex) or
+ isinstance(dataframe.index, pd.DatetimeIndex)):
+            raise TypeError('Must be DataFrame with DatetimeIndex or '
+                            'PeriodIndex.')
+
+ if isinstance(dataframe.index, pd.PeriodIndex):
+ dataframe.index = dataframe.index.to_timestamp()
+ else:
+ dataframe.index = pd.to_datetime(dataframe.index)
+
+ if dataframe.index.tzinfo is None:
+ dataframe.index = dataframe.index.tz_localize('UTC')
+ dataframe['time'] = [self._datetime_to_epoch(dt, time_precision)
+ for dt in dataframe.index]
+ data = {'name': name,
+ 'columns': [str(column) for column in dataframe.columns],
+ 'points': [self._convert_array(x) for x in dataframe.values]}
+ return data
+
+ def _convert_array(self, array):
+ try:
+ global np
+ import numpy as np
+ except ImportError as ex:
+ raise ImportError('DataFrameClient requires Numpy, '
+ '"{ex}" problem importing'.format(ex=str(ex)))
+
+ if self.ignore_nan:
+ number_types = (int, float, np.number)
+ condition = (all(isinstance(el, number_types) for el in array) and
+ np.isnan(array))
+ return list(np.where(condition, None, array))
+
+ return list(array)
+
+ def _datetime_to_epoch(self, datetime, time_precision='s'):
+ seconds = (datetime - self.EPOCH).total_seconds()
+ if time_precision == 's':
+ return seconds
+ elif time_precision == 'm' or time_precision == 'ms':
+ return seconds * 1000
+ elif time_precision == 'u':
+ return seconds * 1000000
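+
+
+# Illustrative sketch, not part of this commit: writing one DataFrame per
+# series name, assuming pandas is installed and a local v0.8 server:
+#
+#     import pandas as pd
+#
+#     cli = DataFrameClient(database='site_dev')
+#     df = pd.DataFrame(
+#         data=[0.64],
+#         index=[pd.Timestamp('2009-11-10 23:00:00', tz='UTC')],
+#         columns=['value'])
+#     cli.write_points({'cpu_load_short': df}, time_precision='s')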
diff --git a/influxdb/influxdb08/helper.py b/influxdb/influxdb08/helper.py
new file mode 100644
index 00000000..5f2d4614
--- /dev/null
+++ b/influxdb/influxdb08/helper.py
@@ -0,0 +1,155 @@
+# -*- coding: utf-8 -*-
+"""Helper class for InfluxDB for v0.8."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from collections import namedtuple, defaultdict
+from warnings import warn
+
+import six
+
+
+class SeriesHelper(object):
+ """Define the SeriesHelper object for InfluxDB v0.8.
+
+ Subclassing this helper eases writing data points in bulk.
+ All data points are immutable, ensuring they do not get overwritten.
+ Each subclass can write to its own database.
+ The time series names can also be based on one or more defined fields.
+
+ Annotated example::
+
+ class MySeriesHelper(SeriesHelper):
+ class Meta:
+ # Meta class stores time series helper configuration.
+ series_name = 'events.stats.{server_name}'
+ # Series name must be a string, curly brackets for dynamic use.
+ fields = ['time', 'server_name']
+ # Defines all the fields in this time series.
+ ### Following attributes are optional. ###
+ client = TestSeriesHelper.client
+ # Client should be an instance of InfluxDBClient.
+ :warning: Only used if autocommit is True.
+ bulk_size = 5
+ # Defines the number of data points to write simultaneously.
+ # Only applicable if autocommit is True.
+ autocommit = True
+ # If True and no bulk_size, then will set bulk_size to 1.
+
+ """
+
+ __initialized__ = False
+
+ def __new__(cls, *args, **kwargs):
+ """Initialize class attributes for subsequent constructor calls.
+
+ :note: *args and **kwargs are not explicitly used in this function,
+ but needed for Python 2 compatibility.
+ """
+ if not cls.__initialized__:
+ cls.__initialized__ = True
+ try:
+ _meta = getattr(cls, 'Meta')
+ except AttributeError:
+ raise AttributeError(
+ 'Missing Meta class in {0}.'.format(
+ cls.__name__))
+
+ for attr in ['series_name', 'fields']:
+ try:
+ setattr(cls, '_' + attr, getattr(_meta, attr))
+ except AttributeError:
+ raise AttributeError(
+ 'Missing {0} in {1} Meta class.'.format(
+ attr,
+ cls.__name__))
+
+ cls._autocommit = getattr(_meta, 'autocommit', False)
+
+ cls._client = getattr(_meta, 'client', None)
+ if cls._autocommit and not cls._client:
+ raise AttributeError(
+ 'In {0}, autocommit is set to True, but no client is set.'
+ .format(cls.__name__))
+
+ try:
+ cls._bulk_size = getattr(_meta, 'bulk_size')
+ if cls._bulk_size < 1 and cls._autocommit:
+ warn(
+ 'Definition of bulk_size in {0} forced to 1, '
+ 'was less than 1.'.format(cls.__name__))
+ cls._bulk_size = 1
+ except AttributeError:
+ cls._bulk_size = -1
+ else:
+ if not cls._autocommit:
+ warn(
+                        'Definition of bulk_size in {0} has no effect because'
+ ' autocommit is false.'.format(cls.__name__))
+
+ cls._datapoints = defaultdict(list)
+ cls._type = namedtuple(cls.__name__, cls._fields)
+
+ return super(SeriesHelper, cls).__new__(cls)
+
+ def __init__(self, **kw):
+ """Create a new data point.
+
+ All fields must be present.
+
+        :note: Data points are written once `bulk_size` is reached, per
+            Helper.
+ :warning: Data points are *immutable* (`namedtuples`).
+ """
+ cls = self.__class__
+
+ if sorted(cls._fields) != sorted(kw.keys()):
+ raise NameError(
+ 'Expected {0}, got {1}.'.format(
+ cls._fields,
+ kw.keys()))
+
+ cls._datapoints[cls._series_name.format(**kw)].append(cls._type(**kw))
+
+ if cls._autocommit and \
+ sum(len(series) for series in cls._datapoints.values()) \
+ >= cls._bulk_size:
+ cls.commit()
+
+ @classmethod
+ def commit(cls, client=None):
+ """Commit everything from datapoints via the client.
+
+ :param client: InfluxDBClient instance for writing points to InfluxDB.
+ :attention: any provided client will supersede the class client.
+ :return: result of client.write_points.
+ """
+ if not client:
+ client = cls._client
+ rtn = client.write_points(cls._json_body_())
+ cls._reset_()
+ return rtn
+
+ @classmethod
+ def _json_body_(cls):
+ """Return JSON body of the datapoints.
+
+ :return: JSON body of the datapoints.
+ """
+ json = []
+ if not cls.__initialized__:
+ cls._reset_()
+ for series_name, data in six.iteritems(cls._datapoints):
+ json.append({'name': series_name,
+ 'columns': cls._fields,
+ 'points': [[getattr(point, k) for k in cls._fields]
+ for point in data]
+ })
+ return json
+
+ @classmethod
+ def _reset_(cls):
+ """Reset data storage."""
+ cls._datapoints = defaultdict(list)
diff --git a/influxdb/line_protocol.py b/influxdb/line_protocol.py
new file mode 100644
index 00000000..25dd2ad7
--- /dev/null
+++ b/influxdb/line_protocol.py
@@ -0,0 +1,210 @@
+# -*- coding: utf-8 -*-
+"""Define the line_protocol handler."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from datetime import datetime
+from numbers import Integral
+
+from pytz import UTC
+from dateutil.parser import parse
+from six import binary_type, text_type, integer_types, PY2
+
+EPOCH = UTC.localize(datetime.utcfromtimestamp(0))
+
+
+def _to_nanos(timestamp):
+ delta = timestamp - EPOCH
+ nanos_in_days = delta.days * 86400 * 10 ** 9
+ nanos_in_seconds = delta.seconds * 10 ** 9
+ nanos_in_micros = delta.microseconds * 10 ** 3
+ return nanos_in_days + nanos_in_seconds + nanos_in_micros
+
+
+def _convert_timestamp(timestamp, precision=None):
+ if isinstance(timestamp, Integral):
+ return timestamp # assume precision is correct if timestamp is int
+
+ if isinstance(_get_unicode(timestamp), text_type):
+ timestamp = parse(timestamp)
+
+ if isinstance(timestamp, datetime):
+ if not timestamp.tzinfo:
+ timestamp = UTC.localize(timestamp)
+
+ ns = _to_nanos(timestamp)
+ if precision is None or precision == 'n':
+ return ns
+
+ if precision == 'u':
+ return ns / 10**3
+
+ if precision == 'ms':
+ return ns / 10**6
+
+ if precision == 's':
+ return ns / 10**9
+
+ if precision == 'm':
+ return ns / 10**9 / 60
+
+ if precision == 'h':
+ return ns / 10**9 / 3600
+
+ raise ValueError(timestamp)
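+
+
+# Illustrative sketch, not part of this commit: naive datetimes are assumed
+# to be UTC, and the epoch offset is scaled down from nanoseconds:
+#
+#     >>> from datetime import datetime
+#     >>> _convert_timestamp(datetime(1970, 1, 1, 0, 0, 1), precision='ms')
+#     1000.0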
+
+
+def _escape_tag(tag):
+ tag = _get_unicode(tag, force=True)
+ return tag.replace(
+ "\\", "\\\\"
+ ).replace(
+ " ", "\\ "
+ ).replace(
+ ",", "\\,"
+ ).replace(
+ "=", "\\="
+ ).replace(
+ "\n", "\\n"
+ )
+
+
+def _escape_tag_value(value):
+ ret = _escape_tag(value)
+ if ret.endswith('\\'):
+ ret += ' '
+ return ret
+
+
+def quote_ident(value):
+ """Indent the quotes."""
+ return "\"{}\"".format(value
+ .replace("\\", "\\\\")
+ .replace("\"", "\\\"")
+ .replace("\n", "\\n"))
+
+
+def quote_literal(value):
+ """Quote provided literal."""
+ return "'{}'".format(value
+ .replace("\\", "\\\\")
+ .replace("'", "\\'"))
+
+
+def _is_float(value):
+ try:
+ float(value)
+ except (TypeError, ValueError):
+ return False
+
+ return True
+
+
+def _escape_value(value):
+ if value is None:
+ return ''
+
+ value = _get_unicode(value)
+ if isinstance(value, text_type):
+ return quote_ident(value)
+
+ if isinstance(value, integer_types) and not isinstance(value, bool):
+ return str(value) + 'i'
+
+ if isinstance(value, bool):
+ return str(value)
+
+ if _is_float(value):
+ return repr(float(value))
+
+ return str(value)
+
+
+def _get_unicode(data, force=False):
+ """Try to return a text aka unicode object from the given data."""
+ if isinstance(data, binary_type):
+ return data.decode('utf-8')
+
+ if data is None:
+ return ''
+
+ if force:
+ if PY2:
+ return unicode(data)
+ return str(data)
+
+ return data
+
+
+def make_line(measurement, tags=None, fields=None, time=None, precision=None):
+ """Extract the actual point from a given measurement line."""
+ tags = tags or {}
+ fields = fields or {}
+
+ line = _escape_tag(_get_unicode(measurement))
+
+ # tags should be sorted client-side to take load off server
+ tag_list = []
+ for tag_key in sorted(tags.keys()):
+ key = _escape_tag(tag_key)
+ value = _escape_tag(tags[tag_key])
+
+ if key != '' and value != '':
+ tag_list.append(
+ "{key}={value}".format(key=key, value=value)
+ )
+
+ if tag_list:
+ line += ',' + ','.join(tag_list)
+
+ field_list = []
+ for field_key in sorted(fields.keys()):
+ key = _escape_tag(field_key)
+ value = _escape_value(fields[field_key])
+
+ if key != '' and value != '':
+ field_list.append("{key}={value}".format(
+ key=key,
+ value=value
+ ))
+
+ if field_list:
+ line += ' ' + ','.join(field_list)
+
+ if time is not None:
+ timestamp = _get_unicode(str(int(
+ _convert_timestamp(time, precision)
+ )))
+ line += ' ' + timestamp
+
+ return line
+
+
+def make_lines(data, precision=None):
+ """Extract points from given dict.
+
+ Extracts the points from the given dict and returns a Unicode string
+ matching the line protocol introduced in InfluxDB 0.9.0.
+ """
+ lines = []
+ static_tags = data.get('tags')
+ for point in data['points']:
+ if static_tags:
+ tags = dict(static_tags) # make a copy, since we'll modify
+ tags.update(point.get('tags') or {})
+ else:
+ tags = point.get('tags') or {}
+
+ line = make_line(
+ point.get('measurement', data.get('measurement')),
+ tags=tags,
+ fields=point.get('fields'),
+ precision=precision,
+ time=point.get('time')
+ )
+ lines.append(line)
+
+ return '\n'.join(lines) + '\n'
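+
+
+# Illustrative sketch, not part of this commit: one point serialized to the
+# line protocol (cf. the 'write' unit test below in this changeset):
+#
+#     >>> print(make_lines({
+#     ...     'tags': {'host': 'server01'},
+#     ...     'points': [{'measurement': 'cpu_load_short',
+#     ...                 'fields': {'value': 0.64},
+#     ...                 'time': '2009-11-10T23:00:00Z'}]}))
+#     cpu_load_short,host=server01 value=0.64 1257894000000000000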
diff --git a/influxdb/resultset.py b/influxdb/resultset.py
new file mode 100644
index 00000000..ba4f3c13
--- /dev/null
+++ b/influxdb/resultset.py
@@ -0,0 +1,206 @@
+# -*- coding: utf-8 -*-
+"""Module to prepare the resultset."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import warnings
+
+from influxdb.exceptions import InfluxDBClientError
+
+_sentinel = object()
+
+
+class ResultSet(object):
+ """A wrapper around a single InfluxDB query result."""
+
+ def __init__(self, series, raise_errors=True):
+ """Initialize the ResultSet."""
+ self._raw = series
+ self._error = self._raw.get('error', None)
+
+ if self.error is not None and raise_errors is True:
+ raise InfluxDBClientError(self.error)
+
+ @property
+ def raw(self):
+ """Raw JSON from InfluxDB."""
+ return self._raw
+
+ @raw.setter
+ def raw(self, value):
+ self._raw = value
+
+ @property
+ def error(self):
+ """Error returned by InfluxDB."""
+ return self._error
+
+ def __getitem__(self, key):
+ """Retrieve the series name or specific set based on key.
+
+        :param key: Either a series name, or a tags_dict, or
+                    a 2-tuple(series_name, tags_dict).
+                    If the series name is None (or not given) then any series
+                    matching the given tags will yield its points
+                    one after the other.
+                    To get the points of every series in this resultset,
+                    provide None as the key.
+        :return: A generator yielding `Point`s matching the given key.
+        NB:
+        The order in which the points are yielded is currently undefined and
+        may change.
+        """
+ warnings.warn(
+ ("ResultSet's ``__getitem__`` method will be deprecated. Use"
+ "``get_points`` instead."),
+ DeprecationWarning
+ )
+
+ if isinstance(key, tuple):
+ if len(key) != 2:
+ raise TypeError('only 2-tuples allowed')
+
+ name = key[0]
+ tags = key[1]
+
+ if not isinstance(tags, dict) and tags is not None:
+ raise TypeError('tags should be a dict')
+ elif isinstance(key, dict):
+ name = None
+ tags = key
+ else:
+ name = key
+ tags = None
+
+ return self.get_points(name, tags)
+
+ def get_points(self, measurement=None, tags=None):
+ """Return a generator for all the points that match the given filters.
+
+ :param measurement: The measurement name
+ :type measurement: str
+
+ :param tags: Tags to look for
+ :type tags: dict
+
+ :return: Points generator
+ """
+ # Raise error if measurement is not str or bytes
+ if not isinstance(measurement,
+ (bytes, type(b''.decode()), type(None))):
+            raise TypeError('measurement must be a str or None')
+
+ for series in self._get_series():
+ series_name = series.get('measurement',
+ series.get('name', 'results'))
+ if series_name is None:
+ # this is a "system" query or a query which
+ # doesn't return a name attribute.
+ # like 'show retention policies' ..
+ if tags is None:
+ for item in self._get_points_for_series(series):
+ yield item
+
+ elif measurement in (None, series_name):
+                # by default, if no tags were provided then
+                # we match every returned series
+ series_tags = series.get('tags', {})
+ for item in self._get_points_for_series(series):
+ if tags is None or \
+ self._tag_matches(item, tags) or \
+ self._tag_matches(series_tags, tags):
+ yield item
+
+ def __repr__(self):
+ """Representation of ResultSet object."""
+ items = []
+
+ for item in self.items():
+ items.append("'%s': %s" % (item[0], list(item[1])))
+
+ return "ResultSet({%s})" % ", ".join(items)
+
+ def __iter__(self):
+ """Yield one dict instance per series result."""
+ for key in self.keys():
+ yield list(self.__getitem__(key))
+
+ @staticmethod
+ def _tag_matches(tags, filter):
+ """Check if all key/values in filter match in tags."""
+ for tag_name, tag_value in filter.items():
+            # use _sentinel as the default rather than None, since None
+            # may itself be a valid series_tags value: a series without
+            # the tag appears to report it as null/None
+ series_tag_value = tags.get(tag_name, _sentinel)
+ if series_tag_value != tag_value:
+ return False
+
+ return True
+
+ def _get_series(self):
+ """Return all series."""
+ return self.raw.get('series', [])
+
+ def __len__(self):
+ """Return the len of the keys in the ResultSet."""
+ return len(self.keys())
+
+ def keys(self):
+ """Return the list of keys in the ResultSet.
+
+ :return: List of keys. Keys are tuples (series_name, tags)
+ """
+ keys = []
+ for series in self._get_series():
+ keys.append(
+ (series.get('measurement',
+ series.get('name', 'results')),
+ series.get('tags', None))
+ )
+ return keys
+
+ def items(self):
+ """Return the set of items from the ResultSet.
+
+ :return: List of tuples, (key, generator)
+ """
+ items = []
+ for series in self._get_series():
+ series_key = (series.get('measurement',
+ series.get('name', 'results')),
+ series.get('tags', None))
+ items.append(
+ (series_key, self._get_points_for_series(series))
+ )
+ return items
+
+ def _get_points_for_series(self, series):
+ """Return generator of dict from columns and values of a series.
+
+ :param series: One series
+ :return: Generator of dicts
+ """
+ for point in series.get('values', []):
+ yield self.point_from_cols_vals(
+ series['columns'],
+ point
+ )
+
+ @staticmethod
+ def point_from_cols_vals(cols, vals):
+ """Create a dict from columns and values lists.
+
+ :param cols: List of columns
+ :param vals: List of values
+ :return: Dict where keys are columns.
+ """
+ point = {}
+ for col_index, col_name in enumerate(cols):
+ point[col_name] = vals[col_index]
+
+ return point
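+
+
+# Illustrative sketch, not part of this commit: filtering a query result,
+# assuming an InfluxDBClient instance named ``cli``:
+#
+#     rs = cli.query('select * from cpu_load_short')
+#     for point in rs.get_points(measurement='cpu_load_short',
+#                                tags={'host': 'server01'}):
+#         print(point['time'], point['value'])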
diff --git a/influxdb/tests/__init__.py b/influxdb/tests/__init__.py
new file mode 100644
index 00000000..f7c5dfb9
--- /dev/null
+++ b/influxdb/tests/__init__.py
@@ -0,0 +1,21 @@
+# -*- coding: utf-8 -*-
+"""Configure the tests package for InfluxDBClient."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import sys
+import os
+
+import unittest
+
+using_pypy = hasattr(sys, "pypy_version_info")
+skip_if_pypy = unittest.skipIf(using_pypy, "Skipping this test on pypy.")
+
+_skip_server_tests = os.environ.get(
+ 'INFLUXDB_PYTHON_SKIP_SERVER_TESTS',
+ None) == 'True'
+skip_server_tests = unittest.skipIf(_skip_server_tests,
+ "Skipping server tests...")
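+
+
+# Illustrative sketch, not part of this commit: the markers above are used
+# as decorators on test cases, e.g.:
+#
+#     @skip_server_tests
+#     class ServerRoundTripTest(unittest.TestCase):
+#         ...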
diff --git a/influxdb/tests/chunked_json_test.py b/influxdb/tests/chunked_json_test.py
new file mode 100644
index 00000000..f633bcb1
--- /dev/null
+++ b/influxdb/tests/chunked_json_test.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+"""Chunked JSON test."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import unittest
+
+from influxdb import chunked_json
+
+
+class TestChunkJson(unittest.TestCase):
+ """Set up the TestChunkJson object."""
+
+ @classmethod
+ def setUpClass(cls):
+ """Initialize the TestChunkJson object."""
+ super(TestChunkJson, cls).setUpClass()
+
+ def test_load(self):
+ """Test reading a sequence of JSON values from a string."""
+ example_response = \
+ '{"results": [{"series": [{"measurement": "sdfsdfsdf", ' \
+ '"columns": ["time", "value"], "values": ' \
+ '[["2009-11-10T23:00:00Z", 0.64]]}]}, {"series": ' \
+ '[{"measurement": "cpu_load_short", "columns": ["time", "value"],'\
+ '"values": [["2009-11-10T23:00:00Z", 0.64]]}]}]}'
+
+ res = list(chunked_json.loads(example_response))
+
+ self.assertListEqual(
+ [
+ {
+ 'results': [
+ {'series': [{
+ 'values': [['2009-11-10T23:00:00Z', 0.64]],
+ 'measurement': 'sdfsdfsdf',
+ 'columns':
+ ['time', 'value']}]},
+ {'series': [{
+ 'values': [['2009-11-10T23:00:00Z', 0.64]],
+ 'measurement': 'cpu_load_short',
+ 'columns': ['time', 'value']}]}
+ ]
+ }
+ ],
+ res
+ )
diff --git a/influxdb/tests/client_test.py b/influxdb/tests/client_test.py
new file mode 100644
index 00000000..115fbc48
--- /dev/null
+++ b/influxdb/tests/client_test.py
@@ -0,0 +1,1557 @@
+# -*- coding: utf-8 -*-
+"""Unit tests for the InfluxDBClient.
+
+NB/WARNING:
+This module implements tests for the InfluxDBClient class
+but does so
+ + without any server instance running
+ + by mocking all the expected responses.
+
+So any change of (response format from) the server will **NOT** be
+detected by this module.
+
+See client_test_with_server.py for tests against a running server instance.
+
+"""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import random
+import socket
+import unittest
+import warnings
+
+import io
+import gzip
+import json
+import mock
+import requests
+import requests.exceptions
+import requests_mock
+
+from nose.tools import raises
+from urllib3.connection import HTTPConnection
+
+from influxdb import InfluxDBClient
+from influxdb.resultset import ResultSet
+
+
+def _build_response_object(status_code=200, content=""):
+ resp = requests.Response()
+ resp.status_code = status_code
+ resp._content = content.encode("utf8")
+ return resp
+
+
+def _mocked_session(cli, method="GET", status_code=200, content=""):
+ method = method.upper()
+
+ def request(*args, **kwargs):
+ """Request content from the mocked session."""
+ c = content
+
+ # Check method
+ assert method == kwargs.get('method', 'GET')
+
+ if method == 'POST':
+ data = kwargs.get('data', None)
+
+ if data is not None:
+ # Data must be a string
+ assert isinstance(data, str)
+
+ # Data must be a JSON string
+ assert c == json.loads(data, strict=True)
+
+ c = data
+
+        # In any case, content must be a JSON string (or empty string)
+ if not isinstance(c, str):
+ c = json.dumps(c)
+
+ return _build_response_object(status_code=status_code, content=c)
+
+ return mock.patch.object(cli._session, 'request', side_effect=request)
+
+
+class TestInfluxDBClient(unittest.TestCase):
+ """Set up the TestInfluxDBClient object."""
+
+ def setUp(self):
+ """Initialize an instance of TestInfluxDBClient object."""
+ # By default, raise exceptions on warnings
+ warnings.simplefilter('error', FutureWarning)
+
+ self.cli = InfluxDBClient('localhost', 8086, 'username', 'password')
+ self.dummy_points = [
+ {
+ "measurement": "cpu_load_short",
+ "tags": {
+ "host": "server01",
+ "region": "us-west"
+ },
+ "time": "2009-11-10T23:00:00.123456Z",
+ "fields": {
+ "value": 0.64
+ }
+ }
+ ]
+
+ self.dsn_string = 'influxdb://uSr:pWd@my.host.fr:1886/db'
+
+ def test_scheme(self):
+ """Set up the test schema for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
+ self.assertEqual('http://host:8086', cli._baseurl)
+
+ cli = InfluxDBClient(
+ 'host', 8086, 'username', 'password', 'database', ssl=True
+ )
+ self.assertEqual('https://host:8086', cli._baseurl)
+
+ cli = InfluxDBClient(
+ 'host', 8086, 'username', 'password', 'database', ssl=True,
+ path="somepath"
+ )
+ self.assertEqual('https://host:8086/somepath', cli._baseurl)
+
+ cli = InfluxDBClient(
+ 'host', 8086, 'username', 'password', 'database', ssl=True,
+ path=None
+ )
+ self.assertEqual('https://host:8086', cli._baseurl)
+
+ cli = InfluxDBClient(
+ 'host', 8086, 'username', 'password', 'database', ssl=True,
+ path="/somepath"
+ )
+ self.assertEqual('https://host:8086/somepath', cli._baseurl)
+
+ def test_dsn(self):
+ """Set up the test datasource name for TestInfluxDBClient object."""
+ cli = InfluxDBClient.from_dsn('influxdb://192.168.0.1:1886')
+ self.assertEqual('http://192.168.0.1:1886', cli._baseurl)
+
+ cli = InfluxDBClient.from_dsn(self.dsn_string)
+ self.assertEqual('http://my.host.fr:1886', cli._baseurl)
+ self.assertEqual('uSr', cli._username)
+ self.assertEqual('pWd', cli._password)
+ self.assertEqual('db', cli._database)
+ self.assertFalse(cli._use_udp)
+
+ cli = InfluxDBClient.from_dsn('udp+' + self.dsn_string)
+ self.assertTrue(cli._use_udp)
+
+ cli = InfluxDBClient.from_dsn('https+' + self.dsn_string)
+ self.assertEqual('https://my.host.fr:1886', cli._baseurl)
+
+ cli = InfluxDBClient.from_dsn('https+' + self.dsn_string,
+ **{'ssl': False})
+ self.assertEqual('http://my.host.fr:1886', cli._baseurl)
+
+ def test_cert(self):
+ """Test mutual TLS authentication for TestInfluxDBClient object."""
+ cli = InfluxDBClient(ssl=True, cert='/etc/pki/tls/private/dummy.crt')
+ self.assertEqual(cli._session.cert, '/etc/pki/tls/private/dummy.crt')
+
+ with self.assertRaises(ValueError):
+ cli = InfluxDBClient(cert='/etc/pki/tls/private/dummy.crt')
+
+ def test_switch_database(self):
+ """Test switch database in TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
+ cli.switch_database('another_database')
+ self.assertEqual('another_database', cli._database)
+
+ def test_switch_user(self):
+ """Test switch user in TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
+ cli.switch_user('another_username', 'another_password')
+ self.assertEqual('another_username', cli._username)
+ self.assertEqual('another_password', cli._password)
+
+ def test_write(self):
+ """Test write in TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+ cli = InfluxDBClient(database='db')
+ cli.write(
+ {"database": "mydb",
+ "retentionPolicy": "mypolicy",
+ "points": [{"measurement": "cpu_load_short",
+ "tags": {"host": "server01",
+ "region": "us-west"},
+ "time": "2009-11-10T23:00:00Z",
+ "fields": {"value": 0.64}}]}
+ )
+
+ self.assertEqual(
+ m.last_request.body,
+ b"cpu_load_short,host=server01,region=us-west "
+ b"value=0.64 1257894000000000000\n",
+ )
+
+ def test_write_points(self):
+ """Test write points for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+
+ cli = InfluxDBClient(database='db')
+ cli.write_points(
+ self.dummy_points,
+ )
+ self.assertEqual(
+ 'cpu_load_short,host=server01,region=us-west '
+ 'value=0.64 1257894000123456000\n',
+ m.last_request.body.decode('utf-8'),
+ )
+
+ def test_write_gzip(self):
+ """Test write in TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+
+ cli = InfluxDBClient(database='db', gzip=True)
+ cli.write(
+ {"database": "mydb",
+ "retentionPolicy": "mypolicy",
+ "points": [{"measurement": "cpu_load_short",
+ "tags": {"host": "server01",
+ "region": "us-west"},
+ "time": "2009-11-10T23:00:00Z",
+ "fields": {"value": 0.64}}]}
+ )
+
+ compressed = io.BytesIO()
+ with gzip.GzipFile(
+ compresslevel=9,
+ fileobj=compressed,
+ mode='w'
+ ) as f:
+ f.write(
+ b"cpu_load_short,host=server01,region=us-west "
+ b"value=0.64 1257894000000000000\n"
+ )
+
+ self.assertEqual(
+ m.last_request.body,
+ compressed.getvalue(),
+ )
+
+ def test_write_points_gzip(self):
+ """Test write points for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+
+ cli = InfluxDBClient(database='db', gzip=True)
+ cli.write_points(
+ self.dummy_points,
+ )
+
+ compressed = io.BytesIO()
+ with gzip.GzipFile(
+ compresslevel=9,
+ fileobj=compressed,
+ mode='w'
+ ) as f:
+ f.write(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 1257894000123456000\n'
+ )
+ self.assertEqual(
+ m.last_request.body,
+ compressed.getvalue(),
+ )
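+
+    # The two gzip tests above compare compressed bytes directly, which
+    # assumes both sides produce identical gzip headers. A sketch of a
+    # header-insensitive alternative decompresses the captured body:
+    @staticmethod
+    def _gunzip(body):
+        """Return the decompressed bytes of a gzipped request body."""
+        with gzip.GzipFile(fileobj=io.BytesIO(body), mode='r') as f:
+            return f.read()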
+
+ def test_write_points_toplevel_attributes(self):
+ """Test write points attrs for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+
+ cli = InfluxDBClient(database='db')
+ cli.write_points(
+ self.dummy_points,
+ database='testdb',
+ tags={"tag": "hello"},
+ retention_policy="somepolicy"
+ )
+ self.assertEqual(
+ 'cpu_load_short,host=server01,region=us-west,tag=hello '
+ 'value=0.64 1257894000123456000\n',
+ m.last_request.body.decode('utf-8'),
+ )
+
+ def test_write_points_batch(self):
+ """Test write points batch for TestInfluxDBClient object."""
+ dummy_points = [
+ {"measurement": "cpu_usage", "tags": {"unit": "percent"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
+ {"measurement": "network", "tags": {"direction": "in"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
+ {"measurement": "network", "tags": {"direction": "out"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
+ ]
+ expected_last_body = (
+ "network,direction=out,host=server01,region=us-west "
+ "value=12.0 1257894000000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+ cli = InfluxDBClient(database='db')
+ cli.write_points(points=dummy_points,
+ database='db',
+ tags={"host": "server01",
+ "region": "us-west"},
+ batch_size=2)
+ self.assertEqual(m.call_count, 2)
+ self.assertEqual(expected_last_body,
+ m.last_request.body.decode('utf-8'))
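+
+    # Sanity arithmetic for the call count asserted above (a sketch that
+    # assumes the client splits points into consecutive chunks of
+    # ``batch_size``): 3 points with batch_size=2 yield 2 requests.
+    @staticmethod
+    def _sketch_batch_count(num_points=3, batch_size=2):
+        """Return the expected number of write requests for a batch."""
+        return (num_points + batch_size - 1) // batch_size  # ceiling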
+
+ def test_write_points_batch_generator(self):
+ """Test write points batch from a generator for TestInfluxDBClient."""
+ dummy_points = [
+ {"measurement": "cpu_usage", "tags": {"unit": "percent"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
+ {"measurement": "network", "tags": {"direction": "in"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
+ {"measurement": "network", "tags": {"direction": "out"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
+ ]
+ dummy_points_generator = (point for point in dummy_points)
+ expected_last_body = (
+ "network,direction=out,host=server01,region=us-west "
+ "value=12.0 1257894000000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+ cli = InfluxDBClient(database='db')
+ cli.write_points(points=dummy_points_generator,
+ database='db',
+ tags={"host": "server01",
+ "region": "us-west"},
+ batch_size=2)
+ self.assertEqual(m.call_count, 2)
+ self.assertEqual(expected_last_body,
+ m.last_request.body.decode('utf-8'))
+
+ def test_write_points_udp(self):
+ """Test write points UDP for TestInfluxDBClient object."""
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ port = random.randint(4000, 8000)
+ s.bind(('0.0.0.0', port))
+
+ cli = InfluxDBClient(
+ 'localhost', 8086, 'root', 'root',
+ 'test', use_udp=True, udp_port=port
+ )
+ cli.write_points(self.dummy_points)
+
+ received_data, addr = s.recvfrom(1024)
+
+ self.assertEqual(
+ 'cpu_load_short,host=server01,region=us-west '
+ 'value=0.64 1257894000123456000\n',
+ received_data.decode()
+ )
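+
+    # The randomly chosen port above can collide with a port already in
+    # use. A collision-free sketch (assuming the test only needs *some*
+    # free UDP port) binds port 0 and asks the OS what it assigned:
+    @staticmethod
+    def _bind_free_udp_port():
+        """Bind a UDP socket to an OS-assigned free port."""
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        s.bind(('0.0.0.0', 0))  # port 0: the OS picks a free port
+        return s, s.getsockname()[1]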
+
+ @raises(Exception)
+ def test_write_points_fails(self):
+ """Test write points fail for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
+ with _mocked_session(cli, 'post', 500):
+ cli.write_points([])
+
+ def test_write_points_with_precision(self):
+ """Test write points with precision for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+
+ cli = InfluxDBClient(database='db')
+
+ cli.write_points(self.dummy_points, time_precision='n')
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 1257894000123456000\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='u')
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 1257894000123456\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='ms')
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 1257894000123\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='s')
+ self.assertEqual(
+ b"cpu_load_short,host=server01,region=us-west "
+ b"value=0.64 1257894000\n",
+ m.last_request.body,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='m')
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 20964900\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='h')
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 349415\n',
+ m.last_request.body,
+ )
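+
+    # The expected bodies above are integer truncations of the fixture's
+    # nanosecond epoch timestamp; a quick sanity check of the numbers:
+    @staticmethod
+    def _sketch_precision_arithmetic():
+        """Verify the fixture timestamps used by the precision tests."""
+        ns = 1257894000123456000
+        assert ns // 10 ** 3 == 1257894000123456      # 'u'
+        assert ns // 10 ** 6 == 1257894000123         # 'ms'
+        assert ns // 10 ** 9 == 1257894000            # 's'
+        assert ns // 10 ** 9 // 60 == 20964900        # 'm'
+        assert ns // 10 ** 9 // 3600 == 349415        # 'h'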
+
+ def test_write_points_with_consistency(self):
+ """Test write points with consistency for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ 'http://localhost:8086/write',
+ status_code=204
+ )
+
+ cli = InfluxDBClient(database='db')
+
+ cli.write_points(self.dummy_points, consistency='any')
+ self.assertEqual(
+ m.last_request.qs,
+ {'db': ['db'], 'consistency': ['any']}
+ )
+
+ def test_write_points_with_precision_udp(self):
+ """Test write points with precision for TestInfluxDBClient object."""
+ s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+ port = random.randint(4000, 8000)
+ s.bind(('0.0.0.0', port))
+
+ cli = InfluxDBClient(
+ 'localhost', 8086, 'root', 'root',
+ 'test', use_udp=True, udp_port=port
+ )
+
+ cli.write_points(self.dummy_points, time_precision='n')
+ received_data, addr = s.recvfrom(1024)
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 1257894000123456000\n',
+ received_data,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='u')
+ received_data, addr = s.recvfrom(1024)
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 1257894000123456\n',
+ received_data,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='ms')
+ received_data, addr = s.recvfrom(1024)
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 1257894000123\n',
+ received_data,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='s')
+ received_data, addr = s.recvfrom(1024)
+ self.assertEqual(
+ b"cpu_load_short,host=server01,region=us-west "
+ b"value=0.64 1257894000\n",
+ received_data,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='m')
+ received_data, addr = s.recvfrom(1024)
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 20964900\n',
+ received_data,
+ )
+
+ cli.write_points(self.dummy_points, time_precision='h')
+ received_data, addr = s.recvfrom(1024)
+ self.assertEqual(
+ b'cpu_load_short,host=server01,region=us-west '
+ b'value=0.64 349415\n',
+ received_data,
+ )
+
+ def test_write_points_bad_precision(self):
+ """Test write points w/bad precision TestInfluxDBClient object."""
+ cli = InfluxDBClient()
+ with self.assertRaisesRegexp(
+ Exception,
+ "Invalid time precision is given. "
+ "\(use 'n', 'u', 'ms', 's', 'm' or 'h'\)"
+ ):
+ cli.write_points(
+ self.dummy_points,
+ time_precision='g'
+ )
+
+ def test_write_points_bad_consistency(self):
+ """Test write points w/bad consistency value."""
+ cli = InfluxDBClient()
+ with self.assertRaises(ValueError):
+ cli.write_points(
+ self.dummy_points,
+ consistency='boo'
+ )
+
+ @raises(Exception)
+ def test_write_points_with_precision_fails(self):
+ """Test write points w/precision fail for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
+ with _mocked_session(cli, 'post', 500):
+ cli.write_points_with_precision([])
+
+ def test_query(self):
+ """Test query method for TestInfluxDBClient object."""
+ example_response = (
+ '{"results": [{"series": [{"measurement": "sdfsdfsdf", '
+ '"columns": ["time", "value"], "values": '
+ '[["2009-11-10T23:00:00Z", 0.64]]}]}, {"series": '
+ '[{"measurement": "cpu_load_short", "columns": ["time", "value"], '
+ '"values": [["2009-11-10T23:00:00Z", 0.64]]}]}]}'
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ rs = self.cli.query('select * from foo')
+
+ self.assertListEqual(
+ list(rs[0].get_points()),
+ [{'value': 0.64, 'time': '2009-11-10T23:00:00Z'}]
+ )
+
+ def test_query_msgpack(self):
+ """Test query method with a messagepack response."""
+ example_response = bytes(bytearray.fromhex(
+ "81a7726573756c74739182ac73746174656d656e745f696400a673657269"
+ "65739183a46e616d65a161a7636f6c756d6e7392a474696d65a176a67661"
+ "6c7565739192c70c05000000005d26178a019096c8cb3ff0000000000000"
+ ))
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ request_headers={"Accept": "application/x-msgpack"},
+ headers={"Content-Type": "application/x-msgpack"},
+ content=example_response
+ )
+ rs = self.cli.query('select * from a')
+
+ self.assertListEqual(
+ list(rs.get_points()),
+ [{'v': 1.0, 'time': '2019-07-10T16:51:22.026253Z'}]
+ )
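+
+    # For reference, the canned bytes above decode with the msgpack
+    # package (assumed to be installed, since the client's msgpack
+    # support depends on it); InfluxDB timestamps travel as ext type 5:
+    @staticmethod
+    def _sketch_decode_msgpack(payload):
+        """Decode a msgpack response body; timestamps stay as ExtType."""
+        import msgpack
+        return msgpack.unpackb(payload, raw=False)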
+
+ def test_select_into_post(self):
+ """Test SELECT.*INTO is POSTed."""
+ example_response = (
+ '{"results": [{"series": [{"measurement": "sdfsdfsdf", '
+ '"columns": ["time", "value"], "values": '
+ '[["2009-11-10T23:00:00Z", 0.64]]}]}, {"series": '
+ '[{"measurement": "cpu_load_short", "columns": ["time", "value"], '
+ '"values": [["2009-11-10T23:00:00Z", 0.64]]}]}]}'
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ rs = self.cli.query('select * INTO newmeas from foo')
+
+ self.assertListEqual(
+ list(rs[0].get_points()),
+ [{'value': 0.64, 'time': '2009-11-10T23:00:00Z'}]
+ )
+
+ @unittest.skip('Not implemented for 0.9')
+ def test_query_chunked(self):
+ """Test chunked query for TestInfluxDBClient object."""
+ cli = InfluxDBClient(database='db')
+ example_object = {
+ 'points': [
+ [1415206250119, 40001, 667],
+ [1415206244555, 30001, 7],
+ [1415206228241, 20001, 788],
+ [1415206212980, 10001, 555],
+ [1415197271586, 10001, 23]
+ ],
+ 'measurement': 'foo',
+ 'columns': [
+ 'time',
+ 'sequence_number',
+ 'val'
+ ]
+ }
+ example_response = \
+ json.dumps(example_object) + json.dumps(example_object)
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/db/db/series",
+ text=example_response
+ )
+
+ self.assertListEqual(
+ cli.query('select * from foo', chunked=True),
+ [example_object, example_object]
+ )
+
+ @raises(Exception)
+ def test_query_fail(self):
+ """Test query failed for TestInfluxDBClient object."""
+ with _mocked_session(self.cli, 'get', 401):
+ self.cli.query('select column_one from foo;')
+
+ def test_ping(self):
+ """Test ping querying InfluxDB version."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/ping",
+ status_code=204,
+ headers={'X-Influxdb-Version': '1.2.3'}
+ )
+ version = self.cli.ping()
+ self.assertEqual(version, '1.2.3')
+
+ def test_create_database(self):
+ """Test create database for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ self.cli.create_database('new_db')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create database "new_db"'
+ )
+
+ def test_create_numeric_named_database(self):
+ """Test create db w/numeric name for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ self.cli.create_database('123')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create database "123"'
+ )
+
+ @raises(Exception)
+ def test_create_database_fails(self):
+ """Test create database fail for TestInfluxDBClient object."""
+ with _mocked_session(self.cli, 'post', 401):
+ self.cli.create_database('new_db')
+
+ def test_drop_database(self):
+ """Test drop database for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ self.cli.drop_database('new_db')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop database "new_db"'
+ )
+
+ def test_drop_measurement(self):
+ """Test drop measurement for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ self.cli.drop_measurement('new_measurement')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop measurement "new_measurement"'
+ )
+
+ def test_drop_numeric_named_database(self):
+ """Test drop numeric db for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ self.cli.drop_database('123')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop database "123"'
+ )
+
+ def test_get_list_database(self):
+ """Test get list of databases for TestInfluxDBClient object."""
+ data = {'results': [
+ {'series': [
+ {'name': 'databases',
+ 'values': [
+ ['new_db_1'],
+ ['new_db_2']],
+ 'columns': ['name']}]}
+ ]}
+
+ with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ self.cli.get_list_database(),
+ [{'name': 'new_db_1'}, {'name': 'new_db_2'}]
+ )
+
+ @raises(Exception)
+ def test_get_list_database_fails(self):
+ """Test get list of dbs fail for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 401):
+ cli.get_list_database()
+
+ def test_get_list_measurements(self):
+ """Test get list of measurements for TestInfluxDBClient object."""
+ data = {
+ "results": [{
+ "series": [
+ {"name": "measurements",
+ "columns": ["name"],
+ "values": [["cpu"], ["disk"]
+ ]}]}
+ ]
+ }
+
+ with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ self.cli.get_list_measurements(),
+ [{'name': 'cpu'}, {'name': 'disk'}]
+ )
+
+ def test_get_list_series(self):
+ """Test get a list of series from the database."""
+ data = {'results': [
+ {'series': [
+ {
+ 'values': [
+ ['cpu_load_short,host=server01,region=us-west'],
+ ['memory_usage,host=server02,region=us-east']],
+ 'columns': ['key']
+ }
+ ]}
+ ]}
+
+ with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ self.cli.get_list_series(),
+ ['cpu_load_short,host=server01,region=us-west',
+ 'memory_usage,host=server02,region=us-east'])
+
+ def test_get_list_series_with_measurement(self):
+ """Test get a list of series from the database by filter."""
+ data = {'results': [
+ {'series': [
+ {
+ 'values': [
+ ['cpu_load_short,host=server01,region=us-west']],
+ 'columns': ['key']
+ }
+ ]}
+ ]}
+
+ with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ self.cli.get_list_series(measurement='cpu_load_short'),
+ ['cpu_load_short,host=server01,region=us-west'])
+
+ def test_get_list_series_with_tags(self):
+ """Test get a list of series from the database by tags."""
+ data = {'results': [
+ {'series': [
+ {
+ 'values': [
+ ['cpu_load_short,host=server01,region=us-west']],
+ 'columns': ['key']
+ }
+ ]}
+ ]}
+
+ with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ self.cli.get_list_series(tags={'region': 'us-west'}),
+ ['cpu_load_short,host=server01,region=us-west'])
+
+ @raises(Exception)
+ def test_get_list_series_fails(self):
+ """Test get a list of series from the database but fail."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 401):
+ cli.get_list_series()
+
+ def test_create_retention_policy_default(self):
+ """Test create default ret policy for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.create_retention_policy(
+ 'somename', '1d', 4, default=True, database='db'
+ )
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create retention policy "somename" on '
+ '"db" duration 1d replication 4 shard duration 0s default'
+ )
+
+ def test_create_retention_policy(self):
+ """Test create retention policy for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.create_retention_policy(
+ 'somename', '1d', 4, database='db'
+ )
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create retention policy "somename" on '
+ '"db" duration 1d replication 4 shard duration 0s'
+ )
+
+ def test_create_retention_policy_shard_duration(self):
+ """Test create retention policy with a custom shard duration."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.create_retention_policy(
+ 'somename2', '1d', 4, database='db',
+ shard_duration='1h'
+ )
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create retention policy "somename2" on '
+ '"db" duration 1d replication 4 shard duration 1h'
+ )
+
+ def test_create_retention_policy_shard_duration_default(self):
+ """Test create retention policy with a default shard duration."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.create_retention_policy(
+ 'somename3', '1d', 4, database='db',
+ shard_duration='1h', default=True
+ )
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create retention policy "somename3" on '
+ '"db" duration 1d replication 4 shard duration 1h '
+ 'default'
+ )
+
+ def test_alter_retention_policy(self):
+ """Test alter retention policy for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ # Test alter duration
+ self.cli.alter_retention_policy('somename', 'db',
+ duration='4d')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" duration 4d'
+ )
+ # Test alter replication
+ self.cli.alter_retention_policy('somename', 'db',
+ replication=4)
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" replication 4'
+ )
+
+ # Test alter shard duration
+ self.cli.alter_retention_policy('somename', 'db',
+ shard_duration='1h')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" shard duration 1h'
+ )
+
+ # Test alter default
+ self.cli.alter_retention_policy('somename', 'db',
+ default=True)
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" default'
+ )
+
+ @raises(Exception)
+ def test_alter_retention_policy_invalid(self):
+ """Test invalid alter ret policy for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 400):
+            cli.alter_retention_policy('somename', 'db')
+
+ def test_drop_retention_policy(self):
+ """Test drop retention policy for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.drop_retention_policy('somename', 'db')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop retention policy "somename" on "db"'
+ )
+
+ @raises(Exception)
+ def test_drop_retention_policy_fails(self):
+ """Test failed drop ret policy for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'delete', 401):
+ cli.drop_retention_policy('default', 'db')
+
+ def test_get_list_retention_policies(self):
+ """Test get retention policies for TestInfluxDBClient object."""
+ example_response = \
+ '{"results": [{"series": [{"values": [["fsfdsdf", "24h0m0s", 2]],'\
+ ' "columns": ["name", "duration", "replicaN"]}]}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.assertListEqual(
+ self.cli.get_list_retention_policies("db"),
+ [{'duration': '24h0m0s',
+ 'name': 'fsfdsdf', 'replicaN': 2}]
+ )
+
+ @mock.patch('requests.Session.request')
+ def test_request_retry(self, mock_request):
+ """Test that two connection errors will be handled."""
+ class CustomMock(object):
+ """Create custom mock object for test."""
+
+ def __init__(self):
+ self.i = 0
+
+ def connection_error(self, *args, **kwargs):
+ """Handle a connection error for the CustomMock object."""
+ self.i += 1
+
+ if self.i < 3:
+ raise requests.exceptions.ConnectionError
+
+ r = requests.Response()
+ r.status_code = 204
+ return r
+
+ mock_request.side_effect = CustomMock().connection_error
+
+ cli = InfluxDBClient(database='db')
+ cli.write_points(
+ self.dummy_points
+ )
+
+ @mock.patch('requests.Session.request')
+ def test_request_retry_raises(self, mock_request):
+ """Test that three requests errors will not be handled."""
+ class CustomMock(object):
+ """Create custom mock object for test."""
+
+ def __init__(self):
+ self.i = 0
+
+ def connection_error(self, *args, **kwargs):
+ """Handle a connection error for the CustomMock object."""
+ self.i += 1
+
+ if self.i < 4:
+ raise requests.exceptions.HTTPError
+ else:
+ r = requests.Response()
+ r.status_code = 200
+ return r
+
+ mock_request.side_effect = CustomMock().connection_error
+
+ cli = InfluxDBClient(database='db')
+
+ with self.assertRaises(requests.exceptions.HTTPError):
+ cli.write_points(self.dummy_points)
+
+ @mock.patch('requests.Session.request')
+ def test_random_request_retry(self, mock_request):
+ """Test that a random number of connection errors will be handled."""
+ class CustomMock(object):
+ """Create custom mock object for test."""
+
+ def __init__(self, retries):
+ self.i = 0
+ self.retries = retries
+
+ def connection_error(self, *args, **kwargs):
+ """Handle a connection error for the CustomMock object."""
+ self.i += 1
+
+ if self.i < self.retries:
+ raise requests.exceptions.ConnectionError
+ else:
+ r = requests.Response()
+ r.status_code = 204
+ return r
+
+ retries = random.randint(1, 5)
+ mock_request.side_effect = CustomMock(retries).connection_error
+
+ cli = InfluxDBClient(database='db', retries=retries)
+ cli.write_points(self.dummy_points)
+
+ @mock.patch('requests.Session.request')
+ def test_random_request_retry_raises(self, mock_request):
+ """Test a random number of conn errors plus one will not be handled."""
+ class CustomMock(object):
+ """Create custom mock object for test."""
+
+ def __init__(self, retries):
+ self.i = 0
+ self.retries = retries
+
+ def connection_error(self, *args, **kwargs):
+ """Handle a connection error for the CustomMock object."""
+ self.i += 1
+
+ if self.i < self.retries + 1:
+ raise requests.exceptions.ConnectionError
+ else:
+ r = requests.Response()
+ r.status_code = 200
+ return r
+
+ retries = random.randint(1, 5)
+ mock_request.side_effect = CustomMock(retries).connection_error
+
+ cli = InfluxDBClient(database='db', retries=retries)
+
+ with self.assertRaises(requests.exceptions.ConnectionError):
+ cli.write_points(self.dummy_points)
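+
+    # The retry tests above pin down the contract: with ``retries=N`` the
+    # client makes at most N attempts and re-raises the final error. A
+    # minimal sketch of such a loop (not the client's actual code):
+    @staticmethod
+    def _sketch_retry_loop(send, retries):
+        """Call ``send()`` up to ``retries`` times on connection errors."""
+        for attempt in range(retries):
+            try:
+                return send()
+            except requests.exceptions.ConnectionError:
+                if attempt == retries - 1:
+                    raise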
+
+ def test_get_list_users(self):
+ """Test get users for TestInfluxDBClient object."""
+ example_response = (
+ '{"results":[{"series":[{"columns":["user","admin"],'
+ '"values":[["test",false]]}]}]}'
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+
+ self.assertListEqual(
+ self.cli.get_list_users(),
+ [{'user': 'test', 'admin': False}]
+ )
+
+ def test_get_list_users_empty(self):
+ """Test get empty userlist for TestInfluxDBClient object."""
+ example_response = (
+ '{"results":[{"series":[{"columns":["user","admin"]}]}]}'
+ )
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+
+ self.assertListEqual(self.cli.get_list_users(), [])
+
+ def test_grant_admin_privileges(self):
+ """Test grant admin privs for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.grant_admin_privileges('test')
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'grant all privileges to "test"'
+ )
+
+ @raises(Exception)
+ def test_grant_admin_privileges_invalid(self):
+ """Test grant invalid admin privs for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 400):
+            cli.grant_admin_privileges('')
+
+ def test_revoke_admin_privileges(self):
+ """Test revoke admin privs for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.revoke_admin_privileges('test')
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'revoke all privileges from "test"'
+ )
+
+ @raises(Exception)
+ def test_revoke_admin_privileges_invalid(self):
+ """Test revoke invalid admin privs for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 400):
+            cli.revoke_admin_privileges('')
+
+ def test_grant_privilege(self):
+ """Test grant privs for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.grant_privilege('read', 'testdb', 'test')
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'grant read on "testdb" to "test"'
+ )
+
+ @raises(Exception)
+ def test_grant_privilege_invalid(self):
+ """Test grant invalid privs for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 400):
+            cli.grant_privilege('', 'testdb', 'test')
+
+ def test_revoke_privilege(self):
+ """Test revoke privs for TestInfluxDBClient object."""
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.cli.revoke_privilege('read', 'testdb', 'test')
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'revoke read on "testdb" from "test"'
+ )
+
+ @raises(Exception)
+ def test_revoke_privilege_invalid(self):
+ """Test revoke invalid privs for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 400):
+            cli.revoke_privilege('', 'testdb', 'test')
+
+ def test_get_list_privileges(self):
+ """Test get list of privs for TestInfluxDBClient object."""
+ data = {'results': [
+ {'series': [
+ {'columns': ['database', 'privilege'],
+ 'values': [
+ ['db1', 'READ'],
+ ['db2', 'ALL PRIVILEGES'],
+ ['db3', 'NO PRIVILEGES']]}
+ ]}
+ ]}
+
+ with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ self.cli.get_list_privileges('test'),
+ [{'database': 'db1', 'privilege': 'READ'},
+ {'database': 'db2', 'privilege': 'ALL PRIVILEGES'},
+ {'database': 'db3', 'privilege': 'NO PRIVILEGES'}]
+ )
+
+ @raises(Exception)
+ def test_get_list_privileges_fails(self):
+ """Test failed get list of privs for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 401):
+ cli.get_list_privileges('test')
+
+ def test_get_list_continuous_queries(self):
+ """Test getting a list of continuous queries."""
+ data = {
+ "results": [
+ {
+ "statement_id": 0,
+ "series": [
+ {
+ "name": "testdb01",
+ "columns": ["name", "query"],
+ "values": [["testname01", "testquery01"],
+ ["testname02", "testquery02"]]
+ },
+ {
+ "name": "testdb02",
+ "columns": ["name", "query"],
+ "values": [["testname03", "testquery03"]]
+ },
+ {
+ "name": "testdb03",
+ "columns": ["name", "query"]
+ }
+ ]
+ }
+ ]
+ }
+
+ with _mocked_session(self.cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ self.cli.get_list_continuous_queries(),
+ [
+ {
+ 'testdb01': [
+ {'name': 'testname01', 'query': 'testquery01'},
+ {'name': 'testname02', 'query': 'testquery02'}
+ ]
+ },
+ {
+ 'testdb02': [
+ {'name': 'testname03', 'query': 'testquery03'}
+ ]
+ },
+ {
+ 'testdb03': []
+ }
+ ]
+ )
+
+ @raises(Exception)
+ def test_get_list_continuous_queries_fails(self):
+ """Test failing to get a list of continuous queries."""
+ with _mocked_session(self.cli, 'get', 400):
+ self.cli.get_list_continuous_queries()
+
+ def test_create_continuous_query(self):
+ """Test continuous query creation."""
+ data = {"results": [{}]}
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=json.dumps(data)
+ )
+ query = 'SELECT count("value") INTO "6_months"."events" FROM ' \
+ '"events" GROUP BY time(10m)'
+ self.cli.create_continuous_query('cq_name', query, 'db_name')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create continuous query "cq_name" on "db_name" begin select '
+ 'count("value") into "6_months"."events" from "events" group '
+ 'by time(10m) end'
+ )
+ self.cli.create_continuous_query('cq_name', query, 'db_name',
+ 'EVERY 10s FOR 2m')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create continuous query "cq_name" on "db_name" resample '
+ 'every 10s for 2m begin select count("value") into '
+ '"6_months"."events" from "events" group by time(10m) end'
+ )
+
+ @raises(Exception)
+ def test_create_continuous_query_fails(self):
+ """Test failing to create a continuous query."""
+ with _mocked_session(self.cli, 'get', 400):
+ self.cli.create_continuous_query('cq_name', 'select', 'db_name')
+
+ def test_drop_continuous_query(self):
+ """Test dropping a continuous query."""
+ data = {"results": [{}]}
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=json.dumps(data)
+ )
+ self.cli.drop_continuous_query('cq_name', 'db_name')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop continuous query "cq_name" on "db_name"'
+ )
+
+ @raises(Exception)
+ def test_drop_continuous_query_fails(self):
+ """Test failing to drop a continuous query."""
+ with _mocked_session(self.cli, 'get', 400):
+ self.cli.drop_continuous_query('cq_name', 'db_name')
+
+ def test_invalid_port_fails(self):
+ """Test invalid port fail for TestInfluxDBClient object."""
+ with self.assertRaises(ValueError):
+ InfluxDBClient('host', '80/redir', 'username', 'password')
+
+ def test_chunked_response(self):
+ """Test chunked response for TestInfluxDBClient object."""
+ example_response = \
+ u'{"results":[{"statement_id":0,"series":[{"columns":["key"],' \
+ '"values":[["cpu"],["memory"],["iops"],["network"]],"partial":' \
+ 'true}],"partial":true}]}\n{"results":[{"statement_id":0,' \
+ '"series":[{"columns":["key"],"values":[["qps"],["uptime"],' \
+ '["df"],["mount"]]}]}]}\n'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ response = self.cli.query('show series',
+ chunked=True, chunk_size=4)
+ res = list(response)
+            self.assertEqual(len(res), 2)
+ self.assertEqual(res[0].__repr__(), ResultSet(
+ {'series': [{
+ 'columns': ['key'],
+ 'values': [['cpu'], ['memory'], ['iops'], ['network']]
+ }]}).__repr__())
+ self.assertEqual(res[1].__repr__(), ResultSet(
+ {'series': [{
+ 'columns': ['key'],
+ 'values': [['qps'], ['uptime'], ['df'], ['mount']]
+ }]}).__repr__())
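+
+    # The chunked wire format above is newline-delimited JSON: every
+    # line of the body is a complete JSON document. A parsing sketch:
+    @staticmethod
+    def _sketch_split_chunks(raw):
+        """Split a chunked response body into parsed JSON documents."""
+        return [json.loads(line) for line in raw.splitlines() if line]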
+
+ def test_auth_default(self):
+ """Test auth with default settings."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/ping",
+ status_code=204,
+ headers={'X-Influxdb-Version': '1.2.3'}
+ )
+
+ cli = InfluxDBClient()
+ cli.ping()
+
+ self.assertEqual(m.last_request.headers["Authorization"],
+ "Basic cm9vdDpyb290")
+
+ def test_auth_username_password(self):
+ """Test auth with custom username and password."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/ping",
+ status_code=204,
+ headers={'X-Influxdb-Version': '1.2.3'}
+ )
+
+ cli = InfluxDBClient(username='my-username',
+ password='my-password')
+ cli.ping()
+
+ self.assertEqual(m.last_request.headers["Authorization"],
+ "Basic bXktdXNlcm5hbWU6bXktcGFzc3dvcmQ=")
+
+ def test_auth_username_password_none(self):
+ """Test auth with not defined username or password."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/ping",
+ status_code=204,
+ headers={'X-Influxdb-Version': '1.2.3'}
+ )
+
+ cli = InfluxDBClient(username=None, password=None)
+ cli.ping()
+            self.assertNotIn('Authorization', m.last_request.headers)
+
+ cli = InfluxDBClient(username=None)
+ cli.ping()
+            self.assertNotIn('Authorization', m.last_request.headers)
+
+ cli = InfluxDBClient(password=None)
+ cli.ping()
+            self.assertNotIn('Authorization', m.last_request.headers)
+
+ def test_auth_token(self):
+ """Test auth with custom authorization header."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/ping",
+ status_code=204,
+ headers={'X-Influxdb-Version': '1.2.3'}
+ )
+
+ cli = InfluxDBClient(username=None, password=None,
+ headers={"Authorization": "my-token"})
+ cli.ping()
+ self.assertEqual(m.last_request.headers["Authorization"],
+ "my-token")
+
+ def test_custom_socket_options(self):
+ """Test custom socket options."""
+ test_socket_options = HTTPConnection.default_socket_options + \
+ [(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),
+ (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 60),
+ (socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 15)]
+
+ cli = InfluxDBClient(username=None, password=None,
+ socket_options=test_socket_options)
+
+        self.assertEqual(cli._session.adapters.get("http://").socket_options,
+                         test_socket_options)
+        self.assertEqual(cli._session.adapters.get("http://").poolmanager.
+                         connection_pool_kw.get("socket_options"),
+                         test_socket_options)
+
+ connection_pool = cli._session.adapters.get("http://").poolmanager \
+ .connection_from_url(
+ url="http://localhost:8086")
+ new_connection = connection_pool._new_conn()
+        self.assertEqual(new_connection.socket_options, test_socket_options)
+
+ def test_none_socket_options(self):
+ """Test default socket options."""
+ cli = InfluxDBClient(username=None, password=None)
+        self.assertIsNone(cli._session.adapters.get("http://").socket_options)
+ connection_pool = cli._session.adapters.get("http://").poolmanager \
+ .connection_from_url(
+ url="http://localhost:8086")
+ new_connection = connection_pool._new_conn()
+        self.assertEqual(new_connection.socket_options,
+                         HTTPConnection.default_socket_options)
+
+
+class FakeClient(InfluxDBClient):
+ """Set up a fake client instance of InfluxDBClient."""
+
+ def __init__(self, *args, **kwargs):
+ """Initialize an instance of the FakeClient object."""
+ super(FakeClient, self).__init__(*args, **kwargs)
+
+ def query(self,
+ query,
+ params=None,
+ expected_response_code=200,
+ database=None):
+ """Query data from the FakeClient object."""
+ if query == 'Fail':
+ raise Exception("Fail")
+ elif query == 'Fail once' and self._host == 'host1':
+ raise Exception("Fail Once")
+        elif query == 'Fail twice' and self._host in ('host1', 'host2'):
+ raise Exception("Fail Twice")
+ else:
+ return "Success"
diff --git a/influxdb/tests/dataframe_client_test.py b/influxdb/tests/dataframe_client_test.py
new file mode 100644
index 00000000..87b8e0d8
--- /dev/null
+++ b/influxdb/tests/dataframe_client_test.py
@@ -0,0 +1,1348 @@
+# -*- coding: utf-8 -*-
+"""Unit tests for misc module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from datetime import timedelta
+
+import json
+import unittest
+import warnings
+import requests_mock
+
+from nose.tools import raises
+from influxdb.tests import skip_if_pypy, using_pypy
+
+from .client_test import _mocked_session
+
+if not using_pypy:
+ import pandas as pd
+ from pandas.util.testing import assert_frame_equal
+ from influxdb import DataFrameClient
+ import numpy as np
+
+
+@skip_if_pypy
+class TestDataFrameClient(unittest.TestCase):
+ """Set up a test DataFrameClient object."""
+
+ def setUp(self):
+ """Instantiate a TestDataFrameClient object."""
+ # By default, raise exceptions on warnings
+ warnings.simplefilter('error', FutureWarning)
+
+ def test_write_points_from_dataframe(self):
+ """Test write points from df in TestDataFrameClient object."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ expected = (
+ b"foo column_one=\"1\",column_two=1i,column_three=1.0 0\n"
+ b"foo column_one=\"2\",column_two=2i,column_three=2.0 "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo')
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None)
+ self.assertEqual(m.last_request.body, expected)
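+
+    # The expected body above follows line protocol typing: strings are
+    # quoted, ints carry an ``i`` suffix, floats stay bare, and the final
+    # integer is the timestamp in nanoseconds. A hand-rolled sketch of
+    # the first fixture row (assumes insertion-ordered dicts, Python 3.7+):
+    @staticmethod
+    def _sketch_line_protocol_row():
+        """Render the first fixture row as a line protocol string."""
+        fields = {'column_one': '"1"', 'column_two': '1i',
+                  'column_three': '1.0'}
+        pairs = ','.join('%s=%s' % kv for kv in fields.items())
+        return 'foo ' + pairs + ' 0'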
+
+ def test_dataframe_write_points_with_whitespace_measurement(self):
+ """write_points should escape white space in measurements."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ expected = (
+ b"meas\\ with\\ space "
+ b"column_one=\"1\",column_two=1i,column_three=1.0 0\n"
+ b"meas\\ with\\ space "
+ b"column_one=\"2\",column_two=2i,column_three=2.0 "
+ b"3600000000000\n"
+ )
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, 'meas with space')
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_dataframe_write_points_with_whitespace_in_column_names(self):
+ """write_points should escape white space in column names."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column one", "column two",
+ "column three"])
+ expected = (
+ b"foo column\\ one=\"1\",column\\ two=1i,column\\ three=1.0 0\n"
+ b"foo column\\ one=\"2\",column\\ two=2i,column\\ three=2.0 "
+ b"3600000000000\n"
+ )
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, 'foo')
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_none(self):
+ """Test write points from df in TestDataFrameClient object."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", None, 1.0], ["2", 2.0, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ expected = (
+ b"foo column_one=\"1\",column_three=1.0 0\n"
+ b"foo column_one=\"2\",column_two=2.0,column_three=2.0 "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo')
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None)
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_line_of_none(self):
+ """Test write points from df in TestDataFrameClient object."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[[None, None, None], ["2", 2.0, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ expected = (
+ b"foo column_one=\"2\",column_two=2.0,column_three=2.0 "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo')
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None)
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_all_none(self):
+ """Test write points from df in TestDataFrameClient object."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[[None, None, None], [None, None, None]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ expected = (
+ b"\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo')
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None)
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_in_batches(self):
+ """Test write points in batch from df in TestDataFrameClient object."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+ self.assertTrue(cli.write_points(dataframe, "foo", batch_size=1))
+
+ def test_write_points_from_dataframe_with_tag_columns(self):
+ """Test write points from df w/tag in TestDataFrameClient object."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, 1.0],
+ ['red', 0, "2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["tag_one", "tag_two", "column_one",
+ "column_two", "column_three"])
+ expected = (
+ b"foo,tag_one=blue,tag_two=1 "
+ b"column_one=\"1\",column_two=1i,column_three=1.0 "
+ b"0\n"
+ b"foo,tag_one=red,tag_two=0 "
+ b"column_one=\"2\",column_two=2i,column_three=2.0 "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo',
+ tag_columns=['tag_one', 'tag_two'])
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo',
+ tag_columns=['tag_one', 'tag_two'], tags=None)
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_tag_cols_and_global_tags(self):
+ """Test write points from df w/tag + cols in TestDataFrameClient."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, 1.0],
+ ['red', 0, "2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["tag_one", "tag_two", "column_one",
+ "column_two", "column_three"])
+ expected = (
+ b"foo,global_tag=value,tag_one=blue,tag_two=1 "
+ b"column_one=\"1\",column_two=1i,column_three=1.0 "
+ b"0\n"
+ b"foo,global_tag=value,tag_one=red,tag_two=0 "
+ b"column_one=\"2\",column_two=2i,column_three=2.0 "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo',
+ tag_columns=['tag_one', 'tag_two'],
+ tags={'global_tag': 'value'})
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_tag_cols_and_defaults(self):
+ """Test default write points from df w/tag in TestDataFrameClient."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, 1.0, 'hot'],
+ ['red', 0, "2", 2, 2.0, 'cold']],
+ index=[now, now + timedelta(hours=1)],
+ columns=["tag_one", "tag_two", "column_one",
+ "column_two", "column_three",
+ "tag_three"])
+ expected_tags_and_fields = (
+ b"foo,tag_one=blue "
+ b"column_one=\"1\",column_two=1i "
+ b"0\n"
+ b"foo,tag_one=red "
+ b"column_one=\"2\",column_two=2i "
+ b"3600000000000\n"
+ )
+
+ expected_tags_no_fields = (
+ b"foo,tag_one=blue,tag_two=1 "
+ b"column_one=\"1\",column_two=1i,column_three=1.0,"
+ b"tag_three=\"hot\" 0\n"
+ b"foo,tag_one=red,tag_two=0 "
+ b"column_one=\"2\",column_two=2i,column_three=2.0,"
+ b"tag_three=\"cold\" 3600000000000\n"
+ )
+
+ expected_fields_no_tags = (
+ b"foo,tag_one=blue,tag_three=hot,tag_two=1 "
+ b"column_one=\"1\",column_two=1i,column_three=1.0 "
+ b"0\n"
+ b"foo,tag_one=red,tag_three=cold,tag_two=0 "
+ b"column_one=\"2\",column_two=2i,column_three=2.0 "
+ b"3600000000000\n"
+ )
+
+ expected_no_tags_no_fields = (
+ b"foo "
+ b"tag_one=\"blue\",tag_two=1i,column_one=\"1\","
+ b"column_two=1i,column_three=1.0,tag_three=\"hot\" "
+ b"0\n"
+ b"foo "
+ b"tag_one=\"red\",tag_two=0i,column_one=\"2\","
+ b"column_two=2i,column_three=2.0,tag_three=\"cold\" "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo',
+ field_columns=['column_one', 'column_two'],
+ tag_columns=['tag_one'])
+ self.assertEqual(m.last_request.body, expected_tags_and_fields)
+
+ cli.write_points(dataframe, 'foo',
+ tag_columns=['tag_one', 'tag_two'])
+ self.assertEqual(m.last_request.body, expected_tags_no_fields)
+
+ cli.write_points(dataframe, 'foo',
+ field_columns=['column_one', 'column_two',
+ 'column_three'])
+ self.assertEqual(m.last_request.body, expected_fields_no_tags)
+
+ cli.write_points(dataframe, 'foo')
+ self.assertEqual(m.last_request.body, expected_no_tags_no_fields)
+
+ def test_write_points_from_dataframe_with_tag_escaped(self):
+ """Test write points from df w/escaped tag in TestDataFrameClient."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(
+ data=[
+ ['blue orange', "1", 1, 'hot=cold'], # space, equal
+ ['red,green', "2", 2, r'cold\fire'], # comma, backslash
+ ['some', "2", 2, ''], # skip empty
+ ['some', "2", 2, None], # skip None
+ ['', "2", 2, None], # all tags empty
+ ],
+ index=pd.period_range(now, freq='H', periods=5),
+ columns=["tag_one", "column_one", "column_two", "tag_three"]
+ )
+
+ expected_escaped_tags = (
+ b"foo,tag_one=blue\\ orange,tag_three=hot\\=cold "
+ b"column_one=\"1\",column_two=1i "
+ b"0\n"
+ b"foo,tag_one=red\\,green,tag_three=cold\\\\fire "
+ b"column_one=\"2\",column_two=2i "
+ b"3600000000000\n"
+ b"foo,tag_one=some "
+ b"column_one=\"2\",column_two=2i "
+ b"7200000000000\n"
+ b"foo,tag_one=some "
+ b"column_one=\"2\",column_two=2i "
+ b"10800000000000\n"
+ b"foo "
+ b"column_one=\"2\",column_two=2i "
+ b"14400000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, 'foo',
+ field_columns=['column_one', 'column_two'],
+ tag_columns=['tag_one', 'tag_three'])
+ self.assertEqual(m.last_request.body, expected_escaped_tags)
+
+ def test_write_points_from_dataframe_with_numeric_column_names(self):
+ """Test write points from df with numeric cols."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ # df with numeric column names
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)])
+
+ expected = (
+ b'foo,hello=there 0=\"1\",1=1i,2=1.0 0\n'
+ b'foo,hello=there 0=\"2\",1=2i,2=2.0 3600000000000\n'
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, "foo", {"hello": "there"})
+
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_leading_none_column(self):
+ """write_points detect erroneous leading comma for null first field."""
+ dataframe = pd.DataFrame(
+ dict(
+ first=[1, None, None, 8, 9],
+ second=[2, None, None, None, 10],
+ third=[3, 4.1, None, None, 11],
+ first_tag=["one", None, None, "eight", None],
+ second_tag=["two", None, None, None, None],
+ third_tag=["three", "four", None, None, None],
+ comment=[
+ "All columns filled",
+ "First two of three empty",
+ "All empty",
+ "Last two of three empty",
+ "Empty tags with values",
+ ]
+ ),
+ index=pd.date_range(
+ start=pd.to_datetime('2018-01-01'),
+ freq='1D',
+ periods=5,
+ )
+ )
+ expected = (
+ b'foo,first_tag=one,second_tag=two,third_tag=three'
+ b' comment="All columns filled",first=1.0,second=2.0,third=3.0'
+ b' 1514764800000000000\n'
+ b'foo,third_tag=four'
+ b' comment="First two of three empty",third=4.1'
+ b' 1514851200000000000\n'
+ b'foo comment="All empty" 1514937600000000000\n'
+ b'foo,first_tag=eight'
+ b' comment="Last two of three empty",first=8.0'
+ b' 1515024000000000000\n'
+ b'foo'
+ b' comment="Empty tags with values",first=9.0,second=10.0'
+ b',third=11.0'
+ b' 1515110400000000000\n'
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ colnames = [
+ "first_tag",
+ "second_tag",
+ "third_tag",
+ "comment",
+ "first",
+ "second",
+ "third"
+ ]
+ cli.write_points(dataframe.loc[:, colnames], 'foo',
+ tag_columns=[
+ "first_tag",
+ "second_tag",
+ "third_tag"])
+
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_numeric_precision(self):
+ """Test write points from df with numeric precision."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ # df with numeric column names
+ dataframe = pd.DataFrame(data=[["1", 1, 1.1111111111111],
+ ["2", 2, 2.2222222222222]],
+ index=[now, now + timedelta(hours=1)])
+
+ if np.lib.NumpyVersion(np.__version__) <= '1.13.3':
+ expected_default_precision = (
+ b'foo,hello=there 0=\"1\",1=1i,2=1.11111111111 0\n'
+ b'foo,hello=there 0=\"2\",1=2i,2=2.22222222222 3600000000000\n'
+ )
+ else:
+ expected_default_precision = (
+ b'foo,hello=there 0=\"1\",1=1i,2=1.1111111111111 0\n'
+ b'foo,hello=there 0=\"2\",1=2i,2=2.2222222222222 3600000000000\n' # noqa E501 line too long
+ )
+
+ expected_specified_precision = (
+ b'foo,hello=there 0=\"1\",1=1i,2=1.1111 0\n'
+ b'foo,hello=there 0=\"2\",1=2i,2=2.2222 3600000000000\n'
+ )
+
+ expected_full_precision = (
+ b'foo,hello=there 0=\"1\",1=1i,2=1.1111111111111 0\n'
+ b'foo,hello=there 0=\"2\",1=2i,2=2.2222222222222 3600000000000\n'
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, "foo", {"hello": "there"})
+
+ self.assertEqual(m.last_request.body, expected_default_precision)
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, "foo", {"hello": "there"},
+ numeric_precision=4)
+
+ self.assertEqual(m.last_request.body, expected_specified_precision)
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, "foo", {"hello": "there"},
+ numeric_precision='full')
+
+ self.assertEqual(m.last_request.body, expected_full_precision)
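+
+    # A sketch of what ``numeric_precision=4`` appears to mean, inferred
+    # from the expected bodies above (rounding to four decimal places);
+    # this is an inference from the fixtures, not the client's source:
+    @staticmethod
+    def _sketch_numeric_precision():
+        """Round the fixture floats the way the expectations imply."""
+        assert round(1.1111111111111, 4) == 1.1111
+        assert round(2.2222222222222, 4) == 2.2222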
+
+ def test_write_points_from_dataframe_with_period_index(self):
+ """Test write points from df with period index."""
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[pd.Period('1970-01-01'),
+ pd.Period('1970-01-02')],
+ columns=["column_one", "column_two",
+ "column_three"])
+
+ expected = (
+ b"foo column_one=\"1\",column_two=1i,column_three=1.0 0\n"
+ b"foo column_one=\"2\",column_two=2i,column_three=2.0 "
+ b"86400000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, "foo")
+
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_time_precision(self):
+ """Test write points from df with time precision."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+ measurement = "foo"
+
+ cli.write_points(dataframe, measurement, time_precision='h')
+ self.assertEqual(m.last_request.qs['precision'], ['h'])
+ self.assertEqual(
+ b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
+ b'column_one="2",column_two=2i,column_three=2.0 1\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(dataframe, measurement, time_precision='m')
+ self.assertEqual(m.last_request.qs['precision'], ['m'])
+ self.assertEqual(
+ b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
+ b'column_one="2",column_two=2i,column_three=2.0 60\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(dataframe, measurement, time_precision='s')
+ self.assertEqual(m.last_request.qs['precision'], ['s'])
+ self.assertEqual(
+ b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
+ b'column_one="2",column_two=2i,column_three=2.0 3600\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(dataframe, measurement, time_precision='ms')
+ self.assertEqual(m.last_request.qs['precision'], ['ms'])
+ self.assertEqual(
+ b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
+ b'column_one="2",column_two=2i,column_three=2.0 3600000\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(dataframe, measurement, time_precision='u')
+ self.assertEqual(m.last_request.qs['precision'], ['u'])
+ self.assertEqual(
+ b'foo column_one="1",column_two=1i,column_three=1.0 0\nfoo '
+ b'column_one="2",column_two=2i,column_three=2.0 3600000000\n',
+ m.last_request.body,
+ )
+
+ cli.write_points(dataframe, measurement, time_precision='n')
+ self.assertEqual(m.last_request.qs['precision'], ['n'])
+ self.assertEqual(
+ b'foo column_one="1",column_two=1i,column_three=1.0 0\n'
+ b'foo column_one="2",column_two=2i,column_three=2.0 '
+ b'3600000000000\n',
+ m.last_request.body,
+ )
+
+ @raises(TypeError)
+ def test_write_points_from_dataframe_fails_without_time_index(self):
+ """Test failed write points from df without time index."""
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ columns=["column_one", "column_two",
+ "column_three"])
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, "foo")
+
+ @raises(TypeError)
+ def test_write_points_from_dataframe_fails_with_series(self):
+ """Test failed write points from df with series."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.Series(data=[1.0, 2.0],
+ index=[now, now + timedelta(hours=1)])
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(dataframe, "foo")
+
+ def test_create_database(self):
+ """Test create database for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ cli.create_database('new_db')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create database "new_db"'
+ )
+
+ def test_create_numeric_named_database(self):
+ """Test create db w/numeric name for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ cli.create_database('123')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create database "123"'
+ )
+
+ @raises(Exception)
+ def test_create_database_fails(self):
+ """Test create database fail for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ with _mocked_session(cli, 'post', 401):
+ cli.create_database('new_db')
+
+ def test_drop_database(self):
+ """Test drop database for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ cli.drop_database('new_db')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop database "new_db"'
+ )
+
+ def test_drop_measurement(self):
+ """Test drop measurement for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ cli.drop_measurement('new_measurement')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop measurement "new_measurement"'
+ )
+
+ def test_drop_numeric_named_database(self):
+ """Test drop numeric db for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text='{"results":[{}]}'
+ )
+ cli.drop_database('123')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop database "123"'
+ )
+
+ @raises(Exception)
+ def test_get_list_database_fails(self):
+ """Test get list of dbs fail for TestInfluxDBClient object."""
+ cli = DataFrameClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 401):
+ cli.get_list_database()
+
+ def test_get_list_measurements(self):
+ """Test get list of measurements for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ data = {
+ "results": [{
+ "series": [
+ {"name": "measurements",
+ "columns": ["name"],
+ "values": [["cpu"], ["disk"]
+ ]}]}
+ ]
+ }
+
+ with _mocked_session(cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ cli.get_list_measurements(),
+ [{'name': 'cpu'}, {'name': 'disk'}]
+ )
+
+ def test_create_retention_policy_default(self):
+ """Test create default ret policy for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ cli.create_retention_policy(
+ 'somename', '1d', 4, default=True, database='db'
+ )
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create retention policy "somename" on '
+ '"db" duration 1d replication 4 shard duration 0s default'
+ )
+
+ def test_create_retention_policy(self):
+ """Test create retention policy for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ cli.create_retention_policy(
+ 'somename', '1d', 4, database='db'
+ )
+
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'create retention policy "somename" on '
+ '"db" duration 1d replication 4 shard duration 0s'
+ )
+
+ def test_alter_retention_policy(self):
+ """Test alter retention policy for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ # Test alter duration
+ cli.alter_retention_policy('somename', 'db',
+ duration='4d')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" duration 4d'
+ )
+ # Test alter replication
+ cli.alter_retention_policy('somename', 'db',
+ replication=4)
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" replication 4'
+ )
+
+ # Test alter shard duration
+ cli.alter_retention_policy('somename', 'db',
+ shard_duration='1h')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" shard duration 1h'
+ )
+
+ # Test alter default
+ cli.alter_retention_policy('somename', 'db',
+ default=True)
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'alter retention policy "somename" on "db" default'
+ )
+
+ @raises(Exception)
+ def test_alter_retention_policy_invalid(self):
+ """Test invalid alter ret policy for TestInfluxDBClient object."""
+ cli = DataFrameClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'get', 400):
+ cli.alter_retention_policy('somename', 'db')
+
+ def test_drop_retention_policy(self):
+ """Test drop retention policy for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ example_response = '{"results":[{}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ cli.drop_retention_policy('somename', 'db')
+ self.assertEqual(
+ m.last_request.qs['q'][0],
+ 'drop retention policy "somename" on "db"'
+ )
+
+ @raises(Exception)
+ def test_drop_retention_policy_fails(self):
+ """Test failed drop ret policy for TestInfluxDBClient object."""
+ cli = DataFrameClient('host', 8086, 'username', 'password')
+ with _mocked_session(cli, 'delete', 401):
+ cli.drop_retention_policy('default', 'db')
+
+ def test_get_list_retention_policies(self):
+ """Test get retention policies for TestInfluxDBClient object."""
+ cli = DataFrameClient(database='db')
+ example_response = \
+ '{"results": [{"series": [{"values": [["fsfdsdf", "24h0m0s", 2]],'\
+ ' "columns": ["name", "duration", "replicaN"]}]}]}'
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/query",
+ text=example_response
+ )
+ self.assertListEqual(
+ cli.get_list_retention_policies("db"),
+ [{'duration': '24h0m0s',
+ 'name': 'fsfdsdf', 'replicaN': 2}]
+ )
+
+ def test_query_into_dataframe(self):
+ """Test query into df for TestDataFrameClient object."""
+ data = {
+ "results": [{
+ "series": [
+ {"measurement": "network",
+ "tags": {"direction": ""},
+ "columns": ["time", "value"],
+ "values": [["2009-11-10T23:00:00Z", 23422]]
+ },
+ {"measurement": "network",
+ "tags": {"direction": "in"},
+ "columns": ["time", "value"],
+ "values": [["2009-11-10T23:00:00Z", 23422],
+ ["2009-11-10T23:00:00Z", 23422],
+ ["2009-11-10T23:00:00Z", 23422]]
+ }
+ ]
+ }]
+ }
+
+ pd1 = pd.DataFrame(
+ [[23422]], columns=['value'],
+ index=pd.to_datetime(["2009-11-10T23:00:00Z"]))
+ if pd1.index.tzinfo is None:
+ pd1.index = pd1.index.tz_localize('UTC')
+ pd2 = pd.DataFrame(
+ [[23422], [23422], [23422]], columns=['value'],
+ index=pd.to_datetime(["2009-11-10T23:00:00Z",
+ "2009-11-10T23:00:00Z",
+ "2009-11-10T23:00:00Z"]))
+ if pd2.index.tzinfo is None:
+ pd2.index = pd2.index.tz_localize('UTC')
+ expected = {
+ ('network', (('direction', ''),)): pd1,
+ ('network', (('direction', 'in'),)): pd2
+ }
+
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ with _mocked_session(cli, 'GET', 200, data):
+ result = cli.query('select value from network group by direction;')
+ for k in expected:
+ assert_frame_equal(expected[k], result[k])
+
+ def test_multiquery_into_dataframe(self):
+ """Test multiquery into df for TestDataFrameClient object."""
+ data = {
+ "results": [
+ {
+ "series": [
+ {
+ "name": "cpu_load_short",
+ "columns": ["time", "value"],
+ "values": [
+ ["2015-01-29T21:55:43.702900257Z", 0.55],
+ ["2015-01-29T21:55:43.702900257Z", 23422],
+ ["2015-06-11T20:46:02Z", 0.64]
+ ]
+ }
+ ]
+ }, {
+ "series": [
+ {
+ "name": "cpu_load_short",
+ "columns": ["time", "count"],
+ "values": [
+ ["1970-01-01T00:00:00Z", 3]
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ pd1 = pd.DataFrame(
+ [[0.55], [23422.0], [0.64]], columns=['value'],
+ index=pd.to_datetime([
+ "2015-01-29 21:55:43.702900257+0000",
+ "2015-01-29 21:55:43.702900257+0000",
+ "2015-06-11 20:46:02+0000"]))
+ if pd1.index.tzinfo is None:
+ pd1.index = pd1.index.tz_localize('UTC')
+ pd2 = pd.DataFrame(
+ [[3]], columns=['count'],
+ index=pd.to_datetime(["1970-01-01 00:00:00+00:00"]))
+ if pd2.index.tzinfo is None:
+ pd2.index = pd2.index.tz_localize('UTC')
+ expected = [{'cpu_load_short': pd1}, {'cpu_load_short': pd2}]
+
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ iql = "SELECT value FROM cpu_load_short WHERE region=$region;"\
+ "SELECT count(value) FROM cpu_load_short WHERE region=$region"
+ bind_params = {'region': 'us-west'}
+ with _mocked_session(cli, 'GET', 200, data):
+ result = cli.query(iql, bind_params=bind_params)
+ for r, e in zip(result, expected):
+ for k in e:
+ assert_frame_equal(e[k], r[k])
+
+ def test_multiquery_into_dataframe_dropna(self):
+ """Test multiquery into df for TestDataFrameClient object."""
+ data = {
+ "results": [
+ {
+ "series": [
+ {
+ "name": "cpu_load_short",
+ "columns": ["time", "value", "value2", "value3"],
+ "values": [
+ ["2015-01-29T21:55:43.702900257Z",
+ 0.55, 0.254, np.NaN],
+ ["2015-01-29T21:55:43.702900257Z",
+ 23422, 122878, np.NaN],
+ ["2015-06-11T20:46:02Z",
+ 0.64, 0.5434, np.NaN]
+ ]
+ }
+ ]
+ }, {
+ "series": [
+ {
+ "name": "cpu_load_short",
+ "columns": ["time", "count"],
+ "values": [
+ ["1970-01-01T00:00:00Z", 3]
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ pd1 = pd.DataFrame(
+ [[0.55, 0.254, np.NaN],
+ [23422.0, 122878, np.NaN],
+ [0.64, 0.5434, np.NaN]],
+ columns=['value', 'value2', 'value3'],
+ index=pd.to_datetime([
+ "2015-01-29 21:55:43.702900257+0000",
+ "2015-01-29 21:55:43.702900257+0000",
+ "2015-06-11 20:46:02+0000"]))
+
+ if pd1.index.tzinfo is None:
+ pd1.index = pd1.index.tz_localize('UTC')
+
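+        # value3 is NaN in every row, so it should disappear entirely
+        # when dropna is enabled.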
+ pd1_dropna = pd.DataFrame(
+ [[0.55, 0.254], [23422.0, 122878], [0.64, 0.5434]],
+ columns=['value', 'value2'],
+ index=pd.to_datetime([
+ "2015-01-29 21:55:43.702900257+0000",
+ "2015-01-29 21:55:43.702900257+0000",
+ "2015-06-11 20:46:02+0000"]))
+
+ if pd1_dropna.index.tzinfo is None:
+ pd1_dropna.index = pd1_dropna.index.tz_localize('UTC')
+
+ pd2 = pd.DataFrame(
+ [[3]], columns=['count'],
+ index=pd.to_datetime(["1970-01-01 00:00:00+00:00"]))
+
+ if pd2.index.tzinfo is None:
+ pd2.index = pd2.index.tz_localize('UTC')
+
+ expected_dropna_true = [
+ {'cpu_load_short': pd1_dropna},
+ {'cpu_load_short': pd2}]
+ expected_dropna_false = [
+ {'cpu_load_short': pd1},
+ {'cpu_load_short': pd2}]
+
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ iql = "SELECT value FROM cpu_load_short WHERE region=$region;" \
+ "SELECT count(value) FROM cpu_load_short WHERE region=$region"
+ bind_params = {'region': 'us-west'}
+
+ for dropna in [True, False]:
+ with _mocked_session(cli, 'GET', 200, data):
+ result = cli.query(iql, bind_params=bind_params, dropna=dropna)
+ expected = \
+ expected_dropna_true if dropna else expected_dropna_false
+ for r, e in zip(result, expected):
+ for k in e:
+ assert_frame_equal(e[k], r[k])
+
+ # test default value (dropna = True)
+ with _mocked_session(cli, 'GET', 200, data):
+ result = cli.query(iql, bind_params=bind_params)
+ for r, e in zip(result, expected_dropna_true):
+ for k in e:
+ assert_frame_equal(e[k], r[k])
+
+ def test_query_with_empty_result(self):
+ """Test query with empty results in TestDataFrameClient object."""
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ with _mocked_session(cli, 'GET', 200, {"results": [{}]}):
+ result = cli.query('select column_one from foo;')
+ self.assertEqual(result, {})
+
+ def test_get_list_database(self):
+ """Test get list of databases in TestDataFrameClient object."""
+ data = {'results': [
+ {'series': [
+ {'measurement': 'databases',
+ 'values': [
+ ['new_db_1'],
+ ['new_db_2']],
+ 'columns': ['name']}]}
+ ]}
+
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ with _mocked_session(cli, 'get', 200, json.dumps(data)):
+ self.assertListEqual(
+ cli.get_list_database(),
+ [{'name': 'new_db_1'}, {'name': 'new_db_2'}]
+ )
+
+ def test_datetime_to_epoch(self):
+ """Test convert datetime to epoch in TestDataFrameClient object."""
+ timestamp = pd.Timestamp('2013-01-01 00:00:00.000+00:00')
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+
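+        # 2013-01-01T00:00:00Z is 1356998400 seconds after the Unix epoch;
+        # the other precisions scale that value by the unit ratio.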
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp),
+ 1356998400.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='h'),
+ 1356998400.0 / 3600
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='m'),
+ 1356998400.0 / 60
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='s'),
+ 1356998400.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='ms'),
+ 1356998400000.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='u'),
+ 1356998400000000.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='n'),
+ 1356998400000000000.0
+ )
+
+ def test_dsn_constructor(self):
+ """Test data source name deconstructor in TestDataFrameClient."""
+ client = DataFrameClient.from_dsn('influxdb://localhost:8086')
+ self.assertIsInstance(client, DataFrameClient)
+ self.assertEqual('http://localhost:8086', client._baseurl)
+
+ def test_write_points_from_dataframe_with_nan_line(self):
+ """Test write points from dataframe with Nan lines."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, np.inf], ["2", 2, np.nan]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ expected = (
+ b"foo column_one=\"1\",column_two=1i 0\n"
+ b"foo column_one=\"2\",column_two=2i "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo', protocol='line')
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None, protocol='line')
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_nan_json(self):
+ """Test write points from json with NaN lines."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, np.inf], ["2", 2, np.nan]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ expected = (
+ b"foo column_one=\"1\",column_two=1i 0\n"
+ b"foo column_one=\"2\",column_two=2i "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo', protocol='json')
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None, protocol='json')
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_tags_and_nan_line(self):
+ """Test write points from dataframe with NaN lines and tags."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, np.inf],
+ ['red', 0, "2", 2, np.nan]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["tag_one", "tag_two", "column_one",
+ "column_two", "column_three"])
+ expected = (
+ b"foo,tag_one=blue,tag_two=1 "
+ b"column_one=\"1\",column_two=1i "
+ b"0\n"
+ b"foo,tag_one=red,tag_two=0 "
+ b"column_one=\"2\",column_two=2i "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo', protocol='line',
+ tag_columns=['tag_one', 'tag_two'])
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None, protocol='line',
+ tag_columns=['tag_one', 'tag_two'])
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_write_points_from_dataframe_with_tags_and_nan_json(self):
+ """Test write points from json with NaN lines and tags."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[['blue', 1, "1", 1, np.inf],
+ ['red', 0, "2", 2, np.nan]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["tag_one", "tag_two", "column_one",
+ "column_two", "column_three"])
+ expected = (
+ b"foo,tag_one=blue,tag_two=1 "
+ b"column_one=\"1\",column_two=1i "
+ b"0\n"
+ b"foo,tag_one=red,tag_two=0 "
+ b"column_one=\"2\",column_two=2i "
+ b"3600000000000\n"
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204)
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points(dataframe, 'foo', protocol='json',
+ tag_columns=['tag_one', 'tag_two'])
+ self.assertEqual(m.last_request.body, expected)
+
+ cli.write_points(dataframe, 'foo', tags=None, protocol='json',
+ tag_columns=['tag_one', 'tag_two'])
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_query_custom_index(self):
+ """Test query with custom indexes."""
+ data = {
+ "results": [
+ {
+ "series": [
+ {
+ "name": "cpu_load_short",
+ "columns": ["time", "value", "host"],
+ "values": [
+ [1, 0.55, "local"],
+ [2, 23422, "local"],
+ [3, 0.64, "local"]
+ ]
+ }
+ ]
+ }
+ ]
+ }
+
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ iql = "SELECT value FROM cpu_load_short WHERE region=$region;" \
+ "SELECT count(value) FROM cpu_load_short WHERE region=$region"
+ bind_params = {'region': 'us-west'}
+ with _mocked_session(cli, 'GET', 200, data):
+ result = cli.query(iql, bind_params=bind_params,
+ data_frame_index=["time", "host"])
+
+ _data_frame = result['cpu_load_short']
+
+ self.assertListEqual(["time", "host"],
+ list(_data_frame.index.names))
+
+ def test_dataframe_nanosecond_precision(self):
+ """Test nanosecond precision."""
+ for_df_dict = {
+ "nanFloats": [1.1, float('nan'), 3.3, 4.4],
+ "onlyFloats": [1.1, 2.2, 3.3, 4.4],
+ "strings": ['one_one', 'two_two', 'three_three', 'four_four']
+ }
+ df = pd.DataFrame.from_dict(for_df_dict)
+ df['time'] = ['2019-10-04 06:27:19.850557111+00:00',
+ '2019-10-04 06:27:19.850557184+00:00',
+ '2019-10-04 06:27:42.251396864+00:00',
+ '2019-10-04 06:27:42.251396974+00:00']
+ df['time'] = pd.to_datetime(df['time'], unit='ns')
+ df = df.set_index('time')
+
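+        # The four timestamps differ only in their nanosecond digits;
+        # any float round-trip during serialization would collapse them.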
+ expected = (
+ b'foo nanFloats=1.1,onlyFloats=1.1,strings="one_one" 1570170439850557111\n' # noqa E501 line too long
+ b'foo onlyFloats=2.2,strings="two_two" 1570170439850557184\n' # noqa E501 line too long
+ b'foo nanFloats=3.3,onlyFloats=3.3,strings="three_three" 1570170462251396864\n' # noqa E501 line too long
+ b'foo nanFloats=4.4,onlyFloats=4.4,strings="four_four" 1570170462251396974\n' # noqa E501 line too long
+ )
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(df, 'foo', time_precision='n')
+
+ self.assertEqual(m.last_request.body, expected)
+
+ def test_dataframe_nanosecond_precision_one_microsecond(self):
+ """Test nanosecond precision within one microsecond."""
+ # 1 microsecond = 1000 nanoseconds
+ start = np.datetime64('2019-10-04T06:27:19.850557000')
+ end = np.datetime64('2019-10-04T06:27:19.850558000')
+
+ # generate timestamps with nanosecond precision
+ timestamps = np.arange(
+ start,
+ end + np.timedelta64(1, 'ns'),
+ np.timedelta64(1, 'ns')
+ )
+ # generate values
+ values = np.arange(0.0, len(timestamps))
+
+ df = pd.DataFrame({'value': values}, index=timestamps)
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write",
+ status_code=204
+ )
+
+ cli = DataFrameClient(database='db')
+ cli.write_points(df, 'foo', time_precision='n')
+
+ lines = m.last_request.body.decode('utf-8').split('\n')
+ self.assertEqual(len(lines), 1002)
+
+ for index, line in enumerate(lines):
+ if index == 1001:
+ self.assertEqual(line, '')
+ continue
+            # str.format keeps this module parseable on the Python 2.7
+            # job still in the Travis matrix (f-strings need 3.6+).
+            self.assertEqual(
+                line,
+                "foo value={}.0 157017043985055{:04d}".format(
+                    index, 7000 + index)
+            )
diff --git a/influxdb/tests/helper_test.py b/influxdb/tests/helper_test.py
new file mode 100644
index 00000000..6737f921
--- /dev/null
+++ b/influxdb/tests/helper_test.py
@@ -0,0 +1,437 @@
+# -*- coding: utf-8 -*-
+"""Set of series helper functions for test."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from datetime import datetime, timedelta
+
+import unittest
+import warnings
+
+import mock
+from influxdb import SeriesHelper, InfluxDBClient
+from requests.exceptions import ConnectionError
+
+
+class TestSeriesHelper(unittest.TestCase):
+ """Define the SeriesHelper test object."""
+
+ @classmethod
+ def setUpClass(cls):
+ """Set up the TestSeriesHelper object."""
+ super(TestSeriesHelper, cls).setUpClass()
+
+ TestSeriesHelper.client = InfluxDBClient(
+ 'host',
+ 8086,
+ 'username',
+ 'password',
+ 'database'
+ )
+
+ class MySeriesHelper(SeriesHelper):
+ """Define a SeriesHelper object."""
+
+ class Meta:
+ """Define metadata for the SeriesHelper object."""
+
+ client = TestSeriesHelper.client
+ series_name = 'events.stats.{server_name}'
+ fields = ['some_stat']
+ tags = ['server_name', 'other_tag']
+ bulk_size = 5
+ autocommit = True
+
+ TestSeriesHelper.MySeriesHelper = MySeriesHelper
+
+ def setUp(self):
+ """Check that MySeriesHelper has empty datapoints."""
+ super(TestSeriesHelper, self).setUp()
+ self.assertEqual(
+ TestSeriesHelper.MySeriesHelper._json_body_(),
+ [],
+ 'Resetting helper in teardown did not empty datapoints.')
+
+ def tearDown(self):
+ """Deconstruct the TestSeriesHelper object."""
+ super(TestSeriesHelper, self).tearDown()
+ TestSeriesHelper.MySeriesHelper._reset_()
+ self.assertEqual(
+ TestSeriesHelper.MySeriesHelper._json_body_(),
+ [],
+ 'Resetting helper did not empty datapoints.')
+
+ def test_auto_commit(self):
+ """Test write_points called after valid number of events."""
+ class AutoCommitTest(SeriesHelper):
+ """Define a SeriesHelper instance to test autocommit."""
+
+ class Meta:
+ """Define metadata for AutoCommitTest."""
+
+ series_name = 'events.stats.{server_name}'
+ fields = ['some_stat']
+ tags = ['server_name', 'other_tag']
+ bulk_size = 5
+ client = InfluxDBClient()
+ autocommit = True
+
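+        # bulk_size is 5: the first four points must not trigger a
+        # write, the fifth must.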
+ fake_write_points = mock.MagicMock()
+ AutoCommitTest(server_name='us.east-1', some_stat=159, other_tag='gg')
+ AutoCommitTest._client.write_points = fake_write_points
+ AutoCommitTest(server_name='us.east-1', some_stat=158, other_tag='gg')
+ AutoCommitTest(server_name='us.east-1', some_stat=157, other_tag='gg')
+ AutoCommitTest(server_name='us.east-1', some_stat=156, other_tag='gg')
+ self.assertFalse(fake_write_points.called)
+ AutoCommitTest(server_name='us.east-1', some_stat=3443, other_tag='gg')
+ self.assertTrue(fake_write_points.called)
+
+ @mock.patch('influxdb.helper.SeriesHelper._current_timestamp')
+ def testSingleSeriesName(self, current_timestamp):
+ """Test JSON conversion when there is only one series name."""
+ current_timestamp.return_value = current_date = datetime.today()
+ TestSeriesHelper.MySeriesHelper(
+ server_name='us.east-1', other_tag='ello', some_stat=159)
+ TestSeriesHelper.MySeriesHelper(
+ server_name='us.east-1', other_tag='ello', some_stat=158)
+ TestSeriesHelper.MySeriesHelper(
+ server_name='us.east-1', other_tag='ello', some_stat=157)
+ TestSeriesHelper.MySeriesHelper(
+ server_name='us.east-1', other_tag='ello', some_stat=156)
+ expectation = [
+ {
+ "measurement": "events.stats.us.east-1",
+ "tags": {
+ "other_tag": "ello",
+ "server_name": "us.east-1"
+ },
+ "fields": {
+ "some_stat": 159
+ },
+ "time": current_date,
+ },
+ {
+ "measurement": "events.stats.us.east-1",
+ "tags": {
+ "other_tag": "ello",
+ "server_name": "us.east-1"
+ },
+ "fields": {
+ "some_stat": 158
+ },
+ "time": current_date,
+ },
+ {
+ "measurement": "events.stats.us.east-1",
+ "tags": {
+ "other_tag": "ello",
+ "server_name": "us.east-1"
+ },
+ "fields": {
+ "some_stat": 157
+ },
+ "time": current_date,
+ },
+ {
+ "measurement": "events.stats.us.east-1",
+ "tags": {
+ "other_tag": "ello",
+ "server_name": "us.east-1"
+ },
+ "fields": {
+ "some_stat": 156
+ },
+ "time": current_date,
+ }
+ ]
+
+ rcvd = TestSeriesHelper.MySeriesHelper._json_body_()
+ self.assertTrue(all([el in expectation for el in rcvd]) and
+ all([el in rcvd for el in expectation]),
+ 'Invalid JSON body of time series returned from '
+ '_json_body_ for one series name: {0}.'.format(rcvd))
+
+ @mock.patch('influxdb.helper.SeriesHelper._current_timestamp')
+ def testSeveralSeriesNames(self, current_timestamp):
+ """Test JSON conversion when there are multiple series names."""
+ current_timestamp.return_value = current_date = datetime.today()
+ TestSeriesHelper.MySeriesHelper(
+ server_name='us.east-1', some_stat=159, other_tag='ello')
+ TestSeriesHelper.MySeriesHelper(
+ server_name='fr.paris-10', some_stat=158, other_tag='ello')
+ TestSeriesHelper.MySeriesHelper(
+ server_name='lu.lux', some_stat=157, other_tag='ello')
+ TestSeriesHelper.MySeriesHelper(
+ server_name='uk.london', some_stat=156, other_tag='ello')
+ expectation = [
+ {
+ 'fields': {
+ 'some_stat': 157
+ },
+ 'measurement': 'events.stats.lu.lux',
+ 'tags': {
+ 'other_tag': 'ello',
+ 'server_name': 'lu.lux'
+ },
+ "time": current_date,
+ },
+ {
+ 'fields': {
+ 'some_stat': 156
+ },
+ 'measurement': 'events.stats.uk.london',
+ 'tags': {
+ 'other_tag': 'ello',
+ 'server_name': 'uk.london'
+ },
+ "time": current_date,
+ },
+ {
+ 'fields': {
+ 'some_stat': 158
+ },
+ 'measurement': 'events.stats.fr.paris-10',
+ 'tags': {
+ 'other_tag': 'ello',
+ 'server_name': 'fr.paris-10'
+ },
+ "time": current_date,
+ },
+ {
+ 'fields': {
+ 'some_stat': 159
+ },
+ 'measurement': 'events.stats.us.east-1',
+ 'tags': {
+ 'other_tag': 'ello',
+ 'server_name': 'us.east-1'
+ },
+ "time": current_date,
+ }
+ ]
+
+ rcvd = TestSeriesHelper.MySeriesHelper._json_body_()
+ self.assertTrue(all([el in expectation for el in rcvd]) and
+ all([el in rcvd for el in expectation]),
+ 'Invalid JSON body of time series returned from '
+ '_json_body_ for several series names: {0}.'
+ .format(rcvd))
+
+ @mock.patch('influxdb.helper.SeriesHelper._current_timestamp')
+ def testSeriesWithoutTimeField(self, current_timestamp):
+ """Test that time is optional on a series without a time field."""
+ current_date = datetime.today()
+ yesterday = current_date - timedelta(days=1)
+ current_timestamp.return_value = yesterday
+ TestSeriesHelper.MySeriesHelper(
+ server_name='us.east-1', other_tag='ello',
+ some_stat=159, time=current_date
+ )
+ TestSeriesHelper.MySeriesHelper(
+ server_name='us.east-1', other_tag='ello',
+ some_stat=158,
+ )
+ point1, point2 = TestSeriesHelper.MySeriesHelper._json_body_()
+ self.assertTrue('time' in point1 and 'time' in point2)
+ self.assertEqual(point1['time'], current_date)
+ self.assertEqual(point2['time'], yesterday)
+
+ def testSeriesWithoutAllTags(self):
+ """Test that creating a data point without a tag throws an error."""
+ class MyTimeFieldSeriesHelper(SeriesHelper):
+
+ class Meta:
+ client = TestSeriesHelper.client
+ series_name = 'events.stats.{server_name}'
+ fields = ['some_stat', 'time']
+ tags = ['server_name', 'other_tag']
+ bulk_size = 5
+ autocommit = True
+
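+        # 'other_tag' is declared in Meta.tags but not supplied here,
+        # so instantiation should raise NameError.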
+ self.assertRaises(NameError, MyTimeFieldSeriesHelper,
+ **{"server_name": 'us.east-1',
+ "some_stat": 158})
+
+ @mock.patch('influxdb.helper.SeriesHelper._current_timestamp')
+ def testSeriesWithTimeField(self, current_timestamp):
+ """Test that time is optional on a series with a time field."""
+ current_date = datetime.today()
+ yesterday = current_date - timedelta(days=1)
+ current_timestamp.return_value = yesterday
+
+ class MyTimeFieldSeriesHelper(SeriesHelper):
+
+ class Meta:
+ client = TestSeriesHelper.client
+ series_name = 'events.stats.{server_name}'
+ fields = ['some_stat', 'time']
+ tags = ['server_name', 'other_tag']
+ bulk_size = 5
+ autocommit = True
+
+ MyTimeFieldSeriesHelper(
+ server_name='us.east-1', other_tag='ello',
+ some_stat=159, time=current_date
+ )
+ MyTimeFieldSeriesHelper(
+ server_name='us.east-1', other_tag='ello',
+ some_stat=158,
+ )
+ point1, point2 = MyTimeFieldSeriesHelper._json_body_()
+ self.assertTrue('time' in point1 and 'time' in point2)
+ self.assertEqual(point1['time'], current_date)
+ self.assertEqual(point2['time'], yesterday)
+
+ def testInvalidHelpers(self):
+ """Test errors in invalid helpers."""
+ class MissingMeta(SeriesHelper):
+ """Define instance of SeriesHelper for missing meta."""
+
+ pass
+
+ class MissingClient(SeriesHelper):
+ """Define SeriesHelper for missing client data."""
+
+ class Meta:
+ """Define metadat for MissingClient."""
+
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ autocommit = True
+
+ class MissingSeriesName(SeriesHelper):
+ """Define instance of SeriesHelper for missing series."""
+
+ class Meta:
+ """Define metadata for MissingSeriesName."""
+
+ fields = ['time', 'server_name']
+
+ class MissingFields(SeriesHelper):
+ """Define instance of SeriesHelper for missing fields."""
+
+ class Meta:
+ """Define metadata for MissingFields."""
+
+ series_name = 'events.stats.{server_name}'
+
+ class InvalidTimePrecision(SeriesHelper):
+ """Define instance of SeriesHelper for invalid time precision."""
+
+ class Meta:
+ """Define metadata for InvalidTimePrecision."""
+
+ series_name = 'events.stats.{server_name}'
+ time_precision = "ks"
+ fields = ['time', 'server_name']
+ autocommit = True
+
+ for cls in [MissingMeta, MissingClient, MissingFields,
+ MissingSeriesName, InvalidTimePrecision]:
+ self.assertRaises(
+ AttributeError, cls, **{'time': 159,
+ 'server_name': 'us.east-1'})
+
+ @unittest.skip("Fails on py32")
+ def testWarnBulkSizeZero(self):
+ """Test warning for an invalid bulk size."""
+ class WarnBulkSizeZero(SeriesHelper):
+
+ class Meta:
+ client = TestSeriesHelper.client
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ tags = []
+ bulk_size = 0
+ autocommit = True
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ try:
+ WarnBulkSizeZero(time=159, server_name='us.east-1')
+ except ConnectionError:
+ # Server defined in the client is invalid, we're testing
+ # the warning only.
+ pass
+ self.assertEqual(len(w), 1,
+ '{0} call should have generated one warning.'
+ .format(WarnBulkSizeZero))
+ self.assertIn('forced to 1', str(w[-1].message),
+ 'Warning message did not contain "forced to 1".')
+
+ def testWarnBulkSizeNoEffect(self):
+ """Test warning for a set bulk size but autocommit False."""
+ class WarnBulkSizeNoEffect(SeriesHelper):
+ """Define SeriesHelper for warning on bulk size."""
+
+ class Meta:
+ """Define metadat for WarnBulkSizeNoEffect."""
+
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ bulk_size = 5
+ tags = []
+ autocommit = False
+
+ with warnings.catch_warnings(record=True) as w:
+ warnings.simplefilter("always")
+ WarnBulkSizeNoEffect(time=159, server_name='us.east-1')
+ self.assertEqual(len(w), 1,
+ '{0} call should have generated one warning.'
+ .format(WarnBulkSizeNoEffect))
+ self.assertIn('has no affect', str(w[-1].message),
+ 'Warning message did not contain "has not affect".')
+
+ def testSeriesWithRetentionPolicy(self):
+ """Test that the data is saved with the specified retention policy."""
+ my_policy = 'my_policy'
+
+ class RetentionPolicySeriesHelper(SeriesHelper):
+
+ class Meta:
+ client = InfluxDBClient()
+ series_name = 'events.stats.{server_name}'
+ fields = ['some_stat', 'time']
+ tags = ['server_name', 'other_tag']
+ bulk_size = 2
+ autocommit = True
+ retention_policy = my_policy
+
+ fake_write_points = mock.MagicMock()
+ RetentionPolicySeriesHelper(
+ server_name='us.east-1', some_stat=159, other_tag='gg')
+ RetentionPolicySeriesHelper._client.write_points = fake_write_points
+ RetentionPolicySeriesHelper(
+ server_name='us.east-1', some_stat=158, other_tag='aa')
+
+ kall = fake_write_points.call_args
+ args, kwargs = kall
+ self.assertTrue('retention_policy' in kwargs)
+ self.assertEqual(kwargs['retention_policy'], my_policy)
+
+ def testSeriesWithoutRetentionPolicy(self):
+ """Test that the data is saved without any retention policy."""
+ class NoRetentionPolicySeriesHelper(SeriesHelper):
+
+ class Meta:
+ client = InfluxDBClient()
+ series_name = 'events.stats.{server_name}'
+ fields = ['some_stat', 'time']
+ tags = ['server_name', 'other_tag']
+ bulk_size = 2
+ autocommit = True
+
+ fake_write_points = mock.MagicMock()
+ NoRetentionPolicySeriesHelper(
+ server_name='us.east-1', some_stat=159, other_tag='gg')
+ NoRetentionPolicySeriesHelper._client.write_points = fake_write_points
+ NoRetentionPolicySeriesHelper(
+ server_name='us.east-1', some_stat=158, other_tag='aa')
+
+ kall = fake_write_points.call_args
+ args, kwargs = kall
+ self.assertTrue('retention_policy' in kwargs)
+ self.assertEqual(kwargs['retention_policy'], None)
diff --git a/influxdb/tests/influxdb08/__init__.py b/influxdb/tests/influxdb08/__init__.py
new file mode 100644
index 00000000..0e79ed1c
--- /dev/null
+++ b/influxdb/tests/influxdb08/__init__.py
@@ -0,0 +1,2 @@
+# -*- coding: utf-8 -*-
+"""Define the influxdb08 test package."""
diff --git a/tests/influxdb/client_test.py b/influxdb/tests/influxdb08/client_test.py
similarity index 54%
rename from tests/influxdb/client_test.py
rename to influxdb/tests/influxdb08/client_test.py
index 101d4145..39ab52d6 100644
--- a/tests/influxdb/client_test.py
+++ b/influxdb/tests/influxdb08/client_test.py
@@ -1,17 +1,34 @@
# -*- coding: utf-8 -*-
-"""
-unit tests
-"""
+"""Client unit tests."""
+
import json
-import requests
import socket
+import sys
import unittest
+import random
+import warnings
+
+import mock
+import requests
+import requests.exceptions
import requests_mock
+
from nose.tools import raises
from mock import patch
-from influxdb import InfluxDBClient
-from influxdb.client import session
+from influxdb.influxdb08 import InfluxDBClient
+from influxdb.influxdb08.client import session
+
+if sys.version < '3':
+ import codecs
+
+ def u(x):
+ """Test codec."""
+ return codecs.unicode_escape_decode(x)[0]
+else:
+ def u(x):
+ """Test codec."""
+ return x
def _build_response_object(status_code=200, content=""):
@@ -22,10 +39,10 @@ def _build_response_object(status_code=200, content=""):
def _mocked_session(method="GET", status_code=200, content=""):
-
method = method.upper()
def request(*args, **kwargs):
+ """Define a request for the _mocked_session."""
c = content
# Check method
@@ -59,8 +76,13 @@ def request(*args, **kwargs):
class TestInfluxDBClient(unittest.TestCase):
+ """Define a TestInfluxDBClient object."""
def setUp(self):
+ """Set up a TestInfluxDBClient object."""
+ # By default, raise exceptions on warnings
+ warnings.simplefilter('error', FutureWarning)
+
self.dummy_points = [
{
"points": [
@@ -72,27 +94,88 @@ def setUp(self):
}
]
+ self.dsn_string = 'influxdb://uSr:pWd@host:1886/db'
+
def test_scheme(self):
+ """Test database scheme for TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
- assert cli._baseurl == 'http://host:8086'
+ self.assertEqual(cli._baseurl, 'http://host:8086')
cli = InfluxDBClient(
'host', 8086, 'username', 'password', 'database', ssl=True
)
- assert cli._baseurl == 'https://host:8086'
+ self.assertEqual(cli._baseurl, 'https://host:8086')
+
+ def test_dsn(self):
+ """Test datasource name for TestInfluxDBClient object."""
+ cli = InfluxDBClient.from_dsn(self.dsn_string)
+ self.assertEqual('http://host:1886', cli._baseurl)
+ self.assertEqual('uSr', cli._username)
+ self.assertEqual('pWd', cli._password)
+ self.assertEqual('db', cli._database)
+ self.assertFalse(cli._use_udp)
+
+ cli = InfluxDBClient.from_dsn('udp+' + self.dsn_string)
+ self.assertTrue(cli._use_udp)
+
+ cli = InfluxDBClient.from_dsn('https+' + self.dsn_string)
+ self.assertEqual('https://host:1886', cli._baseurl)
+
+ cli = InfluxDBClient.from_dsn('https+' + self.dsn_string,
+ **{'ssl': False})
+ self.assertEqual('http://host:1886', cli._baseurl)
+
+ def test_switch_database(self):
+ """Test switch database for TestInfluxDBClient object."""
+ cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
+ cli.switch_database('another_database')
+ self.assertEqual(cli._database, 'another_database')
- def test_switch_db(self):
+ @raises(FutureWarning)
+ def test_switch_db_deprecated(self):
+ """Test deprecated switch database for TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
cli.switch_db('another_database')
- assert cli._database == 'another_database'
+ self.assertEqual(cli._database, 'another_database')
def test_switch_user(self):
+ """Test switch user for TestInfluxDBClient object."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'database')
cli.switch_user('another_username', 'another_password')
- assert cli._username == 'another_username'
- assert cli._password == 'another_password'
+ self.assertEqual(cli._username, 'another_username')
+ self.assertEqual(cli._password, 'another_password')
+
+ def test_write(self):
+ """Test write to database for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/write"
+ )
+ cli = InfluxDBClient(database='db')
+ cli.write(
+ {"database": "mydb",
+ "retentionPolicy": "mypolicy",
+ "points": [{"name": "cpu_load_short",
+ "tags": {"host": "server01",
+ "region": "us-west"},
+ "timestamp": "2009-11-10T23:00:00Z",
+ "values": {"value": 0.64}}]}
+ )
+
+ self.assertEqual(
+ json.loads(m.last_request.body),
+ {"database": "mydb",
+ "retentionPolicy": "mypolicy",
+ "points": [{"name": "cpu_load_short",
+ "tags": {"host": "server01",
+ "region": "us-west"},
+ "timestamp": "2009-11-10T23:00:00Z",
+ "values": {"value": 0.64}}]}
+ )
def test_write_points(self):
+ """Test write points for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
@@ -110,6 +193,7 @@ def test_write_points(self):
)
def test_write_points_string(self):
+ """Test write string points for TestInfluxDBClient object."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
@@ -127,29 +211,69 @@ def test_write_points_string(self):
)
def test_write_points_batch(self):
- with _mocked_session('post', 200, self.dummy_points):
- cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
- assert cli.write_points(
- data=self.dummy_points,
- batch_size=2
- ) is True
+ """Test write batch points for TestInfluxDBClient object."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+ cli = InfluxDBClient('localhost', 8086,
+ 'username', 'password', 'db')
+ cli.write_points(data=self.dummy_points, batch_size=2)
+ self.assertEqual(1, m.call_count)
+
+ def test_write_points_batch_invalid_size(self):
+ """Test write batch points invalid size for TestInfluxDBClient."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+ cli = InfluxDBClient('localhost', 8086,
+ 'username', 'password', 'db')
+ cli.write_points(data=self.dummy_points, batch_size=-2)
+ self.assertEqual(1, m.call_count)
+
+ def test_write_points_batch_multiple_series(self):
+ """Test write points batch multiple series."""
+ dummy_points = [
+ {"points": [["1", 1, 1.0], ["2", 2, 2.0], ["3", 3, 3.0],
+ ["4", 4, 4.0], ["5", 5, 5.0]],
+ "name": "foo",
+ "columns": ["val1", "val2", "val3"]},
+ {"points": [["1", 1, 1.0], ["2", 2, 2.0], ["3", 3, 3.0],
+ ["4", 4, 4.0], ["5", 5, 5.0], ["6", 6, 6.0],
+ ["7", 7, 7.0], ["8", 8, 8.0]],
+ "name": "bar",
+ "columns": ["val1", "val2", "val3"]},
+ ]
+ expected_last_body = [{'points': [['7', 7, 7.0], ['8', 8, 8.0]],
+ 'name': 'bar',
+ 'columns': ['val1', 'val2', 'val3']}]
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+ cli = InfluxDBClient('localhost', 8086,
+ 'username', 'password', 'db')
+ cli.write_points(data=dummy_points, batch_size=3)
+ self.assertEqual(m.call_count, 5)
+ self.assertEqual(expected_last_body, m.request_history[4].json())
def test_write_points_udp(self):
+ """Test write points UDP for TestInfluxDBClient object."""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
- s.bind(('0.0.0.0', 4444))
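+        # Bind a random port instead of a fixed one so concurrent test
+        # runs do not collide.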
+ port = random.randint(4000, 8000)
+ s.bind(('0.0.0.0', port))
cli = InfluxDBClient(
'localhost', 8086, 'root', 'root',
- 'test', use_udp=True, udp_port=4444
+ 'test', use_udp=True, udp_port=port
)
cli.write_points(self.dummy_points)
received_data, addr = s.recvfrom(1024)
- assert self.dummy_points == \
- json.loads(received_data.decode(), strict=True)
+ self.assertEqual(self.dummy_points,
+ json.loads(received_data.decode(), strict=True))
def test_write_bad_precision_udp(self):
+ """Test write UDP w/bad precision."""
cli = InfluxDBClient(
'localhost', 8086, 'root', 'root',
'test', use_udp=True, udp_port=4444
@@ -159,72 +283,83 @@ def test_write_bad_precision_udp(self):
Exception,
"InfluxDB only supports seconds precision for udp writes"
):
- cli.write_points_with_precision(
+ cli.write_points(
self.dummy_points,
time_precision='ms'
)
@raises(Exception)
def test_write_points_fails(self):
+ """Test failed write points for TestInfluxDBClient object."""
with _mocked_session('post', 500):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.write_points([])
def test_write_points_with_precision(self):
+ """Test write points with precision."""
with _mocked_session('post', 200, self.dummy_points):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
- assert cli.write_points_with_precision(self.dummy_points) is True
+ self.assertTrue(cli.write_points(self.dummy_points))
def test_write_points_bad_precision(self):
+ """Test write points with bad precision."""
cli = InfluxDBClient()
with self.assertRaisesRegexp(
Exception,
"Invalid time precision is given. \(use 's', 'm', 'ms' or 'u'\)"
):
- cli.write_points_with_precision(
+ cli.write_points(
self.dummy_points,
time_precision='g'
)
@raises(Exception)
def test_write_points_with_precision_fails(self):
+ """Test write points where precision fails."""
with _mocked_session('post', 500):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.write_points_with_precision([])
def test_delete_points(self):
+ """Test delete points for TestInfluxDBClient object."""
with _mocked_session('delete', 204) as mocked:
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
- assert cli.delete_points("foo") is True
+ self.assertTrue(cli.delete_points("foo"))
- assert len(mocked.call_args_list) == 1
+ self.assertEqual(len(mocked.call_args_list), 1)
args, kwds = mocked.call_args_list[0]
- assert kwds['params'] == {'u': 'username', 'p': 'password'}
- assert kwds['url'] == 'http://host:8086/db/db/series/foo'
+ self.assertEqual(kwds['params'],
+ {'u': 'username', 'p': 'password'})
+ self.assertEqual(kwds['url'], 'http://host:8086/db/db/series/foo')
@raises(Exception)
def test_delete_points_with_wrong_name(self):
+ """Test delete points with wrong name."""
with _mocked_session('delete', 400):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.delete_points("nonexist")
@raises(NotImplementedError)
def test_create_scheduled_delete(self):
+ """Test create scheduled deletes."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.create_scheduled_delete([])
@raises(NotImplementedError)
def test_get_list_scheduled_delete(self):
+ """Test get schedule list of deletes TestInfluxDBClient."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.get_list_scheduled_delete()
@raises(NotImplementedError)
def test_remove_scheduled_delete(self):
+ """Test remove scheduled delete TestInfluxDBClient."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.remove_scheduled_delete(1)
def test_query(self):
+ """Test query for TestInfluxDBClient object."""
data = [
{
"name": "foo",
@@ -238,11 +373,12 @@ def test_query(self):
with _mocked_session('get', 200, data):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
result = cli.query('select column_one from foo;')
- assert len(result[0]['points']) == 4
+ self.assertEqual(len(result[0]['points']), 4)
def test_query_chunked(self):
+ """Test chunked query for TestInfluxDBClient object."""
cli = InfluxDBClient(database='db')
- example_response = {
+ example_object = {
'points': [
[1415206250119, 40001, 667],
[1415206244555, 30001, 7],
@@ -257,26 +393,60 @@ def test_query_chunked(self):
'val'
]
}
+ example_response = \
+ json.dumps(example_object) + json.dumps(example_object)
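+        # A chunked response is a stream of concatenated JSON documents;
+        # the client is expected to split and parse each one.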
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.GET,
"http://localhost:8086/db/db/series",
- text=json.dumps(example_response)
+ text=example_response
)
- self.assertDictEqual(
+ self.assertListEqual(
cli.query('select * from foo', chunked=True),
- example_response
+ [example_object, example_object]
+ )
+
+ def test_query_chunked_unicode(self):
+ """Test unicode chunked query for TestInfluxDBClient object."""
+ cli = InfluxDBClient(database='db')
+ example_object = {
+ 'points': [
+ [1415206212980, 10001, u('unicode-\xcf\x89')],
+ [1415197271586, 10001, u('more-unicode-\xcf\x90')]
+ ],
+ 'name': 'foo',
+ 'columns': [
+ 'time',
+ 'sequence_number',
+ 'val'
+ ]
+ }
+ example_response = \
+ json.dumps(example_object) + json.dumps(example_object)
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.GET,
+ "http://localhost:8086/db/db/series",
+ text=example_response
+ )
+
+ self.assertListEqual(
+ cli.query('select * from foo', chunked=True),
+ [example_object, example_object]
)
@raises(Exception)
def test_query_fail(self):
+ """Test failed query for TestInfluxDBClient."""
with _mocked_session('get', 401):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.query('select column_one from foo;')
def test_query_bad_precision(self):
+ """Test query with bad precision for TestInfluxDBClient."""
cli = InfluxDBClient()
with self.assertRaisesRegexp(
Exception,
@@ -285,54 +455,74 @@ def test_query_bad_precision(self):
cli.query('select column_one from foo', time_precision='g')
def test_create_database(self):
+ """Test create database for TestInfluxDBClient."""
with _mocked_session('post', 201, {"name": "new_db"}):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
- assert cli.create_database('new_db') is True
+ self.assertTrue(cli.create_database('new_db'))
@raises(Exception)
def test_create_database_fails(self):
+ """Test failed create database for TestInfluxDBClient."""
with _mocked_session('post', 401):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.create_database('new_db')
def test_delete_database(self):
+ """Test delete database for TestInfluxDBClient."""
with _mocked_session('delete', 204):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
- assert cli.delete_database('old_db') is True
+ self.assertTrue(cli.delete_database('old_db'))
@raises(Exception)
def test_delete_database_fails(self):
+ """Test failed delete database for TestInfluxDBClient."""
with _mocked_session('delete', 401):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.delete_database('old_db')
- def test_get_database_list(self):
+ def test_get_list_database(self):
+ """Test get list of databases for TestInfluxDBClient."""
data = [
{"name": "a_db"}
]
with _mocked_session('get', 200, data):
cli = InfluxDBClient('host', 8086, 'username', 'password')
- assert len(cli.get_database_list()) == 1
- assert cli.get_database_list()[0]['name'] == 'a_db'
+ self.assertEqual(len(cli.get_list_database()), 1)
+ self.assertEqual(cli.get_list_database()[0]['name'], 'a_db')
@raises(Exception)
- def test_get_database_list_fails(self):
+ def test_get_list_database_fails(self):
+ """Test failed get list of databases for TestInfluxDBClient."""
with _mocked_session('get', 401):
cli = InfluxDBClient('host', 8086, 'username', 'password')
- cli.get_database_list()
+ cli.get_list_database()
+
+ @raises(FutureWarning)
+ def test_get_database_list_deprecated(self):
+ """Test deprecated get database list for TestInfluxDBClient."""
+ data = [
+ {"name": "a_db"}
+ ]
+ with _mocked_session('get', 200, data):
+ cli = InfluxDBClient('host', 8086, 'username', 'password')
+ self.assertEqual(len(cli.get_database_list()), 1)
+ self.assertEqual(cli.get_database_list()[0]['name'], 'a_db')
def test_delete_series(self):
+ """Test delete series for TestInfluxDBClient."""
with _mocked_session('delete', 204):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.delete_series('old_series')
@raises(Exception)
def test_delete_series_fails(self):
+ """Test failed delete series for TestInfluxDBClient."""
with _mocked_session('delete', 401):
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.delete_series('old_series')
def test_get_series_list(self):
+ """Test get list of series for TestInfluxDBClient."""
cli = InfluxDBClient(database='db')
with requests_mock.Mocker() as m:
@@ -352,6 +542,7 @@ def test_get_series_list(self):
)
def test_get_continuous_queries(self):
+ """Test get continuous queries for TestInfluxDBClient."""
cli = InfluxDBClient(database='db')
with requests_mock.Mocker() as m:
@@ -384,9 +575,11 @@ def test_get_continuous_queries(self):
)
def test_get_list_cluster_admins(self):
+ """Test get list of cluster admins, not implemented."""
pass
def test_add_cluster_admin(self):
+ """Test add cluster admin for TestInfluxDBClient."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
@@ -408,6 +601,7 @@ def test_add_cluster_admin(self):
)
def test_update_cluster_admin_password(self):
+ """Test update cluster admin pass for TestInfluxDBClient."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
@@ -426,6 +620,7 @@ def test_update_cluster_admin_password(self):
)
def test_delete_cluster_admin(self):
+ """Test delete cluster admin for TestInfluxDBClient."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.DELETE,
@@ -439,12 +634,15 @@ def test_delete_cluster_admin(self):
self.assertIsNone(m.last_request.body)
def test_set_database_admin(self):
+ """Test set database admin for TestInfluxDBClient."""
pass
def test_unset_database_admin(self):
+ """Test unset database admin for TestInfluxDBClient."""
pass
def test_alter_database_admin(self):
+ """Test alter database admin for TestInfluxDBClient."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
@@ -466,29 +664,34 @@ def test_alter_database_admin(self):
@raises(NotImplementedError)
def test_get_list_database_admins(self):
+ """Test get list of database admins for TestInfluxDBClient."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.get_list_database_admins()
@raises(NotImplementedError)
def test_add_database_admin(self):
+ """Test add database admins for TestInfluxDBClient."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.add_database_admin('admin', 'admin_secret_password')
@raises(NotImplementedError)
def test_update_database_admin_password(self):
+ """Test update database admin pass for TestInfluxDBClient."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.update_database_admin_password('admin', 'admin_secret_password')
@raises(NotImplementedError)
def test_delete_database_admin(self):
+ """Test delete database admin for TestInfluxDBClient."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.delete_database_admin('admin')
def test_get_database_users(self):
+ """Test get database users for TestInfluxDBClient."""
cli = InfluxDBClient('localhost', 8086, 'username', 'password', 'db')
example_response = \
- '[{"name":"paul","isAdmin":false,"writeTo":".*","readFrom":".*"},' \
+ '[{"name":"paul","isAdmin":false,"writeTo":".*","readFrom":".*"},'\
'{"name":"bobby","isAdmin":false,"writeTo":".*","readFrom":".*"}]'
with requests_mock.Mocker() as m:
@@ -502,6 +705,7 @@ def test_get_database_users(self):
self.assertEqual(json.loads(example_response), users)
def test_add_database_user(self):
+ """Test add database user for TestInfluxDBClient."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
@@ -525,6 +729,7 @@ def test_add_database_user(self):
)
def test_add_database_user_bad_permissions(self):
+ """Test add database user with bad perms for TestInfluxDBClient."""
cli = InfluxDBClient()
with self.assertRaisesRegexp(
@@ -537,7 +742,8 @@ def test_add_database_user_bad_permissions(self):
permissions=('hello', 'hello', 'hello')
)
- def test_update_database_user_password(self):
+ def test_alter_database_user_password(self):
+ """Test alter database user pass for TestInfluxDBClient."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.POST,
@@ -545,17 +751,66 @@ def test_update_database_user_password(self):
)
cli = InfluxDBClient(database='db')
- cli.update_database_user_password(
+ cli.alter_database_user(
username='paul',
- new_password='laup'
+ password='n3wp4ss!'
)
self.assertDictEqual(
json.loads(m.last_request.body),
- {'password': 'laup'}
+ {
+ 'password': 'n3wp4ss!'
+ }
+ )
+
+ def test_alter_database_user_permissions(self):
+ """Test alter database user perms for TestInfluxDBClient."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/db/db/users/paul"
+ )
+
+ cli = InfluxDBClient(database='db')
+ cli.alter_database_user(
+ username='paul',
+ permissions=('^$', '.*')
+ )
+
+ self.assertDictEqual(
+ json.loads(m.last_request.body),
+ {
+ 'readFrom': '^$',
+ 'writeTo': '.*'
+ }
+ )
+
+ def test_alter_database_user_password_and_permissions(self):
+ """Test alter database user pass and perms for TestInfluxDBClient."""
+ with requests_mock.Mocker() as m:
+ m.register_uri(
+ requests_mock.POST,
+ "http://localhost:8086/db/db/users/paul"
+ )
+
+ cli = InfluxDBClient(database='db')
+ cli.alter_database_user(
+ username='paul',
+ password='n3wp4ss!',
+ permissions=('^$', '.*')
+ )
+
+ self.assertDictEqual(
+ json.loads(m.last_request.body),
+ {
+ 'password': 'n3wp4ss!',
+ 'readFrom': '^$',
+ 'writeTo': '.*'
+ }
)
def test_update_database_user_password_current_user(self):
+ """Test update database user pass for TestInfluxDBClient."""
cli = InfluxDBClient(
username='root',
password='hello',
@@ -575,6 +830,7 @@ def test_update_database_user_password_current_user(self):
self.assertEqual(cli._password, 'bye')
def test_delete_database_user(self):
+ """Test delete database user for TestInfluxDBClient."""
with requests_mock.Mocker() as m:
m.register_uri(
requests_mock.DELETE,
@@ -588,5 +844,61 @@ def test_delete_database_user(self):
@raises(NotImplementedError)
def test_update_permission(self):
+ """Test update permission for TestInfluxDBClient."""
cli = InfluxDBClient('host', 8086, 'username', 'password', 'db')
cli.update_permission('admin', [])
+
+ @mock.patch('requests.Session.request')
+ def test_request_retry(self, mock_request):
+ """Test that two connection errors will be handled."""
+ class CustomMock(object):
+ """Define CustomMock object."""
+
+            def __init__(self):
+                """Initialize the attempt counter."""
+                self.i = 0
+
+ def connection_error(self, *args, **kwargs):
+ """Test connection error in CustomMock."""
+ self.i += 1
+
+ if self.i < 3:
+ raise requests.exceptions.ConnectionError
+ else:
+ r = requests.Response()
+ r.status_code = 200
+ return r
+
+ mock_request.side_effect = CustomMock().connection_error
+
+ cli = InfluxDBClient(database='db')
+ cli.write_points(
+ self.dummy_points
+ )
+
+ @mock.patch('requests.Session.request')
+ def test_request_retry_raises(self, mock_request):
+ """Test that three connection errors will not be handled."""
+ class CustomMock(object):
+ """Define CustomMock object."""
+
+ def __init__(self):
+ """Initialize the object."""
+ self.i = 0
+
+ def connection_error(self, *args, **kwargs):
+ """Test the connection error for CustomMock."""
+ self.i += 1
+
+ if self.i < 4:
+ raise requests.exceptions.ConnectionError
+ else:
+ r = requests.Response()
+ r.status_code = 200
+ return r
+
+ mock_request.side_effect = CustomMock().connection_error
+
+ cli = InfluxDBClient(database='db')
+
+ with self.assertRaises(requests.exceptions.ConnectionError):
+ cli.write_points(self.dummy_points)
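+
+# Taken together, the two tests above pin down the client's retry contract:
+# each request is attempted up to three times, so two consecutive
+# ConnectionErrors are absorbed while a third one propagates to the caller.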
diff --git a/influxdb/tests/influxdb08/dataframe_client_test.py b/influxdb/tests/influxdb08/dataframe_client_test.py
new file mode 100644
index 00000000..0a766af0
--- /dev/null
+++ b/influxdb/tests/influxdb08/dataframe_client_test.py
@@ -0,0 +1,331 @@
+# -*- coding: utf-8 -*-
+"""Unit tests for misc module."""
+
+from datetime import timedelta
+
+import copy
+import json
+import unittest
+import warnings
+
+import requests_mock
+
+from nose.tools import raises
+
+from influxdb.tests import skip_if_pypy, using_pypy
+
+from .client_test import _mocked_session
+
+if not using_pypy:
+ import pandas as pd
+ from pandas.util.testing import assert_frame_equal
+ from influxdb.influxdb08 import DataFrameClient
+
+
+@skip_if_pypy
+class TestDataFrameClient(unittest.TestCase):
+ """Define the DataFramClient test object."""
+
+ def setUp(self):
+ """Set up an instance of TestDataFrameClient object."""
+ # By default, raise exceptions on warnings
+ warnings.simplefilter('error', FutureWarning)
+
+ def test_write_points_from_dataframe(self):
+ """Test write points from dataframe."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ points = [
+ {
+ "points": [
+ ["1", 1, 1.0, 0],
+ ["2", 2, 2.0, 3600]
+ ],
+ "name": "foo",
+ "columns": ["column_one", "column_two", "column_three", "time"]
+ }
+ ]
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+ cli.write_points({"foo": dataframe})
+
+ self.assertListEqual(json.loads(m.last_request.body), points)
+
+ def test_write_points_from_dataframe_with_float_nan(self):
+ """Test write points from dataframe with NaN float."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[[1, float("NaN"), 1.0], [2, 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ points = [
+ {
+ "points": [
+ [1, None, 1.0, 0],
+ [2, 2, 2.0, 3600]
+ ],
+ "name": "foo",
+ "columns": ["column_one", "column_two", "column_three", "time"]
+ }
+ ]
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+ cli.write_points({"foo": dataframe})
+
+ self.assertListEqual(json.loads(m.last_request.body), points)
+
+ def test_write_points_from_dataframe_in_batches(self):
+ """Test write points from dataframe in batches."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+ self.assertTrue(cli.write_points({"foo": dataframe}, batch_size=1))
+
+ def test_write_points_from_dataframe_with_numeric_column_names(self):
+ """Test write points from dataframe with numeric columns."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ # df with numeric column names
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)])
+ points = [
+ {
+ "points": [
+ ["1", 1, 1.0, 0],
+ ["2", 2, 2.0, 3600]
+ ],
+ "name": "foo",
+ "columns": ['0', '1', '2', "time"]
+ }
+ ]
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+ cli.write_points({"foo": dataframe})
+
+ self.assertListEqual(json.loads(m.last_request.body), points)
+
+ def test_write_points_from_dataframe_with_period_index(self):
+ """Test write points from dataframe with period index."""
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[pd.Period('1970-01-01'),
+ pd.Period('1970-01-02')],
+ columns=["column_one", "column_two",
+ "column_three"])
+ points = [
+ {
+ "points": [
+ ["1", 1, 1.0, 0],
+ ["2", 2, 2.0, 86400]
+ ],
+ "name": "foo",
+ "columns": ["column_one", "column_two", "column_three", "time"]
+ }
+ ]
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+ cli.write_points({"foo": dataframe})
+
+ self.assertListEqual(json.loads(m.last_request.body), points)
+
+ def test_write_points_from_dataframe_with_time_precision(self):
+ """Test write points from dataframe with time precision."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ index=[now, now + timedelta(hours=1)],
+ columns=["column_one", "column_two",
+ "column_three"])
+ points = [
+ {
+ "points": [
+ ["1", 1, 1.0, 0],
+ ["2", 2, 2.0, 3600]
+ ],
+ "name": "foo",
+ "columns": ["column_one", "column_two", "column_three", "time"]
+ }
+ ]
+
+ points_ms = copy.deepcopy(points)
+ points_ms[0]["points"][1][-1] = 3600 * 1000
+
+ points_us = copy.deepcopy(points)
+ points_us[0]["points"][1][-1] = 3600 * 1000000
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+
+ cli.write_points({"foo": dataframe}, time_precision='s')
+ self.assertListEqual(json.loads(m.last_request.body), points)
+
+ cli.write_points({"foo": dataframe}, time_precision='m')
+ self.assertListEqual(json.loads(m.last_request.body), points_ms)
+
+ cli.write_points({"foo": dataframe}, time_precision='u')
+ self.assertListEqual(json.loads(m.last_request.body), points_us)
+
+ @raises(TypeError)
+ def test_write_points_from_dataframe_fails_without_time_index(self):
+ """Test write points from dataframe that fails without time index."""
+ dataframe = pd.DataFrame(data=[["1", 1, 1.0], ["2", 2, 2.0]],
+ columns=["column_one", "column_two",
+ "column_three"])
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+ cli.write_points({"foo": dataframe})
+
+ @raises(TypeError)
+ def test_write_points_from_dataframe_fails_with_series(self):
+ """Test failed write points from dataframe with series."""
+ now = pd.Timestamp('1970-01-01 00:00+00:00')
+ dataframe = pd.Series(data=[1.0, 2.0],
+ index=[now, now + timedelta(hours=1)])
+
+ with requests_mock.Mocker() as m:
+ m.register_uri(requests_mock.POST,
+ "http://localhost:8086/db/db/series")
+
+ cli = DataFrameClient(database='db')
+ cli.write_points({"foo": dataframe})
+
+ def test_query_into_dataframe(self):
+ """Test query into a dataframe."""
+ data = [
+ {
+ "name": "foo",
+ "columns": ["time", "sequence_number", "column_one"],
+ "points": [
+ [3600, 16, 2], [3600, 15, 1],
+ [0, 14, 2], [0, 13, 1]
+ ]
+ }
+ ]
+ # dataframe sorted ascending by time first, then sequence_number
+ dataframe = pd.DataFrame(data=[[13, 1], [14, 2], [15, 1], [16, 2]],
+ index=pd.to_datetime([0, 0,
+ 3600, 3600],
+ unit='s', utc=True),
+ columns=['sequence_number', 'column_one'])
+ with _mocked_session('get', 200, data):
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ result = cli.query('select column_one from foo;')
+ assert_frame_equal(dataframe, result)
+
+ def test_query_multiple_time_series(self):
+ """Test query for multiple time series."""
+ data = [
+ {
+ "name": "series1",
+ "columns": ["time", "mean", "min", "max", "stddev"],
+ "points": [[0, 323048, 323048, 323048, 0]]
+ },
+ {
+ "name": "series2",
+ "columns": ["time", "mean", "min", "max", "stddev"],
+ "points": [[0, -2.8233, -2.8503, -2.7832, 0.0173]]
+ },
+ {
+ "name": "series3",
+ "columns": ["time", "mean", "min", "max", "stddev"],
+ "points": [[0, -0.01220, -0.01220, -0.01220, 0]]
+ }
+ ]
+ dataframes = {
+ 'series1': pd.DataFrame(data=[[323048, 323048, 323048, 0]],
+ index=pd.to_datetime([0], unit='s',
+ utc=True),
+ columns=['mean', 'min', 'max', 'stddev']),
+ 'series2': pd.DataFrame(data=[[-2.8233, -2.8503, -2.7832, 0.0173]],
+ index=pd.to_datetime([0], unit='s',
+ utc=True),
+ columns=['mean', 'min', 'max', 'stddev']),
+ 'series3': pd.DataFrame(data=[[-0.01220, -0.01220, -0.01220, 0]],
+ index=pd.to_datetime([0], unit='s',
+ utc=True),
+ columns=['mean', 'min', 'max', 'stddev'])
+ }
+ with _mocked_session('get', 200, data):
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ result = cli.query("""select mean(value), min(value), max(value),
+ stddev(value) from series1, series2, series3""")
+ self.assertEqual(dataframes.keys(), result.keys())
+ for key in dataframes.keys():
+ assert_frame_equal(dataframes[key], result[key])
+
+ def test_query_with_empty_result(self):
+ """Test query with empty results."""
+ with _mocked_session('get', 200, []):
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ result = cli.query('select column_one from foo;')
+ self.assertEqual(result, [])
+
+ def test_list_series(self):
+ """Test list of series for dataframe object."""
+ response = [
+ {
+ 'columns': ['time', 'name'],
+ 'name': 'list_series_result',
+ 'points': [[0, 'seriesA'], [0, 'seriesB']]
+ }
+ ]
+ with _mocked_session('get', 200, response):
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+ series_list = cli.get_list_series()
+ self.assertEqual(series_list, ['seriesA', 'seriesB'])
+
+ def test_datetime_to_epoch(self):
+ """Test convert datetime to epoch."""
+ timestamp = pd.Timestamp('2013-01-01 00:00:00.000+00:00')
+ cli = DataFrameClient('host', 8086, 'username', 'password', 'db')
+
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp),
+ 1356998400.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='s'),
+ 1356998400.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='m'),
+ 1356998400000.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='ms'),
+ 1356998400000.0
+ )
+ self.assertEqual(
+ cli._datetime_to_epoch(timestamp, time_precision='u'),
+ 1356998400000000.0
+ )
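+
+# Note the influxdb08 precision codes exercised above: 's' yields seconds,
+# 'm' (like 'ms') milliseconds, and 'u' microseconds since the Unix epoch.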
diff --git a/influxdb/tests/influxdb08/helper_test.py b/influxdb/tests/influxdb08/helper_test.py
new file mode 100644
index 00000000..2e305f3f
--- /dev/null
+++ b/influxdb/tests/influxdb08/helper_test.py
@@ -0,0 +1,228 @@
+# -*- coding: utf-8 -*-
+"""Define set of helper functions for the dataframe."""
+
+import unittest
+import warnings
+
+import mock
+from influxdb.influxdb08 import SeriesHelper, InfluxDBClient
+from requests.exceptions import ConnectionError
+
+
+class TestSeriesHelper(unittest.TestCase):
+ """Define the SeriesHelper for test."""
+
+ @classmethod
+ def setUpClass(cls):
+ """Set up an instance of the TestSerisHelper object."""
+ super(TestSeriesHelper, cls).setUpClass()
+
+ TestSeriesHelper.client = InfluxDBClient(
+ 'host',
+ 8086,
+ 'username',
+ 'password',
+ 'database'
+ )
+
+ class MySeriesHelper(SeriesHelper):
+ """Define a subset SeriesHelper instance."""
+
+ class Meta:
+ """Define metadata for the TestSeriesHelper object."""
+
+ client = TestSeriesHelper.client
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ bulk_size = 5
+ autocommit = True
+
+ TestSeriesHelper.MySeriesHelper = MySeriesHelper
+
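+    # Instantiating MySeriesHelper(server_name=..., time=...) queues one
+    # point; with autocommit on and bulk_size = 5, every fifth
+    # instantiation is flushed through client.write_points, as the tests
+    # below exercise.
+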
+ def test_auto_commit(self):
+ """Test that write_points called after the right number of events."""
+ class AutoCommitTest(SeriesHelper):
+ """Define an instance of SeriesHelper for AutoCommit test."""
+
+ class Meta:
+ """Define metadata AutoCommitTest object."""
+
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ bulk_size = 5
+ client = InfluxDBClient()
+ autocommit = True
+
+ fake_write_points = mock.MagicMock()
+ AutoCommitTest(server_name='us.east-1', time=159)
+ AutoCommitTest._client.write_points = fake_write_points
+ AutoCommitTest(server_name='us.east-1', time=158)
+ AutoCommitTest(server_name='us.east-1', time=157)
+ AutoCommitTest(server_name='us.east-1', time=156)
+ self.assertFalse(fake_write_points.called)
+ AutoCommitTest(server_name='us.east-1', time=3443)
+ self.assertTrue(fake_write_points.called)
+
+ def testSingleSeriesName(self):
+ """Test JSON conversion when there is only one series name."""
+ TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=159)
+ TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=158)
+ TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=157)
+ TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=156)
+ expectation = [{'points': [[159, 'us.east-1'],
+ [158, 'us.east-1'],
+ [157, 'us.east-1'],
+ [156, 'us.east-1']],
+ 'name': 'events.stats.us.east-1',
+ 'columns': ['time', 'server_name']}]
+
+ rcvd = TestSeriesHelper.MySeriesHelper._json_body_()
+ self.assertTrue(all([el in expectation for el in rcvd]) and
+ all([el in rcvd for el in expectation]),
+ 'Invalid JSON body of time series returned from '
+ '_json_body_ for one series name: {0}.'.format(rcvd))
+ TestSeriesHelper.MySeriesHelper._reset_()
+ self.assertEqual(
+ TestSeriesHelper.MySeriesHelper._json_body_(),
+ [],
+ 'Resetting helper did not empty datapoints.')
+
+ def testSeveralSeriesNames(self):
+ """Test JSON conversion when there is only one series name."""
+ TestSeriesHelper.MySeriesHelper(server_name='us.east-1', time=159)
+ TestSeriesHelper.MySeriesHelper(server_name='fr.paris-10', time=158)
+ TestSeriesHelper.MySeriesHelper(server_name='lu.lux', time=157)
+ TestSeriesHelper.MySeriesHelper(server_name='uk.london', time=156)
+ expectation = [{'points': [[157, 'lu.lux']],
+ 'name': 'events.stats.lu.lux',
+ 'columns': ['time', 'server_name']},
+ {'points': [[156, 'uk.london']],
+ 'name': 'events.stats.uk.london',
+ 'columns': ['time', 'server_name']},
+ {'points': [[158, 'fr.paris-10']],
+ 'name': 'events.stats.fr.paris-10',
+ 'columns': ['time', 'server_name']},
+ {'points': [[159, 'us.east-1']],
+ 'name': 'events.stats.us.east-1',
+ 'columns': ['time', 'server_name']}]
+
+ rcvd = TestSeriesHelper.MySeriesHelper._json_body_()
+ self.assertTrue(all([el in expectation for el in rcvd]) and
+ all([el in rcvd for el in expectation]),
+ 'Invalid JSON body of time series returned from '
+ '_json_body_ for several series names: {0}.'
+ .format(rcvd))
+ TestSeriesHelper.MySeriesHelper._reset_()
+ self.assertEqual(
+ TestSeriesHelper.MySeriesHelper._json_body_(),
+ [],
+ 'Resetting helper did not empty datapoints.')
+
+ def testInvalidHelpers(self):
+ """Test errors in invalid helpers."""
+ class MissingMeta(SeriesHelper):
+ """Define SeriesHelper object for MissingMeta test."""
+
+ pass
+
+ class MissingClient(SeriesHelper):
+ """Define SeriesHelper object for MissingClient test."""
+
+ class Meta:
+ """Define metadata for MissingClient object."""
+
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ autocommit = True
+
+ class MissingSeriesName(SeriesHelper):
+ """Define SeriesHelper object for MissingSeries test."""
+
+ class Meta:
+ """Define metadata for MissingSeriesName object."""
+
+ fields = ['time', 'server_name']
+
+ class MissingFields(SeriesHelper):
+ """Define SeriesHelper for MissingFields test."""
+
+ class Meta:
+ """Define metadata for MissingFields object."""
+
+ series_name = 'events.stats.{server_name}'
+
+ for cls in [MissingMeta, MissingClient, MissingFields,
+ MissingSeriesName]:
+ self.assertRaises(
+ AttributeError, cls, **{'time': 159,
+ 'server_name': 'us.east-1'})
+
+ def testWarnBulkSizeZero(self):
+ """Test warning for an invalid bulk size."""
+ class WarnBulkSizeZero(SeriesHelper):
+ """Define SeriesHelper for WarnBulkSizeZero test."""
+
+ class Meta:
+ """Define metadata for WarnBulkSizeZero object."""
+
+ client = TestSeriesHelper.client
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ bulk_size = 0
+ autocommit = True
+
+ with warnings.catch_warnings(record=True) as rec_warnings:
+ warnings.simplefilter("always")
+            # The server defined in the client is invalid; we're testing
+            # the warning only.
+ with self.assertRaises(ConnectionError):
+ WarnBulkSizeZero(time=159, server_name='us.east-1')
+
+ self.assertGreaterEqual(
+ len(rec_warnings), 1,
+                '{0} call should have generated one warning. '
+ 'Actual generated warnings: {1}'.format(
+ WarnBulkSizeZero, '\n'.join(map(str, rec_warnings))))
+
+ expected_msg = (
+ 'Definition of bulk_size in WarnBulkSizeZero forced to 1, '
+ 'was less than 1.')
+
+ self.assertIn(expected_msg, list(w.message.args[0]
+ for w in rec_warnings),
+ 'Warning message did not contain "forced to 1".')
+
+ def testWarnBulkSizeNoEffect(self):
+ """Test warning for a set bulk size but autocommit False."""
+ class WarnBulkSizeNoEffect(SeriesHelper):
+ """Define SeriesHelper for WarnBulkSizeNoEffect object."""
+
+ class Meta:
+ """Define metadata for WarnBulkSizeNoEffect object."""
+
+ series_name = 'events.stats.{server_name}'
+ fields = ['time', 'server_name']
+ bulk_size = 5
+ autocommit = False
+
+ with warnings.catch_warnings(record=True) as rec_warnings:
+ warnings.simplefilter("always")
+ WarnBulkSizeNoEffect(time=159, server_name='us.east-1')
+
+ self.assertGreaterEqual(
+ len(rec_warnings), 1,
+            '{0} call should have generated one warning. '
+ 'Actual generated warnings: {1}'.format(
+ WarnBulkSizeNoEffect, '\n'.join(map(str, rec_warnings))))
+
+ expected_msg = (
+ 'Definition of bulk_size in WarnBulkSizeNoEffect has no affect '
+ 'because autocommit is false.')
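+        # (sic: 'affect' mirrors the exact warning text emitted by the
+        # helper; the assertIn below matches that text verbatim.)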
+
+ self.assertIn(expected_msg, list(w.message.args[0]
+ for w in rec_warnings),
+ 'Warning message did not contain the expected_msg.')
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/influxdb/tests/misc.py b/influxdb/tests/misc.py
new file mode 100644
index 00000000..324d13c4
--- /dev/null
+++ b/influxdb/tests/misc.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+"""Define the misc handler for InfluxDBClient test."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import socket
+
+
+def get_free_ports(num_ports, ip='127.0.0.1'):
+ """Determine free ports on provided interface.
+
+    Get `num_ports` free/available ports on the interface linked to the `ip`.
+
+    :param int num_ports: The number of free ports to get
+    :param str ip: The IP on which the ports have to be taken
+    :return: a set of port numbers
+ """
+ sock_ports = []
+ ports = set()
+ try:
+ for _ in range(num_ports):
+ sock = socket.socket()
+ cur = [sock, -1]
+            # Append the socket directly so that it will also be closed
+            # (no leaked resource) in the finally clause below.
+ sock_ports.append(cur)
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
+ sock.bind((ip, 0))
+ cur[1] = sock.getsockname()[1]
+ finally:
+ for sock, port in sock_ports:
+ sock.close()
+ ports.add(port)
+ assert num_ports == len(ports)
+ return ports
+
+
+def is_port_open(port, ip='127.0.0.1'):
+ """Check if given TCP port is open for connection."""
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ try:
+ result = sock.connect_ex((ip, port))
+ if not result:
+ sock.shutdown(socket.SHUT_RDWR)
+ return result == 0
+ finally:
+ sock.close()
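+
+
+# For illustration, a minimal usage sketch with hypothetical values:
+#
+#     http_port, udp_port = get_free_ports(2)
+#     assert not is_port_open(http_port)
+#     assert not is_port_open(udp_port)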
diff --git a/influxdb/tests/resultset_test.py b/influxdb/tests/resultset_test.py
new file mode 100644
index 00000000..83faa4dd
--- /dev/null
+++ b/influxdb/tests/resultset_test.py
@@ -0,0 +1,202 @@
+# -*- coding: utf-8 -*-
+"""Define the resultset test package."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import unittest
+
+from influxdb.exceptions import InfluxDBClientError
+from influxdb.resultset import ResultSet
+
+
+class TestResultSet(unittest.TestCase):
+ """Define the ResultSet test object."""
+
+ def setUp(self):
+ """Set up an instance of TestResultSet."""
+ self.query_response = {
+ "results": [
+ {"series": [{"name": "cpu_load_short",
+ "columns": ["time", "value", "host", "region"],
+ "values": [
+ ["2015-01-29T21:51:28.968422294Z",
+ 0.64,
+ "server01",
+ "us-west"],
+ ["2015-01-29T21:51:28.968422294Z",
+ 0.65,
+ "server02",
+ "us-west"],
+ ]},
+ {"name": "other_series",
+ "columns": ["time", "value", "host", "region"],
+ "values": [
+ ["2015-01-29T21:51:28.968422294Z",
+ 0.66,
+ "server01",
+ "us-west"],
+ ]}]}
+ ]
+ }
+
+ self.rs = ResultSet(self.query_response['results'][0])
+
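+    # The tests below exercise the three key forms a ResultSet accepts:
+    # rs['measurement'], rs[{'tag': 'value'}], and the combined
+    # rs[('measurement', {'tag': 'value'})].
+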
+ def test_filter_by_name(self):
+ """Test filtering by name in TestResultSet object."""
+ expected = [
+ {'value': 0.64,
+ 'time': '2015-01-29T21:51:28.968422294Z',
+ 'host': 'server01',
+ 'region': 'us-west'},
+ {'value': 0.65,
+ 'time': '2015-01-29T21:51:28.968422294Z',
+ 'host': 'server02',
+ 'region': 'us-west'},
+ ]
+
+ self.assertEqual(expected, list(self.rs['cpu_load_short']))
+ self.assertEqual(expected,
+ list(self.rs.get_points(
+ measurement='cpu_load_short')))
+
+ def test_filter_by_tags(self):
+ """Test filter by tags in TestResultSet object."""
+ expected = [
+ {'value': 0.64,
+ 'time': '2015-01-29T21:51:28.968422294Z',
+ 'host': 'server01',
+ 'region': 'us-west'},
+ {'value': 0.66,
+ 'time': '2015-01-29T21:51:28.968422294Z',
+ 'host': 'server01',
+ 'region': 'us-west'},
+ ]
+
+ self.assertEqual(
+ expected,
+ list(self.rs[{"host": "server01"}])
+ )
+
+ self.assertEqual(
+ expected,
+ list(self.rs.get_points(tags={'host': 'server01'}))
+ )
+
+ def test_filter_by_name_and_tags(self):
+ """Test filter by name and tags in TestResultSet object."""
+ self.assertEqual(
+ list(self.rs[('cpu_load_short', {"host": "server01"})]),
+ [{'value': 0.64,
+ 'time': '2015-01-29T21:51:28.968422294Z',
+ 'host': 'server01',
+ 'region': 'us-west'}]
+ )
+
+ self.assertEqual(
+ list(self.rs[('cpu_load_short', {"region": "us-west"})]),
+ [
+ {'value': 0.64,
+ 'time': '2015-01-29T21:51:28.968422294Z',
+ 'host': 'server01',
+ 'region': 'us-west'},
+ {'value': 0.65,
+ 'time': '2015-01-29T21:51:28.968422294Z',
+ 'host': 'server02',
+ 'region': 'us-west'},
+ ]
+ )
+
+ def test_keys(self):
+ """Test keys in TestResultSet object."""
+ self.assertEqual(
+ self.rs.keys(),
+ [
+ ('cpu_load_short', None),
+ ('other_series', None),
+ ]
+ )
+
+ def test_len(self):
+ """Test length in TestResultSet object."""
+ self.assertEqual(
+ len(self.rs),
+ 2
+ )
+
+ def test_items(self):
+ """Test items in TestResultSet object."""
+ items = list(self.rs.items())
+ items_lists = [(item[0], list(item[1])) for item in items]
+
+ self.assertEqual(
+ items_lists,
+ [
+ (
+ ('cpu_load_short', None),
+ [
+ {'time': '2015-01-29T21:51:28.968422294Z',
+ 'value': 0.64,
+ 'host': 'server01',
+ 'region': 'us-west'},
+ {'time': '2015-01-29T21:51:28.968422294Z',
+ 'value': 0.65,
+ 'host': 'server02',
+ 'region': 'us-west'}]),
+ (
+ ('other_series', None),
+ [
+ {'time': '2015-01-29T21:51:28.968422294Z',
+ 'value': 0.66,
+ 'host': 'server01',
+ 'region': 'us-west'}])]
+ )
+
+ def test_point_from_cols_vals(self):
+ """Test points from columns in TestResultSet object."""
+ cols = ['col1', 'col2']
+ vals = [1, '2']
+
+ point = ResultSet.point_from_cols_vals(cols, vals)
+ self.assertDictEqual(
+ point,
+ {'col1': 1, 'col2': '2'}
+ )
+
+ def test_system_query(self):
+ """Test system query capabilities in TestResultSet object."""
+ rs = ResultSet(
+ {'series': [
+ {'values': [['another', '48h0m0s', 3, False],
+ ['default', '0', 1, False],
+ ['somename', '24h0m0s', 4, True]],
+ 'columns': ['name', 'duration',
+ 'replicaN', 'default']}]}
+ )
+
+ self.assertEqual(
+ rs.keys(),
+ [('results', None)]
+ )
+
+ self.assertEqual(
+ list(rs['results']),
+ [
+ {'duration': '48h0m0s', 'default': False, 'replicaN': 3,
+ 'name': 'another'},
+ {'duration': '0', 'default': False, 'replicaN': 1,
+ 'name': 'default'},
+ {'duration': '24h0m0s', 'default': True, 'replicaN': 4,
+ 'name': 'somename'}
+ ]
+ )
+
+ def test_resultset_error(self):
+ """Test returning error in TestResultSet object."""
+ with self.assertRaises(InfluxDBClientError):
+ ResultSet({
+ "series": [],
+ "error": "Big error, many problems."
+ })
diff --git a/influxdb/tests/server_tests/__init__.py b/influxdb/tests/server_tests/__init__.py
new file mode 100644
index 00000000..ce149ab4
--- /dev/null
+++ b/influxdb/tests/server_tests/__init__.py
@@ -0,0 +1 @@
+"""Define the server tests package."""
diff --git a/influxdb/tests/server_tests/base.py b/influxdb/tests/server_tests/base.py
new file mode 100644
index 00000000..45a9ec80
--- /dev/null
+++ b/influxdb/tests/server_tests/base.py
@@ -0,0 +1,138 @@
+# -*- coding: utf-8 -*-
+"""Define the base module for server test."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import sys
+
+from influxdb.tests import using_pypy
+from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance
+
+from influxdb.client import InfluxDBClient
+
+if not using_pypy:
+ from influxdb.dataframe_client import DataFrameClient
+
+
+def _setup_influxdb_server(inst):
+ inst.influxd_inst = InfluxDbInstance(
+ inst.influxdb_template_conf,
+ udp_enabled=getattr(inst, 'influxdb_udp_enabled', False),
+ )
+
+ inst.cli = InfluxDBClient('localhost',
+ inst.influxd_inst.http_port,
+ 'root',
+ '',
+ database='db')
+ if not using_pypy:
+ inst.cliDF = DataFrameClient('localhost',
+ inst.influxd_inst.http_port,
+ 'root',
+ '',
+ database='db')
+
+
+def _setup_gzip_client(inst):
+ inst.cli = InfluxDBClient('localhost',
+ inst.influxd_inst.http_port,
+ 'root',
+ '',
+ database='db',
+ gzip=True)
+
+
+def _teardown_influxdb_server(inst):
+ remove_tree = sys.exc_info() == (None, None, None)
+ inst.influxd_inst.close(remove_tree=remove_tree)
+
+
+class SingleTestCaseWithServerMixin(object):
+ """Define the single testcase with server mixin.
+
+ A mixin for unittest.TestCase to start an influxdb server instance
+ in a temporary directory **for each test function/case**
+ """
+
+ # 'influxdb_template_conf' attribute must be set
+ # on the TestCase class or instance.
+
+ @classmethod
+ def setUp(cls):
+ """Set up an instance of the SingleTestCaseWithServerMixin."""
+ _setup_influxdb_server(cls)
+
+ @classmethod
+ def tearDown(cls):
+ """Tear down an instance of the SingleTestCaseWithServerMixin."""
+ _teardown_influxdb_server(cls)
+
+
+class ManyTestCasesWithServerMixin(object):
+ """Define the many testcase with server mixin.
+
+    Same as the SingleTestCaseWithServerMixin, but this mixin creates
+    a single server instance for the whole class. Also pre-creates a
+    fresh database: 'db'.
+ """
+
+    # 'influxdb_template_conf' attribute must be set on the class itself!
+
+ @classmethod
+ def setUpClass(cls):
+ """Set up an instance of the ManyTestCasesWithServerMixin."""
+ _setup_influxdb_server(cls)
+
+ def setUp(self):
+ """Set up an instance of the ManyTestCasesWithServerMixin."""
+ self.cli.create_database('db')
+
+ @classmethod
+ def tearDownClass(cls):
+ """Deconstruct an instance of ManyTestCasesWithServerMixin."""
+ _teardown_influxdb_server(cls)
+
+ def tearDown(self):
+ """Deconstruct an instance of ManyTestCasesWithServerMixin."""
+ self.cli.drop_database('db')
+
+
+class SingleTestCaseWithServerGzipMixin(object):
+ """Define the single testcase with server with gzip client mixin.
+
+    Same as the SingleTestCaseWithServerMixin, but the InfluxDBClient has
+    gzip=True.
+ """
+
+ @classmethod
+ def setUp(cls):
+ """Set up an instance of the SingleTestCaseWithServerGzipMixin."""
+ _setup_influxdb_server(cls)
+ _setup_gzip_client(cls)
+
+ @classmethod
+ def tearDown(cls):
+ """Tear down an instance of the SingleTestCaseWithServerMixin."""
+ _teardown_influxdb_server(cls)
+
+
+class ManyTestCasesWithServerGzipMixin(object):
+ """Define the many testcase with server with gzip client mixin.
+
+ Same as the ManyTestCasesWithServerMixin but the InfluxDBClient has
+ gzip=True.
+ """
+
+ @classmethod
+ def setUpClass(cls):
+ """Set up an instance of the ManyTestCasesWithServerGzipMixin."""
+ _setup_influxdb_server(cls)
+ _setup_gzip_client(cls)
+
+ @classmethod
+ def tearDown(cls):
+ """Tear down an instance of the SingleTestCaseWithServerMixin."""
+ _teardown_influxdb_server(cls)
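+
+
+# For illustration, a hypothetical test case built on these mixins; the
+# mixin must precede unittest.TestCase so its setUp/tearDown take priority:
+#
+#     class MyServerTests(ManyTestCasesWithServerMixin, unittest.TestCase):
+#         influxdb_template_conf = '/path/to/influxdb.conf.template'
+#
+#         def test_ping(self):
+#             self.assertIsNotNone(self.cli.ping())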
diff --git a/influxdb/tests/server_tests/client_test_with_server.py b/influxdb/tests/server_tests/client_test_with_server.py
new file mode 100644
index 00000000..a0263243
--- /dev/null
+++ b/influxdb/tests/server_tests/client_test_with_server.py
@@ -0,0 +1,966 @@
+# -*- coding: utf-8 -*-
+"""Unit tests for checking the InfluxDB server.
+
+The good/expected interaction between:
+
++ the python client.. (obviously)
++ and a *_real_* server instance running.
+
+This basically duplicates what's in client_test.py
+but without mocking around every call.
+"""
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+from functools import partial
+import os
+import time
+import unittest
+import warnings
+
+from influxdb import InfluxDBClient
+from influxdb.exceptions import InfluxDBClientError
+
+from influxdb.tests import skip_if_pypy, using_pypy, skip_server_tests
+from influxdb.tests.server_tests.base import ManyTestCasesWithServerMixin
+from influxdb.tests.server_tests.base import SingleTestCaseWithServerMixin
+from influxdb.tests.server_tests.base import ManyTestCasesWithServerGzipMixin
+from influxdb.tests.server_tests.base import SingleTestCaseWithServerGzipMixin
+
+# By default, raise exceptions on warnings
+warnings.simplefilter('error', FutureWarning)
+
+if not using_pypy:
+ import pandas as pd
+ from pandas.util.testing import assert_frame_equal
+
+
+THIS_DIR = os.path.abspath(os.path.dirname(__file__))
+
+
+def point(series_name, timestamp=None, tags=None, **fields):
+ """Define what a point looks like."""
+ res = {'measurement': series_name}
+
+ if timestamp:
+ res['time'] = timestamp
+
+ if tags:
+ res['tags'] = tags
+
+ res['fields'] = fields
+ return res
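+
+# For example, point('cpu', timestamp='2009-11-10T23:00:00Z',
+#                    tags={'host': 'server01'}, value=0.64) builds:
+# {'measurement': 'cpu', 'time': '2009-11-10T23:00:00Z',
+#  'tags': {'host': 'server01'}, 'fields': {'value': 0.64}}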
+
+
+dummy_point = [ # some dummy points
+ {
+ "measurement": "cpu_load_short",
+ "tags": {
+ "host": "server01",
+ "region": "us-west"
+ },
+ "time": "2009-11-10T23:00:00Z",
+ "fields": {
+ "value": 0.64
+ }
+ }
+]
+
+dummy_points = [ # some dummy points
+ dummy_point[0],
+ {
+ "measurement": "memory",
+ "tags": {
+ "host": "server01",
+ "region": "us-west"
+ },
+ "time": "2009-11-10T23:01:35Z",
+ "fields": {
+ "value": 33.0
+ }
+ }
+]
+
+if not using_pypy:
+ dummy_point_df = {
+ "measurement": "cpu_load_short",
+ "tags": {"host": "server01",
+ "region": "us-west"},
+ "dataframe": pd.DataFrame(
+ [[0.64]], columns=['value'],
+ index=pd.to_datetime(["2009-11-10T23:00:00Z"]))
+ }
+ dummy_points_df = [{
+ "measurement": "cpu_load_short",
+ "tags": {"host": "server01", "region": "us-west"},
+ "dataframe": pd.DataFrame(
+ [[0.64]], columns=['value'],
+ index=pd.to_datetime(["2009-11-10T23:00:00Z"])),
+ }, {
+ "measurement": "memory",
+ "tags": {"host": "server01", "region": "us-west"},
+ "dataframe": pd.DataFrame(
+ [[33]], columns=['value'],
+ index=pd.to_datetime(["2009-11-10T23:01:35Z"])
+ )
+ }]
+
+
+dummy_point_without_timestamp = [
+ {
+ "measurement": "cpu_load_short",
+ "tags": {
+ "host": "server02",
+ "region": "us-west"
+ },
+ "fields": {
+ "value": 0.64
+ }
+ }
+]
+
+
+@skip_server_tests
+class SimpleTests(SingleTestCaseWithServerMixin, unittest.TestCase):
+ """Define the class of simple tests."""
+
+ influxdb_template_conf = os.path.join(THIS_DIR, 'influxdb.conf.template')
+
+ def test_fresh_server_no_db(self):
+ """Test a fresh server without database."""
+ self.assertEqual([], self.cli.get_list_database())
+
+ def test_create_database(self):
+ """Test create a database."""
+ self.assertIsNone(self.cli.create_database('new_db_1'))
+ self.assertIsNone(self.cli.create_database('new_db_2'))
+ self.assertEqual(
+ self.cli.get_list_database(),
+ [{'name': 'new_db_1'}, {'name': 'new_db_2'}]
+ )
+
+ def test_drop_database(self):
+ """Test drop a database."""
+ self.test_create_database()
+ self.assertIsNone(self.cli.drop_database('new_db_1'))
+ self.assertEqual([{'name': 'new_db_2'}], self.cli.get_list_database())
+
+ def test_query_fail(self):
+ """Test that a query failed."""
+ with self.assertRaises(InfluxDBClientError) as ctx:
+ self.cli.query('select column_one from foo')
+ self.assertIn('database not found: db',
+ ctx.exception.content)
+
+ def test_query_fail_ignore_errors(self):
+ """Test query failed but ignore errors."""
+ result = self.cli.query('select column_one from foo',
+ raise_errors=False)
+ self.assertEqual(result.error, 'database not found: db')
+
+ def test_create_user(self):
+ """Test create user."""
+ self.cli.create_user('test_user', 'secret_password')
+ rsp = list(self.cli.query("SHOW USERS")['results'])
+ self.assertIn({'user': 'test_user', 'admin': False},
+ rsp)
+
+ def test_create_user_admin(self):
+ """Test create admin user."""
+ self.cli.create_user('test_user', 'secret_password', True)
+ rsp = list(self.cli.query("SHOW USERS")['results'])
+ self.assertIn({'user': 'test_user', 'admin': True},
+ rsp)
+
+ def test_create_user_blank_password(self):
+ """Test create user with a blank pass."""
+ self.cli.create_user('test_user', '')
+ rsp = list(self.cli.query("SHOW USERS")['results'])
+ self.assertIn({'user': 'test_user', 'admin': False},
+ rsp)
+
+ def test_get_list_users_empty(self):
+ """Test get list of users, but empty."""
+ rsp = self.cli.get_list_users()
+ self.assertEqual([], rsp)
+
+ def test_get_list_users(self):
+ """Test get list of users."""
+ self.cli.query("CREATE USER test WITH PASSWORD 'test'")
+ rsp = self.cli.get_list_users()
+
+ self.assertEqual(
+ [{'user': 'test', 'admin': False}],
+ rsp
+ )
+
+ def test_create_user_blank_username(self):
+ """Test create blank username."""
+ with self.assertRaises(InfluxDBClientError) as ctx:
+ self.cli.create_user('', 'secret_password')
+ self.assertIn('username required',
+ ctx.exception.content)
+ rsp = list(self.cli.query("SHOW USERS")['results'])
+ self.assertEqual(rsp, [])
+
+ def test_drop_user(self):
+ """Test drop a user."""
+ self.cli.query("CREATE USER test WITH PASSWORD 'test'")
+ self.cli.drop_user('test')
+ users = list(self.cli.query("SHOW USERS")['results'])
+ self.assertEqual(users, [])
+
+ def test_drop_user_nonexisting(self):
+ """Test dropping a nonexistent user."""
+ with self.assertRaises(InfluxDBClientError) as ctx:
+ self.cli.drop_user('test')
+ self.assertIn('user not found',
+ ctx.exception.content)
+
+ @unittest.skip("Broken as of 0.9.0")
+ def test_revoke_admin_privileges(self):
+ """Test revoking admin privs, deprecated as of v0.9.0."""
+ self.cli.create_user('test', 'test', admin=True)
+ self.assertEqual([{'user': 'test', 'admin': True}],
+ self.cli.get_list_users())
+ self.cli.revoke_admin_privileges('test')
+ self.assertEqual([{'user': 'test', 'admin': False}],
+ self.cli.get_list_users())
+
+ def test_grant_privilege(self):
+ """Test grant privs to user."""
+ self.cli.create_user('test', 'test')
+ self.cli.create_database('testdb')
+ self.cli.grant_privilege('all', 'testdb', 'test')
+ # TODO: when supported by InfluxDB, check if privileges are granted
+
+ def test_grant_privilege_invalid(self):
+ """Test grant invalid privs to user."""
+ self.cli.create_user('test', 'test')
+ self.cli.create_database('testdb')
+ with self.assertRaises(InfluxDBClientError) as ctx:
+ self.cli.grant_privilege('', 'testdb', 'test')
+ self.assertEqual(400, ctx.exception.code)
+ self.assertIn('{"error":"error parsing query: ',
+ ctx.exception.content)
+
+ def test_revoke_privilege(self):
+ """Test revoke privs from user."""
+ self.cli.create_user('test', 'test')
+ self.cli.create_database('testdb')
+ self.cli.revoke_privilege('all', 'testdb', 'test')
+ # TODO: when supported by InfluxDB, check if privileges are revoked
+
+ def test_revoke_privilege_invalid(self):
+ """Test revoke invalid privs from user."""
+ self.cli.create_user('test', 'test')
+ self.cli.create_database('testdb')
+ with self.assertRaises(InfluxDBClientError) as ctx:
+ self.cli.revoke_privilege('', 'testdb', 'test')
+ self.assertEqual(400, ctx.exception.code)
+ self.assertIn('{"error":"error parsing query: ',
+ ctx.exception.content)
+
+ def test_invalid_port_fails(self):
+ """Test invalid port access fails."""
+ with self.assertRaises(ValueError):
+ InfluxDBClient('host', '80/redir', 'username', 'password')
+
+
+@skip_server_tests
+class CommonTests(ManyTestCasesWithServerMixin, unittest.TestCase):
+ """Define a class to handle common tests for the server."""
+
+ influxdb_template_conf = os.path.join(THIS_DIR, 'influxdb.conf.template')
+
+ def test_write(self):
+ """Test write to the server."""
+ self.assertIs(True, self.cli.write(
+ {'points': dummy_point},
+ params={'db': 'db'},
+ ))
+
+ def test_write_check_read(self):
+ """Test write and check read of data to server."""
+ self.test_write()
+ time.sleep(1)
+ rsp = self.cli.query('SELECT * FROM cpu_load_short', database='db')
+ self.assertListEqual([{'value': 0.64, 'time': '2009-11-10T23:00:00Z',
+ "host": "server01", "region": "us-west"}],
+ list(rsp.get_points()))
+
+ def test_write_points(self):
+ """Test writing points to the server."""
+ self.assertIs(True, self.cli.write_points(dummy_point))
+
+ @skip_if_pypy
+ def test_write_points_DF(self):
+ """Test writing points with dataframe."""
+ self.assertIs(
+ True,
+ self.cliDF.write_points(
+ dummy_point_df['dataframe'],
+ dummy_point_df['measurement'],
+ dummy_point_df['tags']
+ )
+ )
+
+ def test_write_points_check_read(self):
+ """Test writing points and check read back."""
+ self.test_write_points()
+ time.sleep(1) # same as test_write_check_read()
+ rsp = self.cli.query('SELECT * FROM cpu_load_short')
+
+ self.assertEqual(
+ list(rsp),
+ [[
+ {'value': 0.64,
+ 'time': '2009-11-10T23:00:00Z',
+ "host": "server01",
+ "region": "us-west"}
+ ]]
+ )
+
+ rsp2 = list(rsp.get_points())
+ self.assertEqual(len(rsp2), 1)
+ pt = rsp2[0]
+
+ self.assertEqual(
+ pt,
+ {'time': '2009-11-10T23:00:00Z',
+ 'value': 0.64,
+ "host": "server01",
+ "region": "us-west"}
+ )
+
+ @unittest.skip("Broken as of 0.9.0")
+ def test_write_points_check_read_DF(self):
+ """Test write points and check back with dataframe."""
+ self.test_write_points_DF()
+ time.sleep(1) # same as test_write_check_read()
+
+ rsp = self.cliDF.query('SELECT * FROM cpu_load_short')
+ assert_frame_equal(
+ rsp['cpu_load_short'],
+ dummy_point_df['dataframe']
+ )
+
+ # Query with Tags
+ rsp = self.cliDF.query(
+ "SELECT * FROM cpu_load_short GROUP BY *")
+ assert_frame_equal(
+ rsp[('cpu_load_short',
+ (('host', 'server01'), ('region', 'us-west')))],
+ dummy_point_df['dataframe']
+ )
+
+ def test_write_multiple_points_different_series(self):
+ """Test write multiple points to different series."""
+ self.assertIs(True, self.cli.write_points(dummy_points))
+ time.sleep(1)
+ rsp = self.cli.query('SELECT * FROM cpu_load_short')
+ lrsp = list(rsp)
+
+ self.assertEqual(
+ [[
+ {'value': 0.64,
+ 'time': '2009-11-10T23:00:00Z',
+ "host": "server01",
+ "region": "us-west"}
+ ]],
+ lrsp
+ )
+
+ rsp = list(self.cli.query('SELECT * FROM memory'))
+
+ self.assertEqual(
+ rsp,
+ [[
+ {'value': 33,
+ 'time': '2009-11-10T23:01:35Z',
+ "host": "server01",
+ "region": "us-west"}
+ ]]
+ )
+
+ def test_select_into_as_post(self):
+ """Test SELECT INTO is POSTed."""
+ self.assertIs(True, self.cli.write_points(dummy_points))
+ time.sleep(1)
+ rsp = self.cli.query('SELECT * INTO "newmeas" FROM "memory"')
+ rsp = self.cli.query('SELECT * FROM "newmeas"')
+ lrsp = list(rsp)
+
+ self.assertEqual(
+ lrsp,
+ [[
+ {'value': 33,
+ 'time': '2009-11-10T23:01:35Z',
+ "host": "server01",
+ "region": "us-west"}
+ ]]
+ )
+
+ @unittest.skip("Broken as of 0.9.0")
+ def test_write_multiple_points_different_series_DF(self):
+ """Test write multiple points using dataframe to different series."""
+ for i in range(2):
+ self.assertIs(
+ True, self.cliDF.write_points(
+ dummy_points_df[i]['dataframe'],
+ dummy_points_df[i]['measurement'],
+ dummy_points_df[i]['tags']))
+ time.sleep(1)
+ rsp = self.cliDF.query('SELECT * FROM cpu_load_short')
+
+ assert_frame_equal(
+ rsp['cpu_load_short'],
+ dummy_points_df[0]['dataframe']
+ )
+
+ rsp = self.cliDF.query('SELECT * FROM memory')
+ assert_frame_equal(
+ rsp['memory'],
+ dummy_points_df[1]['dataframe']
+ )
+
+ def test_write_points_batch(self):
+ """Test writing points in a batch."""
+ dummy_points = [
+ {"measurement": "cpu_usage", "tags": {"unit": "percent"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
+ {"measurement": "network", "tags": {"direction": "in"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
+ {"measurement": "network", "tags": {"direction": "out"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
+ ]
+ self.cli.write_points(points=dummy_points,
+ tags={"host": "server01",
+ "region": "us-west"},
+ batch_size=2)
+ time.sleep(5)
+ net_in = self.cli.query("SELECT value FROM network "
+ "WHERE direction=$dir",
+ bind_params={'dir': 'in'}
+ ).raw
+ net_out = self.cli.query("SELECT value FROM network "
+ "WHERE direction='out'").raw
+ cpu = self.cli.query("SELECT value FROM cpu_usage").raw
+ self.assertIn(123, net_in['series'][0]['values'][0])
+ self.assertIn(12, net_out['series'][0]['values'][0])
+ self.assertIn(12.34, cpu['series'][0]['values'][0])
+
+ def test_write_points_batch_generator(self):
+ """Test writing points in a batch from a generator."""
+ dummy_points = [
+ {"measurement": "cpu_usage", "tags": {"unit": "percent"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.34}},
+ {"measurement": "network", "tags": {"direction": "in"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 123.00}},
+ {"measurement": "network", "tags": {"direction": "out"},
+ "time": "2009-11-10T23:00:00Z", "fields": {"value": 12.00}}
+ ]
+ dummy_points_generator = (point for point in dummy_points)
+ self.cli.write_points(points=dummy_points_generator,
+ tags={"host": "server01",
+ "region": "us-west"},
+ batch_size=2)
+ time.sleep(5)
+ net_in = self.cli.query("SELECT value FROM network "
+ "WHERE direction=$dir",
+ bind_params={'dir': 'in'}
+ ).raw
+ net_out = self.cli.query("SELECT value FROM network "
+ "WHERE direction='out'").raw
+ cpu = self.cli.query("SELECT value FROM cpu_usage").raw
+ self.assertIn(123, net_in['series'][0]['values'][0])
+ self.assertIn(12, net_out['series'][0]['values'][0])
+ self.assertIn(12.34, cpu['series'][0]['values'][0])
+
+ def test_query(self):
+ """Test querying data back from server."""
+ self.assertIs(True, self.cli.write_points(dummy_point))
+
+ @unittest.skip('Not implemented for 0.9')
+ def test_query_chunked(self):
+ """Test query for chunked response from server."""
+ cli = InfluxDBClient(database='db')
+ example_object = {
+ 'points': [
+ [1415206250119, 40001, 667],
+ [1415206244555, 30001, 7],
+ [1415206228241, 20001, 788],
+ [1415206212980, 10001, 555],
+ [1415197271586, 10001, 23]
+ ],
+ 'name': 'foo',
+ 'columns': [
+ 'time',
+ 'sequence_number',
+ 'val'
+ ]
+ }
+ del cli
+ del example_object
+ # TODO ?
+
+ def test_delete_series_invalid(self):
+ """Test delete invalid series."""
+ with self.assertRaises(InfluxDBClientError):
+ self.cli.delete_series()
+
+ def test_default_retention_policy(self):
+ """Test add default retention policy."""
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'name': 'autogen',
+ 'duration': '0s',
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'default': True}
+ ],
+ rsp
+ )
+
+ def test_create_retention_policy_default(self):
+ """Test create a new default retention policy."""
+ self.cli.create_retention_policy('somename', '1d', 1, default=True)
+ self.cli.create_retention_policy('another', '2d', 1, default=False)
+ rsp = self.cli.get_list_retention_policies()
+
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '24h0m0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'1h0m0s',
+ 'name': 'somename'},
+ {'duration': '48h0m0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'24h0m0s',
+ 'name': 'another'}
+ ],
+ rsp
+ )
+
+ def test_create_retention_policy(self):
+ """Test creating a new retention policy, not default."""
+ self.cli.create_retention_policy('somename', '1d', 1)
+ # NB: creating a retention policy without specifying
+ # shard group duration
+ # leads to a shard group duration of 1 hour
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '24h0m0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'1h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ self.cli.drop_retention_policy('somename', 'db')
+ # recreate the RP
+ self.cli.create_retention_policy('somename', '1w', 1,
+ shard_duration='1h')
+
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '168h0m0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'1h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ self.cli.drop_retention_policy('somename', 'db')
+ # recreate the RP
+ self.cli.create_retention_policy('somename', '1w', 1)
+
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '168h0m0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'24h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ def test_alter_retention_policy(self):
+ """Test alter a retention policy, not default."""
+ self.cli.create_retention_policy('somename', '1d', 1)
+
+ # Test alter duration
+ self.cli.alter_retention_policy('somename', 'db',
+ duration='4d',
+ shard_duration='2h')
+ # NB: altering retention policy doesn't change shard group duration
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '96h0m0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'2h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ # Test alter replication
+ self.cli.alter_retention_policy('somename', 'db',
+ replication=4)
+
+ # NB: altering retention policy doesn't change shard group duration
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '96h0m0s',
+ 'default': False,
+ 'replicaN': 4,
+ 'shardGroupDuration': u'2h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ # Test alter default
+ self.cli.alter_retention_policy('somename', 'db',
+ default=True)
+ # NB: altering retention policy doesn't change shard group duration
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '96h0m0s',
+ 'default': True,
+ 'replicaN': 4,
+ 'shardGroupDuration': u'2h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ # Test alter shard_duration
+ self.cli.alter_retention_policy('somename', 'db',
+ shard_duration='4h')
+
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '96h0m0s',
+ 'default': True,
+ 'replicaN': 4,
+ 'shardGroupDuration': u'4h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ def test_alter_retention_policy_invalid(self):
+ """Test invalid alter retention policy."""
+ self.cli.create_retention_policy('somename', '1d', 1)
+ with self.assertRaises(InfluxDBClientError) as ctx:
+ self.cli.alter_retention_policy('somename', 'db')
+ self.assertEqual(400, ctx.exception.code)
+ self.assertIn('{"error":"error parsing query: ',
+ ctx.exception.content)
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'},
+ {'duration': '24h0m0s',
+ 'default': False,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'1h0m0s',
+ 'name': 'somename'}
+ ],
+ rsp
+ )
+
+ def test_drop_retention_policy(self):
+ """Test drop a retention policy."""
+ self.cli.create_retention_policy('somename', '1d', 1)
+
+ # Test drop retention
+ self.cli.drop_retention_policy('somename', 'db')
+ rsp = self.cli.get_list_retention_policies()
+ self.assertEqual(
+ [
+ {'duration': '0s',
+ 'default': True,
+ 'replicaN': 1,
+ 'shardGroupDuration': u'168h0m0s',
+ 'name': 'autogen'}
+ ],
+ rsp
+ )
+
+ def test_create_continuous_query(self):
+ """Test continuous query creation."""
+ self.cli.create_retention_policy('some_rp', '1d', 1)
+ query = 'select count("value") into "some_rp"."events" from ' \
+ '"events" group by time(10m)'
+ self.cli.create_continuous_query('test_cq', query, 'db')
+ cqs = self.cli.get_list_continuous_queries()
+ expected_cqs = [
+ {
+ 'db': [
+ {
+ 'name': 'test_cq',
+ 'query': 'CREATE CONTINUOUS QUERY test_cq ON db '
+ 'BEGIN SELECT count(value) INTO '
+ 'db.some_rp.events FROM db.autogen.events '
+ 'GROUP BY time(10m) END'
+ }
+ ]
+ }
+ ]
+ self.assertEqual(cqs, expected_cqs)
+
+ def test_drop_continuous_query(self):
+ """Test continuous query drop."""
+ self.test_create_continuous_query()
+ self.cli.drop_continuous_query('test_cq', 'db')
+ cqs = self.cli.get_list_continuous_queries()
+ expected_cqs = [{'db': []}]
+ self.assertEqual(cqs, expected_cqs)
+
+ def test_issue_143(self):
+ """Test for PR#143 from repo."""
+ pt = partial(point, 'a_series_name', timestamp='2015-03-30T16:16:37Z')
+ pts = [
+ pt(value=15),
+ pt(tags={'tag_1': 'value1'}, value=5),
+ pt(tags={'tag_1': 'value2'}, value=10),
+ ]
+ self.cli.write_points(pts)
+ time.sleep(1)
+ rsp = list(self.cli.query('SELECT * FROM a_series_name \
+GROUP BY tag_1').get_points())
+
+ self.assertEqual(
+ [
+ {'time': '2015-03-30T16:16:37Z', 'value': 15},
+ {'time': '2015-03-30T16:16:37Z', 'value': 5},
+ {'time': '2015-03-30T16:16:37Z', 'value': 10}
+ ],
+ rsp
+ )
+
+        # a slightly more complex one with 2 tag values:
+ pt = partial(point, 'series2', timestamp='2015-03-30T16:16:37Z')
+ pts = [
+ pt(tags={'tag1': 'value1', 'tag2': 'v1'}, value=0),
+ pt(tags={'tag1': 'value1', 'tag2': 'v2'}, value=5),
+ pt(tags={'tag1': 'value2', 'tag2': 'v1'}, value=10),
+ ]
+ self.cli.write_points(pts)
+ time.sleep(1)
+ rsp = self.cli.query('SELECT * FROM series2 GROUP BY tag1,tag2')
+
+ self.assertEqual(
+ [
+ {'value': 0, 'time': '2015-03-30T16:16:37Z'},
+ {'value': 5, 'time': '2015-03-30T16:16:37Z'},
+ {'value': 10, 'time': '2015-03-30T16:16:37Z'}
+ ],
+ list(rsp['series2'])
+ )
+
+ all_tag2_equal_v1 = list(rsp.get_points(tags={'tag2': 'v1'}))
+
+ self.assertEqual(
+ [{'value': 0, 'time': '2015-03-30T16:16:37Z'},
+ {'value': 10, 'time': '2015-03-30T16:16:37Z'}],
+ all_tag2_equal_v1,
+ )
+
+ def test_query_multiple_series(self):
+ """Test query for multiple series."""
+ pt = partial(point, 'series1', timestamp='2015-03-30T16:16:37Z')
+ pts = [
+ pt(tags={'tag1': 'value1', 'tag2': 'v1'}, value=0),
+ ]
+ self.cli.write_points(pts)
+
+ pt = partial(point, 'series2', timestamp='1970-03-30T16:16:37Z')
+ pts = [
+ pt(tags={'tag1': 'value1', 'tag2': 'v1'},
+ value=0, data1=33, data2="bla"),
+ ]
+ self.cli.write_points(pts)
+
+ def test_get_list_series(self):
+ """Test get a list of series from the database."""
+ dummy_points = [
+ {
+ "measurement": "cpu_load_short",
+ "tags": {
+ "host": "server01",
+ "region": "us-west"
+ },
+ "time": "2009-11-10T23:00:00.123456Z",
+ "fields": {
+ "value": 0.64
+ }
+ }
+ ]
+
+ dummy_points_2 = [
+ {
+ "measurement": "memory_usage",
+ "tags": {
+ "host": "server02",
+ "region": "us-east"
+ },
+ "time": "2009-11-10T23:00:00.123456Z",
+ "fields": {
+ "value": 80
+ }
+ }
+ ]
+
+ self.cli.write_points(dummy_points)
+ self.cli.write_points(dummy_points_2)
+
+        self.assertEqual(
+ self.cli.get_list_series(),
+ ['cpu_load_short,host=server01,region=us-west',
+ 'memory_usage,host=server02,region=us-east']
+ )
+
+        self.assertEqual(
+ self.cli.get_list_series(measurement='memory_usage'),
+ ['memory_usage,host=server02,region=us-east']
+ )
+
+        self.assertEqual(
+ self.cli.get_list_series(tags={'host': 'server02'}),
+ ['memory_usage,host=server02,region=us-east'])
+
+        self.assertEqual(
+ self.cli.get_list_series(
+ measurement='cpu_load_short', tags={'host': 'server02'}),
+ [])
+
+
+@skip_server_tests
+class UdpTests(ManyTestCasesWithServerMixin, unittest.TestCase):
+ """Define a class to test UDP series."""
+
+ influxdb_udp_enabled = True
+ influxdb_template_conf = os.path.join(THIS_DIR,
+ 'influxdb.conf.template')
+
+ def test_write_points_udp(self):
+ """Test write points UDP."""
+ cli = InfluxDBClient(
+ 'localhost',
+ self.influxd_inst.http_port,
+ 'root',
+ '',
+ database='db',
+ use_udp=True,
+ udp_port=self.influxd_inst.udp_port
+ )
+ cli.write_points(dummy_point)
+
+ # The points are not immediately available after write_points.
+        # This is to be expected because we are using UDP (no response).
+        # So we have to wait some time.
+ time.sleep(3) # 3 sec seems to be a good choice.
+ rsp = self.cli.query('SELECT * FROM cpu_load_short')
+
+ self.assertEqual(
+            # this is dummy_point:
+ [
+ {'value': 0.64,
+ 'time': '2009-11-10T23:00:00Z',
+ "host": "server01",
+ "region": "us-west"}
+ ],
+ list(rsp['cpu_load_short'])
+ )
+
+
+# Run the tests again, but with gzip enabled this time
+@skip_server_tests
+class GzipSimpleTests(SimpleTests, SingleTestCaseWithServerGzipMixin):
+ """Repeat the simple tests with InfluxDBClient where gzip=True."""
+
+ pass
+
+
+@skip_server_tests
+class GzipCommonTests(CommonTests, ManyTestCasesWithServerGzipMixin):
+ """Repeat the common tests with InfluxDBClient where gzip=True."""
+
+ pass
+
+
+@skip_server_tests
+class GzipUdpTests(UdpTests, ManyTestCasesWithServerGzipMixin):
+ """Repeat the UDP tests with InfluxDBClient where gzip=True."""
+
+ pass
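
For context, the `*GzipMixin` classes referenced above (see `server_tests/base.py`) differ from the plain mixins only in how they construct the client. A sketch of that difference, with a stand-in port:

```python
# Sketch only: the Gzip mixins build their client with gzip=True, which
# gzip-encodes request and response bodies; the tests are otherwise
# unchanged. Port 8086 is a stand-in for the randomly chosen http_port
# of the temporary influxd instance.
from influxdb import InfluxDBClient

cli = InfluxDBClient('localhost', 8086, 'root', '', database='db',
                     gzip=True)
```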
diff --git a/influxdb/tests/server_tests/influxdb.conf.template b/influxdb/tests/server_tests/influxdb.conf.template
new file mode 100644
index 00000000..efcff78a
--- /dev/null
+++ b/influxdb/tests/server_tests/influxdb.conf.template
@@ -0,0 +1,29 @@
+bind-address = ":{global_port}"
+
+[meta]
+ dir = "{meta_dir}"
+ hostname = "localhost"
+ bind-address = ":{meta_port}"
+
+[data]
+ dir = "{data_dir}"
+ wal-dir = "{wal_dir}"
+ index-version = "tsi1"
+
+[http]
+ enabled = true
+ bind-address = ":{http_port}"
+ auth-enabled = false
+
+[[udp]]
+ enabled = {udp_enabled}
+ bind-address = ":{udp_port}"
+ database = "db"
+ batch-size = 0
+ batch-timeout = "0"
+
+[monitor]
+ store-enabled = false
+
+[hinted-handoff]
+ dir = "{handoff_dir}"
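
The `{...}` fields in this template are plain `str.format` placeholders; `influxdb_instance.py` below fills them in before launching the server. A minimal sketch of that rendering step, with made-up ports and directories:

```python
# Sketch of the rendering performed by InfluxDbInstance._start_server.
# All values below are made-up examples; the real ones come from
# get_free_ports() and tempfile.mkdtemp().
conf_data = {
    'global_port': 8086, 'meta_port': 8088, 'http_port': 8087,
    'udp_port': 4444, 'udp_enabled': 'true',
    'meta_dir': '/tmp/influx/meta', 'data_dir': '/tmp/influx/data',
    'wal_dir': '/tmp/influx/wal', 'handoff_dir': '/tmp/influx/handoff',
}
with open('influxdb.conf.template') as fh_template:
    rendered = fh_template.read().format(**conf_data)
with open('influxdb.conf', 'w') as fh:
    fh.write(rendered)
```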
diff --git a/influxdb/tests/server_tests/influxdb_instance.py b/influxdb/tests/server_tests/influxdb_instance.py
new file mode 100644
index 00000000..2dd823ff
--- /dev/null
+++ b/influxdb/tests/server_tests/influxdb_instance.py
@@ -0,0 +1,198 @@
+# -*- coding: utf-8 -*-
+"""Define the test module for an influxdb instance."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import datetime
+import distutils.spawn
+import os
+import tempfile
+import shutil
+import subprocess
+import sys
+import time
+import unittest
+
+from influxdb.tests.misc import is_port_open, get_free_ports
+
+# Backport subprocess.check_output if it's not defined (e.g. Python 2.6).
+if "check_output" not in dir(subprocess):
+    def f(*popenargs, **kwargs):
+        """Run a command and return its output (check_output backport)."""
+ if 'stdout' in kwargs:
+ raise ValueError(
+ 'stdout argument not allowed, it will be overridden.'
+ )
+ process = subprocess.Popen(stdout=subprocess.PIPE,
+ *popenargs,
+ **kwargs)
+ output, unused_err = process.communicate()
+ retcode = process.poll()
+ if retcode:
+ cmd = kwargs.get("args")
+ if cmd is None:
+ cmd = popenargs[0]
+ raise subprocess.CalledProcessError(retcode, cmd)
+ return output
+ subprocess.check_output = f
+
+
+class InfluxDbInstance(object):
+ """Define an instance of InfluxDB.
+
+    A class to launch a fresh influxdb server instance
+    in a temporary directory, using a config file template.
+ """
+
+ def __init__(self, conf_template, udp_enabled=False):
+ """Initialize an instance of InfluxDbInstance."""
+ if os.environ.get("INFLUXDB_PYTHON_SKIP_SERVER_TESTS", None) == 'True':
+ raise unittest.SkipTest(
+ "Skipping server test (INFLUXDB_PYTHON_SKIP_SERVER_TESTS)"
+ )
+
+ self.influxd_path = self.find_influxd_path()
+
+ errors = 0
+ while True:
+ try:
+ self._start_server(conf_template, udp_enabled)
+ break
+ # Happens when the ports are already in use.
+ except RuntimeError as e:
+ errors += 1
+ if errors > 2:
+ raise e
+
+ def _start_server(self, conf_template, udp_enabled):
+        # create a temporary dir to store all the files needed
+        # by the influxdb server instance:
+ self.temp_dir_base = tempfile.mkdtemp()
+
+        # "temp_dir_base" will hold the conf file and logs,
+        # while "temp_dir_influxdb" holds the database files/dirs:
+ tempdir = self.temp_dir_influxdb = tempfile.mkdtemp(
+ dir=self.temp_dir_base)
+
+        # find four free ports:
+ free_ports = get_free_ports(4)
+ ports = {}
+ for service in 'http', 'global', 'meta', 'udp':
+ ports[service + '_port'] = free_ports.pop()
+ if not udp_enabled:
+ ports['udp_port'] = -1
+
+ conf_data = dict(
+ meta_dir=os.path.join(tempdir, 'meta'),
+ data_dir=os.path.join(tempdir, 'data'),
+ wal_dir=os.path.join(tempdir, 'wal'),
+ cluster_dir=os.path.join(tempdir, 'state'),
+ handoff_dir=os.path.join(tempdir, 'handoff'),
+ logs_file=os.path.join(self.temp_dir_base, 'logs.txt'),
+ udp_enabled='true' if udp_enabled else 'false',
+ )
+ conf_data.update(ports)
+ self.__dict__.update(conf_data)
+
+ conf_file = os.path.join(self.temp_dir_base, 'influxdb.conf')
+ with open(conf_file, "w") as fh:
+ with open(conf_template) as fh_template:
+ fh.write(fh_template.read().format(**conf_data))
+
+ # now start the server instance:
+ self.proc = subprocess.Popen(
+ [self.influxd_path, '-config', conf_file],
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE
+ )
+
+ print(
+            "%s > Started influxd in %r with ports %s and %s." % (
+ datetime.datetime.now(),
+ self.temp_dir_base,
+ self.global_port,
+ self.http_port
+ )
+ )
+
+        # wait for it to listen on its http and global ports:
+        # a fresh instance is usually ready in less than a second,
+        timeout = time.time() + 10  # so 10 secs should be plenty;
+        # if it takes longer than that, either the system load is
+        # very high or something has gone wrong.
+ try:
+ while time.time() < timeout:
+ if (is_port_open(self.http_port) and
+ is_port_open(self.global_port)):
+                    # it's hard to check whether a UDP port is open,
+                    if udp_enabled:
+                        # so just sleep 0.5 sec in that case to give
+                        # the server time to open the port.
+                        time.sleep(0.5)
+ break
+ time.sleep(0.5)
+ if self.proc.poll() is not None:
+ raise RuntimeError('influxdb prematurely exited')
+ else:
+ self.proc.terminate()
+ self.proc.wait()
+ raise RuntimeError('Timeout waiting for influxdb to listen'
+ ' on its ports (%s)' % ports)
+ except RuntimeError as err:
+ data = self.get_logs_and_output()
+ data['reason'] = str(err)
+ data['now'] = datetime.datetime.now()
+ raise RuntimeError("%(now)s > %(reason)s. RC=%(rc)s\n"
+ "stdout=%(out)s\nstderr=%(err)s\nlogs=%(logs)r"
+ % data)
+
+ def find_influxd_path(self):
+ """Find the path for InfluxDB."""
+ influxdb_bin_path = os.environ.get(
+ 'INFLUXDB_PYTHON_INFLUXD_PATH',
+ None
+ )
+
+ if influxdb_bin_path is None:
+ influxdb_bin_path = distutils.spawn.find_executable('influxd')
+ if not influxdb_bin_path:
+ try:
+ influxdb_bin_path = subprocess.check_output(
+ ['which', 'influxd']
+ ).strip()
+ except subprocess.CalledProcessError:
+                # fall back to a default location:
+ influxdb_bin_path = '/opt/influxdb/influxd'
+
+ if not os.path.isfile(influxdb_bin_path):
+ raise unittest.SkipTest("Could not find influxd binary")
+
+ version = subprocess.check_output([influxdb_bin_path, 'version'])
+ print("InfluxDB version: %s" % version, file=sys.stderr)
+
+ return influxdb_bin_path
+
+ def get_logs_and_output(self):
+ """Query for logs and output."""
+ proc = self.proc
+ try:
+ with open(self.logs_file) as fh:
+ logs = fh.read()
+ except IOError as err:
+ logs = "Couldn't read logs: %s" % err
+ return {
+ 'rc': proc.returncode,
+ 'out': proc.stdout.read(),
+ 'err': proc.stderr.read(),
+ 'logs': logs
+ }
+
+ def close(self, remove_tree=True):
+ """Close an instance of InfluxDB."""
+ self.proc.terminate()
+ self.proc.wait()
+ if remove_tree:
+ shutil.rmtree(self.temp_dir_base)
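
Taken together, the class above gives each test run a disposable server. A hedged usage sketch (this is roughly what the server-test mixins do; the template path here is illustrative):

```python
# Sketch: start a throwaway influxd from the template, talk to it over
# its randomly assigned HTTP port, then tear everything down.
import os

from influxdb import InfluxDBClient
from influxdb.tests.server_tests.influxdb_instance import InfluxDbInstance

conf = os.path.join('influxdb', 'tests', 'server_tests',
                    'influxdb.conf.template')  # illustrative path
inst = InfluxDbInstance(conf, udp_enabled=False)
try:
    cli = InfluxDBClient('localhost', inst.http_port, 'root', '', 'db')
    cli.create_database('db')
    print(cli.ping())  # returns the server version string
finally:
    inst.close()  # terminates influxd and removes the temp tree
```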
diff --git a/influxdb/tests/test_line_protocol.py b/influxdb/tests/test_line_protocol.py
new file mode 100644
index 00000000..5b344990
--- /dev/null
+++ b/influxdb/tests/test_line_protocol.py
@@ -0,0 +1,205 @@
+# -*- coding: utf-8 -*-
+"""Define the line protocol test module."""
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import unittest
+
+from datetime import datetime
+from decimal import Decimal
+
+from pytz import UTC, timezone
+from influxdb import line_protocol
+
+
+class TestLineProtocol(unittest.TestCase):
+ """Define the LineProtocol test object."""
+
+ def test_make_lines(self):
+ """Test make new lines in TestLineProtocol object."""
+ data = {
+ "tags": {
+ "empty_tag": "",
+ "none_tag": None,
+ "backslash_tag": "C:\\",
+ "integer_tag": 2,
+ "string_tag": "hello"
+ },
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "string_val": "hello!",
+ "int_val": 1,
+ "float_val": 1.1,
+ "none_field": None,
+ "bool_val": True,
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ 'test,backslash_tag=C:\\\\,integer_tag=2,string_tag=hello '
+ 'bool_val=True,float_val=1.1,int_val=1i,string_val="hello!"\n'
+ )
+
+ def test_timezone(self):
+ """Test timezone in TestLineProtocol object."""
+ dt = datetime(2009, 11, 10, 23, 0, 0, 123456)
+ utc = UTC.localize(dt)
+ berlin = timezone('Europe/Berlin').localize(dt)
+ eastern = berlin.astimezone(timezone('US/Eastern'))
+ data = {
+ "points": [
+ {"measurement": "A", "fields": {"val": 1},
+ "time": 0},
+ {"measurement": "A", "fields": {"val": 1},
+ "time": "2009-11-10T23:00:00.123456Z"},
+ {"measurement": "A", "fields": {"val": 1}, "time": dt},
+ {"measurement": "A", "fields": {"val": 1}, "time": utc},
+ {"measurement": "A", "fields": {"val": 1}, "time": berlin},
+ {"measurement": "A", "fields": {"val": 1}, "time": eastern},
+ ]
+ }
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ '\n'.join([
+ 'A val=1i 0',
+ 'A val=1i 1257894000123456000',
+ 'A val=1i 1257894000123456000',
+ 'A val=1i 1257894000123456000',
+ 'A val=1i 1257890400123456000',
+ 'A val=1i 1257890400123456000',
+ ]) + '\n'
+ )
+
+ def test_string_val_newline(self):
+ """Test string value with newline in TestLineProtocol object."""
+ data = {
+ "points": [
+ {
+ "measurement": "m1",
+ "fields": {
+ "multi_line": "line1\nline1\nline3"
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ 'm1 multi_line="line1\\nline1\\nline3"\n'
+ )
+
+ def test_make_lines_unicode(self):
+ """Test make unicode lines in TestLineProtocol object."""
+ data = {
+ "tags": {
+ "unicode_tag": "\'Привет!\'" # Hello! in Russian
+ },
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "unicode_val": "Привет!", # Hello! in Russian
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ 'test,unicode_tag=\'Привет!\' unicode_val="Привет!"\n'
+ )
+
+ def test_make_lines_empty_field_string(self):
+ """Test make lines with an empty string field."""
+ data = {
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "string": "",
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ 'test string=""\n'
+ )
+
+ def test_tag_value_newline(self):
+ """Test make lines with tag value contains newline."""
+ data = {
+ "tags": {
+ "t1": "line1\nline2"
+ },
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "val": "hello"
+ }
+ }
+ ]
+ }
+
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ 'test,t1=line1\\nline2 val="hello"\n'
+ )
+
+ def test_quote_ident(self):
+        """Test quoting identifiers in TestLineProtocol object."""
+ self.assertEqual(
+ line_protocol.quote_ident(r"""\foo ' bar " Örf"""),
+ r'''"\\foo ' bar \" Örf"'''
+ )
+
+ def test_quote_literal(self):
+ """Test quote literal in TestLineProtocol object."""
+ self.assertEqual(
+ line_protocol.quote_literal(r"""\foo ' bar " Örf"""),
+ r"""'\\foo \' bar " Örf'"""
+ )
+
+ def test_float_with_long_decimal_fraction(self):
+ """Ensure precision is preserved when casting floats into strings."""
+ data = {
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "float_val": 1.0000000000000009,
+ }
+ }
+ ]
+ }
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ 'test float_val=1.0000000000000009\n'
+ )
+
+ def test_float_with_long_decimal_fraction_as_type_decimal(self):
+ """Ensure precision is preserved when casting Decimal into strings."""
+ data = {
+ "points": [
+ {
+ "measurement": "test",
+ "fields": {
+ "float_val": Decimal(0.8289445733333332),
+ }
+ }
+ ]
+ }
+ self.assertEqual(
+ line_protocol.make_lines(data),
+ 'test float_val=0.8289445733333332\n'
+ )
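
As a quick reference for the expected strings asserted above: `make_lines` turns the point dicts into line protocol, sorting tags and fields alphabetically and suffixing integer fields with `i`. An illustrative call (values are arbitrary):

```python
# Illustrative only; mirrors the assertions in the tests above.
from influxdb import line_protocol

data = {
    "tags": {"host": "server01"},
    "points": [
        {"measurement": "cpu_load_short",
         "fields": {"value": 0.64, "count": 2}},
    ],
}
print(line_protocol.make_lines(data))
# -> 'cpu_load_short,host=server01 count=2i,value=0.64\n'
```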
diff --git a/mypy.ini b/mypy.ini
new file mode 100644
index 00000000..308aa62d
--- /dev/null
+++ b/mypy.ini
@@ -0,0 +1,8 @@
+[mypy]
+ignore_missing_imports = True
+warn_unused_ignores = True
+warn_unused_configs = True
+warn_redundant_casts = True
+warn_no_return = True
+no_implicit_optional = True
+strict_equality = True
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 00000000..1b68d94e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,3 @@
+[build-system]
+requires = ["setuptools>=42", "wheel"]
+build-backend = "setuptools.build_meta"
\ No newline at end of file
diff --git a/release.sh b/release.sh
index d0337897..d94e16fd 100755
--- a/release.sh
+++ b/release.sh
@@ -1,3 +1,4 @@
#!/usr/bin/env bash
-python setup.py sdist upload
+python setup.py sdist bdist_wheel
+twine upload dist/*
diff --git a/requirements.txt b/requirements.txt
index c87c909f..a3df3154 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1 +1,5 @@
-requests>=1.0.3
+python-dateutil>=2.6.0
+pytz>=2016.10
+requests>=2.17.0
+six>=1.10.0
+msgpack>=0.5.0
diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 00000000..217d437b
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,5 @@
+[bdist_rpm]
+requires=python-dateutil
+
+[wheel]
+universal = 1
diff --git a/setup.py b/setup.py
index fe09ce36..8ac7d1a7 100755
--- a/setup.py
+++ b/setup.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
+"""Define the setup options."""
try:
import distribute_setup
@@ -41,7 +42,7 @@
tests_require=test_requires,
install_requires=requires,
extras_require={'test': test_requires},
- classifiers=(
+ classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
@@ -49,8 +50,10 @@
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
- 'Programming Language :: Python :: 3.3',
+ 'Programming Language :: Python :: 3.4',
+ 'Programming Language :: Python :: 3.5',
+ 'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
- ),
+ ],
)
diff --git a/test-requirements.txt b/test-requirements.txt
index 4b80095c..9b31f5f1 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,3 +1,4 @@
nose
+nose-cov
mock
requests-mock
diff --git a/tests/__init__.py b/tests/__init__.py
deleted file mode 100644
index 40a96afc..00000000
--- a/tests/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/tests/influxdb/__init__.py b/tests/influxdb/__init__.py
deleted file mode 100644
index 40a96afc..00000000
--- a/tests/influxdb/__init__.py
+++ /dev/null
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/tox.ini b/tox.ini
index 38f82fba..a1005abb 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,28 +1,59 @@
[tox]
-envlist = py33, py27, flake8
+envlist = py27, py35, py36, py37, pypy, pypy3, flake8, pep257, coverage, docs, mypy
[testenv]
+passenv = INFLUXDB_PYTHON_INFLUXD_PATH
+setenv = INFLUXDB_PYTHON_SKIP_SERVER_TESTS=False
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
-commands = nosetests
+ py27: pandas==0.21.1
+ py27: numpy==1.13.3
+ py35: pandas==0.22.0
+ py35: numpy==1.14.6
+ py36: pandas==0.23.4
+ py36: numpy==1.15.4
+ py37: pandas>=0.24.2
+ py37: numpy>=1.16.2
+# Only install pandas with non-pypy interpreters
+# Testing all combinations would be too expensive
+commands = nosetests -v --with-doctest {posargs}
[testenv:flake8]
deps =
flake8
pep8-naming
-commands = flake8 influxdb tests
+commands = flake8 influxdb
+
+[testenv:pep257]
+deps = pydocstyle
+commands = pydocstyle --count -ve examples influxdb
[testenv:coverage]
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
+ pandas==0.24.2
coverage
+ numpy
commands = nosetests -v --with-coverage --cover-html --cover-package=influxdb
[testenv:docs]
-deps = Sphinx==1.2.3
+deps = -r{toxinidir}/requirements.txt
+ pandas>=0.24.2
+ numpy>=1.16.2
+ Sphinx>=1.8.5
sphinx_rtd_theme
commands = sphinx-build -b html docs/source docs/build
+[testenv:mypy]
+deps = -r{toxinidir}/test-requirements.txt
+ mypy==0.720
+commands = mypy --config-file mypy.ini -p influxdb
+
[flake8]
-# Ignored because of nosetests's setUp function
-ignore = N802
+ignore = W503,W504,W605,N802,F821,E402
+# W503: Line break occurred before a binary operator
+# W504: Line break occurred after a binary operator
+# W605: invalid escape sequence
+# N802: nosetests's setUp function
+# F821: false positive in influxdb/dataframe_client.py
+# E402: module level import not at top of file