diff --git a/AUTHORS.md b/AUTHORS.md
index daa8ceba..09f25107 100644
--- a/AUTHORS.md
+++ b/AUTHORS.md
@@ -7,3 +7,5 @@ The following individuals have contributed code to this repository
 
 * Andrea Fagan
 * Brian Brazil
+* Paul Logston
+
diff --git a/README.md b/README.md
index 5a2ae7da..ee41e811 100644
--- a/README.md
+++ b/README.md
@@ -11,7 +11,7 @@ pip install prometheus_client
 **Two**: Paste the following into a Python interpreter:
 
 ```python
-from prometheus_client import start_http_server,Summary
+from prometheus_client import start_http_server, Summary
 import random
 import time
 
@@ -219,6 +219,7 @@ server in a daemon thread on the given port:
 
 ```python
 from prometheus_client import start_http_server
+
 start_http_server(8000)
 ```
 
@@ -238,7 +239,8 @@ about a machine system that the Node exporter does not support or would not make
 sense to perform at every scrape (for example, anything involving subprocesses).
 
 ```python
-from prometheus_client import CollectorRegistry,Gauge,write_to_textfile
+from prometheus_client import CollectorRegistry, Gauge, write_to_textfile
+
 registry = CollectorRegistry()
 g = Gauge('raid_status', '1 if raid array is okay', registry=registry)
 g.set(1)
@@ -254,7 +256,8 @@ The [Pushgateway](https://github.com/prometheus/pushgateway) allows ephemeral
 and batch jobs to expose their metrics to Prometheus.
 
 ```python
-from prometheus_client import CollectorRegistry,Gauge,push_to_gateway
+from prometheus_client import CollectorRegistry, Gauge, push_to_gateway
+
 registry = CollectorRegistry()
 g = Gauge('job_last_success_unixtime', 'Last time a batch job successfully finished', registry=registry)
 g.set_to_current_time()
@@ -287,6 +290,7 @@ Metrics are pushed over TCP in the Graphite plaintext format.
 
 ```python
 from prometheus_client.bridge.graphite import GraphiteBridge
+
 gb = GraphiteBridge(('graphite.your.org', 2003))
 # Push once.
 gb.push()
@@ -332,4 +336,3 @@ for family in text_string_to_metric_families("my_gauge 1.0\n"):
   for sample in family.samples:
     print("Name: {0} Labels: {1} Value: {2}".format(*sample))
 ```
-
diff --git a/prometheus_client/core.py b/prometheus_client/core.py
index 14b53947..720efe6a 100644
--- a/prometheus_client/core.py
+++ b/prometheus_client/core.py
@@ -256,12 +256,25 @@ def __init__(self, wrappedClass, name, labelnames, **kwargs):
     def labels(self, *labelvalues):
         '''Return the child for the given labelset.
 
-        Labels can be provided as a tuple or as a dict:
-            c = Counter('c', 'counter', ['l', 'm'])
-            # Set labels by position
-            c.labels('0', '1').inc()
-            # Set labels by name
-            c.labels({'l': '0', 'm': '1'}).inc()
+        All metrics can have labels, allowing grouping of related time series.
+        Taking a counter as an example:
+
+            from prometheus_client import Counter
+
+            c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
+            c.labels('get', '/').inc()
+            c.labels('post', '/submit').inc()
+
+        Labels can also be provided as a dict:
+
+            from prometheus_client import Counter
+
+            c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
+            c.labels({'method': 'get', 'endpoint': '/'}).inc()
+            c.labels({'method': 'post', 'endpoint': '/submit'}).inc()
+
+        See the best practices on [naming](http://prometheus.io/docs/practices/naming/)
+        and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels).
         '''
         if len(labelvalues) == 1 and type(labelvalues[0]) == dict:
             if sorted(labelvalues[0].keys()) != sorted(self._labelnames):
@@ -347,10 +360,24 @@ class Counter(object):
 
     An example for a Counter:
 
-    from prometheus_client import Counter
-    c = Counter('my_failures_total', 'Description of counter')
-    c.inc()     # Increment by 1
-    c.inc(1.6)  # Increment by given value
+        from prometheus_client import Counter
+
+        c = Counter('my_failures_total', 'Description of counter')
+        c.inc()     # Increment by 1
+        c.inc(1.6)  # Increment by given value
+
+    There are utilities to count exceptions raised:
+
+        @c.count_exceptions()
+        def f():
+            pass
+
+        with c.count_exceptions():
+            pass
+
+        # Count only one type of exception
+        with c.count_exceptions(ValueError):
+            pass
     '''
     _type = 'counter'
     _reserved_labelnames = []
@@ -401,19 +428,38 @@ class Gauge(object):
     '''Gauge metric, to report instantaneous values.
 
     Examples of Gauges include:
-    Inprogress requests
-    Number of items in a queue
-    Free memory
-    Total memory
-    Temperature
+    - Inprogress requests
+    - Number of items in a queue
+    - Free memory
+    - Total memory
+    - Temperature
 
     Gauges can go both up and down.
 
     from prometheus_client import Gauge
+
     g = Gauge('my_inprogress_requests', 'Description of gauge')
     g.inc()      # Increment by 1
     g.dec(10)    # Decrement by given value
     g.set(4.2)   # Set to a given value
+
+    There are utilities for common use cases:
+
+        g.set_to_current_time()   # Set to current unixtime
+
+        # Increment when entered, decrement when exited.
+        @g.track_inprogress()
+        def f():
+            pass
+
+        with g.track_inprogress():
+            pass
+
+    A Gauge can also take its value from a callback:
+
+        d = Gauge('data_objects', 'Number of objects')
+        my_dict = {}
+        d.set_function(lambda: len(my_dict))
     '''
     _type = 'gauge'
     _reserved_labelnames = []
@@ -494,8 +540,7 @@ def set_function(self, f):
         '''Call the provided function to return the Gauge value.
 
         The function must return a float, and may be called from
-        multiple threads.
-        All other methods of the Gauge become NOOPs.
+        multiple threads. All other methods of the Gauge become NOOPs.
         '''
         def samples(self):
             return (('', {}, float(f())), )
@@ -515,19 +560,26 @@ class Summary(object):
 
     Example for a Summary:
 
-    from prometheus_client import Summary
-    s = Summary('request_size_bytes', 'Request size (bytes)')
-    s.observe(512)    # Observe 512 (bytes)
+        from prometheus_client import Summary
+
+        s = Summary('request_size_bytes', 'Request size (bytes)')
+        s.observe(512)    # Observe 512 (bytes)
 
     Example for a Summary using time:
 
-    from prometheus_client import Summary
-    REQUEST_TIME = Summary('response_latency_seconds', 'Response latency (seconds)')
-    @REQUEST_TIME.time()
-    def create_response(request):
-        """A dummy function"""
-        time.sleep(1)
+        from prometheus_client import Summary
+
+        REQUEST_TIME = Summary('response_latency_seconds', 'Response latency (seconds)')
+
+        @REQUEST_TIME.time()
+        def create_response(request):
+            """A dummy function"""
+            time.sleep(1)
+
+    Example for using the same Summary object as a context manager:
+
+        with REQUEST_TIME.time():
+            pass  # Logic to be timed
     '''
     _type = 'summary'
     _reserved_labelnames = ['quantile']
@@ -596,22 +648,31 @@ class Histogram(object):
 
     Example for a Histogram:
 
-    from prometheus_client import Histogram
-    h = Histogram('request_size_bytes', 'Request size (bytes)')
-    h.observe(512)    # Observe 512 (bytes)
+        from prometheus_client import Histogram
+        h = Histogram('request_size_bytes', 'Request size (bytes)')
+        h.observe(512)    # Observe 512 (bytes)
 
     Example for a Histogram using time:
 
-    from prometheus_client import Histogram
-    REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)')
-    @REQUEST_TIME.time()
-    def create_response(request):
-        """A dummy function"""
-        time.sleep(1)
+        from prometheus_client import Histogram
+
+        REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)')
+
+        @REQUEST_TIME.time()
+        def create_response(request):
+            """A dummy function"""
+            time.sleep(1)
+
+    Example of using the same Histogram object as a context manager:
+
+        with REQUEST_TIME.time():
+            pass  # Logic to be timed
 
     The default buckets are intended to cover a typical web/rpc request from
     milliseconds to seconds. They can be overridden by passing `buckets` keyword
     argument to `Histogram`.
+
+    **NB** The Python client doesn't store or expose quantile information at this time.
     '''
     _type = 'histogram'
     _reserved_labelnames = ['histogram']
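
The Histogram docstring added above notes that the default buckets can be overridden via the `buckets` keyword argument but does not show it. Below is a minimal sketch of that usage; the metric name and bucket boundaries are illustrative, not taken from the diff.

```python
from prometheus_client import Histogram

# Illustrative bucket boundaries in seconds (not part of the diff above).
# prometheus_client appends a +Inf bucket automatically if one is not supplied.
REQUEST_LATENCY = Histogram(
    'response_latency_seconds',
    'Response latency (seconds)',
    buckets=(0.005, 0.05, 0.1, 0.5, 1.0, 5.0),
)

# An observation is counted in every bucket whose upper bound is >= the value,
# since Prometheus histogram buckets are cumulative.
REQUEST_LATENCY.observe(0.42)
```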