aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorChristopher Baines <mail@cbaines.net>2015-12-22 17:59:32 +0000
committerChristopher Baines <mail@cbaines.net>2015-12-22 17:59:32 +0000
commitbcc61110d5b76580a2da0d72d07de8efd7525292 (patch)
tree00d691a550156997acf793d84afc230f00153c3b
downloadpython-prometheus-client-upstream.tar
python-prometheus-client-upstream.tar.gz
Import python-prometheus-client_0.0.13.orig.tar.gzupstream-0.0.13upstream
-rw-r--r--.gitignore5
-rw-r--r--AUTHORS.md9
-rw-r--r--CONTRIBUTING.md12
-rw-r--r--LICENSE201
-rw-r--r--NOTICE2
-rw-r--r--README.md335
-rw-r--r--prometheus_client/__init__.py49
-rw-r--r--prometheus_client/bridge/__init__.py0
-rw-r--r--prometheus_client/bridge/graphite.py80
-rw-r--r--prometheus_client/core.py679
-rw-r--r--prometheus_client/exposition.py131
-rw-r--r--prometheus_client/parser.py224
-rw-r--r--prometheus_client/process_collector.py95
-rw-r--r--setup.py24
-rw-r--r--tests/__init__.py0
-rw-r--r--tests/graphite_bridge.py69
-rw-r--r--tests/proc/26231/fd/00
-rw-r--r--tests/proc/26231/fd/10
-rw-r--r--tests/proc/26231/fd/20
-rw-r--r--tests/proc/26231/fd/30
-rw-r--r--tests/proc/26231/fd/40
-rw-r--r--tests/proc/26231/limits15
-rw-r--r--tests/proc/26231/stat1
-rw-r--r--tests/proc/584/stat1
-rw-r--r--tests/proc/stat16
-rw-r--r--tests/test_client.py373
-rw-r--r--tests/test_exposition.py167
-rw-r--r--tests/test_parser.py188
-rw-r--r--tests/test_process_collector.py62
29 files changed, 2738 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2849cc3
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+build
+dist
+*.egg-info
+*.pyc
+*.swp
diff --git a/AUTHORS.md b/AUTHORS.md
new file mode 100644
index 0000000..daa8ceb
--- /dev/null
+++ b/AUTHORS.md
@@ -0,0 +1,9 @@
+Maintainers of this repository:
+
+* Brian Brazil <brian.brazil@gmail.com>
+
+The following individuals have contributed code to this repository
+(listed in alphabetical order):
+
+* Andrea Fagan <andreafagan28@gmail.com>
+* Brian Brazil <brian.brazil@gmail.com>
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
new file mode 100644
index 0000000..5c8ea59
--- /dev/null
+++ b/CONTRIBUTING.md
@@ -0,0 +1,12 @@
+# Contributing
+
+Prometheus uses GitHub to manage reviews of pull requests.
+
+* If you have a trivial fix or improvement, go ahead and create a pull
+ request, addressing (with `@...`) one or more of the maintainers
+ (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request.
+
+* If you plan to do something more involved, first discuss your ideas
+ on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers).
+ This will avoid unnecessary work and surely give you and us a good deal
+ of inspiration.
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000..0675ae1
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,2 @@
+Prometheus instrumentation library for Python applications
+Copyright 2015 The Prometheus Authors
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..d7cd429
--- /dev/null
+++ b/README.md
@@ -0,0 +1,335 @@
+# Prometheus Python Client
+
+The official Python 2 and 3 client for [Prometheus](http://prometheus.io).
+
+## Three Step Demo
+
+**One**: Install the client:
+```
+pip install prometheus_client
+```
+
+**Two**: Paste the following into a Python interpreter:
+```python
+from prometheus_client import start_http_server,Summary
+import random
+import time
+
+# Create a metric to track time spent and requests made.
+REQUEST_TIME = Summary('request_processing_seconds', 'Time spent processing request')
+
+# Decorate function with metric.
+@REQUEST_TIME.time()
+def process_request(t):
+ """A dummy function that takes some time."""
+ time.sleep(t)
+
+if __name__ == '__main__':
+ # Start up the server to expose the metrics.
+ start_http_server(8000)
+ # Generate some requests.
+ while True:
+ process_request(random.random())
+```
+
+**Three**: Visit [http://localhost:8000/](http://localhost:8000/) to view the metrics.
+
+From one easy to use decorator you get:
+ * `request_processing_seconds_count`: Number of times this function was called.
+ * `request_processing_seconds_sum`: Total amount of time spent in this function.
+
+Prometheus's `rate` function allows calculation of both requests per second,
+and latency over time from this data.
+
+In addition if you're on Linux the `process` metrics expose CPU, memory and
+other information about the process for free!
+
+## Installation
+
+```
+pip install prometheus_client
+```
+
+This package can be found on
+[PyPI](https://pypi.python.org/pypi/prometheus_client).
+
+## Instrumenting
+
+Four types of metric are offered: Counter, Gauge, Summary and Histogram.
+See the documentation on [metric types](http://prometheus.io/docs/concepts/metric_types/)
+and [instrumentation best practices](http://prometheus.io/docs/practices/instrumentation/#counter-vs.-gauge,-summary-vs.-histogram)
+on how to use them.
+
+### Counter
+
+Counters go up, and reset when the process restarts.
+
+
+```python
+from prometheus_client import Counter
+c = Counter('my_failures_total', 'Description of counter')
+c.inc() # Increment by 1
+c.inc(1.6) # Increment by given value
+```
+
+There are utilities to count exceptions raised:
+
+```python
+@c.count_exceptions()
+def f():
+ pass
+
+with c.count_exceptions():
+ pass
+
+# Count only one type of exception
+with c.count_exceptions(ValueError):
+ pass
+```
+
+### Gauge
+
+Gauges can go up and down.
+
+```python
+from prometheus_client import Gauge
+g = Gauge('my_inprogress_requests', 'Description of gauge')
+g.inc() # Increment by 1
+g.dec(10) # Decrement by given value
+g.set(4.2) # Set to a given value
+```
+
+There are utilities for common use cases:
+
+```python
+g.set_to_current_time() # Set to current unixtime
+
+# Increment when entered, decrement when exited.
+@g.track_inprogress()
+def f():
+ pass
+
+with g.track_inprogress():
+ pass
+```
+
+A Gauge can also take its value from a callback:
+
+```python
+d = Gauge('data_objects', 'Number of objects')
+my_dict = {}
+d.set_function(lambda: len(my_dict))
+```
+
+### Summary
+
+Summaries track the size and number of events.
+
+```python
+from prometheus_client import Summary
+s = Summary('request_latency_seconds', 'Description of summary')
+s.observe(4.7) # Observe 4.7 (seconds in this case)
+```
+
+There are utilities for timing code:
+
+```python
+@s.time()
+def f():
+ pass
+
+with s.time():
+ pass
+```
+
+The Python client doesn't store or expose quantile information at this time.
+
+### Histogram
+
+Histograms track the size and number of events in buckets.
+This allows for aggregatable calculation of quantiles.
+
+```python
+from prometheus_client import Histogram
+h = Histogram('request_latency_seconds', 'Description of histogram')
+h.observe(4.7) # Observe 4.7 (seconds in this case)
+```
+
+The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds.
+They can be overridden by passing `buckets` keyword argument to `Histogram`.
+
+There are utilities for timing code:
+
+```python
+@h.time()
+def f():
+ pass
+
+with h.time():
+ pass
+```
+
+### Labels
+
+All metrics can have labels, allowing grouping of related time series.
+
+See the best practices on [naming](http://prometheus.io/docs/practices/naming/)
+and [labels](http://prometheus.io/docs/practices/instrumentation/#use-labels).
+
+Taking a counter as an example:
+
+```python
+from prometheus_client import Counter
+c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
+c.labels('get', '/').inc()
+c.labels('post', '/submit').inc()
+```
+
+Labels can also be provided as a dict:
+
+```python
+from prometheus_client import Counter
+c = Counter('my_requests_total', 'HTTP Failures', ['method', 'endpoint'])
+c.labels({'method': 'get', 'endpoint': '/'}).inc()
+c.labels({'method': 'post', 'endpoint': '/submit'}).inc()
+```
+
+### Process Collector
+
+The Python client automatically exports metrics about process CPU usage, RAM,
+file descriptors and start time. These all have the prefix `process`, and
+are only currently available on Linux.
+
+The namespace and pid constructor arguments allows for exporting metrics about
+other processes, for example:
+```
+ProcessCollector(namespace='mydaemon', pid=lambda: open('/var/run/daemon.pid').read())
+```
+
+## Exporting
+
+There are several options for exporting metrics.
+
+### HTTP
+
+Metrics are usually exposed over HTTP, to be read by the Prometheus server.
+
+The easiest way to do this is via `start_http_server`, which will start an HTTP
+server in a daemon thread on the given port:
+
+```python
+from prometheus_client import start_http_server
+start_http_server(8000)
+```
+
+Visit [http://localhost:8000/](http://localhost:8000/) to view the metrics.
+
+To add Prometheus exposition to an existing HTTP server, see the `MetricsServlet` class
+which provides a `BaseHTTPRequestHandler`. It also serves as a simple example of how
+to write a custom endpoint.
+
+### Node exporter textfile collector
+
+The [textfile collector](https://github.com/prometheus/node_exporter#textfile-collector)
+allows machine-level statistics to be exported out via the Node exporter.
+
+This is useful for monitoring cronjobs, or for writing cronjobs to expose metrics
+about a machine system that the Node exporter does not support or would not make sense
+to perform at every scrape (for example, anything involving subprocesses).
+
+```python
+from prometheus_client import CollectorRegistry,Gauge,write_to_textfile
+registry = CollectorRegistry()
+g = Gauge('raid_status', '1 if raid array is okay', registry=registry)
+g.set(1)
+write_to_textfile('/configured/textfile/path/raid.prom', registry)
+```
+
+A separate registry is used, as the default registry may contain other metrics
+such as those from the Process Collector.
+
+## Exporting to a Pushgateway
+
+The [Pushgateway](https://github.com/prometheus/pushgateway)
+allows ephemeral and batch jobs to expose their metrics to Prometheus.
+
+```python
+from prometheus_client import CollectorRegistry,Gauge,push_to_gateway
+registry = CollectorRegistry()
+g = Gauge('job_last_success_unixtime', 'Last time a batch job successfully finished', registry=registry)
+g.set_to_current_time()
+push_to_gateway('localhost:9091', job='batchA', registry=registry)
+```
+
+A separate registry is used, as the default registry may contain other metrics
+such as those from the Process Collector.
+
+Pushgateway functions take a grouping key. `push_to_gateway` replaces metrics
+with the same grouping key, `pushadd_to_gateway` only replaces metrics with the
+same name and grouping key and `delete_from_gateway` deletes metrics with the
+given job and grouping key. See the
+[Pushgateway documentation](https://github.com/prometheus/pushgateway/blob/master/README.md)
+for more information.
+
+`instance_ip_grouping_key` returns a grouping key with the instance label set
+to the host's IP address.
+
+
+## Bridges
+
+It is also possible to expose metrics to systems other than Prometheus.
+This allows you to take advantage of Prometheus instrumentation even
+if you are not quite ready to fully transition to Prometheus yet.
+
+### Graphite
+
+Metrics are pushed over TCP in the Graphite plaintext format.
+
+```python
+from prometheus_client.bridge.graphite import GraphiteBridge
+gb = GraphiteBridge(('graphite.your.org', 2003))
+# Push once.
+gb.push()
+# Push every 10 seconds in a daemon thread.
+gb.start(10.0)
+```
+
+## Custom Collectors
+
+Sometimes it is not possible to directly instrument code, as it is not
+in your control. This requires you to proxy metrics from other systems.
+
+To do so you need to create a custom collector, for example:
+
+```python
+from prometheus_client.core import GaugeMetricFamily, CounterMetricFamily, REGISTRY
+
+class CustomCollector(object):
+ def collect(self):
+ yield GaugeMetricFamily('my_gauge', 'Help text', value=7)
+ c = CounterMetricFamily('my_counter_total', 'Help text', labels=['foo'])
+ c.add_metric(['bar'], 1.7)
+ c.add_metric(['baz'], 3.8)
+ yield c
+
+REGISTRY.register(CustomCollector())
+```
+
+`SummaryMetricFamily` and `HistogramMetricFamily` work similarly.
+
+
+## Parser
+
+The Python client supports parsing the Prometheus text format.
+This is intended for advanced use cases where you have servers
+exposing Prometheus metrics and need to get them into some other
+system.
+
+```
+from prometheus_client.parser import text_string_to_metric_families
+for family in text_string_to_metric_families("my_gauge 1.0\n"):
+ for sample in family.samples:
+ print("Name: {0} Labels: {1} Value: {2}".format(*sample))
+```
+
+
diff --git a/prometheus_client/__init__.py b/prometheus_client/__init__.py
new file mode 100644
index 0000000..80424db
--- /dev/null
+++ b/prometheus_client/__init__.py
@@ -0,0 +1,49 @@
+#!/usr/bin/python
+
+from . import core
+from . import exposition
+from . import process_collector
+
+__all__ = ['Counter', 'Gauge', 'Summary', 'Histogram']
+# http://stackoverflow.com/questions/19913653/no-unicode-in-all-for-a-packages-init
+__all__ = [n.encode('ascii') for n in __all__]
+
+CollectorRegistry = core.CollectorRegistry
+REGISTRY = core.REGISTRY
+Metric = core.Metric
+Counter = core.Counter
+Gauge = core.Gauge
+Summary = core.Summary
+Histogram = core.Histogram
+
+CONTENT_TYPE_LATEST = exposition.CONTENT_TYPE_LATEST
+generate_latest = exposition.generate_latest
+MetricsHandler = exposition.MetricsHandler
+start_http_server = exposition.start_http_server
+write_to_textfile = exposition.write_to_textfile
+push_to_gateway = exposition.push_to_gateway
+pushadd_to_gateway = exposition.pushadd_to_gateway
+delete_from_gateway = exposition.delete_from_gateway
+instance_ip_grouping_key = exposition.instance_ip_grouping_key
+
+ProcessCollector = process_collector.ProcessCollector
+PROCESS_COLLECTOR = process_collector.PROCESS_COLLECTOR
+
+
+if __name__ == '__main__':
+ c = Counter('cc', 'A counter')
+ c.inc()
+
+ g = Gauge('gg', 'A gauge')
+ g.set(17)
+
+ s = Summary('ss', 'A summary', ['a', 'b'])
+ s.labels('c', 'd').observe(17)
+
+ h = Histogram('hh', 'A histogram')
+ h.observe(.6)
+
+ start_http_server(8000)
+ import time
+ while True:
+ time.sleep(1)
diff --git a/prometheus_client/bridge/__init__.py b/prometheus_client/bridge/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/prometheus_client/bridge/__init__.py
diff --git a/prometheus_client/bridge/graphite.py b/prometheus_client/bridge/graphite.py
new file mode 100644
index 0000000..a01c312
--- /dev/null
+++ b/prometheus_client/bridge/graphite.py
@@ -0,0 +1,80 @@
+#!/usr/bin/python
+from __future__ import unicode_literals
+
+import logging
+import re
+import socket
+import time
+import threading
+
+from .. import core
+
+# Roughly, have to keep to what works as a file name.
+# We also remove periods, so labels can be distinguished.
+_INVALID_GRAPHITE_CHARS = re.compile(r"[^a-zA-Z0-9_-]")
+
+
+def _sanitize(s):
+ return _INVALID_GRAPHITE_CHARS.sub('_', s)
+
+
+class _RegularPush(threading.Thread):
+ def __init__(self, pusher, interval, prefix):
+ super(_RegularPush, self).__init__()
+ self._pusher = pusher
+ self._interval = interval
+ self._prefix = prefix
+
+ def run(self):
+ wait_until = time.time()
+ while True:
+ while True:
+ now = time.time()
+ if now >= wait_until:
+ # May need to skip some pushes.
+ while wait_until < now:
+ wait_until += self._interval
+ break
+ # time.sleep can return early.
+ time.sleep(wait_until - now)
+ try:
+ self._pusher.push(prefix=self._prefix)
+ except IOError:
+ logging.exception("Push failed")
+
+
+class GraphiteBridge(object):
+ def __init__(self, address, registry=core.REGISTRY, timeout_seconds=30, _time=time):
+ self._address = address
+ self._registry = registry
+ self._timeout = timeout_seconds
+ self._time = _time
+
+ def push(self, prefix=''):
+ now = int(self._time.time())
+ output = []
+
+ prefixstr = ''
+ if prefix:
+ prefixstr = prefix + '.'
+
+ for metric in self._registry.collect():
+ for name, labels, value in metric.samples:
+ if labels:
+ labelstr = '.' + '.'.join(
+ ['{0}.{1}'.format(
+ _sanitize(k), _sanitize(v))
+ for k, v in sorted(labels.items())])
+ else:
+ labelstr = ''
+ output.append('{0}{1}{2} {3} {4}\n'.format(
+ prefixstr, _sanitize(name), labelstr, float(value), now))
+
+ conn = socket.create_connection(self._address, self._timeout)
+ conn.sendall(''.join(output).encode('ascii'))
+ conn.close()
+
+ def start(self, interval=60.0, prefix=''):
+ t = _RegularPush(self, interval, prefix)
+ t.daemon = True
+ t.start()
diff --git a/prometheus_client/core.py b/prometheus_client/core.py
new file mode 100644
index 0000000..14b5394
--- /dev/null
+++ b/prometheus_client/core.py
@@ -0,0 +1,679 @@
+#!/usr/bin/python
+
+from __future__ import unicode_literals
+
+import copy
+import math
+import re
+import time
+import types
+
+try:
+ from BaseHTTPServer import BaseHTTPRequestHandler
+except ImportError:
+ # Python 3
+ unicode = str
+
+from functools import wraps
+from threading import Lock
+
# Valid metric and label name patterns, per the Prometheus data model.
_METRIC_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$')
_METRIC_LABEL_NAME_RE = re.compile(r'^[a-zA-Z_:][a-zA-Z0-9_:]*$')
# Label names beginning with '__' are reserved for internal use.
_RESERVED_METRIC_LABEL_NAME_RE = re.compile(r'^__.*$')
_INF = float("inf")
_MINUS_INF = float("-inf")
+
+
class CollectorRegistry(object):
    '''Metric collector registry.

    Collectors must have a no-argument method 'collect' that returns a list of
    Metric objects. The returned metrics should be consistent with the Prometheus
    exposition formats.
    '''
    def __init__(self):
        self._collectors = set()
        self._lock = Lock()

    def register(self, collector):
        '''Add a collector to the registry.'''
        with self._lock:
            self._collectors.add(collector)

    def unregister(self, collector):
        '''Remove a collector from the registry.'''
        with self._lock:
            self._collectors.remove(collector)

    def collect(self):
        '''Yields metrics from the collectors in the registry.'''
        # Snapshot under the lock, then iterate outside it so a slow
        # collector cannot block register/unregister.
        with self._lock:
            snapshot = set(self._collectors)
        for collector in snapshot:
            for metric in collector.collect():
                yield metric

    def get_sample_value(self, name, labels=None):
        '''Returns the sample value, or None if not found.

        This is inefficient, and intended only for use in unittests.
        '''
        wanted_labels = {} if labels is None else labels
        for metric in self.collect():
            for sample_name, sample_labels, value in metric.samples:
                if sample_name == name and sample_labels == wanted_labels:
                    return value
        return None
+
+
REGISTRY = CollectorRegistry()
'''The default registry.'''

# The metric types accepted by Metric(); matches the text exposition format.
_METRIC_TYPES = ('counter', 'gauge', 'summary', 'histogram', 'untyped')
+
+
class Metric(object):
    '''A single metric family and its samples.

    This is intended only for internal use by the instrumentation client.

    Custom collectors should use GaugeMetricFamily, CounterMetricFamily
    and SummaryMetricFamily instead.
    '''
    def __init__(self, name, documentation, typ):
        if typ not in _METRIC_TYPES:
            raise ValueError('Invalid metric type: ' + typ)
        self.name = name
        self.documentation = documentation
        self.type = typ
        # List of (sample_name, labels_dict, value) tuples.
        self.samples = []

    def add_sample(self, name, labels, value):
        '''Add a sample to the metric.

        Internal-only, do not use.'''
        self.samples.append((name, labels, value))

    def __eq__(self, other):
        if not isinstance(other, Metric):
            return False
        return ((self.name, self.documentation, self.type, self.samples)
                == (other.name, other.documentation, other.type, other.samples))
+
+
class CounterMetricFamily(Metric):
    '''A single counter and its samples.

    For use by custom collectors.
    '''
    def __init__(self, name, documentation, value=None, labels=None):
        Metric.__init__(self, name, documentation, 'counter')
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = labels if labels is not None else []
        if value is not None:
            self.add_metric([], value)

    def add_metric(self, labels, value):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: The value of the metric.
        '''
        label_dict = dict(zip(self._labelnames, labels))
        self.samples.append((self.name, label_dict, value))
+
+
class GaugeMetricFamily(Metric):
    '''A single gauge and its samples.

    For use by custom collectors.
    '''
    def __init__(self, name, documentation, value=None, labels=None):
        Metric.__init__(self, name, documentation, 'gauge')
        if labels is not None and value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = labels if labels is not None else []
        if value is not None:
            self.add_metric([], value)

    def add_metric(self, labels, value):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          value: A float
        '''
        label_dict = dict(zip(self._labelnames, labels))
        self.samples.append((self.name, label_dict, value))
+
+
class SummaryMetricFamily(Metric):
    '''A single summary and its samples.

    For use by custom collectors.
    '''
    def __init__(self, name, documentation, count_value=None, sum_value=None, labels=None):
        Metric.__init__(self, name, documentation, 'summary')
        if (sum_value is None) != (count_value is None):
            raise ValueError('count_value and sum_value must be provided together.')
        if labels is not None and count_value is not None:
            raise ValueError('Can only specify at most one of value and labels.')
        self._labelnames = labels if labels is not None else []
        if count_value is not None:
            self.add_metric([], count_value, sum_value)

    def add_metric(self, labels, count_value, sum_value):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          count_value: The count value of the metric.
          sum_value: The sum value of the metric.
        '''
        pairs = list(zip(self._labelnames, labels))
        self.samples.append((self.name + '_count', dict(pairs), count_value))
        self.samples.append((self.name + '_sum', dict(pairs), sum_value))
+
+
class HistogramMetricFamily(Metric):
    '''A single histogram and its samples.

    For use by custom collectors.
    '''
    def __init__(self, name, documentation, buckets=None, sum_value=None, labels=None):
        Metric.__init__(self, name, documentation, 'histogram')
        if (sum_value is None) != (buckets is None):
            raise ValueError('buckets and sum_value must be provided together.')
        if labels is not None and buckets is not None:
            raise ValueError('Can only specify at most one of buckets and labels.')
        self._labelnames = labels if labels is not None else []
        if buckets is not None:
            self.add_metric([], buckets, sum_value)

    def add_metric(self, labels, buckets, sum_value):
        '''Add a metric to the metric family.

        Args:
          labels: A list of label values
          buckets: A list of pairs of bucket names and values.
              The buckets must be sorted, and +Inf present.
          sum_value: The sum value of the metric.
        '''
        pairs = list(zip(self._labelnames, labels))
        for bucket, value in buckets:
            self.samples.append(
                (self.name + '_bucket', dict(pairs + [('le', bucket)]), value))
        # +Inf is last and provides the count value.
        self.samples.append((self.name + '_count', dict(pairs), buckets[-1][1]))
        self.samples.append((self.name + '_sum', dict(pairs), sum_value))
+
+
+class _MutexValue(object):
+ '''A float protected by a mutex.'''
+
+ def __init__(self, name, labelnames, labelvalues):
+ self._value = 0.0
+ self._lock = Lock()
+
+ def inc(self, amount):
+ with self._lock:
+ self._value += amount
+
+ def set(self, value):
+ with self._lock:
+ self._value = value
+
+ def get(self):
+ with self._lock:
+ return self._value
+
+_ValueClass = _MutexValue
+
+
class _LabelWrapper(object):
    '''Handles labels for the wrapped metric.

    Stands in front of a labelled metric: labels() lazily creates one child
    instance of the wrapped class per distinct tuple of label values, and
    _samples() merges each child's samples with its label set.
    '''
    def __init__(self, wrappedClass, name, labelnames, **kwargs):
        self._wrappedClass = wrappedClass
        self._type = wrappedClass._type
        self._name = name
        self._labelnames = labelnames
        self._kwargs = kwargs
        self._lock = Lock()
        # Maps a tuple of label values to the child metric instance.
        self._metrics = {}

        for l in labelnames:
            if l.startswith('__'):
                raise ValueError('Invalid label metric name: ' + l)

    def labels(self, *labelvalues):
        '''Return the child for the given labelset.

        Labels can be provided as a tuple or as a dict:
            c = Counter('c', 'counter', ['l', 'm'])
            # Set labels by position
            c.labels('0', '1').inc()
            # Set labels by name
            c.labels({'l': '0', 'm': '1'}).inc()
        '''
        if len(labelvalues) == 1 and type(labelvalues[0]) == dict:
            if sorted(labelvalues[0].keys()) != sorted(self._labelnames):
                raise ValueError('Incorrect label names')
            labelvalues = tuple([unicode(labelvalues[0][l]) for l in self._labelnames])
        else:
            if len(labelvalues) != len(self._labelnames):
                raise ValueError('Incorrect label count')
            # Values are coerced to text so 1 and '1' address the same child.
            labelvalues = tuple([unicode(l) for l in labelvalues])
        with self._lock:
            if labelvalues not in self._metrics:
                self._metrics[labelvalues] = self._wrappedClass(self._name, self._labelnames, labelvalues, **self._kwargs)
            return self._metrics[labelvalues]

    def remove(self, *labelvalues):
        '''Remove the given labelset from the metric.'''
        if len(labelvalues) != len(self._labelnames):
            raise ValueError('Incorrect label count')
        labelvalues = tuple([unicode(l) for l in labelvalues])
        with self._lock:
            del self._metrics[labelvalues]

    def _samples(self):
        # Snapshot under the lock, then iterate children outside it.
        with self._lock:
            metrics = self._metrics.copy()
        for labels, metric in metrics.items():
            series_labels = list(dict(zip(self._labelnames, labels)).items())
            for suffix, sample_labels, value in metric._samples():
                # Child-provided labels (e.g. 'le', 'quantile') are merged on
                # top of the series labels.
                yield (suffix, dict(series_labels + list(sample_labels.items())), value)
+
+
def _MetricWrapper(cls):
    '''Provides common functionality for metrics.

    Class decorator: replaces the decorated class with a factory function.
    The factory validates names, wraps the instance in a _LabelWrapper when
    label names are given, injects a collect() method, and registers the
    result with the registry.
    '''
    def init(name, documentation, labelnames=(), namespace='', subsystem='', registry=REGISTRY, **kwargs):
        # The full exposed name is <namespace>_<subsystem>_<name>.
        full_name = ''
        if namespace:
            full_name += namespace + '_'
        if subsystem:
            full_name += subsystem + '_'
        full_name += name

        if labelnames:
            labelnames = tuple(labelnames)
            for l in labelnames:
                if not _METRIC_LABEL_NAME_RE.match(l):
                    raise ValueError('Invalid label metric name: ' + l)
                if _RESERVED_METRIC_LABEL_NAME_RE.match(l):
                    raise ValueError('Reserved label metric name: ' + l)
                if l in cls._reserved_labelnames:
                    raise ValueError('Reserved label metric name: ' + l)
            collector = _LabelWrapper(cls, name, labelnames, **kwargs)
        else:
            # No labels: a single bare instance of the metric class.
            collector = cls(name, labelnames, (), **kwargs)

        if not _METRIC_NAME_RE.match(full_name):
            raise ValueError('Invalid metric name: ' + full_name)

        def collect():
            # Adapter from the internal _samples() protocol to Metric objects.
            metric = Metric(full_name, documentation, cls._type)
            for suffix, labels, value in collector._samples():
                metric.add_sample(full_name + suffix, labels, value)
            return [metric]
        collector.collect = collect

        # Pass registry=None to create an unregistered metric.
        if registry:
            registry.register(collector)
        return collector

    return init
+
+
@_MetricWrapper
class Counter(object):
    '''A Counter tracks counts of events or running totals.

    Example use cases for Counters:
    - Number of requests processed
    - Number of items that were inserted into a queue
    - Total amount of data that a system has processed

    Counters can only go up (and be reset when the process restarts). If your use case can go down,
    you should use a Gauge instead.

    An example for a Counter:

        from prometheus_client import Counter
        c = Counter('my_failures_total', 'Description of counter')
        c.inc()     # Increment by 1
        c.inc(1.6)  # Increment by given value
    '''
    _type = 'counter'
    _reserved_labelnames = []

    def __init__(self, name, labelnames, labelvalues):
        self._value = _ValueClass(name, labelnames, labelvalues)

    def inc(self, amount=1):
        '''Increment counter by the given amount.'''
        if amount < 0:
            raise ValueError('Counters can only be incremented by non-negative amounts.')
        self._value.inc(amount)

    def count_exceptions(self, exception=Exception):
        '''Count exceptions in a block of code or function.

        Can be used as a function decorator or context manager.
        Increments the counter when an exception of the given
        type is raised up out of the code.
        '''

        class _ExceptionCounter(object):
            def __init__(self, counter):
                self._counter = counter

            def __enter__(self):
                pass

            def __exit__(self, exc_type, exc_value, tb):
                # Count only the requested exception type (or a subclass);
                # the exception itself is not suppressed.
                if isinstance(exc_value, exception):
                    self._counter.inc()

            def __call__(self, func):
                @wraps(func)
                def _counted(*args, **kwargs):
                    with self:
                        return func(*args, **kwargs)
                return _counted

        return _ExceptionCounter(self)

    def _samples(self):
        return (('', {}, self._value.get()), )
+
+
@_MetricWrapper
class Gauge(object):
    '''Gauge metric, to report instantaneous values.

    Examples of Gauges include:
      Inprogress requests
      Number of items in a queue
      Free memory
      Total memory
      Temperature

    Gauges can go both up and down.

       from prometheus_client import Gauge
       g = Gauge('my_inprogress_requests', 'Description of gauge')
       g.inc()      # Increment by 1
       g.dec(10)    # Decrement by given value
       g.set(4.2)   # Set to a given value
    '''
    _type = 'gauge'
    _reserved_labelnames = []

    def __init__(self, name, labelnames, labelvalues):
        self._value = _ValueClass(name, labelnames, labelvalues)

    def inc(self, amount=1):
        '''Increment gauge by the given amount.'''
        self._value.inc(amount)

    def dec(self, amount=1):
        '''Decrement gauge by the given amount.'''
        self._value.inc(-amount)

    def set(self, value):
        '''Set gauge to the given value.'''
        self._value.set(float(value))

    def set_to_current_time(self):
        '''Set gauge to the current unixtime.'''
        self.set(time.time())

    def track_inprogress(self):
        '''Track inprogress blocks of code or functions.

        Can be used as a function decorator or context manager.
        Increments the gauge when the code is entered,
        and decrements when it is exited.
        '''

        class InprogressTracker(object):
            def __init__(self, gauge):
                self._gauge = gauge

            def __enter__(self):
                self._gauge.inc()

            def __exit__(self, typ, value, traceback):
                # Decrement even when the block raised.
                self._gauge.dec()

            def __call__(self, f):
                # Decorator form: route each call through the context manager.
                @wraps(f)
                def wrapped(*args, **kwargs):
                    with self:
                        return f(*args, **kwargs)
                return wrapped

        return InprogressTracker(self)

    def time(self):
        '''Time a block of code or function, and set the duration in seconds.

        Can be used as a function decorator or context manager.
        '''

        class Timer(object):
            def __init__(self, gauge):
                self._gauge = gauge

            def __enter__(self):
                self._start = time.time()

            def __exit__(self, typ, value, traceback):
                # Time can go backwards.
                self._gauge.set(max(time.time() - self._start, 0))

            def __call__(self, f):
                @wraps(f)
                def wrapped(*args, **kwargs):
                    with self:
                        return f(*args, **kwargs)
                return wrapped

        return Timer(self)

    def set_function(self, f):
        '''Call the provided function to return the Gauge value.

        The function must return a float, and may be called from
        multiple threads.
        All other methods of the Gauge become NOOPs.
        '''
        def samples(self):
            return (('', {}, float(f())), )
        # Rebind _samples on this instance only: collection now reports the
        # callback's result, while inc/dec/set keep writing to a value that
        # is never read again.
        self._samples = types.MethodType(samples, self)

    def _samples(self):
        return (('', {}, self._value.get()), )
+
+
@_MetricWrapper
class Summary(object):
    '''A Summary tracks the size and number of events.

    Example use cases for Summaries:
    - Response latency
    - Request size

    Example for a Summary:

        from prometheus_client import Summary
        s = Summary('request_size_bytes', 'Request size (bytes)')
        s.observe(512)  # Observe 512 (bytes)

    Example for a Summary using time:

        from prometheus_client import Summary
        REQUEST_TIME = Summary('response_latency_seconds', 'Response latency (seconds)')

        @REQUEST_TIME.time()
        def create_response(request):
            """A dummy function"""
            time.sleep(1)
    '''
    _type = 'summary'
    _reserved_labelnames = ['quantile']

    def __init__(self, name, labelnames, labelvalues):
        self._count = _ValueClass(name + '_count', labelnames, labelvalues)
        self._sum = _ValueClass(name + '_sum', labelnames, labelvalues)

    def observe(self, amount):
        '''Observe the given amount.'''
        self._count.inc(1)
        self._sum.inc(amount)

    def time(self):
        '''Time a block of code or function, and observe the duration in seconds.

        Can be used as a function decorator or context manager.
        '''

        class _Timer(object):
            def __init__(self, summary):
                self._summary = summary

            def __enter__(self):
                self._begin = time.time()

            def __exit__(self, exc_type, exc_value, tb):
                # Clamp at zero: the clock can be set backwards mid-block.
                self._summary.observe(max(time.time() - self._begin, 0))

            def __call__(self, func):
                @wraps(func)
                def _timed(*args, **kwargs):
                    with self:
                        return func(*args, **kwargs)
                return _timed

        return _Timer(self)

    def _samples(self):
        return (
            ('_count', {}, self._count.get()),
            ('_sum', {}, self._sum.get()))
+
+
+def _floatToGoString(d):
+ if d == _INF:
+ return '+Inf'
+ elif d == _MINUS_INF:
+ return '-Inf'
+ elif math.isnan(d):
+ return 'NaN'
+ else:
+ return repr(float(d))
+
+
@_MetricWrapper
class Histogram(object):
    '''A Histogram tracks the size and number of events in buckets.

    You can use Histograms for aggregatable calculation of quantiles.

    Example use cases:
    - Response latency
    - Request size

    Example for a Histogram:

        from prometheus_client import Histogram
        h = Histogram('request_size_bytes', 'Request size (bytes)')
        h.observe(512)  # Observe 512 (bytes)

    Example for a Histogram using time:

        from prometheus_client import Histogram
        REQUEST_TIME = Histogram('response_latency_seconds', 'Response latency (seconds)')

        @REQUEST_TIME.time()
        def create_response(request):
            """A dummy function"""
            time.sleep(1)

    The default buckets are intended to cover a typical web/rpc request from milliseconds to seconds.
    They can be overridden by passing `buckets` keyword argument to `Histogram`.
    '''
    _type = 'histogram'
    _reserved_labelnames = ['histogram']

    def __init__(self, name, labelnames, labelvalues, buckets=(.005, .01, .025, .05, .075, .1, .25, .5, .75, 1.0, 2.5, 5.0, 7.5, 10.0, _INF)):
        self._sum = _ValueClass(name + '_sum', labelnames, labelvalues)
        buckets = [float(b) for b in buckets]
        if buckets != sorted(buckets):
            # This is probably an error on the part of the user,
            # so raise rather than sorting for them.
            raise ValueError('Buckets not in sorted order')
        # A +Inf bucket is always present so _count can be derived from it.
        if buckets and buckets[-1] != _INF:
            buckets.append(_INF)
        if len(buckets) < 2:
            raise ValueError('Must have at least two buckets')
        self._upper_bounds = buckets
        # One counter per bucket; the bound becomes the 'le' label value,
        # formatted exactly as the exposition format renders floats.
        self._buckets = []
        bucket_labelnames = labelnames + ('le',)
        for b in buckets:
            self._buckets.append(_ValueClass(name + '_bucket', bucket_labelnames, labelvalues + (_floatToGoString(b),)))

    def observe(self, amount):
        '''Observe the given amount.'''
        self._sum.inc(amount)
        # Only the first bucket with bound >= amount is incremented; the
        # cumulative totals required by the format are built in _samples().
        for i, bound in enumerate(self._upper_bounds):
            if amount <= bound:
                self._buckets[i].inc(1)
                break

    def time(self):
        '''Time a block of code or function, and observe the duration in seconds.

        Can be used as a function decorator or context manager.
        '''

        class Timer(object):
            def __init__(self, histogram):
                self._histogram = histogram

            def __enter__(self):
                self._start = time.time()

            def __exit__(self, typ, value, traceback):
                # Time can go backwards.
                self._histogram.observe(max(time.time() - self._start, 0))

            def __call__(self, f):
                @wraps(f)
                def wrapped(*args, **kwargs):
                    with self:
                        return f(*args, **kwargs)
                return wrapped

        return Timer(self)

    def _samples(self):
        samples = []
        # Accumulate so each bucket reports observations at or below its
        # bound; the +Inf bucket's running total is the overall count.
        acc = 0
        for i, bound in enumerate(self._upper_bounds):
            acc += self._buckets[i].get()
            samples.append(('_bucket', {'le': _floatToGoString(bound)}, acc))
        samples.append(('_count', {}, acc))
        samples.append(('_sum', {}, self._sum.get()))
        return tuple(samples)
+
diff --git a/prometheus_client/exposition.py b/prometheus_client/exposition.py
new file mode 100644
index 0000000..3c4795d
--- /dev/null
+++ b/prometheus_client/exposition.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+
+from __future__ import unicode_literals
+
+import os
+import socket
+import time
+import threading
+from contextlib import closing
+
+from . import core
+try:
+ from BaseHTTPServer import BaseHTTPRequestHandler
+ from BaseHTTPServer import HTTPServer
+ from urllib2 import build_opener, Request, HTTPHandler
+ from urllib import quote_plus
+except ImportError:
+ # Python 3
+ unicode = str
+ from http.server import BaseHTTPRequestHandler
+ from http.server import HTTPServer
+ from urllib.request import build_opener, Request, HTTPHandler
+ from urllib.parse import quote_plus
+
+
+CONTENT_TYPE_LATEST = 'text/plain; version=0.0.4; charset=utf-8'
+'''Content type of the latest text format'''
+
+
def generate_latest(registry=core.REGISTRY):
    '''Returns the metrics from the registry in latest text format as a string.'''
    def _escape_doc(text):
        # HELP lines escape backslash and newline only.
        return text.replace('\\', r'\\').replace('\n', r'\n')

    def _escape_label(text):
        # Label values additionally escape double quotes.
        return text.replace('\\', r'\\').replace('\n', r'\n').replace('"', r'\"')

    chunks = []
    for metric in registry.collect():
        chunks.append('# HELP {0} {1}'.format(metric.name, _escape_doc(metric.documentation)))
        chunks.append('\n# TYPE {0} {1}\n'.format(metric.name, metric.type))
        for name, labels, value in metric.samples:
            labelstr = ''
            if labels:
                # Sorted for a stable, diffable exposition.
                pairs = ['{0}="{1}"'.format(k, _escape_label(v))
                         for k, v in sorted(labels.items())]
                labelstr = '{{{0}}}'.format(','.join(pairs))
            chunks.append('{0}{1} {2}\n'.format(name, labelstr, core._floatToGoString(value)))
    return ''.join(chunks).encode('utf-8')
+
+
class MetricsHandler(BaseHTTPRequestHandler):
    '''HTTP handler that serves the default REGISTRY in the latest text format.'''
    def do_GET(self):
        self.send_response(200)
        self.send_header('Content-Type', CONTENT_TYPE_LATEST)
        self.end_headers()
        self.wfile.write(generate_latest(core.REGISTRY))

    def log_message(self, format, *args):
        # Suppress the per-request logging BaseHTTPRequestHandler does by
        # default; scrapes would otherwise spam stderr.
        return
+
+
def start_http_server(port, addr=''):
    """Starts a HTTP server for prometheus metrics as a daemon thread."""
    class PrometheusMetricsServer(threading.Thread):
        def run(self):
            # serve_forever never returns; the daemon flag below lets the
            # interpreter exit anyway.
            HTTPServer((addr, port), MetricsHandler).serve_forever()

    server_thread = PrometheusMetricsServer()
    server_thread.daemon = True
    server_thread.start()
+
+
def write_to_textfile(path, registry):
    '''Write metrics to the given path.

    This is intended for use with the Node exporter textfile collector.
    The path must end in .prom for the textfile collector to process it.'''
    # Write to a sibling temp file first, then rename over the target:
    # rename(2) is atomic, so readers never see a half-written file.
    tmppath = '{0}.{1}.{2}'.format(path, os.getpid(), threading.current_thread().ident)
    with open(tmppath, 'wb') as output_file:
        output_file.write(generate_latest(registry))
    os.rename(tmppath, path)
+
+
def push_to_gateway(gateway, job, registry, grouping_key=None, timeout=None):
    '''Push metrics to the given pushgateway.

    This overwrites all metrics with the same job and grouping_key.
    This uses the PUT HTTP method.'''
    _use_gateway('PUT', gateway, job, registry=registry,
                 grouping_key=grouping_key, timeout=timeout)
+
+
def pushadd_to_gateway(gateway, job, registry, grouping_key=None, timeout=None):
    '''PushAdd metrics to the given pushgateway.

    This replaces metrics with the same name, job and grouping_key.
    This uses the POST HTTP method.'''
    _use_gateway('POST', gateway, job, registry=registry,
                 grouping_key=grouping_key, timeout=timeout)
+
+
def delete_from_gateway(gateway, job, grouping_key=None, timeout=None):
    '''Delete metrics from the given pushgateway.

    This deletes metrics with the given job and grouping_key.
    This uses the DELETE HTTP method.'''
    _use_gateway('DELETE', gateway, job, registry=None,
                 grouping_key=grouping_key, timeout=timeout)
+
+
def _use_gateway(method, gateway, job, registry, grouping_key, timeout):
    '''Send the registry's metrics to (or delete them from) a pushgateway.

    Args:
      method: HTTP method - 'PUT', 'POST' or 'DELETE'.
      gateway: host:port of the pushgateway.
      job: job label value, URL-quoted into the path.
      registry: CollectorRegistry to serialize; ignored for DELETE.
      grouping_key: optional dict of extra grouping labels.
      timeout: optional socket timeout in seconds.

    Raises:
      IOError: if the gateway responds with status >= 400.
    '''
    url = 'http://{0}/metrics/job/{1}'.format(gateway, quote_plus(job))

    data = b''
    if method != 'DELETE':
        data = generate_latest(registry)

    if grouping_key is None:
        grouping_key = {}
    # Sorted so the same grouping key always produces the same URL.
    url = url + ''.join(['/{0}/{1}'.format(quote_plus(str(k)), quote_plus(str(v)))
                         for k, v in sorted(grouping_key.items())])

    request = Request(url, data=data)
    request.add_header('Content-Type', CONTENT_TYPE_LATEST)
    # urllib only speaks GET/POST natively; force the requested method.
    request.get_method = lambda: method
    resp = build_opener(HTTPHandler).open(request, timeout=timeout)
    try:
        if resp.code >= 400:
            raise IOError("error talking to pushgateway: {0} {1}".format(
                resp.code, resp.msg))
    finally:
        # The original leaked the HTTP response object; always close it.
        resp.close()
+
def instance_ip_grouping_key():
    '''Grouping key with instance set to the IP Address of this host.'''
    # Connecting a UDP socket sends no packets; it only makes the OS pick
    # an outgoing interface, whose address getsockname() then reports.
    # NOTE(review): connect() to port 0 is rejected on some platforms -
    # confirm, and consider a real port number if so.
    with closing(socket.socket(socket.AF_INET, socket.SOCK_DGRAM)) as s:
        s.connect(('localhost', 0))
        return {'instance': s.getsockname()[0]}
diff --git a/prometheus_client/parser.py b/prometheus_client/parser.py
new file mode 100644
index 0000000..4ca3d7c
--- /dev/null
+++ b/prometheus_client/parser.py
@@ -0,0 +1,224 @@
+#!/usr/bin/python
+
+from __future__ import unicode_literals
+
+try:
+ import StringIO
+except ImportError:
+ # Python 3
+ import io as StringIO
+
+from . import core
+
+
def text_string_to_metric_families(text):
    """Parse Prometheus text format from a string.

    See text_fd_to_metric_families.
    """
    # Wrap the string in a file-like object and delegate to the fd parser.
    for family in text_fd_to_metric_families(StringIO.StringIO(text)):
        yield family
+
+
+def _unescape_help(text):
+ result = []
+ slash = False
+
+ for char in text:
+ if slash:
+ if char == '\\':
+ result.append('\\')
+ elif char == 'n':
+ result.append('\n')
+ else:
+ result.append('\\' + char)
+ slash = False
+ else:
+ if char == '\\':
+ slash = True
+ else:
+ result.append(char)
+
+ if slash:
+ result.append('\\')
+
+ return ''.join(result)
+
+
def _parse_sample(text):
    '''Parse one sample line of the text format.

    Input looks like 'name{label="value",...} 1.0'; the label block is
    optional. Returns (name, labels_dict, float_value). Anything after the
    value (e.g. a timestamp) is ignored. Raises ValueError on malformed
    label syntax.

    Implemented as a character-by-character state machine; the state names
    describe what the next character is expected to belong to.
    '''
    name = []
    labelname = []
    labelvalue = []
    value = []
    labels = {}

    state = 'name'

    for char in text:
        if state == 'name':
            # Name runs until '{' (labels follow) or whitespace (value follows).
            if char == '{':
                state = 'startoflabelname'
            elif char == ' ' or char == '\t':
                state = 'endofname'
            else:
                name.append(char)
        elif state == 'endofname':
            # Whitespace after the name; a '{' may still introduce labels.
            if char == ' ' or char == '\t':
                pass
            elif char == '{':
                state = 'startoflabelname'
            else:
                value.append(char)
                state = 'value'
        elif state == 'startoflabelname':
            if char == ' ' or char == '\t':
                pass
            elif char == '}':
                # Empty label block: 'name{}'.
                state = 'endoflabels'
            else:
                state = 'labelname'
                labelname.append(char)
        elif state == 'labelname':
            if char == '=':
                state = 'labelvaluequote'
            elif char == ' ' or char == '\t':
                state = 'labelvalueequals'
            else:
                labelname.append(char)
        elif state == 'labelvalueequals':
            # Whitespace is allowed between the label name and '='.
            if char == '=':
                state = 'labelvaluequote'
            elif char == ' ' or char == '\t':
                pass
            else:
                raise ValueError("Invalid line: " + text)
        elif state == 'labelvaluequote':
            # Only a double-quoted value may follow '='.
            if char == '"':
                state = 'labelvalue'
            elif char == ' ' or char == '\t':
                pass
            else:
                raise ValueError("Invalid line: " + text)
        elif state == 'labelvalue':
            if char == '\\':
                state = 'labelvalueslash'
            elif char == '"':
                # Closing quote: commit the finished label pair.
                labels[''.join(labelname)] = ''.join(labelvalue)
                labelname = []
                labelvalue = []
                state = 'nextlabel'
            else:
                labelvalue.append(char)
        elif state == 'labelvalueslash':
            # Escapes inside label values: \\, \n and \"; anything else is
            # kept verbatim with its backslash.
            state = 'labelvalue'
            if char == '\\':
                labelvalue.append('\\')
            elif char == 'n':
                labelvalue.append('\n')
            elif char == '"':
                labelvalue.append('"')
            else:
                labelvalue.append('\\' + char)
        elif state == 'nextlabel':
            # After a label pair: ',' continues the list, '}' ends it.
            if char == ',':
                state = 'labelname'
            elif char == '}':
                state = 'endoflabels'
            elif char == ' ' or char == '\t':
                pass
            else:
                raise ValueError("Invalid line: " + text)
        elif state == 'endoflabels':
            if char == ' ' or char == '\t':
                pass
            else:
                value.append(char)
                state = 'value'
        elif state == 'value':
            if char == ' ' or char == '\t':
                # Timestamps are not supported, halt
                break
            else:
                value.append(char)
    return (''.join(name), labels, float(''.join(value)))
+
+
def text_fd_to_metric_families(fd):
    """Parse Prometheus text format from a file descriptor.

    This is a laxer parser than the main Go parser,
    so successful parsing does not imply that the parsed
    text meets the specification.

    Yields core.Metric's.
    """
    # Parser state for the metric family currently being accumulated.
    name = ''
    documentation = ''
    typ = 'untyped'
    samples = []
    # Sample names that may legally belong to the current family (the family
    # name plus type-dependent suffixes such as _sum/_count/_bucket).
    allowed_names = []

    def build_metric(name, documentation, typ, samples):
        metric = core.Metric(name, documentation, typ)
        metric.samples = samples
        return metric

    for line in fd:
        line = line.strip()

        if line.startswith('#'):
            parts = line.split(None, 3)
            if len(parts) < 2:
                continue
            if parts[1] == 'HELP':
                if parts[2] != name:
                    # A HELP for a different name flushes the family in
                    # progress and starts a new one.
                    if name != '':
                        yield build_metric(name, documentation, typ, samples)
                    # New metric
                    name = parts[2]
                    typ = 'untyped'
                    samples = []
                    allowed_names = [parts[2]]
                if len(parts) == 4:
                    documentation = _unescape_help(parts[3])
                else:
                    documentation = ''
            elif parts[1] == 'TYPE':
                if parts[2] != name:
                    if name != '':
                        yield build_metric(name, documentation, typ, samples)
                    # New metric
                    name = parts[2]
                    documentation = ''
                    samples = []
                typ = parts[3]
                # The declared type determines which sample-name suffixes
                # belong to this family.
                allowed_names = {
                    'counter': [''],
                    'gauge': [''],
                    'summary': ['_count', '_sum', ''],
                    'histogram': ['_count', '_sum', '_bucket'],
                }.get(typ, [parts[2]])
                allowed_names = [name + n for n in allowed_names]
            else:
                # Ignore other comment tokens
                pass
        elif line == '':
            # Ignore blank lines
            pass
        else:
            sample = _parse_sample(line)
            if sample[0] not in allowed_names:
                # A sample that doesn't belong to the current family flushes
                # it; the stray sample becomes its own untyped family.
                if name != '':
                    yield build_metric(name, documentation, typ, samples)
                # New metric, yield immediately as untyped singleton
                name = ''
                documentation = ''
                typ = 'untyped'
                samples = []
                allowed_names = []
                yield build_metric(sample[0], documentation, typ, [sample])
            else:
                samples.append(sample)

    # Flush the final family once input is exhausted.
    if name != '':
        yield build_metric(name, documentation, typ, samples)
diff --git a/prometheus_client/process_collector.py b/prometheus_client/process_collector.py
new file mode 100644
index 0000000..5c906c9
--- /dev/null
+++ b/prometheus_client/process_collector.py
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+
+from __future__ import unicode_literals
+
+import os
+import time
+import threading
+
+from . import core
+try:
+ import resource
+ _PAGESIZE = resource.getpagesize()
+except ImportError:
+ # Not Unix
+ _PAGESIZE = 4096
+
+
class ProcessCollector(object):
    """Collector for Standard Exports such as cpu and memory."""
    def __init__(self, namespace='', pid=lambda: 'self', proc='/proc', registry=core.REGISTRY):
        '''
        Args:
          namespace: optional prefix, giving '<namespace>_process_*' names.
          pid: callable returning the pid (or 'self') to inspect; a callable
              so the target can change between scrapes.
          proc: mount point of the proc filesystem.
          registry: registry to register with; pass a falsy value to skip.
        '''
        self._namespace = namespace
        self._pid = pid
        self._proc = proc
        if namespace:
            self._prefix = namespace + '_process_'
        else:
            self._prefix = 'process_'
        # Clock ticks per second, needed to convert jiffies from /proc into
        # seconds; 100 is the conventional fallback when sysconf can't say.
        self._ticks = 100.0
        try:
            self._ticks = os.sysconf('SC_CLK_TCK')
        except (ValueError, TypeError, AttributeError):
            pass

        # Boot time doubles as the probe for /proc accessibility; collect()
        # is a no-op when it could not be read.
        self._btime = 0
        try:
            self._btime = self._boot_time()
        except IOError:
            pass
        if registry:
            registry.register(self)

    def _boot_time(self):
        '''Return the boot time in seconds since the epoch, from /proc/stat.'''
        with open(os.path.join(self._proc, 'stat')) as stat:
            for line in stat:
                if line.startswith('btime '):
                    return float(line.split()[1])

    def collect(self):
        '''Collect process metrics from /proc; returns [] when unavailable.'''
        if not self._btime:
            return []

        try:
            pid = os.path.join(self._proc, str(self._pid()).strip())
        except Exception:
            # The pid callable failed; fail silently as the original comment
            # intended. (The original code re-raised here, which made its
            # `return []` unreachable and contradicted the comment.)
            return []

        result = []
        try:
            with open(os.path.join(pid, 'stat')) as stat:
                # Split after the ')' that ends the (possibly space-laden)
                # comm field, so the remaining fields are purely numeric.
                parts = (stat.read().split(')')[-1].split())
            vmem = core.GaugeMetricFamily(self._prefix + 'virtual_memory_bytes',
                'Virtual memory size in bytes', value=float(parts[20]))
            rss = core.GaugeMetricFamily(self._prefix + 'resident_memory_bytes',
                'Resident memory size in bytes', value=float(parts[21]) * _PAGESIZE)
            start_time_secs = float(parts[19]) / self._ticks
            start_time = core.GaugeMetricFamily(self._prefix + 'start_time_seconds',
                'Start time of the process since unix epoch in seconds.',
                value=start_time_secs + self._btime)
            utime = float(parts[11]) / self._ticks
            stime = float(parts[12]) / self._ticks
            cpu = core.CounterMetricFamily(self._prefix + 'cpu_seconds_total',
                'Total user and system CPU time spent in seconds.', value=utime + stime)
            result.extend([vmem, rss, start_time, cpu])
        except IOError:
            pass

        try:
            max_fds = None
            with open(os.path.join(pid, 'limits')) as limits:
                for line in limits:
                    if line.startswith('Max open file'):
                        max_fds = core.GaugeMetricFamily(self._prefix + 'max_fds',
                            'Maximum number of open file descriptors.',
                            value=float(line.split()[3]))
                        break
            open_fds = core.GaugeMetricFamily(self._prefix + 'open_fds',
                'Number of open file descriptors.',
                len(os.listdir(os.path.join(pid, 'fd'))))
            # Guard against a limits file without a 'Max open file' line;
            # previously that raised NameError out of collect() because
            # NameError is not caught by `except IOError`.
            result.append(open_fds)
            if max_fds is not None:
                result.append(max_fds)
        except IOError:
            pass

        return result


PROCESS_COLLECTOR = ProcessCollector()
"""Default ProcessCollector in default Registry REGISTRY."""
diff --git a/setup.py b/setup.py
new file mode 100644
index 0000000..14f3489
--- /dev/null
+++ b/setup.py
@@ -0,0 +1,24 @@
+import os
+from setuptools import setup
+
+setup(
+ name = "prometheus_client",
+ version = "0.0.13",
+ author = "Brian Brazil",
+ author_email = "brian.brazil@robustperception.io",
+ description = ("Python client for the Prometheus monitoring system."),
+ long_description = ("See https://github.com/prometheus/client_python/blob/master/README.md for documentation."),
+ license = "Apache Software License 2.0",
+ keywords = "prometheus monitoring instrumentation client",
+ url = "https://github.com/prometheus/client_python",
+ packages=['prometheus_client', 'prometheus_client.bridge'],
+ test_suite="tests",
+ classifiers=[
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Intended Audience :: Information Technology",
+ "Intended Audience :: System Administrators",
+ "Topic :: System :: Monitoring",
+ "License :: OSI Approved :: Apache Software License",
+ ],
+)
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/__init__.py
diff --git a/tests/graphite_bridge.py b/tests/graphite_bridge.py
new file mode 100644
index 0000000..0ac2420
--- /dev/null
+++ b/tests/graphite_bridge.py
@@ -0,0 +1,69 @@
+import unittest
+import threading
+try:
+ import SocketServer
+except ImportError:
+ import socketserver as SocketServer
+
+from prometheus_client import Counter, CollectorRegistry
+from prometheus_client.bridge.graphite import GraphiteBridge
+
+class FakeTime(object):
+ def time(self):
+ return 1434898897.5
+
+class TestGraphiteBridge(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+
+ self.data = ''
+ class TCPHandler(SocketServer.BaseRequestHandler):
+ def handle(s):
+ self.data = s.request.recv(1024)
+ server = SocketServer.TCPServer(('', 0), TCPHandler)
+ class ServingThread(threading.Thread):
+ def run(self):
+ server.handle_request()
+ server.socket.close()
+ self.t = ServingThread()
+ self.t.start()
+
+ # Explicitly use localhost as the target host, since connecting to 0.0.0.0 fails on Windows
+ address = ('localhost', server.server_address[1])
+ self.gb = GraphiteBridge(address, self.registry, _time=FakeTime())
+
+ def test_nolabels(self):
+ counter = Counter('c', 'help', registry=self.registry)
+ counter.inc()
+
+ self.gb.push()
+ self.t.join()
+
+ self.assertEqual(b'c 1.0 1434898897\n', self.data)
+
+ def test_labels(self):
+ labels = Counter('labels', 'help', ['a', 'b'], registry=self.registry)
+ labels.labels('c', 'd').inc()
+
+ self.gb.push()
+ self.t.join()
+
+ self.assertEqual(b'labels.a.c.b.d 1.0 1434898897\n', self.data)
+
+ def test_prefix(self):
+ labels = Counter('labels', 'help', ['a', 'b'], registry=self.registry)
+ labels.labels('c', 'd').inc()
+
+ self.gb.push(prefix = 'pre.fix')
+ self.t.join()
+
+ self.assertEqual(b'pre.fix.labels.a.c.b.d 1.0 1434898897\n', self.data)
+
+ def test_sanitizing(self):
+ labels = Counter('labels', 'help', ['a'], registry=self.registry)
+ labels.labels('c.:8').inc()
+
+ self.gb.push()
+ self.t.join()
+
+ self.assertEqual(b'labels.a.c__8 1.0 1434898897\n', self.data)
diff --git a/tests/proc/26231/fd/0 b/tests/proc/26231/fd/0
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/proc/26231/fd/0
diff --git a/tests/proc/26231/fd/1 b/tests/proc/26231/fd/1
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/proc/26231/fd/1
diff --git a/tests/proc/26231/fd/2 b/tests/proc/26231/fd/2
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/proc/26231/fd/2
diff --git a/tests/proc/26231/fd/3 b/tests/proc/26231/fd/3
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/proc/26231/fd/3
diff --git a/tests/proc/26231/fd/4 b/tests/proc/26231/fd/4
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tests/proc/26231/fd/4
diff --git a/tests/proc/26231/limits b/tests/proc/26231/limits
new file mode 100644
index 0000000..d477b9b
--- /dev/null
+++ b/tests/proc/26231/limits
@@ -0,0 +1,15 @@
+Limit Soft Limit Hard Limit Units
+Max cpu time unlimited unlimited seconds
+Max file size unlimited unlimited bytes
+Max data size unlimited unlimited bytes
+Max stack size 8388608 unlimited bytes
+Max core file size 0 unlimited bytes
+Max resident set unlimited unlimited bytes
+Max processes 62898 62898 processes
+Max open files 2048 4096 files
+Max locked memory 65536 65536 bytes
+Max address space unlimited unlimited bytes
+Max file locks unlimited unlimited locks
+Max pending signals 62898 62898 signals
+Max msgqueue size 819200 819200 bytes
+Max nice priority 0 0
diff --git a/tests/proc/26231/stat b/tests/proc/26231/stat
new file mode 100644
index 0000000..438aaa9
--- /dev/null
+++ b/tests/proc/26231/stat
@@ -0,0 +1 @@
+26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0
diff --git a/tests/proc/584/stat b/tests/proc/584/stat
new file mode 100644
index 0000000..af0d385
--- /dev/null
+++ b/tests/proc/584/stat
@@ -0,0 +1 @@
+1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0
diff --git a/tests/proc/stat b/tests/proc/stat
new file mode 100644
index 0000000..32fd6d2
--- /dev/null
+++ b/tests/proc/stat
@@ -0,0 +1,16 @@
+cpu 301854 612 111922 8979004 3552 2 3944 0 0 0
+cpu0 44490 19 21045 1087069 220 1 3410 0 0 0
+cpu1 47869 23 16474 1110787 591 0 46 0 0 0
+cpu2 46504 36 15916 1112321 441 0 326 0 0 0
+cpu3 47054 102 15683 1113230 533 0 60 0 0 0
+cpu4 28413 25 10776 1140321 217 0 8 0 0 0
+cpu5 29271 101 11586 1136270 672 0 30 0 0 0
+cpu6 29152 36 10276 1139721 319 0 29 0 0 0
+cpu7 29098 268 10164 1139282 555 0 31 0 0 0
+intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 00 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
+ctxt 38014093
+btime 1418183276
+processes 26442
+procs_running 2
+procs_blocked 0
+softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444
diff --git a/tests/test_client.py b/tests/test_client.py
new file mode 100644
index 0000000..83b15e8
--- /dev/null
+++ b/tests/test_client.py
@@ -0,0 +1,373 @@
+from __future__ import unicode_literals
+import os
+import threading
+import time
+import unittest
+
+from prometheus_client.core import *
+
+class TestCounter(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+ self.counter = Counter('c', 'help', registry=self.registry)
+
+ def test_increment(self):
+ self.assertEqual(0, self.registry.get_sample_value('c'))
+ self.counter.inc()
+ self.assertEqual(1, self.registry.get_sample_value('c'))
+ self.counter.inc(7)
+ self.assertEqual(8, self.registry.get_sample_value('c'))
+
+ def test_negative_increment_raises(self):
+ self.assertRaises(ValueError, self.counter.inc, -1)
+
+ def test_function_decorator(self):
+ @self.counter.count_exceptions(ValueError)
+ def f(r):
+ if r:
+ raise ValueError
+ else:
+ raise TypeError
+
+ try:
+ f(False)
+ except TypeError:
+ pass
+ self.assertEqual(0, self.registry.get_sample_value('c'))
+
+ try:
+ f(True)
+ except ValueError:
+ raised = True
+ self.assertEqual(1, self.registry.get_sample_value('c'))
+
+ def test_block_decorator(self):
+ with self.counter.count_exceptions():
+ pass
+ self.assertEqual(0, self.registry.get_sample_value('c'))
+
+ raised = False
+ try:
+ with self.counter.count_exceptions():
+ raise ValueError
+ except:
+ raised = True
+ self.assertTrue(raised)
+ self.assertEqual(1, self.registry.get_sample_value('c'))
+
+
+class TestGauge(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+ self.gauge = Gauge('g', 'help', registry=self.registry)
+
+ def test_gauge(self):
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+ self.gauge.inc()
+ self.assertEqual(1, self.registry.get_sample_value('g'))
+ self.gauge.dec(3)
+ self.assertEqual(-2, self.registry.get_sample_value('g'))
+ self.gauge.set(9)
+ self.assertEqual(9, self.registry.get_sample_value('g'))
+
+ def test_function_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+
+ @self.gauge.track_inprogress()
+ def f():
+ self.assertEqual(1, self.registry.get_sample_value('g'))
+
+ f()
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+
+ def test_block_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+ with self.gauge.track_inprogress():
+ self.assertEqual(1, self.registry.get_sample_value('g'))
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+
+ def test_gauge_function(self):
+ x = {}
+ self.gauge.set_function(lambda: len(x))
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+ self.gauge.inc()
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+ x['a'] = None
+ self.assertEqual(1, self.registry.get_sample_value('g'))
+
+    def test_time_function_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+
+ @self.gauge.time()
+ def f():
+ time.sleep(.001)
+
+ f()
+ self.assertNotEqual(0, self.registry.get_sample_value('g'))
+
+    def test_time_block_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('g'))
+ with self.gauge.time():
+ time.sleep(.001)
+ self.assertNotEqual(0, self.registry.get_sample_value('g'))
+
+
+class TestSummary(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+ self.summary = Summary('s', 'help', registry=self.registry)
+
+ def test_summary(self):
+ self.assertEqual(0, self.registry.get_sample_value('s_count'))
+ self.assertEqual(0, self.registry.get_sample_value('s_sum'))
+ self.summary.observe(10)
+ self.assertEqual(1, self.registry.get_sample_value('s_count'))
+ self.assertEqual(10, self.registry.get_sample_value('s_sum'))
+
+ def test_function_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('s_count'))
+
+ @self.summary.time()
+ def f():
+ pass
+
+ f()
+ self.assertEqual(1, self.registry.get_sample_value('s_count'))
+
+ def test_block_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('s_count'))
+ with self.summary.time():
+ pass
+ self.assertEqual(1, self.registry.get_sample_value('s_count'))
+
+
+class TestHistogram(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+ self.histogram = Histogram('h', 'help', registry=self.registry)
+ self.labels = Histogram('hl', 'help', ['l'], registry=self.registry)
+
+ def test_histogram(self):
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'}))
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '2.5'}))
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+ self.assertEqual(0, self.registry.get_sample_value('h_count'))
+ self.assertEqual(0, self.registry.get_sample_value('h_sum'))
+
+ self.histogram.observe(2)
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'}))
+ self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '2.5'}))
+ self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
+ self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+ self.assertEqual(1, self.registry.get_sample_value('h_count'))
+ self.assertEqual(2, self.registry.get_sample_value('h_sum'))
+
+ self.histogram.observe(2.5)
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '2.5'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_count'))
+ self.assertEqual(4.5, self.registry.get_sample_value('h_sum'))
+
+ self.histogram.observe(float("inf"))
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '1.0'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '2.5'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '5.0'}))
+ self.assertEqual(3, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+ self.assertEqual(3, self.registry.get_sample_value('h_count'))
+ self.assertEqual(float("inf"), self.registry.get_sample_value('h_sum'))
+
+ def test_setting_buckets(self):
+ h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2])
+ self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds)
+
+ h = Histogram('h', 'help', registry=None, buckets=[0, 1, 2, float("inf")])
+ self.assertEqual([0.0, 1.0, 2.0, float("inf")], h._upper_bounds)
+
+ self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[])
+ self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[float("inf")])
+ self.assertRaises(ValueError, Histogram, 'h', 'help', registry=None, buckets=[3, 1])
+
+ def test_labels(self):
+ self.labels.labels('a').observe(2)
+ self.assertEqual(0, self.registry.get_sample_value('hl_bucket', {'le': '1.0', 'l': 'a'}))
+ self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '2.5', 'l': 'a'}))
+ self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '5.0', 'l': 'a'}))
+ self.assertEqual(1, self.registry.get_sample_value('hl_bucket', {'le': '+Inf', 'l': 'a'}))
+ self.assertEqual(1, self.registry.get_sample_value('hl_count', {'l': 'a'}))
+ self.assertEqual(2, self.registry.get_sample_value('hl_sum', {'l': 'a'}))
+
+ def test_function_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('h_count'))
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+
+ @self.histogram.time()
+ def f():
+ pass
+
+ f()
+ self.assertEqual(1, self.registry.get_sample_value('h_count'))
+ self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+
+ def test_block_decorator(self):
+ self.assertEqual(0, self.registry.get_sample_value('h_count'))
+ self.assertEqual(0, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+ with self.histogram.time():
+ pass
+ self.assertEqual(1, self.registry.get_sample_value('h_count'))
+ self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+
+
+class TestMetricWrapper(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+ self.counter = Counter('c', 'help', labelnames=['l'], registry=self.registry)
+ self.two_labels = Counter('two', 'help', labelnames=['a', 'b'], registry=self.registry)
+
+ def test_child(self):
+ self.counter.labels('x').inc()
+ self.assertEqual(1, self.registry.get_sample_value('c', {'l': 'x'}))
+ self.two_labels.labels('x', 'y').inc(2)
+ self.assertEqual(2, self.registry.get_sample_value('two', {'a': 'x', 'b': 'y'}))
+
+ def test_remove(self):
+ self.counter.labels('x').inc()
+ self.counter.labels('y').inc(2)
+ self.assertEqual(1, self.registry.get_sample_value('c', {'l': 'x'}))
+ self.assertEqual(2, self.registry.get_sample_value('c', {'l': 'y'}))
+ self.counter.remove('x')
+ self.assertEqual(None, self.registry.get_sample_value('c', {'l': 'x'}))
+ self.assertEqual(2, self.registry.get_sample_value('c', {'l': 'y'}))
+
+ def test_incorrect_label_count_raises(self):
+ self.assertRaises(ValueError, self.counter.labels)
+ self.assertRaises(ValueError, self.counter.labels, 'a', 'b')
+ self.assertRaises(ValueError, self.counter.remove)
+ self.assertRaises(ValueError, self.counter.remove, 'a', 'b')
+
+ def test_labels_coerced_to_string(self):
+ self.counter.labels(None).inc()
+ self.counter.labels({'l': None}).inc()
+ self.assertEqual(2, self.registry.get_sample_value('c', {'l': 'None'}))
+
+ self.counter.remove(None)
+ self.assertEqual(None, self.registry.get_sample_value('c', {'l': 'None'}))
+
+ def test_non_string_labels_raises(self):
+ class Test(object):
+ __str__ = None
+ self.assertRaises(TypeError, self.counter.labels, Test())
+ self.assertRaises(TypeError, self.counter.labels, {'l': Test()})
+
+ def test_namespace_subsystem_concatenated(self):
+ c = Counter('c', 'help', namespace='a', subsystem='b', registry=self.registry)
+ c.inc()
+ self.assertEqual(1, self.registry.get_sample_value('a_b_c'))
+
+ def test_labels_by_dict(self):
+ self.counter.labels({'l': 'x'}).inc()
+ self.assertEqual(1, self.registry.get_sample_value('c', {'l': 'x'}))
+ self.assertRaises(ValueError, self.counter.labels, {'l': 'x', 'm': 'y'})
+ self.assertRaises(ValueError, self.counter.labels, {'m': 'y'})
+ self.assertRaises(ValueError, self.counter.labels, {})
+ self.two_labels.labels({'a': 'x', 'b': 'y'}).inc()
+ self.assertEqual(1, self.registry.get_sample_value('two', {'a': 'x', 'b': 'y'}))
+ self.assertRaises(ValueError, self.two_labels.labels, {'a': 'x', 'b': 'y', 'c': 'z'})
+ self.assertRaises(ValueError, self.two_labels.labels, {'a': 'x', 'c': 'z'})
+ self.assertRaises(ValueError, self.two_labels.labels, {'b': 'y', 'c': 'z'})
+ self.assertRaises(ValueError, self.two_labels.labels, {'c': 'z'})
+ self.assertRaises(ValueError, self.two_labels.labels, {})
+
+ def test_invalid_names_raise(self):
+ self.assertRaises(ValueError, Counter, '', 'help')
+ self.assertRaises(ValueError, Counter, '^', 'help')
+ self.assertRaises(ValueError, Counter, '', 'help', namespace='&')
+ self.assertRaises(ValueError, Counter, '', 'help', subsystem='(')
+ self.assertRaises(ValueError, Counter, 'c', '', labelnames=['^'])
+ self.assertRaises(ValueError, Counter, 'c', '', labelnames=['__reserved'])
+ self.assertRaises(ValueError, Summary, 'c', '', labelnames=['quantile'])
+
+
+class TestMetricFamilies(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+
+ def custom_collector(self, metric_family):
+ class CustomCollector(object):
+ def collect(self):
+ return [metric_family]
+ self.registry.register(CustomCollector())
+
+ def test_counter(self):
+ self.custom_collector(CounterMetricFamily('c', 'help', value=1))
+ self.assertEqual(1, self.registry.get_sample_value('c', {}))
+
+ def test_counter_labels(self):
+ cmf = CounterMetricFamily('c', 'help', labels=['a', 'c'])
+ cmf.add_metric(['b', 'd'], 2)
+ self.custom_collector(cmf)
+ self.assertEqual(2, self.registry.get_sample_value('c', {'a': 'b', 'c': 'd'}))
+
+ def test_gauge(self):
+ self.custom_collector(GaugeMetricFamily('g', 'help', value=1))
+ self.assertEqual(1, self.registry.get_sample_value('g', {}))
+
+ def test_gauge_labels(self):
+ cmf = GaugeMetricFamily('g', 'help', labels=['a'])
+ cmf.add_metric(['b'], 2)
+ self.custom_collector(cmf)
+ self.assertEqual(2, self.registry.get_sample_value('g', {'a':'b'}))
+
+ def test_summary(self):
+ self.custom_collector(SummaryMetricFamily('s', 'help', count_value=1, sum_value=2))
+ self.assertEqual(1, self.registry.get_sample_value('s_count', {}))
+ self.assertEqual(2, self.registry.get_sample_value('s_sum', {}))
+
+ def test_summary_labels(self):
+ cmf = SummaryMetricFamily('s', 'help', labels=['a'])
+ cmf.add_metric(['b'], count_value=1, sum_value=2)
+ self.custom_collector(cmf)
+ self.assertEqual(1, self.registry.get_sample_value('s_count', {'a': 'b'}))
+ self.assertEqual(2, self.registry.get_sample_value('s_sum', {'a': 'b'}))
+
+ def test_histogram(self):
+ self.custom_collector(HistogramMetricFamily('h', 'help', buckets=[('0', 1), ('+Inf', 2)], sum_value=3))
+ self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'le': '0'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'le': '+Inf'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_count', {}))
+ self.assertEqual(3, self.registry.get_sample_value('h_sum', {}))
+
+ def test_histogram_labels(self):
+ cmf = HistogramMetricFamily('h', 'help', labels=['a'])
+ cmf.add_metric(['b'], buckets=[('0', 1), ('+Inf', 2)], sum_value=3)
+ self.custom_collector(cmf)
+ self.assertEqual(1, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '0'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_bucket', {'a': 'b', 'le': '+Inf'}))
+ self.assertEqual(2, self.registry.get_sample_value('h_count', {'a': 'b'}))
+ self.assertEqual(3, self.registry.get_sample_value('h_sum', {'a': 'b'}))
+
+ def test_bad_constructors(self):
+ self.assertRaises(ValueError, CounterMetricFamily, 'c', 'help', value=1, labels=[])
+ self.assertRaises(ValueError, CounterMetricFamily, 'c', 'help', value=1, labels=['a'])
+
+ self.assertRaises(ValueError, GaugeMetricFamily, 'g', 'help', value=1, labels=[])
+ self.assertRaises(ValueError, GaugeMetricFamily, 'g', 'help', value=1, labels=['a'])
+
+ self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', sum_value=1)
+ self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1)
+ self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1, labels=['a'])
+ self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', sum_value=1, labels=['a'])
+ self.assertRaises(ValueError, SummaryMetricFamily, 's', 'help', count_value=1, sum_value=1, labels=['a'])
+
+ self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', sum_value=1)
+ self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={})
+ self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', sum_value=1, labels=['a'])
+ self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}, labels=['a'])
+ self.assertRaises(ValueError, HistogramMetricFamily, 'h', 'help', buckets={}, sum_value=1, labels=['a'])
+ self.assertRaises(KeyError, HistogramMetricFamily, 'h', 'help', buckets={}, sum_value=1)
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_exposition.py b/tests/test_exposition.py
new file mode 100644
index 0000000..c5fd1fe
--- /dev/null
+++ b/tests/test_exposition.py
@@ -0,0 +1,167 @@
+from __future__ import unicode_literals
+import os
+import threading
+import time
+import unittest
+
+
+from prometheus_client import Gauge, Counter, Summary, Histogram, Metric
+from prometheus_client import CollectorRegistry, generate_latest
+from prometheus_client import push_to_gateway, pushadd_to_gateway, delete_from_gateway
+from prometheus_client import CONTENT_TYPE_LATEST, instance_ip_grouping_key
+
+try:
+ from BaseHTTPServer import BaseHTTPRequestHandler
+ from BaseHTTPServer import HTTPServer
+except ImportError:
+ # Python 3
+ from http.server import BaseHTTPRequestHandler
+ from http.server import HTTPServer
+
+
+class TestGenerateText(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+
+ def test_counter(self):
+ c = Counter('cc', 'A counter', registry=self.registry)
+ c.inc()
+ self.assertEqual(b'# HELP cc A counter\n# TYPE cc counter\ncc 1.0\n', generate_latest(self.registry))
+
+ def test_gauge(self):
+ g = Gauge('gg', 'A gauge', registry=self.registry)
+ g.set(17)
+ self.assertEqual(b'# HELP gg A gauge\n# TYPE gg gauge\ngg 17.0\n', generate_latest(self.registry))
+
+ def test_summary(self):
+ s = Summary('ss', 'A summary', ['a', 'b'], registry=self.registry)
+ s.labels('c', 'd').observe(17)
+ self.assertEqual(b'# HELP ss A summary\n# TYPE ss summary\nss_count{a="c",b="d"} 1.0\nss_sum{a="c",b="d"} 17.0\n', generate_latest(self.registry))
+
+ def test_histogram(self):
+ s = Histogram('hh', 'A histogram', registry=self.registry)
+ s.observe(0.05)
+ self.assertEqual(b'''# HELP hh A histogram
+# TYPE hh histogram
+hh_bucket{le="0.005"} 0.0
+hh_bucket{le="0.01"} 0.0
+hh_bucket{le="0.025"} 0.0
+hh_bucket{le="0.05"} 1.0
+hh_bucket{le="0.075"} 1.0
+hh_bucket{le="0.1"} 1.0
+hh_bucket{le="0.25"} 1.0
+hh_bucket{le="0.5"} 1.0
+hh_bucket{le="0.75"} 1.0
+hh_bucket{le="1.0"} 1.0
+hh_bucket{le="2.5"} 1.0
+hh_bucket{le="5.0"} 1.0
+hh_bucket{le="7.5"} 1.0
+hh_bucket{le="10.0"} 1.0
+hh_bucket{le="+Inf"} 1.0
+hh_count 1.0
+hh_sum 0.05
+''', generate_latest(self.registry))
+
+ def test_unicode(self):
+ c = Counter('cc', '\u4500', ['l'], registry=self.registry)
+ c.labels('\u4500').inc()
+ self.assertEqual(b'# HELP cc \xe4\x94\x80\n# TYPE cc counter\ncc{l="\xe4\x94\x80"} 1.0\n', generate_latest(self.registry))
+
+ def test_escaping(self):
+ c = Counter('cc', 'A\ncount\\er', ['a'], registry=self.registry)
+ c.labels('\\x\n"').inc(1)
+ self.assertEqual(b'# HELP cc A\\ncount\\\\er\n# TYPE cc counter\ncc{a="\\\\x\\n\\""} 1.0\n', generate_latest(self.registry))
+
+ def test_nonnumber(self):
+ class MyNumber():
+ def __repr__(self):
+ return "MyNumber(123)"
+ def __float__(self):
+ return 123.0
+ class MyCollector():
+ def collect(self):
+ metric = Metric("nonnumber", "Non number", 'untyped')
+ metric.add_sample("nonnumber", {}, MyNumber())
+ yield metric
+ self.registry.register(MyCollector())
+ self.assertEqual(b'# HELP nonnumber Non number\n# TYPE nonnumber untyped\nnonnumber 123.0\n', generate_latest(self.registry))
+
+
+class TestPushGateway(unittest.TestCase):
+ def setUp(self):
+ self.registry = CollectorRegistry()
+ self.counter = Gauge('g', 'help', registry=self.registry)
+ self.requests = requests = []
+ class TestHandler(BaseHTTPRequestHandler):
+ def do_PUT(self):
+ self.send_response(201)
+ length = int(self.headers['content-length'])
+ requests.append((self, self.rfile.read(length)))
+
+ do_POST = do_PUT
+ do_DELETE = do_PUT
+
+ httpd = HTTPServer(('', 0), TestHandler)
+ self.address = ':'.join([str(x) for x in httpd.server_address])
+ class TestServer(threading.Thread):
+ def run(self):
+ httpd.handle_request()
+ self.server = TestServer()
+ self.server.daemon = True
+ self.server.start()
+
+ def test_push(self):
+ push_to_gateway(self.address, "my_job", self.registry)
+ self.assertEqual(self.requests[0][0].command, 'PUT')
+ self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job')
+ self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
+ self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n')
+
+ def test_push_with_groupingkey(self):
+ push_to_gateway(self.address, "my_job", self.registry, {'a': 9})
+ self.assertEqual(self.requests[0][0].command, 'PUT')
+ self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9')
+ self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
+ self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n')
+
+ def test_push_with_complex_groupingkey(self):
+ push_to_gateway(self.address, "my_job", self.registry, {'a': 9, 'b': 'a/ z'})
+ self.assertEqual(self.requests[0][0].command, 'PUT')
+ self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9/b/a%2F+z')
+ self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
+ self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n')
+
+ def test_pushadd(self):
+ pushadd_to_gateway(self.address, "my_job", self.registry)
+ self.assertEqual(self.requests[0][0].command, 'POST')
+ self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job')
+ self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
+ self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n')
+
+ def test_pushadd_with_groupingkey(self):
+ pushadd_to_gateway(self.address, "my_job", self.registry, {'a': 9})
+ self.assertEqual(self.requests[0][0].command, 'POST')
+ self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9')
+ self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
+ self.assertEqual(self.requests[0][1], b'# HELP g help\n# TYPE g gauge\ng 0.0\n')
+
+ def test_delete(self):
+ delete_from_gateway(self.address, "my_job")
+ self.assertEqual(self.requests[0][0].command, 'DELETE')
+ self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job')
+ self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
+ self.assertEqual(self.requests[0][1], b'')
+
+ def test_delete_with_groupingkey(self):
+ delete_from_gateway(self.address, "my_job", {'a': 9})
+ self.assertEqual(self.requests[0][0].command, 'DELETE')
+ self.assertEqual(self.requests[0][0].path, '/metrics/job/my_job/a/9')
+ self.assertEqual(self.requests[0][0].headers.get('content-type'), CONTENT_TYPE_LATEST)
+ self.assertEqual(self.requests[0][1], b'')
+
+ def test_instance_ip_grouping_key(self):
+ self.assertTrue('' != instance_ip_grouping_key()['instance'])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/tests/test_parser.py b/tests/test_parser.py
new file mode 100644
index 0000000..f207a22
--- /dev/null
+++ b/tests/test_parser.py
@@ -0,0 +1,188 @@
+from __future__ import unicode_literals
+
+import unittest
+
+from prometheus_client.core import *
+from prometheus_client.exposition import *
+from prometheus_client.parser import *
+
+
+class TestParse(unittest.TestCase):
+
+ def test_simple_counter(self):
+ families = text_string_to_metric_families("""# TYPE a counter
+# HELP a help
+a 1
+""")
+ self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families))
+
+ def test_simple_gauge(self):
+ families = text_string_to_metric_families("""# TYPE a gauge
+# HELP a help
+a 1
+""")
+ self.assertEqual([GaugeMetricFamily("a", "help", value=1)], list(families))
+
+ def test_simple_summary(self):
+ families = text_string_to_metric_families("""# TYPE a summary
+# HELP a help
+a_count 1
+a_sum 2
+""")
+
+ def test_summary_quantiles(self):
+ families = text_string_to_metric_families("""# TYPE a summary
+# HELP a help
+a_count 1
+a_sum 2
+a{quantile="0.5"} 0.7
+""")
+ # The Python client doesn't support quantiles, but we
+ # still need to be able to parse them.
+ metric_family = SummaryMetricFamily("a", "help", count_value=1, sum_value=2)
+ metric_family.add_sample("a", {"quantile": "0.5"}, 0.7)
+ self.assertEqual([metric_family], list(families))
+
+ def test_simple_histogram(self):
+ families = text_string_to_metric_families("""# TYPE a histogram
+# HELP a help
+a_bucket{le="1"} 0
+a_bucket{le="+Inf"} 3
+a_count 3
+a_sum 2
+""")
+ self.assertEqual([HistogramMetricFamily("a", "help", sum_value=2, buckets=[("1", 0.0), ("+Inf", 3.0)])], list(families))
+
+ def test_no_metadata(self):
+ families = text_string_to_metric_families("""a 1
+""")
+ metric_family = Metric("a", "", "untyped")
+ metric_family.add_sample("a", {}, 1)
+ self.assertEqual([metric_family], list(families))
+
+ def test_type_help_switched(self):
+ families = text_string_to_metric_families("""# HELP a help
+# TYPE a counter
+a 1
+""")
+ self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families))
+
+ def test_blank_lines_and_comments(self):
+ families = text_string_to_metric_families("""
+# TYPE a counter
+# FOO a
+# BAR b
+# HELP a help
+
+a 1
+""")
+ self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families))
+
+ def test_tabs(self):
+ families = text_string_to_metric_families("""#\tTYPE\ta\tcounter
+#\tHELP\ta\thelp
+a\t1
+""")
+ self.assertEqual([CounterMetricFamily("a", "help", value=1)], list(families))
+
+ def test_empty_help(self):
+ families = text_string_to_metric_families("""# TYPE a counter
+# HELP a
+a 1
+""")
+ self.assertEqual([CounterMetricFamily("a", "", value=1)], list(families))
+
+ def test_labels_and_infinite(self):
+ families = text_string_to_metric_families("""# TYPE a counter
+# HELP a help
+a{foo="bar"} +Inf
+a{foo="baz"} -Inf
+""")
+ metric_family = CounterMetricFamily("a", "help", labels=["foo"])
+ metric_family.add_metric(["bar"], core._INF)
+ metric_family.add_metric(["baz"], core._MINUS_INF)
+ self.assertEqual([metric_family], list(families))
+
+ def test_spaces(self):
+ families = text_string_to_metric_families("""# TYPE a counter
+# HELP a help
+a{ foo = "bar" } 1
+a\t\t{\t\tfoo\t\t=\t\t"baz"\t\t}\t\t2
+""")
+ metric_family = CounterMetricFamily("a", "help", labels=["foo"])
+ metric_family.add_metric(["bar"], 1)
+ metric_family.add_metric(["baz"], 2)
+ self.assertEqual([metric_family], list(families))
+
+ def test_nan(self):
+ families = text_string_to_metric_families("""a NaN
+""")
+ # Can't use a simple comparison as nan != nan.
+ self.assertTrue(math.isnan(list(families)[0].samples[0][2]))
+
+ def test_escaping(self):
+ families = text_string_to_metric_families("""# TYPE a counter
+# HELP a he\\n\\\\l\\tp
+a{foo="b\\"a\\nr"} 1
+a{foo="b\\\\a\\z"} 2
+""")
+ metric_family = CounterMetricFamily("a", "he\n\\l\\tp", labels=["foo"])
+ metric_family.add_metric(["b\"a\nr"], 1)
+ metric_family.add_metric(["b\\a\\z"], 2)
+ self.assertEqual([metric_family], list(families))
+
+ def test_roundtrip(self):
+ text = """# HELP go_gc_duration_seconds A summary of the GC invocation durations.
+# TYPE go_gc_duration_seconds summary
+go_gc_duration_seconds{quantile="0"} 0.013300656000000001
+go_gc_duration_seconds{quantile="0.25"} 0.013638736
+go_gc_duration_seconds{quantile="0.5"} 0.013759906
+go_gc_duration_seconds{quantile="0.75"} 0.013962066
+go_gc_duration_seconds{quantile="1"} 0.021383540000000003
+go_gc_duration_seconds_sum 56.12904785
+go_gc_duration_seconds_count 7476.0
+# HELP go_goroutines Number of goroutines that currently exist.
+# TYPE go_goroutines gauge
+go_goroutines 166.0
+# HELP prometheus_local_storage_indexing_batch_duration_milliseconds Quantiles for batch indexing duration in milliseconds.
+# TYPE prometheus_local_storage_indexing_batch_duration_milliseconds summary
+prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.5"} NaN
+prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.9"} NaN
+prometheus_local_storage_indexing_batch_duration_milliseconds{quantile="0.99"} NaN
+prometheus_local_storage_indexing_batch_duration_milliseconds_sum 871.5665949999999
+prometheus_local_storage_indexing_batch_duration_milliseconds_count 229.0
+# HELP process_cpu_seconds_total Total user and system CPU time spent in seconds.
+# TYPE process_cpu_seconds_total counter
+process_cpu_seconds_total 29323.4
+# HELP process_virtual_memory_bytes Virtual memory size in bytes.
+# TYPE process_virtual_memory_bytes gauge
+process_virtual_memory_bytes 2478268416.0
+# HELP prometheus_build_info A metric with a constant '1' value labeled by version, revision, and branch from which Prometheus was built.
+# TYPE prometheus_build_info gauge
+prometheus_build_info{branch="HEAD",revision="ef176e5",version="0.16.0rc1"} 1.0
+# HELP prometheus_local_storage_chunk_ops_total The total number of chunk operations by their type.
+# TYPE prometheus_local_storage_chunk_ops_total counter
+prometheus_local_storage_chunk_ops_total{type="clone"} 28.0
+prometheus_local_storage_chunk_ops_total{type="create"} 997844.0
+prometheus_local_storage_chunk_ops_total{type="drop"} 1345758.0
+prometheus_local_storage_chunk_ops_total{type="load"} 1641.0
+prometheus_local_storage_chunk_ops_total{type="persist"} 981408.0
+prometheus_local_storage_chunk_ops_total{type="pin"} 32662.0
+prometheus_local_storage_chunk_ops_total{type="transcode"} 980180.0
+prometheus_local_storage_chunk_ops_total{type="unpin"} 32662.0
+"""
+ families = list(text_string_to_metric_families(text))
+
+ class TextCollector(object):
+ def collect(self):
+ return families
+
+
+ registry = CollectorRegistry()
+ registry.register(TextCollector())
+ self.assertEqual(text.encode('utf-8'), generate_latest(registry))
+
+
+
+if __name__ == '__main__':
+    # Allow running this test module directly: python tests/test_parser.py
+    unittest.main()
diff --git a/tests/test_process_collector.py b/tests/test_process_collector.py
new file mode 100644
index 0000000..6455d43
--- /dev/null
+++ b/tests/test_process_collector.py
@@ -0,0 +1,62 @@
+from __future__ import unicode_literals
+import os
+import unittest
+
+
+from prometheus_client import CollectorRegistry, ProcessCollector
+
+class TestProcessCollector(unittest.TestCase):
+    # Exercises ProcessCollector against the canned /proc fixture tree
+    # checked in under tests/proc (see tests/proc/26231 and tests/proc/584).
+    def setUp(self):
+        self.registry = CollectorRegistry()
+        # Path to the fixture directory shipped next to this file.
+        self.test_proc = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'proc')
+
+    def test_working(self):
+        collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry)
+        # Pin the clock-tick rate so the expected values are deterministic
+        # regardless of the host's real tick rate -- presumably _ticks is
+        # what the collector divides jiffies by; confirm against collector.
+        collector._ticks = 100
+
+        self.assertEqual(17.21, self.registry.get_sample_value('process_cpu_seconds_total'))
+        self.assertEqual(56274944.0, self.registry.get_sample_value('process_virtual_memory_bytes'))
+        self.assertEqual(8114176, self.registry.get_sample_value('process_resident_memory_bytes'))
+        self.assertEqual(1418184099.75, self.registry.get_sample_value('process_start_time_seconds'))
+        self.assertEqual(2048.0, self.registry.get_sample_value('process_max_fds'))
+        self.assertEqual(5.0, self.registry.get_sample_value('process_open_fds'))
+        # A metric name that was never registered yields None.
+        self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace'))
+
+    def test_namespace(self):
+        # With namespace='n', every metric name gains an 'n_' prefix and
+        # the unprefixed names are absent.
+        collector = ProcessCollector(proc=self.test_proc, pid=lambda: 26231, registry=self.registry, namespace='n')
+        collector._ticks = 100
+
+        self.assertEqual(17.21, self.registry.get_sample_value('n_process_cpu_seconds_total'))
+        self.assertEqual(56274944.0, self.registry.get_sample_value('n_process_virtual_memory_bytes'))
+        self.assertEqual(8114176, self.registry.get_sample_value('n_process_resident_memory_bytes'))
+        self.assertEqual(1418184099.75, self.registry.get_sample_value('n_process_start_time_seconds'))
+        self.assertEqual(2048.0, self.registry.get_sample_value('n_process_max_fds'))
+        self.assertEqual(5.0, self.registry.get_sample_value('n_process_open_fds'))
+        self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total'))
+
+    def test_working_584(self):
+        # The pid callback may return a string with trailing whitespace;
+        # pid 584's fixture has no limits or fd entries, so the fd metrics
+        # come back as None.
+        collector = ProcessCollector(proc=self.test_proc, pid=lambda: "584\n", registry=self.registry)
+        collector._ticks = 100
+
+        self.assertEqual(0.0, self.registry.get_sample_value('process_cpu_seconds_total'))
+        self.assertEqual(10395648.0, self.registry.get_sample_value('process_virtual_memory_bytes'))
+        self.assertEqual(634880, self.registry.get_sample_value('process_resident_memory_bytes'))
+        self.assertEqual(1418291667.75, self.registry.get_sample_value('process_start_time_seconds'))
+        self.assertEqual(None, self.registry.get_sample_value('process_max_fds'))
+        self.assertEqual(None, self.registry.get_sample_value('process_open_fds'))
+
+    def test_working_fake_pid(self):
+        # A pid with no fixture directory must not raise; every metric
+        # simply reports no sample (None).
+        collector = ProcessCollector(proc=self.test_proc, pid=lambda: 123, registry=self.registry)
+        collector._ticks = 100
+
+        self.assertEqual(None, self.registry.get_sample_value('process_cpu_seconds_total'))
+        self.assertEqual(None, self.registry.get_sample_value('process_virtual_memory_bytes'))
+        self.assertEqual(None, self.registry.get_sample_value('process_resident_memory_bytes'))
+        self.assertEqual(None, self.registry.get_sample_value('process_start_time_seconds'))
+        self.assertEqual(None, self.registry.get_sample_value('process_max_fds'))
+        self.assertEqual(None, self.registry.get_sample_value('process_open_fds'))
+        self.assertEqual(None, self.registry.get_sample_value('process_fake_namespace'))
+
+if __name__ == '__main__':
+    # Allow running this test module directly: python tests/test_process_collector.py
+    unittest.main()