Unverified Commit b20ed77d authored by Tobias Schmidt's avatar Tobias Schmidt Committed by GitHub

Merge pull request #851 from prometheus/versioned-docs

Versioned documentation
parents b01e377b b07bb937
...@@ -5,7 +5,8 @@ output/ ...@@ -5,7 +5,8 @@ output/
# Temporary file directory # Temporary file directory
tmp/ tmp/
downloads/ /downloads/
/repositories/
# Crash Log # Crash Log
crash.log crash.log
......
DOWNLOADS := prometheus alertmanager blackbox_exporter consul_exporter graphite_exporter haproxy_exporter memcached_exporter mysqld_exporter node_exporter pushgateway statsd_exporter DOWNLOADS := prometheus alertmanager blackbox_exporter consul_exporter graphite_exporter haproxy_exporter memcached_exporter mysqld_exporter node_exporter pushgateway statsd_exporter
clean: clean:
rm -rf output downloads rm -rf output downloads repositories
compile: clean downloads compile:
bundle exec nanoc bundle exec nanoc
deploy: github_pages_export github_pages_push deploy: clean downloads compile github_pages_export github_pages_push
downloads: $(DOWNLOADS:%=downloads/%/repo.json) $(DOWNLOADS:%=downloads/%/releases.json) downloads: $(DOWNLOADS:%=downloads/%/repo.json) $(DOWNLOADS:%=downloads/%/releases.json)
...@@ -20,7 +20,7 @@ downloads/%/releases.json: ...@@ -20,7 +20,7 @@ downloads/%/releases.json:
@echo "curl -sf -H 'Accept: application/vnd.github.v3+json' <GITHUB_AUTHENTICATION> https://api.github.com/repos/prometheus/$*/releases > $@" @echo "curl -sf -H 'Accept: application/vnd.github.v3+json' <GITHUB_AUTHENTICATION> https://api.github.com/repos/prometheus/$*/releases > $@"
@curl -sf -H 'Accept: application/vnd.github.v3+json' $(GITHUB_AUTHENTICATION) https://api.github.com/repos/prometheus/$*/releases > $@ @curl -sf -H 'Accept: application/vnd.github.v3+json' $(GITHUB_AUTHENTICATION) https://api.github.com/repos/prometheus/$*/releases > $@
github_pages_export: compile github_pages_export:
cd output && \ cd output && \
mkdir -p .github && \ mkdir -p .github && \
echo "This repository is auto-generated. You have to open pull requests against https://github.com/prometheus/docs instead." > .github/PULL_REQUEST_TEMPLATE.md && \ echo "This repository is auto-generated. You have to open pull requests against https://github.com/prometheus/docs instead." > .github/PULL_REQUEST_TEMPLATE.md && \
......
...@@ -13,6 +13,13 @@ ...@@ -13,6 +13,13 @@
# item, use the pattern “/about/*/”; “/about/*” will also select the parent, # item, use the pattern “/about/*/”; “/about/*” will also select the parent,
# because “*” matches zero or more characters. # because “*” matches zero or more characters.
compile '/_redirects/' do
end
route '/_redirects/' do
'/_redirects'
end
compile '/assets/*' do compile '/assets/*' do
end end
...@@ -46,6 +53,8 @@ compile '*' do ...@@ -46,6 +53,8 @@ compile '*' do
# Don't filter; this should propagate verbatim to the output GitHub repository. # Don't filter; this should propagate verbatim to the output GitHub repository.
elsif item[:extension] == 'md' elsif item[:extension] == 'md'
filter :redcarpet, options: {filter_html: true, autolink: true, no_intraemphasis: true, fenced_code_blocks: true, gh_blockcode: true, tables: true}, renderer_options: {with_toc_data: true} filter :redcarpet, options: {filter_html: true, autolink: true, no_intraemphasis: true, fenced_code_blocks: true, gh_blockcode: true, tables: true}, renderer_options: {with_toc_data: true}
filter :normalize_links, item[:repo_docs] if item[:repo_docs]
filter :outdated_content, item[:repo_docs] if item[:repo_docs] && item[:repo_docs][:outdated]
filter :add_anchors filter :add_anchors
filter :bootstrappify filter :bootstrappify
filter :admonition filter :admonition
......
# Redirects for old site structure.
/docs/introduction/getting_started/ /docs/prometheus/latest/getting_started/
/docs/introduction/install/ /docs/prometheus/latest/installation/
/docs/operating/configuration/ /docs/prometheus/latest/configuration/configuration/
/docs/operating/federation/ /docs/prometheus/latest/federation/
/docs/operating/storage/ /docs/prometheus/latest/storage/
/docs/querying/api/ /docs/prometheus/latest/querying/api/
/docs/querying/basics/ /docs/prometheus/latest/querying/basics/
/docs/querying/examples/ /docs/prometheus/latest/querying/examples/
/docs/querying/functions/ /docs/prometheus/latest/querying/functions/
/docs/querying/operators/ /docs/prometheus/latest/querying/operators/
/docs/querying/rules/ /docs/prometheus/latest/configuration/recording_rules/
/docs/visualization/template_examples/ /docs/prometheus/latest/configuration/template_examples/
/docs/visualization/template_reference/ /docs/prometheus/latest/configuration/template_reference/
# Redirects for sections.
# TODO(ts): Auto-generate from menu.
/docs/introduction/ /docs/introduction/overview/ 302
/docs/concepts/ /docs/concepts/data_model/ 302
/docs/prometheus/ /docs/prometheus/latest/getting_started/ 302
/docs/visualization/ /docs/visualization/browser/ 302
/docs/instrumenting/ /docs/instrumenting/clientlibs/ 302
/docs/operating/ /docs/operating/security/ 302
/docs/alerting/ /docs/alerting/overview/ 302
/docs/practices/ /docs/practices/naming/ 302
# Redirects for index.hml pages.
/:foo/index.html /:foo/
/:foo/:bar/index.html /:foo/:bar/
/:foo/:bar/:baz/index.html /:foo/:bar/:baz/
/:foo/:bar/:baz/:qux/index.html /:foo/:bar/:baz/:qux/
/:foo/:bar/:baz/:qux/:quux/index.html /:foo/:bar/:baz/:qux/:quux/
...@@ -16,7 +16,7 @@ real-world systems. ...@@ -16,7 +16,7 @@ real-world systems.
All hope is not lost though. There are many common anomalies which you can All hope is not lost though. There are many common anomalies which you can
detect and handle with custom-built rules. The Prometheus [query detect and handle with custom-built rules. The Prometheus [query
language](../../../../../docs/querying/basics/) gives you the tools to discover language](/docs/prometheus/latest/querying/basics/) gives you the tools to discover
these anomalies while avoiding false positives. these anomalies while avoiding false positives.
<!-- more --> <!-- more -->
...@@ -28,7 +28,7 @@ performing as well as the rest, such as responding with increased latency. ...@@ -28,7 +28,7 @@ performing as well as the rest, such as responding with increased latency.
Let us say that we have a metric `instance:latency_seconds:mean5m` representing the Let us say that we have a metric `instance:latency_seconds:mean5m` representing the
average query latency for each instance of a service, calculated via a average query latency for each instance of a service, calculated via a
[recording rule](/docs/querying/rules/) from a [recording rule](/docs/prometheus/latest/querying/rules/) from a
[Summary](/docs/concepts/metric_types/#summary) metric. [Summary](/docs/concepts/metric_types/#summary) metric.
A simple way to start would be to look for instances with a latency A simple way to start would be to look for instances with a latency
...@@ -116,7 +116,6 @@ route: ...@@ -116,7 +116,6 @@ route:
receiver: restart_webhook receiver: restart_webhook
``` ```
## Summary ## Summary
The Prometheus query language allows for rich processing of your monitoring The Prometheus query language allows for rich processing of your monitoring
......
...@@ -142,7 +142,7 @@ to finish within a reasonable amount of time. This happened to us when we wanted ...@@ -142,7 +142,7 @@ to finish within a reasonable amount of time. This happened to us when we wanted
to graph the top 5 utilized links out of ~18,000 in total. While the query to graph the top 5 utilized links out of ~18,000 in total. While the query
worked, it would take roughly the amount of time we set our timeout limit to, worked, it would take roughly the amount of time we set our timeout limit to,
meaning it was both slow and flaky. We decided to use Prometheus' [recording meaning it was both slow and flaky. We decided to use Prometheus' [recording
rules](/docs/querying/rules/) for precomputing heavy queries. rules](/docs/prometheus/latest/querying/rules/) for precomputing heavy queries.
precomputed_link_utilization_percent = rate(ifHCOutOctets{layer!='access'}[10m])*8/1000/1000 precomputed_link_utilization_percent = rate(ifHCOutOctets{layer!='access'}[10m])*8/1000/1000
/ on (device,interface,alias) / on (device,interface,alias)
......
...@@ -128,7 +128,7 @@ However, if your dashboard query doesn't only touch a single time series but ...@@ -128,7 +128,7 @@ However, if your dashboard query doesn't only touch a single time series but
aggregates over thousands of time series, the number of chunks to access aggregates over thousands of time series, the number of chunks to access
multiplies accordingly, and the overhead of the sequential scan will become multiplies accordingly, and the overhead of the sequential scan will become
dominant. (Such queries are frowned upon, and we usually recommend to use a dominant. (Such queries are frowned upon, and we usually recommend to use a
[recording rule](https://prometheus.io/docs/querying/rules/#recording-rules) [recording rule](https://prometheus.io/docs/prometheus/latest/querying/rules/#recording-rules)
for queries of that kind that are used frequently, e.g. in a dashboard.) But for queries of that kind that are used frequently, e.g. in a dashboard.) But
with the double-delta encoding, the query time might still have been with the double-delta encoding, the query time might still have been
acceptable, let's say around one second. After the switch to varbit encoding, acceptable, let's say around one second. After the switch to varbit encoding,
...@@ -147,7 +147,7 @@ encoding. Start your Prometheus server with ...@@ -147,7 +147,7 @@ encoding. Start your Prometheus server with
`-storage.local.chunk-encoding-version=2` and wait for a while until you have `-storage.local.chunk-encoding-version=2` and wait for a while until you have
enough new chunks with varbit encoding to vet the effects. If you see queries enough new chunks with varbit encoding to vet the effects. If you see queries
that are becoming unacceptably slow, check if you can use that are becoming unacceptably slow, check if you can use
[recording rules](https://prometheus.io/docs/querying/rules/#recording-rules) [recording rules](https://prometheus.io/docs/prometheus/latest/querying/rules/#recording-rules)
to speed them up. Most likely, those queries will gain a lot from that even to speed them up. Most likely, those queries will gain a lot from that even
with the old double-delta encoding. with the old double-delta encoding.
......
...@@ -269,6 +269,11 @@ footer { ...@@ -269,6 +269,11 @@ footer {
width: 100%; width: 100%;
} }
.side-nav div {
padding: 10px 15px;
background-color: #eee;
}
.side-nav .nav-header { .side-nav .nav-header {
display: block; display: block;
margin: 20px auto 15px auto; margin: 20px auto 15px auto;
...@@ -281,7 +286,7 @@ footer { ...@@ -281,7 +286,7 @@ footer {
text-decoration: none; text-decoration: none;
} }
.side-nav ul.active li.active { .side-nav li.current {
border-left: 3px solid #e6522c; border-left: 3px solid #e6522c;
margin-left: -2px; margin-left: -2px;
font-weight: bold; font-weight: bold;
...@@ -291,6 +296,10 @@ footer { ...@@ -291,6 +296,10 @@ footer {
border-left: 1px solid #e9eff2; border-left: 1px solid #e9eff2;
} }
.side-nav li li li a {
padding: 5px 15px 5px 40px;
}
.doc-content { .doc-content {
font-size: 16px; font-size: 16px;
} }
......
...@@ -26,7 +26,7 @@ sending a HTTP POST request to the `/-/reload` endpoint. ...@@ -26,7 +26,7 @@ sending a HTTP POST request to the `/-/reload` endpoint.
To specify which configuration file to load, use the `-config.file` flag. To specify which configuration file to load, use the `-config.file` flag.
``` ```bash
./alertmanager -config.file=simple.yml ./alertmanager -config.file=simple.yml
``` ```
...@@ -55,8 +55,7 @@ The global configuration specifies parameters that are valid in all other ...@@ -55,8 +55,7 @@ The global configuration specifies parameters that are valid in all other
configuration contexts. They also serve as defaults for other configuration configuration contexts. They also serve as defaults for other configuration
sections. sections.
```yaml
```
global: global:
# ResolveTimeout is the time after which an alert is declared resolved # ResolveTimeout is the time after which an alert is declared resolved
# if it has not been updated. # if it has not been updated.
...@@ -100,7 +99,6 @@ inhibit_rules: ...@@ -100,7 +99,6 @@ inhibit_rules:
[ - <inhibit_rule> ... ] [ - <inhibit_rule> ... ]
``` ```
## `<route>` ## `<route>`
A route block defines a node in a routing tree and its children. Its optional A route block defines a node in a routing tree and its children. Its optional
...@@ -115,8 +113,7 @@ If an alert does not match any children of a node (no matching child nodes, or ...@@ -115,8 +113,7 @@ If an alert does not match any children of a node (no matching child nodes, or
none exist), the alert is handled based on the configuration parameters of the none exist), the alert is handled based on the configuration parameters of the
current node. current node.
```yaml
```
[ receiver: <string> ] [ receiver: <string> ]
[ group_by: '[' <labelname>, ... ']' ] [ group_by: '[' <labelname>, ... ']' ]
...@@ -152,7 +149,7 @@ routes: ...@@ -152,7 +149,7 @@ routes:
### Example ### Example
``` ```yaml
# The root route with all parameters, which are inherited by the child # The root route with all parameters, which are inherited by the child
# routes if they are not overwritten. # routes if they are not overwritten.
route: route:
...@@ -179,8 +176,6 @@ route: ...@@ -179,8 +176,6 @@ route:
team: frontend team: frontend
``` ```
## `<inhibit_rule>` ## `<inhibit_rule>`
An inhibition rule is a rule that mutes an alert matching a set of matchers An inhibition rule is a rule that mutes an alert matching a set of matchers
...@@ -190,7 +185,7 @@ Both alerts must have a set of equal labels. ...@@ -190,7 +185,7 @@ Both alerts must have a set of equal labels.
__Alerts can inhibit themselves. Avoid writing inhibition rules where __Alerts can inhibit themselves. Avoid writing inhibition rules where
an alert matches both source and target.__ an alert matches both source and target.__
``` ```yaml
# Matchers that have to be fulfilled in the alerts to be muted. # Matchers that have to be fulfilled in the alerts to be muted.
target_match: target_match:
[ <labelname>: <labelvalue>, ... ] [ <labelname>: <labelvalue>, ... ]
...@@ -210,14 +205,13 @@ source_match_re: ...@@ -210,14 +205,13 @@ source_match_re:
``` ```
## `<receiver>` ## `<receiver>`
Receiver is a named configuration of one or more notification integrations. Receiver is a named configuration of one or more notification integrations.
__We're not actively adding new receivers, we recommend implementing custom notification integrations via the [webhook](/docs/alerting/configuration/#webhook_config) receiver.__ __We're not actively adding new receivers, we recommend implementing custom notification integrations via the [webhook](/docs/alerting/configuration/#webhook_config) receiver.__
``` ```yaml
# The unique name of the receiver. # The unique name of the receiver.
name: <string> name: <string>
...@@ -240,10 +234,9 @@ victorops_configs: ...@@ -240,10 +234,9 @@ victorops_configs:
[ - <victorops_config>, ... ] [ - <victorops_config>, ... ]
``` ```
## `<email_config>` ## `<email_config>`
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = false ] [ send_resolved: <boolean> | default = false ]
...@@ -277,7 +270,7 @@ to: <tmpl_string> ...@@ -277,7 +270,7 @@ to: <tmpl_string>
HipChat notifications use a [Build Your Own](https://confluence.atlassian.com/hc/integrations-with-hipchat-server-683508267.html) integration. HipChat notifications use a [Build Your Own](https://confluence.atlassian.com/hc/integrations-with-hipchat-server-683508267.html) integration.
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = false ] [ send_resolved: <boolean> | default = false ]
...@@ -306,7 +299,7 @@ room_id: <tmpl_string> ...@@ -306,7 +299,7 @@ room_id: <tmpl_string>
PagerDuty notifications are sent via the [PagerDuty API](https://developer.pagerduty.com/documentation/integration/events). PagerDuty notifications are sent via the [PagerDuty API](https://developer.pagerduty.com/documentation/integration/events).
PagerDuty provides documentation on how to integrate [here](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/). PagerDuty provides documentation on how to integrate [here](https://www.pagerduty.com/docs/guides/prometheus-integration-guide/).
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = true ] [ send_resolved: <boolean> | default = true ]
...@@ -337,7 +330,7 @@ service_key: <tmpl_secret> ...@@ -337,7 +330,7 @@ service_key: <tmpl_secret>
Pushover notifications are sent via the [Pushover API](https://pushover.net/api). Pushover notifications are sent via the [Pushover API](https://pushover.net/api).
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = true ] [ send_resolved: <boolean> | default = true ]
...@@ -372,7 +365,7 @@ token: <secret> ...@@ -372,7 +365,7 @@ token: <secret>
Slack notifications are sent via [Slack webhooks](https://api.slack.com/incoming-webhooks). Slack notifications are sent via [Slack webhooks](https://api.slack.com/incoming-webhooks).
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = false ] [ send_resolved: <boolean> | default = false ]
...@@ -394,12 +387,11 @@ channel: <tmpl_string> ...@@ -394,12 +387,11 @@ channel: <tmpl_string>
[ fallback: <tmpl_string> | default = '{{ template "slack.default.fallback" . }}' ] [ fallback: <tmpl_string> | default = '{{ template "slack.default.fallback" . }}' ]
``` ```
## `<opsgenie_config>` ## `<opsgenie_config>`
OpsGenie notifications are sent via the [OpsGenie API](https://www.opsgenie.com/docs/web-api/alert-api). OpsGenie notifications are sent via the [OpsGenie API](https://www.opsgenie.com/docs/web-api/alert-api).
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = true ] [ send_resolved: <boolean> | default = true ]
...@@ -431,11 +423,12 @@ api_key: <secret> ...@@ -431,11 +423,12 @@ api_key: <secret>
# Additional alert note. # Additional alert note.
[ note: <tmpl_string> ] [ note: <tmpl_string> ]
``` ```
## `<victorops_config>` ## `<victorops_config>`
VictorOps notifications are sent out via the [VictorOps API](https://help.victorops.com/knowledge-base/victorops-restendpoint-integration/) VictorOps notifications are sent out via the [VictorOps API](https://help.victorops.com/knowledge-base/victorops-restendpoint-integration/)
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = true ] [ send_resolved: <boolean> | default = true ]
...@@ -462,12 +455,11 @@ routing_key: <string> ...@@ -462,12 +455,11 @@ routing_key: <string>
``` ```
## `<webhook_config>` ## `<webhook_config>`
The webhook receiver allows configuring a generic receiver. The webhook receiver allows configuring a generic receiver.
``` ```yaml
# Whether or not to notify about resolved alerts. # Whether or not to notify about resolved alerts.
[ send_resolved: <boolean> | default = true ] [ send_resolved: <boolean> | default = true ]
...@@ -501,7 +493,6 @@ endpoint: ...@@ -501,7 +493,6 @@ endpoint:
} }
``` ```
There is a list of There is a list of
[integrations](/docs/operating/integrations/#alertmanager-webhook-receiver) with [integrations](/docs/operating/integrations/#alertmanager-webhook-receiver) with
this feature. this feature.
...@@ -12,7 +12,7 @@ vector elements at a given point in time, the alert counts as active for these ...@@ -12,7 +12,7 @@ vector elements at a given point in time, the alert counts as active for these
elements' label sets. elements' label sets.
Alerting rules are configured in Prometheus in the same way as [recording Alerting rules are configured in Prometheus in the same way as [recording
rules](../../querying/rules). rules](/docs/prometheus/latest/querying/rules).
### Defining alerting rules ### Defining alerting rules
...@@ -42,7 +42,7 @@ can be templated. ...@@ -42,7 +42,7 @@ can be templated.
#### Templating #### Templating
Label and annotation values can be templated using [console templates](../../visualization/consoles). Label and annotation values can be templated using [console templates](/docs/visualization/consoles).
The `$labels` variable holds the label key/value pairs of an alert instance The `$labels` variable holds the label key/value pairs of an alert instance
and `$value` holds the evaluated value of an alert instance. and `$value` holds the evaluated value of an alert instance.
...@@ -91,7 +91,7 @@ Prometheus's alerting rules are good at figuring what is broken *right now*, ...@@ -91,7 +91,7 @@ Prometheus's alerting rules are good at figuring what is broken *right now*,
but they are not a fully-fledged notification solution. Another layer is needed but they are not a fully-fledged notification solution. Another layer is needed
to add summarization, notification rate limiting, silencing and alert to add summarization, notification rate limiting, silencing and alert
dependencies on top of the simple alert definitions. In Prometheus's ecosystem, dependencies on top of the simple alert definitions. In Prometheus's ecosystem,
the [Alertmanager](../alertmanager) takes on this the [Alertmanager](/docs/alertmanager) takes on this
role. Thus, Prometheus may be configured to periodically send information about role. Thus, Prometheus may be configured to periodically send information about
alert states to an Alertmanager instance, which then takes care of dispatching alert states to an Alertmanager instance, which then takes care of dispatching
the right notifications. The Alertmanager instance may be configured via the the right notifications. The Alertmanager instance may be configured via the
......
...@@ -56,7 +56,7 @@ during a scrape: ...@@ -56,7 +56,7 @@ during a scrape:
* the **count** of events that have been observed, exposed as `<basename>_count` (identical to `<basename>_bucket{le="+Inf"}` above) * the **count** of events that have been observed, exposed as `<basename>_count` (identical to `<basename>_bucket{le="+Inf"}` above)
Use the Use the
[`histogram_quantile()` function](/docs/querying/functions/#histogram_quantile) [`histogram_quantile()` function](/docs/prometheus/latest/querying/functions/#histogram_quantile)
to calculate quantiles from histograms or even aggregations of histograms. A to calculate quantiles from histograms or even aggregations of histograms. A
histogram is also suitable to calculate an histogram is also suitable to calculate an
[Apdex score](http://en.wikipedia.org/wiki/Apdex). When operating on buckets, [Apdex score](http://en.wikipedia.org/wiki/Apdex). When operating on buckets,
......
---
title: Getting started
sort_rank: 3
---
# Getting started
This guide is a "Hello World"-style tutorial which shows how to install,
configure, and use Prometheus in a simple example setup. You will download and run
Prometheus locally, configure it to scrape itself and an example application,
and then work with queries, rules, and graphs to make use of the collected time
series data.
## Downloading and running Prometheus
[Download the latest release](/download) of Prometheus for your platform, then
extract and run it:
```language-bash
tar xvfz prometheus-*.tar.gz
cd prometheus-*
```
Before starting Prometheus, let's configure it.
## Configuring Prometheus to monitor itself
Prometheus collects metrics from monitored targets by scraping metrics HTTP
endpoints on these targets. Since Prometheus also exposes data in the same
manner about itself, it can also scrape and monitor its own health.
While a Prometheus server that collects only data about itself is not very
useful in practice, it is a good starting example. Save the following basic
Prometheus configuration as a file named `prometheus.yml`:
```language-yaml
global:
scrape_interval: 15s # By default, scrape targets every 15 seconds.
# Attach these labels to any time series or alerts when communicating with
# external systems (federation, remote storage, Alertmanager).
external_labels:
monitor: 'codelab-monitor'
# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: 'prometheus'
# Override the global default and scrape targets from this job every 5 seconds.
scrape_interval: 5s
static_configs:
- targets: ['localhost:9090']
```
For a complete specification of configuration options, see the
[configuration documentation](/docs/operating/configuration).
## Starting Prometheus
To start Prometheus with your newly created configuration file, change to the directory containing the Prometheus binary and run:
```language-bash
# Start Prometheus.
# By default, Prometheus stores its database in ./data (flag -storage.local.path).
./prometheus -config.file=prometheus.yml
```
Prometheus should start up. You should also be able to browse to a status page about itself at http://localhost:9090. Give it a couple of seconds to collect data about itself from its own HTTP metrics endpoint.
You can also verify that Prometheus is serving metrics about itself by
navigating to its metrics endpoint: http://localhost:9090/metrics
The number of OS threads executed by Prometheus is controlled by the
`GOMAXPROCS` environment variable. As of Go 1.5 the default value is
the number of cores available.
Blindly setting `GOMAXPROCS` to a high value can be counterproductive. See the relevant [Go FAQs](http://golang.org/doc/faq#Why_no_multi_CPU).
Prometheus by default uses around 3GB in memory. If you have a
smaller machine, you can tune Prometheus to use less memory. For details,
see the [memory usage documentation](/docs/operating/storage/#memory-usage).
## Using the expression browser
Let us try looking at some data that Prometheus has collected about itself. To
use Prometheus's built-in expression browser, navigate to
http://localhost:9090/graph and choose the "Console" view within the "Graph"
tab.
As you can gather from http://localhost:9090/metrics, one metric that
Prometheus exports about itself is called
`prometheus_target_interval_length_seconds` (the actual amount of time between
target scrapes). Go ahead and enter this into the expression console:
```
prometheus_target_interval_length_seconds
```
This should return a number of different time series (along with the latest value
recorded for each), all with the metric name
`prometheus_target_interval_length_seconds`, but with different labels. These
labels designate different latency percentiles and target group intervals.
If we were only interested in the 99th percentile latencies, we could use this
query to retrieve that information:
```
prometheus_target_interval_length_seconds{quantile="0.99"}
```
To count the number of returned time series, you could write:
```
count(prometheus_target_interval_length_seconds)
```
For more about the expression language, see the
[expression language documentation](/docs/querying/basics/).
## Using the graphing interface
To graph expressions, navigate to http://localhost:9090/graph and use the "Graph"
tab.
For example, enter the following expression to graph the per-second rate of all
storage chunk operations happening in the self-scraped Prometheus:
```
rate(prometheus_local_storage_chunk_ops_total[1m])
```
Experiment with the graph range parameters and other settings.
## Starting up some sample targets
Let us make this more interesting and start some example targets for Prometheus
to scrape.
The Go client library includes an example which exports fictional RPC latencies
for three services with different latency distributions.
Ensure you have the [Go compiler installed](https://golang.org/doc/install) and
have a [working Go build environment](https://golang.org/doc/code.html) (with
correct `GOPATH`) set up.
Download the Go client library for Prometheus and run three of these example
processes:
```language-bash
# Fetch the client library code and compile example.
git clone https://github.com/prometheus/client_golang.git
cd client_golang/examples/random
go get -d
go build
# Start 3 example targets in separate terminals:
./random -listen-address=:8080
./random -listen-address=:8081
./random -listen-address=:8082
```
You should now have example targets listening on http://localhost:8080/metrics,
http://localhost:8081/metrics, and http://localhost:8082/metrics.
## Configuring Prometheus to monitor the sample targets
Now we will configure Prometheus to scrape these new targets. Let's group all
three endpoints into one job called `example-random`. However, imagine that the
first two endpoints are production targets, while the third one represents a
canary instance. To model this in Prometheus, we can add several groups of
endpoints to a single job, adding extra labels to each group of targets. In
this example, we will add the `group="production"` label to the first group of
targets, while adding `group="canary"` to the second.
To achieve this, add the following job definition to the `scrape_configs`
section in your `prometheus.yml` and restart your Prometheus instance:
```
scrape_configs:
- job_name: 'example-random'
# Override the global default and scrape targets from this job every 5 seconds.
scrape_interval: 5s
static_configs:
- targets: ['localhost:8080', 'localhost:8081']
labels:
group: 'production'
- targets: ['localhost:8082']
labels:
group: 'canary'
```
Go to the expression browser and verify that Prometheus now has information
about time series that these example endpoints expose, such as the
`rpc_durations_seconds` metric.
## Configure rules for aggregating scraped data into new time series
Though not a problem in our example, queries that aggregate over thousands of
time series can get slow when computed ad-hoc. To make this more efficient,
Prometheus allows you to prerecord expressions into completely new persisted
time series via configured recording rules. Let's say we are interested in
recording the per-second rate of example RPCs
(`rpc_durations_seconds_count`) averaged over all instances (but
preserving the `job` and `service` dimensions) as measured over a window of 5
minutes. We could write this as:
```
avg(rate(rpc_durations_seconds_count[5m])) by (job, service)
```
Try graphing this expression.
To record the time series resulting from this expression into a new metric
called `job_service:rpc_durations_seconds_count:avg_rate5m`, create a file
with the following recording rule and save it as `prometheus.rules`:
```
job_service:rpc_durations_seconds_count:avg_rate5m = avg(rate(rpc_durations_seconds_count[5m])) by (job, service)
```
To make Prometheus pick up this new rule, add a `rule_files` statement to the
`global` configuration section in your `prometheus.yml`. The config should now
look like this:
```language-yaml
global:
scrape_interval: 15s # By default, scrape targets every 15 seconds.
evaluation_interval: 15s # Evaluate rules every 15 seconds.
# Attach these extra labels to all timeseries collected by this Prometheus instance.
external_labels:
monitor: 'codelab-monitor'
rule_files:
- 'prometheus.rules'
scrape_configs:
- job_name: 'prometheus'
# Override the global default and scrape targets from this job every 5 seconds.
scrape_interval: 5s
static_configs:
- targets: ['localhost:9090']
- job_name: 'example-random'
# Override the global default and scrape targets from this job every 5 seconds.
scrape_interval: 5s
static_configs:
- targets: ['localhost:8080', 'localhost:8081']
labels:
group: 'production'
- targets: ['localhost:8082']
labels:
group: 'canary'
```
Restart Prometheus with the new configuration and verify that a new time series
with the metric name `job_service:rpc_durations_seconds_count:avg_rate5m`
is now available by querying it through the expression browser or graphing it.
...@@ -70,7 +70,7 @@ also refer to the Prometheus monitoring system as a whole. ...@@ -70,7 +70,7 @@ also refer to the Prometheus monitoring system as a whole.
### PromQL ### PromQL
[PromQL](../../querying/basics/) is the Prometheus Query Language. It allows for [PromQL](/docs/prometheus/latest/querying/basics/) is the Prometheus Query Language. It allows for
a wide range of operations including aggregation, slicing and dicing, prediction and joins. a wide range of operations including aggregation, slicing and dicing, prediction and joins.
### Pushgateway ### Pushgateway
......
---
title: Installing
sort_rank: 2
---
# Installing
## Using pre-compiled binaries
We provide precompiled binaries for most official Prometheus components.
Check out the [download section](/download) for a list of all available
versions.
## From source
For building Prometheus components from source, see the `Makefile` targets in
the respective repository.
NOTE: **Note:** The documentation on this website refers to the latest stable
release (excluding pre-releases). The branch
[next-release](https://github.com/prometheus/docs/compare/next-release) refers
to unreleased changes that are in master branches of source repos.
## Using Docker
All Prometheus services are available as Docker images under the
[prom](https://hub.docker.com/u/prom/) organization.
Running Prometheus on Docker is as simple as `docker run -p 9090:9090
prom/prometheus`. This starts Prometheus with a sample configuration and
exposes it on port 9090.
The Prometheus image uses a volume to store the actual metrics. For
production deployments it is highly recommended to use the
[Data Volume Container](https://docs.docker.com/engine/admin/volumes/volumes/)
pattern to ease managing the data on Prometheus upgrades.
To provide your own configuration, there are several options. Here are
two examples.
### Volumes & bind-mount
Bind-mount your `prometheus.yml` from the host by running:
```
docker run -p 9090:9090 -v /tmp/prometheus.yml:/etc/prometheus/prometheus.yml \
prom/prometheus
```
Or use an additional volume for the config:
```
docker run -p 9090:9090 -v /prometheus-data \
prom/prometheus -config.file=/prometheus-data/prometheus.yml
```
### Custom image
To avoid managing a file on the host and bind-mounting it, the
configuration can be baked into the image. This works well if the
configuration itself is rather static and the same across all
environments.
For this, create a new directory with a Prometheus configuration and a
`Dockerfile` like this:
```
FROM prom/prometheus
ADD prometheus.yml /etc/prometheus/
```
Now build and run it:
```
docker build -t my-prometheus .
docker run -p 9090:9090 my-prometheus
```
A more advanced option is to render the configuration dynamically on start
with some tooling or even have a daemon update it periodically.
## Using configuration management systems
If you prefer using configuration management systems you might be interested in
the following third-party contributions:
### Ansible
* [griggheo/ansible-prometheus](https://github.com/griggheo/ansible-prometheus)
* [William-Yeh/ansible-prometheus](https://github.com/William-Yeh/ansible-prometheus)
### Chef
* [rayrod2030/chef-prometheus](https://github.com/rayrod2030/chef-prometheus)
### Puppet
* [puppet/prometheus](https://forge.puppet.com/puppet/prometheus)
### SaltStack
* [bechtoldt/saltstack-prometheus-formula](https://github.com/bechtoldt/saltstack-prometheus-formula)
...@@ -25,7 +25,7 @@ For more elaborate overviews of Prometheus, see the resources linked from the ...@@ -25,7 +25,7 @@ For more elaborate overviews of Prometheus, see the resources linked from the
Prometheus's main features are: Prometheus's main features are:
* a multi-dimensional [data model](/docs/concepts/data_model/) with time series data identified by metric name and key/value pairs * a multi-dimensional [data model](/docs/concepts/data_model/) with time series data identified by metric name and key/value pairs
* a [flexible query language](/docs/querying/basics/) * a [flexible query language](/docs/prometheus/latest/querying/basics/)
to leverage this dimensionality to leverage this dimensionality
* no reliance on distributed storage; single server nodes are autonomous * no reliance on distributed storage; single server nodes are autonomous
* time series collection happens via a pull model over HTTP * time series collection happens via a pull model over HTTP
...@@ -57,7 +57,9 @@ its ecosystem components: ...@@ -57,7 +57,9 @@ its ecosystem components:
Prometheus scrapes metrics from instrumented jobs, either directly or via an Prometheus scrapes metrics from instrumented jobs, either directly or via an
intermediary push gateway for short-lived jobs. It stores all scraped samples intermediary push gateway for short-lived jobs. It stores all scraped samples
locally and runs rules over this data to either aggregate and record new time series from existing data or generate alerts. [Grafana](https://grafana.com/) or other API consumers can be used to visualize the collected data. locally and runs rules over this data to either aggregate and record new time
series from existing data or generate alerts. [Grafana](https://grafana.com/) or
other API consumers can be used to visualize the collected data.
## When does it fit? ## When does it fit?
......
This diff is collapsed.
---
title: Federation
sort_rank: 3
---
# Federation
Federation allows a Prometheus server to scrape selected time series from
another Prometheus server.
## Use cases
There are different use cases for federation. Commonly, it is used to either
achieve scalable Prometheus monitoring setups or to pull related metrics from
one service's Prometheus into another.
### Hierarchical federation
Hierarchical federation allows Prometheus to scale to environments with tens of
data centers and millions of nodes. In this use case, the federation topology
resembles a tree, with higher-level Prometheus servers collecting aggregated
time series data from a larger number of subordinated servers.
For example, a setup might consist of many per-datacenter Prometheus servers
that collect data in high detail (instance-level drill-down), and a set of
global Prometheus servers which collect and store only aggregated data
(job-level drill-down) from those local servers. This provides an aggregate
global view and detailed local views.
### Cross-service federation
In cross-service federation, a Prometheus server of one service is configured
to scrape selected data from another service's Prometheus server to enable
alerting and queries against both datasets within a single server.
For example, a cluster scheduler running multiple services might expose
resource usage information (like memory and CPU usage) about service instances
running on the cluster. On the other hand, a service running on that cluster
will only expose application-specific service metrics. Often, these two sets of
metrics are scraped by separate Prometheus servers. Using federation, the
Prometheus server containing service-level metrics may pull in the cluster
resource usage metrics about its specific service from the cluster Prometheus,
so that both sets of metrics can be used within that server.
## Configuring federation
On any given Prometheus server, the `/federate` endpoint allows retrieving the
current value for a selected set of time series in that server. At least one
`match[]` URL parameter must be specified to select the series to expose. Each
`match[]` argument needs to specify an
[instant vector selector](/docs/querying/basics/#instant-vector-selectors) like
`up` or `{job="api-server"}`. If multiple `match[]` parameters are provided,
the union of all matched series is selected.
To federate metrics from one server to another, configure your destination
Prometheus server to scrape from the `/federate` endpoint of a source server,
while also enabling the `honor_labels` scrape option (to not overwrite any
labels exposed by the source server) and passing in the desired `match[]`
parameters. For example, the following `scrape_config` federates any series
with the label `job="prometheus"` or a metric name starting with `job:` from
the Prometheus servers at `source-prometheus-{1,2,3}:9090` into the scraping
Prometheus:
```
- job_name: 'federate'
scrape_interval: 15s
honor_labels: true
metrics_path: '/federate'
params:
'match[]':
- '{job="prometheus"}'
- '{__name__=~"job:.*"}'
static_configs:
- targets:
- 'source-prometheus-1:9090'
- 'source-prometheus-2:9090'
- 'source-prometheus-3:9090'
```
This diff is collapsed.
...@@ -99,7 +99,7 @@ calculate streaming φ-quantiles on the client side and expose them directly, ...@@ -99,7 +99,7 @@ calculate streaming φ-quantiles on the client side and expose them directly,
while histograms expose bucketed observation counts and the calculation of while histograms expose bucketed observation counts and the calculation of
quantiles from the buckets of a histogram happens on the server side using the quantiles from the buckets of a histogram happens on the server side using the
[`histogram_quantile()` [`histogram_quantile()`
function](/docs/querying/functions/#histogram_quantile). function](/docs/prometheus/latest/querying/functions/#histogram_quantile).
The two approaches have a number of different implications: The two approaches have a number of different implications:
...@@ -107,11 +107,11 @@ The two approaches have a number of different implications: ...@@ -107,11 +107,11 @@ The two approaches have a number of different implications:
|---|-----------|--------- |---|-----------|---------
| Required configuration | Pick buckets suitable for the expected range of observed values. | Pick desired φ-quantiles and sliding window. Other φ-quantiles and sliding windows cannot be calculated later. | Required configuration | Pick buckets suitable for the expected range of observed values. | Pick desired φ-quantiles and sliding window. Other φ-quantiles and sliding windows cannot be calculated later.
| Client performance | Observations are very cheap as they only need to increment counters. | Observations are expensive due to the streaming quantile calculation. | Client performance | Observations are very cheap as they only need to increment counters. | Observations are expensive due to the streaming quantile calculation.
| Server performance | The server has to calculate quantiles. You can use [recording rules](/docs/querying/rules/#recording-rules) should the ad-hoc calculation take too long (e.g. in a large dashboard). | Low server-side cost. | Server performance | The server has to calculate quantiles. You can use [recording rules](/docs/prometheus/latest/querying/rules/#recording-rules) should the ad-hoc calculation take too long (e.g. in a large dashboard). | Low server-side cost.
| Number of time series (in addition to the `_sum` and `_count` series) | One time series per configured bucket. | One time series per configured quantile. | Number of time series (in addition to the `_sum` and `_count` series) | One time series per configured bucket. | One time series per configured quantile.
| Quantile error (see below for details) | Error is limited in the dimension of observed values by the width of the relevant bucket. | Error is limited in the dimension of φ by a configurable value. | Quantile error (see below for details) | Error is limited in the dimension of observed values by the width of the relevant bucket. | Error is limited in the dimension of φ by a configurable value.
| Specification of φ-quantile and sliding time-window | Ad-hoc with [Prometheus expressions](/docs/querying/functions/#histogram_quantile). | Preconfigured by the client. | Specification of φ-quantile and sliding time-window | Ad-hoc with [Prometheus expressions](/docs/prometheus/latest/querying/functions/#histogram_quantile). | Preconfigured by the client.
| Aggregation | Ad-hoc with [Prometheus expressions](/docs/querying/functions/#histogram_quantile). | In general [not aggregatable](http://latencytipoftheday.blogspot.de/2014/06/latencytipoftheday-you-cant-average.html). | Aggregation | Ad-hoc with [Prometheus expressions](/docs/prometheus/latest/querying/functions/#histogram_quantile). | In general [not aggregatable](http://latencytipoftheday.blogspot.de/2014/06/latencytipoftheday-you-cant-average.html).
Note the importance of the last item in the table. Let us return to Note the importance of the last item in the table. Let us return to
the SLA of serving 95% of requests within 300ms. This time, you do not the SLA of serving 95% of requests within 300ms. This time, you do not
...@@ -132,7 +132,7 @@ quantiles yields statistically nonsensical values. ...@@ -132,7 +132,7 @@ quantiles yields statistically nonsensical values.
Using histograms, the aggregation is perfectly possible with the Using histograms, the aggregation is perfectly possible with the
[`histogram_quantile()` [`histogram_quantile()`
function](/docs/querying/functions/#histogram_quantile). function](/docs/prometheus/latest/querying/functions/#histogram_quantile).
histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[5m])) by (le)) // GOOD. histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket[5m])) by (le)) // GOOD.
......
...@@ -5,9 +5,9 @@ sort_rank: 6 ...@@ -5,9 +5,9 @@ sort_rank: 6
# Recording rules # Recording rules
A consistent naming scheme for [recording rules](/docs/querying/rules/) makes it A consistent naming scheme for [recording rules](/docs/prometheus/latest/querying/rules/)
easier to interpret the meaning of a rule at a glance. It also avoids mistakes by makes it easier to interpret the meaning of a rule at a glance. It also avoids
making incorrect or meaningless calculations stand out. mistakes by making incorrect or meaningless calculations stand out.
This page documents how to correctly do aggregation and suggests a naming This page documents how to correctly do aggregation and suggests a naming
convention. convention.
......
--- ---
title: Querying title: Prometheus
sort_rank: 3 sort_rank: 3
nav_icon: search nav_icon: server
--- ---
This diff is collapsed.
---
title: Querying basics
nav_title: Basics
sort_rank: 1
---
# Querying Prometheus
Prometheus provides a functional expression language that lets the user select
and aggregate time series data in real time. The result of an expression can
either be shown as a graph, viewed as tabular data in Prometheus's expression
browser, or consumed by external systems via the [HTTP API](/docs/querying/api/).
## Examples
This document is meant as a reference. For learning, it might be easier to
start with a couple of [examples](/docs/querying/examples/).
## Expression language data types
In Prometheus's expression language, an expression or sub-expression can
evaluate to one of four types:
* **Instant vector** - a set of time series containing a single sample for each time series, all sharing the same timestamp
* **Range vector** - a set of time series containing a range of data points over time for each time series
* **Scalar** - a simple numeric floating point value
* **String** - a simple string value; currently unused
Depending on the use-case (e.g. when graphing vs. displaying the output of an
expression), only some of these types are legal as the result from a
user-specified expression. For example, an expression that returns an instant
vector is the only type that can be directly graphed.
## Literals
### String literals
Strings may be specified as literals in single quotes, double quotes or
backticks.
PromQL follows the same [escaping rules as
Go](https://golang.org/ref/spec#String_literals). In single or double quotes a
backslash begins an escape sequence, which may be followed by `a`, `b`, `f`,
`n`, `r`, `t`, `v` or `\`. Specific characters can be provided using octal
(`\nnn`) or hexadecimal (`\xnn`, `\unnnn` and `\Unnnnnnnn`).
No escaping is processed inside backticks. Unlike Go, Prometheus does not discard newlines inside backticks.
Example:
"this is a string"
'these are unescaped: \n \\ \t'
`these are not unescaped: \n ' " \t`
### Float literals
Scalar float values can be literally written as numbers of the form
`[-](digits)[.(digits)]`.
-2.43
## Time series Selectors
### Instant vector selectors
Instant vector selectors allow the selection of a set of time series and a
single sample value for each at a given timestamp (instant): in the simplest
form, only a metric name is specified. This results in an instant vector
containing elements for all time series that have this metric name.
This example selects all time series that have the `http_requests_total` metric
name:
http_requests_total
It is possible to filter these time series further by appending a set of labels
to match in curly braces (`{}`).
This example selects only those time series with the `http_requests_total`
metric name that also have the `job` label set to `prometheus` and their
`group` label set to `canary`:
http_requests_total{job="prometheus",group="canary"}
It is also possible to negatively match a label value, or to match label values
against regular expressions. The following label matching operators exist:
* `=`: Select labels that are exactly equal to the provided string.
* `!=`: Select labels that are not equal to the provided string.
* `=~`: Select labels that regex-match the provided string.
* `!~`: Select labels that do not regex-match the provided string.
For example, this selects all `http_requests_total` time series for `staging`,
`testing`, and `development` environments and HTTP methods other than `GET`.
http_requests_total{environment=~"staging|testing|development",method!="GET"}
Label matchers that match empty label values also select all time series that do
not have the specific label set at all. Regex-matches are fully anchored.
Vector selectors must either specify a name or at least one label matcher
that does not match the empty string. The following expression is illegal:
{job=~".*"} # Bad!
In contrast, these expressions are valid as they both have a selector that does not
match empty label values.
{job=~".+"} # Good!
{job=~".*",method="get"} # Good!
Label matchers can also be applied to metric names by matching against the internal
`__name__` label. For example, the expression `http_requests_total` is equivalent to
`{__name__="http_requests_total"}`. Matchers other than `=` (`!=`, `=~`, `!~`) may also be used.
The following expression selects all metrics that have a name starting with `job:`:
{__name__=~"job:.*"}
### Range Vector Selectors
Range vector literals work like instant vector literals, except that they
select a range of samples back from the current instant. Syntactically, a range
duration is appended in square brackets (`[]`) at the end of a vector selector
to specify how far back in time values should be fetched for each resulting
range vector element.
Time durations are specified as a number, followed immediately by one of the
following units:
* `s` - seconds
* `m` - minutes
* `h` - hours
* `d` - days
* `w` - weeks
* `y` - years
In this example, we select all the values we have recorded within the last 5
minutes for all time series that have the metric name `http_requests_total` and
a `job` label set to `prometheus`:
http_requests_total{job="prometheus"}[5m]
### Offset modifier
The `offset` modifier allows changing the time offset for individual
instant and range vectors in a query.
For example, the following expression returns the value of
`http_requests_total` 5 minutes in the past relative to the current
query evaluation time:
http_requests_total offset 5m
Note that the `offset` modifier always needs to follow the selector
immediately, i.e. the following would be correct:
sum(http_requests_total{method="GET"} offset 5m) // GOOD.
While the following would be *incorrect*:
sum(http_requests_total{method="GET"}) offset 5m // INVALID.
The same works for range vectors. This returns the 5-minutes rate that
`http_requests_total` had a week ago:
rate(http_requests_total[5m] offset 1w)
## Operators
Prometheus supports many binary and aggregation operators. These are described
in detail in the [expression language operators](/docs/querying/operators/) page.
## Functions
Prometheus supports several functions to operate on data. These are described
in detail in the [expression language functions](/docs/querying/functions/) page.
## Gotchas
### Interpolation and staleness
When queries are run, timestamps at which to sample data are selected
independently of the actual present time series data. This is mainly to support
cases like aggregation (`sum`, `avg`, and so on), where multiple aggregated
time series do not exactly align in time. Because of their independence,
Prometheus needs to assign a value at those timestamps for each relevant time
series. It does so by simply taking the newest sample before this timestamp.
If no stored sample is found (by default) 5 minutes before a sampling timestamp,
no value is assigned for this time series at this point in time. This
effectively means that time series "disappear" from graphs at times where their
latest collected sample is older than 5 minutes.
NOTE: <b>NOTE:</b> Staleness and interpolation handling might change. See
https://github.com/prometheus/prometheus/issues/398 and
https://github.com/prometheus/prometheus/issues/581.
### Avoiding slow queries and overloads
If a query needs to operate on a very large amount of data, graphing it might
time out or overload the server or browser. Thus, when constructing queries
over unknown data, always start building the query in the tabular view of
Prometheus's expression browser until the result set seems reasonable
(hundreds, not thousands, of time series at most). Only when you have filtered
or aggregated your data sufficiently, switch to graph mode. If the expression
still takes too long to graph ad-hoc, pre-record it via a [recording
rule](/docs/querying/rules/#recording-rules).
This is especially relevant for Prometheus's query language, where a bare
metric name selector like `api_http_requests_total` could expand to thousands
of time series with different labels. Also keep in mind that expressions which
aggregate over many time series will generate load on the server even if the
output is only a small number of time series. This is similar to how it would
be slow to sum all values of a column in a relational database, even if the
output value is only a single number.
---
title: Querying examples
nav_title: Examples
sort_rank: 4
---
# Query examples
## Simple time series selection
Return all time series with the metric `http_requests_total`:
http_requests_total
Return all time series with the metric `http_requests_total` and the given
`job` and `handler` labels:
http_requests_total{job="apiserver", handler="/api/comments"}
Return a whole range of time (in this case 5 minutes) for the same vector,
making it a range vector:
http_requests_total{job="apiserver", handler="/api/comments"}[5m]
Note that an expression resulting in a range vector cannot be graphed directly,
but viewed in the tabular ("Console") view of the expression browser.
Using regular expressions, you could select time series only for jobs whose
names match a certain pattern, in this case, all jobs that end with `server`.
Note that regex matches are fully anchored, so the expression has to match the
entire label value:
http_requests_total{job=~".*server"}
To select all HTTP status codes except 4xx ones, you could run:
http_requests_total{status!~"4.."}
## Using functions, operators, etc.
Return the per-second rate for all time series with the `http_requests_total`
metric name, as measured over the last 5 minutes:
rate(http_requests_total[5m])
Assuming that the `http_requests_total` time series all have the labels `job`
(fanout by job name) and `instance` (fanout by instance of the job), we might
want to sum over the rate of all instances, so we get fewer output time series,
but still preserve the `job` dimension:
sum(rate(http_requests_total[5m])) by (job)
If we have two different metrics with the same dimensional labels, we can apply
binary operators to them and elements on both sides with the same label set
will get matched and propagated to the output. For example, this expression
returns the unused memory in MiB for every instance (on a fictional cluster
scheduler exposing these metrics about the instances it runs):
(instance_memory_limit_bytes - instance_memory_usage_bytes) / 1024 / 1024
The same expression, but summed by application, could be written like this:
sum(
instance_memory_limit_bytes - instance_memory_usage_bytes
) by (app, proc) / 1024 / 1024
If the same fictional cluster scheduler exposed CPU usage metrics like the
following for every instance:
instance_cpu_time_ns{app="lion", proc="web", rev="34d0f99", env="prod", job="cluster-manager"}
instance_cpu_time_ns{app="elephant", proc="worker", rev="34d0f99", env="prod", job="cluster-manager"}
instance_cpu_time_ns{app="turtle", proc="api", rev="4d3a513", env="prod", job="cluster-manager"}
instance_cpu_time_ns{app="fox", proc="widget", rev="4d3a513", env="prod", job="cluster-manager"}
...
...we could get the top 3 CPU users grouped by application (`app`) and process
type (`proc`) like this:
topk(3, sum(rate(instance_cpu_time_ns[5m])) by (app, proc))
Assuming this metric contains one time series per running instance, you could
count the number of running instances per application like this:
count(instance_cpu_time_ns) by (app)
This diff is collapsed.
This diff is collapsed.
---
title: Recording rules
sort_rank: 6
---
# Defining recording rules
## Configuring rules
Prometheus supports two types of rules which may be configured and then
evaluated at regular intervals: recording rules and [alerting
rules](../../alerting/rules). To include rules in Prometheus, create a file
containing the necessary rule statements and have Prometheus load the file via
the `rule_files` field in the [Prometheus configuration](/docs/operating/configuration).
The rule files can be reloaded at runtime by sending `SIGHUP` to the Prometheus
process. The changes are only applied if all rule files are well-formatted.
## Syntax-checking rules
To quickly check whether a rule file is syntactically correct without starting
a Prometheus server, install and run Prometheus's `promtool` command-line
utility tool:
```bash
go get github.com/prometheus/prometheus/cmd/promtool
promtool check-rules /path/to/example.rules
```
When the file is syntactically valid, the checker prints a textual
representation of the parsed rules to standard output and then exits with
a `0` return status.
If there are any syntax errors, it prints an error message to standard error
and exits with a `1` return status. On invalid input arguments the exit status
is `2`.
## Recording rules
Recording rules allow you to precompute frequently needed or computationally
expensive expressions and save their result as a new set of time series.
Querying the precomputed result will then often be much faster than executing
the original expression every time it is needed. This is especially useful for
dashboards, which need to query the same expression repeatedly every time they
refresh.
To add a new recording rule, add a line of the following syntax to your rule
file:
<new time series name>[{<label overrides>}] = <expression to record>
Some examples:
# Saving the per-job HTTP in-progress request count as a new set of time series:
job:http_inprogress_requests:sum = sum(http_inprogress_requests) by (job)
# Drop or rewrite labels in the result time series:
new_time_series{label_to_change="new_value",label_to_drop=""} = old_time_series
Recording rules are evaluated at the interval specified by the
`evaluation_interval` field in the Prometheus configuration. During each
evaluation cycle, the right-hand-side expression of the rule statement is
evaluated at the current instant in time and the resulting sample vector is
stored as a new set of time series with the current timestamp and a new metric
name (and perhaps an overridden set of labels).
---
title: Template examples
sort_rank: 4
---
# Template examples
Prometheus supports templating in the summary and description fields of
alerts, as well as in served console pages. Templates have the ability to run
queries against the local database, iterate over data, use conditionals, format
data, etc. The Prometheus templating language is based on the
[Go templating](http://golang.org/pkg/text/template/) system.
## Simple alert field templates
ALERT InstanceDown
IF up == 0
FOR 5m
LABELS {
severity="page"
}
ANNOTATIONS {
summary = "Instance {{$labels.instance}} down",
description = "{{$labels.instance}} of job {{$labels.job}} has been down for more than 5 minutes.",
}
Alert field templates will be executed during every rule iteration for each
alert that fires, so keep any queries and templates lightweight. If you have a
need for more complicated templates for alerts, it is recommended to link to a
console instead.
## Simple iteration
This displays a list of instances, and whether they are up:
```
{{ range query "up" }}
{{ .Labels.instance }} {{ .Value }}
{{ end }}
```
The special `.` variable contains the value of the current sample for each loop iteration.
## Display one value
```
{{ with query "some_metric{instance='someinstance'}" }}
{{ . | first | value | humanize }}
{{ end }}
```
Go and Go's templating language are both strongly typed, so one must check that
samples were returned to avoid an execution error. For example this could
happen if a scrape or rule evaluation has not run yet, or a host was down.
The included `prom_query_drilldown` template handles this, allows for
formatting of results, and linking to the [expression browser](/docs/visualization/browser/).
## Using console URL parameters
```
{{ with printf "node_memory_MemTotal{job='node',instance='%s'}" .Params.instance | query }}
{{ . | first | value | humanize1024}}B
{{ end }}
```
If accessed as `console.html?instance=hostname`, `.Params.instance` will evaluate to `hostname`.
## Advanced iteration
```html
<table>
{{ range printf "node_network_receive_bytes{job='node',instance='%s',device!='lo'}" .Params.instance | query | sortByLabel "device"}}
<tr><th colspan=2>{{ .Labels.device }}</th></tr>
<tr>
<td>Received</td>
<td>{{ with printf "rate(node_network_receive_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device | query }}{{ . | first | value | humanize }}B/s{{end}}</td>
</tr>
<tr>
<td>Transmitted</td>
<td>{{ with printf "rate(node_network_transmit_bytes{job='node',instance='%s',device='%s'}[5m])" .Labels.instance .Labels.device | query }}{{ . | first | value | humanize }}B/s{{end}}</td>
</tr>{{ end }}
</table>
```
Here we iterate over all network devices and display the network traffic for each.
As the `range` action does not specify a variable, `.Params.instance` is not
available inside the loop as `.` is now the loop variable.
## Defining reusable templates
Prometheus supports defining templates that can be reused. This is particularly
powerful when combined with
[console library](/docs/visualization/template_reference/#console-templates) support, allowing
sharing of templates across consoles.
```
{{/* Define the template */}}
{{define "myTemplate"}}
do something
{{end}}
{{/* Use the template */}}
{{template "myTemplate"}}
```
Templates are limited to one argument. The `args` function can be used to wrap multiple arguments.
```
{{define "myMultiArgTemplate"}}
First argument: {{.arg0}}
Second argument: {{.arg1}}
{{end}}
{{template "myMultiArgTemplate" (args 1 2)}}
```
---
title: Template reference
sort_rank: 6
---
# Template reference
Prometheus supports templating in the summary and description fields of
alerts, as well as in served console pages. Templates have the ability to run
queries against the local database, iterate over data, use conditionals, format
data, etc. The Prometheus templating language is based on the
[Go templating](http://golang.org/pkg/text/template/) system.
## Data Structures
The primary data structure for dealing with time series data is the sample, defined as:
```
type sample struct {
Labels map[string]string
Value float64
}
```
The metric name of the sample is encoded in a special `__name__` label in the `Labels` map.
`[]sample` means a list of samples.
`interface{}` in Go is similar to a void pointer in C.
## Functions
In addition to the [default
functions](http://golang.org/pkg/text/template/#hdr-Functions) provided by Go
templating, Prometheus provides functions for easier processing of query
results in templates.
If functions are used in a pipeline, the pipeline value is passed as the last argument.
### Queries
| Name | Arguments | Returns | Notes |
| ------------- | ------------- | -------- | -------- |
| query | query string | []sample | Queries the database, does not support returning range vectors. |
| first | []sample | sample | Equivalent to `index a 0` |
| label | label, sample | string | Equivalent to `index sample.Labels label` |
| value | sample | float64 | Equivalent to `sample.Value` |
| sortByLabel | label, []sample | []sample | Sorts the samples by the given label. Is stable. |
`first`, `label` and `value` are intended to make query results easily usable in pipelines.
### Numbers
| Name | Arguments | Returns | Notes |
| ------------- | --------------| --------| --------- |
| humanize | number | string | Converts a number to a more readable format, using [metric prefixes](http://en.wikipedia.org/wiki/Metric_prefix).
| humanize1024 | number | string | Like `humanize`, but uses 1024 as the base rather than 1000. |
| humanizeDuration | number | string | Converts a duration in seconds to a more readable format. |
| humanizeTimestamp | number | string | Converts a Unix timestamp in seconds to a more readable format. |
Humanizing functions are intended to produce reasonable output for consumption
by humans, and are not guaranteed to return the same results between Prometheus
versions.
### Strings
| Name | Arguments | Returns | Notes |
| ------------- | ------------- | ------- | ----------- |
| title | string | string | [strings.Title](http://golang.org/pkg/strings/#Title), capitalises first character of each word.|
| toUpper | string | string | [strings.ToUpper](http://golang.org/pkg/strings/#ToUpper), converts all characters to upper case.|
| toLower | string | string | [strings.ToLower](http://golang.org/pkg/strings/#ToLower), converts all characters to lower case.|
| match | pattern, text | boolean | [regexp.MatchString](http://golang.org/pkg/regexp/#MatchString) Tests for an unanchored regexp match. |
| reReplaceAll | pattern, replacement, text | string | [Regexp.ReplaceAllString](http://golang.org/pkg/regexp/#Regexp.ReplaceAllString) Regexp substitution, unanchored. |
| graphLink | expr | string | Returns path to graph view in the [expression browser](/docs/visualization/browser/) for the expression. |
| tableLink | expr | string | Returns path to tabular ("Console") view in the [expression browser](/docs/visualization/browser/) for the expression. |
### Others
| Name | Arguments | Returns | Notes |
| ------------- | ------------- | ------- | ----------- |
| args | []interface{} | map[string]interface{} | This converts a list of objects to a map with keys arg0, arg1 etc. This is intended to allow multiple arguments to be passed to templates. |
| tmpl | string, []interface{} | nothing | Like the built-in `template`, but allows non-literals as the template name. Note that the result is assumed to be safe, and will not be auto-escaped. Only available in consoles. |
| safeHtml | string | string | Marks string as HTML not requiring auto-escaping. |
## Template type differences
Each of the types of templates provide different information that can be used to
parameterize templates, and have a few other differences.
### Alert field templates
`.Value` and `.Labels` contain the alert value and labels. They are also exposed
as the `$value` and `$labels` variables for convenience.
### Console templates
Consoles are exposed on `/consoles/`, and sourced from the directory pointed to
by the `-web.console.templates` flag.
Console templates are rendered with
[html/template](http://golang.org/pkg/html/template/), which provides
auto-escaping. To bypass the auto-escaping use the `safe*` functions.
URL parameters are available as a map in `.Params`. To access multiple URL
parameters by the same name, `.RawParams` is a map of the list values for each
parameter. The URL path is available in `.Path`, excluding the `/consoles/`
prefix.
Consoles also have access to all the templates defined with `{{define
"templateName"}}...{{end}}` found in `*.lib` files in the directory pointed to
by the `-web.console.libraries` flag. As this is a shared namespace, take care
to avoid clashes with other users. Template names beginning with `prom`,
`_prom`, and `__` are reserved for use by Prometheus, as are the functions
listed above.
...@@ -24,10 +24,6 @@ title: Download ...@@ -24,10 +24,6 @@ title: Download
exporters listed at <a href="/docs/instrumenting/exporters/">Exporters exporters listed at <a href="/docs/instrumenting/exporters/">Exporters
and integrations</a>. and integrations</a>.
</p> </p>
<p>
After downloading a binary release suitable for your system, please follow
the <a href="/docs/introduction/getting_started/">installation instructions</a>.
</p>
<div class="panel panel-default download-selection"> <div class="panel panel-default download-selection">
<div class="panel-body"> <div class="panel-body">
......
...@@ -6,7 +6,7 @@ layout: jumbotron ...@@ -6,7 +6,7 @@ layout: jumbotron
<h1>From metrics to insight</h1> <h1>From metrics to insight</h1>
<p class="subtitle">Power your metrics and alerting with a leading<br>open-source monitoring solution.</p> <p class="subtitle">Power your metrics and alerting with a leading<br>open-source monitoring solution.</p>
<p> <p>
<a class="btn btn-default btn-lg" href="/docs/introduction/getting_started/" role="button">Get Started</a> <a class="btn btn-default btn-lg" href="/docs/prometheus/latest/getting_started/" role="button">Get Started</a>
<a class="btn btn-default btn-lg" href="/download" role="button">Download</a> <a class="btn btn-default btn-lg" href="/download" role="button">Download</a>
</p> </p>
</div> </div>
...@@ -25,7 +25,7 @@ layout: jumbotron ...@@ -25,7 +25,7 @@ layout: jumbotron
</a> </a>
</div> </div>
<div class="col-md-3 col-sm-6 col-xs-12 feature-item"> <div class="col-md-3 col-sm-6 col-xs-12 feature-item">
<a href="/docs/querying/basics/"> <a href="/docs/prometheus/latest/querying/basics/">
<h2><i class="fa fa-search"></i> Powerful queries</h2> <h2><i class="fa fa-search"></i> Powerful queries</h2>
<p>A flexible query language allows slicing and dicing of collected time series data in order to generate ad-hoc graphs, tables, and alerts.</p> <p>A flexible query language allows slicing and dicing of collected time series data in order to generate ad-hoc graphs, tables, and alerts.</p>
</a> </a>
...@@ -37,7 +37,7 @@ layout: jumbotron ...@@ -37,7 +37,7 @@ layout: jumbotron
</a> </a>
</div> </div>
<div class="col-md-3 col-sm-6 col-xs-12 feature-item"> <div class="col-md-3 col-sm-6 col-xs-12 feature-item">
<a href="/docs/operating/storage/"> <a href="/docs/prometheus/latest/storage/">
<h2><i class="fa fa-database"></i> Efficient storage</h2> <h2><i class="fa fa-database"></i> Efficient storage</h2>
<p>Prometheus stores time series in memory and on local disk in an efficient custom format. Scaling is achieved by functional sharding and federation.</p> <p>Prometheus stores time series in memory and on local disk in an efficient custom format. Scaling is achieved by functional sharding and federation.</p>
</a> </a>
...@@ -46,7 +46,7 @@ layout: jumbotron ...@@ -46,7 +46,7 @@ layout: jumbotron
<div class="row"> <div class="row">
<div class="col-md-3 col-sm-6 col-xs-12 feature-item"> <div class="col-md-3 col-sm-6 col-xs-12 feature-item">
<a href="/docs/operating/configuration/"> <a href="/docs/prometheus/latest/configuration/configuration/">
<h2><i class="fa fa-cog"></i> Simple operation</h2> <h2><i class="fa fa-cog"></i> Simple operation</h2>
<p>Each server is independent for reliability, relying only on local storage. Written in Go, all binaries are statically linked and easy to deploy.</p> <p>Each server is independent for reliability, relying only on local storage. Written in Go, all binaries are statically linked and easy to deploy.</p>
</a> </a>
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
<link rel="icon" type="image/png" href="/assets/favicons/favicon-96x96.png" sizes="96x96"> <link rel="icon" type="image/png" href="/assets/favicons/favicon-96x96.png" sizes="96x96">
<link rel="icon" type="image/png" href="/assets/favicons/favicon-16x16.png" sizes="16x16"> <link rel="icon" type="image/png" href="/assets/favicons/favicon-16x16.png" sizes="16x16">
<link rel="manifest" href="/assets/favicons/android-chrome-manifest.json"> <link rel="manifest" href="/assets/favicons/android-chrome-manifest.json">
<% if (c = @item[:repo_docs]) && c[:canonical] %><link rel="canonical" href="<%= @item.path.sub(c[:items_root], c[:canonical]) %>" /><% end %>
<meta name="msapplication-TileColor" content="#da532c"> <meta name="msapplication-TileColor" content="#da532c">
<meta name="msapplication-TileImage" content="/assets/favicons/mstile-144x144.png"> <meta name="msapplication-TileImage" content="/assets/favicons/mstile-144x144.png">
<meta name="theme-color" content="#ffffff"> <meta name="theme-color" content="#ffffff">
...@@ -43,7 +44,7 @@ ...@@ -43,7 +44,7 @@
<link href="/css/routing-tree-editor.css" rel="stylesheet"> <link href="/css/routing-tree-editor.css" rel="stylesheet">
<!-- Custom Fonts --> <!-- Custom Fonts -->
<link href="/assets/font-awesome-4.2.0/css/font-awesome.min.css" rel="stylesheet" type="text/css"> <link href="/assets/font-awesome-4.7.0/css/font-awesome.min.css" rel="stylesheet" type="text/css">
<link href='https://fonts.googleapis.com/css?family=Open+Sans' rel='stylesheet' type='text/css'> <link href='https://fonts.googleapis.com/css?family=Open+Sans' rel='stylesheet' type='text/css'>
<link href='https://fonts.googleapis.com/css?family=Lato:300,300italic,400' rel='stylesheet' type='text/css'> <link href='https://fonts.googleapis.com/css?family=Lato:300,300italic,400' rel='stylesheet' type='text/css'>
......
# TODO(ts): Rewrite data source and use one single instance to combine all
# different versions for a given path.
#
# RepoDocsDataSource checks out the documentation directory of an external
# git repository (via scripts/checkout.sh) and serves its files through the
# standard nanoc filesystem data source. Each configured instance maps one
# repository refspec to one items_root (e.g. /docs/prometheus/latest/).
class RepoDocsDataSource < ::Nanoc::DataSources::FilesystemUnified
  identifier :repo_docs

  # Default base directory for the sparse checkouts.
  PATH = "repositories"

  # Performs the sparse checkout before nanoc reads any items. Raises if the
  # checkout script exits non-zero so a broken refspec fails the build loudly.
  def up
    c = config[:config]
    %x(
      scripts/checkout.sh \
        -d "#{docs_root}" \
        -t "#{repo_path}" \
        "#{c[:repository]}" "#{c[:refspec]}"
    )
    if $?.exitstatus != 0
      raise "Couldn't checkout repository #{c.inspect}"
    end
    super
  end

  # Annotates every checked-out item with the version-group metadata used by
  # the navigation helpers (version picker, canonical links, hiding).
  def items
    c = config.fetch(:config)
    super.map do |item|
      item[:repo_docs] = c
      item[:repo_docs][:items_root] = config.fetch(:items_root)
      # TODO(ts): Remove assumptions about the path layout, rewrite datasource.
      # version_root is items_root with its last path component dropped,
      # e.g. /docs/prometheus/latest/ -> /docs/prometheus/.
      item[:repo_docs][:version_root] = config.fetch(:items_root).sub(%r{(.+/)[^/]+/\Z}, '\\1')
      # TODO(ts): Document that repo doc index.md will be ignored.
      if item.identifier == '/'
        item[:nav] = { strip: true }
      end
      item
    end
  end

  # Directory (inside the checkout) that the filesystem data source reads.
  def content_dir_name
    File.join(repo_path, docs_root)
  end

  # Repo docs never provide layouts; point at a non-existent directory.
  def layouts_dir_name
    'unsupported'
  end

  private

  # Directory within the remote repository that contains the docs.
  def docs_root
    c = config.fetch(:config)
    c.fetch(:root, 'docs/')
  end

  # Local checkout path: <base>/<repo basename>/<version name>.
  def repo_path
    c = config.fetch(:config)
    # Use the PATH constant as the default so the base directory is defined
    # in exactly one place.
    base = c.fetch(:repo_base, PATH)
    File.join(base, File.basename(c[:repository]), c[:name])
  end
end
# encoding: utf-8
require 'nokogiri'

# NormalizeLinks rewrites anchor hrefs in documentation checked out from an
# external repository so that links keep working when the pages are served
# from prometheus.io instead of GitHub:
#   * absolute prometheus.io URLs become site-relative paths,
#   * root-relative links point back to the source file on GitHub,
#   * relative *.md links become relative nanoc page paths.
class NormalizeLinks < ::Nanoc::Filter
  identifier :normalize_links

  DOMAIN = 'https://prometheus.io'

  def run(content, config = {})
    doc = Nokogiri::HTML(content)
    doc.xpath('//a').each do |link|
      href = link['href']
      # Anchors without an href attribute (e.g. named anchors) would make the
      # start_with? calls below raise NoMethodError on nil; leave them alone.
      next if href.nil?
      link['href'] =
        case
        when href.start_with?(DOMAIN)
          href[DOMAIN.size..-1]
        when href.start_with?('/')
          # TODO(ts): It's not guaranteed that a repository is hosted on Github.
          github_link_to(href, config)
        when href.include?('.md')
          relative_link_to(href)
        else
          href
        end
    end
    doc.to_s
  end

  # Builds a GitHub "blob" URL for a root-relative repository file path.
  def github_link_to(file, config)
    base = config[:repository]
    if base.end_with?('.git')
      base = base[0..-5]
    end
    File.join(base, 'blob', config[:refspec], file)
  end

  # Converts a relative link to a markdown file into a link to the
  # corresponding nanoc page (directory with trailing slash).
  def relative_link_to(link)
    # All nanoc pages end on a trailing slash.
    File.join("../", link.gsub(/\.md($|#)/, '/\\1'))
  end
end
# encoding: utf-8
require 'nokogiri'

# OutdatedContent prepends a warning banner to pages that belong to an old
# documentation version, pointing readers at the latest version. The target
# URL prefix is passed in via params[:outdated] (set in the site config).
class OutdatedContent < ::Nanoc::Filter
  identifier :outdated_content

  def run(content, params = {})
    doc = Nokogiri::HTML(content)
    # TODO(ts): We need to link to the same page or the first child without hardcoding /getting_started/.
    warning = %(<p>CAUTION: This page documents an old version of Prometheus.
    Check out the <a href="#{params[:outdated]}getting_started/">latest version</a>.</p>)
    body = doc.css('body')
    # Insert the warning before the first element of the body; if the body is
    # empty, append the warning as a parsed fragment instead.
    if first = body.children.first
      first.add_previous_sibling(warning)
    else
      body << Nokogiri::HTML::DocumentFragment.parse(warning)
    end
    doc.to_s
  end
end
...@@ -3,21 +3,41 @@ def nav(root_item, buffer='', layer=0) ...@@ -3,21 +3,41 @@ def nav(root_item, buffer='', layer=0)
children = nav_children(root_item) children = nav_children(root_item)
# Strip item from menu.
if root_item[:nav] && root_item[:nav][:strip]
children.each do |child|
nav(child, buffer, layer)
end
return buffer
end
classes = []
if nav_active?(root_item) if nav_active?(root_item)
buffer << "<li class=\"active\">" classes << 'active'
else classes << 'current' unless children.any?
buffer << "<li>"
end end
classes << 'hidden' if hidden?(root_item)
buffer << (classes.any? ? %(<li class="#{classes.join(' ')}">) : '<li>')
title = nav_title_of(root_item) title = nav_title_of(root_item)
if children.any?
if layer == 0 if layer == 0
buffer << "<span class=\"nav-header\"><i class=\"fa fa-#{root_item[:nav_icon]}\"></i> <span>#{title}</span></span>" buffer << "<span class=\"nav-header\"><i class=\"fa fa-#{root_item[:nav_icon]}\"></i> <span>#{title}</span></span>"
else
buffer << "<span class=\"nav-subheader\">#{title}</span>"
end
else else
buffer << link_to(title, root_item.path) buffer << link_to(title, root_item.path)
end end
if children.any? if children.any?
buffer << %(<ul class="nav #{nav_active?(root_item) ? 'active' : ''}">) active = nav_active?(root_item)
# TODO(ts): Remove the need to check for the layer.
if layer == 0 && children.any? { |i| Versioned.versioned?(i) }
buffer << Versioned.picker(children, @item_rep, active)
end
buffer << %(<ul class="nav #{active ? 'active' : ''}">)
children.each do |child| children.each do |child|
nav(child, buffer, layer + 1) nav(child, buffer, layer + 1)
...@@ -44,3 +64,49 @@ def nav_children(item) ...@@ -44,3 +64,49 @@ def nav_children(item)
.select { |child| !child[:is_hidden] && child.path } .select { |child| !child[:is_hidden] && child.path }
.sort_by { |child| child[:sort_rank] || 0 } .sort_by { |child| child[:sort_rank] || 0 }
end end
# hidden? returns true if the item is not part of the currently selected group.
def hidden?(item)
  return false unless Versioned.versioned?(item)
  !Versioned.current?(item[:repo_docs], @item_rep)
end
# Versioned repository docs related functions.
# TODO: Refactor and clean up all this code.
module Versioned
def self.versioned?(item)
!item[:repo_docs].nil?
end
# latest? returns true if the item is part of the version group "latest".
def self.latest?(opts)
opts[:name].include?('latest')
end
# current? returns true if the item is part of the selected version group. If
# no group is selected (e.g. when a page outside of the versioned docs is
# viewed), the latest version will be shown.
def self.current?(opts, page)
return false if opts.nil? || !page.respond_to?(:path)
if page.path.start_with?(opts[:version_root])
page.path.start_with?(opts[:items_root])
else
latest?(opts)
end
end
# picker returns the HTML code for a version select box.
def self.picker(items, page, active)
versions = items.map { |i| i[:repo_docs] }.uniq
options = versions.map do |v|
selected = current?(v, page) ? 'selected="selected"' : ''
# TODO(ts): Refactor and think about linking directly to the page of the same version.
first = items
.find { |i| i.path.start_with?(v[:items_root]) }
.children.sort_by { |c| c[:sort_rank] }.first
%(<option value="#{first.path}" #{selected}>#{v[:name]}</option>)
end
classes = active ? 'active' : ''
return %(<div class="#{classes}">Version: <select>#{options.join('')}</select></div>)
end
end
...@@ -61,6 +61,30 @@ data_sources: ...@@ -61,6 +61,30 @@ data_sources:
# The encoding to use for input files. If your input files are not in # The encoding to use for input files. If your input files are not in
# UTF-8 (which they should be!), change this. # UTF-8 (which they should be!), change this.
encoding: utf-8 encoding: utf-8
-
type: repo_docs
items_root: /docs/prometheus/2.0/
config:
name: '2.0'
repository: https://github.com/prometheus/prometheus.git
refspec: master
canonical: /docs/prometheus/latest/
-
type: repo_docs
items_root: /docs/prometheus/latest/
config:
name: 'latest (1.8)'
repository: https://github.com/prometheus/prometheus.git
refspec: release-1.8
-
type: repo_docs
items_root: /docs/prometheus/1.8/
config:
name: '1.8'
repository: https://github.com/prometheus/prometheus.git
refspec: release-1.8
canonical: /docs/prometheus/latest/
outdated: /docs/prometheus/latest/
- -
type: static type: static
items_root: /assets/ items_root: /assets/
......
#!/bin/bash
#
# Performs a sparse, shallow checkout of a single directory from a git
# repository into a target path. Re-running against an existing target is
# idempotent: the repo is re-fetched and hard-reset to the refspec.

# Abort on the first failing command so a failed fetch does not proceed to
# reset the working tree.
set -e

usage() {
  me=$(basename "$0")
  cat <<EOF
Usage:
  $me [ options ] <repository> [ <refspec> ]

Options:
  -d <directory>   Remote directory name of the sparse-checkout. Default: docs/
  -t <path>        Target path of the checkout. Default: repository basename
EOF
  exit 1
}

while getopts 'd:t:' OPT
do
  case ${OPT} in
    d)
      DIRECTORY="${OPTARG}"
      ;;
    t)
      TARGET="${OPTARG}"
      ;;
    *)
      usage
      ;;
  esac
done
shift $((OPTIND-1))

[ $# -ge 1 ] || usage

REPOSITORY="$1"
REFSPEC="$2"

if [[ -z "${DIRECTORY}" ]]; then
  DIRECTORY="docs/"
fi

if [[ -z "${TARGET}" ]]; then
  TARGET=$(basename "${REPOSITORY}")
fi

mkdir -p "${TARGET}"
cd "${TARGET}"

# Configure a sparse checkout so only ${DIRECTORY} is materialized.
git init
git config core.sparsecheckout true
echo "${DIRECTORY}" > .git/info/sparse-checkout

# Only add the remote on first run; subsequent runs reuse it.
if ! git remote | grep -q origin; then
  git remote add origin "${REPOSITORY}"
fi

# Shallow fetch keeps the clone small; hard reset makes re-runs idempotent.
git fetch --depth=1 origin "${REFSPEC}"
git reset --hard origin/"${REFSPEC}"
// Use CSS to hide elements without a delay during page load. // Use CSS to hide elements without a delay during page load.
$('head').append('<style type="text/css"> \ $('head').append('<style type="text/css"> \
.side-nav ul { display: none; } \ .side-nav ul, .side-nav div { display: none; } \
.side-nav ul.active { display: block; } \ .side-nav ul.active, .side-nav div.active { display: block; } \
</style>'); </style>');
$(document).ready(function() { $(document).ready(function() {
...@@ -9,17 +9,21 @@ $(document).ready(function() { ...@@ -9,17 +9,21 @@ $(document).ready(function() {
event.preventDefault(); event.preventDefault();
var visible = $(this).closest('li').children('ul.nav').is(':visible'); var visible = $(this).closest('li').children('ul.nav').is(':visible');
$(this).closest('ul').find('ul.nav').slideUp(200); $(this).closest('ul').find('ul.nav, div').slideUp(200);
if (!visible) { if (!visible) {
$(this).closest('li').children('ul.nav').slideDown(200); $(this).closest('li').children('ul.nav, div').slideDown(200);
} }
}; };
$('.nav-header span').each(function() { $('.nav-header span,.nav-subheader').each(function() {
var link = $('<a href="#">').text($(this).text()).click(navToggle); var link = $('<a href="#">').text($(this).text()).click(navToggle);
$(this).replaceWith(link); $(this).replaceWith(link);
}); });
$(".side-nav select").change(function() {
window.location.href = $(this).val();
});
var selected = function(value, want, group) { var selected = function(value, want, group) {
switch(want) { switch(want) {
case 'all': case 'all':
......
This diff is collapsed.
This source diff could not be displayed because it is too large. You can view the blob instead.
I hope you love Font Awesome. If you've found it useful, please do me a favor and check out my latest project,
Fort Awesome (https://fortawesome.com). It makes it easy to put the perfect icons on your website. Choose from our awesome,
comprehensive icon sets or copy and paste your own.
Please. Check it out.
-Dave Gandy
This diff is collapsed.
This diff is collapsed.
// Spinning Icons // Animated Icons
// -------------------------- // --------------------------
.@{fa-css-prefix}-spin { .@{fa-css-prefix}-spin {
...@@ -6,6 +6,11 @@ ...@@ -6,6 +6,11 @@
animation: fa-spin 2s infinite linear; animation: fa-spin 2s infinite linear;
} }
.@{fa-css-prefix}-pulse {
-webkit-animation: fa-spin 1s infinite steps(8);
animation: fa-spin 1s infinite steps(8);
}
@-webkit-keyframes fa-spin { @-webkit-keyframes fa-spin {
0% { 0% {
-webkit-transform: rotate(0deg); -webkit-transform: rotate(0deg);
......
...@@ -7,6 +7,15 @@ ...@@ -7,6 +7,15 @@
border-radius: .1em; border-radius: .1em;
} }
.@{fa-css-prefix}-pull-left { float: left; }
.@{fa-css-prefix}-pull-right { float: right; }
.@{fa-css-prefix} {
&.@{fa-css-prefix}-pull-left { margin-right: .3em; }
&.@{fa-css-prefix}-pull-right { margin-left: .3em; }
}
/* Deprecated as of 4.4.0 */
.pull-right { float: right; } .pull-right { float: right; }
.pull-left { float: left; } .pull-left { float: left; }
......
...@@ -3,9 +3,10 @@ ...@@ -3,9 +3,10 @@
.@{fa-css-prefix} { .@{fa-css-prefix} {
display: inline-block; display: inline-block;
font: normal normal normal 14px/1 FontAwesome; // shortening font declaration font: normal normal normal @fa-font-size-base/@fa-line-height-base FontAwesome; // shortening font declaration
font-size: inherit; // can't have font-size inherit on line above, so need to override font-size: inherit; // can't have font-size inherit on line above, so need to override
text-rendering: auto; // optimizelegibility throws things off #1094 text-rendering: auto; // optimizelegibility throws things off #1094
-webkit-font-smoothing: antialiased; -webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale; -moz-osx-font-smoothing: grayscale;
} }
/*! /*!
* Font Awesome 4.2.0 by @davegandy - http://fontawesome.io - @fontawesome * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome
* License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
*/ */
...@@ -11,7 +11,8 @@ ...@@ -11,7 +11,8 @@
@import "fixed-width.less"; @import "fixed-width.less";
@import "list.less"; @import "list.less";
@import "bordered-pulled.less"; @import "bordered-pulled.less";
@import "spinning.less"; @import "animated.less";
@import "rotated-flipped.less"; @import "rotated-flipped.less";
@import "stacked.less"; @import "stacked.less";
@import "icons.less"; @import "icons.less";
@import "screen-reader.less";
...@@ -3,23 +3,58 @@ ...@@ -3,23 +3,58 @@
.fa-icon() { .fa-icon() {
display: inline-block; display: inline-block;
font: normal normal normal 14px/1 FontAwesome; // shortening font declaration font: normal normal normal @fa-font-size-base/@fa-line-height-base FontAwesome; // shortening font declaration
font-size: inherit; // can't have font-size inherit on line above, so need to override font-size: inherit; // can't have font-size inherit on line above, so need to override
text-rendering: auto; // optimizelegibility throws things off #1094 text-rendering: auto; // optimizelegibility throws things off #1094
-webkit-font-smoothing: antialiased; -webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale; -moz-osx-font-smoothing: grayscale;
} }
.fa-icon-rotate(@degrees, @rotation) { .fa-icon-rotate(@degrees, @rotation) {
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation); -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=@{rotation})";
-webkit-transform: rotate(@degrees); -webkit-transform: rotate(@degrees);
-ms-transform: rotate(@degrees); -ms-transform: rotate(@degrees);
transform: rotate(@degrees); transform: rotate(@degrees);
} }
.fa-icon-flip(@horiz, @vert, @rotation) { .fa-icon-flip(@horiz, @vert, @rotation) {
filter: progid:DXImageTransform.Microsoft.BasicImage(rotation=@rotation, mirror=1); -ms-filter: "progid:DXImageTransform.Microsoft.BasicImage(rotation=@{rotation}, mirror=1)";
-webkit-transform: scale(@horiz, @vert); -webkit-transform: scale(@horiz, @vert);
-ms-transform: scale(@horiz, @vert); -ms-transform: scale(@horiz, @vert);
transform: scale(@horiz, @vert); transform: scale(@horiz, @vert);
} }
// Only display content to screen readers. A la Bootstrap 4.
//
// See: http://a11yproject.com/posts/how-to-hide-content/
.sr-only() {
position: absolute;
width: 1px;
height: 1px;
padding: 0;
margin: -1px;
overflow: hidden;
clip: rect(0,0,0,0);
border: 0;
}
// Use in conjunction with .sr-only to only display content when it's focused.
//
// Useful for "Skip to main content" links; see http://www.w3.org/TR/2013/NOTE-WCAG20-TECHS-20130905/G1
//
// Credit: HTML5 Boilerplate
.sr-only-focusable() {
&:active,
&:focus {
position: static;
width: auto;
height: auto;
margin: 0;
overflow: visible;
clip: auto;
}
}
...@@ -5,10 +5,11 @@ ...@@ -5,10 +5,11 @@
font-family: 'FontAwesome'; font-family: 'FontAwesome';
src: url('@{fa-font-path}/fontawesome-webfont.eot?v=@{fa-version}'); src: url('@{fa-font-path}/fontawesome-webfont.eot?v=@{fa-version}');
src: url('@{fa-font-path}/fontawesome-webfont.eot?#iefix&v=@{fa-version}') format('embedded-opentype'), src: url('@{fa-font-path}/fontawesome-webfont.eot?#iefix&v=@{fa-version}') format('embedded-opentype'),
url('@{fa-font-path}/fontawesome-webfont.woff2?v=@{fa-version}') format('woff2'),
url('@{fa-font-path}/fontawesome-webfont.woff?v=@{fa-version}') format('woff'), url('@{fa-font-path}/fontawesome-webfont.woff?v=@{fa-version}') format('woff'),
url('@{fa-font-path}/fontawesome-webfont.ttf?v=@{fa-version}') format('truetype'), url('@{fa-font-path}/fontawesome-webfont.ttf?v=@{fa-version}') format('truetype'),
url('@{fa-font-path}/fontawesome-webfont.svg?v=@{fa-version}#fontawesomeregular') format('svg'); url('@{fa-font-path}/fontawesome-webfont.svg?v=@{fa-version}#fontawesomeregular') format('svg');
// src: url('@{fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts // src: url('@{fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
font-weight: normal; font-weight: normal;
font-style: normal; font-style: normal;
} }
// Screen Readers
// -------------------------
.sr-only { .sr-only(); }
.sr-only-focusable { .sr-only-focusable(); }
...@@ -6,6 +6,11 @@ ...@@ -6,6 +6,11 @@
animation: fa-spin 2s infinite linear; animation: fa-spin 2s infinite linear;
} }
.#{$fa-css-prefix}-pulse {
-webkit-animation: fa-spin 1s infinite steps(8);
animation: fa-spin 1s infinite steps(8);
}
@-webkit-keyframes fa-spin { @-webkit-keyframes fa-spin {
0% { 0% {
-webkit-transform: rotate(0deg); -webkit-transform: rotate(0deg);
......
...@@ -7,6 +7,15 @@ ...@@ -7,6 +7,15 @@
border-radius: .1em; border-radius: .1em;
} }
.#{$fa-css-prefix}-pull-left { float: left; }
.#{$fa-css-prefix}-pull-right { float: right; }
.#{$fa-css-prefix} {
&.#{$fa-css-prefix}-pull-left { margin-right: .3em; }
&.#{$fa-css-prefix}-pull-right { margin-left: .3em; }
}
/* Deprecated as of 4.4.0 */
.pull-right { float: right; } .pull-right { float: right; }
.pull-left { float: left; } .pull-left { float: left; }
......
...@@ -3,9 +3,10 @@ ...@@ -3,9 +3,10 @@
.#{$fa-css-prefix} { .#{$fa-css-prefix} {
display: inline-block; display: inline-block;
font: normal normal normal 14px/1 FontAwesome; // shortening font declaration font: normal normal normal #{$fa-font-size-base}/#{$fa-line-height-base} FontAwesome; // shortening font declaration
font-size: inherit; // can't have font-size inherit on line above, so need to override font-size: inherit; // can't have font-size inherit on line above, so need to override
text-rendering: auto; // optimizelegibility throws things off #1094 text-rendering: auto; // optimizelegibility throws things off #1094
-webkit-font-smoothing: antialiased; -webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale; -moz-osx-font-smoothing: grayscale;
} }
...@@ -5,10 +5,11 @@ ...@@ -5,10 +5,11 @@
font-family: 'FontAwesome'; font-family: 'FontAwesome';
src: url('#{$fa-font-path}/fontawesome-webfont.eot?v=#{$fa-version}'); src: url('#{$fa-font-path}/fontawesome-webfont.eot?v=#{$fa-version}');
src: url('#{$fa-font-path}/fontawesome-webfont.eot?#iefix&v=#{$fa-version}') format('embedded-opentype'), src: url('#{$fa-font-path}/fontawesome-webfont.eot?#iefix&v=#{$fa-version}') format('embedded-opentype'),
url('#{$fa-font-path}/fontawesome-webfont.woff2?v=#{$fa-version}') format('woff2'),
url('#{$fa-font-path}/fontawesome-webfont.woff?v=#{$fa-version}') format('woff'), url('#{$fa-font-path}/fontawesome-webfont.woff?v=#{$fa-version}') format('woff'),
url('#{$fa-font-path}/fontawesome-webfont.ttf?v=#{$fa-version}') format('truetype'), url('#{$fa-font-path}/fontawesome-webfont.ttf?v=#{$fa-version}') format('truetype'),
url('#{$fa-font-path}/fontawesome-webfont.svg?v=#{$fa-version}#fontawesomeregular') format('svg'); url('#{$fa-font-path}/fontawesome-webfont.svg?v=#{$fa-version}#fontawesomeregular') format('svg');
//src: url('#{$fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts // src: url('#{$fa-font-path}/FontAwesome.otf') format('opentype'); // used when developing fonts
font-weight: normal; font-weight: normal;
font-style: normal; font-style: normal;
} }
// Screen Readers
// -------------------------
.sr-only { @include sr-only(); }
.sr-only-focusable { @include sr-only-focusable(); }
/*! /*!
* Font Awesome 4.2.0 by @davegandy - http://fontawesome.io - @fontawesome * Font Awesome 4.7.0 by @davegandy - http://fontawesome.io - @fontawesome
* License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License) * License - http://fontawesome.io/license (Font: SIL OFL 1.1, CSS: MIT License)
*/ */
...@@ -11,7 +11,8 @@ ...@@ -11,7 +11,8 @@
@import "fixed-width"; @import "fixed-width";
@import "list"; @import "list";
@import "bordered-pulled"; @import "bordered-pulled";
@import "spinning"; @import "animated";
@import "rotated-flipped"; @import "rotated-flipped";
@import "stacked"; @import "stacked";
@import "icons"; @import "icons";
@import "screen-reader";
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment