prometheus: add package

Compile tested: x86/64, aarch64_cortex-a53
Run tested: x86/64

Signed-off-by: Paul Spooren <spooren@informatik.uni-leipzig.de>
Signed-off-by: Paul Spooren <mail@aparcar.org>
This commit is contained in:
Paul Spooren
2018-02-10 20:38:21 +01:00
committed by Paul Spooren
parent fbffa348ef
commit 5290727e64
5 changed files with 154 additions and 0 deletions
+27
View File
@@ -0,0 +1,27 @@
#!/bin/sh /etc/rc.common
# OpenWrt procd init script for the Prometheus monitoring server.
# Options are read from UCI config /etc/config/prometheus
# (section "prometheus"); packaged defaults are used as fallbacks.

START=70
USE_PROCD=1

PROG=/usr/bin/prometheus
CONFFILE=/etc/prometheus.yml

start_service() {
	local config_file
	local storage_tsdb_path
	local web_listen_address

	# Load UCI options, falling back to the packaged defaults
	# when an option is absent.
	config_load "prometheus"
	config_get config_file prometheus config_file "$CONFFILE"
	config_get storage_tsdb_path prometheus storage_tsdb_path "/data"
	config_get web_listen_address prometheus web_listen_address "127.0.0.1:9090"

	procd_open_instance
	procd_set_param command "$PROG"
	procd_append_param command --config.file="$config_file"
	procd_append_param command --storage.tsdb.path="$storage_tsdb_path"
	procd_append_param command --web.listen-address="$web_listen_address"
	# Drop privileges to the dedicated "prometheus" user.
	# Must be procd_set_param: procd_append_param only appends to a
	# param that was previously set, so the original
	# "procd_append_param user" silently failed to set the user.
	procd_set_param user "prometheus"
	# Restart the instance on reload when the config file content changed.
	procd_set_param file "$config_file"
	procd_set_param respawn
	procd_close_instance
}

service_triggers() {
	# Reload the service when its UCI configuration changes.
	procd_add_reload_trigger "prometheus"
}
+29
View File
@@ -0,0 +1,29 @@
# Default Prometheus configuration shipped with this package.
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']
@@ -0,0 +1,13 @@
#!/bin/sh
# First-boot defaults: seed the UCI configuration for prometheus.

# Ensure the UCI config file exists so uci can operate on it.
[ -e /etc/config/prometheus ] || touch /etc/config/prometheus

# Create the "prometheus" section only when it is not already present,
# so an existing configuration is never overwritten.
if ! uci -q get prometheus.prometheus; then
	uci -q batch <<EOF
set prometheus.prometheus=prometheus
set prometheus.prometheus.config_file='/etc/prometheus.yml'
set prometheus.prometheus.storage_tsdb_path='/data'
set prometheus.prometheus.web_listen_address='127.0.0.1:9090'
commit prometheus
EOF
fi
+29
View File
@@ -0,0 +1,29 @@
# Default Prometheus configuration shipped with this package.
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ['localhost:9090']