
Prometheus default port: 9090

Download prometheus-3.0.0.linux-amd64.tar.gz

tar -zxvf prometheus-3.0.0.linux-amd64.tar.gz

mv prometheus-3.0.0.linux-amd64 prometheus-3

vim /etc/systemd/system/prometheus.service

ini
[Unit]
Description=Prometheus
Wants=network-online.target
After=network-online.target

[Service]
User=ubuntu
Group=ubuntu
Type=simple
ExecStart=/home/ubuntu/jiankong/prometheus/prometheus-3/prometheus \
  --config.file /home/ubuntu/jiankong/prometheus/prometheus-3/prometheus.yml \
  --storage.tsdb.path /home/ubuntu/jiankong/prometheus/prometheus-3/data \
  --web.listen-address=0.0.0.0:9090

[Install]
WantedBy=multi-user.target

vim prometheus.yml

yml
# my global config
global:
  scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: "prometheus"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ["localhost:9090"]
  # Linux server (node_exporter) monitoring
  - job_name: "node_ex"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
      - targets: ["localhost:9100"]
  # MySQL monitoring
  - job_name: "mysql_ex"

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
  
    static_configs:
      - targets: ["localhost:9104"]
sh
## Start/stop commands

sudo  systemctl status prometheus
sudo  systemctl start prometheus
sudo  systemctl restart prometheus
sudo  systemctl stop prometheus
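
After creating or editing the unit file, a daemon-reload is needed, and the service can be enabled at boot; Prometheus's built-in health endpoint confirms it is serving:

sh
sudo systemctl daemon-reload
sudo systemctl enable prometheus
curl -s http://localhost:9090/-/healthy    # expect an HTTP 200 / "Healthy" response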

node_exporter

Download node_exporter-1.8.2.linux-amd64.tar.gz

tar -zxvf node_exporter-1.8.2.linux-amd64.tar.gz

mv node_exporter-1.8.2.linux-amd64 node_exporter

vim /usr/lib/systemd/system/node_exporter.service

ini
[Unit]
Description=node_exporter
After=network.target

[Service]
User=ubuntu
Group=ubuntu
ExecStart=/home/ubuntu/jiankong/prometheus/node_exporter/node_exporter \
          --web.listen-address=:9100 \
          --collector.systemd \
          --collector.systemd.unit-include=(sshd|nginx).service \
          --collector.processes \
          --collector.tcpstat

[Install]
WantedBy=multi-user.target
sh
## Listening port: 9100
## Start/stop commands
sudo  systemctl status node_exporter
sudo  systemctl start node_exporter
sudo  systemctl restart node_exporter
sudo  systemctl stop node_exporter
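
A quick check that node_exporter is actually serving metrics on 9100 (assuming the unit above started cleanly):

sh
sudo systemctl daemon-reload && sudo systemctl enable node_exporter
curl -s http://localhost:9100/metrics | grep -c '^node_'    # should print a large count of node_* samples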

mysqld_exporter

Download mysqld_exporter-0.16.0.linux-amd64.tar.gz

vim /usr/lib/systemd/system/mysqld_exporter.service

ini
[Unit]
Description=mysqld_exporter
After=network.target

[Service]
#User=ubuntu
#Group=ubuntu
ExecStart=/home/ubuntu/jiankong/prometheus/mysqld_exporter/mysqld_exporter \
--collect.info_schema.processlist \
--collect.info_schema.innodb_tablespaces \
--collect.info_schema.innodb_metrics \
--collect.perf_schema.tableiowaits \
--collect.perf_schema.indexiowaits \
--collect.perf_schema.tablelocks \
--collect.engine_innodb_status \
--collect.perf_schema.file_events \
--collect.binlog_size \
--collect.info_schema.clientstats \
--collect.perf_schema.eventswaits \
--config.my-cnf=/home/ubuntu/jiankong/prometheus/mysqld_exporter/my.cnf
[Install]
WantedBy=multi-user.target

vim my.cnf

ini
[client]
user=root 
password=123456
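
Using root in my.cnf works, but the mysqld_exporter documentation recommends a dedicated, limited account; a sketch (the 'exporter' user name and password are placeholders):

sh
mysql -uroot -p -e "CREATE USER 'exporter'@'localhost' IDENTIFIED BY 'XXXXXXXX' WITH MAX_USER_CONNECTIONS 3;"
mysql -uroot -p -e "GRANT PROCESS, REPLICATION CLIENT, SELECT ON *.* TO 'exporter'@'localhost';"
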
sh
## Default port: 9104
sudo systemctl status mysqld_exporter
sudo systemctl restart mysqld_exporter
sudo systemctl start mysqld_exporter
sudo systemctl stop mysqld_exporter

grafana

Download grafana-enterprise-11.3.1.linux-amd64.tar.gz

tar -zxvf grafana-enterprise-11.3.1.linux-amd64.tar.gz

mv grafana-enterprise-11.3.1.linux-amd64 grafana

vim /etc/systemd/system/grafana.service

ini
[Unit]
Description=grafana
 
[Service]
ExecStart=/home/ubuntu/jiankong/prometheus/grafana/bin/grafana-server -homepath=/home/ubuntu/jiankong/prometheus/grafana
ExecReload=/bin/kill -HUP $MAINPID
KillMode=process
Restart=on-failure
[Install]
WantedBy=multi-user.target
sh
##  vim defaults.ini
##  Handle cross-origin (CORS) issues
root_url = %(protocol)s://%(domain)s:%(http_port)s/grafana
allowed_origins = *
allow_embedding = true
sh
## Port 5000 (Grafana's stock default is 3000; http_port is set to 5000 in defaults.ini here)
sudo systemctl status grafana
sudo systemctl restart grafana
sudo systemctl start grafana
sudo systemctl stop grafana
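
To confirm Grafana is up on the port configured here (5000), its built-in health endpoint can be queried:

sh
sudo systemctl daemon-reload && sudo systemctl enable grafana
curl -s http://localhost:5000/api/health    # should return JSON containing "database": "ok"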

nginx reverse proxy to handle cross-origin requests

sh
 location /grafana {
			root   html;
			index  index.html index.htm;
			add_header 'Access-Control-Allow-Origin' '*';
			add_header Access-Control-Allow-Methods GET,POST,OPTIONS,DELETE;
			add_header 'Access-Control-Allow-Headers' 'userId,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
			proxy_pass http://127.0.0.1:5000;
			rewrite ^/grafana/(.*) /$1 break;
			proxy_set_header X-Real-IP $remote_addr;
			proxy_set_header   Host $host;
			# WebSocket handling
			proxy_http_version 1.1;
			proxy_set_header Upgrade $http_upgrade;
			proxy_set_header Connection "upgrade";
			# Fix Grafana redirects: proxy_redirect <Grafana default address> <nginx proxy address>
			# proxy_redirect http://localhost:3000 http://10.0.204.66:12300;
        }
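
With this location block in place, Grafana should also be reachable through nginx; a quick check (a sketch assuming nginx itself listens on port 80):

sh
sudo nginx -t && sudo nginx -s reload
curl -I http://localhost/grafana/api/health    # expect HTTP 200 via the proxy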

Grafana dashboard templates

Linux: 8919, 1860, 15172, 10242

nginx exporter

Reference: https://blog.csdn.net/qq_18138507/article/details/142816442

tar -zxvf nginx-prometheus-exporter_1.3.0_linux_amd64.tar.gz -C jiankong/nginx_exporter

vim /etc/systemd/system/nginx_exporter.service

ini
[Unit]
Description=nginx-prometheus-exporter
After=network.target
[Service]
Type=simple
User=root
Group=root
Restart=always
ExecStart=/root/jiankong/nginx_exporter/nginx-prometheus-exporter --nginx.scrape-uri=http://localhost/status --web.listen-address=:9113
  
[Install]
WantedBy=multi-user.target
sh

## Port: 9113
sudo systemctl status nginx_exporter
sudo systemctl restart nginx_exporter
sudo systemctl start nginx_exporter
sudo systemctl stop nginx_exporter
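
The --nginx.scrape-uri above assumes nginx already exposes a stub_status page at /status; both that endpoint and the exporter can be verified quickly (a sketch):

sh
curl -s http://localhost/status             # stub_status output, starts with "Active connections:"
curl -s http://localhost:9113/metrics | grep '^nginx_up'    # nginx_up 1 means the exporter can reach nginx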

Dashboard templates
12708  2949

goaccess for monitoring nginx

https://www.goaccess.cc/?mod=man

sh
yum install goaccess 
## Generate a monitoring report
goaccess -f access.log -o report.html --log-format=COMBINED
## Chinese-language report
LANG="zh_CN.UTF-8" goaccess -f access.log --log-format=COMBINED  -o report.html
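
For a continuously updated dashboard rather than a one-off report, goaccess also has a real-time HTML mode; a sketch (the log and output paths are examples):

sh
goaccess /var/log/nginx/access.log -o /var/www/html/report.html --log-format=COMBINED --real-time-html --daemonize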

redis exporter

tar -zxvf redis_exporter-v1.66.0.linux-amd64.tar.gz -C ./jiankong/redis_exporter

vim /etc/systemd/system/redis_exporter.service

ini
# If the Redis instance requires a password, also add to ExecStart:
#   -redis.password 123456
[Unit]
Description=redis_exporter
After=network.target
 
[Service]
Type=simple
User=root
ExecStart=/root/jiankong/redis_exporter/redis_exporter -redis.addr 127.0.0.1:6379 --web.listen-address=:9121
Restart=on-failure
 
[Install]
WantedBy=multi-user.target
sh
# systemctl daemon-reload
# systemctl start redis_exporter
# systemctl status redis_exporter
# systemctl enable redis_exporter
# netstat -nltp|grep 9121
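
A quick check that the exporter is reachable and can talk to Redis (on the 9121 listen port configured above):

sh
curl -s http://localhost:9121/metrics | grep '^redis_up'    # redis_up 1 means the Redis connection works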

Commonly used dashboard templates
11835
14091
763

Monitoring metrics

sh
 ## Inspect Redis memory usage
 redis-cli INFO memory
yaml
# Memory currently used by Redis (bytes)
used_memory:1691408
used_memory_human:1.61M
# Physical memory (RSS) occupied by the Redis process, in bytes
used_memory_rss:3489792
used_memory_rss_human:3.33M
# Historical peak of memory usage
used_memory_peak:1725512
used_memory_peak_human:1.65M
# Current memory usage as a percentage of the peak: (used_memory / used_memory_peak) * 100
# i.e. how close current usage sits to the historical maximum
used_memory_peak_perc:98.02%
## Memory overhead beyond the stored data itself (key/value payloads):
## metadata for internal data structures, indexes, internal module bookkeeping, client-connection buffers, etc.
used_memory_overhead:1272816
used_memory_startup:887280
used_memory_dataset:418592
# Share of total memory that holds the dataset itself:
# (used_memory - used_memory_overhead) / used_memory * 100
# a low used_memory_dataset_perc means overhead dominates; focus on reducing used_memory_overhead
used_memory_dataset_perc:52.06%
allocator_allocated:1641000
allocator_active:3459072
allocator_resident:3459072
##  Total physical memory of the host
total_system_memory:1907814400
total_system_memory_human:1.78G
# Memory used by the embedded Lua scripting engine
used_memory_lua:30720
used_memory_lua_human:30.00K
used_memory_scripts:0
used_memory_scripts_human:0B
number_of_cached_scripts:0
## Configured maxmemory limit for Redis
maxmemory:500000000
maxmemory_human:476.84M
##  Redis eviction policy
maxmemory_policy:noeviction
allocator_frag_ratio:2.11
allocator_frag_bytes:1818072
allocator_rss_ratio:1.00
allocator_rss_bytes:0
rss_overhead_ratio:1.01
rss_overhead_bytes:30720
mem_fragmentation_ratio:2.13
mem_fragmentation_bytes:1848792
# Memory not counted toward the maxmemory/eviction limit
mem_not_counted_for_evict:0
mem_replication_backlog:0
# Memory used by client connections from replica (slave) nodes
mem_clients_slaves:0
mem_clients_normal:0
# Memory used by the AOF (Append-Only File) buffer
mem_aof_buffer:0
mem_allocator:libc
active_defrag_running:0
lazyfree_pending_objects:0
lazyfreed_objects:0
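
The same INFO fields show up in Grafana via redis_exporter metrics; a sketch of the mapping (exporter port as configured above):

sh
curl -s http://localhost:9121/metrics | grep -E '^redis_memory_(used|max)_bytes'
## redis_memory_used_bytes ~ used_memory
## redis_memory_max_bytes  ~ maxmemory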

https://blog.csdn.net/u012811805/article/details/143502018