
Advanced Docker applications: resource monitoring



I've been busy building a Docker platform lately, so it has been a while since my last post. Today I'd like to share how the platform monitors container resources and presents the data graphically.

Docker 1.5 ships with a stats command that reports a container's CPU usage, memory usage, and network traffic, but using it comes with two prerequisites:

1. You must be running Docker 1.5.

2. The containers must be attached to the default docker0 bridge (with a non-native bridge such as OVS, no network data is returned).
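
For reference, here is a minimal sketch of reading one sample from the stats stream with docker-py; it assumes docker-py is installed and the daemon listens on the default unix socket with API version 1.17 (Docker 1.5). The container name yangjing-test is the one shown in the screenshots below.

import json
from docker import Client

# One point-in-time sample from the stats stream.
client = Client(base_url='unix://var/run/docker.sock', version='1.17')
stats_stream = client.stats('yangjing-test')   # generator of JSON strings
sample = json.loads(next(stats_stream))
print sample['cpu_stats']['cpu_usage']['total_usage']
print sample['memory_stats']['usage'], sample['memory_stats']['limit']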

My monitoring runs on Docker 1.5, and instead of relying on the stats network counters it reads the container's rx/tx byte counts with ifconfig, which removes the requirement of using the default Docker bridge to obtain traffic data.
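
The idea in isolation looks roughly like the sketch below; eth1 is the interface name my containers use, the regex assumes the classic net-tools ifconfig output, and the one-second delta gives KB/s.

import re, subprocess, time

def read_bytes(name, iface='eth1'):
    # Run ifconfig inside the container and pull the RX/TX byte counters.
    out = subprocess.check_output('docker exec %s ifconfig %s' % (name, iface), shell=True)
    rx = int(re.search(r'RX bytes:(\d+)', out).group(1))
    tx = int(re.search(r'TX bytes:(\d+)', out).group(1))
    return rx, tx

rx1, tx1 = read_bytes('yangjing-test')
time.sleep(1)
rx2, tx2 = read_bytes('yangjing-test')
print 'rx: %s KB/s, tx: %s KB/s' % ((rx2 - rx1) / 1024, (tx2 - tx1) / 1024)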

Here is what the container resource monitoring looks like.

1. The resource monitoring page in the platform


2. CPU usage of the container yangjing-test


3. Memory usage


4. Container network traffic


Below is the script that collects the monitoring data.

It is written in Python. Since it writes the data into MySQL, you need the MySQLdb module installed and an account set up on the MySQL server.
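
The post does not include the table definitions; the sketch below is a hypothetical docker_monitor schema inferred from the INSERT statement in the script further down, so the column names match but the column types are my assumption.

import MySQLdb

# Hypothetical schema for the docker_monitor table, inferred from the INSERT
# statement in the collection script; column types are assumptions.
conn = MySQLdb.connect(host='10.10.27.10', user='ops', passwd='...',  # your credentials
                       port=3306, charset='utf8')
cur = conn.cursor()
conn.select_db('devops')
cur.execute("""
CREATE TABLE IF NOT EXISTS docker_monitor (
    id INT AUTO_INCREMENT PRIMARY KEY,
    container_id INT,
    cpu_percent FLOAT,
    memory_usage BIGINT,
    memory_limit BIGINT,
    memory_percent FLOAT,
    network_rx_packets BIGINT,
    network_tx_packets BIGINT,
    collect_time DATETIME
)""")
conn.commit()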

[root@ip-10-10-29-201 code]# cat collect_docker_monitor_data_multi.py

#!/usr/bin/env python
#-*- coding: utf-8 -*-
#author:Deng Lei
#email: dl528888@gmail.com
from docker import Client
import os
import socket, struct, fcntl
import etcd
import MySQLdb
import re
import multiprocessing
import subprocess
import time

def get_local_ip(iface='em1'):
    # Look up the IPv4 address of a local interface via the SIOCGIFADDR ioctl.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sockfd = sock.fileno()
    SIOCGIFADDR = 0x8915
    ifreq = struct.pack('16sH14s', iface, socket.AF_INET, '\x00'*14)
    try:
        res = fcntl.ioctl(sockfd, SIOCGIFADDR, ifreq)
    except IOError:
        return None
    ip = struct.unpack('16sH2x4s8x', res)[2]
    return socket.inet_ntoa(ip)

def docker_container_all():
    # Names of all containers (running or not), with the leading '/' stripped.
    docker_container=docker_client.containers(all=True)
    container_name=[]
    container_all_name=[]
    for i in docker_container:
        container_name.append(i['Names'])
    for b in container_name:
        for c in b:
            container_all_name.append(c[1::])
    return container_all_name

def docker_container_run():
    # Names of running containers only (Status starts with 'Up').
    docker_container=docker_client.containers(all=True)
    container_name=[]
    container_run_name=[]
    for i in docker_container:
        if re.match('Up',i['Status']):
            container_name.append(i['Names'])
    for b in container_name:
        for c in b:
            container_run_name.append(c[1::])
    return container_run_name

def check_container_stats(name):
    # Take two consecutive samples from the stats stream and compute CPU and
    # memory usage from the deltas.
    container_collect=docker_client.stats(name)
    old_result=eval(container_collect.next())
    new_result=eval(container_collect.next())
    container_collect.close()
    cpu_total_usage=new_result['cpu_stats']['cpu_usage']['total_usage'] - old_result['cpu_stats']['cpu_usage']['total_usage']
    cpu_system_usage=new_result['cpu_stats']['system_cpu_usage'] - old_result['cpu_stats']['system_cpu_usage']
    cpu_num=len(old_result['cpu_stats']['cpu_usage']['percpu_usage'])
    cpu_percent=round((float(cpu_total_usage)/float(cpu_system_usage))*cpu_num*100.0,2)
    mem_usage=new_result['memory_stats']['usage']
    mem_limit=new_result['memory_stats']['limit']
    mem_percent=round(float(mem_usage)/float(mem_limit)*100.0,2)
    #network_rx_packets=new_result['network']['rx_packets']
    #network_tx_packets=new_result['network']['tx_packets']
    # Read the container's rx/tx byte counters via ifconfig, so traffic can be
    # collected even when the container is not on the default docker0 bridge.
    network_check_command="""docker exec %s ifconfig eth1|grep bytes|awk -F ':' '{print $2,$3}'|awk -F '(' '{print $1,$2}'|awk -F ')' '{print $1}'|awk '{print "{\\"rx\\":"$1",\\"tx\\":"$2"}"}'"""%name
    network_old_result=eval(((subprocess.Popen(network_check_command,shell=True,stdout=subprocess.PIPE)).stdout.readlines()[0]).strip('\n'))
    time.sleep(1)
    network_new_result=eval(((subprocess.Popen(network_check_command,shell=True,stdout=subprocess.PIPE)).stdout.readlines()[0]).strip('\n'))
    #unit: KB (delta over one second)
    network_rx_packets=(int(network_new_result['rx']) - int(network_old_result['rx']))/1024
    network_tx_packets=(int(network_new_result['tx']) - int(network_old_result['tx']))/1024
    collect_time=str(new_result['read'].split('.')[0].split('T')[0])+' '+str(new_result['read'].split('.')[0].split('T')[1])
    msg={'Container_name':name,'Cpu_percent':cpu_percent,'Memory_usage':mem_usage,'Memory_limit':mem_limit,'Memory_percent':mem_percent,'Network_rx_packets':network_rx_packets,'Network_tx_packets':network_tx_packets,'Collect_time':collect_time}
    #write_mysql(msg)
    return msg

def write_mysql(msg):
    # Map the container name to its id in the platform database, then insert
    # one row of monitoring data.
    container_name=msg['Container_name']
    search_sql="select dc.id from docker_containers dc,docker_physics dp where dc.container_name='%s' and dp.physics_internal_ip='%s';"%(container_name,local_ip)
    n=mysql_cur.execute(search_sql)
    container_id=[int(i[0]) for i in mysql_cur.fetchall()][0]
    insert_sql="insert into docker_monitor(container_id,cpu_percent,memory_usage,memory_limit,memory_percent,network_rx_packets,network_tx_packets,collect_time) values('%s','%s','%s','%s','%s','%s','%s','%s');"%(container_id,msg['Cpu_percent'],msg['Memory_usage'],msg['Memory_limit'],msg['Memory_percent'],msg['Network_rx_packets'],msg['Network_tx_packets'],msg['Collect_time'])
    n=mysql_cur.execute(insert_sql)

if __name__ == "__main__":
    local_ip=get_local_ip('ovs1')
    if local_ip is None:
        local_ip=get_local_ip('em1')
    etcd_client=etcd.Client(host='127.0.0.1', port=4001)
    docker_client = Client(base_url='unix://var/run/docker.sock', version='1.17')
    mysql_conn=MySQLdb.connect(host='10.10.27.10',user='ops',passwd='1FE@!#@NVE',port=3306,charset="utf8")
    mysql_cur=mysql_conn.cursor()
    mysql_conn.select_db('devops')
    #docker_container_all_name=docker_container_all()
    docker_container_run_name=docker_container_run()
    # Scale the worker pool with the number of running containers.
    if len(docker_container_run_name) == 1:
        num=1
    elif len(docker_container_run_name) >= 4 and len(docker_container_run_name) <= 8:
        num=4
    elif len(docker_container_run_name) > 8 and len(docker_container_run_name) <= 15:
        num=8
    elif len(docker_container_run_name) > 15 and len(docker_container_run_name) <= 30:
        num=20
    else:
        num=40
    pool = multiprocessing.Pool(processes=num)
    scan_result=[]
    #collect container monitor data
    for i in docker_container_run_name:
        scan_result.append(pool.apply_async(check_container_stats, (i,)))
    pool.close()
    pool.join()
    for res in scan_result:
        if res.get() is not None:
            write_mysql(res.get())
        else:
            print 'fail is %s'%res.get()
    mysql_conn.commit()
    mysql_cur.close()
    mysql_conn.close()

Finally, drop the script into crontab so it collects data every minute:

*/1 * * * * python /root/collect_docker_monitor_data_multi.py >>/root/docker_log/docker_monitor.log 2>&1

One more note: the charts are rendered with Highstock, and the data is loaded dynamically via ajax; each request fetches the container's full monitoring history. Leave a comment if you have any questions.
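
The web backend is not shown in the post; as a rough sketch of the server side of that ajax call, a helper along the lines below (the function name and the choice of cpu_percent are my own) could pull a container's history from docker_monitor and shape it into the [timestamp, value] pairs a Highstock series expects.

import time
import MySQLdb

def cpu_series(container_id):
    # Fetch a container's full CPU history and convert it to the
    # [epoch_milliseconds, value] pairs that a Highstock series expects.
    conn = MySQLdb.connect(host='10.10.27.10', user='ops', passwd='...',  # your credentials
                           port=3306, charset='utf8')
    cur = conn.cursor()
    conn.select_db('devops')
    cur.execute("select collect_time, cpu_percent from docker_monitor "
                "where container_id=%s order by collect_time", (container_id,))
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return [[int(time.mktime(t.timetuple())) * 1000, float(v)] for t, v in rows]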

© Copyright belongs to the author: this is an original work by 51CTO blog author dl528888. Please credit the source if you repost it; otherwise legal action may be taken.


