Commit ed829025 authored by Rafael Monnerat

The functionality was moved into slapos core

parent 718c323d
@@ -78,9 +78,7 @@ setup(name=name,
'is-process-older-than-dependency-set = slapos.promise.is_process_older_than_dependency_set:main',
'killpidfromfile = slapos.systool:killpidfromfile', # BBB
'monitor.bootstrap = slapos.monitor.monitor:main',
'monitor.collect = slapos.monitor.collect:main',
'monitor.statistic = slapos.monitor.build_statistic:main',
'monitor.runpromise = slapos.monitor.runpromise:main',
'monitor.genstatus = slapos.monitor.globalstate:main',
'monitor.configwrite = slapos.monitor.monitor_config_write:main',
'runResiliencyUnitTestTestNode = slapos.resiliencytest:runUnitTest',
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010-2014 Vifib SARL and Contributors.
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import division
import sqlite3
import os
import pwd
import time
import json
import argparse
import psutil
from time import strftime
from datetime import datetime, timedelta
from slapos.collect.db import Database
from slapos.collect.reporter import ConsumptionReportBase

def parseArguments():
  """
  Parse arguments for monitor collector instance.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--output_folder',
                      help='Path of the folder where output files should be written.')
  parser.add_argument('--pid_file',
                      help='Path where the pid of this process should be written.')
  parser.add_argument('--partition_id',
                      help='ID of the computer partition to collect data from.')
  parser.add_argument('--collector_db',
                      help='Path of the slapos collect database.')
  return parser.parse_args()

class ResourceCollect(ConsumptionReportBase):

  def __init__(self, db_path=None):
    # If the database is locked, wait up to 15 seconds.
    # Do not try to create or update tables, access will be refused.
    self.db = Database(db_path, create=False, timeout=15)

def main():
  parser = parseArguments()
  if not parser.output_folder or not os.path.isdir(parser.output_folder):
    raise Exception("Invalid output folder: %s" % parser.output_folder)

  if parser.pid_file:
    # Check that this process is not already running
    if os.path.exists(parser.pid_file):
      with open(parser.pid_file, "r") as pidfile:
        try:
          pid = int(pidfile.read(6))
        except ValueError:
          pid = None
      if pid and os.path.exists("/proc/" + str(pid)):
        print("A process is already running with pid " + str(pid))
        exit(1)
    with open(parser.pid_file, "w") as pidfile:
      pidfile.write('%s' % os.getpid())

  # Consumption global status
  process_file = os.path.join(parser.output_folder, 'monitor_resource_process.data.json')
  mem_file = os.path.join(parser.output_folder, 'monitor_resource_memory.data.json')
  io_file = os.path.join(parser.output_folder, 'monitor_resource_io.data.json')
  resource_file = os.path.join(parser.output_folder, 'monitor_process_resource.status.json')
  status_file = os.path.join(parser.output_folder, 'monitor_resource.status.json')

  if not os.path.exists(parser.collector_db):
    print("Collector database not found...")
    initDataFile(process_file, ["date, total process, CPU percent, CPU time, CPU threads"])
    initDataFile(mem_file, ["date, memory used percent, memory used"])
    initDataFile(io_file, ["date, io rw counter, io cycles counter, disk used"])
    with open(status_file, "w") as f:
      f.write(json.dumps({
        "cpu_time": 0,
        "cpu_percent": 0,
        "memory_rss": 0,
        "memory_percent": 0,
        "io_rw_counter": 0,
        "date": "",
        "total_process": 0,
        "disk_used": 0,
        "io_cycles_counter": 0,
        "cpu_num_threads": 0
      }))
    with open(resource_file, "w") as f:
      f.write('[]')
    exit(1)

  collector = ResourceCollect(parser.collector_db)
  date_scope = datetime.now().strftime('%Y-%m-%d')
  stat_info = os.stat(parser.output_folder)
  partition_user = pwd.getpwuid(stat_info.st_uid)[0]

  process_result, memory_result, io_result = collector.getPartitionConsumptionStatus(partition_user)
  label_list = ['date', 'total_process', 'cpu_percent', 'cpu_time', 'cpu_num_threads',
                'memory_percent', 'memory_rss', 'io_rw_counter', 'io_cycles_counter',
                'disk_used']
  resource_status_dict = {}
  if process_result and process_result['total_process'] != 0.0:
    appendToJsonFile(process_file, ", ".join(
      str(process_result[key]) for key in label_list if key in process_result)
    )
    resource_status_dict.update(process_result)
  if memory_result and memory_result['memory_rss'] != 0.0:
    appendToJsonFile(mem_file, ", ".join(
      str(memory_result[key]) for key in label_list if key in memory_result)
    )
    resource_status_dict.update(memory_result)
  if io_result and io_result['io_rw_counter'] != 0.0:
    appendToJsonFile(io_file, ", ".join(
      str(io_result[key]) for key in label_list if key in io_result)
    )
    resource_status_dict.update(io_result)

  with open(status_file, 'w') as fp:
    fp.write(json.dumps(resource_status_dict))

  # Consumption Resource
  resource_process_status_list = collector.getPartitionConsumption(partition_user)
  if resource_process_status_list:
    with open(resource_file, 'w') as rf:
      rf.write(json.dumps(resource_process_status_list))

  if parser.pid_file and os.path.exists(parser.pid_file):
    os.unlink(parser.pid_file)
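
For reference, a minimal sketch of driving the collector above from Python rather than through the console script, using only the calls that appear in main(); the collector database folder and partition user below are hypothetical examples:

  from slapos.monitor.collect import ResourceCollect

  # Hypothetical folder containing collector.db and a hypothetical partition user.
  collector = ResourceCollect('/srv/slapgrid/var/data-log/')
  process, memory, io = collector.getPartitionConsumptionStatus('slapuser0')
  per_process_list = collector.getPartitionConsumption('slapuser0')
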
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010-2016 Vifib SARL and Contributors.
# All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public License
# as published by the Free Software Foundation; either version 2.1
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import os
import argparse
import csv
from slapos.util import mkdir_p
from slapos.collect.db import Database

# Skip the database bootstrap: this script only reads an existing collector database.
def skip_bootstrap(self):
  return

Database._bootstrap = skip_bootstrap


def parseArguments():
  """
  Parse arguments for monitor collector instance.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument('--output_folder',
                      help='Path of the folder where output files should be written.')
  parser.add_argument('--collector_db',
                      default='/srv/slapgrid/var/data-log/',
                      help='Path of the folder where the slapos collect database is located.')
  return parser.parse_args()

def writeFile(name, folder, date_scope, rows):
  if os.path.exists(
      os.path.join(folder, "%s/dump_%s.csv" % (date_scope, name))):
    # File already exists, no reason to recreate it.
    return
  mkdir_p(os.path.join(folder, date_scope), 0o755)
  file_io = open(os.path.join(folder, "%s/dump_%s.csv" % (date_scope, name)), "w")
  csv_output = csv.writer(file_io)
  csv_output.writerows(rows)
  file_io.close()

def dump_table_into_csv(db, folder):
  db.connect()
  table_list = db.getTableList()
  # Save all dates first, as db.select may switch the cursor
  date_list = [(date_scope, _)
               for date_scope, _ in db.getDateScopeList(reported=1)]
  for date_scope, amount in date_list:
    for table in table_list:
      if os.path.exists(
          os.path.join(folder, "%s/dump_%s.csv" % (date_scope, table))):
        # File already exists, no reason to recreate it.
        continue
      writeFile(table, folder, date_scope,
                db.select(table, date_scope))
  db.close()
if __name__ == "__main__":
parser = parseArguments()
if parser.output_folder is None:
raise Exception("Invalid ouput folder: %s" % parser.output_folder)
if parser.collector_db is None:
raise Exception("Invalid collector database folder: %s" % parser.collector_db)
if not os.path.exists(parser.output_folder) and \
os.path.isdir(parser.output_folder):
raise Exception("Invalid ouput folder: %s" % parser.output_folder)
if not os.path.exists(parser.collector_db):
print "Collector database not found..."
dump_table_into_csv(Database(parser.collector_db), parser.output_folder)
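
Similarly, a minimal sketch of running the CSV dump above directly from Python instead of through the console script; both folder paths below are hypothetical:

  from slapos.collect.db import Database
  from slapos.monitor.collect_csv_dump import dump_table_into_csv

  # Hypothetical folders: where collector.db lives, and where the
  # <date_scope>/dump_<table>.csv files should be written.
  db = Database('/srv/slapgrid/var/data-log/')
  dump_table_into_csv(db, '/srv/monitor/private/documents')
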
##############################################################################
#
# Copyright (c) 2017 Vifib SARL and Contributors. All Rights Reserved.
#
# WARNING: This program as such is intended to be used by professional
# programmers who take the whole responsibility of assessing all potential
# consequences resulting from its eventual inadequacies and bugs
# End users who are looking for a ready-to-use solution with commercial
# guarantees and support are strongly advised to contract a Free Software
# Service Company
#
# This program is Free Software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
import unittest
import os
import sqlite3
import time
from ..promise import data
from slapos.monitor.collect import ResourceCollect

class TestMonitorCollect(unittest.TestCase):

  base_path, = data.__path__

  def setUp(self):
    self.status = "ok"

    # Populate the database from the SQL fixture
    self.conn = sqlite3.connect('/tmp/collector.db')
    with open(self.base_path + "/monitor_collect.sql") as f:
      sql = f.read()
    self.conn.executescript(sql)
    self.conn.close()

    # Initialise the collector on the folder containing the database
    self.collector = ResourceCollect('/tmp/')

  def test_getPartitionUsedMemoryAverage(self):
    self.assertEqual(1195.492578125,
      self.collector.getPartitionUsedMemoryAverage('slapuser15', '2017-09-16'))

  def test_getPartitionCPULoadAverage(self):
    self.assertEqual(2.1599999999999993,
      self.collector.getPartitionCPULoadAverage('slapuser15', '2017-09-16'))

  def test_getPartitionDiskUsedAverage(self):
    self.assertEqual(35.5234375,
      self.collector.getPartitionDiskUsedAverage('slapuser15', '2017-04-18'))

  def test_getPartitionConsumption(self):
    data = self.collector.getPartitionConsumption('slapuser15', date_scope='2017-09-16',
                                                  min_time='00:01:00', max_time='00:13:00')
    self.assertEqual(1302.66, data[0]['cpu_time'])
    self.assertEqual(26825304064.0, data[0]['io_rw_counter'])

  def test_getPartitionConsumptionStatus(self):
    data = self.collector.getPartitionConsumptionStatus('slapuser15', date_scope='2017-09-16',
                                                        min_time='00:01:00', max_time='00:13:00')
    self.assertEqual(7.3, data[0]['cpu_percent'])
    self.assertEqual(2822535483392.0, data[2]['io_rw_counter'])

  def tearDown(self):
    os.remove("/tmp/collector.db")


if __name__ == '__main__':
  unittest.main()
@@ -8,7 +8,7 @@ import json
import pkg_resources
from slapos.monitor import globalstate
from slapos.monitor.runpromise import MonitorPromiseLauncher, getArgumentParser
#from slapos.monitor.runpromise import MonitorPromiseLauncher, getArgumentParser
from slapos.monitor.monitor import Monitoring
from jsonschema import validate