Commit 1c8269b2, authored by Bryton Lacquement and committed by Julien Muchembled

Add support for Python 3

Reviewed-on: merge request !51
parent eba2c149
......@@ -25,7 +25,7 @@
#
##############################################################################
import ConfigParser
from six.moves import configparser
import argparse
import collections
import json
......@@ -57,7 +57,7 @@ class AutoSTemp(object):
def __init__(self, value):
    """Persist *value* into a fresh temporary file.

    The diff residue kept both the unencoded and the encoded write; only
    the encoded one is correct: os.write requires bytes on Python 3,
    hence the explicit UTF-8 encoding.
    """
    fd, self.__name = tempfile.mkstemp()
    os.write(fd, value.encode('utf-8'))
    os.close(fd)
@property
......@@ -67,7 +67,7 @@ class AutoSTemp(object):
def __del__(self):
    # Garbage-collection hook: remove the temporary file backing this
    # object. NOTE(review): __unlink is presumably os.unlink captured as
    # a class attribute so it stays reachable during interpreter
    # shutdown — confirm against the class definition.
    self.__unlink(self.__name)
from tester import SoftwareReleaseTester
from .tester import SoftwareReleaseTester
class TestMap(object):
# tell pytest to skip this class (even if name starts with Test)
......@@ -94,7 +94,7 @@ class TestMap(object):
return set(exclude_list + list(self.ran_test_set))
def getGroupList(self):
    """Return the known test-group names as a real list.

    On Python 3 ``dict.keys()`` is a view, so ``list(self.test_map_dict)``
    is the portable way to hand callers a materialized list (the diff
    residue kept both the old ``.keys()`` and the new form; only the
    latter is correct on both interpreters).
    """
    return list(self.test_map_dict)
def dropGroup(self, group):
    """Forget the given test group and every test it maps to.

    Raises KeyError when *group* is unknown, exactly like the
    ``del`` form it replaces.
    """
    self.test_map_dict.pop(group)
......@@ -217,7 +217,7 @@ def main():
logger, log_file = getLogger(log, args.verbose)
configuration = ConfigParser.SafeConfigParser()
configuration = configparser.SafeConfigParser()
configuration.readfp(args.configuration_file)
pidfile = args.pidfile
......
from __future__ import print_function
import datetime
import json
import sys
......@@ -50,16 +52,16 @@ def retryOnNetworkFailure(func):
while True:
try:
return func(*args, **kwargs)
except SAFE_RPC_EXCEPTION_LIST, e:
print 'Network failure: %s , %s' % (sys.exc_info(), e)
except HTTPError, e:
print 'Network failure: %s , %s' % (sys.exc_info(), e)
except ConnectionError, e:
print 'Network failure: %s , %s' % (sys.exc_info(), e)
except slapos.slap.ConnectionError, e:
print 'Network failure: %s , %s' % (sys.exc_info(), e)
print 'Retry method %s in %i seconds' % (func, retry_time)
except SAFE_RPC_EXCEPTION_LIST as e:
print('Network failure: %s , %s' % (sys.exc_info(), e))
except HTTPError as e:
print('Network failure: %s , %s' % (sys.exc_info(), e))
except ConnectionError as e:
print('Network failure: %s , %s' % (sys.exc_info(), e))
except slapos.slap.ConnectionError as e:
print('Network failure: %s , %s' % (sys.exc_info(), e))
print('Retry method %s in %i seconds' % (func, retry_time))
time.sleep(retry_time)
retry_time = min(retry_time*1.5, 640)
......@@ -218,7 +220,7 @@ class SlapOSMasterCommunicator(object):
result = self.hateoas_navigator.GET(url)
result = json.loads(result)
if result['_links'].get('action_object_slap', None) is None:
print result['links']
print(result['links'])
return None
object_link = self.hateoas_navigator.hateoasGetLinkFromLinks(
......
......@@ -27,6 +27,8 @@
#
##############################################################################
from __future__ import print_function
import os, errno
import subprocess
import argparse
......@@ -52,7 +54,7 @@ def build_command(apachedex_executable, output_file,
# Automaticaly replace variable 'date'.
apache_log = logfile.strip() % {'date': today}
if not os.path.exists(apache_log):
print "WARNING: File %s not found..." % apache_log
print("WARNING: File %s not found..." % apache_log)
continue
log_list.append(apache_log)
if not log_list:
......@@ -81,7 +83,7 @@ def main():
base_url = args.base_url.strip()
if not os.path.exists(output_folder) or not os.path.isdir(output_folder):
print "ERROR: Output folder is not a directory. Exiting..."
print("ERROR: Output folder is not a directory. Exiting...")
return 1
today = date.today().strftime("%Y-%m-%d")
......@@ -93,21 +95,23 @@ def main():
args.apache_log_list,
config)
except ValueError as e:
print e
print(e)
return 1
process_handler = subprocess.Popen(argument_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = process_handler.communicate()
if process_handler.returncode != 0:
if stderr:
print stderr
print(stderr)
return 1
with open(output_file, 'r') as f:
print base_url + '/ApacheDex-%s.html' % today
# Check that output_file is a readable file.
with open(output_file, 'r'):
print(base_url + '/ApacheDex-%s.html' % today)
return 0
if __name__ == "__main__":
......
......@@ -27,6 +27,8 @@
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import print_function
import os
import shutil
import sys
......@@ -35,6 +37,7 @@ import shlex
from subprocess import Popen, PIPE, STDOUT
import logging
from datetime import datetime
import six
_MARKER = []
WGET = 'wget'
......@@ -428,7 +431,7 @@ class HTTPCacheCheckerTestSuite(object):
if self.report_dict:
report_message_list = ['*Errors*:']
for url, message_list in self.report_dict.iteritems():
for url, message_list in six.iteritems(self.report_dict):
unique_message_list = []
for message in message_list:
if message not in unique_message_list:
......@@ -447,7 +450,7 @@ class HTTPCacheCheckerTestSuite(object):
from optparse import OptionParser
import ConfigParser
from six.moves.configparser import RawConfigParser
def _formatConfiguration(configuration):
""" format the configuration"""
......@@ -463,11 +466,11 @@ def web_checker_utility():
(options, args) = parser.parse_args()
if len(args) != 1 :
print parser.print_help()
print(parser.print_help())
parser.error('incorrect number of arguments')
config_path = args[0]
config = ConfigParser.RawConfigParser()
config = RawConfigParser()
config.read(config_path)
working_directory = config.get('web_checker', 'working_directory')
......@@ -525,7 +528,7 @@ def web_checker_utility():
file_object.write(result)
file_object.close()
else:
print result
print(result)
if __name__ == '__main__':
sys.exit(web_checker_utility())
# -*- coding: utf-8 -*-
import ConfigParser
from six.moves import configparser
import argparse
import gdbm
from six.moves import dbm_gnu as gdbm
import sys
import os
......@@ -41,7 +41,7 @@ def main():
run(args)
def run(args):
slapos_conf = ConfigParser.ConfigParser()
slapos_conf = configparser.ConfigParser()
slapos_conf.read(args.configuration_file)
current_binary = os.path.join(os.getcwd(), sys.argv[0])
......@@ -52,7 +52,7 @@ def run(args):
partition_base_name = slapos_conf.get('slapformat', 'partition_base_name')
try:
bridge_name = slapos_conf.get('slapformat', 'interface_name')
except ConfigParser.NoOptionError:
except configparser.NoOptionError:
bridge_name = slapos_conf.get('slapformat', 'bridge_name')
instance_root = slapos_conf.get('slapos', 'instance_root')
partition_base_path = os.path.join(instance_root, partition_base_name)
......@@ -61,7 +61,7 @@ def run(args):
logging.basicConfig(level=logging.getLevelName(args.log[0]))
database = gdbm.open(args.database, 'c', 0600)
database = gdbm.open(args.database, 'c', 0o600)
try:
process.main(sr_directory, partition_list, database, bridge_name)
finally:
......
......@@ -28,7 +28,7 @@
import argparse
import errno
import gdbm
from six.moves import dbm_gnu as gdbm
import json
from lockfile import LockFile
import logging
......@@ -38,8 +38,8 @@ import signal
import socket
import subprocess
import sys
import SocketServer
import StringIO
from six.moves import socketserver
import io
import threading
# Copied from erp5.util:erp5/util/testnode/ProcessManager.py
......@@ -75,15 +75,15 @@ def subprocess_capture(p, log, log_prefix, get_output=True):
return (p.stdout and ''.join(stdout),
p.stderr and ''.join(stderr))
class EqueueServer(SocketServer.ThreadingUnixStreamServer):
class EqueueServer(socketserver.ThreadingUnixStreamServer):
daemon_threads = True
def __init__(self, *args, **kw):
self.options = kw.pop('equeue_options')
SocketServer.ThreadingUnixStreamServer.__init__(self,
RequestHandlerClass=None,
*args, **kw)
super(EqueueServer, self).__init__(self,
RequestHandlerClass=None,
*args, **kw)
# Equeue Specific elements
self.setLogger(self.options.logfile[0], self.options.loglevel[0])
self.setDB(self.options.database[0])
......@@ -106,7 +106,7 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
self.logger.addHandler(handler)
def setDB(self, database):
    # Open the queue database: 'c' creates it if missing, 's' forces
    # synchronized writes; 0o700 (py2/py3-portable octal literal — the
    # diff residue also kept the old py2-only `0700` spelling) keeps the
    # file private to the owner.
    self.db = gdbm.open(database, 'cs', 0o700)
def _hasTakeoverBeenTriggered(self):
if hasattr(self, 'takeover_triggered_file_path') and \
......@@ -149,7 +149,7 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
# Handle request
self.logger.debug("Connection with file descriptor %d", request.fileno())
request.settimeout(self.options.timeout)
request_string = StringIO.StringIO()
request_string = io.StringIO()
segment = None
try:
while segment != '':
......@@ -181,7 +181,7 @@ class EqueueServer(SocketServer.ThreadingUnixStreamServer):
def remove_existing_file(path):
    """Remove *path* from disk; a file that is already gone is not an error.

    Only ENOENT (no such file) is tolerated — any other OSError (e.g.
    permission denied) propagates to the caller. The diff residue kept
    both the py2 ``except OSError, e`` and the portable ``as e`` form;
    only the latter is valid on Python 3.
    """
    try:
        os.remove(path)
    except OSError as e:
        if e.errno != errno.ENOENT:
            raise
......
......@@ -76,7 +76,7 @@ def generateFeed(option):
# Reduces feed if number of items exceeds max_item
if len(item_dict) > option.max_item:
outdated_key_list = sorted_item_dict.keys()[:-option.max_item]
outdated_key_list = list(sorted_item_dict)[:-option.max_item]
for outdated_key in outdated_key_list:
del sorted_item_dict[outdated_key]
deleteFileList(outdated_key_list)
......
......@@ -12,6 +12,7 @@
#
##############################################################################
from __future__ import print_function
import os
import time
......@@ -76,7 +77,7 @@ def run():
result = parser.parse_args()
arguments = dict(result._get_kwargs())
if arguments['token'] == None and arguments['file_token'] == None:
print "lampconfigure: Error: Please specify where condition will be taken, use -d or -f option"
print("lampconfigure: Error: Please specify where condition will be taken, use -d or -f option")
return
setup(arguments)
......@@ -84,7 +85,7 @@ def setup(arguments):
timeout = 5;
while True:
if not checkAction(arguments):
print "Waiting for 3s and retrying"
print("Waiting for 3s and retrying")
time.sleep(3)
continue
time.sleep(timeout)
......@@ -115,9 +116,9 @@ def checkAction(arguments):
user = arguments['mysql_user'],
passwd = arguments['mysql_password'],
db = arguments['token'])
except Exception, ex:
except Exception as e:
#Mysql is not ready yet?...
print ex.message
print(e)
return False
if arguments['table'] == "**":
#only detect if mysql has been started
......@@ -145,7 +146,7 @@ def rename(arguments):
source = os.path.join(arguments['target_directory'], arguments['source'])
destination = os.path.join(arguments['target_directory'], arguments['destination'])
if not os.path.exists(source):
print "Error when moving: '%s': no such file or directory" % source
print("Error when moving: '%s': no such file or directory" % source)
return
os.rename(source, destination)
if arguments['mode'] != None:
......@@ -155,7 +156,7 @@ def delete(arguments):
for path in arguments['delete_target']:
path = os.path.join(arguments['target_directory'], path)
if not os.path.exists(path):
print "Error when deleting: '%s': no such file or directory" % path
print("Error when deleting: '%s': no such file or directory" % path)
continue
if os.path.isdir(path):
shutil.rmtree(path)
......@@ -164,7 +165,7 @@ def delete(arguments):
def run_script(arguments):
script = os.path.join(arguments['target_directory'], arguments['script'])
print 'Running script: %s' % script
print('Running script: %s' % script)
if os.path.exists(script):
import subprocess
#run python script with predefined data
......@@ -176,12 +177,12 @@ def run_script(arguments):
result = subprocess.Popen(data, env={'PYTHONPATH': ':'.join(sys.path)})
result.wait()
else:
print "Error: can not read file '%s'" % script
print("Error: can not read file '%s'" % script)
def run_sql_script(arguments):
script = os.path.join(arguments['target_directory'], arguments['sql_script'])
print 'Running SQL script: %s' % script
print('Running SQL script: %s' % script)
if os.path.exists(script):
conn = MySQLdb.connect(host=arguments['mysql_host'],
port=int(arguments['mysql_port']),
......@@ -196,7 +197,7 @@ def run_sql_script(arguments):
conn.close()
else:
print "Error: can not read file '%s'" % script
print("Error: can not read file '%s'" % script)
......@@ -204,6 +205,6 @@ def chmod(arguments):
for path in arguments['chmod_target']:
path = os.path.join(arguments['target_directory'], path)
if not os.path.exists(path):
print "Error when changing mode: '%s': no such file or directory" % path
print("Error when changing mode: '%s': no such file or directory" % path)
continue
os.chmod(path, int(arguments['mode'], 8))
......@@ -27,6 +27,8 @@
#
##############################################################################
from __future__ import division
import sqlite3
import os
import pwd
......@@ -80,19 +82,17 @@ class ResourceCollect:
table="sqlite_master",
columns='name',
where="type='table' AND name='%s'" % name)
table_exists_result = zip(*check_result_cursor)
if not len(table_exists_result) or table_exists_result[0][0] is None:
return False
return True
r = check_result_cursor.fetchone()
return r and r[0] is not None
def getPartitionCPULoadAverage(self, partition_id, date_scope):
    """Delegate to the consumption helper for the partition's average
    CPU load over *date_scope*."""
    helper = self.consumption_utils
    return helper.getPartitionCPULoadAverage(partition_id, date_scope)
def getPartitionUsedMemoryAverage(self, partition_id, date_scope):
    # The helper's value is divided by 1024*1024 — presumably bytes
    # converted to MiB (TODO confirm against the helper). True division
    # is guaranteed on Python 2 by this file's
    # `from __future__ import division`, so the old `1024*1024.0`
    # divisor (still present as diff residue) is redundant.
    return self.consumption_utils.getPartitionUsedMemoryAverage(partition_id, date_scope)/(1024*1024)
def getPartitionDiskUsedAverage(self, partition_id, date_scope):
    # Divided by 1024 — presumably KiB converted to MiB (TODO confirm
    # against the helper). `from __future__ import division` at the top
    # of this file makes `/1024` true division on Python 2 as well, so
    # the old `/1024.0` form (left behind as diff residue) is redundant.
    return self.consumption_utils.getPartitionDiskUsedAverage(partition_id, date_scope)/1024
def getPartitionConsumption(self, partition_id, where="", date_scope=None, min_time=None, max_time=None):
"""
......@@ -123,10 +123,10 @@ class ResourceCollect:
resource_dict = {
'pid': result[6],
'cpu_percent': round(result[1]/count, 2),
'cpu_time': round((result[2] or 0)/(60.0), 2),
'cpu_time': round((result[2] or 0)/(60), 2),
'cpu_num_threads': round(result[3]/count, 2),
'memory_percent': round(result[4]/count, 2),
'memory_rss': round((result[5] or 0)/(1024*1024.0), 2),
'memory_rss': round((result[5] or 0)/(1024*1024), 2),
'io_rw_counter': round(result[7]/count, 2),
'io_cycles_counter': round(result[8]/count, 2)
}
......@@ -159,38 +159,35 @@ class ResourceCollect:
query_result = self.db.select('user', date_scope, colums,
where="partition='%s' and (time between '%s' and '%s') %s" %
(partition_id, min_time, max_time, where))
result_list = zip(*query_result)
process_dict = memory_dict = io_dict = {}
if len(result_list):
result = result_list
process_dict = {'total_process': result[0][0],
'cpu_percent': round((result[1][0] or 0), 2),
'cpu_time': round((result[2][0] or 0)/(60.0), 2),
'cpu_num_threads': round((result[3][0] or 0), 2),
'date': '%s %s' % (date_scope, min_time)
}
memory_dict = {'memory_percent': round((result[4][0] or 0), 2),
'memory_rss': round((result[5][0] or 0)/(1024*1024.0), 2),
'date': '%s %s' % (date_scope, min_time)
}
io_dict = {'io_rw_counter': round((result[6][0] or 0), 2),
'io_cycles_counter': round((result[7][0] or 0), 2),
'disk_used': 0,
'date': '%s %s' % (date_scope, min_time)
}
if self.has_table('folder'):
disk_result_cursor = self.db.select(
"folder", date_scope,
columns="SUM(disk_used)",
where="partition='%s' and (time between '%s' and '%s') %s" % (
partition_id, min_time, max_time, where
)
result = query_result.fetchone()
process_dict = {'total_process': result[0],
'cpu_percent': round((result[1] or 0), 2),
'cpu_time': round((result[2] or 0)/(60), 2),
'cpu_num_threads': round((result[3] or 0), 2),
'date': '%s %s' % (date_scope, min_time)
}
memory_dict = {'memory_percent': round((result[4] or 0), 2),
'memory_rss': round((result[5] or 0)/(1024*1024), 2),
'date': '%s %s' % (date_scope, min_time)
}
io_dict = {'io_rw_counter': round((result[6] or 0), 2),
'io_cycles_counter': round((result[7] or 0), 2),
'disk_used': 0,
'date': '%s %s' % (date_scope, min_time)
}
if self.has_table('folder'):
disk_result_cursor = self.db.select(
"folder", date_scope,
columns="SUM(disk_used)",
where="partition='%s' and (time between '%s' and '%s') %s" % (
partition_id, min_time, max_time, where
)
disk_used_sum = zip(*disk_result_cursor)
if len(disk_used_sum) and disk_used_sum[0][0] is not None:
io_dict['disk_used'] = round(disk_used_sum[0][0]/1024.0, 2)
)
disk_used_sum, = disk_result_cursor.fetchone()
if disk_used_sum is not None:
io_dict['disk_used'] = round(disk_used_sum/1024, 2)
self.db.close()
return (process_dict, memory_dict, io_dict)
......@@ -252,7 +249,7 @@ def main():
status_file = os.path.join(parser.output_folder, 'monitor_resource.status.json')
if not os.path.exists(parser.collector_db):
print "Collector database not found..."
print("Collector database not found...")
initProcessDataFile(process_file)
initMemoryDataFile(mem_file)
initIODataFile(io_file)
......
#!/usr/bin/env python
from __future__ import print_function
import sys
import os
import glob
import json
import ConfigParser
from six.moves import configparser
import time
from datetime import datetime
import base64
import hashlib
import PyRSS2Gen
from slapos.util import bytes2str, str2bytes
def getKey(item):
    """Sort key for feed items: the name of the item's source."""
    source = item.source
    return source.name
......@@ -30,6 +34,8 @@ class MonitorFeed(object):
event_date = item_dict['result']['change-date']
report_date = item_dict['result']['date']
description = item_dict['result'].get('message', '')
guid = base64.b64encode(str2bytes("%s, %s, %s, %s" % (self.hosting_name,
item_dict['title'], has_string, event_date)))
rss_item = PyRSS2Gen.RSSItem(
categories = [item_dict['status']],
source = PyRSS2Gen.Source(item_dict['title'], self.public_url),
......@@ -37,9 +43,7 @@ class MonitorFeed(object):
description = "\n%s" % (description,),
link = self.private_url,
pubDate = event_date,
guid = PyRSS2Gen.Guid(base64.b64encode("%s, %s, %s, %s" % (self.hosting_name,
item_dict['title'], has_string, event_date)),
isPermaLink=False)
guid = PyRSS2Gen.Guid(bytes2str(guid), isPermaLink=False)
)
self.rss_item_list.append(rss_item)
......@@ -69,7 +73,7 @@ def generateStatisticsData(stat_file_path, content):
fstat.write(json.dumps(data_dict))
current_state = ''
if content.has_key('state'):
if 'state' in content:
current_state = '%s, %s, %s, %s' % (
content['date'],
content['state']['success'],
......@@ -131,13 +135,14 @@ def generateMonitoringData(config, public_folder, private_folder, public_url,
promise_status = "OK"
success += 1
tmp_json['result']['change-date'] = tmp_json['result']['date']
if previous_state_dict.has_key(tmp_json['name']):
if tmp_json['name'] in previous_state_dict:
status, change_date, _ = previous_state_dict[tmp_json['name']]
if promise_status == status:
tmp_json['result']['change-date'] = change_date
tmp_json['status'] = promise_status
message_hash = hashlib.md5(tmp_json['result'].get('message', '')).hexdigest()
message_hash = hashlib.md5(
str2bytes(tmp_json['result'].get('message', ''))).hexdigest()
new_state_dict[tmp_json['name']] = [
promise_status,
tmp_json['result']['change-date'],
......@@ -150,9 +155,9 @@ def generateMonitoringData(config, public_folder, private_folder, public_url,
previous_state_dict.get(tmp_json['name']),
public_folder
)
except ValueError, e:
except ValueError as e:
# bad json file
print "ERROR: Bad json file at: %s\n%s" % (file, str(e))
print("ERROR: Bad json file at: %s\n%s" % (file, e))
continue
with open(promises_status_file, "w") as f:
......@@ -187,7 +192,7 @@ def savePromiseHistory(promise_name, state_dict, previous_state_list,
else:
if previous_state_list is not None:
_, change_date, checksum = previous_state_list
current_sum = hashlib.md5(state_dict.get('message', '')).hexdigest()
current_sum = hashlib.md5(str2bytes(state_dict.get('message', ''))).hexdigest()
if state_dict['change-date'] == change_date and \
current_sum == checksum:
# Only save the changes and not the same info
......@@ -202,7 +207,7 @@ def savePromiseHistory(promise_name, state_dict, previous_state_list,
def run(monitor_conf_file):
config = ConfigParser.ConfigParser()
config = configparser.ConfigParser()
config.read(monitor_conf_file)
base_folder = config.get('monitor', 'private-folder')
......
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import os
import stat
import json
import ConfigParser
from six.moves import configparser
import traceback
import argparse
import urllib2
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import HTTPError
import ssl
import glob
import socket
from datetime import datetime
from xml.sax.saxutils import escape
from slapos.util import bytes2str
OPML_START = """<?xml version="1.0" encoding="UTF-8"?>
<!-- OPML generated by SlapOS -->
<opml version="1.1">
......@@ -47,7 +52,7 @@ def parseArguments():
def mkdirAll(path):
    """Create *path* like ``mkdir -p``: an already-existing directory is
    not an error; any other OSError propagates."""
    import errno  # local import keeps this fix self-contained
    try:
        os.makedirs(path)
    except OSError as e:
        # NOTE: the original tested `os.errno.EEXIST`; the `os.errno`
        # alias was removed in Python 3.7 — the errno module is the
        # supported spelling. (The diff residue also kept the py2-only
        # `except OSError, e` line.)
        if e.errno == errno.EEXIST and os.path.isdir(path):
            pass
        else:
            raise
......@@ -55,13 +60,13 @@ def mkdirAll(path):
def softConfigGet(config, *args, **kwargs):
    """``config.get(...)`` that returns None instead of raising when the
    requested section or option is missing.

    The diff residue kept both the py2 ``ConfigParser.*`` and the
    portable ``configparser.*`` exception classes; only the latter
    resolves on Python 3 (and via six.moves on Python 2).
    """
    try:
        return config.get(*args, **kwargs)
    except (configparser.NoOptionError, configparser.NoSectionError):
        return None
def createSymlink(source, destination):
    """Create symlink *destination* pointing at *source*; an
    already-existing destination is silently ignored, any other OSError
    propagates."""
    import errno  # local import: os.errno was removed in Python 3.7
    try:
        os.symlink(source, destination)
    except OSError as e:
        # Original compared against os.errno.EEXIST, which breaks on
        # Python 3.7+; errno.EEXIST is the portable form. (The diff
        # residue also kept the py2-only `except OSError, e` line.)
        if e.errno != errno.EEXIST:
            raise
......@@ -99,10 +104,10 @@ class Monitoring(object):
def loadConfig(self, pathes, config=None):
if config is None:
config = ConfigParser.ConfigParser()
config = configparser.ConfigParser()
try:
config.read(pathes)
except ConfigParser.MissingSectionHeaderError:
except configparser.MissingSectionHeaderError:
traceback.print_exc()
return config
......@@ -131,8 +136,8 @@ class Monitoring(object):
try:
with open(config_list[2]) as cfile:
param_value = cfile.read()
except OSError, e:
print 'Cannot read file %s, Error is: %s' % (config_list[2], str(e))
except OSError as e:
print('Cannot read file %s, Error is: %s' % (config_list[2], e))
pass
else:
param_value = ""
......@@ -147,7 +152,7 @@ class Monitoring(object):
)
if config_list[0] == 'htpasswd':
if len(config_list) != 5 or not os.path.exists(config_list[4]):
print 'htpasswd file is not specified: %s' % str(config_list)
print('htpasswd file is not specified: %s' %