Commit 7467ef4c authored by Bryton Lacquement's avatar Bryton Lacquement 🚪 Committed by Julien Muchembled

Add Python 3 support

parent ae15060e
......@@ -45,6 +45,6 @@ def do_bang(configp, message):
slap = slapos.slap.slap()
slap.initializeConnection(master_url, key_file=key_file, cert_file=cert_file)
computer = slap.registerComputer(computer_id)
print 'Banging to %r' % master_url
print('Banging to %r' % master_url)
computer.bang(message)
print 'Bang with message %r' % message
print('Bang with message %r' % message)
......@@ -27,8 +27,10 @@
#
##############################################################################
from __future__ import print_function
import subprocess
import urlparse
from six.moves.urllib.parse import urlparse
from time import sleep
import glob
import os
......@@ -46,14 +48,14 @@ def _removeTimestamp(instancehome):
"""
timestamp_glob_path = "%s/slappart*/.timestamp" % instancehome
for timestamp_path in glob.glob(timestamp_glob_path):
print "Removing %s" % timestamp_path
print("Removing %s" % timestamp_path)
os.remove(timestamp_path)
def _runBang(app):
"""
Launch slapos node format.
"""
print "[BOOT] Invoking slapos node bang..."
print("[BOOT] Invoking slapos node bang...")
result = app.run(['node', 'bang', '-m', 'Reboot'])
if result == 1:
return 0
......@@ -63,7 +65,7 @@ def _runFormat(app):
"""
Launch slapos node format.
"""
print "[BOOT] Invoking slapos node format..."
print("[BOOT] Invoking slapos node format...")
result = app.run(['node', 'format', '--now', '--verbose'])
if result == 1:
return 0
......@@ -73,30 +75,30 @@ def _ping(hostname):
"""
Ping a hostname
"""
print "[BOOT] Invoking ipv4 ping to %s..." % hostname
print("[BOOT] Invoking ipv4 ping to %s..." % hostname)
p = subprocess.Popen(
["ping", "-c", "2", hostname],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
if p.returncode == 0:
print "[BOOT] IPv4 network reachable..."
print("[BOOT] IPv4 network reachable...")
return 1
print "[BOOT] [ERROR] IPv4 network unreachable..."
print("[BOOT] [ERROR] IPv4 network unreachable...")
return 0
def _ping6(hostname):
    """
    Check IPv6 reachability of *hostname*.

    Runs ``ping6 -c 2`` and returns 1 when the host answered
    (exit status 0), 0 otherwise.  Output of the ping command is
    captured and discarded; only the exit status matters.
    """
    print("[BOOT] Invoking ipv6 ping to %s..." % hostname)
    p = subprocess.Popen(
        ["ping6", "-c", "2", hostname],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = p.communicate()
    if p.returncode == 0:
        print("[BOOT] IPv6 network reachable...")
        return 1
    print("[BOOT] [ERROR] IPv6 network unreachable...")
    return 0
def _test_ping(hostname):
......@@ -126,7 +128,7 @@ def _waitIpv6Ready(ipv6_interface):
test if ipv6 is ready on ipv6_interface
"""
ipv6_address = ""
print "[BOOT] Checking if %r has IPv6..." % ipv6_interface
print("[BOOT] Checking if %r has IPv6..." % ipv6_interface)
while ipv6_address == "":
for inet_dict in netifaces.ifaddresses(ipv6_interface)[socket.AF_INET6]:
ipv6_address = inet_dict['addr'].split('%')[0]
......@@ -134,8 +136,8 @@ def _waitIpv6Ready(ipv6_interface):
break
else:
ipv6_address = ""
print "[BOOT] [ERROR] No IPv6 found on interface %r, " \
"try again in 5 seconds..." % ipv6_interface
print("[BOOT] [ERROR] No IPv6 found on interface %r, "
"try again in 5 seconds..." % ipv6_interface)
sleep(5)
class BootCommand(ConfigCommand):
......@@ -155,7 +157,7 @@ class BootCommand(ConfigCommand):
def take_action(self, args):
configp = self.fetch_config(args)
instance_root = configp.get('slapos','instance_root')
master_url = urlparse.urlparse(configp.get('slapos','master_url'))
master_url = urlparse(configp.get('slapos','master_url'))
master_hostname = master_url.hostname
# Check that we have IPv6 ready
......@@ -177,12 +179,12 @@ class BootCommand(ConfigCommand):
app = SlapOSApp()
# Make sure slapos node format returns ok
while not _runFormat(app):
print "[BOOT] [ERROR] Fail to format, try again in 15 seconds..."
print("[BOOT] [ERROR] Fail to format, try again in 15 seconds...")
sleep(15)
# Make sure slapos node bang returns ok
while not _runBang(app):
print "[BOOT] [ERROR] Fail to bang, try again in 15 seconds..."
print("[BOOT] [ERROR] Fail to bang, try again in 15 seconds...")
sleep(15)
_removeTimestamp(instance_root)
......@@ -27,7 +27,7 @@
#
##############################################################################
import ConfigParser
from six.moves import configparser
import os
from slapos.cli.command import Command
......@@ -77,7 +77,7 @@ class ConfigCommand(Command):
if not os.path.exists(cfg_path):
raise ConfigError('Configuration file does not exist: %s' % cfg_path)
configp = ConfigParser.SafeConfigParser()
configp = configparser.SafeConfigParser()
if configp.read(cfg_path) != [cfg_path]:
# bad permission, etc.
raise ConfigError('Cannot parse configuration file: %s' % cfg_path)
......
......@@ -31,6 +31,7 @@ import re
import os
import sys
import json
from six.moves import input
import requests
......@@ -94,7 +95,7 @@ def fetch_configuration_template():
def do_configure_client(logger, master_url_web, token, config_path, master_url):
while not token:
token = raw_input('Credential security token: ').strip()
token = input('Credential security token: ').strip()
# Check for existence of previous configuration, certificate or key files
# where we expect to create them. If so, ask the use to manually remove them.
......
......@@ -39,6 +39,7 @@ from slapos.cli.config import ConfigCommand
from slapos.grid.slapgrid import create_slapgrid_object
from slapos.grid.utils import updateFile, createPrivateDirectory
from slapos.grid.svcbackend import launchSupervisord
from slapos.util import bytes2str
DEFAULT_COMPUTER_ID = 'local_computer'
......@@ -126,8 +127,8 @@ def _replaceParameterValue(original_content, to_replace):
def _generateSlaposNodeConfigurationFile(slapos_node_config_path, args):
template_arg_list = (__name__, '../../slapos.cfg.example')
with pkg_resources.resource_stream(*template_arg_list) as fout:
slapos_node_configuration_template = fout.read()
slapos_node_configuration_template = \
bytes2str(pkg_resources.resource_string(*template_arg_list))
master_url = 'http://%s:%s' % (args.daemon_listen_ip, args.daemon_listen_port)
slapos_home = args.slapos_buildout_directory
to_replace = [
......@@ -149,12 +150,12 @@ def _generateSlaposNodeConfigurationFile(slapos_node_config_path, args):
'(key_file|cert_file|certificate_repository_path).*=.*\n',
'', slapos_node_configuration_content)
with open(slapos_node_config_path, 'w') as fout:
fout.write(slapos_node_configuration_content.encode('utf8'))
fout.write(slapos_node_configuration_content)
def _generateSlaposProxyConfigurationFile(conf):
template_arg_list = (__name__, '../../slapos-proxy.cfg.example')
with pkg_resources.resource_stream(*template_arg_list) as fout:
slapos_proxy_configuration_template = fout.read()
slapos_proxy_configuration_template = \
bytes2str(pkg_resources.resource_string(*template_arg_list))
slapos_proxy_configuration_path = os.path.join(
conf.slapos_configuration_directory, 'slapos-proxy.cfg')
listening_ip, listening_port = \
......@@ -172,7 +173,7 @@ def _generateSlaposProxyConfigurationFile(conf):
slapos_proxy_configuration_template, to_replace)
with open(slapos_proxy_configuration_path, 'w') as fout:
fout.write(slapos_proxy_configuration_content.encode('utf8'))
fout.write(slapos_proxy_configuration_content)
return slapos_proxy_configuration_path
......
......@@ -32,6 +32,8 @@ import textwrap
from slapos.cli.config import ClientConfigCommand
from slapos.client import init, do_console, ClientConfig
from six import exec_
class ShellNotFound(Exception):
pass
......@@ -90,7 +92,9 @@ class ConsoleCommand(ClientConfigCommand):
local = init(conf, self.app.log)
if args.script_file:
return execfile(args.script_file, globals(), local)
with open(args.script_file) as f:
code = compile(f.read(), args.script_file, 'exec')
return exec_(code, globals(), local)
if not any([args.python, args.ipython, args.bpython]):
args.ipython = True
......
......@@ -247,7 +247,7 @@ class SlapOSApp(App):
return
if self.options.log_color:
import coloredlogs
from slapos.cli import coloredlogs
console = coloredlogs.ColoredStreamHandler(show_name=True, # logger name (slapos) and PID
show_severity=True,
show_timestamps=self.options.log_time,
......
......@@ -29,6 +29,7 @@
import logging
import sys
import six
from slapos.cli.config import ClientConfigCommand
from slapos.client import init, ClientConfig
......@@ -64,5 +65,5 @@ def do_list(logger, conf, local):
logger.info('No existing service.')
return
logger.info('List of services:')
for title, instance in instance_dict.iteritems():
for title, instance in six.iteritems(instance_dict):
logger.info('%s %s', title, instance._software_release_url)
......@@ -34,7 +34,6 @@ import logging
import sys
import os
import subprocess
import StringIO
import lxml.etree
import prettytable
......@@ -43,8 +42,17 @@ import sqlite3
from slapos.cli.config import ConfigCommand
from slapos.proxy import ProxyConfig
from slapos.proxy.db_version import DB_VERSION
from slapos.util import sqlite_connect
from slapos.util import sqlite_connect, str2bytes
if bytes is str:
from io import BytesIO
class StringIO(BytesIO):
# Something between strict io.BytesIO and laxist/slow StringIO.StringIO
# (which starts returning unicode once unicode is written) for logging.
def write(self, b):
return BytesIO.write(self, b.encode('utf-8'))
else:
from io import StringIO
class ProxyShowCommand(ConfigCommand):
"""
......@@ -139,7 +147,7 @@ def log_params(logger, conn):
if not row['connection_xml']:
continue
xml = str(row['connection_xml'])
xml = str2bytes(row['connection_xml'])
logger.info('%s: %s (type %s)', row['reference'], row['partition_reference'], row['software_type'])
instance = lxml.etree.fromstring(xml)
for parameter in list(instance):
......@@ -150,9 +158,12 @@ def log_params(logger, conn):
# _ is usually json encoded - re-format to make it easier to read
if name == '_':
try:
text = json.dumps(json.loads(text), indent=2)
text = json.dumps(json.loads(text),
indent=2, sort_keys=True)
except ValueError:
pass
else: # to avoid differences between Py2 and Py3 in unit tests
text = '\n'.join(map(str.rstrip, text.splitlines()))
logger.info(' %s = %s', name, text)
......@@ -208,7 +219,7 @@ def do_show(conf):
# to paginate input, honoring $PAGER.
output = sys.stdout
if output.isatty():
output = StringIO.StringIO()
output = StringIO()
proxy_show_logger = logging.getLogger(__name__)
handler = logging.StreamHandler(output)
handler.setLevel(logging.DEBUG)
......@@ -221,7 +232,8 @@ def do_show(conf):
conn = sqlite_connect(conf.database_uri)
conn.row_factory = sqlite3.Row
conn.create_function('md5', 1, lambda s: hashlib.md5(s).hexdigest())
conn.create_function('md5', 1,
lambda s: hashlib.md5(str2bytes(s)).hexdigest())
call_table = [
(conf.computers, log_computer_table),
......@@ -248,4 +260,4 @@ def do_show(conf):
close_fds=True,
shell=True,
stdin=subprocess.PIPE,)
pager.communicate(output.getvalue().encode('utf-8'))
pager.communicate(str2bytes(output.getvalue()))
......@@ -36,6 +36,7 @@ import sys
import pkg_resources
import requests
import json
from six.moves import input
from slapos.cli.command import Command, must_be_root
......@@ -327,7 +328,7 @@ def gen_auth(conf):
else:
yield conf.login, getpass.getpass()
while ask:
yield raw_input('SlapOS Master Login: '), getpass.getpass()
yield input('SlapOS Master Login: '), getpass.getpass()
def do_register(conf):
......@@ -348,7 +349,7 @@ def do_register(conf):
password=password)
else:
while not conf.token:
conf.token = raw_input('Computer security token: ').strip()
conf.token = input('Computer security token: ').strip()
certificate, key = get_certificate_key_pair(conf.logger,
conf.master_url_web,
......
......@@ -28,7 +28,7 @@
##############################################################################
import atexit
import ConfigParser
from six.moves import configparser
import os
import sys
......@@ -52,7 +52,7 @@ class ClientConfig(object):
# Merges the arguments and configuration
try:
configuration_dict = dict(configp.items('slapconsole'))
except ConfigParser.NoSectionError:
except configparser.NoSectionError:
pass
else:
for key in configuration_dict:
......@@ -119,7 +119,7 @@ def _getSoftwareReleaseFromSoftwareString(logger, software_string, product):
try:
return product.__getattr__(software_string[len(SOFTWARE_PRODUCT_NAMESPACE):])
except AttributeError as e:
logger.error('Error: %s Exiting now.' % e.message)
logger.error('Error: %s Exiting now.', e)
sys.exit(1)
def do_console(local):
......
......@@ -27,6 +27,7 @@
#
##############################################################################
from __future__ import print_function
from psutil import process_iter, NoSuchProcess, AccessDenied
from time import strftime
import shutil
......@@ -42,7 +43,7 @@ from slapos.collect.reporter import RawCSVDumper, \
compressLogFolder, \
ConsumptionReport
from entity import get_user_list, Computer
from .entity import get_user_list, Computer
def _get_time():
return strftime("%Y-%m-%d -- %H:%M:%S").split(" -- ")
......@@ -157,5 +158,5 @@ def do_collect(conf):
database.garbageCollect()
except AccessDenied:
print "You HAVE TO execute this script with root permission."
print("You HAVE TO execute this script with root permission.")
......@@ -74,19 +74,15 @@ class User(object):
time_cycle = self.disk_snapshot_params.get('time_cycle', 0)
database.connect()
if time_cycle:
order = 'date DESC, time DESC'
limit = 1
query = database.select(table="folder", columns="date, time",
order=order, limit=limit,
where="partition='%s'" % self.name)
query_result = zip(*query)
if len(query_result):
date, time = (query_result[0][0], query_result[1][0])
latest_date = datetime.strptime('%s %s' % (date, time),
"%Y-%m-%d %H:%M:%S")
for date_time in database.select(table="folder", columns="date, time",
order='date DESC, time DESC', limit=1,
where="partition='%s'" % self.name):
latest_date = datetime.strptime('%s %s' % date_time,
"%Y-%m-%d %H:%M:%S")
if (datetime.now() - latest_date).seconds < time_cycle:
# wait the time cycle
return
break
pid_file = self.disk_snapshot_params.get('pid_folder', None)
if pid_file is not None:
pid_file = os.path.join(pid_file, '%s_disk_size.pid' % self.name)
......
......@@ -40,6 +40,8 @@ import tarfile
import time
import psutil
import six
log_file = False
class Dumper(object):
......@@ -59,10 +61,10 @@ class SystemReporter(Dumper):
""" Dump data """
_date = time.strftime("%Y-%m-%d")
self.db.connect()
for item, collected_item_list in self.db.exportSystemAsDict(_date).iteritems():
for item, collected_item_list in six.iteritems(self.db.exportSystemAsDict(_date)):
self.writeFile(item, folder, collected_item_list)
for partition, collected_item_list in self.db.exportDiskAsDict(_date).iteritems():
for partition, collected_item_list in six.iteritems(self.db.exportDiskAsDict(_date)):
partition_id = "_".join(partition.split("-")[:-1]).replace("/", "_")
item = "memory_%s" % partition.split("-")[-1]
self.writeFile("disk_%s_%s" % (item, partition_id), folder, collected_item_list)
......@@ -135,62 +137,53 @@ class ConsumptionReportBase(object):
def getPartitionCPULoadAverage(self, partition_id, date_scope):
    """
    Return the average CPU percentage used by *partition_id* over
    *date_scope*, or None when no sample was collected.

    The unpacking pattern ``(x,), = ...`` asserts the query returns
    exactly one row with one column.
    """
    self.db.connect()
    (cpu_percent_sum,), = self.db.select("user", date_scope,
        columns="SUM(cpu_percent)",
        where="partition = '%s'" % partition_id)
    # SUM() yields NULL (None) when no row matched: nothing to average.
    if cpu_percent_sum is None:
        return
    (sample_amount,), = self.db.select("user", date_scope,
        columns="COUNT(DISTINCT time)",
        where="partition = '%s'" % partition_id)
    self.db.close()
    return cpu_percent_sum / sample_amount
def getPartitionUsedMemoryAverage(self, partition_id, date_scope):
    """
    Return the average resident memory used by *partition_id* over
    *date_scope*, or None when no sample was collected.
    """
    self.db.connect()
    (memory_sum,), = self.db.select("user", date_scope,
        columns="SUM(memory_rss)",
        where="partition = '%s'" % partition_id)
    # SUM() yields NULL (None) when no row matched: nothing to average.
    if memory_sum is None:
        return
    (sample_amount,), = self.db.select("user", date_scope,
        columns="COUNT(DISTINCT time)",
        where="partition = '%s'" % partition_id)
    self.db.close()
    return memory_sum / sample_amount
def getPartitionDiskUsedAverage(self, partition_id, date_scope):
    """
    Return the average disk usage of *partition_id* over *date_scope*,
    or None when no sample was collected.
    """
    self.db.connect()
    (disk_used_sum,), = self.db.select("folder", date_scope,
        columns="SUM(disk_used)",
        where="partition = '%s'" % partition_id)
    # SUM() yields NULL (None) when no row matched: nothing to average.
    if disk_used_sum is None:
        return
    (collect_amount,), = self.db.select("folder", date_scope,
        columns="COUNT(DISTINCT time)",
        where="partition = '%s'" % partition_id)
    self.db.close()
    return disk_used_sum / collect_amount
class ConsumptionReport(ConsumptionReportBase):
......@@ -287,7 +280,7 @@ class ConsumptionReport(ConsumptionReportBase):
reference=user,
category="")
with open(xml_report_path, 'w') as f:
with open(xml_report_path, 'wb') as f:
f.write(journal.getXML())
f.close()
......@@ -298,23 +291,19 @@ class ConsumptionReport(ConsumptionReportBase):
def _getCpuLoadAverageConsumption(self, date_scope):
    """
    Return the system-wide average CPU load for *date_scope*.

    The SQL expression computes the average directly
    (SUM/COUNT), so a single scalar is returned.
    """
    self.db.connect()
    (cpu_load_percent_list,), = self.db.select("system", date_scope,
        columns="SUM(cpu_percent)/COUNT(cpu_percent)")
    self.db.close()
    return cpu_load_percent_list
def _getMemoryAverageConsumption(self, date_scope):
    """
    Return the system-wide average memory consumption for *date_scope*.

    The SQL expression computes the average directly
    (SUM/COUNT), so a single scalar is returned.
    """
    self.db.connect()
    (memory_used_list,), = self.db.select("system", date_scope,
        columns="SUM(memory_used)/COUNT(memory_used)")
    self.db.close()
    return memory_used_list
def _getZeroEmissionContribution(self):
self.db.connect()
......@@ -329,7 +318,7 @@ class Journal(object):
def getXML(self):
    """
    Serialize the journal tree to a complete XML document.

    Returns bytes: ElementTree.tostring() returns bytes by default,
    so the XML declaration prefix must be bytes too (Python 3).
    """
    report = ElementTree.tostring(self.root)
    return b"<?xml version='1.0' encoding='utf-8'?>%s" % report
def newTransaction(self, portal_type="Sale Packing List"):
transaction = ElementTree.SubElement(self.root, "transaction")
......
......@@ -27,13 +27,15 @@
#
##############################################################################
from __future__ import print_function
import psutil
import os
import subprocess
from temperature import collectComputerTemperature, \
launchTemperatureTest
from .temperature import collectComputerTemperature, launchTemperatureTest
from temperature.heating import get_contribution_ratio
from .temperature.heating import get_contribution_ratio
import six
MEASURE_INTERVAL = 5
......@@ -159,7 +161,7 @@ class HeatingContributionSnapshot(_Snapshot):
result = launchTemperatureTest(sensor_id)
if result is None:
print "Impossible to test sensor: %s " % sensor_id
print("Impossible to test sensor: %s " % sensor_id)
initial_temperature, final_temperature, duration = result
......@@ -215,8 +217,7 @@ class ComputerSnapshot(_Snapshot):
#
self.system_snapshot = SystemSnapshot()
self.temperature_snapshot_list = self._get_temperature_snapshot_list()
self.disk_snapshot_list = []
self.partition_list = self._get_physical_disk_info()
self._get_physical_disk_info()
if test_heating and model_id is not None \
and sensor_id is not None:
......@@ -231,16 +232,16 @@ class ComputerSnapshot(_Snapshot):
return temperature_snapshot_list
def _get_physical_disk_info(self):
    """
    Populate ``self.partition_list`` (one ``(device, total_bytes)``
    tuple per distinct device) and ``self.disk_snapshot_list`` (one
    DiskPartitionSnapshot per distinct device) from psutil.

    Side-effect only: previous contents of both lists are discarded.
    """
    # XXX: merge the following 2 to avoid calling disk_usage() twice
    self.disk_snapshot_list = []
    self.partition_list = []
    partition_set = set()
    for partition in psutil.disk_partitions():
        dev = partition.device
        # A device may be mounted several times; only record it once.
        if dev not in partition_set:  # XXX: useful ?
            partition_set.add(dev)
            usage = psutil.disk_usage(partition.mountpoint)
            self.partition_list.append((dev, usage.total))
            self.disk_snapshot_list.append(
                DiskPartitionSnapshot(dev, partition.mountpoint))
from __future__ import print_function
from multiprocessing import Process, active_children, cpu_count, Pipe
import subprocess
......@@ -14,28 +15,21 @@ except NotImplementedError:
DEFAULT_CPU = 1
def collectComputerTemperature(sensor_bin="sensors"):
cmd = ["%s -u" % sensor_bin]
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True)
stdout, stderr = sp.communicate()
stdout = subprocess.check_output((sensor_bin, '-u'), universal_newlines=True)
  • This seems to be a change in behavior: before this change, if the sensors command was not available, the test failed — which I believe is a good thing; not failing would hide errors.

    Isn't the missing part that we need to include component/lmsensors/buildout.cfg in the PATH of the test here ? similar to what we do for socat for example ?

    /cc @rafael @tomo

  • Apparently this is also something we can get from psutil https://psutil.readthedocs.io/en/latest/index.html#psutil.sensors_temperatures

  • Tests can fail because the command is not available (like on my development environment), but some tests also fail because the command exits with a non-zero status, for example:

    test_computer_snapshot (slapos.tests.collect.TestCollectSnapshot) ... No sensors found!
    Make sure you loaded all the kernel drivers you need.
    Try sensors-detect to find out which these are.
    ERROR

    I noticed this on rapidspace-testnode-003-3Nodes-ERP5PROJECT3 , also on RAPIDCLOUD-VM-COMP-2867-3Nodes-ERP5PROJECT6

  • it should fail if sensor is not available.... and it should get sensors from the component like you said.

  • Thanks. So if sensors command is not available it should fail. OK, let's change the software profile to add the command from component in $PATH -> slapos!479 (merged)

    But what should we do when the sensors command is available but does not find any sensors (the "No sensors found!" error message above)? slapos!479 (merged) won't solve this case.

    Also according to https://github.com/lm-sensors/lm-sensors , this is linux only, psutil version seems a bit more portable but it's not something that's universally available, so slapos should maybe consider this as something optional - only required on linux for example

  • From my understanding of the discussions in slapos!479 (comment 71803) , here we should not use check_output but an approach which would tolerate a non 0 exit code here.

    In !73 (merged) we discuss adding subprocess32, so we can maybe switch to subprocess.run here ?

  • for the subprocess.run approach -> !81 (merged)

Please register or sign in to reply
sensor_output_list = stdout.splitlines()
adapter_name = ""
sensor_temperature_list = []
for line_number in range(len(sensor_output_list)):
found_sensor = None
stripped_line = sensor_output_list[line_number].strip()
for line_number, sensor_output in enumerate(sensor_output_list):
stripped_line = sensor_output.strip()
if stripped_line.startswith("Adapter:"):
adapter_name = sensor_output_list[line_number-1]
elif stripped_line.startswith("temp") and "_input" in stripped_line:
temperature = sensor_output_list[line_number].split()[-1]
temperature = sensor_output.split()[-1]
found_sensor = ["%s %s" % (adapter_name, sensor_output_list[line_number-1]), float(temperature)]
if found_sensor is not None:
critical = '1000'
maximal = '1000'
for next_line in sensor_output_list[line_number+1:line_number+3]:
......@@ -120,7 +114,7 @@ def launchTemperatureTest(sensor_id, sensor_bin="sensors", timeout=600, interval
for connection in process_connection_list:
try:
print connection.recv()
print(connection.recv())
except EOFError:
continue
......
......@@ -28,7 +28,7 @@
#
##############################################################################
import ConfigParser
from six.moves import configparser
import errno
import fcntl
import grp
......@@ -51,12 +51,13 @@ import time
import traceback
import zipfile
import platform
from urllib2 import urlopen
from six.moves.urllib.request import urlopen
import six
import lxml.etree
import xml_marshaller.xml_marshaller
from slapos.util import mkdir_p, ipv6FromBin, binFromIpv6, lenNetmaskIpv6
from slapos.util import dumps, mkdir_p, ipv6FromBin, binFromIpv6, lenNetmaskIpv6
import slapos.slap as slap
from slapos import version
from slapos import manager as slapmanager
......@@ -87,7 +88,7 @@ class OS(object):
def _addWrapper(self, name):
def wrapper(*args, **kw):
arg_list = [repr(x) for x in args] + [
'%s=%r' % (x, y) for x, y in kw.iteritems()
'%s=%r' % (x, y) for x, y in six.iteritems(kw)
]
self._logger.debug('%s(%s)' % (name, ', '.join(arg_list)))
if not self._dry_run:
......@@ -232,8 +233,8 @@ def _getDict(obj):
return obj
return {
key: _getDict(value) \
for key, value in dikt.iteritems() \
key: _getDict(value)
for key, value in six.iteritems(dikt)
# do not attempt to serialize logger: it is both useless and recursive.
# do not serialize attributes starting with "_", let the classes have some privacy
if not key.startswith("_")
......@@ -336,7 +337,7 @@ class Computer(object):
if conf.dry_run:
return
try:
slap_computer.updateConfiguration(xml_marshaller.xml_marshaller.dumps(_getDict(self)))
slap_computer.updateConfiguration(dumps(_getDict(self)))
except slap.NotFoundError as error:
raise slap.NotFoundError("%s\nERROR: This SlapOS node is not recognised by "
"SlapOS Master and/or computer_id and certificates don't match. "
......@@ -358,7 +359,7 @@ class Computer(object):
with open(path_to_json, 'wb') as fout:
fout.write(json.dumps(computer_dict, sort_keys=True, indent=2))
new_xml = xml_marshaller.xml_marshaller.dumps(computer_dict)
new_xml = dumps(computer_dict)
new_pretty_xml = prettify_xml(new_xml)
path_to_archive = path_to_xml + '.zip'
......@@ -1197,7 +1198,7 @@ class Interface(object):
def parse_computer_definition(conf, definition_path):
conf.logger.info('Using definition file %r' % definition_path)
computer_definition = ConfigParser.RawConfigParser({
computer_definition = configparser.RawConfigParser({
'software_user': 'slapsoft',
})
computer_definition.read(definition_path)
......@@ -1308,7 +1309,7 @@ def parse_computer_xml(conf, xml_path):
def write_computer_definition(conf, computer):
computer_definition = ConfigParser.RawConfigParser()
computer_definition = configparser.RawConfigParser()
computer_definition.add_section('computer')
if computer.address is not None and computer.netmask is not None:
computer_definition.set('computer', 'address', '/'.join(
......
......@@ -38,7 +38,7 @@ import subprocess
import tarfile
import tempfile
import time
import xmlrpclib
from six.moves import xmlrpc_client as xmlrpclib, range
from supervisor import xmlrpc
......@@ -51,6 +51,7 @@ from slapos.grid.exception import (BuildoutFailedError, WrongPermissionError,
PathDoesNotExistError, DiskSpaceError)
from slapos.grid.networkcache import download_network_cached, upload_network_cached
from slapos.human import bytes2human
from slapos.util import bytes2str
WATCHDOG_MARK = '-on-watch'
......@@ -60,8 +61,8 @@ REQUIRED_COMPUTER_PARTITION_PERMISSION = 0o750
CP_STORAGE_FOLDER_NAME = 'DATA'
# XXX not very clean. this is changed when testing
PROGRAM_PARTITION_TEMPLATE = pkg_resources.resource_stream(__name__,
'templates/program_partition_supervisord.conf.in').read()
PROGRAM_PARTITION_TEMPLATE = bytes2str(pkg_resources.resource_string(__name__,
'templates/program_partition_supervisord.conf.in'))
def free_space(path, fn):
......@@ -150,7 +151,7 @@ class Software(object):
self.software_min_free_space = software_min_free_space
def check_free_space(self):
required = self.software_min_free_space
required = self.software_min_free_space or 0
available = free_space_nonroot(self.software_path)
if available < required:
......@@ -292,7 +293,7 @@ class Software(object):
f.close()
def _create_buildout_profile(self, buildout_cfg, url):
with open(buildout_cfg, 'wb') as fout:
with open(buildout_cfg, 'w') as fout:
fout.write('[buildout]\nextends = ' + url + '\n')
self._set_ownership(buildout_cfg)
......@@ -419,7 +420,7 @@ class Partition(object):
def check_free_space(self):
required = self.instance_min_free_space
required = self.instance_min_free_space or 0
available = free_space_nonroot(self.instance_path)
if available < required:
......@@ -481,8 +482,8 @@ class Partition(object):
}
def addCustomGroup(self, group_suffix, partition_id, program_list):
group_partition_template = pkg_resources.resource_stream(__name__,
'templates/group_partition_supervisord.conf.in').read()
group_partition_template = bytes2str(pkg_resources.resource_string(__name__,
'templates/group_partition_supervisord.conf.in'))
group_id = '{}-{}'.format(partition_id, group_suffix)
self.supervisor_configuration_group += group_partition_template % {
......@@ -568,8 +569,8 @@ class Partition(object):
# fill generated buildout with additional information
buildout_text = open(config_location).read()
buildout_text += '\n\n' + pkg_resources.resource_string(__name__,
'templates/buildout-tail.cfg.in') % {
buildout_text += '\n\n' + bytes2str(pkg_resources.resource_string(__name__,
'templates/buildout-tail.cfg.in')) % {
'computer_id': self.computer_id,
'partition_id': self.partition_id,
'server_url': self.server_url,
......@@ -671,8 +672,8 @@ class Partition(object):
os.unlink(self.supervisord_partition_configuration_path)
else:
partition_id = self.computer_partition.getId()
group_partition_template = pkg_resources.resource_stream(__name__,
'templates/group_partition_supervisord.conf.in').read()
group_partition_template = bytes2str(pkg_resources.resource_string(__name__,
'templates/group_partition_supervisord.conf.in'))
self.supervisor_configuration_group = group_partition_template % {
'instance_id': partition_id,
'program_list': ','.join(['_'.join([partition_id, runner])
......@@ -842,7 +843,7 @@ class Partition(object):
self.logger.warning('Problem while stopping process %r, will try later' % gname)
else:
self.logger.info('Stopped %r' % gname)
for i in xrange(0, 10):
for i in range(0, 10):
# Some process may be still running, be nice and wait for them to be stopped.
try:
supervisor.removeProcessGroup(gname)
......
......@@ -11,6 +11,7 @@
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
from __future__ import print_function
import ast
import json
......@@ -29,10 +30,10 @@ try:
else:
LIBNETWORKCACHE_ENABLED = True
except:
print 'There was problem while trying to import slapos.libnetworkcache:'\
'\n%s' % traceback.format_exc()
print('There was problem while trying to import slapos.libnetworkcache:\n%s'
% traceback.format_exc())
LIBNETWORKCACHE_ENABLED = False
print 'Networkcache forced to be disabled.'
print('Networkcache forced to be disabled.')
......@@ -45,8 +46,8 @@ def fallback_call(function):
try:
return function(self, *args, **kwd)
except: # indeed, *any* exception is swallowed
print 'There was problem while calling method %r:\n%s' % (
function.__name__, traceback.format_exc())
print('There was problem while calling method %r:\n%s' % (
function.__name__, traceback.format_exc()))
return False
wrapper.__doc__ = function.__doc__
return wrapper
......@@ -107,7 +108,7 @@ def download_network_cached(cache_url, dir_url, software_url, software_root,
f.close()
file_descriptor.close()
return True
except (IOError, DirectoryNotFound), e:
except (IOError, DirectoryNotFound) as e:
logger.info('Failed to download from network cache %s: %s' % \
(software_url, str(e)))
return False
......@@ -169,7 +170,7 @@ def upload_network_cached(software_root, software_url, cached_key,
try:
return nc.upload_generic(f, cached_key, **kw)
except (IOError, UploadError), e:
except (IOError, UploadError) as e:
logger.info('Failed to upload file. %s' % (str(e)))
return False
finally:
......
......@@ -38,7 +38,7 @@ import importlib
import traceback
import psutil
from multiprocessing import Process, Queue as MQueue
import Queue
from six.moves import queue, reload_module
from slapos.util import mkdir_p, chownDirectory
from slapos.grid.utils import dropPrivileges, killProcessTree
from slapos.grid.promise import interface
......@@ -168,7 +168,7 @@ class PromiseProcess(Process):
if not os.path.exists(init_file):
with open(init_file, 'w') as f:
f.write("")
os.chmod(init_file, 0644)
os.chmod(init_file, 0o644)
# add promise folder to sys.path so we can import promise script
if sys.path[0] != promise_folder:
sys.path[0:0] = [promise_folder]
......@@ -184,9 +184,8 @@ class PromiseProcess(Process):
raise AttributeError("Class RunPromise not found in promise" \
"%s" % self.name)
if not interface.IPromise.implementedBy(promise_module.RunPromise):
raise RuntimeError("RunPromise class in %s must implements 'IPromise'" \
" interface. zope_interface.implements(interface.IPromise) is" \
" missing ?" % self.name)
raise RuntimeError("RunPromise class in %s must implement 'IPromise'"
" interface. @implementer(interface.IPromise) is missing ?" % self.name)
from slapos.grid.promise.generic import GenericPromise
if not issubclass(promise_module.RunPromise, GenericPromise):
......@@ -195,7 +194,7 @@ class PromiseProcess(Process):
if promise_module.__file__ != self.promise_path:
# cached module need to be updated
promise_module = reload(promise_module)
promise_module = reload_module(promise_module)
# load extra parameters
self._loadPromiseParameterDict(promise_module)
......@@ -208,7 +207,7 @@ class PromiseProcess(Process):
if not isinstance(extra_dict, dict):
raise ValueError("Extra parameter is not a dict")
for key in extra_dict:
if self.argument_dict.has_key(key):
if key in self.argument_dict:
raise ValueError("Extra parameter name %r cannot be used.\n%s" % (
key, extra_dict))
self.argument_dict[key] = extra_dict[key]
......@@ -362,7 +361,7 @@ class PromiseLauncher(object):
try:
result = PromiseQueueResult()
result.load(json.loads(f.read()))
except ValueError, e:
except ValueError as e:
result = None
self.logger.warn('Bad promise JSON result at %r: %s' % (
promise_output_file,
......@@ -375,7 +374,7 @@ class PromiseLauncher(object):
while True:
try:
self.queue_result.get_nowait()
except Queue.Empty:
except queue.Empty:
return
def _updateFolderOwner(self, folder_path=None):
......@@ -443,7 +442,7 @@ class PromiseLauncher(object):
if not promise_process.is_alive():
try:
queue_item = self.queue_result.get(True, 1)
except Queue.Empty:
except queue.Empty:
# no result found in process result Queue
pass
else:
......
......@@ -35,8 +35,10 @@ import time
import random
import traceback
import slapos.slap
from slapos.util import mkdir_p
from slapos.util import bytes2str, mkdir_p
from abc import ABCMeta, abstractmethod
import six
from six import PY3, with_metaclass
from datetime import datetime, timedelta
PROMISE_STATE_FOLDER_NAME = '.slapgrid/promise'
......@@ -46,6 +48,10 @@ PROMISE_LOG_FOLDER_NAME = '.slapgrid/promise/log'
PROMISE_PARAMETER_NAME = 'extra_config_dict'
PROMISE_PERIOD_FILE_NAME = '%s.periodicity'
LOGLINE_RE = r"(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})\s+\-?\s*(\w{4,7})\s+\-?\s+(\d+\-\d{3})\s+\-?\s*(.*)"
matchLogStr = re.compile(LOGLINE_RE).match
matchLogBytes = re.compile(LOGLINE_RE.encode()).match if PY3 else matchLogStr
class BaseResult(object):
def __init__(self, problem=False, message=None, date=None):
self.__problem = problem
......@@ -129,10 +135,7 @@ class PromiseQueueResult(object):
self.path = data['path']
self.execution_time = data['execution-time']
class GenericPromise(object):
# Abstract class
__metaclass__ = ABCMeta
class GenericPromise(with_metaclass(ABCMeta, object)):
def __init__(self, config):
self.__config = config
......@@ -160,9 +163,7 @@ class GenericPromise(object):
for handler in self.logger.handlers:
self.logger.removeHandler(handler)
if self.__log_folder is None:
# configure logger with StringIO
import cStringIO
self.__logger_buffer = cStringIO.StringIO()
self.__logger_buffer = six.StringIO()
logger_handler = logging.StreamHandler(self.__logger_buffer)
self.__log_file = None
else:
......@@ -230,9 +231,9 @@ class GenericPromise(object):
"""
Call bang if requested
"""
if self.__config.has_key('master-url') and \
self.__config.has_key('partition-id') and \
self.__config.has_key('computer-id'):
if 'master-url' in self.__config and \
'partition-in' in self.__config and \
'computer-id' in self.__config:
slap = slapos.slap.slap()
slap.initializeConnection(
......@@ -247,18 +248,14 @@ class GenericPromise(object):
computer_partition.bang(message)
self.logger.info("Bang with message %r." % message)
def __getLogRegex(self):
return re.compile(r"(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2})\s+\-?\s*(\w{4,7})\s+\-?\s+(\d+\-\d{3})\s+\-?\s*(.*)")
def __getResultFromString(self, result_string, only_failure=False):
line_list = result_string.split('\n')
result_list = []
line_part = ""
regex = self.__getLogRegex()
for line in line_list:
if not line:
continue
match = regex.match(line)
match = matchLogStr(line)
if match is not None:
if not only_failure or (only_failure and match.groups()[1] == 'ERROR'):
result_list.append({
......@@ -297,37 +294,36 @@ class GenericPromise(object):
if not os.path.exists(self.__log_file):
return []
regex = self.__getLogRegex()
max_date_string = ""
if latest_minute > 0:
date = datetime.now() - timedelta(minutes=latest_minute)
max_date_string = date.strftime('%Y-%m-%d %H:%M:%S')
else:
max_date_string = ""
line_list = []
result_list = []
transaction_id = None
transaction_count = 0
with open(self.__log_file, 'r') as f:
offset = 0
with open(self.__log_file, 'rb') as f:
f.seek(0, 2)
size = f.tell() * -1
line = line_part = ""
while offset > size:
offset = f.tell()
line = b""
line_part = ""
while offset:
offset -= 1
f.seek(offset, 2)
f.seek(offset)
char = f.read(1)
if char != '\n':
if char != b'\n':
line = char + line
if char == '\n' or offset == size:
# Add new line
if offset == -1:
if offset:
continue
if line != "":
result = regex.match(line)
if line:
result = matchLogBytes(line)
if result is not None:
if max_date_string and result.groups()[0] <= max_date_string:
date, level, tid, msg = map(bytes2str, result.groups())
if max_date_string and date <= max_date_string:
break
if transaction_id != result.groups()[2]:
if transaction_id != tid:
if transaction_id is not None:
# append new result
result_list.append(line_list)
......@@ -335,20 +331,18 @@ class GenericPromise(object):
transaction_count += 1
if transaction_count > result_count:
break
transaction_id = result.groups()[2]
if not only_failure or \
(only_failure and result.groups()[1] == 'ERROR'):
transaction_id = tid
if not only_failure or level == 'ERROR':
line_list.insert(0, {
'date': datetime.strptime(result.groups()[0],
'date': datetime.strptime(date,
'%Y-%m-%d %H:%M:%S'),
'status': result.groups()[1],
'message': (result.groups()[3] + line_part).strip(),
'status': level,
'message': (msg + line_part).strip(),
})
line_part = ""
else:
line_part = '\n' + line + line_part
line = ""
continue
line = line_part = ""
line_part = '\n' + bytes2str(line) + line_part
line = b""
if len(line_list):
result_list.append(line_list)
......@@ -410,7 +404,7 @@ class GenericPromise(object):
try:
self.__queue.put_nowait(result_item)
break
except Queue.Full, e:
except Queue.Full as e:
error = e
time.sleep(0.5)
if error:
......@@ -459,7 +453,7 @@ class GenericPromise(object):
"""
try:
self.sense()
except Exception, e:
except Exception as e:
# log the result
self.logger.error(str(e))
if check_anomaly:
......@@ -468,7 +462,7 @@ class GenericPromise(object):
result = self.anomaly()
if result is None:
raise ValueError("Promise anomaly method returned 'None'")
except Exception, e:
except Exception as e:
result = AnomalyResult(problem=True, message=str(e))
else:
if isinstance(result, AnomalyResult) and result.hasFailed() and can_bang:
......@@ -482,7 +476,7 @@ class GenericPromise(object):
result = self.test()
if result is None:
raise ValueError("Promise test method returned 'None'")
except Exception, e:
except Exception as e:
result = TestResult(problem=True, message=str(e))
if self.__logger_buffer is not None:
......
......@@ -32,17 +32,16 @@ import subprocess
import functools
import signal
import traceback
from zope import interface as zope_interface
from zope.interface import implementer
from slapos.grid.promise import interface
from slapos.grid.promise.generic import GenericPromise
@implementer(interface.IPromise)
class WrapPromise(GenericPromise):
"""
A wrapper promise used to run old promises style and bash promises
"""
zope_interface.implements(interface.IPromise)
def __init__(self, config):
GenericPromise.__init__(self, config)
self.setPeriodicity(minute=2)
......@@ -62,21 +61,19 @@ class WrapPromise(GenericPromise):
[self.getPromiseFile()],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=self.getPartitionFolder()
cwd=self.getPartitionFolder(),
universal_newlines=True,
)
handler = functools.partial(self.terminate, self.getName(), self.logger,
promise_process)
signal.signal(signal.SIGINT, handler)
signal.signal(signal.SIGTERM, handler)
output, error = promise_process.communicate()
message = output or ""
if error:
message += "\n" + error
if promise_process.returncode != 0:
self.logger.error(message.strip())
message = promise_process.communicate()[0].strip()
if promise_process.returncode:
self.logger.error(message)
else:
self.logger.info(message.strip())
self.logger.info(message)
def test(self):
# Fail if the latest promise result failed
......
......@@ -32,7 +32,7 @@ import os
import pkg_resources
import random
import socket
import StringIO
from io import BytesIO
import subprocess
import sys
import tempfile
......@@ -42,6 +42,7 @@ import warnings
import logging
import json
import shutil
import six
if sys.version_info < (2, 6):
warnings.warn('Used python version (%s) is old and has problems with'
......@@ -167,7 +168,7 @@ def merged_options(args, configp):
if configp.has_section('networkcache'):
options.update(dict(configp.items('networkcache')))
for key, value in vars(args).iteritems():
for key, value in six.iteritems(vars(args)):
if value is not None:
options[key] = value
......@@ -672,7 +673,7 @@ stderr_logfile_backups=1
computer_partition.setComputerPartitionRelatedInstanceList(
[reference for reference in tf.read().split('\n') if reference]
)
except NotFoundError, e:
except NotFoundError as e:
# Master doesn't implement this feature ?
self.logger.warning("NotFoundError: %s. \nCannot send requested instance "\
"list to master. Please check if this feature is"\
......@@ -682,12 +683,12 @@ stderr_logfile_backups=1
"""
"""
query_cmd = rule_command.replace('--add-rule', '--query-rule')
process = FPopen(query_cmd)
process = FPopen(query_cmd, universal_newlines=True)
result, stderr = process.communicate()
if result.strip() == 'no':
# rule doesn't exist add to firewall
self.logger.debug(rule_command)
process = FPopen(rule_command)
process = FPopen(rule_command, universal_newlines=True)
rule_result, stderr = process.communicate()
if process.returncode == 0:
if rule_result.strip() != 'success':
......@@ -705,13 +706,13 @@ stderr_logfile_backups=1
"""
"""
query_cmd = rule_command.replace('--add-rule', '--query-rule')
process = FPopen(query_cmd)
process = FPopen(query_cmd, universal_newlines=True)
result, stderr = process.communicate()
if result.strip() == 'yes':
# The rule really exist, remove it
remove_command = rule_command.replace('--add-rule', '--remove-rule')
self.logger.debug(remove_command)
process = FPopen(remove_command)
process = FPopen(remove_command, universal_newlines=True)
rule_result, stderr = process.communicate()
if process.returncode == 0:
if rule_result.strip() != 'success':
......@@ -764,7 +765,7 @@ stderr_logfile_backups=1
# XXX - need to check firewalld reload instead of restart
self.logger.info("Reloading firewall configuration...")
reload_cmd = self.firewall_conf['reload_config_cmd']
reload_process = FPopen(reload_cmd)
reload_process = FPopen(reload_cmd, universal_newlines=True)
stdout, stderr = reload_process.communicate()
if reload_process.returncode != 0:
raise Exception("Failed to load firewalld rules with command %s.\n%" % (
......@@ -922,7 +923,7 @@ stderr_logfile_backups=1
self._checkPromiseList(local_partition,
check_anomaly=True,
force=False)
except PromiseError, e:
except PromiseError as e:
self.logger.error(e)
if partition_access_status is None or not status_error:
computer_partition.error(e, logger=self.logger)
......@@ -1140,7 +1141,7 @@ stderr_logfile_backups=1
(computer_partition_id, computer_partition_state)
computer_partition.error(error_string, logger=self.logger)
raise NotImplementedError(error_string)
except Exception, e:
except Exception as e:
if not isinstance(e, PromiseError):
with open(error_output_file, 'w') as error_file:
# Write error message in a log file assible to computer partition user
......@@ -1309,7 +1310,7 @@ stderr_logfile_backups=1
def validateXML(self, to_be_validated, xsd_model):
"""Validates a given xml file"""
#We retrieve the xsd model
xsd_model = StringIO.StringIO(xsd_model)
xsd_model = BytesIO(xsd_model)
xmlschema_doc = etree.parse(xsd_model)
xmlschema = etree.XMLSchema(xmlschema_doc)
......
......@@ -35,9 +35,10 @@ import subprocess
import stat
import sys
import time
import xmlrpclib
from six.moves import xmlrpc_client as xmlrpclib
from slapos.grid.utils import (createPrivateDirectory, SlapPopen, updateFile)
from slapos.util import bytes2str
from supervisor import xmlrpc, states
......@@ -89,8 +90,8 @@ def createSupervisordConfiguration(instance_root, watchdog_command=''):
# Creates supervisord configuration
updateFile(supervisord_configuration_file_path,
pkg_resources.resource_stream(__name__,
'templates/supervisord.conf.in').read() % {
bytes2str(pkg_resources.resource_string(__name__,
'templates/supervisord.conf.in')) % {
'supervisord_configuration_directory': supervisord_configuration_directory,
'supervisord_socket': os.path.abspath(supervisord_socket),
'supervisord_loglevel': 'info',
......
......@@ -40,6 +40,8 @@ import logging
import psutil
import time
import six
from slapos.grid.exception import BuildoutFailedError, WrongPermissionError
# Such umask by default will create paths with full permission
......@@ -123,20 +125,18 @@ class SlapPopen(subprocess.Popen):
self.stdin.close()
self.stdin = None
# XXX-Cedric: this algorithm looks overkill for simple logging.
output_lines = []
while True:
line = self.stdout.readline()
if line == '' and self.poll() is not None:
break
if line:
output_lines.append(line)
logger.info(line.rstrip('\n'))
for line in self.stdout:
if type(line) is not str:
line = line.decode(errors='replace')
output_lines.append(line)
logger.info(line.rstrip('\n'))
self.wait()
self.output = ''.join(output_lines)
def md5digest(url):
  """Return the hexadecimal MD5 digest of *url*.

  The URL string is encoded to UTF-8 first because ``hashlib.md5``
  only accepts bytes on Python 3; on Python 2 the encode is a no-op
  for ASCII URLs, so behavior is identical on both versions.
  """
  return hashlib.md5(url.encode('utf-8')).hexdigest()
def getCleanEnvironment(logger, home_path='/tmp'):
......@@ -150,7 +150,7 @@ def getCleanEnvironment(logger, home_path='/tmp'):
if old is not None:
removed_env.append(k)
changed_env['HOME'] = env['HOME'] = home_path
for k in sorted(changed_env.iterkeys()):
for k in sorted(six.iterkeys(changed_env)):
logger.debug('Overridden %s = %r' % (k, changed_env[k]))
if removed_env:
logger.debug('Removed from environment: %s' % ', '.join(sorted(removed_env)))
......@@ -351,18 +351,20 @@ def launchBuildout(path, buildout_binary, logger,
def updateFile(file_path, content, mode=0o600):
  """Create or update ``file_path`` so it contains ``content`` with ``mode``.

  Returns True when the file was created or altered (content or
  permission bits changed), False when it was already up to date.

  ``content`` is a text string; it is encoded to UTF-8 and compared
  as bytes so the check behaves identically on Python 2 and 3.
  """
  content = content.encode('utf-8')
  try:
    with open(file_path, 'rb') as f:
      # Read one extra byte so a file that merely starts with
      # `content` but is longer is detected as different.
      if f.read(len(content) + 1) == content:
        if stat.S_IMODE(os.fstat(f.fileno()).st_mode) == mode:
          return False
        # Content is already correct: only fix the permissions.
        os.fchmod(f.fileno(), mode)
        return True
  except IOError:
    # File missing or unreadable: fall through and (re)write it.
    pass
  with open(file_path, 'wb') as f:
    # Restrict permissions before writing any (possibly secret) content.
    os.fchmod(f.fileno(), mode)
    f.write(content)
  return True
def updateExecutable(executable_path, content):
......@@ -399,7 +401,7 @@ def killProcessTree(pid, logger):
for child in running_process_list:
try:
child.suspend()
except psutil.Error, e:
except psutil.Error as e:
logger.debug(str(e))
time.sleep(0.2)
......@@ -408,5 +410,5 @@ def killProcessTree(pid, logger):
for process in process_list:
try:
process.kill()
except psutil.Error, e:
except psutil.Error as e:
logger.debug("Process kill: %s" % e)
......@@ -30,6 +30,7 @@
import argparse
import os.path
import sys
import six
import slapos.slap.slap
from slapos.grid.slapgrid import COMPUTER_PARTITION_TIMESTAMP_FILENAME, \
......@@ -56,7 +57,7 @@ def parseArgumentTuple():
# Build option_dict
option_dict = {}
for argument_key, argument_value in vars(option).iteritems():
for argument_key, argument_value in six.iteritems(vars(option)):
option_dict.update({argument_key: argument_value})
return option_dict
......
......@@ -5,12 +5,12 @@ import os.path
import pwd
import time
from zope import interface as zope_interface
from zope.interface import implementer
from slapos.manager import interface
logger = logging.getLogger(__name__)
@implementer(interface.IManager)
class Manager(object):
"""Manage cgroup's cpuset in terms on initializing and runtime operations.
......@@ -21,8 +21,6 @@ class Manager(object):
TODO: there is no limit on number of reserved cores per user.
"""
zope_interface.implements(interface.IManager)
cpu_exclusive_file = ".slapos-cpu-exclusive"
cpuset_path = "/sys/fs/cgroup/cpuset/"
task_write_mode = "wt"
......
......@@ -5,7 +5,6 @@ import os
import pwd
import grp
from .interface import IManager
from itertools import ifilter
from zope import interface
logger = logging.getLogger(__name__)
......
......@@ -5,8 +5,8 @@ import netaddr
import os
from .interface import IManager
from itertools import ifilter
from zope import interface
from six.moves import filter
from zope.interface import implementer
logger = logging.getLogger(__name__)
......@@ -23,9 +23,8 @@ def which(exename):
return full_path
return None
@implementer(IManager)
class Manager(object):
interface.implements(IManager)
port_redirect_filename = '.slapos-port-redirect'
def __init__(self, config):
......@@ -89,7 +88,7 @@ class Manager(object):
'full_ip_list', [])
partition_ip_list = [tup[1] for tup in partition_ip_list]
partition_ipv6 = next(ifilter(lambda ip_addr: ':' in ip_addr,
partition_ipv6 = next(filter(lambda ip_addr: ':' in ip_addr,
partition_ip_list),
None)
......
......@@ -4,17 +4,16 @@ import os
import sys
import subprocess
from zope import interface as zope_interface
from zope.interface import implementer
from slapos.manager import interface
from slapos.grid.slapgrid import COMPUTER_PARTITION_WAIT_LIST_FILENAME
logger = logging.getLogger(__name__)
@implementer(interface.IManager)
class Manager(object):
"""Manager is called in every step of preparation of the computer."""
zope_interface.implements(interface.IManager)
def __init__(self, config):
"""Manager needs to know config for its functioning.
"""
......
......@@ -33,6 +33,8 @@ import logging
from slapos.proxy.views import app
from slapos.util import sqlite_connect
import six
def _generateSoftwareProductListFromString(software_product_list_string):
"""
Take a string as argument (which usually comes from the software_product_list
......@@ -72,7 +74,7 @@ class ProxyConfig(object):
elif section.startswith('multimaster/'):
# Merge multimaster configuration if any
# XXX: check for duplicate SR entries
for key, value in configuration_dict.iteritems():
for key, value in six.iteritems(configuration_dict):
if key == 'software_release_list':
# Split multi-lines values
configuration_dict[key] = [line.strip() for line in value.strip().split('\n')]
......
# -*- coding: utf-8 -*-
import pkg_resources
from slapos.util import bytes2str
DB_VERSION = pkg_resources.resource_stream('slapos.proxy', 'schema.sql').readline().strip().split(':')[1]
DB_VERSION = bytes2str(pkg_resources.resource_stream('slapos.proxy', 'schema.sql').readline()).strip().split(':')[1]
This diff is collapsed.
......@@ -30,4 +30,4 @@ if sys.version_info < (2, 6):
import warnings
warnings.warn('Used python version (%s) is old and has problems with'
' IPv6 connections' % '.'.join([str(q) for q in sys.version_info[:3]]))
from slap import *
from .slap import *
This diff is collapsed.
......@@ -29,23 +29,21 @@ import logging
import pprint
import unittest
import tempfile
import StringIO
import sys
import os
import sqlite3
import pkg_resources
from contextlib import contextmanager
from mock import patch, create_autospec
import mock
from slapos.util import sqlite_connect
from slapos.util import sqlite_connect, bytes2str
import slapos.cli.console
import slapos.cli.entry
import slapos.cli.info
import slapos.cli.list
import slapos.cli.supervisorctl
import slapos.cli.proxy_show
from slapos.cli.proxy_show import do_show, StringIO
from slapos.client import ClientConfig
import slapos.grid.svcbackend
import slapos.proxy
......@@ -99,9 +97,9 @@ class TestCliProxyShow(CliMixin):
self.conf.logger = self.logger
# load database
schema = pkg_resources.resource_stream('slapos.tests.slapproxy', 'database_dump_version_11.sql')
schema = bytes2str(pkg_resources.resource_string('slapos.tests.slapproxy', 'database_dump_version_11.sql'))
db = sqlite_connect(self.db_file.name)
db.cursor().executescript(schema.read())
db.cursor().executescript(schema)
db.commit()
# by default we simulate being invoked with "show all" arguments
......@@ -121,7 +119,7 @@ class TestCliProxyShow(CliMixin):
with mock.patch(
'slapos.cli.proxy_show.logging.getLogger',
return_value=logger):
slapos.cli.proxy_show.do_show(self.conf)
do_show(self.conf)
# installed softwares are listed
logger.info.assert_any_call(
......@@ -132,7 +130,7 @@ class TestCliProxyShow(CliMixin):
logger.info.assert_any_call(
' %s = %s',
'_',
'{\n "url": "memcached://10.0.30.235:2003/", \n "monitor-base-url": ""\n}')
'{\n "monitor-base-url": "",\n "url": "memcached://10.0.30.235:2003/"\n}')
# other parameters are displayed as simple string
logger.info.assert_any_call(
......@@ -152,10 +150,10 @@ class TestCliProxyShow(CliMixin):
def test_proxy_show_displays_on_stdout(self):
saved_stderr = sys.stderr
saved_stdout = sys.stdout
sys.stderr = stderr = StringIO.StringIO()
sys.stdout = stdout = StringIO.StringIO()
sys.stderr = stderr = StringIO()
sys.stdout = stdout = StringIO()
try:
slapos.cli.proxy_show.do_show(self.conf)
do_show(self.conf)
finally:
sys.stderr = saved_stderr
sys.stdout = saved_stdout
......@@ -169,8 +167,8 @@ class TestCliProxyShow(CliMixin):
def test_proxy_show_use_pager(self):
saved_stderr = sys.stderr
saved_stdout = sys.stdout
sys.stderr = stderr = StringIO.StringIO()
sys.stdout = stdout = StringIO.StringIO()
sys.stderr = stderr = StringIO()
sys.stdout = stdout = StringIO()
stdout.isatty = lambda *args: True
# use a pager that just output to a file.
......@@ -179,7 +177,7 @@ class TestCliProxyShow(CliMixin):
os.environ['PAGER'] = 'cat > {}'.format(tmp.name)
try:
slapos.cli.proxy_show.do_show(self.conf)
do_show(self.conf)
finally:
sys.stderr = saved_stderr
sys.stdout = saved_stdout
......@@ -327,53 +325,34 @@ class TestCliSupervisorctl(CliMixin):
class TestCliConsole(unittest.TestCase):
def setUp(self):
cp = slapos.slap.ComputerPartition('computer_id', 'partition_id')
cp._parameter_dict = {'parameter_name': 'parameter_value'}
request_patch = patch.object(slapos.slap.OpenOrder, 'request', return_value = cp)
self.mock_request = request_patch.start()
script = """\
print(request('software_release', 'instance').getInstanceParameterDict()['parameter_name'])
"""
self.config_file = tempfile.NamedTemporaryFile()
self.config_file.write('''[slapos]
master_url=null
''')
self.config_file.flush()
def tearDown(self):
self.mock_request.stop()
self.config_file.close()
@contextmanager
def _test_console(self):
cp = slapos.slap.ComputerPartition('computer_id', 'partition_id')
cp._parameter_dict = {'parameter_name': 'parameter_value'}
with patch.object(slapos.slap.OpenOrder, 'request',
return_value = cp) as mock_request, \
patch.object(sys, 'stdout', StringIO()) as app_stdout, \
tempfile.NamedTemporaryFile() as config_file:
config_file.write(b'[slapos]\nmaster_url=null\n')
config_file.flush()
yield slapos.cli.entry.SlapOSApp(), config_file.name
mock_request.assert_called_once_with(
'software_release', 'instance')
self.assertIn('parameter_value', app_stdout.getvalue())
def test_console_interactive(self):
app = slapos.cli.entry.SlapOSApp()
saved_stdin = sys.stdin
saved_stdout = sys.stdout
try:
sys.stdin = app_stdin = StringIO.StringIO(
"""print request('software_release', 'instance').getInstanceParameterDict()['parameter_name']\n""")
sys.stdout = app_stdout = StringIO.StringIO()
app.run(('console', '--cfg', self.config_file.name))
finally:
sys.stdin = saved_stdin
sys.stdout = saved_stdout
self.mock_request.assert_called_once_with('software_release', 'instance')
self.assertIn('parameter_value', app_stdout.getvalue())
with self._test_console() as (app, config_file), \
patch.object(sys, 'stdin', StringIO(self.script)):
app.run(('console', '--cfg', config_file))
def test_console_script(self):
with tempfile.NamedTemporaryFile() as script:
script.write(
"""print request('software_release', 'instance').getInstanceParameterDict()['parameter_name']\n""")
with self._test_console() as (app, config_file), \
tempfile.NamedTemporaryFile('w') as script:
script.write(self.script)
script.flush()
app = slapos.cli.entry.SlapOSApp()
saved_stdout = sys.stdout
try:
sys.stdout = app_stdout = StringIO.StringIO()
app.run(('console', '--cfg', self.config_file.name, script.name))
finally:
sys.stdout = saved_stdout
self.mock_request.assert_called_once_with('software_release', 'instance')
self.assertIn('parameter_value', app_stdout.getvalue())
app.run(('console', '--cfg', config_file, script.name))
......@@ -36,7 +36,7 @@ import psutil
from time import strftime
from slapos.collect import entity, snapshot, db, reporter
from slapos.cli.entry import SlapOSApp
from ConfigParser import ConfigParser
from six.moves.configparser import ConfigParser
class FakeDatabase(object):
def __init__(self):
......@@ -364,7 +364,7 @@ class TestCollectReport(unittest.TestCase):
with tarfile.open("%s.tar.gz" % dump_folder) as tf:
self.assertEqual(tf.getmembers()[0].name, "1990-01-01")
self.assertEqual(tf.getmembers()[1].name, "1990-01-01/test.txt")
self.assertEqual(tf.extractfile(tf.getmembers()[1]).read(), 'hi')
self.assertEqual(tf.extractfile(tf.getmembers()[1]).read(), b'hi')
class TestCollectSnapshot(unittest.TestCase):
......@@ -482,10 +482,10 @@ class TestCollectEntity(unittest.TestCase):
config.set('slapos', 'instance_root', self.instance_root)
user_dict = entity.get_user_list(config)
username_list = ['slapuser0', 'slapuser1', 'slapuser2']
self.assertEqual(username_list, user_dict.keys())
username_set = {'slapuser0', 'slapuser1', 'slapuser2'}
self.assertEquals(username_set, set(user_dict))
for name in username_list:
for name in username_set:
self.assertEqual(user_dict[name].name, name)
self.assertEqual(user_dict[name].snapshot_list, [])
expected_path = "%s/slappart%s" % (self.instance_root, name.strip("slapuser"))
......@@ -508,11 +508,11 @@ class TestCollectEntity(unittest.TestCase):
self.assertEqual(database.invoked_method_list[1][0], "insertUserSnapshot")
self.assertEqual(database.invoked_method_list[1][1][0], ("fakeuser0",))
self.assertEqual(database.invoked_method_list[1][1][1].keys(),
['cpu_time', 'cpu_percent', 'process',
self.assertEqual(set(database.invoked_method_list[1][1][1]),
{'cpu_time', 'cpu_percent', 'process',
'memory_rss', 'pid', 'memory_percent',
'io_rw_counter', 'insertion_date', 'insertion_time',
'io_cycles_counter', 'cpu_num_threads'])
'io_cycles_counter', 'cpu_num_threads'})
self.assertEqual(database.invoked_method_list[2], ("commit", ""))
self.assertEqual(database.invoked_method_list[3], ("close", ""))
......@@ -527,19 +527,19 @@ class TestCollectEntity(unittest.TestCase):
self.assertEqual(database.invoked_method_list[1][0], "insertUserSnapshot")
self.assertEqual(database.invoked_method_list[1][1][0], ("fakeuser0",))
self.assertEqual(database.invoked_method_list[1][1][1].keys(),
['cpu_time', 'cpu_percent', 'process',
self.assertEqual(set(database.invoked_method_list[1][1][1]),
{'cpu_time', 'cpu_percent', 'process',
'memory_rss', 'pid', 'memory_percent',
'io_rw_counter', 'insertion_date', 'insertion_time',
'io_cycles_counter', 'cpu_num_threads'])
self.assertEqual(database.invoked_method_list[2], ("commit", ""))
self.assertEqual(database.invoked_method_list[3], ("close", ""))
'io_cycles_counter', 'cpu_num_threads'})
self.assertEquals(database.invoked_method_list[2], ("commit", ""))
self.assertEquals(database.invoked_method_list[3], ("close", ""))
self.assertEqual(database.invoked_method_list[4], ("connect", ""))
self.assertEqual(database.invoked_method_list[5][0], "inserFolderSnapshot")
self.assertEqual(database.invoked_method_list[5][1][0], ("fakeuser0",))
self.assertEqual(database.invoked_method_list[5][1][1].keys(),
['insertion_date', 'disk_usage', 'insertion_time'])
self.assertEqual(set(database.invoked_method_list[5][1][1]),
{'insertion_date', 'disk_usage', 'insertion_time'})
self.assertEqual(database.invoked_method_list[6], ("commit", ""))
self.assertEqual(database.invoked_method_list[7], ("close", ""))
......@@ -554,23 +554,23 @@ class TestCollectEntity(unittest.TestCase):
self.assertEqual(database.invoked_method_list[1][0], "insertUserSnapshot")
self.assertEqual(database.invoked_method_list[1][1][0], ("fakeuser0",))
self.assertEqual(database.invoked_method_list[1][1][1].keys(),
['cpu_time', 'cpu_percent', 'process',
self.assertEqual(set(database.invoked_method_list[1][1][1]),
{'cpu_time', 'cpu_percent', 'process',
'memory_rss', 'pid', 'memory_percent',
'io_rw_counter', 'insertion_date', 'insertion_time',
'io_cycles_counter', 'cpu_num_threads'])
'io_cycles_counter', 'cpu_num_threads'})
self.assertEqual(database.invoked_method_list[2], ("commit", ""))
self.assertEqual(database.invoked_method_list[3], ("close", ""))
self.assertEqual(database.invoked_method_list[4], ("connect", ""))
self.assertEqual(database.invoked_method_list[5][0], "select")
self.assertEqual(database.invoked_method_list[5][1][0], ())
self.assertEqual(database.invoked_method_list[5][1][1].keys(),
['table', 'where', 'limit', 'order', 'columns'])
self.assertEqual(set(database.invoked_method_list[5][1][1]),
{'table', 'where', 'limit', 'order', 'columns'})
self.assertEqual(database.invoked_method_list[6][0], "inserFolderSnapshot")
self.assertEqual(database.invoked_method_list[6][1][0], ("fakeuser0",))
self.assertEqual(database.invoked_method_list[6][1][1].keys(),
['insertion_date', 'disk_usage', 'insertion_time'])
self.assertEqual(set(database.invoked_method_list[6][1][1]),
{'insertion_date', 'disk_usage', 'insertion_time'})
self.assertEqual(database.invoked_method_list[7], ("commit", ""))
self.assertEqual(database.invoked_method_list[8], ("close", ""))
......@@ -583,14 +583,14 @@ class TestCollectEntity(unittest.TestCase):
self.assertEqual(database.invoked_method_list[1][0], "insertComputerSnapshot")
self.assertEqual(database.invoked_method_list[1][1][0], ())
self.assertEqual(database.invoked_method_list[1][1][1].keys(),
['insertion_time', 'insertion_date', 'cpu_num_core',
self.assertEqual(set(database.invoked_method_list[1][1][1]),
{'insertion_time', 'insertion_date', 'cpu_num_core',
'partition_list', 'cpu_frequency', 'memory_size',
'cpu_type', 'memory_type'])
'cpu_type', 'memory_type'})
self.assertEqual(database.invoked_method_list[2][0], "insertSystemSnapshot")
self.assertEqual(database.invoked_method_list[2][1][0], ())
self.assertEqual(set(database.invoked_method_list[2][1][1].keys()),
self.assertEqual(set(database.invoked_method_list[2][1][1]),
set([ 'memory_used', 'cpu_percent', 'insertion_date', 'insertion_time',
'loadavg', 'memory_free', 'net_in_bytes', 'net_in_dropped',
'net_in_errors', 'net_out_bytes', 'net_out_dropped',
......@@ -598,7 +598,7 @@ class TestCollectEntity(unittest.TestCase):
self.assertEqual(database.invoked_method_list[3][0], "insertDiskPartitionSnapshot")
self.assertEqual(database.invoked_method_list[3][1][0], ())
self.assertEqual(set(database.invoked_method_list[3][1][1].keys()),
self.assertEqual(set(database.invoked_method_list[3][1][1]),
set([ 'used', 'insertion_date', 'partition', 'free',
'mountpoint', 'insertion_time' ]))
......
......@@ -34,7 +34,7 @@ import slapos.cli.configure_local
from slapos.cli.configure_local import ConfigureLocalCommand, _createConfigurationDirectory
from slapos.cli.entry import SlapOSApp
from argparse import Namespace
from ConfigParser import ConfigParser
from six.moves.configparser import ConfigParser
# Disable any command to launch slapformat and supervisor
slapos.cli.configure_local._runFormat = lambda x: "Do nothing"
......
......@@ -47,7 +47,7 @@ class SlapPopenTestCase(unittest.TestCase):
def test_exec(self):
"""Test command execution with SlapPopen.
"""
self.script.write('#!/bin/sh\necho "hello"\nexit 123')
self.script.write(b'#!/bin/sh\necho "hello"\nexit 123')
self.script.close()
logger = mock.MagicMock()
......@@ -65,7 +65,7 @@ class SlapPopenTestCase(unittest.TestCase):
def test_debug(self):
"""Test debug=True, which keeps interactive.
"""
self.script.write('#!/bin/sh\necho "exit code?"\nread rc\nexit $rc')
self.script.write(b'#!/bin/sh\necho "exit code?"\nread rc\nexit $rc')
self.script.close()
# keep a reference to stdin and stdout to restore them later
......@@ -74,7 +74,7 @@ class SlapPopenTestCase(unittest.TestCase):
# replace stdin with a pipe that will write 123
child_stdin_r, child_stdin_w = os.pipe()
os.write(child_stdin_w, "123")
os.write(child_stdin_w, b"123")
os.close(child_stdin_w)
os.dup2(child_stdin_r, sys.stdin.fileno())
......@@ -88,7 +88,7 @@ class SlapPopenTestCase(unittest.TestCase):
debug=True,
logger=logging.getLogger())
# program output
self.assertEqual('exit code?\n', os.read(child_stdout_r, 1024))
self.assertEqual(b'exit code?\n', os.read(child_stdout_r, 1024))
self.assertEqual(123, program.returncode)
self.assertEqual('(output not captured in debug mode)', program.output)
......
......@@ -28,17 +28,15 @@ import unittest
from zope.interface.verify import verifyClass
import zope.interface
import types
from six import class_types
from slapos import slap
def getOnlyImplementationAssertionMethod(klass, method_list):
"""Returns method which verifies if a klass only implements its interfaces"""
def testMethod(self):
implemented_method_list = [x for x in dir(klass) \
if ((not x.startswith('_')) and callable(getattr(klass, x)))]
for interface_method in method_list:
if interface_method in implemented_method_list:
implemented_method_list.remove(interface_method)
implemented_method_list = {x for x in dir(klass)
if not x.startswith('_') and callable(getattr(klass, x))}
implemented_method_list.difference_update(method_list)
if implemented_method_list:
raise AssertionError("Unexpected methods %s" % implemented_method_list)
......@@ -61,7 +59,7 @@ def generateTestMethodListOnClass(klass, module):
"""Generate test method on klass"""
for class_id in dir(module):
implementing_class = getattr(module, class_id)
if type(implementing_class) not in (types.ClassType, types.TypeType):
if not isinstance(implementing_class, class_types):
continue
# add methods to assert that publicly available classes are defining
# interfaces
......@@ -69,7 +67,7 @@ def generateTestMethodListOnClass(klass, module):
setattr(klass, method_name, getDeclarationAssertionMethod(
implementing_class))
implemented_method_list = []
implemented_method_list = ['with_traceback']
for interface in list(zope.interface.implementedBy(implementing_class)):
# for each interface which class declares add a method which verify
# implementation
......
This diff is collapsed.
This diff is collapsed.
......@@ -26,6 +26,8 @@
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
##############################################################################
from __future__ import print_function
import glob
import logging
import slapos.format
......@@ -49,6 +51,8 @@ import mock
from .slapgrid import DummyManager
import six
USER_LIST = []
GROUP_LIST = []
INTERFACE_DICT = {}
......@@ -89,7 +93,7 @@ class FakeCallAndRead:
retval = 0, 'UP'
global INTERFACE_DICT
if 'useradd' in argument_list:
print argument_list
print(argument_list)
global USER_LIST
username = argument_list[-1]
if username == '-r':
......@@ -130,7 +134,7 @@ class LoggableWrapper:
def __call__(self, *args, **kwargs):
arg_list = [repr(x) for x in args] + [
'%s=%r' % (x, y) for x, y in kwargs.iteritems()]
'%s=%r' % (x, y) for x, y in six.iteritems(kwargs)]
self.__logger.debug('%s(%s)' % (self.__name, ', '.join(arg_list)))
......@@ -197,6 +201,8 @@ class SlapformatMixin(unittest.TestCase):
self.netifaces = NetifacesMock()
self.saved_netifaces = {}
for fake in vars(NetifacesMock):
if fake.startswith("__"):
continue
self.saved_netifaces[fake] = getattr(netifaces, fake, None)
setattr(netifaces, fake, getattr(self.netifaces, fake))
......@@ -208,6 +214,8 @@ class SlapformatMixin(unittest.TestCase):
def patchPwd(self):
self.saved_pwd = {}
for fake in vars(PwdMock):
if fake.startswith("__"):
continue
self.saved_pwd[fake] = getattr(pwd, fake, None)
setattr(pwd, fake, getattr(PwdMock, fake))
......@@ -219,6 +227,8 @@ class SlapformatMixin(unittest.TestCase):
def patchTime(self):
self.saved_time = {}
for fake in vars(TimeMock):
if fake.startswith("__"):
continue
self.saved_time[fake] = getattr(time, fake, None)
setattr(time, fake, getattr(TimeMock, fake))
......@@ -230,6 +240,8 @@ class SlapformatMixin(unittest.TestCase):
def patchGrp(self):
self.saved_grp = {}
for fake in vars(GrpMock):
if fake.startswith("__"):
continue
self.saved_grp[fake] = getattr(grp, fake, None)
setattr(grp, fake, getattr(GrpMock, fake))
......
This diff is collapsed.
This diff is collapsed.
......@@ -65,9 +65,9 @@ class TestUtil(unittest.TestCase):
wanted_directory0 = os.path.join(root_slaptest, 'slap-write0')
wanted_directory1 = os.path.join(root_slaptest, 'slap-write0', 'write-slap1')
wanted_directory2 = os.path.join(root_slaptest, 'slap-write0', 'write-slap1', 'write-teste2')
wanted_directory_mkdir0 = os.makedirs(wanted_directory0, mode=0777)
wanted_directory_mkdir1 = os.makedirs(wanted_directory1, mode=0777)
wanted_directory_mkdir2 = os.makedirs(wanted_directory2, mode=0777)
wanted_directory_mkdir0 = os.makedirs(wanted_directory0, mode=0o777)
wanted_directory_mkdir1 = os.makedirs(wanted_directory1, mode=0o777)
wanted_directory_mkdir2 = os.makedirs(wanted_directory2, mode=0o777)
create_file_txt = tempfile.mkstemp(suffix='.txt', prefix='tmp', dir=wanted_directory2, text=True)
user = 'nobody'
try:
......@@ -109,23 +109,14 @@ class TestUtil(unittest.TestCase):
shutil.rmtree(root_slaptest)
def test_string_to_boolean_with_true_values(self):
"""
Check that mkdir_p doesn't raise if directory already exist.
"""
for value in ['true', 'True', 'TRUE']:
self.assertTrue(string_to_boolean(value))
def test_string_to_boolean_with_false_values(self):
"""
Check that mkdir_p doesn't raise if directory already exist.
"""
for value in ['false', 'False', 'False']:
self.assertFalse(string_to_boolean(value))
def test_string_to_boolean_with_incorrect_values(self):
"""
Check that mkdir_p doesn't raise if directory already exist.
"""
for value in [True, False, 1, '1', 't', 'tru', 'truelle', 'f', 'fals', 'falsey']:
self.assertRaises(ValueError, string_to_boolean, value)
......
......@@ -33,6 +33,7 @@ import socket
import struct
import subprocess
import sqlite3
from xml_marshaller.xml_marshaller import dumps, loads
def mkdir_p(path, mode=0o700):
......@@ -86,16 +87,9 @@ def string_to_boolean(string):
The parser is completely arbitrary, see code for actual implementation.
"""
if not isinstance(string, str) and not isinstance(string, unicode):
raise ValueError('Given value is not a string.')
acceptable_true_values = ['true']
acceptable_false_values = ['false']
string = string.lower()
if string in acceptable_true_values:
return True
if string in acceptable_false_values:
return False
else:
try:
return ('false', 'true').index(string.lower())
except Exception:
raise ValueError('%s is neither True nor False.' % string)
......@@ -138,3 +132,15 @@ def ipv6FromBin(ip, suffix=''):
def lenNetmaskIpv6(netmask):
return len(binFromIpv6(netmask).rstrip('0'))
# Used for Python 2-3 compatibility
if str is bytes:
bytes2str = str2bytes = lambda s: s
def unicode2str(s):
return s.encode('utf-8')
else:
def bytes2str(s):
return s.decode()
def str2bytes(s):
return s.encode()
def unicode2str(s):
return s
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment