Commit e40edbd7 authored by bescoto's avatar bescoto

Final (?) too-long-filename bug fix


git-svn-id: http://svn.savannah.nongnu.org/svn/rdiff-backup@687 2b77aa54-bcbc-44c9-a7ec-4f6cf2b41109
parent 078eb1d6
......@@ -9,6 +9,9 @@ connection is lost.
When removing older than, delete empty increments directories
Long filename bug finally fixed (phew). rdiff-backup should now
correctly mirror any file that it can read.
New in v1.1.2 (2005/11/06)
--------------------------
......
For comparing, check source filesystem's abilities
Make sure remove older than doesn't delete current snapshot. When no
increments found, don't act like there's an error (no stderr).
Port close file fix to devel version
Regress needs to fix alternate names also
Clean up connection dropped message
See if rorpiter.CacheIndexable is really necessary
For comparing, check source filesystem's abilities
Clean up compare reports
......
......@@ -111,8 +111,8 @@ def MakeTar():
"FilenameMapping.py", "fs_abilities.py",
"Hardlink.py", "hash.py", "increment.py", "__init__.py",
"iterfile.py", "lazy.py", "librsync.py",
"log.py", "Main.py", "manage.py", "metadata.py",
"Rdiff.py", "regress.py", "restore.py",
"log.py", "longname.py", "Main.py", "manage.py",
"metadata.py", "Rdiff.py", "regress.py", "restore.py",
"robust.py", "rorpiter.py", "rpath.py",
"Security.py", "selection.py",
"SetConnections.py", "static.py",
......
......@@ -961,15 +961,6 @@ automatically (as from
or similar) it is probably a good idea to check the exit code.
.SH BUGS
rdiff-backup uses the shell command
.BR mknod (1)
to backup device files (e.g. /dev/ttyS0), so device files won't be
handled correctly on systems with non-standard mknod syntax.
.PP
Files whose names are close to the maximum length (e.g. 235 chars if
the maximum is 255) may be skipped because the filenames of related
increment files would be too long.
.PP
The gzip library in versions 2.2 and earlier of python (but fixed in
2.3a1) has trouble producing files over 2GB in length. This bug will
prevent rdiff-backup from producing large compressed increments
......
......@@ -32,8 +32,6 @@ handle that error.)
import re, types
import Globals, log, rpath
max_filename_length = 255
# If true, enable character quoting, and set characters making
# regex-style range.
chars_to_quote = None
......@@ -114,8 +112,8 @@ class QuotedRPath(rpath.RPath):
"""
def __init__(self, connection, base, index = (), data = None):
"""Make new QuotedRPath"""
quoted_index = tuple(map(quote, index))
rpath.RPath.__init__(self, connection, base, quoted_index, data)
self.quoted_index = tuple(map(quote, index))
rpath.RPath.__init__(self, connection, base, self.quoted_index, data)
self.index = index
def listdir(self):
......
......@@ -221,7 +221,6 @@ never_drop_acls = None
# prevent highbit permissions on systems which don't support them.)
permission_mask = 07777
def get(name):
"""Return the value of something in this module"""
return globals()[name]
......
......@@ -74,10 +74,10 @@ def parse_cmdlineoptions(arglist):
"include-globbing-filelist-stdin", "include-regexp=",
"include-special-files", "include-symbolic-links",
"list-at-time=", "list-changed-since=", "list-increments",
"list-increment-sizes", "never-drop-acls", "no-acls",
"no-carbonfile", "no-compare-inode", "no-compression",
"no-compression-regexp=", "no-eas", "no-file-statistics",
"no-hard-links", "null-separator",
"list-increment-sizes", "never-drop-acls",
"no-acls", "no-carbonfile",
"no-compare-inode", "no-compression", "no-compression-regexp=",
"no-eas", "no-file-statistics", "no-hard-links", "null-separator",
"override-chars-to-quote=", "parsable-output",
"preserve-numerical-ids", "print-statistics",
"remote-cmd=", "remote-schema=",
......
......@@ -23,7 +23,7 @@ from __future__ import generators
import errno
import Globals, metadata, rorpiter, TempFile, Hardlink, robust, increment, \
rpath, static, log, selection, Time, Rdiff, statistics, iterfile, \
hash
hash, longname
def Mirror(src_rpath, dest_rpath):
"""Turn dest_rpath into a copy of src_rpath"""
......@@ -189,7 +189,8 @@ class DestinationStruct:
elif dest_rorp:
dest_sig = dest_rorp.getRORPath()
if dest_rorp.isreg():
sig_fp = cls.get_one_sig_fp(dest_base_rpath.new_index(index))
dest_rp = longname.get_mirror_rp(dest_base_rpath, dest_rorp)
sig_fp = cls.get_one_sig_fp(dest_rp)
if sig_fp is None: return None
dest_sig.setfile(sig_fp)
else: dest_sig = rpath.RORPath(index)
......@@ -467,44 +468,26 @@ class PatchITRB(rorpiter.ITRBranch):
self.statfileobj = (statistics.get_active_statfileobj() or
statistics.StatFileObj())
self.dir_replacement, self.dir_update = None, None
self.cached_rp = None
self.CCPP = CCPP
self.error_handler = robust.get_error_handler("UpdateError")
def get_rp_from_root(self, index):
"""Return RPath by adding index to self.basis_root_rp"""
if not self.cached_rp or self.cached_rp.index != index:
self.cached_rp = self.basis_root_rp.new_index(index)
return self.cached_rp
def check_long_name(self, func, *args):
"""Execute function, checking for ENAMETOOLONG error"""
try: result = func(*args)
except OSError, exc:
if (errno.errorcode.has_key(exc[0]) and
errno.errorcode[exc[0]] == 'ENAMETOOLONG'):
self.error_handler(exc, args[0])
return None
else: raise
return result
def can_fast_process(self, index, diff_rorp):
"""True if diff_rorp and mirror are not directories"""
rp = self.check_long_name(self.get_rp_from_root, index)
# filename too long error qualifies (hack)
return not rp or (not diff_rorp.isdir() and not rp.isdir())
mirror_rorp = self.CCPP.get_mirror_rorp(index)
return not (diff_rorp.isdir() or (mirror_rorp and mirror_rorp.isdir()))
def fast_process(self, index, diff_rorp):
"""Patch base_rp with diff_rorp (case where neither is directory)"""
rp = self.check_long_name(self.get_rp_from_root, index)
if not rp: return
tf = TempFile.new(rp)
if self.patch_to_temp(rp, diff_rorp, tf):
mirror_rp, discard = longname.get_mirror_inc_rps(
self.CCPP.get_rorps(index), self.basis_root_rp)
assert not mirror_rp.isdir(), mirror_rp
tf = TempFile.new(mirror_rp)
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
if tf.lstat():
rpath.rename(tf, rp)
rpath.rename(tf, mirror_rp)
self.CCPP.flag_success(index)
elif rp.lstat():
rp.delete()
elif mirror_rp and mirror_rp.lstat():
mirror_rp.delete()
self.CCPP.flag_deleted(index)
else:
tf.setdata()
......@@ -583,11 +566,12 @@ class PatchITRB(rorpiter.ITRBranch):
def start_process(self, index, diff_rorp):
"""Start processing directory - record information for later"""
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir() or not base_rp.index
if diff_rorp.isdir(): self.prepare_dir(diff_rorp, base_rp)
elif self.set_dir_replacement(diff_rorp, base_rp):
self.CCPP.flag_success(index)
self.base_rp, discard = longname.get_mirror_inc_rps(
self.CCPP.get_rorps(index), self.basis_root_rp)
if diff_rorp.isdir(): self.prepare_dir(diff_rorp, self.base_rp)
elif self.set_dir_replacement(diff_rorp, self.base_rp):
if diff_rorp.lstat(): self.CCPP.flag_success(index)
else: self.CCPP.flag_deleted(index)
def set_dir_replacement(self, diff_rorp, base_rp):
"""Set self.dir_replacement, which holds data until done with dir
......@@ -607,10 +591,11 @@ class PatchITRB(rorpiter.ITRBranch):
else: return 1
def prepare_dir(self, diff_rorp, base_rp):
"""Prepare base_rp to turn into a directory"""
"""Prepare base_rp to be a directory"""
self.dir_update = diff_rorp.getRORPath() # make copy in case changes
if not base_rp.isdir():
if base_rp.lstat(): base_rp.delete()
if base_rp.lstat(): self.base_rp.delete()
base_rp.setdata()
base_rp.mkdir()
self.CCPP.flag_success(diff_rorp.index)
else: # maybe no change, so query CCPP before tagging success
......@@ -622,8 +607,7 @@ class PatchITRB(rorpiter.ITRBranch):
if self.dir_update:
assert self.base_rp.isdir()
rpath.copy_attribs(self.dir_update, self.base_rp)
else:
assert self.dir_replacement
elif self.dir_replacement:
self.base_rp.rmdir()
if self.dir_replacement.lstat():
rpath.rename(self.dir_replacement, self.base_rp)
......@@ -637,32 +621,24 @@ class IncrementITRB(PatchITRB):
"""
def __init__(self, basis_root_rp, inc_root_rp, rorp_cache):
self.inc_root_rp = inc_root_rp
self.cached_incrp = None
PatchITRB.__init__(self, basis_root_rp, rorp_cache)
def get_incrp(self, index):
"""Return inc RPath by adding index to self.basis_root_rp"""
if not self.cached_incrp or self.cached_incrp.index != index:
self.cached_incrp = self.inc_root_rp.new_index(index)
return self.cached_incrp
def fast_process(self, index, diff_rorp):
"""Patch base_rp with diff_rorp and write increment (neither is dir)"""
rp = self.check_long_name(self.get_rp_from_root, index)
if not rp: return
tf = TempFile.new(rp)
if self.patch_to_temp(rp, diff_rorp, tf):
inc = self.check_long_name(increment.Increment,
tf, rp, self.get_incrp(index))
mirror_rp, inc_prefix = longname.get_mirror_inc_rps(
self.CCPP.get_rorps(index), self.basis_root_rp, self.inc_root_rp)
tf = TempFile.new(mirror_rp)
if self.patch_to_temp(mirror_rp, diff_rorp, tf):
inc = increment.Increment(tf, mirror_rp, inc_prefix)
if inc is not None:
self.CCPP.set_inc(index, inc)
if inc.isreg():
inc.fsync_with_dir() # Write inc before rp changed
if tf.lstat():
rpath.rename(tf, rp)
rpath.rename(tf, mirror_rp)
self.CCPP.flag_success(index)
elif rp.lstat():
rp.delete()
elif mirror_rp.lstat():
mirror_rp.delete()
self.CCPP.flag_deleted(index)
return # normal return, otherwise error occurred
tf.setdata()
......@@ -670,17 +646,19 @@ class IncrementITRB(PatchITRB):
def start_process(self, index, diff_rorp):
"""Start processing directory"""
base_rp = self.base_rp = self.get_rp_from_root(index)
assert diff_rorp.isdir() or base_rp.isdir()
self.base_rp, inc_prefix = longname.get_mirror_inc_rps(
self.CCPP.get_rorps(index), self.basis_root_rp, self.inc_root_rp)
self.base_rp.setdata()
assert diff_rorp.isdir() or self.base_rp.isdir()
if diff_rorp.isdir():
inc = self.check_long_name(increment.Increment,
diff_rorp, base_rp, self.get_incrp(index))
inc = increment.Increment(diff_rorp, self.base_rp, inc_prefix)
if inc and inc.isreg():
inc.fsync_with_dir() # must write inc before rp changed
self.prepare_dir(diff_rorp, base_rp)
elif self.set_dir_replacement(diff_rorp, base_rp):
inc = self.check_long_name(increment.Increment,
self.dir_replacement, base_rp, self.get_incrp(index))
self.base_rp.setdata() # in case written by increment above
self.prepare_dir(diff_rorp, self.base_rp)
elif self.set_dir_replacement(diff_rorp, self.base_rp):
inc = increment.Increment(self.dir_replacement, self.base_rp,
inc_prefix)
if inc:
self.CCPP.set_inc(index, inc)
self.CCPP.flag_success(index)
......
......@@ -34,7 +34,7 @@ def Increment(new, mirror, incpref):
"""
log.Log("Incrementing mirror file " + mirror.path, 5)
if ((new and new.isdir()) or mirror.isdir()) and not incpref.isdir():
if ((new and new.isdir()) or mirror.isdir()) and not incpref.lstat():
incpref.mkdir()
if not mirror.lstat(): incrp = makemissing(incpref)
......
# Copyright 2005 Ben Escoto
#
# This file is part of rdiff-backup.
#
# rdiff-backup is free software; you can redistribute it and/or modify
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version.
#
# rdiff-backup is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with rdiff-backup; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
"""Handle long filenames
rdiff-backup sometimes wants to write filenames longer than allowed by
the destination directory. This can happen in 3 ways:
1) Because the destination directory has a low maximum length limit.
2) When the source directory has a filename close to the limit, so
that its increments would be above the limit.
3) When quoting is enabled, so that even the mirror filenames are too
long.
When rdiff-backup would otherwise write a file whose name is too long,
instead it either skips the operation altogether (for non-regular
files), or writes the data to a unique file in the
rdiff-backup-data/long-filename directory. This file will have an
arbitrary basename, but if it's an increment the suffix will be the
same. The name will be recorded in the mirror_metadata so we can find
it later.
"""
import types, errno
import log, Globals, restore, rpath, FilenameMapping, regress
long_name_dir = "long_filename_data"
rootrp = None
def get_long_rp(base = None):
    """Return an rpath in the long filename directory.

    With no base, return the directory itself; otherwise return the
    rpath for base inside it.  The directory rpath is created on first
    use and cached in the module-level rootrp.
    """
    global rootrp
    if not rootrp:
        long_dir = Globals.rbdir.append(long_name_dir)
        if not long_dir.lstat():
            long_dir.mkdir()
        rootrp = long_dir
    if not base:
        return rootrp
    return rootrp.append(base)
# ------------------------------------------------------------------
# These functions used mainly for backing up
# integer number of next free prefix. Names will be created from
# integers consecutively like '1', '2', and so on.
free_name_counter = None
# Filename which holds the next available free name in it
counter_filename = "next_free"
def get_next_free():
    """Return next free filename available in the long filename directory

    Names are ascending integers ('1', '2', ...).  The next free value
    is persisted in the counter_filename file so later sessions don't
    have to rescan; if that file is missing, the directory is listed to
    recover the high-water mark.
    """
    global free_name_counter
    def scan_next_free():
        """Return value of free_name_counter by listing long filename dir"""
        log.Log("Setting next free from long filenames dir", 5)
        cur_high = 0
        for filename in get_long_rp().listdir():
            # Entries look like '<number>' or '<number>.<suffix>';
            # anything that doesn't start with an integer is ignored.
            try: i = int(filename.split('.')[0])
            except ValueError: continue
            if i > cur_high: cur_high = i
        return cur_high + 1
    def read_next_free():
        """Return next int free by reading the next_free file, or None"""
        rp = get_long_rp(counter_filename)
        if not rp.lstat(): return None
        return int(rp.get_data())
    def write_next_free(i):
        """Write value i into the counter file"""
        rp = get_long_rp(counter_filename)
        if rp.lstat(): rp.delete()
        # Bug fix: the original wrote str(free_name_counter), silently
        # ignoring the argument i.  Identical at the current call site,
        # but wrong for any other caller.
        rp.write_string(str(i))
        rp.fsync_with_dir()  # durably record before handing the name out
    if not free_name_counter: free_name_counter = read_next_free()
    if not free_name_counter: free_name_counter = scan_next_free()
    filename = str(free_name_counter)
    rp = get_long_rp(filename)
    assert not rp.lstat(), "Unexpected file at %s found" % (rp.path,)
    free_name_counter += 1
    write_next_free(free_name_counter)
    return filename
def check_new_index(base, index, make_dirs = 0):
    """Return new rpath with given index, or None if that is too long

    If make_dirs is true, make any parent directories to assure that
    file is really too long, and not just in directories that don't exist.
    """
    def wrap_call(func, *args):
        # Run func(*args), converting an ENAMETOOLONG failure into a
        # None return; any other EnvironmentError propagates unchanged.
        try: result = func(*args)
        except EnvironmentError, exc:
            if (errno.errorcode.has_key(exc[0]) and
                errno.errorcode[exc[0]] == 'ENAMETOOLONG'):
                return None
            raise
        return result
    def make_parent(rp):
        # Ensure rp's parent directory exists.  Return 1 if it already
        # did, 2 if it had to be created (rp must then be re-tried).
        parent = rp.get_parent_rp()
        if parent.lstat(): return 1
        parent.makedirs()
        return 2
    rp = wrap_call(base.new_index, index)
    if not make_dirs or not rp or rp.lstat(): return rp
    # The path built OK but might sit under directories that don't exist
    # yet; create them and re-check so a missing dir isn't mistaken for
    # a too-long name.
    parent_result = wrap_call(make_parent, rp)
    if not parent_result: return None
    elif parent_result == 1: return rp
    else: return wrap_call(base.new_index, index)
def get_mirror_rp(mirror_base, mirror_rorp):
    """Get the mirror_rp for reading a regular file

    This will just be mirror_rorp's index under mirror_base, unless
    mirror_rorp has an alternate mirror name specified, in which case
    the data was diverted into the long filename directory.  If the
    regular path is too long for the destination, an empty rpath with
    that index is returned instead.
    """
    if mirror_rorp.has_alt_mirror_name():
        return get_long_rp(mirror_rorp.get_alt_mirror_name())
    rp = check_new_index(mirror_base, mirror_rorp.index)
    if rp: return rp
    # Bug fix: original referenced an undefined name 'index' here,
    # raising NameError whenever the mirror path was too long.
    return mirror_base.new_index_empty(mirror_rorp.index)
def get_mirror_inc_rps(rorp_pair, mirror_root, inc_root = None):
    """Get (mirror_rp, inc_rp) pair, possibly making new longname base

    To test inc_rp, pad incbase with 50 random (non-quoted) characters
    and see if that raises an error.
    """
    if not inc_root: # make fake inc_root if not available
        inc_root = mirror_root.append_path('rdiff-backup-data/increments')
    def mir_triple_old(old_rorp):
        """Return (mirror_rp, alt_mirror, alt_inc) from old_rorp"""
        # An existing destination entry already knows whether its data
        # was diverted into the long filename directory.
        if old_rorp.has_alt_mirror_name():
            alt_mirror = old_rorp.get_alt_mirror_name()
            return (get_long_rp(alt_mirror), alt_mirror, None)
        else:
            mirror_rp = mirror_root.new_index(old_rorp.index)
            if old_rorp.has_alt_inc_name():
                return (mirror_rp, None, old_rorp.get_alt_inc_name())
            else: return (mirror_rp, None, None)
    def mir_triple_new(new_rorp):
        """Return (mirror_rp, alt_mirror, None) from new_rorp"""
        # A new entry: try the normal mirror location first; if the
        # name is too long, allocate a fresh base in the long dir.
        mirror_rp = check_new_index(mirror_root, new_rorp.index)
        if mirror_rp: return (mirror_rp, None, None)
        alt_mirror = get_next_free()
        return (get_long_rp(alt_mirror), alt_mirror, None)
    def update_rorp(new_rorp, alt_mirror, alt_inc):
        """Update new_rorp with alternate mirror/inc information"""
        # Recorded into the metadata so restore/regress can find the
        # diverted data later.
        if not new_rorp or not new_rorp.lstat(): return
        if alt_mirror: new_rorp.set_alt_mirror_name(alt_mirror)
        elif alt_inc: new_rorp.set_alt_inc_name(alt_inc)
    def find_inc_pair(index, mirror_rp, alt_mirror, alt_inc):
        """Return (alt_inc, inc_rp) pair"""
        if alt_mirror: return (None, mirror_rp)
        elif alt_inc: return (alt_inc, get_long_rp(alt_inc))
        elif not index: return (None, inc_root)
        # Pad the last index component so a later increment suffix
        # (timestamp + extension) cannot push the name over the limit.
        trial_inc_index = index[:-1] + (index[-1] + ('a'*50),)
        if check_new_index(inc_root, trial_inc_index, make_dirs = 1):
            return (None, inc_root.new_index(index))
        alt_inc = get_next_free()
        return (alt_inc, get_long_rp(alt_inc))
    (new_rorp, old_rorp) = rorp_pair
    # Prefer the old (destination) entry's recorded names; fall back to
    # probing with the new (source) entry.
    if old_rorp and old_rorp.lstat():
        mirror_rp, alt_mirror, alt_inc = mir_triple_old(old_rorp)
        index = old_rorp.index
    else:
        assert new_rorp and new_rorp.lstat(), (old_rorp, new_rorp)
        mirror_rp, alt_mirror, alt_inc = mir_triple_new(new_rorp)
        index = new_rorp.index
    alt_inc, inc_rp = find_inc_pair(index, mirror_rp, alt_mirror, alt_inc)
    update_rorp(new_rorp, alt_mirror, alt_inc)
    return mirror_rp, inc_rp
# ------------------------------------------------------------------
# The following section is for restoring
# This holds a dictionary {incbase: inclist}. The keys are increment
# bases like '1' or '23', and the values are lists containing the
# associated increments.
restore_inc_cache = None
def set_restore_cache():
    """Initialize restore_inc_cache based on long filename dir"""
    global restore_inc_cache
    restore_inc_cache = {}
    # Scan the long filename directory once, mapping each increment
    # base name (e.g. '23') to the list of its increments.
    root_rf = restore.RestoreFile(get_long_rp(), get_long_rp(), [])
    for incbase_rp, inclist in root_rf.yield_inc_complexes(get_long_rp()):
        restore_inc_cache[incbase_rp.index[-1]] = inclist
def get_inclist(inc_base_name):
    """Return the increments filed under inc_base_name, or [] if none.

    Populates the restore cache from the long filename directory on
    first use.
    """
    if not restore_inc_cache:
        set_restore_cache()
    return restore_inc_cache.get(inc_base_name, [])
def update_rf(rf, rorp, mirror_root):
    """Return new or updated restorefile based on alt name info in rorp

    rorp is the metadata entry; if it carries an alternate mirror or
    increment name, redirect rf's mirror/increment rpaths into the long
    filename directory.  May return rf unchanged, an updated rf, or a
    brand new RestoreFile.
    """
    def update_incs(rf, inc_base):
        """Swap inclist in rf with those with base inc_base and return"""
        log.Log("Restoring with increment base %s for file %s" %
                (inc_base, rorp.get_indexpath()), 6)
        rf.inc_rp = get_long_rp(inc_base)
        rf.inc_list = get_inclist(inc_base)
        rf.set_relevant_incs()
    def update_existing_rf(rf, rorp):
        """Update rf based on rorp, don't make new one"""
        if rorp.has_alt_mirror_name():
            inc_name = rorp.get_alt_mirror_name()
            # Bug fix: original called get_long_rp(mirror_name) with an
            # undefined name, raising NameError on this path.
            rf.mirror_rp = get_long_rp(inc_name)
        elif rorp.has_alt_inc_name(): inc_name = rorp.get_alt_inc_name()
        else: inc_name = None
        if inc_name: update_incs(rf, inc_name)
    def make_new_rf(rorp, mirror_root):
        """Make a new rf when long name info is available"""
        if rorp.has_alt_mirror_name():
            inc_name = rorp.get_alt_mirror_name()
            # Bug fix: same undefined 'mirror_name' reference as above.
            mirror_rp = get_long_rp(inc_name)
        elif rorp.has_alt_inc_name():
            inc_name = rorp.get_alt_inc_name()
            mirror_rp = mirror_root.new_index(rorp.index)
        else: assert 0, "Making new rf when rorp has no alternate name info"
        rf = restore.RestoreFile(mirror_rp, None, [])
        update_incs(rf, inc_name)
        return rf
    if not rorp: return rf
    if rf and not rorp.has_alt_mirror_name() and not rorp.has_alt_inc_name():
        return rf # Most common case
    if rf:
        update_existing_rf(rf, rorp)
        return rf
    else: return make_new_rf(rorp, mirror_root)
def update_regressfile(rf, rorp, mirror_root):
    """Like update_rf, but always return a regress.RegressFile."""
    updated = update_rf(rf, rorp, mirror_root)
    if not isinstance(updated, regress.RegressFile):
        updated = regress.RegressFile(updated.mirror_rp, updated.inc_rp,
                                      updated.inc_list)
    return updated
......@@ -146,6 +146,15 @@ def RORP2Record(rorpath):
str_list.append(" Gid %s\n" % gid)
str_list.append(" Gname %s\n" % (rorpath.getgname() or ":"))
str_list.append(" Permissions %s\n" % rorpath.getperms())
# Add long filename information
if rorpath.has_alt_mirror_name():
str_list.append(" AlternateMirrorName %s\n" %
(rorpath.get_alt_mirror_name(),))
elif rorpath.has_alt_inc_name():
str_list.append(" AlternateIncrementName %s\n" %
(rorpath.get_alt_inc_name(),))
return "".join(str_list)
line_parsing_regexp = re.compile("^ *([A-Za-z0-9]+) (.+)$", re.M)
......@@ -188,6 +197,8 @@ def Record2RORP(record_string):
if data == ':' or data == 'None': data_dict['gname'] = None
else: data_dict['gname'] = data
elif field == "Permissions": data_dict['perms'] = int(data)
elif field == "AlternateMirrorName": data_dict['mirrorname'] = data
elif field == "AlternateIncrementName": data_dict['incname'] = data
else: raise ParsingError("Unknown field in line '%s %s'" %
(field, data))
return rpath.RORPath(index, data_dict)
......
# Copyright 2002 Ben Escoto
# Copyright 2002, 2005 Ben Escoto
#
# This file is part of rdiff-backup.
#
......@@ -35,7 +35,7 @@ be recovered.
from __future__ import generators
import Globals, restore, log, rorpiter, TempFile, metadata, rpath, C, \
Time, backup, robust
Time, backup, robust, longname
# regress_time should be set to the time we want to regress back to
# (usually the time of the last successful backup)
......@@ -193,6 +193,7 @@ def iterate_meta_rfs(mirror_rp, inc_rp):
raw_rfs = iterate_raw_rfs(mirror_rp, inc_rp)
collated = rorpiter.Collate2Iters(raw_rfs, yield_metadata())
for raw_rf, metadata_rorp in collated:
raw_rf = longname.update_regressfile(raw_rf, metadata_rorp, mirror_rp)
if not raw_rf:
log.Log("Warning, metadata file has entry for %s,\n"
"but there are no associated files." %
......
# Copyright 2002, 2003, 2004 Ben Escoto
# Copyright 2002, 2003, 2004, 2005 Ben Escoto
#
# This file is part of rdiff-backup.
#
......@@ -21,9 +21,7 @@
from __future__ import generators
import tempfile, os, cStringIO
import Globals, Time, Rdiff, Hardlink, rorpiter, selection, rpath, \
log, static, robust, metadata, statistics, TempFile, hash
import static, rorpiter
class RestoreError(Exception): pass
......@@ -256,7 +254,7 @@ class MirrorStruct:
mir_rorp.flaglinked(Hardlink.get_link_index(mir_rorp))
elif mir_rorp.isreg():
expanded_index = cls.mirror_base.index + mir_rorp.index
file_fp = cls.rf_cache.get_fp(expanded_index)
file_fp = cls.rf_cache.get_fp(expanded_index, mir_rorp)
mir_rorp.setfile(hash.FileWrapper(file_fp))
mir_rorp.set_attached_filetype('snapshot')
return mir_rorp
......@@ -325,7 +323,7 @@ class CachedRF:
return "\n".join((s1, s2, s3))
def get_rf(self, index):
"""Return RestoreFile of given index, or None"""
"""Get a RestoreFile for given index, or None"""
while 1:
if not self.rf_list:
if not self.add_rfs(index): return None
......@@ -341,15 +339,16 @@ class CachedRF:
self.add_rfs(index)): return None
else: del self.rf_list[0]
def get_fp(self, index):
def get_fp(self, index, mir_rorp):
"""Return the file object (for reading) of given index"""
rf = self.get_rf(index)
rf = longname.update_rf(self.get_rf(index), mir_rorp,
self.root_rf.mirror_rp)
if not rf:
log.Log("""Error: Unable to retrieve data for file %s!
The cause is probably data loss from the backup repository.""" %
(index and "/".join(index) or '.',), 2)
log.Log("Error: Unable to retrieve data for file %s!\nThe "
"cause is probably data loss from the backup repository."
% (index and "/".join(index) or '.',), 2)
return cStringIO.StringIO('')
return self.get_rf(index).get_restore_fp()
return rf.get_restore_fp()
def add_rfs(self, index):
"""Given index, add the rfs in that same directory
......@@ -364,9 +363,7 @@ The cause is probably data loss from the backup repository.""" %
temp_rf = RestoreFile(self.root_rf.mirror_rp.new_index(parent_index),
self.root_rf.inc_rp.new_index(parent_index), [])
new_rfs = list(temp_rf.yield_sub_rfs())
if not new_rfs:
log.Log("Warning: No RFs added for index %s" % (index,), 2)
return 0
if not new_rfs: return 0
self.rf_list[0:0] = new_rfs
return 1
......@@ -384,9 +381,6 @@ class RestoreFile:
"""
def __init__(self, mirror_rp, inc_rp, inc_list):
assert mirror_rp.index == inc_rp.index, \
("mirror and inc indicies don't match: %s %s" %
(mirror_rp.get_indexpath(), inc_rp.get_indexpath()))
self.index = mirror_rp.index
self.mirror_rp = mirror_rp
self.inc_rp, self.inc_list = inc_rp, inc_list
......@@ -485,7 +479,7 @@ constructed from existing increments because last increment had type
%s. Instead of the actual file's data, an empty length file will be
created. This error is probably caused by data loss in the
rdiff-backup destination directory, or a bug in rdiff-backup""" %
(self.mirror_rp.path, self.relevant_incs[-1].lstat()), 2)
(self.mirror_rp.get_indexpath(), self.relevant_incs[-1].lstat()), 2)
return cStringIO.StringIO('')
return robust.check_common_error(error_handler, get_fp)
......@@ -505,13 +499,7 @@ rdiff-backup destination directory, or a bug in rdiff-backup""" %
def yield_sub_rfs(self):
"""Return RestoreFiles under current RestoreFile (which is dir)"""
if not self.mirror_rp.isdir() and not self.inc_rp.isdir():
log.Log("""Warning: directory %s seems to be missing from backup!
This is probably due to files being deleted manually from the
rdiff-backup destination directory. In general you shouldn't do this,
as data loss may result.\n""" % (self.mirror_rp.get_indexpath(),), 2)
return
if not self.mirror_rp.isdir() and not self.inc_rp.isdir(): return
if self.mirror_rp.isdir():
mirror_iter = self.yield_mirrorrps(self.mirror_rp)
else: mirror_iter = iter([])
......@@ -743,3 +731,8 @@ class PermissionChanger:
def finish(self):
"""Restore any remaining rps"""
for index, rp, perms in self.open_index_list: rp.chmod(perms)
import Globals, Time, Rdiff, Hardlink, selection, rpath, \
log, robust, metadata, statistics, TempFile, hash, longname
......@@ -357,8 +357,10 @@ class RORPath:
elif key == 'resourcefork' and not Globals.resource_forks_write:
pass
elif key == 'sha1': pass # one or other may not have set
elif key == 'mirrorname' or key == 'incname': pass
elif (not other.data.has_key(key) or
self.data[key] != other.data[key]): return 0
self.data[key] != other.data[key]):
return 0
if self.lstat() and not self.issym() and Globals.change_ownership:
# Now compare ownership. Symlinks don't have ownership
......@@ -654,6 +656,42 @@ class RORPath:
"""Record resource fork in dictionary. Does not write"""
self.data['resourcefork'] = rfork
def has_alt_mirror_name(self):
    """Return true if an alternate mirror name is recorded for self."""
    return 'mirrorname' in self.data
def get_alt_mirror_name(self):
    """Return the recorded alternate mirror name (long filename support)."""
    return self.data['mirrorname']
def set_alt_mirror_name(self, filename):
    """Record filename as self's alternate mirror name.

    When set, the file's mirror data lives under filename in the long
    filename directory rather than at the usual mirror location.
    """
    self.data['mirrorname'] = filename
def has_alt_inc_name(self):
    """Return true if an alternate increment base is recorded for self."""
    return 'incname' in self.data
def get_alt_inc_name(self):
    """Return the recorded alternate increment base name."""
    return self.data['incname']
def set_alt_inc_name(self, name):
    """Record name as self's alternate increment base.

    When set, increments are stored in the long filename directory
    with name as their base.  If an alternate mirror name is also in
    use, the two should be set to the same value.
    """
    self.data['incname'] = name
def has_sha1(self):
    """Return true if self's sha1 digest has been set."""
    return 'sha1' in self.data
......
"""commontest - Some functions and constants common to several test cases"""
import os, sys, code
# Avoid circularities
from rdiff_backup.log import Log
from rdiff_backup.rpath import RPath
from rdiff_backup import Globals, Hardlink, SetConnections, Main, \
selection, lazy, Time, rpath, eas_acls, rorpiter, Security
RBBin = "../rdiff-backup"
SourceDir = "../rdiff_backup"
AbsCurdir = os.getcwd() # Absolute path name of current directory
......@@ -383,3 +385,5 @@ def raise_interpreter(use_locals = None):
if use_locals: local_dict = locals()
else: local_dict = globals()
code.InteractiveConsole(local_dict).interact()
import unittest, errno
from commontest import *
from rdiff_backup import rpath, longname, Globals, regress
max_len = 255
class LongNameTest(unittest.TestCase):
    """Test the longname module"""
    # All fixtures live under the testfiles directory on the local connection
    root_rp = rpath.RPath(Globals.local_connection, "testfiles")
    out_rp = root_rp.append_path('output')
    def test_length_limit(self):
        """Confirm that length limit is max_len

        Some of these tests depend on the length being at most
        max_len, so check to make sure it's accurate.
        """
        Myrm(self.out_rp.path)
        self.out_rp.mkdir()
        # A name of exactly max_len characters must be creatable...
        really_long = self.out_rp.append('a'*max_len)
        really_long.touch()
        # ...while one character more must fail with ENAMETOOLONG
        try: too_long = self.out_rp.append("a"*(max_len+1))
        except EnvironmentError, e:
            assert errno.errorcode[e[0]] == 'ENAMETOOLONG', e
        else: assert 0, "File made successfully with length " + str(max_len+1)
    def make_input_dirs(self):
        """Create two input directories with long filename(s) in them"""
        dir1 = self.root_rp.append('longname1')
        dir2 = self.root_rp.append('longname2')
        Myrm(dir1.path)
        Myrm(dir2.path)
        # dir1: long regular file + long dir containing a long file
        dir1.mkdir()
        rp11 = dir1.append('A'*max_len)
        rp11.write_string('foobar')
        rp12 = dir1.append('B'*max_len)
        rp12.mkdir()
        rp121 = rp12.append('C'*max_len)
        rp121.touch()
        # dir2: same shape, but A-file content differs and B-dir renamed
        dir2.mkdir()
        rp21 = dir2.append('A'*max_len)
        rp21.write_string('Hello, world')
        rp22 = dir2.append('D'*max_len)
        rp22.mkdir()
        rp221 = rp22.append('C'*max_len)
        rp221.touch()
        return dir1, dir2
    def check_dir1(self, dirrp):
        """Make sure dirrp looks like dir1"""
        rp1 = dirrp.append('A'*max_len)
        assert rp1.get_data() == 'foobar', "data doesn't match"
        rp2 = dirrp.append('B'*max_len)
        assert rp2.isdir(), rp2
        rp21 = rp2.append('C'*max_len)
        assert rp21.isreg(), rp21
    def check_dir2(self, dirrp):
        """Make sure dirrp looks like dir2"""
        rp1 = dirrp.append('A'*max_len)
        assert rp1.get_data() == 'Hello, world', "data doesn't match"
        rp2 = dirrp.append('D'*max_len)
        assert rp2.isdir(), rp2
        rp21 = rp2.append('C'*max_len)
        assert rp21.isreg(), rp21
    def generic_test(self, inlocal, outlocal, extra_args, compare_back):
        """Used for some of the tests below

        Back up dir1 then dir2, optionally verifying the mirror after
        each session, then restore both sessions and compare.
        """
        in1, in2 = self.make_input_dirs()
        Myrm(self.out_rp.path)
        restore_dir = self.root_rp.append('longname_out')
        # Test backing up
        rdiff_backup(inlocal, outlocal, in1.path, self.out_rp.path, 10000,
                     extra_options = extra_args)
        if compare_back: self.check_dir1(self.out_rp)
        rdiff_backup(inlocal, outlocal, in2.path, self.out_rp.path, 20000,
                     extra_options = extra_args)
        if compare_back: self.check_dir2(self.out_rp)
        # Now try restoring
        Myrm(restore_dir.path)
        rdiff_backup(inlocal, outlocal, self.out_rp.path, restore_dir.path,
                     30000, extra_options = "-r now " + extra_args)
        self.check_dir2(restore_dir)
        Myrm(restore_dir.path)
        rdiff_backup(1, 1, self.out_rp.path, restore_dir.path, 30000,
                     extra_options = "-r 10000 " + extra_args)
        self.check_dir1(restore_dir)
    def test_basic_local(self):
        """Test backup session when increment would be too long"""
        self.generic_test(1, 1, "", 1)
    def test_quoting_local(self):
        """Test backup session with quoting, so reg files also too long"""
        # Quoting A-Z makes even mirror names exceed the limit, so the
        # mirror can't be compared directly (compare_back = 0)
        self.generic_test(1, 1, "--override-chars-to-quote A-Z", 0)
    def generic_regress_test(self, extra_args):
        """Used for regress tests below"""
        in1, in2 = self.make_input_dirs()
        Myrm(self.out_rp.path)
        restore_dir = self.root_rp.append('longname_out')
        Myrm(restore_dir.path)
        rdiff_backup(1, 1, in1.path, self.out_rp.path, 10000,
                     extra_options = extra_args)
        rdiff_backup(1, 1, in2.path, self.out_rp.path, 20000,
                     extra_options = extra_args)
        # Regress repository back to in1 condition
        Globals.rbdir = self.out_rp.append_path('rdiff-backup-data')
        self.add_current_mirror(10000)
        self.out_rp.setdata()
        regress.Regress(self.out_rp)
        # Restore in1 and compare
        rdiff_backup(1, 1, self.out_rp.path, restore_dir.path, 30000,
                     extra_options = '-r now ' + extra_args)
        self.check_dir1(restore_dir)
    def add_current_mirror(self, time):
        """Add current_mirror marker at given time

        Simulates an interrupted session so Regress has something to
        roll back from.
        """
        cur_mirror_rp = Globals.rbdir.append(
            "current_mirror.%s.data" % (Time.timetostring(time),))
        cur_mirror_rp.touch()
    def test_regress_basic(self):
        """Test regressing when increments would be too long"""
        self.generic_regress_test('')
if __name__ == "__main__": unittest.main()
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment