Commit 9a0d779c authored by Barry Warsaw

Port BerkeleyDB 4.1 support from the pybsddb project. bsddb is now at
version 4.1.1 and works with BerkeleyDB releases up to 4.1.25.
parent 0a26235e
......@@ -33,14 +33,7 @@
#----------------------------------------------------------------------
"""
This package initialization module provides a compatibility interface
that should enable bsddb3 to be a near drop-in replacement for the original
old bsddb module. The functions and classes provided here are all
wrappers around the new functionality provided in the bsddb3.db module.
People interested in the more advanced capabilites of Berkeley DB 3.x
should use the bsddb3.db module directly.
"""Support for BerkeleyDB 3.1 through 4.1.
"""
try:
......@@ -55,7 +48,7 @@ except ImportError:
_db = _bsddb
__version__ = _db.__version__
error = _db.DBError # So bsddb3.error will mean something...
error = _db.DBError # So bsddb.error will mean something...
#----------------------------------------------------------------------
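A minimal usage sketch, not part of the patch, of the two interfaces this package exposes: the legacy dictionary-style functions and the bsddb.db module that the rest of this commit extends. The file name is invented.

import bsddb

d = bsddb.hashopen('/tmp/compat_example.db', 'c')   # old drop-in interface
d['key'] = 'value'
print d['key']
d.close()

from bsddb import db                                # richer 3.x/4.x interface
print db.version()                                  # e.g. (4, 1, 25)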
......
......@@ -17,7 +17,6 @@
import db
class DBEnv:
def __init__(self, *args, **kwargs):
self._cobj = apply(db.DBEnv, args, kwargs)
......@@ -77,6 +76,14 @@ class DBEnv:
def set_get_returns_none(self, *args, **kwargs):
return apply(self._cobj.set_get_returns_none, args, kwargs)
if db.version() >= (4,1):
def dbremove(self, *args, **kwargs):
return apply(self._cobj.dbremove, args, kwargs)
def dbrename(self, *args, **kwargs):
return apply(self._cobj.dbrename, args, kwargs)
def set_encrypt(self, *args, **kwargs):
return apply(self._cobj.set_encrypt, args, kwargs)
class DB:
def __init__(self, dbenv, *args, **kwargs):
......@@ -175,3 +182,8 @@ class DB:
return apply(self._cobj.verify, args, kwargs)
def set_get_returns_none(self, *args, **kwargs):
return apply(self._cobj.set_get_returns_none, args, kwargs)
if db.version() >= (4,1):
def set_encrypt(self, *args, **kwargs):
return apply(self._cobj.set_encrypt, args, kwargs)
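A hedged sketch of how the version-gated wrappers above are meant to be used: callers guard on db.version() (and, for encryption, on a crypto-enabled build exposing DB_ENCRYPT_AES). The passphrase and home directory are placeholders.

import os
from bsddb import db, dbobj

home = '/tmp/dbobj_home'
if not os.path.exists(home):
    os.mkdir(home)

env = dbobj.DBEnv()
if db.version() >= (4, 1) and hasattr(db, 'DB_ENCRYPT_AES'):
    # only available when BerkeleyDB 4.1+ was built with cryptography support
    env.set_encrypt('not-a-real-passphrase', db.DB_ENCRYPT_AES)
env.open(home, db.DB_CREATE | db.DB_INIT_MPOOL)
env.close()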
"""
File-like objects that read from or write to a bsddb3 record.
File-like objects that read from or write to a bsddb record.
This implements (nearly) all stdio methods.
......
......@@ -23,8 +23,7 @@
#
#------------------------------------------------------------------------
"""
Manage shelves of pickled objects using bsddb3 database files for the
"""Manage shelves of pickled objects using bsddb database files for the
storage.
"""
......@@ -43,7 +42,7 @@ def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
shelve.py module. It can be used like this, where key is a string
and data is a pickleable object:
from bsddb3 import dbshelve
from bsddb import dbshelve
db = dbshelve.open(filename)
db[key] = data
......@@ -63,7 +62,7 @@ def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
elif sflag == 'n':
flags = db.DB_TRUNCATE | db.DB_CREATE
else:
raise error, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb3.db.DB_* flags"
raise error, "flags should be one of 'r', 'w', 'c' or 'n' or use the bsddb.db.DB_* flags"
d = DBShelf(dbenv)
d.open(filename, dbname, filetype, flags, mode)
......@@ -73,7 +72,7 @@ def open(filename, flags=db.DB_CREATE, mode=0660, filetype=db.DB_HASH,
class DBShelf:
"""
A shelf to hold pickled objects, built upon a bsddb3 DB object. It
A shelf to hold pickled objects, built upon a bsddb DB object. It
automatically pickles/unpickles data objects going to/from the DB.
"""
def __init__(self, dbenv=None):
......@@ -286,3 +285,6 @@ class DBShelfCursor:
#---------------------------------------------------------------------------
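A hedged round-trip sketch of the DBShelf behaviour described above; the file name is invented. Values are pickled going in and unpickled coming out, while keys stay plain strings.

from bsddb import dbshelve

shelf = dbshelve.open('/tmp/shelf_example.db', 'c')
shelf['config'] = {'retries': 3, 'verbose': 1}   # pickled automatically
print shelf['config']['retries']                 # unpickled transparently
del shelf['config']
shelf.close()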
......@@ -113,39 +113,48 @@ def contains_metastrings(s) :
class bsdTableDB :
# Save close() from bombing out if __init__() failed
db = None
env = None
def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600, recover=0, dbflags=0) :
def __init__(self, filename, dbhome, create=0, truncate=0, mode=0600,
recover=0, dbflags=0) :
"""bsdTableDB.open(filename, dbhome, create=0, truncate=0, mode=0600)
Open database name in the dbhome BerkeleyDB directory.
Use keyword arguments when calling this constructor.
"""
self.db = None
myflags = DB_THREAD
if create :
myflags = myflags | DB_CREATE
flagsforenv = DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG | DB_INIT_TXN | dbflags
if recover :
if create:
myflags |= DB_CREATE
flagsforenv = (DB_INIT_MPOOL | DB_INIT_LOCK | DB_INIT_LOG |
DB_INIT_TXN | dbflags)
# DB_AUTO_COMMIT isn't a valid flag for env.open()
try:
dbflags |= DB_AUTO_COMMIT
except AttributeError:
pass
if recover:
flagsforenv = flagsforenv | DB_RECOVER
self.env = DBEnv()
self.env.set_lk_detect(DB_LOCK_DEFAULT) # enable auto deadlock avoidance
# enable auto deadlock avoidance
self.env.set_lk_detect(DB_LOCK_DEFAULT)
self.env.open(dbhome, myflags | flagsforenv)
if truncate :
myflags = myflags | DB_TRUNCATE
if truncate:
myflags |= DB_TRUNCATE
self.db = DB(self.env)
self.db.set_flags(DB_DUP) # allow duplicate entries [warning: be careful w/ metadata]
self.db.open(filename, DB_BTREE, myflags, mode)
# allow duplicate entries [warning: be careful w/ metadata]
self.db.set_flags(DB_DUP)
self.db.open(filename, DB_BTREE, dbflags | myflags, mode)
self.dbfilename = filename
# Initialize the table names list if this is a new database
if not self.db.has_key(_table_names_key) :
self.db.put(_table_names_key, pickle.dumps([], 1))
txn = self.env.txn_begin()
try:
if not self.db.has_key(_table_names_key, txn):
self.db.put(_table_names_key, pickle.dumps([], 1), txn=txn)
# Yes, bare except
except:
txn.abort()
raise
else:
txn.commit()
# TODO verify more of the database's metadata?
self.__tablecolumns = {}
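A hedged end-to-end sketch of the bsdTableDB API that this constructor sets up; the method names and ExactCond come from the test cases later in this commit, while the file name, home directory and table contents are invented.

import os
from bsddb import dbtables

home = '/tmp/tables_home'
if not os.path.exists(home):
    os.mkdir(home)

tdb = dbtables.bsdTableDB(filename='example.db', dbhome=home, create=1)
tdb.CreateTable('tracks', ['artist', 'title', 'genre'])
tdb.Insert('tracks', {'artist': 'Sade', 'title': 'Smooth Operator',
                      'genre': 'Jazz'})
rows = tdb.Select('tracks', ['title'],
                  conditions={'genre': dbtables.ExactCond('Jazz')})
print rows   # -> list of {'title': ...} dicts for the matching rows
tdb.close()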
def __del__(self):
......@@ -189,7 +198,7 @@ class bsdTableDB :
cur.close()
def CreateTable(self, table, columns) :
def CreateTable(self, table, columns):
"""CreateTable(table, columns) - Create a new table in the database
raises TableDBError if it already exists or for other DB errors.
"""
......@@ -198,14 +207,16 @@ class bsdTableDB :
try:
# checking sanity of the table and column names here on
# table creation will prevent problems elsewhere.
if contains_metastrings(table) :
raise ValueError, "bad table name: contains reserved metastrings"
if contains_metastrings(table):
raise ValueError(
"bad table name: contains reserved metastrings")
for column in columns :
if contains_metastrings(column) :
raise ValueError, "bad column name: contains reserved metastrings"
if contains_metastrings(column):
raise ValueError(
"bad column name: contains reserved metastrings")
columnlist_key = _columns_key(table)
if self.db.has_key(columnlist_key) :
if self.db.has_key(columnlist_key):
raise TableAlreadyExists, "table already exists"
txn = self.env.txn_begin()
......@@ -213,9 +224,11 @@ class bsdTableDB :
self.db.put(columnlist_key, pickle.dumps(columns, 1), txn=txn)
# add the table name to the tablelist
tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn,
flags=DB_RMW))
tablelist.append(table)
self.db.delete(_table_names_key, txn) # delete 1st, incase we opened with DB_DUP
# delete 1st, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn)
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
......@@ -228,7 +241,8 @@ class bsdTableDB :
def ListTableColumns(self, table):
"""Return a list of columns in the given table. [] if the table doesn't exist.
"""Return a list of columns in the given table.
[] if the table doesn't exist.
"""
assert type(table) == type('')
if contains_metastrings(table) :
......@@ -252,7 +266,9 @@ class bsdTableDB :
return []
def CreateOrExtendTable(self, table, columns):
"""CreateOrExtendTable(table, columns) - Create a new table in the database.
"""CreateOrExtendTable(table, columns)
- Create a new table in the database.
If a table of this name already exists, extend it to have any
additional columns present in the given list as well as
all of its current columns.
......@@ -268,13 +284,16 @@ class bsdTableDB :
txn = self.env.txn_begin()
# load the current column list
oldcolumnlist = pickle.loads(self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
# create a hash table for fast lookups of column names in the loop below
oldcolumnlist = pickle.loads(
self.db.get(columnlist_key, txn=txn, flags=DB_RMW))
# create a hash table for fast lookups of column names in the
# loop below
oldcolumnhash = {}
for c in oldcolumnlist:
oldcolumnhash[c] = c
# create a new column list containing both the old and new column names
# create a new column list containing both the old and new
# column names
newcolumnlist = copy.copy(oldcolumnlist)
for c in columns:
if not oldcolumnhash.has_key(c):
......@@ -284,7 +303,9 @@ class bsdTableDB :
if newcolumnlist != oldcolumnlist :
# delete the old one first since we opened with DB_DUP
self.db.delete(columnlist_key, txn)
self.db.put(columnlist_key, pickle.dumps(newcolumnlist, 1), txn=txn)
self.db.put(columnlist_key,
pickle.dumps(newcolumnlist, 1),
txn=txn)
txn.commit()
txn = None
......@@ -307,7 +328,7 @@ class bsdTableDB :
raise TableDBError, "unknown table: " + `table`
self.__tablecolumns[table] = pickle.loads(tcolpickles)
def __new_rowid(self, table, txn=None) :
def __new_rowid(self, table, txn) :
"""Create a new unique row identifier"""
unique = 0
while not unique :
......@@ -321,8 +342,9 @@ class bsdTableDB :
# Guarantee uniqueness by adding this key to the database
try:
self.db.put(_rowid_key(table, newid), None, txn=txn, flags=DB_NOOVERWRITE)
except DBKeyExistsError:
self.db.put(_rowid_key(table, newid), None, txn=txn,
flags=DB_NOOVERWRITE)
except DBKeyExistError:
pass
else:
unique = 1
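The uniqueness guarantee above in isolation, as a hedged self-contained sketch with an invented file name: put() with DB_NOOVERWRITE raises DBKeyExistError when the key already exists, so a successful put reserves the candidate row id.

from bsddb import db

d = db.DB()
d.open('/tmp/noover_example.db', db.DB_BTREE, db.DB_CREATE)
d.put('rowid-0001', '')                              # key now taken
try:
    d.put('rowid-0001', '', flags=db.DB_NOOVERWRITE)
except db.DBKeyExistError:
    print 'rowid-0001 already exists, generate another id'
d.close()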
......@@ -347,9 +369,8 @@ class bsdTableDB :
raise TableDBError, "unknown column: "+`column`
# get a unique row identifier for this row
rowid = self.__new_rowid(table)
txn = self.env.txn_begin()
rowid = self.__new_rowid(table, txn=txn)
# insert the row values into the table database
for column, dataitem in rowdict.items() :
......@@ -360,10 +381,15 @@ class bsdTableDB :
txn = None
except DBError, dberror:
if txn :
# WIBNI we could just abort the txn and re-raise the exception?
# But no, because TableDBError is not related to DBError via
# inheritance, so it would be backwards incompatible. Do the next
# best thing.
info = sys.exc_info()
if txn:
txn.abort()
self.db.delete(_rowid_key(table, rowid))
raise TableDBError, dberror[1]
raise TableDBError, dberror[1], info[2]
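A hedged stand-alone illustration of the three-argument raise used above; the class and function names are placeholders. It substitutes a different exception type while preserving the original traceback, so the report still points at the failing call.

import sys

class WrapperError(Exception):          # stands in for TableDBError
    pass

def failing_db_call():
    raise ValueError('simulated DBError')

try:
    failing_db_call()
except ValueError, e:
    info = sys.exc_info()
    # Python 2 three-argument raise: (class, value, traceback)
    raise WrapperError, str(e), info[2]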
def Modify(self, table, conditions={}, mappings={}) :
......@@ -388,13 +414,21 @@ class bsdTableDB :
txn = self.env.txn_begin()
# modify the requested column
try:
dataitem = self.db.get(_data_key(table, column, rowid), txn)
self.db.delete(_data_key(table, column, rowid), txn)
dataitem = self.db.get(
_data_key(table, column, rowid),
txn)
self.db.delete(
_data_key(table, column, rowid),
txn)
except DBNotFoundError:
dataitem = None # XXXXXXX row key somehow didn't exist, assume no error
# XXXXXXX row key somehow didn't exist, assume no
# error
dataitem = None
dataitem = mappings[column](dataitem)
if dataitem <> None:
self.db.put(_data_key(table, column, rowid), dataitem, txn=txn)
self.db.put(
_data_key(table, column, rowid),
dataitem, txn=txn)
txn.commit()
txn = None
......@@ -425,14 +459,17 @@ class bsdTableDB :
for column in columns :
# delete the data key
try:
self.db.delete(_data_key(table, column, rowid), txn)
self.db.delete(_data_key(table, column, rowid),
txn)
except DBNotFoundError:
pass # XXXXXXX column may not exist, assume no error
# XXXXXXX column may not exist, assume no error
pass
try:
self.db.delete(_rowid_key(table, rowid), txn)
except DBNotFoundError:
pass # XXXXXXX row key somehow didn't exist, assume no error
# XXXXXXX row key somehow didn't exist, assume no error
pass
txn.commit()
txn = None
except DBError, dberror:
......@@ -490,15 +527,18 @@ class bsdTableDB :
rejected_rowids = {} # keys are rowids that do not match
# attempt to sort the conditions in such a way as to minimize full column lookups
# attempt to sort the conditions in such a way as to minimize full
# column lookups
def cmp_conditions(atuple, btuple):
a = atuple[1]
b = btuple[1]
if type(a) == type(b) :
if isinstance(a, PrefixCond) and isinstance(b, PrefixCond):
return cmp(len(b.prefix), len(a.prefix)) # longest prefix first
# longest prefix first
return cmp(len(b.prefix), len(a.prefix))
if isinstance(a, LikeCond) and isinstance(b, LikeCond):
return cmp(len(b.likestr), len(a.likestr)) # longest likestr first
# longest likestr first
return cmp(len(b.likestr), len(a.likestr))
return 0
if isinstance(a, ExactCond):
return -1
......@@ -565,7 +605,8 @@ class bsdTableDB :
if rowdata.has_key(column) :
continue
try:
rowdata[column] = self.db.get(_data_key(table, column, rowid))
rowdata[column] = self.db.get(
_data_key(table, column, rowid))
except DBError, dberror:
if dberror[0] != DB_NOTFOUND :
raise
......@@ -614,12 +655,15 @@ class bsdTableDB :
cur.close()
# delete the tablename from the table name list
tablelist = pickle.loads(self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
tablelist = pickle.loads(
self.db.get(_table_names_key, txn=txn, flags=DB_RMW))
try:
tablelist.remove(table)
except ValueError:
pass # hmm, it wasn't there, oh well, that's what we want.
self.db.delete(_table_names_key, txn) # delete 1st, incase we opened with DB_DUP
# hmm, it wasn't there, oh well, that's what we want.
pass
# delete 1st, in case we opened with DB_DUP
self.db.delete(_table_names_key, txn)
self.db.put(_table_names_key, pickle.dumps(tablelist, 1), txn=txn)
txn.commit()
......
......@@ -22,19 +22,21 @@
#
# import the time.sleep function in a namespace safe way to allow
# "from bsddb3.db import *"
# "from bsddb.db import *"
#
from time import sleep
_sleep = sleep
del sleep
from time import sleep as _sleep
import _bsddb
from bsddb import _db
_deadlock_MinSleepTime = 1.0/64 # always sleep at least N seconds between retrys
_deadlock_MaxSleepTime = 3.14159 # never sleep more than N seconds between retrys
# always sleep at least N seconds between retries
_deadlock_MinSleepTime = 1.0/64
# never sleep more than N seconds between retries
_deadlock_MaxSleepTime = 3.14159
# Assign a file object to this for a "sleeping" message to be written to it
# each retry
_deadlock_VerboseFile = None
_deadlock_VerboseFile = None # Assign a file object to this for a "sleeping"
# message to be written to it each retry
def DeadlockWrap(function, *_args, **_kwargs):
"""DeadlockWrap(function, *_args, **_kwargs) - automatically retries
......@@ -57,16 +59,17 @@ def DeadlockWrap(function, *_args, **_kwargs):
del _kwargs['max_retries']
while 1:
try:
return apply(function, _args, _kwargs)
except _bsddb.DBLockDeadlockError:
return function(*_args, **_kwargs)
except _db.DBLockDeadlockError:
if _deadlock_VerboseFile:
_deadlock_VerboseFile.write('dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
_deadlock_VerboseFile.write(
'dbutils.DeadlockWrap: sleeping %1.3f\n' % sleeptime)
_sleep(sleeptime)
# exponential backoff in the sleep time
sleeptime = sleeptime * 2
if sleeptime > _deadlock_MaxSleepTime :
sleeptime *= 2
if sleeptime > _deadlock_MaxSleepTime:
sleeptime = _deadlock_MaxSleepTime
max_retries = max_retries - 1
max_retries -= 1
if max_retries == -1:
raise
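A hedged usage sketch, mirroring how the threaded tests later in this commit call DeadlockWrap (the file name is invented): wrap each DB operation so a DBLockDeadlockError triggers a retry with the exponential backoff above instead of propagating.

from bsddb import db, dbutils

d = db.DB()
d.open('/tmp/deadlockwrap_example.db', db.DB_BTREE, db.DB_CREATE)
dbutils.DeadlockWrap(d.put, 'key', 'value', max_retries=12)
print dbutils.DeadlockWrap(d.get, 'key', max_retries=12)
d.close()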
......
"""Run all test cases.
"""
import sys
import os
import unittest
verbose = 0
if 'verbose' in sys.argv:
verbose = 1
sys.argv.remove('verbose')
if 'silent' in sys.argv: # take care of old flag, just in case
verbose = 0
sys.argv.remove('silent')
def print_versions():
from bsddb import db
print
print '-=' * 38
print db.DB_VERSION_STRING
print 'bsddb.db.version(): %s' % (db.version(), )
print 'bsddb.db.__version__: %s' % db.__version__
print 'bsddb.db.cvsid: %s' % db.cvsid
print 'python version: %s' % sys.version
print 'My pid: %s' % os.getpid()
print '-=' * 38
class PrintInfoFakeTest(unittest.TestCase):
def testPrintVersions(self):
print_versions()
# This little hack is for when this module is run as main and all the
# other modules import it so they will still be able to get the right
# verbose setting. It's confusing but it works.
import test_all
test_all.verbose = verbose
def suite():
test_modules = [
'test_associate',
'test_basics',
'test_compat',
'test_dbobj',
'test_dbshelve',
'test_dbtables',
'test_env_close',
'test_get_none',
'test_join',
'test_lock',
'test_misc',
'test_queue',
'test_recno',
'test_thread',
]
alltests = unittest.TestSuite()
for name in test_modules:
module = __import__(name)
alltests.addTest(module.suite())
return alltests
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(PrintInfoFakeTest))
return suite
if __name__ == '__main__':
print_versions()
unittest.main(defaultTest='suite')
......@@ -14,7 +14,7 @@ except ImportError:
have_threads = 0
import unittest
from test.test_support import verbose
from test_all import verbose
from bsddb import db, dbshelve
......@@ -70,7 +70,8 @@ musicdata = {
45: ("Blue Man Group", "Klein Mandelbrot", "New Age"),
46: ("Kenny G", "Silhouette", "Jazz"),
47: ("Sade", "Smooth Operator", "Jazz"),
48: ("David Arkenstone", "Papillon (On The Wings Of The Butterfly)", "New Age"),
48: ("David Arkenstone", "Papillon (On The Wings Of The Butterfly)",
"New Age"),
49: ("David Arkenstone", "Stepping Stars", "New Age"),
50: ("David Arkenstone", "Carnation Lily Lily Rose", "New Age"),
51: ("David Lanz", "Behind The Waterfall", "New Age"),
......@@ -109,8 +110,6 @@ class AssociateTestCase(unittest.TestCase):
key = "%02d" % key
d.put(key, string.join(value, '|'))
def createDB(self):
self.primary = db.DB(self.env)
self.primary.open(self.filename, "primary", self.dbtype,
......@@ -122,18 +121,18 @@ class AssociateTestCase(unittest.TestCase):
def getDB(self):
return self.primary
def test01_associateWithDB(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_associateWithDB..." % self.__class__.__name__
print "Running %s.test01_associateWithDB..." % \
self.__class__.__name__
self.createDB()
secDB = db.DB(self.env)
secDB.set_flags(db.DB_DUP)
secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE | db.DB_THREAD)
secDB.open(self.filename, "secondary", db.DB_BTREE,
db.DB_CREATE | db.DB_THREAD)
self.getDB().associate(secDB, self.getGenre)
self.addDataToDB(self.getDB())
......@@ -144,14 +143,16 @@ class AssociateTestCase(unittest.TestCase):
def test02_associateAfterDB(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_associateAfterDB..." % self.__class__.__name__
print "Running %s.test02_associateAfterDB..." % \
self.__class__.__name__
self.createDB()
self.addDataToDB(self.getDB())
secDB = db.DB(self.env)
secDB.set_flags(db.DB_DUP)
secDB.open(self.filename, "secondary", db.DB_BTREE, db.DB_CREATE | db.DB_THREAD)
secDB.open(self.filename, "secondary", db.DB_BTREE,
db.DB_CREATE | db.DB_THREAD)
# adding the DB_CREATE flag will cause it to index existing records
self.getDB().associate(secDB, self.getGenre, db.DB_CREATE)
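A hedged stand-alone version of the associate() pattern this test exercises, assuming BerkeleyDB 3.3.11 or newer as the suite below requires; the file names are invented and the 'artist|title|genre' record format comes from addDataToDB above. The callback derives the secondary key from each primary record, and BerkeleyDB keeps the index current automatically.

from bsddb import db

def genre_of(pri_key, pri_data):
    return pri_data.split('|')[2]       # records are 'artist|title|genre'

primary = db.DB()
primary.open('/tmp/assoc_primary.db', db.DB_BTREE, db.DB_CREATE)
secondary = db.DB()
secondary.set_flags(db.DB_DUP)
secondary.open('/tmp/assoc_secondary.db', db.DB_BTREE, db.DB_CREATE)
primary.associate(secondary, genre_of, db.DB_CREATE)

primary.put('01', 'Sade|Smooth Operator|Jazz')
print secondary.get('Jazz')             # -> the matching primary data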
......@@ -159,8 +160,6 @@ class AssociateTestCase(unittest.TestCase):
self.finish_test(secDB)
def finish_test(self, secDB):
if verbose:
print "Primary key traversal:"
......@@ -190,9 +189,8 @@ class AssociateTestCase(unittest.TestCase):
if verbose:
print rec
rec = c.next()
assert count == len(musicdata)-1 # all items accounted for EXCEPT for 1 with "Blues" genre
# all items accounted for EXCEPT for 1 with "Blues" genre
assert count == len(musicdata)-1
def getGenre(self, priKey, priData):
assert type(priData) == type("")
......@@ -299,25 +297,25 @@ class ThreadedAssociateRecnoTestCase(ShelveAssociateTestCase):
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
def test_suite():
suite = unittest.TestSuite()
if db.version() >= (3, 3, 11):
theSuite.addTest(unittest.makeSuite(AssociateHashTestCase))
theSuite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
theSuite.addTest(unittest.makeSuite(AssociateRecnoTestCase))
suite.addTest(unittest.makeSuite(AssociateHashTestCase))
suite.addTest(unittest.makeSuite(AssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(AssociateRecnoTestCase))
theSuite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
theSuite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
theSuite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateHashTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(ShelveAssociateRecnoTestCase))
if have_threads:
theSuite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
theSuite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
theSuite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))
suite.addTest(unittest.makeSuite(ThreadedAssociateHashTestCase))
suite.addTest(unittest.makeSuite(ThreadedAssociateBTreeTestCase))
suite.addTest(unittest.makeSuite(ThreadedAssociateRecnoTestCase))
return theSuite
return suite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -3,14 +3,20 @@ Basic TestCases for BTree and hash DBs, with and without a DBEnv, with
various DB flags, etc.
"""
import sys, os, string
import os
import sys
import errno
import shutil
import string
import tempfile
from pprint import pprint
import unittest
from bsddb import db
from test.test_support import verbose
from test_all import verbose
DASH = '-'
#----------------------------------------------------------------------
......@@ -23,7 +29,8 @@ class VersionTestCase(unittest.TestCase):
print 'bsddb.db.version(): %s' % (info, )
print db.DB_VERSION_STRING
print '-=' * 20
assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR, db.DB_VERSION_PATCH)
assert info == (db.DB_VERSION_MAJOR, db.DB_VERSION_MINOR,
db.DB_VERSION_PATCH)
#----------------------------------------------------------------------
......@@ -35,19 +42,30 @@ class BasicTestCase(unittest.TestCase):
dbname = None
useEnv = 0
envflags = 0
envsetflags = 0
def setUp(self):
if self.useEnv:
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
try: os.mkdir(homeDir)
except os.error: pass
self.homeDir = homeDir
try:
shutil.rmtree(homeDir)
except OSError, e:
# unix returns ENOENT, windows returns ESRCH
if e.errno not in (errno.ENOENT, errno.ESRCH): raise
os.mkdir(homeDir)
try:
self.env = db.DBEnv()
self.env.set_lg_max(1024*1024)
self.env.set_flags(self.envsetflags, 1)
self.env.open(homeDir, self.envflags | db.DB_CREATE)
tempfile.tempdir = homeDir
self.filename = os.path.split(tempfile.mktemp())[1]
tempfile.tempdir = None
self.homeDir = homeDir
# Yes, a bare except is intended, since we're re-raising the exc.
except:
shutil.rmtree(homeDir)
raise
else:
self.env = None
self.filename = tempfile.mktemp()
......@@ -61,7 +79,8 @@ class BasicTestCase(unittest.TestCase):
else:
self.d.open(self.filename, # try out keyword args
mode = self.dbmode,
dbtype = self.dbtype, flags = self.dbopenflags|db.DB_CREATE)
dbtype = self.dbtype,
flags = self.dbopenflags|db.DB_CREATE)
self.populateDB()
......@@ -70,19 +89,13 @@ class BasicTestCase(unittest.TestCase):
self.d.close()
if self.env is not None:
self.env.close()
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
shutil.rmtree(self.homeDir)
## Make a new DBEnv to remove the env files from the home dir.
## (It can't be done while the env is open, nor after it has been
## closed, so we make a new one to do it.)
#e = db.DBEnv()
#e.remove(self.homeDir)
#os.remove(os.path.join(self.homeDir, self.filename))
else:
os.remove(self.filename)
......@@ -106,7 +119,7 @@ class BasicTestCase(unittest.TestCase):
def makeData(self, key):
return string.join([key] * 5, '-')
return DASH.join([key] * 5)
......@@ -209,7 +222,8 @@ class BasicTestCase(unittest.TestCase):
if verbose:
print '\n', '-=' * 30
print "Running %s.test02_DictionaryMethods..." % self.__class__.__name__
print "Running %s.test02_DictionaryMethods..." % \
self.__class__.__name__
for key in ['0002', '0101', '0401', '0701', '0998']:
data = d[key]
......@@ -266,10 +280,14 @@ class BasicTestCase(unittest.TestCase):
def test03_SimpleCursorStuff(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_SimpleCursorStuff..." % self.__class__.__name__
c = self.d.cursor()
print "Running %s.test03_SimpleCursorStuff..." % \
self.__class__.__name__
if self.env and self.dbopenflags & db.DB_AUTO_COMMIT:
txn = self.env.txn_begin()
else:
txn = None
c = self.d.cursor(txn=txn)
rec = c.first()
count = 0
......@@ -350,6 +368,8 @@ class BasicTestCase(unittest.TestCase):
c.close()
c2.close()
if txn:
txn.commit()
# time to abuse the closed cursors and hope we don't crash
methods_to_test = {
......@@ -367,14 +387,16 @@ class BasicTestCase(unittest.TestCase):
for method, args in methods_to_test.items():
try:
if verbose:
print "attempting to use a closed cursor's %s method" % method
print "attempting to use a closed cursor's %s method" % \
method
# a bug may cause a NULL pointer dereference...
apply(getattr(c, method), args)
except db.DBError, val:
assert val[0] == 0
if verbose: print val
else:
self.fail("no exception raised when using a buggy cursor's %s method" % method)
self.fail("no exception raised when using a buggy cursor's"
"%s method" % method)
#----------------------------------------
......@@ -382,7 +404,8 @@ class BasicTestCase(unittest.TestCase):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test04_PartialGetAndPut..." % self.__class__.__name__
print "Running %s.test04_PartialGetAndPut..." % \
self.__class__.__name__
key = "partialTest"
data = "1" * 1000 + "2" * 1000
......@@ -393,7 +416,8 @@ class BasicTestCase(unittest.TestCase):
d.put("partialtest2", ("1" * 30000) + "robin" )
assert d.get("partialtest2", dlen=5, doff=30000) == "robin"
# There seems to be a bug in DB here... Commented out the test for now.
# There seems to be a bug in DB here... Commented out the test for
# now.
##assert d.get("partialtest2", dlen=5, doff=30010) == ""
if self.dbsetflags != db.DB_DUP:
......@@ -423,6 +447,10 @@ class BasicTestCase(unittest.TestCase):
#----------------------------------------
def test06_Truncate(self):
if db.version() < (3,3):
# truncate is a feature of BerkeleyDB 3.3 and above
return
d = self.d
if verbose:
print '\n', '-=' * 30
......@@ -472,9 +500,11 @@ class BasicHashWithEnvTestCase(BasicTestCase):
#----------------------------------------------------------------------
class BasicTransactionTestCase(BasicTestCase):
dbopenflags = db.DB_THREAD
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
useEnv = 1
envflags = db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK | db.DB_INIT_TXN
envflags = (db.DB_THREAD | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
db.DB_INIT_TXN)
envsetflags = db.DB_AUTO_COMMIT
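A hedged distillation of the transactional configuration these class attributes describe, assuming BerkeleyDB 4.1 where DB_AUTO_COMMIT exists; the home directory and file name are invented.

import os
from bsddb import db

home = '/tmp/txn_example_home'
if not os.path.exists(home):
    os.mkdir(home)

env = db.DBEnv()
env.set_flags(db.DB_AUTO_COMMIT, 1)
env.open(home, db.DB_CREATE | db.DB_THREAD | db.DB_INIT_MPOOL |
               db.DB_INIT_LOCK | db.DB_INIT_LOG | db.DB_INIT_TXN)

d = db.DB(env)
d.open('txn_example.db', db.DB_BTREE,
       db.DB_CREATE | db.DB_THREAD | db.DB_AUTO_COMMIT)

txn = env.txn_begin()
d.put('key', 'value', txn=txn)
txn.commit()

d.close()
env.close()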
def tearDown(self):
......@@ -557,6 +587,10 @@ class BasicTransactionTestCase(BasicTestCase):
#----------------------------------------
def test07_TxnTruncate(self):
if db.version() < (3,3):
# truncate is a feature of BerkeleyDB 3.3 and above
return
d = self.d
if verbose:
print '\n', '-=' * 30
......@@ -624,10 +658,11 @@ class BasicDUPTestCase(BasicTestCase):
d = self.d
if verbose:
print '\n', '-=' * 30
print "Running %s.test08_DuplicateKeys..." % self.__class__.__name__
print "Running %s.test08_DuplicateKeys..." % \
self.__class__.__name__
d.put("dup0", "before")
for x in string.split("The quick brown fox jumped over the lazy dog."):
for x in "The quick brown fox jumped over the lazy dog.".split():
d.put("dup1", x)
d.put("dup2", "after")
......@@ -699,11 +734,13 @@ class BasicMultiDBTestCase(BasicTestCase):
print "Running %s.test09_MultiDB..." % self.__class__.__name__
d2 = db.DB(self.env)
d2.open(self.filename, "second", self.dbtype, self.dbopenflags|db.DB_CREATE)
d2.open(self.filename, "second", self.dbtype,
self.dbopenflags|db.DB_CREATE)
d3 = db.DB(self.env)
d3.open(self.filename, "third", self.otherType(), self.dbopenflags|db.DB_CREATE)
d3.open(self.filename, "third", self.otherType(),
self.dbopenflags|db.DB_CREATE)
for x in string.split("The quick brown fox jumped over the lazy dog"):
for x in "The quick brown fox jumped over the lazy dog".split():
d2.put(x, self.makeData(x))
for x in string.letters:
......@@ -785,29 +822,29 @@ class HashMultiDBTestCase(BasicMultiDBTestCase):
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
theSuite.addTest(unittest.makeSuite(VersionTestCase))
theSuite.addTest(unittest.makeSuite(BasicBTreeTestCase))
theSuite.addTest(unittest.makeSuite(BasicHashTestCase))
theSuite.addTest(unittest.makeSuite(BasicBTreeWithThreadFlagTestCase))
theSuite.addTest(unittest.makeSuite(BasicHashWithThreadFlagTestCase))
theSuite.addTest(unittest.makeSuite(BasicBTreeWithEnvTestCase))
theSuite.addTest(unittest.makeSuite(BasicHashWithEnvTestCase))
theSuite.addTest(unittest.makeSuite(BTreeTransactionTestCase))
theSuite.addTest(unittest.makeSuite(HashTransactionTestCase))
theSuite.addTest(unittest.makeSuite(BTreeRecnoTestCase))
theSuite.addTest(unittest.makeSuite(BTreeRecnoWithThreadFlagTestCase))
theSuite.addTest(unittest.makeSuite(BTreeDUPTestCase))
theSuite.addTest(unittest.makeSuite(HashDUPTestCase))
theSuite.addTest(unittest.makeSuite(BTreeDUPWithThreadTestCase))
theSuite.addTest(unittest.makeSuite(HashDUPWithThreadTestCase))
theSuite.addTest(unittest.makeSuite(BTreeMultiDBTestCase))
theSuite.addTest(unittest.makeSuite(HashMultiDBTestCase))
return theSuite
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(VersionTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeTestCase))
suite.addTest(unittest.makeSuite(BasicHashTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BasicHashWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BasicBTreeWithEnvTestCase))
suite.addTest(unittest.makeSuite(BasicHashWithEnvTestCase))
suite.addTest(unittest.makeSuite(BTreeTransactionTestCase))
suite.addTest(unittest.makeSuite(HashTransactionTestCase))
suite.addTest(unittest.makeSuite(BTreeRecnoTestCase))
suite.addTest(unittest.makeSuite(BTreeRecnoWithThreadFlagTestCase))
suite.addTest(unittest.makeSuite(BTreeDUPTestCase))
suite.addTest(unittest.makeSuite(HashDUPTestCase))
suite.addTest(unittest.makeSuite(BTreeDUPWithThreadTestCase))
suite.addTest(unittest.makeSuite(HashDUPWithThreadTestCase))
suite.addTest(unittest.makeSuite(BTreeMultiDBTestCase))
suite.addTest(unittest.makeSuite(HashMultiDBTestCase))
return suite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -9,7 +9,7 @@ import bsddb
import unittest
import tempfile
from test.test_support import verbose
from test_all import verbose
......@@ -159,9 +159,9 @@ class CompatibilityTestCase(unittest.TestCase):
#----------------------------------------------------------------------
def suite():
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -36,18 +36,20 @@ class dbobjTestCase(unittest.TestCase):
# call our parent classes put method with an upper case key
return apply(dbobj.DB.put, (self, key) + args, kwargs)
self.env = TestDBEnv()
self.env.open(self.db_home, db.DB_CREATE | db.DB_INIT_MPOOL)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.db = TestDB(self.env)
self.db.open(self.db_name, db.DB_HASH, db.DB_CREATE)
self.db.put('spam', 'eggs')
assert self.db.get('spam') == None, "overridden dbobj.DB.put() method failed [1]"
assert self.db.get('SPAM') == 'eggs', "overridden dbobj.DB.put() method failed [2]"
assert self.db.get('spam') == None, \
"overridden dbobj.DB.put() method failed [1]"
assert self.db.get('SPAM') == 'eggs', \
"overridden dbobj.DB.put() method failed [2]"
self.db.close()
self.env.close()
def test02_dbobj_dict_interface(self):
self.env = dbobj.DBEnv()
self.env.open(self.db_home, db.DB_CREATE | db.DB_INIT_MPOOL)
self.env.open(self.homeDir, db.DB_CREATE | db.DB_INIT_MPOOL)
self.db = dbobj.DB(self.env)
self.db.open(self.db_name+'02', db.DB_HASH, db.DB_CREATE)
# __setitem__
......@@ -64,8 +66,8 @@ class dbobjTestCase(unittest.TestCase):
#----------------------------------------------------------------------
def suite():
def test_suite():
return unittest.makeSuite(dbobjTestCase)
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -10,7 +10,7 @@ import unittest
from bsddb import dbshelve, db
from test.test_support import verbose
from test_all import verbose
#----------------------------------------------------------------------
......@@ -143,6 +143,7 @@ class DBShelveTestCase(unittest.TestCase):
key, value = rec
self.checkrec(key, value)
rec = c.next()
del c
assert count == len(d)
......@@ -162,9 +163,7 @@ class DBShelveTestCase(unittest.TestCase):
c.set('SS')
key, value = c.current()
self.checkrec(key, value)
c.close()
del c
......@@ -202,8 +201,6 @@ class BasicShelveTestCase(DBShelveTestCase):
self.d.close()
class BTreeShelveTestCase(BasicShelveTestCase):
dbtype = db.DB_BTREE
dbflags = db.DB_CREATE
......@@ -228,7 +225,8 @@ class ThreadHashShelveTestCase(BasicShelveTestCase):
class BasicEnvShelveTestCase(DBShelveTestCase):
def do_open(self):
self.homeDir = homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir = os.path.join(
os.path.dirname(sys.argv[0]), 'db_home')
try: os.mkdir(homeDir)
except os.error: pass
self.env = db.DBEnv()
......@@ -283,21 +281,21 @@ class EnvThreadHashShelveTestCase(BasicEnvShelveTestCase):
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
def test_suite():
suite = unittest.TestSuite()
theSuite.addTest(unittest.makeSuite(DBShelveTestCase))
theSuite.addTest(unittest.makeSuite(BTreeShelveTestCase))
theSuite.addTest(unittest.makeSuite(HashShelveTestCase))
theSuite.addTest(unittest.makeSuite(ThreadBTreeShelveTestCase))
theSuite.addTest(unittest.makeSuite(ThreadHashShelveTestCase))
theSuite.addTest(unittest.makeSuite(EnvBTreeShelveTestCase))
theSuite.addTest(unittest.makeSuite(EnvHashShelveTestCase))
theSuite.addTest(unittest.makeSuite(EnvThreadBTreeShelveTestCase))
theSuite.addTest(unittest.makeSuite(EnvThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(DBShelveTestCase))
suite.addTest(unittest.makeSuite(BTreeShelveTestCase))
suite.addTest(unittest.makeSuite(HashShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(ThreadHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvHashShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadBTreeShelveTestCase))
suite.addTest(unittest.makeSuite(EnvThreadHashShelveTestCase))
return theSuite
return suite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -28,7 +28,7 @@ except ImportError:
import pickle
import unittest
from test.test_support import verbose
from test_all import verbose
from bsddb import db, dbtables
......@@ -45,7 +45,8 @@ class TableDBTestCase(unittest.TestCase):
self.homeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
self.tdb = dbtables.bsdTableDB(filename='tabletest.db', dbhome='db_home', create=1)
self.tdb = dbtables.bsdTableDB(
filename='tabletest.db', dbhome=homeDir, create=1)
def tearDown(self):
self.tdb.close()
......@@ -67,7 +68,8 @@ class TableDBTestCase(unittest.TestCase):
if verbose:
self.tdb._db_print()
values = self.tdb.Select(tabname, [colname], conditions={colname: None})
values = self.tdb.Select(
tabname, [colname], conditions={colname: None})
colval = pickle.loads(values[0][colname])
assert(colval > 3.141 and colval < 3.142)
......@@ -125,7 +127,10 @@ class TableDBTestCase(unittest.TestCase):
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
try:
self.tdb.Insert(tabname, {'a': "", 'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1), 'f': "Zero"})
self.tdb.Insert(tabname,
{'a': "",
'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
'f': "Zero"})
assert 0
except dbtables.TableDBError:
pass
......@@ -136,21 +141,38 @@ class TableDBTestCase(unittest.TestCase):
except dbtables.TableDBError:
pass
self.tdb.Insert(tabname, {'a': '42', 'b': "bad", 'c': "meep", 'e': 'Fuzzy wuzzy was a bear'})
self.tdb.Insert(tabname, {'a': '581750', 'b': "good", 'd': "bla", 'c': "black", 'e': 'fuzzy was here'})
self.tdb.Insert(tabname, {'a': '800000', 'b': "good", 'd': "bla", 'c': "black", 'e': 'Fuzzy wuzzy is a bear'})
self.tdb.Insert(tabname,
{'a': '42',
'b': "bad",
'c': "meep",
'e': 'Fuzzy wuzzy was a bear'})
self.tdb.Insert(tabname,
{'a': '581750',
'b': "good",
'd': "bla",
'c': "black",
'e': 'fuzzy was here'})
self.tdb.Insert(tabname,
{'a': '800000',
'b': "good",
'd': "bla",
'c': "black",
'e': 'Fuzzy wuzzy is a bear'})
if verbose:
self.tdb._db_print()
# this should return two rows
values = self.tdb.Select(tabname, ['b', 'a', 'd'],
conditions={'e': re.compile('wuzzy').search, 'a': re.compile('^[0-9]+$').match})
conditions={'e': re.compile('wuzzy').search,
'a': re.compile('^[0-9]+$').match})
assert len(values) == 2
# now lets delete one of them and try again
self.tdb.Delete(tabname, conditions={'b': dbtables.ExactCond('good')})
values = self.tdb.Select(tabname, ['a', 'd', 'b'], conditions={'e': dbtables.PrefixCond('Fuzzy')})
values = self.tdb.Select(
tabname, ['a', 'd', 'b'],
conditions={'e': dbtables.PrefixCond('Fuzzy')})
assert len(values) == 1
assert values[0]['d'] == None
......@@ -169,14 +191,20 @@ class TableDBTestCase(unittest.TestCase):
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e'])
try:
self.tdb.Insert(tabname, {'a': "", 'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1), 'f': "Zero"})
self.tdb.Insert(tabname,
{'a': "",
'e': pickle.dumps([{4:5, 6:7}, 'foo'], 1),
'f': "Zero"})
assert 0
except dbtables.TableDBError:
pass
self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D", 'e': "E"})
self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D", 'e': "-E"})
self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-", 'e': "E-"})
self.tdb.Insert(tabname, {'a': "A", 'b': "B", 'c': "C", 'd': "D",
'e': "E"})
self.tdb.Insert(tabname, {'a': "-A", 'b': "-B", 'c': "-C", 'd': "-D",
'e': "-E"})
self.tdb.Insert(tabname, {'a': "A-", 'b': "B-", 'c': "C-", 'd': "D-",
'e': "E-"})
if verbose:
self.tdb._db_print()
......@@ -197,17 +225,25 @@ class TableDBTestCase(unittest.TestCase):
def test_CreateOrExtend(self):
tabname = "test_CreateOrExtend"
self.tdb.CreateOrExtendTable(tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
self.tdb.CreateOrExtendTable(
tabname, ['name', 'taste', 'filling', 'alcohol content', 'price'])
try:
self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no', 'is it Guinness?': 'no'})
self.tdb.Insert(tabname,
{'taste': 'crap',
'filling': 'no',
'is it Guinness?': 'no'})
assert 0, "Insert should've failed due to bad column name"
except:
pass
self.tdb.CreateOrExtendTable(tabname, ['name', 'taste', 'is it Guinness?'])
self.tdb.CreateOrExtendTable(tabname,
['name', 'taste', 'is it Guinness?'])
# these should both succeed as the table should contain the union of both sets of columns.
self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no', 'is it Guinness?': 'no'})
self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes', 'is it Guinness?': 'yes', 'name': 'Guinness'})
self.tdb.Insert(tabname, {'taste': 'crap', 'filling': 'no',
'is it Guinness?': 'no'})
self.tdb.Insert(tabname, {'taste': 'great', 'filling': 'yes',
'is it Guinness?': 'yes',
'name': 'Guinness'})
def test_CondObjs(self):
......@@ -215,21 +251,39 @@ class TableDBTestCase(unittest.TestCase):
self.tdb.CreateTable(tabname, ['a', 'b', 'c', 'd', 'e', 'p'])
self.tdb.Insert(tabname, {'a': "the letter A", 'b': "the letter B", 'c': "is for cookie"})
self.tdb.Insert(tabname, {'a': "is for aardvark", 'e': "the letter E", 'c': "is for cookie", 'd': "is for dog"})
self.tdb.Insert(tabname, {'a': "the letter A", 'e': "the letter E", 'c': "is for cookie", 'p': "is for Python"})
values = self.tdb.Select(tabname, ['p', 'e'], conditions={'e': dbtables.PrefixCond('the l')})
self.tdb.Insert(tabname, {'a': "the letter A",
'b': "the letter B",
'c': "is for cookie"})
self.tdb.Insert(tabname, {'a': "is for aardvark",
'e': "the letter E",
'c': "is for cookie",
'd': "is for dog"})
self.tdb.Insert(tabname, {'a': "the letter A",
'e': "the letter E",
'c': "is for cookie",
'p': "is for Python"})
values = self.tdb.Select(
tabname, ['p', 'e'],
conditions={'e': dbtables.PrefixCond('the l')})
assert len(values) == 2, values
assert values[0]['e'] == values[1]['e'], values
assert values[0]['p'] != values[1]['p'], values
values = self.tdb.Select(tabname, ['d', 'a'], conditions={'a': dbtables.LikeCond('%aardvark%')})
values = self.tdb.Select(
tabname, ['d', 'a'],
conditions={'a': dbtables.LikeCond('%aardvark%')})
assert len(values) == 1, values
assert values[0]['d'] == "is for dog", values
assert values[0]['a'] == "is for aardvark", values
values = self.tdb.Select(tabname, None, {'b': dbtables.Cond(), 'e':dbtables.LikeCond('%letter%'), 'a':dbtables.PrefixCond('is'), 'd':dbtables.ExactCond('is for dog'), 'c':dbtables.PrefixCond('is for'), 'p':lambda s: not s})
values = self.tdb.Select(tabname, None,
{'b': dbtables.Cond(),
'e':dbtables.LikeCond('%letter%'),
'a':dbtables.PrefixCond('is'),
'd':dbtables.ExactCond('is for dog'),
'c':dbtables.PrefixCond('is for'),
'p':lambda s: not s})
assert len(values) == 1, values
assert values[0]['d'] == "is for dog", values
assert values[0]['a'] == "is for aardvark", values
......@@ -246,14 +300,16 @@ class TableDBTestCase(unittest.TestCase):
self.tdb.Insert(tabname, {'x': 'X2', 'y':'Y2', 'z': 'Z2'})
self.tdb.Delete(tabname, conditions={'x': dbtables.PrefixCond('X')})
values = self.tdb.Select(tabname, ['y'], conditions={'x': dbtables.PrefixCond('X')})
values = self.tdb.Select(tabname, ['y'],
conditions={'x': dbtables.PrefixCond('X')})
assert len(values) == 0
def test_Modify(self):
tabname = "test_Modify"
self.tdb.CreateTable(tabname, ['Name', 'Type', 'Access'])
self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc', 'Type': 'Word', 'Access': '8'})
self.tdb.Insert(tabname, {'Name': 'Index to MP3 files.doc',
'Type': 'Word', 'Access': '8'})
self.tdb.Insert(tabname, {'Name': 'Nifty.MP3', 'Access': '1'})
self.tdb.Insert(tabname, {'Type': 'Unknown', 'Access': '0'})
......@@ -268,33 +324,45 @@ class TableDBTestCase(unittest.TestCase):
def remove_value(value):
return None
self.tdb.Modify(tabname, conditions={'Access': dbtables.ExactCond('0')}, mappings={'Access': remove_value})
self.tdb.Modify(tabname, conditions={'Name': dbtables.LikeCond('%MP3%')}, mappings={'Type': set_type})
self.tdb.Modify(tabname, conditions={'Name': dbtables.LikeCond('%')}, mappings={'Access': increment_access})
self.tdb.Modify(tabname,
conditions={'Access': dbtables.ExactCond('0')},
mappings={'Access': remove_value})
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%MP3%')},
mappings={'Type': set_type})
self.tdb.Modify(tabname,
conditions={'Name': dbtables.LikeCond('%')},
mappings={'Access': increment_access})
# Delete key in select conditions
values = self.tdb.Select(tabname, None, conditions={'Type': dbtables.ExactCond('Unknown')})
values = self.tdb.Select(
tabname, None,
conditions={'Type': dbtables.ExactCond('Unknown')})
assert len(values) == 1, values
assert values[0]['Name'] == None, values
assert values[0]['Access'] == None, values
# Modify value by select conditions
values = self.tdb.Select(tabname, None, conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
values = self.tdb.Select(
tabname, None,
conditions={'Name': dbtables.ExactCond('Nifty.MP3')})
assert len(values) == 1, values
assert values[0]['Type'] == "MP3", values
assert values[0]['Access'] == "2", values
# Make sure change applied only to select conditions
values = self.tdb.Select(tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
values = self.tdb.Select(
tabname, None, conditions={'Name': dbtables.LikeCond('%doc%')})
assert len(values) == 1, values
assert values[0]['Type'] == "Word", values
assert values[0]['Access'] == "9", values
def suite():
theSuite = unittest.TestSuite()
theSuite.addTest(unittest.makeSuite(TableDBTestCase))
return theSuite
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(TableDBTestCase))
return suite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
"""
TestCases for checking that it does not segfault when a DBEnv object
"""TestCases for checking that it does not segfault when a DBEnv object
is closed before its DB objects.
"""
import sys, os, string
from pprint import pprint
import os
import sys
import tempfile
import glob
import unittest
from bsddb import db
from test.test_support import verbose
from test_all import verbose
# We're going to get warnings in this module about trying to close the db when
# its env is already closed. Let's just ignore those.
try:
import warnings
except ImportError:
pass
else:
warnings.filterwarnings('ignore',
message='DB could not be closed in',
category=RuntimeWarning)
#----------------------------------------------------------------------
......@@ -33,7 +43,9 @@ class DBEnvClosedEarlyCrash(unittest.TestCase):
def test01_close_dbenv_before_db(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL, 0666)
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
......@@ -45,14 +57,18 @@ class DBEnvClosedEarlyCrash(unittest.TestCase):
d.close()
except db.DBError:
return
assert 0, "DB close did not raise an exception about its DBEnv being trashed"
assert 0, \
"DB close did not raise an exception about its "\
"DBEnv being trashed"
assert 0, "dbenv did not raise an exception about its DB being open"
def test02_close_dbenv_delete_db_success(self):
dbenv = db.DBEnv()
dbenv.open(self.homeDir,db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL, 0666)
dbenv.open(self.homeDir,
db.DB_INIT_CDB| db.DB_CREATE |db.DB_THREAD|db.DB_INIT_MPOOL,
0666)
d = db.DB(dbenv)
d.open(self.filename, db.DB_BTREE, db.DB_CREATE | db.DB_THREAD, 0666)
......@@ -62,8 +78,6 @@ class DBEnvClosedEarlyCrash(unittest.TestCase):
except db.DBError:
pass # good, it should raise an exception
# this should not raise an exception, it should silently skip
# the db->close() call as it can't be done safely.
del d
try:
import gc
......@@ -76,9 +90,11 @@ class DBEnvClosedEarlyCrash(unittest.TestCase):
#----------------------------------------------------------------------
def suite():
return unittest.makeSuite(DBEnvClosedEarlyCrash)
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(DBEnvClosedEarlyCrash))
return suite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -9,7 +9,7 @@ import unittest
from bsddb import db
from test.test_support import verbose
from test_all import verbose
#----------------------------------------------------------------------
......@@ -88,9 +88,9 @@ class GetReturnsNoneTestCase(unittest.TestCase):
#----------------------------------------------------------------------
def suite():
def test_suite():
return unittest.makeSuite(GetReturnsNoneTestCase)
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -16,7 +16,7 @@ except ImportError:
import unittest
from test.test_support import verbose
from test_all import verbose
from bsddb import db
......@@ -68,15 +68,24 @@ class LockingTestCase(unittest.TestCase):
print "Running %s.test02_threaded..." % self.__class__.__name__
threads = []
threads.append(Thread(target = self.theThread, args=(5, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread, args=(1, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread,
args=(5, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_READ)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
threads.append(Thread(target = self.theThread,
args=(1, db.DB_LOCK_WRITE)))
for t in threads:
t.start()
......@@ -109,16 +118,16 @@ class LockingTestCase(unittest.TestCase):
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
def test_suite():
suite = unittest.TestSuite()
if have_threads:
theSuite.addTest(unittest.makeSuite(LockingTestCase))
suite.addTest(unittest.makeSuite(LockingTestCase))
else:
theSuite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))
suite.addTest(unittest.makeSuite(LockingTestCase, 'test01'))
return theSuite
return suite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
"""
Misc TestCases
"""Miscellaneous bsddb module test cases
"""
import sys, os, string
import tempfile
from pprint import pprint
import os
import sys
import unittest
from bsddb import db
......@@ -19,25 +17,26 @@ class MiscTestCase(unittest.TestCase):
self.filename = self.__class__.__name__ + '.db'
homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home')
self.homeDir = homeDir
try: os.mkdir(homeDir)
except os.error: pass
try:
os.mkdir(homeDir)
except OSError:
pass
def tearDown(self):
try: os.remove(self.filename)
except os.error: pass
try:
os.remove(self.filename)
except OSError:
pass
import glob
files = glob.glob(os.path.join(self.homeDir, '*'))
for file in files:
os.remove(file)
def test01_badpointer(self):
dbs = dbshelve.open(self.filename)
dbs.close()
self.assertRaises(db.DBError, dbs.get, "foo")
def test02_db_home(self):
env = db.DBEnv()
# check for crash fixed when db_home is used before open()
......@@ -45,12 +44,13 @@ class MiscTestCase(unittest.TestCase):
env.open(self.homeDir, db.DB_CREATE)
assert self.homeDir == env.db_home
#----------------------------------------------------------------------
def suite():
def test_suite():
return unittest.makeSuite(MiscTestCase)
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -9,7 +9,7 @@ import unittest
from bsddb import db
from test.test_support import verbose
from test_all import verbose
#----------------------------------------------------------------------
......@@ -84,8 +84,8 @@ class SimpleQueueTestCase(unittest.TestCase):
pprint(d.stat())
assert len(d) == 0, \
"if you see this message then you need to rebuild BerkeleyDB 3.1.17 "\
"with the patch in patches/qam_stat.diff"
"if you see this message then you need to rebuild " \
"BerkeleyDB 3.1.17 with the patch in patches/qam_stat.diff"
d.close()
......@@ -160,9 +160,9 @@ class SimpleQueueTestCase(unittest.TestCase):
#----------------------------------------------------------------------
def suite():
def test_suite():
return unittest.makeSuite(SimpleQueueTestCase)
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -2,14 +2,16 @@
TestCases for exercising a Recno DB.
"""
import sys, os, string
import os
import sys
import string
import tempfile
from pprint import pprint
import unittest
from bsddb import db
from test.test_support import verbose
from test_all import verbose
#----------------------------------------------------------------------
......@@ -165,21 +167,24 @@ class SimpleRecnoTestCase(unittest.TestCase):
def test02_WithSource(self):
"""
A Recno file that is given a "backing source file" is essentially a simple ASCII
file. Normally each record is delimited by \n and so is just a line in the file,
but you can set a different record delimiter if needed.
A Recno file that is given a "backing source file" is essentially a
simple ASCII file. Normally each record is delimited by \n and so is
just a line in the file, but you can set a different record delimiter
if needed.
"""
source = os.path.join(os.path.dirname(sys.argv[0]), 'db_home/test_recno.txt')
source = os.path.join(os.path.dirname(sys.argv[0]),
'db_home/test_recno.txt')
f = open(source, 'w') # create the file
f.close()
d = db.DB()
d.set_re_delim(0x0A) # This is the default value, just checking if both int
# This is the default value, just checking if both int
d.set_re_delim(0x0A)
d.set_re_delim('\n') # and char can be used...
d.set_re_source(source)
d.open(self.filename, db.DB_RECNO, db.DB_CREATE)
data = string.split("The quick brown fox jumped over the lazy dog")
data = "The quick brown fox jumped over the lazy dog".split()
for datum in data:
d.append(datum)
d.sync()
......@@ -187,13 +192,13 @@ class SimpleRecnoTestCase(unittest.TestCase):
# get the text from the backing source
text = open(source, 'r').read()
text = string.strip(text)
text = text.strip()
if verbose:
print text
print data
print string.split(text, '\n')
print text.split('\n')
assert string.split(text, '\n') == data
assert text.split('\n') == data
# open as a DB again
d = db.DB()
......@@ -207,12 +212,13 @@ class SimpleRecnoTestCase(unittest.TestCase):
d.close()
text = open(source, 'r').read()
text = string.strip(text)
text = text.strip()
if verbose:
print text
print string.split(text, '\n')
print text.split('\n')
assert string.split(text, '\n') == string.split("The quick reddish-brown fox jumped over the comatose dog")
assert text.split('\n') == \
"The quick reddish-brown fox jumped over the comatose dog".split()
def test03_FixedLength(self):
......@@ -248,9 +254,9 @@ class SimpleRecnoTestCase(unittest.TestCase):
#----------------------------------------------------------------------
def suite():
def test_suite():
return unittest.makeSuite(SimpleRecnoTestCase)
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -16,11 +16,10 @@ except ImportError:
import unittest
from test.test_support import verbose
from test_all import verbose
from bsddb import db, dbutils
#----------------------------------------------------------------------
class BaseThreadedTestCase(unittest.TestCase):
......@@ -80,7 +79,8 @@ class ConcurrentDataStoreBase(BaseThreadedTestCase):
def test01_1WriterMultiReaders(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test01_1WriterMultiReaders..." % self.__class__.__name__
print "Running %s.test01_1WriterMultiReaders..." % \
self.__class__.__name__
threads = []
for x in range(self.writers):
......@@ -112,7 +112,8 @@ class ConcurrentDataStoreBase(BaseThreadedTestCase):
for x in range(start, stop):
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key), max_retries=12)
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
......@@ -215,7 +216,8 @@ class SimpleThreadedBase(BaseThreadedTestCase):
# create a bunch of records
for x in xrange(start, stop):
key = '%04d' % x
dbutils.DeadlockWrap(d.put, key, self.makeData(key), max_retries=12)
dbutils.DeadlockWrap(d.put, key, self.makeData(key),
max_retries=12)
if verbose and x % 100 == 0:
print "%s: records %d - %d finished" % (name, start, x)
......@@ -284,7 +286,7 @@ class HashSimpleThreaded(SimpleThreadedBase):
class ThreadedTransactionsBase(BaseThreadedTestCase):
dbopenflags = db.DB_THREAD
dbopenflags = db.DB_THREAD | db.DB_AUTO_COMMIT
envflags = (db.DB_THREAD |
db.DB_INIT_MPOOL |
db.DB_INIT_LOCK |
......@@ -306,7 +308,8 @@ class ThreadedTransactionsBase(BaseThreadedTestCase):
def test03_ThreadedTransactions(self):
if verbose:
print '\n', '-=' * 30
print "Running %s.test03_ThreadedTransactions..." % self.__class__.__name__
print "Running %s.test03_ThreadedTransactions..." % \
self.__class__.__name__
threads = []
for x in range(self.writers):
......@@ -430,9 +433,11 @@ class ThreadedTransactionsBase(BaseThreadedTestCase):
while self.doLockDetect:
time.sleep(0.5)
try:
aborted = self.env.lock_detect(db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
aborted = self.env.lock_detect(
db.DB_LOCK_RANDOM, db.DB_LOCK_CONFLICT)
if verbose and aborted:
print "deadlock: Aborted %d deadlocked transaction(s)" % aborted
print "deadlock: Aborted %d deadlocked transaction(s)" \
% aborted
except db.DBError:
pass
......@@ -467,24 +472,24 @@ class HashThreadedNoWaitTransactions(ThreadedTransactionsBase):
#----------------------------------------------------------------------
def suite():
theSuite = unittest.TestSuite()
def test_suite():
suite = unittest.TestSuite()
if have_threads:
theSuite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
theSuite.addTest(unittest.makeSuite(HashConcurrentDataStore))
theSuite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
theSuite.addTest(unittest.makeSuite(HashSimpleThreaded))
theSuite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
theSuite.addTest(unittest.makeSuite(HashThreadedTransactions))
theSuite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
theSuite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
suite.addTest(unittest.makeSuite(BTreeConcurrentDataStore))
suite.addTest(unittest.makeSuite(HashConcurrentDataStore))
suite.addTest(unittest.makeSuite(BTreeSimpleThreaded))
suite.addTest(unittest.makeSuite(HashSimpleThreaded))
suite.addTest(unittest.makeSuite(BTreeThreadedTransactions))
suite.addTest(unittest.makeSuite(HashThreadedTransactions))
suite.addTest(unittest.makeSuite(BTreeThreadedNoWaitTransactions))
suite.addTest(unittest.makeSuite(HashThreadedNoWaitTransactions))
else:
print "Threads not available, skipping thread tests."
return theSuite
return suite
if __name__ == '__main__':
unittest.main( defaultTest='suite' )
unittest.main(defaultTest='test_suite')
......@@ -35,16 +35,23 @@
/*
* Handwritten code to wrap version 3.x of the Berkeley DB library,
* written to replace a SWIG-generated file.
* written to replace a SWIG-generated file. It has since been updated
* to compile with BerkeleyDB versions 3.2 through 4.1.
*
* This module was started by Andrew Kuchling to remove the dependency
* on SWIG in a package by Gregory P. Smith <greg@electricrain.com> who
* based his work on a similar package by Robin Dunn <robin@alldunn.com>
* which wrapped Berkeley DB 2.7.x.
*
* Development of this module has now returned full circle back to
* Robin Dunn who is working in behalf of Digital Creations to complete
* the wrapping of the DB 3.x API and to build a solid unit test suite.
* Development of this module then returned full circle back to Robin Dunn
* who worked on behalf of Digital Creations to complete the wrapping of
* the DB 3.x API and to build a solid unit test suite. Robin has
* since gone on to other projects (wxPython).
*
* Gregory P. Smith <greg@electricrain.com> is once again the maintainer.
*
* Use the pybsddb-users@lists.sf.net mailing list for all questions.
* Things can change faster than the header of this file is updated.
*
* This module contains 5 types:
*
......@@ -75,12 +82,10 @@
/* --------------------------------------------------------------------- */
/* Various macro definitions */
#define PY_BSDDB_VERSION "3.4.2"
/* 40 = 4.0, 33 = 3.3; this will break if the second number is > 9 */
#define DBVER (DB_VERSION_MAJOR * 10 + DB_VERSION_MINOR)
static char *orig_rcs_id = "/Id: _db.c,v 1.48 2002/11/21 19:11:19 greg Exp /";
#define PY_BSDDB_VERSION "4.1.1"
static char *rcs_id = "$Id$";
......@@ -166,7 +171,7 @@ static PyObject* DBPermissionsError; /* EPERM */
typedef struct {
PyObject_HEAD
DB_ENV* db_env;
int flags; /* saved flags from open() */
u_int32_t flags; /* saved flags from open() */
int closed;
int getReturnsNone;
} DBEnvObject;
......@@ -176,8 +181,8 @@ typedef struct {
PyObject_HEAD
DB* db;
DBEnvObject* myenvobj; /* PyObject containing the DB_ENV */
int flags; /* saved flags from open() */
int setflags; /* saved flags from set_flags() */
u_int32_t flags; /* saved flags from open() */
u_int32_t setflags; /* saved flags from set_flags() */
int haveStat;
int getReturnsNone;
#if (DBVER >= 33)
......@@ -299,7 +304,8 @@ static int make_dbt(PyObject* obj, DBT* dbt)
what's been given, verifies that it's allowed, and then makes the DBT.
Caller should call FREE_DBT(key) when done. */
static int make_key_dbt(DBObject* self, PyObject* keyobj, DBT* key, int* pflags)
static int
make_key_dbt(DBObject* self, PyObject* keyobj, DBT* key, int* pflags)
{
db_recno_t recno;
int type;
......@@ -315,7 +321,9 @@ static int make_key_dbt(DBObject* self, PyObject* keyobj, DBT* key, int* pflags)
if (type == -1)
return 0;
if (type == DB_RECNO || type == DB_QUEUE) {
PyErr_SetString(PyExc_TypeError, "String keys not allowed for Recno and Queue DB's");
PyErr_SetString(
PyExc_TypeError,
"String keys not allowed for Recno and Queue DB's");
return 0;
}
......@@ -329,16 +337,19 @@ static int make_key_dbt(DBObject* self, PyObject* keyobj, DBT* key, int* pflags)
if (type == -1)
return 0;
if (type == DB_BTREE && pflags != NULL) {
/* if BTREE then an Integer key is allowed with the DB_SET_RECNO flag */
/* if BTREE then an Integer key is allowed with the
* DB_SET_RECNO flag */
*pflags |= DB_SET_RECNO;
}
else if (type != DB_RECNO && type != DB_QUEUE) {
PyErr_SetString(PyExc_TypeError, "Integer keys only allowed for Recno and Queue DB's");
PyErr_SetString(
PyExc_TypeError,
"Integer keys only allowed for Recno and Queue DB's");
return 0;
}
/* Make a key out of the requested recno, use allocated space so DB will
be able to realloc room for the real key if needed. */
/* Make a key out of the requested recno, use allocated space so DB
* will be able to realloc room for the real key if needed. */
recno = PyInt_AS_LONG(keyobj);
key->data = malloc(sizeof(db_recno_t));
if (key->data == NULL) {
......@@ -381,7 +392,8 @@ static int add_partial_dbt(DBT* d, int dlen, int doff) {
}
/* Callback used to save away more information about errors from the DB library. */
/* Callback used to save away more information about errors from the DB
* library. */
static char _db_errmsg[1024];
static void _db_errorCallback(const char* prefix, char* msg)
{
......@@ -393,12 +405,14 @@ static void _db_errorCallback(const char* prefix, char* msg)
static int makeDBError(int err)
{
char errTxt[2048]; /* really big, just in case... */
PyObject* errObj = NULL;
PyObject *errObj = NULL;
PyObject *errTuple = NULL;
int exceptionRaised = 0;
switch (err) {
case 0: /* successful, no error */ break;
#if (DBVER < 41)
case DB_INCOMPLETE:
#if INCOMPLETE_IS_WARNING
strcpy(errTxt, db_strerror(err));
......@@ -407,7 +421,8 @@ static int makeDBError(int err)
strcat(errTxt, _db_errmsg);
_db_errmsg[0] = 0;
}
#if PYTHON_API_VERSION >= 1010 /* if Python 2.1 or better use warning framework */
/* if Python 2.1 or better use warning framework */
#if PYTHON_API_VERSION >= 1010
exceptionRaised = PyErr_Warn(PyExc_RuntimeWarning, errTxt);
#else
fprintf(stderr, errTxt);
......@@ -418,6 +433,7 @@ static int makeDBError(int err)
errObj = DBIncompleteError;
#endif
break;
#endif /* DBVER < 41 */
case DB_KEYEMPTY: errObj = DBKeyEmptyError; break;
case DB_KEYEXIST: errObj = DBKeyExistError; break;
......@@ -455,7 +471,10 @@ static int makeDBError(int err)
strcat(errTxt, _db_errmsg);
_db_errmsg[0] = 0;
}
PyErr_SetObject(errObj, Py_BuildValue("(is)", err, errTxt));
errTuple = Py_BuildValue("(is)", err, errTxt);
PyErr_SetObject(errObj, errTuple);
Py_DECREF(errTuple);
}
return ((errObj != NULL) || exceptionRaised);
......@@ -666,13 +685,16 @@ static void
DB_dealloc(DBObject* self)
{
if (self->db != NULL) {
/* avoid closing a DB when its DBEnv has been closed out from under it */
/* avoid closing a DB when its DBEnv has been closed out from under
* it */
if (!self->myenvobj ||
(self->myenvobj && self->myenvobj->db_env)) {
(self->myenvobj && self->myenvobj->db_env))
{
MYDB_BEGIN_ALLOW_THREADS;
self->db->close(self->db, 0);
MYDB_END_ALLOW_THREADS;
#if PYTHON_API_VERSION >= 1010 /* if Python 2.1 or better use warning framework */
/* if Python 2.1 or better use warning framework */
#if PYTHON_API_VERSION >= 1010
} else {
PyErr_Warn(PyExc_RuntimeWarning,
"DB could not be closed in destructor: DBEnv already closed");
......@@ -843,7 +865,8 @@ newDBLockObject(DBEnvObject* myenv, u_int32_t locker, DBT* obj,
MYDB_BEGIN_ALLOW_THREADS;
#if (DBVER >= 40)
err = myenv->db_env->lock_get(myenv->db_env, locker, flags, obj, lock_mode, &self->lock);
err = myenv->db_env->lock_get(myenv->db_env, locker, flags, obj, lock_mode,
&self->lock);
#else
err = lock_get(myenv->db_env, locker, flags, obj, lock_mode, &self->lock);
#endif
......@@ -907,7 +930,8 @@ DB_append(DBObject* self, PyObject* args)
#if (DBVER >= 33)
static int
_db_associateCallback(DB* db, const DBT* priKey, const DBT* priData, DBT* secKey)
_db_associateCallback(DB* db, const DBT* priKey, const DBT* priData,
DBT* secKey)
{
int retval = DB_DONOTINDEX;
DBObject* secondaryDB = (DBObject*)db->app_private;
......@@ -958,13 +982,21 @@ _db_associateCallback(DB* db, const DBT* priKey, const DBT* priData, DBT* secKey
#endif
secKey->flags = DB_DBT_APPMALLOC; /* DB will free */
secKey->data = malloc(size); /* TODO, check this */
if (secKey->data) {
memcpy(secKey->data, data, size);
secKey->size = size;
retval = 0;
}
else {
PyErr_SetString(PyExc_TypeError,
"DB associate callback should return DB_DONOTINDEX or a string.");
PyErr_SetString(PyExc_MemoryError,
"malloc failed in _db_associateCallback");
PyErr_Print();
}
}
else {
PyErr_SetString(
PyExc_TypeError,
"DB associate callback should return DB_DONOTINDEX or string.");
PyErr_Print();
}
......@@ -985,11 +1017,28 @@ DB_associate(DBObject* self, PyObject* args, PyObject* kwargs)
int err, flags=0;
DBObject* secondaryDB;
PyObject* callback;
#if (DBVER >= 41)
PyObject *txnobj = NULL;
DB_TXN *txn = NULL;
char* kwnames[] = {"secondaryDB", "callback", "flags", "txn", NULL};
#else
char* kwnames[] = {"secondaryDB", "callback", "flags", NULL};
#endif
#if (DBVER >= 41)
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|iO:associate", kwnames,
&secondaryDB, &callback, &flags,
&txnobj)) {
#else
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|i:associate", kwnames,
&secondaryDB, &callback, &flags))
&secondaryDB, &callback, &flags)) {
#endif
return NULL;
}
#if (DBVER >= 41)
if (!checkTxnObj(txnobj, &txn)) return NULL;
#endif
CHECK_DB_NOT_CLOSED(self);
if (!DBObject_Check(secondaryDB)) {
......@@ -1024,10 +1073,18 @@ DB_associate(DBObject* self, PyObject* args, PyObject* kwargs)
*/
PyEval_InitThreads();
MYDB_BEGIN_ALLOW_THREADS;
#if (DBVER >= 41)
err = self->db->associate(self->db,
txn,
secondaryDB->db,
_db_associateCallback,
flags);
#else
err = self->db->associate(self->db,
secondaryDB->db,
_db_associateCallback,
flags);
#endif
MYDB_END_ALLOW_THREADS;
if (err) {
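From Python, the associate() call served by this parsing ties a secondary index to a primary database; with DBVER >= 41 an optional txn keyword is accepted as well. A usage sketch follows; the file names and the indexing rule are invented, and it assumes the callback is handed the primary key and data as strings:

    from bsddb import db

    def by_last_word(key, data):
        # return the secondary key, or db.DB_DONOTINDEX to skip this record
        if not data:
            return db.DB_DONOTINDEX
        return data.split()[-1]

    primary = db.DB()
    primary.open('people.db', db.DB_HASH, db.DB_CREATE)
    secondary = db.DB()
    secondary.open('by_last.db', db.DB_BTREE, db.DB_CREATE)

    primary.associate(secondary, by_last_word)    # txn=... also allowed on 4.1
    primary.put('1', 'Barry Warsaw')
    print secondary.get('Warsaw')                 # prints the primary record's data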
......@@ -1083,7 +1140,8 @@ _DB_consume(DBObject* self, PyObject* args, PyObject* kwargs, int consume_flag)
if (type == -1)
return NULL;
if (type != DB_QUEUE) {
PyErr_SetString(PyExc_TypeError, "Consume methods only allowed for Queue DB's");
PyErr_SetString(PyExc_TypeError,
"Consume methods only allowed for Queue DB's");
return NULL;
}
if (!checkTxnObj(txnobj, &txn))
......@@ -1107,7 +1165,8 @@ _DB_consume(DBObject* self, PyObject* args, PyObject* kwargs, int consume_flag)
retval = Py_None;
}
else if (!err) {
retval = Py_BuildValue("s#s#", key.data, key.size, data.data, data.size);
retval = Py_BuildValue("s#s#", key.data, key.size, data.data,
data.size);
FREE_DBT(key);
FREE_DBT(data);
}
......@@ -1123,7 +1182,8 @@ DB_consume(DBObject* self, PyObject* args, PyObject* kwargs, int consume_flag)
}
static PyObject*
DB_consume_wait(DBObject* self, PyObject* args, PyObject* kwargs, int consume_flag)
DB_consume_wait(DBObject* self, PyObject* args, PyObject* kwargs,
int consume_flag)
{
return _DB_consume(self, args, kwargs, DB_CONSUME_WAIT);
}
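As the type check above enforces, the consume methods only make sense for DB_QUEUE databases, where they remove and return the record at the head of the queue. A small sketch, with an invented file name and no transaction in use:

    from bsddb import db

    q = db.DB()
    q.set_re_len(40)                 # queue databases use fixed-length records
    q.open('jobs.db', db.DB_QUEUE, db.DB_CREATE)
    q.append('first job')
    q.append('second job')

    rec = q.consume()                # removes and returns the head record
    print rec                        # a key/data pair, or None when the queue is empty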
......@@ -1211,10 +1271,11 @@ DB_get(DBObject* self, PyObject* args, PyObject* kwargs)
int doff = -1;
DBT key, data;
DB_TXN *txn = NULL;
char* kwnames[] = { "key", "default", "txn", "flags", "dlen", "doff", NULL };
char* kwnames[] = {"key", "default", "txn", "flags", "dlen", "doff", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|OOiii:get", kwnames,
&keyobj, &dfltobj, &txnobj, &flags, &dlen, &doff))
&keyobj, &dfltobj, &txnobj, &flags, &dlen,
&doff))
return NULL;
CHECK_DB_NOT_CLOSED(self);
......@@ -1247,7 +1308,8 @@ DB_get(DBObject* self, PyObject* args, PyObject* kwargs)
}
else if (!err) {
if (flags & DB_SET_RECNO) /* return both key and data */
retval = Py_BuildValue("s#s#", key.data, key.size, data.data, data.size);
retval = Py_BuildValue("s#s#", key.data, key.size, data.data,
data.size);
else /* return just the data */
retval = PyString_FromStringAndSize((char*)data.data, data.size);
FREE_DBT(key);
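The keyword list above ('key', 'default', 'txn', 'flags', 'dlen', 'doff') is the whole Python-level signature of get(). A short sketch of the less obvious options, with illustrative values:

    from bsddb import db

    d = db.DB()
    d.open('demo.db', db.DB_BTREE, db.DB_CREATE)
    d.put('spam', 'a fairly long data record')

    print d.get('missing', 'n/a')           # 'default' is returned for absent keys
    print d.get('spam', dlen=6, doff=2)     # partial read: 6 bytes at offset 2
    # With DB_SET_RECNO (on a btree built with DB_RECNUM) get() returns (key, data).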
......@@ -1415,7 +1477,8 @@ DB_join(DBObject* self, PyObject* args)
CHECK_DB_NOT_CLOSED(self);
if (!PySequence_Check(cursorsObj)) {
PyErr_SetString(PyExc_TypeError, "Sequence of DBCursor objects expected");
PyErr_SetString(PyExc_TypeError,
"Sequence of DBCursor objects expected");
return NULL;
}
......@@ -1425,7 +1488,8 @@ DB_join(DBObject* self, PyObject* args)
for (x=0; x<length; x++) {
PyObject* item = PySequence_GetItem(cursorsObj, x);
if (!DBCursorObject_Check(item)) {
PyErr_SetString(PyExc_TypeError, "Sequence of DBCursor objects expected");
PyErr_SetString(PyExc_TypeError,
"Sequence of DBCursor objects expected");
free(cursors);
return NULL;
}
......@@ -1457,7 +1521,8 @@ DB_key_range(DBObject* self, PyObject* args, PyObject* kwargs)
&keyobj, &txnobj, &flags))
return NULL;
CHECK_DB_NOT_CLOSED(self);
if (!make_dbt(keyobj, &key)) /* BTree only, don't need to allow for an int key */
if (!make_dbt(keyobj, &key))
/* BTree only, don't need to allow for an int key */
return NULL;
if (!checkTxnObj(txnobj, &txn))
return NULL;
......@@ -1477,27 +1542,82 @@ DB_open(DBObject* self, PyObject* args, PyObject* kwargs)
int err, type = DB_UNKNOWN, flags=0, mode=0660;
char* filename = NULL;
char* dbname = NULL;
char* kwnames[] = { "filename", "dbname", "dbtype", "flags", "mode", NULL };
char* kwnames2[] = { "filename", "dbtype", "flags", "mode", NULL };
#if (DBVER >= 41)
PyObject *txnobj = NULL;
DB_TXN *txn = NULL;
/* with dbname */
char* kwnames[] = {
"filename", "dbname", "dbtype", "flags", "mode", "txn", NULL};
/* without dbname */
char* kwnames_basic[] = {
"filename", "dbtype", "flags", "mode", "txn", NULL};
#else
/* with dbname */
char* kwnames[] = {
"filename", "dbname", "dbtype", "flags", "mode", NULL};
/* without dbname */
char* kwnames_basic[] = {
"filename", "dbtype", "flags", "mode", NULL};
#endif
#if (DBVER >= 41)
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "z|ziiiO:open", kwnames,
&filename, &dbname, &type, &flags, &mode,
&txnobj))
#else
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "z|ziii:open", kwnames,
&filename, &dbname, &type, &flags, &mode)) {
&filename, &dbname, &type, &flags,
&mode))
#endif
{
PyErr_Clear();
type = DB_UNKNOWN; flags = 0; mode = 0660;
filename = NULL; dbname = NULL;
if (!PyArg_ParseTupleAndKeywords(args, kwargs,"z|iii:open", kwnames2,
#if (DBVER >= 41)
if (!PyArg_ParseTupleAndKeywords(args, kwargs,"z|iiiO:open",
kwnames_basic,
&filename, &type, &flags, &mode,
&txnobj))
return NULL;
#else
if (!PyArg_ParseTupleAndKeywords(args, kwargs,"z|iii:open",
kwnames_basic,
&filename, &type, &flags, &mode))
return NULL;
#endif
}
#if (DBVER >= 41)
if (!checkTxnObj(txnobj, &txn)) return NULL;
#endif
if (NULL == self->db) {
PyErr_SetObject(DBError, Py_BuildValue("(is)", 0,
"Cannot call open() twice for DB object"));
return NULL;
}
#if 0 && (DBVER >= 41)
if ((!txn) && (txnobj != Py_None) && self->myenvobj
&& (self->myenvobj->flags & DB_INIT_TXN))
{
/* If no 'txn' parameter was supplied (no DbTxn object and None was not
* explicitly passed) but we are in a transaction-ready environment:
* add DB_AUTO_COMMIT to allow older pybsddb apps that use transactions
* to work on BerkeleyDB 4.1 without needing to modify their
* DBEnv or DB open calls.
* TODO make this behaviour of the library configurable.
*/
flags |= DB_AUTO_COMMIT;
}
#endif
MYDB_BEGIN_ALLOW_THREADS;
#if (DBVER >= 41)
err = self->db->open(self->db, txn, filename, dbname, type, flags, mode);
#else
err = self->db->open(self->db, filename, dbname, type, flags, mode);
#endif
MYDB_END_ALLOW_THREADS;
if (makeDBError(err)) {
self->db = NULL;
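With DBVER >= 41, open() grows an optional 'txn' argument; in a transactional environment the alternative is to pass DB_AUTO_COMMIT in the flags (which is what the disabled block above would have added automatically). A minimal sketch, with a placeholder home directory and file name:

    from bsddb import db

    env = db.DBEnv()
    env.open('/tmp/dbhome', db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                            db.DB_INIT_LOG | db.DB_INIT_TXN)
    d = db.DB(env)

    if db.version() >= (4, 1):
        txn = env.txn_begin()
        d.open('data.db', db.DB_BTREE, db.DB_CREATE, txn=txn)   # explicit txn
        txn.commit()
        # ... or, without a handle, pass db.DB_CREATE | db.DB_AUTO_COMMIT as flags
    else:
        d.open('data.db', db.DB_BTREE, db.DB_CREATE)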
......@@ -1578,7 +1698,8 @@ DB_rename(DBObject* self, PyObject* args)
char* newname;
int err, flags=0;
if (!PyArg_ParseTuple(args, "sss|i:rename", &filename, &database, &newname, &flags))
if (!PyArg_ParseTuple(args, "sss|i:rename", &filename, &database, &newname,
&flags))
return NULL;
CHECK_DB_NOT_CLOSED(self);
......@@ -1850,7 +1971,9 @@ DB_stat(DBObject* self, PyObject* args)
MAKE_HASH_ENTRY(nkeys);
MAKE_HASH_ENTRY(ndata);
MAKE_HASH_ENTRY(pagesize);
#if (DBVER < 41)
MAKE_HASH_ENTRY(nelem);
#endif
MAKE_HASH_ENTRY(ffactor);
MAKE_HASH_ENTRY(buckets);
MAKE_HASH_ENTRY(free);
......@@ -2021,6 +2144,29 @@ DB_set_get_returns_none(DBObject* self, PyObject* args)
return PyInt_FromLong(oldValue);
}
#if (DBVER >= 41)
static PyObject*
DB_set_encrypt(DBObject* self, PyObject* args, PyObject* kwargs)
{
int err;
u_int32_t flags=0;
char *passwd = NULL;
char* kwnames[] = { "passwd", "flags", NULL };
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|i:set_encrypt", kwnames,
&passwd, &flags)) {
return NULL;
}
MYDB_BEGIN_ALLOW_THREADS;
err = self->db->set_encrypt(self->db, passwd, flags);
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
RETURN_NONE();
}
#endif /* DBVER >= 41 */
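The new DB.set_encrypt() wrapper passes 'passwd' and 'flags' straight through to DB->set_encrypt(), and has to be called before the database is opened. A sketch with a placeholder passphrase and file name (see the BerkeleyDB documentation for the exact flag semantics):

    from bsddb import db

    d = db.DB()
    if db.version() >= (4, 1):
        # DB_ENCRYPT_AES is exported by init_bsddb() below for DBVER >= 41
        d.set_encrypt('example passphrase', db.DB_ENCRYPT_AES)
    d.open('secret.db', db.DB_HASH, db.DB_CREATE)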
/*-------------------------------------------------------------- */
/* Mapping and Dictionary-like access routines */
......@@ -2033,7 +2179,8 @@ int DB_length(DBObject* self)
void* sp;
if (self->db == NULL) {
PyErr_SetObject(DBError, Py_BuildValue("(is)", 0, "DB object has been closed"));
PyErr_SetObject(DBError,
Py_BuildValue("(is)", 0, "DB object has been closed"));
return -1;
}
......@@ -2107,7 +2254,8 @@ DB_ass_sub(DBObject* self, PyObject* keyobj, PyObject* dataobj)
int flags = 0;
if (self->db == NULL) {
PyErr_SetObject(DBError, Py_BuildValue("(is)", 0, "DB object has been closed"));
PyErr_SetObject(DBError,
Py_BuildValue("(is)", 0, "DB object has been closed"));
return -1;
}
......@@ -2119,11 +2267,13 @@ DB_ass_sub(DBObject* self, PyObject* keyobj, PyObject* dataobj)
retval = -1;
else {
if (self->setflags & (DB_DUP|DB_DUPSORT))
flags = DB_NOOVERWRITE; /* dictionaries shouldn't have duplicate keys */
/* dictionaries shouldn't have duplicate keys */
flags = DB_NOOVERWRITE;
retval = _DB_put(self, NULL, &key, &data, flags);
if ((retval == -1) && (self->setflags & (DB_DUP|DB_DUPSORT))) {
/* try deleting any old record that matches and then PUT it again... */
/* try deleting any old record that matches and then PUT it
* again... */
_DB_delete(self, NULL, &key, 0);
PyErr_Clear();
retval = _DB_put(self, NULL, &key, &data, flags);
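This is the routine behind dictionary-style assignment (d[key] = value); as the comments note, on DB_DUP/DB_DUPSORT databases it deletes any existing record and re-puts rather than silently adding a duplicate. A short sketch of the mapping interface, with an invented file name:

    from bsddb import db

    d = db.DB()
    d.set_flags(db.DB_DUPSORT)
    d.open('mapping.db', db.DB_BTREE, db.DB_CREATE)

    d['spam'] = 'eggs'              # put
    d['spam'] = 'ham'               # replaces the old record, no duplicate added
    print d['spam'], len(d)         # get and DB_length
    del d['spam']                   # delete
    d.close()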
......@@ -2148,7 +2298,7 @@ DB_has_key(DBObject* self, PyObject* args)
PyObject* txnobj = NULL;
DB_TXN *txn = NULL;
if (!PyArg_ParseTuple(args,"O|O:has_key", &keyobj, &txnobj ))
if (!PyArg_ParseTuple(args,"O|O:has_key", &keyobj, &txnobj))
return NULL;
CHECK_DB_NOT_CLOSED(self);
if (!make_key_dbt(self, keyobj, &key, NULL))
......@@ -2244,11 +2394,13 @@ _DB_make_list(DBObject* self, DB_TXN* txn, int type)
case DB_BTREE:
case DB_HASH:
default:
item = Py_BuildValue("s#s#", key.data, key.size, data.data, data.size);
item = Py_BuildValue("s#s#", key.data, key.size, data.data,
data.size);
break;
case DB_RECNO:
case DB_QUEUE:
item = Py_BuildValue("is#", *((db_recno_t*)key.data), data.data, data.size);
item = Py_BuildValue("is#", *((db_recno_t*)key.data),
data.data, data.size);
break;
}
break;
......@@ -2320,7 +2472,6 @@ DB_values(DBObject* self, PyObject* args)
return _DB_make_list(self, txn, _VALUES_LIST);
}
/* --------------------------------------------------------------------- */
/* DBCursor methods */
......@@ -2433,13 +2584,18 @@ DBC_get(DBCursorObject* self, PyObject* args, PyObject *kwargs)
CLEAR_DBT(key);
CLEAR_DBT(data);
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i|ii:get", &kwnames[2],
&flags, &dlen, &doff)) {
&flags, &dlen, &doff))
{
PyErr_Clear();
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "Oi|ii:get", &kwnames[1],
&keyobj, &flags, &dlen, &doff)) {
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "Oi|ii:get",
&kwnames[1],
&keyobj, &flags, &dlen, &doff))
{
PyErr_Clear();
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OOi|ii:get", kwnames,
&keyobj, &dataobj, &flags, &dlen, &doff)) {
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OOi|ii:get",
kwnames, &keyobj, &dataobj,
&flags, &dlen, &doff))
{
return NULL;
}
}
......@@ -2763,7 +2919,8 @@ DBC_set_recno(DBCursorObject* self, PyObject* args, PyObject *kwargs)
CLEAR_DBT(key);
recno = (db_recno_t) irecno;
/* use allocated space so DB will be able to realloc room for the real key */
/* use allocated space so DB will be able to realloc room for the real
* key */
key.data = malloc(sizeof(db_recno_t));
if (key.data == NULL) {
PyErr_SetString(PyExc_MemoryError, "Key memory allocation failed");
......@@ -2924,6 +3081,82 @@ DBEnv_remove(DBEnvObject* self, PyObject* args)
RETURN_NONE();
}
#if (DBVER >= 41)
static PyObject*
DBEnv_dbremove(DBEnvObject* self, PyObject* args, PyObject* kwargs)
{
int err;
u_int32_t flags=0;
char *file = NULL;
char *database = NULL;
PyObject *txnobj = NULL;
DB_TXN *txn = NULL;
char* kwnames[] = { "file", "database", "txn", "flags", NULL };
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ss|Oi:dbremove", kwnames,
&file, &database, &txnobj, &flags)) {
return NULL;
}
if (!checkTxnObj(txnobj, &txn)) {
return NULL;
}
CHECK_ENV_NOT_CLOSED(self);
MYDB_BEGIN_ALLOW_THREADS;
err = self->db_env->dbremove(self->db_env, txn, file, database, flags);
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
RETURN_NONE();
}
static PyObject*
DBEnv_dbrename(DBEnvObject* self, PyObject* args, PyObject* kwargs)
{
int err;
u_int32_t flags=0;
char *file = NULL;
char *database = NULL;
char *newname = NULL;
PyObject *txnobj = NULL;
DB_TXN *txn = NULL;
char* kwnames[] = { "file", "database", "newname", "txn", "flags", NULL };
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "sss|Oi:dbrename", kwnames,
&file, &database, &newname, &txnobj, &flags)) {
return NULL;
}
if (!checkTxnObj(txnobj, &txn)) {
return NULL;
}
CHECK_ENV_NOT_CLOSED(self);
MYDB_BEGIN_ALLOW_THREADS;
err = self->db_env->dbrename(self->db_env, txn, file, database, newname,
flags);
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
RETURN_NONE();
}
static PyObject*
DBEnv_set_encrypt(DBEnvObject* self, PyObject* args, PyObject* kwargs)
{
int err;
u_int32_t flags=0;
char *passwd = NULL;
char* kwnames[] = { "passwd", "flags", NULL };
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s|i:set_encrypt", kwnames,
&passwd, &flags)) {
return NULL;
}
MYDB_BEGIN_ALLOW_THREADS;
err = self->db_env->set_encrypt(self->db_env, passwd, flags);
MYDB_END_ALLOW_THREADS;
RETURN_IF_ERR();
RETURN_NONE();
}
#endif /* DBVER >= 41 */
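These three DBEnv methods mirror the corresponding BerkeleyDB 4.1 C API calls, so the sketch below assumes a module built against 4.1 or newer. The home directory, file and database names are placeholders and are assumed to already exist, and the environment is assumed to be transactional:

    from bsddb import db

    env = db.DBEnv()
    env.set_encrypt('example passphrase', db.DB_ENCRYPT_AES)   # before open()
    env.open('/tmp/dbhome', db.DB_CREATE | db.DB_INIT_MPOOL | db.DB_INIT_LOCK |
                            db.DB_INIT_LOG | db.DB_INIT_TXN)

    txn = env.txn_begin()
    # rename the sub-database 'staging' inside data.db to 'live'
    env.dbrename('data.db', 'staging', 'live', txn=txn)
    # remove the sub-database 'old' from data.db
    env.dbremove('data.db', 'old', txn=txn)
    txn.commit()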
static PyObject*
DBEnv_set_cachesize(DBEnvObject* self, PyObject* args)
......@@ -3334,7 +3567,9 @@ DBEnv_lock_stat(DBEnvObject* self, PyObject* args)
#define MAKE_ENTRY(name) _addIntToDict(d, #name, sp->st_##name)
#if (DBVER < 41)
MAKE_ENTRY(lastid);
#endif
MAKE_ENTRY(nmodes);
#if (DBVER >= 32)
MAKE_ENTRY(maxlocks);
......@@ -3399,7 +3634,8 @@ DBEnv_log_archive(DBEnvObject* self, PyObject* args)
item = PyString_FromString (*log_list);
if (item == NULL) {
Py_DECREF(list);
PyErr_SetString(PyExc_MemoryError, "List item creation failed");
PyErr_SetString(PyExc_MemoryError,
"List item creation failed");
list = NULL;
break;
}
......@@ -3612,6 +3848,9 @@ static PyMethodDef DB_methods[] = {
{"rename", (PyCFunction)DB_rename, METH_VARARGS},
{"set_bt_minkey", (PyCFunction)DB_set_bt_minkey, METH_VARARGS},
{"set_cachesize", (PyCFunction)DB_set_cachesize, METH_VARARGS},
#if (DBVER >= 41)
{"set_encrypt", (PyCFunction)DB_set_encrypt, METH_VARARGS|METH_KEYWORDS},
#endif
{"set_flags", (PyCFunction)DB_set_flags, METH_VARARGS},
{"set_h_ffactor", (PyCFunction)DB_set_h_ffactor, METH_VARARGS},
{"set_h_nelem", (PyCFunction)DB_set_h_nelem, METH_VARARGS},
......@@ -3676,6 +3915,11 @@ static PyMethodDef DBEnv_methods[] = {
{"close", (PyCFunction)DBEnv_close, METH_VARARGS},
{"open", (PyCFunction)DBEnv_open, METH_VARARGS},
{"remove", (PyCFunction)DBEnv_remove, METH_VARARGS},
#if (DBVER >= 41)
{"dbremove", (PyCFunction)DBEnv_dbremove, METH_VARARGS|METH_KEYWORDS},
{"dbrename", (PyCFunction)DBEnv_dbrename, METH_VARARGS|METH_KEYWORDS},
{"set_encrypt", (PyCFunction)DBEnv_set_encrypt, METH_VARARGS|METH_KEYWORDS},
#endif
{"set_cachesize", (PyCFunction)DBEnv_set_cachesize, METH_VARARGS},
{"set_data_dir", (PyCFunction)DBEnv_set_data_dir, METH_VARARGS},
#if (DBVER >= 32)
......@@ -3866,7 +4110,8 @@ DB_construct(PyObject* self, PyObject* args, PyObject* kwargs)
int flags = 0;
char* kwnames[] = { "dbEnv", "flags", NULL};
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Oi:DB", kwnames, &dbenvobj, &flags))
if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|Oi:DB", kwnames,
&dbenvobj, &flags))
return NULL;
if (dbenvobj == Py_None)
dbenvobj = NULL;
......@@ -4036,7 +4281,8 @@ DL_EXPORT(void) init_bsddb(void)
#if (DBVER >= 33)
_addIntToDict(d, "DB_LOCK_CONFLICT", 0); /* docs say to use zero instead */
/* docs say to use zero instead */
_addIntToDict(d, "DB_LOCK_CONFLICT", 0);
#else
ADD_INT(d, DB_LOCK_CONFLICT);
#endif
......@@ -4111,7 +4357,12 @@ DL_EXPORT(void) init_bsddb(void)
ADD_INT(d, DB_APPEND);
ADD_INT(d, DB_BEFORE);
ADD_INT(d, DB_CACHED_COUNTS);
#if (DBVER >= 41)
_addIntToDict(d, "DB_CHECKPOINT", 0);
#else
ADD_INT(d, DB_CHECKPOINT);
ADD_INT(d, DB_CURLSN);
#endif
#if (DBVER >= 33)
ADD_INT(d, DB_COMMIT);
#endif
......@@ -4119,7 +4370,6 @@ DL_EXPORT(void) init_bsddb(void)
#if (DBVER >= 32)
ADD_INT(d, DB_CONSUME_WAIT);
#endif
ADD_INT(d, DB_CURLSN);
ADD_INT(d, DB_CURRENT);
#if (DBVER >= 33)
ADD_INT(d, DB_FAST_STAT);
......@@ -4159,7 +4409,11 @@ DL_EXPORT(void) init_bsddb(void)
ADD_INT(d, DB_DONOTINDEX);
#endif
#if (DBVER >= 41)
_addIntToDict(d, "DB_INCOMPLETE", 0);
#else
ADD_INT(d, DB_INCOMPLETE);
#endif
ADD_INT(d, DB_KEYEMPTY);
ADD_INT(d, DB_KEYEXIST);
ADD_INT(d, DB_LOCK_DEADLOCK);
......@@ -4184,6 +4438,14 @@ DL_EXPORT(void) init_bsddb(void)
ADD_INT(d, DB_NOPANIC);
#endif
#if (DBVER >= 41)
ADD_INT(d, DB_ENCRYPT_AES);
ADD_INT(d, DB_AUTO_COMMIT);
#else
/* allow BerkeleyDB 4.1-aware apps to run on older versions */
_addIntToDict(d, "DB_AUTO_COMMIT", 0);
#endif
ADD_INT(d, EINVAL);
ADD_INT(d, EACCES);
ADD_INT(d, ENOSPC);
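Since DB_AUTO_COMMIT is exported as 0 on pre-4.1 builds, applications can OR it into their open flags unconditionally and stay compatible with both library generations. A tiny sketch of the pattern this shim enables, assuming an already-opened transactional DBEnv named 'env' (a placeholder, not defined here):

    from bsddb import db

    d = db.DB(env)
    # transactionally protected on BerkeleyDB 4.1+, a harmless zero flag before that
    d.open('data.db', db.DB_BTREE, db.DB_CREATE | db.DB_AUTO_COMMIT)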
......@@ -4197,7 +4459,7 @@ DL_EXPORT(void) init_bsddb(void)
/* The base exception class is DBError */
DBError = PyErr_NewException("bsddb3._db.DBError", NULL, NULL);
DBError = PyErr_NewException("bsddb._db.DBError", NULL, NULL);
PyDict_SetItemString(d, "DBError", DBError);
/* Some magic to make DBNotFoundError derive from both DBError and
......@@ -4210,7 +4472,7 @@ DL_EXPORT(void) init_bsddb(void)
/* All the rest of the exceptions derive only from DBError */
#define MAKE_EX(name) name = PyErr_NewException("bsddb3._db." #name, DBError, NULL); \
#define MAKE_EX(name) name = PyErr_NewException("bsddb._db." #name, DBError, NULL); \
PyDict_SetItemString(d, #name, name)
#if !INCOMPLETE_IS_WARNING
......@@ -4246,9 +4508,6 @@ DL_EXPORT(void) init_bsddb(void)
/* Check for errors */
if (PyErr_Occurred()) {
PyErr_Print();
Py_FatalError("can't initialize module _db");
Py_FatalError("can't initialize module _bsddb");
}
}