# This file describes how to use the NEO Importer storage backend,
# which is the recommended way to import the data of an existing Zope database
# into a NEO cluster.
#
# Note that the 'neomigrate' command is another way to migrate to NEO but:
# - the database is unusable until the migration is finished;
# - it cannot merge databases at mountpoints;
# - it cannot resume an interrupted migration;
# - it does not preserve metadata specific to "undo" transactions
#   (which would then behave like normal transactions);
# - it only supports conversion from FileStorage;
# - it is slower.
# The only advantage of 'neomigrate' over Importer is that data can be imported
# directly to a NEO cluster with replicas or several storage nodes.
# The Importer backend can only be used with a single storage node.
#
# Here is how to proceed once this file is ready:
# 1. Restart your ZODB clients so that they connect to the new NEO cluster
#    (not started yet); see the client configuration sketch after this list.
# 2. Start the NEO cluster and run the 'neoctl -a <admin> start' command.
# 3. Your Zope applications should work again and the background migration of
#    data starts automatically. The only downtime is the time it takes you to
#    perform the first 2 steps. The only limitations in this mode are that:
#    - pack is not supported
#    - IStorage.history() is not implemented (this is used, for example, by
#      the history tab in the Zope Management Interface)
# 4. A warning message reporting that "All data are imported"
#    is emitted to the storage node's log when the migration is finished.
#    This can take days with big databases.
# The following steps can be scheduled any time after migration is over,
# at your convenience:
# 5. Change NEO configuration to stop using Importer backend.
# 6. Stop clients.
# 7. Restart NEO cluster.
# 8. Start clients. This was the second, very short, downtime.
# 9. Optionally, add storage nodes and balance partitions.
#
# Data are migrated even if your ZODB clients are stopped.
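#
# For step 1, a minimal sketch of a client-side zope.conf stanza, assuming a
# single master node listening on 127.0.0.1:2051 and a cluster named 'main'
# (both values are illustrative, adjust to your setup):
#   %import neo.client
#   <zodb_db main>
#     <NEOStorage>
#       master_nodes 127.0.0.1:2051
#       name main
#     </NEOStorage>
#     mount-point /
#   </zodb_db>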

# The first section describes the destination NEO storage.
# See neo.conf for description of parameters.
[neo]
# Once the migration is finished, restart the NEO storage node so that it uses
# the settings below directly (instead of adapter=Importer & database=/path_to_this_file).
adapter=MySQL
database=neo
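
# In other words, while the migration is running, the storage node's own
# configuration contains roughly the following instead (the path is illustrative):
#   adapter=Importer
#   database=/path/to/importer.conf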

# The other sections are for source databases.
[root]
# Example with FileStorage, but this can be any other storage type.
# Using ZEO as a source is possible but less efficient; if NEO opens the
# FileStorage DBs directly instead, the ZEO servers must be stopped.
# Note that NEO uses the 'new_oid' method to get the last OID, which is why the
# source DB can't be opened read-only. NEO never modifies a FileStorage DB.
storage=
  <filestorage>
    path /path/to/root.fs
  </filestorage>
# (leading spaces indicate value continuation)
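
# A sketch of the same section with a ZEO source instead, assuming a ZEO server
# at localhost:8100 and that your ZODB version resolves the <zeoclient> section
# type (kept commented out here, since only one 'storage' value must be active):
#storage=
#  <zeoclient>
#    server localhost:8100
#  </zeoclient>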

# This file can stop here if your source DB is not split into several databases.
# Otherwise, you need to describe the mountpoints and the other ZODBs.
# OID mapping can't be changed once the NEO cluster is started with this
# configuration.

# Mountpoints for this ZODB (see the use case at the end of this file).
# <section_name>=<oid>
foo=421
bar=666

# The following sections must define the 'oid' parameter.
[foo]
# Any reference to oid 421 in 'root' is changed to point to oid 123 of 'foo'.
# Of course, the original oid 421 in 'root' will become unreferenced.
oid=123

storage=
  <filestorage>
    path /path/to/foo.fs
  </filestorage>
# References to oid 1000 in 'foo' are changed to point to oid 2000 of 'baz'
# (see the [baz] section below).
baz=1000

[bar]
oid=4567
storage=
  <filestorage>
    path /path/to/bar.fs
  </filestorage>

[baz]
oid=2000
storage=
  <filestorage>
    path /path/to/baz.fs
  </filestorage>


## Case of several databases linked with MountedObject objects
#
# MountedObject is provided by ZODBMountPoint Zope product.
# It relies on IAcquirer and ITraversable to fetch the real object.
#
# Given the following multi-base:
# - in 'foo', /a/b is the real object
# - in 'root', /c/d is a MountedObject object configured to point to
#   /a/b in 'foo'
#
# So you need to get the oid of /a/b in 'foo':
#   unrestrictedTraverse("/a/b")._p_oid
# which equals 123 in the above example
#
# And that of /c/d in 'root':
#   unrestrictedTraverse("/c").__dict__["d"]._p_oid -> 421
# The way to retrieve the mount point depends on the container type.
# For a BTreeFolder2, it would be: c._tree["d"]._p_oid
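#
# Note that _p_oid is an 8-byte string, whereas this file uses plain integers
# (as in 'foo=421' above). A conversion sketch, assuming the same Zope debug
# context as the snippets above:
#   from ZODB.utils import u64
#   u64(unrestrictedTraverse("/a/b")._p_oid)   # -> 123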