Commit ae54c563 authored by Levin Zimmermann, committed by Kirill Smelkov

wcfs: Move zurl filter to lib/zodb

The WCFS mountpoint of any ZODB storage must be a unique, persistent
and repeatable hash: any client which uses the same storage must always
calculate the same WCFS mountpoint, independent of client-only
parameters such as credentials. The WCFS mountpoint calculation must
therefore be robust for all supported ZODB storage types (at least NEO,
ZEO and filestorage).
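
To give an idea of what this means, here is a minimal sketch of such a
derivation, assuming SHA1 over the normalized zurl as _mntpt_4zurl does
in the diff below (the /dev/shm/wcfs prefix is only an assumption of
the sketch, not part of this patch):

    # sketch only, not the actual implementation
    import hashlib

    def mntpt_sketch(zurl):
        # clients that reduce the same storage to the same normalized
        # zurl obtain the same digest, and therefore the same mountpoint
        return '/dev/shm/wcfs/' + hashlib.sha1(zurl.encode('utf-8')).hexdigest()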

It was recently decided [1] that, in order to provide this robustness,
the WCFS mountpoint calculation should filter the parsed URI and drop
the parts which prevent repeatability/persistence across different
clients (i.e. parts which can differ between clients even though the
same storage is accessed). To make this filtering implementation easier
to read and wcfs/__init__.py less dense, the first step is to move the
zurl filtering ("normalization") into lib/zodb.py. This also makes
sense because the normalization can be regarded as a general zodb tool
which may be useful for other software that uses zodburi.
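
For example, with the filter in place, credentials are dropped, so two
clients which access the same storage with different credentials still
agree on one normalized zurl (a sketch of intended usage; note that the
canonical reordering of masters, shown in the code comment below, is
not yet implemented by this move):

    from wendelin.lib.zodb import zurl_normalize_main

    z1 = 'neos://ca=zzz@abc:1/cluster'  # client 1, its own certificate
    z2 = 'neos://ca=yyy@abc:1/cluster'  # client 2, different certificate
    # both reduce to the same main zurl: neos://abc:1/cluster
    assert zurl_normalize_main(z1) == zurl_normalize_main(z2)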

[1] nexedi/neoppod!18 (comment 184671)

/reviewed-by @kirr
/reviewed-on nexedi/wendelin.core!17
parent cc33d610
--- a/lib/zodb.py
+++ b/lib/zodb.py
@@ -32,6 +32,7 @@ from weakref import WeakSet
 import gc
 from six.moves.urllib import parse as urlparse
 import socket
+from six.moves.urllib.parse import urlsplit, urlunsplit
 import pkg_resources
@@ -432,3 +433,34 @@ def _is_ipv6(host):
     except socket.error:
         return False
     return True
+
+# zurl_normalize_main returns main part of zurl in its canonical form.
+#
+# The main part of a zurl identifies particular ZODB database without
+# client-side options for how to open and access it. The main part also
+# does not include secrets.
+#
+# The following invariant is true:
+#
+#   zurl_normalize_main(zurl₁) = zurl_normalize_main(zurl₂)
+#
+#                           ⇕
+#
+#         zurl₁ and zurl₂ point to the same storage
+#
+#
+# Example:
+#
+#   neos://ca=zzz@def:2,abc:1/cluster  ->  neos://abc:1,def:2/cluster
+def zurl_normalize_main(zurl):
+    # remove credentials from zurl.
+    # The same database can be accessed from different clients with different
+    # credentials, but we want to map them all to the same single WCFS
+    # instance.
+    scheme, netloc, path, query, frag = urlsplit(zurl)
+    if '@' in netloc:
+        netloc = netloc[netloc.index('@')+1:]
+    zurl = urlunsplit((scheme, netloc, path, query, frag))
+    return zurl
--- a/wcfs/__init__.py
+++ b/wcfs/__init__.py
@@ -79,8 +79,8 @@ from golang.gcompat import qq
 from persistent import Persistent
 from zodbtools.util import ashex as h
-from six.moves.urllib.parse import urlsplit, urlunsplit
+from wendelin.lib.zodb import zurl_normalize_main
 from .client._wcfs import \
     PyWCFS as _WCFS, \
     PyWatchLink as WatchLink \
@@ -499,14 +499,10 @@ def _wcfs_exe():
 #
 # it also makes sure the mountpoint exists.
 def _mntpt_4zurl(zurl):
-    # remove credentials from zurl.
-    # The same database can be accessed from different clients with different
-    # credentials, but we want to map them all to the same single WCFS
-    # instance.
-    scheme, netloc, path, query, frag = urlsplit(zurl)
-    if '@' in netloc:
-        netloc = netloc[netloc.index('@')+1:]
-    zurl = urlunsplit((scheme, netloc, path, query, frag))
+    # normalize zurl so that even if we have e.g. two neos:// urls coming
+    # with different paths to ssl keys, or with different order in the list of
+    # masters, we still have them associated with the same wcfs mountpoint.
+    zurl = zurl_normalize_main(zurl)
     m = hashlib.sha1()
     m.update(zurl)
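
With the helper now living in lib/zodb.py it can be used by any
zodburi-based software directly, e.g.:

    from wendelin.lib.zodb import zurl_normalize_main

    print(zurl_normalize_main('neos://ca=zzz@def:2,abc:1/cluster'))
    # -> neos://def:2,abc:1/cluster
    # (credentials stripped; the reordering of masters shown in the
    #  docstring example is not part of this particular move)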