Commit 2c0968e4 authored by Levin Zimmermann, committed by Kirill Smelkov

lib/zodb: Update NEO URI format to be in sync with upstream NEO

NEO/go and NEO/py URI formats diverged over time:

- neo@8c974485

However, with nexedi/neoppod!21 a
common solution was found. With neo!7 the NEO/go and NEO/py
URI formats are in sync again. We therefore need to update
'wendelin.core' to support the finally agreed-on URI format.
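
For reference, the URI layout changes as follows (this is the before/after
of the format comment in zstor_2zurl, as updated by the diff below):

    old:  neo(s)://[<credentials>@]<master>/<cluster>
    new:  neo(s)://<cluster>@<master>?[<credentials>]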

/reviewed-by @kirr
/reviewed-on nexedi/wendelin.core!28
parent 921ad362
@@ -513,7 +513,7 @@ def test_zstor_2zurl(tmpdir, neo_ssl_dict):
     # sslp is the ssl encryption uri part of an encrypted NEO node
     q = quote_plus
-    sslp = ";".join(("%s=%s" % (q(k), q(v)) for k, v in sorted(neo_ssl_dict.items())))
+    sslp = "&".join(("%s=%s" % (q(k), q(v)) for k, v in sorted(neo_ssl_dict.items())))
     _ = assert_zurl_is_correct
     _(fs1("test.fs"), "file://%s/test.fs" % tmpdir)  # FileStorage
@@ -523,13 +523,13 @@ def test_zstor_2zurl(tmpdir, neo_ssl_dict):
     _(zeo("test", ("127.0.0.1", 1234)), "zeo://127.0.0.1:1234?storage=test")  # + non-default storage name
     _(zeo("1", ("::1", 1234)), "zeo://[::1]:1234")  # ZEO/ip6
     _(zeo("test", ("::1", 1234)), "zeo://[::1]:1234?storage=test")  # + non-default storage name
-    _(neo("test", "127.0.0.1:1234"), "neo://127.0.0.1:1234/test")  # NEO/ip4
-    _(neo("test", "127.0.0.1:1234", 1), "neos://%s@127.0.0.1:1234/test" % sslp)  # + ssl
-    _(neo("test", "[::1]:1234"), "neo://[::1]:1234/test")  # NEO/ip6
-    _(neo("test", "[::1]:1234", 1), "neos://%s@[::1]:1234/test" % sslp)  # + ssl
+    _(neo("test", "127.0.0.1:1234"), "neo://test@127.0.0.1:1234")  # NEO/ip4
+    _(neo("test", "127.0.0.1:1234", 1), "neos://test@127.0.0.1:1234?%s" % sslp)  # + ssl
+    _(neo("test", "[::1]:1234"), "neo://test@[::1]:1234")  # NEO/ip6
+    _(neo("test", "[::1]:1234", 1), "neos://test@[::1]:1234?%s" % sslp)  # + ssl
     _(neo("test", "[::1]:1234\n[::2]:1234"),  # + 2 master nodes
       # Master order is not specified, so we have 2 possible/acceptable zurl
-      "neo://[::1]:1234,[::2]:1234/test", "neo://[::2]:1234,[::1]:1234/test")
+      "neo://test@[::1]:1234,[::2]:1234", "neo://test@[::2]:1234,[::1]:1234")
     _(demo(zeo("base", ("1.2.3.4", 5)),  # DemoStorage
            fs1("delta.fs")), "demo:(zeo://1.2.3.4:5?storage=base)/(file://%s/delta.fs)" % tmpdir)
@@ -551,13 +551,18 @@ def test_zstor_2zurl(tmpdir, neo_ssl_dict):
         # ZEO
         ("zeo://localhost:9001", "zeo://localhost:9001"),
         # NEO
-        ("neo://127.0.0.1:1234/cluster", "neo://127.0.0.1:1234/cluster"),
+        ("neo://cluster@127.0.0.1:1234", "neo://cluster@127.0.0.1:1234"),
         # > 1 master nodes \w different order
-        ("neo://abc:1,def:2/cluster", "neo://abc:1,def:2/cluster"),
-        ("neo://def:2,abc:1/cluster", "neo://abc:1,def:2/cluster"),
+        ("neo://cluster@abc:1,def:2", "neo://cluster@abc:1,def:2"),
+        ("neo://cluster@def:2,abc:1", "neo://cluster@abc:1,def:2"),
         # Different SSL paths
-        ("neos://ca=a&key=b&cert=c@xyz:1/cluster", "neos://xyz:1/cluster"),
-        ("neos://ca=α&key=β&cert=γ@xyz:1/cluster", "neos://xyz:1/cluster"),
+        ("neos://cluster@xyz:1?ca=a&key=b&cert=c", "neos://cluster@xyz:1"),
+        ("neos://cluster@xyz:1?ca=α&key=β&cert=γ", "neos://cluster@xyz:1"),
+        # neo:// with anything SSL-related in query -> neos://
+        ("neo://cluster@xyz:1?cert=c", "neos://cluster@xyz:1"),
+        # any order of options should result in the same normalized URI
+        ("neo://cluster@xyz:1?a=1&c=10&b=2", "neo://cluster@xyz:1?a=1&b=2&c=10"),
+        ("neo://cluster@xyz:1?b=2&a=1&c=10", "neo://cluster@xyz:1?a=1&b=2&c=10"),
     ],
 )
 def test_zurl_normalize_main(zurl, zurl_norm_ok):
def test_zurl_normalize_main(zurl, zurl_norm_ok):
@@ -389,15 +389,14 @@ def zstor_2zurl(zstor):
     # NEO
     if ztype == "neo.client.Storage.Storage":
-        # neo(s)://[<credentials>@]<master>/<cluster>
+        # neo(s)://<cluster>@<master>?[<credentials>]
         app = zstor.app
         if not app.ssl:
             u = "neo://"
         else:
-            q = urlparse.quote_plus
             u = "neos://"
-            ca, cert, key = app.ssl_credentials  # .ssl_credentials depend on kirr's patch
-            u += "ca=%s;cert=%s;key=%s@" % (q(ca), q(cert), q(key))
+        u += "%s@" % app.name
         masterv = app.nm.getMasterList()
         if len(masterv) == 0:
@@ -413,7 +412,11 @@ def zstor_2zurl(zstor):
             master_list.append("%s:%s" % (host, port))
         u += ",".join(master_list)
-        u += "/%s" % app.name
+        if app.ssl:
+            q = urlparse.quote_plus
+            ca, cert, key = app.ssl_credentials  # .ssl_credentials depend on kirr's patch
+            u += "?ca=%s&cert=%s&key=%s" % (q(ca), q(cert), q(key))
         return u
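
As a concrete illustration of the zurl this now produces: an SSL-enabled NEO
storage with cluster name "test" and master 127.0.0.1:1234 is rendered as
neos://test@127.0.0.1:1234?ca=...&cert=...&key=... (cf. the test above), with
each credential path escaped via quote_plus, e.g. quote_plus("/path/ca.crt")
gives "%2Fpath%2Fca.crt" (the path here is made up for illustration).
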
@@ -453,7 +456,7 @@ def _is_ipv6(host):
 #
 # Example:
 #
-# neos://ca=zzz@def:2,abc:1/cluster -> neos://abc:1,def:2/cluster
+# neos://cluster@def:2,abc:1?ca=zzz -> neos://cluster@abc:1,def:2
 def zurl_normalize_main(zurl):
     scheme, netloc, path, query, frag = urlsplit(zurl)
     try:
@@ -481,14 +484,31 @@ def _znormalize_neo(scheme, netloc, path, query, frag):
     # The same database can be accessed from different clients with different
     # credentials, but we want to map them all to the same single WCFS
     # instance.
-    if "@" in netloc:
-        netloc = netloc[netloc.index("@") + 1 :]
+    q = urlparse.parse_qs(query)
+    for k in ("ca", "cert", "key"):
+        # NOTE: We only enforce SSL in case ca/cert/key are a non-empty
+        #       string - this means "?ca=&cert=&key=" is interpreted as !SSL.
+        if q.pop(k, 0):
+            # Ensure we use 'neos' scheme (instead of 'neo' scheme),
+            # otherwise a normalized URL with 'neo' scheme but provided
+            # ca/cert/key is different than a URL with 'neos' scheme,
+            # although both point to the same database.
+            scheme = "neos"
+    # Explicitly sort query before reassembling into string to insure
+    # parameter order of input URI doesn't impact normalized URI.
+    query = urlparse.urlencode(tuple((k, q[k]) for k in sorted(q.keys())), doseq=True)
     # Sort multiple master nodes: if a NEO cluster has multiple master
     # nodes, there is no agreed on order in which the master node
     # addresses appear in the netloc. In order to insure we always
     # get the same mountpoint among different clients we explicitly
     # sort the master node addr order.
+    if "@" in netloc:
+        i = netloc.index("@") + 1
+        name, netloc = netloc[:i], netloc[i:]
+    else:
+        name = ""
     netloc = ",".join(sorted(netloc.split(',')))
+    netloc = "%s%s" % (name, netloc)
     return (scheme, netloc, path, query, frag)
 _znormalizer('neo', _znormalize_neo)
 _znormalizer('neos', _znormalize_neo)
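
For illustration only, here is a small self-contained sketch of the same
normalization steps using Python 3's urllib.parse directly (the helper name
normalize_neo is made up for this note; the real logic is _znormalize_neo above):

    from urllib.parse import urlsplit, urlunsplit, parse_qs, urlencode

    def normalize_neo(zurl):
        scheme, netloc, path, query, frag = urlsplit(zurl)
        # drop ssl credentials from the query; their presence forces the neos scheme
        q = parse_qs(query)
        for k in ("ca", "cert", "key"):
            if q.pop(k, None):
                scheme = "neos"
        # sort the remaining options so their order does not affect the result
        query = urlencode([(k, q[k]) for k in sorted(q)], doseq=True)
        # keep the cluster name before '@'; sort the master addresses after it
        name, _, masters = netloc.rpartition("@")
        netloc = (name + "@" if name else "") + ",".join(sorted(masters.split(",")))
        return urlunsplit((scheme, netloc, path, query, frag))

    # example from the comment above zurl_normalize_main:
    assert normalize_neo("neos://cluster@def:2,abc:1?ca=zzz") == "neos://cluster@abc:1,def:2"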