Ophélie Gagnard / slapos · Commits

Commit 747ce243, authored May 09, 2022 by Jérome Perrin

    Update Release Candidate

Parents: 1a8746af 19498bbe

Showing 12 changed files with 1104 additions and 2936 deletions (+1104, -2936)
component/fish-shell/buildout.cfg                           +2    -2
component/theia/buildout.hash.cfg                           +1    -1
component/theia/download-plugins.cfg                        +1    -1
component/theia/yarn.lock                                   +957  -858
setup.py                                                    +1    -1
slapos/recipe/redis/MyRedis2410.py                          +0    -2005
slapos/recipe/redis/__init__.py                             +29   -25
slapos/test/recipe/test_redis.py                            +80   -0
software/beremiz-ide/buildout.hash.cfg                      +1    -1
software/beremiz-ide/instance-beremiz-test.cfg.jinja2.in    +5    -1
software/theia/buildout.hash.cfg                            +1    -1
software/theia/python-language-server-requirements.txt      +26   -40
component/fish-shell/buildout.cfg (view file @ 747ce243)

@@ -14,8 +14,8 @@ extends =
 [fish-shell]
 recipe = slapos.recipe.cmmi
 shared = true
-url = https://github.com/fish-shell/fish-shell/releases/download/3.2.2/fish-3.2.2.tar.xz
-md5sum = 606253699ce41991b03a93bcc6047d51
+url = https://github.com/fish-shell/fish-shell/releases/download/3.4.1/fish-3.4.1.tar.xz
+md5sum = 80733d30a14ffa50bf48cce96296aa7a
 configure-command = ${cmake:location}/bin/cmake
 configure-options =
   -DCMAKE_INSTALL_PREFIX=${:location}
component/theia/buildout.hash.cfg (view file @ 747ce243)

@@ -19,4 +19,4 @@ md5sum = 8157c22134200bd862a07c6521ebf799
 [yarn.lock]
 _update_hash_filename_ = yarn.lock
-md5sum = 7c6a0103f9b07cf51940f25b8e3a5730
+md5sum = b1012625be07ad6a3daf27b9ed6004f0
component/theia/download-plugins.cfg (view file @ 747ce243)

@@ -75,7 +75,7 @@ urls = vscode-bat https://open-vsx.org/api/vscode/bat/1.62.3/file/vscode.bat-1.6
   redhat-java https://open-vsx.org/api/redhat/java/0.61.0/file/redhat.java-0.61.0.vsix 72e548e2845e1ff655f28111558d6942
   vscjava-vscode-java-test https://open-vsx.org/api/vscjava/vscode-java-test/0.26.0/file/vscjava.vscode-java-test-0.26.0.vsix fd63da5537a4bee1d3ceaae0fa6bf419
   ms-python-python https://open-vsx.org/api/ms-python/python/2020.9.112786/file/ms-python.python-2020.9.112786.vsix c64b79fa822418e07b6d0f57b8838b44
-  perrinjerome-vscode-zc-buildout https://open-vsx.org/api/perrinjerome/vscode-zc-buildout/0.7.0/file/perrinjerome.vscode-zc-buildout-0.7.0.vsix 7598fa3c1c3701cb2da5c330fe996ff1
+  perrinjerome-vscode-zc-buildout https://open-vsx.org/api/perrinjerome/vscode-zc-buildout/0.7.1/file/perrinjerome.vscode-zc-buildout-0.7.1.vsix 2f1904f6e358c854cc98fff1b9614d0e
   jebbs-plantuml https://open-vsx.org/api/jebbs/plantuml/2.14.0/file/jebbs.plantuml-2.14.0.vsix 13fa7cbd14a30ecca166c41a307c7a73
   rafaelmaiolla-diff https://open-vsx.org/api/rafaelmaiolla/diff/0.0.1/file/rafaelmaiolla.diff-0.0.1.vsix 1d8f868bc19b7d703c1be2bf99c4c7f9
   perrinjerome-git-commit-syntax https://open-vsx.org/api/perrinjerome/git-commit-syntax/0.0.1/file/perrinjerome.git-commit-syntax-0.0.1.vsix 46625f2f05e244911c2cb9cc5032c0ef
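Each entry above pairs a plugin download URL with the md5 checksum the build expects. Purely as an illustration (not part of this commit), a minimal Python sketch of how such a url/checksum pair could be verified locally; the local file name is a hypothetical example:

import hashlib

def md5_of(path, chunk_size=2 ** 20):
    # Hash the file in chunks so large .vsix archives need not fit in memory.
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()

# Hypothetical local copy of the plugin updated in this hunk, checked against
# the checksum listed above.
expected = '2f1904f6e358c854cc98fff1b9614d0e'
actual = md5_of('perrinjerome.vscode-zc-buildout-0.7.1.vsix')
assert actual == expected, 'checksum mismatch: %s' % actual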
component/theia/yarn.lock (view file @ 747ce243)

This source diff could not be displayed because it is too large. You can view the blob instead.
setup.py (view file @ 747ce243)

@@ -28,7 +28,7 @@ from setuptools import setup, find_packages
 import glob
 import os
-version = '1.0.238'
+version = '1.0.244'
 name = 'slapos.cookbook'
 long_description = open("README.rst").read()
slapos/recipe/redis/MyRedis2410.py deleted 100644 → 0 (view file @ 1a8746af)
from __future__ import with_statement

"Core exceptions raised by the Redis client"

class RedisError(Exception):
    pass

class AuthenticationError(RedisError):
    pass

class ConnectionError(RedisError):
    pass

class ResponseError(RedisError):
    pass

class InvalidResponse(RedisError):
    pass

class DataError(RedisError):
    pass

class PubSubError(RedisError):
    pass

class WatchError(RedisError):
    pass

import socket
from itertools import chain, imap
#from redis.exceptions import ConnectionError, ResponseError, InvalidResponse

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
class PythonParser(object):
    "Plain Python parsing class"
    MAX_READ_LENGTH = 1000000

    def __init__(self):
        self._fp = None

    def __del__(self):
        try:
            self.on_disconnect()
        except:
            pass

    def on_connect(self, connection):
        "Called when the socket connects"
        self._fp = connection._sock.makefile('r')

    def on_disconnect(self):
        "Called when the socket disconnects"
        if self._fp is not None:
            self._fp.close()
            self._fp = None

    def read(self, length=None):
        """
        Read a line from the socket is no length is specified,
        otherwise read ``length`` bytes. Always strip away the newlines.
        """
        try:
            if length is not None:
                bytes_left = length + 2  # read the line ending
                if length > self.MAX_READ_LENGTH:
                    # apparently reading more than 1MB or so from a windows
                    # socket can cause MemoryErrors. See:
                    # https://github.com/andymccurdy/redis-py/issues/205
                    # read smaller chunks at a time to work around this
                    try:
                        buf = StringIO()
                        while bytes_left > 0:
                            read_len = min(bytes_left, self.MAX_READ_LENGTH)
                            buf.write(self._fp.read(read_len))
                            bytes_left -= read_len
                        buf.seek(0)
                        return buf.read(length)
                    finally:
                        buf.close()
                return self._fp.read(bytes_left)[:-2]

            # no length, read a full line
            return self._fp.readline()[:-2]
        except (socket.error, socket.timeout), e:
            raise ConnectionError("Error while reading from socket: %s" % \
                (e.args,))

    def read_response(self):
        response = self.read()
        if not response:
            raise ConnectionError("Socket closed on remote end")

        byte, response = response[0], response[1:]

        # server returned an error
        if byte == '-':
            if response.startswith('ERR '):
                response = response[4:]
                return ResponseError(response)
            if response.startswith('LOADING '):
                # If we're loading the dataset into memory, kill the socket
                # so we re-initialize (and re-SELECT) next time.
                raise ConnectionError("Redis is loading data into memory")
        # single value
        elif byte == '+':
            return response
        # int value
        elif byte == ':':
            return long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self.read(length)
            return response
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            return [self.read_response() for i in xrange(length)]
        raise InvalidResponse("Protocol Error")
class HiredisParser(object):
    "Parser class for connections using Hiredis"
    def __del__(self):
        try:
            self.on_disconnect()
        except:
            pass

    def on_connect(self, connection):
        self._sock = connection._sock
        self._reader = hiredis.Reader(
            protocolError=InvalidResponse,
            replyError=ResponseError)

    def on_disconnect(self):
        self._sock = None
        self._reader = None

    def read_response(self):
        if not self._reader:
            raise ConnectionError("Socket closed on remote end")
        response = self._reader.gets()
        while response is False:
            try:
                buffer = self._sock.recv(4096)
            except (socket.error, socket.timeout), e:
                raise ConnectionError("Error while reading from socket: %s" % \
                    (e.args,))
            if not buffer:
                raise ConnectionError("Socket closed on remote end")
            self._reader.feed(buffer)
            # proactively, but not conclusively, check if more data is in the
            # buffer. if the data received doesn't end with \n, there's more.
            if not buffer.endswith('\n'):
                continue
            response = self._reader.gets()
        return response

try:
    import hiredis
    DefaultParser = HiredisParser
except ImportError:
    DefaultParser = PythonParser
class Connection(object):
    "Manages TCP communication to and from a Redis server"
    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', parser_class=DefaultParser):
        self.host = host
        self.port = port
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self._sock = None
        self._parser = parser_class()

    def __del__(self):
        try:
            self.disconnect()
        except:
            pass

    def connect(self):
        "Connects to the Redis server if not already connected"
        if self._sock:
            return
        try:
            sock = self._connect()
        except socket.error, e:
            raise ConnectionError(self._error_message(e))

        self._sock = sock
        self.on_connect()

    def _connect(self):
        "Create a TCP socket connection"
        sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect((self.host, self.port))
        return sock

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % \
                (self.host, self.port, exception.args[0])
        else:
            return "Error %s connecting %s:%s. %s." % \
                (exception.args[0], self.host, self.port, exception.args[1])

    def on_connect(self):
        "Initialize the connection, authenticate and select a database"
        self._parser.on_connect(self)

        # if a password is specified, authenticate
        if self.password:
            self.send_command('AUTH', self.password)
            if self.read_response() != 'OK':
                raise ConnectionError('Invalid Password')

        # if a database is specified, switch to it
        if self.db:
            self.send_command('SELECT', self.db)
            if self.read_response() != 'OK':
                raise ConnectionError('Invalid Database')

    def disconnect(self):
        "Disconnects from the Redis server"
        self._parser.on_disconnect()
        if self._sock is None:
            return
        try:
            self._sock.close()
        except socket.error:
            pass
        self._sock = None

    def send_packed_command(self, command):
        "Send an already packed command to the Redis server"
        if not self._sock:
            self.connect()
        try:
            self._sock.sendall(command)
        except socket.error, e:
            self.disconnect()
            if len(e.args) == 1:
                _errno, errmsg = 'UNKNOWN', e.args[0]
            else:
                _errno, errmsg = e.args
            raise ConnectionError("Error %s while writing to socket. %s." % \
                (_errno, errmsg))
        except:
            self.disconnect()
            raise

    def send_command(self, *args):
        "Pack and send a command to the Redis server"
        self.send_packed_command(self.pack_command(*args))

    def read_response(self):
        "Read the response from a previously sent command"
        try:
            response = self._parser.read_response()
        except:
            self.disconnect()
            raise
        if response.__class__ == ResponseError:
            raise response
        return response

    def encode(self, value):
        "Return a bytestring representation of the value"
        if isinstance(value, unicode):
            return value.encode(self.encoding, self.encoding_errors)
        return str(value)

    def pack_command(self, *args):
        "Pack a series of arguments into a value Redis command"
        command = ['$%s\r\n%s\r\n' % (len(enc_value), enc_value)
                   for enc_value in imap(self.encode, args)]
        return '*%s\r\n%s' % (len(command), ''.join(command))
class UnixDomainSocketConnection(Connection):
    def __init__(self, path='', db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', parser_class=DefaultParser):
        self.path = path
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self._sock = None
        self._parser = parser_class()

    def _connect(self):
        "Create a Unix domain socket connection"
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect(self.path)
        return sock

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to unix socket: %s. %s." % \
                (self.path, exception.args[0])
        else:
            return "Error %s connecting to unix socket: %s. %s." % \
                (exception.args[0], self.path, exception.args[1])
# TODO: add ability to block waiting on a connection to be released
class ConnectionPool(object):
    "Generic connection pool"
    def __init__(self, connection_class=Connection, max_connections=None,
                 **connection_kwargs):
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections or 2 ** 31
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()

    def get_connection(self, command_name, *keys, **options):
        "Get a connection from the pool"
        try:
            connection = self._available_connections.pop()
        except IndexError:
            connection = self.make_connection()
        self._in_use_connections.add(connection)
        return connection

    def make_connection(self):
        "Create a new connection"
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        return self.connection_class(**self.connection_kwargs)

    def release(self, connection):
        "Releases the connection back to the pool"
        self._in_use_connections.remove(connection)
        self._available_connections.append(connection)

    def disconnect(self):
        "Disconnects all connections in the pool"
        all_conns = chain(self._available_connections,
                          self._in_use_connections)
        for connection in all_conns:
            connection.disconnect()
import datetime
import time
import warnings
from itertools import imap, izip, starmap

#from redis.connection import ConnectionPool, UnixDomainSocketConnection
#from redis.exceptions import (
#    ConnectionError,
#    DataError,
#    RedisError,
#    ResponseError,
#    WatchError,
#)

def list_or_args(keys, args):
    # returns a single list combining keys and args
    try:
        i = iter(keys)
        # a string can be iterated, but indicates
        # keys wasn't passed as a list
        if isinstance(keys, basestring):
            keys = [keys]
    except TypeError:
        keys = [keys]
    if args:
        keys.extend(args)
    return keys

def timestamp_to_datetime(response):
    "Converts a unix timestamp to a Python datetime object"
    if not response:
        return None
    try:
        response = int(response)
    except ValueError:
        return None
    return datetime.datetime.fromtimestamp(response)

def string_keys_to_dict(key_string, callback):
    return dict.fromkeys(key_string.split(), callback)

def dict_merge(*dicts):
    merged = {}
    [merged.update(d) for d in dicts]
    return merged

def parse_debug_object(response):
    "Parse the results of Redis's DEBUG OBJECT command into a Python dict"
    # The 'type' of the object is the first item in the response, but isn't
    # prefixed with a name
    response = 'type:' + response
    response = dict([kv.split(':') for kv in response.split()])

    # parse some expected int values from the string response
    # note: this cmd isn't spec'd so these may not appear in all redis versions
    int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
    for field in int_fields:
        if field in response:
            response[field] = int(response[field])

    return response

def parse_object(response, infotype):
    "Parse the results of an OBJECT command"
    if infotype in ('idletime', 'refcount'):
        return int(response)
    return response

def parse_info(response):
    "Parse the result of Redis's INFO command into a Python dict"
    info = {}

    def get_value(value):
        if ',' not in value:
            return value
        sub_dict = {}
        for item in value.split(','):
            k, v = item.rsplit('=', 1)
            try:
                sub_dict[k] = int(v)
            except ValueError:
                sub_dict[k] = v
        return sub_dict

    for line in response.splitlines():
        if line and not line.startswith('#'):
            key, value = line.split(':')
            try:
                if '.' in value:
                    info[key] = float(value)
                else:
                    info[key] = int(value)
            except ValueError:
                info[key] = get_value(value)
    return info

def pairs_to_dict(response):
    "Create a dict given a list of key/value pairs"
    it = iter(response)
    return dict(izip(it, it))

def zset_score_pairs(response, **options):
    """
    If ``withscores`` is specified in the options, return the response as
    a list of (value, score) pairs
    """
    if not response or not options['withscores']:
        return response
    score_cast_func = options.get('score_cast_func', float)
    it = iter(response)
    return zip(it, imap(score_cast_func, it))

def int_or_none(response):
    if response is None:
        return None
    return int(response)

def float_or_none(response):
    if response is None:
        return None
    return float(response)

def parse_config(response, **options):
    # this is stupid, but don't have a better option right now
    if options['parse'] == 'GET':
        return response and pairs_to_dict(response) or {}
    return response == 'OK'
class StrictRedis(object):
    """
    Implementation of the Redis protocol.

    This abstract class provides a Python interface to all Redis commands
    and an implementation of the Redis protocol.

    Connection and Pipeline derive from this, implementing how
    the commands are sent and received to the Redis server
    """
    RESPONSE_CALLBACKS = dict_merge(
        string_keys_to_dict(
            'AUTH DEL EXISTS EXPIRE EXPIREAT HDEL HEXISTS HMSET MOVE MSETNX '
            'PERSIST RENAMENX SISMEMBER SMOVE SETEX SETNX SREM ZREM',
            bool
            ),
        string_keys_to_dict(
            'DECRBY GETBIT HLEN INCRBY LINSERT LLEN LPUSHX RPUSHX SADD SCARD '
            'SDIFFSTORE SETBIT SETRANGE SINTERSTORE STRLEN SUNIONSTORE ZADD '
            'ZCARD ZREMRANGEBYRANK ZREMRANGEBYSCORE',
            int
            ),
        string_keys_to_dict(
            # these return OK, or int if redis-server is >=1.3.4
            'LPUSH RPUSH',
            lambda r: isinstance(r, long) and r or r == 'OK'
            ),
        string_keys_to_dict('ZSCORE ZINCRBY', float_or_none),
        string_keys_to_dict(
            'FLUSHALL FLUSHDB LSET LTRIM MSET RENAME '
            'SAVE SELECT SET SHUTDOWN SLAVEOF WATCH UNWATCH',
            lambda r: r == 'OK'
            ),
        string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
        string_keys_to_dict(
            'SDIFF SINTER SMEMBERS SUNION',
            lambda r: r and set(r) or set()
            ),
        string_keys_to_dict(
            'ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
            zset_score_pairs
            ),
        string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
        {
            'BGREWRITEAOF': lambda r: \
                r == 'Background rewriting of AOF file started',
            'BGSAVE': lambda r: r == 'Background saving started',
            'BRPOPLPUSH': lambda r: r and r or None,
            'CONFIG': parse_config,
            'DEBUG': parse_debug_object,
            'HGETALL': lambda r: r and pairs_to_dict(r) or {},
            'INFO': parse_info,
            'LASTSAVE': timestamp_to_datetime,
            'OBJECT': parse_object,
            'PING': lambda r: r == 'PONG',
            'RANDOMKEY': lambda r: r and r or None,
        }
        )

    def __init__(self, host='localhost', port=6379,
                 db=0, password=None, socket_timeout=None,
                 connection_pool=None,
                 charset='utf-8', errors='strict', unix_socket_path=None):
        if not connection_pool:
            kwargs = {
                'db': db,
                'password': password,
                'socket_timeout': socket_timeout,
                'encoding': charset,
                'encoding_errors': errors
                }
            # based on input, setup appropriate connection args
            if unix_socket_path:
                kwargs.update({
                    'path': unix_socket_path,
                    'connection_class': UnixDomainSocketConnection
                    })
            else:
                kwargs.update({
                    'host': host,
                    'port': port
                    })
            connection_pool = ConnectionPool(**kwargs)
        self.connection_pool = connection_pool

        self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()

    def set_response_callback(self, command, callback):
        "Set a custom Response Callback"
        self.response_callbacks[command] = callback

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        """
        return StrictPipeline(
            self.connection_pool,
            self.response_callbacks,
            transaction,
            shard_hint)

    def transaction(self, func, *watches, **kwargs):
        """
        Convenience method for executing the callable `func` as a transaction
        while watching all keys specified in `watches`. The 'func' callable
        should expect a single arguement which is a Pipeline object.
        """
        shard_hint = kwargs.pop('shard_hint', None)
        with self.pipeline(True, shard_hint) as pipe:
            while 1:
                try:
                    pipe.watch(*watches)
                    func(pipe)
                    return pipe.execute()
                except WatchError:
                    continue

    def lock(self, name, timeout=None, sleep=0.1):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        """
        return Lock(self, name, timeout=timeout, sleep=sleep)

    def pubsub(self, shard_hint=None):
        """
        Return a Publish/Subscribe object. With this object, you can
        subscribe to channels and listen for messages that get published to
        them.
        """
        return PubSub(self.connection_pool, shard_hint)
    #### COMMAND EXECUTION AND PROTOCOL PARSING ####
    def execute_command(self, *args, **options):
        "Execute a command and return a parsed response"
        pool = self.connection_pool
        command_name = args[0]
        connection = pool.get_connection(command_name, **options)
        try:
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        except ConnectionError:
            connection.disconnect()
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        finally:
            pool.release(connection)

    def parse_response(self, connection, command_name, **options):
        "Parses a response from the Redis server"
        response = connection.read_response()
        if command_name in self.response_callbacks:
            return self.response_callbacks[command_name](response, **options)
        return response

    #### SERVER INFORMATION ####
    def bgrewriteaof(self):
        "Tell the Redis server to rewrite the AOF file from data in memory."
        return self.execute_command('BGREWRITEAOF')

    def bgsave(self):
        """
        Tell the Redis server to save its data to disk.  Unlike save(),
        this method is asynchronous and returns immediately.
        """
        return self.execute_command('BGSAVE')

    def config_get(self, pattern="*"):
        "Return a dictionary of configuration based on the ``pattern``"
        return self.execute_command('CONFIG', 'GET', pattern, parse='GET')

    def config_set(self, name, value):
        "Set config item ``name`` with ``value``"
        return self.execute_command('CONFIG', 'SET', name, value, parse='SET')

    def dbsize(self):
        "Returns the number of keys in the current database"
        return self.execute_command('DBSIZE')

    def debug_object(self, key):
        "Returns version specific metainformation about a give key"
        return self.execute_command('DEBUG', 'OBJECT', key)

    def delete(self, *names):
        "Delete one or more keys specified by ``names``"
        return self.execute_command('DEL', *names)
    __delitem__ = delete

    def echo(self, value):
        "Echo the string back from the server"
        return self.execute_command('ECHO', value)

    def flushall(self):
        "Delete all keys in all databases on the current host"
        return self.execute_command('FLUSHALL')

    def flushdb(self):
        "Delete all keys in the current database"
        return self.execute_command('FLUSHDB')

    def info(self):
        "Returns a dictionary containing information about the Redis server"
        return self.execute_command('INFO')

    def lastsave(self):
        """
        Return a Python datetime object representing the last time the
        Redis database was saved to disk
        """
        return self.execute_command('LASTSAVE')

    def object(self, infotype, key):
        "Return the encoding, idletime, or refcount about the key"
        return self.execute_command('OBJECT', infotype, key, infotype=infotype)

    def ping(self):
        "Ping the Redis server"
        return self.execute_command('PING')

    def save(self):
        """
        Tell the Redis server to save its data to disk,
        blocking until the save is complete
        """
        return self.execute_command('SAVE')

    def shutdown(self):
        "Shutdown the server"
        try:
            self.execute_command('SHUTDOWN')
        except ConnectionError:
            # a ConnectionError here is expected
            return
        raise RedisError("SHUTDOWN seems to have failed.")

    def slaveof(self, host=None, port=None):
        """
        Set the server to be a replicated slave of the instance identified
        by the ``host`` and ``port``. If called without arguements, the
        instance is promoted to a master instead.
        """
        if host is None and port is None:
            return self.execute_command("SLAVEOF", "NO", "ONE")
        return self.execute_command("SLAVEOF", host, port)
    #### BASIC KEY COMMANDS ####
    def append(self, key, value):
        """
        Appends the string ``value`` to the value at ``key``. If ``key``
        doesn't already exist, create it with a value of ``value``.
        Returns the new length of the value at ``key``.
        """
        return self.execute_command('APPEND', key, value)

    def decr(self, name, amount=1):
        """
        Decrements the value of ``key`` by ``amount``.  If no key exists,
        the value will be initialized as 0 - ``amount``
        """
        return self.execute_command('DECRBY', name, amount)

    def exists(self, name):
        "Returns a boolean indicating whether key ``name`` exists"
        return self.execute_command('EXISTS', name)
    __contains__ = exists

    def expire(self, name, time):
        "Set an expire flag on key ``name`` for ``time`` seconds"
        return self.execute_command('EXPIRE', name, time)

    def expireat(self, name, when):
        """
        Set an expire flag on key ``name``. ``when`` can be represented
        as an integer indicating unix time or a Python datetime object.
        """
        if isinstance(when, datetime.datetime):
            when = int(time.mktime(when.timetuple()))
        return self.execute_command('EXPIREAT', name, when)

    def get(self, name):
        """
        Return the value at key ``name``, or None if the key doesn't exist
        """
        return self.execute_command('GET', name)

    def __getitem__(self, name):
        """
        Return the value at key ``name``, raises a KeyError if the key
        doesn't exist.
        """
        value = self.get(name)
        if value:
            return value
        raise KeyError(name)

    def getbit(self, name, offset):
        "Returns a boolean indicating the value of ``offset`` in ``name``"
        return self.execute_command('GETBIT', name, offset)

    def getset(self, name, value):
        """
        Set the value at key ``name`` to ``value`` if key doesn't exist
        Return the value at key ``name`` atomically
        """
        return self.execute_command('GETSET', name, value)

    def incr(self, name, amount=1):
        """
        Increments the value of ``key`` by ``amount``.  If no key exists,
        the value will be initialized as ``amount``
        """
        return self.execute_command('INCRBY', name, amount)

    def keys(self, pattern='*'):
        "Returns a list of keys matching ``pattern``"
        return self.execute_command('KEYS', pattern)

    def mget(self, keys, *args):
        """
        Returns a list of values ordered identically to ``keys``
        """
        keys = list_or_args(keys, args)
        return self.execute_command('MGET', *keys)

    def mset(self, mapping):
        "Sets each key in the ``mapping`` dict to its corresponding value"
        items = []
        for pair in mapping.iteritems():
            items.extend(pair)
        return self.execute_command('MSET', *items)

    def msetnx(self, mapping):
        """
        Sets each key in the ``mapping`` dict to its corresponding value if
        none of the keys are already set
        """
        items = []
        for pair in mapping.iteritems():
            items.extend(pair)
        return self.execute_command('MSETNX', *items)

    def move(self, name, db):
        "Moves the key ``name`` to a different Redis database ``db``"
        return self.execute_command('MOVE', name, db)

    def persist(self, name):
        "Removes an expiration on ``name``"
        return self.execute_command('PERSIST', name)

    def randomkey(self):
        "Returns the name of a random key"
        return self.execute_command('RANDOMKEY')

    def rename(self, src, dst):
        """
        Rename key ``src`` to ``dst``
        """
        return self.execute_command('RENAME', src, dst)

    def renamenx(self, src, dst):
        "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
        return self.execute_command('RENAMENX', src, dst)

    def set(self, name, value):
        "Set the value at key ``name`` to ``value``"
        return self.execute_command('SET', name, value)
    __setitem__ = set

    def setbit(self, name, offset, value):
        """
        Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
        indicating the previous value of ``offset``.
        """
        value = value and 1 or 0
        return self.execute_command('SETBIT', name, offset, value)

    def setex(self, name, time, value):
        """
        Set the value of key ``name`` to ``value``
        that expires in ``time`` seconds
        """
        return self.execute_command('SETEX', name, time, value)

    def setnx(self, name, value):
        "Set the value of key ``name`` to ``value`` if key doesn't exist"
        return self.execute_command('SETNX', name, value)

    def setrange(self, name, offset, value):
        """
        Overwrite bytes in the value of ``name`` starting at ``offset`` with
        ``value``. If ``offset`` plus the length of ``value`` exceeds the
        length of the original value, the new value will be larger than before.
        If ``offset`` exceeds the length of the original value, null bytes
        will be used to pad between the end of the previous value and the start
        of what's being injected.

        Returns the length of the new string.
        """
        return self.execute_command('SETRANGE', name, offset, value)

    def strlen(self, name):
        "Return the number of bytes stored in the value of ``name``"
        return self.execute_command('STRLEN', name)

    def substr(self, name, start, end=-1):
        """
        Return a substring of the string at key ``name``. ``start`` and ``end``
        are 0-based integers specifying the portion of the string to return.
        """
        return self.execute_command('SUBSTR', name, start, end)

    def ttl(self, name):
        "Returns the number of seconds until the key ``name`` will expire"
        return self.execute_command('TTL', name)

    def type(self, name):
        "Returns the type of key ``name``"
        return self.execute_command('TYPE', name)

    def watch(self, *names):
        """
        Watches the values at keys ``names``, or None if the key doesn't exist
        """
        warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))

    def unwatch(self):
        """
        Unwatches the value at key ``name``, or None of the key doesn't exist
        """
        warnings.warn(DeprecationWarning('Call UNWATCH from a Pipeline object'))
    #### LIST COMMANDS ####
    def blpop(self, keys, timeout=0):
        """
        LPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to LPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
        """
        if timeout is None:
            timeout = 0
        if isinstance(keys, basestring):
            keys = [keys]
        else:
            keys = list(keys)
        keys.append(timeout)
        return self.execute_command('BLPOP', *keys)

    def brpop(self, keys, timeout=0):
        """
        RPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to LPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
        """
        if timeout is None:
            timeout = 0
        if isinstance(keys, basestring):
            keys = [keys]
        else:
            keys = list(keys)
        keys.append(timeout)
        return self.execute_command('BRPOP', *keys)

    def brpoplpush(self, src, dst, timeout=0):
        """
        Pop a value off the tail of ``src``, push it on the head of ``dst``
        and then return it.

        This command blocks until a value is in ``src`` or until ``timeout``
        seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
        forever.
        """
        if timeout is None:
            timeout = 0
        return self.execute_command('BRPOPLPUSH', src, dst, timeout)

    def lindex(self, name, index):
        """
        Return the item from list ``name`` at position ``index``

        Negative indexes are supported and will return an item at the
        end of the list
        """
        return self.execute_command('LINDEX', name, index)

    def linsert(self, name, where, refvalue, value):
        """
        Insert ``value`` in list ``name`` either immediately before or after
        [``where``] ``refvalue``

        Returns the new length of the list on success or -1 if ``refvalue``
        is not in the list.
        """
        return self.execute_command('LINSERT', name, where, refvalue, value)

    def llen(self, name):
        "Return the length of the list ``name``"
        return self.execute_command('LLEN', name)

    def lpop(self, name):
        "Remove and return the first item of the list ``name``"
        return self.execute_command('LPOP', name)

    def lpush(self, name, *values):
        "Push ``values`` onto the head of the list ``name``"
        return self.execute_command('LPUSH', name, *values)

    def lpushx(self, name, value):
        "Push ``value`` onto the head of the list ``name`` if ``name`` exists"
        return self.execute_command('LPUSHX', name, value)

    def lrange(self, name, start, end):
        """
        Return a slice of the list ``name`` between
        position ``start`` and ``end``

        ``start`` and ``end`` can be negative numbers just like
        Python slicing notation
        """
        return self.execute_command('LRANGE', name, start, end)

    def lrem(self, name, count, value):
        """
        Remove the first ``count`` occurrences of elements equal to ``value``
        from the list stored at ``name``.

        The count argument influences the operation in the following ways:
            count > 0: Remove elements equal to value moving from head to tail.
            count < 0: Remove elements equal to value moving from tail to head.
            count = 0: Remove all elements equal to value.
        """
        return self.execute_command('LREM', name, count, value)

    def lset(self, name, index, value):
        "Set ``position`` of list ``name`` to ``value``"
        return self.execute_command('LSET', name, index, value)

    def ltrim(self, name, start, end):
        """
        Trim the list ``name``, removing all values not within the slice
        between ``start`` and ``end``

        ``start`` and ``end`` can be negative numbers just like
        Python slicing notation
        """
        return self.execute_command('LTRIM', name, start, end)

    def rpop(self, name):
        "Remove and return the last item of the list ``name``"
        return self.execute_command('RPOP', name)

    def rpoplpush(self, src, dst):
        """
        RPOP a value off of the ``src`` list and atomically LPUSH it
        on to the ``dst`` list.  Returns the value.
        """
        return self.execute_command('RPOPLPUSH', src, dst)

    def rpush(self, name, *values):
        "Push ``values`` onto the tail of the list ``name``"
        return self.execute_command('RPUSH', name, *values)

    def rpushx(self, name, value):
        "Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
        return self.execute_command('RPUSHX', name, value)

    def sort(self, name, start=None, num=None, by=None, get=None,
             desc=False, alpha=False, store=None):
        """
        Sort and return the list, set or sorted set at ``name``.

        ``start`` and ``num`` allow for paging through the sorted data

        ``by`` allows using an external key to weight and sort the items.
            Use an "*" to indicate where in the key the item value is located

        ``get`` allows for returning items from external keys rather than the
            sorted data itself.  Use an "*" to indicate where int he key
            the item value is located

        ``desc`` allows for reversing the sort

        ``alpha`` allows for sorting lexicographically rather than numerically

        ``store`` allows for storing the result of the sort into
            the key ``store``
        """
        if (start is not None and num is None) or \
                (num is not None and start is None):
            raise RedisError("``start`` and ``num`` must both be specified")

        pieces = [name]
        if by is not None:
            pieces.append('BY')
            pieces.append(by)
        if start is not None and num is not None:
            pieces.append('LIMIT')
            pieces.append(start)
            pieces.append(num)
        if get is not None:
            # If get is a string assume we want to get a single value.
            # Otherwise assume it's an interable and we want to get multiple
            # values. We can't just iterate blindly because strings are
            # iterable.
            if isinstance(get, basestring):
                pieces.append('GET')
                pieces.append(get)
            else:
                for g in get:
                    pieces.append('GET')
                    pieces.append(g)
        if desc:
            pieces.append('DESC')
        if alpha:
            pieces.append('ALPHA')
        if store is not None:
            pieces.append('STORE')
            pieces.append(store)
        return self.execute_command('SORT', *pieces)
    #### SET COMMANDS ####
    def sadd(self, name, *values):
        "Add ``value(s)`` to set ``name``"
        return self.execute_command('SADD', name, *values)

    def scard(self, name):
        "Return the number of elements in set ``name``"
        return self.execute_command('SCARD', name)

    def sdiff(self, keys, *args):
        "Return the difference of sets specified by ``keys``"
        keys = list_or_args(keys, args)
        return self.execute_command('SDIFF', *keys)

    def sdiffstore(self, dest, keys, *args):
        """
        Store the difference of sets specified by ``keys`` into a new
        set named ``dest``.  Returns the number of keys in the new set.
        """
        keys = list_or_args(keys, args)
        return self.execute_command('SDIFFSTORE', dest, *keys)

    def sinter(self, keys, *args):
        "Return the intersection of sets specified by ``keys``"
        keys = list_or_args(keys, args)
        return self.execute_command('SINTER', *keys)

    def sinterstore(self, dest, keys, *args):
        """
        Store the intersection of sets specified by ``keys`` into a new
        set named ``dest``.  Returns the number of keys in the new set.
        """
        keys = list_or_args(keys, args)
        return self.execute_command('SINTERSTORE', dest, *keys)

    def sismember(self, name, value):
        "Return a boolean indicating if ``value`` is a member of set ``name``"
        return self.execute_command('SISMEMBER', name, value)

    def smembers(self, name):
        "Return all members of the set ``name``"
        return self.execute_command('SMEMBERS', name)

    def smove(self, src, dst, value):
        "Move ``value`` from set ``src`` to set ``dst`` atomically"
        return self.execute_command('SMOVE', src, dst, value)

    def spop(self, name):
        "Remove and return a random member of set ``name``"
        return self.execute_command('SPOP', name)

    def srandmember(self, name):
        "Return a random member of set ``name``"
        return self.execute_command('SRANDMEMBER', name)

    def srem(self, name, *values):
        "Remove ``values`` from set ``name``"
        return self.execute_command('SREM', name, *values)

    def sunion(self, keys, *args):
        "Return the union of sets specifiued by ``keys``"
        keys = list_or_args(keys, args)
        return self.execute_command('SUNION', *keys)

    def sunionstore(self, dest, keys, *args):
        """
        Store the union of sets specified by ``keys`` into a new
        set named ``dest``.  Returns the number of keys in the new set.
        """
        keys = list_or_args(keys, args)
        return self.execute_command('SUNIONSTORE', dest, *keys)
    #### SORTED SET COMMANDS ####
    def zadd(self, name, *args, **kwargs):
        """
        Set any number of score, element-name pairs to the key ``name``. Pairs
        can be specified in two ways:

        As *args, in the form of: score1, name1, score2, name2, ...
        or as **kwargs, in the form of: name1=score1, name2=score2, ...

        The following example would add four values to the 'my-key' key:
        redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
        """
        pieces = []
        if args:
            if len(args) % 2 != 0:
                raise RedisError("ZADD requires an equal number of "
                                 "values and scores")
            pieces.extend(args)
        for pair in kwargs.iteritems():
            pieces.append(pair[1])
            pieces.append(pair[0])
        return self.execute_command('ZADD', name, *pieces)

    def zcard(self, name):
        "Return the number of elements in the sorted set ``name``"
        return self.execute_command('ZCARD', name)

    def zcount(self, name, min, max):
        return self.execute_command('ZCOUNT', name, min, max)

    def zincrby(self, name, value, amount=1):
        "Increment the score of ``value`` in sorted set ``name`` by ``amount``"
        return self.execute_command('ZINCRBY', name, amount, value)

    def zinterstore(self, dest, keys, aggregate=None):
        """
        Intersect multiple sorted sets specified by ``keys`` into
        a new sorted set, ``dest``. Scores in the destination will be
        aggregated based on the ``aggregate``, or SUM if none is provided.
        """
        return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)

    def zrange(self, name, start, end, desc=False, withscores=False,
               score_cast_func=float):
        """
        Return a range of values from sorted set ``name`` between
        ``start`` and ``end`` sorted in ascending order.

        ``start`` and ``end`` can be negative, indicating the end of the range.

        ``desc`` a boolean indicating whether to sort the results descendingly

        ``withscores`` indicates to return the scores along with the values.
        The return type is a list of (value, score) pairs

        ``score_cast_func`` a callable used to cast the score return value
        """
        if desc:
            return self.zrevrange(name, start, end, withscores)
        pieces = ['ZRANGE', name, start, end]
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrangebyscore(self, name, min, max, start=None, num=None,
                      withscores=False, score_cast_func=float):
        """
        Return a range of values from the sorted set ``name`` with scores
        between ``min`` and ``max``.

        If ``start`` and ``num`` are specified, then return a slice
        of the range.

        ``withscores`` indicates to return the scores along with the values.
        The return type is a list of (value, score) pairs

        `score_cast_func`` a callable used to cast the score return value
        """
        if (start is not None and num is None) or \
                (num is not None and start is None):
            raise RedisError("``start`` and ``num`` must both be specified")
        pieces = ['ZRANGEBYSCORE', name, min, max]
        if start is not None and num is not None:
            pieces.extend(['LIMIT', start, num])
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrank(self, name, value):
        """
        Returns a 0-based value indicating the rank of ``value`` in sorted set
        ``name``
        """
        return self.execute_command('ZRANK', name, value)

    def zrem(self, name, *values):
        "Remove member ``values`` from sorted set ``name``"
        return self.execute_command('ZREM', name, *values)

    def zremrangebyrank(self, name, min, max):
        """
        Remove all elements in the sorted set ``name`` with ranks between
        ``min`` and ``max``. Values are 0-based, ordered from smallest score
        to largest. Values can be negative indicating the highest scores.
        Returns the number of elements removed
        """
        return self.execute_command('ZREMRANGEBYRANK', name, min, max)

    def zremrangebyscore(self, name, min, max):
        """
        Remove all elements in the sorted set ``name`` with scores
        between ``min`` and ``max``. Returns the number of elements removed.
        """
        return self.execute_command('ZREMRANGEBYSCORE', name, min, max)

    def zrevrange(self, name, start, num, withscores=False,
                  score_cast_func=float):
        """
        Return a range of values from sorted set ``name`` between
        ``start`` and ``num`` sorted in descending order.

        ``start`` and ``num`` can be negative, indicating the end of the range.

        ``withscores`` indicates to return the scores along with the values
        The return type is a list of (value, score) pairs

        ``score_cast_func`` a callable used to cast the score return value
        """
        pieces = ['ZREVRANGE', name, start, num]
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrevrangebyscore(self, name, max, min, start=None, num=None,
                         withscores=False, score_cast_func=float):
        """
        Return a range of values from the sorted set ``name`` with scores
        between ``min`` and ``max`` in descending order.

        If ``start`` and ``num`` are specified, then return a slice
        of the range.

        ``withscores`` indicates to return the scores along with the values.
        The return type is a list of (value, score) pairs

        ``score_cast_func`` a callable used to cast the score return value
        """
        if (start is not None and num is None) or \
                (num is not None and start is None):
            raise RedisError("``start`` and ``num`` must both be specified")
        pieces = ['ZREVRANGEBYSCORE', name, max, min]
        if start is not None and num is not None:
            pieces.extend(['LIMIT', start, num])
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrevrank(self, name, value):
        """
        Returns a 0-based value indicating the descending rank of
        ``value`` in sorted set ``name``
        """
        return self.execute_command('ZREVRANK', name, value)

    def zscore(self, name, value):
        "Return the score of element ``value`` in sorted set ``name``"
        return self.execute_command('ZSCORE', name, value)

    def zunionstore(self, dest, keys, aggregate=None):
        """
        Union multiple sorted sets specified by ``keys`` into
        a new sorted set, ``dest``. Scores in the destination will be
        aggregated based on the ``aggregate``, or SUM if none is provided.
        """
        return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)

    def _zaggregate(self, command, dest, keys, aggregate=None):
        pieces = [command, dest, len(keys)]
        if isinstance(keys, dict):
            keys, weights = keys.keys(), keys.values()
        else:
            weights = None
        pieces.extend(keys)
        if weights:
            pieces.append('WEIGHTS')
            pieces.extend(weights)
        if aggregate:
            pieces.append('AGGREGATE')
            pieces.append(aggregate)
        return self.execute_command(*pieces)
    #### HASH COMMANDS ####
    def hdel(self, name, *keys):
        "Delete ``keys`` from hash ``name``"
        return self.execute_command('HDEL', name, *keys)

    def hexists(self, name, key):
        "Returns a boolean indicating if ``key`` exists within hash ``name``"
        return self.execute_command('HEXISTS', name, key)

    def hget(self, name, key):
        "Return the value of ``key`` within the hash ``name``"
        return self.execute_command('HGET', name, key)

    def hgetall(self, name):
        "Return a Python dict of the hash's name/value pairs"
        return self.execute_command('HGETALL', name)

    def hincrby(self, name, key, amount=1):
        "Increment the value of ``key`` in hash ``name`` by ``amount``"
        return self.execute_command('HINCRBY', name, key, amount)

    def hkeys(self, name):
        "Return the list of keys within hash ``name``"
        return self.execute_command('HKEYS', name)

    def hlen(self, name):
        "Return the number of elements in hash ``name``"
        return self.execute_command('HLEN', name)

    def hset(self, name, key, value):
        """
        Set ``key`` to ``value`` within hash ``name``
        Returns 1 if HSET created a new field, otherwise 0
        """
        return self.execute_command('HSET', name, key, value)

    def hsetnx(self, name, key, value):
        """
        Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
        exist.  Returns 1 if HSETNX created a field, otherwise 0.
        """
        return self.execute_command("HSETNX", name, key, value)

    def hmset(self, name, mapping):
        """
        Sets each key in the ``mapping`` dict to its corresponding value
        in the hash ``name``
        """
        if not mapping:
            raise DataError("'hmset' with 'mapping' of length 0")
        items = []
        for pair in mapping.iteritems():
            items.extend(pair)
        return self.execute_command('HMSET', name, *items)

    def hmget(self, name, keys):
        "Returns a list of values ordered identically to ``keys``"
        return self.execute_command('HMGET', name, *keys)

    def hvals(self, name):
        "Return the list of values within hash ``name``"
        return self.execute_command('HVALS', name)

    def publish(self, channel, message):
        """
        Publish ``message`` on ``channel``.
        Returns the number of subscribers the message was delivered to.
        """
        return self.execute_command('PUBLISH', channel, message)
class Redis(StrictRedis):
    """
    Provides backwards compatibility with older versions of redis-py that
    changed arguments to some commands to be more Pythonic, sane, or by
    accident.
    """

    # Overridden callbacks
    RESPONSE_CALLBACKS = dict_merge(
        StrictRedis.RESPONSE_CALLBACKS,
        {
            'TTL': lambda r: r != -1 and r or None,
        }
        )

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        """
        return Pipeline(
            self.connection_pool,
            self.response_callbacks,
            transaction,
            shard_hint)

    def setex(self, name, value, time):
        """
        Set the value of key ``name`` to ``value``
        that expires in ``time`` seconds
        """
        return self.execute_command('SETEX', name, time, value)

    def lrem(self, name, value, num=0):
        """
        Remove the first ``num`` occurrences of elements equal to ``value``
        from the list stored at ``name``.

        The ``num`` argument influences the operation in the following ways:
            num > 0: Remove elements equal to value moving from head to tail.
            num < 0: Remove elements equal to value moving from tail to head.
            num = 0: Remove all elements equal to value.
        """
        return self.execute_command('LREM', name, num, value)

    def zadd(self, name, *args, **kwargs):
        """
        NOTE: The order of arguments differs from that of the official ZADD
        command. For backwards compatability, this method accepts arguments
        in the form of name1, score1, name2, score2, while the official Redis
        documents expects score1, name1, score2, name2.

        If you're looking to use the standard syntax, consider using the
        StrictRedis class. See the API Reference section of the docs for more
        information.

        Set any number of element-name, score pairs to the key ``name``. Pairs
        can be specified in two ways:

        As *args, in the form of: name1, score1, name2, score2, ...
        or as **kwargs, in the form of: name1=score1, name2=score2, ...

        The following example would add four values to the 'my-key' key:
        redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
        """
        pieces = []
        if args:
            if len(args) % 2 != 0:
                raise RedisError("ZADD requires an equal number of "
                                 "values and scores")
            pieces.extend(reversed(args))
        for pair in kwargs.iteritems():
            pieces.append(pair[1])
            pieces.append(pair[0])
        return self.execute_command('ZADD', name, *pieces)
class PubSub(object):
    """
    PubSub provides publish, subscribe and listen support to Redis channels.

    After subscribing to one or more channels, the listen() method will block
    until a message arrives on one of the subscribed channels. That message
    will be returned and it's safe to start listening again.
    """
    def __init__(self, connection_pool, shard_hint=None):
        self.connection_pool = connection_pool
        self.shard_hint = shard_hint
        self.connection = None
        self.channels = set()
        self.patterns = set()
        self.subscription_count = 0
        self.subscribe_commands = set(
            ('subscribe', 'psubscribe', 'unsubscribe', 'punsubscribe')
            )

    def __del__(self):
        try:
            # if this object went out of scope prior to shutting down
            # subscriptions, close the connection manually before
            # returning it to the connection pool
            if self.connection and (self.channels or self.patterns):
                self.connection.disconnect()
            self.reset()
        except:
            pass

    def reset(self):
        if self.connection:
            self.connection_pool.release(self.connection)
            self.connection = None

    def execute_command(self, *args, **kwargs):
        "Execute a publish/subscribe command"
        if self.connection is None:
            self.connection = self.connection_pool.get_connection(
                'pubsub',
                self.shard_hint
                )
        connection = self.connection
        try:
            connection.send_command(*args)
            return self.parse_response()
        except ConnectionError:
            connection.disconnect()
            # Connect manually here. If the Redis server is down, this will
            # fail and raise a ConnectionError as desired.
            connection.connect()
            # resubscribe to all channels and patterns before
            # resending the current command
            for channel in self.channels:
                self.subscribe(channel)
            for pattern in self.patterns:
                self.psubscribe(pattern)
            connection.send_command(*args)
            return self.parse_response()

    def parse_response(self):
        "Parse the response from a publish/subscribe command"
        response = self.connection.read_response()
        if response[0] in self.subscribe_commands:
            self.subscription_count = response[2]
            # if we've just unsubscribed from the remaining channels,
            # release the connection back to the pool
            if not self.subscription_count:
                self.reset()
        return response

    def psubscribe(self, patterns):
        "Subscribe to all channels matching any pattern in ``patterns``"
        if isinstance(patterns, basestring):
            patterns = [patterns]
        for pattern in patterns:
            self.patterns.add(pattern)
        return self.execute_command('PSUBSCRIBE', *patterns)

    def punsubscribe(self, patterns=[]):
        """
        Unsubscribe from any channel matching any pattern in ``patterns``.
        If empty, unsubscribe from all channels.
        """
        if isinstance(patterns, basestring):
            patterns = [patterns]
        for pattern in patterns:
            try:
                self.patterns.remove(pattern)
            except KeyError:
                pass
        return self.execute_command('PUNSUBSCRIBE', *patterns)

    def subscribe(self, channels):
        "Subscribe to ``channels``, waiting for messages to be published"
        if isinstance(channels, basestring):
            channels = [channels]
        for channel in channels:
            self.channels.add(channel)
        return self.execute_command('SUBSCRIBE', *channels)

    def unsubscribe(self, channels=[]):
        """
        Unsubscribe from ``channels``. If empty, unsubscribe
        from all channels
        """
        if isinstance(channels, basestring):
            channels = [channels]
        for channel in channels:
            try:
                self.channels.remove(channel)
            except KeyError:
                pass
        return self.execute_command('UNSUBSCRIBE', *channels)

    def listen(self):
        "Listen for messages on channels this client has been subscribed to"
        while self.subscription_count:
            r = self.parse_response()
            if r[0] == 'pmessage':
                msg = {
                    'type': r[0],
                    'pattern': r[1],
                    'channel': r[2],
                    'data': r[3]
                }
            else:
                msg = {
                    'type': r[0],
                    'pattern': None,
                    'channel': r[1],
                    'data': r[2]
                }
            yield msg
class BasePipeline(object):
    """
    Pipelines provide a way to transmit multiple commands to the Redis server
    in one transmission. This is convenient for batch processing, such as
    saving all the values in a list to Redis.

    All commands executed within a pipeline are wrapped with MULTI and EXEC
    calls. This guarantees all commands executed in the pipeline will be
    executed atomically.

    Any command raising an exception does *not* halt the execution of
    subsequent commands in the pipeline. Instead, the exception is caught
    and its instance is placed into the response list returned by execute().
    Code iterating over the response list should be able to deal with an
    instance of an exception as a potential value. In general, these will be
    ResponseError exceptions, such as those raised when issuing a command
    on a key of a different datatype.
    """
    UNWATCH_COMMANDS = set(('DISCARD', 'EXEC', 'UNWATCH'))

    def __init__(self, connection_pool, response_callbacks, transaction,
                 shard_hint):
        self.connection_pool = connection_pool
        self.connection = None
        self.response_callbacks = response_callbacks
        self.transaction = transaction
        self.shard_hint = shard_hint
        self.watching = False
        self.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.reset()

    def __del__(self):
        try:
            self.reset()
        except:
            pass

    def reset(self):
        self.command_stack = []
        # make sure to reset the connection state in the event that we were
        # watching something
        if self.watching and self.connection:
            try:
                # call this manually since our unwatch or
                # immediate_execute_command methods can call reset()
                self.connection.send_command('UNWATCH')
                self.connection.read_response()
            except ConnectionError:
                # disconnect will also remove any previous WATCHes
                self.connection.disconnect()
        # clean up the other instance attributes
        self.watching = False
        self.explicit_transaction = False
        # we can safely return the connection to the pool here since we're
        # sure we're no longer WATCHing anything
        if self.connection:
            self.connection_pool.release(self.connection)
            self.connection = None

    def multi(self):
        """
        Start a transactional block of the pipeline after WATCH commands
        are issued. End the transactional block with `execute`.
        """
        if self.explicit_transaction:
            raise RedisError('Cannot issue nested calls to MULTI')
        if self.command_stack:
            raise RedisError('Commands without an initial WATCH have already '
                             'been issued')
        self.explicit_transaction = True

    def execute_command(self, *args, **kwargs):
        if (self.watching or args[0] == 'WATCH') and \
                not self.explicit_transaction:
            return self.immediate_execute_command(*args, **kwargs)
        return self.pipeline_execute_command(*args, **kwargs)

    def immediate_execute_command(self, *args, **options):
        """
        Execute a command immediately, but don't auto-retry on a
        ConnectionError if we're already WATCHing a variable. Used when
        issuing WATCH or subsequent commands retrieving their values but
        before MULTI is called.
        """
        command_name = args[0]
        conn = self.connection
        # if this is the first call, we need a connection
        if not conn:
            conn = self.connection_pool.get_connection(command_name,
                                                       self.shard_hint)
            self.connection = conn
        try:
            conn.send_command(*args)
            return self.parse_response(conn, command_name, **options)
        except ConnectionError:
            conn.disconnect()
            # if we're not already watching, we can safely retry the command
            # assuming it was a connection timeout
            if not self.watching:
                conn.send_command(*args)
                return self.parse_response(conn, command_name, **options)
            self.reset()
            raise

    def pipeline_execute_command(self, *args, **options):
        """
        Stage a command to be executed when execute() is next called.

        Returns the current Pipeline object back so commands can be
        chained together, such as:

        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')

        At some other point, you can then run: pipe.execute(),
        which will execute all commands queued in the pipe.
        """
        self.command_stack.append((args, options))
        return self

    def _execute_transaction(self, connection, commands):
        all_cmds = ''.join(starmap(connection.pack_command,
                                   [args for args, options in commands]))
        connection.send_packed_command(all_cmds)
        # we don't care about the multi/exec any longer
        commands = commands[1:-1]
        # parse off the response for MULTI and all commands prior to EXEC.
        # the only data we care about is the response to the EXEC,
        # which is the last command
        for i in range(len(commands) + 1):
            self.parse_response(connection, '_')
        # parse the EXEC.
        response = self.parse_response(connection, '_')

        if response is None:
            raise WatchError("Watched variable changed.")

        if len(response) != len(commands):
            raise ResponseError("Wrong number of response items from "
                                "pipeline execution")
        # We have to run response callbacks manually
        data = []
        for r, cmd in izip(response, commands):
            if not isinstance(r, Exception):
                args, options = cmd
                command_name = args[0]
                if command_name in self.response_callbacks:
                    r = self.response_callbacks[command_name](r, **options)
            data.append(r)
        return data

    def _execute_pipeline(self, connection, commands):
        # build up all commands into a single request to increase network perf
        all_cmds = ''.join(starmap(connection.pack_command,
                                   [args for args, options in commands]))
        connection.send_packed_command(all_cmds)
        return [self.parse_response(connection, args[0], **options)
                for args, options in commands]

    def parse_response(self, connection, command_name, **options):
        result = StrictRedis.parse_response(self, connection, command_name,
                                            **options)
        if command_name in self.UNWATCH_COMMANDS:
            self.watching = False
        elif command_name == 'WATCH':
            self.watching = True
        return result

    def execute(self):
        "Execute all the commands in the current pipeline"
        stack = self.command_stack
        if self.transaction or self.explicit_transaction:
            stack = [(('MULTI', ), {})] + stack + [(('EXEC', ), {})]
            execute = self._execute_transaction
        else:
            execute = self._execute_pipeline

        conn = self.connection
        if not conn:
            conn = self.connection_pool.get_connection('MULTI',
                                                       self.shard_hint)
            # assign to self.connection so reset() releases the connection
            # back to the pool after we're done
            self.connection = conn

        try:
            return execute(conn, stack)
        except ConnectionError:
            conn.disconnect()
            # if we were watching a variable, the watch is no longer valid
            # since this connection has died. raise a WatchError, which
            # indicates the user should retry the transaction. If this is more
            # than a temporary failure, the WATCH that the user next issues
            # will fail, propagating the real ConnectionError
            if self.watching:
                raise WatchError("A ConnectionError occurred while watching "
                                 "one or more keys")
            # otherwise, it's safe to retry since the transaction isn't
            # predicated on any state
            return execute(conn, stack)
        finally:
            self.reset()

    def watch(self, *names):
        """
        Watches the values at keys ``names``
        """
        if self.explicit_transaction:
            raise RedisError('Cannot issue a WATCH after a MULTI')
        return self.execute_command('WATCH', *names)

    def unwatch(self):
        """
        Unwatches all previously specified keys
        """
        return self.watching and self.execute_command('UNWATCH') or True


class StrictPipeline(BasePipeline, StrictRedis):
    "Pipeline for the StrictRedis class"
    pass


class Pipeline(BasePipeline, Redis):
    "Pipeline for the Redis class"
    pass
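
# --- Editor's note, not part of the original module: a sketch of how these
# pipeline classes are typically driven, assuming a StrictRedis client `r`
# created from this module and a reachable Redis server.
#
#   pipe = r.pipeline()                    # StrictPipeline, transaction=True
#   pipe.set('foo', 'bar').incr('baz')     # commands are only queued here
#   results = pipe.execute()               # sent as a single MULTI/EXEC block
#
# With WATCH, execute() raises WatchError if a watched key changed, and the
# caller is expected to retry:
#
#   pipe = r.pipeline()
#   pipe.watch('counter')                  # runs immediately (no MULTI yet)
#   value = int(pipe.get('counter') or 0)
#   pipe.multi()
#   pipe.set('counter', value + 1)
#   pipe.execute()                         # WatchError here means: retry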
class LockError(RedisError):
    "Errors thrown from the Lock"
    pass


class Lock(object):
    """
    A shared, distributed Lock. Using Redis for locking allows the Lock
    to be shared across processes and/or machines.

    It's left to the user to resolve deadlock issues and make sure
    multiple clients play nicely together.
    """

    LOCK_FOREVER = float(2 ** 31 + 1)  # 1 past max unix time

    def __init__(self, redis, name, timeout=None, sleep=0.1):
        """
        Create a new Lock instance named ``name`` using the Redis client
        supplied by ``redis``.

        ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.

        Note: If using ``timeout``, you should make sure all the hosts
        that are running clients have their time synchronized with a network
        time service like ntp.
        """
        self.redis = redis
        self.name = name
        self.acquired_until = None
        self.timeout = timeout
        self.sleep = sleep
        if self.timeout and self.sleep > self.timeout:
            raise LockError("'sleep' must be less than 'timeout'")

    def __enter__(self):
        return self.acquire()

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()

    def acquire(self, blocking=True):
        """
        Use Redis to hold a shared, distributed lock named ``name``.
        Returns True once the lock is acquired.

        If ``blocking`` is False, always return immediately. If the lock
        was acquired, return True, otherwise return False.
        """
        sleep = self.sleep
        timeout = self.timeout
        while 1:
            unixtime = int(time.time())
            if timeout:
                timeout_at = unixtime + timeout
            else:
                timeout_at = Lock.LOCK_FOREVER
            timeout_at = float(timeout_at)
            if self.redis.setnx(self.name, timeout_at):
                self.acquired_until = timeout_at
                return True
            # We want blocking, but didn't acquire the lock
            # check to see if the current lock is expired
            existing = float(self.redis.get(self.name) or 1)
            if existing < unixtime:
                # the previous lock is expired, attempt to overwrite it
                existing = float(self.redis.getset(self.name, timeout_at) or 1)
                if existing < unixtime:
                    # we successfully acquired the lock
                    self.acquired_until = timeout_at
                    return True
            if not blocking:
                return False
            time.sleep(sleep)

    def release(self):
        "Releases the already acquired lock"
        if self.acquired_until is None:
            raise ValueError("Cannot release an unlocked lock")
        existing = float(self.redis.get(self.name) or 1)
        # if the lock time is in the future, delete the lock
        if existing >= self.acquired_until:
            self.redis.delete(self.name)
        self.acquired_until = None

#print "TOTOTOTOTOTO"
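
The Lock class above is the last piece of this removed bundled client. For
reference, a minimal usage sketch (assuming a Redis client `r` created from
this module; `do_work()` is a placeholder, not a real function):

    # Acquire 'resource-name', blocking until it is free; the key stores the
    # expiry timestamp while the lock is held, and release() deletes it.
    with Lock(r, 'resource-name', timeout=60, sleep=0.1):
        do_work()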
slapos/recipe/redis/__init__.py View file @ 747ce243
@@ -33,22 +33,23 @@ class Recipe(GenericBaseRecipe):
   def install(self):
     path_list = []
-    if not self.optionIsTrue('use_passwd', False):
+    if not self.optionIsTrue('use-passwd', False):
       master_passwd = "# masterauth <master-password>"
     else:
       master_passwd = "masterauth %s" % self.options['passwd']
-    config_file = self.options['config_file'].strip()
-    configuration = dict(
-      pid_file=self.options['pid_file'],
-      port=self.options['port'],
-      ipv6=self.options['ipv6'],
-      server_dir=self.options['server_dir'],
-      log_file=self.options['log_file'],
-      master_passwd=master_passwd
+    config_file = self.options['config-file'].strip()
+    configuration = dict(
+      pid_file=self.options['pid-file'],
+      port=self.options['port'],
+      ipv6=self.options['ipv6'],
+      server_dir=self.options['server-dir'],
+      log_file=self.options['log-file'],
+      master_passwd=master_passwd
     )
     if self.options.get('unixsocket'):
-      unixsocket = "unixsocket %s\nunixsocketperm 700" % self.options['unixsocket']
+      unixsocket = "unixsocket %s\nunixsocketperm 700" % self.options['unixsocket']
     else:
-      unixsocket = ""
+      unixsocket = ""
     configuration['unixsocket'] = unixsocket
     config = self.createFile(config_file,
@@ -58,28 +59,31 @@ class Recipe(GenericBaseRecipe):
     redis = self.createWrapper(
       self.options['wrapper'],
-      (self.options['server_bin'], config_file),
+      (self.options['server-bin'], config_file),
     )
     path_list.append(redis)

-    promise_script = self.options.get('promise_wrapper', '').strip()
+    promise_script = self.options.get('promise-wrapper', '').strip()
     if promise_script:
-      promise = self.createPythonScript(
+      args = [
+        self.options['cli-bin'],
+        '-h', self.options['ipv6'],
+        '-p', self.options['port'],
+      ]
+      if self.options.get('unixsocket'):
+        args.extend(('-s', self.options['unixsocket']))
+      args.extend(('publish', 'Promise-Service', 'SlapOS Promise',))
+      promise = self.createWrapper(
         promise_script,
-        __name__ + '.promise',
-        (self.options['ipv6'], int(self.options['port']),
-          self.options.get('unixsocket'))
+        args,
       )
       path_list.append(promise)
     return path_list

-def promise(host, port, unixsocket):
-  from .MyRedis2410 import Redis
-  try:
-    r = Redis(host=host, port=port, unix_socket_path=unixsocket, db=0)
-    r.publish("Promise-Service", "SlapOS Promise")
-    r.connection_pool.disconnect()
-  except Exception as e:
-    sys.exit(e)
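
The diff above also replaces the Python promise (which imported the bundled
MyRedis2410 client) with a wrapper around redis-cli: the generated promise now
simply publishes a test message over the configured transport. A rough
equivalent of what that wrapper runs, with hypothetical values standing in for
the recipe options (cli-bin, ipv6, port, unixsocket):

    import subprocess

    # placeholder paths/values for illustration only
    args = ['/path/to/bin/redis-cli', '-h', '::1', '-p', '6379']
    # args += ['-s', '/path/to/redis.sock']  # only when 'unixsocket' is set
    args += ['publish', 'Promise-Service', 'SlapOS Promise']
    subprocess.check_call(args)  # a non-zero exit status makes the promise fail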
slapos/test/recipe/test_redis.py 0 → 100755 View file @ 747ce243

import functools
import os
import shutil
import tempfile
import unittest

import zc.buildout.testing


class TestRedis(unittest.TestCase):
  def getConfig(self):
    return {
      'config-file': self.getTempPath('redis.cfg'),
      'pid-file': self.getTempPath('redis.pid'),
      'port': 1234,
      'ipv6': '::1',
      'server-dir': self.getTempPath('srv'),
      'log-file': self.getTempPath('redis.log'),
      'wrapper': self.getTempPath('wrapper'),
      'server-bin': '/path/to/bin/redis-server',
    }

  def setUp(self):
    self.tmp_dir = tempfile.mkdtemp()
    self.addCleanup(shutil.rmtree, self.tmp_dir)
    self.getTempPath = functools.partial(os.path.join, self.tmp_dir)
    self.buildout = buildout = zc.buildout.testing.Buildout()
    self.config = self.getConfig()
    buildout['redis'] = self.config
    from slapos.recipe import redis
    self.recipe = redis.Recipe(buildout, "redis", buildout['redis'])

  def test_install(self):
    self.installed = self.recipe.install()

    redis_cfg = self.installed[0]
    self.assertEqual(redis_cfg, self.config['config-file'])
    with open(redis_cfg) as f:
      self.assertIn(self.config['pid-file'], f.read())

    wrapper = self.installed[1]
    self.assertEqual(wrapper, self.getConfig()['wrapper'])
    with open(wrapper) as f:
      self.assertIn('/path/to/bin/redis-server', f.read())


class TestRedisWithUnixSocket(TestRedis):
  def getConfig(self):
    return dict(
      super(TestRedisWithUnixSocket, self).getConfig(),
      unixsocket=self.getTempPath('redis.sock'))


class TestRedisWithPassword(TestRedis):
  def getConfig(self):
    return dict(
      super(TestRedisWithPassword, self).getConfig(),
      passwd='secret')


class TestRedisWithPromise(TestRedis):
  def getConfig(self):
    return dict(
      super(TestRedisWithPromise, self).getConfig(),
      **{
        'cli-bin': '/path/to/bin/redis-cli',
        'promise-wrapper': self.getTempPath('promise-wrapper'),
      })

  def test_install(self):
    super(TestRedisWithPromise, self).test_install()
    promise_wrapper = self.installed[2]
    self.assertEqual(promise_wrapper, self.getConfig()['promise-wrapper'])
    with open(promise_wrapper) as f:
      self.assertIn('/path/to/bin/redis-cli', f.read())


class TestRedisWithUnixSocketAndPromise(TestRedisWithPromise):
  def getConfig(self):
    return dict(
      super(TestRedisWithUnixSocketAndPromise, self).getConfig(),
      unixsocket=self.getTempPath('redis.sock'))
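
These are plain unittest test cases; one possible way to run them directly
(assuming the slapos.cookbook egg and zc.buildout are importable in the
environment):

    import unittest

    suite = unittest.defaultTestLoader.loadTestsFromName(
        'slapos.test.recipe.test_redis')
    unittest.TextTestRunner(verbosity=2).run(suite)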
software/beremiz-ide/buildout.hash.cfg View file @ 747ce243
@@ -23,7 +23,7 @@ md5sum = 51071494633f4ffba700baf935dc6955
 [template-instance-beremiz-test]
 filename = instance-beremiz-test.cfg.jinja2.in
-md5sum = 6049681908c5619d94499a6f4f224045
+md5sum = ff7cf06927041f6aec5ad559950b69cb

 [template-fluxbox-menu.in]
 filename = fluxbox-menu.in
software/beremiz-ide/instance-beremiz-test.cfg.jinja2.in View file @ 747ce243
@@ -30,7 +30,11 @@ recipe = slapos.recipe.template
 inline =
   #!/bin/sh -e
   cd {{ beremiz_location }}/tests
-  make test_dir=${directory:tests} xserver_command='echo "Using ${xserver:display} on Slapos X Server !";' "$@"
+  testdir=$SLAPOS_TEST_LOG_DIRECTORY
+  if [ -z "$testdir" ]; then
+    testdir=${directory:tests}
+  fi
+  make test_dir=$testdir xserver_command='echo "Using ${xserver:display} on Slapos X Server !";' "$@"
 output = ${directory:bin}/beremiztest

 [sikulix]
software/theia/buildout.hash.cfg View file @ 747ce243
@@ -51,7 +51,7 @@ md5sum = 1a668d6203d42b4d46d56e24c7606cb2
 [python-language-server-requirements.txt]
 _update_hash_filename_ = python-language-server-requirements.txt
-md5sum = febc3e1e18e8e831ac5561e29c3b23d7
+md5sum = bdcf72fc278487de721753f67fd97d56

 [slapos.css.in]
 _update_hash_filename_ = slapos.css.in
software/theia/python-language-server-requirements.txt View file @ 747ce243
-appdirs==1.4.4
-astroid==2.11.2
-attrs==21.2.0
-black==20.8b1
-cachetools==4.2.4
-certifi==2021.5.30
-chardet==4.0.0
-click==7.1.2
+astroid==2.11.4
+black==22.3.0
+cachetools==5.0.0
+certifi==2021.10.8
+charset-normalizer==2.0.12
+click==8.1.3
 dill==0.3.4
 future==0.18.2
-idna==2.10
-importlib-metadata==3.10.0
-iniconfig==1.1.1
+idna==3.3
+importlib-metadata==4.11.3
 isort==5.10.1
-jedi==0.18.0
+jedi==0.18.1
 lazy-object-proxy==1.7.1
 mccabe==0.7.0
+mypy==0.950
 mypy-extensions==0.4.3
-mypy==0.942
-packaging==21.0
-parso==0.8.2
-pathspec==0.8.1
-platformdirs==2.5.1
-pluggy==0.13.1
+packaging==21.3
+parso==0.8.3
+pathspec==0.9.0
+platformdirs==2.5.2
 pydantic==1.8.2
 pydocstyle==6.0.0
 pyflags==0.1
 pyflakes==2.1.0
-pygls==0.11.1
-pylint==2.13.1
-pyparsing==2.4.7
-regex==2021.4.4
-requests==2.25.1
-rope==0.11.0
-six==1.16.0
-snowballstemmer==1.2.1
+pygls==0.11.3
+pylint==2.13.8
+pyparsing==3.0.8
+requests==2.27.1
 tomli==2.0.1
-typed-ast==1.4.1
-typeguard==2.12.1
-types-requests==2.25.0
-types-toml==0.1.3
-typing-extensions==4.1.1
+typed-ast==1.5.3
+typeguard==2.13.3
+typing_extensions==4.2.0
 urllib3==1.26.9
-wrapt==1.14.0
-yapf==0.31.0
-zc.buildout.languageserver==0.7.0
-zc.buildout==2.13.4
-zipp==3.4.1
\ No newline at end of file
+wrapt==1.14.1
+yapf==0.32.0
+zc.buildout==2.13.7
+zc.buildout.languageserver==0.7.1
+zipp==3.8.0