slapos / Commits / c658d8ca

Commit c658d8ca, authored Mar 28, 2013 by Alain Takoudjou

    Add promise to redis recipe, pin version and add certificate

parent 65654894

Showing 6 changed files with 2224 additions and 3 deletions (+2224 / -3):

    slapos/recipe/redis/MyRedis2410.py          +2005  -0
    slapos/recipe/redis/__init__.py                +9  -0
    slapos/recipe/redis/promise.py                +18  -0
    slapos/recipe/redis/template/redis.conf.in     +1  -1
    software/redis-server/instance-redis.cfg       +2  -0
    software/redis-server/software.cfg           +189  -2
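Only the first of these files, the vendored client slapos/recipe/redis/MyRedis2410.py, is reproduced below; the promise script itself (slapos/recipe/redis/promise.py, +18 lines) is not part of this excerpt. Purely as an illustration of the idea behind a Redis promise, a minimal liveness check could PING the server with the vendored client and report failure through its exit code. Everything in this sketch (module name, host, port) is an assumption, not the actual content of promise.py:

#!/usr/bin/env python
# Hypothetical sketch only -- not the promise.py added by this commit.
# Assumes the vendored client is importable as MyRedis2410 and that the
# redis instance listens on the IPv6 loopback (the vendored Connection
# class below opens an AF_INET6 socket).
import sys
import MyRedis2410 as redis

try:
    client = redis.StrictRedis(host='::1', port=6379)
    ok = client.ping()          # parsed to True when the server answers PONG
except redis.ConnectionError:
    ok = False
sys.exit(0 if ok else 1)        # a non-zero exit marks the promise as failed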
slapos/recipe/redis/MyRedis2410.py  (new file, mode 100644) @ c658d8ca
from __future__ import with_statement

"Core exceptions raised by the Redis client"

class RedisError(Exception):
    pass

class AuthenticationError(RedisError):
    pass

class ConnectionError(RedisError):
    pass

class ResponseError(RedisError):
    pass

class InvalidResponse(RedisError):
    pass

class DataError(RedisError):
    pass

class PubSubError(RedisError):
    pass

class WatchError(RedisError):
    pass

import socket
from itertools import chain, imap
#from redis.exceptions import ConnectionError, ResponseError, InvalidResponse

try:
    from cStringIO import StringIO
except ImportError:
    from StringIO import StringIO
class PythonParser(object):
    "Plain Python parsing class"
    MAX_READ_LENGTH = 1000000

    def __init__(self):
        self._fp = None

    def __del__(self):
        try:
            self.on_disconnect()
        except:
            pass

    def on_connect(self, connection):
        "Called when the socket connects"
        self._fp = connection._sock.makefile('r')

    def on_disconnect(self):
        "Called when the socket disconnects"
        if self._fp is not None:
            self._fp.close()
            self._fp = None

    def read(self, length=None):
        """
        Read a line from the socket if no length is specified,
        otherwise read ``length`` bytes. Always strip away the newlines.
        """
        try:
            if length is not None:
                bytes_left = length + 2  # read the line ending
                if length > self.MAX_READ_LENGTH:
                    # apparently reading more than 1MB or so from a windows
                    # socket can cause MemoryErrors. See:
                    # https://github.com/andymccurdy/redis-py/issues/205
                    # read smaller chunks at a time to work around this
                    try:
                        buf = StringIO()
                        while bytes_left > 0:
                            read_len = min(bytes_left, self.MAX_READ_LENGTH)
                            buf.write(self._fp.read(read_len))
                            bytes_left -= read_len
                        buf.seek(0)
                        return buf.read(length)
                    finally:
                        buf.close()
                return self._fp.read(bytes_left)[:-2]

            # no length, read a full line
            return self._fp.readline()[:-2]
        except (socket.error, socket.timeout), e:
            raise ConnectionError("Error while reading from socket: %s" % \
                (e.args,))

    def read_response(self):
        response = self.read()
        if not response:
            raise ConnectionError("Socket closed on remote end")

        byte, response = response[0], response[1:]

        # server returned an error
        if byte == '-':
            if response.startswith('ERR '):
                response = response[4:]
                return ResponseError(response)
            if response.startswith('LOADING '):
                # If we're loading the dataset into memory, kill the socket
                # so we re-initialize (and re-SELECT) next time.
                raise ConnectionError("Redis is loading data into memory")
        # single value
        elif byte == '+':
            return response
        # int value
        elif byte == ':':
            return long(response)
        # bulk response
        elif byte == '$':
            length = int(response)
            if length == -1:
                return None
            response = self.read(length)
            return response
        # multi-bulk response
        elif byte == '*':
            length = int(response)
            if length == -1:
                return None
            return [self.read_response() for i in xrange(length)]
        raise InvalidResponse("Protocol Error")
class HiredisParser(object):
    "Parser class for connections using Hiredis"
    def __del__(self):
        try:
            self.on_disconnect()
        except:
            pass

    def on_connect(self, connection):
        self._sock = connection._sock
        self._reader = hiredis.Reader(
            protocolError=InvalidResponse,
            replyError=ResponseError)

    def on_disconnect(self):
        self._sock = None
        self._reader = None

    def read_response(self):
        if not self._reader:
            raise ConnectionError("Socket closed on remote end")
        response = self._reader.gets()
        while response is False:
            try:
                buffer = self._sock.recv(4096)
            except (socket.error, socket.timeout), e:
                raise ConnectionError("Error while reading from socket: %s" % \
                    (e.args,))
            if not buffer:
                raise ConnectionError("Socket closed on remote end")
            self._reader.feed(buffer)
            # proactively, but not conclusively, check if more data is in the
            # buffer. if the data received doesn't end with \n, there's more.
            if not buffer.endswith('\n'):
                continue
            response = self._reader.gets()
        return response

try:
    import hiredis
    DefaultParser = HiredisParser
except ImportError:
    DefaultParser = PythonParser
class Connection(object):
    "Manages TCP communication to and from a Redis server"
    def __init__(self, host='localhost', port=6379, db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', parser_class=DefaultParser):
        self.host = host
        self.port = port
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self._sock = None
        self._parser = parser_class()

    def __del__(self):
        try:
            self.disconnect()
        except:
            pass

    def connect(self):
        "Connects to the Redis server if not already connected"
        if self._sock:
            return
        try:
            sock = self._connect()
        except socket.error, e:
            raise ConnectionError(self._error_message(e))

        self._sock = sock
        self.on_connect()

    def _connect(self):
        "Create a TCP socket connection"
        sock = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect((self.host, self.port))
        return sock

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to %s:%s. %s." % \
                (self.host, self.port, exception.args[0])
        else:
            return "Error %s connecting %s:%s. %s." % \
                (exception.args[0], self.host, self.port, exception.args[1])

    def on_connect(self):
        "Initialize the connection, authenticate and select a database"
        self._parser.on_connect(self)

        # if a password is specified, authenticate
        if self.password:
            self.send_command('AUTH', self.password)
            if self.read_response() != 'OK':
                raise ConnectionError('Invalid Password')

        # if a database is specified, switch to it
        if self.db:
            self.send_command('SELECT', self.db)
            if self.read_response() != 'OK':
                raise ConnectionError('Invalid Database')

    def disconnect(self):
        "Disconnects from the Redis server"
        self._parser.on_disconnect()
        if self._sock is None:
            return
        try:
            self._sock.close()
        except socket.error:
            pass
        self._sock = None

    def send_packed_command(self, command):
        "Send an already packed command to the Redis server"
        if not self._sock:
            self.connect()
        try:
            self._sock.sendall(command)
        except socket.error, e:
            self.disconnect()
            if len(e.args) == 1:
                _errno, errmsg = 'UNKNOWN', e.args[0]
            else:
                _errno, errmsg = e.args
            raise ConnectionError("Error %s while writing to socket. %s." % \
                (_errno, errmsg))
        except:
            self.disconnect()
            raise

    def send_command(self, *args):
        "Pack and send a command to the Redis server"
        self.send_packed_command(self.pack_command(*args))

    def read_response(self):
        "Read the response from a previously sent command"
        try:
            response = self._parser.read_response()
        except:
            self.disconnect()
            raise
        if response.__class__ == ResponseError:
            raise response
        return response

    def encode(self, value):
        "Return a bytestring representation of the value"
        if isinstance(value, unicode):
            return value.encode(self.encoding, self.encoding_errors)
        return str(value)

    def pack_command(self, *args):
        "Pack a series of arguments into a value Redis command"
        command = ['$%s\r\n%s\r\n' % (len(enc_value), enc_value)
                   for enc_value in imap(self.encode, args)]
        return '*%s\r\n%s' % (len(command), ''.join(command))
class UnixDomainSocketConnection(Connection):
    def __init__(self, path='', db=0, password=None,
                 socket_timeout=None, encoding='utf-8',
                 encoding_errors='strict', parser_class=DefaultParser):
        self.path = path
        self.db = db
        self.password = password
        self.socket_timeout = socket_timeout
        self.encoding = encoding
        self.encoding_errors = encoding_errors
        self._sock = None
        self._parser = parser_class()

    def _connect(self):
        "Create a Unix domain socket connection"
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.settimeout(self.socket_timeout)
        sock.connect(self.path)
        return sock

    def _error_message(self, exception):
        # args for socket.error can either be (errno, "message")
        # or just "message"
        if len(exception.args) == 1:
            return "Error connecting to unix socket: %s. %s." % \
                (self.path, exception.args[0])
        else:
            return "Error %s connecting to unix socket: %s. %s." % \
                (exception.args[0], self.path, exception.args[1])
# TODO: add ability to block waiting on a connection to be released
class ConnectionPool(object):
    "Generic connection pool"
    def __init__(self, connection_class=Connection, max_connections=None,
                 **connection_kwargs):
        self.connection_class = connection_class
        self.connection_kwargs = connection_kwargs
        self.max_connections = max_connections or 2 ** 31
        self._created_connections = 0
        self._available_connections = []
        self._in_use_connections = set()

    def get_connection(self, command_name, *keys, **options):
        "Get a connection from the pool"
        try:
            connection = self._available_connections.pop()
        except IndexError:
            connection = self.make_connection()
        self._in_use_connections.add(connection)
        return connection

    def make_connection(self):
        "Create a new connection"
        if self._created_connections >= self.max_connections:
            raise ConnectionError("Too many connections")
        self._created_connections += 1
        return self.connection_class(**self.connection_kwargs)

    def release(self, connection):
        "Releases the connection back to the pool"
        self._in_use_connections.remove(connection)
        self._available_connections.append(connection)

    def disconnect(self):
        "Disconnects all connections in the pool"
        all_conns = chain(self._available_connections,
                          self._in_use_connections)
        for connection in all_conns:
            connection.disconnect()
import datetime
import time
import warnings
from itertools import imap, izip, starmap

#from redis.connection import ConnectionPool, UnixDomainSocketConnection
#from redis.exceptions import (
#    ConnectionError,
#    DataError,
#    RedisError,
#    ResponseError,
#    WatchError,
#)
def list_or_args(keys, args):
    # returns a single list combining keys and args
    try:
        i = iter(keys)
        # a string can be iterated, but indicates
        # keys wasn't passed as a list
        if isinstance(keys, basestring):
            keys = [keys]
    except TypeError:
        keys = [keys]
    if args:
        keys.extend(args)
    return keys

def timestamp_to_datetime(response):
    "Converts a unix timestamp to a Python datetime object"
    if not response:
        return None
    try:
        response = int(response)
    except ValueError:
        return None
    return datetime.datetime.fromtimestamp(response)

def string_keys_to_dict(key_string, callback):
    return dict.fromkeys(key_string.split(), callback)

def dict_merge(*dicts):
    merged = {}
    [merged.update(d) for d in dicts]
    return merged

def parse_debug_object(response):
    "Parse the results of Redis's DEBUG OBJECT command into a Python dict"
    # The 'type' of the object is the first item in the response, but isn't
    # prefixed with a name
    response = 'type:' + response
    response = dict([kv.split(':') for kv in response.split()])

    # parse some expected int values from the string response
    # note: this cmd isn't spec'd so these may not appear in all redis versions
    int_fields = ('refcount', 'serializedlength', 'lru', 'lru_seconds_idle')
    for field in int_fields:
        if field in response:
            response[field] = int(response[field])

    return response

def parse_object(response, infotype):
    "Parse the results of an OBJECT command"
    if infotype in ('idletime', 'refcount'):
        return int(response)
    return response

def parse_info(response):
    "Parse the result of Redis's INFO command into a Python dict"
    info = {}

    def get_value(value):
        if ',' not in value:
            return value
        sub_dict = {}
        for item in value.split(','):
            k, v = item.rsplit('=', 1)
            try:
                sub_dict[k] = int(v)
            except ValueError:
                sub_dict[k] = v
        return sub_dict

    for line in response.splitlines():
        if line and not line.startswith('#'):
            key, value = line.split(':')
            try:
                if '.' in value:
                    info[key] = float(value)
                else:
                    info[key] = int(value)
            except ValueError:
                info[key] = get_value(value)
    return info

def pairs_to_dict(response):
    "Create a dict given a list of key/value pairs"
    it = iter(response)
    return dict(izip(it, it))

def zset_score_pairs(response, **options):
    """
    If ``withscores`` is specified in the options, return the response as
    a list of (value, score) pairs
    """
    if not response or not options['withscores']:
        return response
    score_cast_func = options.get('score_cast_func', float)
    it = iter(response)
    return zip(it, imap(score_cast_func, it))

def int_or_none(response):
    if response is None:
        return None
    return int(response)

def float_or_none(response):
    if response is None:
        return None
    return float(response)

def parse_config(response, **options):
    # this is stupid, but don't have a better option right now
    if options['parse'] == 'GET':
        return response and pairs_to_dict(response) or {}
    return response == 'OK'
class StrictRedis(object):
    """
    Implementation of the Redis protocol.

    This abstract class provides a Python interface to all Redis commands
    and an implementation of the Redis protocol.

    Connection and Pipeline derive from this, implementing how
    the commands are sent and received to the Redis server
    """
    RESPONSE_CALLBACKS = dict_merge(
        string_keys_to_dict(
            'AUTH DEL EXISTS EXPIRE EXPIREAT HDEL HEXISTS HMSET MOVE MSETNX '
            'PERSIST RENAMENX SISMEMBER SMOVE SETEX SETNX SREM ZREM',
            bool
            ),
        string_keys_to_dict(
            'DECRBY GETBIT HLEN INCRBY LINSERT LLEN LPUSHX RPUSHX SADD SCARD '
            'SDIFFSTORE SETBIT SETRANGE SINTERSTORE STRLEN SUNIONSTORE ZADD '
            'ZCARD ZREMRANGEBYRANK ZREMRANGEBYSCORE',
            int
            ),
        string_keys_to_dict(
            # these return OK, or int if redis-server is >=1.3.4
            'LPUSH RPUSH',
            lambda r: isinstance(r, long) and r or r == 'OK'
            ),
        string_keys_to_dict('ZSCORE ZINCRBY', float_or_none),
        string_keys_to_dict(
            'FLUSHALL FLUSHDB LSET LTRIM MSET RENAME '
            'SAVE SELECT SET SHUTDOWN SLAVEOF WATCH UNWATCH',
            lambda r: r == 'OK'
            ),
        string_keys_to_dict('BLPOP BRPOP', lambda r: r and tuple(r) or None),
        string_keys_to_dict(
            'SDIFF SINTER SMEMBERS SUNION',
            lambda r: r and set(r) or set()
            ),
        string_keys_to_dict(
            'ZRANGE ZRANGEBYSCORE ZREVRANGE ZREVRANGEBYSCORE',
            zset_score_pairs
            ),
        string_keys_to_dict('ZRANK ZREVRANK', int_or_none),
        {
            'BGREWRITEAOF': lambda r: \
                r == 'Background rewriting of AOF file started',
            'BGSAVE': lambda r: r == 'Background saving started',
            'BRPOPLPUSH': lambda r: r and r or None,
            'CONFIG': parse_config,
            'DEBUG': parse_debug_object,
            'HGETALL': lambda r: r and pairs_to_dict(r) or {},
            'INFO': parse_info,
            'LASTSAVE': timestamp_to_datetime,
            'OBJECT': parse_object,
            'PING': lambda r: r == 'PONG',
            'RANDOMKEY': lambda r: r and r or None,
        }
        )
    def __init__(self, host='localhost', port=6379,
                 db=0, password=None, socket_timeout=None,
                 connection_pool=None, charset='utf-8',
                 errors='strict', unix_socket_path=None):
        if not connection_pool:
            kwargs = {
                'db': db,
                'password': password,
                'socket_timeout': socket_timeout,
                'encoding': charset,
                'encoding_errors': errors
                }
            # based on input, setup appropriate connection args
            if unix_socket_path:
                kwargs.update({
                    'path': unix_socket_path,
                    'connection_class': UnixDomainSocketConnection
                    })
            else:
                kwargs.update({
                    'host': host,
                    'port': port
                    })
            connection_pool = ConnectionPool(**kwargs)
        self.connection_pool = connection_pool

        self.response_callbacks = self.__class__.RESPONSE_CALLBACKS.copy()
    def set_response_callback(self, command, callback):
        "Set a custom Response Callback"
        self.response_callbacks[command] = callback

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        """
        return StrictPipeline(
            self.connection_pool,
            self.response_callbacks,
            transaction,
            shard_hint)

    def transaction(self, func, *watches, **kwargs):
        """
        Convenience method for executing the callable `func` as a transaction
        while watching all keys specified in `watches`. The 'func' callable
        should expect a single argument which is a Pipeline object.
        """
        shard_hint = kwargs.pop('shard_hint', None)
        with self.pipeline(True, shard_hint) as pipe:
            while 1:
                try:
                    pipe.watch(*watches)
                    func(pipe)
                    return pipe.execute()
                except WatchError:
                    continue

    def lock(self, name, timeout=None, sleep=0.1):
        """
        Return a new Lock object using key ``name`` that mimics
        the behavior of threading.Lock.

        If specified, ``timeout`` indicates a maximum life for the lock.
        By default, it will remain locked until release() is called.

        ``sleep`` indicates the amount of time to sleep per loop iteration
        when the lock is in blocking mode and another client is currently
        holding the lock.
        """
        return Lock(self, name, timeout=timeout, sleep=sleep)

    def pubsub(self, shard_hint=None):
        """
        Return a Publish/Subscribe object. With this object, you can
        subscribe to channels and listen for messages that get published to
        them.
        """
        return PubSub(self.connection_pool, shard_hint)
    #### COMMAND EXECUTION AND PROTOCOL PARSING ####
    def execute_command(self, *args, **options):
        "Execute a command and return a parsed response"
        pool = self.connection_pool
        command_name = args[0]
        connection = pool.get_connection(command_name, **options)
        try:
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        except ConnectionError:
            connection.disconnect()
            connection.send_command(*args)
            return self.parse_response(connection, command_name, **options)
        finally:
            pool.release(connection)

    def parse_response(self, connection, command_name, **options):
        "Parses a response from the Redis server"
        response = connection.read_response()
        if command_name in self.response_callbacks:
            return self.response_callbacks[command_name](response, **options)
        return response
    #### SERVER INFORMATION ####
    def bgrewriteaof(self):
        "Tell the Redis server to rewrite the AOF file from data in memory."
        return self.execute_command('BGREWRITEAOF')

    def bgsave(self):
        """
        Tell the Redis server to save its data to disk. Unlike save(),
        this method is asynchronous and returns immediately.
        """
        return self.execute_command('BGSAVE')

    def config_get(self, pattern="*"):
        "Return a dictionary of configuration based on the ``pattern``"
        return self.execute_command('CONFIG', 'GET', pattern, parse='GET')

    def config_set(self, name, value):
        "Set config item ``name`` with ``value``"
        return self.execute_command('CONFIG', 'SET', name, value, parse='SET')

    def dbsize(self):
        "Returns the number of keys in the current database"
        return self.execute_command('DBSIZE')

    def debug_object(self, key):
        "Returns version specific metainformation about a given key"
        return self.execute_command('DEBUG', 'OBJECT', key)

    def delete(self, *names):
        "Delete one or more keys specified by ``names``"
        return self.execute_command('DEL', *names)
    __delitem__ = delete

    def echo(self, value):
        "Echo the string back from the server"
        return self.execute_command('ECHO', value)

    def flushall(self):
        "Delete all keys in all databases on the current host"
        return self.execute_command('FLUSHALL')

    def flushdb(self):
        "Delete all keys in the current database"
        return self.execute_command('FLUSHDB')

    def info(self):
        "Returns a dictionary containing information about the Redis server"
        return self.execute_command('INFO')

    def lastsave(self):
        """
        Return a Python datetime object representing the last time the
        Redis database was saved to disk
        """
        return self.execute_command('LASTSAVE')

    def object(self, infotype, key):
        "Return the encoding, idletime, or refcount about the key"
        return self.execute_command('OBJECT', infotype, key, infotype=infotype)

    def ping(self):
        "Ping the Redis server"
        return self.execute_command('PING')

    def save(self):
        """
        Tell the Redis server to save its data to disk,
        blocking until the save is complete
        """
        return self.execute_command('SAVE')

    def shutdown(self):
        "Shutdown the server"
        try:
            self.execute_command('SHUTDOWN')
        except ConnectionError:
            # a ConnectionError here is expected
            return
        raise RedisError("SHUTDOWN seems to have failed.")

    def slaveof(self, host=None, port=None):
        """
        Set the server to be a replicated slave of the instance identified
        by the ``host`` and ``port``. If called without arguments, the
        instance is promoted to a master instead.
        """
        if host is None and port is None:
            return self.execute_command("SLAVEOF", "NO", "ONE")
        return self.execute_command("SLAVEOF", host, port)
    #### BASIC KEY COMMANDS ####
    def append(self, key, value):
        """
        Appends the string ``value`` to the value at ``key``. If ``key``
        doesn't already exist, create it with a value of ``value``.
        Returns the new length of the value at ``key``.
        """
        return self.execute_command('APPEND', key, value)

    def decr(self, name, amount=1):
        """
        Decrements the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as 0 - ``amount``
        """
        return self.execute_command('DECRBY', name, amount)

    def exists(self, name):
        "Returns a boolean indicating whether key ``name`` exists"
        return self.execute_command('EXISTS', name)
    __contains__ = exists

    def expire(self, name, time):
        "Set an expire flag on key ``name`` for ``time`` seconds"
        return self.execute_command('EXPIRE', name, time)

    def expireat(self, name, when):
        """
        Set an expire flag on key ``name``. ``when`` can be represented
        as an integer indicating unix time or a Python datetime object.
        """
        if isinstance(when, datetime.datetime):
            when = int(time.mktime(when.timetuple()))
        return self.execute_command('EXPIREAT', name, when)

    def get(self, name):
        """
        Return the value at key ``name``, or None if the key doesn't exist
        """
        return self.execute_command('GET', name)

    def __getitem__(self, name):
        """
        Return the value at key ``name``, raises a KeyError if the key
        doesn't exist.
        """
        value = self.get(name)
        if value:
            return value
        raise KeyError(name)

    def getbit(self, name, offset):
        "Returns a boolean indicating the value of ``offset`` in ``name``"
        return self.execute_command('GETBIT', name, offset)

    def getset(self, name, value):
        """
        Set the value at key ``name`` to ``value`` if key doesn't exist
        Return the value at key ``name`` atomically
        """
        return self.execute_command('GETSET', name, value)

    def incr(self, name, amount=1):
        """
        Increments the value of ``key`` by ``amount``. If no key exists,
        the value will be initialized as ``amount``
        """
        return self.execute_command('INCRBY', name, amount)

    def keys(self, pattern='*'):
        "Returns a list of keys matching ``pattern``"
        return self.execute_command('KEYS', pattern)

    def mget(self, keys, *args):
        """
        Returns a list of values ordered identically to ``keys``
        """
        keys = list_or_args(keys, args)
        return self.execute_command('MGET', *keys)

    def mset(self, mapping):
        "Sets each key in the ``mapping`` dict to its corresponding value"
        items = []
        for pair in mapping.iteritems():
            items.extend(pair)
        return self.execute_command('MSET', *items)

    def msetnx(self, mapping):
        """
        Sets each key in the ``mapping`` dict to its corresponding value if
        none of the keys are already set
        """
        items = []
        for pair in mapping.iteritems():
            items.extend(pair)
        return self.execute_command('MSETNX', *items)

    def move(self, name, db):
        "Moves the key ``name`` to a different Redis database ``db``"
        return self.execute_command('MOVE', name, db)

    def persist(self, name):
        "Removes an expiration on ``name``"
        return self.execute_command('PERSIST', name)

    def randomkey(self):
        "Returns the name of a random key"
        return self.execute_command('RANDOMKEY')

    def rename(self, src, dst):
        """
        Rename key ``src`` to ``dst``
        """
        return self.execute_command('RENAME', src, dst)

    def renamenx(self, src, dst):
        "Rename key ``src`` to ``dst`` if ``dst`` doesn't already exist"
        return self.execute_command('RENAMENX', src, dst)

    def set(self, name, value):
        "Set the value at key ``name`` to ``value``"
        return self.execute_command('SET', name, value)
    __setitem__ = set

    def setbit(self, name, offset, value):
        """
        Flag the ``offset`` in ``name`` as ``value``. Returns a boolean
        indicating the previous value of ``offset``.
        """
        value = value and 1 or 0
        return self.execute_command('SETBIT', name, offset, value)

    def setex(self, name, time, value):
        """
        Set the value of key ``name`` to ``value``
        that expires in ``time`` seconds
        """
        return self.execute_command('SETEX', name, time, value)

    def setnx(self, name, value):
        "Set the value of key ``name`` to ``value`` if key doesn't exist"
        return self.execute_command('SETNX', name, value)

    def setrange(self, name, offset, value):
        """
        Overwrite bytes in the value of ``name`` starting at ``offset`` with
        ``value``. If ``offset`` plus the length of ``value`` exceeds the
        length of the original value, the new value will be larger than before.
        If ``offset`` exceeds the length of the original value, null bytes
        will be used to pad between the end of the previous value and the start
        of what's being injected.

        Returns the length of the new string.
        """
        return self.execute_command('SETRANGE', name, offset, value)

    def strlen(self, name):
        "Return the number of bytes stored in the value of ``name``"
        return self.execute_command('STRLEN', name)

    def substr(self, name, start, end=-1):
        """
        Return a substring of the string at key ``name``. ``start`` and ``end``
        are 0-based integers specifying the portion of the string to return.
        """
        return self.execute_command('SUBSTR', name, start, end)

    def ttl(self, name):
        "Returns the number of seconds until the key ``name`` will expire"
        return self.execute_command('TTL', name)

    def type(self, name):
        "Returns the type of key ``name``"
        return self.execute_command('TYPE', name)

    def watch(self, *names):
        """
        Watches the values at keys ``names``, or None if the key doesn't exist
        """
        warnings.warn(DeprecationWarning('Call WATCH from a Pipeline object'))

    def unwatch(self):
        """
        Unwatches the value at key ``name``, or None if the key doesn't exist
        """
        warnings.warn(DeprecationWarning('Call UNWATCH from a Pipeline object'))
    #### LIST COMMANDS ####
    def blpop(self, keys, timeout=0):
        """
        LPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to LPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
        """
        if timeout is None:
            timeout = 0
        if isinstance(keys, basestring):
            keys = [keys]
        else:
            keys = list(keys)
        keys.append(timeout)
        return self.execute_command('BLPOP', *keys)

    def brpop(self, keys, timeout=0):
        """
        RPOP a value off of the first non-empty list
        named in the ``keys`` list.

        If none of the lists in ``keys`` has a value to RPOP, then block
        for ``timeout`` seconds, or until a value gets pushed on to one
        of the lists.

        If timeout is 0, then block indefinitely.
        """
        if timeout is None:
            timeout = 0
        if isinstance(keys, basestring):
            keys = [keys]
        else:
            keys = list(keys)
        keys.append(timeout)
        return self.execute_command('BRPOP', *keys)

    def brpoplpush(self, src, dst, timeout=0):
        """
        Pop a value off the tail of ``src``, push it on the head of ``dst``
        and then return it.

        This command blocks until a value is in ``src`` or until ``timeout``
        seconds elapse, whichever is first. A ``timeout`` value of 0 blocks
        forever.
        """
        if timeout is None:
            timeout = 0
        return self.execute_command('BRPOPLPUSH', src, dst, timeout)

    def lindex(self, name, index):
        """
        Return the item from list ``name`` at position ``index``

        Negative indexes are supported and will return an item at the
        end of the list
        """
        return self.execute_command('LINDEX', name, index)

    def linsert(self, name, where, refvalue, value):
        """
        Insert ``value`` in list ``name`` either immediately before or after
        [``where``] ``refvalue``

        Returns the new length of the list on success or -1 if ``refvalue``
        is not in the list.
        """
        return self.execute_command('LINSERT', name, where, refvalue, value)

    def llen(self, name):
        "Return the length of the list ``name``"
        return self.execute_command('LLEN', name)

    def lpop(self, name):
        "Remove and return the first item of the list ``name``"
        return self.execute_command('LPOP', name)

    def lpush(self, name, *values):
        "Push ``values`` onto the head of the list ``name``"
        return self.execute_command('LPUSH', name, *values)

    def lpushx(self, name, value):
        "Push ``value`` onto the head of the list ``name`` if ``name`` exists"
        return self.execute_command('LPUSHX', name, value)

    def lrange(self, name, start, end):
        """
        Return a slice of the list ``name`` between
        position ``start`` and ``end``

        ``start`` and ``end`` can be negative numbers just like
        Python slicing notation
        """
        return self.execute_command('LRANGE', name, start, end)

    def lrem(self, name, count, value):
        """
        Remove the first ``count`` occurrences of elements equal to ``value``
        from the list stored at ``name``.

        The count argument influences the operation in the following ways:
            count > 0: Remove elements equal to value moving from head to tail.
            count < 0: Remove elements equal to value moving from tail to head.
            count = 0: Remove all elements equal to value.
        """
        return self.execute_command('LREM', name, count, value)

    def lset(self, name, index, value):
        "Set ``position`` of list ``name`` to ``value``"
        return self.execute_command('LSET', name, index, value)

    def ltrim(self, name, start, end):
        """
        Trim the list ``name``, removing all values not within the slice
        between ``start`` and ``end``

        ``start`` and ``end`` can be negative numbers just like
        Python slicing notation
        """
        return self.execute_command('LTRIM', name, start, end)

    def rpop(self, name):
        "Remove and return the last item of the list ``name``"
        return self.execute_command('RPOP', name)

    def rpoplpush(self, src, dst):
        """
        RPOP a value off of the ``src`` list and atomically LPUSH it
        on to the ``dst`` list.  Returns the value.
        """
        return self.execute_command('RPOPLPUSH', src, dst)

    def rpush(self, name, *values):
        "Push ``values`` onto the tail of the list ``name``"
        return self.execute_command('RPUSH', name, *values)

    def rpushx(self, name, value):
        "Push ``value`` onto the tail of the list ``name`` if ``name`` exists"
        return self.execute_command('RPUSHX', name, value)

    def sort(self, name, start=None, num=None, by=None, get=None,
             desc=False, alpha=False, store=None):
        """
        Sort and return the list, set or sorted set at ``name``.

        ``start`` and ``num`` allow for paging through the sorted data

        ``by`` allows using an external key to weight and sort the items.
            Use an "*" to indicate where in the key the item value is located

        ``get`` allows for returning items from external keys rather than the
            sorted data itself. Use an "*" to indicate where in the key
            the item value is located

        ``desc`` allows for reversing the sort

        ``alpha`` allows for sorting lexicographically rather than numerically

        ``store`` allows for storing the result of the sort into
            the key ``store``
        """
        if (start is not None and num is None) or \
                (num is not None and start is None):
            raise RedisError("``start`` and ``num`` must both be specified")

        pieces = [name]
        if by is not None:
            pieces.append('BY')
            pieces.append(by)
        if start is not None and num is not None:
            pieces.append('LIMIT')
            pieces.append(start)
            pieces.append(num)
        if get is not None:
            # If get is a string assume we want to get a single value.
            # Otherwise assume it's an iterable and we want to get multiple
            # values. We can't just iterate blindly because strings are
            # iterable.
            if isinstance(get, basestring):
                pieces.append('GET')
                pieces.append(get)
            else:
                for g in get:
                    pieces.append('GET')
                    pieces.append(g)
        if desc:
            pieces.append('DESC')
        if alpha:
            pieces.append('ALPHA')
        if store is not None:
            pieces.append('STORE')
            pieces.append(store)
        return self.execute_command('SORT', *pieces)
    #### SET COMMANDS ####
    def sadd(self, name, *values):
        "Add ``value(s)`` to set ``name``"
        return self.execute_command('SADD', name, *values)

    def scard(self, name):
        "Return the number of elements in set ``name``"
        return self.execute_command('SCARD', name)

    def sdiff(self, keys, *args):
        "Return the difference of sets specified by ``keys``"
        keys = list_or_args(keys, args)
        return self.execute_command('SDIFF', *keys)

    def sdiffstore(self, dest, keys, *args):
        """
        Store the difference of sets specified by ``keys`` into a new
        set named ``dest``. Returns the number of keys in the new set.
        """
        keys = list_or_args(keys, args)
        return self.execute_command('SDIFFSTORE', dest, *keys)

    def sinter(self, keys, *args):
        "Return the intersection of sets specified by ``keys``"
        keys = list_or_args(keys, args)
        return self.execute_command('SINTER', *keys)

    def sinterstore(self, dest, keys, *args):
        """
        Store the intersection of sets specified by ``keys`` into a new
        set named ``dest``. Returns the number of keys in the new set.
        """
        keys = list_or_args(keys, args)
        return self.execute_command('SINTERSTORE', dest, *keys)

    def sismember(self, name, value):
        "Return a boolean indicating if ``value`` is a member of set ``name``"
        return self.execute_command('SISMEMBER', name, value)

    def smembers(self, name):
        "Return all members of the set ``name``"
        return self.execute_command('SMEMBERS', name)

    def smove(self, src, dst, value):
        "Move ``value`` from set ``src`` to set ``dst`` atomically"
        return self.execute_command('SMOVE', src, dst, value)

    def spop(self, name):
        "Remove and return a random member of set ``name``"
        return self.execute_command('SPOP', name)

    def srandmember(self, name):
        "Return a random member of set ``name``"
        return self.execute_command('SRANDMEMBER', name)

    def srem(self, name, *values):
        "Remove ``values`` from set ``name``"
        return self.execute_command('SREM', name, *values)

    def sunion(self, keys, *args):
        "Return the union of sets specified by ``keys``"
        keys = list_or_args(keys, args)
        return self.execute_command('SUNION', *keys)

    def sunionstore(self, dest, keys, *args):
        """
        Store the union of sets specified by ``keys`` into a new
        set named ``dest``. Returns the number of keys in the new set.
        """
        keys = list_or_args(keys, args)
        return self.execute_command('SUNIONSTORE', dest, *keys)
    #### SORTED SET COMMANDS ####
    def zadd(self, name, *args, **kwargs):
        """
        Set any number of score, element-name pairs to the key ``name``. Pairs
        can be specified in two ways:

        As *args, in the form of: score1, name1, score2, name2, ...
        or as **kwargs, in the form of: name1=score1, name2=score2, ...

        The following example would add four values to the 'my-key' key:
        redis.zadd('my-key', 1.1, 'name1', 2.2, 'name2', name3=3.3, name4=4.4)
        """
        pieces = []
        if args:
            if len(args) % 2 != 0:
                raise RedisError("ZADD requires an equal number of "
                                 "values and scores")
            pieces.extend(args)
        for pair in kwargs.iteritems():
            pieces.append(pair[1])
            pieces.append(pair[0])
        return self.execute_command('ZADD', name, *pieces)

    def zcard(self, name):
        "Return the number of elements in the sorted set ``name``"
        return self.execute_command('ZCARD', name)

    def zcount(self, name, min, max):
        return self.execute_command('ZCOUNT', name, min, max)

    def zincrby(self, name, value, amount=1):
        "Increment the score of ``value`` in sorted set ``name`` by ``amount``"
        return self.execute_command('ZINCRBY', name, amount, value)

    def zinterstore(self, dest, keys, aggregate=None):
        """
        Intersect multiple sorted sets specified by ``keys`` into
        a new sorted set, ``dest``. Scores in the destination will be
        aggregated based on the ``aggregate``, or SUM if none is provided.
        """
        return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)

    def zrange(self, name, start, end, desc=False, withscores=False,
               score_cast_func=float):
        """
        Return a range of values from sorted set ``name`` between
        ``start`` and ``end`` sorted in ascending order.

        ``start`` and ``end`` can be negative, indicating the end of the range.

        ``desc`` a boolean indicating whether to sort the results descendingly

        ``withscores`` indicates to return the scores along with the values.
        The return type is a list of (value, score) pairs

        ``score_cast_func`` a callable used to cast the score return value
        """
        if desc:
            return self.zrevrange(name, start, end, withscores)
        pieces = ['ZRANGE', name, start, end]
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrangebyscore(self, name, min, max, start=None, num=None,
                      withscores=False, score_cast_func=float):
        """
        Return a range of values from the sorted set ``name`` with scores
        between ``min`` and ``max``.

        If ``start`` and ``num`` are specified, then return a slice
        of the range.

        ``withscores`` indicates to return the scores along with the values.
        The return type is a list of (value, score) pairs

        ``score_cast_func`` a callable used to cast the score return value
        """
        if (start is not None and num is None) or \
                (num is not None and start is None):
            raise RedisError("``start`` and ``num`` must both be specified")
        pieces = ['ZRANGEBYSCORE', name, min, max]
        if start is not None and num is not None:
            pieces.extend(['LIMIT', start, num])
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrank(self, name, value):
        """
        Returns a 0-based value indicating the rank of ``value`` in sorted set
        ``name``
        """
        return self.execute_command('ZRANK', name, value)

    def zrem(self, name, *values):
        "Remove member ``values`` from sorted set ``name``"
        return self.execute_command('ZREM', name, *values)

    def zremrangebyrank(self, name, min, max):
        """
        Remove all elements in the sorted set ``name`` with ranks between
        ``min`` and ``max``. Values are 0-based, ordered from smallest score
        to largest. Values can be negative indicating the highest scores.
        Returns the number of elements removed
        """
        return self.execute_command('ZREMRANGEBYRANK', name, min, max)

    def zremrangebyscore(self, name, min, max):
        """
        Remove all elements in the sorted set ``name`` with scores
        between ``min`` and ``max``. Returns the number of elements removed.
        """
        return self.execute_command('ZREMRANGEBYSCORE', name, min, max)

    def zrevrange(self, name, start, num, withscores=False,
                  score_cast_func=float):
        """
        Return a range of values from sorted set ``name`` between
        ``start`` and ``num`` sorted in descending order.

        ``start`` and ``num`` can be negative, indicating the end of the range.

        ``withscores`` indicates to return the scores along with the values
        The return type is a list of (value, score) pairs

        ``score_cast_func`` a callable used to cast the score return value
        """
        pieces = ['ZREVRANGE', name, start, num]
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrevrangebyscore(self, name, max, min, start=None, num=None,
                         withscores=False, score_cast_func=float):
        """
        Return a range of values from the sorted set ``name`` with scores
        between ``min`` and ``max`` in descending order.

        If ``start`` and ``num`` are specified, then return a slice
        of the range.

        ``withscores`` indicates to return the scores along with the values.
        The return type is a list of (value, score) pairs

        ``score_cast_func`` a callable used to cast the score return value
        """
        if (start is not None and num is None) or \
                (num is not None and start is None):
            raise RedisError("``start`` and ``num`` must both be specified")
        pieces = ['ZREVRANGEBYSCORE', name, max, min]
        if start is not None and num is not None:
            pieces.extend(['LIMIT', start, num])
        if withscores:
            pieces.append('withscores')
        options = {
            'withscores': withscores, 'score_cast_func': score_cast_func}
        return self.execute_command(*pieces, **options)

    def zrevrank(self, name, value):
        """
        Returns a 0-based value indicating the descending rank of
        ``value`` in sorted set ``name``
        """
        return self.execute_command('ZREVRANK', name, value)

    def zscore(self, name, value):
        "Return the score of element ``value`` in sorted set ``name``"
        return self.execute_command('ZSCORE', name, value)

    def zunionstore(self, dest, keys, aggregate=None):
        """
        Union multiple sorted sets specified by ``keys`` into
        a new sorted set, ``dest``. Scores in the destination will be
        aggregated based on the ``aggregate``, or SUM if none is provided.
        """
        return self._zaggregate('ZUNIONSTORE', dest, keys, aggregate)

    def _zaggregate(self, command, dest, keys, aggregate=None):
        pieces = [command, dest, len(keys)]
        if isinstance(keys, dict):
            keys, weights = keys.keys(), keys.values()
        else:
            weights = None
        pieces.extend(keys)
        if weights:
            pieces.append('WEIGHTS')
            pieces.extend(weights)
        if aggregate:
            pieces.append('AGGREGATE')
            pieces.append(aggregate)
        return self.execute_command(*pieces)
    #### HASH COMMANDS ####
    def hdel(self, name, *keys):
        "Delete ``keys`` from hash ``name``"
        return self.execute_command('HDEL', name, *keys)

    def hexists(self, name, key):
        "Returns a boolean indicating if ``key`` exists within hash ``name``"
        return self.execute_command('HEXISTS', name, key)

    def hget(self, name, key):
        "Return the value of ``key`` within the hash ``name``"
        return self.execute_command('HGET', name, key)

    def hgetall(self, name):
        "Return a Python dict of the hash's name/value pairs"
        return self.execute_command('HGETALL', name)

    def hincrby(self, name, key, amount=1):
        "Increment the value of ``key`` in hash ``name`` by ``amount``"
        return self.execute_command('HINCRBY', name, key, amount)

    def hkeys(self, name):
        "Return the list of keys within hash ``name``"
        return self.execute_command('HKEYS', name)

    def hlen(self, name):
        "Return the number of elements in hash ``name``"
        return self.execute_command('HLEN', name)

    def hset(self, name, key, value):
        """
        Set ``key`` to ``value`` within hash ``name``
        Returns 1 if HSET created a new field, otherwise 0
        """
        return self.execute_command('HSET', name, key, value)

    def hsetnx(self, name, key, value):
        """
        Set ``key`` to ``value`` within hash ``name`` if ``key`` does not
        exist. Returns 1 if HSETNX created a field, otherwise 0.
        """
        return self.execute_command("HSETNX", name, key, value)

    def hmset(self, name, mapping):
        """
        Sets each key in the ``mapping`` dict to its corresponding value
        in the hash ``name``
        """
        if not mapping:
            raise DataError("'hmset' with 'mapping' of length 0")
        items = []
        for pair in mapping.iteritems():
            items.extend(pair)
        return self.execute_command('HMSET', name, *items)

    def hmget(self, name, keys):
        "Returns a list of values ordered identically to ``keys``"
        return self.execute_command('HMGET', name, *keys)

    def hvals(self, name):
        "Return the list of values within hash ``name``"
        return self.execute_command('HVALS', name)

    def publish(self, channel, message):
        """
        Publish ``message`` on ``channel``.
        Returns the number of subscribers the message was delivered to.
        """
        return self.execute_command('PUBLISH', channel, message)
class Redis(StrictRedis):
    """
    Provides backwards compatibility with older versions of redis-py that
    changed arguments to some commands to be more Pythonic, sane, or by
    accident.
    """

    # Overridden callbacks
    RESPONSE_CALLBACKS = dict_merge(
        StrictRedis.RESPONSE_CALLBACKS,
        {
            'TTL': lambda r: r != -1 and r or None,
        }
        )

    def pipeline(self, transaction=True, shard_hint=None):
        """
        Return a new pipeline object that can queue multiple commands for
        later execution. ``transaction`` indicates whether all commands
        should be executed atomically. Apart from making a group of operations
        atomic, pipelines are useful for reducing the back-and-forth overhead
        between the client and server.
        """
        return Pipeline(
            self.connection_pool,
            self.response_callbacks,
            transaction,
            shard_hint)

    def setex(self, name, value, time):
        """
        Set the value of key ``name`` to ``value``
        that expires in ``time`` seconds
        """
        return self.execute_command('SETEX', name, time, value)

    def lrem(self, name, value, num=0):
        """
        Remove the first ``num`` occurrences of elements equal to ``value``
        from the list stored at ``name``.

        The ``num`` argument influences the operation in the following ways:
            num > 0: Remove elements equal to value moving from head to tail.
            num < 0: Remove elements equal to value moving from tail to head.
            num = 0: Remove all elements equal to value.
        """
        return self.execute_command('LREM', name, num, value)

    def zadd(self, name, *args, **kwargs):
        """
        NOTE: The order of arguments differs from that of the official ZADD
        command. For backwards compatibility, this method accepts arguments
        in the form of name1, score1, name2, score2, while the official Redis
        documents expects score1, name1, score2, name2.

        If you're looking to use the standard syntax, consider using the
        StrictRedis class. See the API Reference section of the docs for more
        information.

        Set any number of element-name, score pairs to the key ``name``. Pairs
        can be specified in two ways:

        As *args, in the form of: name1, score1, name2, score2, ...
        or as **kwargs, in the form of: name1=score1, name2=score2, ...

        The following example would add four values to the 'my-key' key:
        redis.zadd('my-key', 'name1', 1.1, 'name2', 2.2, name3=3.3, name4=4.4)
        """
        pieces = []
        if args:
            if len(args) % 2 != 0:
                raise RedisError("ZADD requires an equal number of "
                                 "values and scores")
            pieces.extend(reversed(args))
        for pair in kwargs.iteritems():
            pieces.append(pair[1])
            pieces.append(pair[0])
        return self.execute_command('ZADD', name, *pieces)
class PubSub(object):
    """
    PubSub provides publish, subscribe and listen support to Redis channels.

    After subscribing to one or more channels, the listen() method will block
    until a message arrives on one of the subscribed channels. That message
    will be returned and it's safe to start listening again.
    """
    def __init__(self, connection_pool, shard_hint=None):
        self.connection_pool = connection_pool
        self.shard_hint = shard_hint
        self.connection = None
        self.channels = set()
        self.patterns = set()
        self.subscription_count = 0
        self.subscribe_commands = set(
            ('subscribe', 'psubscribe', 'unsubscribe', 'punsubscribe')
            )

    def __del__(self):
        try:
            # if this object went out of scope prior to shutting down
            # subscriptions, close the connection manually before
            # returning it to the connection pool
            if self.connection and (self.channels or self.patterns):
                self.connection.disconnect()
            self.reset()
        except:
            pass

    def reset(self):
        if self.connection:
            self.connection_pool.release(self.connection)
            self.connection = None

    def execute_command(self, *args, **kwargs):
        "Execute a publish/subscribe command"
        if self.connection is None:
            self.connection = self.connection_pool.get_connection(
                'pubsub',
                self.shard_hint
                )
        connection = self.connection
        try:
            connection.send_command(*args)
            return self.parse_response()
        except ConnectionError:
            connection.disconnect()
            # Connect manually here. If the Redis server is down, this will
            # fail and raise a ConnectionError as desired.
            connection.connect()
            # resubscribe to all channels and patterns before
            # resending the current command
            for channel in self.channels:
                self.subscribe(channel)
            for pattern in self.patterns:
                self.psubscribe(pattern)
            connection.send_command(*args)
            return self.parse_response()

    def parse_response(self):
        "Parse the response from a publish/subscribe command"
        response = self.connection.read_response()
        if response[0] in self.subscribe_commands:
            self.subscription_count = response[2]
            # if we've just unsubscribed from the remaining channels,
            # release the connection back to the pool
            if not self.subscription_count:
                self.reset()
        return response

    def psubscribe(self, patterns):
        "Subscribe to all channels matching any pattern in ``patterns``"
        if isinstance(patterns, basestring):
            patterns = [patterns]
        for pattern in patterns:
            self.patterns.add(pattern)
        return self.execute_command('PSUBSCRIBE', *patterns)

    def punsubscribe(self, patterns=[]):
        """
        Unsubscribe from any channel matching any pattern in ``patterns``.
        If empty, unsubscribe from all channels.
        """
        if isinstance(patterns, basestring):
            patterns = [patterns]
        for pattern in patterns:
            try:
                self.patterns.remove(pattern)
            except KeyError:
                pass
        return self.execute_command('PUNSUBSCRIBE', *patterns)

    def subscribe(self, channels):
        "Subscribe to ``channels``, waiting for messages to be published"
        if isinstance(channels, basestring):
            channels = [channels]
        for channel in channels:
            self.channels.add(channel)
        return self.execute_command('SUBSCRIBE', *channels)

    def unsubscribe(self, channels=[]):
        """
        Unsubscribe from ``channels``. If empty, unsubscribe
        from all channels
        """
        if isinstance(channels, basestring):
            channels = [channels]
        for channel in channels:
            try:
                self.channels.remove(channel)
            except KeyError:
                pass
        return self.execute_command('UNSUBSCRIBE', *channels)

    def listen(self):
        "Listen for messages on channels this client has been subscribed to"
        while self.subscription_count:
            r = self.parse_response()
            if r[0] == 'pmessage':
                msg = {
                    'type': r[0],
                    'pattern': r[1],
                    'channel': r[2],
                    'data': r[3]
                }
            else:
                msg = {
                    'type': r[0],
                    'pattern': None,
                    'channel': r[1],
                    'data': r[2]
                }
            yield msg
class BasePipeline(object):
    """
    Pipelines provide a way to transmit multiple commands to the Redis server
    in one transmission. This is convenient for batch processing, such as
    saving all the values in a list to Redis.

    All commands executed within a pipeline are wrapped with MULTI and EXEC
    calls. This guarantees all commands executed in the pipeline will be
    executed atomically.

    Any command raising an exception does *not* halt the execution of
    subsequent commands in the pipeline. Instead, the exception is caught
    and its instance is placed into the response list returned by execute().
    Code iterating over the response list should be able to deal with an
    instance of an exception as a potential value. In general, these will be
    ResponseError exceptions, such as those raised when issuing a command
    on a key of a different datatype.
    """

    UNWATCH_COMMANDS = set(('DISCARD', 'EXEC', 'UNWATCH'))

    def __init__(self, connection_pool, response_callbacks, transaction,
                 shard_hint):
        self.connection_pool = connection_pool
        self.connection = None
        self.response_callbacks = response_callbacks
        self.transaction = transaction
        self.shard_hint = shard_hint

        self.watching = False
        self.reset()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.reset()

    def __del__(self):
        try:
            self.reset()
        except:
            pass

    def reset(self):
        self.command_stack = []
        # make sure to reset the connection state in the event that we were
        # watching something
        if self.watching and self.connection:
            try:
                # call this manually since our unwatch or
                # immediate_execute_command methods can call reset()
                self.connection.send_command('UNWATCH')
                self.connection.read_response()
            except ConnectionError:
                # disconnect will also remove any previous WATCHes
                self.connection.disconnect()
        # clean up the other instance attributes
        self.watching = False
        self.explicit_transaction = False
        # we can safely return the connection to the pool here since we're
        # sure we're no longer WATCHing anything
        if self.connection:
            self.connection_pool.release(self.connection)
            self.connection = None

    def multi(self):
        """
        Start a transactional block of the pipeline after WATCH commands
        are issued. End the transactional block with `execute`.
        """
        if self.explicit_transaction:
            raise RedisError('Cannot issue nested calls to MULTI')
        if self.command_stack:
            raise RedisError('Commands without an initial WATCH have already '
                             'been issued')
        self.explicit_transaction = True

    def execute_command(self, *args, **kwargs):
        if (self.watching or args[0] == 'WATCH') and \
                not self.explicit_transaction:
            return self.immediate_execute_command(*args, **kwargs)
        return self.pipeline_execute_command(*args, **kwargs)

    def immediate_execute_command(self, *args, **options):
        """
        Execute a command immediately, but don't auto-retry on a
        ConnectionError if we're already WATCHing a variable. Used when
        issuing WATCH or subsequent commands retrieving their values but before
        MULTI is called.
        """
        command_name = args[0]
        conn = self.connection
        # if this is the first call, we need a connection
        if not conn:
            conn = self.connection_pool.get_connection(command_name,
                                                       self.shard_hint)
            self.connection = conn
        try:
            conn.send_command(*args)
            return self.parse_response(conn, command_name, **options)
        except ConnectionError:
            conn.disconnect()
            # if we're not already watching, we can safely retry the command
            # assuming it was a connection timeout
            if not self.watching:
                conn.send_command(*args)
                return self.parse_response(conn, command_name, **options)
            self.reset()
            raise

    def pipeline_execute_command(self, *args, **options):
        """
        Stage a command to be executed when execute() is next called

        Returns the current Pipeline object back so commands can be
        chained together, such as:

        pipe = pipe.set('foo', 'bar').incr('baz').decr('bang')

        At some other point, you can then run: pipe.execute(),
        which will execute all commands queued in the pipe.
        """
        self.command_stack.append((args, options))
        return self

    def _execute_transaction(self, connection, commands):
        all_cmds = ''.join(starmap(connection.pack_command,
                                   [args for args, options in commands]))
        connection.send_packed_command(all_cmds)
        # we don't care about the multi/exec any longer
        commands = commands[1:-1]
        # parse off the response for MULTI and all commands prior to EXEC.
        # the only data we care about is the response the EXEC
        # which is the last command
        for i in range(len(commands) + 1):
            self.parse_response(connection, '_')
        # parse the EXEC.
        response = self.parse_response(connection, '_')

        if response is None:
            raise WatchError("Watched variable changed.")

        if len(response) != len(commands):
            raise ResponseError("Wrong number of response items from "
                                "pipeline execution")
        # We have to run response callbacks manually
        data = []
        for r, cmd in izip(response, commands):
            if not isinstance(r, Exception):
                args, options = cmd
                command_name = args[0]
                if command_name in self.response_callbacks:
                    r = self.response_callbacks[command_name](r, **options)
            data.append(r)
        return data

    def _execute_pipeline(self, connection, commands):
        # build up all commands into a single request to increase network perf
        all_cmds = ''.join(starmap(connection.pack_command,
                                   [args for args, options in commands]))
        connection.send_packed_command(all_cmds)
        return [self.parse_response(connection, args[0], **options)
                for args, options in commands]

    def parse_response(self, connection, command_name, **options):
        result = StrictRedis.parse_response(
            self, connection, command_name, **options)
        if command_name in self.UNWATCH_COMMANDS:
            self.watching = False
        elif command_name == 'WATCH':
            self.watching = True
        return result

    def execute(self):
        "Execute all the commands in the current pipeline"
        stack = self.command_stack
        if self.transaction or self.explicit_transaction:
            stack = [(('MULTI', ), {})] + stack + [(('EXEC', ), {})]
            execute = self._execute_transaction
        else:
            execute = self._execute_pipeline
        conn = self.connection
        if not conn:
            conn = self.connection_pool.get_connection('MULTI',
                                                       self.shard_hint)
            # assign to self.connection so reset() releases the connection
            # back to the pool after we're done
            self.connection = conn
        try:
            return execute(conn, stack)
        except ConnectionError:
            conn.disconnect()
            # if we were watching a variable, the watch is no longer valid
            # since this connection has died. raise a WatchError, which
            # indicates the user should retry the transaction. If this is
            # more than a temporary failure, the WATCH that the user next
            # issues will fail, propagating the real ConnectionError
            if self.watching:
                raise WatchError("A ConnectionError occurred while watching "
                                 "one or more keys")
            # otherwise, it's safe to retry since the transaction isn't
            # predicated on any state
            return execute(conn, stack)
        finally:
            self.reset()
def
watch
(
self
,
*
names
):
"""
Watches the values at keys ``names``
"""
if
self
.
explicit_transaction
:
raise
RedisError
(
'Cannot issue a WATCH after a MULTI'
)
return
self
.
execute_command
(
'WATCH'
,
*
names
)
def
unwatch
(
self
):
"""
Unwatches all previously specified keys
"""
return
self
.
watching
and
self
.
execute_command
(
'UNWATCH'
)
or
True
class
StrictPipeline
(
BasePipeline
,
StrictRedis
):
"Pipeline for the StrictRedis class"
pass
class
Pipeline
(
BasePipeline
,
Redis
):
"Pipeline for the Redis class"
pass
class
LockError
(
RedisError
):
"Errors thrown from the Lock"
pass
class
Lock
(
object
):
"""
A shared, distributed Lock. Using Redis for locking allows the Lock
to be shared across processes and/or machines.
It's left to the user to resolve deadlock issues and make sure
multiple clients play nicely together.
"""
LOCK_FOREVER
=
float
(
2
**
31
+
1
)
# 1 past max unix time
def
__init__
(
self
,
redis
,
name
,
timeout
=
None
,
sleep
=
0.1
):
"""
Create a new Lock instnace named ``name`` using the Redis client
supplied by ``redis``.
``timeout`` indicates a maximum life for the lock.
By default, it will remain locked until release() is called.
``sleep`` indicates the amount of time to sleep per loop iteration
when the lock is in blocking mode and another client is currently
holding the lock.
Note: If using ``timeout``, you should make sure all the hosts
that are running clients have their time synchronized with a network time
service like ntp.
"""
self
.
redis
=
redis
self
.
name
=
name
self
.
acquired_until
=
None
self
.
timeout
=
timeout
self
.
sleep
=
sleep
if
self
.
timeout
and
self
.
sleep
>
self
.
timeout
:
raise
LockError
(
"'sleep' must be less than 'timeout'"
)
def
__enter__
(
self
):
return
self
.
acquire
()
def
__exit__
(
self
,
exc_type
,
exc_value
,
traceback
):
self
.
release
()
def
acquire
(
self
,
blocking
=
True
):
"""
Use Redis to hold a shared, distributed lock named ``name``.
Returns True once the lock is acquired.
If ``blocking`` is False, always return immediately. If the lock
was acquired, return True, otherwise return False.
"""
sleep
=
self
.
sleep
timeout
=
self
.
timeout
while
1
:
unixtime
=
int
(
time
.
time
())
if
timeout
:
timeout_at
=
unixtime
+
timeout
else
:
timeout_at
=
Lock
.
LOCK_FOREVER
timeout_at
=
float
(
timeout_at
)
if
self
.
redis
.
setnx
(
self
.
name
,
timeout_at
):
self
.
acquired_until
=
timeout_at
return
True
# We want blocking, but didn't acquire the lock
# check to see if the current lock is expired
existing
=
float
(
self
.
redis
.
get
(
self
.
name
)
or
1
)
if
existing
<
unixtime
:
# the previous lock is expired, attempt to overwrite it
existing
=
float
(
self
.
redis
.
getset
(
self
.
name
,
timeout_at
)
or
1
)
if
existing
<
unixtime
:
# we successfully acquired the lock
self
.
acquired_until
=
timeout_at
return
True
if
not
blocking
:
return
False
time
.
sleep
(
sleep
)
def
release
(
self
):
"Releases the already acquired lock"
if
self
.
acquired_until
is
None
:
raise
ValueError
(
"Cannot release an unlocked lock"
)
existing
=
float
(
self
.
redis
.
get
(
self
.
name
)
or
1
)
# if the lock time is in the future, delete the lock
if
existing
>=
self
.
acquired_until
:
self
.
redis
.
delete
(
self
.
name
)
self
.
acquired_until
=
None
#print "TOTOTOTOTOTO"
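For orientation, here is a minimal usage sketch of the pipeline/WATCH API implemented above. The key name, address, port and retry loop are illustrative only and are not part of this commit:

import slapos.recipe.redis.MyRedis2410 as redis

r = redis.Redis(host='localhost', port=6379, db=0)
pipe = r.pipeline()
while True:
    try:
        pipe.watch('visit-count')               # runs immediately; no buffering before MULTI
        current = int(pipe.get('visit-count') or 0)
        pipe.multi()                            # start the transactional MULTI/EXEC block
        pipe.set('visit-count', current + 1)
        pipe.execute()                          # raises WatchError if 'visit-count' changed
        break
    except redis.WatchError:
        continue                                # another client touched the key; retry

This is the check-and-set pattern the BasePipeline docstrings describe: commands issued while WATCHing run immediately, multi() switches to buffering, and execute() wraps the buffered commands in MULTI/EXEC.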
slapos/recipe/redis/__init__.py
...
@@ -58,5 +58,14 @@ class Recipe(GenericBaseRecipe):
      )
    path_list.append(redis)

+    promise_script = self.options.get('promise_wrapper', '').strip()
+    if promise_script:
+      promise = self.createPythonScript(
+        promise_script,
+        '%s.promise.main' % __name__,
+        dict(host=self.options['ipv6'], port=self.options['port'])
+      )
+      path_list.append(promise)
+
    return path_list
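The new `promise_wrapper` option makes the recipe emit a small executable through createPythonScript that calls promise.main() with the host/port mapping. A rough, hand-written equivalent of what that wrapper does (the shebang, path and values are examples, not the literal buildout output):

#!/usr/bin/env python
# Hypothetical stand-in for the script written to the promise directory.
from slapos.recipe.redis.promise import main

if __name__ == '__main__':
    # host comes from options['ipv6'], port from options['port'] in the recipe
    main({'host': '::1', 'port': '6379'})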
slapos/recipe/redis/promise.py
0 → 100644
#! /usr/bin/env python
# -*- coding: utf-8 -*-

import slapos.recipe.redis.MyRedis2410 as redis
import sys

def main(args):
  host = args['host']
  port = int(args['port'])
  try:
    pool = redis.ConnectionPool(host=host, port=port, db=0)
    r = redis.Redis(connection_pool=pool)
    r.publish("Promise-Service", "SlapOS Promise")
    pool.disconnect()
    sys.exit(0)
  except Exception, e:
    print str(e)
    sys.exit(1)
\ No newline at end of file
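The promise treats a successful PUBLISH as proof that the server answers; the channel name carries no special meaning, and the exit code (0 on success, 1 on failure) is what matters. To exercise the same check by hand against a running instance (address and port are examples), something like the following should work:

import slapos.recipe.redis.MyRedis2410 as redis

pool = redis.ConnectionPool(host='::1', port=6379, db=0)
r = redis.Redis(connection_pool=pool)
print r.publish("Promise-Service", "SlapOS Promise")  # number of subscribers that received the message
pool.disconnect()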
slapos/recipe/redis/template/redis.conf.in
...
@@ -14,7 +14,7 @@
# By default Redis does not run as a daemon. Use 'yes' if you need it.
# Note that Redis will write a pid file in /var/run/redis.pid when daemonized.
-daemonize no
+daemonize yes

# When running daemonized, Redis writes a pid file in /var/run/redis.pid by
# default. You can specify a custom pid file location here.
...
software/redis-server/instance-redis.cfg
...
@@ -21,6 +21,7 @@ bin = $${buildout:directory}/bin/
recipe = slapos.cookbook:mkdirectory
scripts = $${rootdirectory:etc}/run/
services = $${rootdirectory:etc}/service/
+promises = $${rootdirectory:etc}/promise/
run = $${rootdirectory:var}/run/
log = $${rootdirectory:var}/log/
...
@@ -41,6 +42,7 @@ passwd = $${master-passwd:passwd}
config_file = $${rootdirectory:etc}/redis.conf
log_file = $${basedirectory:log}/redis.log
wrapper = $${basedirectory:services}/redis_server
+promise_wrapper = $${basedirectory:promises}/redis

# Send informations to SlapOS Master
[publish-connection-informations]
...
software/redis-server/software.cfg
...
@@ -29,7 +29,7 @@ recipe = slapos.recipe.template
url = ${:_profile_base_location_}/instance-redis.cfg
output = ${buildout:directory}/template-redis.cfg
mode = 0644
-md5sum = 8b223d1fe0ffd40c7151766730da7fcb
+md5sum = f7b01ca7698c1b771f0653d64dc945a7

# Local development
[slapos.cookbook-repository]
...
@@ -46,3 +46,190 @@ recipe = plone.recipe.command
stop-on-error = true
update-command = ${:command}
command = grep parts ${buildout:develop-eggs-directory}/slapos.cookbook.egg-link
[networkcache]
# Romain Courteaud + Sebastien Robin + Alain Takoudjou
# + Cedric de Saint Martin signature certificate
# List of signatures of uploaders we trust:
# Romain Courteaud
# Sebastien Robin
# Kazuhiko Shiozaki
# Cedric de Saint Martin
# Yingjie Xu
# Gabriel Monnerat
# Łukasz Nowak
# Test Agent Signature
# Alain Takoudjou
signature-certificate-list =
-----BEGIN CERTIFICATE-----
MIIB4DCCAUkCADANBgkqhkiG9w0BAQsFADA5MQswCQYDVQQGEwJGUjEZMBcGA1UE
CBMQRGVmYXVsdCBQcm92aW5jZTEPMA0GA1UEChMGTmV4ZWRpMB4XDTExMDkxNTA5
MDAwMloXDTEyMDkxNTA5MDAwMlowOTELMAkGA1UEBhMCRlIxGTAXBgNVBAgTEERl
ZmF1bHQgUHJvdmluY2UxDzANBgNVBAoTBk5leGVkaTCBnzANBgkqhkiG9w0BAQEF
AAOBjQAwgYkCgYEApYZv6OstoqNzxG1KI6iE5U4Ts2Xx9lgLeUGAMyfJLyMmRLhw
boKOyJ9Xke4dncoBAyNPokUR6iWOcnPHtMvNOsBFZ2f7VA28em3+E1JRYdeNUEtX
Z0s3HjcouaNAnPfjFTXHYj4um1wOw2cURSPuU5dpzKBbV+/QCb5DLheynisCAwEA
ATANBgkqhkiG9w0BAQsFAAOBgQBCZLbTVdrw3RZlVVMFezSHrhBYKAukTwZrNmJX
mHqi2tN8tNo6FX+wmxUUAf3e8R2Ymbdbn2bfbPpcKQ2fG7PuKGvhwMG3BlF9paEC
q7jdfWO18Zp/BG7tagz0jmmC4y/8akzHsVlruo2+2du2freE8dK746uoMlXlP93g
QUUGLQ==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB8jCCAVugAwIBAgIJAPu2zchZ2BxoMA0GCSqGSIb3DQEBBQUAMBIxEDAOBgNV
BAMMB3RzeGRldjMwHhcNMTExMDE0MTIxNjIzWhcNMTIxMDEzMTIxNjIzWjASMRAw
DgYDVQQDDAd0c3hkZXYzMIGfMA0GCSqGSIb3DQEBAQUAA4GNADCBiQKBgQCrPbh+
YGmo6mWmhVb1vTqX0BbeU0jCTB8TK3i6ep3tzSw2rkUGSx3niXn9LNTFNcIn3MZN
XHqbb4AS2Zxyk/2tr3939qqOrS4YRCtXBwTCuFY6r+a7pZsjiTNddPsEhuj4lEnR
L8Ax5mmzoi9nE+hiPSwqjRwWRU1+182rzXmN4QIDAQABo1AwTjAdBgNVHQ4EFgQU
/4XXREzqBbBNJvX5gU8tLWxZaeQwHwYDVR0jBBgwFoAU/4XXREzqBbBNJvX5gU8t
LWxZaeQwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOBgQA07q/rKoE7fAda
FED57/SR00OvY9wLlFEF2QJ5OLu+O33YUXDDbGpfUSF9R8l0g9dix1JbWK9nQ6Yd
R/KCo6D0sw0ZgeQv1aUXbl/xJ9k4jlTxmWbPeiiPZEqU1W9wN5lkGuLxV4CEGTKU
hJA/yXa1wbwIPGvX3tVKdOEWPRXZLg==
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB7jCCAVegAwIBAgIJAJWA0jQ4o9DGMA0GCSqGSIb3DQEBBQUAMA8xDTALBgNV
BAMMBHg2MXMwIBcNMTExMTI0MTAyNDQzWhgPMjExMTEwMzExMDI0NDNaMA8xDTAL
BgNVBAMMBHg2MXMwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBANdJNiFsRlkH
vq2kHP2zdxEyzPAWZH3CQ3Myb3F8hERXTIFSUqntPXDKXDb7Y/laqjMXdj+vptKk
3Q36J+8VnJbSwjGwmEG6tym9qMSGIPPNw1JXY1R29eF3o4aj21o7DHAkhuNc5Tso
67fUSKgvyVnyH4G6ShQUAtghPaAwS0KvAgMBAAGjUDBOMB0GA1UdDgQWBBSjxFUE
RfnTvABRLAa34Ytkhz5vPzAfBgNVHSMEGDAWgBSjxFUERfnTvABRLAa34Ytkhz5v
PzAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAFLDS7zNhlrQYSQO5KIj
z2RJe3fj4rLPklo3TmP5KLvendG+LErE2cbKPqnhQ2oVoj6u9tWVwo/g03PMrrnL
KrDm39slYD/1KoE5kB4l/p6KVOdeJ4I6xcgu9rnkqqHzDwI4v7e8/D3WZbpiFUsY
vaZhjNYKWQf79l6zXfOvphzJ
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAO4V/jiMoICoMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMjMyMCAXDTEyMDIxNjExMTAyM1oYDzIxMTIwMTIzMTExMDIzWjAT
MREwDwYDVQQDDAhDT01QLTIzMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
wi/3Z8W9pUiegUXIk/AiFDQ0UJ4JFAwjqr+HSRUirlUsHHT+8DzH/hfcTDX1I5BB
D1ADk+ydXjMm3OZrQcXjn29OUfM5C+g+oqeMnYQImN0DDQIOcUyr7AJc4xhvuXQ1
P2pJ5NOd3tbd0kexETa1LVhR6EgBC25LyRBRae76qosCAwEAAaNQME4wHQYDVR0O
BBYEFMDmW9aFy1sKTfCpcRkYnP6zUd1cMB8GA1UdIwQYMBaAFMDmW9aFy1sKTfCp
cRkYnP6zUd1cMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAskbFizHr
b6d3iIyN+wffxz/V9epbKIZVEGJd/6LrTdLiUfJPec7FaxVCWNyKBlCpINBM7cEV
Gn9t8mdVQflNqOlAMkOlUv1ZugCt9rXYQOV7rrEYJBWirn43BOMn9Flp2nibblby
If1a2ZoqHRxoNo2yTmm7TSYRORWVS+vvfjY=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAIlBksrZVkK8MA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMzU3MCAXDTEyMDEyNjEwNTUyOFoYDzIxMTIwMTAyMTA1NTI4WjAT
MREwDwYDVQQDDAhDT01QLTM1NzCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
ts+iGUwi44vtIfwXR8DCnLtHV4ydl0YTK2joJflj0/Ws7mz5BYkxIU4fea/6+VF3
i11nwBgYgxQyjNztgc9u9O71k1W5tU95yO7U7bFdYd5uxYA9/22fjObaTQoC4Nc9
mTu6r/VHyJ1yRsunBZXvnk/XaKp7gGE9vNEyJvPn2bkCAwEAAaNQME4wHQYDVR0O
BBYEFKuGIYu8+6aEkTVg62BRYaD11PILMB8GA1UdIwQYMBaAFKuGIYu8+6aEkTVg
62BRYaD11PILMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAMoTRpBxK
YLEZJbofF7gSrRIcrlUJYXfTfw1QUBOKkGFFDsiJpEg4y5pUk1s5Jq9K3SDzNq/W
it1oYjOhuGg3al8OOeKFrU6nvNTF1BAvJCl0tr3POai5yXyN5jlK/zPfypmQYxE+
TaqQSGBJPVXYt6lrq/PRD9ciZgKLOwEqK8w=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAPHoWu90gbsgMA0GCSqGSIb3DQEBBQUAMBQxEjAQBgNV
BAMMCXZpZmlibm9kZTAeFw0xMjAzMTkyMzIwNTVaFw0xMzAzMTkyMzIwNTVaMBQx
EjAQBgNVBAMMCXZpZmlibm9kZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
ozBijpO8PS5RTeKTzA90vi9ezvv4vVjNaguqT4UwP9+O1+i6yq1Y2W5zZxw/Klbn
oudyNzie3/wqs9VfPmcyU9ajFzBv/Tobm3obmOqBN0GSYs5fyGw+O9G3//6ZEhf0
NinwdKmrRX+d0P5bHewadZWIvlmOupcnVJmkks852BECAwEAAaNQME4wHQYDVR0O
BBYEFF9EtgfZZs8L2ZxBJxSiY6eTsTEwMB8GA1UdIwQYMBaAFF9EtgfZZs8L2ZxB
JxSiY6eTsTEwMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAc43YTfc6
baSemaMAc/jz8LNLhRE5dLfLOcRSoHda8y0lOrfe4lHT6yP5l8uyWAzLW+g6s3DA
Yme/bhX0g51BmI6gjKJo5DoPtiXk/Y9lxwD3p7PWi+RhN+AZQ5rpo8UfwnnN059n
yDuimQfvJjBFMVrdn9iP6SfMjxKaGk6gVmI=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAMNZBmoIOXPBMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMTMyMCAXDTEyMDUwMjEyMDQyNloYDzIxMTIwNDA4MTIwNDI2WjAT
MREwDwYDVQQDDAhDT01QLTEzMjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
6peZQt1sAmMAmSG9BVxxcXm8x15kE9iAplmANYNQ7z2YO57c10jDtlYlwVfi/rct
xNUOKQtc8UQtV/fJWP0QT0GITdRz5X/TkWiojiFgkopza9/b1hXs5rltYByUGLhg
7JZ9dZGBihzPfn6U8ESAKiJzQP8Hyz/o81FPfuHCftsCAwEAAaNQME4wHQYDVR0O
BBYEFNuxsc77Z6/JSKPoyloHNm9zF9yqMB8GA1UdIwQYMBaAFNuxsc77Z6/JSKPo
yloHNm9zF9yqMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAl4hBaJy1
cgiNV2+Z5oNTrHgmzWvSY4duECOTBxeuIOnhql3vLlaQmo0p8Z4c13kTZq2s3nhd
Loe5mIHsjRVKvzB6SvIaFUYq/EzmHnqNdpIGkT/Mj7r/iUs61btTcGUCLsUiUeci
Vd0Ozh79JSRpkrdI8R/NRQ2XPHAo+29TT70=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9jCCAV+gAwIBAgIJAKRvzcy7OH0UMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtNzcyMCAXDTEyMDgxMDE1NDI1MVoYDzIxMTIwNzE3MTU0MjUxWjAT
MREwDwYDVQQDDAhDT01QLTc3MjCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA
o7aipd6MbnuGDeR1UJUjuMLQUariAyQ2l2ZDS6TfOwjHiPw/mhzkielgk73kqN7A
sUREx41eTcYCXzTq3WP3xCLE4LxLg1eIhd4nwNHj8H18xR9aP0AGjo4UFl5BOMa1
mwoyBt3VtfGtUmb8whpeJgHhqrPPxLoON+i6fIbXDaUCAwEAAaNQME4wHQYDVR0O
BBYEFEfjy3OopT2lOksKmKBNHTJE2hFlMB8GA1UdIwQYMBaAFEfjy3OopT2lOksK
mKBNHTJE2hFlMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADgYEAaNRx6YN2
M/p3R8/xS6zvH1EqJ3FFD7XeAQ52WuQnKSREzuw0dsw12ClxjcHiQEFioyTiTtjs
5pW18Ry5Ie7iFK4cQMerZwWPxBodEbAteYlRsI6kePV7Gf735Y1RpuN8qZ2sYL6e
x2IMeSwJ82BpdEI5niXxB+iT0HxhmR+XaMI=
-----END CERTIFICATE-----
-----BEGIN CERTIFICATE-----
MIIB9DCCAV2gAwIBAgIJAL392bEdqpFQMA0GCSqGSIb3DQEBBQUAMBMxETAPBgNV
BAMMCENPTVAtMjM0MB4XDTExMTEwOTE1MzA0M1oXDTEyMTEwODE1MzA0M1owEzER
MA8GA1UEAwwIQ09NUC0yMzQwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMRR
T0cacZKztie/DaRRjq3mgcqfPKoGXu6zXmeRQI+6Y4bnzjf8h/jAuPzR552P0xK5
psxhavXA8hOGRLFDtvDMQLepVHWfwqtFtcp5vNf2+KWqOYy0OxHfVIlnatvCqTZN
NG1vRsSOAQ+v7QNFHh6NBbiSrjBBfg4vkfzqnsUvAgMBAAGjUDBOMB0GA1UdDgQW
BBTGGw+ASoDi9kqPElDkC0Q5RtAfRjAfBgNVHSMEGDAWgBTGGw+ASoDi9kqPElDk
C0Q5RtAfRjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4GBAIOuR1OqXLke
LLzH0kRnlIOe60dYJvRya53wDx5x2g4/qkXZPLx2RcbaUrX/SCbL70vfr+apUPss
dOSJ86sf/PQHW3/1fhTTE+Vck1MiiAq0aIx6WnKnz4+ZcQctB7b0DCsTcQnmbpi2
n9MuhWaT21VOYhIGzJFPw5XW47/RrwhR
-----END CERTIFICATE-----
[versions]
Jinja2 = 2.6
Werkzeug = 0.8.3
buildout-versions = 1.7
hexagonit.recipe.cmmi = 1.6
inotifyx = 0.2.0
lxml = 3.1.0
meld3 = 0.6.10
netaddr = 0.7.10
plone.recipe.command = 1.1
pytz = 2013b
slapos.core = 0.35.1
slapos.recipe.template = 2.4.2
xml-marshaller = 0.9.7
# Required by:
# slapos.core==0.35.1
Flask = 0.9
# Required by:
# hexagonit.recipe.cmmi==1.6
hexagonit.recipe.download = 1.6
# Required by:
# slapos.cookbook==0.72.0
lock-file = 2.0
# Required by:
# slapos.core==0.35.1
netifaces = 0.8
# Required by:
# slapos.core==0.35.1
pyflakes = 0.6.1
# Required by:
# slapos.cookbook==0.72.0
# slapos.core==0.35.1
# supervisor==3.0b1
# zc.buildout==1.6.0-dev-SlapOS-010
# zope.interface==4.0.5
setuptools = 0.6c12dev-r88846
# Required by:
# slapos.core==0.35.1
supervisor = 3.0b1
# Required by:
# slapos.core==0.35.1
unittest2 = 0.5.1
# Required by:
# slapos.core==0.35.1
zope.interface = 4.0.5