Commit 6f5ae03a, authored Apr 25, 2001 by Andreas Jung

removed old Medusa release

parent 6786b136

Showing 42 changed files, with 0 additions and 11342 deletions.
ZServer/medusa/__init__.py                    +0  -3
ZServer/medusa/asynchat.py                    +0  -294
ZServer/medusa/counter.py                     +0  -47
ZServer/medusa/default_handler.py             +0  -248
ZServer/medusa/filesys.py                     +0  -467
ZServer/medusa/ftp_server.py                  +0  -1129
ZServer/medusa/http_date.py                   +0  -131
ZServer/medusa/http_server.py                 +0  -774
ZServer/medusa/logger.py                      +0  -266
ZServer/medusa/m_syslog.py                    +0  -235
ZServer/medusa/max_sockets.py                 +0  -65
ZServer/medusa/medusa_gif.py                  +0  -8
ZServer/medusa/mime_type_table.py             +0  -113
ZServer/medusa/monitor.py                     +0  -349
ZServer/medusa/monitor_client.py              +0  -126
ZServer/medusa/monitor_client_win32.py        +0  -53
ZServer/medusa/producers.py                   +0  -331
ZServer/medusa/resolver.py                    +0  -442
ZServer/medusa/select_trigger.py              +0  -281
ZServer/medusa/status_handler.py              +0  -294
ZServer/medusa/test_logger.py                 +0  -15
lib/python/ZServer/medusa/__init__.py         +0  -3
lib/python/ZServer/medusa/asynchat.py         +0  -294
lib/python/ZServer/medusa/counter.py          +0  -47
lib/python/ZServer/medusa/default_handler.py  +0  -248
lib/python/ZServer/medusa/filesys.py          +0  -467
lib/python/ZServer/medusa/ftp_server.py       +0  -1129
lib/python/ZServer/medusa/http_date.py        +0  -131
lib/python/ZServer/medusa/http_server.py      +0  -774
lib/python/ZServer/medusa/logger.py           +0  -266
lib/python/ZServer/medusa/m_syslog.py         +0  -235
lib/python/ZServer/medusa/max_sockets.py      +0  -65
lib/python/ZServer/medusa/medusa_gif.py       +0  -8
lib/python/ZServer/medusa/mime_type_table.py  +0  -113
lib/python/ZServer/medusa/monitor.py          +0  -349
lib/python/ZServer/medusa/monitor_client.py   +0  -126
lib/python/ZServer/medusa/monitor_client_win32.py  +0  -53
lib/python/ZServer/medusa/producers.py        +0  -331
lib/python/ZServer/medusa/resolver.py         +0  -442
lib/python/ZServer/medusa/select_trigger.py   +0  -281
lib/python/ZServer/medusa/status_handler.py   +0  -294
lib/python/ZServer/medusa/test_logger.py      +0  -15
ZServer/medusa/__init__.py  deleted  100644 → 0

# Make medusa into a package

__version__ = '$Revision: 1.5 $'[11:-2]
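The slice in __version__ is the usual RCS-keyword trick: it drops the 11-character '$Revision: ' prefix and the trailing ' $' from the expanded keyword, leaving just the version number:

>>> '$Revision: 1.5 $'[11:-2]
'1.5'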
ZServer/medusa/asynchat.py  deleted  100644 → 0

# -*- Mode: Python; tab-width: 4 -*-
# Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
# Author: Sam Rushing <rushing@nightmare.com>
# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

"""A class supporting chat-style (command/response) protocols.

This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).

The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.

for example:
Say you build an async nntp client using this class.  At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting.  Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""

import socket
import asyncore
import string

class async_chat (asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults
    ac_in_buffer_size = 4096
    ac_out_buffer_size = 4096

    def __init__ (self, conn=None):
        self.ac_in_buffer = ''
        self.ac_out_buffer = ''
        self.producer_fifo = fifo()
        asyncore.dispatcher.__init__ (self, conn)

    def set_terminator (self, term):
        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
        self.terminator = term

    def get_terminator (self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read (self):
        try:
            data = self.recv (self.ac_in_buffer_size)
        except socket.error, why:
            self.handle_error()
            return

        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(1024).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if terminator is None:
                # no terminator, collect it all
                self.collect_incoming_data (self.ac_in_buffer)
                self.ac_in_buffer = ''
            elif type(terminator) == type(0):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data (self.ac_in_buffer)
                    self.ac_in_buffer = ''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data (self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = string.find (self.ac_in_buffer, terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string (source of subtle bugs)
                        self.collect_incoming_data (self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data (self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data (self.ac_in_buffer)
                        self.ac_in_buffer = ''

    def handle_write (self):
        self.initiate_send()

    def handle_close (self):
        self.close()

    def push (self, data):
        self.producer_fifo.push (simple_producer (data))
        self.initiate_send()

    def push_with_producer (self, producer):
        self.producer_fifo.push (producer)
        self.initiate_send()

    def readable (self):
        "predicate for inclusion in the readable for select()"
        return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)

    def writable (self):
        "predicate for inclusion in the writable for select()"
        # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
        # this is about twice as fast, though not as clear.
        return not (
            (self.ac_out_buffer == '') and
            self.producer_fifo.is_empty() and
            self.connected
            )

    def close_when_done (self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.push (None)

    # refill the outgoing buffer by calling the more() method
    # of the first producer in the queue
    def refill_buffer (self):
        _string_type = type('')
        while 1:
            if len(self.producer_fifo):
                p = self.producer_fifo.first()
                # a 'None' in the producer fifo is a sentinel,
                # telling us to close the channel.
                if p is None:
                    if not self.ac_out_buffer:
                        self.producer_fifo.pop()
                        self.close()
                    return
                elif type(p) is _string_type:
                    self.producer_fifo.pop()
                    self.ac_out_buffer = self.ac_out_buffer + p
                    return
                data = p.more()
                if data:
                    self.ac_out_buffer = self.ac_out_buffer + data
                    return
                else:
                    self.producer_fifo.pop()
            else:
                return

    def initiate_send (self):
        obs = self.ac_out_buffer_size
        # try to refill the buffer
        if (len (self.ac_out_buffer) < obs):
            self.refill_buffer()

        if self.ac_out_buffer and self.connected:
            # try to send the buffer
            try:
                num_sent = self.send (self.ac_out_buffer[:obs])
                if num_sent:
                    self.ac_out_buffer = self.ac_out_buffer[num_sent:]
            except socket.error, why:
                self.handle_error()
                return

    def discard_buffers (self):
        # Emergencies only!
        self.ac_in_buffer = ''
        self.ac_out_buffer = ''
        while self.producer_fifo:
            self.producer_fifo.pop()

class simple_producer:

    def __init__ (self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more (self):
        if len (self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = ''
            return result

class fifo:
    def __init__ (self, list=None):
        if not list:
            self.list = []
        else:
            self.list = list

    def __len__ (self):
        return len(self.list)

    def is_empty (self):
        return self.list == []

    def first (self):
        return self.list[0]

    def push (self, data):
        self.list.append (data)

    def pop (self):
        if self.list:
            result = self.list[0]
            del self.list[0]
            return (1, result)
        else:
            return (0, None)

# Given 'haystack', see if any prefix of 'needle' is at its end.  This
# assumes an exact match has already been checked.  Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwerty\r\n", "\r\n") => 2
# f_p_a_e ("qwertydkjf", "\r\n") => 0
# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# python:  18307/s
# re:      12820/s
# regex:   14035/s

def find_prefix_at_end (haystack, needle):
    nl = len(needle)
    result = 0
    for i in range (1, nl):
        if haystack[-(nl-i):] == needle[:(nl-i)]:
            result = nl-i
            break
    return result
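The module docstring above spells out the intended usage pattern: subclass async_chat, set a terminator, and supply collect_incoming_data() and found_terminator(). As a minimal sketch of that pattern against the API shown above (the names echo_channel/echo_server and port 8023 are illustrative, not part of the deleted code), a line-oriented echo service would look like:

import asyncore
import asynchat
import socket

class echo_channel (asynchat.async_chat):
    "echo each complete line back to the client"
    def __init__ (self, conn):
        asynchat.async_chat.__init__ (self, conn)
        self.buffer = ''
        # single-line protocol: found_terminator() fires at each CRLF
        self.set_terminator ('\r\n')

    def collect_incoming_data (self, data):
        # accumulate bytes arriving ahead of the terminator
        self.buffer = self.buffer + data

    def found_terminator (self):
        # a complete line has arrived; send it back and reset
        self.push (self.buffer + '\r\n')
        self.buffer = ''

class echo_server (asyncore.dispatcher):
    def __init__ (self, port=8023):
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind (('', port))
        self.listen (5)

    def handle_accept (self):
        conn, addr = self.accept()
        echo_channel (conn)

if __name__ == '__main__':
    echo_server()
    asyncore.loop()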
ZServer/medusa/counter.py  deleted  100644 → 0

# -*- Mode: Python; tab-width: 4 -*-

# It is tempting to add an __int__ method to this class, but it's not
# a good idea.  This class tries to gracefully handle integer
# overflow, and to hide this detail from both the programmer and the
# user.  Note that the __str__ method can be relied on for printing out
# the value of a counter:
#
# >>> print 'Total Client: %s' % self.total_clients
#
# If you need to do arithmetic with the value, then use the 'as_long'
# method, the use of long arithmetic is a reminder that the counter
# will overflow.

class counter:
    "general-purpose counter"

    def __init__ (self, initial_value=0):
        self.value = initial_value

    def increment (self, delta=1):
        result = self.value
        try:
            self.value = self.value + delta
        except OverflowError:
            self.value = long(self.value) + delta
        return result

    def decrement (self, delta=1):
        result = self.value
        try:
            self.value = self.value - delta
        except OverflowError:
            self.value = long(self.value) - delta
        return result

    def as_long (self):
        return long(self.value)

    def __nonzero__ (self):
        return self.value != 0

    def __repr__ (self):
        return '<counter value=%s at %x>' % (self.value, id(self))

    def __str__ (self):
        return str(long(self.value))[:-1]
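The comment block at the top warns against adding __int__ and points at as_long() for arithmetic. A hedged interpreter sketch of the intended usage, assuming the Python 1.5-era long semantics this file was written against (str() of a long carried a trailing 'L', which __str__ strips off):

>>> from counter import counter
>>> c = counter()
>>> c.increment()            # returns the *previous* value
0
>>> c.increment (5)
1
>>> print 'hits: %s' % c     # __str__ is the safe way to display
hits: 6
>>> c.as_long() * 2          # use as_long() for arithmetic
12L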
ZServer/medusa/default_handler.py  deleted  100644 → 0

# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997 by Sam Rushing
#                   All Rights Reserved.
#

RCS_ID = '$Id: default_handler.py,v 1.4 2000/06/02 14:22:48 brian Exp $'

# standard python modules
import os
import regex
import posixpath
import stat
import string
import time

# medusa modules
import http_date
import http_server
import mime_type_table
import status_handler
import producers

# from <lib/urllib.py>
_quoteprog = regex.compile ('%[0-9a-fA-F][0-9a-fA-F]')

def unquote (s):
    i = 0
    n = len(s)
    res = []
    while 0 <= i < n:
        j = _quoteprog.search (s, i)
        if j < 0:
            res.append (s[i:])
            break
        res.append (s[i:j] + chr(string.atoi(s[j+1:j+3], 16)))
        i = j+3
    return string.join (res, '')

# split a uri
# <path>;<params>?<query>#<fragment>
path_regex = regex.compile (
#   path       params       query      fragment
    '\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\\)?\\(#.*\\)?'
    )

def split_path (path):
    if path_regex.match (path) != len(path):
        raise ValueError, "bad path"
    else:
        return map (lambda i,r=path_regex: r.group(i), range(1,5))

# This is the 'default' handler.  it implements the base set of
# features expected of a simple file-delivering HTTP server.  file
# services are provided through a 'filesystem' object, the very same
# one used by the FTP server.
#
# You can replace or modify this handler if you want a non-standard
# HTTP server.  You can also derive your own handler classes from
# it.
#
# support for handling POST requests is available in the derived
# class <default_with_post_handler>, defined below.
#

from counter import counter

class default_handler:

    valid_commands = ['get', 'head']

    IDENT = 'Default HTTP Request Handler'

    # Pathnames that are tried when a URI resolves to a directory name
    directory_defaults = [
        'index.html',
        'default.html'
        ]

    default_file_producer = producers.file_producer

    def __init__ (self, filesystem):
        self.filesystem = filesystem
        # count total hits
        self.hit_counter = counter()
        # count file deliveries
        self.file_counter = counter()
        # count cache hits
        self.cache_counter = counter()

    hit_counter = 0

    def __repr__ (self):
        return '<%s (%s hits) at %x>' % (
            self.IDENT,
            self.hit_counter,
            id (self)
            )

    # always match, since this is a default
    def match (self, request):
        return 1

    # handle a file request, with caching.

    def handle_request (self, request):

        if request.command not in self.valid_commands:
            request.error (400) # bad request
            return

        self.hit_counter.increment()

        [path, params, query, fragment] = split_path (request.uri)

        # unquote path if necessary (thanks to Skip Montaro for pointing
        # out that we must unquote in piecemeal fashion).
        if '%' in path:
            path = unquote (path)

        # strip off all leading slashes
        while path and path[0] == '/':
            path = path[1:]

        if self.filesystem.isdir (path):
            if path and path[-1] != '/':
                request['Location'] = 'http://%s/%s/' % (
                    request.channel.server.server_name,
                    path
                    )
                request.error (301)
                return

            # we could also generate a directory listing here,
            # may want to move this into another method for that
            # purpose
            found = 0
            if path and path[-1] != '/':
                path = path + '/'
            for default in self.directory_defaults:
                p = path + default
                if self.filesystem.isfile (p):
                    path = p
                    found = 1
                    break
            if not found:
                request.error (404) # Not Found
                return

        elif not self.filesystem.isfile (path):
            request.error (404) # Not Found
            return

        file_length = self.filesystem.stat (path)[stat.ST_SIZE]

        ims = get_header (IF_MODIFIED_SINCE, request.header)

        length_match = 1
        if ims:
            length = IF_MODIFIED_SINCE.group(4)
            if length:
                try:
                    length = string.atoi (length)
                    if length != file_length:
                        length_match = 0
                except:
                    pass

        ims_date = 0

        if ims:
            ims_date = http_date.parse_http_date (ims)

        try:
            mtime = self.filesystem.stat (path)[stat.ST_MTIME]
        except:
            request.error (404)
            return

        if length_match and ims_date:
            if mtime <= ims_date:
                request.reply_code = 304
                request.done()
                self.cache_counter.increment()
                return
        try:
            file = self.filesystem.open (path, 'rb')
        except IOError:
            request.error (404)
            return

        request['Last-Modified'] = http_date.build_http_date (mtime)
        request['Content-Length'] = file_length
        self.set_content_type (path, request)

        if request.command == 'get':
            request.push (self.default_file_producer (file))
            self.file_counter.increment()

        request.done()

    def set_content_type (self, path, request):
        ext = string.lower (get_extension (path))
        if mime_type_table.content_type_map.has_key (ext):
            request['Content-Type'] = mime_type_table.content_type_map[ext]
        else:
            # TODO: test a chunk off the front of the file for 8-bit
            # characters, and use application/octet-stream instead.
            request['Content-Type'] = 'text/plain'

    def status (self):
        return producers.simple_producer (
            '<li>%s' % status_handler.html_repr (self)
            + '<ul>'
            + '<li><b>Total Hits:</b> %s' % self.hit_counter
            + '<li><b>Files Delivered:</b> %s' % self.file_counter
            + '<li><b>Cache Hits:</b> %s' % self.cache_counter
            + '</ul>'
            )

ACCEPT = regex.compile ('Accept: \(.*\)', regex.casefold)

# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
# to this header.  I suppose it's purpose is to avoid the overhead
# of parsing dates...
IF_MODIFIED_SINCE = regex.compile (
    'If-Modified-Since: \([^;]+\)\(\(; length=\([0-9]+\)$\)\|$\)',
    regex.casefold
    )

USER_AGENT = regex.compile ('User-Agent: \(.*\)', regex.casefold)

boundary_chars = "A-Za-z0-9'()+_,./:=?-"

CONTENT_TYPE = regex.compile (
    'Content-Type: \([^;]+\)\(\(; boundary=\([%s]+\)$\)\|$\)' % boundary_chars,
    regex.casefold
    )

get_header = http_server.get_header

def get_extension (path):
    dirsep = string.rfind (path, '/')
    dotsep = string.rfind (path, '.')
    if dotsep > dirsep:
        return path[dotsep+1:]
    else:
        return ''
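handle_request() above leans on split_path() to break the request URI into its four components before unquoting. A hedged illustration of the decomposition, per the path/params/query/fragment groups in path_regex (the long-gone 'regex' module should return None for optional groups that did not match):

>>> split_path ('/dir/page.html;p=1?q=2#frag')
['/dir/page.html', ';p=1', '?q=2', '#frag']
>>> split_path ('/index.html')
['/index.html', None, None, None]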
ZServer/medusa/filesys.py  deleted  100644 → 0

# -*- Mode: Python; tab-width: 4 -*-
#   $Id: filesys.py,v 1.5 2000/06/02 14:22:48 brian Exp $
#   Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
#

# We want to provide a complete wrapper around any and all
# filesystem operations.

# this class is really just for documentation,
# identifying the API for a filesystem object.

# opening files for reading, and listing directories, should
# return a producer.

class abstract_filesystem:
    def __init__ (self):
        pass

    def current_directory (self):
        "Return a string representing the current directory."
        pass

    def listdir (self, path, long=0):
        """Return a listing of the directory at 'path' The empty string
        indicates the current directory.  If 'long' is set, instead
        return a list of (name, stat_info) tuples
        """
        pass

    def open (self, path, mode):
        "Return an open file object"
        pass

    def stat (self, path):
        "Return the equivalent of os.stat() on the given path."
        pass

    def isdir (self, path):
        "Does the path represent a directory?"
        pass

    def isfile (self, path):
        "Does the path represent a plain file?"
        pass

    def cwd (self, path):
        "Change the working directory."
        pass

    def cdup (self):
        "Change to the parent of the current directory."
        pass

    def longify (self, path):
        """Return a 'long' representation of the filename
        [for the output of the LIST command]"""
        pass

# standard wrapper around a unix-like filesystem, with a 'false root'
# capability.

# security considerations: can symbolic links be used to 'escape' the
# root?  should we allow it?  if not, then we could scan the
# filesystem on startup, but that would not help if they were added
# later.  We will probably need to check for symlinks in the cwd method.

# what to do if wd is an invalid directory?

import os
import stat
import string

def safe_stat (path):
    try:
        return (path, os.stat (path))
    except:
        return None

import regex
import regsub
import glob

class os_filesystem:
    path_module = os.path

    # set this to zero if you want to disable pathname globbing.
    # [we currently don't glob, anyway]
    do_globbing = 1

    def __init__ (self, root, wd='/'):
        self.root = root
        self.wd = wd

    def current_directory (self):
        return self.wd

    def isfile (self, path):
        p = self.normalize (self.path_module.join (self.wd, path))
        return self.path_module.isfile (self.translate(p))

    def isdir (self, path):
        p = self.normalize (self.path_module.join (self.wd, path))
        return self.path_module.isdir (self.translate(p))

    def cwd (self, path):
        p = self.normalize (self.path_module.join (self.wd, path))
        translated_path = self.translate(p)
        if not self.path_module.isdir (translated_path):
            return 0
        else:
            old_dir = os.getcwd()
            # temporarily change to that directory, in order
            # to see if we have permission to do so.
            try:
                can = 0
                try:
                    os.chdir (translated_path)
                    can = 1
                    self.wd = p
                except:
                    pass
            finally:
                if can:
                    os.chdir (old_dir)
            return can

    def cdup (self):
        return self.cwd ('..')

    def listdir (self, path, long=0):
        p = self.translate (path)
        # I think we should glob, but limit it to the current
        # directory only.
        ld = os.listdir (p)
        if not long:
            return list_producer (ld, 0, None)
        else:
            old_dir = os.getcwd()
            try:
                os.chdir (p)
                # if os.stat fails we ignore that file.
                result = filter (None, map (safe_stat, ld))
            finally:
                os.chdir (old_dir)
            return list_producer (result, 1, self.longify)

    # TODO: implement a cache w/timeout for stat()
    def stat (self, path):
        p = self.translate (path)
        return os.stat (p)

    def open (self, path, mode):
        p = self.translate (path)
        return open (p, mode)

    def unlink (self, path):
        p = self.translate (path)
        return os.unlink (p)

    def mkdir (self, path):
        p = self.translate (path)
        return os.mkdir (p)

    def rmdir (self, path):
        p = self.translate (path)
        return os.rmdir (p)

    # utility methods
    def normalize (self, path):
        # watch for the ever-sneaky '/+' path element
        path = regsub.gsub ('/+', '/', path)
        p = self.path_module.normpath (path)
        # remove 'dangling' cdup's.
        if len(p) > 2 and p[:3] == '/..':
            p = '/'
        return p

    def translate (self, path):
        # we need to join together three separate
        # path components, and do it safely.
        # <real_root>/<current_directory>/<path>
        # use the operating system's path separator.
        path = string.join (string.split (path, '/'), os.sep)
        p = self.normalize (self.path_module.join (self.wd, path))
        p = self.normalize (self.path_module.join (self.root, p[1:]))
        return p

    def longify (self, (path, stat_info)):
        return unix_longify (path, stat_info)

    def __repr__ (self):
        return '<unix-style fs root:%s wd:%s>' % (
            self.root,
            self.wd
            )

if os.name == 'posix':

    class unix_filesystem (os_filesystem):
        pass

    class schizophrenic_unix_filesystem (os_filesystem):
        PROCESS_UID  = os.getuid()
        PROCESS_EUID = os.geteuid()
        PROCESS_GID  = os.getgid()
        PROCESS_EGID = os.getegid()

        def __init__ (self, root, wd='/', persona=(None, None)):
            os_filesystem.__init__ (self, root, wd)
            self.persona = persona

        def become_persona (self):
            if self.persona is not (None, None):
                uid, gid = self.persona
                # the order of these is important!
                os.setegid (gid)
                os.seteuid (uid)

        def become_nobody (self):
            if self.persona is not (None, None):
                os.seteuid (self.PROCESS_UID)
                os.setegid (self.PROCESS_GID)

        # cwd, cdup, open, listdir
        def cwd (self, path):
            try:
                self.become_persona()
                return os_filesystem.cwd (self, path)
            finally:
                self.become_nobody()

        def cdup (self, path):
            try:
                self.become_persona()
                return os_filesystem.cdup (self)
            finally:
                self.become_nobody()

        def open (self, filename, mode):
            try:
                self.become_persona()
                return os_filesystem.open (self, filename, mode)
            finally:
                self.become_nobody()

        def listdir (self, path, long=0):
            try:
                self.become_persona()
                return os_filesystem.listdir (self, path, long)
            finally:
                self.become_nobody()

# This hasn't been very reliable across different platforms.
# maybe think about a separate 'directory server'.
#
#   import posixpath
#   import fcntl
#   import FCNTL
#   import select
#   import asyncore
#
#   # pipes /bin/ls for directory listings.
#   class unix_filesystem (os_filesystem):
#       pass
#       path_module = posixpath
#
#       def listdir (self, path, long=0):
#           p = self.translate (path)
#           if not long:
#               return list_producer (os.listdir (p), 0, None)
#           else:
#               command = '/bin/ls -l %s' % p
#               print 'opening pipe to "%s"' % command
#               fd = os.popen (command, 'rt')
#               return pipe_channel (fd)
#
#   # this is both a dispatcher, _and_ a producer
#   class pipe_channel (asyncore.file_dispatcher):
#       buffer_size = 4096
#
#       def __init__ (self, fd):
#           asyncore.file_dispatcher.__init__ (self, fd)
#           self.fd = fd
#           self.done = 0
#           self.data = ''
#
#       def handle_read (self):
#           if len (self.data) < self.buffer_size:
#               self.data = self.data + self.fd.read (self.buffer_size)
#           #print '%s.handle_read() => len(self.data) == %d' % (self, len(self.data))
#
#       def handle_expt (self):
#           #print '%s.handle_expt()' % self
#           self.done = 1
#
#       def ready (self):
#           #print '%s.ready() => %d' % (self, len(self.data))
#           return ((len (self.data) > 0) or self.done)
#
#       def more (self):
#           if self.data:
#               r = self.data
#               self.data = ''
#           elif self.done:
#               self.close()
#               self.downstream.finished()
#               r = ''
#           else:
#               r = None
#           #print '%s.more() => %s' % (self, (r and len(r)))
#           return r

# For the 'real' root, we could obtain a list of drives, and then
# use that.  Doesn't win32 provide such a 'real' filesystem?
# [yes, I think something like this "\\.\c\windows"]

class msdos_filesystem (os_filesystem):
    def longify (self, (path, stat_info)):
        return msdos_longify (path, stat_info)

# A merged filesystem will let you plug other filesystems together.
# We really need the equivalent of a 'mount' capability - this seems
# to be the most general idea.  So you'd use a 'mount' method to place
# another filesystem somewhere in the hierarchy.

# Note: this is most likely how I will handle ~user directories
# with the http server.

class merged_filesystem:
    def __init__ (self, *fsys):
        pass

# this matches the output of NT's ftp server (when in
# MSDOS mode) exactly.

def msdos_longify (file, stat_info):
    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
        dir = '<DIR>'
    else:
        dir = '     '
    date = msdos_date (stat_info[stat.ST_MTIME])
    return '%s %s %8d %s' % (
        date,
        dir,
        stat_info[stat.ST_SIZE],
        file
        )

def msdos_date (t):
    try:
        info = time.gmtime (t)
    except:
        info = time.gmtime (0)
    # year, month, day, hour, minute, second, ...
    if info[3] > 11:
        merid = 'PM'
        info[3] = info[3] - 12
    else:
        merid = 'AM'
    return '%02d-%02d-%02d %02d:%02d%s' % (
        info[1],
        info[2],
        info[0]%100,
        info[3],
        info[4],
        merid
        )

months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

mode_table = {
    '0':'---',
    '1':'--x',
    '2':'-w-',
    '3':'-wx',
    '4':'r--',
    '5':'r-x',
    '6':'rw-',
    '7':'rwx'
    }

import time

def unix_longify (file, stat_info):
    # for now, only pay attention to the lower bits
    mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
    mode = string.join (map (lambda x: mode_table[x], mode), '')
    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
        dirchar = 'd'
    else:
        dirchar = '-'
    date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
    return '%s%s %3d %-8s %-8s %8d %s %s' % (
        dirchar,
        mode,
        stat_info[stat.ST_NLINK],
        stat_info[stat.ST_UID],
        stat_info[stat.ST_GID],
        stat_info[stat.ST_SIZE],
        date,
        file
        )

# Emulate the unix 'ls' command's date field.
# it has two formats - if the date is more than 180
# days in the past, then it's like this:
# Oct 19  1995
# otherwise, it looks like this:
# Oct 19 17:33

def ls_date (now, t):
    try:
        info = time.gmtime (t)
    except:
        info = time.gmtime (0)
    # 15,600,000 == 86,400 * 180
    if (now - t) > 15600000:
        return '%s %2d %d' % (
            months[info[1]-1],
            info[2],
            info[0]
            )
    else:
        return '%s %2d %02d:%02d' % (
            months[info[1]-1],
            info[2],
            info[3],
            info[4]
            )

# ===========================================================================
#                                                 Producers
# ===========================================================================

class list_producer:
    def __init__ (self, file_list, long, longify):
        self.file_list = file_list
        self.long = long
        self.longify = longify
        self.done = 0

    def ready (self):
        if len(self.file_list):
            return 1
        else:
            if not self.done:
                self.done = 1
                return 0
            return (len(self.file_list) > 0)

    # this should do a pushd/popd
    def more (self):
        if not self.file_list:
            return ''
        else:
            # do a few at a time
            bunch = self.file_list[:50]
            if self.long:
                bunch = map (self.longify, bunch)
            self.file_list = self.file_list[50:]
            return string.joinfields (bunch, '\r\n') + '\r\n'
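translate() is the heart of the 'false root' described above: it joins <real_root>/<current_directory>/<path> and normalizes away '..' escapes before touching the real disk. A hedged sketch of the mapping (the /home/ftp root and the pub subdirectory are illustrative, and cwd() only succeeds if the translated directory actually exists and is chdir-able):

>>> fs = os_filesystem ('/home/ftp')      # real root; clients see '/'
>>> fs.cwd ('pub')                        # permission-checked via os.chdir
1
>>> fs.current_directory()
'/pub'
>>> fs.translate ('docs/readme.txt')      # <root>/<wd>/<path>, normalized
'/home/ftp/pub/docs/readme.txt'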
ZServer/medusa/ftp_server.py
deleted
100755 → 0
View file @
6786b136
# -*- Mode: Python; tab-width: 4 -*-
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1996-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID
=
'$Id: ftp_server.py,v 1.13 2000/07/05 14:22:13 brian Exp $'
# An extensible, configurable, asynchronous FTP server.
#
# All socket I/O is non-blocking, however file I/O is currently
# blocking. Eventually file I/O may be made non-blocking, too, if it
# seems necessary. Currently the only CPU-intensive operation is
# getting and formatting a directory listing. [this could be moved
# into another process/directory server, or another thread?]
#
# Only a subset of RFC 959 is implemented, but much of that RFC is
# vestigial anyway. I've attempted to include the most commonly-used
# commands, using the feature set of wu-ftpd as a guide.
import
asyncore
import
asynchat
import
os
import
regsub
import
socket
import
stat
import
string
import
sys
import
time
# TODO: implement a directory listing cache. On very-high-load
# servers this could save a lot of disk abuse, and possibly the
# work of computing emulated unix ls output.
# Potential security problem with the FTP protocol? I don't think
# there's any verification of the origin of a data connection. Not
# really a problem for the server (since it doesn't send the port
# command, except when in PASV mode) But I think a data connection
# could be spoofed by a program with access to a sniffer - it could
# watch for a PORT command to go over a command channel, and then
# connect to that port before the server does.
# Unix user id's:
# In order to support assuming the id of a particular user,
# it seems there are two options:
# 1) fork, and seteuid in the child
# 2) carefully control the effective uid around filesystem accessing
# methods, using try/finally. [this seems to work]
VERSION
=
string
.
split
(
RCS_ID
)[
2
]
from
counter
import
counter
import
producers
import
status_handler
import
logger
import
string
class
ftp_channel
(
asynchat
.
async_chat
):
# defaults for a reliable __repr__
addr
=
(
'unknown'
,
'0'
)
# unset this in a derived class in order
# to enable the commands in 'self.write_commands'
read_only
=
1
write_commands
=
[
'appe'
,
'dele'
,
'mkd'
,
'rmd'
,
'rnfr'
,
'rnto'
,
'stor'
,
'stou'
]
restart_position
=
0
# comply with (possibly troublesome) RFC959 requirements
# This is necessary to correctly run an active data connection
# through a firewall that triggers on the source port (expected
# to be 'L-1', or 20 in the normal case).
bind_local_minus_one
=
0
def
__init__
(
self
,
server
,
conn
,
addr
):
self
.
server
=
server
self
.
current_mode
=
'a'
self
.
addr
=
addr
asynchat
.
async_chat
.
__init__
(
self
,
conn
)
self
.
set_terminator
(
'
\
r
\
n
'
)
# client data port. Defaults to 'the same as the control connection'.
self
.
client_addr
=
(
addr
[
0
],
21
)
self
.
client_dc
=
None
self
.
in_buffer
=
''
self
.
closing
=
0
self
.
passive_acceptor
=
None
self
.
passive_connection
=
None
self
.
filesystem
=
None
self
.
authorized
=
0
# send the greeting
self
.
respond
(
'220 %s FTP server (Medusa Async V%s [experimental]) ready.'
%
(
self
.
server
.
hostname
,
VERSION
)
)
# def __del__ (self):
# print 'ftp_channel.__del__()'
# --------------------------------------------------
# async-library methods
# --------------------------------------------------
def
handle_expt
(
self
):
# this is handled below. not sure what I could
# do here to make that code less kludgish.
pass
def
collect_incoming_data
(
self
,
data
):
self
.
in_buffer
=
self
.
in_buffer
+
data
if
len
(
self
.
in_buffer
)
>
4096
:
# silently truncate really long lines
# (possible denial-of-service attack)
self
.
in_buffer
=
''
def
found_terminator
(
self
):
line
=
self
.
in_buffer
if
not
len
(
line
):
return
sp
=
string
.
find
(
line
,
' '
)
if
sp
!=
-
1
:
line
=
[
line
[:
sp
],
line
[
sp
+
1
:]]
else
:
line
=
[
line
]
command
=
string
.
lower
(
line
[
0
])
# watch especially for 'urgent' abort commands.
if
string
.
find
(
command
,
'abor'
)
!=
-
1
:
# strip off telnet sync chars and the like...
while
command
and
command
[
0
]
not
in
string
.
letters
:
command
=
command
[
1
:]
fun_name
=
'cmd_%s'
%
command
if
command
!=
'pass'
:
self
.
log
(
'<== %s'
%
repr
(
self
.
in_buffer
)[
1
:
-
1
])
else
:
self
.
log
(
'<== %s'
%
line
[
0
]
+
' <password>'
)
self
.
in_buffer
=
''
if
not
hasattr
(
self
,
fun_name
):
self
.
command_not_understood
(
line
[
0
])
return
fun
=
getattr
(
self
,
fun_name
)
if
(
not
self
.
authorized
)
and
(
command
not
in
(
'user'
,
'pass'
,
'help'
,
'quit'
)):
self
.
respond
(
'530 Please log in with USER and PASS'
)
elif
(
not
self
.
check_command_authorization
(
command
)):
self
.
command_not_authorized
(
command
)
else
:
try
:
result
=
apply
(
fun
,
(
line
,))
except
:
self
.
server
.
total_exceptions
.
increment
()
(
file
,
fun
,
line
),
t
,
v
,
tbinfo
=
asyncore
.
compact_traceback
()
if
self
.
client_dc
:
try
:
self
.
client_dc
.
close
()
except
:
pass
self
.
respond
(
'451 Server Error: %s, %s: file: %s line: %s'
%
(
t
,
v
,
file
,
line
,
)
)
closed
=
0
def
close
(
self
):
if
not
self
.
closed
:
self
.
closed
=
1
if
self
.
passive_acceptor
:
self
.
passive_acceptor
.
close
()
if
self
.
client_dc
:
self
.
client_dc
.
close
()
self
.
server
.
closed_sessions
.
increment
()
asynchat
.
async_chat
.
close
(
self
)
# --------------------------------------------------
# filesystem interface functions.
# override these to provide access control or perform
# other functions.
# --------------------------------------------------
def
cwd
(
self
,
line
):
return
self
.
filesystem
.
cwd
(
line
[
1
])
def
cdup
(
self
,
line
):
return
self
.
filesystem
.
cdup
()
def
open
(
self
,
path
,
mode
):
return
self
.
filesystem
.
open
(
path
,
mode
)
# returns a producer
def
listdir
(
self
,
path
,
long
=
0
):
return
self
.
filesystem
.
listdir
(
path
,
long
)
def
get_dir_list
(
self
,
line
,
long
=
0
):
# we need to scan the command line for arguments to '/bin/ls'...
args
=
line
[
1
:]
path_args
=
[]
for
arg
in
args
:
if
arg
[
0
]
!=
'-'
:
path_args
.
append
(
arg
)
else
:
# ignore arguments
pass
if
len
(
path_args
)
<
1
:
dir
=
'.'
else
:
dir
=
path_args
[
0
]
return
self
.
listdir
(
dir
,
long
)
# --------------------------------------------------
# authorization methods
# --------------------------------------------------
def
check_command_authorization
(
self
,
command
):
if
command
in
self
.
write_commands
and
self
.
read_only
:
return
0
else
:
return
1
# --------------------------------------------------
# utility methods
# --------------------------------------------------
def
log
(
self
,
message
):
self
.
server
.
logger
.
log
(
self
.
addr
[
0
],
'%d %s'
%
(
self
.
addr
[
1
],
message
)
)
def
respond
(
self
,
resp
):
self
.
log
(
'==> %s'
%
resp
)
self
.
push
(
resp
+
'
\
r
\
n
'
)
def
command_not_understood
(
self
,
command
):
self
.
respond
(
"500 '%s': command not understood."
%
command
)
def
command_not_authorized
(
self
,
command
):
self
.
respond
(
"530 You are not authorized to perform the '%s' command"
%
(
command
)
)
def
make_xmit_channel
(
self
):
# In PASV mode, the connection may or may _not_ have been made
# yet. [although in most cases it is... FTP Explorer being
# the only exception I've yet seen]. This gets somewhat confusing
# because things may happen in any order...
pa
=
self
.
passive_acceptor
if
pa
:
if
pa
.
ready
:
# a connection has already been made.
conn
,
addr
=
self
.
passive_acceptor
.
ready
cdc
=
xmit_channel
(
self
,
addr
)
cdc
.
set_socket
(
conn
)
cdc
.
connected
=
1
self
.
passive_acceptor
.
close
()
self
.
passive_acceptor
=
None
else
:
# we're still waiting for a connect to the PASV port.
cdc
=
xmit_channel
(
self
)
else
:
# not in PASV mode.
ip
,
port
=
self
.
client_addr
cdc
=
xmit_channel
(
self
,
self
.
client_addr
)
cdc
.
create_socket
(
socket
.
AF_INET
,
socket
.
SOCK_STREAM
)
if
self
.
bind_local_minus_one
:
cdc
.
bind
((
''
,
self
.
server
.
port
-
1
))
try
:
cdc
.
connect
((
ip
,
port
))
except
socket
.
error
,
why
:
self
.
respond
(
"425 Can't build data connection"
)
self
.
client_dc
=
cdc
# pretty much the same as xmit, but only right on the verge of
# being worth a merge.
def
make_recv_channel
(
self
,
fd
):
pa
=
self
.
passive_acceptor
if
pa
:
if
pa
.
ready
:
# a connection has already been made.
conn
,
addr
=
pa
.
ready
cdc
=
recv_channel
(
self
,
addr
,
fd
)
cdc
.
set_socket
(
conn
)
cdc
.
connected
=
1
self
.
passive_acceptor
.
close
()
self
.
passive_acceptor
=
None
else
:
# we're still waiting for a connect to the PASV port.
cdc
=
recv_channel
(
self
,
None
,
fd
)
else
:
# not in PASV mode.
ip
,
port
=
self
.
client_addr
cdc
=
recv_channel
(
self
,
self
.
client_addr
,
fd
)
cdc
.
create_socket
(
socket
.
AF_INET
,
socket
.
SOCK_STREAM
)
try
:
cdc
.
connect
((
ip
,
port
))
except
socket
.
error
,
why
:
self
.
respond
(
"425 Can't build data connection"
)
self
.
client_dc
=
cdc
type_map
=
{
'a'
:
'ASCII'
,
'i'
:
'Binary'
,
'e'
:
'EBCDIC'
,
'l'
:
'Binary'
}
type_mode_map
=
{
'a'
:
't'
,
'i'
:
'b'
,
'e'
:
'b'
,
'l'
:
'b'
}
# --------------------------------------------------
# command methods
# --------------------------------------------------
def
cmd_type
(
self
,
line
):
'specify data transfer type'
# ascii, ebcdic, image, local <byte size>
t
=
string
.
lower
(
line
[
1
])
# no support for EBCDIC
# if t not in ['a','e','i','l']:
if
t
not
in
[
'a'
,
'i'
,
'l'
]:
self
.
command_not_understood
(
string
.
join
(
line
))
elif
t
==
'l'
and
(
len
(
line
)
>
2
and
line
[
2
]
!=
'8'
):
self
.
respond
(
'504 Byte size must be 8'
)
else
:
self
.
current_mode
=
t
self
.
respond
(
'200 Type set to %s.'
%
self
.
type_map
[
t
])
def
cmd_quit
(
self
,
line
):
'terminate session'
self
.
respond
(
'221 Goodbye.'
)
self
.
close_when_done
()
def
cmd_port
(
self
,
line
):
'specify data connection port'
info
=
string
.
split
(
line
[
1
],
','
)
ip
=
string
.
join
(
info
[:
4
],
'.'
)
port
=
string
.
atoi
(
info
[
4
])
*
256
+
string
.
atoi
(
info
[
5
])
# how many data connections at a time?
# I'm assuming one for now...
# TODO: we should (optionally) verify that the
# ip number belongs to the client. [wu-ftpd does this?]
self
.
client_addr
=
(
ip
,
port
)
self
.
respond
(
'200 PORT command successful.'
)
def
new_passive_acceptor
(
self
):
# ensure that only one of these exists at a time.
if
self
.
passive_acceptor
is
not
None
:
self
.
passive_acceptor
.
close
()
self
.
passive_acceptor
=
None
self
.
passive_acceptor
=
passive_acceptor
(
self
)
return
self
.
passive_acceptor
def
cmd_pasv
(
self
,
line
):
'prepare for server-to-server transfer'
pc
=
self
.
new_passive_acceptor
()
port
=
pc
.
addr
[
1
]
ip_addr
=
pc
.
control_channel
.
getsockname
()[
0
]
self
.
respond
(
'227 Entering Passive Mode (%s,%d,%d)'
%
(
string
.
join
(
string
.
split
(
ip_addr
,
'.'
),
','
),
port
/
256
,
port
%
256
)
)
self
.
client_dc
=
None
def
cmd_nlst
(
self
,
line
):
'give name list of files in directory'
# ncftp adds the -FC argument for the user-visible 'nlist'
# command. We could try to emulate ls flags, but not just yet.
if
'-FC'
in
line
:
line
.
remove
(
'-FC'
)
try
:
dir_list_producer
=
self
.
get_dir_list
(
line
,
0
)
except
os
.
error
,
why
:
self
.
respond
(
'550 Could not list directory: %s'
%
repr
(
why
))
return
self
.
respond
(
'150 Opening %s mode data connection for file list'
%
(
self
.
type_map
[
self
.
current_mode
]
)
)
self
.
make_xmit_channel
()
self
.
client_dc
.
push_with_producer
(
dir_list_producer
)
self
.
client_dc
.
close_when_done
()
def
cmd_list
(
self
,
line
):
'give list files in a directory'
try
:
dir_list_producer
=
self
.
get_dir_list
(
line
,
1
)
except
os
.
error
,
why
:
self
.
respond
(
'550 Could not list directory: %s'
%
repr
(
why
))
return
self
.
respond
(
'150 Opening %s mode data connection for file list'
%
(
self
.
type_map
[
self
.
current_mode
]
)
)
self
.
make_xmit_channel
()
self
.
client_dc
.
push_with_producer
(
dir_list_producer
)
self
.
client_dc
.
close_when_done
()
def
cmd_cwd
(
self
,
line
):
'change working directory'
if
self
.
cwd
(
line
):
self
.
respond
(
'250 CWD command successful.'
)
else
:
self
.
respond
(
'550 No such directory.'
)
def
cmd_cdup
(
self
,
line
):
'change to parent of current working directory'
if
self
.
cdup
(
line
):
self
.
respond
(
'250 CDUP command successful.'
)
else
:
self
.
respond
(
'550 No such directory.'
)
def
cmd_pwd
(
self
,
line
):
'print the current working directory'
self
.
respond
(
'257 "%s" is the current directory.'
%
(
self
.
filesystem
.
current_directory
()
)
)
# modification time
# example output:
# 213 19960301204320
def
cmd_mdtm
(
self
,
line
):
'show last modification time of file'
filename
=
line
[
1
]
if
not
self
.
filesystem
.
isfile
(
filename
):
self
.
respond
(
'550 "%s" is not a file'
%
filename
)
else
:
mtime
=
time
.
gmtime
(
self
.
filesystem
.
stat
(
filename
)[
stat
.
ST_MTIME
])
self
.
respond
(
'213 %4d%02d%02d%02d%02d%02d'
%
(
mtime
[
0
],
mtime
[
1
],
mtime
[
2
],
mtime
[
3
],
mtime
[
4
],
mtime
[
5
]
)
)
def
cmd_noop
(
self
,
line
):
'do nothing'
self
.
respond
(
'200 NOOP command successful.'
)
def
cmd_size
(
self
,
line
):
'return size of file'
filename
=
line
[
1
]
if
not
self
.
filesystem
.
isfile
(
filename
):
self
.
respond
(
'550 "%s" is not a file'
%
filename
)
else
:
self
.
respond
(
'213 %d'
%
(
self
.
filesystem
.
stat
(
filename
)[
stat
.
ST_SIZE
])
)
def
cmd_retr
(
self
,
line
):
'retrieve a file'
if
len
(
line
)
<
2
:
self
.
command_not_understood
(
string
.
join
(
line
))
else
:
file
=
line
[
1
]
if
not
self
.
filesystem
.
isfile
(
file
):
self
.
log_info
(
'checking %s'
%
file
)
self
.
respond
(
'550 No such file'
)
else
:
try
:
# FIXME: for some reason, 'rt' isn't working on win95
mode
=
'r'
+
self
.
type_mode_map
[
self
.
current_mode
]
fd
=
self
.
open
(
file
,
mode
)
except
IOError
,
why
:
self
.
respond
(
'553 could not open file for reading: %s'
%
(
repr
(
why
)))
return
self
.
respond
(
"150 Opening %s mode data connection for file '%s'"
%
(
self
.
type_map
[
self
.
current_mode
],
file
)
)
self
.
make_xmit_channel
()
if
self
.
restart_position
:
# try to position the file as requested, but
# give up silently on failure (the 'file object'
# may not support seek())
try
:
fd
.
seek
(
self
.
restart_position
)
except
:
pass
self
.
restart_position
=
0
self
.
client_dc
.
push_with_producer
(
file_producer
(
self
,
self
.
client_dc
,
fd
)
)
self
.
client_dc
.
close_when_done
()
def
cmd_stor
(
self
,
line
,
mode
=
'wb'
):
'store a file'
if
len
(
line
)
<
2
:
self
.
command_not_understood
(
string
.
join
(
line
))
else
:
if
self
.
restart_position
:
restart_position
=
0
self
.
respond
(
'553 restart on STOR not yet supported'
)
return
file
=
line
[
1
]
# todo: handle that type flag
try
:
fd
=
self
.
open
(
file
,
mode
)
except
IOError
,
why
:
self
.
respond
(
'553 could not open file for writing: %s'
%
(
repr
(
why
)))
return
self
.
respond
(
'150 Opening %s connection for %s'
%
(
self
.
type_map
[
self
.
current_mode
],
file
)
)
self
.
make_recv_channel
(
fd
)
def
cmd_abor
(
self
,
line
):
'abort operation'
if
self
.
client_dc
:
self
.
client_dc
.
close
()
self
.
respond
(
'226 ABOR command successful.'
)
def
cmd_appe
(
self
,
line
):
'append to a file'
return
self
.
cmd_stor
(
line
,
'ab'
)
def
cmd_dele
(
self
,
line
):
if
len
(
line
)
!=
2
:
self
.
command_not_understood
(
string
.
join
(
line
))
else
:
file
=
line
[
1
]
if
self
.
filesystem
.
isfile
(
file
):
try
:
self
.
filesystem
.
unlink
(
file
)
self
.
respond
(
'250 DELE command successful.'
)
except
:
self
.
respond
(
'550 error deleting file.'
)
else
:
self
.
respond
(
'550 %s: No such file.'
%
file
)
def
cmd_mkd
(
self
,
line
):
if
len
(
line
)
!=
2
:
self
.
command
.
not_understood
(
string
.
join
(
line
))
else
:
path
=
line
[
1
]
try
:
self
.
filesystem
.
mkdir
(
path
)
self
.
respond
(
'257 MKD command successful.'
)
except
:
self
.
respond
(
'550 error creating directory.'
)
def
cmd_rmd
(
self
,
line
):
if
len
(
line
)
!=
2
:
self
.
command
.
not_understood
(
string
.
join
(
line
))
else
:
path
=
line
[
1
]
try
:
self
.
filesystem
.
rmdir
(
path
)
self
.
respond
(
'250 RMD command successful.'
)
except
:
self
.
respond
(
'550 error removing directory.'
)
def
cmd_user
(
self
,
line
):
'specify user name'
if
len
(
line
)
>
1
:
self
.
user
=
line
[
1
]
self
.
respond
(
'331 Password required.'
)
else
:
self
.
command_not_understood
(
string
.
join
(
line
))
def
cmd_pass
(
self
,
line
):
'specify password'
if
len
(
line
)
<
2
:
pw
=
''
else
:
pw
=
line
[
1
]
result
,
message
,
fs
=
self
.
server
.
authorizer
.
authorize
(
self
,
self
.
user
,
pw
)
if
result
:
self
.
respond
(
'230 %s'
%
message
)
self
.
filesystem
=
fs
self
.
authorized
=
1
self
.
log_info
(
'Successful login: Filesystem=%s'
%
repr
(
fs
))
else
:
self
.
respond
(
'530 %s'
%
message
)
def
cmd_rest
(
self
,
line
):
'restart incomplete transfer'
try
:
pos
=
string
.
atoi
(
line
[
1
])
except
ValueError
:
self
.
command_not_understood
(
string
.
join
(
line
))
self
.
restart_position
=
pos
self
.
respond
(
'350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.'
%
pos
)
def
cmd_stru
(
self
,
line
):
'obsolete - set file transfer structure'
if
line
[
1
]
in
'fF'
:
# f == 'file'
self
.
respond
(
'200 STRU F Ok'
)
else
:
self
.
respond
(
'504 Unimplemented STRU type'
)
def
cmd_mode
(
self
,
line
):
'obsolete - set file transfer mode'
if
line
[
1
]
in
'sS'
:
# f == 'file'
self
.
respond
(
'200 MODE S Ok'
)
else
:
self
.
respond
(
'502 Unimplemented MODE type'
)
# The stat command has two personalities. Normally it returns status
# information about the current connection. But if given an argument,
# it is equivalent to the LIST command, with the data sent over the
# control connection. Strange. But wuftpd, ftpd, and nt's ftp server
# all support it.
#
## def cmd_stat (self, line):
## 'return status of server'
## pass
def
cmd_syst
(
self
,
line
):
'show operating system type of server system'
# Replying to this command is of questionable utility, because
# this server does not behave in a predictable way w.r.t. the
# output of the LIST command. We emulate Unix ls output, but
# on win32 the pathname can contain drive information at the front
# Currently, the combination of ensuring that os.sep == '/'
# and removing the leading slash when necessary seems to work.
# [cd'ing to another drive also works]
#
# This is how wuftpd responds, and is probably
# the most expected. The main purpose of this reply is so that
# the client knows to expect Unix ls-style LIST output.
self
.
respond
(
'215 UNIX Type: L8'
)
# one disadvantage to this is that some client programs
# assume they can pass args to /bin/ls.
# a few typical responses:
# 215 UNIX Type: L8 (wuftpd)
# 215 Windows_NT version 3.51
# 215 VMS MultiNet V3.3
# 500 'SYST': command not understood. (SVR4)
def
cmd_help
(
self
,
line
):
'give help information'
# find all the methods that match 'cmd_xxxx',
# use their docstrings for the help response.
attrs
=
dir
(
self
.
__class__
)
help_lines
=
[]
for
attr
in
attrs
:
if
attr
[:
4
]
==
'cmd_'
:
x
=
getattr
(
self
,
attr
)
if
type
(
x
)
==
type
(
self
.
cmd_help
):
if
x
.
__doc__
:
help_lines
.
append
(
'
\
t
%s
\
t
%s'
%
(
attr
[
4
:],
x
.
__doc__
))
if
help_lines
:
self
.
push
(
'214-The following commands are recognized
\
r
\
n
'
)
self
.
push_with_producer
(
producers
.
lines_producer
(
help_lines
))
self
.
push
(
'214
\
r
\
n
'
)
else
:
self
.
push
(
'214-
\
r
\
n
\
t
Help Unavailable
\
r
\
n
214
\
r
\
n
'
)
class
ftp_server
(
asyncore
.
dispatcher
):
# override this to spawn a different FTP channel class.
ftp_channel_class
=
ftp_channel
SERVER_IDENT
=
'FTP Server (V%s)'
%
VERSION
def
__init__
(
self
,
authorizer
,
hostname
=
None
,
ip
=
''
,
port
=
21
,
resolver
=
None
,
logger_object
=
logger
.
file_logger
(
sys
.
stdout
)
):
self
.
ip
=
ip
self
.
port
=
port
self
.
authorizer
=
authorizer
if
hostname
is
None
:
self
.
hostname
=
socket
.
gethostname
()
else
:
self
.
hostname
=
hostname
# statistics
self
.
total_sessions
=
counter
()
self
.
closed_sessions
=
counter
()
self
.
total_files_out
=
counter
()
self
.
total_files_in
=
counter
()
self
.
total_bytes_out
=
counter
()
self
.
total_bytes_in
=
counter
()
self
.
total_exceptions
=
counter
()
#
asyncore
.
dispatcher
.
__init__
(
self
)
self
.
create_socket
(
socket
.
AF_INET
,
socket
.
SOCK_STREAM
)
self
.
set_reuse_addr
()
self
.
bind
((
self
.
ip
,
self
.
port
))
self
.
listen
(
5
)
if
not
logger_object
:
logger_object
=
sys
.
stdout
if
resolver
:
self
.
logger
=
logger
.
resolving_logger
(
resolver
,
logger_object
)
else
:
self
.
logger
=
logger
.
unresolving_logger
(
logger_object
)
self
.
log_info
(
'FTP server started at %s
\
n
\
t
Authorizer:%s
\
n
\
t
Hostname: %s
\
n
\
t
Port: %d'
%
(
time
.
ctime
(
time
.
time
()),
repr
(
self
.
authorizer
),
self
.
hostname
,
self
.
port
)
)
def
writable
(
self
):
return
0
def
handle_read
(
self
):
pass
def
handle_connect
(
self
):
pass
def
handle_accept
(
self
):
conn
,
addr
=
self
.
accept
()
self
.
total_sessions
.
increment
()
self
.
log_info
(
'Incoming connection from %s:%d'
%
(
addr
[
0
],
addr
[
1
]))
self
.
ftp_channel_class
(
self
,
conn
,
addr
)
# return a producer describing the state of the server
def
status
(
self
):
def
nice_bytes
(
n
):
return
string
.
join
(
status_handler
.
english_bytes
(
n
))
return
producers
.
lines_producer
(
[
'<h2>%s</h2>'
%
self
.
SERVER_IDENT
,
'<br>Listening on <b>Host:</b> %s'
%
self
.
hostname
,
'<b>Port:</b> %d'
%
self
.
port
,
'<br>Sessions'
,
'<b>Total:</b> %s'
%
self
.
total_sessions
,
'<b>Current:</b> %d'
%
(
self
.
total_sessions
.
as_long
()
-
self
.
closed_sessions
.
as_long
()),
'<br>Files'
,
'<b>Sent:</b> %s'
%
self
.
total_files_out
,
'<b>Received:</b> %s'
%
self
.
total_files_in
,
'<br>Bytes'
,
'<b>Sent:</b> %s'
%
nice_bytes
(
self
.
total_bytes_out
.
as_long
()),
'<b>Received:</b> %s'
%
nice_bytes
(
self
.
total_bytes_in
.
as_long
()),
'<br>Exceptions: %s'
%
self
.
total_exceptions
,
]
)
# ======================================================================
# Data Channel Classes
# ======================================================================
# This socket accepts a data connection, used when the server has been
# placed in passive mode. Although the RFC implies that we ought to
# be able to use the same acceptor over and over again, this presents
# a problem: how do we shut it off, so that we are accepting
# connections only when we expect them? [we can't]
#
# wuftpd, and probably all the other servers, solve this by allowing
# only one connection to hit this acceptor. They then close it. Any
# subsequent data-connection command will then try for the default
# port on the client side [which is of course never there]. So the
# 'always-send-PORT/PASV' behavior seems required.
#
# Another note: wuftpd will also be listening on the channel as soon
# as the PASV command is sent. It does not wait for a data command
# first.
# --- we need to queue up a particular behavior:
# 1) xmit : queue up producer[s]
# 2) recv : the file object
#
# It would be nice if we could make both channels the same. Hmmm..
#
class
passive_acceptor
(
asyncore
.
dispatcher
):
ready
=
None
def
__init__
(
self
,
control_channel
):
# connect_fun (conn, addr)
asyncore
.
dispatcher
.
__init__
(
self
)
self
.
control_channel
=
control_channel
self
.
create_socket
(
socket
.
AF_INET
,
socket
.
SOCK_STREAM
)
# bind to an address on the interface that the
# control connection is coming from.
self
.
bind
((
self
.
control_channel
.
getsockname
()[
0
],
0
))
self
.
addr
=
self
.
getsockname
()
self
.
listen
(
1
)
# def __del__ (self):
# print 'passive_acceptor.__del__()'
def
log
(
self
,
*
ignore
):
pass
def
handle_accept
(
self
):
conn
,
addr
=
self
.
accept
()
dc
=
self
.
control_channel
.
client_dc
if
dc
is
not
None
:
dc
.
set_socket
(
conn
)
dc
.
addr
=
addr
dc
.
connected
=
1
self
.
control_channel
.
passive_acceptor
=
None
else
:
self
.
ready
=
conn
,
addr
self
.
close
()
class xmit_channel (asynchat.async_chat):

    # for an ethernet, you want this to be fairly large, in fact, it
    # _must_ be large for performance comparable to an ftpd.  [64k] we
    # ought to investigate automatically-sized buffers...

    ac_out_buffer_size = 16384
    bytes_out = 0

    def __init__ (self, channel, client_addr=None):
        self.channel = channel
        self.client_addr = client_addr
        asynchat.async_chat.__init__ (self)

    # def __del__ (self):
    #     print 'xmit_channel.__del__()'

    def log (*args):
        pass

    def readable (self):
        return not self.connected

    def writable (self):
        return 1

    def send (self, data):
        result = asynchat.async_chat.send (self, data)
        self.bytes_out = self.bytes_out + result
        return result

    def handle_error (self):
        # usually this is to catch an unexpected disconnect.
        self.log_info ('unexpected disconnect on data xmit channel', 'error')
        try:
            self.close()
        except:
            pass

    # TODO: there's a better way to do this.  we need to be able to
    # put 'events' in the producer fifo.  to do this cleanly we need
    # to reposition the 'producer' fifo as an 'event' fifo.

    def close (self):
        c = self.channel
        s = c.server
        c.client_dc = None
        s.total_files_out.increment()
        s.total_bytes_out.increment (self.bytes_out)
        if not len(self.producer_fifo):
            c.respond ('226 Transfer complete')
        elif not c.closed:
            c.respond ('426 Connection closed; transfer aborted')
        del c
        del s
        del self.channel
        asynchat.async_chat.close (self)
class recv_channel (asyncore.dispatcher):
    def __init__ (self, channel, client_addr, fd):
        self.channel = channel
        self.client_addr = client_addr
        self.fd = fd
        asyncore.dispatcher.__init__ (self)
        self.bytes_in = counter()

    def log (self, *ignore):
        pass

    def handle_connect (self):
        pass

    def writable (self):
        return 0

    def recv (*args):
        result = apply (asyncore.dispatcher.recv, args)
        self = args[0]
        self.bytes_in.increment (len (result))
        return result

    buffer_size = 8192

    def handle_read (self):
        block = self.recv (self.buffer_size)
        if block:
            try:
                self.fd.write (block)
            except IOError:
                self.log_info ('got exception writing block...', 'error')

    def handle_close (self):
        s = self.channel.server
        s.total_files_in.increment()
        s.total_bytes_in.increment (self.bytes_in.as_long())
        self.fd.close()
        self.channel.respond ('226 Transfer complete.')
        self.close()
import filesys

# not much of a doorman! 8^)
class dummy_authorizer:
    def __init__ (self, root='/'):
        self.root = root
    def authorize (self, channel, username, password):
        channel.persona = -1, -1
        channel.read_only = 1
        return 1, 'Ok.', filesys.os_filesystem (self.root)

class anon_authorizer:
    def __init__ (self, root='/'):
        self.root = root
    def authorize (self, channel, username, password):
        if username in ('ftp', 'anonymous'):
            channel.persona = -1, -1
            channel.read_only = 1
            return 1, 'Ok.', filesys.os_filesystem (self.root)
        else:
            return 0, 'Password invalid.', None
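# Illustrative sketch (not in the original source): any object whose
# authorize() method returns the same (success, reply_string, filesystem)
# trio can serve as an authorizer.  'table_authorizer' is hypothetical.
class table_authorizer:
    "check credentials against an in-memory {username: password} table"
    def __init__ (self, users, root='/'):
        self.users = users
        self.root = root
    def authorize (self, channel, username, password):
        if self.users.get (username) == password:
            channel.read_only = 1
            return 1, 'Ok.', filesys.os_filesystem (self.root)
        else:
            return 0, 'Password invalid.', None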
# ===========================================================================
# Unix-specific improvements
# ===========================================================================
if os.name == 'posix':

    class unix_authorizer:
        # return a trio of (success, reply_string, filesystem)
        def authorize (self, channel, username, password):
            import crypt
            import pwd
            try:
                info = pwd.getpwnam (username)
            except KeyError:
                return 0, 'No such user.', None
            mangled = info[1]
            if crypt.crypt (password, mangled[:2]) == mangled:
                channel.read_only = 0
                fs = filesys.schizophrenic_unix_filesystem (
                    '/',
                    info[5],
                    persona = (info[2], info[3])
                )
                return 1, 'Login successful.', fs
            else:
                return 0, 'Password invalid.', None

        def __repr__ (self):
            return '<standard unix authorizer>'
    # simple anonymous ftp support
    class unix_authorizer_with_anonymous (unix_authorizer):
        def __init__ (self, root=None, real_users=0):
            self.root = root
            self.real_users = real_users

        def authorize (self, channel, username, password):
            if string.lower (username) in ['anonymous', 'ftp']:
                import pwd
                try:
                    # ok, here we run into lots of confusion.
                    # on some os', anon runs under user 'nobody',
                    # on others as 'ftp'.  ownership is also critical.
                    # need to investigate.
                    # linux: new linuxen seem to have nobody's UID=-1,
                    #    which is an illegal value.  Use ftp.
                    ftp_user_info = pwd.getpwnam ('ftp')
                    if string.lower (os.uname()[0]) == 'linux':
                        nobody_user_info = pwd.getpwnam ('ftp')
                    else:
                        nobody_user_info = pwd.getpwnam ('nobody')
                    channel.read_only = 1
                    if self.root is None:
                        self.root = ftp_user_info[5]
                    fs = filesys.unix_filesystem (self.root, '/')
                    return 1, 'Anonymous Login Successful', fs
                except KeyError:
                    return 0, 'Anonymous account not set up', None
            elif self.real_users:
                return unix_authorizer.authorize (
                    self,
                    channel,
                    username,
                    password
                )
            else:
                return 0, 'User logins not allowed', None
class file_producer:
    block_size = 16384
    def __init__ (self, server, dc, fd):
        self.fd = fd
        self.done = 0

    def more (self):
        if self.done:
            return ''
        else:
            block = self.fd.read (self.block_size)
            if not block:
                self.fd.close()
                self.done = 1
            return block
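# Illustrative sketch (not in the original source): file_producer follows
# Medusa's producer protocol -- more() returns the next block of output,
# and an empty string marks completion.  Any object of that shape works;
# 'list_producer' is a hypothetical minimal example.
class list_producer:
    def __init__ (self, blocks):
        self.blocks = blocks            # a list of strings
    def more (self):
        if self.blocks:
            block = self.blocks[0]
            del self.blocks[0]
            return block
        else:
            return ''                   # empty string signals completion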
# usage: ftp_server /PATH/TO/FTP/ROOT PORT
# for example:
# $ ftp_server /home/users/ftp 8021
if os.name == 'posix':
    def test (port='8021'):
        import sys
        fs = ftp_server (
            unix_authorizer(),
            port=string.atoi (port)
        )
        try:
            asyncore.loop()
        except KeyboardInterrupt:
            fs.log_info ('FTP server shutting down. (received SIGINT)', 'warning')
            # close everything down on SIGINT.
            # of course this should be a cleaner shutdown.
            asyncore.close_all()

    if __name__ == '__main__':
        test (sys.argv[1])

# not unix
else:
    def test ():
        # note: only constructs the server; the caller is expected to
        # run asyncore.loop() itself.
        fs = ftp_server (dummy_authorizer())
    if __name__ == '__main__':
        test ()
# this is the command list from the wuftpd man page
# '*' means we've implemented it.
# '!' requires write access
#
command_documentation = {
    'abor': 'abort previous command',                                   #*
    'acct': 'specify account (ignored)',
    'allo': 'allocate storage (vacuously)',
    'appe': 'append to a file',                                         #*!
    'cdup': 'change to parent of current working directory',            #*
    'cwd':  'change working directory',                                 #*
    'dele': 'delete a file',                                            #!
    'help': 'give help information',                                    #*
    'list': 'give list files in a directory',                           #*
    'mkd':  'make a directory',                                         #!
    'mdtm': 'show last modification time of file',                      #*
    'mode': 'specify data transfer mode',
    'nlst': 'give name list of files in directory',                     #*
    'noop': 'do nothing',                                               #*
    'pass': 'specify password',                                         #*
    'pasv': 'prepare for server-to-server transfer',                    #*
    'port': 'specify data connection port',                             #*
    'pwd':  'print the current working directory',                      #*
    'quit': 'terminate session',                                        #*
    'rest': 'restart incomplete transfer',                              #*
    'retr': 'retrieve a file',                                          #*
    'rmd':  'remove a directory',                                       #!
    'rnfr': 'specify rename-from file name',                            #!
    'rnto': 'specify rename-to file name',                              #!
    'site': 'non-standard commands (see next section)',
    'size': 'return size of file',                                      #*
    'stat': 'return status of server',                                  #*
    'stor': 'store a file',                                             #*!
    'stou': 'store a file with a unique name',                          #!
    'stru': 'specify data transfer structure',
    'syst': 'show operating system type of server system',              #*
    'type': 'specify data transfer type',                               #*
    'user': 'specify user name',                                        #*
    'xcup': 'change to parent of current working directory (deprecated)',
    'xcwd': 'change working directory (deprecated)',
    'xmkd': 'make a directory (deprecated)',                            #!
    'xpwd': 'print the current working directory (deprecated)',
    'xrmd': 'remove a directory (deprecated)',                          #!
}
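# Illustrative sketch (not in the original source): how a HELP response
# might be built from the table above.  'help_text' is a hypothetical
# helper, not the server's actual HELP implementation.
def help_text (cmd):
    if command_documentation.has_key (cmd):
        return '214 %s: %s' % (string.upper (cmd), command_documentation[cmd])
    else:
        return '502 Unknown command: %s' % repr (cmd)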
# debugging aid (linux)
def get_vm_size ():
    return string.atoi (string.split (open ('/proc/self/stat').readline())[22])

def print_vm ():
    print 'vm: %8dk' % (get_vm_size() / 1024)
ZServer/medusa/http_date.py  deleted  100644 → 0
# -*- Mode: Python; tab-width: 4 -*-

import regex
import string
import time

def concat (*args):
    return string.joinfields (args, '')

def join (seq, field=' '):
    return string.joinfields (seq, field)

def group (s):
    return '\\(' + s + '\\)'

short_days = ['sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat']
long_days = ['sunday', 'monday', 'tuesday', 'wednesday',
             'thursday', 'friday', 'saturday']

short_day_reg = group (join (short_days, '\\|'))
long_day_reg = group (join (long_days, '\\|'))

daymap = {}
for i in range(7):
    daymap[short_days[i]] = i
    daymap[long_days[i]] = i

hms_reg = join (3 * [group ('[0-9][0-9]')], ':')

months = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',
          'jul', 'aug', 'sep', 'oct', 'nov', 'dec']

monmap = {}
for i in range(12):
    monmap[months[i]] = i + 1

months_reg = group (join (months, '\\|'))

# From draft-ietf-http-v11-spec-07.txt/3.3.1
#       Sun, 06 Nov 1994 08:49:37 GMT   ; RFC 822, updated by RFC 1123
#       Sunday, 06-Nov-94 08:49:37 GMT  ; RFC 850, obsoleted by RFC 1036
#       Sun Nov  6 08:49:37 1994        ; ANSI C's asctime() format

# rfc822 format
rfc822_date = join (
    [concat (short_day_reg, ','),       # day
     group ('[0-9][0-9]?'),             # date
     months_reg,                        # month
     group ('[0-9]+'),                  # year
     hms_reg,                           # hour minute second
     'gmt'
     ],
    ' '
)

rfc822_reg = regex.compile (rfc822_date)

def unpack_rfc822 ():
    g = rfc822_reg.group
    a = string.atoi
    return (
        a (g (4)),          # year
        monmap[g (3)],      # month
        a (g (2)),          # day
        a (g (5)),          # hour
        a (g (6)),          # minute
        a (g (7)),          # second
        0,
        0,
        0
    )

# rfc850 format
rfc850_date = join (
    [concat (long_day_reg, ','),
     join (
         [group ('[0-9][0-9]?'),
          months_reg,
          group ('[0-9]+')
          ],
         '-'
     ),
     hms_reg,
     'gmt'
     ],
    ' '
)

rfc850_reg = regex.compile (rfc850_date)

# they actually unpack the same way
def unpack_rfc850 ():
    g = rfc850_reg.group
    a = string.atoi
    return (
        a (g (4)),          # year
        monmap[g (3)],      # month
        a (g (2)),          # day
        a (g (5)),          # hour
        a (g (6)),          # minute
        a (g (7)),          # second
        0,
        0,
        0
    )

# parsdate.parsedate    - ~700/sec.
# parse_http_date       - ~1333/sec.

weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def build_http_date (when):
    year, month, day, hh, mm, ss, wd, y, z = time.gmtime (when)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        weekdayname[wd],
        day, monthname[month], year,
        hh, mm, ss
    )

def parse_http_date (d):
    d = string.lower (d)
    tz = time.timezone
    if rfc850_reg.match (d) == len(d):
        retval = int (time.mktime (unpack_rfc850()) - tz)
    elif rfc822_reg.match (d) == len(d):
        retval = int (time.mktime (unpack_rfc822()) - tz)
    else:
        return 0
    # Thanks to Craig Silverstein <csilvers@google.com> for pointing
    # out the DST discrepancy
    if time.daylight and time.localtime (retval)[-1] == 1:  # DST correction
        retval = retval + (tz - time.altzone)
    return retval
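# Illustrative usage sketch (not in the original source): formatting and
# re-parsing a date should round-trip to the same whole-second timestamp.
if __name__ == '__main__':
    now = int (time.time())
    stamp = build_http_date (now)       # e.g. 'Sun, 06 Nov 1994 08:49:37 GMT'
    print stamp
    print parse_http_date (stamp) == now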
ZServer/medusa/http_server.py  deleted  100644 → 0
#! /usr/local/bin/python
# -*- Mode: Python; tab-width: 4 -*-
#
#       Author: Sam Rushing <rushing@nightmare.com>
#       Copyright 1996-2000 by Sam Rushing
#                                                All Rights Reserved.
#

RCS_ID = '$Id: http_server.py,v 1.20 2001/02/13 21:18:54 brian Exp $'

# python modules
import os
import regex
import re
import socket
import stat
import string
import sys
import time

# async modules
import asyncore
import asynchat

# medusa modules
import http_date
import producers
import status_handler
import logger

VERSION_STRING = string.split (RCS_ID)[2]

from counter import counter

# ===========================================================================
#                                                       Request Object
# ===========================================================================

class http_request:

    # default reply code
    reply_code = 200

    request_counter = counter()
    # Whether to automatically use chunked encoding when
    #
    #   HTTP version is 1.1
    #   Content-Length is not set
    #   Chunked encoding is not already in effect
    #
    # If your clients are having trouble, you might want to disable this.
    use_chunked = 1

    # by default, this request object ignores user data.
    collector = None

    def __init__ (self, *args):
        # unpack information about the request
        (self.channel, self.request,
         self.command, self.uri, self.version,
         self.header) = args

        self.outgoing = fifo()
        self.reply_headers = {
            'Server': 'Medusa/%s' % VERSION_STRING,
            'Date': http_date.build_http_date (time.time())
        }
        self.request_number = http_request.request_counter.increment()
        self._split_uri = None
        self._header_cache = {}
    # --------------------------------------------------
    # reply header management
    # --------------------------------------------------
    def __setitem__ (self, key, value):
        self.reply_headers[key] = value

    def __getitem__ (self, key):
        return self.reply_headers[key]

    def has_key (self, key):
        return self.reply_headers.has_key (key)

    def build_reply_header (self):
        return string.join (
            [self.response (self.reply_code)] + map (
                lambda x: '%s: %s' % x,
                self.reply_headers.items()
            ),
            '\r\n'
        ) + '\r\n\r\n'
    # --------------------------------------------------
    # split a uri
    # --------------------------------------------------

    # <path>;<params>?<query>#<fragment>
    path_regex = regex.compile (
        #        path       params       query    fragment
        '\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\)?\(#.*\)?'
    )

    def split_uri (self):
        if self._split_uri is None:
            if self.path_regex.match (self.uri) != len(self.uri):
                raise ValueError, "Broken URI"
            else:
                self._split_uri = map (lambda i,r=self.path_regex: r.group(i), range(1,5))
        return self._split_uri

    def get_header_with_regex (self, head_reg, group):
        for line in self.header:
            if head_reg.match (line) == len(line):
                return head_reg.group (group)
        return ''

    def get_header (self, header):
        header = string.lower (header)
        hc = self._header_cache
        if not hc.has_key (header):
            h = header + ': '
            hl = len(h)
            for line in self.header:
                if string.lower (line[:hl]) == h:
                    r = line[hl:]
                    hc[header] = r
                    return r
            hc[header] = None
            return None
        else:
            return hc[header]
    # --------------------------------------------------
    # user data
    # --------------------------------------------------

    def collect_incoming_data (self, data):
        if self.collector:
            self.collector.collect_incoming_data (data)
        else:
            self.log_info (
                'Dropping %d bytes of incoming request data' % len(data),
                'warning'
            )

    def found_terminator (self):
        if self.collector:
            self.collector.found_terminator()
        else:
            self.log_info (
                'Unexpected end-of-record for incoming request',
                'warning'
            )

    def push (self, thing):
        if type(thing) == type(''):
            self.outgoing.push (producers.simple_producer (thing))
        else:
            self.outgoing.push (thing)

    def response (self, code=200):
        message = self.responses[code]
        self.reply_code = code
        return 'HTTP/%s %d %s' % (self.version, code, message)

    def error (self, code):
        self.reply_code = code
        message = self.responses[code]
        s = self.DEFAULT_ERROR_MESSAGE % {
            'code': code,
            'message': message,
        }
        self['Content-Length'] = len(s)
        self['Content-Type'] = 'text/html'
        # make an error reply
        self.push (s)
        self.done()

    # can also be used for empty replies
    reply_now = error
    def done (self):
        "finalize this transaction - send output to the http channel"

        # ----------------------------------------
        # persistent connection management
        # ----------------------------------------

        #  --- BUCKLE UP! ----

        connection = string.lower (get_header (CONNECTION, self.header))

        close_it = 0
        wrap_in_chunking = 0

        if self.version == '1.0':
            if connection == 'keep-alive':
                if not self.has_key ('Content-Length'):
                    close_it = 1
                else:
                    self['Connection'] = 'Keep-Alive'
            else:
                close_it = 1
        elif self.version == '1.1':
            if connection == 'close':
                close_it = 1
            elif not self.has_key ('Content-Length'):
                if self.has_key ('Transfer-Encoding'):
                    if not self['Transfer-Encoding'] == 'chunked':
                        close_it = 1
                elif self.use_chunked:
                    self['Transfer-Encoding'] = 'chunked'
                    wrap_in_chunking = 1
                else:
                    close_it = 1
        elif self.version is None:
            # Although we don't *really* support http/0.9 (because we'd have to
            # use \r\n as a terminator, and it would just yuck up a lot of stuff)
            # it's very common for developers to not want to type a version number
            # when using telnet to debug a server.
            close_it = 1

        outgoing_header = producers.simple_producer (self.build_reply_header())

        if close_it:
            self['Connection'] = 'close'

        if wrap_in_chunking:
            outgoing_producer = producers.chunked_producer (
                producers.composite_producer (self.outgoing)
            )
            # prepend the header
            outgoing_producer = producers.composite_producer (
                fifo ([outgoing_header, outgoing_producer])
            )
        else:
            # prepend the header
            self.outgoing.push_front (outgoing_header)
            outgoing_producer = producers.composite_producer (self.outgoing)

        # apply a few final transformations to the output
        self.channel.push_with_producer (
            # globbing gives us large packets
            producers.globbing_producer (
                # hooking lets us log the number of bytes sent
                producers.hooked_producer (
                    outgoing_producer,
                    self.log
                )
            )
        )

        self.channel.current_request = None

        if close_it:
            self.channel.close_when_done()
    def log_date_string (self, when):
        logtime = time.localtime (when)
        return time.strftime ('%d/', logtime) + \
               http_date.monthname[logtime[1]] + \
               time.strftime ('/%Y:%H:%M:%S ', logtime) + \
               tz_for_log

    def log (self, bytes):
        user_agent = self.get_header ('user-agent')
        if not user_agent:
            user_agent = ''
        referer = self.get_header ('referer')
        if not referer:
            referer = ''
        self.channel.server.logger.log (
            self.channel.addr[0],
            ' - - [%s] "%s" %d %d "%s" "%s"\n' % (
                # self.channel.addr[1],
                self.log_date_string (time.time()),
                self.request,
                self.reply_code,
                bytes,
                referer,
                user_agent
            )
        )
    responses = {
        100: "Continue",
        101: "Switching Protocols",
        200: "OK",
        201: "Created",
        202: "Accepted",
        203: "Non-Authoritative Information",
        204: "No Content",
        205: "Reset Content",
        206: "Partial Content",
        300: "Multiple Choices",
        301: "Moved Permanently",
        302: "Moved Temporarily",
        303: "See Other",
        304: "Not Modified",
        305: "Use Proxy",
        400: "Bad Request",
        401: "Unauthorized",
        402: "Payment Required",
        403: "Forbidden",
        404: "Not Found",
        405: "Method Not Allowed",
        406: "Not Acceptable",
        407: "Proxy Authentication Required",
        408: "Request Time-out",
        409: "Conflict",
        410: "Gone",
        411: "Length Required",
        412: "Precondition Failed",
        413: "Request Entity Too Large",
        414: "Request-URI Too Large",
        415: "Unsupported Media Type",
        500: "Internal Server Error",
        501: "Not Implemented",
        502: "Bad Gateway",
        503: "Service Unavailable",
        504: "Gateway Time-out",
        505: "HTTP Version not supported"
    }

    # Default error message
    DEFAULT_ERROR_MESSAGE = string.join (
        ['<head>',
         '<title>Error response</title>',
         '</head>',
         '<body>',
         '<h1>Error response</h1>',
         '<p>Error code %(code)d.',
         '<p>Message: %(message)s.',
         '</body>',
         ''
         ],
        '\r\n'
    )
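# Illustrative sketch (not in the original source): the framing that
# producers.chunked_producer (used in done() above) puts on the wire.
# Each chunk is a hex length line, CRLF, the data, CRLF; a zero-length
# chunk terminates the body.  'chunk' is a hypothetical helper.
def chunk (data):
    return '%x\r\n%s\r\n' % (len (data), data)

# e.g. chunk('hello, ') + chunk('world') + '0\r\n\r\n' is a complete chunked body.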
# ===========================================================================
#                                                HTTP Channel Object
# ===========================================================================

class http_channel (asynchat.async_chat):

    # use a larger default output buffer
    ac_out_buffer_size = 1 << 16

    current_request = None
    channel_counter = counter()

    def __init__ (self, server, conn, addr):
        self.channel_number = http_channel.channel_counter.increment()
        self.request_counter = counter()
        asynchat.async_chat.__init__ (self, conn)
        self.server = server
        self.addr = addr
        self.set_terminator ('\r\n\r\n')
        self.in_buffer = ''
        self.creation_time = int (time.time())
        self.check_maintenance()

    def __repr__ (self):
        ar = asynchat.async_chat.__repr__ (self)[1:-1]
        return '<%s channel#: %s requests:%s>' % (
            ar,
            self.channel_number,
            self.request_counter
        )

    # Channel Counter, Maintenance Interval...
    maintenance_interval = 500

    def check_maintenance (self):
        if not self.channel_number % self.maintenance_interval:
            self.maintenance()

    def maintenance (self):
        self.kill_zombies()

    # 30-minute zombie timeout.  status_handler also knows how to kill zombies.
    zombie_timeout = 30 * 60

    def kill_zombies (self):
        now = int (time.time())
        for channel in asyncore.socket_map.values():
            if channel.__class__ == self.__class__:
                if (now - channel.creation_time) > channel.zombie_timeout:
                    channel.close()

    # --------------------------------------------------
    # send/recv overrides, good place for instrumentation.
    # --------------------------------------------------

    # this information needs to get into the request object,
    # so that it may log correctly.
    def send (self, data):
        result = asynchat.async_chat.send (self, data)
        self.server.bytes_out.increment (len(data))
        return result

    def recv (self, buffer_size):
        try:
            result = asynchat.async_chat.recv (self, buffer_size)
            self.server.bytes_in.increment (len(result))
            return result
        except MemoryError:
            # --- Save a Trip to Your Service Provider ---
            # It's possible for a process to eat up all the memory of
            # the machine, and put it in an extremely wedged state,
            # where medusa keeps running and can't be shut down.  This
            # is where MemoryError tends to get thrown, though of
            # course it could get thrown elsewhere.
            sys.exit ("Out of Memory!")

    def handle_error (self):
        t, v = sys.exc_info()[:2]
        if t is SystemExit:
            raise t, v
        else:
            asynchat.async_chat.handle_error (self)

    def log (self, *args):
        pass
    # --------------------------------------------------
    # async_chat methods
    # --------------------------------------------------

    def collect_incoming_data (self, data):
        if self.current_request:
            # we are receiving data (probably POST data) for a request
            self.current_request.collect_incoming_data (data)
        else:
            # we are receiving header (request) data
            self.in_buffer = self.in_buffer + data

    def found_terminator (self):
        if self.current_request:
            self.current_request.found_terminator()
        else:
            header = self.in_buffer
            self.in_buffer = ''
            lines = string.split (header, '\r\n')

            # --------------------------------------------------
            # crack the request header
            # --------------------------------------------------

            while lines and not lines[0]:
                # as per the suggestion of http-1.1 section 4.1, (and
                # Eric Parker <eparker@zyvex.com>), ignore a leading
                # blank lines (buggy browsers tack it onto the end of
                # POST requests)
                lines = lines[1:]

            if not lines:
                self.close_when_done()
                return

            request = lines[0]
            command, uri, version = crack_request (request)
            header = join_headers (lines[1:])

            r = http_request (self, request, command, uri, version, header)
            self.request_counter.increment()
            self.server.total_requests.increment()

            if command is None:
                self.log_info ('Bad HTTP request: %s' % request, 'error')
                r.error (400)
                return

            # --------------------------------------------------
            # handler selection and dispatch
            # --------------------------------------------------
            for h in self.server.handlers:
                if h.match (r):
                    try:
                        self.current_request = r
                        # This isn't used anywhere.
                        # r.handler = h # CYCLE
                        h.handle_request (r)
                    except:
                        self.server.exceptions.increment()
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info (
                            'Server Error: %s, %s: file: %s line: %s' % (t, v, file, line),
                            'error')
                        try:
                            r.error (500)
                        except:
                            pass
                    return

            # no handlers, so complain
            r.error (404)

    def writable (self):
        # this is just the normal async_chat 'writable', here for comparison
        return self.ac_out_buffer or len(self.producer_fifo)

    def writable_for_proxy (self):
        # this version of writable supports the idea of a 'stalled' producer
        # [i.e., it's not ready to produce any output yet] This is needed by
        # the proxy, which will be waiting for the magic combination of
        # 1) hostname resolved
        # 2) connection made
        # 3) data available.
        if self.ac_out_buffer:
            return 1
        elif len(self.producer_fifo):
            p = self.producer_fifo.first()
            if hasattr (p, 'stalled'):
                return not p.stalled()
            else:
                return 1
# ===========================================================================
#                                                HTTP Server Object
# ===========================================================================

class http_server (asyncore.dispatcher):

    SERVER_IDENT = 'HTTP Server (V%s)' % VERSION_STRING

    channel_class = http_channel

    def __init__ (self, ip, port, resolver=None, logger_object=None):
        self.ip = ip
        self.port = port
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)

        self.handlers = []

        if not logger_object:
            logger_object = logger.file_logger (sys.stdout)

        self.set_reuse_addr()
        self.bind ((ip, port))

        # lower this to 5 if your OS complains
        self.listen (1024)

        host, port = self.socket.getsockname()
        if not ip:
            self.log_info ('Computing default hostname', 'warning')
            ip = socket.gethostbyname (socket.gethostname())
        try:
            self.server_name = socket.gethostbyaddr (ip)[0]
        except socket.error:
            self.log_info ('Cannot do reverse lookup', 'warning')
            self.server_name = ip       # use the IP address as the "hostname"

        self.server_port = port
        self.total_clients = counter()
        self.total_requests = counter()
        self.exceptions = counter()
        self.bytes_out = counter()
        self.bytes_in = counter()

        if not logger_object:
            logger_object = logger.file_logger (sys.stdout)

        if resolver:
            self.logger = logger.resolving_logger (resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger (logger_object)

        self.log_info (
            'Medusa (V%s) started at %s'
            '\n\tHostname: %s'
            '\n\tPort:%d'
            '\n' % (
                VERSION_STRING,
                time.ctime (time.time()),
                self.server_name,
                port,
            )
        )

    def writable (self):
        return 0

    def handle_read (self):
        pass

    def readable (self):
        return self.accepting

    def handle_connect (self):
        pass

    def handle_accept (self):
        self.total_clients.increment()
        try:
            conn, addr = self.accept()
        except socket.error:
            # linux: on rare occasions we get a bogus socket back from
            # accept.  socketmodule.c:makesockaddr complains that the
            # address family is unknown.  We don't want the whole server
            # to shut down because of this.
            self.log_info ('warning: server accept() threw an exception', 'warning')
            return
        except TypeError:
            # unpack non-sequence.  this can happen when a read event
            # fires on a listening socket, but when we call accept()
            # we get EWOULDBLOCK, so dispatcher.accept() returns None.
            # Seen on FreeBSD3.
            self.log_info ('warning: server accept() threw EWOULDBLOCK', 'warning')
            return

        self.channel_class (self, conn, addr)

    def install_handler (self, handler, back=0):
        if back:
            self.handlers.append (handler)
        else:
            self.handlers.insert (0, handler)

    def remove_handler (self, handler):
        self.handlers.remove (handler)

    def status (self):
        def nice_bytes (n):
            return string.join (status_handler.english_bytes (n))

        handler_stats = filter (None, map (maybe_status, self.handlers))

        if self.total_clients:
            ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
        else:
            ratio = 0.0

        return producers.composite_producer (
            fifo ([producers.lines_producer (
                ['<h2>%s</h2>' % self.SERVER_IDENT,
                 '<br>Listening on: <b>Host:</b> %s' % self.server_name,
                 '<b>Port:</b> %d' % self.port,
                 '<p><ul>'
                 '<li>Total <b>Clients:</b> %s' % self.total_clients,
                 '<b>Requests:</b> %s' % self.total_requests,
                 '<b>Requests/Client:</b> %.1f' % (ratio),
                 '<li>Total <b>Bytes In:</b> %s' % (nice_bytes (self.bytes_in.as_long())),
                 '<b>Bytes Out:</b> %s' % (nice_bytes (self.bytes_out.as_long())),
                 '<li>Total <b>Exceptions:</b> %s' % self.exceptions,
                 '</ul><p>'
                 '<b>Extension List</b><ul>',
                 ])] + handler_stats + [producers.simple_producer ('</ul>')]
            )
        )
def maybe_status (thing):
    if hasattr (thing, 'status'):
        return thing.status()
    else:
        return None

CONNECTION = regex.compile ('Connection: \(.*\)', regex.casefold)

# merge multi-line headers
# [486dx2: ~500/sec]
def join_headers (headers):
    r = []
    for i in range(len(headers)):
        if headers[i][0] in ' \t':
            r[-1] = r[-1] + headers[i][1:]
        else:
            r.append (headers[i])
    return r
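# Illustrative usage sketch (not in the original source): a line starting
# with space or tab is a continuation and gets folded into the previous
# header, so
#
#   join_headers (['Host: h', 'X-Long: part one,', '\tpart two'])
#
# returns ['Host: h', 'X-Long: part one,part two'].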
def get_header (head_reg, lines, group=1):
    for line in lines:
        if head_reg.match (line) == len(line):
            return head_reg.group (group)
    return ''

REQUEST = re.compile ('([^ ]+) (?:[^ :?#]+://[^ ?#/]*)?([^ ]+)(( HTTP/([0-9.]+))$|$)')

def crack_request (r):
    m = REQUEST.match (r)
    if m is not None:
        return string.lower (m.group (1)), m.group (2), m.group (5)
    else:
        return None, None, None
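# Illustrative usage sketch (not in the original source):
#
#   crack_request ('GET /index.html HTTP/1.0')  -> ('get', '/index.html', '1.0')
#   crack_request ('GET /index.html')           -> ('get', '/index.html', None)
#
# the missing-version case (None) is how the telnet/http-0.9 style
# requests handled in http_request.done() show up.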
class fifo:
    def __init__ (self, list=None):
        if not list:
            self.list = []
        else:
            self.list = list

    def __len__ (self):
        return len(self.list)

    def first (self):
        return self.list[0]

    def push_front (self, object):
        self.list.insert (0, object)

    def push (self, data):
        self.list.append (data)

    def pop (self):
        if self.list:
            result = self.list[0]
            del self.list[0]
            return (1, result)
        else:
            return (0, None)
def compute_timezone_for_log ():
    if time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    if tz > 0:
        neg = 1
    else:
        neg = 0
        tz = -tz
    h, rem = divmod (tz, 3600)
    m, rem = divmod (rem, 60)
    if neg:
        return '-%02d%02d' % (h, m)
    else:
        return '+%02d%02d' % (h, m)

# if you run this program over a TZ change boundary, this will be invalid.
tz_for_log = compute_timezone_for_log()
if __name__ == '__main__':
    import sys
    if len (sys.argv) < 2:
        print 'usage: %s <root> <port>' % (sys.argv[0])
    else:
        import monitor
        import filesys
        import default_handler
        import status_handler
        import ftp_server
        import chat_server
        import resolver
        import logger
        rs = resolver.caching_resolver ('127.0.0.1')
        lg = logger.file_logger (sys.stdout)
        ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', 9999)
        fs = filesys.os_filesystem (sys.argv[1])
        dh = default_handler.default_handler (fs)
        hs = http_server ('', string.atoi (sys.argv[2]), rs, lg)
        hs.install_handler (dh)
        ftp = ftp_server.ftp_server (
            ftp_server.dummy_authorizer (sys.argv[1]),
            port=8021,
            resolver=rs,
            logger_object=lg
        )
        cs = chat_server.chat_server ('', 7777)
        sh = status_handler.status_extension ([hs, ms, ftp, cs, rs])
        hs.install_handler (sh)
        if ('-p' in sys.argv):
            def profile_loop ():
                try:
                    asyncore.loop()
                except KeyboardInterrupt:
                    pass
            import profile
            profile.run ('profile_loop()', 'profile.out')
        else:
            asyncore.loop()
ZServer/medusa/logger.py  deleted  100644 → 0
# -*- Mode: Python; tab-width: 4 -*-

import asynchat
import socket
import string
import time        # these three are for the rotating logger
import os          # |
import stat        # v

#
# three types of log:
# 1) file
#    with optional flushing.  Also, one that rotates the log.
# 2) socket
#    dump output directly to a socket connection. [how do we
#    keep it open?]
# 3) syslog
#    log to syslog via tcp.  this is a per-line protocol.
#
#
# The 'standard' interface to a logging object is simply
# log_object.log (message)
#

# a file-like object that captures output, and
# makes sure to flush it always...  this could
# be connected to:
#  o    stdio file
#  o    low-level file
#  o    socket channel
#  o    syslog output...
class file_logger:

    # pass this either a path or a file object.
    def __init__ (self, file, flush=1, mode='a'):
        if type(file) == type(''):
            if (file == '-'):
                import sys
                self.file = sys.stdout
            else:
                self.file = open (file, mode)
        else:
            self.file = file
        self.do_flush = flush

    def __repr__ (self):
        return '<file logger: %s>' % self.file

    def write (self, data):
        self.file.write (data)
        self.maybe_flush()

    def writeline (self, line):
        self.file.writeline (line)
        self.maybe_flush()

    def writelines (self, lines):
        self.file.writelines (lines)
        self.maybe_flush()

    def maybe_flush (self):
        if self.do_flush:
            self.file.flush()

    def flush (self):
        self.file.flush()

    def softspace (self, *args):
        pass

    def log (self, message):
        if message[-1] not in ('\r', '\n'):
            self.write (message + '\n')
        else:
            self.write (message)
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
# it backs up the log and starts a new one.  Note that backing
# up the log is done via "mv" because anything else (cp, gzip)
# would take time, during which medusa would do nothing else.

class rotating_file_logger (file_logger):

    # If freq is non-None we back up "daily", "weekly", or "monthly".
    # Else if maxsize is non-None we back up whenever the log gets
    # to big.  If both are None we never back up.
    def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
        self.filename = file
        self.mode = mode
        self.file = open (file, mode)
        self.freq = freq
        self.maxsize = maxsize
        self.rotate_when = self.next_backup (self.freq)
        self.do_flush = flush

    def __repr__ (self):
        return '<rotating-file logger: %s>' % self.file

    # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
    def next_backup (self, freq):
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime (time.time())
        if freq == 'daily':
            return time.mktime (yr, mo, day+1, 0, 0, 0, 0, 0, -1)
        elif freq == 'weekly':
            return time.mktime (yr, mo, day-wd+7, 0, 0, 0, 0, 0, -1)  # wd(monday)==0
        elif freq == 'monthly':
            return time.mktime (yr, mo+1, 1, 0, 0, 0, 0, 0, -1)
        else:
            return None                 # not a date-based backup

    def maybe_flush (self):             # rotate first if necessary
        self.maybe_rotate()
        if self.do_flush:               # from file_logger()
            self.file.flush()

    def maybe_rotate (self):
        if self.freq and time.time() > self.rotate_when:
            self.rotate()
            self.rotate_when = self.next_backup (self.freq)
        elif self.maxsize:              # rotate when we get too big
            try:
                if os.stat (self.filename)[stat.ST_SIZE] > self.maxsize:
                    self.rotate()
            except os.error:            # file not found, probably
                self.rotate()           # will create a new file

    def rotate (self):
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime (time.time())
        try:
            self.file.close()
            newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
            try:
                open (newname, "r").close()     # check if file exists
                newname = newname + "-%02d%02d%02d" % (hr, min, sec)
            except:                             # YEARMODY is unique
                pass
            os.rename (self.filename, newname)
            self.file = open (self.filename, self.mode)
        except:
            pass
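# Illustrative usage sketch (not in the original source; the path is
# hypothetical): rotate the access log at midnight each day, flushing
# after every message.
#
#   log = rotating_file_logger ('/var/log/medusa-access.log', freq='daily')
#   log.log ('server started')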
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.

# TODO: a simple safety wrapper that will ensure that the line sent
# to syslog is reasonable.

# TODO: async version of syslog_client: now, log entries use blocking
# send()

import m_syslog
syslog_logger = m_syslog.syslog_client

class syslog_logger (m_syslog.syslog_client):

    svc_name = 'medusa'
    pid_str  = str (os.getpid())

    def __init__ (self, address, facility='user'):
        m_syslog.syslog_client.__init__ (self, address)
        self.facility = m_syslog.facility_names[facility]
        self.address = address

    def __repr__ (self):
        return '<syslog logger address=%s>' % (repr (self.address))

    def log (self, message):
        m_syslog.syslog_client.log (
            self,
            '%s[%s]: %s' % (self.svc_name, self.pid_str, message),
            facility=self.facility,
            priority=m_syslog.LOG_INFO
        )
# log to a stream socket, asynchronously
class socket_logger (asynchat.async_chat):

    def __init__ (self, address):
        if type(address) == type(''):
            self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
        else:
            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
        self.connect (address)
        self.address = address

    def __repr__ (self):
        return '<socket logger: address=%s>' % (self.address)

    def log (self, message):
        if message[-2:] != '\r\n':
            self.socket.push (message + '\r\n')
        else:
            self.socket.push (message)
# log to multiple places
class multi_logger:
    def __init__ (self, loggers):
        self.loggers = loggers

    def __repr__ (self):
        return '<multi logger: %s>' % (repr (self.loggers))

    def log (self, message):
        for logger in self.loggers:
            logger.log (message)

class resolving_logger:
    """Feed (ip, message) combinations into this logger to get a
    resolved hostname in front of the message.  The message will not
    be logged until the PTR request finishes (or fails)."""

    def __init__ (self, resolver, logger):
        self.resolver = resolver
        self.logger = logger

    class logger_thunk:
        def __init__ (self, message, logger):
            self.message = message
            self.logger = logger

        def __call__ (self, host, ttl, answer):
            if not answer:
                answer = host
            self.logger.log ('%s:%s' % (answer, self.message))

    def log (self, ip, message):
        self.resolver.resolve_ptr (
            ip,
            self.logger_thunk (
                message,
                self.logger
            )
        )
class unresolving_logger:
    "Just in case you don't want to resolve"
    def __init__ (self, logger):
        self.logger = logger

    def log (self, ip, message):
        self.logger.log ('%s:%s' % (ip, message))

def strip_eol (line):
    while line and line[-1] in '\r\n':
        line = line[:-1]
    return line

class tail_logger:
    "Keep track of the last <size> log messages"
    def __init__ (self, logger, size=500):
        self.size = size
        self.logger = logger
        self.messages = []

    def log (self, message):
        self.messages.append (strip_eol (message))
        if len (self.messages) > self.size:
            del self.messages[0]
        self.logger.log (message)
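# Illustrative sketch (not in the original source; 'events.log' is a
# hypothetical path): the loggers compose, so one message can fan out to
# stdout and to a bounded in-memory tail at the same time.
def make_demo_logger ():
    console = file_logger ('-')                             # '-' means stdout
    tail = tail_logger (file_logger ('events.log'), 100)    # keep last 100 lines
    return multi_logger ([console, tail])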
ZServer/medusa/m_syslog.py  deleted  100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
# ======================================================================
# Copyright 1997 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""socket interface to unix syslog.
On Unix, there are usually two ways of getting to syslog: via a
local unix-domain socket, or via the TCP service.
Usually "/dev/log" is the unix domain socket. This may be different
for other systems.
>>> my_client = syslog_client ('/dev/log')
Otherwise, just use the UDP version, port 514.
>>> my_client = syslog_client (('my_log_host', 514))
On win32, you will have to use the UDP version. Note that
you can use this to log to other hosts (and indeed, multiple
hosts).
This module is not a drop-in replacement for the python
<syslog> extension module - the interface is different.
Usage:
>>> c = syslog_client()
>>> c = syslog_client ('/strange/non_standard_log_location')
>>> c = syslog_client (('other_host.com', 514))
>>> c.log ('testing', facility='local0', priority='debug')
"""
# TODO: support named-pipe syslog.
# [see ftp://sunsite.unc.edu/pub/Linux/system/Daemons/syslog-fifo.tar.z]
# from <linux/sys/syslog.h>:
# ===========================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where the
# bottom 3 bits are the priority (0-7) and the top 28 bits are the facility
# (0-big number). Both the priorities and the facilities map roughly
# one-to-one to strings in the syslogd(8) source code. This mapping is
# included in this file.
#
# priorities (these are ordered)
LOG_EMERG = 0           #  system is unusable
LOG_ALERT = 1           #  action must be taken immediately
LOG_CRIT = 2            #  critical conditions
LOG_ERR = 3             #  error conditions
LOG_WARNING = 4         #  warning conditions
LOG_NOTICE = 5          #  normal but significant condition
LOG_INFO = 6            #  informational
LOG_DEBUG = 7           #  debug-level messages

#  facility codes
LOG_KERN = 0            #  kernel messages
LOG_USER = 1            #  random user-level messages
LOG_MAIL = 2            #  mail system
LOG_DAEMON = 3          #  system daemons
LOG_AUTH = 4            #  security/authorization messages
LOG_SYSLOG = 5          #  messages generated internally by syslogd
LOG_LPR = 6             #  line printer subsystem
LOG_NEWS = 7            #  network news subsystem
LOG_UUCP = 8            #  UUCP subsystem
LOG_CRON = 9            #  clock daemon
LOG_AUTHPRIV = 10       #  security/authorization messages (private)

#  other codes through 15 reserved for system use
LOG_LOCAL0 = 16         #  reserved for local use
LOG_LOCAL1 = 17         #  reserved for local use
LOG_LOCAL2 = 18         #  reserved for local use
LOG_LOCAL3 = 19         #  reserved for local use
LOG_LOCAL4 = 20         #  reserved for local use
LOG_LOCAL5 = 21         #  reserved for local use
LOG_LOCAL6 = 22         #  reserved for local use
LOG_LOCAL7 = 23         #  reserved for local use
priority_names = {
    "alert":    LOG_ALERT,
    "crit":     LOG_CRIT,
    "debug":    LOG_DEBUG,
    "emerg":    LOG_EMERG,
    "err":      LOG_ERR,
    "error":    LOG_ERR,            # DEPRECATED
    "info":     LOG_INFO,
    "notice":   LOG_NOTICE,
    "panic":    LOG_EMERG,          # DEPRECATED
    "warn":     LOG_WARNING,        # DEPRECATED
    "warning":  LOG_WARNING,
    }

facility_names = {
    "auth":     LOG_AUTH,
    "authpriv": LOG_AUTHPRIV,
    "cron":     LOG_CRON,
    "daemon":   LOG_DAEMON,
    "kern":     LOG_KERN,
    "lpr":      LOG_LPR,
    "mail":     LOG_MAIL,
    "news":     LOG_NEWS,
    "security": LOG_AUTH,           # DEPRECATED
    "syslog":   LOG_SYSLOG,
    "user":     LOG_USER,
    "uucp":     LOG_UUCP,
    "local0":   LOG_LOCAL0,
    "local1":   LOG_LOCAL1,
    "local2":   LOG_LOCAL2,
    "local3":   LOG_LOCAL3,
    "local4":   LOG_LOCAL4,
    "local5":   LOG_LOCAL5,
    "local6":   LOG_LOCAL6,
    "local7":   LOG_LOCAL7,
    }
import socket

class syslog_client:
    def __init__ (self, address='/dev/log'):
        self.address = address
        if type (address) == type(''):
            try: # APUE 13.4.2 specifes /dev/log as datagram socket
                self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_DGRAM)
                self.socket.connect (address)
            except: # older linux may create as stream socket
                self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_STREAM)
                self.socket.connect (address)
            self.unix = 1
        else:
            self.socket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
            self.unix = 0

    # curious: when talking to the unix-domain '/dev/log' socket, a
    #   zero-terminator seems to be required.  this string is placed
    #   into a class variable so that it can be overridden if
    #   necessary.

    log_format_string = '<%d>%s\000'

    def log (self, message, facility=LOG_USER, priority=LOG_INFO):
        message = self.log_format_string % (
            self.encode_priority (facility, priority),
            message
        )
        if self.unix:
            self.socket.send (message)
        else:
            self.socket.sendto (message, self.address)

    def encode_priority (self, facility, priority):
        if type (facility) == type (''):
            facility = facility_names[facility]
        if type (priority) == type (''):
            priority = priority_names[priority]
        return (facility << 3) | priority

    def close (self):
        if self.unix:
            self.socket.close()
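# Illustrative sketch (not in the original source): the <PRI> value is
# facility*8 + priority, so 'local0'/'debug' encodes as (16<<3)|7 == 135
# and the datagram begins with '<135>'.
#
#   c = syslog_client (('localhost', 514))        # UDP syslog
#   c.encode_priority ('local0', 'debug')         # -> 135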
if __name__ == '__main__':
    """
    Unit test for syslog_client.  Set up for the test by:
    * tail -f /var/log/allstuf (to see the "normal" log messages).
    * Running the test_logger.py script with a junk file name (which
      will be opened as a Unix-domain socket).  "Custom" log messages
      will go here.
    * Run this script, passing the same junk file name.
    * Check that the "bogus" test throws, and that none of the rest do.
    * Check that the 'default' and 'UDP' messages show up in the tail.
    * Check that the 'non-std' message shows up in the test_logger
      console.
    * Finally, kill off the tail and test_logger, and clean up the
      socket file.
    """
    import sys, traceback

    if len (sys.argv) != 2:
        print "Usage: syslog.py localSocketFilename"
        sys.exit()

    def test_client (desc, address=None):
        try:
            if address:
                client = syslog_client (address)
            else:
                client = syslog_client ()
        except:
            print 'syslog_client() [%s] ctor threw' % desc
            traceback.print_exc()
            return
        try:
            client.log ('testing syslog_client() [%s]' % desc,
                        facility='local0', priority='debug')
            print 'syslog_client.log() [%s] did not throw' % desc
        except:
            print 'syslog_client.log() [%s] threw' % desc
            traceback.print_exc()

    test_client ('default')
    test_client ('bogus file', '/some/bogus/logsocket')
    test_client ('nonstd file', sys.argv[1])
    test_client ('UDP', ('localhost', 514))
ZServer/medusa/max_sockets.py  deleted  100644 → 0
import socket
import select

# several factors here we might want to test:
# 1) max we can create
# 2) max we can bind
# 3) max we can listen on
# 4) max we can connect

def max_server_sockets ():
    sl = []
    while 1:
        try:
            s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            s.bind (('', 0))
            s.listen (5)
            sl.append (s)
        except:
            break
    num = len (sl)
    for s in sl:
        s.close()
    del sl
    return num

def max_client_sockets ():
    # make a server socket
    server = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    server.bind (('', 9999))
    server.listen (5)
    sl = []
    while 1:
        try:
            s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            s.connect (('', 9999))
            conn, addr = server.accept()
            sl.append ((s, conn))
        except:
            break
    num = len (sl)
    for s, c in sl:
        s.close()
        c.close()
    del sl
    return num

def max_select_sockets ():
    sl = []
    while 1:
        try:
            num = len (sl)
            for i in range (1 + len (sl) * 0.05):
                # Increase exponentially.
                s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
                s.bind (('', 0))
                s.listen (5)
                sl.append (s)
            select.select (sl, [], [], 0)
        except:
            break
    for s in sl:
        s.close()
    del sl
    return num
ZServer/medusa/medusa_gif.py  deleted  100644 → 0
# -*- Mode: Python -*-

# the medusa icon as a python source file.

width = 97
height = 61

data = 'GIF89a...'      # octal-escaped binary GIF payload (not reproducible from this listing)
ZServer/medusa/mime_type_table.py  deleted  100644 → 0
# -*- Python -*-
# Converted by ./convert_mime_type_table.py from:
# /usr/src2/apache_1.2b6/conf/mime.types
#
content_type_map = \
    {
    'ai':       'application/postscript',
    'aif':      'audio/x-aiff',
    'aifc':     'audio/x-aiff',
    'aiff':     'audio/x-aiff',
    'au':       'audio/basic',
    'avi':      'video/x-msvideo',
    'bcpio':    'application/x-bcpio',
    'bin':      'application/octet-stream',
    'cdf':      'application/x-netcdf',
    'class':    'application/octet-stream',
    'cpio':     'application/x-cpio',
    'cpt':      'application/mac-compactpro',
    'csh':      'application/x-csh',
    'dcr':      'application/x-director',
    'dir':      'application/x-director',
    'dms':      'application/octet-stream',
    'doc':      'application/msword',
    'dvi':      'application/x-dvi',
    'dxr':      'application/x-director',
    'eps':      'application/postscript',
    'etx':      'text/x-setext',
    'exe':      'application/octet-stream',
    'gif':      'image/gif',
    'gtar':     'application/x-gtar',
    'gz':       'application/x-gzip',
    'hdf':      'application/x-hdf',
    'hqx':      'application/mac-binhex40',
    'htm':      'text/html',
    'html':     'text/html',
    'ice':      'x-conference/x-cooltalk',
    'ief':      'image/ief',
    'jpe':      'image/jpeg',
    'jpeg':     'image/jpeg',
    'jpg':      'image/jpeg',
    'kar':      'audio/midi',
    'latex':    'application/x-latex',
    'lha':      'application/octet-stream',
    'lzh':      'application/octet-stream',
    'man':      'application/x-troff-man',
    'me':       'application/x-troff-me',
    'mid':      'audio/midi',
    'midi':     'audio/midi',
    'mif':      'application/x-mif',
    'mov':      'video/quicktime',
    'movie':    'video/x-sgi-movie',
    'mp2':      'audio/mpeg',
    'mpe':      'video/mpeg',
    'mpeg':     'video/mpeg',
    'mpg':      'video/mpeg',
    'mpga':     'audio/mpeg',
    'mp3':      'audio/mpeg',
    'ms':       'application/x-troff-ms',
    'nc':       'application/x-netcdf',
    'oda':      'application/oda',
    'pbm':      'image/x-portable-bitmap',
    'pdb':      'chemical/x-pdb',
    'pdf':      'application/pdf',
    'pgm':      'image/x-portable-graymap',
    'png':      'image/png',
    'pnm':      'image/x-portable-anymap',
    'ppm':      'image/x-portable-pixmap',
    'ppt':      'application/powerpoint',
    'ps':       'application/postscript',
    'qt':       'video/quicktime',
    'ra':       'audio/x-realaudio',
    'ram':      'audio/x-pn-realaudio',
    'ras':      'image/x-cmu-raster',
    'rgb':      'image/x-rgb',
    'roff':     'application/x-troff',
    'rpm':      'audio/x-pn-realaudio-plugin',
    'rtf':      'application/rtf',
    'rtx':      'text/richtext',
    'sgm':      'text/x-sgml',
    'sgml':     'text/x-sgml',
    'sh':       'application/x-sh',
    'shar':     'application/x-shar',
    'sit':      'application/x-stuffit',
    'skd':      'application/x-koan',
    'skm':      'application/x-koan',
    'skp':      'application/x-koan',
    'skt':      'application/x-koan',
    'snd':      'audio/basic',
    'src':      'application/x-wais-source',
    'sv4cpio':  'application/x-sv4cpio',
    'sv4crc':   'application/x-sv4crc',
    't':        'application/x-troff',
    'tar':      'application/x-tar',
    'tcl':      'application/x-tcl',
    'tex':      'application/x-tex',
    'texi':     'application/x-texinfo',
    'texinfo':  'application/x-texinfo',
    'tif':      'image/tiff',
    'tiff':     'image/tiff',
    'tr':       'application/x-troff',
    'tsv':      'text/tab-separated-values',
    'txt':      'text/plain',
    'ustar':    'application/x-ustar',
    'vcd':      'application/x-cdlink',
    'vrml':     'x-world/x-vrml',
    'wav':      'audio/x-wav',
    'wrl':      'x-world/x-vrml',
    'xbm':      'image/x-xbitmap',
    'xpm':      'image/x-xpixmap',
    'xwd':      'image/x-xwindowdump',
    'xyz':      'chemical/x-pdb',
    'zip':      'application/zip',
    }
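# Illustrative sketch (not in the original source): a typical
# extension-based lookup over the table above.  'guess_content_type'
# is a hypothetical helper.
import string

def guess_content_type (path, default='text/plain'):
    parts = string.split (path, '.')
    if len (parts) > 1:
        ext = string.lower (parts[-1])
        if content_type_map.has_key (ext):
            return content_type_map[ext]
    return default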
ZServer/medusa/monitor.py  deleted  100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
#       Author: Sam Rushing <rushing@nightmare.com>
#
# python REPL channel.
#

RCS_ID = '$Id: monitor.py,v 1.9 2000/07/21 18:59:08 shane Exp $'

import md5
import socket
import string
import sys
import time
import traceback

VERSION = string.split (RCS_ID)[2]

import asyncore
import asynchat

from counter import counter
import producers

class monitor_channel (asynchat.async_chat):
    try_linemode = 1

    def __init__ (self, server, sock, addr):
        asynchat.async_chat.__init__ (self, sock)
        self.server = server
        self.addr = addr
        self.set_terminator ('\r\n')
        self.data = ''
        # local bindings specific to this channel
        self.local_env = sys.modules['__main__'].__dict__.copy()
        self.push ('Python ' + sys.version + '\r\n')
        self.push (sys.copyright + '\r\n')
        self.push ('Welcome to %s\r\n' % self)
        self.push ("[Hint: try 'from __main__ import *']\r\n")
        self.prompt()
        self.number = server.total_sessions.as_long()
        self.line_counter = counter()
        self.multi_line = []
    def handle_connect (self):
        # send IAC DO LINEMODE
        self.push ('\377\375"')

    def close (self):
        self.server.closed_sessions.increment()
        asynchat.async_chat.close (self)

    def prompt (self):
        self.push ('>>> ')

    def collect_incoming_data (self, data):
        self.data = self.data + data
        if len (self.data) > 1024:
            # denial of service.
            self.push ('BCNU\r\n')
            self.close_when_done()
    def found_terminator (self):
        line = self.clean_line (self.data)
        self.data = ''
        self.line_counter.increment()
        # check for special case inputs...
        if not line and not self.multi_line:
            self.prompt()
            return
        if line in ['\004', 'exit']:
            self.push ('BCNU\r\n')
            self.close_when_done()
            return
        oldout = sys.stdout
        olderr = sys.stderr
        try:
            p = output_producer (self, olderr)
            sys.stdout = p
            sys.stderr = p
            try:
                # this is, of course, a blocking operation.
                # if you wanted to thread this, you would have
                # to synchronize, etc... and treat the output
                # like a pipe.  Not Fun.
                #
                # try eval first.  If that fails, try exec.  If that fails,
                # hurl.
                try:
                    if self.multi_line:
                        # oh, this is horrible...
                        raise SyntaxError
                    co = compile (line, repr (self), 'eval')
                    result = eval (co, self.local_env)
                    method = 'eval'
                    if result is not None:
                        print repr (result)
                    self.local_env['_'] = result
                except SyntaxError:
                    try:
                        if self.multi_line:
                            if line and line[0] in [' ', '\t']:
                                self.multi_line.append (line)
                                self.push ('... ')
                                return
                            else:
                                self.multi_line.append (line)
                                line = string.join (self.multi_line, '\n')
                                co = compile (line, repr (self), 'exec')
                                self.multi_line = []
                        else:
                            co = compile (line, repr (self), 'exec')
                    except SyntaxError, why:
                        if why[0] == 'unexpected EOF while parsing':
                            self.push ('... ')
                            self.multi_line.append (line)
                            return
                    exec co in self.local_env
                    method = 'exec'
            except:
                method = 'exception'
                self.multi_line = []
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                self.log_info ('%s %s %s' % (t, v, tbinfo), 'warning')
                traceback.print_exc()
                tbinfo = None
        finally:
            sys.stdout = oldout
            sys.stderr = olderr
        self.log_info ('%s:%s (%s)> %s' % (
            self.number,
            self.line_counter,
            method,
            repr (line))
        )
        self.push_with_producer (p)
        self.prompt()
    # for now, we ignore any telnet option stuff sent to
    # us, and we process the backspace key ourselves.
    # gee, it would be fun to write a full-blown line-editing
    # environment, etc...
    def clean_line (self, line):
        chars = []
        for ch in line:
            oc = ord (ch)
            if oc < 127:
                if oc in [8, 177]:
                    # backspace
                    chars = chars[:-1]
                else:
                    chars.append (ch)
        return string.join (chars, '')
class
monitor_server
(
asyncore
.
dispatcher
):
SERVER_IDENT
=
'Monitor Server (V%s)'
%
VERSION
channel_class
=
monitor_channel
def
__init__
(
self
,
hostname
=
'127.0.0.1'
,
port
=
8023
):
self
.
hostname
=
hostname
self
.
port
=
port
self
.
create_socket
(
socket
.
AF_INET
,
socket
.
SOCK_STREAM
)
self
.
set_reuse_addr
()
self
.
bind
((
hostname
,
port
))
self
.
log_info
(
'%s started on port %d'
%
(
self
.
SERVER_IDENT
,
port
))
self
.
listen
(
5
)
self
.
closed
=
0
self
.
failed_auths
=
0
self
.
total_sessions
=
counter
()
self
.
closed_sessions
=
counter
()
def
writable
(
self
):
return
0
def
handle_accept
(
self
):
conn
,
addr
=
self
.
accept
()
self
.
log_info
(
'Incoming monitor connection from %s:%d'
%
addr
)
self
.
channel_class
(
self
,
conn
,
addr
)
self
.
total_sessions
.
increment
()
def
status
(
self
):
return
producers
.
simple_producer
(
'<h2>%s</h2>'
%
self
.
SERVER_IDENT
+
'<br><b>Total Sessions:</b> %s'
%
self
.
total_sessions
+
'<br><b>Current Sessions:</b> %d'
%
(
self
.
total_sessions
.
as_long
()
-
self
.
closed_sessions
.
as_long
()
)
)
def
hex_digest
(
s
):
m
=
md5
.
md5
()
m
.
update
(
s
)
return
string
.
joinfields
(
map
(
lambda
x
:
hex
(
ord
(
x
))[
2
:],
map
(
None
,
m
.
digest
())),
''
,
)
class
secure_monitor_channel
(
monitor_channel
):
authorized
=
0
def
__init__
(
self
,
server
,
sock
,
addr
):
asynchat
.
async_chat
.
__init__
(
self
,
sock
)
self
.
server
=
server
self
.
addr
=
addr
self
.
set_terminator
(
'
\
r
\
n
'
)
self
.
data
=
''
# local bindings specific to this channel
self
.
local_env
=
{}
# send timestamp string
self
.
timestamp
=
str
(
time
.
time
())
self
.
count
=
0
self
.
line_counter
=
counter
()
self
.
number
=
int
(
server
.
total_sessions
.
as_long
())
self
.
multi_line
=
[]
self
.
push
(
self
.
timestamp
+
'
\
r
\
n
'
)
def
found_terminator
(
self
):
if
not
self
.
authorized
:
if
hex_digest
(
'%s%s'
%
(
self
.
timestamp
,
self
.
server
.
password
))
!=
self
.
data
:
self
.
log_info
(
'%s: failed authorization'
%
self
,
'warning'
)
self
.
server
.
failed_auths
=
self
.
server
.
failed_auths
+
1
self
.
close
()
else
:
self
.
authorized
=
1
self
.
push
(
'Python '
+
sys
.
version
+
'
\
r
\
n
'
)
self
.
push
(
sys
.
copyright
+
'
\
r
\
n
'
)
self
.
push
(
'Welcome to %s
\
r
\
n
'
%
self
)
self
.
prompt
()
self
.
data
=
''
else
:
monitor_channel
.
found_terminator
(
self
)
class
secure_encrypted_monitor_channel
(
secure_monitor_channel
):
"Wrap send() and recv() with a stream cipher"
def
__init__
(
self
,
server
,
conn
,
addr
):
key
=
server
.
password
self
.
outgoing
=
server
.
cipher
.
new
(
key
)
self
.
incoming
=
server
.
cipher
.
new
(
key
)
secure_monitor_channel
.
__init__
(
self
,
server
,
conn
,
addr
)
def
send
(
self
,
data
):
# send the encrypted data instead
ed
=
self
.
outgoing
.
encrypt
(
data
)
return
secure_monitor_channel
.
send
(
self
,
ed
)
def
recv
(
self
,
block_size
):
data
=
secure_monitor_channel
.
recv
(
self
,
block_size
)
if
data
:
dd
=
self
.
incoming
.
decrypt
(
data
)
return
dd
else
:
return
data
class
secure_monitor_server
(
monitor_server
):
channel_class
=
secure_monitor_channel
def
__init__
(
self
,
password
,
hostname
=
''
,
port
=
8023
):
monitor_server
.
__init__
(
self
,
hostname
,
port
)
self
.
password
=
password
def
status
(
self
):
p
=
monitor_server
.
status
(
self
)
# kludge
p
.
data
=
p
.
data
+
(
'<br><b>Failed Authorizations:</b> %d'
%
self
.
failed_auths
)
return
p
# don't try to print from within any of the methods
# of this object. 8^)
class
output_producer
:
def
__init__
(
self
,
channel
,
real_stderr
):
self
.
channel
=
channel
self
.
data
=
''
# use _this_ for debug output
self
.
stderr
=
real_stderr
def
check_data
(
self
):
if
len
(
self
.
data
)
>
1
<<
16
:
# runaway output, close it.
self
.
channel
.
close
()
def
write
(
self
,
data
):
lines
=
string
.
splitfields
(
data
,
'
\
n
'
)
data
=
string
.
join
(
lines
,
'
\
r
\
n
'
)
self
.
data
=
self
.
data
+
data
self
.
check_data
()
def
writeline
(
self
,
line
):
self
.
data
=
self
.
data
+
line
+
'
\
r
\
n
'
self
.
check_data
()
def
writelines
(
self
,
lines
):
self
.
data
=
self
.
data
+
string
.
joinfields
(
lines
,
'
\
r
\
n
'
)
+
'
\
r
\
n
'
self
.
check_data
()
def
ready
(
self
):
return
(
len
(
self
.
data
)
>
0
)
def
flush
(
self
):
pass
def
softspace
(
self
,
*
args
):
pass
def
more
(
self
):
if
self
.
data
:
result
=
self
.
data
[:
512
]
self
.
data
=
self
.
data
[
512
:]
return
result
else
:
return
''
if
__name__
==
'__main__'
:
import
string
import
sys
if
'-s'
in
sys
.
argv
:
sys
.
argv
.
remove
(
'-s'
)
print
'Enter password: '
,
password
=
raw_input
()
else
:
password
=
None
if
'-e'
in
sys
.
argv
:
sys
.
argv
.
remove
(
'-e'
)
encrypt
=
1
else
:
encrypt
=
0
print
sys
.
argv
if
len
(
sys
.
argv
)
>
1
:
port
=
string
.
atoi
(
sys
.
argv
[
1
])
else
:
port
=
8023
if
password
is
not
None
:
s
=
secure_monitor_server
(
password
,
''
,
port
)
if
encrypt
:
s
.
channel_class
=
secure_encrypted_monitor_channel
import
sapphire
s
.
cipher
=
sapphire
else
:
s
=
monitor_server
(
''
,
port
)
asyncore
.
loop
()
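The secure variants above authenticate with a single challenge-response round trip: the server pushes str(time.time()) as a challenge, and the client must answer with the MD5 digest of timestamp+password rendered by the hex_digest routine above. A minimal sketch of the client-side computation, not part of the original file; the timestamp and password values are made up:

import md5, string

def hex_digest (s):
    m = md5.md5()
    m.update (s)
    return string.joinfields (
            map (lambda x: hex (ord (x))[2:], map (None, m.digest())), '')

timestamp = '988142931.472'   # whatever line the server pushed on connect
password = 'fnord'            # hypothetical shared secret
reply = hex_digest (timestamp + password) + '\r\n'
# sending 'reply' as the first line satisfies found_terminator() above

Note that hex(ord(x))[2:] drops leading zeros, so this is not a standard hex dump; both ends must compute the digest with this same routine for the comparison to succeed.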
ZServer/medusa/monitor_client.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-

# monitor client, unix version.

import asyncore
import asynchat
import regsub
import socket
import string
import sys
import os
import md5
import time

class stdin_channel (asyncore.file_dispatcher):
    def handle_read (self):
        data = self.recv (512)
        if not data:
            print '\nclosed.'
            self.sock_channel.close()
            try:
                self.close()
            except:
                pass

        data = regsub.gsub ('\n', '\r\n', data)
        self.sock_channel.push (data)

    def writable (self):
        return 0

    def log (self, *ignore):
        pass

class monitor_client (asynchat.async_chat):
    def __init__ (self, password, addr=('', 8023), socket_type=socket.AF_INET):
        asynchat.async_chat.__init__ (self)
        self.create_socket (socket_type, socket.SOCK_STREAM)
        self.terminator = '\r\n'
        self.connect (addr)
        self.sent_auth = 0
        self.timestamp = ''
        self.password = password

    def collect_incoming_data (self, data):
        if not self.sent_auth:
            self.timestamp = self.timestamp + data
        else:
            sys.stdout.write (data)
            sys.stdout.flush()

    def found_terminator (self):
        if not self.sent_auth:
            self.push (hex_digest (self.timestamp + self.password) + '\r\n')
            self.sent_auth = 1
        else:
            print

    def handle_close (self):
        # close all the channels, which will make the standard main
        # loop exit.
        map (lambda x: x.close(), asyncore.socket_map.values())

    def log (self, *ignore):
        pass

class encrypted_monitor_client (monitor_client):
    "Wrap push() and recv() with a stream cipher"

    def init_cipher (self, cipher, key):
        self.outgoing = cipher.new (key)
        self.incoming = cipher.new (key)

    def push (self, data):
        # push the encrypted data instead
        return monitor_client.push (self, self.outgoing.encrypt (data))

    def recv (self, block_size):
        data = monitor_client.recv (self, block_size)
        if data:
            return self.incoming.decrypt (data)
        else:
            return data

def hex_digest (s):
    m = md5.md5()
    m.update (s)
    return string.join (
            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
            '',
            )

if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'Usage: %s host port' % sys.argv[0]
        sys.exit(0)

    if ('-e' in sys.argv):
        encrypt = 1
        sys.argv.remove ('-e')
    else:
        encrypt = 0

    sys.stderr.write ('Enter Password: ')
    sys.stderr.flush()
    import os
    try:
        os.system ('stty -echo')
        p = raw_input()
        print
    finally:
        os.system ('stty echo')
    stdin = stdin_channel (0)
    if len(sys.argv) > 1:
        if encrypt:
            client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
            import sapphire
            client.init_cipher (sapphire, p)
        else:
            client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
    else:
        # default to local host, 'standard' port
        client = monitor_client (p)
    asyncore.loop()
ZServer/medusa/monitor_client_win32.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-

# monitor client, win32 version

# since we can't do select() on stdin/stdout, we simply
# use threads and blocking sockets.  <sigh>

import regsub
import socket
import string
import sys
import thread
import md5

def hex_digest (s):
    m = md5.md5()
    m.update (s)
    return string.join (
            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
            '',
            )

def reader (lock, sock, password):
    # first grab the timestamp
    ts = sock.recv (1024)[:-2]
    sock.send (hex_digest (ts + password) + '\r\n')
    while 1:
        d = sock.recv (1024)
        if not d:
            lock.release()
            print 'Connection closed.  Hit <return> to exit'
            thread.exit()
        sys.stdout.write (d)
        sys.stdout.flush()

def writer (lock, sock, barrel="just kidding"):
    while lock.locked():
        sock.send (
                sys.stdin.readline()[:-1] + '\r\n'
                )

if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'Usage: %s host port' % sys.argv[0]
        sys.exit(0)
    print 'Enter Password: ',
    p = raw_input()
    s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    s.connect ((sys.argv[1], string.atoi (sys.argv[2])))
    l = thread.allocate_lock()
    l.acquire()
    thread.start_new_thread (reader, (l, s, p))
    writer (l, s)
ZServer/medusa/producers.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-

RCS_ID = '$Id: producers.py,v 1.7 2000/06/02 14:22:48 brian Exp $'

import string

"""
A collection of producers.
Each producer implements a particular feature:  They can be combined
in various ways to get interesting and useful behaviors.

For example, you can feed dynamically-produced output into the compressing
producer, then wrap this with the 'chunked' transfer-encoding producer.
"""

class simple_producer:
    "producer for a string"
    def __init__ (self, data, buffer_size=1024):
        self.data = data
        self.buffer_size = buffer_size

    def more (self):
        if len (self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = ''
            return result

class scanning_producer:
    "like simple_producer, but more efficient for large strings"
    def __init__ (self, data, buffer_size=1024):
        self.data = data
        self.buffer_size = buffer_size
        self.pos = 0

    def more (self):
        if self.pos < len(self.data):
            lp = self.pos
            rp = min (
                    len(self.data),
                    self.pos + self.buffer_size
                    )
            result = self.data[lp:rp]
            self.pos = self.pos + len(result)
            return result
        else:
            return ''

class lines_producer:
    "producer for a list of lines"

    def __init__ (self, lines):
        self.lines = lines

    def ready (self):
        return len(self.lines)

    def more (self):
        if self.lines:
            chunk = self.lines[:50]
            self.lines = self.lines[50:]
            return string.join (chunk, '\r\n') + '\r\n'
        else:
            return ''

class buffer_list_producer:
    "producer for a list of buffers"

    # i.e., data == string.join (buffers, '')

    def __init__ (self, buffers):
        self.index = 0
        self.buffers = buffers

    def more (self):
        if self.index >= len(self.buffers):
            return ''
        else:
            data = self.buffers[self.index]
            self.index = self.index + 1
            return data

class file_producer:
    "producer wrapper for file[-like] objects"

    # match http_channel's outgoing buffer size
    out_buffer_size = 1<<16

    def __init__ (self, file):
        self.done = 0
        self.file = file

    def more (self):
        if self.done:
            return ''
        else:
            data = self.file.read (self.out_buffer_size)
            if not data:
                self.file.close()
                del self.file
                self.done = 1
                return ''
            else:
                return data

# A simple output producer.  This one does not [yet] have
# the safety feature builtin to the monitor channel:  runaway
# output will not be caught.

# don't try to print from within any of the methods
# of this object.

class output_producer:
    "Acts like an output file; suitable for capturing sys.stdout"
    def __init__ (self):
        self.data = ''

    def write (self, data):
        lines = string.splitfields (data, '\n')
        data = string.join (lines, '\r\n')
        self.data = self.data + data

    def writeline (self, line):
        self.data = self.data + line + '\r\n'

    def writelines (self, lines):
        self.data = self.data + string.joinfields (
                lines,
                '\r\n'
                ) + '\r\n'

    def ready (self):
        return (len (self.data) > 0)

    def flush (self):
        pass

    def softspace (self, *args):
        pass

    def more (self):
        if self.data:
            result = self.data[:512]
            self.data = self.data[512:]
            return result
        else:
            return ''

class composite_producer:
    "combine a fifo of producers into one"
    def __init__ (self, producers):
        self.producers = producers

    def more (self):
        while len(self.producers):
            p = self.producers.first()
            d = p.more()
            if d:
                return d
            else:
                self.producers.pop()
        else:
            return ''

class globbing_producer:
    """
    'glob' the output from a producer into a particular buffer size.
    helps reduce the number of calls to send().  [this appears to
    gain about 30% performance on requests to a single channel]
    """

    def __init__ (self, producer, buffer_size=1<<16):
        self.producer = producer
        self.buffer = ''
        self.buffer_size = buffer_size

    def more (self):
        while len(self.buffer) < self.buffer_size:
            data = self.producer.more()
            if data:
                self.buffer = self.buffer + data
            else:
                break
        r = self.buffer
        self.buffer = ''
        return r

class hooked_producer:
    """
    A producer that will call <function> when it empties,
    with an argument of the number of bytes produced.  Useful
    for logging/instrumentation purposes.
    """

    def __init__ (self, producer, function):
        self.producer = producer
        self.function = function
        self.bytes = 0

    def more (self):
        if self.producer:
            result = self.producer.more()
            if not result:
                self.producer = None
                self.function (self.bytes)
            else:
                self.bytes = self.bytes + len(result)
            return result
        else:
            return ''

# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
# correct.  In the face of Strange Files, it is conceivable that
# reading a 'file' may produce an amount of data not matching that
# reported by os.stat() [text/binary mode issues, perhaps the file is
# being appended to, etc..]  This makes the chunked encoding a True
# Blessing, and it really ought to be used even with normal files.
# How beautifully it blends with the concept of the producer.

class chunked_producer:
    """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
    Here is a sample usage:
            request['Transfer-Encoding'] = 'chunked'
            request.push (
                    producers.chunked_producer (your_producer)
                    )
            request.done()
    """

    def __init__ (self, producer, footers=None):
        self.producer = producer
        self.footers = footers

    def more (self):
        if self.producer:
            data = self.producer.more()
            if data:
                return '%x\r\n%s\r\n' % (len(data), data)
            else:
                self.producer = None
                if self.footers:
                    return string.join (
                            ['0'] + self.footers,
                            '\r\n'
                            ) + '\r\n\r\n'
                else:
                    return '0\r\n\r\n'
        else:
            return ''

# Unfortunately this isn't very useful right now (Aug 97), because
# apparently the browsers don't do on-the-fly decompression.  Which
# is sad, because this could _really_ speed things up, especially for
# low-bandwidth clients (i.e., most everyone).

try:
    import zlib
except ImportError:
    zlib = None

class compressed_producer:
    """
    Compress another producer on-the-fly, using ZLIB
    [Unfortunately, none of the current browsers seem to support this]
    """

    # Note: It's not very efficient to have the server repeatedly
    # compressing your outgoing files: compress them ahead of time, or
    # use a compress-once-and-store scheme.  However, if you have low
    # bandwidth and low traffic, this may make more sense than
    # maintaining your source files compressed.
    #
    # Can also be used for compressing dynamically-produced output.

    def __init__ (self, producer, level=5):
        self.producer = producer
        self.compressor = zlib.compressobj (level)

    def more (self):
        if self.producer:
            cdata = ''
            # feed until we get some output
            while not cdata:
                data = self.producer.more()
                if not data:
                    self.producer = None
                    return self.compressor.flush()
                else:
                    cdata = self.compressor.compress (data)
            return cdata
        else:
            return ''

class escaping_producer:

    "A producer that escapes a sequence of characters"
    " Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."

    def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
        self.producer = producer
        self.esc_from = esc_from
        self.esc_to = esc_to
        self.buffer = ''
        from asynchat import find_prefix_at_end
        self.find_prefix_at_end = find_prefix_at_end

    def more (self):
        esc_from = self.esc_from
        esc_to   = self.esc_to

        buffer = self.buffer + self.producer.more()

        if buffer:
            buffer = string.replace (buffer, esc_from, esc_to)
            i = self.find_prefix_at_end (buffer, esc_from)
            if i:
                # we found a prefix
                self.buffer = buffer[-i:]
                return buffer[:-i]
            else:
                # no prefix, return it all
                self.buffer = ''
                return buffer
        else:
            return buffer
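As the module docstring says, these producers are meant to be stacked. A short sketch, not part of the original file, that wraps a string producer in the globbing and chunked producers and drains the result the way a channel would, by repeated calls to more():

import producers

p = producers.chunked_producer (
        producers.globbing_producer (
                producers.simple_producer ('x' * 100000)
                )
        )
while 1:
    # each block comes out as '<hex-length>\r\n<data>\r\n'; the last
    # non-empty block is the '0\r\n\r\n' terminator
    data = p.more()
    if not data:
        break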
ZServer/medusa/resolver.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-
#
#       Author: Sam Rushing <rushing@nightmare.com>
#

RCS_ID = '$Id: resolver.py,v 1.6 2000/06/02 14:22:48 brian Exp $'

# Fast, low-overhead asynchronous name resolver.  uses 'pre-cooked'
# DNS requests, unpacks only as much as it needs of the reply.

# see rfc1035 for details

import string
import asyncore
import socket
import sys
import time
from counter import counter

VERSION = string.split(RCS_ID)[2]

# header
#                                 1  1  1  1  1  1
#   0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                      ID                       |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR|   Opcode  |AA|TC|RD|RA|   Z    |   RCODE   |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                    QDCOUNT                    |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                    ANCOUNT                    |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                    NSCOUNT                    |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                    ARCOUNT                    |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+

# question
#                                 1  1  1  1  1  1
#   0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                                               |
# /                     QNAME                     /
# /                                               /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                     QTYPE                     |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                     QCLASS                    |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+

# build a DNS address request, _quickly_
def fast_address_request (host, id=0):
    return (
            '%c%c' % (chr((id>>8)&0xff), chr(id&0xff))
            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\001\000\001' % (
                    string.join (
                            map (
                                    lambda part: '%c%s' % (chr(len(part)), part),
                                    string.split (host, '.')
                                    ), ''
                            )
                    )
            )

def fast_ptr_request (host, id=0):
    return (
            '%c%c' % (chr((id>>8)&0xff), chr(id&0xff))
            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\014\000\001' % (
                    string.join (
                            map (
                                    lambda part: '%c%s' % (chr(len(part)), part),
                                    string.split (host, '.')
                                    ), ''
                            )
                    )
            )

def unpack_name (r, pos):
    n = []
    while 1:
        ll = ord(r[pos])
        if (ll&0xc0):
            # compression: the low 6 bits of this byte and the next
            # byte together form an offset into the packet
            pos = ((ll&0x3f)<<8) + (ord(r[pos+1]))
        elif ll == 0:
            break
        else:
            pos = pos + 1
            n.append (r[pos:pos+ll])
            pos = pos + ll
    return string.join (n, '.')

def skip_name (r, pos):
    s = pos
    while 1:
        ll = ord(r[pos])
        if (ll&0xc0):
            # compression
            return pos + 2
        elif ll == 0:
            pos = pos + 1
            break
        else:
            pos = pos + ll + 1
    return pos

def unpack_ttl (r, pos):
    return reduce (
            lambda x, y: (x<<8)|y,
            map (ord, r[pos:pos+4])
            )

# resource record
#                                 1  1  1  1  1  1
#   0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                                               |
# /                                               /
# /                      NAME                     /
# |                                               |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                      TYPE                     |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                     CLASS                     |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                      TTL                      |
# |                                               |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |                   RDLENGTH                    |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# /                     RDATA                     /
# /                                               /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+

def unpack_address_reply (r):
    ancount = (ord(r[6])<<8) + (ord(r[7]))
    # skip question, first name starts at 12,
    # this is followed by QTYPE and QCLASS
    pos = skip_name (r, 12) + 4
    if ancount:
        # we are looking very specifically for
        # an answer with TYPE=A, CLASS=IN (\000\001\000\001)
        for an in range(ancount):
            pos = skip_name (r, pos)
            if r[pos:pos+4] == '\000\001\000\001':
                return (
                        unpack_ttl (r, pos+4),
                        '%d.%d.%d.%d' % tuple(map(ord, r[pos+10:pos+14]))
                        )
            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
            pos = pos + 8
            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
            pos = pos + 2 + rdlength
        return 0, None
    else:
        return 0, None

def unpack_ptr_reply (r):
    ancount = (ord(r[6])<<8) + (ord(r[7]))
    # skip question, first name starts at 12,
    # this is followed by QTYPE and QCLASS
    pos = skip_name (r, 12) + 4
    if ancount:
        # we are looking very specifically for
        # an answer with TYPE=PTR, CLASS=IN (\000\014\000\001)
        for an in range(ancount):
            pos = skip_name (r, pos)
            if r[pos:pos+4] == '\000\014\000\001':
                return (
                        unpack_ttl (r, pos+4),
                        unpack_name (r, pos+10)
                        )
            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
            pos = pos + 8
            rdlength = (ord(r[pos])<<8) + (ord(r[pos+1]))
            pos = pos + 2 + rdlength
        return 0, None
    else:
        return 0, None

# This is a UDP (datagram) resolver.

#
# It may be useful to implement a TCP resolver.  This would presumably
# give us more reliable behavior when things get too busy.  A TCP
# client would have to manage the connection carefully, since the
# server is allowed to close it at will (the RFC recommends closing
# after 2 minutes of idle time).
#
# Note also that the TCP client will have to prepend each request
# with a 2-byte length indicator (see rfc1035).
#

class resolver (asyncore.dispatcher):
    id = counter()

    def __init__ (self, server='127.0.0.1'):
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
        self.server = server
        self.request_map = {}
        self.last_reap_time = int(time.time())      # reap every few minutes

    def writable (self):
        return 0

    def log (self, *args):
        pass

    def handle_close (self):
        self.log_info ('closing!')
        self.close()

    def handle_error (self):      # don't close the connection on error
        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
        self.log_info (
                'Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),
                'error')

    def get_id (self):
        return (self.id.as_long() % (1<<16))

    def reap (self):      # find DNS requests that have timed out
        now = int(time.time())
        if now - self.last_reap_time > 180:        # reap every 3 minutes
            self.last_reap_time = now              # update before we forget
            for k, (host, unpack, callback, when) in self.request_map.items():
                if now - when > 180:               # over 3 minutes old
                    del self.request_map[k]
                    try:                           # same code as in handle_read
                        callback (host, 0, None)   # timeout val is (0,None)
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info ('%s %s %s' % (t, v, tbinfo), 'error')

    def resolve (self, host, callback):
        self.reap()                                # first, get rid of old guys
        self.socket.sendto (
                fast_address_request (host, self.get_id()),
                (self.server, 53)
                )
        self.request_map [self.get_id()] = (
                host, unpack_address_reply, callback, int(time.time()))
        self.id.increment()

    def resolve_ptr (self, host, callback):
        self.reap()                                # first, get rid of old guys
        ip = string.split (host, '.')
        ip.reverse()
        ip = string.join (ip, '.') + '.in-addr.arpa'
        self.socket.sendto (
                fast_ptr_request (ip, self.get_id()),
                (self.server, 53)
                )
        self.request_map [self.get_id()] = (
                host, unpack_ptr_reply, callback, int(time.time()))
        self.id.increment()

    def handle_read (self):
        reply, whence = self.socket.recvfrom (512)
        # for security reasons we may want to double-check
        # that <whence> is the server we sent the request to.
        id = (ord(reply[0])<<8) + ord(reply[1])
        if self.request_map.has_key (id):
            host, unpack, callback, when = self.request_map[id]
            del self.request_map[id]
            ttl, answer = unpack (reply)
            try:
                callback (host, ttl, answer)
            except:
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                self.log_info ('%s %s %s' % (t, v, tbinfo), 'error')

class rbl (resolver):

    def resolve_maps (self, host, callback):
        ip = string.split (host, '.')
        ip.reverse()
        ip = string.join (ip, '.') + '.rbl.maps.vix.com'
        self.socket.sendto (
                fast_ptr_request (ip, self.get_id()),
                (self.server, 53)
                )
        self.request_map [self.get_id()] = host, self.check_reply, callback
        self.id.increment()

    def check_reply (self, r):
        # we only need to check RCODE.
        rcode = (ord(r[3])&0xf)
        self.log_info ('MAPS RBL; RCODE =%02x\n%s' % (rcode, repr(r)))
        return 0, rcode # (ttl, answer)

class hooked_callback:
    def __init__ (self, hook, callback):
        self.hook, self.callback = hook, callback

    def __call__ (self, *args):
        apply (self.hook, args)
        apply (self.callback, args)

class caching_resolver (resolver):
    "Cache DNS queries.  Will need to honor the TTL value in the replies"

    def __init__ (*args):
        apply (resolver.__init__, args)
        self = args[0]
        self.cache = {}
        self.forward_requests = counter()
        self.reverse_requests = counter()
        self.cache_hits = counter()

    def resolve (self, host, callback):
        self.forward_requests.increment()
        if self.cache.has_key (host):
            when, ttl, answer = self.cache[host]
            # ignore TTL for now
            callback (host, ttl, answer)
            self.cache_hits.increment()
        else:
            resolver.resolve (
                    self,
                    host,
                    hooked_callback (
                            self.callback_hook,
                            callback
                            )
                    )

    def resolve_ptr (self, host, callback):
        self.reverse_requests.increment()
        if self.cache.has_key (host):
            when, ttl, answer = self.cache[host]
            # ignore TTL for now
            callback (host, ttl, answer)
            self.cache_hits.increment()
        else:
            resolver.resolve_ptr (
                    self,
                    host,
                    hooked_callback (
                            self.callback_hook,
                            callback
                            )
                    )

    def callback_hook (self, host, ttl, answer):
        self.cache[host] = time.time(), ttl, answer

    SERVER_IDENT = 'Caching DNS Resolver (V%s)' % VERSION

    def status (self):
        import status_handler
        import producers
        return producers.simple_producer (
                '<h2>%s</h2>'                    % self.SERVER_IDENT
                + '<br>Server: %s'               % self.server
                + '<br>Cache Entries: %d'        % len(self.cache)
                + '<br>Outstanding Requests: %d' % len(self.request_map)
                + '<br>Forward Requests: %s'     % self.forward_requests
                + '<br>Reverse Requests: %s'     % self.reverse_requests
                + '<br>Cache Hits: %s'           % self.cache_hits
                )

#test_reply = """\000\000\205\200\000\001\000\001\000\002\000\002\006squirl\011nightmare\003com\000\000\001\000\001\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\011nightmare\003com\000\000\002\000\001\000\001Q\200\000\002\300\014\3006\000\002\000\001\000\001Q\200\000\015\003ns1\003iag\003net\000\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\300]\000\001\000\001\000\000\350\227\000\004\314\033\322\005"""

# def test_unpacker ():
#     print unpack_address_reply (test_reply)
#
# import time
# class timer:
#     def __init__ (self):
#         self.start = time.time()
#     def end (self):
#         return time.time() - self.start
#
# # I get ~290 unpacks per second for the typical case, compared to ~48
# # using dnslib directly.  also, that latter number does not include
# # picking the actual data out.
#
# def benchmark_unpacker():
#
#     r = range(1000)
#     t = timer()
#     for i in r:
#         unpack_address_reply (test_reply)
#     print '%.2f unpacks per second' % (1000.0 / t.end())

if __name__ == '__main__':
    import sys
    if len(sys.argv) == 1:
        print 'usage: %s [-r] [-s <server_IP>] host [host ...]' % sys.argv[0]
        sys.exit(0)
    elif ('-s' in sys.argv):
        i = sys.argv.index ('-s')
        server = sys.argv[i+1]
        del sys.argv[i:i+2]
    else:
        server = '127.0.0.1'

    if ('-r' in sys.argv):
        reverse = 1
        i = sys.argv.index ('-r')
        del sys.argv[i]
    else:
        reverse = 0

    if ('-m' in sys.argv):
        maps = 1
        sys.argv.remove ('-m')
    else:
        maps = 0

    if maps:
        r = rbl (server)
    else:
        r = caching_resolver (server)

    count = len(sys.argv) - 1

    def print_it (host, ttl, answer):
        global count
        print '%s: %s' % (host, answer)
        count = count - 1
        if not count:
            r.close()

    for host in sys.argv[1:]:
        if reverse:
            r.resolve_ptr (host, print_it)
        elif maps:
            r.resolve_maps (host, print_it)
        else:
            r.resolve (host, print_it)

    # hooked asyncore.loop()
    while asyncore.socket_map:
        asyncore.poll (30.0)
        print 'requests outstanding: %d' % len(r.request_map)
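A usage sketch, not part of the original file: callbacks receive (host, ttl, answer), with answer of None on failure or timeout, and the nameserver address used here is an assumption.

import asyncore
from resolver import caching_resolver

r = caching_resolver ('127.0.0.1')          # assumed local nameserver

def print_answer (host, ttl, answer):
    print '%s -> %s (ttl %s)' % (host, answer, ttl)
    r.close()                               # empties socket_map, ends the loop

r.resolve ('www.nightmare.com', print_answer)
while asyncore.socket_map:
    asyncore.poll (30.0)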
ZServer/medusa/select_trigger.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-

VERSION_STRING = "$Id: select_trigger.py,v 1.14 2000/06/02 14:22:48 brian Exp $"

import asyncore
import asynchat

import os
import socket
import string
import thread

if os.name == 'posix':

    class trigger (asyncore.file_dispatcher):

        "Wake up a call to select() running in the main thread"

        # This is useful in a context where you are using Medusa's I/O
        # subsystem to deliver data, but the data is generated by another
        # thread.  Normally, if Medusa is in the middle of a call to
        # select(), new output data generated by another thread will have
        # to sit until the call to select() either times out or returns.
        # If the trigger is 'pulled' by another thread, it should immediately
        # generate a READ event on the trigger object, which will force the
        # select() invocation to return.

        # A common use for this facility: letting Medusa manage I/O for a
        # large number of connections; but routing each request through a
        # thread chosen from a fixed-size thread pool.  When a thread is
        # acquired, a transaction is performed, but output data is
        # accumulated into buffers that will be emptied more efficiently
        # by Medusa. [picture a server that can process database queries
        # rapidly, but doesn't want to tie up threads waiting to send data
        # to low-bandwidth connections]

        # The other major feature provided by this class is the ability to
        # move work back into the main thread: if you call pull_trigger()
        # with a thunk argument, when select() wakes up and receives the
        # event it will call your thunk from within that thread.  The main
        # purpose of this is to remove the need to wrap thread locks around
        # Medusa's data structures, which normally do not need them.  [To see
        # why this is true, imagine this scenario: A thread tries to push some
        # new data onto a channel's outgoing data queue at the same time that
        # the main thread is trying to remove some]

        def __init__ (self):
            r, w = os.pipe()
            self.trigger = w
            asyncore.file_dispatcher.__init__ (self, r)
            self.lock = thread.allocate_lock()
            self.thunks = []

        def __repr__ (self):
            return '<select-trigger (pipe) at %x>' % id(self)

        def readable (self):
            return 1

        def writable (self):
            return 0

        def handle_connect (self):
            pass

        def pull_trigger (self, thunk=None):
            # print 'PULL_TRIGGER: ', len(self.thunks)
            if thunk:
                try:
                    self.lock.acquire()
                    self.thunks.append (thunk)
                finally:
                    self.lock.release()
            os.write (self.trigger, 'x')

        def handle_read (self):
            self.recv (8192)
            try:
                self.lock.acquire()
                for thunk in self.thunks:
                    try:
                        thunk()
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
                self.thunks = []
            finally:
                self.lock.release()

else:

    # win32-safe version

    class trigger (asyncore.dispatcher):

        address = ('127.9.9.9', 19999)

        def __init__ (self):
            a = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            w = socket.socket (socket.AF_INET, socket.SOCK_STREAM)

            # set TCP_NODELAY to true to avoid buffering
            w.setsockopt (socket.IPPROTO_TCP, 1, 1)

            # tricky: get a pair of connected sockets
            host = '127.0.0.1'
            port = 19999
            while 1:
                try:
                    self.address = (host, port)
                    a.bind (self.address)
                    break
                except:
                    if port <= 19950:
                        raise 'Bind Error', 'Cannot bind trigger!'
                    port = port - 1

            a.listen (1)
            w.setblocking (0)
            try:
                w.connect (self.address)
            except:
                pass
            r, addr = a.accept()
            a.close()
            w.setblocking (1)
            self.trigger = w

            asyncore.dispatcher.__init__ (self, r)
            self.lock = thread.allocate_lock()
            self.thunks = []
            self._trigger_connected = 0

        def __repr__ (self):
            return '<select-trigger (loopback) at %x>' % id(self)

        def readable (self):
            return 1

        def writable (self):
            return 0

        def handle_connect (self):
            pass

        def pull_trigger (self, thunk=None):
            if thunk:
                try:
                    self.lock.acquire()
                    self.thunks.append (thunk)
                finally:
                    self.lock.release()
            self.trigger.send ('x')

        def handle_read (self):
            self.recv (8192)
            try:
                self.lock.acquire()
                for thunk in self.thunks:
                    try:
                        thunk()
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
                self.thunks = []
            finally:
                self.lock.release()

the_trigger = None

class trigger_file:

    "A 'triggered' file object"

    buffer_size = 4096

    def __init__ (self, parent):
        global the_trigger
        if the_trigger is None:
            the_trigger = trigger()
        self.parent = parent
        self.buffer = ''

    def write (self, data):
        self.buffer = self.buffer + data
        if len(self.buffer) > self.buffer_size:
            d, self.buffer = self.buffer, ''
            the_trigger.pull_trigger (
                    lambda d=d, p=self.parent: p.push (d)
                    )

    def writeline (self, line):
        self.write (line + '\r\n')

    def writelines (self, lines):
        self.write (
                string.joinfields (
                        lines,
                        '\r\n'
                        ) + '\r\n'
                )

    def flush (self):
        if self.buffer:
            d, self.buffer = self.buffer, ''
            the_trigger.pull_trigger (
                    lambda p=self.parent, d=d: p.push (d)
                    )

    def softspace (self, *args):
        pass

    def close (self):
        # in a derived class, you may want to call trigger_close() instead.
        self.flush()
        self.parent = None

    def trigger_close (self):
        d, self.buffer = self.buffer, ''
        p, self.parent = self.parent, None
        the_trigger.pull_trigger (
                lambda p=p, d=d: (p.push(d), p.close_when_done())
                )

if __name__ == '__main__':

    import time

    def thread_function (output_file, i, n):
        print 'entering thread_function'
        while n:
            time.sleep (5)
            output_file.write ('%2d.%2d %s\r\n' % (i, n, output_file))
            output_file.flush()
            n = n - 1
        output_file.close()
        print 'exiting thread_function'

    class thread_parent (asynchat.async_chat):

        def __init__ (self, conn, addr):
            self.addr = addr
            asynchat.async_chat.__init__ (self, conn)
            self.set_terminator ('\r\n')
            self.buffer = ''
            self.count = 0

        def collect_incoming_data (self, data):
            self.buffer = self.buffer + data

        def found_terminator (self):
            data, self.buffer = self.buffer, ''
            if not data:
                asyncore.close_all()
                print "done"
                return
            n = string.atoi (string.split (data)[0])
            tf = trigger_file (self)
            self.count = self.count + 1
            thread.start_new_thread (thread_function, (tf, self.count, n))

    class thread_server (asyncore.dispatcher):

        def __init__ (self, family=socket.AF_INET, address=('', 9003)):
            asyncore.dispatcher.__init__ (self)
            self.create_socket (family, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind (address)
            self.listen (5)

        def handle_accept (self):
            conn, addr = self.accept()
            tp = thread_parent (conn, addr)

    thread_server()
    #asyncore.loop(1.0, use_poll=1)
    try:
        asyncore.loop ()
    except:
        asyncore.close_all()
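The thunk mechanism the comments above describe is easiest to see in isolation. A minimal sketch, not part of the original file; dummy_channel is a hypothetical stand-in for a real async_chat channel:

import asyncore
import thread
from select_trigger import trigger

the_trigger = trigger()

class dummy_channel:
    "stand-in for a real channel living in the main select() loop"
    def push (self, data):
        print 'main thread got %r' % data

def worker (channel, data):
    # runs in its own thread; the push is deferred into the select()
    # thread via the trigger, so no locking of channel state is needed
    the_trigger.pull_trigger (lambda c=channel, d=data: c.push (d))

thread.start_new_thread (worker, (dummy_channel(), 'payload'))
asyncore.loop()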
ZServer/medusa/status_handler.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-

VERSION_STRING = "$Id: status_handler.py,v 1.4 2000/06/02 14:22:48 brian Exp $"

#
# medusa status extension
#

import string
import time
import regex

import asyncore
import http_server
import medusa_gif
import producers
from counter import counter

START_TIME = long(time.time())

# split a uri
# <path>;<params>?<query>#<fragment>
path_regex = regex.compile (
#       path      params    query   fragment
        '\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\\)?\\(#.*\\)?'
        )

def split_path (path):
    if path_regex.match (path) != len(path):
        raise ValueError, "bad path"
    else:
        return map (lambda i, r=path_regex: r.group(i), range(1, 5))

class status_extension:
    hit_counter = counter()

    def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
        self.objects = objects
        self.statusdir = statusdir
        self.allow_emergency_debug = allow_emergency_debug
        # We use /status instead of statusdir here because it's too
        # hard to pass statusdir to the logger, who makes the HREF
        # to the object dir.  We don't need the security-through-
        # obscurity here in any case, because the id is obscurity enough
        self.hyper_regex = regex.compile ('/status/object/\([0-9]+\)/.*')
        self.hyper_objects = []
        for object in objects:
            self.register_hyper_object (object)

    def __repr__ (self):
        return '<Status Extension (%s hits) at %x>' % (
                self.hit_counter,
                id(self)
                )

    def match (self, request):
        [path, params, query, fragment] = split_path (request.uri)
        # For reasons explained above, we don't use statusdir for /object
        return (path[:len(self.statusdir)] == self.statusdir or
                path[:len("/status/object/")] == '/status/object/')

    # Possible Targets:
    # /status
    # /status/channel_list
    # /status/medusa.gif

    # can we have 'clickable' objects?
    # [yes, we can use id(x) and do a linear search]

    # Dynamic producers:
    # HTTP/1.0: we must close the channel, because it's dynamic output
    # HTTP/1.1: we can use the chunked transfer-encoding, and leave
    #   it open.

    def handle_request (self, request):
        [path, params, query, fragment] = split_path (request.uri)
        self.hit_counter.increment()
        if path == self.statusdir:          # and not a subdirectory
            up_time = string.join (english_time (long(time.time()) - START_TIME))
            request['Content-Type'] = 'text/html'
            request.push (
                    '<html>'
                    '<title>Medusa Status Reports</title>'
                    '<body bgcolor="#ffffff">'
                    '<h1>Medusa Status Reports</h1>'
                    '<b>Up:</b> %s' % up_time
                    )
            for i in range(len(self.objects)):
                request.push (self.objects[i].status())
                request.push ('<hr>\r\n')
            request.push (
                    '<p><a href="%s/channel_list">Channel List</a>'
                    '<hr>'
                    '<img src="%s/medusa.gif" align=right width=%d height=%d>'
                    '</body></html>' % (
                            self.statusdir,
                            self.statusdir,
                            medusa_gif.width,
                            medusa_gif.height
                            )
                    )
            request.done()
        elif path == self.statusdir + '/channel_list':
            request['Content-Type'] = 'text/html'
            request.push ('<html><body>')
            request.push (channel_list_producer (self.statusdir))
            request.push (
                    '<hr>'
                    '<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
                            self.statusdir,
                            medusa_gif.width,
                            medusa_gif.height
                            ) +
                    '</body></html>'
                    )
            request.done()

        elif path == self.statusdir + '/medusa.gif':
            request['Content-Type'] = 'image/gif'
            request['Content-Length'] = len(medusa_gif.data)
            request.push (medusa_gif.data)
            request.done()

        elif path == self.statusdir + '/close_zombies':
            message = (
                    '<h2>Closing all zombie http client connections...</h2>'
                    '<p><a href="%s">Back to the status page</a>' % self.statusdir
                    )
            request['Content-Type'] = 'text/html'
            request['Content-Length'] = len (message)
            request.push (message)
            now = int (time.time())
            for channel in asyncore.socket_map.keys():
                if channel.__class__ == http_server.http_channel:
                    if channel != request.channel:
                        if (now - channel.creation_time) > channel.zombie_timeout:
                            channel.close()
            request.done()

        # Emergency Debug Mode
        # If a server is running away from you, don't KILL it!
        # Move all the AF_INET server ports and perform an autopsy...
        # [disabled by default to protect the innocent]
        elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
            request.push ('<html>Moving All Servers...</html>')
            request.done()
            for channel in asyncore.socket_map.keys():
                if channel.accepting:
                    if type(channel.addr) is type(()):
                        ip, port = channel.addr
                        channel.socket.close()
                        channel.del_channel()
                        channel.addr = (ip, port+10000)
                        fam, typ = channel.family_and_type
                        channel.create_socket (fam, typ)
                        channel.set_reuse_addr()
                        channel.bind (channel.addr)
                        channel.listen (5)

        elif self.hyper_regex.match (path) != -1:
            oid = string.atoi (self.hyper_regex.group (1))
            for object in self.hyper_objects:
                if id (object) == oid:
                    if hasattr (object, 'hyper_respond'):
                        object.hyper_respond (self, path, request)
        else:
            request.error (404)
            return

    def status (self):
        return producers.simple_producer (
                '<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
                )

    def register_hyper_object (self, object):
        if not object in self.hyper_objects:
            self.hyper_objects.append (object)

import logger

class logger_for_status (logger.tail_logger):

    def status (self):
        return 'Last %d log entries for: %s' % (
                len (self.messages),
                html_repr (self)
                )

    def hyper_respond (self, sh, path, request):
        request['Content-Type'] = 'text/plain'
        messages = self.messages[:]
        messages.reverse()
        request.push (lines_producer (messages))
        request.done()

class lines_producer:
    def __init__ (self, lines):
        self.lines = lines

    def ready (self):
        return len(self.lines)

    def more (self):
        if self.lines:
            chunk = self.lines[:50]
            self.lines = self.lines[50:]
            return string.join (chunk, '\r\n') + '\r\n'
        else:
            return ''

class channel_list_producer (lines_producer):
    def __init__ (self, statusdir):
        channel_reprs = map (
                lambda x: '&lt;' + repr(x)[1:-1] + '&gt;',
                asyncore.socket_map.values()
                )
        channel_reprs.sort()
        lines_producer.__init__ (
                self,
                ['<h1>Active Channel List</h1>',
                 '<pre>'
                 ] + channel_reprs + [
                         '</pre>',
                         '<p><a href="%s">Status Report</a>' % statusdir
                         ]
                )

# this really needs a full-blown quoter...
def sanitize (s):
    if '<' in s:
        s = string.join (string.split (s, '<'), '&lt;')
    if '>' in s:
        s = string.join (string.split (s, '>'), '&gt;')
    return s

def html_repr (object):
    so = sanitize (repr (object))
    if hasattr (object, 'hyper_respond'):
        return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
    else:
        return so

def html_reprs (list, front='', back=''):
    reprs = map (
            lambda x, f=front, b=back: '%s%s%s' % (f, x, b),
            map (lambda x: sanitize (html_repr(x)), list)
            )
    reprs.sort()
    return reprs

# for example, tera, giga, mega, kilo
# p_d (n, (1024, 1024, 1024, 1024))
# smallest divider goes first - for example
# minutes, hours, days
# p_d (n, (60, 60, 24))
def progressive_divide (n, parts):
    result = []
    for part in parts:
        n, rem = divmod (n, part)
        result.append (rem)
    result.append (n)
    return result

# b,k,m,g,t
def split_by_units (n, units, dividers, format_string):
    divs = progressive_divide (n, dividers)
    result = []
    for i in range(len(units)):
        if divs[i]:
            result.append (format_string % (divs[i], units[i]))
    result.reverse()
    if not result:
        return [format_string % (0, units[0])]
    else:
        return result

def english_bytes (n):
    return split_by_units (
            n, ('', 'K', 'M', 'G', 'T'),
            (1024, 1024, 1024, 1024, 1024),
            '%d %sB'
            )

def english_time (n):
    return split_by_units (
            n, ('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
            (    60,     60,      24,      7,       52),
            '%d %s'
            )
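The divide-and-label arithmetic at the end of this file is easy to check by hand. A worked example, not part of the original file: progressive_divide peels off the smallest unit first, and split_by_units drops the zero terms before reversing into largest-unit-first order.

from status_handler import progressive_divide, english_time, english_bytes

print progressive_divide (3700, (60, 60, 24))
# [40, 1, 1, 0] -- 3700 seconds is 1 hour, 1 minute, 40 seconds
print english_time (3700)       # ['1 hours', '1 mins', '40 secs']
print english_bytes (10485760)  # ['10 MB']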
ZServer/medusa/test_logger.py deleted 100644 → 0

import sys
import socket
import select

print "Simulating Unix-domain logging using file: %s" % sys.argv[1]

log_socket = socket.socket (socket.AF_UNIX, socket.SOCK_DGRAM)
log_socket.bind (sys.argv[1])

while 1:
    n = select.select ([log_socket], [], [])
    print '.',
    if n > 0:
        print log_socket.recv (1024)
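A matching sender for this test, not part of the original file; the socket path here is an assumption and must be whatever was passed as sys.argv[1] above:

import socket

s = socket.socket (socket.AF_UNIX, socket.SOCK_DGRAM)
s.sendto ('hello, logger', '/tmp/test.log.sock')   # hypothetical path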
lib/python/ZServer/medusa/__init__.py deleted 100644 → 0

# Make medusa into a package

__version__ = '$Revision: 1.5 $'[11:-2]
lib/python/ZServer/medusa/asynchat.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-
#       Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
#       Author: Sam Rushing <rushing@nightmare.com>

# ======================================================================
# Copyright 1996 by Sam Rushing
#
#                         All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================

"""A class supporting chat-style (command/response) protocols.

This class adds support for 'chat' style protocols - where one side
sends a 'command', and the other sends a response (examples would be
the common internet protocols - smtp, nntp, ftp, etc..).

The handle_read() method looks at the input stream for the current
'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
for multi-line output), calling self.found_terminator() on its
receipt.

for example:
Say you build an async nntp client using this class.  At the start
of the connection, you'll have self.terminator set to '\r\n', in
order to process the single-line greeting.  Just before issuing a
'LIST' command you'll set it to '\r\n.\r\n'.  The output of the LIST
command will be accumulated (using your own 'collect_incoming_data'
method) up to the terminator, and then control will be returned to
you - by calling your self.found_terminator() method.
"""

import socket
import asyncore
import string

class async_chat (asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults

    ac_in_buffer_size  = 4096
    ac_out_buffer_size = 4096

    def __init__ (self, conn=None):
        self.ac_in_buffer = ''
        self.ac_out_buffer = ''
        self.producer_fifo = fifo()
        asyncore.dispatcher.__init__ (self, conn)

    def set_terminator (self, term):
        "Set the input delimiter.  Can be a fixed string of any length, an integer, or None"
        self.terminator = term

    def get_terminator (self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read (self):

        try:
            data = self.recv (self.ac_in_buffer_size)
        except socket.error, why:
            self.handle_error()
            return

        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data.  The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(1024).

        while self.ac_in_buffer:
            lb = len (self.ac_in_buffer)
            terminator = self.get_terminator()
            if terminator is None:
                # no terminator, collect it all
                self.collect_incoming_data (self.ac_in_buffer)
                self.ac_in_buffer = ''
            elif type(terminator) == type(0):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data (self.ac_in_buffer)
                    self.ac_in_buffer = ''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data (self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = string.find (self.ac_in_buffer, terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string (source of subtle bugs)
                        self.collect_incoming_data (self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end (self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data (self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data (self.ac_in_buffer)
                        self.ac_in_buffer = ''

    def handle_write (self):
        self.initiate_send ()

    def handle_close (self):
        self.close()

    def push (self, data):
        self.producer_fifo.push (simple_producer (data))
        self.initiate_send()

    def push_with_producer (self, producer):
        self.producer_fifo.push (producer)
        self.initiate_send()

    def readable (self):
        "predicate for inclusion in the readable for select()"
        return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)

    def writable (self):
        "predicate for inclusion in the writable for select()"
        # return len(self.ac_out_buffer) or len(self.producer_fifo) or (not self.connected)
        # this is about twice as fast, though not as clear.
        return not (
                (self.ac_out_buffer == '') and
                self.producer_fifo.is_empty() and
                self.connected
                )

    def close_when_done (self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.push (None)

    # refill the outgoing buffer by calling the more() method
    # of the first producer in the queue
    def refill_buffer (self):
        _string_type = type('')
        while 1:
            if len(self.producer_fifo):
                p = self.producer_fifo.first()
                # a 'None' in the producer fifo is a sentinel,
                # telling us to close the channel.
                if p is None:
                    if not self.ac_out_buffer:
                        self.producer_fifo.pop()
                        self.close()
                    return
                elif type(p) is _string_type:
                    self.producer_fifo.pop()
                    self.ac_out_buffer = self.ac_out_buffer + p
                    return
                data = p.more()
                if data:
                    self.ac_out_buffer = self.ac_out_buffer + data
                    return
                else:
                    self.producer_fifo.pop()
            else:
                return

    def initiate_send (self):
        obs = self.ac_out_buffer_size
        # try to refill the buffer
        if (len (self.ac_out_buffer) < obs):
            self.refill_buffer()

        if self.ac_out_buffer and self.connected:
            # try to send the buffer
            try:
                num_sent = self.send (self.ac_out_buffer[:obs])
                if num_sent:
                    self.ac_out_buffer = self.ac_out_buffer[num_sent:]

            except socket.error, why:
                self.handle_error()
                return

    def discard_buffers (self):
        # Emergencies only!
        self.ac_in_buffer = ''
        self.ac_out_buffer = ''
        while self.producer_fifo:
            self.producer_fifo.pop()

class simple_producer:

    def __init__ (self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more (self):
        if len (self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = ''
            return result

class fifo:
    def __init__ (self, list=None):
        if not list:
            self.list = []
        else:
            self.list = list

    def __len__ (self):
        return len(self.list)

    def is_empty (self):
        return self.list == []

    def first (self):
        return self.list[0]

    def push (self, data):
        self.list.append (data)

    def pop (self):
        if self.list:
            result = self.list[0]
            del self.list[0]
            return (1, result)
        else:
            return (0, None)

# Given 'haystack', see if any prefix of 'needle' is at its end.  This
# assumes an exact match has already been checked.  Return the number of
# characters matched.
# for example:
# f_p_a_e ("qwerty\r", "\r\n") => 1
# f_p_a_e ("qwerty\r\n", "\r\n") => 2
# f_p_a_e ("qwertydkjf", "\r\n") => 0

# this could maybe be made faster with a computed regex?
# [answer: no; circa Python-2.0, Jan 2001]
# python:    18307/s
# re:        12820/s
# regex:     14035/s

def find_prefix_at_end (haystack, needle):
    nl = len(needle)
    result = 0
    for i in range (1, nl):
        if haystack[-(nl-i):] == needle[:(nl-i)]:
            result = nl-i
            break
    return result
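The contract the docstring describes, two overridable methods plus a terminator, looks like this in practice. A minimal sketch, not part of the original file:

import asynchat

class line_channel (asynchat.async_chat):
    "collects CRLF-terminated lines and hands them off one at a time"

    def __init__ (self, conn=None):
        asynchat.async_chat.__init__ (self, conn)
        self.set_terminator ('\r\n')
        self.buffer = ''

    def collect_incoming_data (self, data):
        self.buffer = self.buffer + data

    def found_terminator (self):
        line, self.buffer = self.buffer, ''
        print 'got line: %r' % line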
lib/python/ZServer/medusa/counter.py deleted 100644 → 0

# -*- Mode: Python; tab-width: 4 -*-

# It is tempting to add an __int__ method to this class, but it's not
# a good idea.  This class tries to gracefully handle integer
# overflow, and to hide this detail from both the programmer and the
# user.  Note that the __str__ method can be relied on for printing out
# the value of a counter:
#
# >>> print 'Total Client: %s' % self.total_clients
#
# If you need to do arithmetic with the value, then use the 'as_long'
# method, the use of long arithmetic is a reminder that the counter
# will overflow.

class counter:
    "general-purpose counter"

    def __init__ (self, initial_value=0):
        self.value = initial_value

    def increment (self, delta=1):
        result = self.value
        try:
            self.value = self.value + delta
        except OverflowError:
            self.value = long(self.value) + delta
        return result

    def decrement (self, delta=1):
        result = self.value
        try:
            self.value = self.value - delta
        except OverflowError:
            self.value = long(self.value) - delta
        return result

    def as_long (self):
        return long(self.value)

    def __nonzero__ (self):
        return self.value != 0

    def __repr__ (self):
        return '<counter value=%s at %x>' % (self.value, id(self))

    def __str__ (self):
        return str(long(self.value))[:-1]
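The comment block at the top of this file prescribes a usage pattern; spelled out, a sketch that is not part of the original file:

from counter import counter

total_clients = counter()
total_clients.increment()
print 'Total Clients: %s' % total_clients   # __str__ is always safe to print
n = total_clients.as_long() + 1             # arithmetic goes through as_long()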
lib/python/ZServer/medusa/default_handler.py
deleted
100644 → 0
View file @
6786b136
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1997 by Sam Rushing
# All Rights Reserved.
#
RCS_ID
=
'$Id: default_handler.py,v 1.4 2000/06/02 14:22:48 brian Exp $'
# standard python modules
import
os
import
regex
import
posixpath
import
stat
import
string
import
time
# medusa modules
import
http_date
import
http_server
import
mime_type_table
import
status_handler
import
producers
# from <lib/urllib.py>
_quoteprog
=
regex
.
compile
(
'%[0-9a-fA-F][0-9a-fA-F]'
)
def
unquote
(
s
):
i
=
0
n
=
len
(
s
)
res
=
[]
while
0
<=
i
<
n
:
j
=
_quoteprog
.
search
(
s
,
i
)
if
j
<
0
:
res
.
append
(
s
[
i
:])
break
res
.
append
(
s
[
i
:
j
]
+
chr
(
string
.
atoi
(
s
[
j
+
1
:
j
+
3
],
16
)))
i
=
j
+
3
return
string
.
join
(
res
,
''
)
# split a uri
# <path>;<params>?<query>#<fragment>
path_regex
=
regex
.
compile
(
# path params query fragment
'
\
\
([^;?#]*
\
\
)
\
\
(;[^?#]*
\
\
)?
\
\
(
\
\
?[^#]*
\
)?
\
(#.*
\
)?
'
)
def split_path (path):
if path_regex.match (path) != len(path):
raise ValueError, "bad path"
else:
return map (lambda i,r=path_regex: r.group(i), range(1,5))
# This is the 'default' handler.  it implements the base set of
# features expected of a simple file-delivering HTTP server.  file
# services are provided through a 'filesystem' object, the very same
# one used by the FTP server.
#
# You can replace or modify this handler if you want a non-standard
# HTTP server.  You can also derive your own handler classes from
# it.
#
# support for handling POST requests is available in the derived
# class <default_with_post_handler>, defined below.
#

from counter import counter

class default_handler:

    valid_commands = ['get', 'head']

    IDENT = 'Default HTTP Request Handler'

    # Pathnames that are tried when a URI resolves to a directory name
    directory_defaults = [
        'index.html',
        'default.html'
        ]

    default_file_producer = producers.file_producer

    def __init__ (self, filesystem):
        self.filesystem = filesystem
        # count total hits
        self.hit_counter = counter()
        # count file deliveries
        self.file_counter = counter()
        # count cache hits
        self.cache_counter = counter()

    hit_counter = 0

    def __repr__ (self):
        return '<%s (%s hits) at %x>' % (
            self.IDENT,
            self.hit_counter,
            id (self)
            )

    # always match, since this is a default
    def match (self, request):
        return 1

    # handle a file request, with caching.
    def handle_request (self, request):

        if request.command not in self.valid_commands:
            request.error (400) # bad request
            return

        self.hit_counter.increment()

        [path, params, query, fragment] = split_path (request.uri)

        # unquote path if necessary (thanks to Skip Montaro for pointing
        # out that we must unquote in piecemeal fashion).
        if '%' in path:
            path = unquote (path)

        # strip off all leading slashes
        while path and path[0] == '/':
            path = path[1:]

        if self.filesystem.isdir (path):
            if path and path[-1] != '/':
                request['Location'] = 'http://%s/%s/' % (
                    request.channel.server.server_name,
                    path
                    )
                request.error (301)
                return

            # we could also generate a directory listing here,
            # may want to move this into another method for that
            # purpose
            found = 0
            if path and path[-1] != '/':
                path = path + '/'
            for default in self.directory_defaults:
                p = path + default
                if self.filesystem.isfile (p):
                    path = p
                    found = 1
                    break
            if not found:
                request.error (404) # Not Found
                return

        elif not self.filesystem.isfile (path):
            request.error (404) # Not Found
            return

        file_length = self.filesystem.stat (path)[stat.ST_SIZE]

        ims = get_header (IF_MODIFIED_SINCE, request.header)

        length_match = 1
        if ims:
            length = IF_MODIFIED_SINCE.group(4)
            if length:
                try:
                    length = string.atoi (length)
                    if length != file_length:
                        length_match = 0
                except:
                    pass

        ims_date = 0

        if ims:
            ims_date = http_date.parse_http_date (ims)

        try:
            mtime = self.filesystem.stat (path)[stat.ST_MTIME]
        except:
            request.error (404)
            return

        if length_match and ims_date:
            if mtime <= ims_date:
                request.reply_code = 304
                request.done()
                self.cache_counter.increment()
                return
        try:
            file = self.filesystem.open (path, 'rb')
        except IOError:
            request.error (404)
            return

        request['Last-Modified'] = http_date.build_http_date (mtime)
        request['Content-Length'] = file_length
        self.set_content_type (path, request)

        if request.command == 'get':
            request.push (self.default_file_producer (file))

        self.file_counter.increment()
        request.done()

    def set_content_type (self, path, request):
        ext = string.lower (get_extension (path))
        if mime_type_table.content_type_map.has_key (ext):
            request['Content-Type'] = mime_type_table.content_type_map[ext]
        else:
            # TODO: test a chunk off the front of the file for 8-bit
            # characters, and use application/octet-stream instead.
            request['Content-Type'] = 'text/plain'

    def status (self):
        return producers.simple_producer (
            '<li>%s' % status_handler.html_repr (self)
            + '<ul>'
            + '<li><b>Total Hits:</b> %s' % self.hit_counter
            + '<li><b>Files Delivered:</b> %s' % self.file_counter
            + '<li><b>Cache Hits:</b> %s' % self.cache_counter
            + '</ul>'
            )

ACCEPT = regex.compile ('Accept: \(.*\)', regex.casefold)

# HTTP/1.0 doesn't say anything about the "; length=nnnn" addition
# to this header.  I suppose it's purpose is to avoid the overhead
# of parsing dates...
IF_MODIFIED_SINCE = regex.compile (
    'If-Modified-Since: \([^;]+\)\(\(; length=\([0-9]+\)$\)\|$\)',
    regex.casefold
    )

USER_AGENT = regex.compile ('User-Agent: \(.*\)', regex.casefold)

boundary_chars = "A-Za-z0-9'()+_,./:=?-"

CONTENT_TYPE = regex.compile (
    'Content-Type: \([^;]+\)\(\(; boundary=\([%s]+\)$\)\|$\)' % boundary_chars,
    regex.casefold
    )

get_header = http_server.get_header

def get_extension (path):
    dirsep = string.rfind (path, '/')
    dotsep = string.rfind (path, '.')
    if dotsep > dirsep:
        return path[dotsep+1:]
    else:
        return ''
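A minimal sketch of how this handler is typically wired up (document root and port are placeholders; install_handler is the hook Medusa's http_server provides for registering handlers):

    import asyncore
    import filesys
    import http_server
    import default_handler

    fs = filesys.os_filesystem ('/var/www')        # placeholder document root
    hs = http_server.http_server ('', 8080)        # all interfaces, port 8080
    hs.install_handler (default_handler.default_handler (fs))
    asyncore.loop()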
lib/python/ZServer/medusa/filesys.py
deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
# $Id: filesys.py,v 1.5 2000/06/02 14:22:48 brian Exp $
# Author: Sam Rushing <rushing@nightmare.com>
#
# Generic filesystem interface.
#
# We want to provide a complete wrapper around any and all
# filesystem operations.
# this class is really just for documentation,
# identifying the API for a filesystem object.
# opening files for reading, and listing directories, should
# return a producer.
class abstract_filesystem:
    def __init__ (self):
        pass

    def current_directory (self):
        "Return a string representing the current directory."
        pass

    def listdir (self, path, long=0):
        """Return a listing of the directory at 'path' The empty string
        indicates the current directory.  If 'long' is set, instead
        return a list of (name, stat_info) tuples
        """
        pass

    def open (self, path, mode):
        "Return an open file object"
        pass

    def stat (self, path):
        "Return the equivalent of os.stat() on the given path."
        pass

    def isdir (self, path):
        "Does the path represent a directory?"
        pass

    def isfile (self, path):
        "Does the path represent a plain file?"
        pass

    def cwd (self, path):
        "Change the working directory."
        pass

    def cdup (self):
        "Change to the parent of the current directory."
        pass

    def longify (self, path):
        """Return a 'long' representation of the filename
        [for the output of the LIST command]"""
        pass
# standard wrapper around a unix-like filesystem, with a 'false root'
# capability.
# security considerations: can symbolic links be used to 'escape' the
# root? should we allow it? if not, then we could scan the
# filesystem on startup, but that would not help if they were added
# later. We will probably need to check for symlinks in the cwd method.
# what to do if wd is an invalid directory?
import os
import stat
import string

def safe_stat (path):
    try:
        return (path, os.stat (path))
    except:
        return None

import regex
import regsub
import glob

class os_filesystem:
    path_module = os.path

    # set this to zero if you want to disable pathname globbing.
    # [we currently don't glob, anyway]
    do_globbing = 1

    def __init__ (self, root, wd='/'):
        self.root = root
        self.wd = wd

    def current_directory (self):
        return self.wd

    def isfile (self, path):
        p = self.normalize (self.path_module.join (self.wd, path))
        return self.path_module.isfile (self.translate(p))

    def isdir (self, path):
        p = self.normalize (self.path_module.join (self.wd, path))
        return self.path_module.isdir (self.translate(p))

    def cwd (self, path):
        p = self.normalize (self.path_module.join (self.wd, path))
        translated_path = self.translate(p)
        if not self.path_module.isdir (translated_path):
            return 0
        else:
            old_dir = os.getcwd()
            # temporarily change to that directory, in order
            # to see if we have permission to do so.
            try:
                can = 0
                try:
                    os.chdir (translated_path)
                    can = 1
                    self.wd = p
                except:
                    pass
            finally:
                if can:
                    os.chdir (old_dir)
            return can

    def cdup (self):
        return self.cwd ('..')

    def listdir (self, path, long=0):
        p = self.translate (path)
        # I think we should glob, but limit it to the current
        # directory only.
        ld = os.listdir (p)
        if not long:
            return list_producer (ld, 0, None)
        else:
            old_dir = os.getcwd()
            try:
                os.chdir (p)
                # if os.stat fails we ignore that file.
                result = filter (None, map (safe_stat, ld))
            finally:
                os.chdir (old_dir)
            return list_producer (result, 1, self.longify)

    # TODO: implement a cache w/timeout for stat()
    def stat (self, path):
        p = self.translate (path)
        return os.stat (p)

    def open (self, path, mode):
        p = self.translate (path)
        return open (p, mode)

    def unlink (self, path):
        p = self.translate (path)
        return os.unlink (p)

    def mkdir (self, path):
        p = self.translate (path)
        return os.mkdir (p)

    def rmdir (self, path):
        p = self.translate (path)
        return os.rmdir (p)

    # utility methods

    def normalize (self, path):
        # watch for the ever-sneaky '/+' path element
        path = regsub.gsub ('/+', '/', path)
        p = self.path_module.normpath (path)
        # remove 'dangling' cdup's.
        if len(p) > 2 and p[:3] == '/..':
            p = '/'
        return p

    def translate (self, path):
        # we need to join together three separate
        # path components, and do it safely.
        # <real_root>/<current_directory>/<path>
        # use the operating system's path separator.
        path = string.join (string.split (path, '/'), os.sep)
        p = self.normalize (self.path_module.join (self.wd, path))
        p = self.normalize (self.path_module.join (self.root, p[1:]))
        return p

    def longify (self, (path, stat_info)):
        return unix_longify (path, stat_info)

    def __repr__ (self):
        return '<unix-style fs root:%s wd:%s>' % (
            self.root,
            self.wd
            )
if os.name == 'posix':

    class unix_filesystem (os_filesystem):
        pass

    class schizophrenic_unix_filesystem (os_filesystem):
        PROCESS_UID  = os.getuid()
        PROCESS_EUID = os.geteuid()
        PROCESS_GID  = os.getgid()
        PROCESS_EGID = os.getegid()

        def __init__ (self, root, wd='/', persona=(None, None)):
            os_filesystem.__init__ (self, root, wd)
            self.persona = persona

        def become_persona (self):
            if self.persona is not (None, None):
                uid, gid = self.persona
                # the order of these is important!
                os.setegid (gid)
                os.seteuid (uid)

        def become_nobody (self):
            if self.persona is not (None, None):
                os.seteuid (self.PROCESS_UID)
                os.setegid (self.PROCESS_GID)

        # cwd, cdup, open, listdir
        def cwd (self, path):
            try:
                self.become_persona()
                return os_filesystem.cwd (self, path)
            finally:
                self.become_nobody()

        def cdup (self, path):
            try:
                self.become_persona()
                return os_filesystem.cdup (self)
            finally:
                self.become_nobody()

        def open (self, filename, mode):
            try:
                self.become_persona()
                return os_filesystem.open (self, filename, mode)
            finally:
                self.become_nobody()

        def listdir (self, path, long=0):
            try:
                self.become_persona()
                return os_filesystem.listdir (self, path, long)
            finally:
                self.become_nobody()
# This hasn't been very reliable across different platforms.
# maybe think about a separate 'directory server'.
#
# import posixpath
# import fcntl
# import FCNTL
# import select
# import asyncore
#
# # pipes /bin/ls for directory listings.
# class unix_filesystem (os_filesystem):
# pass
# path_module = posixpath
#
# def listdir (self, path, long=0):
# p = self.translate (path)
# if not long:
# return list_producer (os.listdir (p), 0, None)
# else:
# command = '/bin/ls -l %s' % p
# print 'opening pipe to "%s"' % command
# fd = os.popen (command, 'rt')
# return pipe_channel (fd)
#
# # this is both a dispatcher, _and_ a producer
# class pipe_channel (asyncore.file_dispatcher):
# buffer_size = 4096
#
# def __init__ (self, fd):
# asyncore.file_dispatcher.__init__ (self, fd)
# self.fd = fd
# self.done = 0
# self.data = ''
#
# def handle_read (self):
# if len (self.data) < self.buffer_size:
# self.data = self.data + self.fd.read (self.buffer_size)
# #print '%s.handle_read() => len(self.data) == %d' % (self, len(self.data))
#
# def handle_expt (self):
# #print '%s.handle_expt()' % self
# self.done = 1
#
# def ready (self):
# #print '%s.ready() => %d' % (self, len(self.data))
# return ((len (self.data) > 0) or self.done)
#
# def more (self):
# if self.data:
# r = self.data
# self.data = ''
# elif self.done:
# self.close()
# self.downstream.finished()
# r = ''
# else:
# r = None
# #print '%s.more() => %s' % (self, (r and len(r)))
# return r
# For the 'real' root, we could obtain a list of drives, and then
# use that. Doesn't win32 provide such a 'real' filesystem?
# [yes, I think something like this "\\.\c\windows"]
class msdos_filesystem (os_filesystem):
    def longify (self, (path, stat_info)):
        return msdos_longify (path, stat_info)
# A merged filesystem will let you plug other filesystems together.
# We really need the equivalent of a 'mount' capability - this seems
# to be the most general idea. So you'd use a 'mount' method to place
# another filesystem somewhere in the hierarchy.
# Note: this is most likely how I will handle ~user directories
# with the http server.
class merged_filesystem:
    def __init__ (self, *fsys):
        pass
# this matches the output of NT's ftp server (when in
# MSDOS mode) exactly.
def msdos_longify (file, stat_info):
    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
        dir = '<DIR>'
    else:
        dir = '     '
    date = msdos_date (stat_info[stat.ST_MTIME])
    return '%s %s %8d %s' % (
        date,
        dir,
        stat_info[stat.ST_SIZE],
        file
        )

def msdos_date (t):
    try:
        info = time.gmtime (t)
    except:
        info = time.gmtime (0)
    # year, month, day, hour, minute, second, ...
    if info[3] > 11:
        merid = 'PM'
        info[3] = info[3] - 12
    else:
        merid = 'AM'
    return '%02d-%02d-%02d %02d:%02d%s' % (
        info[1],
        info[2],
        info[0]%100,
        info[3],
        info[4],
        merid
        )

months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
          'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

mode_table = {
    '0': '---',
    '1': '--x',
    '2': '-w-',
    '3': '-wx',
    '4': 'r--',
    '5': 'r-x',
    '6': 'rw-',
    '7': 'rwx'
    }
import time

def unix_longify (file, stat_info):
    # for now, only pay attention to the lower bits
    mode = ('%o' % stat_info[stat.ST_MODE])[-3:]
    mode = string.join (map (lambda x: mode_table[x], mode), '')
    if stat.S_ISDIR (stat_info[stat.ST_MODE]):
        dirchar = 'd'
    else:
        dirchar = '-'
    date = ls_date (long(time.time()), stat_info[stat.ST_MTIME])
    return '%s%s %3d %-8s %-8s %8d %s %s' % (
        dirchar,
        mode,
        stat_info[stat.ST_NLINK],
        stat_info[stat.ST_UID],
        stat_info[stat.ST_GID],
        stat_info[stat.ST_SIZE],
        date,
        file
        )
# Emulate the unix 'ls' command's date field.
# it has two formats - if the date is more than 180
# days in the past, then it's like this:
# Oct 19 1995
# otherwise, it looks like this:
# Oct 19 17:33
def ls_date (now, t):
    try:
        info = time.gmtime (t)
    except:
        info = time.gmtime (0)
    # 15,600,000 == 86,400 * 180
    if (now - t) > 15600000:
        return '%s %2d %d' % (
            months[info[1]-1],
            info[2],
            info[0]
            )
    else:
        return '%s %2d %02d:%02d' % (
            months[info[1]-1],
            info[2],
            info[3],
            info[4]
            )
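For a concrete sense of the two formats, a small hypothetical call (timestamps made up):

    now = long (time.time())
    print ls_date (now, now)                # recent file: e.g. 'Apr 25 12:30'
    print ls_date (now, now - 86400L*365)   # old file:    e.g. 'Apr 25 2000'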
# ===========================================================================
# Producers
# ===========================================================================
class list_producer:
    def __init__ (self, file_list, long, longify):
        self.file_list = file_list
        self.long = long
        self.longify = longify
        self.done = 0

    def ready (self):
        if len(self.file_list):
            return 1
        else:
            if not self.done:
                self.done = 1
                return 0
            return (len (self.file_list) > 0)

    # this should do a pushd/popd
    def more (self):
        if not self.file_list:
            return ''
        else:
            # do a few at a time
            bunch = self.file_list[:50]
            if self.long:
                bunch = map (self.longify, bunch)
            self.file_list = self.file_list[50:]
            return string.joinfields (bunch, '\r\n') + '\r\n'
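A minimal sketch of browsing a directory through the wrapper ('/home/ftp' is a placeholder root); listdir() returns a list_producer, which is drained with more():

    fs = os_filesystem ('/home/ftp')
    if fs.isdir ('pub'):
        fs.cwd ('pub')
    listing = fs.listdir ('', 1)    # 'long' listing, unix ls style
    while 1:
        block = listing.more()
        if not block:
            break
        print block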
lib/python/ZServer/medusa/ftp_server.py
deleted 100755 → 0
# -*- Mode: Python; tab-width: 4 -*-
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1996-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: ftp_server.py,v 1.13 2000/07/05 14:22:13 brian Exp $'
# An extensible, configurable, asynchronous FTP server.
#
# All socket I/O is non-blocking, however file I/O is currently
# blocking. Eventually file I/O may be made non-blocking, too, if it
# seems necessary. Currently the only CPU-intensive operation is
# getting and formatting a directory listing. [this could be moved
# into another process/directory server, or another thread?]
#
# Only a subset of RFC 959 is implemented, but much of that RFC is
# vestigial anyway. I've attempted to include the most commonly-used
# commands, using the feature set of wu-ftpd as a guide.
import asyncore
import asynchat

import os
import regsub
import socket
import stat
import string
import sys
import time
# TODO: implement a directory listing cache. On very-high-load
# servers this could save a lot of disk abuse, and possibly the
# work of computing emulated unix ls output.
# Potential security problem with the FTP protocol? I don't think
# there's any verification of the origin of a data connection. Not
# really a problem for the server (since it doesn't send the port
# command, except when in PASV mode) But I think a data connection
# could be spoofed by a program with access to a sniffer - it could
# watch for a PORT command to go over a command channel, and then
# connect to that port before the server does.
# Unix user id's:
# In order to support assuming the id of a particular user,
# it seems there are two options:
# 1) fork, and seteuid in the child
# 2) carefully control the effective uid around filesystem accessing
# methods, using try/finally. [this seems to work]
VERSION = string.split(RCS_ID)[2]

from counter import counter
import producers
import status_handler
import logger
import string
class ftp_channel (asynchat.async_chat):

    # defaults for a reliable __repr__
    addr = ('unknown', '0')

    # unset this in a derived class in order
    # to enable the commands in 'self.write_commands'
    read_only = 1
    write_commands = ['appe','dele','mkd','rmd','rnfr','rnto','stor','stou']

    restart_position = 0

    # comply with (possibly troublesome) RFC959 requirements
    # This is necessary to correctly run an active data connection
    # through a firewall that triggers on the source port (expected
    # to be 'L-1', or 20 in the normal case).
    bind_local_minus_one = 0

    def __init__ (self, server, conn, addr):
        self.server = server
        self.current_mode = 'a'
        self.addr = addr
        asynchat.async_chat.__init__ (self, conn)
        self.set_terminator ('\r\n')

        # client data port.  Defaults to 'the same as the control connection'.
        self.client_addr = (addr[0], 21)

        self.client_dc = None
        self.in_buffer = ''
        self.closing = 0
        self.passive_acceptor = None
        self.passive_connection = None
        self.filesystem = None
        self.authorized = 0
        # send the greeting
        self.respond (
            '220 %s FTP server (Medusa Async V%s [experimental]) ready.' % (
                self.server.hostname,
                VERSION
                )
            )

    # def __del__ (self):
    #     print 'ftp_channel.__del__()'

    # --------------------------------------------------
    # async-library methods
    # --------------------------------------------------

    def handle_expt (self):
        # this is handled below.  not sure what I could
        # do here to make that code less kludgish.
        pass

    def collect_incoming_data (self, data):
        self.in_buffer = self.in_buffer + data
        if len(self.in_buffer) > 4096:
            # silently truncate really long lines
            # (possible denial-of-service attack)
            self.in_buffer = ''

    def found_terminator (self):

        line = self.in_buffer

        if not len(line):
            return

        sp = string.find (line, ' ')
        if sp != -1:
            line = [line[:sp], line[sp+1:]]
        else:
            line = [line]

        command = string.lower (line[0])
        # watch especially for 'urgent' abort commands.
        if string.find (command, 'abor') != -1:
            # strip off telnet sync chars and the like...
            while command and command[0] not in string.letters:
                command = command[1:]
        fun_name = 'cmd_%s' % command
        if command != 'pass':
            self.log ('<== %s' % repr(self.in_buffer)[1:-1])
        else:
            self.log ('<== %s' % line[0] + ' <password>')
        self.in_buffer = ''
        if not hasattr (self, fun_name):
            self.command_not_understood (line[0])
            return
        fun = getattr (self, fun_name)
        if (not self.authorized) and (command not in ('user', 'pass', 'help', 'quit')):
            self.respond ('530 Please log in with USER and PASS')
        elif (not self.check_command_authorization (command)):
            self.command_not_authorized (command)
        else:
            try:
                result = apply (fun, (line,))
            except:
                self.server.total_exceptions.increment()
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                if self.client_dc:
                    try:
                        self.client_dc.close()
                    except:
                        pass
                self.respond (
                    '451 Server Error: %s, %s: file: %s line: %s' % (
                        t, v, file, line,
                        )
                    )

    closed = 0
    def close (self):
        if not self.closed:
            self.closed = 1
            if self.passive_acceptor:
                self.passive_acceptor.close()
            if self.client_dc:
                self.client_dc.close()
            self.server.closed_sessions.increment()
            asynchat.async_chat.close (self)
    # --------------------------------------------------
    # filesystem interface functions.
    # override these to provide access control or perform
    # other functions.
    # --------------------------------------------------

    def cwd (self, line):
        return self.filesystem.cwd (line[1])

    def cdup (self, line):
        return self.filesystem.cdup()

    def open (self, path, mode):
        return self.filesystem.open (path, mode)

    # returns a producer
    def listdir (self, path, long=0):
        return self.filesystem.listdir (path, long)

    def get_dir_list (self, line, long=0):
        # we need to scan the command line for arguments to '/bin/ls'...
        args = line[1:]
        path_args = []
        for arg in args:
            if arg[0] != '-':
                path_args.append (arg)
            else:
                # ignore arguments
                pass
        if len(path_args) < 1:
            dir = '.'
        else:
            dir = path_args[0]
        return self.listdir (dir, long)
    # --------------------------------------------------
    # authorization methods
    # --------------------------------------------------

    def check_command_authorization (self, command):
        if command in self.write_commands and self.read_only:
            return 0
        else:
            return 1
    # --------------------------------------------------
    # utility methods
    # --------------------------------------------------

    def log (self, message):
        self.server.logger.log (
            self.addr[0],
            '%d %s' % (
                self.addr[1], message
                )
            )

    def respond (self, resp):
        self.log ('==> %s' % resp)
        self.push (resp + '\r\n')

    def command_not_understood (self, command):
        self.respond ("500 '%s': command not understood." % command)

    def command_not_authorized (self, command):
        self.respond (
            "530 You are not authorized to perform the '%s' command" % (
                command
                )
            )

    def make_xmit_channel (self):
        # In PASV mode, the connection may or may _not_ have been made
        # yet.  [although in most cases it is... FTP Explorer being
        # the only exception I've yet seen].  This gets somewhat confusing
        # because things may happen in any order...
        pa = self.passive_acceptor
        if pa:
            if pa.ready:
                # a connection has already been made.
                conn, addr = self.passive_acceptor.ready
                cdc = xmit_channel (self, addr)
                cdc.set_socket (conn)
                cdc.connected = 1
                self.passive_acceptor.close()
                self.passive_acceptor = None
            else:
                # we're still waiting for a connect to the PASV port.
                cdc = xmit_channel (self)
        else:
            # not in PASV mode.
            ip, port = self.client_addr
            cdc = xmit_channel (self, self.client_addr)
            cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
            if self.bind_local_minus_one:
                cdc.bind (('', self.server.port - 1))
            try:
                cdc.connect ((ip, port))
            except socket.error, why:
                self.respond ("425 Can't build data connection")
        self.client_dc = cdc

    # pretty much the same as xmit, but only right on the verge of
    # being worth a merge.
    def make_recv_channel (self, fd):
        pa = self.passive_acceptor
        if pa:
            if pa.ready:
                # a connection has already been made.
                conn, addr = pa.ready
                cdc = recv_channel (self, addr, fd)
                cdc.set_socket (conn)
                cdc.connected = 1
                self.passive_acceptor.close()
                self.passive_acceptor = None
            else:
                # we're still waiting for a connect to the PASV port.
                cdc = recv_channel (self, None, fd)
        else:
            # not in PASV mode.
            ip, port = self.client_addr
            cdc = recv_channel (self, self.client_addr, fd)
            cdc.create_socket (socket.AF_INET, socket.SOCK_STREAM)
            try:
                cdc.connect ((ip, port))
            except socket.error, why:
                self.respond ("425 Can't build data connection")
        self.client_dc = cdc

    type_map = {
        'a': 'ASCII',
        'i': 'Binary',
        'e': 'EBCDIC',
        'l': 'Binary'
        }

    type_mode_map = {
        'a': 't',
        'i': 'b',
        'e': 'b',
        'l': 'b'
        }
    # --------------------------------------------------
    # command methods
    # --------------------------------------------------

    def cmd_type (self, line):
        'specify data transfer type'
        # ascii, ebcdic, image, local <byte size>
        t = string.lower (line[1])
        # no support for EBCDIC
        # if t not in ['a','e','i','l']:
        if t not in ['a','i','l']:
            self.command_not_understood (string.join (line))
        elif t == 'l' and (len(line) > 2 and line[2] != '8'):
            self.respond ('504 Byte size must be 8')
        else:
            self.current_mode = t
            self.respond ('200 Type set to %s.' % self.type_map[t])

    def cmd_quit (self, line):
        'terminate session'
        self.respond ('221 Goodbye.')
        self.close_when_done()

    def cmd_port (self, line):
        'specify data connection port'
        info = string.split (line[1], ',')
        ip = string.join (info[:4], '.')
        port = string.atoi(info[4])*256 + string.atoi(info[5])
        # how many data connections at a time?
        # I'm assuming one for now...
        # TODO: we should (optionally) verify that the
        # ip number belongs to the client.  [wu-ftpd does this?]
        self.client_addr = (ip, port)
        self.respond ('200 PORT command successful.')

    def new_passive_acceptor (self):
        # ensure that only one of these exists at a time.
        if self.passive_acceptor is not None:
            self.passive_acceptor.close()
            self.passive_acceptor = None
        self.passive_acceptor = passive_acceptor (self)
        return self.passive_acceptor

    def cmd_pasv (self, line):
        'prepare for server-to-server transfer'
        pc = self.new_passive_acceptor()
        port = pc.addr[1]
        ip_addr = pc.control_channel.getsockname()[0]
        self.respond (
            '227 Entering Passive Mode (%s,%d,%d)' % (
                string.join (string.split (ip_addr, '.'), ','),
                port/256,
                port%256
                )
            )
        self.client_dc = None

    def cmd_nlst (self, line):
        'give name list of files in directory'
        # ncftp adds the -FC argument for the user-visible 'nlist'
        # command.  We could try to emulate ls flags, but not just yet.
        if '-FC' in line:
            line.remove ('-FC')
        try:
            dir_list_producer = self.get_dir_list (line, 0)
        except os.error, why:
            self.respond ('550 Could not list directory: %s' % repr(why))
            return
        self.respond (
            '150 Opening %s mode data connection for file list' % (
                self.type_map[self.current_mode]
                )
            )
        self.make_xmit_channel()
        self.client_dc.push_with_producer (dir_list_producer)
        self.client_dc.close_when_done()

    def cmd_list (self, line):
        'give list files in a directory'
        try:
            dir_list_producer = self.get_dir_list (line, 1)
        except os.error, why:
            self.respond ('550 Could not list directory: %s' % repr(why))
            return
        self.respond (
            '150 Opening %s mode data connection for file list' % (
                self.type_map[self.current_mode]
                )
            )
        self.make_xmit_channel()
        self.client_dc.push_with_producer (dir_list_producer)
        self.client_dc.close_when_done()

    def cmd_cwd (self, line):
        'change working directory'
        if self.cwd (line):
            self.respond ('250 CWD command successful.')
        else:
            self.respond ('550 No such directory.')

    def cmd_cdup (self, line):
        'change to parent of current working directory'
        if self.cdup(line):
            self.respond ('250 CDUP command successful.')
        else:
            self.respond ('550 No such directory.')

    def cmd_pwd (self, line):
        'print the current working directory'
        self.respond (
            '257 "%s" is the current directory.' % (
                self.filesystem.current_directory()
                )
            )
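The 227 reply above packs the data port into two decimal bytes (port/256 and port%256). A hypothetical client-side sketch recovering (ip, port) from such a reply:

    reply = '227 Entering Passive Mode (192,168,0,10,19,137)'
    inside = reply[string.index (reply, '(')+1 : string.index (reply, ')')]
    nums = string.split (inside, ',')
    ip = string.join (nums[:4], '.')                          # '192.168.0.10'
    port = string.atoi (nums[4])*256 + string.atoi (nums[5])  # 19*256 + 137 == 5001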
    # modification time
    # example output:
    # 213 19960301204320
    def cmd_mdtm (self, line):
        'show last modification time of file'
        filename = line[1]
        if not self.filesystem.isfile (filename):
            self.respond ('550 "%s" is not a file' % filename)
        else:
            mtime = time.gmtime (self.filesystem.stat(filename)[stat.ST_MTIME])
            self.respond (
                '213 %4d%02d%02d%02d%02d%02d' % (
                    mtime[0],
                    mtime[1],
                    mtime[2],
                    mtime[3],
                    mtime[4],
                    mtime[5]
                    )
                )

    def cmd_noop (self, line):
        'do nothing'
        self.respond ('200 NOOP command successful.')

    def cmd_size (self, line):
        'return size of file'
        filename = line[1]
        if not self.filesystem.isfile (filename):
            self.respond ('550 "%s" is not a file' % filename)
        else:
            self.respond (
                '213 %d' % (self.filesystem.stat(filename)[stat.ST_SIZE])
                )

    def cmd_retr (self, line):
        'retrieve a file'
        if len(line) < 2:
            self.command_not_understood (string.join (line))
        else:
            file = line[1]
            if not self.filesystem.isfile (file):
                self.log_info ('checking %s' % file)
                self.respond ('550 No such file')
            else:
                try:
                    # FIXME: for some reason, 'rt' isn't working on win95
                    mode = 'r' + self.type_mode_map[self.current_mode]
                    fd = self.open (file, mode)
                except IOError, why:
                    self.respond ('553 could not open file for reading: %s' % (repr(why)))
                    return
                self.respond (
                    "150 Opening %s mode data connection for file '%s'" % (
                        self.type_map[self.current_mode],
                        file
                        )
                    )
                self.make_xmit_channel()

                if self.restart_position:
                    # try to position the file as requested, but
                    # give up silently on failure (the 'file object'
                    # may not support seek())
                    try:
                        fd.seek (self.restart_position)
                    except:
                        pass
                    self.restart_position = 0

                self.client_dc.push_with_producer (
                    file_producer (self, self.client_dc, fd)
                    )
                self.client_dc.close_when_done()

    def cmd_stor (self, line, mode='wb'):
        'store a file'
        if len (line) < 2:
            self.command_not_understood (string.join (line))
        else:
            if self.restart_position:
                restart_position = 0
                self.respond ('553 restart on STOR not yet supported')
                return
            file = line[1]
            # todo: handle that type flag
            try:
                fd = self.open (file, mode)
            except IOError, why:
                self.respond ('553 could not open file for writing: %s' % (repr(why)))
                return
            self.respond (
                '150 Opening %s connection for %s' % (
                    self.type_map[self.current_mode],
                    file
                    )
                )
            self.make_recv_channel (fd)

    def cmd_abor (self, line):
        'abort operation'
        if self.client_dc:
            self.client_dc.close()
        self.respond ('226 ABOR command successful.')

    def cmd_appe (self, line):
        'append to a file'
        return self.cmd_stor (line, 'ab')

    def cmd_dele (self, line):
        if len (line) != 2:
            self.command_not_understood (string.join (line))
        else:
            file = line[1]
            if self.filesystem.isfile (file):
                try:
                    self.filesystem.unlink (file)
                    self.respond ('250 DELE command successful.')
                except:
                    self.respond ('550 error deleting file.')
            else:
                self.respond ('550 %s: No such file.' % file)

    def cmd_mkd (self, line):
        if len (line) != 2:
            self.command_not_understood (string.join (line))
        else:
            path = line[1]
            try:
                self.filesystem.mkdir (path)
                self.respond ('257 MKD command successful.')
            except:
                self.respond ('550 error creating directory.')

    def cmd_rmd (self, line):
        if len (line) != 2:
            self.command_not_understood (string.join (line))
        else:
            path = line[1]
            try:
                self.filesystem.rmdir (path)
                self.respond ('250 RMD command successful.')
            except:
                self.respond ('550 error removing directory.')

    def cmd_user (self, line):
        'specify user name'
        if len(line) > 1:
            self.user = line[1]
            self.respond ('331 Password required.')
        else:
            self.command_not_understood (string.join (line))

    def cmd_pass (self, line):
        'specify password'
        if len(line) < 2:
            pw = ''
        else:
            pw = line[1]
        result, message, fs = self.server.authorizer.authorize (self, self.user, pw)
        if result:
            self.respond ('230 %s' % message)
            self.filesystem = fs
            self.authorized = 1
            self.log_info ('Successful login: Filesystem=%s' % repr(fs))
        else:
            self.respond ('530 %s' % message)

    def cmd_rest (self, line):
        'restart incomplete transfer'
        try:
            pos = string.atoi (line[1])
        except ValueError:
            self.command_not_understood (string.join (line))
            return
        self.restart_position = pos
        self.respond (
            '350 Restarting at %d. Send STORE or RETRIEVE to initiate transfer.' % pos
            )

    def cmd_stru (self, line):
        'obsolete - set file transfer structure'
        if line[1] in 'fF':
            # f == 'file'
            self.respond ('200 STRU F Ok')
        else:
            self.respond ('504 Unimplemented STRU type')

    def cmd_mode (self, line):
        'obsolete - set file transfer mode'
        if line[1] in 'sS':
            # s == 'stream'
            self.respond ('200 MODE S Ok')
        else:
            self.respond ('502 Unimplemented MODE type')
    # The stat command has two personalities.  Normally it returns status
    # information about the current connection.  But if given an argument,
    # it is equivalent to the LIST command, with the data sent over the
    # control connection.  Strange.  But wuftpd, ftpd, and nt's ftp server
    # all support it.
    #
    ## def cmd_stat (self, line):
    ##     'return status of server'
    ##     pass

    def cmd_syst (self, line):
        'show operating system type of server system'
        # Replying to this command is of questionable utility, because
        # this server does not behave in a predictable way w.r.t. the
        # output of the LIST command.  We emulate Unix ls output, but
        # on win32 the pathname can contain drive information at the front
        # Currently, the combination of ensuring that os.sep == '/'
        # and removing the leading slash when necessary seems to work.
        # [cd'ing to another drive also works]
        #
        # This is how wuftpd responds, and is probably
        # the most expected.  The main purpose of this reply is so that
        # the client knows to expect Unix ls-style LIST output.
        self.respond ('215 UNIX Type: L8')
        # one disadvantage to this is that some client programs
        # assume they can pass args to /bin/ls.
        # a few typical responses:
        # 215 UNIX Type: L8 (wuftpd)
        # 215 Windows_NT version 3.51
        # 215 VMS MultiNet V3.3
        # 500 'SYST': command not understood. (SVR4)
    def cmd_help (self, line):
        'give help information'
        # find all the methods that match 'cmd_xxxx',
        # use their docstrings for the help response.
        attrs = dir (self.__class__)
        help_lines = []
        for attr in attrs:
            if attr[:4] == 'cmd_':
                x = getattr (self, attr)
                if type(x) == type(self.cmd_help):
                    if x.__doc__:
                        help_lines.append ('\t%s\t%s' % (attr[4:], x.__doc__))
        if help_lines:
            self.push ('214-The following commands are recognized\r\n')
            self.push_with_producer (producers.lines_producer (help_lines))
            self.push ('214\r\n')
        else:
            self.push ('214-\r\n\tHelp Unavailable\r\n214\r\n')
class ftp_server (asyncore.dispatcher):
    # override this to spawn a different FTP channel class.
    ftp_channel_class = ftp_channel

    SERVER_IDENT = 'FTP Server (V%s)' % VERSION

    def __init__ (
            self,
            authorizer,
            hostname=None,
            ip='',
            port=21,
            resolver=None,
            logger_object=logger.file_logger (sys.stdout)
            ):
        self.ip = ip
        self.port = port
        self.authorizer = authorizer

        if hostname is None:
            self.hostname = socket.gethostname()
        else:
            self.hostname = hostname

        # statistics
        self.total_sessions = counter()
        self.closed_sessions = counter()
        self.total_files_out = counter()
        self.total_files_in = counter()
        self.total_bytes_out = counter()
        self.total_bytes_in = counter()
        self.total_exceptions = counter()
        #
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)

        self.set_reuse_addr()
        self.bind ((self.ip, self.port))
        self.listen (5)

        if not logger_object:
            logger_object = sys.stdout

        if resolver:
            self.logger = logger.resolving_logger (resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger (logger_object)

        self.log_info (
            'FTP server started at %s\n\tAuthorizer:%s\n\tHostname: %s\n\tPort: %d' % (
                time.ctime(time.time()),
                repr (self.authorizer),
                self.hostname,
                self.port
                )
            )

    def writable (self):
        return 0

    def handle_read (self):
        pass

    def handle_connect (self):
        pass

    def handle_accept (self):
        conn, addr = self.accept()
        self.total_sessions.increment()
        self.log_info ('Incoming connection from %s:%d' % (addr[0], addr[1]))
        self.ftp_channel_class (self, conn, addr)

    # return a producer describing the state of the server
    def status (self):

        def nice_bytes (n):
            return string.join (status_handler.english_bytes (n))

        return producers.lines_producer (
            ['<h2>%s</h2>' % self.SERVER_IDENT,
             '<br>Listening on <b>Host:</b> %s' % self.hostname,
             '<b>Port:</b> %d' % self.port,
             '<br>Sessions',
             '<b>Total:</b> %s' % self.total_sessions,
             '<b>Current:</b> %d' % (self.total_sessions.as_long() - self.closed_sessions.as_long()),
             '<br>Files',
             '<b>Sent:</b> %s' % self.total_files_out,
             '<b>Received:</b> %s' % self.total_files_in,
             '<br>Bytes',
             '<b>Sent:</b> %s' % nice_bytes (self.total_bytes_out.as_long()),
             '<b>Received:</b> %s' % nice_bytes (self.total_bytes_in.as_long()),
             '<br>Exceptions: %s' % self.total_exceptions,
             ]
            )
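A minimal sketch of standing up a read-only server with the dummy_authorizer defined further below (root directory and port are placeholders):

    import asyncore
    server = ftp_server (dummy_authorizer ('/home/ftp'), port=8021)
    asyncore.loop()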
# ======================================================================
# Data Channel Classes
# ======================================================================
# This socket accepts a data connection, used when the server has been
# placed in passive mode. Although the RFC implies that we ought to
# be able to use the same acceptor over and over again, this presents
# a problem: how do we shut it off, so that we are accepting
# connections only when we expect them? [we can't]
#
# wuftpd, and probably all the other servers, solve this by allowing
# only one connection to hit this acceptor. They then close it. Any
# subsequent data-connection command will then try for the default
# port on the client side [which is of course never there]. So the
# 'always-send-PORT/PASV' behavior seems required.
#
# Another note: wuftpd will also be listening on the channel as soon
# as the PASV command is sent. It does not wait for a data command
# first.
# --- we need to queue up a particular behavior:
# 1) xmit : queue up producer[s]
# 2) recv : the file object
#
# It would be nice if we could make both channels the same. Hmmm..
#
class passive_acceptor (asyncore.dispatcher):
    ready = None

    def __init__ (self, control_channel):
        # connect_fun (conn, addr)
        asyncore.dispatcher.__init__ (self)
        self.control_channel = control_channel
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
        # bind to an address on the interface that the
        # control connection is coming from.
        self.bind ((
            self.control_channel.getsockname()[0],
            0
            ))
        self.addr = self.getsockname()
        self.listen (1)

    # def __del__ (self):
    #     print 'passive_acceptor.__del__()'

    def log (self, *ignore):
        pass

    def handle_accept (self):
        conn, addr = self.accept()
        dc = self.control_channel.client_dc
        if dc is not None:
            dc.set_socket (conn)
            dc.addr = addr
            dc.connected = 1
            self.control_channel.passive_acceptor = None
        else:
            self.ready = conn, addr
        self.close()


class xmit_channel (asynchat.async_chat):

    # for an ethernet, you want this to be fairly large, in fact, it
    # _must_ be large for performance comparable to an ftpd.  [64k] we
    # ought to investigate automatically-sized buffers...

    ac_out_buffer_size = 16384
    bytes_out = 0

    def __init__ (self, channel, client_addr=None):
        self.channel = channel
        self.client_addr = client_addr
        asynchat.async_chat.__init__ (self)

    # def __del__ (self):
    #     print 'xmit_channel.__del__()'

    def log (*args):
        pass

    def readable (self):
        return not self.connected

    def writable (self):
        return 1

    def send (self, data):
        result = asynchat.async_chat.send (self, data)
        self.bytes_out = self.bytes_out + result
        return result

    def handle_error (self):
        # usually this is to catch an unexpected disconnect.
        self.log_info ('unexpected disconnect on data xmit channel', 'error')
        try:
            self.close()
        except:
            pass

    # TODO: there's a better way to do this.  we need to be able to
    # put 'events' in the producer fifo.  to do this cleanly we need
    # to reposition the 'producer' fifo as an 'event' fifo.

    def close (self):
        c = self.channel
        s = c.server
        c.client_dc = None
        s.total_files_out.increment()
        s.total_bytes_out.increment (self.bytes_out)
        if not len(self.producer_fifo):
            c.respond ('226 Transfer complete')
        elif not c.closed:
            c.respond ('426 Connection closed; transfer aborted')
        del c
        del s
        del self.channel
        asynchat.async_chat.close (self)
class recv_channel (asyncore.dispatcher):
    def __init__ (self, channel, client_addr, fd):
        self.channel = channel
        self.client_addr = client_addr
        self.fd = fd
        asyncore.dispatcher.__init__ (self)
        self.bytes_in = counter()

    def log (self, *ignore):
        pass

    def handle_connect (self):
        pass

    def writable (self):
        return 0

    def recv (*args):
        result = apply (asyncore.dispatcher.recv, args)
        self = args[0]
        self.bytes_in.increment (len (result))
        return result

    buffer_size = 8192

    def handle_read (self):
        block = self.recv (self.buffer_size)
        if block:
            try:
                self.fd.write (block)
            except IOError:
                self.log_info ('got exception writing block...', 'error')

    def handle_close (self):
        s = self.channel.server
        s.total_files_in.increment()
        s.total_bytes_in.increment (self.bytes_in.as_long())
        self.fd.close()
        self.channel.respond ('226 Transfer complete.')
        self.close()

import filesys
# not much of a doorman! 8^)
class dummy_authorizer:
    def __init__ (self, root='/'):
        self.root = root

    def authorize (self, channel, username, password):
        channel.persona = -1, -1
        channel.read_only = 1
        return 1, 'Ok.', filesys.os_filesystem (self.root)

class anon_authorizer:
    def __init__ (self, root='/'):
        self.root = root

    def authorize (self, channel, username, password):
        if username in ('ftp', 'anonymous'):
            channel.persona = -1, -1
            channel.read_only = 1
            return 1, 'Ok.', filesys.os_filesystem (self.root)
        else:
            return 0, 'Password invalid.', None
# ===========================================================================
# Unix-specific improvements
# ===========================================================================
if os.name == 'posix':

    class unix_authorizer:
        # return a trio of (success, reply_string, filesystem)
        def authorize (self, channel, username, password):
            import crypt
            import pwd
            try:
                info = pwd.getpwnam (username)
            except KeyError:
                return 0, 'No such user.', None
            mangled = info[1]
            if crypt.crypt (password, mangled[:2]) == mangled:
                channel.read_only = 0
                fs = filesys.schizophrenic_unix_filesystem (
                    '/',
                    info[5],
                    persona = (info[2], info[3])
                    )
                return 1, 'Login successful.', fs
            else:
                return 0, 'Password invalid.', None

        def __repr__ (self):
            return '<standard unix authorizer>'

    # simple anonymous ftp support
    class unix_authorizer_with_anonymous (unix_authorizer):
        def __init__ (self, root=None, real_users=0):
            self.root = root
            self.real_users = real_users

        def authorize (self, channel, username, password):
            if string.lower(username) in ['anonymous', 'ftp']:
                import pwd
                try:
                    # ok, here we run into lots of confusion.
                    # on some os', anon runs under user 'nobody',
                    # on others as 'ftp'.  ownership is also critical.
                    # need to investigate.
                    # linux: new linuxen seem to have nobody's UID=-1,
                    #    which is an illegal value.  Use ftp.
                    ftp_user_info = pwd.getpwnam ('ftp')
                    if string.lower(os.uname()[0]) == 'linux':
                        nobody_user_info = pwd.getpwnam ('ftp')
                    else:
                        nobody_user_info = pwd.getpwnam ('nobody')
                    channel.read_only = 1
                    if self.root is None:
                        self.root = ftp_user_info[5]
                    fs = filesys.unix_filesystem (self.root, '/')
                    return 1, 'Anonymous Login Successful', fs
                except KeyError:
                    return 0, 'Anonymous account not set up', None
            elif self.real_users:
                return unix_authorizer.authorize (
                    self,
                    channel,
                    username,
                    password
                    )
            else:
                return 0, 'User logins not allowed', None
class file_producer:
    block_size = 16384

    def __init__ (self, server, dc, fd):
        self.fd = fd
        self.done = 0

    def more (self):
        if self.done:
            return ''
        else:
            block = self.fd.read (self.block_size)
            if not block:
                self.fd.close()
                self.done = 1
            return block
# usage: ftp_server /PATH/TO/FTP/ROOT PORT
# for example:
# $ ftp_server /home/users/ftp 8021
if os.name == 'posix':
    def test (port='8021'):
        import sys
        fs = ftp_server (
            unix_authorizer(),
            port=string.atoi (port)
            )
        try:
            asyncore.loop()
        except KeyboardInterrupt:
            fs.log_info ('FTP server shutting down. (received SIGINT)', 'warning')
            # close everything down on SIGINT.
            # of course this should be a cleaner shutdown.
            asyncore.close_all()

    if __name__ == '__main__':
        test (sys.argv[1])
# not unix
else:
    def test ():
        fs = ftp_server (dummy_authorizer())
    if __name__ == '__main__':
        test ()
# this is the command list from the wuftpd man page
# '*' means we've implemented it.
# '!' requires write access
#
command_documentation = {
    'abor':   'abort previous command',                                     #*
    'acct':   'specify account (ignored)',
    'allo':   'allocate storage (vacuously)',
    'appe':   'append to a file',                                           #*!
    'cdup':   'change to parent of current working directory',              #*
    'cwd':    'change working directory',                                   #*
    'dele':   'delete a file',                                              #!
    'help':   'give help information',                                      #*
    'list':   'give list files in a directory',                             #*
    'mkd':    'make a directory',                                           #!
    'mdtm':   'show last modification time of file',                        #*
    'mode':   'specify data transfer mode',
    'nlst':   'give name list of files in directory',                       #*
    'noop':   'do nothing',                                                 #*
    'pass':   'specify password',                                           #*
    'pasv':   'prepare for server-to-server transfer',                      #*
    'port':   'specify data connection port',                               #*
    'pwd':    'print the current working directory',                        #*
    'quit':   'terminate session',                                          #*
    'rest':   'restart incomplete transfer',                                #*
    'retr':   'retrieve a file',                                            #*
    'rmd':    'remove a directory',                                         #!
    'rnfr':   'specify rename-from file name',                              #!
    'rnto':   'specify rename-to file name',                                #!
    'site':   'non-standard commands (see next section)',
    'size':   'return size of file',                                        #*
    'stat':   'return status of server',                                    #*
    'stor':   'store a file',                                               #*!
    'stou':   'store a file with a unique name',                            #!
    'stru':   'specify data transfer structure',
    'syst':   'show operating system type of server system',                #*
    'type':   'specify data transfer type',                                 #*
    'user':   'specify user name',                                          #*
    'xcup':   'change to parent of current working directory (deprecated)',
    'xcwd':   'change working directory (deprecated)',
    'xmkd':   'make a directory (deprecated)',                              #!
    'xpwd':   'print the current working directory (deprecated)',
    'xrmd':   'remove a directory (deprecated)',                            #!
    }
# debugging aid (linux)
def get_vm_size ():
    return string.atoi (string.split (open ('/proc/self/stat').readline())[22])

def print_vm ():
    print 'vm: %8dk' % (get_vm_size()/1024)
lib/python/ZServer/medusa/http_date.py
deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
import regex
import string
import time

def concat (*args):
    return string.joinfields (args, '')

def join (seq, field=' '):
    return string.joinfields (seq, field)

def group (s):
    return '\\(' + s + '\\)'

short_days = ['sun','mon','tue','wed','thu','fri','sat']
long_days = ['sunday','monday','tuesday','wednesday','thursday','friday','saturday']

short_day_reg = group (join (short_days, '\\|'))
long_day_reg = group (join (long_days, '\\|'))

daymap = {}
for i in range(7):
    daymap[short_days[i]] = i
    daymap[long_days[i]] = i

hms_reg = join (3 * [group('[0-9][0-9]')], ':')

months = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']

monmap = {}
for i in range(12):
    monmap[months[i]] = i+1

months_reg = group (join (months, '\\|'))
# From draft-ietf-http-v11-spec-07.txt/3.3.1
# Sun, 06 Nov 1994 08:49:37 GMT ; RFC 822, updated by RFC 1123
# Sunday, 06-Nov-94 08:49:37 GMT ; RFC 850, obsoleted by RFC 1036
# Sun Nov 6 08:49:37 1994 ; ANSI C's asctime() format
# rfc822 format
rfc822_date = join (
    [concat (short_day_reg, ','),   # day
     group ('[0-9][0-9]?'),         # date
     months_reg,                    # month
     group ('[0-9]+'),              # year
     hms_reg,                       # hour minute second
     'gmt'
     ],
    ' '
    )

rfc822_reg = regex.compile (rfc822_date)

def unpack_rfc822 ():
    g = rfc822_reg.group
    a = string.atoi
    return (
        a(g(4)),        # year
        monmap[g(3)],   # month
        a(g(2)),        # day
        a(g(5)),        # hour
        a(g(6)),        # minute
        a(g(7)),        # second
        0,
        0,
        0
        )
# rfc850 format
rfc850_date = join (
    [concat (long_day_reg, ','),
     join (
         [group ('[0-9][0-9]?'),
          months_reg,
          group ('[0-9]+')
          ],
         '-'
         ),
     hms_reg,
     'gmt'
     ],
    ' '
    )

rfc850_reg = regex.compile (rfc850_date)

# they actually unpack the same way
def unpack_rfc850 ():
    g = rfc850_reg.group
    a = string.atoi
    return (
        a(g(4)),        # year
        monmap[g(3)],   # month
        a(g(2)),        # day
        a(g(5)),        # hour
        a(g(6)),        # minute
        a(g(7)),        # second
        0,
        0,
        0
        )
# parsdate.parsedate - ~700/sec.
# parse_http_date - ~1333/sec.
weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']

monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
             'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']

def build_http_date (when):
    year, month, day, hh, mm, ss, wd, y, z = time.gmtime (when)
    return "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
        weekdayname[wd],
        day, monthname[month], year,
        hh, mm, ss
        )

def parse_http_date (d):
    d = string.lower (d)
    tz = time.timezone
    if rfc850_reg.match (d) == len(d):
        retval = int (time.mktime (unpack_rfc850()) - tz)
    elif rfc822_reg.match (d) == len(d):
        retval = int (time.mktime (unpack_rfc822()) - tz)
    else:
        return 0
    # Thanks to Craig Silverstein <csilvers@google.com> for pointing
    # out the DST discrepancy
    if time.daylight and time.localtime(retval)[-1] == 1:
        # DST correction
        retval = retval + (tz - time.altzone)
    return retval
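A round-trip sketch (hypothetical timestamp): format with build_http_date, then parse back; unparseable input yields 0.

    import time
    now = int (time.time())
    stamp = build_http_date (now)             # e.g. 'Wed, 25 Apr 2001 12:00:00 GMT'
    print parse_http_date (stamp) == now      # should hold, DST edge cases aside
    print parse_http_date ('not a date')      # 0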
lib/python/ZServer/medusa/http_server.py
deleted 100644 → 0
#! /usr/local/bin/python
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
# Copyright 1996-2000 by Sam Rushing
# All Rights Reserved.
#
RCS_ID = '$Id: http_server.py,v 1.20 2001/02/13 21:18:54 brian Exp $'
# python modules
import os
import regex
import re
import socket
import stat
import string
import sys
import time

# async modules
import asyncore
import asynchat

# medusa modules
import http_date
import producers
import status_handler
import logger

VERSION_STRING = string.split(RCS_ID)[2]

from counter import counter
# ===========================================================================
# Request Object
# ===========================================================================
class http_request:

    # default reply code
    reply_code = 200

    request_counter = counter()

    # Whether to automatically use chunked encoding when
    #
    #   HTTP version is 1.1
    #   Content-Length is not set
    #   Chunked encoding is not already in effect
    #
    # If your clients are having trouble, you might want to disable this.
    use_chunked = 1

    # by default, this request object ignores user data.
    collector = None

    def __init__ (self, *args):
        # unpack information about the request
        (self.channel, self.request,
         self.command, self.uri, self.version,
         self.header) = args

        self.outgoing = fifo()
        self.reply_headers = {
            'Server'    : 'Medusa/%s' % VERSION_STRING,
            'Date'      : http_date.build_http_date (time.time())
            }
        self.request_number = http_request.request_counter.increment()
        self._split_uri = None
        self._header_cache = {}
    # --------------------------------------------------
    # reply header management
    # --------------------------------------------------
    def __setitem__ (self, key, value):
        self.reply_headers[key] = value

    def __getitem__ (self, key):
        return self.reply_headers[key]

    def has_key (self, key):
        return self.reply_headers.has_key (key)

    def build_reply_header (self):
        return string.join (
            [self.response(self.reply_code)] + map (
                lambda x: '%s: %s' % x,
                self.reply_headers.items()
                ),
            '\r\n'
            ) + '\r\n\r\n'
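Because of these methods a request object can be treated as a mapping of its reply headers; a hypothetical handler fragment:

    request['Content-Type'] = 'text/html'
    request['Content-Length'] = content_length    # hypothetical value
    if not request.has_key ('Last-Modified'):
        request['Last-Modified'] = http_date.build_http_date (time.time())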
    # --------------------------------------------------
    # split a uri
    # --------------------------------------------------

    # <path>;<params>?<query>#<fragment>
    path_regex = regex.compile (
        #          path         params        query       fragment
        '\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\)?\(#.*\)?'
        )
def split_uri (self):
if self._split_uri is None:
if self.path_regex.match (self.uri) != len(self.uri):
raise ValueError, "Broken URI"
else:
self._split_uri = map (lambda i,r=self.path_regex: r.group(i), range(1,5))
return self._split_uri
def get_header_with_regex (self, head_reg, group):
for line in self.header:
if head_reg.match (line) == len(line):
return head_reg.group(group)
return ''
def get_header (self, header):
header = string.lower (header)
hc = self._header_cache
if not hc.has_key (header):
h = header + '
:
'
hl = len(h)
for line in self.header:
if string.lower (line[:hl]) == h:
r = line[hl:]
hc[header] = r
return r
hc[header] = None
return None
else:
return hc[header]
# --------------------------------------------------
# user data
# --------------------------------------------------
def collect_incoming_data (self, data):
if self.collector:
self.collector.collect_incoming_data (data)
else:
self.log_info(
'
Dropping
%
d
bytes
of
incoming
request
data
' % len(data),
'
warning
'
)
def found_terminator (self):
if self.collector:
self.collector.found_terminator()
else:
self.log_info (
'
Unexpected
end
-
of
-
record
for
incoming
request
',
'
warning
'
)
def push (self, thing):
if type(thing) == type(''):
self.outgoing.push (producers.simple_producer (thing))
else:
self.outgoing.push (thing)
def response (self, code=200):
message = self.responses[code]
self.reply_code = code
return '
HTTP
/%
s
%
d
%
s
' % (self.version, code, message)
def error (self, code):
self.reply_code = code
message = self.responses[code]
s = self.DEFAULT_ERROR_MESSAGE % {
'
code
': code,
'
message
': message,
}
self['
Content
-
Length
'] = len(s)
self['
Content
-
Type
'] = '
text
/
html
'
# make an error reply
self.push (s)
self.done()
# can also be used for empty replies
reply_now = error
    def done (self):
        "finalize this transaction - send output to the http channel"

        # ----------------------------------------
        # persistent connection management
        # ----------------------------------------

        #  --- BUCKLE UP! ----

        connection = string.lower (get_header (CONNECTION, self.header))

        close_it = 0
        wrap_in_chunking = 0

        if self.version == '1.0':
            if connection == 'keep-alive':
                if not self.has_key ('Content-Length'):
                    close_it = 1
                else:
                    self['Connection'] = 'Keep-Alive'
            else:
                close_it = 1
        elif self.version == '1.1':
            if connection == 'close':
                close_it = 1
            elif not self.has_key ('Content-Length'):
                if self.has_key ('Transfer-Encoding'):
                    if not self['Transfer-Encoding'] == 'chunked':
                        close_it = 1
                elif self.use_chunked:
                    self['Transfer-Encoding'] = 'chunked'
                    wrap_in_chunking = 1
                else:
                    close_it = 1
        elif self.version is None:
            # Although we don't *really* support http/0.9 (because we'd have to
            # use \r\n as a terminator, and it would just yuck up a lot of stuff)
            # it's very common for developers to not want to type a version number
            # when using telnet to debug a server.
            close_it = 1

        outgoing_header = producers.simple_producer (self.build_reply_header())

        if close_it:
            self['Connection'] = 'close'

        if wrap_in_chunking:
            outgoing_producer = producers.chunked_producer (
                    producers.composite_producer (self.outgoing)
                    )
            # prepend the header
            outgoing_producer = producers.composite_producer (
                    fifo([outgoing_header, outgoing_producer])
                    )
        else:
            # prepend the header
            self.outgoing.push_front (outgoing_header)
            outgoing_producer = producers.composite_producer (self.outgoing)

        # apply a few final transformations to the output
        self.channel.push_with_producer (
                # globbing gives us large packets
                producers.globbing_producer (
                        # hooking lets us log the number of bytes sent
                        producers.hooked_producer (
                                outgoing_producer,
                                self.log
                                )
                        )
                )

        self.channel.current_request = None

        if close_it:
            self.channel.close_when_done()
    def log_date_string (self, when):
        logtime = time.localtime (when)
        return time.strftime ('%d/', logtime) + \
               http_date.monthname[logtime[1]] + \
               time.strftime ('/%Y:%H:%M:%S ', logtime) + \
               tz_for_log
    def log (self, bytes):
        user_agent = self.get_header ('user-agent')
        if not user_agent:
            user_agent = ''
        referer = self.get_header ('referer')
        if not referer:
            referer = ''
        self.channel.server.logger.log (
                self.channel.addr[0],
                ' - - [%s] "%s" %d %d "%s" "%s"\n' % (
                        # self.channel.addr[1],
                        self.log_date_string (time.time()),
                        self.request,
                        self.reply_code,
                        bytes,
                        referer,
                        user_agent
                        )
                )
    responses = {
            100: "Continue",
            101: "Switching Protocols",
            200: "OK",
            201: "Created",
            202: "Accepted",
            203: "Non-Authoritative Information",
            204: "No Content",
            205: "Reset Content",
            206: "Partial Content",
            300: "Multiple Choices",
            301: "Moved Permanently",
            302: "Moved Temporarily",
            303: "See Other",
            304: "Not Modified",
            305: "Use Proxy",
            400: "Bad Request",
            401: "Unauthorized",
            402: "Payment Required",
            403: "Forbidden",
            404: "Not Found",
            405: "Method Not Allowed",
            406: "Not Acceptable",
            407: "Proxy Authentication Required",
            408: "Request Time-out",
            409: "Conflict",
            410: "Gone",
            411: "Length Required",
            412: "Precondition Failed",
            413: "Request Entity Too Large",
            414: "Request-URI Too Large",
            415: "Unsupported Media Type",
            500: "Internal Server Error",
            501: "Not Implemented",
            502: "Bad Gateway",
            503: "Service Unavailable",
            504: "Gateway Time-out",
            505: "HTTP Version not supported"
            }
    # Default error message
    DEFAULT_ERROR_MESSAGE = string.join (
            ['<head>',
             '<title>Error response</title>',
             '</head>',
             '<body>',
             '<h1>Error response</h1>',
             '<p>Error code %(code)d.',
             '<p>Message: %(message)s.',
             '</body>',
             ''
             ],
            '\r\n'
            )
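# Illustrative sketch (editor's addition, not part of the original module):
# http_request.log() above emits one Apache-style access-log line per
# request.  The values below are made up to show the shape of the output.
def _example_log_line ():
    return ' - - [%s] "%s" %d %d "%s" "%s"\n' % (
            '25/Apr/2001:12:00:00 +0000',       # log_date_string (time.time())
            'GET /index.html HTTP/1.0',         # self.request (assumed value)
            200,                                # self.reply_code
            1024,                               # bytes sent
            'http://example.com/',              # Referer (assumed value)
            'Mozilla/4.0'                       # User-Agent (assumed value)
            )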
# ===========================================================================
#                            HTTP Channel Object
# ===========================================================================

class http_channel (asynchat.async_chat):

    # use a larger default output buffer
    ac_out_buffer_size = 1<<16

    current_request = None
    channel_counter = counter()

    def __init__ (self, server, conn, addr):
        self.channel_number = http_channel.channel_counter.increment()
        self.request_counter = counter()
        asynchat.async_chat.__init__ (self, conn)
        self.server = server
        self.addr = addr
        self.set_terminator ('\r\n\r\n')
        self.in_buffer = ''
        self.creation_time = int (time.time())
        self.check_maintenance()

    def __repr__ (self):
        ar = asynchat.async_chat.__repr__(self)[1:-1]
        return '<%s channel#: %s requests:%s>' % (
                ar,
                self.channel_number,
                self.request_counter
                )

    # Channel Counter, Maintenance Interval...
    maintenance_interval = 500

    def check_maintenance (self):
        if not self.channel_number % self.maintenance_interval:
            self.maintenance()

    def maintenance (self):
        self.kill_zombies()

    # 30-minute zombie timeout.  status_handler also knows how to kill zombies.
    zombie_timeout = 30 * 60

    def kill_zombies (self):
        now = int (time.time())
        for channel in asyncore.socket_map.values():
            if channel.__class__ == self.__class__:
                if (now - channel.creation_time) > channel.zombie_timeout:
                    channel.close()
    # --------------------------------------------------
    # send/recv overrides, good place for instrumentation.
    # --------------------------------------------------

    # this information needs to get into the request object,
    # so that it may log correctly.
    def send (self, data):
        result = asynchat.async_chat.send (self, data)
        self.server.bytes_out.increment (len(data))
        return result

    def recv (self, buffer_size):
        try:
            result = asynchat.async_chat.recv (self, buffer_size)
            self.server.bytes_in.increment (len(result))
            return result
        except MemoryError:
            # --- Save a Trip to Your Service Provider ---
            # It's possible for a process to eat up all the memory of
            # the machine, and put it in an extremely wedged state,
            # where medusa keeps running and can't be shut down.  This
            # is where MemoryError tends to get thrown, though of
            # course it could get thrown elsewhere.
            sys.exit ("Out of Memory!")

    def handle_error (self):
        t, v = sys.exc_info()[:2]
        if t is SystemExit:
            raise t, v
        else:
            asynchat.async_chat.handle_error (self)

    def log (self, *args):
        pass
    # --------------------------------------------------
    # async_chat methods
    # --------------------------------------------------

    def collect_incoming_data (self, data):
        if self.current_request:
            # we are receiving data (probably POST data) for a request
            self.current_request.collect_incoming_data (data)
        else:
            # we are receiving header (request) data
            self.in_buffer = self.in_buffer + data

    def found_terminator (self):
        if self.current_request:
            self.current_request.found_terminator()
        else:
            header = self.in_buffer
            self.in_buffer = ''
            lines = string.split (header, '\r\n')

            # --------------------------------------------------
            # crack the request header
            # --------------------------------------------------

            while lines and not lines[0]:
                # as per the suggestion of http-1.1 section 4.1, (and
                # Eric Parker <eparker@zyvex.com>), ignore leading
                # blank lines (buggy browsers tack them onto the end of
                # POST requests)
                lines = lines[1:]

            if not lines:
                self.close_when_done()
                return

            request = lines[0]

            command, uri, version = crack_request (request)
            header = join_headers (lines[1:])

            r = http_request (self, request, command, uri, version, header)
            self.request_counter.increment()
            self.server.total_requests.increment()

            if command is None:
                self.log_info ('Bad HTTP request: %s' % request, 'error')
                r.error (400)
                return

            # --------------------------------------------------
            # handler selection and dispatch
            # --------------------------------------------------
            for h in self.server.handlers:
                if h.match (r):
                    try:
                        self.current_request = r
                        # This isn't used anywhere.
                        # r.handler = h # CYCLE
                        h.handle_request (r)
                    except:
                        self.server.exceptions.increment()
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info (
                                'Server Error: %s, %s: file: %s line: %s' % (t, v, file, line),
                                'error')
                        try:
                            r.error (500)
                        except:
                            pass
                    return

            # no handlers, so complain
            r.error (404)
    def writable (self):
        # this is just the normal async_chat 'writable', here for comparison
        return self.ac_out_buffer or len(self.producer_fifo)

    def writable_for_proxy (self):
        # this version of writable supports the idea of a 'stalled' producer
        # [i.e., it's not ready to produce any output yet] This is needed by
        # the proxy, which will be waiting for the magic combination of
        # 1) hostname resolved
        # 2) connection made
        # 3) data available.
        if self.ac_out_buffer:
            return 1
        elif len(self.producer_fifo):
            p = self.producer_fifo.first()
            if hasattr (p, 'stalled'):
                return not p.stalled()
            else:
                return 1
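# Sketch (editor's addition): the dispatch loop in found_terminator() above
# only requires two methods of a handler, match() and handle_request().
# This hello_handler is hypothetical, not part of Medusa; it assumes the
# http_request attribute 'uri' and the item-assignment shown in error().
class hello_handler:

    def match (self, request):
        # claim every URI under /hello
        return request.uri[:6] == '/hello'

    def handle_request (self, request):
        body = '<html><body>hello</body></html>'
        request['Content-Type'] = 'text/html'
        request['Content-Length'] = len(body)
        request.push (body)
        request.done()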
# ===========================================================================
#                            HTTP Server Object
# ===========================================================================

class http_server (asyncore.dispatcher):

    SERVER_IDENT = 'HTTP Server (V%s)' % VERSION_STRING

    channel_class = http_channel

    def __init__ (self, ip, port, resolver=None, logger_object=None):
        self.ip = ip
        self.port = port
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)

        self.handlers = []

        if not logger_object:
            logger_object = logger.file_logger (sys.stdout)

        self.set_reuse_addr()
        self.bind ((ip, port))

        # lower this to 5 if your OS complains
        self.listen (1024)

        host, port = self.socket.getsockname()
        if not ip:
            self.log_info ('Computing default hostname', 'warning')
            ip = socket.gethostbyname (socket.gethostname())
        try:
            self.server_name = socket.gethostbyaddr (ip)[0]
        except socket.error:
            self.log_info ('Cannot do reverse lookup', 'warning')
            self.server_name = ip       # use the IP address as the "hostname"

        self.server_port = port
        self.total_clients = counter()
        self.total_requests = counter()
        self.exceptions = counter()
        self.bytes_out = counter()
        self.bytes_in  = counter()

        if not logger_object:
            logger_object = logger.file_logger (sys.stdout)

        if resolver:
            self.logger = logger.resolving_logger (resolver, logger_object)
        else:
            self.logger = logger.unresolving_logger (logger_object)

        self.log_info (
                'Medusa (V%s) started at %s'
                '\n\tHostname: %s'
                '\n\tPort:%d'
                '\n' % (
                        VERSION_STRING,
                        time.ctime (time.time()),
                        self.server_name,
                        port,
                        )
                )
    def writable (self):
        return 0

    def handle_read (self):
        pass

    def readable (self):
        return self.accepting

    def handle_connect (self):
        pass

    def handle_accept (self):
        self.total_clients.increment()
        try:
            conn, addr = self.accept()
        except socket.error:
            # linux: on rare occasions we get a bogus socket back from
            # accept.  socketmodule.c:makesockaddr complains that the
            # address family is unknown.  We don't want the whole server
            # to shut down because of this.
            self.log_info ('warning: server accept() threw an exception', 'warning')
            return
        except TypeError:
            # unpack non-sequence.  this can happen when a read event
            # fires on a listening socket, but when we call accept()
            # we get EWOULDBLOCK, so dispatcher.accept() returns None.
            # Seen on FreeBSD3.
            self.log_info ('warning: server accept() threw EWOULDBLOCK', 'warning')
            return

        self.channel_class (self, conn, addr)
    def install_handler (self, handler, back=0):
        if back:
            self.handlers.append (handler)
        else:
            self.handlers.insert (0, handler)

    def remove_handler (self, handler):
        self.handlers.remove (handler)
    def status (self):
        def nice_bytes (n):
            return string.join (status_handler.english_bytes (n))

        handler_stats = filter (None, map (maybe_status, self.handlers))

        if self.total_clients:
            ratio = self.total_requests.as_long() / float(self.total_clients.as_long())
        else:
            ratio = 0.0

        return producers.composite_producer (
                fifo ([producers.lines_producer (
                        ['<h2>%s</h2>' % self.SERVER_IDENT,
                         '<br>Listening on: <b>Host:</b> %s' % self.server_name,
                         '<b>Port:</b> %d' % self.port,
                         '<p><ul>'
                         '<li>Total <b>Clients:</b> %s' % self.total_clients,
                         '<b>Requests:</b> %s' % self.total_requests,
                         '<b>Requests/Client:</b> %.1f' % (ratio),
                         '<li>Total <b>Bytes In:</b> %s' % (nice_bytes (self.bytes_in.as_long())),
                         '<b>Bytes Out:</b> %s' % (nice_bytes (self.bytes_out.as_long())),
                         '<li>Total <b>Exceptions:</b> %s' % self.exceptions,
                         '</ul><p>'
                         '<b>Extension List</b><ul>',
                         ])] + handler_stats + [producers.simple_producer ('</ul>')]
                      )
                )
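# Sketch (editor's addition): install_handler() above prepends by default,
# so the newest handler wins the match() race in found_terminator().  The
# same ordering rule, demonstrated on a bare list:
def _example_handler_order ():
    handlers = []
    def install (handler, back=0):
        if back:
            handlers.append (handler)
        else:
            handlers.insert (0, handler)
    install ('fallback', back=1)        # consulted last
    install ('api')                     # consulted first
    assert handlers == ['api', 'fallback']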
def maybe_status (thing):
    if hasattr (thing, 'status'):
        return thing.status()
    else:
        return None
CONNECTION = regex.compile ('Connection: \(.*\)', regex.casefold)
# merge multi-line headers
# [486dx2: ~500/sec]
def join_headers (headers):
    r = []
    for i in range(len(headers)):
        if headers[i][0] in ' \t':
            r[-1] = r[-1] + headers[i][1:]
        else:
            r.append (headers[i])
    return r
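# Sketch (editor's addition): join_headers() folds RFC-822 continuation
# lines (leading space or tab) into the previous header.  Runnable inside
# this module:
def _example_join_headers ():
    folded = join_headers (['Host: example.com',
                            'X-Long: part one,',
                            '\tpart two'])
    assert folded == ['Host: example.com', 'X-Long: part one,part two']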
def get_header (head_reg, lines, group=1):
    for line in lines:
        if head_reg.match (line) == len(line):
            return head_reg.group (group)
    return ''
REQUEST = re.compile ('([^ ]+) (?:[^ :?#]+://[^ ?#/]*)?([^ ]+)(( HTTP/([0-9.]+))$|$)')

def crack_request (r):
    m = REQUEST.match (r)
    if m is not None:
        return string.lower (m.group (1)), m.group(2), m.group(5)
    else:
        return None, None, None
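# Sketch (editor's addition): what crack_request() yields for a few
# representative request lines, per the REQUEST pattern above.  Runnable
# inside this module:
def _example_crack_request ():
    assert crack_request ('GET /index.html HTTP/1.0') == ('get', '/index.html', '1.0')
    assert crack_request ('GET /index.html') == ('get', '/index.html', None)   # http/0.9 style
    assert crack_request ('bogus-no-uri') == (None, None, None)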
class fifo:
    def __init__ (self, list=None):
        if not list:
            self.list = []
        else:
            self.list = list

    def __len__ (self):
        return len(self.list)

    def first (self):
        return self.list[0]

    def push_front (self, object):
        self.list.insert (0, object)

    def push (self, data):
        self.list.append (data)

    def pop (self):
        if self.list:
            result = self.list[0]
            del self.list[0]
            return (1, result)
        else:
            return (0, None)
def compute_timezone_for_log ():
    if time.daylight:
        tz = time.altzone
    else:
        tz = time.timezone
    if tz > 0:
        neg = 1
    else:
        neg = 0
        tz = -tz
    h, rem = divmod (tz, 3600)
    m, rem = divmod (rem, 60)
    if neg:
        return '-%02d%02d' % (h, m)
    else:
        return '+%02d%02d' % (h, m)

# if you run this program over a TZ change boundary, this will be invalid.
tz_for_log = compute_timezone_for_log()
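# Sketch (editor's addition): the same arithmetic as
# compute_timezone_for_log(), applied to a fixed offset.  18000 seconds
# west of UTC (US Eastern, no DST) renders as '-0500' in the log format.
def _example_tz ():
    tz = 18000
    h, rem = divmod (tz, 3600)
    m, rem = divmod (rem, 60)
    assert '-%02d%02d' % (h, m) == '-0500'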
if __name__ == '__main__':
    import sys
    if len(sys.argv) < 2:
        print 'usage: %s <root> <port>' % (sys.argv[0])
    else:
        import monitor
        import filesys
        import default_handler
        import status_handler
        import ftp_server
        import chat_server
        import resolver
        import logger
        rs = resolver.caching_resolver ('127.0.0.1')
        lg = logger.file_logger (sys.stdout)
        ms = monitor.secure_monitor_server ('fnord', '127.0.0.1', 9999)
        fs = filesys.os_filesystem (sys.argv[1])
        dh = default_handler.default_handler (fs)
        hs = http_server ('', string.atoi (sys.argv[2]), rs, lg)
        hs.install_handler (dh)
        ftp = ftp_server.ftp_server (
                ftp_server.dummy_authorizer (sys.argv[1]),
                port=8021,
                resolver=rs,
                logger_object=lg)
        cs = chat_server.chat_server ('', 7777)
        sh = status_handler.status_extension ([hs, ms, ftp, cs, rs])
        hs.install_handler (sh)
        if ('-p' in sys.argv):
            def profile_loop ():
                try:
                    asyncore.loop()
                except KeyboardInterrupt:
                    pass
            import profile
            profile.run ('profile_loop()', 'profile.out')
        else:
            asyncore.loop()
lib/python/ZServer/medusa/logger.py deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
import asynchat
import socket
import string
import time        # these three are for the rotating logger
import os          # |
import stat        # v

#
# three types of log:
# 1) file
#    with optional flushing.  Also, one that rotates the log.
# 2) socket
#    dump output directly to a socket connection. [how do we
#    keep it open?]
# 3) syslog
#    log to syslog via tcp.  this is a per-line protocol.
#

#
# The 'standard' interface to a logging object is simply
# log_object.log (message)
#

# a file-like object that captures output, and
# makes sure to flush it always...  this could
# be connected to:
#  o    stdio file
#  o    low-level file
#  o    socket channel
#  o    syslog output...

class file_logger:

    # pass this either a path or a file object.
    def __init__ (self, file, flush=1, mode='a'):
        if type(file) == type(''):
            if (file == '-'):
                import sys
                self.file = sys.stdout
            else:
                self.file = open (file, mode)
        else:
            self.file = file
        self.do_flush = flush

    def __repr__ (self):
        return '<file logger: %s>' % self.file

    def write (self, data):
        self.file.write (data)
        self.maybe_flush()

    def writeline (self, line):
        self.file.writeline (line)
        self.maybe_flush()

    def writelines (self, lines):
        self.file.writelines (lines)
        self.maybe_flush()

    def maybe_flush (self):
        if self.do_flush:
            self.file.flush()

    def flush (self):
        self.file.flush()

    def softspace (self, *args):
        pass

    def log (self, message):
        if message[-1] not in ('\r', '\n'):
            self.write (message + '\n')
        else:
            self.write (message)
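# Sketch (editor's addition) of typical file_logger usage; the log path is
# hypothetical.  Passing '-' selects sys.stdout, and log() adds a newline
# only when the message lacks one.
def _example_file_logger ():
    lg = file_logger ('-')                           # log to stdout
    lg.log ('starting up')                           # '\n' appended automatically
    lg2 = file_logger ('/tmp/medusa.log', flush=0)   # hypothetical path, buffered
    lg2.log ('buffered entry\n')
    lg2.flush()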
# like a file_logger, but it must be attached to a filename.
# When the log gets too full, or a certain time has passed,
# it backs up the log and starts a new one.  Note that backing
# up the log is done via "mv" because anything else (cp, gzip)
# would take time, during which medusa would do nothing else.

class rotating_file_logger (file_logger):

    # If freq is non-None we back up "daily", "weekly", or "monthly".
    # Else if maxsize is non-None we back up whenever the log gets
    # too big.  If both are None we never back up.
    def __init__ (self, file, freq=None, maxsize=None, flush=1, mode='a'):
        self.filename = file
        self.mode = mode
        self.file = open (file, mode)
        self.freq = freq
        self.maxsize = maxsize
        self.rotate_when = self.next_backup (self.freq)
        self.do_flush = flush

    def __repr__ (self):
        return '<rotating-file logger: %s>' % self.file

    # We back up at midnight every 1) day, 2) monday, or 3) 1st of month
    def next_backup (self, freq):
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime (time.time())
        if freq == 'daily':
            return time.mktime (yr, mo, day+1, 0, 0, 0, 0, 0, -1)
        elif freq == 'weekly':
            return time.mktime (yr, mo, day-wd+7, 0, 0, 0, 0, 0, -1)   # wd(monday)==0
        elif freq == 'monthly':
            return time.mktime (yr, mo+1, 1, 0, 0, 0, 0, 0, -1)
        else:
            return None     # not a date-based backup

    def maybe_flush (self):
        # rotate first if necessary
        self.maybe_rotate()
        if self.do_flush:       # from file_logger()
            self.file.flush()

    def maybe_rotate (self):
        if self.freq and time.time() > self.rotate_when:
            self.rotate()
            self.rotate_when = self.next_backup (self.freq)
        elif self.maxsize:
            # rotate when we get too big
            try:
                if os.stat (self.filename)[stat.ST_SIZE] > self.maxsize:
                    self.rotate()
            except os.error:
                # file not found, probably
                self.rotate()       # will create a new file

    def rotate (self):
        (yr, mo, day, hr, min, sec, wd, jday, dst) = time.localtime (time.time())
        try:
            self.file.close()
            newname = '%s.ends%04d%02d%02d' % (self.filename, yr, mo, day)
            try:
                open (newname, "r").close()     # check if file exists
                newname = newname + "-%02d%02d%02d" % (hr, min, sec)
            except:     # YEARMODY is unique
                pass
            os.rename (self.filename, newname)
            self.file = open (self.filename, self.mode)
        except:
            pass
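# Sketch (editor's addition): the two rotation policies, with hypothetical
# paths.  A date-based freq takes precedence over a maxsize cap.
def _example_rotating_logger ():
    daily = rotating_file_logger ('/tmp/access.log', freq='daily')
    capped = rotating_file_logger ('/tmp/big.log', maxsize=1<<20)   # ~1MB cap
    daily.log ('request logged')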
# syslog is a line-oriented log protocol - this class would be
# appropriate for FTP or HTTP logs, but not for dumping stderr to.

# TODO: a simple safety wrapper that will ensure that the line sent
# to syslog is reasonable.

# TODO: async version of syslog_client: now, log entries use blocking
# send()

import m_syslog
syslog_logger = m_syslog.syslog_client

class syslog_logger (m_syslog.syslog_client):

    svc_name = 'medusa'
    pid_str  = str (os.getpid())

    def __init__ (self, address, facility='user'):
        m_syslog.syslog_client.__init__ (self, address)
        self.facility = m_syslog.facility_names[facility]
        self.address = address

    def __repr__ (self):
        return '<syslog logger address=%s>' % (repr (self.address))

    def log (self, message):
        m_syslog.syslog_client.log (
                self,
                '%s[%s]: %s' % (self.svc_name, self.pid_str, message),
                facility=self.facility,
                priority=m_syslog.LOG_INFO
                )
# log to a stream socket, asynchronously

class socket_logger (asynchat.async_chat):

    def __init__ (self, address):
        if type(address) == type(''):
            self.create_socket (socket.AF_UNIX, socket.SOCK_STREAM)
        else:
            self.create_socket (socket.AF_INET, socket.SOCK_STREAM)

        self.connect (address)
        self.address = address

    def __repr__ (self):
        return '<socket logger: address=%s>' % (self.address)

    def log (self, message):
        # queue via async_chat's push (the original called push on the raw
        # socket object, which has no such method)
        if message[-2:] != '\r\n':
            self.push (message + '\r\n')
        else:
            self.push (message)
# log to multiple places
class multi_logger:
    def __init__ (self, loggers):
        self.loggers = loggers

    def __repr__ (self):
        return '<multi logger: %s>' % (repr (self.loggers))

    def log (self, message):
        for logger in self.loggers:
            logger.log (message)
class resolving_logger:
    """Feed (ip, message) combinations into this logger to get a
    resolved hostname in front of the message.  The message will not
    be logged until the PTR request finishes (or fails)."""

    def __init__ (self, resolver, logger):
        self.resolver = resolver
        self.logger = logger

    class logger_thunk:
        def __init__ (self, message, logger):
            self.message = message
            self.logger = logger

        def __call__ (self, host, ttl, answer):
            if not answer:
                answer = host
            self.logger.log ('%s:%s' % (answer, self.message))

    def log (self, ip, message):
        self.resolver.resolve_ptr (
                ip,
                self.logger_thunk (
                        message,
                        self.logger
                        )
                )
class unresolving_logger:
    "Just in case you don't want to resolve"
    def __init__ (self, logger):
        self.logger = logger

    def log (self, ip, message):
        self.logger.log ('%s:%s' % (ip, message))
def strip_eol (line):
    while line and line[-1] in '\r\n':
        line = line[:-1]
    return line

class tail_logger:
    "Keep track of the last <size> log messages"
    def __init__ (self, logger, size=500):
        self.size = size
        self.logger = logger
        self.messages = []

    def log (self, message):
        self.messages.append (strip_eol (message))
        if len (self.messages) > self.size:
            del self.messages[0]
        self.logger.log (message)
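# Sketch (editor's addition): the loggers above compose freely, since each
# one only needs a log() method on its downstream object.  The path is
# hypothetical.
def _example_composition ():
    base = file_logger ('/tmp/medusa.log')      # hypothetical path
    tail = tail_logger (base, size=100)         # remembers the last 100 lines
    both = multi_logger ([tail, file_logger ('-')])
    both.log ('one message, two destinations')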
lib/python/ZServer/medusa/m_syslog.py deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
# ======================================================================
# Copyright 1997 by Sam Rushing
#
# All Rights Reserved
#
# Permission to use, copy, modify, and distribute this software and
# its documentation for any purpose and without fee is hereby
# granted, provided that the above copyright notice appear in all
# copies and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of Sam
# Rushing not be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior
# permission.
#
# SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
# NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
# CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
# OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
# NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# ======================================================================
"""socket interface to unix syslog.
On Unix, there are usually two ways of getting to syslog: via a
local unix-domain socket, or via the TCP service.
Usually "/dev/log" is the unix domain socket. This may be different
for other systems.
>>> my_client = syslog_client ('/dev/log')
Otherwise, just use the UDP version, port 514.
>>> my_client = syslog_client (('my_log_host', 514))
On win32, you will have to use the UDP version. Note that
you can use this to log to other hosts (and indeed, multiple
hosts).
This module is not a drop-in replacement for the python
<syslog> extension module - the interface is different.
Usage:
>>> c = syslog_client()
>>> c = syslog_client ('/strange/non_standard_log_location')
>>> c = syslog_client (('other_host.com', 514))
>>> c.log ('testing', facility='local0', priority='debug')
"""
# TODO: support named-pipe syslog.
# [see ftp://sunsite.unc.edu/pub/Linux/system/Daemons/syslog-fifo.tar.z]
# from <linux/sys/syslog.h>:
# ===========================================================================
# priorities/facilities are encoded into a single 32-bit quantity, where the
# bottom 3 bits are the priority (0-7) and the top 28 bits are the facility
# (0-big number). Both the priorities and the facilities map roughly
# one-to-one to strings in the syslogd(8) source code. This mapping is
# included in this file.
#
# priorities (these are ordered)
LOG_EMERG       = 0     # system is unusable
LOG_ALERT       = 1     # action must be taken immediately
LOG_CRIT        = 2     # critical conditions
LOG_ERR         = 3     # error conditions
LOG_WARNING     = 4     # warning conditions
LOG_NOTICE      = 5     # normal but significant condition
LOG_INFO        = 6     # informational
LOG_DEBUG       = 7     # debug-level messages

# facility codes
LOG_KERN        = 0     # kernel messages
LOG_USER        = 1     # random user-level messages
LOG_MAIL        = 2     # mail system
LOG_DAEMON      = 3     # system daemons
LOG_AUTH        = 4     # security/authorization messages
LOG_SYSLOG      = 5     # messages generated internally by syslogd
LOG_LPR         = 6     # line printer subsystem
LOG_NEWS        = 7     # network news subsystem
LOG_UUCP        = 8     # UUCP subsystem
LOG_CRON        = 9     # clock daemon
LOG_AUTHPRIV    = 10    # security/authorization messages (private)

# other codes through 15 reserved for system use
LOG_LOCAL0      = 16    # reserved for local use
LOG_LOCAL1      = 17    # reserved for local use
LOG_LOCAL2      = 18    # reserved for local use
LOG_LOCAL3      = 19    # reserved for local use
LOG_LOCAL4      = 20    # reserved for local use
LOG_LOCAL5      = 21    # reserved for local use
LOG_LOCAL6      = 22    # reserved for local use
LOG_LOCAL7      = 23    # reserved for local use
priority_names = {
    "alert":    LOG_ALERT,
    "crit":     LOG_CRIT,
    "debug":    LOG_DEBUG,
    "emerg":    LOG_EMERG,
    "err":      LOG_ERR,
    "error":    LOG_ERR,        # DEPRECATED
    "info":     LOG_INFO,
    "notice":   LOG_NOTICE,
    "panic":    LOG_EMERG,      # DEPRECATED
    "warn":     LOG_WARNING,    # DEPRECATED
    "warning":  LOG_WARNING,
    }

facility_names = {
    "auth":     LOG_AUTH,
    "authpriv": LOG_AUTHPRIV,
    "cron":     LOG_CRON,
    "daemon":   LOG_DAEMON,
    "kern":     LOG_KERN,
    "lpr":      LOG_LPR,
    "mail":     LOG_MAIL,
    "news":     LOG_NEWS,
    "security": LOG_AUTH,       # DEPRECATED
    "syslog":   LOG_SYSLOG,
    "user":     LOG_USER,
    "uucp":     LOG_UUCP,
    "local0":   LOG_LOCAL0,
    "local1":   LOG_LOCAL1,
    "local2":   LOG_LOCAL2,
    "local3":   LOG_LOCAL3,
    "local4":   LOG_LOCAL4,
    "local5":   LOG_LOCAL5,
    "local6":   LOG_LOCAL6,
    "local7":   LOG_LOCAL7,
    }
import socket

class syslog_client:

    def __init__ (self, address='/dev/log'):
        self.address = address
        if type (address) == type(''):
            try:
                # APUE 13.4.2 specifies /dev/log as a datagram socket
                self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_DGRAM)
                self.socket.connect (address)
            except:
                # older linux may create as stream socket
                self.socket = socket.socket (socket.AF_UNIX, socket.SOCK_STREAM)
                self.socket.connect (address)
            self.unix = 1
        else:
            self.socket = socket.socket (socket.AF_INET, socket.SOCK_DGRAM)
            self.unix = 0

    # curious: when talking to the unix-domain '/dev/log' socket, a
    # zero-terminator seems to be required.  this string is placed
    # into a class variable so that it can be overridden if
    # necessary.
    log_format_string = '<%d>%s\000'

    def log (self, message, facility=LOG_USER, priority=LOG_INFO):
        message = self.log_format_string % (
                self.encode_priority (facility, priority),
                message
                )
        if self.unix:
            self.socket.send (message)
        else:
            self.socket.sendto (message, self.address)

    def encode_priority (self, facility, priority):
        if type (facility) == type(''):
            facility = facility_names[facility]
        if type (priority) == type(''):
            priority = priority_names[priority]
        return (facility << 3) | priority

    def close (self):
        if self.unix:
            self.socket.close()
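# Sketch (editor's addition): encode_priority() packs the facility into the
# high bits and the priority into the low three, per the syslog <PRI>
# encoding, so 'local0'/'debug' yields a datagram starting with '<135>'.
def _example_priority ():
    assert (LOG_LOCAL0 << 3) | LOG_DEBUG == 135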
if __name__ == '__main__':
    """
    Unit test for syslog_client.  Set up for the test by:
    * tail -f /var/log/allstuf (to see the "normal" log messages).
    * Running the test_logger.py script with a junk file name (which
      will be opened as a Unix-domain socket).  "Custom" log messages
      will go here.
    * Run this script, passing the same junk file name.
    * Check that the "bogus" test throws, and that none of the rest do.
    * Check that the 'default' and 'UDP' messages show up in the tail.
    * Check that the 'non-std' message shows up in the test_logger
      console.
    * Finally, kill off the tail and test_logger, and clean up the
      socket file.
    """
    import sys, traceback

    if len (sys.argv) != 2:
        print "Usage: syslog.py localSocketFilename"
        sys.exit()

    def test_client (desc, address=None):
        try:
            if address:
                client = syslog_client (address)
            else:
                client = syslog_client ()
        except:
            print 'syslog_client() [%s] ctor threw' % desc
            traceback.print_exc()
            return
        try:
            client.log ('testing syslog_client() [%s]' % desc,
                        facility='local0', priority='debug')
            print 'syslog_client.log() [%s] did not throw' % desc
        except:
            print 'syslog_client.log() [%s] threw' % desc
            traceback.print_exc()

    test_client ('default')
    test_client ('bogus file', '/some/bogus/logsocket')
    test_client ('nonstd file', sys.argv[1])
    test_client ('UDP', ('localhost', 514))
lib/python/ZServer/medusa/max_sockets.py deleted 100644 → 0
import socket
import select

# several factors here we might want to test:
# 1) max we can create
# 2) max we can bind
# 3) max we can listen on
# 4) max we can connect

def max_server_sockets():
    sl = []
    while 1:
        try:
            s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            s.bind (('', 0))
            s.listen (5)
            sl.append (s)
        except:
            break
    num = len(sl)
    for s in sl:
        s.close()
    del sl
    return num

def max_client_sockets():
    # make a server socket
    server = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    server.bind (('', 9999))
    server.listen (5)
    sl = []
    while 1:
        try:
            s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            s.connect (('', 9999))
            conn, addr = server.accept()
            sl.append ((s, conn))
        except:
            break
    num = len(sl)
    for s, c in sl:
        s.close()
        c.close()
    del sl
    return num

def max_select_sockets():
    sl = []
    while 1:
        try:
            num = len(sl)
            # Increase exponentially (int() added: range() needs an integer).
            for i in range (int (1 + len(sl) * 0.05)):
                s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
                s.bind (('', 0))
                s.listen (5)
                sl.append (s)
            select.select (sl, [], [], 0)
        except:
            break
    for s in sl:
        s.close()
    del sl
    return num
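# Sketch (editor's addition) of running the probes; note that
# max_client_sockets() binds port 9999, which must be free, and each probe
# deliberately exhausts descriptors before cleaning up.
def _example_probe ():
    return {
        'server': max_server_sockets(),
        'select': max_select_sockets(),
        }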
lib/python/ZServer/medusa/medusa_gif.py deleted 100644 → 0
# -*- Mode: Python -*-
# the medusa icon as a python source file.
width = 97
height = 61

# The icon itself is one long escaped binary string, beginning as below.
# The remaining escaped GIF bytes were mangled beyond recovery by the page
# extraction and are not reproduced here.
data = 'GIF89aa\000=\000\204\000\000\000\000\000\255\255\255'
lib/python/ZServer/medusa/mime_type_table.py deleted 100644 → 0
# -*- Python -*-
# Converted by ./convert_mime_type_table.py from:
# /usr/src2/apache_1.2b6/conf/mime.types
#
content_type_map = \
  {
    'ai': 'application/postscript',
    'aif': 'audio/x-aiff',
    'aifc': 'audio/x-aiff',
    'aiff': 'audio/x-aiff',
    'au': 'audio/basic',
    'avi': 'video/x-msvideo',
    'bcpio': 'application/x-bcpio',
    'bin': 'application/octet-stream',
    'cdf': 'application/x-netcdf',
    'class': 'application/octet-stream',
    'cpio': 'application/x-cpio',
    'cpt': 'application/mac-compactpro',
    'csh': 'application/x-csh',
    'dcr': 'application/x-director',
    'dir': 'application/x-director',
    'dms': 'application/octet-stream',
    'doc': 'application/msword',
    'dvi': 'application/x-dvi',
    'dxr': 'application/x-director',
    'eps': 'application/postscript',
    'etx': 'text/x-setext',
    'exe': 'application/octet-stream',
    'gif': 'image/gif',
    'gtar': 'application/x-gtar',
    'gz': 'application/x-gzip',
    'hdf': 'application/x-hdf',
    'hqx': 'application/mac-binhex40',
    'htm': 'text/html',
    'html': 'text/html',
    'ice': 'x-conference/x-cooltalk',
    'ief': 'image/ief',
    'jpe': 'image/jpeg',
    'jpeg': 'image/jpeg',
    'jpg': 'image/jpeg',
    'kar': 'audio/midi',
    'latex': 'application/x-latex',
    'lha': 'application/octet-stream',
    'lzh': 'application/octet-stream',
    'man': 'application/x-troff-man',
    'me': 'application/x-troff-me',
    'mid': 'audio/midi',
    'midi': 'audio/midi',
    'mif': 'application/x-mif',
    'mov': 'video/quicktime',
    'movie': 'video/x-sgi-movie',
    'mp2': 'audio/mpeg',
    'mpe': 'video/mpeg',
    'mpeg': 'video/mpeg',
    'mpg': 'video/mpeg',
    'mpga': 'audio/mpeg',
    'mp3': 'audio/mpeg',
    'ms': 'application/x-troff-ms',
    'nc': 'application/x-netcdf',
    'oda': 'application/oda',
    'pbm': 'image/x-portable-bitmap',
    'pdb': 'chemical/x-pdb',
    'pdf': 'application/pdf',
    'pgm': 'image/x-portable-graymap',
    'png': 'image/png',
    'pnm': 'image/x-portable-anymap',
    'ppm': 'image/x-portable-pixmap',
    'ppt': 'application/powerpoint',
    'ps': 'application/postscript',
    'qt': 'video/quicktime',
    'ra': 'audio/x-realaudio',
    'ram': 'audio/x-pn-realaudio',
    'ras': 'image/x-cmu-raster',
    'rgb': 'image/x-rgb',
    'roff': 'application/x-troff',
    'rpm': 'audio/x-pn-realaudio-plugin',
    'rtf': 'application/rtf',
    'rtx': 'text/richtext',
    'sgm': 'text/x-sgml',
    'sgml': 'text/x-sgml',
    'sh': 'application/x-sh',
    'shar': 'application/x-shar',
    'sit': 'application/x-stuffit',
    'skd': 'application/x-koan',
    'skm': 'application/x-koan',
    'skp': 'application/x-koan',
    'skt': 'application/x-koan',
    'snd': 'audio/basic',
    'src': 'application/x-wais-source',
    'sv4cpio': 'application/x-sv4cpio',
    'sv4crc': 'application/x-sv4crc',
    't': 'application/x-troff',
    'tar': 'application/x-tar',
    'tcl': 'application/x-tcl',
    'tex': 'application/x-tex',
    'texi': 'application/x-texinfo',
    'texinfo': 'application/x-texinfo',
    'tif': 'image/tiff',
    'tiff': 'image/tiff',
    'tr': 'application/x-troff',
    'tsv': 'text/tab-separated-values',
    'txt': 'text/plain',
    'ustar': 'application/x-ustar',
    'vcd': 'application/x-cdlink',
    'vrml': 'x-world/x-vrml',
    'wav': 'audio/x-wav',
    'wrl': 'x-world/x-vrml',
    'xbm': 'image/x-xbitmap',
    'xpm': 'image/x-xpixmap',
    'xwd': 'image/x-xwindowdump',
    'xyz': 'chemical/x-pdb',
    'zip': 'application/zip',
  }
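# Sketch (editor's addition): the keys above are bare extensions without
# the dot, so a lookup helper has to strip and lower-case it first.  The
# fallback default below is an assumption, not part of this table.
import os, string

def guess_content_type (path, default='application/octet-stream'):
    # 'index.HTML' -> 'html' -> 'text/html'
    ext = string.lower (os.path.splitext (path)[1][1:])
    return content_type_map.get (ext, default)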
lib/python/ZServer/medusa/monitor.py deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
# Author: Sam Rushing <rushing@nightmare.com>
#
# python REPL channel.
#
RCS_ID = '$Id: monitor.py,v 1.9 2000/07/21 18:59:08 shane Exp $'

import md5
import socket
import string
import sys
import time
import traceback

VERSION = string.split (RCS_ID)[2]

import asyncore
import asynchat

from counter import counter
import producers

class monitor_channel (asynchat.async_chat):

    try_linemode = 1

    def __init__ (self, server, sock, addr):
        asynchat.async_chat.__init__ (self, sock)
        self.server = server
        self.addr = addr
        self.set_terminator ('\r\n')
        self.data = ''
        # local bindings specific to this channel
        self.local_env = sys.modules['__main__'].__dict__.copy()
        self.push ('Python ' + sys.version + '\r\n')
        self.push (sys.copyright + '\r\n')
        self.push ('Welcome to %s\r\n' % self)
        self.push ("[Hint: try 'from __main__ import *']\r\n")
        self.prompt()
        self.number = server.total_sessions.as_long()
        self.line_counter = counter()
        self.multi_line = []
    def handle_connect (self):
        # send IAC DO LINEMODE
        self.push ('\377\375\"')

    def close (self):
        self.server.closed_sessions.increment()
        asynchat.async_chat.close (self)

    def prompt (self):
        self.push ('>>> ')

    def collect_incoming_data (self, data):
        self.data = self.data + data
        if len(self.data) > 1024:
            # denial of service.
            self.push ('BCNU\r\n')
            self.close_when_done()

    def found_terminator (self):
        line = self.clean_line (self.data)
        self.data = ''
        self.line_counter.increment()
        # check for special case inputs...
        if not line and not self.multi_line:
            self.prompt()
            return
        if line in ['\004', 'exit']:
            self.push ('BCNU\r\n')
            self.close_when_done()
            return
        oldout = sys.stdout
        olderr = sys.stderr
        try:
            p = output_producer (self, olderr)
            sys.stdout = p
            sys.stderr = p
            try:
                # this is, of course, a blocking operation.
                # if you wanted to thread this, you would have
                # to synchronize, etc... and treat the output
                # like a pipe.  Not Fun.
                #
                # try eval first.  If that fails, try exec.  If that fails,
                # hurl.
                try:
                    if self.multi_line:
                        # oh, this is horrible...
                        raise SyntaxError
                    co = compile (line, repr(self), 'eval')
                    result = eval (co, self.local_env)
                    method = 'eval'
                    if result is not None:
                        print repr(result)
                    self.local_env['_'] = result
                except SyntaxError:
                    try:
                        if self.multi_line:
                            if line and line[0] in [' ', '\t']:
                                self.multi_line.append (line)
                                self.push ('... ')
                                return
                            else:
                                self.multi_line.append (line)
                                line = string.join (self.multi_line, '\n')
                                co = compile (line, repr(self), 'exec')
                                self.multi_line = []
                        else:
                            co = compile (line, repr(self), 'exec')
                    except SyntaxError, why:
                        if why[0] == 'unexpected EOF while parsing':
                            self.push ('... ')
                            self.multi_line.append (line)
                            return
                    exec co in self.local_env
                    method = 'exec'
            except:
                method = 'exception'
                self.multi_line = []
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                self.log_info ('%s %s %s' % (t, v, tbinfo), 'warning')
                traceback.print_exc()
                tbinfo = None
        finally:
            sys.stdout = oldout
            sys.stderr = olderr
        self.log_info ('%s:%s (%s)> %s' % (
                self.number,
                self.line_counter,
                method,
                repr(line))
                )
        self.push_with_producer (p)
        self.prompt()
    # for now, we ignore any telnet option stuff sent to
    # us, and we process the backspace key ourselves.
    # gee, it would be fun to write a full-blown line-editing
    # environment, etc...
    def clean_line (self, line):
        chars = []
        for ch in line:
            oc = ord(ch)
            if oc < 127:
                if oc in [8, 177]:
                    # backspace
                    chars = chars[:-1]
                else:
                    chars.append (ch)
        return string.join (chars, '')
class monitor_server (asyncore.dispatcher):

    SERVER_IDENT = 'Monitor Server (V%s)' % VERSION

    channel_class = monitor_channel

    def __init__ (self, hostname='127.0.0.1', port=8023):
        self.hostname = hostname
        self.port = port
        self.create_socket (socket.AF_INET, socket.SOCK_STREAM)
        self.set_reuse_addr()
        self.bind ((hostname, port))
        self.log_info ('%s started on port %d' % (self.SERVER_IDENT, port))
        self.listen (5)
        self.closed = 0
        self.failed_auths = 0
        self.total_sessions = counter()
        self.closed_sessions = counter()

    def writable (self):
        return 0

    def handle_accept (self):
        conn, addr = self.accept()
        self.log_info ('Incoming monitor connection from %s:%d' % addr)
        self.channel_class (self, conn, addr)
        self.total_sessions.increment()

    def status (self):
        return producers.simple_producer (
                '<h2>%s</h2>' % self.SERVER_IDENT
                + '<br><b>Total Sessions:</b> %s' % self.total_sessions
                + '<br><b>Current Sessions:</b> %d' % (
                        self.total_sessions.as_long() - self.closed_sessions.as_long()
                        )
                )
def hex_digest (s):
    m = md5.md5()
    m.update (s)
    return string.joinfields (
            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
            '',
            )
class secure_monitor_channel (monitor_channel):

    authorized = 0

    def __init__ (self, server, sock, addr):
        asynchat.async_chat.__init__ (self, sock)
        self.server = server
        self.addr = addr
        self.set_terminator ('\r\n')
        self.data = ''
        # local bindings specific to this channel
        self.local_env = {}
        # send timestamp string
        self.timestamp = str (time.time())
        self.count = 0
        self.line_counter = counter()
        self.number = int (server.total_sessions.as_long())
        self.multi_line = []
        self.push (self.timestamp + '\r\n')

    def found_terminator (self):
        if not self.authorized:
            if hex_digest ('%s%s' % (self.timestamp, self.server.password)) != self.data:
                self.log_info ('%s: failed authorization' % self, 'warning')
                self.server.failed_auths = self.server.failed_auths + 1
                self.close()
            else:
                self.authorized = 1
                self.push ('Python ' + sys.version + '\r\n')
                self.push (sys.copyright + '\r\n')
                self.push ('Welcome to %s\r\n' % self)
                self.prompt()
                self.data = ''
        else:
            monitor_channel.found_terminator (self)
class secure_encrypted_monitor_channel (secure_monitor_channel):
    "Wrap send() and recv() with a stream cipher"

    def __init__ (self, server, conn, addr):
        key = server.password
        self.outgoing = server.cipher.new (key)
        self.incoming = server.cipher.new (key)
        secure_monitor_channel.__init__ (self, server, conn, addr)

    def send (self, data):
        # send the encrypted data instead
        ed = self.outgoing.encrypt (data)
        return secure_monitor_channel.send (self, ed)

    def recv (self, block_size):
        data = secure_monitor_channel.recv (self, block_size)
        if data:
            dd = self.incoming.decrypt (data)
            return dd
        else:
            return data
class secure_monitor_server (monitor_server):

    channel_class = secure_monitor_channel

    def __init__ (self, password, hostname='', port=8023):
        monitor_server.__init__ (self, hostname, port)
        self.password = password

    def status (self):
        p = monitor_server.status (self)
        # kludge
        p.data = p.data + ('<br><b>Failed Authorizations:</b> %d' % self.failed_auths)
        return p
# don't try to print from within any of the methods
# of this object. 8^)

class output_producer:
    def __init__ (self, channel, real_stderr):
        self.channel = channel
        self.data = ''
        # use _this_ for debug output
        self.stderr = real_stderr

    def check_data (self):
        if len(self.data) > 1<<16:
            # runaway output, close it.
            self.channel.close()

    def write (self, data):
        lines = string.splitfields (data, '\n')
        data = string.join (lines, '\r\n')
        self.data = self.data + data
        self.check_data()

    def writeline (self, line):
        self.data = self.data + line + '\r\n'
        self.check_data()

    def writelines (self, lines):
        self.data = self.data + string.joinfields (
                lines,
                '\r\n'
                ) + '\r\n'
        self.check_data()

    def ready (self):
        return (len (self.data) > 0)

    def flush (self):
        pass

    def softspace (self, *args):
        pass

    def more (self):
        if self.data:
            result = self.data[:512]
            self.data = self.data[512:]
            return result
        else:
            return ''
if __name__ == '__main__':
    import string
    import sys
    if '-s' in sys.argv:
        sys.argv.remove ('-s')
        print 'Enter password: ',
        password = raw_input()
    else:
        password = None

    if '-e' in sys.argv:
        sys.argv.remove ('-e')
        encrypt = 1
    else:
        encrypt = 0

    print sys.argv
    if len(sys.argv) > 1:
        port = string.atoi (sys.argv[1])
    else:
        port = 8023

    if password is not None:
        s = secure_monitor_server (password, '', port)
        if encrypt:
            s.channel_class = secure_encrypted_monitor_channel
            import sapphire
            s.cipher = sapphire
    else:
        s = monitor_server ('', port)

    asyncore.loop()
lib/python/ZServer/medusa/monitor_client.py deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
# monitor client, unix version.
import asyncore
import asynchat
import regsub
import socket
import string
import sys
import os

import md5
import time

class stdin_channel (asyncore.file_dispatcher):
    def handle_read (self):
        data = self.recv (512)
        if not data:
            print '\nclosed.'
            self.sock_channel.close()
            try:
                self.close()
            except:
                pass

        data = regsub.gsub ('\n', '\r\n', data)
        self.sock_channel.push (data)

    def writable (self):
        return 0

    def log (self, *ignore):
        pass
class monitor_client (asynchat.async_chat):
    def __init__ (self, password, addr=('', 8023), socket_type=socket.AF_INET):
        asynchat.async_chat.__init__ (self)
        self.create_socket (socket_type, socket.SOCK_STREAM)
        self.terminator = '\r\n'
        self.connect (addr)
        self.sent_auth = 0
        self.timestamp = ''
        self.password = password

    def collect_incoming_data (self, data):
        if not self.sent_auth:
            self.timestamp = self.timestamp + data
        else:
            sys.stdout.write (data)
            sys.stdout.flush()

    def found_terminator (self):
        if not self.sent_auth:
            self.push (hex_digest (self.timestamp + self.password) + '\r\n')
            self.sent_auth = 1
        else:
            print

    def handle_close (self):
        # close all the channels, which will make the standard main
        # loop exit.
        map (lambda x: x.close(), asyncore.socket_map.values())

    def log (self, *ignore):
        pass
class encrypted_monitor_client (monitor_client):
    "Wrap push() and recv() with a stream cipher"

    def init_cipher (self, cipher, key):
        self.outgoing = cipher.new (key)
        self.incoming = cipher.new (key)

    def push (self, data):
        # push the encrypted data instead
        return monitor_client.push (self, self.outgoing.encrypt (data))

    def recv (self, block_size):
        data = monitor_client.recv (self, block_size)
        if data:
            return self.incoming.decrypt (data)
        else:
            return data
def hex_digest (s):
    m = md5.md5()
    m.update (s)
    return string.join (
            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
            '',
            )
if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'Usage: %s host port' % sys.argv[0]
        sys.exit (0)

    if ('-e' in sys.argv):
        encrypt = 1
        sys.argv.remove ('-e')
    else:
        encrypt = 0

    sys.stderr.write ('Enter Password: ')
    sys.stderr.flush()
    import os
    try:
        os.system ('stty -echo')
        p = raw_input()
        print
    finally:
        os.system ('stty echo')
    stdin = stdin_channel (0)
    if len(sys.argv) > 1:
        if encrypt:
            client = encrypted_monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
            import sapphire
            client.init_cipher (sapphire, p)
        else:
            client = monitor_client (p, (sys.argv[1], string.atoi (sys.argv[2])))
    else:
        # default to local host, 'standard' port
        client = monitor_client (p)
    asyncore.loop()
lib/python/ZServer/medusa/monitor_client_win32.py deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
# monitor client, win32 version
# since we can't do select() on stdin/stdout, we simply
# use threads and blocking sockets. <sigh>
import regsub
import socket
import string
import sys
import thread
import md5

def hex_digest (s):
    m = md5.md5()
    m.update (s)
    return string.join (
            map (lambda x: hex (ord (x))[2:], map (None, m.digest())),
            '',
            )

def reader (lock, sock, password):
    # first grab the timestamp
    ts = sock.recv (1024)[:-2]
    sock.send (hex_digest (ts + password) + '\r\n')
    while 1:
        d = sock.recv (1024)
        if not d:
            lock.release()
            print 'Connection closed.  Hit <return> to exit'
            thread.exit()
        sys.stdout.write (d)
        sys.stdout.flush()

def writer (lock, sock, barrel="just kidding"):
    while lock.locked():
        sock.send (
                sys.stdin.readline()[:-1] + '\r\n'
                )

if __name__ == '__main__':
    if len(sys.argv) == 1:
        print 'Usage: %s host port'
        sys.exit (0)
    print 'Enter Password: ',
    p = raw_input()
    s = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
    s.connect ((sys.argv[1], string.atoi (sys.argv[2])))
    l = thread.allocate_lock()
    l.acquire()
    thread.start_new_thread (reader, (l, s, p))
    writer (l, s)
lib/python/ZServer/medusa/producers.py deleted 100644 → 0
# -*- Mode: Python; tab-width: 4 -*-
RCS_ID = '$Id: producers.py,v 1.7 2000/06/02 14:22:48 brian Exp $'

import string

"""
A collection of producers.
Each producer implements a particular feature:  They can be combined
in various ways to get interesting and useful behaviors.

For example, you can feed dynamically-produced output into the compressing
producer, then wrap this with the 'chunked' transfer-encoding producer.
"""

class simple_producer:
    "producer for a string"

    def __init__ (self, data, buffer_size=1024):
        self.data = data
        self.buffer_size = buffer_size

    def more (self):
        if len (self.data) > self.buffer_size:
            result = self.data[:self.buffer_size]
            self.data = self.data[self.buffer_size:]
            return result
        else:
            result = self.data
            self.data = ''
            return result
class scanning_producer:
    "like simple_producer, but more efficient for large strings"

    def __init__ (self, data, buffer_size=1024):
        self.data = data
        self.buffer_size = buffer_size
        self.pos = 0

    def more (self):
        if self.pos < len(self.data):
            lp = self.pos
            rp = min (
                    len(self.data),
                    self.pos + self.buffer_size
                    )
            result = self.data[lp:rp]
            self.pos = self.pos + len(result)
            return result
        else:
            return ''
class lines_producer:
    "producer for a list of lines"

    def __init__ (self, lines):
        self.lines = lines

    def ready (self):
        return len(self.lines)

    def more (self):
        if self.lines:
            chunk = self.lines[:50]
            self.lines = self.lines[50:]
            return string.join (chunk, '\r\n') + '\r\n'
        else:
            return ''
class buffer_list_producer:
    "producer for a list of buffers"

    # i.e., data == string.join (buffers, '')

    def __init__ (self, buffers):
        self.index = 0
        self.buffers = buffers

    def more (self):
        if self.index >= len(self.buffers):
            return ''
        else:
            data = self.buffers[self.index]
            self.index = self.index + 1
            return data
class file_producer:
    "producer wrapper for file[-like] objects"

    # match http_channel's outgoing buffer size
    out_buffer_size = 1<<16

    def __init__ (self, file):
        self.done = 0
        self.file = file

    def more (self):
        if self.done:
            return ''
        else:
            data = self.file.read (self.out_buffer_size)
            if not data:
                self.file.close()
                del self.file
                self.done = 1
                return ''
            else:
                return data
# A simple output producer.  This one does not [yet] have
# the safety feature builtin to the monitor channel:  runaway
# output will not be caught.

# don't try to print from within any of the methods
# of this object.

class output_producer:
    "Acts like an output file; suitable for capturing sys.stdout"

    def __init__ (self):
        self.data = ''

    def write (self, data):
        lines = string.splitfields (data, '\n')
        data = string.join (lines, '\r\n')
        self.data = self.data + data

    def writeline (self, line):
        self.data = self.data + line + '\r\n'

    def writelines (self, lines):
        self.data = self.data + string.joinfields (
                lines,
                '\r\n'
                ) + '\r\n'

    def ready (self):
        return (len (self.data) > 0)

    def flush (self):
        pass

    def softspace (self, *args):
        pass

    def more (self):
        if self.data:
            result = self.data[:512]
            self.data = self.data[512:]
            return result
        else:
            return ''
class composite_producer:
    "combine a fifo of producers into one"

    def __init__ (self, producers):
        self.producers = producers

    def more (self):
        while len(self.producers):
            p = self.producers.first()
            d = p.more()
            if d:
                return d
            else:
                self.producers.pop()
        else:
            return ''
class globbing_producer:
    """
    'glob' the output from a producer into a particular buffer size.
    helps reduce the number of calls to send().  [this appears to
    gain about 30% performance on requests to a single channel]
    """

    def __init__ (self, producer, buffer_size=1<<16):
        self.producer = producer
        self.buffer = ''
        self.buffer_size = buffer_size

    def more (self):
        while len(self.buffer) < self.buffer_size:
            data = self.producer.more()
            if data:
                self.buffer = self.buffer + data
            else:
                break
        r = self.buffer
        self.buffer = ''
        return r
class hooked_producer:
    """
    A producer that will call <function> when it empties,
    with an argument of the number of bytes produced.  Useful
    for logging/instrumentation purposes.
    """

    def __init__ (self, producer, function):
        self.producer = producer
        self.function = function
        self.bytes = 0

    def more (self):
        if self.producer:
            result = self.producer.more()
            if not result:
                self.producer = None
                self.function (self.bytes)
            else:
                self.bytes = self.bytes + len(result)
            return result
        else:
            return ''
# HTTP 1.1 emphasizes that an advertised Content-Length header MUST be
# correct.  In the face of Strange Files, it is conceivable that
# reading a 'file' may produce an amount of data not matching that
# reported by os.stat() [text/binary mode issues, perhaps the file is
# being appended to, etc..]  This makes the chunked encoding a True
# Blessing, and it really ought to be used even with normal files.
# How beautifully it blends with the concept of the producer.

class chunked_producer:
    """A producer that implements the 'chunked' transfer coding for HTTP/1.1.
    Here is a sample usage:
            request['Transfer-Encoding'] = 'chunked'
            request.push (
                    producers.chunked_producer (your_producer)
                    )
            request.done()
    """

    def __init__ (self, producer, footers=None):
        self.producer = producer
        self.footers = footers

    def more (self):
        if self.producer:
            data = self.producer.more()
            if data:
                return '%x\r\n%s\r\n' % (len(data), data)
            else:
                self.producer = None
                if self.footers:
                    return string.join (
                            ['0'] + self.footers,
                            '\r\n'
                            ) + '\r\n\r\n'
                else:
                    return '0\r\n\r\n'
        else:
            return ''
# Unfortunately this isn't very useful right now (Aug 97), because
# apparently the browsers don't do on-the-fly decompression.  Which
# is sad, because this could _really_ speed things up, especially for
# low-bandwidth clients (i.e., most everyone).

try:
    import zlib
except ImportError:
    zlib = None

class compressed_producer:
    """
    Compress another producer on-the-fly, using ZLIB
    [Unfortunately, none of the current browsers seem to support this]
    """

    # Note: It's not very efficient to have the server repeatedly
    # compressing your outgoing files: compress them ahead of time, or
    # use a compress-once-and-store scheme.  However, if you have low
    # bandwidth and low traffic, this may make more sense than
    # maintaining your source files compressed.
    #
    # Can also be used for compressing dynamically-produced output.

    def __init__ (self, producer, level=5):
        self.producer = producer
        self.compressor = zlib.compressobj (level)

    def more (self):
        if self.producer:
            cdata = ''
            # feed until we get some output
            while not cdata:
                data = self.producer.more()
                if not data:
                    self.producer = None
                    return self.compressor.flush()
                else:
                    cdata = self.compressor.compress (data)
            return cdata
        else:
            return ''
class escaping_producer:

    "A producer that escapes a sequence of characters"
    " Common usage: escaping the CRLF.CRLF sequence in SMTP, NNTP, etc..."

    def __init__ (self, producer, esc_from='\r\n.', esc_to='\r\n..'):
        self.producer = producer
        self.esc_from = esc_from
        self.esc_to = esc_to
        self.buffer = ''
        from asynchat import find_prefix_at_end
        self.find_prefix_at_end = find_prefix_at_end

    def more (self):
        esc_from = self.esc_from
        esc_to = self.esc_to

        buffer = self.buffer + self.producer.more()

        if buffer:
            buffer = string.replace (buffer, esc_from, esc_to)
            i = self.find_prefix_at_end (buffer, esc_from)
            if i:
                # we found a prefix
                self.buffer = buffer[-i:]
                return buffer[:-i]
            else:
                # no prefix, return it all
                self.buffer = ''
                return buffer
        else:
            return buffer
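For example, with the default arguments this performs SMTP-style dot-stuffing; a quick illustrative check (again using the hypothetical _one_shot_producer sketched earlier):

ep = escaping_producer (_one_shot_producer ('line\r\n.dot'))
assert ep.more() == 'line\r\n..dot'     # the CRLF-dot sequence is stuffed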
lib/python/ZServer/medusa/resolver.py
deleted
100644 → 0
View file @
6786b136
# -*- Mode: Python; tab-width: 4 -*-
#
# Author: Sam Rushing <rushing@nightmare.com>
#
RCS_ID = '$Id: resolver.py,v 1.6 2000/06/02 14:22:48 brian Exp $'

# Fast, low-overhead asynchronous name resolver.  uses 'pre-cooked'
# DNS requests, unpacks only as much as it needs of the reply.
# see rfc1035 for details

import string
import asyncore
import socket
import sys
import time

from counter import counter

VERSION = string.split (RCS_ID)[2]
# header
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ID |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# |QR| Opcode |AA|TC|RD|RA| Z | RCODE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QDCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ANCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | NSCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | ARCOUNT |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# question
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / QNAME /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QTYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | QCLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# build a DNS address request, _quickly_
def fast_address_request (host, id=0):
    return (
            '%c%c' % (chr ((id>>8)&0xff), chr (id&0xff))
            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\001\000\001' % (
                    string.join (
                            map (
                                    lambda part: '%c%s' % (chr (len (part)), part),
                                    string.split (host, '.')
                                    ), ''
                            )
                    )
            )
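An illustrative check of the layout (not in the original file): twelve header bytes carry the id, flags and section counts, followed by the length-prefixed QNAME labels, the root label, and QTYPE=A, QCLASS=IN.

q = fast_address_request ('www.example.com', 42)
assert q[:2] == '\000\052'                          # big-endian request id
assert q[12:29] == '\003www\007example\003com\000'  # length-prefixed labels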
def fast_ptr_request (host, id=0):
    return (
            '%c%c' % (chr ((id>>8)&0xff), chr (id&0xff))
            + '\001\000\000\001\000\000\000\000\000\000%s\000\000\014\000\001' % (
                    string.join (
                            map (
                                    lambda part: '%c%s' % (chr (len (part)), part),
                                    string.split (host, '.')
                                    ), ''
                            )
                    )
            )
def unpack_name (r, pos):
    n = []
    while 1:
        ll = ord (r[pos])
        if (ll & 0xc0):
            # compression: a pointer to a name elsewhere in the packet
            pos = ((ll & 0x3f) << 8) + ord (r[pos+1])
        elif ll == 0:
            break
        else:
            pos = pos + 1
            n.append (r[pos:pos+ll])
            pos = pos + ll
    return string.join (n, '.')
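Illustrative check (not in the original file): a length-prefixed wire name unpacks back to dotted form.

assert unpack_name ('\003foo\003bar\000', 0) == 'foo.bar'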
def skip_name (r, pos):
    s = pos
    while 1:
        ll = ord (r[pos])
        if (ll & 0xc0):
            # compression
            return pos + 2
        elif ll == 0:
            pos = pos + 1
            break
        else:
            pos = pos + ll + 1
    return pos
def unpack_ttl (r, pos):
    return reduce (
            lambda x, y: (x << 8) | y,
            map (ord, r[pos:pos+4])
            )
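Worked example (illustrative): the four TTL octets fold left-to-right, so '\000\001\121\200' is 0x00015180, i.e. 86400 seconds, exactly one day.

assert unpack_ttl ('\000\001\121\200', 0) == 86400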
# resource record
# 1 1 1 1 1 1
# 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | |
# / /
# / NAME /
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TYPE |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | CLASS |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | TTL |
# | |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
# | RDLENGTH |
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--|
# / RDATA /
# / /
# +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
def unpack_address_reply (r):
    ancount = (ord (r[6]) << 8) + ord (r[7])
    # skip question, first name starts at 12,
    # this is followed by QTYPE and QCLASS
    pos = skip_name (r, 12) + 4
    if ancount:
        # we are looking very specifically for
        # an answer with TYPE=A, CLASS=IN (\000\001\000\001)
        for an in range (ancount):
            pos = skip_name (r, pos)
            if r[pos:pos+4] == '\000\001\000\001':
                return (
                        unpack_ttl (r, pos+4),
                        '%d.%d.%d.%d' % tuple (map (ord, r[pos+10:pos+14]))
                        )
            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
            pos = pos + 8
            rdlength = (ord (r[pos]) << 8) + ord (r[pos+1])
            pos = pos + 2 + rdlength
        return 0, None
    else:
        return 0, None
def unpack_ptr_reply (r):
    ancount = (ord (r[6]) << 8) + ord (r[7])
    # skip question, first name starts at 12,
    # this is followed by QTYPE and QCLASS
    pos = skip_name (r, 12) + 4
    if ancount:
        # we are looking very specifically for
        # an answer with TYPE=PTR, CLASS=IN (\000\014\000\001)
        for an in range (ancount):
            pos = skip_name (r, pos)
            if r[pos:pos+4] == '\000\014\000\001':
                return (
                        unpack_ttl (r, pos+4),
                        unpack_name (r, pos+10)
                        )
            # skip over TYPE, CLASS, TTL, RDLENGTH, RDATA
            pos = pos + 8
            rdlength = (ord (r[pos]) << 8) + ord (r[pos+1])
            pos = pos + 2 + rdlength
        return 0, None
    else:
        return 0, None
# This is a UDP (datagram) resolver.
#
# It may be useful to implement a TCP resolver. This would presumably
# give us more reliable behavior when things get too busy. A TCP
# client would have to manage the connection carefully, since the
# server is allowed to close it at will (the RFC recommends closing
# after 2 minutes of idle time).
#
# Note also that the TCP client will have to prepend each request
# with a 2-byte length indicator (see rfc1035).
#
class resolver (asyncore.dispatcher):
    id = counter()

    def __init__ (self, server='127.0.0.1'):
        asyncore.dispatcher.__init__ (self)
        self.create_socket (socket.AF_INET, socket.SOCK_DGRAM)
        self.server = server
        self.request_map = {}
        self.last_reap_time = int (time.time())    # reap every few minutes

    def writable (self):
        return 0

    def log (self, *args):
        pass

    def handle_close (self):
        self.log_info ('closing!')
        self.close()

    def handle_error (self):
        # don't close the connection on error
        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
        self.log_info (
                'Problem with DNS lookup (%s:%s %s)' % (t, v, tbinfo),
                'error'
                )

    def get_id (self):
        return (self.id.as_long() % (1<<16))

    def reap (self):
        # find DNS requests that have timed out
        now = int (time.time())
        if now - self.last_reap_time > 180:        # reap every 3 minutes
            self.last_reap_time = now              # update before we forget
            for k, (host, unpack, callback, when) in self.request_map.items():
                if now - when > 180:               # over 3 minutes old
                    del self.request_map[k]
                    try:
                        # same code as in handle_read
                        callback (host, 0, None)   # timeout val is (0,None)
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        self.log_info ('%s %s %s' % (t, v, tbinfo), 'error')

    def resolve (self, host, callback):
        self.reap()                # first, get rid of old guys
        self.socket.sendto (
                fast_address_request (host, self.get_id()),
                (self.server, 53)
                )
        self.request_map[self.get_id()] = (
                host, unpack_address_reply, callback, int (time.time()))
        self.id.increment()

    def resolve_ptr (self, host, callback):
        self.reap()                # first, get rid of old guys
        ip = string.split (host, '.')
        ip.reverse()
        ip = string.join (ip, '.') + '.in-addr.arpa'
        self.socket.sendto (
                fast_ptr_request (ip, self.get_id()),
                (self.server, 53)
                )
        self.request_map[self.get_id()] = (
                host, unpack_ptr_reply, callback, int (time.time()))
        self.id.increment()

    def handle_read (self):
        reply, whence = self.socket.recvfrom (512)
        # for security reasons we may want to double-check
        # that <whence> is the server we sent the request to.
        id = (ord (reply[0]) << 8) + ord (reply[1])
        if self.request_map.has_key (id):
            host, unpack, callback, when = self.request_map[id]
            del self.request_map[id]
            ttl, answer = unpack (reply)
            try:
                callback (host, ttl, answer)
            except:
                (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                self.log_info ('%s %s %s' % (t, v, tbinfo), 'error')
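A usage sketch (illustrative, kept commented; it assumes a DNS server listening on 127.0.0.1): the callback always receives (host, ttl, answer), with answer None when the request timed out or no matching record was found.

#     def show (host, ttl, answer):
#         print '%s -> %s (ttl: %s)' % (host, answer, ttl)
#
#     r = resolver ('127.0.0.1')
#     r.resolve ('www.nightmare.com', show)
#     asyncore.loop()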
class rbl (resolver):

    def resolve_maps (self, host, callback):
        ip = string.split (host, '.')
        ip.reverse()
        ip = string.join (ip, '.') + '.rbl.maps.vix.com'
        self.socket.sendto (
                fast_ptr_request (ip, self.get_id()),
                (self.server, 53)
                )
        self.request_map[self.get_id()] = host, self.check_reply, callback
        self.id.increment()

    def check_reply (self, r):
        # we only need to check RCODE.
        rcode = (ord (r[3]) & 0xf)
        self.log_info ('MAPS RBL; RCODE =%02x\n%s' % (rcode, repr (r)))
        return 0, rcode    # (ttl, answer)
class hooked_callback:
    def __init__ (self, hook, callback):
        self.hook, self.callback = hook, callback

    def __call__ (self, *args):
        apply (self.hook, args)
        apply (self.callback, args)
class caching_resolver (resolver):
    "Cache DNS queries.  Will need to honor the TTL value in the replies"

    def __init__ (*args):
        apply (resolver.__init__, args)
        self = args[0]
        self.cache = {}
        self.forward_requests = counter()
        self.reverse_requests = counter()
        self.cache_hits = counter()

    def resolve (self, host, callback):
        self.forward_requests.increment()
        if self.cache.has_key (host):
            when, ttl, answer = self.cache[host]
            # ignore TTL for now
            callback (host, ttl, answer)
            self.cache_hits.increment()
        else:
            resolver.resolve (
                    self,
                    host,
                    hooked_callback (
                            self.callback_hook,
                            callback
                            )
                    )

    def resolve_ptr (self, host, callback):
        self.reverse_requests.increment()
        if self.cache.has_key (host):
            when, ttl, answer = self.cache[host]
            # ignore TTL for now
            callback (host, ttl, answer)
            self.cache_hits.increment()
        else:
            resolver.resolve_ptr (
                    self,
                    host,
                    hooked_callback (
                            self.callback_hook,
                            callback
                            )
                    )

    def callback_hook (self, host, ttl, answer):
        self.cache[host] = time.time(), ttl, answer

    SERVER_IDENT = 'Caching DNS Resolver (V%s)' % VERSION

    def status (self):
        import status_handler
        import producers
        return producers.simple_producer (
                '<h2>%s</h2>' % self.SERVER_IDENT
                + '<br>Server: %s' % self.server
                + '<br>Cache Entries: %d' % len (self.cache)
                + '<br>Outstanding Requests: %d' % len (self.request_map)
                + '<br>Forward Requests: %s' % self.forward_requests
                + '<br>Reverse Requests: %s' % self.reverse_requests
                + '<br>Cache Hits: %s' % self.cache_hits
                )
#test_reply = """\000\000\205\200\000\001\000\001\000\002\000\002\006squirl\011nightmare\003com\000\000\001\000\001\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\011nightmare\003com\000\000\002\000\001\000\001Q\200\000\002\300\014\3006\000\002\000\001\000\001Q\200\000\015\003ns1\003iag\003net\000\300\014\000\001\000\001\000\001Q\200\000\004\315\240\260\005\300]\000\001\000\001\000\000\350\227\000\004\314\033\322\005"""
# def test_unpacker ():
# print unpack_address_reply (test_reply)
#
# import time
# class timer:
# def __init__ (self):
# self.start = time.time()
# def end (self):
# return time.time() - self.start
#
# # I get ~290 unpacks per second for the typical case, compared to ~48
# # using dnslib directly. also, that latter number does not include
# # picking the actual data out.
#
# def benchmark_unpacker():
#
# r = range(1000)
# t = timer()
# for i in r:
# unpack_address_reply (test_reply)
# print '%.2f unpacks per second' % (1000.0 / t.end())
if __name__ == '__main__':
    import sys
    if len (sys.argv) == 1:
        print 'usage: %s [-r] [-s <server_IP>] host [host ...]' % sys.argv[0]
        sys.exit (0)
    elif ('-s' in sys.argv):
        i = sys.argv.index ('-s')
        server = sys.argv[i+1]
        del sys.argv[i:i+2]
    else:
        server = '127.0.0.1'

    if ('-r' in sys.argv):
        reverse = 1
        i = sys.argv.index ('-r')
        del sys.argv[i]
    else:
        reverse = 0

    if ('-m' in sys.argv):
        maps = 1
        sys.argv.remove ('-m')
    else:
        maps = 0

    if maps:
        r = rbl (server)
    else:
        r = caching_resolver (server)

    count = len (sys.argv) - 1

    def print_it (host, ttl, answer):
        global count
        print '%s: %s' % (host, answer)
        count = count - 1
        if not count:
            r.close()

    for host in sys.argv[1:]:
        if reverse:
            r.resolve_ptr (host, print_it)
        elif maps:
            r.resolve_maps (host, print_it)
        else:
            r.resolve (host, print_it)

    # hooked asyncore.loop()
    while asyncore.socket_map:
        asyncore.poll (30.0)
        print 'requests outstanding: %d' % len (r.request_map)
lib/python/ZServer/medusa/select_trigger.py
deleted
100644 → 0
View file @
6786b136
# -*- Mode: Python; tab-width: 4 -*-
VERSION_STRING = "$Id: select_trigger.py,v 1.14 2000/06/02 14:22:48 brian Exp $"

import asyncore
import asynchat

import os
import socket
import string
import thread
if os.name == 'posix':

    class trigger (asyncore.file_dispatcher):

        "Wake up a call to select() running in the main thread"

        # This is useful in a context where you are using Medusa's I/O
        # subsystem to deliver data, but the data is generated by another
        # thread.  Normally, if Medusa is in the middle of a call to
        # select(), new output data generated by another thread will have
        # to sit until the call to select() either times out or returns.
        # If the trigger is 'pulled' by another thread, it should immediately
        # generate a READ event on the trigger object, which will force the
        # select() invocation to return.

        # A common use for this facility: letting Medusa manage I/O for a
        # large number of connections; but routing each request through a
        # thread chosen from a fixed-size thread pool.  When a thread is
        # acquired, a transaction is performed, but output data is
        # accumulated into buffers that will be emptied more efficiently
        # by Medusa. [picture a server that can process database queries
        # rapidly, but doesn't want to tie up threads waiting to send data
        # to low-bandwidth connections]

        # The other major feature provided by this class is the ability to
        # move work back into the main thread: if you call pull_trigger()
        # with a thunk argument, when select() wakes up and receives the
        # event it will call your thunk from within that thread.  The main
        # purpose of this is to remove the need to wrap thread locks around
        # Medusa's data structures, which normally do not need them.  [To see
        # why this is true, imagine this scenario: A thread tries to push some
        # new data onto a channel's outgoing data queue at the same time that
        # the main thread is trying to remove some]

        def __init__ (self):
            r, w = os.pipe()
            self.trigger = w
            asyncore.file_dispatcher.__init__ (self, r)
            self.lock = thread.allocate_lock()
            self.thunks = []

        def __repr__ (self):
            return '<select-trigger (pipe) at %x>' % id (self)

        def readable (self):
            return 1

        def writable (self):
            return 0

        def handle_connect (self):
            pass

        def pull_trigger (self, thunk=None):
            # print 'PULL_TRIGGER: ', len(self.thunks)
            if thunk:
                try:
                    self.lock.acquire()
                    self.thunks.append (thunk)
                finally:
                    self.lock.release()
            os.write (self.trigger, 'x')

        def handle_read (self):
            self.recv (8192)
            try:
                self.lock.acquire()
                for thunk in self.thunks:
                    try:
                        thunk()
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
                self.thunks = []
            finally:
                self.lock.release()
else:

    # win32-safe version

    class trigger (asyncore.dispatcher):

        address = ('127.9.9.9', 19999)

        def __init__ (self):
            a = socket.socket (socket.AF_INET, socket.SOCK_STREAM)
            w = socket.socket (socket.AF_INET, socket.SOCK_STREAM)

            # set TCP_NODELAY to true to avoid buffering
            w.setsockopt (socket.IPPROTO_TCP, 1, 1)

            # tricky: get a pair of connected sockets
            host = '127.0.0.1'
            port = 19999
            while 1:
                try:
                    self.address = (host, port)
                    a.bind (self.address)
                    break
                except:
                    if port <= 19950:
                        raise 'Bind Error', 'Cannot bind trigger!'
                    port = port - 1

            a.listen (1)
            w.setblocking (0)
            try:
                w.connect (self.address)
            except:
                pass
            r, addr = a.accept()
            a.close()
            w.setblocking (1)
            self.trigger = w

            asyncore.dispatcher.__init__ (self, r)
            self.lock = thread.allocate_lock()
            self.thunks = []
            self._trigger_connected = 0

        def __repr__ (self):
            return '<select-trigger (loopback) at %x>' % id (self)

        def readable (self):
            return 1

        def writable (self):
            return 0

        def handle_connect (self):
            pass

        def pull_trigger (self, thunk=None):
            if thunk:
                try:
                    self.lock.acquire()
                    self.thunks.append (thunk)
                finally:
                    self.lock.release()
            self.trigger.send ('x')

        def handle_read (self):
            self.recv (8192)
            try:
                self.lock.acquire()
                for thunk in self.thunks:
                    try:
                        thunk()
                    except:
                        (file, fun, line), t, v, tbinfo = asyncore.compact_traceback()
                        print 'exception in trigger thunk: (%s:%s %s)' % (t, v, tbinfo)
                self.thunks = []
            finally:
                self.lock.release()
the_trigger = None

class trigger_file:

    "A 'triggered' file object"

    buffer_size = 4096

    def __init__ (self, parent):
        global the_trigger
        if the_trigger is None:
            the_trigger = trigger()
        self.parent = parent
        self.buffer = ''

    def write (self, data):
        self.buffer = self.buffer + data
        if len (self.buffer) > self.buffer_size:
            d, self.buffer = self.buffer, ''
            the_trigger.pull_trigger (
                    lambda d=d, p=self.parent: p.push (d)
                    )

    def writeline (self, line):
        self.write (line + '\r\n')

    def writelines (self, lines):
        self.write (
                string.joinfields (lines, '\r\n')
                + '\r\n'
                )

    def flush (self):
        if self.buffer:
            d, self.buffer = self.buffer, ''
            the_trigger.pull_trigger (
                    lambda p=self.parent, d=d: p.push (d)
                    )

    def softspace (self, *args):
        pass

    def close (self):
        # in a derived class, you may want to call trigger_close() instead.
        self.flush()
        self.parent = None

    def trigger_close (self):
        d, self.buffer = self.buffer, ''
        p, self.parent = self.parent, None
        the_trigger.pull_trigger (
                lambda p=p, d=d: (p.push (d), p.close_when_done())
                )
if __name__ == '__main__':

    import time

    def thread_function (output_file, i, n):
        print 'entering thread_function'
        while n:
            time.sleep (5)
            output_file.write ('%2d.%2d %s\r\n' % (i, n, output_file))
            output_file.flush()
            n = n - 1
        output_file.close()
        print 'exiting thread_function'

    class thread_parent (asynchat.async_chat):

        def __init__ (self, conn, addr):
            self.addr = addr
            asynchat.async_chat.__init__ (self, conn)
            self.set_terminator ('\r\n')
            self.buffer = ''
            self.count = 0

        def collect_incoming_data (self, data):
            self.buffer = self.buffer + data

        def found_terminator (self):
            data, self.buffer = self.buffer, ''
            if not data:
                asyncore.close_all()
                print "done"
                return
            n = string.atoi (string.split (data)[0])
            tf = trigger_file (self)
            self.count = self.count + 1
            thread.start_new_thread (thread_function, (tf, self.count, n))

    class thread_server (asyncore.dispatcher):

        def __init__ (self, family=socket.AF_INET, address=('', 9003)):
            asyncore.dispatcher.__init__ (self)
            self.create_socket (family, socket.SOCK_STREAM)
            self.set_reuse_addr()
            self.bind (address)
            self.listen (5)

        def handle_accept (self):
            conn, addr = self.accept()
            tp = thread_parent (conn, addr)

    thread_server()
    #asyncore.loop(1.0, use_poll=1)
    try:
        asyncore.loop()
    except:
        asyncore.close_all()
lib/python/ZServer/medusa/status_handler.py
deleted
100644 → 0
View file @
6786b136
# -*- Mode: Python; tab-width: 4 -*-
VERSION_STRING = "$Id: status_handler.py,v 1.4 2000/06/02 14:22:48 brian Exp $"

#
# medusa status extension
#

import string
import time
import regex

import asyncore
import http_server
import medusa_gif
import producers
from counter import counter

START_TIME = long (time.time())

# split a uri
# <path>;<params>?<query>#<fragment>
path_regex = regex.compile (
#        path      params        query       fragment
        '\\([^;?#]*\\)\\(;[^?#]*\\)?\\(\\?[^#]*\)?\(#.*\)?'
        )
def split_path (path):
    if path_regex.match (path) != len (path):
        raise ValueError, "bad path"
    else:
        return map (lambda i, r=path_regex: r.group (i), range (1, 5))
class status_extension:
    hit_counter = counter()

    def __init__ (self, objects, statusdir='/status', allow_emergency_debug=0):
        self.objects = objects
        self.statusdir = statusdir
        self.allow_emergency_debug = allow_emergency_debug
        # We use /status instead of statusdir here, because it's too
        # hard to pass statusdir to the logger, who makes the HREF
        # to the object dir.  We don't need the security-through-
        # obscurity here in any case, because the id is obscurity enough
        self.hyper_regex = regex.compile ('/status/object/\([0-9]+\)/.*')
        self.hyper_objects = []
        for object in objects:
            self.register_hyper_object (object)

    def __repr__ (self):
        return '<Status Extension (%s hits) at %x>' % (
                self.hit_counter,
                id (self)
                )

    def match (self, request):
        [path, params, query, fragment] = split_path (request.uri)
        # For reasons explained above, we don't use statusdir for /object
        return (path[:len (self.statusdir)] == self.statusdir or
                path[:len ("/status/object/")] == '/status/object/')
# Possible Targets:
# /status
# /status/channel_list
# /status/medusa.gif
# can we have 'clickable' objects?
# [yes, we can use id(x) and do a linear search]
# Dynamic producers:
# HTTP/1.0: we must close the channel, because it's dynamic output
# HTTP/1.1: we can use the chunked transfer-encoding, and leave
# it open.
    def handle_request (self, request):
        [path, params, query, fragment] = split_path (request.uri)
        self.hit_counter.increment()
        if path == self.statusdir:    # and not a subdirectory
            up_time = string.join (english_time (long (time.time()) - START_TIME))
            request['Content-Type'] = 'text/html'
            request.push (
                    '<html>'
                    '<title>Medusa Status Reports</title>'
                    '<body bgcolor="#ffffff">'
                    '<h1>Medusa Status Reports</h1>'
                    '<b>Up:</b> %s' % up_time
                    )
            for i in range (len (self.objects)):
                request.push (self.objects[i].status())
                request.push ('<hr>\r\n')
            request.push (
                    '<p><a href="%s/channel_list">Channel List</a>'
                    '<hr>'
                    '<img src="%s/medusa.gif" align=right width=%d height=%d>'
                    '</body></html>' % (
                            self.statusdir,
                            self.statusdir,
                            medusa_gif.width,
                            medusa_gif.height
                            )
                    )
            request.done()
        elif path == self.statusdir + '/channel_list':
            request['Content-Type'] = 'text/html'
            request.push ('<html><body>')
            request.push (channel_list_producer (self.statusdir))
            request.push (
                    '<hr>'
                    '<img src="%s/medusa.gif" align=right width=%d height=%d>' % (
                            self.statusdir,
                            medusa_gif.width,
                            medusa_gif.height
                            )
                    + '</body></html>'
                    )
            request.done()
        elif path == self.statusdir + '/medusa.gif':
            request['Content-Type'] = 'image/gif'
            request['Content-Length'] = len (medusa_gif.data)
            request.push (medusa_gif.data)
            request.done()
        elif path == self.statusdir + '/close_zombies':
            message = (
                    '<h2>Closing all zombie http client connections...</h2>'
                    '<p><a href="%s">Back to the status page</a>' % self.statusdir
                    )
            request['Content-Type'] = 'text/html'
            request['Content-Length'] = len (message)
            request.push (message)
            now = int (time.time())
            for channel in asyncore.socket_map.keys():
                if channel.__class__ == http_server.http_channel:
                    if channel != request.channel:
                        if (now - channel.creation_time) > channel.zombie_timeout:
                            channel.close()
            request.done()
            # Emergency Debug Mode
            # If a server is running away from you, don't KILL it!
            # Move all the AF_INET server ports and perform an autopsy...
            # [disabled by default to protect the innocent]
        elif self.allow_emergency_debug and path == self.statusdir + '/emergency_debug':
            request.push ('<html>Moving All Servers...</html>')
            request.done()
            for channel in asyncore.socket_map.keys():
                if channel.accepting:
                    if type (channel.addr) is type (()):
                        ip, port = channel.addr
                        channel.socket.close()
                        channel.del_channel()
                        channel.addr = (ip, port + 10000)
                        fam, typ = channel.family_and_type
                        channel.create_socket (fam, typ)
                        channel.set_reuse_addr()
                        channel.bind (channel.addr)
                        channel.listen (5)
        elif self.hyper_regex.match (path) != -1:
            oid = string.atoi (self.hyper_regex.group (1))
            for object in self.hyper_objects:
                if id (object) == oid:
                    if hasattr (object, 'hyper_respond'):
                        object.hyper_respond (self, path, request)
        else:
            request.error (404)
            return

    def status (self):
        return producers.simple_producer (
                '<li>Status Extension <b>Hits</b> : %s' % self.hit_counter
                )

    def register_hyper_object (self, object):
        if not object in self.hyper_objects:
            self.hyper_objects.append (object)
import logger

class logger_for_status (logger.tail_logger):

    def status (self):
        return 'Last %d log entries for: %s' % (
                len (self.messages),
                html_repr (self)
                )

    def hyper_respond (self, sh, path, request):
        request['Content-Type'] = 'text/plain'
        messages = self.messages[:]
        messages.reverse()
        request.push (lines_producer (messages))
        request.done()
class lines_producer:
    def __init__ (self, lines):
        self.lines = lines

    def ready (self):
        return len (self.lines)

    def more (self):
        if self.lines:
            chunk = self.lines[:50]
            self.lines = self.lines[50:]
            return string.join (chunk, '\r\n') + '\r\n'
        else:
            return ''
class channel_list_producer (lines_producer):
    def __init__ (self, statusdir):
        channel_reprs = map (
                lambda x: '&lt;' + repr (x)[1:-1] + '&gt;',
                asyncore.socket_map.values()
                )
        channel_reprs.sort()
        lines_producer.__init__ (
                self,
                ['<h1>Active Channel List</h1>',
                 '<pre>'
                 ] + channel_reprs + [
                 '</pre>',
                 '<p><a href="%s">Status Report</a>' % statusdir
                 ]
                )
# this really needs a full-blown quoter...
def sanitize (s):
    if '<' in s:
        s = string.join (string.split (s, '<'), '&lt;')
    if '>' in s:
        s = string.join (string.split (s, '>'), '&gt;')
    return s
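Illustrative check (not in the original file): angle brackets become entities, so channel reprs survive embedding in the status page's HTML.

assert sanitize ('<socket>') == '&lt;socket&gt;'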
def html_repr (object):
    so = sanitize (repr (object))
    if hasattr (object, 'hyper_respond'):
        return '<a href="/status/object/%d/">%s</a>' % (id (object), so)
    else:
        return so
def html_reprs (list, front='', back=''):
    reprs = map (
            lambda x, f=front, b=back: '%s%s%s' % (f, x, b),
            map (lambda x: sanitize (html_repr (x)), list)
            )
    reprs.sort()
    return reprs
# for example, tera, giga, mega, kilo
# p_d (n, (1024, 1024, 1024, 1024))
# smallest divider goes first - for example
# minutes, hours, days
# p_d (n, (60, 60, 24))
def progressive_divide (n, parts):
    result = []
    for part in parts:
        n, rem = divmod (n, part)
        result.append (rem)
    result.append (n)
    return result
# b,k,m,g,t
def split_by_units (n, units, dividers, format_string):
    divs = progressive_divide (n, dividers)
    result = []
    for i in range (len (units)):
        if divs[i]:
            result.append (format_string % (divs[i], units[i]))
    result.reverse()
    if not result:
        return [format_string % (0, units[0])]
    else:
        return result
def english_bytes (n):
    return split_by_units (
            n,
            ('', 'K', 'M', 'G', 'T'),
            (1024, 1024, 1024, 1024, 1024),
            '%d %sB'
            )
def english_time (n):
    return split_by_units (
            n,
            ('secs', 'mins', 'hours', 'days', 'weeks', 'years'),
            (60, 60, 24, 7, 52),
            '%d %s'
            )
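Illustrative check: the same 90061 seconds rendered the way the status page shows uptime.

assert english_time (90061) == ['1 days', '1 hours', '1 mins', '1 secs']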
lib/python/ZServer/medusa/test_logger.py
deleted
100644 → 0
View file @
6786b136
import sys
import socket
import select

print "Simulating Unix-domain logging using file: %s" % sys.argv[1]

log_socket = socket.socket (socket.AF_UNIX, socket.SOCK_DGRAM)
log_socket.bind (sys.argv[1])

while 1:
    # select() returns three lists; we only care about readability
    r, w, e = select.select ([log_socket], [], [])
    print '.',
    if r:
        print log_socket.recv (1024)