Commit 54f22ed3 authored by Guido van Rossum

More trivial comment -> docstring transformations by Ka-Ping Yee, who writes:

Here is batch 2, as a big collection of CVS context diffs.
Along with moving comments into docstrings, i've added a
couple of missing docstrings and attempted to make sure more
module docstrings begin with a one-line summary.

I did not add docstrings to the methods in profile.py for
fear of upsetting any careful optimizations there, though
i did move class documentation into class docstrings.

The convention i'm using is to leave credits/version/copyright
type of stuff in # comments, and move the rest of the descriptive
stuff about module usage into module docstrings.  Hope this is
okay.
parent 8b6323d3
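In concrete terms, the convention Ka-Ping describes amounts to a transformation like the following sketch (the module and names here are made up for illustration, not part of the patch):

    # Before: everything lives in comments
    # widget -- frobnicate widgets.
    # Usage: frob(w) returns a frobnicated copy of w.
    # Written by A. N. Author; copyright 1999.

    # After: usage moves into the module docstring, credits stay in comments
    """widget -- frobnicate widgets.

    Usage: frob(w) returns a frobnicated copy of w.
    """
    # Written by A. N. Author; copyright 1999.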
...@@ -22,120 +22,120 @@ decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$') ...@@ -22,120 +22,120 @@ decoder = re.compile(r'^([-+]?)0*(\d*)((?:\.\d*)?)(([eE][-+]?\d+)?)$')
# \4 exponent part (empty or begins with 'e' or 'E') # \4 exponent part (empty or begins with 'e' or 'E')
try: try:
class NotANumber(ValueError): class NotANumber(ValueError):
pass pass
except TypeError: except TypeError:
NotANumber = 'fpformat.NotANumber' NotANumber = 'fpformat.NotANumber'
# Return (sign, intpart, fraction, expo) or raise an exception:
# sign is '+' or '-'
# intpart is 0 or more digits beginning with a nonzero
# fraction is 0 or more digits
# expo is an integer
def extract(s): def extract(s):
res = decoder.match(s) """Return (sign, intpart, fraction, expo) or raise an exception:
if res is None: raise NotANumber, s sign is '+' or '-'
sign, intpart, fraction, exppart = res.group(1,2,3,4) intpart is 0 or more digits beginning with a nonzero
if sign == '+': sign = '' fraction is 0 or more digits
if fraction: fraction = fraction[1:] expo is an integer"""
if exppart: expo = int(exppart[1:]) res = decoder.match(s)
else: expo = 0 if res is None: raise NotANumber, s
return sign, intpart, fraction, expo sign, intpart, fraction, exppart = res.group(1,2,3,4)
if sign == '+': sign = ''
if fraction: fraction = fraction[1:]
if exppart: expo = int(exppart[1:])
else: expo = 0
return sign, intpart, fraction, expo
# Remove the exponent by changing intpart and fraction
def unexpo(intpart, fraction, expo): def unexpo(intpart, fraction, expo):
if expo > 0: # Move the point left """Remove the exponent by changing intpart and fraction."""
f = len(fraction) if expo > 0: # Move the point left
intpart, fraction = intpart + fraction[:expo], fraction[expo:] f = len(fraction)
if expo > f: intpart, fraction = intpart + fraction[:expo], fraction[expo:]
intpart = intpart + '0'*(expo-f) if expo > f:
elif expo < 0: # Move the point right intpart = intpart + '0'*(expo-f)
i = len(intpart) elif expo < 0: # Move the point right
intpart, fraction = intpart[:expo], intpart[expo:] + fraction i = len(intpart)
if expo < -i: intpart, fraction = intpart[:expo], intpart[expo:] + fraction
fraction = '0'*(-expo-i) + fraction if expo < -i:
return intpart, fraction fraction = '0'*(-expo-i) + fraction
return intpart, fraction
# Round or extend the fraction to size digs
def roundfrac(intpart, fraction, digs): def roundfrac(intpart, fraction, digs):
f = len(fraction) """Round or extend the fraction to size digs."""
if f <= digs: f = len(fraction)
return intpart, fraction + '0'*(digs-f) if f <= digs:
i = len(intpart) return intpart, fraction + '0'*(digs-f)
if i+digs < 0: i = len(intpart)
return '0'*-digs, '' if i+digs < 0:
total = intpart + fraction return '0'*-digs, ''
nextdigit = total[i+digs] total = intpart + fraction
if nextdigit >= '5': # Hard case: increment last digit, may have carry! nextdigit = total[i+digs]
n = i + digs - 1 if nextdigit >= '5': # Hard case: increment last digit, may have carry!
while n >= 0: n = i + digs - 1
if total[n] != '9': break while n >= 0:
n = n-1 if total[n] != '9': break
else: n = n-1
total = '0' + total else:
i = i+1 total = '0' + total
n = 0 i = i+1
total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1) n = 0
intpart, fraction = total[:i], total[i:] total = total[:n] + chr(ord(total[n]) + 1) + '0'*(len(total)-n-1)
if digs >= 0: intpart, fraction = total[:i], total[i:]
return intpart, fraction[:digs] if digs >= 0:
else: return intpart, fraction[:digs]
return intpart[:digs] + '0'*-digs, '' else:
return intpart[:digs] + '0'*-digs, ''
# Format x as [-]ddd.ddd with 'digs' digits after the point
# and at least one digit before.
# If digs <= 0, the point is suppressed.
def fix(x, digs): def fix(x, digs):
if type(x) != type(''): x = `x` """Format x as [-]ddd.ddd with 'digs' digits after the point
try: and at least one digit before.
sign, intpart, fraction, expo = extract(x) If digs <= 0, the point is suppressed."""
except NotANumber: if type(x) != type(''): x = `x`
return x try:
intpart, fraction = unexpo(intpart, fraction, expo) sign, intpart, fraction, expo = extract(x)
intpart, fraction = roundfrac(intpart, fraction, digs) except NotANumber:
while intpart and intpart[0] == '0': intpart = intpart[1:] return x
if intpart == '': intpart = '0' intpart, fraction = unexpo(intpart, fraction, expo)
if digs > 0: return sign + intpart + '.' + fraction intpart, fraction = roundfrac(intpart, fraction, digs)
else: return sign + intpart while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
# Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
# and exactly one digit before.
# If digs is <= 0, one digit is kept and the point is suppressed.
def sci(x, digs): def sci(x, digs):
if type(x) != type(''): x = `x` """Format x as [-]d.dddE[+-]ddd with 'digs' digits after the point
sign, intpart, fraction, expo = extract(x) and exactly one digit before.
if not intpart: If digs is <= 0, one digit is kept and the point is suppressed."""
while fraction and fraction[0] == '0': if type(x) != type(''): x = `x`
fraction = fraction[1:] sign, intpart, fraction, expo = extract(x)
expo = expo - 1 if not intpart:
if fraction: while fraction and fraction[0] == '0':
intpart, fraction = fraction[0], fraction[1:] fraction = fraction[1:]
expo = expo - 1 expo = expo - 1
else: if fraction:
intpart = '0' intpart, fraction = fraction[0], fraction[1:]
else: expo = expo - 1
expo = expo + len(intpart) - 1 else:
intpart, fraction = intpart[0], intpart[1:] + fraction intpart = '0'
digs = max(0, digs) else:
intpart, fraction = roundfrac(intpart, fraction, digs) expo = expo + len(intpart) - 1
if len(intpart) > 1: intpart, fraction = intpart[0], intpart[1:] + fraction
intpart, fraction, expo = \ digs = max(0, digs)
intpart[0], intpart[1:] + fraction[:-1], \ intpart, fraction = roundfrac(intpart, fraction, digs)
expo + len(intpart) - 1 if len(intpart) > 1:
s = sign + intpart intpart, fraction, expo = \
if digs > 0: s = s + '.' + fraction intpart[0], intpart[1:] + fraction[:-1], \
e = `abs(expo)` expo + len(intpart) - 1
e = '0'*(3-len(e)) + e s = sign + intpart
if expo < 0: e = '-' + e if digs > 0: s = s + '.' + fraction
else: e = '+' + e e = `abs(expo)`
return s + 'e' + e e = '0'*(3-len(e)) + e
if expo < 0: e = '-' + e
else: e = '+' + e
return s + 'e' + e
# Interactive test run
def test(): def test():
try: """Interactive test run."""
while 1: try:
x, digs = input('Enter (x, digs): ') while 1:
print x, fix(x, digs), sci(x, digs) x, digs = input('Enter (x, digs): ')
except (EOFError, KeyboardInterrupt): print x, fix(x, digs), sci(x, digs)
pass except (EOFError, KeyboardInterrupt):
pass
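For reference, the fix() and sci() behaviour documented in the new docstrings looks like this in an interactive session (a sketch assuming the stdlib fpformat module of that era):

    >>> import fpformat
    >>> fpformat.fix('123.456', 1)    # one digit after the point, rounded
    '123.5'
    >>> fpformat.sci('123.456', 1)    # scientific notation, three-digit exponent
    '1.2e+002'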
# Gopher protocol client interface """Gopher protocol client interface."""
import string import string
...@@ -29,180 +29,180 @@ A_IMAGE = 'I' ...@@ -29,180 +29,180 @@ A_IMAGE = 'I'
A_WHOIS = 'w' A_WHOIS = 'w'
A_QUERY = 'q' A_QUERY = 'q'
A_GIF = 'g' A_GIF = 'g'
A_HTML = 'h' # HTML file A_HTML = 'h' # HTML file
A_WWW = 'w' # WWW address A_WWW = 'w' # WWW address
A_PLUS_IMAGE = ':' A_PLUS_IMAGE = ':'
A_PLUS_MOVIE = ';' A_PLUS_MOVIE = ';'
A_PLUS_SOUND = '<' A_PLUS_SOUND = '<'
# Function mapping all file types to strings; unknown types become TYPE='x'
_names = dir() _names = dir()
_type_to_name_map = {} _type_to_name_map = {}
def type_to_name(gtype): def type_to_name(gtype):
global _type_to_name_map """Map all file types to strings; unknown types become TYPE='x'."""
if _type_to_name_map=={}: global _type_to_name_map
for name in _names: if _type_to_name_map=={}:
if name[:2] == 'A_': for name in _names:
_type_to_name_map[eval(name)] = name[2:] if name[:2] == 'A_':
if _type_to_name_map.has_key(gtype): _type_to_name_map[eval(name)] = name[2:]
return _type_to_name_map[gtype] if _type_to_name_map.has_key(gtype):
return 'TYPE=' + `gtype` return _type_to_name_map[gtype]
return 'TYPE=' + `gtype`
# Names for characters and strings # Names for characters and strings
CRLF = '\r\n' CRLF = '\r\n'
TAB = '\t' TAB = '\t'
# Send a selector to a given host and port, return a file with the reply
def send_selector(selector, host, port = 0): def send_selector(selector, host, port = 0):
import socket """Send a selector to a given host and port, return a file with the reply."""
import string import socket
if not port: import string
i = string.find(host, ':') if not port:
if i >= 0: i = string.find(host, ':')
host, port = host[:i], string.atoi(host[i+1:]) if i >= 0:
if not port: host, port = host[:i], string.atoi(host[i+1:])
port = DEF_PORT if not port:
elif type(port) == type(''): port = DEF_PORT
port = string.atoi(port) elif type(port) == type(''):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) port = string.atoi(port)
s.connect(host, port) s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.send(selector + CRLF) s.connect(host, port)
s.shutdown(1) s.send(selector + CRLF)
return s.makefile('rb') s.shutdown(1)
return s.makefile('rb')
# Send a selector and a query string
def send_query(selector, query, host, port = 0): def send_query(selector, query, host, port = 0):
return send_selector(selector + '\t' + query, host, port) """Send a selector and a query string."""
return send_selector(selector + '\t' + query, host, port)
# Takes a path as returned by urlparse and returns the appropriate selector
def path_to_selector(path): def path_to_selector(path):
if path=="/": """Takes a path as returned by urlparse and returns the appropriate selector."""
return "/" if path=="/":
else: return "/"
return path[2:] # Cuts initial slash and data type identifier else:
return path[2:] # Cuts initial slash and data type identifier
# Takes a path as returned by urlparse and maps it to a string
# See section 3.4 of RFC 1738 for details
def path_to_datatype_name(path): def path_to_datatype_name(path):
if path=="/": """Takes a path as returned by urlparse and maps it to a string.
# No way to tell, although "INDEX" is likely See section 3.4 of RFC 1738 for details."""
return "TYPE='unknown'" if path=="/":
else: # No way to tell, although "INDEX" is likely
return type_to_name(path[1]) return "TYPE='unknown'"
else:
return type_to_name(path[1])
# The following functions interpret the data returned by the gopher # The following functions interpret the data returned by the gopher
# server according to the expected type, e.g. textfile or directory # server according to the expected type, e.g. textfile or directory
# Get a directory in the form of a list of entries
def get_directory(f): def get_directory(f):
import string """Get a directory in the form of a list of entries."""
list = [] import string
while 1: list = []
line = f.readline() while 1:
if not line: line = f.readline()
print '(Unexpected EOF from server)' if not line:
break print '(Unexpected EOF from server)'
if line[-2:] == CRLF: break
line = line[:-2] if line[-2:] == CRLF:
elif line[-1:] in CRLF: line = line[:-2]
line = line[:-1] elif line[-1:] in CRLF:
if line == '.': line = line[:-1]
break if line == '.':
if not line: break
print '(Empty line from server)' if not line:
continue print '(Empty line from server)'
gtype = line[0] continue
parts = string.splitfields(line[1:], TAB) gtype = line[0]
if len(parts) < 4: parts = string.splitfields(line[1:], TAB)
print '(Bad line from server:', `line`, ')' if len(parts) < 4:
continue print '(Bad line from server:', `line`, ')'
if len(parts) > 4: continue
if parts[4:] != ['+']: if len(parts) > 4:
print '(Extra info from server:', if parts[4:] != ['+']:
print parts[4:], ')' print '(Extra info from server:',
else: print parts[4:], ')'
parts.append('') else:
parts.insert(0, gtype) parts.append('')
list.append(parts) parts.insert(0, gtype)
return list list.append(parts)
return list
# Get a text file as a list of lines, with trailing CRLF stripped
def get_textfile(f): def get_textfile(f):
list = [] """Get a text file as a list of lines, with trailing CRLF stripped."""
get_alt_textfile(f, list.append) list = []
return list get_alt_textfile(f, list.append)
return list
# Get a text file and pass each line to a function, with trailing CRLF stripped
def get_alt_textfile(f, func): def get_alt_textfile(f, func):
while 1: """Get a text file and pass each line to a function, with trailing CRLF stripped."""
line = f.readline() while 1:
if not line: line = f.readline()
print '(Unexpected EOF from server)' if not line:
break print '(Unexpected EOF from server)'
if line[-2:] == CRLF: break
line = line[:-2] if line[-2:] == CRLF:
elif line[-1:] in CRLF: line = line[:-2]
line = line[:-1] elif line[-1:] in CRLF:
if line == '.': line = line[:-1]
break if line == '.':
if line[:2] == '..': break
line = line[1:] if line[:2] == '..':
func(line) line = line[1:]
func(line)
# Get a binary file as one solid data block
def get_binary(f): def get_binary(f):
data = f.read() """Get a binary file as one solid data block."""
return data data = f.read()
return data
# Get a binary file and pass each block to a function
def get_alt_binary(f, func, blocksize): def get_alt_binary(f, func, blocksize):
while 1: """Get a binary file and pass each block to a function."""
data = f.read(blocksize) while 1:
if not data: data = f.read(blocksize)
break if not data:
func(data) break
func(data)
# Trivial test program
def test(): def test():
import sys """Trivial test program."""
import getopt import sys
opts, args = getopt.getopt(sys.argv[1:], '') import getopt
selector = DEF_SELECTOR opts, args = getopt.getopt(sys.argv[1:], '')
type = selector[0] selector = DEF_SELECTOR
host = DEF_HOST type = selector[0]
port = DEF_PORT host = DEF_HOST
if args: port = DEF_PORT
host = args[0] if args:
args = args[1:] host = args[0]
if args: args = args[1:]
type = args[0] if args:
args = args[1:] type = args[0]
if len(type) > 1: args = args[1:]
type, selector = type[0], type if len(type) > 1:
else: type, selector = type[0], type
selector = '' else:
if args: selector = ''
selector = args[0] if args:
args = args[1:] selector = args[0]
query = '' args = args[1:]
if args: query = ''
query = args[0] if args:
args = args[1:] query = args[0]
if type == A_INDEX: args = args[1:]
f = send_query(selector, query, host) if type == A_INDEX:
else: f = send_query(selector, query, host)
f = send_selector(selector, host) else:
if type == A_TEXT: f = send_selector(selector, host)
list = get_textfile(f) if type == A_TEXT:
for item in list: print item list = get_textfile(f)
elif type in (A_MENU, A_INDEX): for item in list: print item
list = get_directory(f) elif type in (A_MENU, A_INDEX):
for item in list: print item list = get_directory(f)
else: for item in list: print item
data = get_binary(f) else:
print 'binary data:', len(data), 'bytes:', `data[:100]`[:40] data = get_binary(f)
print 'binary data:', len(data), 'bytes:', `data[:100]`[:40]
# Run the test when run as script # Run the test when run as script
if __name__ == '__main__': if __name__ == '__main__':
test() test()
"""This module implements a function that reads and writes a gzipped file.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import time import time
import string import string
import zlib import zlib
import struct import struct
import __builtin__ import __builtin__
# implements a python function that reads and writes a gzipped file
# the user of the file doesn't have to worry about the compression,
# but random access is not allowed
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16 FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
READ, WRITE = 1, 2 READ, WRITE = 1, 2
......
"""HTML character entity references."""
entitydefs = { entitydefs = {
'AElig': '\306', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1 'AElig': '\306', # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1
'Aacute': '\301', # latin capital letter A with acute, U+00C1 ISOlat1 'Aacute': '\301', # latin capital letter A with acute, U+00C1 ISOlat1
......
# Recognizing image files based on their first few bytes. """Recognize image file formats based on their first few bytes."""
#-------------------------# #-------------------------#
...@@ -6,25 +6,25 @@ ...@@ -6,25 +6,25 @@
#-------------------------# #-------------------------#
def what(file, h=None): def what(file, h=None):
if h is None: if h is None:
if type(file) == type(''): if type(file) == type(''):
f = open(file, 'rb') f = open(file, 'rb')
h = f.read(32) h = f.read(32)
else: else:
location = file.tell() location = file.tell()
h = file.read(32) h = file.read(32)
file.seek(location) file.seek(location)
f = None f = None
else: else:
f = None f = None
try: try:
for tf in tests: for tf in tests:
res = tf(h, f) res = tf(h, f)
if res: if res:
return res return res
finally: finally:
if f: f.close() if f: f.close()
return None return None
#---------------------------------# #---------------------------------#
...@@ -34,81 +34,81 @@ def what(file, h=None): ...@@ -34,81 +34,81 @@ def what(file, h=None):
tests = [] tests = []
def test_rgb(h, f): def test_rgb(h, f):
# SGI image library """SGI image library"""
if h[:2] == '\001\332': if h[:2] == '\001\332':
return 'rgb' return 'rgb'
tests.append(test_rgb) tests.append(test_rgb)
def test_gif(h, f): def test_gif(h, f):
# GIF ('87 and '89 variants) """GIF ('87 and '89 variants)"""
if h[:6] in ('GIF87a', 'GIF89a'): if h[:6] in ('GIF87a', 'GIF89a'):
return 'gif' return 'gif'
tests.append(test_gif) tests.append(test_gif)
def test_pbm(h, f): def test_pbm(h, f):
# PBM (portable bitmap) """PBM (portable bitmap)"""
if len(h) >= 3 and \ if len(h) >= 3 and \
h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r': h[0] == 'P' and h[1] in '14' and h[2] in ' \t\n\r':
return 'pbm' return 'pbm'
tests.append(test_pbm) tests.append(test_pbm)
def test_pgm(h, f): def test_pgm(h, f):
# PGM (portable graymap) """PGM (portable graymap)"""
if len(h) >= 3 and \ if len(h) >= 3 and \
h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r': h[0] == 'P' and h[1] in '25' and h[2] in ' \t\n\r':
return 'pgm' return 'pgm'
tests.append(test_pgm) tests.append(test_pgm)
def test_ppm(h, f): def test_ppm(h, f):
# PPM (portable pixmap) """PPM (portable pixmap)"""
if len(h) >= 3 and \ if len(h) >= 3 and \
h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r': h[0] == 'P' and h[1] in '36' and h[2] in ' \t\n\r':
return 'ppm' return 'ppm'
tests.append(test_ppm) tests.append(test_ppm)
def test_tiff(h, f): def test_tiff(h, f):
# TIFF (can be in Motorola or Intel byte order) """TIFF (can be in Motorola or Intel byte order)"""
if h[:2] in ('MM', 'II'): if h[:2] in ('MM', 'II'):
return 'tiff' return 'tiff'
tests.append(test_tiff) tests.append(test_tiff)
def test_rast(h, f): def test_rast(h, f):
# Sun raster file """Sun raster file"""
if h[:4] == '\x59\xA6\x6A\x95': if h[:4] == '\x59\xA6\x6A\x95':
return 'rast' return 'rast'
tests.append(test_rast) tests.append(test_rast)
def test_xbm(h, f): def test_xbm(h, f):
# X bitmap (X10 or X11) """X bitmap (X10 or X11)"""
s = '#define ' s = '#define '
if h[:len(s)] == s: if h[:len(s)] == s:
return 'xbm' return 'xbm'
tests.append(test_xbm) tests.append(test_xbm)
def test_jpeg(h, f): def test_jpeg(h, f):
# JPEG data in JFIF format """JPEG data in JFIF format"""
if h[6:10] == 'JFIF': if h[6:10] == 'JFIF':
return 'jpeg' return 'jpeg'
tests.append(test_jpeg) tests.append(test_jpeg)
def test_bmp(h, f): def test_bmp(h, f):
if h[:2] == 'BM': if h[:2] == 'BM':
return 'bmp' return 'bmp'
tests.append(test_bmp) tests.append(test_bmp)
def test_png(h, f): def test_png(h, f):
if h[:8] == "\211PNG\r\n\032\n": if h[:8] == "\211PNG\r\n\032\n":
return 'png' return 'png'
tests.append(test_png) tests.append(test_png)
...@@ -117,37 +117,37 @@ tests.append(test_png) ...@@ -117,37 +117,37 @@ tests.append(test_png)
#--------------------# #--------------------#
def test(): def test():
import sys import sys
recursive = 0 recursive = 0
if sys.argv[1:] and sys.argv[1] == '-r': if sys.argv[1:] and sys.argv[1] == '-r':
del sys.argv[1:2] del sys.argv[1:2]
recursive = 1 recursive = 1
try: try:
if sys.argv[1:]: if sys.argv[1:]:
testall(sys.argv[1:], recursive, 1) testall(sys.argv[1:], recursive, 1)
else: else:
testall(['.'], recursive, 1) testall(['.'], recursive, 1)
except KeyboardInterrupt: except KeyboardInterrupt:
sys.stderr.write('\n[Interrupted]\n') sys.stderr.write('\n[Interrupted]\n')
sys.exit(1) sys.exit(1)
def testall(list, recursive, toplevel): def testall(list, recursive, toplevel):
import sys import sys
import os import os
for filename in list: for filename in list:
if os.path.isdir(filename): if os.path.isdir(filename):
print filename + '/:', print filename + '/:',
if recursive or toplevel: if recursive or toplevel:
print 'recursing down:' print 'recursing down:'
import glob import glob
names = glob.glob(os.path.join(filename, '*')) names = glob.glob(os.path.join(filename, '*'))
testall(names, recursive, 0) testall(names, recursive, 0)
else: else:
print '*** directory (use -r) ***' print '*** directory (use -r) ***'
else: else:
print filename + ':', print filename + ':',
sys.stdout.flush() sys.stdout.flush()
try: try:
print what(filename) print what(filename)
except IOError: except IOError:
print '*** not found ***' print '*** not found ***'
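A quick sketch of how the what() function above is typically used; the file name is hypothetical, and unrecognized formats return None:

    >>> import imghdr
    >>> imghdr.what('logo.gif')       # inspects the first 32 bytes of the file
    'gif'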
#! /usr/bin/env python #! /usr/bin/env python
#
# Keywords (from "graminit.c") """Keywords (from "graminit.c")
#
# This file is automatically generated; please don't muck it up! This file is automatically generated; please don't muck it up!
#
# To update the symbols in this file, 'cd' to the top directory of To update the symbols in this file, 'cd' to the top directory of
# the python source tree after building the interpreter and run: the python source tree after building the interpreter and run:
#
# python Lib/keyword.py python Lib/keyword.py
"""
kwlist = [ kwlist = [
#--start keywords-- #--start keywords--
......
# Cache lines from files. """Cache lines from files.
# This is intended to read lines from modules imported -- hence if a filename
# is not found, it will look down the module search path for a file by This is intended to read lines from modules imported -- hence if a filename
# that name. is not found, it will look down the module search path for a file by
that name.
"""
import sys import sys
import os import os
from stat import * from stat import *
def getline(filename, lineno): def getline(filename, lineno):
lines = getlines(filename) lines = getlines(filename)
if 1 <= lineno <= len(lines): if 1 <= lineno <= len(lines):
return lines[lineno-1] return lines[lineno-1]
else: else:
return '' return ''
# The cache # The cache
...@@ -20,71 +22,71 @@ def getline(filename, lineno): ...@@ -20,71 +22,71 @@ def getline(filename, lineno):
cache = {} # The cache cache = {} # The cache
# Clear the cache entirely
def clearcache(): def clearcache():
global cache """Clear the cache entirely."""
cache = {}
global cache
cache = {}
# Get the lines for a file from the cache.
# Update the cache if it doesn't contain an entry for this file already.
def getlines(filename): def getlines(filename):
if cache.has_key(filename): """Get the lines for a file from the cache.
return cache[filename][2] Update the cache if it doesn't contain an entry for this file already."""
else:
return updatecache(filename)
if cache.has_key(filename):
return cache[filename][2]
else:
return updatecache(filename)
# Discard cache entries that are out of date.
# (This is not checked upon each call!)
def checkcache(): def checkcache():
for filename in cache.keys(): """Discard cache entries that are out of date.
size, mtime, lines, fullname = cache[filename] (This is not checked upon each call!)"""
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size <> stat[ST_SIZE] or mtime <> stat[ST_MTIME]:
del cache[filename]
for filename in cache.keys():
size, mtime, lines, fullname = cache[filename]
try:
stat = os.stat(fullname)
except os.error:
del cache[filename]
continue
if size <> stat[ST_SIZE] or mtime <> stat[ST_MTIME]:
del cache[filename]
# Update a cache entry and return its list of lines.
# If something's wrong, print a message, discard the cache entry,
# and return an empty list.
def updatecache(filename): def updatecache(filename):
if cache.has_key(filename): """Update a cache entry and return its list of lines.
del cache[filename] If something's wrong, print a message, discard the cache entry,
if not filename or filename[0] + filename[-1] == '<>': and return an empty list."""
return []
fullname = filename if cache.has_key(filename):
try: del cache[filename]
stat = os.stat(fullname) if not filename or filename[0] + filename[-1] == '<>':
except os.error, msg: return []
# Try looking through the module search path fullname = filename
basename = os.path.split(filename)[1] try:
for dirname in sys.path: stat = os.stat(fullname)
fullname = os.path.join(dirname, basename) except os.error, msg:
try: # Try looking through the module search path
stat = os.stat(fullname) basename = os.path.split(filename)[1]
break for dirname in sys.path:
except os.error: fullname = os.path.join(dirname, basename)
pass try:
else: stat = os.stat(fullname)
# No luck break
## print '*** Cannot stat', filename, ':', msg except os.error:
return [] pass
try: else:
fp = open(fullname, 'r') # No luck
lines = fp.readlines() ## print '*** Cannot stat', filename, ':', msg
fp.close() return []
except IOError, msg: try:
## print '*** Cannot open', fullname, ':', msg fp = open(fullname, 'r')
return [] lines = fp.readlines()
size, mtime = stat[ST_SIZE], stat[ST_MTIME] fp.close()
cache[filename] = size, mtime, lines, fullname except IOError, msg:
return lines ## print '*** Cannot open', fullname, ':', msg
return []
size, mtime = stat[ST_SIZE], stat[ST_MTIME]
cache[filename] = size, mtime, lines, fullname
return lines
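Typical use of the getline() interface documented above (file name and contents are hypothetical; the module search path is consulted when the name is not found directly):

    >>> import linecache
    >>> linecache.getline('spam.py', 1)     # returns the line with its trailing newline
    '#!/usr/bin/env python\n'
    >>> linecache.getline('spam.py', 9999)  # out-of-range line numbers give ''
    ''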
# module 'macpath' -- pathname (or -related) operations for the Macintosh """Pathname and path-related operations for the Macintosh."""
import string import string
import os import os
...@@ -10,77 +10,77 @@ from stat import * ...@@ -10,77 +10,77 @@ from stat import *
normcase = string.lower normcase = string.lower
# Return true if a path is absolute.
# On the Mac, relative paths begin with a colon,
# but as a special case, paths with no colons at all are also relative.
# Anything else is absolute (the string up to the first colon is the
# volume name).
def isabs(s): def isabs(s):
return ':' in s and s[0] <> ':' """Return true if a path is absolute.
On the Mac, relative paths begin with a colon,
but as a special case, paths with no colons at all are also relative.
Anything else is absolute (the string up to the first colon is the
volume name)."""
return ':' in s and s[0] <> ':'
def join(s, *p): def join(s, *p):
path = s path = s
for t in p: for t in p:
if (not s) or isabs(t): if (not s) or isabs(t):
path = t path = t
continue continue
if t[:1] == ':': if t[:1] == ':':
t = t[1:] t = t[1:]
if ':' not in path: if ':' not in path:
path = ':' + path path = ':' + path
if path[-1:] <> ':': if path[-1:] <> ':':
path = path + ':' path = path + ':'
path = path + t path = path + t
return path return path
# Split a pathname in two parts: the directory leading up to the final bit,
# and the basename (the filename, without colons, in that directory).
# The result (s, t) is such that join(s, t) yields the original argument.
def split(s): def split(s):
if ':' not in s: return '', s """Split a pathname into two parts: the directory leading up to the final
colon = 0 bit, and the basename (the filename, without colons, in that directory).
for i in range(len(s)): The result (s, t) is such that join(s, t) yields the original argument."""
if s[i] == ':': colon = i+1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
if ':' not in s: return '', s
colon = 0
for i in range(len(s)):
if s[i] == ':': colon = i+1
path, file = s[:colon-1], s[colon:]
if path and not ':' in path:
path = path + ':'
return path, file
# Split a path in root and extension.
# The extension is everything starting at the last dot in the last
# pathname component; the root is everything before that.
# It is always true that root + ext == p.
def splitext(p): def splitext(p):
root, ext = '', '' """Split a path into root and extension.
for c in p: The extension is everything starting at the last dot in the last
if c == ':': pathname component; the root is everything before that.
root, ext = root + ext + c, '' It is always true that root + ext == p."""
elif c == '.':
if ext: root, ext = '', ''
root, ext = root + ext, c for c in p:
else: if c == ':':
ext = c root, ext = root + ext + c, ''
elif ext: elif c == '.':
ext = ext + c if ext:
else: root, ext = root + ext, c
root = root + c else:
return root, ext ext = c
elif ext:
ext = ext + c
# Split a pathname into a drive specification and the rest of the else:
# path. Useful on DOS/Windows/NT; on the Mac, the drive is always root = root + c
# empty (don't use the volume name -- it doesn't have the same return root, ext
# syntactic and semantic oddities as DOS drive letters, such as there
# being a separate current directory per drive).
def splitdrive(p): def splitdrive(p):
return '', p """Split a pathname into a drive specification and the rest of the
path. Useful on DOS/Windows/NT; on the Mac, the drive is always
empty (don't use the volume name -- it doesn't have the same
syntactic and semantic oddities as DOS drive letters, such as there
being a separate current directory per drive)."""
return '', p
# Short interfaces to split() # Short interfaces to split()
...@@ -89,14 +89,14 @@ def dirname(s): return split(s)[0] ...@@ -89,14 +89,14 @@ def dirname(s): return split(s)[0]
def basename(s): return split(s)[1] def basename(s): return split(s)[1]
# Return true if the pathname refers to an existing directory.
def isdir(s): def isdir(s):
try: """Return true if the pathname refers to an existing directory."""
st = os.stat(s)
except os.error: try:
return 0 st = os.stat(s)
return S_ISDIR(st[ST_MODE]) except os.error:
return 0
return S_ISDIR(st[ST_MODE])
# Get size, mtime, atime of files. # Get size, mtime, atime of files.
...@@ -117,105 +117,103 @@ def getatime(filename): ...@@ -117,105 +117,103 @@ def getatime(filename):
return st[ST_MTIME] return st[ST_MTIME]
# Return true if the pathname refers to a symbolic link.
# (Always false on the Mac, until we understand Aliases.)
def islink(s): def islink(s):
return 0 """Return true if the pathname refers to a symbolic link.
Always false on the Mac, until we understand Aliases.)"""
return 0
# Return true if the pathname refers to an existing regular file.
def isfile(s): def isfile(s):
try: """Return true if the pathname refers to an existing regular file."""
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
try:
st = os.stat(s)
except os.error:
return 0
return S_ISREG(st[ST_MODE])
# Return true if the pathname refers to an existing file or directory.
def exists(s): def exists(s):
try: """Return true if the pathname refers to an existing file or directory."""
st = os.stat(s)
except os.error: try:
return 0 st = os.stat(s)
return 1 except os.error:
return 0
# return 1
# dummy expandvars to retain interface-compatability with other
# operating systems.
def expandvars(path): def expandvars(path):
return path """Dummy to retain interface-compatibility with other operating systems."""
return path
#
# dummy expanduser to retain interface-compatability with other
# operating systems.
def expanduser(path):
return path
# Normalize a pathname: get rid of '::' sequences by backing up, def expanduser(path):
# e.g., 'foo:bar::bletch' becomes 'foo:bletch'. """Dummy to retain interface-compatibility with other operating systems."""
# Raise the exception norm_error below if backing up is impossible, return path
# e.g., for '::foo'.
# XXX The Unix version doesn't raise an exception but simply
# returns an unnormalized path. Should do so here too.
norm_error = 'macpath.norm_error: path cannot be normalized' norm_error = 'macpath.norm_error: path cannot be normalized'
def normpath(s): def normpath(s):
import string """Normalize a pathname: get rid of '::' sequences by backing up,
if ':' not in s: e.g., 'foo:bar::bletch' becomes 'foo:bletch'.
return ':' + s Raise the exception norm_error below if backing up is impossible,
f = string.splitfields(s, ':') e.g., for '::foo'."""
pre = [] # XXX The Unix version doesn't raise an exception but simply
post = [] # returns an unnormalized path. Should do so here too.
if not f[0]:
pre = f[:1] import string
f = f[1:] if ':' not in s:
if not f[len(f)-1]: return ':' + s
post = f[-1:] f = string.splitfields(s, ':')
f = f[:-1] pre = []
res = [] post = []
for seg in f: if not f[0]:
if seg: pre = f[:1]
res.append(seg) f = f[1:]
else: if not f[len(f)-1]:
if not res: raise norm_error, 'path starts with ::' post = f[-1:]
del res[len(res)-1] f = f[:-1]
if not (pre or res): res = []
raise norm_error, 'path starts with volume::' for seg in f:
if pre: res = pre + res if seg:
if post: res = res + post res.append(seg)
s = res[0] else:
for seg in res[1:]: if not res: raise norm_error, 'path starts with ::'
s = s + ':' + seg del res[len(res)-1]
return s if not (pre or res):
raise norm_error, 'path starts with volume::'
if pre: res = pre + res
# Directory tree walk. if post: res = res + post
# For each directory under top (including top itself), s = res[0]
# func(arg, dirname, filenames) is called, where for seg in res[1:]:
# dirname is the name of the directory and filenames is the list s = s + ':' + seg
# of files (and subdirectories etc.) in the directory. return s
# The func may modify the filenames list, to implement a filter,
# or to impose a different order of visiting.
def walk(top, func, arg): def walk(top, func, arg):
try: """Directory tree walk.
names = os.listdir(top) For each directory under top (including top itself),
except os.error: func(arg, dirname, filenames) is called, where
return dirname is the name of the directory and filenames is the list
func(arg, top, names) of files (and subdirectories etc.) in the directory.
for name in names: The func may modify the filenames list, to implement a filter,
name = join(top, name) or to impose a different order of visiting."""
if isdir(name):
walk(name, func, arg) try:
names = os.listdir(top)
except os.error:
# Return an absolute path. return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name):
walk(name, func, arg)
def abspath(path): def abspath(path):
"""Return an absolute path."""
if not isabs(path): if not isabs(path):
path = join(os.getcwd(), path) path = join(os.getcwd(), path)
return normpath(path) return normpath(path)
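The split()/splitext() semantics described in the new macpath docstrings, as a small interactive sketch:

    >>> import macpath
    >>> macpath.split('foo:bar:baz.txt')    # directory part, then basename
    ('foo:bar', 'baz.txt')
    >>> macpath.splitext('spam.tar.gz')     # extension starts at the last dot
    ('spam.tar', '.gz')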
...@@ -9,8 +9,11 @@ import string ...@@ -9,8 +9,11 @@ import string
def getcaps(): def getcaps():
"""Return a dictionary containing the mailcap database. """Return a dictionary containing the mailcap database.
The dictionary maps a MIME type (in all lowercase, The dictionary maps a MIME type (in all lowercase, e.g. 'text/plain')
e.g. 'text/plain') to a list of corresponding mailcap entries. to a list of dictionaries corresponding to mailcap entries. The list
collects all the entries for that MIME type from all available mailcap
files. Each dictionary contains key-value pairs for that MIME type,
where the viewing command is stored with the key "view".
""" """
caps = {} caps = {}
...@@ -48,6 +51,14 @@ def listmailcapfiles(): ...@@ -48,6 +51,14 @@ def listmailcapfiles():
# Part 2: the parser. # Part 2: the parser.
def readmailcapfile(fp): def readmailcapfile(fp):
"""Read a mailcap file and return a dictionary keyed by MIME type.
Each MIME type is mapped to an entry consisting of a list of
dictionaries; the list will contain more than one such dictionary
if a given MIME type appears more than once in the mailcap file.
Each dictionary contains key-value pairs for that MIME type, where
the viewing command is stored with the key "view".
"""
caps = {} caps = {}
while 1: while 1:
line = fp.readline() line = fp.readline()
...@@ -78,6 +89,11 @@ def readmailcapfile(fp): ...@@ -78,6 +89,11 @@ def readmailcapfile(fp):
return caps return caps
def parseline(line): def parseline(line):
"""Parse one entry in a mailcap file and return a dictionary.
The viewing command is stored as the value with the key "view",
and the rest of the fields produce key-value pairs in the dict.
"""
fields = [] fields = []
i, n = 0, len(line) i, n = 0, len(line)
while i < n: while i < n:
...@@ -104,6 +120,7 @@ def parseline(line): ...@@ -104,6 +120,7 @@ def parseline(line):
return key, fields return key, fields
def parsefield(line, i, n): def parsefield(line, i, n):
"""Separate one key-value pair in a mailcap entry."""
start = i start = i
while i < n: while i < n:
c = line[i] c = line[i]
......
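The dictionary layout described in the expanded getcaps() docstring, sketched with a hypothetical mailcap entry:

    >>> import mailcap
    >>> caps = mailcap.getcaps()
    >>> caps['image/gif']               # hypothetical entry from the user's mailcap files
    [{'view': 'xv %s'}]
    >>> caps['image/gif'][0]['view']    # the viewing command lives under the key "view"
    'xv %s'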
# Various tools used by MIME-reading or MIME-writing programs. """Various tools used by MIME-reading or MIME-writing programs."""
import os import os
...@@ -7,10 +7,9 @@ import string ...@@ -7,10 +7,9 @@ import string
import tempfile import tempfile
# A derived class of rfc822.Message that knows about MIME headers and
# contains some hooks for decoding encoded and multipart messages.
class Message(rfc822.Message): class Message(rfc822.Message):
"""A derived class of rfc822.Message that knows about MIME headers and
contains some hooks for decoding encoded and multipart messages."""
def __init__(self, fp, seekable = 1): def __init__(self, fp, seekable = 1):
rfc822.Message.__init__(self, fp, seekable) rfc822.Message.__init__(self, fp, seekable)
...@@ -96,17 +95,17 @@ class Message(rfc822.Message): ...@@ -96,17 +95,17 @@ class Message(rfc822.Message):
# ----------------- # -----------------
# Return a random string usable as a multipart boundary.
# The method used is so that it is *very* unlikely that the same
# string of characters will every occur again in the Universe,
# so the caller needn't check the data it is packing for the
# occurrence of the boundary.
#
# The boundary contains dots so you have to quote it in the header.
_prefix = None _prefix = None
def choose_boundary(): def choose_boundary():
"""Return a random string usable as a multipart boundary.
The method used is so that it is *very* unlikely that the same
string of characters will every occur again in the Universe,
so the caller needn't check the data it is packing for the
occurrence of the boundary.
The boundary contains dots so you have to quote it in the header."""
global _prefix global _prefix
import time import time
import random import random
...@@ -131,6 +130,7 @@ def choose_boundary(): ...@@ -131,6 +130,7 @@ def choose_boundary():
# Subroutines for decoding some common content-transfer-types # Subroutines for decoding some common content-transfer-types
def decode(input, output, encoding): def decode(input, output, encoding):
"""Decode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64': if encoding == 'base64':
import base64 import base64
return base64.decode(input, output) return base64.decode(input, output)
...@@ -147,6 +147,7 @@ def decode(input, output, encoding): ...@@ -147,6 +147,7 @@ def decode(input, output, encoding):
'unknown Content-Transfer-Encoding: %s' % encoding 'unknown Content-Transfer-Encoding: %s' % encoding
def encode(input, output, encoding): def encode(input, output, encoding):
"""Encode common content-transfer-encodings (base64, quopri, uuencode)."""
if encoding == 'base64': if encoding == 'base64':
import base64 import base64
return base64.encode(input, output) return base64.encode(input, output)
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
'''Mimification and unmimification of mail messages. '''Mimification and unmimification of mail messages.
decode quoted-printable parts of a mail message or encode using Decode quoted-printable parts of a mail message or encode using
quoted-printable. quoted-printable.
Usage: Usage:
...@@ -39,9 +39,8 @@ mime_head = re.compile('=\\?iso-8859-1\\?q\\?([^? \t\n]+)\\?=', re.I) ...@@ -39,9 +39,8 @@ mime_head = re.compile('=\\?iso-8859-1\\?q\\?([^? \t\n]+)\\?=', re.I)
repl = re.compile('^subject:\\s+re: ', re.I) repl = re.compile('^subject:\\s+re: ', re.I)
class File: class File:
'''A simple fake file object that knows about limited """A simple fake file object that knows about limited read-ahead and
read-ahead and boundaries. boundaries. The only supported method is readline()."""
The only supported method is readline().'''
def __init__(self, file, boundary): def __init__(self, file, boundary):
self.file = file self.file = file
...@@ -87,7 +86,7 @@ class HeaderFile: ...@@ -87,7 +86,7 @@ class HeaderFile:
self.peek = None self.peek = None
def mime_decode(line): def mime_decode(line):
'''Decode a single line of quoted-printable text to 8bit.''' """Decode a single line of quoted-printable text to 8bit."""
newline = '' newline = ''
pos = 0 pos = 0
while 1: while 1:
...@@ -100,7 +99,7 @@ def mime_decode(line): ...@@ -100,7 +99,7 @@ def mime_decode(line):
return newline + line[pos:] return newline + line[pos:]
def mime_decode_header(line): def mime_decode_header(line):
'''Decode a header line to 8bit.''' """Decode a header line to 8bit."""
newline = '' newline = ''
pos = 0 pos = 0
while 1: while 1:
...@@ -115,7 +114,7 @@ def mime_decode_header(line): ...@@ -115,7 +114,7 @@ def mime_decode_header(line):
return newline + line[pos:] return newline + line[pos:]
def unmimify_part(ifile, ofile, decode_base64 = 0): def unmimify_part(ifile, ofile, decode_base64 = 0):
'''Convert a quoted-printable part of a MIME mail message to 8bit.''' """Convert a quoted-printable part of a MIME mail message to 8bit."""
multipart = None multipart = None
quoted_printable = 0 quoted_printable = 0
is_base64 = 0 is_base64 = 0
...@@ -200,7 +199,7 @@ def unmimify_part(ifile, ofile, decode_base64 = 0): ...@@ -200,7 +199,7 @@ def unmimify_part(ifile, ofile, decode_base64 = 0):
ofile.write(pref + line) ofile.write(pref + line)
def unmimify(infile, outfile, decode_base64 = 0): def unmimify(infile, outfile, decode_base64 = 0):
'''Convert quoted-printable parts of a MIME mail message to 8bit.''' """Convert quoted-printable parts of a MIME mail message to 8bit."""
if type(infile) == type(''): if type(infile) == type(''):
ifile = open(infile) ifile = open(infile)
if type(outfile) == type('') and infile == outfile: if type(outfile) == type('') and infile == outfile:
...@@ -221,8 +220,8 @@ mime_char = re.compile('[=\177-\377]') # quote these chars in body ...@@ -221,8 +220,8 @@ mime_char = re.compile('[=\177-\377]') # quote these chars in body
mime_header_char = re.compile('[=?\177-\377]') # quote these in header mime_header_char = re.compile('[=?\177-\377]') # quote these in header
def mime_encode(line, header): def mime_encode(line, header):
'''Code a single line as quoted-printable. """Code a single line as quoted-printable.
If header is set, quote some extra characters.''' If header is set, quote some extra characters."""
if header: if header:
reg = mime_header_char reg = mime_header_char
else: else:
...@@ -255,7 +254,7 @@ def mime_encode(line, header): ...@@ -255,7 +254,7 @@ def mime_encode(line, header):
mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)([ \t)]|\n)') mime_header = re.compile('([ \t(]|^)([-a-zA-Z0-9_+]*[\177-\377][-a-zA-Z0-9_+\177-\377]*)([ \t)]|\n)')
def mime_encode_header(line): def mime_encode_header(line):
'''Code a single header line as quoted-printable.''' """Code a single header line as quoted-printable."""
newline = '' newline = ''
pos = 0 pos = 0
while 1: while 1:
...@@ -273,7 +272,7 @@ cte = re.compile('^content-transfer-encoding:', re.I) ...@@ -273,7 +272,7 @@ cte = re.compile('^content-transfer-encoding:', re.I)
iso_char = re.compile('[\177-\377]') iso_char = re.compile('[\177-\377]')
def mimify_part(ifile, ofile, is_mime): def mimify_part(ifile, ofile, is_mime):
'''Convert an 8bit part of a MIME mail message to quoted-printable.''' """Convert an 8bit part of a MIME mail message to quoted-printable."""
has_cte = is_qp = is_base64 = 0 has_cte = is_qp = is_base64 = 0
multipart = None multipart = None
must_quote_body = must_quote_header = has_iso_chars = 0 must_quote_body = must_quote_header = has_iso_chars = 0
...@@ -408,7 +407,7 @@ def mimify_part(ifile, ofile, is_mime): ...@@ -408,7 +407,7 @@ def mimify_part(ifile, ofile, is_mime):
ofile.write(line) ofile.write(line)
def mimify(infile, outfile): def mimify(infile, outfile):
'''Convert 8bit parts of a MIME mail message to quoted-printable.''' """Convert 8bit parts of a MIME mail message to quoted-printable."""
if type(infile) == type(''): if type(infile) == type(''):
ifile = open(infile) ifile = open(infile)
if type(outfile) == type('') and infile == outfile: if type(outfile) == type('') and infile == outfile:
......
# A class that makes each part of a multipart message "feel" like an """A readline()-style interface to the parts of a multipart message.
# ordinary file, as long as you use fp.readline(). Allows recursive
# use, for nested multipart messages. Probably best used together The MultiFile class makes each part of a multipart message "feel" like
# with module mimetools. an ordinary file, as long as you use fp.readline(). Allows recursive
# use, for nested multipart messages. Probably best used together
# Suggested use: with module mimetools.
#
# real_fp = open(...) Suggested use:
# fp = MultiFile(real_fp)
# real_fp = open(...)
# "read some lines from fp" fp = MultiFile(real_fp)
# fp.push(separator)
# while 1: "read some lines from fp"
# "read lines from fp until it returns an empty string" (A) fp.push(separator)
# if not fp.next(): break while 1:
# fp.pop() "read lines from fp until it returns an empty string" (A)
# "read remaining lines from fp until it returns an empty string" if not fp.next(): break
# fp.pop()
# The latter sequence may be used recursively at (A). "read remaining lines from fp until it returns an empty string"
# It is also allowed to use multiple push()...pop() sequences.
# The latter sequence may be used recursively at (A).
# If seekable is given as 0, the class code will not do the bookeeping It is also allowed to use multiple push()...pop() sequences.
# it normally attempts in order to make seeks relative to the beginning of the
# current file part. This may be useful when using MultiFile with a non- If seekable is given as 0, the class code will not do the bookeeping
# seekable stream object. it normally attempts in order to make seeks relative to the beginning of the
current file part. This may be useful when using MultiFile with a non-
seekable stream object.
"""
import sys import sys
import string import string
...@@ -30,9 +33,9 @@ import string ...@@ -30,9 +33,9 @@ import string
Error = 'multifile.Error' Error = 'multifile.Error'
class MultiFile: class MultiFile:
#
seekable = 0 seekable = 0
#
def __init__(self, fp, seekable=1): def __init__(self, fp, seekable=1):
self.fp = fp self.fp = fp
self.stack = [] # Grows down self.stack = [] # Grows down
...@@ -42,12 +45,12 @@ class MultiFile: ...@@ -42,12 +45,12 @@ class MultiFile:
self.seekable = 1 self.seekable = 1
self.start = self.fp.tell() self.start = self.fp.tell()
self.posstack = [] # Grows down self.posstack = [] # Grows down
#
def tell(self): def tell(self):
if self.level > 0: if self.level > 0:
return self.lastpos return self.lastpos
return self.fp.tell() - self.start return self.fp.tell() - self.start
#
def seek(self, pos, whence=0): def seek(self, pos, whence=0):
here = self.tell() here = self.tell()
if whence: if whence:
...@@ -64,7 +67,7 @@ class MultiFile: ...@@ -64,7 +67,7 @@ class MultiFile:
self.fp.seek(pos + self.start) self.fp.seek(pos + self.start)
self.level = 0 self.level = 0
self.last = 0 self.last = 0
#
def readline(self): def readline(self):
if self.level > 0: if self.level > 0:
return '' return ''
...@@ -105,7 +108,7 @@ class MultiFile: ...@@ -105,7 +108,7 @@ class MultiFile:
if self.level > 1: if self.level > 1:
raise Error,'Missing endmarker in MultiFile.readline()' raise Error,'Missing endmarker in MultiFile.readline()'
return '' return ''
#
def readlines(self): def readlines(self):
list = [] list = []
while 1: while 1:
...@@ -113,10 +116,10 @@ class MultiFile: ...@@ -113,10 +116,10 @@ class MultiFile:
if not line: break if not line: break
list.append(line) list.append(line)
return list return list
#
def read(self): # Note: no size argument -- read until EOF only! def read(self): # Note: no size argument -- read until EOF only!
return string.joinfields(self.readlines(), '') return string.joinfields(self.readlines(), '')
#
def next(self): def next(self):
while self.readline(): pass while self.readline(): pass
if self.level > 1 or self.last: if self.level > 1 or self.last:
...@@ -126,7 +129,7 @@ class MultiFile: ...@@ -126,7 +129,7 @@ class MultiFile:
if self.seekable: if self.seekable:
self.start = self.fp.tell() self.start = self.fp.tell()
return 1 return 1
#
def push(self, sep): def push(self, sep):
if self.level > 0: if self.level > 0:
raise Error, 'bad MultiFile.push() call' raise Error, 'bad MultiFile.push() call'
...@@ -134,7 +137,7 @@ class MultiFile: ...@@ -134,7 +137,7 @@ class MultiFile:
if self.seekable: if self.seekable:
self.posstack.insert(0, self.start) self.posstack.insert(0, self.start)
self.start = self.fp.tell() self.start = self.fp.tell()
#
def pop(self): def pop(self):
if self.stack == []: if self.stack == []:
raise Error, 'bad MultiFile.pop() call' raise Error, 'bad MultiFile.pop() call'
...@@ -149,12 +152,12 @@ class MultiFile: ...@@ -149,12 +152,12 @@ class MultiFile:
del self.posstack[0] del self.posstack[0]
if self.level > 0: if self.level > 0:
self.lastpos = abslastpos - self.start self.lastpos = abslastpos - self.start
#
def is_data(self, line): def is_data(self, line):
return line[:2] <> '--' return line[:2] <> '--'
#
def section_divider(self, str): def section_divider(self, str):
return "--" + str return "--" + str
#
def end_marker(self, str): def end_marker(self, str):
return "--" + str + "--" return "--" + str + "--"
# Mutual exclusion -- for use with module sched """Mutual exclusion -- for use with module sched
A mutex has two pieces of state -- a 'locked' bit and a queue.
When the mutex is not locked, the queue is empty.
Otherwise, the queue contains 0 or more (function, argument) pairs
representing functions (or methods) waiting to acquire the lock.
When the mutex is unlocked while the queue is not empty,
the first queue entry is removed and its function(argument) pair called,
implying it now has the lock.
Of course, no multi-threading is implied -- hence the funny interface
for lock, where a function is called once the lock is aquired.
"""
# A mutex has two pieces of state -- a 'locked' bit and a queue.
# When the mutex is not locked, the queue is empty.
# Otherwise, the queue contains 0 or more (function, argument) pairs
# representing functions (or methods) waiting to acquire the lock.
# When the mutex is unlocked while the queue is not empty,
# the first queue entry is removed and its function(argument) pair called,
# implying it now has the lock.
#
# Of course, no multi-threading is implied -- hence the funny interface
# for lock, where a function is called once the lock is aquired.
#
class mutex: class mutex:
#
# Create a new mutex -- initially unlocked
#
def __init__(self): def __init__(self):
"""Create a new mutex -- initially unlocked."""
self.locked = 0 self.locked = 0
self.queue = [] self.queue = []
#
# Test the locked bit of the mutex
#
def test(self): def test(self):
"""Test the locked bit of the mutex."""
return self.locked return self.locked
#
# Atomic test-and-set -- grab the lock if it is not set,
# return true if it succeeded
#
def testandset(self): def testandset(self):
"""Atomic test-and-set -- grab the lock if it is not set,
return true if it succeeded."""
if not self.locked: if not self.locked:
self.locked = 1 self.locked = 1
return 1 return 1
else: else:
return 0 return 0
#
# Lock a mutex, call the function with supplied argument
# when it is acquired.
# If the mutex is already locked, place function and argument
# in the queue.
#
def lock(self, function, argument): def lock(self, function, argument):
"""Lock a mutex, call the function with supplied argument
when it is acquired. If the mutex is already locked, place
function and argument in the queue."""
if self.testandset(): if self.testandset():
function(argument) function(argument)
else: else:
self.queue.append((function, argument)) self.queue.append((function, argument))
#
# Unlock a mutex. If the queue is not empty, call the next
# function with its argument.
#
def unlock(self): def unlock(self):
"""Unlock a mutex. If the queue is not empty, call the next
function with its argument."""
if self.queue: if self.queue:
function, argument = self.queue[0] function, argument = self.queue[0]
del self.queue[0] del self.queue[0]
function(argument) function(argument)
else: else:
self.locked = 0 self.locked = 0
#
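A minimal sketch of the lock()/unlock() protocol the mutex docstrings above describe (assuming the stdlib mutex module; the job names are illustrative):

    from mutex import mutex

    m = mutex()
    log = []

    def job(name):
        log.append(name)

    m.lock(job, 'first')    # lock was free: job('first') runs immediately
    m.lock(job, 'second')   # lock is held: (job, 'second') is queued
    m.unlock()              # dequeues and runs job('second'); the lock stays held
    m.unlock()              # queue empty: the lock is finally released
    # log == ['first', 'second']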
"""An object-oriented interface to .netrc files."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998 # Module and documentation by Eric S. Raymond, 21 Dec 1998
import os, shlex import os, shlex
...@@ -63,7 +65,7 @@ class netrc: ...@@ -63,7 +65,7 @@ class netrc:
raise SyntaxError, "bad follower token %s, file %s, line %d"%(tt,file,lexer.lineno) raise SyntaxError, "bad follower token %s, file %s, line %d"%(tt,file,lexer.lineno)
def authenticators(self, host): def authenticators(self, host):
"Return a (user, account, password) tuple for given host." """Return a (user, account, password) tuple for given host."""
if self.hosts.has_key(host): if self.hosts.has_key(host):
return self.hosts[host] return self.hosts[host]
elif self.hosts.has_key('default'): elif self.hosts.has_key('default'):
...@@ -72,7 +74,7 @@ class netrc: ...@@ -72,7 +74,7 @@ class netrc:
return None return None
def __repr__(self): def __repr__(self):
"Dump the class data in the format of a .netrc file" """Dump the class data in the format of a .netrc file."""
rep = "" rep = ""
for host in self.hosts.keys(): for host in self.hosts.keys():
attrs = self.hosts[host] attrs = self.hosts[host]
......
# """Convert a NT pathname to a file URL and vice versa."""
# nturl2path convert a NT pathname to a file URL and
# vice versa
def url2pathname(url): def url2pathname(url):
""" Convert a URL to a DOS path... """ Convert a URL to a DOS path...
...@@ -34,7 +32,6 @@ def url2pathname(url): ...@@ -34,7 +32,6 @@ def url2pathname(url):
return path return path
def pathname2url(p): def pathname2url(p):
""" Convert a DOS path name to a file url... """ Convert a DOS path name to a file url...
C:\foo\bar\spam.foo C:\foo\bar\spam.foo
......
# os.py -- either mac, dos or posix depending on what system we're on. """os.py -- either mac, dos or posix depending on what system we're on.
# This exports: This exports:
# - all functions from either posix or mac, e.g., os.unlink, os.stat, etc. - all functions from either posix or mac, e.g., os.unlink, os.stat, etc.
# - os.path is either module posixpath or macpath - os.path is either module posixpath or macpath
# - os.name is either 'posix' or 'mac' - os.name is either 'posix' or 'mac'
# - os.curdir is a string representing the current directory ('.' or ':') - os.curdir is a string representing the current directory ('.' or ':')
# - os.pardir is a string representing the parent directory ('..' or '::') - os.pardir is a string representing the parent directory ('..' or '::')
# - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\') - os.sep is the (or a most common) pathname separator ('/' or ':' or '\\')
# - os.altsep is the alternatte pathname separator (None or '/') - os.altsep is the alternatte pathname separator (None or '/')
# - os.pathsep is the component separator used in $PATH etc - os.pathsep is the component separator used in $PATH etc
# - os.defpath is the default search path for executables - os.defpath is the default search path for executables
# Programs that import and use 'os' stand a better chance of being Programs that import and use 'os' stand a better chance of being
# portable between different platforms. Of course, they must then portable between different platforms. Of course, they must then
# only use functions that are defined by all platforms (e.g., unlink only use functions that are defined by all platforms (e.g., unlink
# and opendir), and leave all pathname manipulation to os.path and opendir), and leave all pathname manipulation to os.path
# (e.g., split and join). (e.g., split and join).
"""
import sys import sys
......
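A small sketch of the portability point made in the docstring above: stick to the names os defines on every platform and let os.path do the pathname work (the file name is made up):

import os

print os.name                               # 'posix', 'mac', ... depending on the platform
name = os.path.join('data', 'input.txt')    # os.path picks the right separator
if os.path.exists(name):
    os.unlink(name)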
#! /usr/bin/env python #! /usr/bin/env python
# pdb.py -- finally, a Python debugger! """pdb.py -- finally, a Python debugger!"""
# (See pdb.doc for documentation.) # (See pdb.doc for documentation.)
...@@ -106,18 +106,18 @@ class Pdb(bdb.Bdb, cmd.Cmd): ...@@ -106,18 +106,18 @@ class Pdb(bdb.Bdb, cmd.Cmd):
# Override Bdb methods (except user_call, for now) # Override Bdb methods (except user_call, for now)
def user_line(self, frame): def user_line(self, frame):
# This function is called when we stop or break at this line """This function is called when we stop or break at this line."""
self.interaction(frame, None) self.interaction(frame, None)
def user_return(self, frame, return_value): def user_return(self, frame, return_value):
# This function is called when a return trap is set here """This function is called when a return trap is set here."""
frame.f_locals['__return__'] = return_value frame.f_locals['__return__'] = return_value
print '--Return--' print '--Return--'
self.interaction(frame, None) self.interaction(frame, None)
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)): def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
# This function is called if an exception occurs, """This function is called if an exception occurs,
# but only if we are to stop at or just below this level but only if we are to stop at or just below this level."""
frame.f_locals['__exception__'] = exc_type, exc_value frame.f_locals['__exception__'] = exc_type, exc_value
if type(exc_type) == type(''): if type(exc_type) == type(''):
exc_type_name = exc_type exc_type_name = exc_type
...@@ -148,7 +148,7 @@ class Pdb(bdb.Bdb, cmd.Cmd): ...@@ -148,7 +148,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
print '***', exc_type_name + ':', v print '***', exc_type_name + ':', v
def precmd(self, line): def precmd(self, line):
# Handle alias expansion and ';;' separator """Handle alias expansion and ';;' separator."""
if not line: if not line:
return line return line
args = string.split(line) args = string.split(line)
...@@ -262,7 +262,7 @@ class Pdb(bdb.Bdb, cmd.Cmd): ...@@ -262,7 +262,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
# To be overridden in derived debuggers # To be overridden in derived debuggers
def defaultFile(self): def defaultFile(self):
# Produce a reasonable default """Produce a reasonable default."""
filename = self.curframe.f_code.co_filename filename = self.curframe.f_code.co_filename
if filename == '<string>' and mainpyfile: if filename == '<string>' and mainpyfile:
filename = mainpyfile filename = mainpyfile
...@@ -384,7 +384,7 @@ class Pdb(bdb.Bdb, cmd.Cmd): ...@@ -384,7 +384,7 @@ class Pdb(bdb.Bdb, cmd.Cmd):
print 'is now unconditional.' print 'is now unconditional.'
def do_ignore(self,arg): def do_ignore(self,arg):
# arg is bp number followed by ignore count """arg is bp number followed by ignore count."""
args = string.split(arg) args = string.split(arg)
bpnum = int(string.strip(args[0])) bpnum = int(string.strip(args[0]))
try: try:
...@@ -406,10 +406,10 @@ class Pdb(bdb.Bdb, cmd.Cmd): ...@@ -406,10 +406,10 @@ class Pdb(bdb.Bdb, cmd.Cmd):
print bpnum, 'is reached.' print bpnum, 'is reached.'
def do_clear(self, arg): def do_clear(self, arg):
# Three possibilities, tried in this order: """Three possibilities, tried in this order:
# clear -> clear all breaks, ask for confirmation clear -> clear all breaks, ask for confirmation
# clear file:lineno -> clear all breaks at file:lineno clear file:lineno -> clear all breaks at file:lineno
# clear bpno bpno ... -> clear breakpoints by number clear bpno bpno ... -> clear breakpoints by number"""
if not arg: if not arg:
try: try:
reply = raw_input('Clear all breaks? ') reply = raw_input('Clear all breaks? ')
...@@ -851,9 +851,8 @@ class Pdb(bdb.Bdb, cmd.Cmd): ...@@ -851,9 +851,8 @@ class Pdb(bdb.Bdb, cmd.Cmd):
def help_pdb(self): def help_pdb(self):
help() help()
# Helper function for break/clear parsing -- may be overridden
def lookupmodule(self, filename): def lookupmodule(self, filename):
"""Helper function for break/clear parsing -- may be overridden."""
root, ext = os.path.splitext(filename) root, ext = os.path.splitext(filename)
if ext == '': if ext == '':
filename = filename + '.py' filename = filename + '.py'
......
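A brief sketch of driving the debugger whose commands are documented above (mymodule is hypothetical):

import pdb
import mymodule              # hypothetical module to debug

pdb.run('mymodule.test()')
# at the (Pdb) prompt, the commands shown in this diff apply, e.g.:
#   ignore 1 5    -- skip breakpoint 1 for its next five hits (do_ignore)
#   clear         -- clear all breaks, asking for confirmation (do_clear)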
"""create portable serialized representations of Python objects. """Create portable serialized representations of Python objects.
See module cPickle for a (much) faster implementation. See module cPickle for a (much) faster implementation.
See module copy_reg for a mechanism for registering custom picklers. See module copy_reg for a mechanism for registering custom picklers.
......
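A minimal round-trip sketch for the serialization described above (the file name is made up):

import pickle

data = {'spam': 1, 'eggs': [2, 3]}
f = open('data.pck', 'w')        # text-mode pickle, as was usual at the time
pickle.dump(data, f)
f.close()
f = open('data.pck', 'r')
print pickle.load(f)             # {'spam': 1, 'eggs': [2, 3]}
f.close()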
# Conversion pipeline templates """Conversion pipeline templates.
# =============================
The problem:
------------
# The problem:
# ------------ Suppose you have some data that you want to convert to another format
# (e.g. from GIF image format to PPM image format). Maybe the
# Suppose you have some data that you want to convert to another format conversion involves several steps (e.g. piping it through compress or
# (e.g. from GIF image format to PPM image format). Maybe the uuencode). Some of the conversion steps may require that their input
# conversion involves several steps (e.g. piping it through compress or is a disk file, others may be able to read standard input; similar for
# uuencode). Some of the conversion steps may require that their input their output. The input to the entire conversion may also be read
# is a disk file, others may be able to read standard input; similar for from a disk file or from an open file, and similar for its output.
# their output. The input to the entire conversion may also be read
# from a disk file or from an open file, and similar for its output. The module lets you construct a pipeline template by sticking one or
# more conversion steps together. It will take care of creating and
# The module lets you construct a pipeline template by sticking one or removing temporary files if they are necessary to hold intermediate
# more conversion steps together. It will take care of creating and data. You can then use the template to do conversions from many
# removing temporary files if they are necessary to hold intermediate different sources to many different destinations. The temporary
# data. You can then use the template to do conversions from many file names used are different each time the template is used.
# different sources to many different destinations. The temporary
# file names used are different each time the template is used. The templates are objects so you can create templates for many
# different conversion steps and store them in a dictionary, for
# The templates are objects so you can create templates for many instance.
# different conversion steps and store them in a dictionary, for
# instance.
Directions:
-----------
# Directions:
# ----------- To create a template:
# t = Template()
# To create a template:
# t = Template() To add a conversion step to a template:
# t.append(command, kind)
# To add a conversion step to a template: where kind is a string of two characters: the first is '-' if the
# t.append(command, kind) command reads its standard input or 'f' if it requires a file; the
# where kind is a string of two characters: the first is '-' if the second likewise for the output. The command must be valid /bin/sh
# command reads its standard input or 'f' if it requires a file; the syntax. If input or output files are required, they are passed as
# second likewise for the output. The command must be valid /bin/sh $IN and $OUT; otherwise, it must be possible to use the command in
# syntax. If input or output files are required, they are passed as a pipeline.
# $IN and $OUT; otherwise, it must be possible to use the command in
# a pipeline. To add a conversion step at the beginning:
# t.prepend(command, kind)
# To add a conversion step at the beginning:
# t.prepend(command, kind) To convert a file to another file using a template:
# sts = t.copy(infile, outfile)
# To convert a file to another file using a template: If infile or outfile are the empty string, standard input is read or
# sts = t.copy(infile, outfile) standard output is written, respectively. The return value is the
# If infile or outfile are the empty string, standard input is read or exit status of the conversion pipeline.
# standard output is written, respectively. The return value is the
# exit status of the conversion pipeline. To open a file for reading or writing through a conversion pipeline:
# fp = t.open(file, mode)
# To open a file for reading or writing through a conversion pipeline: where mode is 'r' to read the file, or 'w' to write it -- just like
# fp = t.open(file, mode) for the built-in function open() or for os.popen().
# where mode is 'r' to read the file, or 'w' to write it -- just like
# for the built-in function open() or for os.popen(). To create a new template object initialized to a given one:
# t2 = t.clone()
# To create a new template object initialized to a given one:
# t2 = t.clone() For an example, see the function test() at the end of the file.
# """
# For an example, see the function test() at the end of the file.
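A small usage sketch following the directions above (file names are made up; 'tr' must be available to /bin/sh):

import pipes

t = pipes.Template()
t.append('tr a-z A-Z', '--')                # reads standard input, writes standard output
f = t.open('somefile.txt', 'r')             # read somefile.txt through the pipeline
print f.read()
f.close()
sts = t.copy('somefile.txt', 'upper.txt')   # or convert one file into another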
import sys import sys
...@@ -81,37 +80,36 @@ stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \ ...@@ -81,37 +80,36 @@ stepkinds = [FILEIN_FILEOUT, STDIN_FILEOUT, FILEIN_STDOUT, STDIN_STDOUT, \
SOURCE, SINK] SOURCE, SINK]
# A pipeline template is a Template object:
class Template: class Template:
"""Class representing a pipeline template."""
# Template() returns a fresh pipeline template
def __init__(self): def __init__(self):
"""Template() returns a fresh pipeline template."""
self.debugging = 0 self.debugging = 0
self.reset() self.reset()
# t.__repr__() implements `t`
def __repr__(self): def __repr__(self):
"""t.__repr__() implements `t`."""
return '<Template instance, steps=' + `self.steps` + '>' return '<Template instance, steps=' + `self.steps` + '>'
# t.reset() restores a pipeline template to its initial state
def reset(self): def reset(self):
"""t.reset() restores a pipeline template to its initial state."""
self.steps = [] self.steps = []
# t.clone() returns a new pipeline template with identical
# initial state as the current one
def clone(self): def clone(self):
"""t.clone() returns a new pipeline template with identical
initial state as the current one."""
t = Template() t = Template()
t.steps = self.steps[:] t.steps = self.steps[:]
t.debugging = self.debugging t.debugging = self.debugging
return t return t
# t.debug(flag) turns debugging on or off
def debug(self, flag): def debug(self, flag):
"""t.debug(flag) turns debugging on or off."""
self.debugging = flag self.debugging = flag
# t.append(cmd, kind) adds a new step at the end
def append(self, cmd, kind): def append(self, cmd, kind):
"""t.append(cmd, kind) adds a new step at the end."""
if type(cmd) <> type(''): if type(cmd) <> type(''):
raise TypeError, \ raise TypeError, \
'Template.append: cmd must be a string' 'Template.append: cmd must be a string'
...@@ -132,8 +130,8 @@ class Template: ...@@ -132,8 +130,8 @@ class Template:
'Template.append: missing $OUT in cmd' 'Template.append: missing $OUT in cmd'
self.steps.append((cmd, kind)) self.steps.append((cmd, kind))
# t.prepend(cmd, kind) adds a new step at the front
def prepend(self, cmd, kind): def prepend(self, cmd, kind):
"""t.prepend(cmd, kind) adds a new step at the front."""
if type(cmd) <> type(''): if type(cmd) <> type(''):
raise TypeError, \ raise TypeError, \
'Template.prepend: cmd must be a string' 'Template.prepend: cmd must be a string'
...@@ -154,9 +152,9 @@ class Template: ...@@ -154,9 +152,9 @@ class Template:
'Template.prepend: missing $OUT in cmd' 'Template.prepend: missing $OUT in cmd'
self.steps.insert(0, (cmd, kind)) self.steps.insert(0, (cmd, kind))
# t.open(file, rw) returns a pipe or file object open for
# reading or writing; the file is the other end of the pipeline
def open(self, file, rw): def open(self, file, rw):
"""t.open(file, rw) returns a pipe or file object open for
reading or writing; the file is the other end of the pipeline."""
if rw == 'r': if rw == 'r':
return self.open_r(file) return self.open_r(file)
if rw == 'w': if rw == 'w':
...@@ -164,10 +162,9 @@ class Template: ...@@ -164,10 +162,9 @@ class Template:
raise ValueError, \ raise ValueError, \
'Template.open: rw must be \'r\' or \'w\', not ' + `rw` 'Template.open: rw must be \'r\' or \'w\', not ' + `rw`
# t.open_r(file) and t.open_w(file) implement
# t.open(file, 'r') and t.open(file, 'w') respectively
def open_r(self, file): def open_r(self, file):
"""t.open_r(file) and t.open_w(file) implement
t.open(file, 'r') and t.open(file, 'w') respectively."""
if self.steps == []: if self.steps == []:
return open(file, 'r') return open(file, 'r')
if self.steps[-1][1] == SINK: if self.steps[-1][1] == SINK:
......
"""Spawn a command with pipes to its stdin, stdout, and optionally stderr.
The normal os.popen(cmd, mode) call spawns a shell command and provides a
file interface to just the input or output of the process depending on
whether mode is 'r' or 'w'. This module provides the functions popen2(cmd)
and popen3(cmd) which return two or three pipes to the spawned command.
"""
import os import os
import sys import sys
import string import string
...@@ -11,7 +19,15 @@ def _cleanup(): ...@@ -11,7 +19,15 @@ def _cleanup():
inst.poll() inst.poll()
class Popen3: class Popen3:
"""Class representing a child process. Normally instances are created
by the factory functions popen2() and popen3()."""
def __init__(self, cmd, capturestderr=0, bufsize=-1): def __init__(self, cmd, capturestderr=0, bufsize=-1):
"""The parameter 'cmd' is the shell command to execute in a
sub-process. The 'capturestderr' flag, if true, specifies that
the object should capture standard error output of the child process.
The default is false. If the 'bufsize' parameter is specified, it
specifies the size of the I/O buffers to/from the child process."""
if type(cmd) == type(''): if type(cmd) == type(''):
cmd = ['/bin/sh', '-c', cmd] cmd = ['/bin/sh', '-c', cmd]
p2cread, p2cwrite = os.pipe() p2cread, p2cwrite = os.pipe()
...@@ -51,7 +67,10 @@ class Popen3: ...@@ -51,7 +67,10 @@ class Popen3:
self.childerr = None self.childerr = None
self.sts = -1 # Child not completed yet self.sts = -1 # Child not completed yet
_active.append(self) _active.append(self)
def poll(self): def poll(self):
"""Return the exit status of the child process if it has finished,
or -1 if it hasn't finished yet."""
if self.sts < 0: if self.sts < 0:
try: try:
pid, sts = os.waitpid(self.pid, os.WNOHANG) pid, sts = os.waitpid(self.pid, os.WNOHANG)
...@@ -61,7 +80,9 @@ class Popen3: ...@@ -61,7 +80,9 @@ class Popen3:
except os.error: except os.error:
pass pass
return self.sts return self.sts
def wait(self): def wait(self):
"""Wait for and return the exit status of the child process."""
pid, sts = os.waitpid(self.pid, 0) pid, sts = os.waitpid(self.pid, 0)
if pid == self.pid: if pid == self.pid:
self.sts = sts self.sts = sts
...@@ -69,11 +90,17 @@ class Popen3: ...@@ -69,11 +90,17 @@ class Popen3:
return self.sts return self.sts
def popen2(cmd, bufsize=-1): def popen2(cmd, bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. If 'bufsize' is
specified, it sets the buffer size for the I/O pipes. The file objects
(child_stdout, child_stdin) are returned."""
_cleanup() _cleanup()
inst = Popen3(cmd, 0, bufsize) inst = Popen3(cmd, 0, bufsize)
return inst.fromchild, inst.tochild return inst.fromchild, inst.tochild
def popen3(cmd, bufsize=-1): def popen3(cmd, bufsize=-1):
"""Execute the shell command 'cmd' in a sub-process. If 'bufsize' is
specified, it sets the buffer size for the I/O pipes. The file objects
(child_stdout, child_stdin, child_stderr) are returned."""
_cleanup() _cleanup()
inst = Popen3(cmd, 1, bufsize) inst = Popen3(cmd, 1, bufsize)
return inst.fromchild, inst.tochild, inst.childerr return inst.fromchild, inst.tochild, inst.childerr
......
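A short sketch of the two-pipe interface documented above, using the external sort command:

import popen2

fromchild, tochild = popen2.popen2('sort')
tochild.write('pear\napple\n')
tochild.close()
print fromchild.read()     # 'apple\npear\n'
fromchild.close()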
# """Extended file operations available in POSIX.
# Start of posixfile.py
# f = posixfile.open(filename, [mode, [bufsize]])
will create a new posixfile object
#
# Extended file operations f = posixfile.fileopen(fileobject)
# will create a posixfile object from a builtin file object
# f = posixfile.open(filename, [mode, [bufsize]])
# will create a new posixfile object f.file()
# will return the original builtin file object
# f = posixfile.fileopen(fileobject)
# will create a posixfile object from a builtin file object f.dup()
# will return a new file object based on a new filedescriptor
# f.file()
# will return the original builtin file object f.dup2(fd)
# will return a new file object based on the given filedescriptor
# f.dup()
# will return a new file object based on a new filedescriptor f.flags(mode)
# will turn on the associated flag (merge)
# f.dup2(fd) mode can contain the following characters:
# will return a new file object based on the given filedescriptor
# (character representing a flag)
# f.flags(mode) a append only flag
# will turn on the associated flag (merge) c close on exec flag
# mode can contain the following characters: n no delay flag
# s synchronization flag
# (character representing a flag) (modifiers)
# a append only flag ! turn flags 'off' instead of default 'on'
# c close on exec flag = copy flags 'as is' instead of default 'merge'
# n no delay flag ? return a string in which the characters represent the flags
# s synchronization flag that are set
# (modifiers)
# ! turn flags 'off' instead of default 'on' note: - the '!' and '=' modifiers are mutually exclusive.
# = copy flags 'as is' instead of default 'merge' - the '?' modifier will return the status of the flags after they
# ? return a string in which the characters represent the flags have been changed by other characters in the mode string
# that are set
# f.lock(mode [, len [, start [, whence]]])
# note: - the '!' and '=' modifiers are mutually exclusive. will (un)lock a region
# - the '?' modifier will return the status of the flags after they mode can contain the following characters:
# have been changed by other characters in the mode string
# (character representing type of lock)
# f.lock(mode [, len [, start [, whence]]]) u unlock
# will (un)lock a region r read lock
# mode can contain the following characters: w write lock
# (modifiers)
# (character representing type of lock) | wait until the lock can be granted
# u unlock ? return the first lock conflicting with the requested lock
# r read lock or 'None' if there is no conflict. The lock returned is in the
# w write lock format (mode, len, start, whence, pid) where mode is a
# (modifiers) character representing the type of lock ('r' or 'w')
# | wait until the lock can be granted
# ? return the first lock conflicting with the requested lock note: - the '?' modifier prevents a region from being locked; it is
# or 'None' if there is no conflict. The lock returned is in the query only
# format (mode, len, start, whence, pid) where mode is a """
# character representing the type of lock ('r' or 'w')
#
# note: - the '?' modifier prevents a region from being locked; it is
# query only
#
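A minimal sketch of the locking interface described above, using only the calls the docstring names (the path is made up):

import posixfile

f = posixfile.open('/tmp/scratch', 'w')   # hypothetical file
f.lock('w|')                       # take a write lock, waiting until it is granted
f.file().write('locked region\n')
f.lock('u')                        # release the lock
f.file().close()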
class _posixfile_: class _posixfile_:
"""File wrapper class that provides extra POSIX file routines."""
states = ['open', 'closed'] states = ['open', 'closed']
# #
...@@ -215,13 +212,12 @@ class _posixfile_: ...@@ -215,13 +212,12 @@ class _posixfile_:
else: else:
return 'w', l_len, l_start, l_whence, l_pid return 'w', l_len, l_start, l_whence, l_pid
#
# Public routine to obtain a posixfile object
#
def open(name, mode='r', bufsize=-1): def open(name, mode='r', bufsize=-1):
"""Public routine to open a file as a posixfile object."""
return _posixfile_().open(name, mode, bufsize) return _posixfile_().open(name, mode, bufsize)
def fileopen(file): def fileopen(file):
"""Public routine to get a posixfile object from a Python file object."""
return _posixfile_().fileopen(file) return _posixfile_().fileopen(file)
# #
......
# Module 'posixpath' -- common operations on Posix pathnames. """Common operations on Posix pathnames.
# Some of this can actually be useful on non-Posix systems too, e.g.
# for manipulation of the pathname component of URLs. Instead of importing this module directly, import os and refer to
# The "os.path" name is an alias for this module on Posix systems; this module as os.path. The "os.path" name is an alias for this
# on other systems (e.g. Mac, Windows), os.path provides the same module on Posix systems; on other systems (e.g. Mac, Windows),
# operations in a manner specific to that platform, and is an alias os.path provides the same operations in a manner specific to that
# to another module (e.g. macpath, ntpath). platform, and is an alias to another module (e.g. macpath, ntpath).
"""Common pathname manipulations, Posix version.
Instead of importing this module Some of this can actually be useful on non-Posix systems too, e.g.
directly, import os and refer to this module as os.path. for manipulation of the pathname component of URLs.
""" """
import os import os
...@@ -369,8 +369,8 @@ def normpath(path): ...@@ -369,8 +369,8 @@ def normpath(path):
return slashes + string.joinfields(comps, '/') return slashes + string.joinfields(comps, '/')
# Return an absolute path.
def abspath(path): def abspath(path):
"""Return an absolute path."""
if not isabs(path): if not isabs(path):
path = join(os.getcwd(), path) path = join(os.getcwd(), path)
return normpath(path) return normpath(path)
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
# #
# See profile.doc for more information # See profile.doc for more information
"""Class for profiling Python code."""
# Copyright 1994, by InfoSeek Corporation, all rights reserved. # Copyright 1994, by InfoSeek Corporation, all rights reserved.
# Written by James Roskind # Written by James Roskind
...@@ -79,44 +80,43 @@ def help(): ...@@ -79,44 +80,43 @@ def help():
print 'along the Python search path' print 'along the Python search path'
#**************************************************************************
# class Profile documentation:
#**************************************************************************
# self.cur is always a tuple. Each such tuple corresponds to a stack
# frame that is currently active (self.cur[-2]). The following are the
# definitions of its members. We use this external "parallel stack" to
# avoid contaminating the program that we are profiling. (old profiler
# used to write into the frames local dictionary!!) Derived classes
# can change the definition of some entries, as long as they leave
# [-2:] intact.
#
# [ 0] = Time that needs to be charged to the parent frame's function. It is
# used so that a function call will not have to access the timing data
# for the parents frame.
# [ 1] = Total time spent in this frame's function, excluding time in
# subfunctions
# [ 2] = Cumulative time spent in this frame's function, including time in
# all subfunctions to this frame.
# [-3] = Name of the function that corresonds to this frame.
# [-2] = Actual frame that we correspond to (used to sync exception handling)
# [-1] = Our parent 6-tuple (corresonds to frame.f_back)
#**************************************************************************
# Timing data for each function is stored as a 5-tuple in the dictionary
# self.timings[]. The index is always the name stored in self.cur[4].
# The following are the definitions of the members:
#
# [0] = The number of times this function was called, not counting direct
# or indirect recursion,
# [1] = Number of times this function appears on the stack, minus one
# [2] = Total time spent internal to this function
# [3] = Cumulative time that this function was present on the stack. In
# non-recursive functions, this is the total execution time from start
# to finish of each invocation of a function, including time spent in
# all subfunctions.
# [5] = A dictionary indicating for each function name, the number of times
# it was called by us.
#**************************************************************************
class Profile: class Profile:
"""Profiler class.
self.cur is always a tuple. Each such tuple corresponds to a stack
frame that is currently active (self.cur[-2]). The following are the
definitions of its members. We use this external "parallel stack" to
avoid contaminating the program that we are profiling. (old profiler
used to write into the frame's local dictionary!!) Derived classes
can change the definition of some entries, as long as they leave
[-2:] intact.
[ 0] = Time that needs to be charged to the parent frame's function.
It is used so that a function call will not have to access the
timing data for the parent frame.
[ 1] = Total time spent in this frame's function, excluding time in
subfunctions
[ 2] = Cumulative time spent in this frame's function, including time in
all subfunctions to this frame.
[-3] = Name of the function that corresponds to this frame.
[-2] = Actual frame that we correspond to (used to sync exception handling)
[-1] = Our parent 6-tuple (corresponds to frame.f_back)
Timing data for each function is stored as a 5-tuple in the dictionary
self.timings[]. The index is always the name stored in self.cur[4].
The following are the definitions of the members:
[0] = The number of times this function was called, not counting direct
or indirect recursion,
[1] = Number of times this function appears on the stack, minus one
[2] = Total time spent internal to this function
[3] = Cumulative time that this function was present on the stack. In
non-recursive functions, this is the total execution time from start
to finish of each invocation of a function, including time spent in
all subfunctions.
[5] = A dictionary indicating for each function name, the number of times
it was called by us.
"""
def __init__(self, timer=None): def __init__(self, timer=None):
self.timings = {} self.timings = {}
...@@ -449,19 +449,16 @@ class Profile: ...@@ -449,19 +449,16 @@ class Profile:
#****************************************************************************
# OldProfile class documentation
#****************************************************************************
#
# The following derived profiler simulates the old style profile, providing
# errant results on recursive functions. The reason for the usefulnes of this
# profiler is that it runs faster (i.e., less overhead). It still creates
# all the caller stats, and is quite useful when there is *no* recursion
# in the user's code.
#
# This code also shows how easy it is to create a modified profiler.
#****************************************************************************
class OldProfile(Profile): class OldProfile(Profile):
"""A derived profiler that simulates the old style profile, providing
errant results on recursive functions. The reason for the usefulness of
this profiler is that it runs faster (i.e., less overhead). It still
creates all the caller stats, and is quite useful when there is *no*
recursion in the user's code.
This code also shows how easy it is to create a modified profiler.
"""
def trace_dispatch_exception(self, frame, t): def trace_dispatch_exception(self, frame, t):
rt, rtt, rct, rfn, rframe, rcur = self.cur rt, rtt, rct, rfn, rframe, rcur = self.cur
if rcur and not rframe is frame: if rcur and not rframe is frame:
...@@ -509,16 +506,13 @@ class OldProfile(Profile): ...@@ -509,16 +506,13 @@ class OldProfile(Profile):
#****************************************************************************
# HotProfile class documentation
#****************************************************************************
#
# This profiler is the fastest derived profile example. It does not
# calculate caller-callee relationships, and does not calculate cumulative
# time under a function. It only calculates time spent in a function, so
# it runs very quickly (re: very low overhead)
#****************************************************************************
class HotProfile(Profile): class HotProfile(Profile):
"""The fastest derived profile example. It does not calculate
caller-callee relationships, and does not calculate cumulative
time under a function. It only calculates time spent in a
function, so it runs very quickly due to its very low overhead.
"""
def trace_dispatch_exception(self, frame, t): def trace_dispatch_exception(self, frame, t):
rt, rtt, rfn, rframe, rcur = self.cur rt, rtt, rfn, rframe, rcur = self.cur
if rcur and not rframe is frame: if rcur and not rframe is frame:
......
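The profilers above are normally driven through the module-level run() helper; a minimal sketch:

import profile

profile.run('pow(2, 10)')              # execute the statement under the profiler, print a report
profile.run('pow(2, 10)', 'prof.out')  # with a file name, dump the raw stats for pstats instead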
# """Class for printing reports on profiled python code."""
# Class for printing reports on profiled python code. rev 1.0 4/1/94 # Class for printing reports on profiled python code. rev 1.0 4/1/94
# #
# Based on prior profile module by Sjoerd Mullender... # Based on prior profile module by Sjoerd Mullender...
...@@ -37,41 +38,38 @@ import string ...@@ -37,41 +38,38 @@ import string
import marshal import marshal
import re import re
#**************************************************************************
# Class Stats documentation
#**************************************************************************
# This class is used for creating reports from data generated by the
# Profile class. It is a "friend" of that class, and imports data either
# by direct access to members of Profile class, or by reading in a dictionary
# that was emitted (via marshal) from the Profile class.
#
# The big change from the previous Profiler (in terms of raw functionality)
# is that an "add()" method has been provided to combine Stats from
# several distinct profile runs. Both the constructor and the add()
# method now take arbitrarilly many file names as arguments.
#
# All the print methods now take an argument that indicats how many lines
# to print. If the arg is a floating point number between 0 and 1.0, then
# it is taken as a decimal percentage of the availabel lines to be printed
# (e.g., .1 means print 10% of all available lines). If it is an integer,
# it is taken to mean the number of lines of data that you wish to have
# printed.
#
# The sort_stats() method now processes some additionaly options (i.e., in
# addition to the old -1, 0, 1, or 2). It takes an arbitrary number of quoted
# strings to select the sort order. For example sort_stats('time', 'name')
# sorts on the major key of "internal function time", and on the minor
# key of 'the name of the function'. Look at the two tables in sort_stats()
# and get_sort_arg_defs(self) for more examples.
#
# All methods now return "self", so you can string together commands like:
# Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
# print_stats(5).print_callers(5)
#
#**************************************************************************
import fpformat import fpformat
class Stats: class Stats:
"""This class is used for creating reports from data generated by the
Profile class. It is a "friend" of that class, and imports data either
by direct access to members of Profile class, or by reading in a dictionary
that was emitted (via marshal) from the Profile class.
The big change from the previous Profiler (in terms of raw functionality)
is that an "add()" method has been provided to combine Stats from
several distinct profile runs. Both the constructor and the add()
method now take arbitrarily many file names as arguments.
All the print methods now take an argument that indicates how many lines
to print. If the arg is a floating point number between 0 and 1.0, then
it is taken as a decimal percentage of the available lines to be printed
(e.g., .1 means print 10% of all available lines). If it is an integer,
it is taken to mean the number of lines of data that you wish to have
printed.
The sort_stats() method now processes some additional options (i.e., in
addition to the old -1, 0, 1, or 2). It takes an arbitrary number of quoted
strings to select the sort order. For example sort_stats('time', 'name')
sorts on the major key of "internal function time", and on the minor
key of 'the name of the function'. Look at the two tables in sort_stats()
and get_sort_arg_defs(self) for more examples.
All methods now return "self", so you can string together commands like:
Stats('foo', 'goo').strip_dirs().sort_stats('calls').\
print_stats(5).print_callers(5)
"""
def __init__(self, *args): def __init__(self, *args):
if not len(args): if not len(args):
arg = None arg = None
...@@ -182,8 +180,8 @@ class Stats: ...@@ -182,8 +180,8 @@ class Stats:
"time" : (((2,-1), ), "internal time"),\ "time" : (((2,-1), ), "internal time"),\
} }
# Expand all abbreviations that are unique
def get_sort_arg_defs(self): def get_sort_arg_defs(self):
"""Expand all abbreviations that are unique."""
if not self.sort_arg_dict: if not self.sort_arg_dict:
self.sort_arg_dict = dict = {} self.sort_arg_dict = dict = {}
std_list = dict.keys() std_list = dict.keys()
...@@ -289,9 +287,9 @@ class Stats: ...@@ -289,9 +287,9 @@ class Stats:
all_callees[func2][func] = callers[func2] all_callees[func2][func] = callers[func2]
return return
#****************************************************************** #******************************************************************
# The following functions support actual printing of reports # The following functions support actual printing of reports
#****************************************************************** #******************************************************************
# Optional "amount" is either a line count, or a percentage of lines. # Optional "amount" is either a line count, or a percentage of lines.
...@@ -447,17 +445,14 @@ class Stats: ...@@ -447,17 +445,14 @@ class Stats:
pass # has no return value, so use at end of line :-) pass # has no return value, so use at end of line :-)
#**************************************************************************
# class TupleComp Documentation
#**************************************************************************
# This class provides a generic function for comparing any two tuples.
# Each instance records a list of tuple-indicies (from most significant
# to least significant), and sort direction (ascending or decending) for
# each tuple-index. The compare functions can then be used as the function
# argument to the system sort() function when a list of tuples need to be
# sorted in the instances order.
#**************************************************************************
class TupleComp: class TupleComp:
"""This class provides a generic function for comparing any two tuples.
Each instance records a list of tuple-indices (from most significant
to least significant), and sort direction (ascending or descending) for
each tuple-index. The compare functions can then be used as the function
argument to the system sort() function when a list of tuples need to be
sorted in the instance's order."""
def __init__(self, comp_select_list): def __init__(self, comp_select_list):
self.comp_select_list = comp_select_list self.comp_select_list = comp_select_list
...@@ -495,16 +490,16 @@ def func_split(func_name): ...@@ -495,16 +490,16 @@ def func_split(func_name):
# such as callers and callees. # such as callers and callees.
#************************************************************************** #**************************************************************************
# Add together all the stats for two profile entries def add_func_stats(target, source):
def add_func_stats(target, source): """Add together all the stats for two profile entries."""
cc, nc, tt, ct, callers = source cc, nc, tt, ct, callers = source
t_cc, t_nc, t_tt, t_ct, t_callers = target t_cc, t_nc, t_tt, t_ct, t_callers = target
return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct, \ return (cc+t_cc, nc+t_nc, tt+t_tt, ct+t_ct, \
add_callers(t_callers, callers)) add_callers(t_callers, callers))
# Combine two caller lists in a single list.
def add_callers(target, source): def add_callers(target, source):
"""Combine two caller lists in a single list."""
new_callers = {} new_callers = {}
for func in target.keys(): for func in target.keys():
new_callers[func] = target[func] new_callers[func] = target[func]
...@@ -515,8 +510,8 @@ def add_callers(target, source): ...@@ -515,8 +510,8 @@ def add_callers(target, source):
new_callers[func] = source[func] new_callers[func] = source[func]
return new_callers return new_callers
# Sum the caller statistics to get total number of calls recieved
def count_calls(callers): def count_calls(callers):
"""Sum the caller statistics to get total number of calls received."""
nc = 0 nc = 0
for func in callers.keys(): for func in callers.keys():
nc = nc + callers[func] nc = nc + callers[func]
...@@ -529,4 +524,3 @@ def count_calls(callers): ...@@ -529,4 +524,3 @@ def count_calls(callers):
def f8(x): def f8(x):
return string.rjust(fpformat.fix(x, 3), 8) return string.rjust(fpformat.fix(x, 3), 8)
# pty.py -- Pseudo terminal utilities. """Pseudo terminal utilities."""
# Bugs: No signal handling. Doesn't set slave termios and window size. # Bugs: No signal handling. Doesn't set slave termios and window size.
# Only tested on Linux. # Only tested on Linux.
...@@ -16,8 +16,9 @@ STDERR_FILENO = 2 ...@@ -16,8 +16,9 @@ STDERR_FILENO = 2
CHILD = 0 CHILD = 0
# Open pty master. Returns (master_fd, tty_name). SGI and Linux/BSD version.
def master_open(): def master_open():
"""Open pty master and return (master_fd, tty_name).
SGI and Linux/BSD version."""
try: try:
import sgi import sgi
except ImportError: except ImportError:
...@@ -38,14 +39,15 @@ def master_open(): ...@@ -38,14 +39,15 @@ def master_open():
return (fd, '/dev/tty' + x + y) return (fd, '/dev/tty' + x + y)
raise os.error, 'out of pty devices' raise os.error, 'out of pty devices'
# Open the pty slave. Acquire the controlling terminal.
# Returns file descriptor. Linux version. (Should be universal? --Guido)
def slave_open(tty_name): def slave_open(tty_name):
"""Open the pty slave and acquire the controlling terminal.
Return the file descriptor. Linux version."""
# (Should be universal? --Guido)
return os.open(tty_name, FCNTL.O_RDWR) return os.open(tty_name, FCNTL.O_RDWR)
# Fork and make the child a session leader with a controlling terminal.
# Returns (pid, master_fd)
def fork(): def fork():
"""Fork and make the child a session leader with a controlling terminal.
Return (pid, master_fd)."""
master_fd, tty_name = master_open() master_fd, tty_name = master_open()
pid = os.fork() pid = os.fork()
if pid == CHILD: if pid == CHILD:
...@@ -66,21 +68,21 @@ def fork(): ...@@ -66,21 +68,21 @@ def fork():
# Parent and child process. # Parent and child process.
return pid, master_fd return pid, master_fd
# Write all the data to a descriptor.
def writen(fd, data): def writen(fd, data):
"""Write all the data to a descriptor."""
while data != '': while data != '':
n = os.write(fd, data) n = os.write(fd, data)
data = data[n:] data = data[n:]
# Default read function.
def read(fd): def read(fd):
"""Default read function."""
return os.read(fd, 1024) return os.read(fd, 1024)
# Parent copy loop.
# Copies
# pty master -> standard output (master_read)
# standard input -> pty master (stdin_read)
def copy(master_fd, master_read=read, stdin_read=read): def copy(master_fd, master_read=read, stdin_read=read):
"""Parent copy loop.
Copies
pty master -> standard output (master_read)
standard input -> pty master (stdin_read)"""
while 1: while 1:
rfds, wfds, xfds = select( rfds, wfds, xfds = select(
[master_fd, STDIN_FILENO], [], []) [master_fd, STDIN_FILENO], [], [])
...@@ -91,8 +93,8 @@ def copy(master_fd, master_read=read, stdin_read=read): ...@@ -91,8 +93,8 @@ def copy(master_fd, master_read=read, stdin_read=read):
data = stdin_read(STDIN_FILENO) data = stdin_read(STDIN_FILENO)
writen(master_fd, data) writen(master_fd, data)
# Create a spawned process.
def spawn(argv, master_read=read, stdin_read=read): def spawn(argv, master_read=read, stdin_read=read):
"""Create a spawned process."""
if type(argv) == type(''): if type(argv) == type(''):
argv = (argv,) argv = (argv,)
pid, master_fd = fork() pid, master_fd = fork()
......
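A one-line sketch of the spawn() entry point built on the fork/copy machinery above (Linux-only, per the module's own note):

import pty

pty.spawn('/bin/sh')    # run a shell on a pseudo-terminal, copying keystrokes and output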
...@@ -7,7 +7,7 @@ import imp ...@@ -7,7 +7,7 @@ import imp
MAGIC = imp.get_magic() MAGIC = imp.get_magic()
def wr_long(f, x): def wr_long(f, x):
"Internal; write a 32-bit int to a file in little-endian order." """Internal; write a 32-bit int to a file in little-endian order."""
f.write(chr( x & 0xff)) f.write(chr( x & 0xff))
f.write(chr((x >> 8) & 0xff)) f.write(chr((x >> 8) & 0xff))
f.write(chr((x >> 16) & 0xff)) f.write(chr((x >> 16) & 0xff))
......
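A tiny sketch of the byte order wr_long produces, using StringIO as a stand-in for the real .pyc file (an assumption made purely for illustration):

import StringIO
from py_compile import wr_long

buf = StringIO.StringIO()
wr_long(buf, 0x04030201)
print repr(buf.getvalue())    # '\x01\x02\x03\x04' -- least significant byte first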