Commit fd7121a6 authored by Andreas Jung's avatar Andreas Jung

Yeah...Zope is now a regex|ts_regex|regsub zone. Replaced and removed all old...

Yeah...Zope is now a regex|ts_regex|regsub free zone. Replaced and removed all old regex stuff. No one survived.
parent cedef143
......@@ -93,9 +93,8 @@ from ZPublisher.HTTPRequest import HTTPRequest
from cStringIO import StringIO
import os
from regsub import gsub
from base64 import encodestring
import string
import string,re
class FTPRequest(HTTPRequest):
......@@ -141,7 +140,7 @@ class FTPRequest(HTTPRequest):
env['REQUEST_METHOD']='GET' # XXX what should this be?
env['SERVER_SOFTWARE']=channel.server.SERVER_IDENT
if channel.userid != 'anonymous':
env['HTTP_AUTHORIZATION']='Basic %s' % gsub('\012','',
env['HTTP_AUTHORIZATION']='Basic %s' % re.sub('\012','',
encodestring('%s:%s' % (channel.userid, channel.password)))
env['SERVER_NAME']=channel.server.hostname
env['SERVER_PORT']=str(channel.server.port)
......
......@@ -84,9 +84,9 @@
##############################################################################
"""Access control package"""
__version__='$Revision: 1.147 $'[11:-2]
__version__='$Revision: 1.148 $'[11:-2]
import Globals, socket, ts_regex, SpecialUsers
import Globals, socket, SpecialUsers,re
import os
from Globals import DTMLFile, MessageDialog, Persistent, PersistentMapping
from string import join, strip, split, lower, upper
......@@ -1009,14 +1009,14 @@ def rolejoin(roles, other):
roles.sort()
return roles
addr_match=ts_regex.compile('[0-9\.\*]*').match #TS
host_match=ts_regex.compile('[-A-Za-z0-9\.\*]*').match #TS
addr_match=re.compile(r'[\d.]*').match
host_match=re.compile(r'[-\w.]*').match
def domainSpecMatch(spec, request):
host=''
addr=''
# Fast exit for the match-all case
if len(spec) == 1 and spec[0] == '*':
return 1
......@@ -1037,6 +1037,7 @@ def domainSpecMatch(spec, request):
try: addr=socket.gethostbyname(host)
except: pass
_host=split(host, '.')
_addr=split(addr, '.')
_hlen=len(_host)
......@@ -1047,35 +1048,39 @@ def domainSpecMatch(spec, request):
_ob=split(ob, '.')
_sz=len(_ob)
if addr_match(ob)==sz:
fail=0
for i in range(_sz):
a=_addr[i]
o=_ob[i]
if (o != a) and (o != '*'):
fail=1
break
if fail:
continue
return 1
mo = addr_match(ob)
if mo is not None:
if mo.end(0)==sz:
fail=0
for i in range(_sz):
a=_addr[i]
o=_ob[i]
if (o != a) and (o != '*'):
fail=1
break
if fail:
continue
return 1
if host_match(ob)==sz:
if _hlen < _sz:
continue
elif _hlen > _sz:
_item=_host[-_sz:]
else:
_item=_host
fail=0
for i in range(_sz):
h=_item[i]
o=_ob[i]
if (o != h) and (o != '*'):
fail=1
break
if fail:
continue
return 1
mo = host_match(ob)
if mo is not None:
if mo.end(0)==sz:
if _hlen < _sz:
continue
elif _hlen > _sz:
_item=_host[-_sz:]
else:
_item=_host
fail=0
for i in range(_sz):
h=_item[i]
o=_ob[i]
if (o != h) and (o != '*'):
fail=1
break
if fail:
continue
return 1
return 0
......
......@@ -107,7 +107,7 @@
import Globals, OFS.Folder, OFS.SimpleItem, os, string, Acquisition, Products
import regex, zlib, Globals, cPickle, marshal, rotor
import re, zlib, Globals, cPickle, marshal, rotor
import ZClasses, ZClasses.ZClass, AccessControl.Owned
from OFS.Folder import Folder
......@@ -157,12 +157,12 @@ class Product(Folder, PermissionManager):
_isBeingUsedAsAMethod_=1
def new_version(self,
_intending=regex.compile("[.]?[0-9]+$").search, #TS
_intending=re.compile(r"[.]?[0-9]+$").search, #TS
):
# Return a new version number based on the existing version.
v=str(self.version)
if not v: return '1.0'
if _intending(v) < 0: return v
if _intending(v) is None: return v
l=rfind(v,'.')
return v[:l+1]+str(1+atoi(v[l+1:]))
......
......@@ -84,10 +84,10 @@
##############################################################################
"""Encapsulation of date/time values"""
__version__='$Revision: 1.64 $'[11:-2]
__version__='$Revision: 1.65 $'[11:-2]
import sys, os, math, regex, ts_regex, DateTimeZone
import re,sys, os, math, DateTimeZone
from string import strip,split,upper,lower,atoi,atof,find,join
from time import time, gmtime, localtime, asctime
from time import timezone, strftime, mktime
......@@ -108,7 +108,7 @@ EPOCH =(to_year+to_month+dy+(hr/24.0+mn/1440.0+sc/86400.0))*86400
jd1901 =2415385L
numericTimeZoneMatch=regex.compile('[+-][\0-\9][\0-\9][\0-\9][\0-\9]').match #TS
numericTimeZoneMatch=re.compile(r'[+-][0-9][0-9][0-9][0-9]').match #TS
......@@ -306,7 +306,7 @@ class _cache:
def __getitem__(self,k):
try: n=self._zmap[lower(k)]
except KeyError:
if numericTimeZoneMatch(k) <= 0:
if numericTimeZoneMatch(k) == None:
raise 'DateTimeError','Unrecognized timezone: %s' % k
return k
try: return self._d[n]
......@@ -436,7 +436,7 @@ def _tzoffset(tz, t):
try:
return DateTime._tzinfo[tz].info(t)[0]
except:
if numericTimeZoneMatch(tz) > 0:
if numericTimeZoneMatch(tz) is not None:
return atoi(tz[1:3])*3600+atoi(tz[3:5])*60
else:
return 0 # ??
......@@ -717,7 +717,7 @@ class DateTime:
if tz:
try: tz=self._tzinfo._zmap[lower(tz)]
except KeyError:
if numericTimeZoneMatch(tz) <= 0:
if numericTimeZoneMatch(tz) is None:
raise self.DateTimeError, \
'Unknown time zone in date: %s' % arg
else:
......@@ -785,7 +785,7 @@ class DateTime:
if tz:
try: tz=self._tzinfo._zmap[lower(tz)]
except KeyError:
if numericTimeZoneMatch(tz) <= 0:
if numericTimeZoneMatch(tz) is None:
raise self.DateTimeError, \
'Unknown time zone: %s' % tz
else:
......@@ -817,9 +817,9 @@ class DateTime:
DateTimeError='DateTimeError'
SyntaxError ='Invalid Date-Time String'
DateError ='Invalid Date Components'
int_pattern =ts_regex.compile('\([0-9]+\)') #TS
flt_pattern =ts_regex.compile(':\([0-9]+\.[0-9]+\)') #TS
name_pattern =ts_regex.compile('\([a-z][a-z]+\)', ts_regex.casefold) #TS
int_pattern =re.compile(r'([0-9]+)') #AJ
flt_pattern =re.compile(r':([0-9]+\.[0-9]+)') #AJ
name_pattern =re.compile(r'([a-zA-Z]+)', re.I) #AJ
space_chars =' \t\n'
delimiters ='-/.:,+'
_month_len =((0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31),
......@@ -938,19 +938,17 @@ class DateTime:
if i > 0: b=i-1
else: b=i
ts_results = fltpat.match_group(string, (1,), b)
ts_results = fltpat.match(string, b)
if ts_results:
#s=ts_results[1][0]
s=ts_results[1]
s=ts_results.group(1)
i=i+len(s)
ints.append(atof(s))
continue
#TS
ts_results = intpat.match_group(string, (1,), i)
#AJ
ts_results = intpat.match(string, i)
if ts_results:
#s=ts_results[1][0]
s=ts_results[1]
s=ts_results.group(0)
ls=len(s)
i=i+ls
......@@ -963,9 +961,9 @@ class DateTime:
continue
ts_results = wordpat.match_group(string, (1,), i)
ts_results = wordpat.match(string, i)
if ts_results:
o,s=ts_results[1],lower(ts_results[1])
o,s=ts_results.group(0),lower(ts_results.group(0))
i=i+len(s)
if i < l and string[i]=='.': i=i+1
# Check for month name:
......@@ -1386,10 +1384,7 @@ class DateTime:
def strftime(self, format):
# Format the date/time using the *current timezone representation*.
diff = _tzoffset(self._tz, self._t)
format = ts_regex.gsub('\(^\|[^%]\)%Z',
'\\1' + self._tz,
format)
format = ts_regex.gsub('\(^\|[^%]\)%z',
format = re.sub('(^\|[^%])%z',
'\\1%+05d' % (diff / 36),
format)
return strftime(format, safegmtime(self.timeTime() + diff))
......
......@@ -139,3 +139,6 @@ class DateTimeTests (unittest.TestCase):
def test_suite():
return unittest.makeSuite(DateTimeTests)
if __name__=="__main__":
unittest.TextTestRunner().run(test_suite())
......@@ -84,37 +84,37 @@
##############################################################################
"""HTML formated DocumentTemplates
$Id: DT_HTML.py,v 1.24 2000/08/17 14:03:42 brian Exp $"""
$Id: DT_HTML.py,v 1.25 2001/04/27 18:07:09 andreas Exp $"""
from DT_String import String, FileMixin
import DT_String, regex
import DT_String, re
from DT_Util import ParseError, str
from string import strip, find, split, join, rfind, replace
class dtml_re_class:
""" This needs to be replaced before 2.4. It's a hackaround. """
def search(self, text, start=0,
name_match=regex.compile('[\0- ]*[a-zA-Z]+[\0- ]*').match,
end_match=regex.compile('[\0- ]*\(/\|end\)',
regex.casefold).match,
start_search=regex.compile('[<&]').search,
ent_name=regex.compile('[-a-zA-Z0-9_.]+').match,
name_match=re.compile(r'[\0- ]*[a-zA-Z]+[\0- ]*').match,
end_match=re.compile(r'[\0- ]*(/|end)', re.I).match,
start_search=re.compile(r'[<&]').search,
ent_name=re.compile(r'[-a-zA-Z0-9_.]+').match,
find=find,
strip=strip,
replace=replace,
):
while 1:
s=start_search(text, start)
if s < 0: return -1
mo = start_search(text,start)
if mo is None: return None
s = mo.start(0)
if text[s:s+5] == '<!--#':
n=s+5
e=find(text,'-->',n)
if e < 0: return -1
if e < 0: return None
en=3
l=end_match(text,n)
if l > 0:
mo =end_match(text,n)
if mo is not None:
l = mo.end(0) - mo.start(0)
end=strip(text[n:n+l])
n=n+l
else: end=''
......@@ -123,7 +123,7 @@ class dtml_re_class:
e=n=s+6
while 1:
e=find(text,'>',e+1)
if e < 0: return -1
if e < 0: return None
if len(split(text[n:e],'"'))%2:
# check for even number of "s inside
break
......@@ -135,7 +135,7 @@ class dtml_re_class:
e=n=s+7
while 1:
e=find(text,'>',e+1)
if e < 0: return -1
if e < 0: return None
if len(split(text[n:e],'"'))%2:
# check for even number of "s inside
break
......@@ -150,32 +150,38 @@ class dtml_re_class:
if e >= 0:
args=text[n:e]
l=len(args)
if ent_name(args) == l:
d=self.__dict__
if text[s+5]=='-':
d[1]=d['end']=''
d[2]=d['name']='var'
d[0]=text[s:e+1]
d[3]=d['args']=args+' html_quote'
return s
else:
nn=find(args,'-')
if nn >= 0 and nn < l-1:
mo = ent_name(args)
if mo is not None:
if mo.end(0)-mo.start(0) == l:
d=self.__dict__
if text[s+5]=='-':
d[1]=d['end']=''
d[2]=d['name']='var'
d[0]=text[s:e+1]
args=(args[nn+1:]+' '+
replace(args[:nn],'.',' '))
d[3]=d['args']=args
return s
d[3]=d['args']=args+' html_quote'
self._start = s
return self
else:
nn=find(args,'-')
if nn >= 0 and nn < l-1:
d[1]=d['end']=''
d[2]=d['name']='var'
d[0]=text[s:e+1]
args=(args[nn+1:]+' '+
replace(args[:nn],'.',' '))
d[3]=d['args']=args
self._start = s
return self
start=s+1
continue
break
l=name_match(text,n)
if l < 0: return l
mo = name_match(text,n)
if mo is None: return None
l = mo.end(0) - mo.start(0)
a=n+l
name=strip(text[n:a])
......@@ -186,8 +192,8 @@ class dtml_re_class:
d[1]=d['end']=end
d[2]=d['name']=name
d[3]=d['args']=args
return s
self._start = s
return self
def group(self, *args):
get=self.__dict__.get
......@@ -195,7 +201,8 @@ class dtml_re_class:
return get(args[0])
return tuple(map(get, args))
def start(self, *args):
return self._start
class HTML(DT_String.String):
"""HTML Document Templates
......
......@@ -402,13 +402,13 @@
''' #'
__rcs_id__='$Id: DT_In.py,v 1.48 2001/04/13 19:31:42 brian Exp $'
__version__='$Revision: 1.48 $'[11:-2]
__rcs_id__='$Id: DT_In.py,v 1.49 2001/04/27 18:07:09 andreas Exp $'
__version__='$Revision: 1.49 $'[11:-2]
from DT_Util import ParseError, parse_params, name_param, str
from DT_Util import render_blocks, InstanceDict, ValidationError, VSEval, expr_globals
from string import find, atoi, join, split, lower
import ts_regex
import re
from DT_InSV import sequence_variables, opt
TupleType=type(())
......@@ -471,11 +471,12 @@ class InClass:
if type(v)==type(''):
try: atoi(v)
except:
self.start_name_re=ts_regex.compile(
self.start_name_re=re.compile(
'&+'+
join(map(lambda c: "[%s]" % c, v),'')+
'=[0-9]+&+')
name,expr=name_param(args,'in',1)
if expr is not None: expr=expr.eval
self.__name__, self.expr = name, expr
......
......@@ -85,11 +85,12 @@
__doc__='''Sequence variables support
$Id: DT_InSV.py,v 1.18 2001/01/16 21:57:19 chrism Exp $'''
__version__='$Revision: 1.18 $'[11:-2]
$Id: DT_InSV.py,v 1.19 2001/04/27 18:07:10 andreas Exp $'''
__version__='$Revision: 1.19 $'[11:-2]
from string import lower, rfind, split, join
from math import sqrt
import re
TupleType=type(())
try:
import Missing
......@@ -199,6 +200,7 @@ class sequence_variables:
return l
def query(self, *ignored):
if self.start_name_re is None: raise KeyError, 'sequence-query'
query_string=self.query_string
while query_string and query_string[:1] in '?&':
......@@ -207,16 +209,26 @@ class sequence_variables:
query_string=query_string[:-1]
if query_string:
query_string='&%s&' % query_string
re=self.start_name_re
l=re.search_group(query_string, (0,))
if l:
v=l[1]
l=l[0]
query_string=(query_string[:l]+
query_string[l+len(v)-1:])
reg=self.start_name_re
if type(reg)==type(re.compile(r"")):
mo = reg.search(query_string)
if mo is not None:
v = mo.group(0)
l = mo.start(0)
query_string=(query_string[:l]+ query_string[l+len(v)-1:])
else:
l=reg.search_group(query_string, (0,))
if l:
v=l[1]
l=l[0]
query_string=(query_string[:l]+ query_string[l+len(v)-1:])
query_string='?'+query_string[1:]
else: query_string='?'
self.data['sequence-query']=query_string
return query_string
......
......@@ -112,8 +112,9 @@
as desired.
'''
from DT_Util import render_blocks, Eval, expr_globals, ParseError, regex, strip
from DT_Util import render_blocks, Eval, expr_globals, ParseError, strip
from DT_Util import str # Probably needed due to hysterical pickles.
import re
class Let:
......@@ -149,29 +150,33 @@ class Let:
__call__ = render
def parse_let_params(text,
result=None,
tag='let',
parmre=regex.compile(
'\([\0- ]*\([^\0- =\"]+\)=\([^\0- =\"]+\)\)'),
qparmre=regex.compile(
'\([\0- ]*\([^\0- =\"]+\)="\([^"]*\)\"\)'),
parmre=re.compile(
r'([\0- ]*([^\0- =\"]+)=([^\0- =\"]+))'),
qparmre=re.compile(
r'([\0- ]*([^\0- =\"]+)="([^"]*)\")'),
**parms):
result=result or []
if parmre.match(text) >= 0:
name=parmre.group(2)
value=parmre.group(3)
l=len(parmre.group(1))
elif qparmre.match(text) >= 0:
name=qparmre.group(2)
value='"%s"' % qparmre.group(3)
l=len(qparmre.group(1))
mo = parmre.match(text)
mo1= qparmre.match(text)
if mo is not None:
name=mo.group(2)
value=mo.group(3)
l=len(mo.group(1))
elif mo1 is not None:
name=mo1.group(2)
value='"%s"' % mo1.group(3)
l=len(mo1.group(1))
else:
if not text or not strip(text): return result
raise ParseError, ('invalid parameter: "%s"' % text, tag)
result.append((name,value))
text=strip(text[l:])
......
......@@ -82,10 +82,10 @@
# attributions are listed in the accompanying credits file.
#
##############################################################################
__version__='$Revision: 1.1 $'[11:-2]
__version__='$Revision: 1.2 $'[11:-2]
from DT_Util import parse_params, name_param, html_quote, str
import regex, string, sys, regex
import string, sys
from string import find, split, join, atoi, rfind
class ReturnTag:
......
##############################################################################
#
# Zope Public License (ZPL) Version 1.0
# -------------------------------------
#
# Copyright (c) Digital Creations. All rights reserved.
# Copyright (c) Digital Creations. All rights reserved.
#
# This license has been certified as Open Source(tm).
#
......@@ -82,10 +83,10 @@
# attributions are listed in the accompanying credits file.
#
##############################################################################
"$Id: DT_String.py,v 1.39 2000/12/12 21:20:25 shane Exp $"
"$Id: DT_String.py,v 1.40 2001/04/27 18:07:10 andreas Exp $"
from string import split, strip
import regex, ts_regex
import thread,re
from DT_Util import ParseError, InstanceDict, TemplateDict, render_blocks, str
from DT_Var import Var, Call, Comment
......@@ -154,15 +155,15 @@ class String:
tagre__roles__=()
def tagre(self):
return regex.symcomp(
'%(' # beginning
'\(<name>[a-zA-Z0-9_/.-]+\)' # tag name
'\('
return re.compile(
r'%(' # beginning
'(?P<name>[a-zA-Z0-9_/.-]+)' # tag name
'('
'[\0- ]+' # space after tag name
'\(<args>\([^)"]+\("[^"]*"\)?\)*\)' # arguments
'\)?'
')\(<fmt>[0-9]*[.]?[0-9]*[a-z]\|[]![]\)' # end
, regex.casefold)
'(?P<args>([^)"]+("[^"]*")?\)*)' # arguments
')?'
')(?P<fmt>[0-9]*[.]?[0-9]*[a-z]\|[]![])' # end
, re.I)
_parseTag__roles__=()
def _parseTag(self, tagre, command=None, sargs='', tt=type(())):
......@@ -227,8 +228,9 @@ class String:
def parse(self,text,start=0,result=None,tagre=None):
if result is None: result=[]
if tagre is None: tagre=self.tagre()
l=tagre.search(text,start)
while l >= 0:
mo =tagre.search(text,start)
while mo :
l = mo.start(0)
try: tag, args, command, coname = self._parseTag(tagre)
except ParseError, m: self.parse_error(m[0],m[1],text,l)
......@@ -248,17 +250,19 @@ class String:
result.append(r)
except ParseError, m: self.parse_error(m[0],tag,text,l)
l=tagre.search(text,start)
mo = tagre.search(text,start)
text=text[start:]
if text: result.append(text)
return result
skip_eol__roles__=()
def skip_eol(self, text, start, eol=regex.compile('[ \t]*\n')):
def skip_eol(self, text, start, eol=re.compile(r'[ \t]*\n')):
# if block open is followed by newline, then skip past newline
l=eol.match(text,start)
if l > 0: start=start+l
mo =eol.match(text,start)
if mo is not None:
start = start + mo.end(0) - mo.start(0)
return start
parse_block__roles__=()
......@@ -274,8 +278,9 @@ class String:
sa=sargs
while 1:
l=tagre.search(text,start)
if l < 0: self.parse_error('No closing tag', stag, text, sloc)
mo = tagre.search(text,start)
if mo is None: self.parse_error('No closing tag', stag, text, sloc)
l = mo.start(0)
try: tag, args, command, coname= self._parseTag(tagre,scommand,sa)
except ParseError, m: self.parse_error(m[0],m[1], text, l)
......@@ -312,8 +317,9 @@ class String:
parse_close__roles__=()
def parse_close(self, text, start, tagre, stag, sloc, scommand, sa):
while 1:
l=tagre.search(text,start)
if l < 0: self.parse_error('No closing tag', stag, text, sloc)
mo = tagre.search(text,start)
if mo is None: self.parse_error('No closing tag', stag, text, sloc)
l = mo.start(0)
try: tag, args, command, coname= self._parseTag(tagre,scommand,sa)
except ParseError, m: self.parse_error(m[0],m[1], text, l)
......@@ -401,7 +407,7 @@ class String:
cook__roles__=()
def cook(self,
cooklock=ts_regex.allocate_lock(),
cooklock=thread.allocate_lock(),
):
cooklock.acquire()
try:
......
......@@ -82,10 +82,11 @@
# attributions are listed in the accompanying credits file.
#
##############################################################################
'''$Id: DT_Util.py,v 1.72 2001/01/22 16:36:16 brian Exp $'''
__version__='$Revision: 1.72 $'[11:-2]
'''$Id: DT_Util.py,v 1.73 2001/04/27 18:07:11 andreas Exp $'''
__version__='$Revision: 1.73 $'[11:-2]
import regex, string, math, os
import string, math, os
import re
from string import strip, join, atoi, lower, split, find
import VSEval
......@@ -449,14 +450,14 @@ ListType=type([])
def parse_params(text,
result=None,
tag='',
unparmre=regex.compile(
'\([\0- ]*\([^\0- =\"]+\)\)'),
qunparmre=regex.compile(
'\([\0- ]*\("[^"]*"\)\)'),
parmre=regex.compile(
'\([\0- ]*\([^\0- =\"]+\)=\([^\0- =\"]+\)\)'),
qparmre=regex.compile(
'\([\0- ]*\([^\0- =\"]+\)="\([^"]*\)\"\)'),
unparmre=re.compile(
r'([\0- ]*([^\0- =\"]+))'),
qunparmre=re.compile(
r'([\0- ]*("[^"]*"))'),
parmre=re.compile(
r'([\0- ]*([^\0- =\"]+)=([^\0- =\"]+))'),
qparmre=re.compile(
r'([\0- ]*([^\0- =\"]+)="([^"]*)\")'),
**parms):
"""Parse tag parameters
......@@ -482,17 +483,25 @@ def parse_params(text,
result=result or {}
if parmre.match(text) >= 0:
name=lower(parmre.group(2))
value=parmre.group(3)
l=len(parmre.group(1))
elif qparmre.match(text) >= 0:
name=lower(qparmre.group(2))
value=qparmre.group(3)
l=len(qparmre.group(1))
elif unparmre.match(text) >= 0:
name=unparmre.group(2)
l=len(unparmre.group(1))
# HACK - we precalculate all matches. Maybe we don't need them
# all. This should be fixed for performance issues
mo_p = parmre.match(text)
mo_q = qparmre.match(text)
mo_unp = unparmre.match(text)
mo_unq = qunparmre.match(text)
if mo_p:
name=lower(mo_p.group(2))
value=mo_p.group(3)
l=len(mo_p.group(1))
elif mo_q:
name=lower(mo_q.group(2))
value=mo_q.group(3)
l=len(mo_q.group(1))
elif mo_unp:
name=mo_unp.group(2)
l=len(mo_unp.group(1))
if result:
if parms.has_key(name):
if parms[name] is None: raise ParseError, (
......@@ -504,9 +513,9 @@ def parse_params(text,
else:
result['']=name
return apply(parse_params,(text[l:],result),parms)
elif qunparmre.match(text) >= 0:
name=qunparmre.group(2)
l=len(qunparmre.group(1))
elif mo_unq:
name=mo_unq.group(2)
l=len(mo_unq.group(1))
if result: raise ParseError, (
'Invalid attribute name, "%s"' % name, tag)
else: result['']=name
......
......@@ -217,11 +217,11 @@ Evaluating expressions without rendering results
''' # '
__rcs_id__='$Id: DT_Var.py,v 1.37 2000/09/05 22:03:12 amos Exp $'
__version__='$Revision: 1.37 $'[11:-2]
__rcs_id__='$Id: DT_Var.py,v 1.38 2001/04/27 18:07:11 andreas Exp $'
__version__='$Revision: 1.38 $'[11:-2]
from DT_Util import parse_params, name_param, html_quote, str
import regex, string, sys, regex
import re, string, sys
from string import find, split, join, atoi, rfind
from urllib import quote, quote_plus
......@@ -373,8 +373,8 @@ def dollars_and_cents(v, name='(Unknown name)', md={}):
except: return ''
def thousands_commas(v, name='(Unknown name)', md={},
thou=regex.compile(
"\([0-9]\)\([0-9][0-9][0-9]\([,.]\|$\)\)").search):
thou=re.compile(
r"([0-9])([0-9][0-9][0-9]([,.]\|$))").search):
v=str(v)
vl=split(v,'.')
if not vl: return v
......@@ -382,8 +382,9 @@ def thousands_commas(v, name='(Unknown name)', md={},
del vl[0]
if vl: s='.'+join(vl,'.')
else: s=''
l=thou(v)
while l >= 0:
mo=thou(v)
while mo is not None:
l = mo.start(0)
v=v[:l+1]+','+v[l+1:]
l=thou(v)
return v+s
......
......@@ -84,12 +84,12 @@
##############################################################################
"""Help system support module"""
__version__='$Revision: 1.7 $'[11:-2]
__version__='$Revision: 1.8 $'[11:-2]
import Globals, Acquisition
import StructuredText.StructuredText
import sys, os, string, regex
import sys, os, string, re
stx_class=StructuredText.StructuredText.HTML
......@@ -282,9 +282,9 @@ class classobject(object):
pre_match=regex.compile('[A-Za-z0-9_]*([^)]*)[ -]*').match #TS
sig_match=regex.compile('[A-Za-z0-9_]*([^)]*)').match #TS
# needs to be tested !!! The conversion of reconvert.convert looks suspicious
sig_match=re.compile(r'[\w]*\([^)]*\)').match # matches "f(arg1, arg2)"
pre_match=re.compile(r'[\w]*\([^)]*\)[ -]*').match # with ' ' or '-' included
class methodobject(object):
......@@ -309,9 +309,9 @@ class methodobject(object):
if hasattr(func, 'func_code'):
if hasattr(func.func_code, 'co_varnames'):
return doc
n=pre_match(doc)
if n > -1:
return doc[n:]
mo=pre_match(doc)
if mo is not None:
return doc[mo.end(0):]
return doc
def get_signaturex(self):
......@@ -348,9 +348,9 @@ class methodobject(object):
doc=func.__doc__
if not doc: doc=''
doc=string.strip(doc)
n=sig_match(doc)
if n > -1:
return doc[:n]
mo=sig_match(doc)
if mo is not None:
return doc[:mo.end(0)]
return '%s()' % name
......
......@@ -84,7 +84,7 @@
##############################################################################
"""DTML Method objects."""
__version__='$Revision: 1.62 $'[11:-2]
__version__='$Revision: 1.63 $'[11:-2]
import History
from Globals import HTML, DTMLFile, MessageDialog
......@@ -402,7 +402,7 @@ class DTMLMethod(HTML, Acquisition.Implicit, RoleManager,
import re
from string import find, strip
token = "[a-zA-Z0-9!#$%&'*+\-.\\\\^_`|~]+"
hdr_start = re.compile('(%s):(.*)' % token).match
hdr_start = re.compile(r'(%s):(.*)' % token).match
def decapitate(html, RESPONSE=None):
headers = []
......
......@@ -84,12 +84,12 @@
##############################################################################
__doc__="""Object Manager
$Id: ObjectManager.py,v 1.135 2001/04/26 00:14:15 andreas Exp $"""
$Id: ObjectManager.py,v 1.136 2001/04/27 18:07:12 andreas Exp $"""
__version__='$Revision: 1.135 $'[11:-2]
__version__='$Revision: 1.136 $'[11:-2]
import App.Management, Acquisition, Globals, CopySupport, Products
import os, App.FactoryDispatcher, ts_regex, Products
import os, App.FactoryDispatcher, re, Products
from OFS.Traversable import Traversable
from Globals import DTMLFile, Persistent
from Globals import MessageDialog, default__class_init__
......@@ -109,7 +109,7 @@ customImporters={
XMLExportImport.magic: XMLExportImport.importXML,
}
bad_id=ts_regex.compile('[^a-zA-Z0-9-_~\,\. ]').search #TS
bad_id=re.compile(r'[^a-zA-Z0-9-_~,. ]').search #TS
# Global constants: __replaceable__ flags:
NOT_REPLACEABLE = 0
......@@ -126,7 +126,7 @@ def checkValidId(self, id, allow_dup=0):
# set to false before the object is added.
if not id or (type(id) != type('')):
raise BadRequestException, 'Empty or invalid id specified.'
if bad_id(id) != -1:
if bad_id(id) is not None:
raise BadRequestException, (
'The id "%s" contains characters illegal in URLs.' % id)
if id[0]=='_': raise BadRequestException, (
......
......@@ -89,10 +89,10 @@ Aqueduct database adapters, etc.
This module can also be used as a simple template for implementing new
item types.
$Id: SimpleItem.py,v 1.88 2001/04/18 18:00:07 chrism Exp $'''
__version__='$Revision: 1.88 $'[11:-2]
$Id: SimpleItem.py,v 1.89 2001/04/27 18:07:13 andreas Exp $'''
__version__='$Revision: 1.89 $'[11:-2]
import ts_regex, sys, Globals, App.Management, Acquisition, App.Undo
import re, sys, Globals, App.Management, Acquisition, App.Undo
import AccessControl.Role, AccessControl.Owned, App.Common
from webdav.Resource import Resource
from ExtensionClass import Base
......@@ -213,7 +213,7 @@ class Item(Base, Resource, CopySource, App.Management.Tabs, Traversable,
self, client=None, REQUEST={},
error_type=None, error_value=None, tb=None,
error_tb=None, error_message='',
tagSearch=ts_regex.compile('[a-zA-Z]>').search):
tagSearch=re.compile(r'[a-zA-Z]>').search):
try:
if error_type is None: error_type =sys.exc_info()[0]
......@@ -245,10 +245,10 @@ class Item(Base, Resource, CopySource, App.Management.Tabs, Traversable,
except:
pass
else:
if tagSearch(s) >= 0:
if tagSearch(s) is not None:
error_message=error_value
elif (type(error_value) is StringType
and tagSearch(error_value) >= 0):
and tagSearch(error_value) is not None):
error_message=error_value
if client is None: client=self
......
......@@ -83,13 +83,13 @@
#
##############################################################################
"""A utility module for content-type handling."""
__version__='$Revision: 1.13 $'[11:-2]
__version__='$Revision: 1.14 $'[11:-2]
from string import split, strip, lower, find
import ts_regex, mimetypes
import re, mimetypes
find_binary=ts_regex.compile('[\0-\7]').search
find_binary=re.compile('[\0-\7]').search
def text_type(s):
# Yuk. See if we can figure out the type by content.
......@@ -151,7 +151,7 @@ def guess_content_type(name='', body='', default=None):
type, enc=mimetypes.guess_type(name)
if type is None:
if body:
if find_binary(body) >= 0:
if find_binary(body) is not None:
type=default or 'application/octet-stream'
else:
type=(default or text_type(body)
......
......@@ -86,9 +86,8 @@
from Persistence import Persistent
import Acquisition
import ExtensionClass
from SearchIndex import UnIndex, UnTextIndex, UnKeywordIndex, Query
from SearchIndex import UnIndex, UnTextIndex, UnKeywordIndex
from SearchIndex.Lexicon import Lexicon
import regex, pdb
from MultiMapping import MultiMapping
from string import lower
import Record
......@@ -106,19 +105,6 @@ from SearchIndex.randid import randid
import time
def orify(seq,
query_map={
type(regex.compile('')): Query.Regex,
type(''): Query.String,
}):
subqueries=[]
for q in seq:
try: q=query_map[type(q)](q)
except KeyError: q=Query.Cmp(q)
subqueries.append(q)
return apply(Query.Or,tuple(subqueries))
class Catalog(Persistent, Acquisition.Implicit, ExtensionClass.Base):
""" An Object Catalog
......@@ -607,13 +593,7 @@ class Catalog(Persistent, Acquisition.Implicit, ExtensionClass.Base):
return used
def searchResults(self, REQUEST=None, used=None,
query_map={
type(regex.compile('')): Query.Regex,
type([]): orify,
type(''): Query.String,
}, **kw):
def searchResults(self, REQUEST=None, used=None, **kw):
# Get search arguments:
if REQUEST is None and not kw:
try: REQUEST=self.REQUEST
......
......@@ -90,17 +90,15 @@ from OFS.Folder import Folder
from OFS.FindSupport import FindSupport
from DateTime import DateTime
from SearchIndex import Query
import string, regex, urlparse, urllib, os, sys, time
import string, urlparse, urllib, os, sys, time
import Products
from Acquisition import Implicit
from Persistence import Persistent
from DocumentTemplate.DT_Util import InstanceDict, TemplateDict
from DocumentTemplate.DT_Util import Eval, expr_globals
from AccessControl.Permission import name_trans
from Catalog import Catalog, orify, CatalogError
from SearchIndex import UnIndex, UnTextIndex
from Catalog import Catalog, CatalogError
from Vocabulary import Vocabulary
from Shared.DC.ZRDB.TM import TM
from AccessControl import getSecurityManager
from zLOG import LOG, ERROR
......@@ -518,21 +516,14 @@ class ZCatalog(Folder, Persistent, Implicit):
'width': 8})
return r
def searchResults(self, REQUEST=None, used=None,
query_map={
type(regex.compile('')): Query.Regex,
type([]): orify,
type(()): orify,
type(''): Query.String,
}, **kw):
def searchResults(self, REQUEST=None, used=None, **kw):
"""
Search the catalog according to the ZTables search interface.
Search terms can be passed in the REQUEST or as keyword
arguments.
"""
return apply(self._catalog.searchResults,
(REQUEST,used, query_map), kw)
return apply(self._catalog.searchResults, (REQUEST,used), kw)
__call__=searchResults
......
......@@ -46,7 +46,7 @@
</td>
<td align="left" valign="top">
<div class="form-text">
<dtml-with name="aq_self" only>
<dtml-with sequence-item only>
<dtml-if name="meta_type">
<dtml-var name="meta_type" size="15">
<dtml-else>
......
......@@ -202,7 +202,7 @@ Notes on a new text index design
space.
"""
__version__='$Revision: 1.28 $'[11:-2]
__version__='$Revision: 1.29 $'[11:-2]
#XXX I strongly suspect that this is broken, but I'm not going to fix it. :(
......@@ -212,7 +212,7 @@ from BTrees.IIBTree import IISet, IIBucket
import operator
from Splitter import Splitter
from string import strip
import string, ts_regex, regex
import string, re
from Lexicon import Lexicon, stop_word_dict
from ResultList import ResultList
......@@ -463,7 +463,7 @@ QueryError='TextIndex.QueryError'
def query(s, index, default_operator = Or,
ws = (string.whitespace,)):
# First replace any occurences of " and not " with " andnot "
s = ts_regex.gsub('[%s]+and[%s]+not[%s]+' % (ws * 3), ' andnot ', s)
s = re.sub('[%s]+and[%s]+not[%s]+' % (ws * 3), ' andnot ', s)
q = parse(s)
q = parse2(q, default_operator)
return evaluate(q, index)
......@@ -515,13 +515,13 @@ def parse2(q, default_operator,
return q
def parens(s, parens_re = regex.compile('(\|)').search):
def parens(s, parens_re = re.compile(r'(\|)').search):
index=open_index=paren_count = 0
while 1:
index = parens_re(s, index)
if index < 0 : break
if index is None : break
if s[index] == '(':
paren_count = paren_count + 1
......@@ -543,7 +543,7 @@ def parens(s, parens_re = regex.compile('(\|)').search):
def quotes(s, ws = (string.whitespace,)):
# split up quoted regions
splitted = ts_regex.split(s, '[%s]*\"[%s]*' % (ws * 2))
splitted = re.split( '[%s]*\"[%s]*' % (ws * 2),s)
split=string.split
if (len(splitted) > 1):
......
......@@ -91,10 +91,10 @@ undo information so that objects can be unindexed when the old value
is no longer known.
"""
__version__ = '$Revision: 1.46 $'[11:-2]
__version__ = '$Revision: 1.47 $'[11:-2]
import string, regex, regsub, ts_regex
import string, re
import operator
from Globals import Persistent
......@@ -558,7 +558,7 @@ class UnTextIndex(Persistent, Implicit):
parsed again, then the whole thing is 'evaluated'. """
# First replace any occurences of " and not " with " andnot "
s = ts_regex.gsub(
s = re.sub(
'[%s]+[aA][nN][dD][%s]*[nN][oO][tT][%s]+' % (ws * 3),
' andnot ', s)
......@@ -700,13 +700,13 @@ def parse2(q, default_operator,
return q
def parens(s, parens_re=regex.compile('(\|)').search):
def parens(s, parens_re=re.compile(r'(\|)').search):
index = open_index = paren_count = 0
while 1:
index = parens_re(s, index)
if index < 0 : break
if index is None : break
if s[index] == '(':
paren_count = paren_count + 1
......@@ -728,7 +728,7 @@ def parens(s, parens_re=regex.compile('(\|)').search):
def quotes(s, ws=(string.whitespace,)):
# split up quoted regions
splitted = ts_regex.split(s, '[%s]*\"[%s]*' % (ws * 2))
splitted = re.split( '[%s]*\"[%s]*' % (ws * 2),s)
split=string.split
if (len(splitted) > 1):
......@@ -752,3 +752,4 @@ def quotes(s, ws=(string.whitespace,)):
splitted = filter(None, split(s))
return splitted
......@@ -109,7 +109,7 @@ class NameAssignments:
('name_subpath', 'self._getTraverseSubpath()'),
)
_isLegalName = re.compile('_$|[a-zA-Z][a-zA-Z0-9_]*$').match
_isLegalName = re.compile(r'_$|[a-zA-Z][a-zA-Z0-9_]*$').match
_asgns = {}
__allow_access_to_unprotected_subobjects__ = 1
......
......@@ -84,12 +84,12 @@
##############################################################################
__doc__='''Shared classes and functions
$Id: Aqueduct.py,v 1.44 2001/01/15 16:07:39 brian Exp $'''
__version__='$Revision: 1.44 $'[11:-2]
$Id: Aqueduct.py,v 1.45 2001/04/27 18:07:16 andreas Exp $'''
__version__='$Revision: 1.45 $'[11:-2]
import Globals, os
from Globals import Persistent
import DocumentTemplate, DateTime, ts_regex, regex, string
import DocumentTemplate, DateTime, re, string
import binascii, Acquisition
DateTime.now=DateTime.DateTime
from cStringIO import StringIO
......@@ -275,7 +275,7 @@ custom_default_report_src=DocumentTemplate.File(
os.path.join(dtml_dir,'customDefaultReport.dtml'))
def custom_default_report(id, result, action='', no_table=0,
goofy=regex.compile('[^a-zA-Z0-9_]').search
goofy=re.compile(r'\W').search
):
columns=result._searchable_result_columns()
__traceback_info__=columns
......@@ -294,7 +294,8 @@ def custom_default_report(id, result, action='', no_table=0,
row=[]
for c in columns:
n=c['name']
if goofy(n) >= 0: n='expr="_[\'%s]"' % (`'"'+n`[2:])
if goofy(n) is not None:
n='expr="_[\'%s]"' % (`'"'+n`[2:])
row.append(' %s<dtml-var %s%s>%s'
% (td,n,c['type']!='s' and ' null=""' or '',_td))
......@@ -342,12 +343,12 @@ class Args:
def parse(text,
result=None,
keys=None,
unparmre=ts_regex.compile(
'\([\0- ]*\([^\0- =\"]+\)\)'),
parmre=ts_regex.compile(
'\([\0- ]*\([^\0- =\"]+\)=\([^\0- =\"]+\)\)'),
qparmre=ts_regex.compile(
'\([\0- ]*\([^\0- =\"]+\)="\([^"]*\)\"\)'),
unparmre=re.compile(
r'([\0- ]*([^\0- =\"]+))'),
parmre=re.compile(
r'([\0- ]*([^\0- =\"]+)=([^\0- =\"]+))'),
qparmre=re.compile(
r'([\0- ]*([^\0- =\"]+)="([^"]*)\")'),
):
if result is None:
......@@ -356,25 +357,22 @@ def parse(text,
__traceback_info__=text
ts_results = parmre.match_group(text, (1,2,3))
if ts_results:
start, grps = ts_results
name=grps[1]
value={'default':grps[2]}
l=len(grps[0])
mo = parmre.match(text)
if mo:
name=mo.group(1)
value={'default':mo.group(2)}
l=len(mo.group(0))
else:
ts_results = qparmre.match_group(text, (1,2,3))
if ts_results:
start, grps = ts_results
name=grps[1]
value={'default':grps[2]}
l=len(grps[0])
mo = qparmre.match(text)
if mo:
name=mo.group(0)
value={'default':mo.group(2)}
l=len(mo.group(0))
else:
ts_results = unparmre.match_group(text, (1,2))
mo = unparmre.match(text)
if ts_results:
start, grps = ts_results
name=grps[1]
l=len(grps[0])
name=mo.group(1)
l=len(mo.group(0))
value={}
else:
if not text or not strip(text): return Args(result,keys)
......@@ -409,22 +407,22 @@ def nicify(name):
return string.upper(name[:1])+name[1:]
def decapitate(html, RESPONSE=None,
header_re=ts_regex.compile(
'\(\('
header_re=re.compile(
r'(('
'[^\0- <>:]+:[^\n]*\n'
'\|'
'[ \t]+[^\0- ][^\n]*\n'
'\)+\)[ \t]*\n\([\0-\377]+\)'
')+)[ \t]*\n([\0-\377]+)'
),
space_re=ts_regex.compile('\([ \t]+\)'),
name_re=ts_regex.compile('\([^\0- <>:]+\):\([^\n]*\)'),
space_re=re.compile(r'([ \t]+)'),
name_re=re.compile(r'([^\0- <>:]+):([^\n]*)'),
):
ts_results = header_re.match_group(html, (1,3))
if not ts_results: return html
mo = header_re.match(html)
if mo is None: return html
headers, html = ts_results[1]
headers, html = mo.group(1,3)
headers=string.split(headers,'\n')
......@@ -433,18 +431,18 @@ def decapitate(html, RESPONSE=None,
if not headers[i]:
del headers[i]
else:
ts_results = space_re.match_group(headers[i], (1,))
if ts_results:
mo = space_re.match(headers[i])
if mo:
headers[i-1]="%s %s" % (headers[i-1],
headers[i][len(ts_reults[1]):])
headers[i][len(mo.group(1)):])
del headers[i]
else:
i=i+1
for i in range(len(headers)):
ts_results = name_re.match_group(headers[i], (1,2))
if ts_reults:
k, v = ts_reults[1]
mo = name_re.match(headers[i])
if mo:
k,v = mo.group(1,2)
v=string.strip(v)
else:
raise ValueError, 'Invalid Header (%d): %s ' % (i,headers[i])
......
......@@ -85,11 +85,10 @@
__doc__='''Class for reading RDB files
$Id: RDB.py,v 1.29 2000/12/21 17:12:00 brian Exp $'''
__version__='$Revision: 1.29 $'[11:-2]
$Id: RDB.py,v 1.30 2001/04/27 18:07:16 andreas Exp $'''
__version__='$Revision: 1.30 $'[11:-2]
import regex, regsub
from string import split, strip, lower, upper, atof, atoi, atol, find, join
from string import split, strip, lower, upper, atof, atoi, atol, find, join,find
import DateTime
from Missing import MV
from array import array
......@@ -136,8 +135,8 @@ class DatabaseResults:
self._parent=parent
if zbrains is None: zbrains=NoBrains
comment_pattern=regex.compile('#')
while line and comment_pattern.match(line) >= 0: line=readline()
while line and line.find('#') != -1 : line=readline()
line=line[:-1]
if line and line[-1:] in '\r\n': line=line[:-1]
......@@ -174,14 +173,14 @@ class DatabaseResults:
i=0
self._parsers=parsers=[]
defre=regex.compile('\([0-9]*\)\([a-zA-Z]\)?')
defre=re.compile(r'([0-9]*)([a-zA-Z])?')
self._data_dictionary=dd={}
self.__items__=items=[]
for _def in defs:
_def=strip(_def)
if not _def:
raise ValueError, ('Empty column definition for %s' % names[i])
if defre.match(_def) < 0:
if defre.match(_def) is None:
raise ValueError, (
'Invalid column definition for, %s, for %s'
% _def, names[i])
......
......@@ -93,7 +93,6 @@ from string import replace
import struct
import base64
import string
import regex
import pickle
import tempfile
import marshal
......
......@@ -85,6 +85,7 @@
import re, ST, STDOM
from string import split, join, replace, expandtabs, strip, find
from STletters import letters
StringType=type('')
ListType=type([])
......@@ -500,7 +501,7 @@ class DocumentClass:
def doc_numbered(
self, paragraph,
expr = re.compile('(\s*[a-zA-Z]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)').match):
expr = re.compile('(\s*[%s]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)' % letters).match):
# This is the old expression. It had a nasty habit
# of grabbing paragraphs that began with a single
......@@ -549,7 +550,7 @@ class DocumentClass:
delim=d)
def doc_header(self, paragraph,
expr = re.compile('[ a-zA-Z0-9.:/,-_*<>\?\'\"]+').match
expr = re.compile('[ %s0-9.:/,-_*<>\?\'\"]+' % letters).match
):
subs=paragraph.getSubparagraphs()
if not subs: return None
......@@ -583,7 +584,7 @@ class DocumentClass:
def doc_emphasize(
self, s,
expr = re.compile('\s*\*([ \na-zA-Z0-9.:/;,\'\"\?\=\-\>\<\(\)]+)\*(?!\*|-)').search
expr = re.compile('\s*\*([ \n%s0-9.:/;,\'\"\?\=\-\>\<\(\)]+)\*(?!\*|-)' % letters).search
):
r=expr(s)
......@@ -596,7 +597,7 @@ class DocumentClass:
def doc_inner_link(self,
s,
expr1 = re.compile("\.\.\s*").search,
expr2 = re.compile("\[[a-zA-Z0-9]+\]").search):
expr2 = re.compile("\[[%s0-9]+\]" % letters).search):
# make sure we dont grab a named link
if expr2(s) and expr1(s):
......@@ -616,7 +617,7 @@ class DocumentClass:
def doc_named_link(self,
s,
expr=re.compile("(\.\.\s)(\[[a-zA-Z0-9]+\])").search):
expr=re.compile("(\.\.\s)(\[[%s0-9]+\])" % letters).search):
result = expr(s)
if result:
......@@ -631,7 +632,7 @@ class DocumentClass:
def doc_underline(self,
s,
expr=re.compile("\_([a-zA-Z0-9\s\.,\?\/]+)\_").search):
expr=re.compile("\_([%s0-9\s\.,\?\/]+)\_" % letters).search):
result = expr(s)
if result:
......@@ -643,7 +644,7 @@ class DocumentClass:
def doc_strong(self,
s,
expr = re.compile('\s*\*\*([ \na-zA-Z0-9.:/;\-,!\?\'\"]+)\*\*').search
expr = re.compile('\s*\*\*([ \n%s0-9.:/;\-,!\?\'\"]+)\*\*' % letters).search
):
r=expr(s)
......@@ -656,8 +657,8 @@ class DocumentClass:
def doc_href(
self, s,
expr1 = re.compile("(\"[ a-zA-Z0-9\n\-\.\,\;\(\)\/\:\/\*\']+\")(:)([a-zA-Z0-9\:\/\.\~\-]+)([,]*\s*)").search,
expr2 = re.compile('(\"[ a-zA-Z0-9\n\-\.\:\;\(\)\/\*\']+\")([,]+\s+)([a-zA-Z0-9\@\.\,\?\!\/\:\;\-\#]+)(\s*)').search):
expr1 = re.compile("(\"[ %s0-9\n\-\.\,\;\(\)\/\:\/\*\']+\")(:)([a-zA-Z0-9\:\/\.\~\-]+)([,]*\s*)" % letters).search,
expr2 = re.compile('(\"[ %s0-9\n\-\.\:\;\(\)\/\*\']+\")([,]+\s+)([a-zA-Z0-9\@\.\,\?\!\/\:\;\-\#]+)(\s*)' % letters).search):
#expr1=re.compile('\"([ a-zA-Z0-9.:/;,\n\~\(\)\-]+)\"'
# ':'
......
#! /usr/bin/env python -- # -*- python -*-
##############################################################################
#
# Zope Public License (ZPL) Version 1.0
# -------------------------------------
#
# Copyright (c) Digital Creations. All rights reserved.
#
# This license has been certified as Open Source(tm).
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions in source code must retain the above copyright
# notice, this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# 3. Digital Creations requests that attribution be given to Zope
# in any manner possible. Zope includes a "Powered by Zope"
# button that is installed by default. While it is not a license
# violation to remove this button, it is requested that the
# attribution remain. A significant investment has been put
# into Zope, and this effort will continue if the Zope community
# continues to grow. This is one way to assure that growth.
#
# 4. All advertising materials and documentation mentioning
# features derived from or use of this software must display
# the following acknowledgement:
#
# "This product includes software developed by Digital Creations
# for use in the Z Object Publishing Environment
# (http://www.zope.org/)."
#
# In the event that the product being advertised includes an
# intact Zope distribution (with copyright and license included)
# then this clause is waived.
#
# 5. Names associated with Zope or Digital Creations must not be used to
# endorse or promote products derived from this software without
# prior written permission from Digital Creations.
#
# 6. Modified redistributions of any form whatsoever must retain
# the following acknowledgment:
#
# "This product includes software developed by Digital Creations
# for use in the Z Object Publishing Environment
# (http://www.zope.org/)."
#
# Intact (re-)distributions of any official Zope release do not
# require an external acknowledgement.
#
# 7. Modifications are encouraged but must be packaged separately as
# patches to official Zope releases. Distributions that do not
# clearly separate the patches from the original work must be clearly
# labeled as unofficial distributions. Modifications which do not
# carry the name Zope may be packaged in any form, as long as they
# conform to all of the clauses above.
#
#
# Disclaimer
#
# THIS SOFTWARE IS PROVIDED BY DIGITAL CREATIONS ``AS IS'' AND ANY
# EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL DIGITAL CREATIONS OR ITS
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
#
# This software consists of contributions made by Digital Creations and
# many individuals on behalf of Digital Creations. Specific
# attributions are listed in the accompanying credits file.
#
##############################################################################
'''Structured Text Manipulation
Parse a structured text string into a form that can be used with
structured formats, like html.
Structured text is text that uses indentation and simple
symbology to indicate the structure of a document.
A structured string consists of a sequence of paragraphs separated by
one or more blank lines. Each paragraph has a level which is defined
as the minimum indentation of the paragraph. A paragraph is a
sub-paragraph of another paragraph if the other paragraph is the last
preceding paragraph that has a lower level.
Special symbology is used to indicate special constructs:
- A single-line paragraph whose immediately succeeding paragraphs are lower
level is treated as a header.
- A paragraph that begins with a '-', '*', or 'o' is treated as an
unordered list (bullet) element.
- A paragraph that begins with a sequence of digits followed by a
white-space character is treated as an ordered list element.
- A paragraph that begins with a sequence of sequences, where each
sequence is a sequence of digits or a sequence of letters followed
by a period, is treated as an ordered list element.
- A paragraph with a first line that contains some text, followed by
some white-space and '--' is treated as
a descriptive list element. The leading text is treated as the
element title.
- Sub-paragraphs of a paragraph that ends in the word 'example' or the
word 'examples', or '::' is treated as example code and is output as is.
- Text enclosed single quotes (with white-space to the left of the
first quote and whitespace or punctuation to the right of the second quote)
is treated as example code.
- Text surrounded by '*' characters (with white-space to the left of the
first '*' and whitespace or punctuation to the right of the second '*')
is emphasized.
- Text surrounded by '**' characters (with white-space to the left of the
first '**' and whitespace or punctuation to the right of the second '**')
is made strong.
- Text surrounded by '_' underscore characters (with whitespace to the left
and whitespace or punctuation to the right) is made underlined.
- Text enclosed by double quotes followed by a colon, a URL, and concluded
by punctuation plus white space, *or* just white space, is treated as a
hyper link. For example:
"Zope":http://www.zope.org/ is ...
Is interpreted as '<a href="http://www.zope.org/">Zope</a> is ....'
Note: This works for relative as well as absolute URLs.
- Text enclosed by double quotes followed by a comma, one or more spaces,
an absolute URL and concluded by punctuation plus white space, or just
white space, is treated as a hyper link. For example:
"mail me", mailto:amos@digicool.com.
Is interpreted as '<a href="mailto:amos@digicool.com">mail me</a>.'
- Text enclosed in brackets which consists only of letters, digits,
underscores and dashes is treated as hyper links within the document.
For example:
As demonstrated by Smith [12] this technique is quite effective.
Is interpreted as '... by Smith <a href="#12">[12]</a> this ...'. Together
with the next rule this allows easy coding of references or end notes.
- Text enclosed in brackets which is preceded by the start of a line, two
periods and a space is treated as a named link. For example:
.. [12] "Effective Techniques" Smith, Joe ...
Is interpreted as '<a name="12">[12]</a> "Effective Techniques" ...'.
Together with the previous rule this allows easy coding of references or
end notes.
- A paragraph that has blocks of text enclosed in '||' is treated as a
table. The text blocks correspond to table cells and table rows are
denoted by newlines. By default the cells are center aligned. A cell
can span more than one column by preceding a block of text with an
equivalent number of cell separators '||'. Newlines and '|' cannot
be a part of the cell text. For example:
|||| **Ingredients** ||
|| *Name* || *Amount* ||
||Spam||10||
||Eggs||3||
is interpreted as::
<TABLE BORDER=1 CELLPADDING=2>
<TR>
<TD ALIGN=CENTER COLSPAN=2> <strong>Ingredients</strong> </TD>
</TR>
<TR>
<TD ALIGN=CENTER COLSPAN=1> <em>Name</em> </TD>
<TD ALIGN=CENTER COLSPAN=1> <em>Amount</em> </TD>
</TR>
<TR>
<TD ALIGN=CENTER COLSPAN=1>Spam</TD>
<TD ALIGN=CENTER COLSPAN=1>10</TD>
</TR>
<TR>
<TD ALIGN=CENTER COLSPAN=1>Eggs</TD>
<TD ALIGN=CENTER COLSPAN=1>3</TD>
</TR>
</TABLE>
'''
import ts_regex
import regex
from ts_regex import gsub
from string import split, join, strip, find
import string,re
def untabify(aString,
             indent_tab=re.compile('(\n|^)( *)\t').search,
             ):
    '''\
    Convert indentation tabs to spaces.

    Each tab found in a line's leading whitespace is expanded so that the
    following text starts at the next multiple-of-8 column.  Converted
    from the removed ts_regex module to the standard re module.
    '''
    result=''
    rest=aString
    while 1:
        mo = indent_tab(rest)
        if mo is not None:
            start = mo.start()
            lnl = len(mo.group(1))     # length of the leading newline ('' when matched at ^)
            indent = len(mo.group(2))  # number of spaces before the tab
            result = result + rest[:start]
            # Replace "<spaces>\t" with spaces up to the next 8-column stop.
            # (// keeps this integer division under both Python 2 and 3.)
            rest = "\n%s%s" % (' ' * ((indent//8+1)*8),
                               rest[start+indent+1+lnl:])
        else:
            return result + rest
def indent(aString, indent=2):
    """Indent a string the given number of spaces.

    Tabs are expanded first; every line of the result is prefixed with
    *indent* spaces and the result ends with a newline.
    """
    r=split(untabify(aString),'\n')
    if not r: return ''
    if not r[-1]: del r[-1]
    # Bug fix: the original read ``tab=' '*level`` but no name ``level``
    # exists in this scope -- the parameter is ``indent``.
    tab=' '*indent
    return "%s%s\n" % (tab,join(r,'\n'+tab))
def reindent(aString, indent=2, already_untabified=0):
    "reindent a block of text, so that the minimum indent is as given"
    # Normalize tabs to spaces first unless the caller already did.
    if not already_untabified:
        aString = untabify(aString)
    current = indent_level(aString)[0]
    if current == indent:
        return aString
    if indent > current:
        # Need more indentation: prefix every line with the difference.
        pad = ' ' * (indent - current)
        lines = [pad + line for line in split(aString, '\n')]
    else:
        # Need less indentation: cut the surplus off every line.
        surplus = current - indent
        lines = [line[surplus:] for line in split(aString, '\n')]
    return join(lines, '\n')
def indent_level(aString,
                 indent_space=re.compile('\n( *)').search,
                 ):
    '''\
    Find the minimum indentation for a string, not counting blank lines.

    Returns a tuple ``(indent, aString)``.  Converted from the removed
    ts_regex module to the standard re module (the old call also asked
    for a second group the pattern never had; only group 1 is used).
    '''
    start=0
    text='\n'+aString
    indent=l=len(text)
    while 1:
        mo = indent_space(text, start)
        if mo is not None:
            i = len(mo.group(1))        # width of this line's indentation
            start = mo.start() + i + 1  # index of the first non-space char
            if start < l and text[start] != '\n':  # Skip blank lines
                if not i: return (0,aString)
                if i < indent: indent = i
        else:
            return (indent,aString)
def paragraphs(list, start):
    """Count how many entries immediately after ``list[start]`` are its
    sub-paragraphs, i.e. have a strictly greater indentation level."""
    total = len(list)
    level = list[start][0]
    pos = start + 1
    while pos < total and list[pos][0] > level:
        pos = pos + 1
    return pos - 1 - start
def structure(list):
    """Turn a flat list of (level, paragraph) pairs into a nested
    [(paragraph, [children...]), ...] tree driven by the levels."""
    if not list: return []
    tree = []
    pos = 0
    total = len(list)
    while pos < total:
        # Children are the run of higher-level entries that follow.
        child_count = paragraphs(list, pos)
        children = structure(list[pos+1 : pos+1+child_count])
        tree.append((list[pos][1], children))
        pos = pos + 1 + child_count
    return tree
class Table:
    """Parse and render '||'-delimited structured-text tables."""

    CELL=' <TD ALIGN=CENTER COLSPAN=%i>%s</TD>\n'
    ROW=' <TR>\n%s </TR>\n'
    TABLE='\n<TABLE BORDER=1 CELLPADDING=2>\n%s</TABLE>'

    def create(self,aPar,
               td_reg=re.compile(r'[ \t\n]*\|\|([^\0x00|]*)')
               ):
        '''parses a table and returns nested list representing the
        table'''
        self.table=[]
        for line in filter(None, split(aPar, '\n')):
            cells = []
            remaining = line
            while 1:
                mo = td_reg.match(remaining)
                # Any line that is not made of '||'-cells means this
                # paragraph is not a table.
                if not mo: return 0
                cells.append(mo.group(1))
                cut = mo.end(1)
                if cut == len(remaining): break
                remaining = remaining[cut:]
            self.table.append(cells)
        return 1

    def html(self):
        '''Creates an HTML representation of table'''
        rendered_rows = []
        for row in self.table:
            rendered_cells = []
            span = 1
            for cell in row:
                if cell == '':
                    # An empty cell widens the colspan of the next one.
                    span = span + 1
                else:
                    rendered_cells.append(self.CELL % (span, cell))
                    span = 1
            rendered_rows.append(self.ROW % join(rendered_cells, ''))
        return self.TABLE % join(rendered_rows, '')

table=Table()
class StructuredText:
    """Model text as structured collection of paragraphs.

    Structure is implied by the indentation level.
    This class is intended as a base for classes that do actual text
    output formatting.
    """

    def __init__(self, aStructuredString, level=0,
                 paragraph_divider=re.compile('(?:\r?\n *)+\r?\n'),
                 ):
        '''Convert a structured text string into a structured text object.

        Arguments:

          aStructuredString -- The string to be parsed.
          level -- The level of top level headings to be created.
        '''
        # Turn '"text":url' constructs into hyperlinks.
        pat = ' \"([%s0-9-_,./?=@~&]*)\":' % string.letters+ \
              '([-:%s0-9_,./?=@#~&]*?)' % string.letters + \
              '([.:?;] )'
        p_reg = re.compile(pat,re.M)
        aStructuredString = p_reg.sub(r'<a href="\2">\1</a>\3 ' , aStructuredString)

        # Turn '"text", url' constructs into hyperlinks.
        pat = ' \"([%s0-9-_,./?=@~&]*)\", ' % string.letters+ \
              '([-:%s0-9_,./?=@#~&]*?)' % string.letters + \
              '([.:?;] )'
        p_reg = re.compile(pat,re.M)
        aStructuredString = p_reg.sub(r'<a href="\2">\1</a>\3 ' , aStructuredString)

        # Strip the empty protocol produced by relative links.
        protoless = find(aStructuredString, '<a href=":')
        if protoless != -1:
            aStructuredString = re.sub('<a href=":', '<a href="',
                                       aStructuredString)

        self.level=level
        # Converted from the removed regex/ts_regex modules.  The old
        # pattern used a plain group; re.split would also return the
        # group's matches, so a non-capturing group is used here to keep
        # the regsub.split behavior (delimiters dropped).
        paragraphs=re.split(paragraph_divider,
                            untabify(aStructuredString))
        paragraphs=map(indent_level,paragraphs)
        self.structure=structure(paragraphs)

    def __str__(self):
        return str(self.structure)
# Building blocks for the character-tagging patterns below: a tagged run
# must be preceded by whitespace/'(' (or start of string) and followed by
# whitespace/punctuation (or end of string).
ctag_prefix=r'([\x00- \\(]|^)'
ctag_suffix=r'([\x00- ,.:;!?\\)]|$)'
ctag_middle=r'[%s]([^\x00- %s][^%s]*[^\x00- %s]|[^%s])[%s]'
ctag_middl2=r'[%s][%s]([^\x00- %s][^%s]*[^\x00- %s]|[^%s])[%s][%s]'

def ctag(s,
         em=re.compile(
             ctag_prefix+(ctag_middle % (("*",)*6) )+ctag_suffix),
         strong=re.compile(
             ctag_prefix+(ctag_middl2 % (("*",)*8))+ctag_suffix),
         under=re.compile(
             ctag_prefix+(ctag_middle % (("_",)*6) )+ctag_suffix),
         code=re.compile(
             ctag_prefix+(ctag_middle % (("\'",)*6))+ctag_suffix),
         ):
    """Apply character-level markup: **strong**, _underline_, 'code'
    and *emphasis*, in that order."""
    if s is None:
        s = ''
    # '**' is handled before '*' so strong text is not mistaken for
    # emphasis; the substitution order matches the original.
    for pattern, tag in ((strong, 'strong'), (under, 'u'),
                         (code, 'code'), (em, 'em')):
        s = pattern.sub(r'\1<%s>\2</%s>\3' % (tag, tag), s)
    return s
class HTML(StructuredText):
    '''\
    An HTML structured text formatter.
    '''

    def __str__(self,
                extra_dl=re.compile("</dl>\n<dl>"),
                extra_ul=re.compile("</ul>\n<ul>"),
                extra_ol=re.compile("</ol>\n<ol>"),
                ):
        '''\
        Return an HTML string representation of the structured text data.

        Adjacent lists of the same kind are merged by deleting the
        closing/opening tag pairs between them.
        '''
        s=self._str(self.structure,self.level)
        s=extra_dl.sub('\n',s)
        s=extra_ul.sub('\n',s)
        s=extra_ol.sub('\n',s)
        return s

    def ul(self, before, p, after):
        # Render one unordered (bullet) list item; `before` is the HTML
        # accumulated so far, `after` the rendered sub-paragraphs.
        if p: p="<p>%s</p>" % strip(ctag(p))
        return ('%s<ul><li>%s\n%s\n</li></ul>\n'
                % (before,p,after))

    def ol(self, before, p, after):
        # Render one ordered list item (same shape as ul()).
        if p: p="<p>%s</p>" % strip(ctag(p))
        return ('%s<ol><li>%s\n%s\n</li></ol>\n'
                % (before,p,after))

    def dl(self, before, t, d, after):
        # Render one definition-list item: t is the term, d the definition.
        return ('%s<dl><dt>%s</dt><dd><p>%s</p>\n%s\n</dd></dl>\n'
                % (before,ctag(t),ctag(d),after))

    def head(self, before, t, level, d):
        # Render a heading: <h1>..<h5> while the level fits, otherwise
        # fall back to a definition list with a strong term.
        if level > 0 and level < 6:
            return ('%s<h%d>%s</h%d>\n%s\n'
                    % (before,level,strip(ctag(t)),level,d))
        t="<p><strong>%s</strong></p>" % strip(ctag(t))
        return ('%s<dl><dt>%s\n</dt><dd>%s\n</dd></dl>\n'
                % (before,t,d))

    def normal(self,before,p,after):
        # Render an ordinary paragraph.
        return '%s<p>%s</p>\n%s\n' % (before,ctag(p),after)

    def pre(self,structure,tagged=0):
        # Render example paragraphs verbatim inside a single <PRE> block;
        # `tagged` is true on recursive calls so only the outermost call
        # emits the <PRE>/</PRE> tags.
        if not structure: return ''
        if tagged:
            r=''
        else:
            r='<PRE>\n'
        for s in structure:
            r="%s%s\n\n%s" % (r,html_quote(s[0]),self.pre(s[1],1))
        if not tagged: r=r+'</PRE>\n'
        return r

    def table(self,before,table,after):
        # Emit an already-rendered HTML table (see the module-level
        # Table instance) as a paragraph.
        return '%s<p>%s</p>\n%s\n' % (before,ctag(table),after)

    def _str(self,structure,level,
             # Static
             # NOTE(review): these helpers still use the old ts_regex
             # compatibility layer -- .match_group returns a
             # (length, groups) tuple or None, and .search returns an
             # integer offset (< 0 when not found), unlike re match
             # objects.  Confirm against the ts_regex shim before
             # converting.
             bullet=ts_regex.compile('[ \t\n]*[o*-][ \t\n]+\([^\0]*\)'
                                     ).match_group,
             example=ts_regex.compile('[\0- ]examples?:[\0- ]*$'
                                      ).search,
             dl=ts_regex.compile('\([^\n]+\)[ \t]+--[ \t\n]+\([^\0]*\)'
                                 ).match_group,
             nl=ts_regex.compile('\n').search,
             ol=ts_regex.compile(
                 '[ \t]*\(\([0-9]+\|[%s]+\)[.)]\)+[ \t\n]+\([^\0]*\|$\)' % string.letters
                 ).match_group,
             olp=ts_regex.compile('[ \t]*([0-9]+)[ \t\n]+\([^\0]*\|$\)'
                                  ).match_group,
             ):
        """Recursively render the (paragraph, children) structure tree
        as HTML, classifying each paragraph by its leading text."""
        r=''
        for s in structure:
            # Bullet list element: leading 'o', '*' or '-'.
            ts_results = bullet(s[0], (1,))
            if ts_results:
                p = ts_results[1]
                # A trailing '::' marks the children as example code.
                if s[0][-2:]=='::' and s[1]: ps=self.pre(s[1])
                else: ps=self._str(s[1],level)
                r=self.ul(r,p,ps)
                continue
            # Ordered list element: '1.', 'a.', '1)' style prefixes.
            ts_results = ol(s[0], (3,))
            if ts_results:
                p = ts_results[1]
                if s[0][-2:]=='::' and s[1]: ps=self.pre(s[1])
                else: ps=self._str(s[1],level)
                r=self.ol(r,p,ps)
                continue
            # Ordered list element: parenthesized number '(1)'.
            ts_results = olp(s[0], (1,))
            if ts_results:
                p = ts_results[1]
                if s[0][-2:]=='::' and s[1]: ps=self.pre(s[1])
                else: ps=self._str(s[1],level)
                r=self.ol(r,p,ps)
                continue
            # Descriptive list element: 'title -- description'.
            ts_results = dl(s[0], (1,2))
            if ts_results:
                t,d = ts_results[1]
                r=self.dl(r,t,d,self._str(s[1],level))
                continue
            # Paragraph ending in 'example(s):' introduces example code.
            if example(s[0]) >= 0 and s[1]:
                # Introduce an example, using pre tags:
                r=self.normal(r,s[0],self.pre(s[1]))
                continue
            # Paragraph ending in '::' also introduces example code;
            # one colon is kept in the output.
            if s[0][-2:]=='::' and s[1]:
                # Introduce an example, using pre tags:
                r=self.normal(r,s[0][:-1],self.pre(s[1]))
                continue
            if table.create(s[0]):
                ## table support.
                r=self.table(r,table.html(),self._str(s[1],level))
                continue
            else:
                # Single-line paragraph with sub-paragraphs and no
                # trailing ':' is a heading (nl() < 0 means "no newline").
                if nl(s[0]) < 0 and s[1] and s[0][-1:] != ':':
                    # Treat as a heading
                    t=s[0]
                    r=self.head(r,t,level,
                                self._str(s[1],level and level+1))
                else:
                    r=self.normal(r,s[0],self._str(s[1],level))
        return r
def html_quote(v,
               character_entities=(
                   (re.compile('&'), '&amp;'),
                   (re.compile("<"), '&lt;' ),
                   (re.compile(">"), '&gt;' ),
                   (re.compile('"'), '&quot;')
               )): #"
    """Return str(v) with the HTML special characters &, <, > and "
    replaced by their character entities.

    '&' is substituted first so the ampersands introduced by the later
    substitutions are not escaped again.
    """
    text=str(v)
    # Bug fix: the loop variable was previously named ``re``, shadowing
    # the ``re`` module inside this function; the names were also
    # misleading (``name`` held the replacement entity).
    for pattern, entity in character_entities:
        text = pattern.sub(entity, text)
    return text
def html_with_references(text, level=1):
    """Render ``text`` as HTML, first converting '.. [ref]' targets and
    '[ref]' citations into named anchors and hyperlinks."""
    letter_class = string.letters
    # '.. [ref]' at the start of a line becomes a named anchor target.
    anchor_pat = r'[\0\n]\.\. \[([0-9_%s-]+)\]' % letter_class
    text = re.sub(anchor_pat, r'\n <a name="\1">[\1]</a>', text)
    # '[ref]' in running text becomes a link to that anchor.
    citation_pat = (r'([\x00- ,])\[(?P<ref>[0-9_%s-]+)\]([\x00- ,.:])'
                    % letter_class)
    text = re.sub(citation_pat, r'\1<a href="#\2">[\2]</a>\3', text)
    # '[name.html]' becomes a link to that page.
    text = re.sub(r'([\0- ,])\[([^]]+)\.html\]([\0- ,.:])',
                  r'\1<a href="\2.html">[\2]</a>\3', text)
    return HTML(text,level=level)
def main():
    """Command-line entry point: read structured text from a file
    argument or stdin and print it as HTML.

    Options: -w emit a CGI Content-Type header; -l set the locale from
    the environment; (-t is accepted but unused here).
    """
    import sys, getopt
    opts,args=getopt.getopt(sys.argv[1:],'twl')
    if args:
        [infile]=args
        s=open(infile,'r').read()
    else:
        s=sys.stdin.read()
    # NOTE(review): indentation was reconstructed -- the full pipeline
    # (shebang stripping, <h1> titling) appears to run only when options
    # were given; the optionless path prints the bare conversion. Confirm
    # against the original file.
    if opts:
        if filter(lambda o: o[0]=='-w', opts):
            print 'Content-Type: text/html\n'
        if filter(lambda o: o[0]=='-l', opts):
            import locale
            locale.setlocale(locale.LC_ALL,"")
        # Strip a leading '#!' interpreter line and the header block
        # that follows it before converting.
        if s[:2]=='#!':
            s=re.sub('^#![^\n]+','',s)
            mo = re.compile('([\0-\n]*\n)').match(s)
            if mo is not None:
                s = s[len(mo.group(0)) :]
        s=str(html_with_references(s))
        # Promote a leading <h1> into a full HTML page with a <title>.
        if s[:4]=='<h1>':
            t=s[4:find(s,'</h1>')]
            s='''<html><head><title>%s</title>
</head><body>
%s
</body></html>
''' % (t,s)
        print s
    else:
        print html_with_references(s)

if __name__=="__main__": main()
......@@ -85,6 +85,8 @@
import re, ST, STDOM
from string import split, join, replace, expandtabs, strip, find, rstrip
from STletters import letters
StringType=type('')
ListType=type([])
......@@ -784,7 +786,7 @@ class DocumentClass:
def doc_numbered(
self, paragraph,
expr = re.compile(r'(\s*[a-zA-Z]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)').match):
expr = re.compile(r'(\s*[%s]+\.)|(\s*[0-9]+\.)|(\s*[0-9]+\s+)' % letters).match):
# This is the old expression. It had a nasty habit
# of grabbing paragraphs that began with a single
......@@ -833,7 +835,7 @@ class DocumentClass:
delim=d)
def doc_header(self, paragraph,
expr = re.compile(r'[ a-zA-Z0-9.:/,-_*<>\?\'\"]+').match
expr = re.compile(r'[ %s0-9.:/,-_*<>\?\'\"]+' % letters).match
):
subs=paragraph.getSubparagraphs()
if not subs: return None
......@@ -865,7 +867,7 @@ class DocumentClass:
def doc_emphasize(
self, s,
expr = re.compile(r'\s*\*([ \na-zA-Z0-9.:/;,\'\"\?\-\_\/\=\-\>\<\(\)]+)\*(?!\*|-)').search
expr = re.compile(r'\s*\*([ \n%s0-9.:/;,\'\"\?\-\_\/\=\-\>\<\(\)]+)\*(?!\*|-)' % letters).search
):
r=expr(s)
......@@ -878,7 +880,7 @@ class DocumentClass:
def doc_inner_link(self,
s,
expr1 = re.compile(r"\.\.\s*").search,
expr2 = re.compile(r"\[[a-zA-Z0-9]+\]").search):
expr2 = re.compile(r"\[[%s0-9]+\]" % letters ).search):
# make sure we dont grab a named link
if expr2(s) and expr1(s):
......@@ -898,7 +900,7 @@ class DocumentClass:
def doc_named_link(self,
s,
expr=re.compile(r"(\.\.\s)(\[[a-zA-Z0-9]+\])").search):
expr=re.compile(r"(\.\.\s)(\[[%s0-9]+\])" % letters).search):
result = expr(s)
if result:
......@@ -912,7 +914,7 @@ class DocumentClass:
def doc_underline(self,
s,
expr=re.compile(r"\_([a-zA-Z0-9\s\.,\?]+)\_").search):
expr=re.compile(r"\_([%s0-9\s\.,\?]+)\_" % letters).search):
result = expr(s)
if result:
......@@ -924,7 +926,7 @@ class DocumentClass:
def doc_strong(self,
s,
expr = re.compile(r'\s*\*\*([ \na-zA-Z0-9.:/;\-,!\?\'\"]+)\*\*').search
expr = re.compile(r'\s*\*\*([ \n%sZ0-9.:/;\-,!\?\'\"]+)\*\*' % letters).search
):
r=expr(s)
......@@ -935,8 +937,8 @@ class DocumentClass:
return None
## Some constants to make the doc_href() regex easier to read.
_DQUOTEDTEXT = r'("[ a-zA-Z0-9\n\-\.\,\;\(\)\/\:\/\*\']+")' ## double quoted text
_URL_AND_PUNC = r'([a-zA-Z0-9\@\.\,\?\!\/\:\;\-\#\~]+)'
_DQUOTEDTEXT = r'("[%s0-9\n\-\.\,\;\(\)\/\:\/\*\']+")' % letters ## double quoted text
_URL_AND_PUNC = r'([%s0-9\@\.\,\?\!\/\:\;\-\#\~]+)' % letters
_SPACES = r'(\s*)'
def doc_href(self, s,
......@@ -970,7 +972,7 @@ class DocumentClass:
else:
return None
def doc_sgml(self,s,expr=re.compile(r"\<[a-zA-Z0-9\.\=\'\"\:\/\-\#\+\s\*]+\>").search):
def doc_sgml(self,s,expr=re.compile(r"\<[%s0-9\.\=\'\"\:\/\-\#\+\s\*]+\>" % letters).search):
"""
SGML text is ignored and outputed as-is
"""
......@@ -982,7 +984,7 @@ class DocumentClass:
def doc_xref(self, s,
expr = re.compile('\[([a-zA-Z0-9\-.:/;,\n\~]+)\]').search
expr = re.compile('\[([%s0-9\-.:/;,\n\~]+)\]' % letters).search
):
r = expr(s)
if r:
......
......@@ -101,6 +101,9 @@ def HTML(aStructuredString, level=0):
doc = Document(st)
return HTMLNG(doc)
def StructuredText(aStructuredString, level=0):
return HTML(aStructuredString,level)
def html_with_references(text, level=1):
text = re.sub(
r'[\0\n]\.\. \[([0-9_%s-]+)\]' % letters,
......
......@@ -85,7 +85,7 @@
"""Zope Classes
"""
import Globals, string, OFS.SimpleItem, OFS.PropertySheets, Products
import Method, Basic, Property, AccessControl.Role, ts_regex
import Method, Basic, Property, AccessControl.Role, re
from ZPublisher.mapply import mapply
from ExtensionClass import Base
......@@ -190,14 +190,14 @@ def dbVersionEquals(ver):
Globals.DatabaseVersion == ver
bad_id=ts_regex.compile('[^a-zA-Z0-9_]').search
bad_id=re.compile('[^a-zA-Z0-9_]').search
def manage_addZClass(self, id, title='', baseclasses=[],
meta_type='', CreateAFactory=0, REQUEST=None,
zope_object=0):
"""Add a Z Class
"""
if bad_id(id) != -1:
if bad_id(id) is not None:
raise 'Bad Request', (
'The id %s is invalid as a class name.' % id)
if not meta_type: meta_type=id
......
......@@ -93,9 +93,8 @@ from ZPublisher.HTTPRequest import HTTPRequest
from cStringIO import StringIO
import os
from regsub import gsub
from base64 import encodestring
import string
import string,re
class FTPRequest(HTTPRequest):
......@@ -141,7 +140,7 @@ class FTPRequest(HTTPRequest):
env['REQUEST_METHOD']='GET' # XXX what should this be?
env['SERVER_SOFTWARE']=channel.server.SERVER_IDENT
if channel.userid != 'anonymous':
env['HTTP_AUTHORIZATION']='Basic %s' % gsub('\012','',
env['HTTP_AUTHORIZATION']='Basic %s' % re.sub('\012','',
encodestring('%s:%s' % (channel.userid, channel.password)))
env['SERVER_NAME']=channel.server.hostname
env['SERVER_PORT']=str(channel.server.port)
......
......@@ -82,68 +82,75 @@
# attributions are listed in the accompanying credits file.
#
##############################################################################
"""Provide a thread-safe interface to regex
"""
import regex, regsub #, Sync
from regex import *
from regsub import split, sub, gsub, splitx, capwords
try:
import thread
except:
class allocate_lock:
def acquire(*args): pass
def release(*args): pass
else:
class SafeFunction:
_l=thread.allocate_lock()
_a=_l.acquire
_r=_l.release
def __init__(self, f):
self._f=f
def __call__(self, *args, **kw):
self._a()
try: return apply(self._f, args, kw)
finally: self._r()
split=SafeFunction(split)
sub=SafeFunction(sub)
gsub=SafeFunction(gsub)
splitx=SafeFunction(splitx)
capwords=SafeFunction(capwords)
allocate_lock=thread.allocate_lock
Replacement of the old ts_regex module using the standard re module
"""
import re,reconvert
import sys
import ts_regex_old as OLD
import ts_regex_new as NEW
def _rcCV(s):
cs = reconvert.convert(s)
if cs != s:
print 'Warning: "%s" must be converted to "%s"' % (s,cs)
return cs
class compile:
_r=None
groupindex=None
def sub(pat,repl,str):
x = OLD.sub(pat,repl,str)
y = NEW.sub(pat,repl,str)
if x!=y: print 'Warning: sub():',pat,repl,str
return x
def gsub(pat,repl,str):
x = OLD.gsub(pat,repl,str)
y = NEW.gsub(pat,repl,str)
if x!=y: print 'Warning: subg():',pat,repl,str
return x
def split(str,pat,maxsplit=0):
x = OLD.split(str,pat,maxsplit)
y = NEW.split(str,pat,maxsplit)
if x!=y: print 'Warning: split():',str,pat,maxsplit
return x
def splitx(str,pat,maxsplit=0):
x = OLD.splitx(str,pat,maxsplit)
y = NEW.splitx(str,pat,maxsplit)
if x!=y: print 'Warning: splitx():',str,pat,maxsplit
return x
class compile:
def __init__(self, *args):
self._r=r=apply(regex.compile,args)
self._init(r)
print>>sys.stderr, args
self._old = apply(OLD.compile,args)
self._new = apply(NEW.compile,args)
def _init(self, r):
lock=allocate_lock()
self.__a=lock.acquire
self.__r=lock.release
self.translate=r.translate
self.givenpat=r.givenpat
self.realpat=r.realpat
def match(self, string, pos=0):
self.__a()
try: return self._r.match(string, pos)
finally: self.__r()
x = self._old.match(string,pos)
y = self._new.match(string,pos)
if x!=y: print 'Warning: match():',string,pos
return x
def search(self, string, pos=0):
self.__a()
try: return self._r.search(string, pos)
finally: self.__r()
x = self._old.search(string,pos)
y = self._new.search(string,pos)
if x!=y: print 'Warning: search():',string,pos
return x
def search_group(self, str, group, pos=0):
"""Search a string for a pattern.
......@@ -151,13 +158,11 @@ class compile:
otherwise, the location where the pattern was found,
as well as any specified group are returned.
"""
self.__a()
try:
r=self._r
l=r.search(str, pos)
if l < 0: return None
return l, apply(r.group, group)
finally: self.__r()
x = self._old.search_group(str,group,pos)
y = self._new.search_group(str,group,pos)
if x!=y: print 'Warning: seach_group(%s,%s,%s) %s vs %s' % (str,group,pos,x,y)
return x
def match_group(self, str, group, pos=0):
"""Match a pattern against a string
......@@ -166,50 +171,53 @@ class compile:
returned, otherwise, the length of the match, as well
as any specified group are returned.
"""
self.__a()
try:
r=self._r
l=r.match(str, pos)
if l < 0: return None
return l, apply(r.group, group)
finally: self.__r()
def search_regs(self, str, pos=0):
"""Search a string for a pattern.
x = self._old.match_group(str,group,pos)
y = self._new.match_group(str,group,pos)
if x!=y:
print 'Warning: match_group(%s,%s,%s) %s vs %s' % (str,group,pos,x,y)
print self._old.givenpat
print self._new.givenpat
return x
If the pattern was not found, then None is returned,
otherwise, the 'regs' attribute of the expression is
returned.
"""
self.__a()
try:
r=self._r
r.search(str, pos)
return r.regs
finally: self.__r()
def match_regs(self, str, pos=0):
"""Match a pattern against a string
If the string does not match the pattern, then None is
returned, otherwise, the 'regs' attribute of the expression is
returned.
"""
self.__a()
try:
r=self._r
r.match(str, pos)
return r.regs
finally: self.__r()
if __name__=='__main__':
class symcomp(compile):
import sys
def __init__(self, *args):
self._r=r=apply(regex.symcomp,args)
self._init(r)
self.groupindex=r.groupindex
s1 = 'The quick brown fox jumps of The lazy dog'
s2 = '892 The quick brown 123 fox jumps over 3454 21 The lazy dog'
r1 = ' [a-zA-Z][a-zA-Z] '
r2 = '[0-9][0-9]'
print 'new:',split(s1,' ')
print 'new:',splitx(s2,' ')
print 'new:',split(s2,' ',2)
print 'new:',splitx(s2,' ',2)
print 'new:',sub('The','###',s1)
print 'new:',gsub('The','###',s1)
p1 = compile(r1)
p2 = compile(r2)
for s in [s1,s2]:
print 'search'
print 'new:',p1.search(s)
print 'new:',p2.search(s)
print 'match'
print 'new:',p1.match(s)
print 'new:',p2.match(s)
print 'match_group'
print 'new:',p1.match_group(s,(0,))
print 'new:',p2.match_group(s,(0,))
print 'search_group'
print 'new:',p1.match_group(s,(0,1))
print 'new:',p2.match_group(s,(0,1))
"""HTTP 1.1 / WebDAV client library."""
__version__='$Revision: 1.16 $'[11:-2]
__version__='$Revision: 1.17 $'[11:-2]
import sys, os, string, time, types,re
import socket, httplib, mimetools
......@@ -57,8 +57,9 @@ class Resource:
self.username=username
self.password=password
self.url=url
mo = urlregex(match(url))
if mo:
mo = urlreg.match(url)
if mo:
host,port,uri=mo.group(1,2,3)
self.host=host
self.port=port and string.atoi(port[1:]) or 80
......@@ -157,7 +158,7 @@ class Resource:
return self.__snd_request('POST', self.uri, headers, body)
def put(self, file='', content_type='', content_enc='',
isbin=re.compile('[\0-\6\177-\277]').search,
isbin=re.compile(r'[\000-\006\177-\277]').search,
**kw):
headers=self.__get_headers(kw)
filetype=type(file)
......@@ -425,7 +426,7 @@ find_xml="""<?xml version="1.0" encoding="utf-8" ?>
# Implementation details below here
urlregex=re.compile('http://([^:/]+)(:[0-9]+)?(/.+)?', re.I)
urlreg=re.compile(r'http://([^:/]+)(:[0-9]+)?(/.+)?', re.I)
def marshal_string(name, val):
return '%s=%s' % (name, quote(str(val)))
......
......@@ -85,9 +85,9 @@
"""WebDAV xml request objects."""
__version__='$Revision: 1.12 $'[11:-2]
__version__='$Revision: 1.13 $'[11:-2]
import sys, os, string, regex
import sys, os, string
from common import absattr, aq_base, urlfix, urlbase
from OFS.PropertySheets import DAVProperties
from LockItem import LockItem
......
# Implement the "hookable PUT" hook.
import re, OFS.DTMLMethod
TEXT_PATTERN = re.compile( '^text/.*$' )
TEXT_PATTERN = re.compile( r'^text/.*$' )
def PUT_factory( self, name, typ, body ):
"""
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment