Commit 0907d71f authored by jbrockmendel, committed by Stefan Behnel

CLN: Remove unused (#2830)

Remove unused code, mostly from Plex.
parent ebc9b746
@@ -39,21 +39,6 @@ module_name_pattern = re.compile(r"[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_
verbose = 0
class CompilationData(object):
# Bundles the information that is passed from transform to transform.
# (For now, this is only)
# While Context contains every pxd ever loaded, path information etc.,
# this only contains the data related to a single compilation pass
#
# pyx ModuleNode Main code tree of this compilation.
# pxds {string : ModuleNode} Trees for the pxds used in the pyx.
# codewriter CCodeWriter Where to output final code.
# options CompilationOptions
# result CompilationResult
pass
class Context(object):
# This class encapsulates the context needed for compiling
# one or more Cython implementation files along with their
@@ -125,15 +110,6 @@ class Context(object):
self._interned[key] = value
return value
def intern_value(self, value, *key):
key = (type(value), value) + key
try:
return self._interned[key]
except KeyError:
pass
self._interned[key] = value
return value
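Note: the block removed above is a verbatim duplicate of the intern_value method kept a few lines earlier; only the copy goes away. As a standalone illustration of the pattern (a hypothetical module-level cache, not Cython's actual Context class), interning keys on (type(value), value) so that equal but differently typed values such as 1, 1.0 and True do not collide:

_interned = {}

def intern_value(value, *key):
    # Cache by (type, value, extra key) and always hand back the first object seen.
    key = (type(value), value) + key
    try:
        return _interned[key]
    except KeyError:
        pass
    _interned[key] = value
    return value

a = intern_value("spam")
b = intern_value("spam")
assert a is b                                     # equal values share one object
assert intern_value(1) is not intern_value(1.0)   # int 1 and float 1.0 stay distinct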
# pipeline creation functions can now be found in Pipeline.py
def process_pxd(self, source_desc, scope, module_name):
...
@@ -17,10 +17,6 @@ class PlexValueError(PlexError, ValueError):
pass
class InvalidRegex(PlexError):
pass
class InvalidToken(PlexError):
def __init__(self, token_number, message):
PlexError.__init__(self, "Token number %d: %s" % (token_number, message))
...
@@ -109,14 +109,9 @@ class Lexicon(object):
machine = None # Machine
tables = None # StateTableMachine
def __init__(self, specifications, debug=None, debug_flags=7, timings=None):
def __init__(self, specifications, debug=None, debug_flags=7):
if not isinstance(specifications, list):
raise Errors.InvalidScanner("Scanner definition is not a list")
if timings:
from .Timing import time
total_time = 0.0
time1 = time()
nfa = Machines.Machine()
default_initial_state = nfa.new_initial_state('')
@@ -138,25 +133,15 @@ class Lexicon(object):
token_number,
"Expected a token definition (tuple) or State instance")
if timings:
time2 = time()
total_time = total_time + (time2 - time1)
time3 = time()
if debug and (debug_flags & 1):
debug.write("\n============= NFA ===========\n")
nfa.dump(debug)
dfa = DFA.nfa_to_dfa(nfa, debug=(debug_flags & 3) == 3 and debug)
if timings:
time4 = time()
total_time = total_time + (time4 - time3)
if debug and (debug_flags & 2):
debug.write("\n============= DFA ===========\n")
dfa.dump(debug)
if timings:
timings.write("Constructing NFA : %5.2f\n" % (time2 - time1))
timings.write("Converting to DFA: %5.2f\n" % (time4 - time3))
timings.write("TOTAL : %5.2f\n" % total_time)
self.machine = dfa
...
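Note: with the timings parameter and its Timing import gone, callers that still want numbers can time Lexicon construction from the outside. A minimal sketch, assuming the usual Plex names (Lexicon in Cython.Plex.Lexicons, Any/Rep1 in Cython.Plex.Regexps, TEXT/IGNORE in Cython.Plex.Actions); unlike the removed os.times()-based code, this measures wall-clock rather than CPU time:

import time
from Cython.Plex.Lexicons import Lexicon
from Cython.Plex.Regexps import Any, Rep1
from Cython.Plex.Actions import TEXT, IGNORE

start = time.perf_counter()
lexicon = Lexicon([
    (Rep1(Any("abcdefghijklmnopqrstuvwxyz")), TEXT),   # word tokens
    (Rep1(Any("0123456789")), TEXT),                   # integer literals
    (Rep1(Any(" \t\n")), IGNORE),                      # skip whitespace
])
print("NFA construction + DFA conversion: %5.2f s" % (time.perf_counter() - start))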
"""
Get time in platform-dependent way
"""
from __future__ import absolute_import
import os
from sys import platform, exit, stderr
if platform == 'mac':
import MacOS
def time():
return MacOS.GetTicks() / 60.0
timekind = "real"
elif hasattr(os, 'times'):
def time():
t = os.times()
return t[0] + t[1]
timekind = "cpu"
else:
stderr.write(
"Don't know how to get time on platform %s\n" % repr(platform))
exit(1)
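Note: this platform-dependent timing helper's only user was the timings code removed from Lexicon.__init__ above (the "from .Timing import time" line). If CPU time is ever needed again, the standard library already covers it; a sketch of the equivalent:

import time

def cpu_time():
    # User + system CPU time of the current process: the portable stdlib
    # replacement (Python 3.3+) for the os.times() branch above.
    return time.process_time()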
"""
Python Lexical Analyser
Traditional Regular Expression Syntax
"""
from __future__ import absolute_import
from .Regexps import Alt, Seq, Rep, Rep1, Opt, Any, AnyBut, Bol, Eol, Char
from .Errors import PlexError
class RegexpSyntaxError(PlexError):
pass
def re(s):
"""
Convert traditional string representation of regular expression |s|
into Plex representation.
"""
return REParser(s).parse_re()
class REParser(object):
def __init__(self, s):
self.s = s
self.i = -1
self.end = 0
self.next()
def parse_re(self):
re = self.parse_alt()
if not self.end:
self.error("Unexpected %s" % repr(self.c))
return re
def parse_alt(self):
"""Parse a set of alternative regexps."""
re = self.parse_seq()
if self.c == '|':
re_list = [re]
while self.c == '|':
self.next()
re_list.append(self.parse_seq())
re = Alt(*re_list)
return re
def parse_seq(self):
"""Parse a sequence of regexps."""
re_list = []
while not self.end and self.c not in "|)":
re_list.append(self.parse_mod())
return Seq(*re_list)
def parse_mod(self):
"""Parse a primitive regexp followed by *, +, ? modifiers."""
re = self.parse_prim()
while not self.end and self.c in "*+?":
if self.c == '*':
re = Rep(re)
elif self.c == '+':
re = Rep1(re)
else: # self.c == '?'
re = Opt(re)
self.next()
return re
def parse_prim(self):
"""Parse a primitive regexp."""
c = self.get()
if c == '.':
re = AnyBut("\n")
elif c == '^':
re = Bol
elif c == '$':
re = Eol
elif c == '(':
re = self.parse_alt()
self.expect(')')
elif c == '[':
re = self.parse_charset()
self.expect(']')
else:
if c == '\\':
c = self.get()
re = Char(c)
return re
def parse_charset(self):
"""Parse a charset. Does not include the surrounding []."""
char_list = []
invert = 0
if self.c == '^':
invert = 1
self.next()
if self.c == ']':
char_list.append(']')
self.next()
while not self.end and self.c != ']':
c1 = self.get()
if self.c == '-' and self.lookahead(1) != ']':
self.next()
c2 = self.get()
for a in range(ord(c1), ord(c2) + 1):
char_list.append(chr(a))
else:
char_list.append(c1)
chars = ''.join(char_list)
if invert:
return AnyBut(chars)
else:
return Any(chars)
def next(self):
"""Advance to the next char."""
s = self.s
i = self.i = self.i + 1
if i < len(s):
self.c = s[i]
else:
self.c = ''
self.end = 1
def get(self):
if self.end:
self.error("Premature end of string")
c = self.c
self.next()
return c
def lookahead(self, n):
"""Look ahead n chars."""
j = self.i + n
if j < len(self.s):
return self.s[j]
else:
return ''
def expect(self, c):
"""
Expect to find character |c| at current position.
Raises an exception otherwise.
"""
if self.c == c:
self.next()
else:
self.error("Missing %s" % repr(c))
def error(self, mess):
"""Raise exception to signal syntax error in regexp."""
raise RegexpSyntaxError("Syntax error in regexp %s at position %d: %s" % (
repr(self.s), self.i, mess))
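Note: the deleted module parsed a traditional regex string into Plex's combinator objects and, per the commit message, had no remaining users. A usage sketch of the removed helper, assuming it was importable as Cython.Plex.Traditional (the Seq/Alt/Rep/Opt/Char names come from the .Regexps import at the top of the file):

from Cython.Plex.Traditional import re as plex_re
from Cython.Plex.Regexps import Seq, Alt, Rep, Opt, Char

# The traditional string form...
pattern = plex_re("a(b|c)*d?")

# ...parses into this tree of Plex constructors (parse_seq wraps each
# alternative in a Seq, even when it holds a single character):
equivalent = Seq(Char('a'),
                 Rep(Alt(Seq(Char('b')), Seq(Char('c')))),
                 Opt(Char('d')))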