Commit 92c77a7d authored by Christian Heimes

Fixed #1349: HEAD doesn't have rest-header-level.

Updated docutils, including a fix for #1426: System locale breaks reStructuredText horribly.
Added rest-language-code to the zope.conf schema; it is used instead of the system locale.
parent bb67215b
@@ -93,3 +93,4 @@ class DefaultConfiguration:
         self.rest_input_encoding = default_enc
         self.rest_output_encoding = default_enc
         self.rest_header_level = 3
+        self.rest_language_code = 'en'
@@ -19,7 +19,7 @@ def trim_doc_string(text):
                 min_indent=indent
     for line in lines[1:]:
         nlines.append(line[min_indent:])
-    return '\n'.join(nlines, '\n')
+    return '\n'.join(nlines)
...
@@ -83,6 +83,14 @@ def rest_output_encoding(value):
     value and _setenv('REST_OUTPUT_ENCODING' , value)
     return value
 
+def rest_header_level(value):
+    value and _setenv('REST_DEFAULT_LEVEL' , value)
+    return value
+
+def rest_language_code(value):
+    value and _setenv('REST_LANGUAGE_CODE' , value)
+    return value
+
 # server handlers
 
 def root_handler(config):
...
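The new handlers follow the same pattern as the existing rest_* handlers: the
config machinery calls each handler with the configured value, and the handler
exports it as an environment variable for docutils-aware code to pick up, using
``value and ...`` to skip empty values.  A minimal self-contained sketch of the
pattern (``_setenv`` is Zope's helper; the stand-in below writes to
``os.environ`` directly, which is an assumption about its behaviour)::

    import os

    def _setenv(name, value):
        # Stand-in for Zope's helper: export the value for later consumers.
        os.environ[name] = str(value)

    def rest_language_code(value):
        # Short-circuit: only export when a non-empty value was configured.
        value and _setenv('REST_LANGUAGE_CODE', value)
        return value

    rest_language_code('pt-br')
    assert os.environ['REST_LANGUAGE_CODE'] == 'pt-br'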
@@ -482,6 +482,25 @@
     <metadefault>unset</metadefault>
   </key>
 
+  <key name="rest-header-level" datatype="integer" default="3"
+       handler="rest_header_level">
+    <description>
+      Set the default starting HTML header level for restructured text
+      documents. The default is 3, which implies that top-level headers
+      will be created with an H3 HTML tag.
+    </description>
+    <metadefault>3</metadefault>
+  </key>
+
+  <key name="rest-language-code" handler="rest_language_code" default="en">
+    <description>
+      Language code used for some internal translations inside of the docutils
+      package and for DTD bibliographic elements mapping. See
+      lib/python/docutils/languages/ for a list of supported language codes.
+    </description>
+    <metadefault>en</metadefault>
+  </key>
+
   <key name="publisher-profile-file">
     <description>
       Causing this directive to point to a file on the filesystem will
...
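The consuming side is not part of this diff, but the intent of
rest-language-code is to feed docutils' ``language_code`` setting instead of
deriving it from the system locale (the subject of #1426).  A hedged sketch of
how such an environment variable could be read and handed to docutils (the
wiring through Zope's reST integration is assumed, not shown here; 'en' mirrors
the schema default)::

    import os
    from docutils.core import publish_string

    language_code = os.environ.get('REST_LANGUAGE_CODE', 'en')

    html = publish_string(source='Ol\xe1, mundo!', writer_name='html',
                          settings_overrides={'language_code': language_code,
                                              'input_encoding': 'latin-1'})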
@@ -52,13 +52,11 @@ Subpackages:
 __docformat__ = 'reStructuredText'
 
 __version__ = '0.3.4'
-"""``major.minor.micro`` version number. The micro number is bumped
-any time there's a change in the API incompatible with one of the
-front ends or significant new functionality, and at any alpha or beta
-release. The minor number is bumped whenever there is a stable
-project release. The major number will be bumped when the project is
-feature-complete, and perhaps if there is a major change in the
-design."""
+"""``major.minor.micro`` version number. The micro number is bumped for API
+changes, for new functionality, and for interim project releases. The minor
+number is bumped whenever there is a significant project release. The major
+number will be bumped when the project is feature-complete, and perhaps if
+there is a major change in the design."""
 
 
 class ApplicationError(StandardError): pass
@@ -76,22 +74,36 @@ class SettingsSpec:
     settings_spec = ()
     """Runtime settings specification. Override in subclasses.
 
-    Specifies runtime settings and associated command-line options, as used by
-    `docutils.frontend.OptionParser`. This tuple contains one or more sets of
-    option group title, description, and a list/tuple of tuples: ``('help
-    text', [list of option strings], {keyword arguments})``. Group title
-    and/or description may be `None`; a group title of `None` implies no
-    group, just a list of single options. The "keyword arguments" dictionary
-    contains arguments to the OptionParser/OptionGroup ``add_option`` method,
-    with the addition of a "validator" keyword (see the
-    `docutils.frontend.OptionParser.validators` instance attribute). Runtime
-    settings names are derived implicitly from long option names
-    ("--a-setting" becomes ``settings.a_setting``) or explicitly from the
-    "dest" keyword argument."""
+    Defines runtime settings and associated command-line options, as used by
+    `docutils.frontend.OptionParser`. This is a tuple of:
+
+    - Option group title (string or `None` which implies no group, just a list
+      of single options).
+
+    - Description (string or `None`).
+
+    - A sequence of option tuples. Each consists of:
+
+      - Help text (string)
+
+      - List of option strings (e.g. ``['-Q', '--quux']``).
+
+      - Dictionary of keyword arguments. It contains arguments to the
+        OptionParser/OptionGroup ``add_option`` method, possibly with the
+        addition of a 'validator' keyword (see the
+        `docutils.frontend.OptionParser.validators` instance attribute). Runtime
+        settings names are derived implicitly from long option names
+        ('--a-setting' becomes ``settings.a_setting``) or explicitly from the
+        'dest' keyword argument. See optparse docs for more details.
+
+    - More triples of group title, description, options, as many times as
+      needed. Thus, `settings_spec` tuples can be simply concatenated.
+    """
 
     settings_defaults = None
-    """A dictionary of defaults for internal or inaccessible (by command-line
-    or config file) settings. Override in subclasses."""
+    """A dictionary of defaults for settings not in `settings_spec` (internal
+    settings, intended to be inaccessible by command-line and config file).
+    Override in subclasses."""
 
     settings_default_overrides = None
     """A dictionary of auxiliary defaults, to override defaults for settings
@@ -126,14 +138,21 @@ class TransformSpec:
     """Transforms required by this class. Override in subclasses."""
 
     unknown_reference_resolvers = ()
-    """List of functions to try to resolve unknown references. Called when
-    FinalCheckVisitor is unable to find a correct target. The list should
-    contain functions which will try to resolve unknown references, with the
-    following signature::
+    """List of functions to try to resolve unknown references. Unknown
+    references have a 'refname' attribute which doesn't correspond to any
+    target in the document. Called when FinalCheckVisitor is unable to find a
+    correct target. The list should contain functions which will try to
+    resolve unknown references, with the following signature::
 
         def reference_resolver(node):
            '''Returns boolean: true if resolved, false if not.'''
 
+    If the function is able to resolve the reference, it should also remove
+    the 'refname' attribute and mark the node as resolved::
+
+        del node['refname']
+        node.resolved = 1
+
     Each function must have a "priority" attribute which will affect the order
     the unknown_reference_resolvers are run::
...
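For reference, the structures described by the two docstrings above are plain
tuples and functions.  A minimal sketch (``MyComponent`` and ``resolve_local``
are illustrative names, not part of docutils)::

    from docutils import SettingsSpec, TransformSpec

    class MyComponent(SettingsSpec):
        settings_spec = (
            'My Component Options',                # option group title
            None,                                  # no group description
            (('Enable the frobnicate pass.',       # help text
              ['--frobnicate'],                    # option strings
              {'action': 'store_true'}),),         # add_option keyword args
            )

    def resolve_local(node):
        '''Return true if the unknown reference could be resolved.'''
        if node['refname'] == 'local':
            del node['refname']
            node.resolved = 1
            return 1
        return 0
    resolve_local.priority = 100

    class MyTransformSpec(TransformSpec):
        unknown_reference_resolvers = (resolve_local,)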
@@ -9,14 +9,17 @@ Calling the ``publish_*`` convenience functions (or instantiating a
 `Publisher` object) with component names will result in default
 behavior. For custom behavior (setting component options), create
 custom component objects first, and pass *them* to
-``publish_*``/`Publisher`.
+``publish_*``/`Publisher`. See `The Docutils Publisher`_.
+
+.. _The Docutils Publisher: http://docutils.sf.net/docs/api/publisher.html
 """
 
 __docformat__ = 'reStructuredText'
 
 import sys
-from docutils import __version__, Component, SettingsSpec
-from docutils import frontend, io, utils, readers, parsers, writers
+import pprint
+from docutils import __version__, SettingsSpec
+from docutils import frontend, io, utils, readers, writers
 from docutils.frontend import OptionParser
@@ -37,22 +40,23 @@ class Publisher:
         """
         self.reader = reader
-        """A `readers.Reader` instance."""
+        """A `docutils.readers.Reader` instance."""
 
         self.parser = parser
-        """A `parsers.Parser` instance."""
+        """A `docutils.parsers.Parser` instance."""
 
         self.writer = writer
-        """A `writers.Writer` instance."""
+        """A `docutils.writers.Writer` instance."""
 
         self.source = source
-        """The source of input data, an `io.Input` instance."""
+        """The source of input data, a `docutils.io.Input` instance."""
 
         self.source_class = source_class
         """The class for dynamically created source objects."""
 
         self.destination = destination
-        """The destination for docutils output, an `io.Output` instance."""
+        """The destination for docutils output, a `docutils.io.Output`
+        instance."""
 
         self.destination_class = destination_class
         """The class for dynamically created destination objects."""
@@ -85,8 +89,9 @@ class Publisher:
     def setup_option_parser(self, usage=None, description=None,
                             settings_spec=None, config_section=None,
                             **defaults):
-        if config_section and not settings_spec:
-            settings_spec = SettingsSpec()
+        if config_section:
+            if not settings_spec:
+                settings_spec = SettingsSpec()
             settings_spec.config_section = config_section
             parts = config_section.split()
             if len(parts) > 1 and parts[-1] == 'application':
@@ -112,6 +117,17 @@ class Publisher:
         self.settings = option_parser.get_default_values()
         return self.settings
 
+    def process_programmatic_settings(self, settings_spec,
+                                      settings_overrides,
+                                      config_section):
+        if self.settings is None:
+            defaults = (settings_overrides or {}).copy()
+            # Propagate exceptions by default when used programmatically:
+            defaults.setdefault('traceback', 1)
+            self.get_settings(settings_spec=settings_spec,
+                              config_section=config_section,
+                              **defaults)
+
     def process_command_line(self, argv=None, usage=None, description=None,
                              settings_spec=None, config_section=None,
                              **defaults):
@@ -122,7 +138,7 @@ class Publisher:
         Set components first (`self.set_reader` & `self.set_writer`).
         """
         option_parser = self.setup_option_parser(
-            usage, description, settings_spec, config_section,**defaults)
+            usage, description, settings_spec, config_section, **defaults)
         if argv is None:
             argv = sys.argv[1:]
         self.settings = option_parser.parse_args(argv)
@@ -160,7 +176,7 @@ class Publisher:
     def publish(self, argv=None, usage=None, description=None,
                 settings_spec=None, settings_overrides=None,
-                config_section=None, enable_exit=None):
+                config_section=None, enable_exit_status=None):
         """
         Process command line options and arguments (if `self.settings` not
         already set), run `self.reader` and then `self.writer`. Return
@@ -170,8 +186,6 @@ class Publisher:
             self.process_command_line(
                 argv, usage, description, settings_spec, config_section,
                 **(settings_overrides or {}))
-        elif settings_overrides:
-            self.settings._update(settings_overrides, 'loose')
         self.set_io()
         exit = None
         document = None
@@ -181,47 +195,88 @@ class Publisher:
             self.apply_transforms(document)
             output = self.writer.write(document, self.destination)
             self.writer.assemble_parts()
-        except utils.SystemMessage, error:
-            if self.settings.traceback:
-                raise
-            print >>sys.stderr, ('Exiting due to level-%s (%s) system message.'
-                                 % (error.level,
-                                    utils.Reporter.levels[error.level]))
-            exit = 1
         except Exception, error:
-            if self.settings.traceback:
+            if self.settings.traceback: # propagate exceptions?
                 raise
-            print >>sys.stderr, error
-            print >>sys.stderr, ("""\
-Exiting due to error. Use "--traceback" to diagnose.
-Please report errors to <docutils-users@lists.sf.net>.
-Include "--traceback" output, Docutils version (%s),
-Python version (%s), your OS type & version, and the
-command line used.""" % (__version__, sys.version.split()[0]))
+            self.report_Exception(error)
             exit = 1
+        self.debugging_dumps(document)
+        if (enable_exit_status and document
+            and (document.reporter.max_level
+                 >= self.settings.exit_status_level)):
+            sys.exit(document.reporter.max_level + 10)
+        elif exit:
+            sys.exit(1)
+        return output
+
+    def debugging_dumps(self, document):
         if self.settings.dump_settings:
-            from pprint import pformat
             print >>sys.stderr, '\n::: Runtime settings:'
-            print >>sys.stderr, pformat(self.settings.__dict__)
+            print >>sys.stderr, pprint.pformat(self.settings.__dict__)
         if self.settings.dump_internals and document:
-            from pprint import pformat
             print >>sys.stderr, '\n::: Document internals:'
-            print >>sys.stderr, pformat(document.__dict__)
+            print >>sys.stderr, pprint.pformat(document.__dict__)
        if self.settings.dump_transforms and document:
-            from pprint import pformat
             print >>sys.stderr, '\n::: Transforms applied:'
-            print >>sys.stderr, pformat(document.transformer.applied)
+            print >>sys.stderr, pprint.pformat(document.transformer.applied)
        if self.settings.dump_pseudo_xml and document:
             print >>sys.stderr, '\n::: Pseudo-XML:'
             print >>sys.stderr, document.pformat().encode(
                 'raw_unicode_escape')
-        if enable_exit and document and (document.reporter.max_level
-                                         >= self.settings.exit_level):
-            sys.exit(document.reporter.max_level + 10)
-        elif exit:
-            sys.exit(1)
-        return output
def report_Exception(self, error):
if isinstance(error, utils.SystemMessage):
self.report_SystemMessage(error)
elif isinstance(error, UnicodeError):
self.report_UnicodeError(error)
else:
print >>sys.stderr, '%s: %s' % (error.__class__.__name__, error)
print >>sys.stderr, ("""\
Exiting due to error. Use "--traceback" to diagnose.
Please report errors to <docutils-users@lists.sf.net>.
Include "--traceback" output, Docutils version (%s),
Python version (%s), your OS type & version, and the
command line used.""" % (__version__, sys.version.split()[0]))
def report_SystemMessage(self, error):
print >>sys.stderr, ('Exiting due to level-%s (%s) system message.'
% (error.level,
utils.Reporter.levels[error.level]))
def report_UnicodeError(self, error):
sys.stderr.write(
'%s: %s\n'
'\n'
'The specified output encoding (%s) cannot\n'
'handle all of the output.\n'
'Try setting "--output-encoding-error-handler" to\n'
'\n'
'* "xmlcharrefreplace" (for HTML & XML output);\n'
% (error.__class__.__name__, error,
self.settings.output_encoding))
try:
data = error.object[error.start:error.end]
sys.stderr.write(
' the output will contain "%s" and should be usable.\n'
'* "backslashreplace" (for other output formats, Python 2.3+);\n'
' look for "%s" in the output.\n'
% (data.encode('ascii', 'xmlcharrefreplace'),
data.encode('ascii', 'backslashreplace')))
except AttributeError:
sys.stderr.write(' the output should be usable as-is.\n')
sys.stderr.write(
'* "replace"; look for "?" in the output.\n'
'\n'
'"--output-encoding-error-handler" is currently set to "%s".\n'
'\n'
'Exiting due to error. Use "--traceback" to diagnose.\n'
'If the advice above doesn\'t eliminate the error,\n'
'please report it to <docutils-users@lists.sf.net>.\n'
'Include "--traceback" output, Docutils version (%s),\n'
'Python version (%s), your OS type & version, and the\n'
'command line used.\n'
% (self.settings.output_encoding_error_handler,
__version__, sys.version.split()[0]))
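The exit-status convention used above is worth spelling out: when
enable_exit_status is on and the document produced system messages at or above
settings.exit_status_level, the process exits with the maximum message level
plus 10.  A small sketch of the arithmetic (the level numbers are docutils'
standard reporter levels)::

    levels = {'info': 1, 'warning': 2, 'error': 3, 'severe': 4}

    def exit_status(max_level, exit_status_level=5):
        # Mirrors Publisher.publish(): exit non-zero only once the threshold
        # is reached; the plain error path exits with 1 instead.
        if max_level >= exit_status_level:
            return max_level + 10
        return 0

    assert exit_status(levels['error'], exit_status_level=3) == 13
    assert exit_status(levels['warning'], exit_status_level=3) == 0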
 default_usage = '%prog [options] [<source> [<destination>]]'
 default_description = ('Reads from <source> (default is stdin) and writes to '
@@ -232,30 +287,15 @@ def publish_cmdline(reader=None, reader_name='standalone',
                     writer=None, writer_name='pseudoxml',
                     settings=None, settings_spec=None,
                     settings_overrides=None, config_section=None,
-                    enable_exit=1, argv=None,
+                    enable_exit_status=1, argv=None,
                     usage=default_usage, description=default_description):
     """
-    Set up & run a `Publisher`. For command-line front ends.
+    Set up & run a `Publisher` for command-line-based file I/O (input and
+    output file paths taken automatically from the command line). Return the
+    encoded string output also.
 
-    Parameters:
+    Parameters: see `publish_programmatically` for the remainder.
 
-    - `reader`: A `docutils.readers.Reader` object.
-    - `reader_name`: Name or alias of the Reader class to be instantiated if
-      no `reader` supplied.
-    - `parser`: A `docutils.parsers.Parser` object.
-    - `parser_name`: Name or alias of the Parser class to be instantiated if
-      no `parser` supplied.
-    - `writer`: A `docutils.writers.Writer` object.
-    - `writer_name`: Name or alias of the Writer class to be instantiated if
-      no `writer` supplied.
-    - `settings`: Runtime settings object.
-    - `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
-      subclass. Used only if no `settings` specified.
-    - `settings_overrides`: A dictionary containing program-specific overrides
-      of component settings.
-    - `config_section`: Name of configuration file section for application.
-      Used only if no `settings` or `settings_spec` specified.
-    - `enable_exit`: Boolean; enable exit status at end of processing?
     - `argv`: Command-line argument list to use instead of ``sys.argv[1:]``.
     - `usage`: Usage string, output if there's a problem parsing the command
      line.
@@ -264,8 +304,10 @@ def publish_cmdline(reader=None, reader_name='standalone',
     """
     pub = Publisher(reader, parser, writer, settings=settings)
     pub.set_components(reader_name, parser_name, writer_name)
-    pub.publish(argv, usage, description, settings_spec, settings_overrides,
-                config_section=config_section, enable_exit=enable_exit)
+    output = pub.publish(
+        argv, usage, description, settings_spec, settings_overrides,
+        config_section=config_section, enable_exit_status=enable_exit_status)
+    return output
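A typical front end is now just a call to publish_cmdline; the argv parameter
makes the same entry point usable from tests or other scripts.  A short sketch
(the file names are illustrative)::

    from docutils.core import publish_cmdline

    # Equivalent of running a console front end such as rst2html.py:
    publish_cmdline(writer_name='html',
                    argv=['README.txt', 'README.html'],
                    enable_exit_status=1)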
def publish_file(source=None, source_path=None, def publish_file(source=None, source_path=None,
destination=None, destination_path=None, destination=None, destination_path=None,
...@@ -273,63 +315,40 @@ def publish_file(source=None, source_path=None, ...@@ -273,63 +315,40 @@ def publish_file(source=None, source_path=None,
parser=None, parser_name='restructuredtext', parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml', writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings_overrides=None, settings=None, settings_spec=None, settings_overrides=None,
config_section=None, enable_exit=None): config_section=None, enable_exit_status=None):
""" """
Set up & run a `Publisher`. For programmatic use with file-like I/O. Set up & run a `Publisher` for programmatic use with file-like I/O.
Return the encoded string output also.
Parameters:
- `source`: A file-like object (must have "read" and "close" methods). Parameters: see `publish_programmatically`.
- `source_path`: Path to the input file. Opened if no `source` supplied.
If neither `source` nor `source_path` are supplied, `sys.stdin` is used.
- `destination`: A file-like object (must have "write" and "close"
methods).
- `destination_path`: Path to the input file. Opened if no `destination`
supplied. If neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `reader`: A `docutils.readers.Reader` object.
- `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
- `parser`: A `docutils.parsers.Parser` object.
- `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
- `writer`: A `docutils.writers.Writer` object.
- `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
- `settings`: Runtime settings object.
- `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
- `config_section`: Name of configuration file section for application.
Used only if no `settings` or `settings_spec` specified.
- `enable_exit`: Boolean; enable exit status at end of processing?
""" """
pub = Publisher(reader, parser, writer, settings=settings) output, pub = publish_programmatically(
pub.set_components(reader_name, parser_name, writer_name) source_class=io.FileInput, source=source, source_path=source_path,
if settings is None: destination_class=io.FileOutput,
settings = pub.get_settings(settings_spec=settings_spec, destination=destination, destination_path=destination_path,
config_section=config_section) reader=reader, reader_name=reader_name,
if settings_overrides: parser=parser, parser_name=parser_name,
settings._update(settings_overrides, 'loose') writer=writer, writer_name=writer_name,
pub.set_source(source, source_path) settings=settings, settings_spec=settings_spec,
pub.set_destination(destination, destination_path) settings_overrides=settings_overrides,
pub.publish(enable_exit=enable_exit) config_section=config_section,
enable_exit_status=enable_exit_status)
def publish_string(source, source_path=None, destination_path=None, return output
def publish_string(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone', reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext', parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml', writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings=None, settings_spec=None,
settings_overrides=None, config_section=None, settings_overrides=None, config_section=None,
enable_exit=None): enable_exit_status=None):
""" """
Set up & run a `Publisher`, and return the string output. Set up & run a `Publisher` for programmatic use with string I/O. Return
For programmatic use with string I/O. the encoded string or Unicode string output.
For encoded string output, be sure to set the "output_encoding" setting to For encoded string output, be sure to set the 'output_encoding' setting to
the desired encoding. Set it to "unicode" for unencoded Unicode string the desired encoding. Set it to 'unicode' for unencoded Unicode string
output. Here's how:: output. Here's one way::
publish_string(..., settings_overrides={'output_encoding': 'unicode'}) publish_string(..., settings_overrides={'output_encoding': 'unicode'})
...@@ -337,103 +356,159 @@ def publish_string(source, source_path=None, destination_path=None, ...@@ -337,103 +356,159 @@ def publish_string(source, source_path=None, destination_path=None,
publish_string(..., settings_overrides={'input_encoding': 'unicode'}) publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: Parameters: see `publish_programmatically`.
- `source`: An input string; required. This can be an encoded 8-bit
string (set the "input_encoding" setting to the correct encoding) or a
Unicode string (set the "input_encoding" setting to "unicode").
- `source_path`: Path to the file or object that produced `source`;
optional. Only used for diagnostic output.
- `destination_path`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
- `reader`: A `docutils.readers.Reader` object.
- `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied.
- `parser`: A `docutils.parsers.Parser` object.
- `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied.
- `writer`: A `docutils.writers.Writer` object.
- `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied.
- `settings`: Runtime settings object.
- `settings_spec`: Extra settings specification; a `docutils.SettingsSpec`
subclass. Used only if no `settings` specified.
- `settings_overrides`: A dictionary containing program-specific overrides
of component settings.
- `config_section`: Name of configuration file section for application.
Used only if no `settings` or `settings_spec` specified.
- `enable_exit`: Boolean; enable exit status at end of processing?
""" """
pub = Publisher(reader, parser, writer, settings=settings, output, pub = publish_programmatically(
source_class=io.StringInput, source_class=io.StringInput, source=source, source_path=source_path,
destination_class=io.StringOutput) destination_class=io.StringOutput,
pub.set_components(reader_name, parser_name, writer_name) destination=None, destination_path=destination_path,
if settings is None: reader=reader, reader_name=reader_name,
settings = pub.get_settings(settings_spec=settings_spec, parser=parser, parser_name=parser_name,
config_section=config_section) writer=writer, writer_name=writer_name,
if settings_overrides: settings=settings, settings_spec=settings_spec,
settings._update(settings_overrides, 'loose') settings_overrides=settings_overrides,
pub.set_source(source, source_path) config_section=config_section,
pub.set_destination(destination_path=destination_path) enable_exit_status=enable_exit_status)
return pub.publish(enable_exit=enable_exit) return output
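The docstring's advice about 'output_encoding' is the most common stumbling
block when calling publish_string programmatically, so here it is as a
complete, minimal example::

    from docutils.core import publish_string

    rest = u'Hello, *world*!'
    html = publish_string(source=rest, writer_name='html',
                          settings_overrides={'input_encoding': 'unicode',
                                              'output_encoding': 'unicode'})
    assert u'<em>world</em>' in html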
def publish_parts(source, source_path=None, destination_path=None, def publish_parts(source, source_path=None, destination_path=None,
reader=None, reader_name='standalone', reader=None, reader_name='standalone',
parser=None, parser_name='restructuredtext', parser=None, parser_name='restructuredtext',
writer=None, writer_name='pseudoxml', writer=None, writer_name='pseudoxml',
settings=None, settings_spec=None, settings=None, settings_spec=None,
settings_overrides=None, config_section=None, settings_overrides=None, config_section=None,
enable_exit=None): enable_exit_status=None):
""" """
Set up & run a `Publisher`, and return a dictionary of document parts. Set up & run a `Publisher`, and return a dictionary of document parts.
Dictionary keys are the names of parts, and values are Unicode strings; Dictionary keys are the names of parts, and values are Unicode strings;
encoding is up to the client. For programmatic use with string I/O. encoding is up to the client. For programmatic use with string I/O.
For encoded string input, be sure to set the "input_encoding" setting to For encoded string input, be sure to set the 'input_encoding' setting to
the desired encoding. Set it to "unicode" for unencoded Unicode string the desired encoding. Set it to 'unicode' for unencoded Unicode string
input. Here's how:: input. Here's how::
publish_string(..., settings_overrides={'input_encoding': 'unicode'}) publish_string(..., settings_overrides={'input_encoding': 'unicode'})
Parameters: see `publish_programmatically`.
"""
output, pub = publish_programmatically(
source_class=io.StringInput, source=source, source_path=source_path,
destination_class=io.StringOutput,
destination=None, destination_path=destination_path,
reader=reader, reader_name=reader_name,
parser=parser, parser_name=parser_name,
writer=writer, writer_name=writer_name,
settings=settings, settings_spec=settings_spec,
settings_overrides=settings_overrides,
config_section=config_section,
enable_exit_status=enable_exit_status)
return pub.writer.parts
def publish_programmatically(source_class, source, source_path,
destination_class, destination, destination_path,
reader, reader_name,
parser, parser_name,
writer, writer_name,
settings, settings_spec,
settings_overrides, config_section,
enable_exit_status):
"""
Set up & run a `Publisher` for custom programmatic use. Return the
encoded string output and the Publisher object.
Applications should not need to call this function directly. If it does
seem to be necessary to call this function directly, please write to the
docutils-develop@lists.sourceforge.net mailing list.
Parameters: Parameters:
- `source`: An input string; required. This can be an encoded 8-bit * `source_class` **required**: The class for dynamically created source
string (set the "input_encoding" setting to the correct encoding) or a objects. Typically `io.FileInput` or `io.StringInput`.
Unicode string (set the "input_encoding" setting to "unicode").
- `source_path`: Path to the file or object that produced `source`; * `source`: Type depends on `source_class`:
optional. Only used for diagnostic output.
- `destination_path`: Path to the file or object which will receive the - `io.FileInput`: Either a file-like object (must have 'read' and
output; optional. Used for determining relative paths (stylesheets, 'close' methods), or ``None`` (`source_path` is opened). If neither
source links, etc.). `source` nor `source_path` are supplied, `sys.stdin` is used.
- `reader`: A `docutils.readers.Reader` object.
- `reader_name`: Name or alias of the Reader class to be instantiated if - `io.StringInput` **required**: The input string, either an encoded
8-bit string (set the 'input_encoding' setting to the correct
encoding) or a Unicode string (set the 'input_encoding' setting to
'unicode').
* `source_path`: Type depends on `source_class`:
- `io.FileInput`: Path to the input file, opened if no `source`
supplied.
- `io.StringInput`: Optional. Path to the file or object that produced
`source`. Only used for diagnostic output.
* `destination_class` **required**: The class for dynamically created
destination objects. Typically `io.FileOutput` or `io.StringOutput`.
* `destination`: Type depends on `destination_class`:
- `io.FileOutput`: Either a file-like object (must have 'write' and
'close' methods), or ``None`` (`destination_path` is opened). If
neither `destination` nor `destination_path` are supplied,
`sys.stdout` is used.
- `io.StringOutput`: Not used; pass ``None``.
* `destination_path`: Type depends on `destination_class`:
- `io.FileOutput`: Path to the output file. Opened if no `destination`
supplied.
- `io.StringOutput`: Path to the file or object which will receive the
output; optional. Used for determining relative paths (stylesheets,
source links, etc.).
* `reader`: A `docutils.readers.Reader` object.
* `reader_name`: Name or alias of the Reader class to be instantiated if
no `reader` supplied. no `reader` supplied.
- `parser`: A `docutils.parsers.Parser` object.
- `parser_name`: Name or alias of the Parser class to be instantiated if * `parser`: A `docutils.parsers.Parser` object.
* `parser_name`: Name or alias of the Parser class to be instantiated if
no `parser` supplied. no `parser` supplied.
- `writer`: A `docutils.writers.Writer` object.
- `writer_name`: Name or alias of the Writer class to be instantiated if * `writer`: A `docutils.writers.Writer` object.
* `writer_name`: Name or alias of the Writer class to be instantiated if
no `writer` supplied. no `writer` supplied.
- `settings`: Runtime settings object.
- `settings_spec`: Extra settings specification; a `docutils.SettingsSpec` * `settings`: A runtime settings (`docutils.frontend.Values`) object, for
subclass. Used only if no `settings` specified. dotted-attribute access to runtime settings. It's the end result of the
- `settings_overrides`: A dictionary containing program-specific overrides `SettingsSpec`, config file, and option processing. If `settings` is
of component settings. passed, it's assumed to be complete and no further setting/config/option
- `config_section`: Name of configuration file section for application. processing is done.
Used only if no `settings` or `settings_spec` specified.
- `enable_exit`: Boolean; enable exit status at end of processing? * `settings_spec`: A `docutils.SettingsSpec` subclass or object. Provides
extra application-specific settings definitions independently of
components. In other words, the application becomes a component, and
its settings data is processed along with that of the other components.
Used only if no `settings` specified.
* `settings_overrides`: A dictionary containing application-specific
settings defaults that override the defaults of other components.
Used only if no `settings` specified.
* `config_section`: A string, the name of the configuration file section
for this application. Overrides the ``config_section`` attribute
defined by `settings_spec`. Used only if no `settings` specified.
* `enable_exit_status`: Boolean; enable exit status at end of processing?
""" """
pub = Publisher(reader, parser, writer, settings=settings, pub = Publisher(reader, parser, writer, settings=settings,
source_class=io.StringInput, source_class=source_class,
destination_class=io.NullOutput) destination_class=destination_class)
pub.set_components(reader_name, parser_name, writer_name) pub.set_components(reader_name, parser_name, writer_name)
if settings is None: pub.process_programmatic_settings(
settings = pub.get_settings(settings_spec=settings_spec, settings_spec, settings_overrides, config_section)
config_section=config_section)
if settings_overrides:
settings._update(settings_overrides, 'loose')
pub.set_source(source, source_path) pub.set_source(source, source_path)
pub.set_destination(destination_path=destination_path) pub.set_destination(destination, destination_path)
pub.publish(enable_exit=enable_exit) output = pub.publish(enable_exit_status=enable_exit_status)
return pub.writer.parts return output, pub
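publish_programmatically is the common core that the publish_* wrappers above
delegate to; applications normally keep calling the wrappers.  For example,
publish_parts returns a dictionary of document fragments whose key names depend
on the writer (and docutils version); the sketch below assumes the HTML writer
exposes a body fragment::

    from docutils.core import publish_parts

    parts = publish_parts(source=u'Title\n=====\n\nBody text.',
                          writer_name='html',
                          settings_overrides={'input_encoding': 'unicode'})
    # Exact key names are writer-specific; with the HTML writer a body
    # fragment is typically available under one of these keys:
    body = parts.get('body') or parts.get('html_body')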
@@ -43,7 +43,7 @@ from optparse import Values, SUPPRESS_HELP
 def store_multiple(option, opt, value, parser, *args, **kwargs):
     """
     Store multiple values in `parser.values`. (Option callback.)
 
     Store `None` for each attribute named in `args`, and store the value for
     each key (attribute name) in `kwargs`.
     """
@@ -77,10 +77,10 @@ def validate_encoding_error_handler(setting, value, option_parser,
     try:
         codecs.lookup_error(value)
     except AttributeError:              # prior to Python 2.3
-        if value not in ('strict', 'ignore', 'replace'):
+        if value not in ('strict', 'ignore', 'replace', 'xmlcharrefreplace'):
             raise (LookupError(
                 'unknown encoding error handler: "%s" (choices: '
-                '"strict", "ignore", or "replace")' % value),
+                '"strict", "ignore", "replace", or "xmlcharrefreplace")' % value),
                    None, sys.exc_info()[2])
     except LookupError:
         raise (LookupError(
@@ -143,6 +143,15 @@ def validate_colon_separated_string_list(
         value.extend(last.split(':'))
     return value
 
+def validate_url_trailing_slash(
+    setting, value, option_parser, config_parser=None, config_section=None):
+    if not value:
+        return './'
+    elif value.endswith('/'):
+        return value
+    else:
+        return value + '/'
+
 def make_paths_absolute(pathdict, keys, base_path=None):
     """
     Interpret filesystem path settings relative to the `base_path` given.
@@ -311,8 +320,9 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
          ['--quiet', '-q'], {'action': 'store_const', 'const': 'none',
                              'dest': 'report_level'}),
         ('Set the threshold (<level>) at or above which system messages are '
-         'converted to exceptions, halting execution immediately. Levels '
-         'as in --report. Default is 4 (severe).',
+         'converted to exceptions, halting execution immediately by '
+         'exiting (or propagating the exception if --traceback set). '
+         'Levels as in --report. Default is 4 (severe).',
          ['--halt'], {'choices': threshold_choices, 'dest': 'halt_level',
                       'default': 4, 'metavar': '<level>',
                       'validator': validate_threshold}),
@@ -323,9 +333,10 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
          'system messages (at or above <level>) were generated. Levels as '
          'in --report. Default is 5 (disabled). Exit status is the maximum '
          'system message level plus 10 (11 for INFO, etc.).',
-         ['--exit'], {'choices': threshold_choices, 'dest': 'exit_level',
-                      'default': 5, 'metavar': '<level>',
-                      'validator': validate_threshold}),
+         ['--exit-status'], {'choices': threshold_choices,
+                             'dest': 'exit_status_level',
+                             'default': 5, 'metavar': '<level>',
+                             'validator': validate_threshold}),
         ('Report debug-level system messages and generate diagnostic output.',
          ['--debug'], {'action': 'store_true', 'validator': validate_boolean}),
         ('Do not report debug-level system messages or generate diagnostic '
@@ -333,7 +344,9 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
          ['--no-debug'], {'action': 'store_false', 'dest': 'debug'}),
         ('Send the output of system messages (warnings) to <file>.',
          ['--warnings'], {'dest': 'warning_stream', 'metavar': '<file>'}),
-        ('Enable Python tracebacks when an error occurs.',
+        ('Enable Python tracebacks when halt-level system messages and '
+         'other exceptions occur. Useful for debugging, and essential for '
+         'issue reports.',
          ['--traceback'], {'action': 'store_true', 'default': None,
                            'validator': validate_boolean}),
         ('Disable Python tracebacks when errors occur; report just the error '
@@ -343,23 +356,31 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
          ['--input-encoding', '-i'],
          {'metavar': '<name>', 'validator': validate_encoding}),
         ('Specify the text encoding for output. Default is UTF-8. '
-         'Optionally also specify the encoding error handler for unencodable '
-         'characters (see "--error-encoding"); default is "strict".',
+         'Optionally also specify the error handler for unencodable '
+         'characters, after a colon (":"); default is "strict". (See '
+         '"--output-encoding-error-encoding".)',
          ['--output-encoding', '-o'],
          {'metavar': '<name[:handler]>', 'default': 'utf-8',
          'validator': validate_encoding_and_error_handler}),
-        (SUPPRESS_HELP,                 # usually handled by --output-encoding
+        ('Specify the error handler for unencodable characters in '
+         'the output. Acceptable values include "strict", "ignore", '
+         '"replace", "xmlcharrefreplace", and '
+         '"backslashreplace" (in Python 2.3+). Default is "strict". '
+         'Usually specified as part of --output-encoding.',
          ['--output-encoding-error-handler'],
          {'default': 'strict', 'validator': validate_encoding_error_handler}),
         ('Specify the text encoding for error output. Default is ASCII. '
-         'Optionally also specify the encoding error handler for unencodable '
-         'characters, after a colon (":"). Acceptable values are the same '
-         'as for the "error" parameter of Python\'s ``encode`` string '
-         'method. Default is "%s".' % default_error_encoding_error_handler,
+         'Optionally also specify the error handler for unencodable '
+         'characters, after a colon (":"); default is "%s". (See '
+         '"--output-encoding-error-encoding".'
+         % default_error_encoding_error_handler,
          ['--error-encoding', '-e'],
          {'metavar': '<name[:handler]>', 'default': 'ascii',
          'validator': validate_encoding_and_error_handler}),
-        (SUPPRESS_HELP,                 # usually handled by --error-encoding
+        ('Specify the error handler for unencodable characters in '
+         'error output. See --output-encoding-error-handler for acceptable '
+         'values. Default is "%s". Usually specified as part of '
+         '--error-encoding.' % default_error_encoding_error_handler,
          ['--error-encoding-error-handler'],
          {'default': default_error_encoding_error_handler,
          'validator': validate_encoding_error_handler}),
@@ -386,7 +407,9 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
     ends. Setting specs specific to individual Docutils components are also
     used (see `populate_from_components()`)."""
 
-    settings_defaults = {'_disable_config': None}
+    settings_defaults = {'_disable_config': None,
+                         '_source': None,
+                         '_destination': None}
     """Defaults for settings that don't have command-line option equivalents."""
 
     relative_path_settings = ('warning_stream',)
@@ -425,16 +448,13 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
         self.relative_path_settings = list(self.relative_path_settings)
         self.components = (self,) + tuple(components)
         self.populate_from_components(self.components)
-        defaults = defaults or {}
+        self.set_defaults(**(defaults or {}))
         if read_config_files and not self.defaults['_disable_config']:
             try:
                 config_settings = self.get_standard_config_settings()
             except ValueError, error:
                 self.error(error)
-            defaults.update(config_settings.__dict__)
-        # Internal settings with no defaults from settings specifications;
-        # initialize manually:
-        self.set_defaults(_source=None, _destination=None, **defaults)
+            self.set_defaults(**config_settings.__dict__)
 
     def populate_from_components(self, components):
         """
@@ -446,11 +466,10 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
         for component in components:
             if component is None:
                 continue
-            i = 0
             settings_spec = component.settings_spec
             self.relative_path_settings.extend(
                 component.relative_path_settings)
-            while i < len(settings_spec):
+            for i in range(0, len(settings_spec), 3):
                 title, description, option_spec = settings_spec[i:i+3]
                 if title:
                     group = optparse.OptionGroup(self, title, description)
@@ -472,7 +491,6 @@ class OptionParser(optparse.OptionParser, docutils.SettingsSpec):
                     self.lists[option.dest] = 1
             if component.settings_defaults:
                 self.defaults.update(component.settings_defaults)
-            i += 3
         for component in components:
             if component and component.settings_default_overrides:
                 self.defaults.update(component.settings_default_overrides)
@@ -552,8 +570,8 @@ class ConfigParser(CP.ConfigParser):
     old_warning = """
 The "[option]" section is deprecated. Support for old-format configuration
 files may be removed in a future Docutils release. Please revise your
-configuration files. See <http://docutils.sf.net/docs/config.html>, section
-"Old-Format Configuration Files".
+configuration files. See <http://docutils.sf.net/docs/user/config.html>,
+section "Old-Format Configuration Files".
 """
 
     def read(self, filenames, option_parser):
...
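The three branches of the new validate_url_trailing_slash validator cover
unset, already-normalized, and bare URLs; a quick check of its behaviour (the
unused option_parser argument can be anything, so None is passed here)::

    assert validate_url_trailing_slash('pep_base_url', '', None) == './'
    assert validate_url_trailing_slash(
        'pep_base_url', 'http://www.python.org/peps/', None) \
        == 'http://www.python.org/peps/'
    assert validate_url_trailing_slash(
        'pep_base_url', 'http://www.python.org/peps', None) \
        == 'http://www.python.org/peps/'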
@@ -121,13 +121,32 @@ class Output(TransformSpec):
                 % (self.__class__, self.destination, self.destination_path))
 
     def write(self, data):
+        """`data` is a Unicode string, to be encoded by `self.encode`."""
         raise NotImplementedError
 
     def encode(self, data):
         if self.encoding and self.encoding.lower() == 'unicode':
             return data
         else:
-            return data.encode(self.encoding, self.error_handler)
+            try:
+                return data.encode(self.encoding, self.error_handler)
+            except ValueError:
+                # ValueError is raised if there are unencodable chars
+                # in data and the error_handler isn't found.
+                if self.error_handler == 'xmlcharrefreplace':
+                    # We are using xmlcharrefreplace with a Python
+                    # version that doesn't support it (2.1 or 2.2), so
+                    # we emulate its behavior.
+                    return ''.join([self.xmlcharref_encode(char) for char in data])
+                else:
+                    raise
+
+    def xmlcharref_encode(self, char):
+        """Emulate Python 2.3's 'xmlcharrefreplace' encoding error handler."""
+        try:
+            return char.encode(self.encoding, 'strict')
+        except UnicodeError:
+            return '&#%i;' % ord(char)
 
 
 class FileInput(Input):
@@ -172,7 +191,9 @@ class FileInput(Input):
             pass
 
     def read(self):
-        """Read and decode a single file and return the data."""
+        """
+        Read and decode a single file and return the data (Unicode string).
+        """
         data = self.source.read()
         if self.autoclose:
             self.close()
...
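What the emulation reproduces is Python 2.3's 'xmlcharrefreplace' error
handler: unencodable characters are replaced by numeric character references,
so the output stays usable as HTML/XML.  A small illustration (Python 2
syntax, to match the code above)::

    text = u'S\u00e9rgio'

    # Python 2.3+: handled natively by the codec machinery.
    assert text.encode('ascii', 'xmlcharrefreplace') == 'S&#233;rgio'

    # The fallback above does the same thing one character at a time:
    def xmlcharref_encode(char, encoding='ascii'):
        try:
            return char.encode(encoding, 'strict')
        except UnicodeError:
            return '&#%i;' % ord(char)

    assert ''.join([xmlcharref_encode(c) for c in text]) == 'S&#233;rgio'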
@@ -18,6 +18,9 @@ _languages = {}
 def get_language(language_code):
     if _languages.has_key(language_code):
         return _languages[language_code]
-    module = __import__(language_code, globals(), locals())
+    try:
+        module = __import__(language_code, globals(), locals())
+    except ImportError:
+        return None
     _languages[language_code] = module
     return module
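With the ImportError guard, asking for an unknown language no longer blows up
at import time; callers get None back and can fall back themselves.  For
instance::

    from docutils import languages

    # 'xx' has no module under docutils/languages/, so this now returns None
    # instead of raising ImportError:
    lang = languages.get_language('xx') or languages.get_language('en')
    print lang.labels['contents']     # -> 'Contents'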
@@ -20,43 +20,43 @@ labels = {
       # fixed: language-dependent
       'author': u'Autor',
       'authors': u'Autores',
-      'organization': unicode('Organização', 'latin1'),
-      'address': unicode('Endereço', 'latin1'),
+      'organization': u'Organiza\u00E7\u00E3o',
+      'address': u'Endere\u00E7o',
       'contact': u'Contato',
-      'version': unicode('Versão', 'latin1'),
-      'revision': unicode('Revisão', 'latin1'),
+      'version': u'Vers\u00E3o',
+      'revision': u'Revis\u00E3o',
       'status': u'Estado',
       'date': u'Data',
       'copyright': u'Copyright',
-      'dedication': unicode('Dedicatória', 'latin1'),
+      'dedication': u'Dedicat\u00F3ria',
       'abstract': u'Resumo',
-      'attention': unicode('Attenção!', 'latin1'),
+      'attention': u'Atten\u00E7\u00E3o!',
       'caution': u'Cuidado!',
       'danger': u'PERIGO!',
       'error': u'Erro',
-      'hint': unicode('Sugestão', 'latin1'),
+      'hint': u'Sugest\u00E3o',
       'important': u'Importante',
       'note': u'Nota',
       'tip': u'Dica',
       'warning': u'Aviso',
-      'contents': unicode('Sumário', 'latin1')}
+      'contents': u'Sum\u00E1rio'}
 """Mapping of node class name to label text."""
 
 bibliographic_fields = {
       # language-dependent: fixed
       u'autor': 'author',
       u'autores': 'authors',
-      unicode('organização', 'latin1'): 'organization',
-      unicode('endereço', 'latin1'): 'address',
+      u'organiza\u00E7\u00E3o': 'organization',
+      u'endere\u00E7o': 'address',
       u'contato': 'contact',
-      unicode('versão', 'latin1'): 'version',
-      unicode('revisão', 'latin1'): 'revision',
+      u'vers\u00E3o': 'version',
+      u'revis\u00E3o': 'revision',
       u'estado': 'status',
       u'data': 'date',
       u'copyright': 'copyright',
-      unicode('dedicatória', 'latin1'): 'dedication',
+      u'dedicat\u00F3ria': 'dedication',
       u'resumo': 'abstract'}
-"""English (lowcased) to canonical name mapping for bibliographic fields."""
+"""Brazilian Portuguese (lowcased) to canonical name mapping for bibliographic fields."""
 
 author_separators = [';', ',']
 """List of separator strings for the 'Authors' bibliographic field. Tried in
...
@@ -18,7 +18,7 @@ is represented by abstract base classes (`Root`, `Structural`, `Body`,
 ``isinstance(node, base_class)`` to determine the position of the node in the
 hierarchy.
 
-.. _DTD: http://docutils.sourceforge.net/spec/docutils.dtd
+.. _DTD: http://docutils.sourceforge.net/docs/ref/docutils.dtd
 """
 
 __docformat__ = 'reStructuredText'
@@ -1123,8 +1123,8 @@ class pending(Special, Invisible, PreBibliographic, Element):
     But the "contents" directive can't do its work until the entire document
     has been parsed and possibly transformed to some extent. So the directive
-    code leaves a placeholder behind that will trigger the second phase of the
-    its processing, something like this::
+    code leaves a placeholder behind that will trigger the second phase of its
+    processing, something like this::
 
         <pending ...public attributes...> + internal attributes
...
@@ -88,12 +88,21 @@ class Parser(docutils.parsers.Parser):
     settings_spec = (
         'reStructuredText Parser Options',
         None,
-        (('Recognize and link to PEP references (like "PEP 258").',
+        (('Recognize and link to standalone PEP references (like "PEP 258").',
           ['--pep-references'],
           {'action': 'store_true', 'validator': frontend.validate_boolean}),
-         ('Recognize and link to RFC references (like "RFC 822").',
+         ('Base URL for PEP references '
+          '(default "http://www.python.org/peps/").',
+          ['--pep-base-url'],
+          {'metavar': '<URL>', 'default': 'http://www.python.org/peps/',
+           'validator': frontend.validate_url_trailing_slash}),
+         ('Recognize and link to standalone RFC references (like "RFC 822").',
          ['--rfc-references'],
          {'action': 'store_true', 'validator': frontend.validate_boolean}),
+         ('Base URL for RFC references (default "http://www.faqs.org/rfcs/").',
+          ['--rfc-base-url'],
+          {'metavar': '<URL>', 'default': 'http://www.faqs.org/rfcs/',
+           'validator': frontend.validate_url_trailing_slash}),
         ('Set number of spaces for tab expansion (default 8).',
          ['--tab-width'],
          {'metavar': '<width>', 'type': 'int', 'default': 8}),
...
@@ -20,11 +20,12 @@ The interface for directive functions is as follows::
 
     Parameters:
 
-    - ``name`` is the directive type or name.
+    - ``name`` is the directive type or name (string).
 
-    - ``arguments`` is a list of positional arguments.
+    - ``arguments`` is a list of positional arguments (strings).
 
-    - ``options`` is a dictionary mapping option names to values.
+    - ``options`` is a dictionary mapping option names (strings) to values (type
+      depends on option conversion functions; see below).
 
     - ``content`` is a list of strings, the directive content.
@@ -63,6 +64,10 @@ directive function):
   options to parse. Several directive option conversion functions are defined
   in this module.
 
+  Option conversion functions take a single parameter, the option argument (a
+  string or ``None``), validate it and/or convert it to the appropriate form.
+  Conversion functions may raise ``ValueError`` and ``TypeError`` exceptions.
+
 - ``content``: A boolean; true if content is allowed. Client code must handle
   the case where content is required but not supplied (an empty content list
   will be supplied).
@@ -74,11 +79,12 @@ empty list).
 See `Creating reStructuredText Directives`_ for more information.
 
 .. _Creating reStructuredText Directives:
-   http://docutils.sourceforge.net/spec/howto/rst-directives.html
+   http://docutils.sourceforge.net/docs/howto/rst-directives.html
 """
 
 __docformat__ = 'reStructuredText'
 
+import re
 from docutils import nodes
 from docutils.parsers.rst.languages import en as _fallback_language_module
@@ -102,8 +108,9 @@ _directive_registry = {
       'epigraph': ('body', 'epigraph'),
       'highlights': ('body', 'highlights'),
       'pull-quote': ('body', 'pull_quote'),
-      'table': ('body', 'table'),
       #'questions': ('body', 'question_list'),
+      'table': ('tables', 'table'),
+      'csv-table': ('tables', 'csv_table'),
       'image': ('images', 'image'),
       'figure': ('images', 'figure'),
       'contents': ('parts', 'contents'),
@@ -193,12 +200,12 @@ def directive(directive_name, language_module, document):
         return None, messages
     return function, messages
 
-def register_directive(name, directive):
+def register_directive(name, directive_function):
     """
     Register a nonstandard application-defined directive function.
     Language lookups are not needed for such functions.
     """
-    _directives[name] = directive
+    _directives[name] = directive_function
 
 def flag(argument):
     """
@@ -278,6 +285,60 @@ def class_option(argument):
     raise ValueError('cannot make "%s" into a class name' % argument)
return class_name return class_name
unicode_pattern = re.compile(
r'(?:0x|x|\\x|U\+?|\\u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
def unicode_code(code):
r"""
Convert a Unicode character code to a Unicode character.
Codes may be decimal numbers, hexadecimal numbers (prefixed by ``0x``,
``x``, ``\x``, ``U+``, ``u``, or ``\u``; e.g. ``U+262E``), or XML-style
numeric character entities (e.g. ``&#x262E;``). Other text remains as-is.
"""
try:
if code.isdigit(): # decimal number
return unichr(int(code))
else:
match = unicode_pattern.match(code)
if match: # hex number
value = match.group(1) or match.group(2)
return unichr(int(value, 16))
else: # other text
return code
except OverflowError, detail:
raise ValueError('code too large (%s)' % detail)
def single_char_or_unicode(argument):
char = unicode_code(argument)
if len(char) > 1:
raise ValueError('%r invalid; must be a single character or '
'a Unicode code' % char)
return char
def single_char_or_whitespace_or_unicode(argument):
if argument == 'tab':
char = '\t'
elif argument == 'space':
char = ' '
else:
char = single_char_or_unicode(argument)
return char
def positive_int(argument):
value = int(argument)
if value < 1:
raise ValueError('negative or zero value; must be positive')
return value
def positive_int_list(argument):
if ',' in argument:
entries = argument.split(',')
else:
entries = argument.split()
return [positive_int(entry) for entry in entries]
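For illustration, the conversion helpers added above behave roughly as follows (assuming they are importable from this module, `docutils.parsers.rst.directives`)::

    from docutils.parsers.rst import directives

    # 0x262E, &#x262E; and 9774 are all spellings of the same character.
    assert directives.unicode_code('0x262E') == u'\u262e'
    assert directives.unicode_code('&#x262E;') == u'\u262e'
    assert directives.unicode_code('9774') == u'\u262e'
    assert directives.single_char_or_whitespace_or_unicode('tab') == '\t'
    assert directives.positive_int_list('1, 2, 3') == [1, 2, 3]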
def format_values(values): def format_values(values):
return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]), return '%s, or "%s"' % (', '.join(['"%s"' % s for s in values[:-1]]),
values[-1]) values[-1])
......
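Putting the directive interface and the `register_directive` hook described above together, a rough sketch of a nonstandard application-defined directive; the directive name, node type, and option handling are illustrative, not part of this change::

    from docutils import nodes
    from docutils.parsers.rst import directives

    def editor_note(name, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
        # Directive functions receive these nine arguments and return a
        # list of nodes and/or system messages.
        node = nodes.admonition('\n'.join(content))
        if options.has_key('class'):
            node.set_class(options['class'])
        state.nested_parse(content, content_offset, node)
        return [node]

    editor_note.arguments = (0, 0, 0)   # no required or optional arguments
    editor_note.options = {'class': directives.class_option}
    editor_note.content = 1

    directives.register_directive('editor-note', editor_note)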
...@@ -15,7 +15,7 @@ import sys ...@@ -15,7 +15,7 @@ import sys
from docutils import nodes from docutils import nodes
from docutils.parsers.rst import directives from docutils.parsers.rst import directives
def topic(name, arguments, options, content, lineno, def topic(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine, content_offset, block_text, state, state_machine,
node_class=nodes.topic): node_class=nodes.topic):
...@@ -74,6 +74,7 @@ def line_block(name, arguments, options, content, lineno, ...@@ -74,6 +74,7 @@ def line_block(name, arguments, options, content, lineno,
text = '\n'.join(content) text = '\n'.join(content)
text_nodes, messages = state.inline_text(text, lineno) text_nodes, messages = state.inline_text(text, lineno)
node = node_class(text, '', *text_nodes, **options) node = node_class(text, '', *text_nodes, **options)
node.line = content_offset + 1
return [node] + messages return [node] + messages
line_block.options = {'class': directives.class_option} line_block.options = {'class': directives.class_option}
...@@ -121,38 +122,3 @@ def pull_quote(name, arguments, options, content, lineno, ...@@ -121,38 +122,3 @@ def pull_quote(name, arguments, options, content, lineno,
return [block_quote] + messages return [block_quote] + messages
pull_quote.content = 1 pull_quote.content = 1
def table(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if not content:
warning = state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% name, nodes.literal_block(block_text, block_text),
line=lineno)
return [warning]
if arguments:
title_text = arguments[0]
text_nodes, messages = state.inline_text(title_text, lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
node = nodes.Element() # anonymous container for parsing
text = '\n'.join(content)
state.nested_parse(content, content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one table expected.'
% name, nodes.literal_block(block_text, block_text),
line=lineno)
return [error]
table_node = node[0]
if options.has_key('class'):
table_node.set_class(options['class'])
if title:
table_node.insert(0, title)
return [table_node]
table.arguments = (0, 1, 1)
table.options = {'class': directives.class_option}
table.content = 1
...@@ -28,6 +28,7 @@ def align(argument): ...@@ -28,6 +28,7 @@ def align(argument):
def image(name, arguments, options, content, lineno, def image(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine): content_offset, block_text, state, state_machine):
messages = []
reference = ''.join(arguments[0].split('\n')) reference = ''.join(arguments[0].split('\n'))
if reference.find(' ') != -1: if reference.find(' ') != -1:
error = state_machine.reporter.error( error = state_machine.reporter.error(
...@@ -35,23 +36,26 @@ def image(name, arguments, options, content, lineno, ...@@ -35,23 +36,26 @@ def image(name, arguments, options, content, lineno,
nodes.literal_block(block_text, block_text), line=lineno) nodes.literal_block(block_text, block_text), line=lineno)
return [error] return [error]
options['uri'] = reference options['uri'] = reference
reference_node = None
if options.has_key('target'): if options.has_key('target'):
block = states.escape2null(options['target']).splitlines() block = states.escape2null(options['target']).splitlines()
block = [line for line in block] block = [line for line in block]
target_type, data = state.parse_target(block, block_text, lineno) target_type, data = state.parse_target(block, block_text, lineno)
if target_type == 'refuri': if target_type == 'refuri':
node_list = nodes.reference(refuri=data) reference_node = nodes.reference(refuri=data)
elif target_type == 'refname': elif target_type == 'refname':
node_list = nodes.reference( reference_node = nodes.reference(
refname=data, name=whitespace_normalize_name(options['target'])) refname=data, name=whitespace_normalize_name(options['target']))
state.document.note_refname(node_list) state.document.note_refname(reference_node)
else: # malformed target else: # malformed target
node_list = [data] # data is a system message messages.append(data) # data is a system message
del options['target'] del options['target']
image_node = nodes.image(block_text, **options)
if reference_node:
reference_node += image_node
return messages + [reference_node]
else: else:
node_list = [] return messages + [image_node]
node_list.append(nodes.image(block_text, **options))
return node_list
image.arguments = (1, 0, 1) image.arguments = (1, 0, 1)
image.options = {'alt': directives.unchanged, image.options = {'alt': directives.unchanged,
......
...@@ -117,7 +117,7 @@ def raw(name, arguments, options, content, lineno, ...@@ -117,7 +117,7 @@ def raw(name, arguments, options, content, lineno,
nodes.literal_block(block_text, block_text), line=lineno) nodes.literal_block(block_text, block_text), line=lineno)
return [severe] return [severe]
text = raw_file.read() text = raw_file.read()
raw_file.close() raw_file.close()
attributes['source'] = options['file'] attributes['source'] = options['file']
else: else:
error = state_machine.reporter.warning( error = state_machine.reporter.warning(
...@@ -185,29 +185,19 @@ def unicode_directive(name, arguments, options, content, lineno, ...@@ -185,29 +185,19 @@ def unicode_directive(name, arguments, options, content, lineno,
element = nodes.Element() element = nodes.Element()
for code in codes: for code in codes:
try: try:
if code.isdigit(): decoded = directives.unicode_code(code)
element += nodes.Text(unichr(int(code))) except ValueError, err:
else:
match = unicode_pattern.match(code)
if match:
value = match.group(1) or match.group(2)
element += nodes.Text(unichr(int(value, 16)))
else:
element += nodes.Text(code)
except (ValueError, OverflowError), err:
error = state_machine.reporter.error( error = state_machine.reporter.error(
'Invalid character code: %s\n%s: %s' 'Invalid character code: %s\n%s: %s'
% (code, err.__class__.__name__, err), % (code, err.__class__.__name__, err),
nodes.literal_block(block_text, block_text), line=lineno) nodes.literal_block(block_text, block_text), line=lineno)
return [error] return [error]
element += nodes.Text(decoded)
return element.children return element.children
unicode_directive.arguments = (1, 0, 1) unicode_directive.arguments = (1, 0, 1)
unicode_pattern = re.compile(
r'(?:0x|x|\\x|U\+?|\\u)([0-9a-f]+)$|&#x([0-9a-f]+);$', re.IGNORECASE)
unicode_comment_pattern = re.compile(r'( |\n|^).. ') unicode_comment_pattern = re.compile(r'( |\n|^).. ')
def class_directive(name, arguments, options, content, lineno, def class_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine): content_offset, block_text, state, state_machine):
"""""" """"""
...@@ -259,7 +249,7 @@ def role(name, arguments, options, content, lineno, ...@@ -259,7 +249,7 @@ def role(name, arguments, options, content, lineno,
return messages + [error] return messages + [error]
else: else:
base_role = roles.generic_custom_role base_role = roles.generic_custom_role
assert not hasattr(base_role, 'arguments'), ( assert not hasattr(base_role, 'arguments'), (
'Supplemental directive arguments for "%s" directive not supported' 'Supplemental directive arguments for "%s" directive not supported'
'(specified by "%r" role).' % (name, base_role)) '(specified by "%r" role).' % (name, base_role))
try: try:
......
# Authors: David Goodger, David Priest
# Contact: goodger@python.org
# Revision: $Revision: 1.2 $
# Date: $Date: 2004/06/19 22:53:32 $
# Copyright: This module has been placed in the public domain.
"""
Directives for table elements.
"""
__docformat__ = 'reStructuredText'
import sys
import os.path
from docutils import nodes, statemachine, utils
from docutils.utils import SystemMessagePropagation
from docutils.parsers.rst import directives
try:
import csv # new in Python 2.3
except ImportError:
csv = None
try:
import urllib2
except ImportError:
urllib2 = None
try:
True
except NameError: # Python 2.2 & 2.1 compatibility
True = not 0
False = not 1
def table(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
if not content:
warning = state_machine.reporter.warning(
'Content block expected for the "%s" directive; none found.'
% name, nodes.literal_block(block_text, block_text),
line=lineno)
return [warning]
title, messages = make_title(arguments, state, lineno)
node = nodes.Element() # anonymous container for parsing
text = '\n'.join(content)
state.nested_parse(content, content_offset, node)
if len(node) != 1 or not isinstance(node[0], nodes.table):
error = state_machine.reporter.error(
'Error parsing content block for the "%s" directive: '
'exactly one table expected.'
% name, nodes.literal_block(block_text, block_text),
line=lineno)
return [error]
table_node = node[0]
if options.has_key('class'):
table_node.set_class(options['class'])
if title:
table_node.insert(0, title)
return [table_node] + messages
table.arguments = (0, 1, 1)
table.options = {'class': directives.class_option}
table.content = 1
def make_title(arguments, state, lineno):
if arguments:
title_text = arguments[0]
text_nodes, messages = state.inline_text(title_text, lineno)
title = nodes.title(title_text, '', *text_nodes)
else:
title = None
messages = []
return title, messages
if csv:
class DocutilsDialect(csv.Dialect):
"""CSV dialect for `csv_table` directive function."""
delimiter = ','
quotechar = '"'
doublequote = True
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def __init__(self, options):
if options.has_key('delim'):
self.delimiter = str(options['delim'])
if options.has_key('keepspace'):
self.skipinitialspace = False
if options.has_key('quote'):
self.quotechar = str(options['quote'])
if options.has_key('escape'):
self.doublequote = False
self.escapechar = str(options['escape'])
csv.Dialect.__init__(self)
class HeaderDialect(csv.Dialect):
"""CSV dialect to use for the "header" option data."""
delimiter = ','
quotechar = '"'
escapechar = '\\'
doublequote = False
skipinitialspace = True
lineterminator = '\n'
quoting = csv.QUOTE_MINIMAL
def csv_table(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
check_requirements(name, lineno, block_text, state_machine)
title, messages = make_title(arguments, state, lineno)
csv_data, source = get_csv_data(
name, options, content, lineno, block_text, state, state_machine)
table_head, max_header_cols = process_header_option(
options, state_machine, lineno)
rows, max_cols = parse_csv_data_into_rows(
csv_data, DocutilsDialect(options), source, options)
max_cols = max(max_cols, max_header_cols)
header_rows = options.get('header-rows', 0) # default 0
check_table_dimensions(
rows, header_rows, name, lineno, block_text, state_machine)
table_head.extend(rows[:header_rows])
table_body = rows[header_rows:]
col_widths = get_column_widths(
max_cols, name, options, lineno, block_text, state_machine)
extend_short_rows_with_empty_cells(max_cols, (table_head, table_body))
except SystemMessagePropagation, detail:
return [detail.args[0]]
except csv.Error, detail:
error = state_machine.reporter.error(
'Error with CSV data in "%s" directive:\n%s' % (name, detail),
nodes.literal_block(block_text, block_text), line=lineno)
return [error]
table = (col_widths, table_head, table_body)
table_node = state.build_table(table, content_offset)
if options.has_key('class'):
table_node.set_class(options['class'])
if title:
table_node.insert(0, title)
return [table_node] + messages
csv_table.arguments = (0, 1, 1)
csv_table.options = {'header-rows': directives.nonnegative_int,
'header': directives.unchanged,
'widths': directives.positive_int_list,
'file': directives.path,
'url': directives.path,
'class': directives.class_option,
# field delimiter char
'delim': directives.single_char_or_whitespace_or_unicode,
# treat whitespace after delimiter as significant
'keepspace': directives.flag,
# text field quote/unquote char:
'quote': directives.single_char_or_unicode,
# char used to escape delim & quote as-needed:
'escape': directives.single_char_or_unicode,}
csv_table.content = 1
def check_requirements(name, lineno, block_text, state_machine):
if not csv:
error = state_machine.reporter.error(
'The "%s" directive is not compatible with this version of '
'Python (%s). Requires the "csv" module, new in Python 2.3.'
% (name, sys.version.split()[0]),
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(error)
def get_csv_data(name, options, content, lineno, block_text,
state, state_machine):
"""
CSV data can come from the directive content, from an external file, or
from a URL reference.
"""
if content: # CSV data is from directive content
if options.has_key('file') or options.has_key('url'):
error = state_machine.reporter.error(
'"%s" directive may not both specify an external file and '
'have content.' % name,
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(error)
source = content.source(0)
csv_data = content
elif options.has_key('file'): # CSV data is from an external file
if options.has_key('url'):
error = state_machine.reporter.error(
'The "file" and "url" options may not be simultaneously '
'specified for the "%s" directive.' % name,
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(error)
source_dir = os.path.dirname(
os.path.abspath(state.document.current_source))
source = os.path.normpath(os.path.join(source_dir, options['file']))
source = utils.relative_path(None, source)
try:
csv_file = open(source, 'rb')
try:
csv_data = csv_file.read().splitlines()
finally:
csv_file.close()
except IOError, error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive path:\n%s.' % (name, error),
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(severe)
elif options.has_key('url'): # CSV data is from a URL
if not urllib2:
severe = state_machine.reporter.severe(
'Problems with the "%s" directive and its "url" option: '
'unable to access the required functionality (from the '
'"urllib2" module).' % name,
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(severe)
source = options['url']
try:
csv_data = urllib2.urlopen(source).read().splitlines()
except (urllib2.URLError, IOError, OSError, ValueError), error:
severe = state_machine.reporter.severe(
'Problems with "%s" directive URL "%s":\n%s.'
% (name, options['url'], error),
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(severe)
else:
error = state_machine.reporter.warning(
'The "%s" directive requires content; none supplied.' % (name),
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(error)
return csv_data, source
def process_header_option(options, state_machine, lineno):
source = state_machine.get_source(lineno - 1)
table_head = []
max_header_cols = 0
if options.has_key('header'): # separate table header in option
rows, max_header_cols = parse_csv_data_into_rows(
options['header'].split('\n'), HeaderDialect(), source, options)
table_head.extend(rows)
return table_head, max_header_cols
def parse_csv_data_into_rows(csv_data, dialect, source, options):
csv_reader = csv.reader(csv_data, dialect=dialect)
rows = []
max_cols = 0
for row in csv_reader:
row_data = []
for cell in row:
cell_data = (0, 0, 0, statemachine.StringList(cell.splitlines(),
source=source))
row_data.append(cell_data)
rows.append(row_data)
max_cols = max(max_cols, len(row))
return rows, max_cols
def check_table_dimensions(rows, header_rows, name, lineno, block_text,
state_machine):
if len(rows) < header_rows:
error = state_machine.reporter.error(
'%s header row(s) specified but only %s row(s) of data supplied '
'("%s" directive).' % (header_rows, len(rows), name),
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(error)
elif len(rows) == header_rows > 0:
error = state_machine.reporter.error(
'Insufficient data supplied (%s row(s)); no data remaining for '
'table body, required by "%s" directive.' % (len(rows), name),
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(error)
def get_column_widths(max_cols, name, options, lineno, block_text,
state_machine):
if options.has_key('widths'):
col_widths = options['widths']
if len(col_widths) != max_cols:
error = state_machine.reporter.error(
'"%s" widths do not match the number of columns in table (%s).'
% (name, max_cols),
nodes.literal_block(block_text, block_text), line=lineno)
raise SystemMessagePropagation(error)
else:
col_widths = [100 / max_cols] * max_cols
return col_widths
def extend_short_rows_with_empty_cells(columns, parts):
for part in parts:
for row in part:
if len(row) < columns:
row.extend([(0, 0, 0, [])] * (columns - len(row)))
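For illustration, a sketch of the new directive in use, feeding CSV data through the directive content (the first of the three sources `get_csv_data` accepts); it assumes the `publish_string` front end and needs the `csv` module from Python 2.3, as `check_requirements` enforces::

    from docutils.core import publish_string

    source = """\
    .. csv-table:: Office supplies
       :header-rows: 1
       :widths: 10 5 10

       Item, Quantity, Price
       "Widget, large", 4, 1.25
       Gadget, 10, 0.80
    """

    html = publish_string(source, writer_name='html')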
...@@ -36,10 +36,11 @@ directives = { ...@@ -36,10 +36,11 @@ directives = {
'epigraaf': 'epigraph', 'epigraaf': 'epigraph',
'hoogtepunte': 'highlights', 'hoogtepunte': 'highlights',
'pull-quote (translation required)': 'pull-quote', 'pull-quote (translation required)': 'pull-quote',
'table (translation required)': 'table',
#'vrae': 'questions', #'vrae': 'questions',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'meta': 'meta', 'meta': 'meta',
#'beeldkaart': 'imagemap', #'beeldkaart': 'imagemap',
'beeld': 'image', 'beeld': 'image',
......
...@@ -37,10 +37,11 @@ directives = { ...@@ -37,10 +37,11 @@ directives = {
u'moto': 'epigraph', u'moto': 'epigraph',
u'highlights': 'highlights', u'highlights': 'highlights',
u'pull-quote': 'pull-quote', u'pull-quote': 'pull-quote',
u'table (translation required)': 'table',
#'questions': 'questions', #'questions': 'questions',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'meta': 'meta', u'meta': 'meta',
#'imagemap': 'imagemap', #'imagemap': 'imagemap',
u'image': 'image', # obrazek u'image': 'image', # obrazek
......
...@@ -37,10 +37,11 @@ directives = { ...@@ -37,10 +37,11 @@ directives = {
'epigraph (translation required)': 'epigraph', 'epigraph (translation required)': 'epigraph',
'highlights (translation required)': 'highlights', 'highlights (translation required)': 'highlights',
'pull-quote (translation required)': 'pull-quote', # kasten too ? 'pull-quote (translation required)': 'pull-quote', # kasten too ?
'table (translation required)': 'table',
#'questions': 'questions', #'questions': 'questions',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
'table (translation required)': 'table',
'csv-table (translation required)': 'csv-table',
'meta': 'meta', 'meta': 'meta',
#'imagemap': 'imagemap', #'imagemap': 'imagemap',
'bild': 'image', 'bild': 'image',
......
...@@ -37,8 +37,9 @@ directives = { ...@@ -37,8 +37,9 @@ directives = {
'epigraph': 'epigraph', 'epigraph': 'epigraph',
'highlights': 'highlights', 'highlights': 'highlights',
'pull-quote': 'pull-quote', 'pull-quote': 'pull-quote',
'table': 'table',
#'questions': 'questions', #'questions': 'questions',
'table': 'table',
'csv-table': 'csv-table',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
'meta': 'meta', 'meta': 'meta',
......
...@@ -40,10 +40,12 @@ directives = { ...@@ -40,10 +40,12 @@ directives = {
u'elstara\u0135oj': 'highlights', u'elstara\u0135oj': 'highlights',
u'ekstera-citajxo': 'pull-quote', u'ekstera-citajxo': 'pull-quote',
u'ekstera-cita\u0135o': 'pull-quote', u'ekstera-cita\u0135o': 'pull-quote',
u'tabelo': 'table',
#'questions': 'questions', #'questions': 'questions',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
u'tabelo': 'table',
u'tabelo-vdk': 'csv-table', # "valoroj disigitaj per komoj"
u'tabelo-csv': 'csv-table',
u'meta': 'meta', u'meta': 'meta',
#'imagemap': 'imagemap', #'imagemap': 'imagemap',
u'bildo': 'image', u'bildo': 'image',
......
...@@ -42,10 +42,12 @@ directives = { ...@@ -42,10 +42,12 @@ directives = {
u'epigrafe': 'epigraph', u'epigrafe': 'epigraph',
u'destacado': 'highlights', u'destacado': 'highlights',
u'cita-destacada': 'pull-quote', u'cita-destacada': 'pull-quote',
u'tabla': 'table',
#'questions': 'questions', #'questions': 'questions',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
u'tabla': 'table',
u'tabla-vsc': 'csv-table',
u'tabla-csv': 'csv-table',
u'meta': 'meta', u'meta': 'meta',
#'imagemap': 'imagemap', #'imagemap': 'imagemap',
u'imagen': 'image', u'imagen': 'image',
......
...@@ -38,10 +38,11 @@ directives = { ...@@ -38,10 +38,11 @@ directives = {
u'\u00E9pigraphe': 'epigraph', u'\u00E9pigraphe': 'epigraph',
u'chapeau': 'highlights', u'chapeau': 'highlights',
u'accroche': 'pull-quote', u'accroche': 'pull-quote',
u'tableau': 'table',
#u'questions': 'questions', #u'questions': 'questions',
#u'qr': 'questions', #u'qr': 'questions',
#u'faq': 'questions', #u'faq': 'questions',
u'tableau': 'table',
u'csv-table (translation required)': 'csv-table',
u'm\u00E9ta': 'meta', u'm\u00E9ta': 'meta',
#u'imagemap (translation required)': 'imagemap', #u'imagemap (translation required)': 'imagemap',
u'image': 'image', u'image': 'image',
......
...@@ -31,10 +31,11 @@ directives = { ...@@ -31,10 +31,11 @@ directives = {
'epigrafe': 'epigraph', 'epigrafe': 'epigraph',
'evidenzia': 'highlights', 'evidenzia': 'highlights',
'pull-quote (translation required)': 'pull-quote', 'pull-quote (translation required)': 'pull-quote',
'tabella': 'table',
#'questions': 'questions', #'questions': 'questions',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
'tabella': 'table',
'csv-table (translation required)': 'csv-table',
'meta': 'meta', 'meta': 'meta',
#'imagemap': 'imagemap', #'imagemap': 'imagemap',
'immagine': 'image', 'immagine': 'image',
......
...@@ -20,78 +20,79 @@ __docformat__ = 'reStructuredText' ...@@ -20,78 +20,79 @@ __docformat__ = 'reStructuredText'
directives = { directives = {
# language-dependent: fixed # language-dependent: fixed
u'ateno': 'attention', u'aten\u00E7\u00E3o': 'attention',
'cuidado': 'caution', 'cuidado': 'caution',
'perigo': 'danger', 'perigo': 'danger',
'erro': 'error', 'erro': 'error',
u'sugesto': 'hint', u'sugest\u00E3o': 'hint',
'importante': 'important', 'importante': 'important',
'nota': 'note', 'nota': 'note',
'dica': 'tip', 'dica': 'tip',
'aviso': 'warning', 'aviso': 'warning',
u'exortao': 'admonition', u'exorta\u00E7\u00E3o': 'admonition',
'barra-lateral': 'sidebar', 'barra-lateral': 'sidebar',
u'tpico': 'topic', u't\u00F3pico': 'topic',
'bloco-de-linhas': 'line-block', 'bloco-de-linhas': 'line-block',
'literal-interpretado': 'parsed-literal', 'literal-interpretado': 'parsed-literal',
'rubrica': 'rubric', 'rubrica': 'rubric',
u'epgrafo': 'epigraph', u'ep\u00EDgrafo': 'epigraph',
'destaques': 'highlights', 'destaques': 'highlights',
u'citao-destacada': 'pull-quote', u'cita\u00E7\u00E3o-destacada': 'pull-quote',
u'table (translation required)': 'table',
#'perguntas': 'questions', #'perguntas': 'questions',
#'qa': 'questions', #'qa': 'questions',
#'faq': 'questions', #'faq': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
'meta': 'meta', 'meta': 'meta',
#'imagemap': 'imagemap', #'imagemap': 'imagemap',
'imagem': 'image', 'imagem': 'image',
'figura': 'figure', 'figura': 'figure',
u'incluso': 'include', u'inclus\u00E3o': 'include',
'cru': 'raw', 'cru': 'raw',
u'substituio': 'replace', u'substitui\u00E7\u00E3o': 'replace',
'unicode': 'unicode', 'unicode': 'unicode',
'classe': 'class', 'classe': 'class',
'role (translation required)': 'role', 'role (translation required)': 'role',
u'ndice': 'contents', u'\u00EDndice': 'contents',
'numsec': 'sectnum', 'numsec': 'sectnum',
u'numerao-de-sees': 'sectnum', u'numera\u00E7\u00E3o-de-se\u00E7\u00F5es': 'sectnum',
#u'notas-de-rorap': 'footnotes', #u'notas-de-rorap\u00E9': 'footnotes',
#u'citaes': 'citations', #u'cita\u00E7\u00F5es': 'citations',
u'links-no-rodap': 'target-notes', u'links-no-rodap\u00E9': 'target-notes',
'restructuredtext-test-directive': 'restructuredtext-test-directive'} 'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name """Brazilian Portuguese name to registered (in directives/__init__.py)
mapping.""" directive name mapping."""
roles = { roles = {
# language-dependent: fixed # language-dependent: fixed
u'abbreviao': 'abbreviation', u'abbrevia\u00E7\u00E3o': 'abbreviation',
'ab': 'abbreviation', 'ab': 'abbreviation',
u'acrnimo': 'acronym', u'acr\u00F4nimo': 'acronym',
'ac': 'acronym', 'ac': 'acronym',
u'ndice-remissivo': 'index', u'\u00EDndice-remissivo': 'index',
'i': 'index', 'i': 'index',
'subscrito': 'subscript', 'subscrito': 'subscript',
'sub': 'subscript', 'sub': 'subscript',
'sobrescrito': 'superscript', 'sobrescrito': 'superscript',
'sob': 'superscript', 'sob': 'superscript',
u'referncia-a-ttulo': 'title-reference', u'refer\u00EAncia-a-t\u00EDtulo': 'title-reference',
u'ttulo': 'title-reference', u't\u00EDtulo': 'title-reference',
't': 'title-reference', 't': 'title-reference',
u'referncia-a-pep': 'pep-reference', u'refer\u00EAncia-a-pep': 'pep-reference',
'pep': 'pep-reference', 'pep': 'pep-reference',
u'referncia-a-rfc': 'rfc-reference', u'refer\u00EAncia-a-rfc': 'rfc-reference',
'rfc': 'rfc-reference', 'rfc': 'rfc-reference',
u'nfase': 'emphasis', u'\u00EAnfase': 'emphasis',
'forte': 'strong', 'forte': 'strong',
'literal': 'literal', 'literal': 'literal',
u'referncia-por-nome': 'named-reference', u'refer\u00EAncia-por-nome': 'named-reference',
u'referncia-annima': 'anonymous-reference', u'refer\u00EAncia-an\u00F4nima': 'anonymous-reference',
u'referncia-a-nota-de-rodap': 'footnote-reference', u'refer\u00EAncia-a-nota-de-rodap\u00E9': 'footnote-reference',
u'referncia-a-citao': 'citation-reference', u'refer\u00EAncia-a-cita\u00E7\u00E3o': 'citation-reference',
u'referncia-a-substituio': 'substitution-reference', u'refer\u00EAncia-a-substitui\u00E7\u00E3o': 'substitution-reference',
'alvo': 'target', 'alvo': 'target',
u'referncia-a-uri': 'uri-reference', u'refer\u00EAncia-a-uri': 'uri-reference',
'uri': 'uri-reference', 'uri': 'uri-reference',
'url': 'uri-reference',} 'url': 'uri-reference',}
"""Mapping of English role names to canonical role names for interpreted text. """Mapping of Brazilian Portuguese role names to canonical role names
""" for interpreted text."""
...@@ -24,6 +24,7 @@ directives = { ...@@ -24,6 +24,7 @@ directives = {
u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u043d\u0430\u044f-\u0446\u0438\u0442\u0430\u0442\u0430': u'\u0432\u044b\u0434\u0435\u043b\u0435\u043d\u043d\u0430\u044f-\u0446\u0438\u0442\u0430\u0442\u0430':
u'pull-quote', u'pull-quote',
u'table (translation required)': 'table', u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'\u0441\u044b\u0440\u043e\u0439': u'raw', u'\u0441\u044b\u0440\u043e\u0439': u'raw',
u'\u0437\u0430\u043c\u0435\u043d\u0430': u'replace', u'\u0437\u0430\u043c\u0435\u043d\u0430': u'replace',
u'\u0442\u0435\u0441\u0442\u043e\u0432\u0430\u044f-\u0434\u0438\u0440\u0435\u043a\u0442\u0438\u0432\u0430-restructuredtext': u'\u0442\u0435\u0441\u0442\u043e\u0432\u0430\u044f-\u0434\u0438\u0440\u0435\u043a\u0442\u0438\u0432\u0430-restructuredtext':
......
...@@ -36,10 +36,11 @@ directives = { ...@@ -36,10 +36,11 @@ directives = {
u'epigraph (translation required)': 'epigraph', u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights', u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote', u'pull-quote (translation required)': 'pull-quote',
u'table (translation required)': 'table',
#u'questions': 'questions', #u'questions': 'questions',
#u'qa': 'questions', #u'qa': 'questions',
#u'faq': 'questions', #u'faq': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'meta': 'meta', u'meta': 'meta',
#u'imagemap': 'imagemap', #u'imagemap': 'imagemap',
u'obr\xe1zok': 'image', u'obr\xe1zok': 'image',
......
...@@ -35,11 +35,12 @@ directives = { ...@@ -35,11 +35,12 @@ directives = {
u'epigraph (translation required)': 'epigraph', u'epigraph (translation required)': 'epigraph',
u'highlights (translation required)': 'highlights', u'highlights (translation required)': 'highlights',
u'pull-quote (translation required)': 'pull-quote', u'pull-quote (translation required)': 'pull-quote',
u'table (translation required)': 'table',
# u'fr\u00e5gor': 'questions', # u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/: # NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
# u'fr\u00e5gor-och-svar': 'questions', # u'fr\u00e5gor-och-svar': 'questions',
# u'vanliga-fr\u00e5gor': 'questions', # u'vanliga-fr\u00e5gor': 'questions',
u'table (translation required)': 'table',
u'csv-table (translation required)': 'csv-table',
u'meta': 'meta', u'meta': 'meta',
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal. # u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild': 'image', u'bild': 'image',
......
...@@ -263,7 +263,7 @@ def pep_reference_role(role, rawtext, text, lineno, inliner, ...@@ -263,7 +263,7 @@ def pep_reference_role(role, rawtext, text, lineno, inliner,
prb = inliner.problematic(rawtext, rawtext, msg) prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg] return [prb], [msg]
# Base URL mainly used by inliner.pep_reference; so this is correct: # Base URL mainly used by inliner.pep_reference; so this is correct:
ref = inliner.pep_url % pepnum ref = inliner.document.settings.pep_base_url + inliner.pep_url % pepnum
return [nodes.reference(rawtext, 'PEP ' + text, refuri=ref, **options)], [] return [nodes.reference(rawtext, 'PEP ' + text, refuri=ref, **options)], []
register_canonical_role('pep-reference', pep_reference_role) register_canonical_role('pep-reference', pep_reference_role)
...@@ -281,7 +281,7 @@ def rfc_reference_role(role, rawtext, text, lineno, inliner, ...@@ -281,7 +281,7 @@ def rfc_reference_role(role, rawtext, text, lineno, inliner,
prb = inliner.problematic(rawtext, rawtext, msg) prb = inliner.problematic(rawtext, rawtext, msg)
return [prb], [msg] return [prb], [msg]
# Base URL mainly used by inliner.rfc_reference, so this is correct: # Base URL mainly used by inliner.rfc_reference, so this is correct:
ref = inliner.rfc_url % rfcnum ref = inliner.document.settings.rfc_base_url + inliner.rfc_url % rfcnum
node = nodes.reference(rawtext, 'RFC ' + text, refuri=ref, **options) node = nodes.reference(rawtext, 'RFC ' + text, refuri=ref, **options)
return [node], [] return [node], []
......
...@@ -556,14 +556,19 @@ class Inliner: ...@@ -556,14 +556,19 @@ class Inliner:
# Valid URI characters (see RFC 2396 & RFC 2732); # Valid URI characters (see RFC 2396 & RFC 2732);
# final \x00 allows backslash escapes in URIs: # final \x00 allows backslash escapes in URIs:
uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]""" uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
# Delimiter indicating the end of a URI (not part of the URI):
uri_end_delim = r"""[>]"""
# Last URI character; same as uric but no punctuation: # Last URI character; same as uric but no punctuation:
urilast = r"""[_~/a-zA-Z0-9]""" urilast = r"""[_~*/=+a-zA-Z0-9]"""
# End of a URI (either 'urilast' or 'uric followed by a
# uri_end_delim'):
uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()
emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]""" emailc = r"""[-_!~*'{|}/#?^`&=+$%a-zA-Z0-9\x00]"""
email_pattern = r""" email_pattern = r"""
%(emailc)s+(?:\.%(emailc)s+)* # name %(emailc)s+(?:\.%(emailc)s+)* # name
@ # at @ # at
%(emailc)s+(?:\.%(emailc)s*)* # host %(emailc)s+(?:\.%(emailc)s*)* # host
%(urilast)s # final URI char %(uri_end)s # final URI char
""" """
parts = ('initial_inline', start_string_prefix, '', parts = ('initial_inline', start_string_prefix, '',
[('start', '', non_whitespace_after, # simple start-strings [('start', '', non_whitespace_after, # simple start-strings
...@@ -642,15 +647,15 @@ class Inliner: ...@@ -642,15 +647,15 @@ class Inliner:
( # either: ( # either:
(//?)? # hierarchical URI (//?)? # hierarchical URI
%(uric)s* # URI characters %(uric)s* # URI characters
%(urilast)s # final URI char %(uri_end)s # final URI char
) )
( # optional query ( # optional query
\?%(uric)s* \?%(uric)s*
%(urilast)s %(uri_end)s
)? )?
( # optional fragment ( # optional fragment
\#%(uric)s* \#%(uric)s*
%(urilast)s %(uri_end)s
)? )?
) )
) )
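The effect of the new `uri_end` construction can be checked in isolation using the character classes shown above; the `tail` pattern below is a toy stand-in for the full URI regexp, not part of the change::

    import re

    uric = r"""[-_.!~*'()[\];/:@&=+$,%a-zA-Z0-9\x00]"""
    uri_end_delim = r"""[>]"""
    urilast = r"""[_~*/=+a-zA-Z0-9]"""
    uri_end = r"""(?:%(urilast)s|%(uric)s(?=%(uri_end_delim)s))""" % locals()

    tail = re.compile(r'%(uric)s*%(uri_end)s' % locals())

    # A trailing ')' counts as the final URI character only when the closing
    # '>' of an embedded target follows it:
    assert tail.match('foo(bar)>').group() == 'foo(bar)'
    assert tail.match('foo(bar)').group() == 'foo(bar'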
...@@ -954,9 +959,7 @@ class Inliner: ...@@ -954,9 +959,7 @@ class Inliner:
else: # not a valid scheme else: # not a valid scheme
raise MarkupMismatch raise MarkupMismatch
pep_url_local = 'pep-%04d.html' pep_url = 'pep-%04d.html'
pep_url_absolute = 'http://www.python.org/peps/pep-%04d.html'
pep_url = pep_url_absolute
def pep_reference(self, match, lineno): def pep_reference(self, match, lineno):
text = match.group(0) text = match.group(0)
...@@ -966,17 +969,17 @@ class Inliner: ...@@ -966,17 +969,17 @@ class Inliner:
pepnum = int(match.group('pepnum2')) pepnum = int(match.group('pepnum2'))
else: else:
raise MarkupMismatch raise MarkupMismatch
ref = self.pep_url % pepnum ref = self.document.settings.pep_base_url + self.pep_url % pepnum
unescaped = unescape(text, 0) unescaped = unescape(text, 0)
return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)] return [nodes.reference(unescape(text, 1), unescaped, refuri=ref)]
rfc_url = 'http://www.faqs.org/rfcs/rfc%d.html' rfc_url = 'rfc%d.html'
def rfc_reference(self, match, lineno): def rfc_reference(self, match, lineno):
text = match.group(0) text = match.group(0)
if text.startswith('RFC'): if text.startswith('RFC'):
rfcnum = int(match.group('rfcnum')) rfcnum = int(match.group('rfcnum'))
ref = self.rfc_url % rfcnum ref = self.document.settings.rfc_base_url + self.rfc_url % rfcnum
else: else:
raise MarkupMismatch raise MarkupMismatch
unescaped = unescape(text, 0) unescaped = unescape(text, 0)
...@@ -2542,9 +2545,10 @@ class Text(RSTState): ...@@ -2542,9 +2545,10 @@ class Text(RSTState):
indented.trim_end() indented.trim_end()
if not indented: if not indented:
return self.quoted_literal_block() return self.quoted_literal_block()
nodelist = []
data = '\n'.join(indented) data = '\n'.join(indented)
nodelist.append(nodes.literal_block(data, data)) literal_block = nodes.literal_block(data, data)
literal_block.line = offset + 1
nodelist = [literal_block]
if not blank_finish: if not blank_finish:
nodelist.append(self.unindent_warning('Literal block')) nodelist.append(self.unindent_warning('Literal block'))
return nodelist return nodelist
......
...@@ -16,15 +16,6 @@ from docutils.transforms import peps, references ...@@ -16,15 +16,6 @@ from docutils.transforms import peps, references
from docutils.parsers import rst from docutils.parsers import rst
class Inliner(rst.states.Inliner):
"""
Extend `rst.Inliner` for local PEP references.
"""
pep_url = rst.states.Inliner.pep_url_local
class Reader(standalone.Reader): class Reader(standalone.Reader):
supported = ('pep',) supported = ('pep',)
...@@ -52,7 +43,7 @@ class Reader(standalone.Reader): ...@@ -52,7 +43,7 @@ class Reader(standalone.Reader):
settings_default_overrides = {'pep_references': 1, 'rfc_references': 1} settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}
inliner_class = Inliner inliner_class = rst.states.Inliner
def __init__(self, parser=None, parser_name=None): def __init__(self, parser=None, parser_name=None):
"""`parser` should be ``None``.""" """`parser` should be ``None``."""
......
# Author: David Goodger # Author: David Goodger
# Contact: goodger@users.sourceforge.net # Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3.2.1.8.1 $ # Revision: $Revision: 1.7 $
# Date: $Date: 2004/05/12 19:57:53 $ # Date: $Date: 2004/07/25 01:45:26 $
# Copyright: This module has been placed in the public domain. # Copyright: This module has been placed in the public domain.
""" """
This package contains the Python Source Reader modules. This package contains the Python Source Reader modules.
It requires Python 2.2 or higher (`moduleparser` depends on `compiler` and
`tokenizer` modules).
""" """
__docformat__ = 'reStructuredText' __docformat__ = 'reStructuredText'
......
# Author: David Goodger # Author: David Goodger
# Contact: goodger@users.sourceforge.net # Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3.2.1.8.1 $ # Revision: $Revision: 1.14 $
# Date: $Date: 2004/05/12 19:57:53 $ # Date: $Date: 2004/07/25 01:45:26 $
# Copyright: This module has been placed in the public domain. # Copyright: This module has been placed in the public domain.
""" """
Parser for Python modules. Parser for Python modules. Requires Python 2.2 or higher.
The `parse_module()` function takes a module's text and file name, The `parse_module()` function takes a module's text and file name,
runs it through the module parser (using compiler.py and tokenize.py) runs it through the module parser (using compiler.py and tokenize.py)
......
...@@ -3,8 +3,8 @@ ...@@ -3,8 +3,8 @@
""" """
:Author: David Goodger :Author: David Goodger
:Contact: goodger@users.sourceforge.net :Contact: goodger@users.sourceforge.net
:Revision: $Revision: 1.1.2.1 $ :Revision: $Revision: 1.3 $
:Date: $Date: 2004/05/12 19:57:53 $ :Date: $Date: 2004/03/23 23:21:11 $
:Copyright: This module has been placed in the public domain. :Copyright: This module has been placed in the public domain.
""" """
......
...@@ -344,6 +344,10 @@ class StateMachine: ...@@ -344,6 +344,10 @@ class StateMachine:
finally: finally:
self.notify_observers() self.notify_observers()
def get_source(self, line_offset):
"""Return source of line at absolute line offset `line_offset`."""
return self.input_lines.source(line_offset - self.input_offset)
def abs_line_offset(self): def abs_line_offset(self):
"""Return line offset of current line, from beginning of file.""" """Return line offset of current line, from beginning of file."""
return self.line_offset + self.input_offset return self.line_offset + self.input_offset
......
...@@ -118,7 +118,9 @@ class Headers(Transform): ...@@ -118,7 +118,9 @@ class Headers(Transform):
for refpep in re.split(',?\s+', body.astext()): for refpep in re.split(',?\s+', body.astext()):
pepno = int(refpep) pepno = int(refpep)
newbody.append(nodes.reference( newbody.append(nodes.reference(
refpep, refpep, refuri=self.pep_url % pepno)) refpep, refpep,
refuri=(self.document.settings.pep_base_url
+ self.pep_url % pepno)))
newbody.append(space) newbody.append(space)
para[:] = newbody[:-1] # drop trailing space para[:] = newbody[:-1] # drop trailing space
elif name == 'last-modified': elif name == 'last-modified':
...@@ -128,7 +130,7 @@ class Headers(Transform): ...@@ -128,7 +130,7 @@ class Headers(Transform):
para[:] = [nodes.reference('', date, refuri=cvs_url)] para[:] = [nodes.reference('', date, refuri=cvs_url)]
elif name == 'content-type': elif name == 'content-type':
pep_type = para.astext() pep_type = para.astext()
uri = self.pep_url % 12 uri = self.document.settings.pep_base_url + self.pep_url % 12
para[:] = [nodes.reference('', pep_type, refuri=uri)] para[:] = [nodes.reference('', pep_type, refuri=uri)]
elif name == 'version' and len(body): elif name == 'version' and len(body):
utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions) utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
...@@ -266,7 +268,8 @@ class PEPZeroSpecial(nodes.SparseNodeVisitor): ...@@ -266,7 +268,8 @@ class PEPZeroSpecial(nodes.SparseNodeVisitor):
text = p.astext() text = p.astext()
try: try:
pep = int(text) pep = int(text)
ref = self.pep_url % pep ref = (self.document.settings.pep_base_url
+ self.pep_url % pep)
p[0] = nodes.reference(text, text, refuri=ref) p[0] = nodes.reference(text, text, refuri=ref)
except ValueError: except ValueError:
pass pass
......
...@@ -25,6 +25,9 @@ class SystemMessage(ApplicationError): ...@@ -25,6 +25,9 @@ class SystemMessage(ApplicationError):
self.level = level self.level = level
class SystemMessagePropagation(ApplicationError): pass
class Reporter: class Reporter:
""" """
...@@ -52,7 +55,7 @@ class Reporter: ...@@ -52,7 +55,7 @@ class Reporter:
is compared to the thresholds stored in the category, and a warning or is compared to the thresholds stored in the category, and a warning or
error is generated as appropriate. Debug messages are produced iff the error is generated as appropriate. Debug messages are produced iff the
stored debug switch is on. Message output is sent to the stored warning stored debug switch is on. Message output is sent to the stored warning
stream. stream if not set to ''.
The default category is '' (empty string). By convention, Writers should The default category is '' (empty string). By convention, Writers should
retrieve reporting conditions from the 'writer' category (which, unless retrieve reporting conditions from the 'writer' category (which, unless
...@@ -89,7 +92,8 @@ class Reporter: ...@@ -89,7 +92,8 @@ class Reporter:
exceptions will be raised, halting execution. exceptions will be raised, halting execution.
- `debug`: Show debug (level=0) system messages? - `debug`: Show debug (level=0) system messages?
- `stream`: Where warning output is sent. Can be file-like (has a - `stream`: Where warning output is sent. Can be file-like (has a
``.write`` method), a string (file name, opened for writing), or ``.write`` method), a string (file name, opened for writing),
'' (empty string, for discarding all stream messages) or
`None` (implies `sys.stderr`; default). `None` (implies `sys.stderr`; default).
- `encoding`: The encoding for stderr output. - `encoding`: The encoding for stderr output.
- `error_handler`: The error handler for stderr output encoding. - `error_handler`: The error handler for stderr output encoding.
...@@ -100,7 +104,12 @@ class Reporter: ...@@ -100,7 +104,12 @@ class Reporter:
if stream is None: if stream is None:
stream = sys.stderr stream = sys.stderr
elif type(stream) in (StringType, UnicodeType): elif type(stream) in (StringType, UnicodeType):
raise NotImplementedError('This should open a file for writing.') # Leave stream untouched if it's ''.
if stream != '':
if type(stream) == StringType:
stream = open(stream, 'w')
elif type(stream) == UnicodeType:
stream = open(stream.encode(), 'w')
self.encoding = encoding self.encoding = encoding
"""The character encoding for the stderr output.""" """The character encoding for the stderr output."""
...@@ -175,7 +184,7 @@ class Reporter: ...@@ -175,7 +184,7 @@ class Reporter:
type=self.levels[level], type=self.levels[level],
*children, **attributes) *children, **attributes)
debug, report_level, halt_level, stream = self[category].astuple() debug, report_level, halt_level, stream = self[category].astuple()
if level >= report_level or debug and level == 0: if (level >= report_level or debug and level == 0) and stream:
msgtext = msg.astext().encode(self.encoding, self.error_handler) msgtext = msg.astext().encode(self.encoding, self.error_handler)
if category: if category:
print >>stream, msgtext, '[%s]' % category print >>stream, msgtext, '[%s]' % category
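A rough sketch of the new stream handling, assuming this release's `Reporter(source, report_level, halt_level, stream=...)` signature; the source label and file name are illustrative::

    from docutils.utils import Reporter

    # An empty string discards all stream output; any other string is now
    # opened as a file for writing instead of raising NotImplementedError.
    quiet = Reporter('<sketch>', 2, 4, stream='')
    logged = Reporter('<sketch>', 2, 4, stream='rest-warnings.txt')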
...@@ -265,6 +274,8 @@ def extract_extension_options(field_list, options_spec): ...@@ -265,6 +274,8 @@ def extract_extension_options(field_list, options_spec):
- `KeyError` for unknown option names. - `KeyError` for unknown option names.
- `ValueError` for invalid option values (raised by the conversion - `ValueError` for invalid option values (raised by the conversion
function). function).
- `TypeError` for invalid option value types (raised by conversion
function).
- `DuplicateOptionError` for duplicate options. - `DuplicateOptionError` for duplicate options.
- `BadOptionError` for invalid fields. - `BadOptionError` for invalid fields.
- `BadOptionDataError` for invalid option data (missing name, - `BadOptionDataError` for invalid option data (missing name,
...@@ -321,6 +332,8 @@ def assemble_option_dict(option_list, options_spec): ...@@ -321,6 +332,8 @@ def assemble_option_dict(option_list, options_spec):
- `DuplicateOptionError` for duplicate options. - `DuplicateOptionError` for duplicate options.
- `ValueError` for invalid option values (raised by conversion - `ValueError` for invalid option values (raised by conversion
function). function).
- `TypeError` for invalid option value types (raised by conversion
function).
""" """
options = {} options = {}
for name, value in option_list: for name, value in option_list:
......
...@@ -36,13 +36,15 @@ class Writer(Component): ...@@ -36,13 +36,15 @@ class Writer(Component):
"""The document to write (Docutils doctree); set by `write`.""" """The document to write (Docutils doctree); set by `write`."""
output = None output = None
"""Final translated form of `document`; set by `translate`.""" """Final translated form of `document` (Unicode string);
set by `translate`."""
language = None language = None
"""Language module for the document; set by `write`.""" """Language module for the document; set by `write`."""
destination = None destination = None
"""`docutils.io` IO object; where to write the document. Set by `write`.""" """`docutils.io` Output object; where to write the document.
Set by `write`."""
def __init__(self): def __init__(self):
...@@ -73,8 +75,8 @@ class Writer(Component): ...@@ -73,8 +75,8 @@ class Writer(Component):
def translate(self): def translate(self):
""" """
Do final translation of `self.document` into `self.output`. Do final translation of `self.document` into `self.output` (Unicode
Called from `write`. Override in subclasses. string). Called from `write`. Override in subclasses.
Usually done with a `docutils.nodes.NodeVisitor` subclass, in Usually done with a `docutils.nodes.NodeVisitor` subclass, in
combination with a call to `docutils.nodes.Node.walk()` or combination with a call to `docutils.nodes.Node.walk()` or
......
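A minimal sketch of the contract described above: `translate` must leave a Unicode string in `self.output`. The writer name and node handling are illustrative, not an existing writer::

    from docutils import writers

    class PlainTextWriter(writers.Writer):

        supported = ('plaintext',)

        def translate(self):
            # `self.document` is set by write(); astext() flattens the
            # doctree to a Unicode string.
            self.output = self.document.astext()

Such a writer could then be passed via the `writer` argument of `docutils.core.publish_string`.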
...@@ -50,7 +50,7 @@ class Writer(writers.Writer): ...@@ -50,7 +50,7 @@ class Writer(writers.Writer):
doctype = ( doctype = (
'<!DOCTYPE document PUBLIC' '<!DOCTYPE document PUBLIC'
' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"' ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
' "http://docutils.sourceforge.net/spec/docutils.dtd">\n') ' "http://docutils.sourceforge.net/docs/ref/docutils.dtd">\n')
generator = '<!-- Generated by Docutils %s -->\n' generator = '<!-- Generated by Docutils %s -->\n'
def translate(self): def translate(self):
......
...@@ -418,7 +418,6 @@ class HTMLTranslator(nodes.NodeVisitor): ...@@ -418,7 +418,6 @@ class HTMLTranslator(nodes.NodeVisitor):
self.body.append(self.starttag(node, 'table', CLASS='citation', self.body.append(self.starttag(node, 'table', CLASS='citation',
frame="void", rules="none")) frame="void", rules="none"))
self.body.append('<colgroup><col class="label" /><col /></colgroup>\n' self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
'<col />\n'
'<tbody valign="top">\n' '<tbody valign="top">\n'
'<tr>') '<tr>')
self.footnote_backrefs(node) self.footnote_backrefs(node)
...@@ -1143,7 +1142,7 @@ class HTMLTranslator(nodes.NodeVisitor): ...@@ -1143,7 +1142,7 @@ class HTMLTranslator(nodes.NodeVisitor):
self.body.append( self.body.append(
# "border=None" is a boolean attribute; # "border=None" is a boolean attribute;
# it means "standard border", not "no border": # it means "standard border", not "no border":
self.starttag(node, 'table', CLASS="table", border=None)) self.starttag(node, 'table', CLASS="table", border="1"))
def depart_table(self, node): def depart_table(self, node):
self.body.append('</table>\n') self.body.append('</table>\n')
......
...@@ -34,10 +34,10 @@ class Writer(writers.Writer): ...@@ -34,10 +34,10 @@ class Writer(writers.Writer):
['--documentclass'], ['--documentclass'],
{'default': 'article', }), {'default': 'article', }),
('Specify document options. Multiple options can be given, ' ('Specify document options. Multiple options can be given, '
'separated by commas. Default is "10pt".', 'separated by commas. Default is "10pt,a4paper".',
['--documentoptions'], ['--documentoptions'],
{'default': '10pt', }), {'default': '10pt,a4paper', }),
('Use LaTeX footnotes. ' ('Use LaTeX footnotes. LaTeX supports only numbered footnotes (does it?). '
'Default: no, uses figures.', 'Default: no, uses figures.',
['--use-latex-footnotes'], ['--use-latex-footnotes'],
{'default': 0, 'action': 'store_true', {'default': 0, 'action': 'store_true',
...@@ -47,6 +47,11 @@ class Writer(writers.Writer): ...@@ -47,6 +47,11 @@ class Writer(writers.Writer):
['--footnote-references'], ['--footnote-references'],
{'choices': ['superscript', 'brackets'], 'default': 'brackets', {'choices': ['superscript', 'brackets'], 'default': 'brackets',
'metavar': '<format>'}), 'metavar': '<format>'}),
('Use LaTeX citations. '
'Default: no, uses figures which might get mixed with images.',
['--use-latex-citations'],
{'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}),
('Format for block quote attributions: one of "dash" (em-dash ' ('Format for block quote attributions: one of "dash" (em-dash '
'prefix), "parentheses"/"parens", or "none". Default is "dash".', 'prefix), "parentheses"/"parens", or "none". Default is "dash".',
['--attribution'], ['--attribution'],
...@@ -112,7 +117,20 @@ class Writer(writers.Writer): ...@@ -112,7 +117,20 @@ class Writer(writers.Writer):
['--use-verbatim-when-possible'], ['--use-verbatim-when-possible'],
{'default': 0, 'action': 'store_true', {'default': 0, 'action': 'store_true',
'validator': frontend.validate_boolean}), 'validator': frontend.validate_boolean}),
)) ('Table style. "standard" with horizontal and vertical lines, '
'"booktabs" (LaTeX booktabs style) with only horizontal lines '
'above and below the table and below the header, or "nolines". '
'Default: "standard".',
['--table-style'],
{'choices': ['standard', 'booktabs','nolines'], 'default': 'standard',
'metavar': '<format>'}),
('LaTeX graphicx package option. '
'Possible values are "dvips", "pdftex". "auto" includes LaTeX code '
'to use "pdftex" if processing with pdf(la)tex and dvips otherwise. '
'Default is no option.',
['--graphicx-option'],
{'default': ''}),
),)
settings_defaults = {'output_encoding': 'latin-1'} settings_defaults = {'output_encoding': 'latin-1'}
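Taken together, the new LaTeX writer options can be driven from Python roughly like this, assuming `publish_string` and the usual dash-to-underscore setting names::

    from docutils.core import publish_string

    latex = publish_string(
        'Hello, *world*!\n',
        writer_name='latex2e',
        settings_overrides={
            'documentoptions': '11pt,a4paper',
            'use_latex_citations': 1,
            'table_style': 'booktabs',      # emits \usepackage{booktabs}
            'graphicx_option': 'pdftex',
        })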
...@@ -285,10 +303,15 @@ class DocumentClass: ...@@ -285,10 +303,15 @@ class DocumentClass:
# BUG: LaTeX has no deeper sections (actually paragrah is no # BUG: LaTeX has no deeper sections (actually paragrah is no
# section either). # section either).
# BUG: No support for unknown document classes. Make 'article'
# default?
_class_sections = { _class_sections = {
'book': ( 'chapter', 'section', 'subsection', 'subsubsection' ), 'book': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'scrbook': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'report': ( 'chapter', 'section', 'subsection', 'subsubsection' ), 'report': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'scrreprt': ( 'chapter', 'section', 'subsection', 'subsubsection' ),
'article': ( 'section', 'subsection', 'subsubsection' ), 'article': ( 'section', 'subsection', 'subsubsection' ),
'scrartcl': ( 'section', 'subsection', 'subsubsection' ),
} }
_deepest_section = 'subsubsection' _deepest_section = 'subsubsection'
...@@ -307,28 +330,183 @@ class DocumentClass: ...@@ -307,28 +330,183 @@ class DocumentClass:
else: else:
return self._deepest_section return self._deepest_section
class Table:
""" Manage a table while traversing.
    Maybe change this to a mixin defining the visit/depart methods, but then
    the Table's internal variables would live in the Translator.
"""
def __init__(self,latex_type,table_style):
self._latex_type = latex_type
self._table_style = table_style
self._open = 0
# miscellaneous attributes
self._attrs = {}
self._col_width = []
self._rowspan = []
def open(self):
self._open = 1
self._col_specs = []
self.caption = None
self._attrs = {}
        self._in_thead = 0
def close(self):
self._open = 0
self._col_specs = None
self.caption = None
self._attrs = {}
def is_open(self):
return self._open
def used_packages(self):
if self._table_style == 'booktabs':
return '\\usepackage{booktabs}\n'
return ''
def get_latex_type(self):
return self._latex_type
def set(self,attr,value):
self._attrs[attr] = value
def get(self,attr):
if self._attrs.has_key(attr):
return self._attrs[attr]
return None
def get_vertical_bar(self):
if self._table_style == 'standard':
return '|'
return ''
    # horizontal lines are drawn below a row.
def get_opening(self):
return '\\begin{%s}[c]' % self._latex_type
def get_closing(self):
line = ""
if self._table_style == 'booktabs':
line = '\\bottomrule\n'
elif self._table_style == 'standard':
            line = '\\hline\n'
return '%s\\end{%s}' % (line,self._latex_type)
def visit_colspec(self,node):
self._col_specs.append(node)
def get_colspecs(self):
"""
Return column specification for longtable.
        Assumes a reST line length of 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
        usually gets too narrow, therefore we add 1 (fiddle factor).
"""
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self._col_specs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
self._col_width = []
self._rowspan = []
        # do not make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
bar = self.get_vertical_bar()
latex_table_spec = ""
for node in self._col_specs:
colwidth = factor * float(node['colwidth']+1) / width
self._col_width.append(colwidth+0.005)
self._rowspan.append(0)
latex_table_spec += "%sp{%.2f\\locallinewidth}" % (bar,colwidth+0.005)
return latex_table_spec+bar
def get_column_width(self):
""" return columnwidth for current cell (not multicell)
"""
return "%.2f\\locallinewidth" % self._col_width[self._cell_in_row-1]
def visit_thead(self):
self._in_thead = 1
if self._table_style == 'standard':
return ['\\hline\n']
elif self._table_style == 'booktabs':
return ['\\toprule\n']
return []
def depart_thead(self):
a = []
#if self._table_style == 'standard':
# a.append('\\hline\n')
if self._table_style == 'booktabs':
a.append('\\midrule\n')
a.append('\\endhead\n')
# for longtable one could add firsthead, foot and lastfoot
self._in_thead = 0
return a
def visit_row(self):
self._cell_in_row = 0
def depart_row(self):
res = [' \\\\\n']
self._cell_in_row = None # remove cell counter
for i in range(len(self._rowspan)):
if (self._rowspan[i]>0):
self._rowspan[i] -= 1
if self._table_style == 'standard':
rowspans = []
for i in range(len(self._rowspan)):
if (self._rowspan[i]<=0):
rowspans.append(i+1)
if len(rowspans)==len(self._rowspan):
res.append('\\hline\n')
else:
cline = ''
rowspans.reverse()
# TODO merge clines
while 1:
try:
c_start = rowspans.pop()
                    except IndexError:
break
cline += '\\cline{%d-%d}\n' % (c_start,c_start)
res.append(cline)
return res
def set_rowspan(self,cell,value):
try:
self._rowspan[cell] = value
        except IndexError:
pass
def get_rowspan(self,cell):
try:
return self._rowspan[cell]
        except IndexError:
return 0
def get_entry_number(self):
return self._cell_in_row
def visit_entry(self):
self._cell_in_row += 1
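Not part of the patch, but a sketch of how the translator below is expected to drive this helper; plain dicts stand in for docutils colspec nodes, since only node['colwidth'] is read:

# Illustration only: roughly what the visit_*/depart_* methods do for a
# two-column table in booktabs style.
t = Table('longtable', 'booktabs')
t.open()
for spec in ({'colwidth': 5}, {'colwidth': 5}):   # fake colspec nodes
    t.visit_colspec(spec)
head = [t.get_opening(),               # '\begin{longtable}[c]'
        '{%s}\n' % t.get_colspecs()]   # '{p{0.07\locallinewidth}p{0.07\locallinewidth}}\n'
head.extend(t.visit_thead())           # '\toprule\n' (booktabs)
head.extend(t.depart_thead())          # '\midrule\n', '\endhead\n'
closing = t.get_closing()              # '\bottomrule\n\end{longtable}'
t.close()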
class LaTeXTranslator(nodes.NodeVisitor): class LaTeXTranslator(nodes.NodeVisitor):
# When options are given to the documentclass, latex will pass them # When options are given to the documentclass, latex will pass them
# to other packages, as done with babel. # to other packages, as done with babel.
# Dummy settings might be taken from document settings # Dummy settings might be taken from document settings
d_paper = 'a4paper' # papersize
d_margins = '2cm'
latex_head = '\\documentclass[%s]{%s}\n' latex_head = '\\documentclass[%s]{%s}\n'
encoding = '\\usepackage[%s]{inputenc}\n' encoding = '\\usepackage[%s]{inputenc}\n'
linking = '\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n' linking = '\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n'
geometry = '\\usepackage[%s,margin=%s,nohead]{geometry}\n'
stylesheet = '\\input{%s}\n' stylesheet = '\\input{%s}\n'
# add a generated on day , machine by user using docutils version. # add a generated on day , machine by user using docutils version.
generator = '%% generator Docutils: http://docutils.sourceforge.net/\n' generator = '%% generator Docutils: http://docutils.sourceforge.net/\n'
# use latex tableofcontents or let docutils do it. # use latex tableofcontents or let docutils do it.
use_latex_toc = 0 use_latex_toc = 0
# table kind: if 0 tabularx (single page), 1 longtable
# maybe should be decided on row count.
use_longtable = 1
# TODO: use mixins for different implementations. # TODO: use mixins for different implementations.
# list environment for option-list. else tabularx # list environment for option-list. else tabularx
use_optionlist_for_option_list = 1 use_optionlist_for_option_list = 1
...@@ -354,8 +532,10 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -354,8 +532,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.use_latex_toc = settings.use_latex_toc self.use_latex_toc = settings.use_latex_toc
self.use_latex_docinfo = settings.use_latex_docinfo self.use_latex_docinfo = settings.use_latex_docinfo
self.use_latex_footnotes = settings.use_latex_footnotes self.use_latex_footnotes = settings.use_latex_footnotes
self._use_latex_citations = settings.use_latex_citations
self.hyperlink_color = settings.hyperlink_color self.hyperlink_color = settings.hyperlink_color
self.compound_enumerators = settings.compound_enumerators self.compound_enumerators = settings.compound_enumerators
self.fontenc = ''
self.section_prefix_for_enumerators = ( self.section_prefix_for_enumerators = (
settings.section_prefix_for_enumerators) settings.section_prefix_for_enumerators)
self.section_enumerator_separator = ( self.section_enumerator_separator = (
...@@ -377,15 +557,47 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -377,15 +557,47 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.babel.get_language() self.babel.get_language()
self.d_class = DocumentClass(settings.documentclass) self.d_class = DocumentClass(settings.documentclass)
        # object for a table while processing.
self.active_table = Table('longtable',settings.table_style)
# HACK. Should have more sophisticated typearea handling.
if settings.documentclass.find('scr') == -1:
self.typearea = '\\usepackage[DIV12]{typearea}\n'
else:
if self.d_options.find('DIV') == -1 and self.d_options.find('BCOR') == -1:
self.typearea = '\\typearea{12}\n'
else:
self.typearea = ''
if self.fontenc == 'T1':
fontenc = '\\usepackage[T1]{fontenc}\n'
else:
fontenc = ''
if self.settings.graphicx_option == '':
self.graphicx_package = '\\usepackage{graphicx}\n'
elif self.settings.graphicx_option.lower() == 'auto':
self.graphicx_package = '\n'.join(
('%Check if we are compiling under latex or pdflatex',
'\\ifx\\pdftexversion\\undefined',
' \\usepackage{graphicx}',
'\\else',
' \\usepackage[pdftex]{graphicx}',
'\\fi\n'))
else:
self.graphicx_package = (
'\\usepackage[%s]{graphicx}\n' % self.settings.graphicx_option)
self.head_prefix = [ self.head_prefix = [
self.latex_head % (self.d_options,self.settings.documentclass), self.latex_head % (self.d_options,self.settings.documentclass),
'\\usepackage{babel}\n', # language is in documents settings. '\\usepackage{babel}\n', # language is in documents settings.
fontenc,
'\\usepackage{shortvrb}\n', # allows verb in footnotes. '\\usepackage{shortvrb}\n', # allows verb in footnotes.
self.encoding % self.to_latex_encoding(settings.output_encoding), self.encoding % self.to_latex_encoding(settings.output_encoding),
# * tabularx: for docinfo, automatic width of columns, always on one page. # * tabularx: for docinfo, automatic width of columns, always on one page.
'\\usepackage{tabularx}\n', '\\usepackage{tabularx}\n',
'\\usepackage{longtable}\n', '\\usepackage{longtable}\n',
self.active_table.used_packages(),
# possible other packages. # possible other packages.
# * fancyhdr # * fancyhdr
# * ltxtable is a combination of tabularx and longtable (pagebreaks). # * ltxtable is a combination of tabularx and longtable (pagebreaks).
...@@ -394,13 +606,12 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -394,13 +606,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
# extra space between text in tables and the line above them # extra space between text in tables and the line above them
'\\setlength{\\extrarowheight}{2pt}\n', '\\setlength{\\extrarowheight}{2pt}\n',
'\\usepackage{amsmath}\n', # what fore amsmath. '\\usepackage{amsmath}\n', # what fore amsmath.
'\\usepackage{graphicx}\n', self.graphicx_package,
'\\usepackage{color}\n', '\\usepackage{color}\n',
'\\usepackage{multirow}\n', '\\usepackage{multirow}\n',
'\\usepackage{ifthen}\n', # before hyperref!
self.linking % (self.colorlinks, self.hyperlink_color, self.hyperlink_color), self.linking % (self.colorlinks, self.hyperlink_color, self.hyperlink_color),
# geometry and fonts might go into style.tex. self.typearea,
self.geometry % (self.d_paper, self.d_margins),
#
self.generator, self.generator,
# latex lengths # latex lengths
'\\newlength{\\admonitionwidth}\n', '\\newlength{\\admonitionwidth}\n',
...@@ -443,7 +654,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -443,7 +654,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.context = [] self.context = []
self.topic_class = '' self.topic_class = ''
# column specification for tables # column specification for tables
self.colspecs = []
self.table_caption = None self.table_caption = None
# do we have one or more authors # do we have one or more authors
self.author_stack = None self.author_stack = None
...@@ -470,6 +680,8 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -470,6 +680,8 @@ class LaTeXTranslator(nodes.NodeVisitor):
# them into a compound enumeration # them into a compound enumeration
self._enumeration_counters = [] self._enumeration_counters = []
self._bibitems = []
# docinfo. # docinfo.
self.docinfo = None self.docinfo = None
# inside literal block: no quote mangling. # inside literal block: no quote mangling.
...@@ -527,6 +739,23 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -527,6 +739,23 @@ class LaTeXTranslator(nodes.NodeVisitor):
def language_label(self, docutil_label): def language_label(self, docutil_label):
return self.language.labels[docutil_label] return self.language.labels[docutil_label]
def utf8_to_latex(self,text):
# see LaTeX codec http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/252124
        # Only some special characters are translated; for documents with many
        # utf-8 characters one should use the LaTeX unicode package.
latex_equivalents = {
u'\u00A0' : '~',
u'\u00A9' : '{\\copyright}',
u'\u2013' : '{--}',
u'\u2014' : '{---}',
u'\u2020' : '{\\dag}',
u'\u2021' : '{\\ddag}',
u'\u21d4' : '{$\\Leftrightarrow$}',
}
for uchar in latex_equivalents.keys():
text = text.replace(uchar,latex_equivalents[uchar])
return text
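For illustration only (the sample string is made up), the mapping behaves as follows; utf8_to_latex only uses its text argument, so a standalone sketch gives the same result:

# Standalone sketch of the replacement table above.
equivalents = {u'\u00A0': '~', u'\u00A9': '{\\copyright}',
               u'\u2013': '{--}', u'\u2020': '{\\dag}'}
sample = u'\u00a9 2004\u00a0\u2013 see note\u2020'
for uchar, repl in equivalents.items():
    sample = sample.replace(uchar, repl)
# sample == u'{\\copyright} 2004~{--} see note{\\dag}'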
def encode(self, text): def encode(self, text):
""" """
Encode special characters in `text` & return. Encode special characters in `text` & return.
...@@ -563,7 +792,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -563,7 +792,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
text = text.replace(">", '{\\textgreater}') text = text.replace(">", '{\\textgreater}')
# then # then
text = text.replace("&", '{\\&}') text = text.replace("&", '{\\&}')
text = text.replace("_", '{\\_}')
# the ^: # the ^:
# * verb|^| does not work in mbox. # * verb|^| does not work in mbox.
# * mathmode has wedge. hat{~} would also work. # * mathmode has wedge. hat{~} would also work.
...@@ -575,10 +803,19 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -575,10 +803,19 @@ class LaTeXTranslator(nodes.NodeVisitor):
if self.literal_block or self.literal: if self.literal_block or self.literal:
# pdflatex does not produce doublequotes for ngerman. # pdflatex does not produce doublequotes for ngerman.
text = self.babel.double_quotes_in_tt(text) text = self.babel.double_quotes_in_tt(text)
if self.fontenc == 'T1':
# make sure "--" does not become a "-".
# the same for "<<" and ">>".
text = text.replace("--","-{}-").replace("--","-{}-")
text = text.replace(">>",">{}>").replace(">>",">{}>")
text = text.replace("<<","<{}<").replace("<<","<{}<")
                # replace underscore by an underlined blank, because this has the correct width.
text = text.replace("_", '{\\underline{ }}')
else: else:
text = self.babel.quote_quotes(text) text = self.babel.quote_quotes(text)
if self.insert_newline: text = text.replace("_", '{\\_}')
# HACK: insert a blank before the newline, to avoid if self.insert_newline or self.literal_block:
# Insert a blank before the newline, to avoid
# ! LaTeX Error: There's no line here to end. # ! LaTeX Error: There's no line here to end.
text = text.replace("\n", '~\\\\\n') text = text.replace("\n", '~\\\\\n')
elif self.mbox_newline: elif self.mbox_newline:
...@@ -589,10 +826,12 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -589,10 +826,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
closings = "" closings = ""
openings = "" openings = ""
text = text.replace("\n", "%s}\\\\\n\\mbox{%s" % (closings,openings)) text = text.replace("\n", "%s}\\\\\n\\mbox{%s" % (closings,openings))
# lines starting with "[" give errors.
text = text.replace('[', '{[}')
if self.insert_none_breaking_blanks: if self.insert_none_breaking_blanks:
text = text.replace(' ', '~') text = text.replace(' ', '~')
# unicode !!! if self.settings.output_encoding != 'utf-8':
text = text.replace(u'\u2020', '{$\\dagger$}') text = self.utf8_to_latex(text)
return text return text
def attval(self, text, def attval(self, text,
...@@ -709,28 +948,46 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -709,28 +948,46 @@ class LaTeXTranslator(nodes.NodeVisitor):
def depart_caution(self, node): def depart_caution(self, node):
self.depart_admonition() self.depart_admonition()
def visit_citation(self, node):
self.visit_footnote(node)
def depart_citation(self, node):
self.depart_footnote(node)
def visit_title_reference(self, node): def visit_title_reference(self, node):
self.body.append( '\\titlereference{' ) self.body.append( '\\titlereference{' )
def depart_title_reference(self, node): def depart_title_reference(self, node):
self.body.append( '}' ) self.body.append( '}' )
def visit_citation(self, node):
# TODO maybe use cite bibitems
if self._use_latex_citations:
self.context.append(len(self.body))
else:
self.body.append('\\begin{figure}[b]')
self.body.append('\\hypertarget{%s}' % node['id'])
def depart_citation(self, node):
if self._use_latex_citations:
size = self.context.pop()
label = self.body[size]
text = ''.join(self.body[size+1:])
del self.body[size:]
self._bibitems.append([label, text])
else:
self.body.append('\\end{figure}\n')
def visit_citation_reference(self, node): def visit_citation_reference(self, node):
href = '' if self._use_latex_citations:
if node.has_key('refid'): self.body.append('\\cite{')
href = node['refid'] else:
elif node.has_key('refname'): href = ''
href = self.document.nameids[node['refname']] if node.has_key('refid'):
self.body.append('[\\hyperlink{%s}{' % href) href = node['refid']
elif node.has_key('refname'):
href = self.document.nameids[node['refname']]
self.body.append('[\\hyperlink{%s}{' % href)
def depart_citation_reference(self, node): def depart_citation_reference(self, node):
self.body.append('}]') if self._use_latex_citations:
self.body.append('}')
else:
self.body.append('}]')
def visit_classifier(self, node): def visit_classifier(self, node):
self.body.append( '(\\textbf{' ) self.body.append( '(\\textbf{' )
...@@ -739,10 +996,7 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -739,10 +996,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append( '})\n' ) self.body.append( '})\n' )
def visit_colspec(self, node): def visit_colspec(self, node):
if self.use_longtable: self.active_table.visit_colspec(node)
self.colspecs.append(node)
else:
self.context[-1] += 1
def depart_colspec(self, node): def depart_colspec(self, node):
pass pass
...@@ -871,12 +1125,25 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -871,12 +1125,25 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_document(self, node): def visit_document(self, node):
self.body_prefix.append('\\begin{document}\n') self.body_prefix.append('\\begin{document}\n')
# BUG: \maketitle without title (i.e. --no-doc-title) adds
# unnecessary vspace.
self.body_prefix.append('\\maketitle\n\n') self.body_prefix.append('\\maketitle\n\n')
# alternative use titlepage environment. # alternative use titlepage environment.
# \begin{titlepage} # \begin{titlepage}
self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n') self.body.append('\n\\setlength{\\locallinewidth}{\\linewidth}\n')
def depart_document(self, node): def depart_document(self, node):
        # TODO: the insertion point of the bibliography should not be automatic.
if self._use_latex_citations and len(self._bibitems)>0:
widest_label = ""
for bi in self._bibitems:
if len(widest_label)<len(bi[0]):
widest_label = bi[0]
self.body.append('\n\\begin{thebibliography}{%s}\n'%widest_label)
for bi in self._bibitems:
self.body.append('\\bibitem[%s]{%s}{%s}\n' % (bi[0], bi[0], bi[1]))
self.body.append('\\end{thebibliography}\n')
self.body_suffix.append('\\end{document}\n') self.body_suffix.append('\\end{document}\n')
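A small end-to-end sketch of the citation path added here, assuming the publish_string front end; the citation label and text are made up:

# Illustration: with use_latex_citations, citation references become
# \cite{...} and the collected citations are emitted as a single
# thebibliography environment at the end of the document, instead of
# one figure per citation.
from docutils.core import publish_string

source = """\
As argued in [GRU2004]_, tables are hard.

.. [GRU2004] A made-up reference, used only for this example.
"""
latex = publish_string(source, writer_name='latex',
                       settings_overrides={'use_latex_citations': 1})
assert '\\begin{thebibliography}' in latex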
def visit_emphasis(self, node): def visit_emphasis(self, node):
...@@ -888,33 +1155,41 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -888,33 +1155,41 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.literal_block_stack.pop() self.literal_block_stack.pop()
def visit_entry(self, node): def visit_entry(self, node):
self.active_table.visit_entry()
# cell separation # cell separation
column_one = 1 if self.active_table.get_entry_number() == 1:
if self.context[-1] > 0: # if the firstrow is a multirow, this actually is the second row.
column_one = 0 # this gets hairy if rowspans follow each other.
if not column_one: if self.active_table.get_rowspan(0):
self.body.append(' & ')
self.active_table.visit_entry() # increment cell count
else:
self.body.append(' & ') self.body.append(' & ')
# multi{row,column} # multi{row,column}
        # WORK IN PROGRESS: BUG/TODO/HACK continues here.
        # multirow in LaTeX simply enlarges the cell over several rows
        # (the following n rows if n is positive, the preceding ones if negative).
if node.has_key('morerows') and node.has_key('morecols'): if node.has_key('morerows') and node.has_key('morecols'):
raise NotImplementedError('LaTeX can\'t handle cells that ' raise NotImplementedError('Cells that '
'span multiple rows *and* columns, sorry.') 'span multiple rows *and* columns are not supported, sorry.')
atts = {}
if node.has_key('morerows'): if node.has_key('morerows'):
raise NotImplementedError('multiple rows are not working (yet), sorry.')
count = node['morerows'] + 1 count = node['morerows'] + 1
self.body.append('\\multirow{%d}*{' % count) self.active_table.set_rowspan(self.active_table.get_entry_number()-1,count)
self.body.append('\\multirow{%d}{%s}{' % \
(count,self.active_table.get_column_width()))
self.context.append('}') self.context.append('}')
# BUG following rows must have empty cells. # BUG following rows must have empty cells.
elif node.has_key('morecols'): elif node.has_key('morecols'):
# the vertical bar before column is missing if it is the first column. # the vertical bar before column is missing if it is the first column.
# the one after always. # the one after always.
if column_one: if self.active_table.get_entry_number() == 1:
bar = '|' bar1 = self.active_table.get_vertical_bar()
else: else:
bar = '' bar1 = ''
count = node['morecols'] + 1 count = node['morecols'] + 1
self.body.append('\\multicolumn{%d}{%sl|}{' % (count, bar)) self.body.append('\\multicolumn{%d}{%sl%s}{' % \
(count, bar1, self.active_table.get_vertical_bar()))
self.context.append('}') self.context.append('}')
else: else:
self.context.append('') self.context.append('')
...@@ -929,7 +1204,16 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -929,7 +1204,16 @@ class LaTeXTranslator(nodes.NodeVisitor):
def depart_entry(self, node): def depart_entry(self, node):
self.body.append(self.context.pop()) # header / not header self.body.append(self.context.pop()) # header / not header
self.body.append(self.context.pop()) # multirow/column self.body.append(self.context.pop()) # multirow/column
self.context[-1] += 1 # if following row is spanned from above.
if self.active_table.get_rowspan(self.active_table.get_entry_number()):
self.body.append(' & ')
self.active_table.visit_entry() # increment cell count
def visit_row(self, node):
self.active_table.visit_row()
def depart_row(self, node):
self.body.extend(self.active_table.depart_row())
def visit_enumerated_list(self, node): def visit_enumerated_list(self, node):
# We create our own enumeration list environment. # We create our own enumeration list environment.
...@@ -1034,10 +1318,10 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1034,10 +1318,10 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append(':]') self.body.append(':]')
def visit_figure(self, node): def visit_figure(self, node):
self.body.append( '\\begin{figure}\n' ) self.body.append( '\\begin{figure}[htbp]\\begin{center}\n' )
def depart_figure(self, node): def depart_figure(self, node):
self.body.append( '\\end{figure}\n' ) self.body.append( '\\end{center}\\end{figure}\n' )
def visit_footer(self, node): def visit_footer(self, node):
self.context.append(len(self.body)) self.context.append(len(self.body))
...@@ -1054,15 +1338,14 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1054,15 +1338,14 @@ class LaTeXTranslator(nodes.NodeVisitor):
num,text = node.astext().split(None,1) num,text = node.astext().split(None,1)
num = self.encode(num.strip()) num = self.encode(num.strip())
self.body.append('\\footnotetext['+num+']') self.body.append('\\footnotetext['+num+']')
self.body.append('{'+self.encode(text)+'}') self.body.append('{')
raise nodes.SkipNode
else: else:
self.body.append('\\begin{figure}[b]') self.body.append('\\begin{figure}[b]')
self.body.append('\\hypertarget{%s}' % node['id']) self.body.append('\\hypertarget{%s}' % node['id'])
def depart_footnote(self, node): def depart_footnote(self, node):
if self.use_latex_footnotes: if self.use_latex_footnotes:
self.body.append('}') self.body.append('}\n')
else: else:
self.body.append('\\end{figure}\n') self.body.append('\\end{figure}\n')
...@@ -1070,7 +1353,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1070,7 +1353,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
if self.use_latex_footnotes: if self.use_latex_footnotes:
self.body.append("\\footnotemark["+self.encode(node.astext())+"]") self.body.append("\\footnotemark["+self.encode(node.astext())+"]")
raise nodes.SkipNode raise nodes.SkipNode
return
href = '' href = ''
if node.has_key('refid'): if node.has_key('refid'):
href = node['refid'] href = node['refid']
...@@ -1092,6 +1374,20 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1092,6 +1374,20 @@ class LaTeXTranslator(nodes.NodeVisitor):
return return
self.body.append('}%s' % self.context.pop()) self.body.append('}%s' % self.context.pop())
# footnote/citation label
def visit_label(self, node):
if isinstance(node.parent, nodes.footnote) and self.use_latex_footnotes:
raise nodes.SkipNode
elif isinstance(node.parent, nodes.citation) and self._use_latex_citations:
pass
else:
self.body.append('[')
def depart_label(self, node):
if isinstance(node.parent, nodes.citation) and self._use_latex_citations:
return
self.body.append(']')
# elements generated by the framework e.g. section numbers. # elements generated by the framework e.g. section numbers.
def visit_generated(self, node): def visit_generated(self, node):
pass pass
...@@ -1120,12 +1416,12 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1120,12 +1416,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
pre = [] # in reverse order pre = [] # in reverse order
post = ['\\includegraphics{%s}' % attrs['uri']] post = ['\\includegraphics{%s}' % attrs['uri']]
inline = isinstance(node.parent, nodes.TextElement) inline = isinstance(node.parent, nodes.TextElement)
if 'scale' in attrs: if attrs.has_key('scale'):
# Could also be done with ``scale`` option to # Could also be done with ``scale`` option to
# ``\includegraphics``; doing it this way for consistency. # ``\includegraphics``; doing it this way for consistency.
pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,)) pre.append('\\scalebox{%f}{' % (attrs['scale'] / 100.0,))
post.append('}') post.append('}')
if 'align' in attrs: if attrs.has_key('align'):
align_prepost = { align_prepost = {
# By default latex aligns the top of an image. # By default latex aligns the top of an image.
(1, 'top'): ('', ''), (1, 'top'): ('', ''),
...@@ -1165,13 +1461,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1165,13 +1461,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
def depart_interpreted(self, node): def depart_interpreted(self, node):
self.depart_literal(node) self.depart_literal(node)
def visit_label(self, node):
# footnote/citation label
self.body.append('[')
def depart_label(self, node):
self.body.append(']')
def visit_legend(self, node): def visit_legend(self, node):
self.body.append('{\\small ') self.body.append('{\\small ')
...@@ -1184,10 +1473,11 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1184,10 +1473,11 @@ class LaTeXTranslator(nodes.NodeVisitor):
* inline markup is supported. * inline markup is supported.
* serif typeface * serif typeface
mbox would stop LaTeX from wrapping long lines.
""" """
self.body.append('\\begin{flushleft}\n') self.body.append('\\begin{flushleft}\n')
self.insert_none_breaking_blanks = 1 self.insert_none_breaking_blanks = 1
# mbox would stop LaTeX from wrapping long lines.
# but line_blocks are allowed to wrap.
self.line_block_without_mbox = 1 self.line_block_without_mbox = 1
if self.line_block_without_mbox: if self.line_block_without_mbox:
self.insert_newline = 1 self.insert_newline = 1
...@@ -1205,7 +1495,7 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1205,7 +1495,7 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n\\end{flushleft}\n') self.body.append('\n\\end{flushleft}\n')
def visit_list_item(self, node): def visit_list_item(self, node):
# HACK append "{}" in case the next character is "[", which would break # Append "{}" in case the next character is "[", which would break
# LaTeX's list environment (no numbering and the "[" is not printed). # LaTeX's list environment (no numbering and the "[" is not printed).
self.body.append('\\item {} ') self.body.append('\\item {} ')
...@@ -1228,8 +1518,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1228,8 +1518,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
blocks of text, where the inline markup is not recognized, blocks of text, where the inline markup is not recognized,
but are also the product of the parsed-literal directive, but are also the product of the parsed-literal directive,
where the markup is respected. where the markup is respected.
mbox stops LaTeX from wrapping long lines.
""" """
# In both cases, we want to use a typewriter/monospaced typeface. # In both cases, we want to use a typewriter/monospaced typeface.
# For "real" literal-blocks, we can use \verbatim, while for all # For "real" literal-blocks, we can use \verbatim, while for all
...@@ -1239,34 +1527,39 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1239,34 +1527,39 @@ class LaTeXTranslator(nodes.NodeVisitor):
# siblings the compose this node: if it is composed by a # siblings the compose this node: if it is composed by a
# single element, it's surely is either a real one, otherwise # single element, it's surely is either a real one, otherwise
# it's a parsed-literal that does not contain any markup. # it's a parsed-literal that does not contain any markup.
# #
if (self.settings.use_verbatim_when_possible and (len(node) == 1) if (self.settings.use_verbatim_when_possible and (len(node) == 1)
# in case of a parsed-literal containing just a "**bold**" word: # in case of a parsed-literal containing just a "**bold**" word:
and isinstance(node[0], nodes.Text)): and isinstance(node[0], nodes.Text)):
self.verbatim = 1 self.verbatim = 1
self.body.append('\\begin{verbatim}\n') self.body.append('\\begin{quote}\\begin{verbatim}\n')
else: else:
self.literal_block = 1 self.literal_block = 1
self.insert_none_breaking_blanks = 1 self.insert_none_breaking_blanks = 1
self.body.append('\\begin{ttfamily}\\begin{flushleft}\n') if self.active_table.is_open():
self.mbox_newline = 1 self.body.append('\n{\\ttfamily \\raggedright \\noindent\n')
if self.mbox_newline: else:
self.body.append('\\mbox{') # no quote inside tables, to avoid vertical sppace between
# table border and literal block.
# BUG: fails if normal text preceeds the literal block.
self.body.append('\\begin{quote}')
self.body.append('{\\ttfamily \\raggedright \\noindent\n')
# * obey..: is from julien and never worked for me (grubert). # * obey..: is from julien and never worked for me (grubert).
# self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n') # self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n')
def depart_literal_block(self, node): def depart_literal_block(self, node):
if self.verbatim: if self.verbatim:
self.body.append('\n\\end{verbatim}\n') self.body.append('\n\\end{verbatim}\\end{quote}\n')
self.verbatim = 0 self.verbatim = 0
else: else:
if self.mbox_newline: if self.active_table.is_open():
self.body.append('}') self.body.append('\n}\n')
self.body.append('\n\\end{flushleft}\\end{ttfamily}\n') else:
self.body.append('\n')
self.body.append('}\\end{quote}\n')
self.insert_none_breaking_blanks = 0 self.insert_none_breaking_blanks = 0
self.mbox_newline = 0
# obey end: self.body.append('}\n')
self.literal_block = 0 self.literal_block = 0
# obey end: self.body.append('}\n')
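The quote/verbatim switch above can be observed directly; a sketch in the same style as the earlier publish_string examples, with use_verbatim_when_possible enabled:

# Illustration: a plain literal block (a single Text node) is emitted as
# \begin{quote}\begin{verbatim} ... \end{verbatim}\end{quote}; without
# the option it falls back to the quoted {\ttfamily \raggedright ...} form.
from docutils.core import publish_string

source = "A block::\n\n    literal text\n"
latex = publish_string(source, writer_name='latex',
                       settings_overrides={'use_verbatim_when_possible': 1})
assert '\\begin{verbatim}' in latex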
def visit_meta(self, node): def visit_meta(self, node):
self.body.append('[visit_meta]\n') self.body.append('[visit_meta]\n')
...@@ -1302,7 +1595,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1302,7 +1595,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
if self.use_optionlist_for_option_list: if self.use_optionlist_for_option_list:
self.body.append('\\item [') self.body.append('\\item [')
else: else:
atts = {}
if len(node.astext()) > 14: if len(node.astext()) > 14:
self.body.append('\\multicolumn{2}{l}{') self.body.append('\\multicolumn{2}{l}{')
self.context.append('} \\\\\n ') self.context.append('} \\\\\n ')
...@@ -1398,14 +1690,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1398,14 +1690,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
def depart_revision(self, node): def depart_revision(self, node):
self.depart_docinfo_item(node) self.depart_docinfo_item(node)
def visit_row(self, node):
self.context.append(0)
def depart_row(self, node):
self.context.pop() # remove cell counter
self.body.append(' \\\\ \\hline\n')
# BUG if multirow cells \cline{x-y}
def visit_section(self, node): def visit_section(self, node):
self.section_level += 1 self.section_level += 1
# Initialize counter for potential subsections: # Initialize counter for potential subsections:
...@@ -1482,70 +1766,19 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1482,70 +1766,19 @@ class LaTeXTranslator(nodes.NodeVisitor):
if node['level'] < self.document.reporter['writer'].report_level: if node['level'] < self.document.reporter['writer'].report_level:
raise nodes.SkipNode raise nodes.SkipNode
def depart_system_message(self, node): def depart_system_message(self, node):
self.body.append('\n') self.body.append('\n')
def get_colspecs(self):
"""
Return column specification for longtable.
Assumes reST line length being 80 characters.
Table width is hairy.
=== ===
ABC DEF
=== ===
usually gets to narrow, therefore we add 1 (fiddlefactor).
"""
width = 80
total_width = 0.0
# first see if we get too wide.
for node in self.colspecs:
colwidth = float(node['colwidth']+1) / width
total_width += colwidth
# donot make it full linewidth
factor = 0.93
if total_width > 1.0:
factor /= total_width
latex_table_spec = ""
for node in self.colspecs:
colwidth = factor * float(node['colwidth']+1) / width
latex_table_spec += "|p{%.2f\\locallinewidth}" % (colwidth+0.005)
self.colspecs = []
return latex_table_spec+"|"
def visit_table(self, node): def visit_table(self, node):
if self.use_longtable: if self.active_table.is_open():
self.body.append('\n\\begin{longtable}[c]') print 'nested tables are not supported'
else: raise AssertionError
self.body.append('\n\\begin{tabularx}{\\linewidth}') self.active_table.open()
self.context.append('table_sentinel') # sentinel self.body.append('\n' + self.active_table.get_opening())
self.context.append(0) # column counter
def depart_table(self, node): def depart_table(self, node):
if self.use_longtable: self.body.append(self.active_table.get_closing() + '\n')
self.body.append('\\end{longtable}\n') self.active_table.close()
else:
self.body.append('\\end{tabularx}\n')
sentinel = self.context.pop()
if sentinel != 'table_sentinel':
print 'context:', self.context + [sentinel]
raise AssertionError
def table_preamble(self):
if self.use_longtable:
self.body.append('{%s}\n' % self.get_colspecs())
if self.table_caption:
self.body.append('\\caption{%s}\\\\\n' % self.table_caption)
self.table_caption = None
else:
if self.context[-1] != 'table_sentinel':
self.body.append('{%s}' % ('|X' * self.context.pop() + '|'))
self.body.append('\n\\hline')
def visit_target(self, node): def visit_target(self, node):
# BUG: why not (refuri or refid or refname) means not footnote ? # BUG: why not (refuri or refid or refname) means not footnote ?
...@@ -1562,13 +1795,12 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1562,13 +1795,12 @@ class LaTeXTranslator(nodes.NodeVisitor):
def visit_tbody(self, node): def visit_tbody(self, node):
# BUG write preamble if not yet done (colspecs not []) # BUG write preamble if not yet done (colspecs not [])
# for tables without heads. # for tables without heads.
if self.colspecs: if not self.active_table.get('preamble written'):
self.visit_thead(None) self.visit_thead(None)
self.depart_thead(None) # self.depart_thead(None)
self.body.append('%[visit_tbody]\n')
def depart_tbody(self, node): def depart_tbody(self, node):
self.body.append('%[depart_tbody]\n') pass
def visit_term(self, node): def visit_term(self, node):
self.body.append('\\item[') self.body.append('\\item[')
...@@ -1586,29 +1818,27 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1586,29 +1818,27 @@ class LaTeXTranslator(nodes.NodeVisitor):
pass pass
def visit_thead(self, node): def visit_thead(self, node):
# number_of_columns will be zero after get_colspecs. self.body.append('{%s}\n' % self.active_table.get_colspecs())
# BUG ! push onto context for depart to pop it. if self.active_table.caption:
number_of_columns = len(self.colspecs) self.body.append('\\caption{%s}\\\\\n' % self.active_table.caption)
self.table_preamble() self.active_table.set('preamble written',1)
#BUG longtable needs firstpage and lastfooter too. # TODO longtable supports firsthead and lastfoot too.
self.body.append('\\hline\n') self.body.extend(self.active_table.visit_thead())
def depart_thead(self, node): def depart_thead(self, node):
if self.use_longtable: # the table header written should be on every page
# the table header written should be on every page # => \endhead
# => \endhead self.body.extend(self.active_table.depart_thead())
self.body.append('\\endhead\n') # and the firsthead => \endfirsthead
# and the firsthead => \endfirsthead # BUG i want a "continued from previous page" on every not
# BUG i want a "continued from previous page" on every not # firsthead, but then we need the header twice.
# firsthead, but then we need the header twice. #
# # there is a \endfoot and \endlastfoot too.
# there is a \endfoot and \endlastfoot too. # but we need the number of columns to
# but we need the number of columns to # self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns)
# self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns) # self.body.append('\\hline\n\\endfoot\n')
# self.body.append('\\hline\n\\endfoot\n') # self.body.append('\\hline\n')
# self.body.append('\\hline\n') # self.body.append('\\endlastfoot\n')
# self.body.append('\\endlastfoot\n')
def visit_tip(self, node): def visit_tip(self, node):
self.visit_admonition(node, 'tip') self.visit_admonition(node, 'tip')
...@@ -1630,14 +1860,13 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1630,14 +1860,13 @@ class LaTeXTranslator(nodes.NodeVisitor):
if l>0: if l>0:
l = l-1 l = l-1
# pdftex does not like "_" subscripts in titles # pdftex does not like "_" subscripts in titles
text = node.astext().replace("_","\\_") text = self.encode(node.astext())
self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \ self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \
(l,text,node.parent['id'])) (l,text,node.parent['id']))
def visit_title(self, node): def visit_title(self, node):
"""Only 3 section levels are supported by LaTeX article (AFAIR).""" """Only 3 section levels are supported by LaTeX article (AFAIR)."""
if isinstance(node.parent, nodes.topic): if isinstance(node.parent, nodes.topic):
# section titles before the table of contents. # section titles before the table of contents.
self.bookmark(node) self.bookmark(node)
...@@ -1646,12 +1875,15 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1646,12 +1875,15 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\\subsection*{~\\hfill ') self.body.append('\\subsection*{~\\hfill ')
# the closing brace for subsection. # the closing brace for subsection.
self.context.append('\\hfill ~}\n') self.context.append('\\hfill ~}\n')
elif isinstance(node.parent, nodes.sidebar): # TODO: for admonition titles before the first section
# either specify every possible node or ... ?
elif isinstance(node.parent, nodes.sidebar) \
or isinstance(node.parent, nodes.admonition):
self.body.append('\\textbf{\\large ') self.body.append('\\textbf{\\large ')
self.context.append('}\n\\smallskip\n') self.context.append('}\n\\smallskip\n')
elif isinstance(node.parent, nodes.table): elif isinstance(node.parent, nodes.table):
# caption must be written after column spec # caption must be written after column spec
self.table_caption = node.astext() self.active_table.caption = node.astext()
raise nodes.SkipNode raise nodes.SkipNode
elif self.section_level == 0: elif self.section_level == 0:
# document title # document title
...@@ -1690,8 +1922,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1690,8 +1922,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n') self.body.append('\n')
def visit_rubric(self, node): def visit_rubric(self, node):
# self.body.append('\\hfill {\\color{red}\\bfseries{}')
# self.context.append('} \\hfill ~\n')
self.body.append('\\rubric{') self.body.append('\\rubric{')
self.context.append('}\n') self.context.append('}\n')
...@@ -1705,7 +1935,6 @@ class LaTeXTranslator(nodes.NodeVisitor): ...@@ -1705,7 +1935,6 @@ class LaTeXTranslator(nodes.NodeVisitor):
self.body.append('\n\n') self.body.append('\n\n')
def depart_transition(self, node): def depart_transition(self, node):
#self.body.append('[depart_transition]')
pass pass
def visit_version(self, node): def visit_version(self, node):
......
...@@ -15,6 +15,19 @@ ...@@ -15,6 +15,19 @@
This implementation requires docutils 0.3.4+ from http://docutils.sf.net/ This implementation requires docutils 0.3.4+ from http://docutils.sf.net/
""" """
try:
import docutils
except ImportError:
raise ImportError, 'Please install docutils 0.3.3+ from http://docutils.sourceforge.net/#download.'
version = docutils.__version__.split('.')
if version < ['0', '3', '3']:
raise ImportError, """Old version of docutils found:
Got: %(version)s, required: 0.3.3+
Please remove docutils from %(path)s and replace it with a new version. You
can download docutils at http://docutils.sourceforge.net/#download.
""" % {'version' : docutils.__version__, 'path' : docutils.__path__[0] }
import sys, os, locale import sys, os, locale
from App.config import getConfiguration from App.config import getConfiguration
from docutils.core import publish_parts from docutils.core import publish_parts
...@@ -28,10 +41,10 @@ default_input_encoding = getConfiguration().rest_input_encoding or default_enc ...@@ -28,10 +41,10 @@ default_input_encoding = getConfiguration().rest_input_encoding or default_enc
default_level = 3 default_level = 3
initial_header_level = getConfiguration().rest_header_level or default_level initial_header_level = getConfiguration().rest_header_level or default_level
# default language # default language used for internal translations and language mappings for DTD
default_lang = getConfiguration().locale or locale.getdefaultlocale()[0] # elements
if default_lang and '_' in default_lang: default_lang = 'en'
default_lang = default_lang[:default_lang.index('_')] default_language_code = getConfiguration().rest_language_code or default_lang
class Warnings: class Warnings:
...@@ -48,7 +61,7 @@ def render(src, ...@@ -48,7 +61,7 @@ def render(src,
stylesheet='default.css', stylesheet='default.css',
input_encoding=default_input_encoding, input_encoding=default_input_encoding,
output_encoding=default_output_encoding, output_encoding=default_output_encoding,
language_code=default_lang, language_code=default_language_code,
initial_header_level = initial_header_level, initial_header_level = initial_header_level,
settings = {}): settings = {}):
"""get the rendered parts of the document the and warning object """get the rendered parts of the document the and warning object
...@@ -60,7 +73,7 @@ def render(src, ...@@ -60,7 +73,7 @@ def render(src,
settings['stylesheet'] = stylesheet settings['stylesheet'] = stylesheet
settings['language_code'] = language_code settings['language_code'] = language_code
# starting level for <H> elements: # starting level for <H> elements:
settings['initial_header_level'] = initial_header_level settings['initial_header_level'] = initial_header_level + 1
# set the reporting level to something sane: # set the reporting level to something sane:
settings['report_level'] = report_level settings['report_level'] = report_level
# don't break if we get errors: # don't break if we get errors:
...@@ -80,7 +93,7 @@ def HTML(src, ...@@ -80,7 +93,7 @@ def HTML(src,
stylesheet='default.css', stylesheet='default.css',
input_encoding=default_input_encoding, input_encoding=default_input_encoding,
output_encoding=default_output_encoding, output_encoding=default_output_encoding,
language_code=default_lang, language_code=default_language_code,
initial_header_level = initial_header_level, initial_header_level = initial_header_level,
warnings = None, warnings = None,
settings = {}): settings = {}):
...@@ -119,17 +132,23 @@ def HTML(src, ...@@ -119,17 +132,23 @@ def HTML(src,
initial_header_level = initial_header_level, initial_header_level = initial_header_level,
settings = settings) settings = settings)
output = ('<h%(level)s class="title">%(title)s</h%(level)s>\n' header = '<h%(level)s class="title">%(title)s</h%(level)s>\n' % {
'%(docinfo)s%(body)s' % {
'level': initial_header_level, 'level': initial_header_level,
'title': parts['title'], 'title': parts['title'],
}
body = '%(docinfo)s%(body)s' % {
'docinfo': parts['docinfo'], 'docinfo': parts['docinfo'],
'body': parts['body'] 'body': parts['body'],
}).encode(output_encoding) }
warnings = ''.join(warning_stream.messages) if parts['title']:
output = header + body
else:
output = body
return output warnings = ''.join(warning_stream.messages)
return output.encode(output_encoding)
__all__ = ("HTML", 'render') __all__ = ("HTML", 'render')
...@@ -3,8 +3,9 @@ ...@@ -3,8 +3,9 @@
======================================= =======================================
:Author: David Goodger :Author: David Goodger
:Contact: goodger@users.sourceforge.net :Contact: goodger@users.sourceforge.net
:Revision: $Revision: 1.2 $ :Revision: $Revision: 1.64 $
:Date: $Date: 2003/02/01 15:45:16 $ :Date: $Date: 2004/07/24 18:35:08 $
:Copyright: This document has been placed in the public domain.
.. Note:: .. Note::
...@@ -13,8 +14,8 @@ ...@@ -13,8 +14,8 @@
reStructuredText, please read `A ReStructuredText Primer`_ and the reStructuredText, please read `A ReStructuredText Primer`_ and the
`Quick reStructuredText`_ user reference first. `Quick reStructuredText`_ user reference first.
.. _A ReStructuredText Primer: ../../docs/rst/quickstart.html .. _A ReStructuredText Primer: ../../user/rst/quickstart.html
.. _Quick reStructuredText: ../../docs/rst/quickref.html .. _Quick reStructuredText: ../../user/rst/quickref.html
reStructuredText_ is plaintext that uses simple and intuitive reStructuredText_ is plaintext that uses simple and intuitive
...@@ -123,8 +124,9 @@ Here are examples of `body elements`_: ...@@ -123,8 +124,9 @@ Here are examples of `body elements`_:
- `Literal blocks`_:: - `Literal blocks`_::
Literal blocks are indented, and indicated with a double-colon Literal blocks are either indented or line-prefix-quoted blocks,
("::") at the end of the preceding paragraph (right here -->):: and indicated with a double-colon ("::") at the end of the
preceding paragraph (right here -->)::
if literal_block: if literal_block:
text = 'is left as-is' text = 'is left as-is'
...@@ -137,7 +139,7 @@ Here are examples of `body elements`_: ...@@ -137,7 +139,7 @@ Here are examples of `body elements`_:
This theory, that is mine, is mine. This theory, that is mine, is mine.
Anne Elk (Miss) -- Anne Elk (Miss)
- `Doctest blocks`_:: - `Doctest blocks`_::
...@@ -207,11 +209,11 @@ Here are examples of `body elements`_: ...@@ -207,11 +209,11 @@ Here are examples of `body elements`_:
Syntax Details Syntax Details
---------------- ----------------
Descriptions below list "DTD elements" (XML "generic identifiers") Descriptions below list "doctree elements" (document tree element
corresponding to syntax constructs. For details on the hierarchy of names; XML DTD generic identifiers) corresponding to syntax
elements, please see `Docutils Document Tree Structure`_ and the constructs. For details on the hierarchy of elements, please see `The
`Generic Plaintext Document Interface DTD`_ XML document type Docutils Document Tree`_ and the `Docutils Generic DTD`_ XML document
definition. type definition.
Whitespace Whitespace
...@@ -343,12 +345,16 @@ mechanism to override the default meaning of the characters used for ...@@ -343,12 +345,16 @@ mechanism to override the default meaning of the characters used for
the markup. In reStructuredText we use the backslash, commonly used the markup. In reStructuredText we use the backslash, commonly used
as an escaping character in other domains. as an escaping character in other domains.
A backslash followed by any character escapes that character. The A backslash followed by any character (except whitespace characters)
escaped character represents the character itself, and is prevented escapes that character. The escaped character represents the
from playing a role in any markup interpretation. The backslash is character itself, and is prevented from playing a role in any markup
removed from the output. A literal backslash is represented by two interpretation. The backslash is removed from the output. A literal
backslashes in a row (the first backslash "escapes" the second, backslash is represented by two backslashes in a row (the first
preventing it being interpreted in an "escaping" role). backslash "escapes" the second, preventing it being interpreted in an
"escaping" role).
Backslash-escaped whitespace characters are removed from the document.
This allows for character-level `inline markup`_.
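A tiny sketch of what the new rule enables, using publish_parts as elsewhere in this changeset (the sample string is made up):

# Illustration: the backslash-escaped space is removed from the output,
# so inline markup can end in the middle of a word.
from docutils.core import publish_parts

parts = publish_parts('*re*\\ structured text', writer_name='html')
# parts['body'] contains '<em>re</em>structured text'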
There are two contexts in which backslashes have no special meaning: There are two contexts in which backslashes have no special meaning:
literal blocks and inline literals. In these contexts, a single literal blocks and inline literals. In these contexts, a single
...@@ -421,7 +427,7 @@ Document Structure ...@@ -421,7 +427,7 @@ Document Structure
Document Document
-------- --------
DTD element: document. Doctree element: document.
The top-level element of a parsed reStructuredText document is the The top-level element of a parsed reStructuredText document is the
"document" element. After initial parsing, the document element is a "document" element. After initial parsing, the document element is a
...@@ -437,14 +443,15 @@ Specifically, there is no way to indicate a document title and ...@@ -437,14 +443,15 @@ Specifically, there is no way to indicate a document title and
subtitle explicitly in reStructuredText. Instead, a lone top-level subtitle explicitly in reStructuredText. Instead, a lone top-level
section title (see Sections_ below) can be treated as the document section title (see Sections_ below) can be treated as the document
title. Similarly, a lone second-level section title immediately after title. Similarly, a lone second-level section title immediately after
the "document title" can become the document subtitle. See the the "document title" can become the document subtitle. The rest of
`DocTitle transform`_ for details. the sections are then lifted up a level or two. See the `DocTitle
transform`_ for details.
Sections Sections
-------- --------
DTD elements: section, title. Doctree elements: section, title.
Sections are identified through their titles, which are marked up with Sections are identified through their titles, which are marked up with
adornment: "underlines" below the title text, or underlines and adornment: "underlines" below the title text, or underlines and
...@@ -467,7 +474,7 @@ formats may have limits (HTML has 6 levels). ...@@ -467,7 +474,7 @@ formats may have limits (HTML has 6 levels).
Some characters are more suitable than others. The following are Some characters are more suitable than others. The following are
recommended:: recommended::
= - ` : ' " ~ ^ _ * + # < > = - ` : . ' " ~ ^ _ * + #
Rather than imposing a fixed number and order of section title Rather than imposing a fixed number and order of section title
adornment styles, the order enforced will be the order as encountered. adornment styles, the order enforced will be the order as encountered.
...@@ -538,7 +545,7 @@ sections. ...@@ -538,7 +545,7 @@ sections.
Transitions Transitions
----------- -----------
DTD element: transition. Doctree element: transition.
Instead of subheads, extra space or a type ornament between Instead of subheads, extra space or a type ornament between
paragraphs may be used to mark text divisions or to signal paragraphs may be used to mark text divisions or to signal
...@@ -578,7 +585,7 @@ Body Elements ...@@ -578,7 +585,7 @@ Body Elements
Paragraphs Paragraphs
---------- ----------
DTD element: paragraph. Doctree element: paragraph.
Paragraphs consist of blocks of left-aligned text with no markup Paragraphs consist of blocks of left-aligned text with no markup
indicating any other body element. Blank lines separate paragraphs indicating any other body element. Blank lines separate paragraphs
...@@ -601,7 +608,7 @@ Syntax diagram:: ...@@ -601,7 +608,7 @@ Syntax diagram::
Bullet Lists Bullet Lists
------------ ------------
DTD elements: bullet_list, list_item. Doctree elements: bullet_list, list_item.
A text block which begins with a "-", "*", or "+", followed by A text block which begins with a "-", "*", or "+", followed by
whitespace, is a bullet list item (a.k.a. "unordered" list item). whitespace, is a bullet list item (a.k.a. "unordered" list item).
...@@ -650,7 +657,7 @@ Syntax diagram:: ...@@ -650,7 +657,7 @@ Syntax diagram::
Enumerated Lists Enumerated Lists
---------------- ----------------
DTD elements: enumerated_list, list_item. Doctree elements: enumerated_list, list_item.
Enumerated lists (a.k.a. "ordered" lists) are similar to bullet lists, Enumerated lists (a.k.a. "ordered" lists) are similar to bullet lists,
but use enumerators instead of bullets. An enumerator consists of an but use enumerators instead of bullets. An enumerator consists of an
...@@ -730,8 +737,8 @@ Example syntax diagram:: ...@@ -730,8 +737,8 @@ Example syntax diagram::
Definition Lists Definition Lists
---------------- ----------------
DTD elements: definition_list, definition_list_item, term, classifier, Doctree elements: definition_list, definition_list_item, term,
definition. classifier, definition.
Each definition list item contains a term, an optional classifier, and Each definition list item contains a term, an optional classifier, and
a definition. A term is a simple one-line word or phrase. An a definition. A term is a simple one-line word or phrase. An
...@@ -784,7 +791,7 @@ Syntax diagram:: ...@@ -784,7 +791,7 @@ Syntax diagram::
Field Lists Field Lists
----------- -----------
DTD elements: field_list, field, field_name, field_body. Doctree elements: field_list, field, field_name, field_body.
Field lists are used as part of an extension syntax, such as options Field lists are used as part of an extension syntax, such as options
for directives_, or database-like records meant for further for directives_, or database-like records meant for further
...@@ -797,13 +804,14 @@ directives in `reStructuredText Directives`_. ...@@ -797,13 +804,14 @@ directives in `reStructuredText Directives`_.
Field lists are mappings from field names to field bodies, modeled on Field lists are mappings from field names to field bodies, modeled on
RFC822_ headers. A field name is made up of one or more letters, RFC822_ headers. A field name is made up of one or more letters,
numbers, whitespace, and punctuation, except colons (":"). Field numbers, whitespace, and punctuation, except colons (":"). Inline
names are case-insensitive. The field name, along with a single colon markup is parsed in field names. Field names are case-insensitive
prefix and suffix, together form the field marker. The field marker when further processed or transformed. The field name, along with a
is followed by whitespace and the field body. The field body may single colon prefix and suffix, together form the field marker. The
contain multiple body elements, indented relative to the field marker. field marker is followed by whitespace and the field body. The field
The first line after the field name marker determines the indentation body may contain multiple body elements, indented relative to the
of the field body. For example:: field marker. The first line after the field name marker determines
the indentation of the field body. For example::
:Date: 2001-08-16 :Date: 2001-08-16
:Version: 1 :Version: 1
...@@ -841,7 +849,7 @@ Syntax diagram (simplified):: ...@@ -841,7 +849,7 @@ Syntax diagram (simplified)::
Bibliographic Fields Bibliographic Fields
```````````````````` ````````````````````
DTD elements: docinfo, author, authors, organization, contact, Doctree elements: docinfo, author, authors, organization, contact,
version, status, date, copyright, field, topic. version, status, date, copyright, field, topic.
When a field list is the first non-comment element in a document When a field list is the first non-comment element in a document
...@@ -851,7 +859,7 @@ corresponds to the front matter of a book, such as the title page and ...@@ -851,7 +859,7 @@ corresponds to the front matter of a book, such as the title page and
copyright page. copyright page.
Certain registered field names (listed below) are recognized and Certain registered field names (listed below) are recognized and
transformed to the corresponding DTD elements, most becoming child transformed to the corresponding doctree elements, most becoming child
elements of the "docinfo" element. No ordering is required of these elements of the "docinfo" element. No ordering is required of these
fields, although they may be rearranged to fit the document structure, fields, although they may be rearranged to fit the document structure,
as noted. Unless otherwise indicated below, each of the bibliographic as noted. Unless otherwise indicated below, each of the bibliographic
...@@ -860,8 +868,8 @@ bodies may be checked for `RCS keywords`_ and cleaned up. Any ...@@ -860,8 +868,8 @@ bodies may be checked for `RCS keywords`_ and cleaned up. Any
unrecognized fields will remain as generic fields in the docinfo unrecognized fields will remain as generic fields in the docinfo
element. element.
The registered bibliographic field names and their corresponding DTD The registered bibliographic field names and their corresponding
elements are as follows: doctree elements are as follows:
- Field name "Author": author element. - Field name "Author": author element.
- "Authors": authors. - "Authors": authors.
...@@ -921,7 +929,7 @@ Processed, the "status" element's text will become simply "expansion ...@@ -921,7 +929,7 @@ Processed, the "status" element's text will become simply "expansion
text". The dollar sign delimiters and leading RCS keyword name are text". The dollar sign delimiters and leading RCS keyword name are
removed. removed.
The RCS keyword processing only kicks in when all of these conditions The RCS keyword processing only kicks in when both of these conditions
hold: hold:
1. The field list is in bibliographic context (first non-comment 1. The field list is in bibliographic context (first non-comment
...@@ -930,14 +938,11 @@ hold: ...@@ -930,14 +938,11 @@ hold:
2. The field name is a recognized bibliographic field name. 2. The field name is a recognized bibliographic field name.
3. The sole contents of the field is an expanded RCS keyword, of the
form "$Keyword: data $".
Option Lists Option Lists
------------ ------------
DTD elements: option_list, option_list_item, option_group, option, Doctree elements: option_list, option_list_item, option_group, option,
option_string, option_argument, description. option_string, option_argument, description.
Option lists are two-column lists of command-line options and Option lists are two-column lists of command-line options and
...@@ -997,7 +1002,14 @@ given, supported by notes about truncation if and when applicable. ...@@ -997,7 +1002,14 @@ given, supported by notes about truncation if and when applicable.
Options may be followed by an argument placeholder, whose role and Options may be followed by an argument placeholder, whose role and
syntax should be explained in the description text. Either a space or syntax should be explained in the description text. Either a space or
an equals sign may be used as a delimiter between options and option an equals sign may be used as a delimiter between options and option
argument placeholders. argument placeholders; short options ("-" or "+" prefix only) may omit
the delimiter. Option arguments may take one of two forms:
- Begins with a letter (``[a-zA-Z]``) and subsequently consists of
letters, numbers, underscores and hyphens (``[a-zA-Z0-9_-]``).
- Begins with an open-angle-bracket (``<``) and ends with a
close-angle-bracket (``>``); any characters except angle brackets
are allowed internally.
Multiple option "synonyms" may be listed, sharing a single Multiple option "synonyms" may be listed, sharing a single
description. They must be separated by comma-space. description. They must be separated by comma-space.
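As a quick way to see the elements this produces, here is a small sketch
(option names invented for illustration, same docutils assumptions as the
earlier sketch) that prints the doctree of an option list::

    from docutils.core import publish_string

    SOURCE = """\
    -a            Output all.
    -c arg        Output just arg.
    --long=<val>  Output the value <val>.
    -f FILE, --file=FILE
                  Read input from FILE (two synonyms, one description).
    """

    # The doctree shows option_list, option_list_item, option_group,
    # option_string, option_argument and description elements.
    print(publish_string(SOURCE, writer_name='pseudoxml',
                         settings_overrides={'output_encoding': 'unicode'}))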
...@@ -1021,14 +1033,15 @@ Syntax diagram (simplified):: ...@@ -1021,14 +1033,15 @@ Syntax diagram (simplified)::
Literal Blocks Literal Blocks
-------------- --------------
DTD element: literal_block. Doctree element: literal_block.
A paragraph consisting of two colons ("::") signifies that all A paragraph consisting of two colons ("::") signifies that the
following **indented** text blocks comprise a literal block. No following text block(s) comprise a literal block. The literal block
markup processing is done within a literal block. It is left as-is, must either be indented or quoted (see below). No markup processing
and is typically rendered in a monospaced typeface:: is done within a literal block. It is left as-is, and is typically
rendered in a monospaced typeface::
This is a typical paragraph. A literal block follows. This is a typical paragraph. An indented literal block follows.
:: ::
...@@ -1074,11 +1087,21 @@ colons after "Paragraph"): ...@@ -1074,11 +1087,21 @@ colons after "Paragraph"):
Literal block Literal block
The minimum leading whitespace will be removed from each line of the All whitespace (including line breaks, but excluding minimum
literal block. Other than that, all whitespace (including line indentation for indented literal blocks) is preserved. Blank lines
breaks) is preserved. Blank lines are required before and after a are required before and after a literal block, but these blank lines
literal block, but these blank lines are not included as part of the are not included as part of the literal block.
literal block.
Indented Literal Blocks
```````````````````````
Indented literal blocks are indicated by indentation relative to the
surrounding text (leading whitespace on each line). The minimum
indentation will be removed from each line of an indented literal
block. The literal block need not be contiguous; blank lines are
allowed between sections of indented text. The literal block ends
with the end of the indentation.
Syntax diagram:: Syntax diagram::
...@@ -1087,14 +1110,53 @@ Syntax diagram:: ...@@ -1087,14 +1110,53 @@ Syntax diagram::
| (ends with "::") | | (ends with "::") |
+------------------------------+ +------------------------------+
+---------------------------+ +---------------------------+
| literal block | | indented literal block |
+---------------------------+ +---------------------------+
Quoted Literal Blocks
`````````````````````
Quoted literal blocks are unindented contiguous blocks of text where
each line begins with the same non-alphanumeric printable 7-bit ASCII
character [#]_. A blank line ends a quoted literal block. The
quoting characters are preserved in the processed document.
.. [#]
The following are all valid quoting characters::
! " # $ % & ' ( ) * + , - . / : ; < = > ? @ [ \ ] ^ _ ` { | } ~
Note that these are the same characters as are valid for title
adornment of sections_.
Possible uses include literate programming in Haskell and email
quoting::
John Doe wrote::
>> Great idea!
>
> Why didn't I think of that?
You just did! ;-)
Syntax diagram::
+------------------------------+
| paragraph |
| (ends with "::") |
+------------------------------+
+------------------------------+
| ">" per-line-quoted |
| ">" contiguous literal block |
+------------------------------+
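A minimal sketch of the two literal block forms side by side (invented
text, same docutils assumptions as the earlier sketches)::

    from docutils.core import publish_string

    INDENTED = """\
    The paragraph ends with two colons::

        Indented text is rendered verbatim, with the minimum indentation
        (four spaces here) removed.
    """

    QUOTED = """\
    The paragraph ends with two colons::

    > Unindented lines that all start with ">" form a quoted literal
    > block; the quoting character is preserved in the output.
    """

    for source in (INDENTED, QUOTED):
        # Both documents contain a single literal_block element.
        print(publish_string(source, writer_name='pseudoxml',
                             settings_overrides={'output_encoding': 'unicode'}))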
Block Quotes Block Quotes
------------ ------------
DTD element: block_quote. Doctree element: block_quote, attribution.
A text block that is indented relative to the preceding text, without A text block that is indented relative to the preceding text, without
markup indicating it to be a literal block, is a block quote. All markup indicating it to be a literal block, is a block quote. All
...@@ -1107,6 +1169,11 @@ within the block quote:: ...@@ -1107,6 +1169,11 @@ within the block quote::
-- Sherlock Holmes -- Sherlock Holmes
If the final block of a block quote begins with "--", "---", or a true
em-dash (flush left within the block quote), it is interpreted as an
attribution. If the attribution consists of multiple lines, the left
edges of the second and subsequent lines must align.
Blank lines are required before and after a block quote, but these Blank lines are required before and after a block quote, but these
blank lines are not included as part of the block quote. blank lines are not included as part of the block quote.
...@@ -1119,13 +1186,16 @@ Syntax diagram:: ...@@ -1119,13 +1186,16 @@ Syntax diagram::
+---------------------------+ +---------------------------+
| block quote | | block quote |
| (body elements)+ | | (body elements)+ |
| |
| -- attribution text |
| (optional) |
+---------------------------+ +---------------------------+
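The attribution behaviour can be checked directly in the doctree; a short
sketch (invented text, same docutils assumptions as above)::

    from docutils.core import publish_string

    SOURCE = """\
    A paragraph introducing the quotation.

        An indented text block without literal-block markup is a block
        quote, and its final "--" block becomes the attribution.

        -- An example attribution
    """

    # The doctree contains a block_quote element whose last child is an
    # attribution element holding "An example attribution".
    print(publish_string(SOURCE, writer_name='pseudoxml',
                         settings_overrides={'output_encoding': 'unicode'}))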
Doctest Blocks Doctest Blocks
-------------- --------------
DTD element: doctest_block. Doctree element: doctest_block.
Doctest blocks are interactive Python sessions cut-and-pasted into Doctest blocks are interactive Python sessions cut-and-pasted into
docstrings. They are meant to illustrate usage by example, and docstrings. They are meant to illustrate usage by example, and
...@@ -1155,7 +1225,7 @@ Indentation is not required for doctest blocks. ...@@ -1155,7 +1225,7 @@ Indentation is not required for doctest blocks.
Tables Tables
------ ------
DTD elements: table, tgroup, colspec, thead, tbody, row, entry. Doctree elements: table, tgroup, colspec, thead, tbody, row, entry.
ReStructuredText provides two syntaxes for delineating table cells: ReStructuredText provides two syntaxes for delineating table cells:
`Grid Tables`_ and `Simple Tables`_. `Grid Tables`_ and `Simple Tables`_.
...@@ -1373,7 +1443,7 @@ targets, directives, substitution definitions, and comments. ...@@ -1373,7 +1443,7 @@ targets, directives, substitution definitions, and comments.
Footnotes Footnotes
````````` `````````
DTD elements: footnote, label. Doctree elements: footnote, label.
Each footnote consists of an explicit markup start (".. "), a left Each footnote consists of an explicit markup start (".. "), a left
square bracket, the footnote label, a right square bracket, and square bracket, the footnote label, a right square bracket, and
...@@ -1493,15 +1563,15 @@ The standard Docutils system uses the following symbols for footnote ...@@ -1493,15 +1563,15 @@ The standard Docutils system uses the following symbols for footnote
marks [#]_: marks [#]_:
- asterisk/star ("*") - asterisk/star ("*")
- dagger (HTML character entity "&dagger;") - dagger (HTML character entity "&dagger;", Unicode U+02020)
- double dagger ("&Dagger;") - double dagger ("&Dagger;"/U+02021)
- section mark ("&sect;") - section mark ("&sect;"/U+000A7)
- pilcrow or paragraph mark ("&para;") - pilcrow or paragraph mark ("&para;"/U+000B6)
- number sign ("#") - number sign ("#")
- spade suit ("&spades;") - spade suit ("&spades;"/U+02660)
- heart suit ("&hearts;") - heart suit ("&hearts;"/U+02665)
- diamond suit ("&diams;") - diamond suit ("&diams;"/U+02666)
- club suit ("&clubs;") - club suit ("&clubs;"/U+02663)
.. [#] This list was inspired by the list of symbols for "Note .. [#] This list was inspired by the list of symbols for "Note
Reference Marks" in The Chicago Manual of Style, 14th edition, Reference Marks" in The Chicago Manual of Style, 14th edition,
...@@ -1512,6 +1582,15 @@ marks [#]_: ...@@ -1512,6 +1582,15 @@ marks [#]_:
If more than ten symbols are required, the same sequence will be If more than ten symbols are required, the same sequence will be
reused, doubled and then tripled, and so on ("**" etc.). reused, doubled and then tripled, and so on ("**" etc.).
.. Note:: When using auto-symbol footnotes, the choice of output
encoding is important. Many of the symbols used are not encodable
in certain common text encodings such as Latin-1 (ISO 8859-1). The
use of UTF-8 for the output encoding is recommended. An
alternative for HTML and XML output is to use the
"xmlcharrefreplace" `output encoding error handler`__.
__ ../../user/config.html#output-encoding-error-handler
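In terms of the settings mentioned in the note above, the choice looks
roughly like this (a sketch only; the footnote text is invented, and the
``publish_string`` function from ``docutils.core`` is assumed)::

    from docutils.core import publish_string

    SOURCE = """\
    First auto-symbol footnote [*]_ and second [*]_.

    .. [*] Gets an asterisk for its mark.
    .. [*] Gets a dagger, which Latin-1 cannot encode.
    """

    # UTF-8 output can represent every footnote symbol directly.
    utf8_html = publish_string(SOURCE, writer_name='html',
                               settings_overrides={'output_encoding': 'utf-8'})

    # With a legacy encoding, unencodable symbols can still survive as
    # character references in HTML/XML output.
    latin1_html = publish_string(
        SOURCE, writer_name='html',
        settings_overrides={'output_encoding': 'latin-1',
                            'output_encoding_error_handler':
                            'xmlcharrefreplace'})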
Mixed Manual and Auto-Numbered Footnotes Mixed Manual and Auto-Numbered Footnotes
........................................ ........................................
...@@ -1558,7 +1637,7 @@ differently from footnotes. For example:: ...@@ -1558,7 +1637,7 @@ differently from footnotes. For example::
Hyperlink Targets Hyperlink Targets
````````````````` `````````````````
DTD element: target. Doctree element: target.
These are also called _`explicit hyperlink targets`, to differentiate These are also called _`explicit hyperlink targets`, to differentiate
them from `implicit hyperlink targets`_ defined below. them from `implicit hyperlink targets`_ defined below.
...@@ -1624,8 +1703,8 @@ indirect. ...@@ -1624,8 +1703,8 @@ indirect.
.. _archive: .. _archive:
.. _Doc-SIG: http://mail.python.org/pipermail/doc-sig/ .. _Doc-SIG: http://mail.python.org/pipermail/doc-sig/
An inline form of internal hyperlink target is available; see An inline form of internal hyperlink target is available; see
`Inline Internal Targets`_. `Inline Internal Targets`_.
2. _`External hyperlink targets` have an absolute or relative URI or 2. _`External hyperlink targets` have an absolute or relative URI or
email address in their link blocks. For example, take the email address in their link blocks. For example, take the
...@@ -1768,11 +1847,12 @@ require attention to the order of corresponding targets. ...@@ -1768,11 +1847,12 @@ require attention to the order of corresponding targets.
Directives Directives
`````````` ``````````
DTD elements: depend on the directive. Doctree elements: depend on the directive.
Directives are an extension mechanism for reStructuredText, a way of Directives are an extension mechanism for reStructuredText, a way of
adding support for new constructs without adding new syntax. All adding support for new constructs without adding new primary syntax
standard directives (those implemented and registered in the reference (directives may support additional syntax locally). All standard
directives (those implemented and registered in the reference
reStructuredText parser) are described in the `reStructuredText reStructuredText parser) are described in the `reStructuredText
Directives`_ document, and are always available. Any other directives Directives`_ document, and are always available. Any other directives
are domain-specific, and may require special action to make them are domain-specific, and may require special action to make them
...@@ -1801,15 +1881,17 @@ words (alphanumerics plus internal hyphens, underscores, and periods; ...@@ -1801,15 +1881,17 @@ words (alphanumerics plus internal hyphens, underscores, and periods;
no whitespace). Two colons are used after the directive type for no whitespace). Two colons are used after the directive type for
these reasons: these reasons:
- To avoid clashes with common comment text like:: - Two colons are distinctive, and unlikely to be used in common text.
- Two colons avoids clashes with common comment text like::
.. Danger: modify at your own risk! .. Danger: modify at your own risk!
- If an implementation of reStructuredText does not recognize a - If an implementation of reStructuredText does not recognize a
directive (i.e., the directive-handler is not installed), the entire directive (i.e., the directive-handler is not installed), a level-3
directive block (including the directive itself) will be treated as (error) system message is generated, and the entire directive block
a literal block, and a level-3 (error) system message generated. (including the directive itself) will be included as a literal
Thus "::" is a natural choice. block. Thus "::" is a natural choice.
The directive block consists of any text on the first line of the The directive block consists of any text on the first line of the
directive after the directive marker, and any subsequent indented directive after the directive marker, and any subsequent indented
...@@ -1870,7 +1952,7 @@ Syntax diagram:: ...@@ -1870,7 +1952,7 @@ Syntax diagram::
Substitution Definitions Substitution Definitions
```````````````````````` ````````````````````````
DTD element: substitution_definition. Doctree element: substitution_definition.
Substitution definitions are indicated by an explicit markup start Substitution definitions are indicated by an explicit markup start
(".. ") followed by a vertical bar, the substitution text, another (".. ") followed by a vertical bar, the substitution text, another
...@@ -1889,12 +1971,14 @@ indirectly contain a circular substitution reference. ...@@ -1889,12 +1971,14 @@ indirectly contain a circular substitution reference.
`Substitution references`_ are replaced in-line by the processed `Substitution references`_ are replaced in-line by the processed
contents of the corresponding definition (linked by matching contents of the corresponding definition (linked by matching
substitution text). Substitution definitions allow the power and substitution text). Matches are case-sensitive but forgiving; if no
flexibility of block-level directives_ to be shared by inline text. exact match is found, a case-insensitive comparison is attempted.
They are a way to include arbitrarily complex inline structures within
text, while keeping the details out of the flow of text. They are the Substitution definitions allow the power and flexibility of
equivalent of SGML/XML's named entities or programming language block-level directives_ to be shared by inline text. They are a way
macros. to include arbitrarily complex inline structures within text, while
keeping the details out of the flow of text. They are the equivalent
of SGML/XML's named entities or programming language macros.
Without the substitution mechanism, every time someone wants an Without the substitution mechanism, every time someone wants an
application-specific new inline structure, they would have to petition application-specific new inline structure, they would have to petition
...@@ -2064,7 +2148,7 @@ Replacement text ...@@ -2064,7 +2148,7 @@ Replacement text
Comments Comments
```````` ````````
DTD element: comment. Doctree element: comment.
Arbitrary indented text may follow the explicit markup start and will Arbitrary indented text may follow the explicit markup start and will
be processed as a comment element. No further processing is done on be processed as a comment element. No further processing is done on
...@@ -2142,9 +2226,8 @@ In reStructuredText, inline markup applies to words or phrases within ...@@ -2142,9 +2226,8 @@ In reStructuredText, inline markup applies to words or phrases within
a text block. The same whitespace and punctuation that serves to a text block. The same whitespace and punctuation that serves to
delimit words in written text is used to delimit the inline markup delimit words in written text is used to delimit the inline markup
syntax constructs. The text within inline markup may not begin or end syntax constructs. The text within inline markup may not begin or end
with whitespace. Arbitrary character-level markup is not supported with whitespace. Arbitrary `character-level inline markup`_ is
[#]_; it is not possible to mark up individual characters within a supported although not encouraged. Inline markup cannot be nested.
word. Inline markup cannot be nested.
There are nine inline markup constructs. Five of the constructs use There are nine inline markup constructs. Five of the constructs use
identical start-strings and end-strings to indicate the markup: identical start-strings and end-strings to indicate the markup:
...@@ -2183,7 +2266,7 @@ end-string will not be recognized or processed. ...@@ -2183,7 +2266,7 @@ end-string will not be recognized or processed.
4. Inline markup end-strings must end a text block or be immediately 4. Inline markup end-strings must end a text block or be immediately
followed by whitespace or one of:: followed by whitespace or one of::
' " ) ] } > - / : . , ; ! ? ' " ) ] } > - / : . , ; ! ? \
5. If an inline markup start-string is immediately preceded by a 5. If an inline markup start-string is immediately preceded by a
single or double quote, "(", "[", "{", or "<", it must not be single or double quote, "(", "[", "{", or "<", it must not be
...@@ -2245,23 +2328,41 @@ each character. The inline markup recognition order is as follows: ...@@ -2245,23 +2328,41 @@ each character. The inline markup recognition order is as follows:
- `Standalone hyperlinks`_ are the last to be recognized. - `Standalone hyperlinks`_ are the last to be recognized.
.. [#] Backslash escapes can be used to allow arbitrary text to
immediately follow inline markup::
Python ``list``\s use square bracket syntax. Character-Level Inline Markup
-----------------------------
It is possible to mark up individual characters within a word with
backslash escapes (see `Escaping Mechanism`_ above). Backslash
escapes can be used to allow arbitrary text to immediately follow
inline markup::
Python ``list``\s use square bracket syntax.
The backslash will disappear from the processed document. The word
"list" will appear as inline literal text, and the letter "s" will
immediately follow it as normal text, with no space in-between.
Arbitrary text may immediately precede inline markup using
backslash-escaped whitespace::
The backslash will disappear from the processed document. The word Possible in *re*\ ``Structured``\ *Text*, though not encouraged.
"list" will appear as inline literal text, and the letter "s" will
immediately follow it as normal text, with no space in-between.
There is not yet any way for arbitrary text to immediately precede The backslashes and spaces separating "re", "Structured", and "Text"
inline markup. above will disappear from the processed document.
.. CAUTION::
The use of backslash-escapes for character-level inline markup is
not encouraged. Such use is ugly and detrimental to the
unprocessed document's readability. Please use this feature
sparingly and only where absolutely necessary.
Emphasis Emphasis
-------- --------
DTD element: emphasis. Doctree element: emphasis.
Start-string = end-string = "*". Start-string = end-string = "*".
...@@ -2275,7 +2376,7 @@ Emphasized text is typically displayed in italics. ...@@ -2275,7 +2376,7 @@ Emphasized text is typically displayed in italics.
Strong Emphasis Strong Emphasis
--------------- ---------------
DTD element: strong. Doctree element: strong.
Start-string = end-string = "**". Start-string = end-string = "**".
...@@ -2289,7 +2390,8 @@ Strongly emphasized text is typically displayed in boldface. ...@@ -2289,7 +2390,8 @@ Strongly emphasized text is typically displayed in boldface.
Interpreted Text Interpreted Text
---------------- ----------------
DTD element: depends on the explicit or implicit role and processing. Doctree element: depends on the explicit or implicit role and
processing.
Start-string = end-string = "`". Start-string = end-string = "`".
...@@ -2327,7 +2429,7 @@ specialized roles. ...@@ -2327,7 +2429,7 @@ specialized roles.
Inline Literals Inline Literals
--------------- ---------------
DTD element: literal. Doctree element: literal.
Start-string = end-string = "``". Start-string = end-string = "``".
...@@ -2356,7 +2458,7 @@ Inline literals are useful for short code snippets. For example:: ...@@ -2356,7 +2458,7 @@ Inline literals are useful for short code snippets. For example::
Hyperlink References Hyperlink References
-------------------- --------------------
DTD element: reference. Doctree element: reference.
- Named hyperlink references: - Named hyperlink references:
...@@ -2427,7 +2529,15 @@ For example, here is a reference to a title describing a tag:: ...@@ -2427,7 +2529,15 @@ For example, here is a reference to a title describing a tag::
See `HTML Element: \<a>`_ below. See `HTML Element: \<a>`_ below.
.. Caution:: The reference text may also be omitted, in which case the URI will be
duplicated for use as the reference text. This is useful for relative
URIs where the address or file name is also the desired reference
text::
See `<a_named_relative_link>`_ or `<an_anonymous_relative_link>`__
for details.
.. CAUTION::
This construct offers easy authoring and maintenance of hyperlinks This construct offers easy authoring and maintenance of hyperlinks
at the expense of general readability. Inline URIs, especially at the expense of general readability. Inline URIs, especially
...@@ -2441,7 +2551,7 @@ For example, here is a reference to a title describing a tag:: ...@@ -2441,7 +2551,7 @@ For example, here is a reference to a title describing a tag::
Inline Internal Targets Inline Internal Targets
------------------------ ------------------------
DTD element: target. Doctree element: target.
Start-string = "_`", end-string = "`". Start-string = "_`", end-string = "`".
...@@ -2463,7 +2573,7 @@ reference names. ...@@ -2463,7 +2573,7 @@ reference names.
Footnote References Footnote References
------------------- -------------------
DTD element: footnote_reference. Doctree element: footnote_reference.
Start-string = "[", end-string = "]_". Start-string = "[", end-string = "]_".
...@@ -2489,7 +2599,7 @@ For example:: ...@@ -2489,7 +2599,7 @@ For example::
Citation References Citation References
------------------- -------------------
DTD element: citation_reference. Doctree element: citation_reference.
Start-string = "[", end-string = "]_". Start-string = "[", end-string = "]_".
...@@ -2508,7 +2618,7 @@ See Citations_ for the citation itself. ...@@ -2508,7 +2618,7 @@ See Citations_ for the citation itself.
Substitution References Substitution References
----------------------- -----------------------
DTD element: substitution_reference, reference. Doctree element: substitution_reference, reference.
Start-string = "|", end-string = "|" (optionally followed by "_" or Start-string = "|", end-string = "|" (optionally followed by "_" or
"__"). "__").
...@@ -2519,8 +2629,9 @@ a "_" (named) or "__" (anonymous) suffix; the substitution text is ...@@ -2519,8 +2629,9 @@ a "_" (named) or "__" (anonymous) suffix; the substitution text is
used for the reference text in the named case. used for the reference text in the named case.
The processing system replaces substitution references with the The processing system replaces substitution references with the
processed contents of the corresponding `substitution definitions`_. processed contents of the corresponding `substitution definitions`_
Substitution definitions produce inline-compatible elements. (which see for the definition of "correspond"). Substitution
definitions produce inline-compatible elements.
Examples:: Examples::
...@@ -2535,7 +2646,7 @@ Examples:: ...@@ -2535,7 +2646,7 @@ Examples::
Standalone Hyperlinks Standalone Hyperlinks
--------------------- ---------------------
DTD element: reference. Doctree element: reference.
Start-string = end-string = "" (empty string). Start-string = end-string = "" (empty string).
...@@ -2558,8 +2669,8 @@ Two forms of URI are recognized: ...@@ -2558,8 +2669,8 @@ Two forms of URI are recognized:
The scheme is the name of the protocol, such as "http", "ftp", The scheme is the name of the protocol, such as "http", "ftp",
"mailto", or "telnet". The scheme consists of an initial letter, "mailto", or "telnet". The scheme consists of an initial letter,
followed by letters, numbers, and/or "+", "-", ".". Recognition is followed by letters, numbers, and/or "+", "-", ".". Recognition is
limited to known schemes, per the W3C's `Index of WWW Addressing limited to known schemes, per the `Official IANA Registry of URI
Schemes`_. Schemes`_ and the W3C's `Retired Index of WWW Addressing Schemes`_.
The scheme-specific part of the resource identifier may be either The scheme-specific part of the resource identifier may be either
hierarchical or opaque: hierarchical or opaque:
...@@ -2588,7 +2699,11 @@ Two forms of URI are recognized: ...@@ -2588,7 +2699,11 @@ Two forms of URI are recognized:
someone@somewhere.com someone@somewhere.com
Punctuation at the end of a URI is not considered part of the URI. Punctuation at the end of a URI is not considered part of the URI,
unless the URI is terminated by a closing angle bracket (">").
Backslashes may be used in URIs to escape markup characters,
specifically asterisks ("*") and underscores ("_") which are valid URI
characters (see `Escaping Mechanism`_ above).
.. [#URI] Uniform Resource Identifier. URIs are a general form of .. [#URI] Uniform Resource Identifier. URIs are a general form of
URLs (Uniform Resource Locators). For the syntax of URIs see URLs (Uniform Resource Locators). For the syntax of URIs see
...@@ -2599,7 +2714,7 @@ Punctuation at the end of a URI is not considered part of the URI. ...@@ -2599,7 +2714,7 @@ Punctuation at the end of a URI is not considered part of the URI.
Error Handling Error Handling
---------------- ----------------
DTD element: system_message, problematic. Doctree element: system_message, problematic.
Markup errors are handled according to the specification in `PEP Markup errors are handled according to the specification in `PEP
258`_. 258`_.
...@@ -2607,8 +2722,8 @@ Markup errors are handled according to the specification in `PEP ...@@ -2607,8 +2722,8 @@ Markup errors are handled according to the specification in `PEP
.. _reStructuredText: http://docutils.sourceforge.net/rst.html .. _reStructuredText: http://docutils.sourceforge.net/rst.html
.. _Docutils: http://docutils.sourceforge.net/ .. _Docutils: http://docutils.sourceforge.net/
.. _Docutils Document Tree Structure: ../doctree.html .. _The Docutils Document Tree: ../doctree.html
.. _Generic Plaintext Document Interface DTD: ../gpdi.dtd .. _Docutils Generic DTD: ../docutils.dtd
.. _transforms: .. _transforms:
http://docutils.sourceforge.net/docutils/transforms/ http://docutils.sourceforge.net/docutils/transforms/
.. _Grouch: http://www.mems-exchange.org/software/grouch/ .. _Grouch: http://www.mems-exchange.org/software/grouch/
...@@ -2619,23 +2734,25 @@ Markup errors are handled according to the specification in `PEP ...@@ -2619,23 +2734,25 @@ Markup errors are handled according to the specification in `PEP
.. _getopt.py: .. _getopt.py:
http://www.python.org/doc/current/lib/module-getopt.html http://www.python.org/doc/current/lib/module-getopt.html
.. _GNU libc getopt_long(): .. _GNU libc getopt_long():
http://www.gnu.org/manual/glibc-2.2.3/html_node/libc_516.html http://www.gnu.org/software/libc/manual/html_node/Getopt-Long-Options.html
.. _doctest module: .. _doctest module:
http://www.python.org/doc/current/lib/module-doctest.html http://www.python.org/doc/current/lib/module-doctest.html
.. _Emacs table mode: http://table.sourceforge.net/ .. _Emacs table mode: http://table.sourceforge.net/
.. _Index of WWW Addressing Schemes: .. _Official IANA Registry of URI Schemes:
http://www.iana.org/assignments/uri-schemes
.. _Retired Index of WWW Addressing Schemes:
http://www.w3.org/Addressing/schemes.html http://www.w3.org/Addressing/schemes.html
.. _World Wide Web Consortium: http://www.w3.org/ .. _World Wide Web Consortium: http://www.w3.org/
.. _HTML Techniques for Web Content Accessibility Guidelines: .. _HTML Techniques for Web Content Accessibility Guidelines:
http://www.w3.org/TR/WCAG10-HTML-TECHS/#link-text http://www.w3.org/TR/WCAG10-HTML-TECHS/#link-text
.. _reStructuredText Directives: directives.html .. _reStructuredText Directives: directives.html
.. _reStructuredText Interpreted Text Roles: interpreted.html .. _reStructuredText Interpreted Text Roles: roles.html
.. _RFC2396: http://www.rfc-editor.org/rfc/rfc2396.txt .. _RFC2396: http://www.rfc-editor.org/rfc/rfc2396.txt
.. _RFC2732: http://www.rfc-editor.org/rfc/rfc2732.txt .. _RFC2732: http://www.rfc-editor.org/rfc/rfc2732.txt
.. _Zope: http://www.zope.com/ .. _Zope: http://www.zope.com/
.. _PEP 258: http://docutils.sourceforge.net/spec/pep-0258.txt .. _PEP 258: ../../peps/pep-0258.html
.. ..
Local Variables: Local Variables:
mode: indented-text mode: indented-text
......
...@@ -335,6 +335,32 @@ instancehome $INSTANCE ...@@ -335,6 +335,32 @@ instancehome $INSTANCE
# #
# rest-output-encoding iso-8859-15 # rest-output-encoding iso-8859-15
# Directive: rest-header-level
#
# Description:
# Set the default starting HTML header level for restructured text
# documents. The default is 3, which implies that top-level headers
# will be created with an <H3> tag.
#
# Default: 3
#
# Example:
#
# rest-header-level 2
# Directive: rest-language-code
#
# Description:
# Language code used for some internal translations inside of the docutils
# package and for DTD bibliographic elements mapping. See
# lib/python/docutils/languages/ for a list of supported language codes.
#
# Default: en
#
# Example:
#
# rest-language-code de
# Directive: cgi-environment # Directive: cgi-environment
# #
# Description: # Description:
......
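Taken together, the new rest-header-level and rest-language-code directives
above correspond roughly to docutils' own ``initial_header_level`` (an HTML
writer setting) and ``language_code`` settings. As a standalone illustration
only (this is not the Zope code path, and the sample text is invented), a
similar effect can be had when publishing directly with docutils::

    from docutils.core import publish_string

    SOURCE = """\
    First Section
    ===============

    Paragraph one.

    Second Section
    ================

    Paragraph two.
    """

    # Section titles start at <h2> instead of <h1>, and generated text
    # (admonition titles, docinfo labels, attributions, ...) is taken from
    # the German translations shipped in docutils/languages/.
    html = publish_string(SOURCE, writer_name='html',
                          settings_overrides={'initial_header_level': 2,
                                              'language_code': 'de'})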