Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
Z
Zope
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
Zope
Commits
86878d0d
Commit
86878d0d
authored
Nov 30, 2003
by
Andreas Jung
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
removed
parent
e1142d2d
Changes
53
Hide whitespace changes
Inline
Side-by-side
Showing
53 changed files
with
0 additions
and
12260 deletions
+0
-12260
lib/python/docutils/languages/__init__.py
lib/python/docutils/languages/__init__.py
+0
-23
lib/python/docutils/languages/af.py
lib/python/docutils/languages/af.py
+0
-60
lib/python/docutils/languages/de.py
lib/python/docutils/languages/de.py
+0
-60
lib/python/docutils/languages/en.py
lib/python/docutils/languages/en.py
+0
-62
lib/python/docutils/languages/es.py
lib/python/docutils/languages/es.py
+0
-60
lib/python/docutils/languages/fr.py
lib/python/docutils/languages/fr.py
+0
-60
lib/python/docutils/languages/it.py
lib/python/docutils/languages/it.py
+0
-60
lib/python/docutils/languages/ru.py
lib/python/docutils/languages/ru.py
+0
-68
lib/python/docutils/languages/sk.py
lib/python/docutils/languages/sk.py
+0
-60
lib/python/docutils/languages/sv.py
lib/python/docutils/languages/sv.py
+0
-61
lib/python/docutils/parsers/__init__.py
lib/python/docutils/parsers/__init__.py
+0
-48
lib/python/docutils/parsers/rst/__init__.py
lib/python/docutils/parsers/rst/__init__.py
+0
-123
lib/python/docutils/parsers/rst/directives/__init__.py
lib/python/docutils/parsers/rst/directives/__init__.py
+0
-290
lib/python/docutils/parsers/rst/directives/admonitions.py
lib/python/docutils/parsers/rst/directives/admonitions.py
+0
-90
lib/python/docutils/parsers/rst/directives/body.py
lib/python/docutils/parsers/rst/directives/body.py
+0
-122
lib/python/docutils/parsers/rst/directives/html.py
lib/python/docutils/parsers/rst/directives/html.py
+0
-96
lib/python/docutils/parsers/rst/directives/images.py
lib/python/docutils/parsers/rst/directives/images.py
+0
-100
lib/python/docutils/parsers/rst/directives/misc.py
lib/python/docutils/parsers/rst/directives/misc.py
+0
-233
lib/python/docutils/parsers/rst/directives/parts.py
lib/python/docutils/parsers/rst/directives/parts.py
+0
-56
lib/python/docutils/parsers/rst/directives/references.py
lib/python/docutils/parsers/rst/directives/references.py
+0
-23
lib/python/docutils/parsers/rst/languages/__init__.py
lib/python/docutils/parsers/rst/languages/__init__.py
+0
-27
lib/python/docutils/parsers/rst/languages/af.py
lib/python/docutils/parsers/rst/languages/af.py
+0
-92
lib/python/docutils/parsers/rst/languages/de.py
lib/python/docutils/parsers/rst/languages/de.py
+0
-83
lib/python/docutils/parsers/rst/languages/en.py
lib/python/docutils/parsers/rst/languages/en.py
+0
-94
lib/python/docutils/parsers/rst/languages/es.py
lib/python/docutils/parsers/rst/languages/es.py
+0
-102
lib/python/docutils/parsers/rst/languages/fr.py
lib/python/docutils/parsers/rst/languages/fr.py
+0
-89
lib/python/docutils/parsers/rst/languages/it.py
lib/python/docutils/parsers/rst/languages/it.py
+0
-81
lib/python/docutils/parsers/rst/languages/ru.py
lib/python/docutils/parsers/rst/languages/ru.py
+0
-93
lib/python/docutils/parsers/rst/languages/sk.py
lib/python/docutils/parsers/rst/languages/sk.py
+0
-81
lib/python/docutils/parsers/rst/languages/sv.py
lib/python/docutils/parsers/rst/languages/sv.py
+0
-80
lib/python/docutils/parsers/rst/roman.py
lib/python/docutils/parsers/rst/roman.py
+0
-81
lib/python/docutils/parsers/rst/states.py
lib/python/docutils/parsers/rst/states.py
+0
-2915
lib/python/docutils/parsers/rst/tableparser.py
lib/python/docutils/parsers/rst/tableparser.py
+0
-522
lib/python/docutils/readers/__init__.py
lib/python/docutils/readers/__init__.py
+0
-88
lib/python/docutils/readers/pep.py
lib/python/docutils/readers/pep.py
+0
-58
lib/python/docutils/readers/python/__init__.py
lib/python/docutils/readers/python/__init__.py
+0
-19
lib/python/docutils/readers/python/moduleparser.py
lib/python/docutils/readers/python/moduleparser.py
+0
-784
lib/python/docutils/readers/standalone.py
lib/python/docutils/readers/standalone.py
+0
-49
lib/python/docutils/transforms/__init__.py
lib/python/docutils/transforms/__init__.py
+0
-166
lib/python/docutils/transforms/components.py
lib/python/docutils/transforms/components.py
+0
-54
lib/python/docutils/transforms/frontmatter.py
lib/python/docutils/transforms/frontmatter.py
+0
-399
lib/python/docutils/transforms/misc.py
lib/python/docutils/transforms/misc.py
+0
-62
lib/python/docutils/transforms/parts.py
lib/python/docutils/transforms/parts.py
+0
-176
lib/python/docutils/transforms/peps.py
lib/python/docutils/transforms/peps.py
+0
-294
lib/python/docutils/transforms/references.py
lib/python/docutils/transforms/references.py
+0
-762
lib/python/docutils/transforms/universal.py
lib/python/docutils/transforms/universal.py
+0
-185
lib/python/docutils/writers/__init__.py
lib/python/docutils/writers/__init__.py
+0
-83
lib/python/docutils/writers/docutils_xml.py
lib/python/docutils/writers/docutils_xml.py
+0
-66
lib/python/docutils/writers/html4css1.py
lib/python/docutils/writers/html4css1.py
+0
-1246
lib/python/docutils/writers/html4zope.py
lib/python/docutils/writers/html4zope.py
+0
-59
lib/python/docutils/writers/latex2e.py
lib/python/docutils/writers/latex2e.py
+0
-1472
lib/python/docutils/writers/pep_html.py
lib/python/docutils/writers/pep_html.py
+0
-113
lib/python/docutils/writers/pseudoxml.py
lib/python/docutils/writers/pseudoxml.py
+0
-30
No files found.
lib/python/docutils/languages/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# Internationalization details are documented in
# <http://docutils.sf.net/spec/howto/i18n.html>.

"""
This package contains modules for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

# Cache of already-imported language modules, keyed by language code.
_languages = {}


def get_language(language_code):
    """
    Return the language module for `language_code`, importing it on first
    use and caching it in `_languages` for subsequent calls.
    """
    # `in` replaces the removed-in-Python-3 dict.has_key().
    if language_code in _languages:
        return _languages[language_code]
    module = __import__(language_code, globals(), locals())
    _languages[language_code] = module
    return module
lib/python/docutils/languages/af.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Jannie Hofmeyr
# Contact: jhsh@sun.ac.za
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Afrikaans-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      'author': 'Auteur',
      'authors': 'Auteurs',
      'organization': 'Organisasie',
      'address': 'Adres',
      'contact': 'Kontak',
      'version': 'Weergawe',
      'revision': 'Revisie',
      'status': 'Status',
      'date': 'Datum',
      'copyright': 'Kopiereg',
      'dedication': 'Opdrag',
      'abstract': 'Opsomming',
      'attention': 'Aandag!',
      'caution': 'Wees versigtig!',
      'danger': '!GEVAAR!',
      'error': 'Fout',
      'hint': 'Wenk',
      'important': 'Belangrik',
      'note': 'Nota',
      'tip': 'Tip',  # hint and tip both have the same translation: wenk
      'warning': 'Waarskuwing',
      'contents': 'Inhoud'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      'auteur': 'author',
      'auteurs': 'authors',
      'organisasie': 'organization',
      'adres': 'address',
      'kontak': 'contact',
      'weergawe': 'version',
      'revisie': 'revision',
      'status': 'status',
      'datum': 'date',
      'kopiereg': 'copyright',
      'opdrag': 'dedication',
      'opsomming': 'abstract'}
"""Afrikaans (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/de.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger; Gunnar Schwant
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
German language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      'author': 'Autor',
      'authors': 'Autoren',
      'organization': 'Organisation',
      'address': 'Adresse',
      'contact': 'Kontakt',
      'version': 'Version',
      'revision': 'Revision',
      'status': 'Status',
      'date': 'Datum',
      'dedication': 'Widmung',
      'copyright': 'Copyright',
      'abstract': 'Zusammenfassung',
      'attention': 'Achtung!',
      'caution': 'Vorsicht!',
      'danger': '!GEFAHR!',
      'error': 'Fehler',
      'hint': 'Hinweis',
      'important': 'Wichtig',
      'note': 'Bemerkung',
      'tip': 'Tipp',
      'warning': 'Warnung',
      'contents': 'Inhalt'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      'autor': 'author',
      'autoren': 'authors',
      'organisation': 'organization',
      'adresse': 'address',
      'kontakt': 'contact',
      'version': 'version',
      'revision': 'revision',
      'status': 'status',
      'datum': 'date',
      'copyright': 'copyright',
      'widmung': 'dedication',
      'zusammenfassung': 'abstract'}
"""German (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/en.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
English-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      # fixed: language-dependent
      'author': 'Author',
      'authors': 'Authors',
      'organization': 'Organization',
      'address': 'Address',
      'contact': 'Contact',
      'version': 'Version',
      'revision': 'Revision',
      'status': 'Status',
      'date': 'Date',
      'copyright': 'Copyright',
      'dedication': 'Dedication',
      'abstract': 'Abstract',
      'attention': 'Attention!',
      'caution': 'Caution!',
      'danger': '!DANGER!',
      'error': 'Error',
      'hint': 'Hint',
      'important': 'Important',
      'note': 'Note',
      'tip': 'Tip',
      'warning': 'Warning',
      'contents': 'Contents'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      # language-dependent: fixed
      'author': 'author',
      'authors': 'authors',
      'organization': 'organization',
      'address': 'address',
      'contact': 'contact',
      'version': 'version',
      'revision': 'revision',
      'status': 'status',
      'date': 'date',
      'copyright': 'copyright',
      'dedication': 'dedication',
      'abstract': 'abstract'}
"""English (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/es.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Marcelo Huerta San Martin
# Contact: mghsm@uol.com.ar
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Spanish-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      'author': u'Autor',
      'authors': u'Autores',
      'organization': u'Organizaci\u00f3n',
      'address': u'Direcci\u00f3n',
      'contact': u'Contacto',
      'version': u'Versi\u00f3n',
      'revision': u'Revisi\u00f3n',
      'status': u'Estado',
      'date': u'Fecha',
      'copyright': u'Copyright',
      'dedication': u'Dedicatoria',
      'abstract': u'Resumen',
      'attention': u'\u00a1Atenci\u00f3n!',
      'caution': u'\u00a1Precauci\u00f3n!',
      'danger': u'\u00a1PELIGRO!',
      'error': u'Error',
      'hint': u'Sugerencia',
      'important': u'Importante',
      'note': u'Nota',
      'tip': u'Consejo',
      'warning': u'Advertencia',
      'contents': u'Contenido'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      u'autor': 'author',
      u'autores': 'authors',
      u'organizaci\u00f3n': 'organization',
      u'direcci\u00f3n': 'address',
      u'contacto': 'contact',
      u'versi\u00f3n': 'version',
      u'revisi\u00f3n': 'revision',
      u'estado': 'status',
      u'fecha': 'date',
      u'copyright': 'copyright',
      u'dedicatoria': 'dedication',
      u'resumen': 'abstract'}
"""Spanish (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/fr.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Stefane Fermigier
# Contact: sf@fermigier.com
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
French-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      u'author': u'Auteur',
      u'authors': u'Auteurs',
      u'organization': u'Organisation',
      u'address': u'Adresse',
      u'contact': u'Contact',
      u'version': u'Version',
      u'revision': u'R\u00e9vision',
      u'status': u'Statut',
      u'date': u'Date',
      u'copyright': u'Copyright',
      u'dedication': u'D\u00e9dicace',
      u'abstract': u'R\u00e9sum\u00e9',
      u'attention': u'Attention!',
      u'caution': u'Avertissement!',
      u'danger': u'!DANGER!',
      u'error': u'Erreur',
      u'hint': u'Indication',
      u'important': u'Important',
      u'note': u'Note',
      u'tip': u'Astuce',
      u'warning': u'Avis',
      u'contents': u'Sommaire'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      u'auteur': u'author',
      u'auteurs': u'authors',
      u'organisation': u'organization',
      u'adresse': u'address',
      u'contact': u'contact',
      u'version': u'version',
      u'r\u00e9vision': u'revision',
      u'statut': u'status',
      u'date': u'date',
      u'copyright': u'copyright',
      u'd\u00e9dicace': u'dedication',
      u'r\u00e9sum\u00e9': u'abstract'}
"""French (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/it.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Nicola Larosa
# Contact: docutils@tekNico.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Italian-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      'author': 'Autore',
      'authors': 'Autori',
      'organization': 'Organizzazione',
      'address': 'Indirizzo',
      'contact': 'Contatti',
      'version': 'Versione',
      'revision': 'Revisione',
      'status': 'Status',
      'date': 'Data',
      'copyright': 'Copyright',
      'dedication': 'Dedica',
      'abstract': 'Riassunto',
      'attention': 'Attenzione!',
      'caution': 'Cautela!',
      'danger': '!PERICOLO!',
      'error': 'Errore',
      'hint': 'Suggerimento',
      'important': 'Importante',
      'note': 'Nota',
      'tip': 'Consiglio',
      'warning': 'Avvertenza',
      'contents': 'Indice'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      'autore': 'author',
      'autori': 'authors',
      'organizzazione': 'organization',
      'indirizzo': 'address',
      'contatti': 'contact',
      'versione': 'version',
      'revisione': 'revision',
      'status': 'status',
      'data': 'date',
      'copyright': 'copyright',
      'dedica': 'dedication',
      'riassunto': 'abstract'}
"""Italian (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/ru.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Roman Suzi
# Contact: rnd@onego.ru
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Russian-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      u'abstract': u'\u0410\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f',
      u'address': u'\u0410\u0434\u0440\u0435\u0441',
      u'attention':
          u'\u0412\u043d\u0438\u043c\u0430\u043d\u0438\u0435!',
      u'author': u'\u0410\u0432\u0442\u043e\u0440',
      u'authors': u'\u0410\u0432\u0442\u043e\u0440\u044b',
      u'caution':
          u'\u041e\u0441\u0442\u043e\u0440\u043e\u0436\u043d\u043e!',
      u'contact': u'\u041a\u043e\u043d\u0442\u0430\u043a\u0442',
      u'contents':
          u'\u0421\u043e\u0434\u0435\u0440\u0436\u0430\u043d\u0438\u0435',
      # NOTE(review): the extraction split this literal mid-phrase; the
      # space between the two words is assumed from upstream docutils.
      u'copyright': u'\u041f\u0440\u0430\u0432\u0430 '
                    u'\u043a\u043e\u043f\u0438\u0440\u043e\u0432\u0430'
                    u'\u043d\u0438\u044f',
      u'danger': u'\u041e\u041f\u0410\u0421\u041d\u041e!',
      u'date': u'\u0414\u0430\u0442\u0430',
      u'dedication':
          u'\u041f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435',
      u'error': u'\u041e\u0448\u0438\u0431\u043a\u0430',
      u'hint': u'\u0421\u043e\u0432\u0435\u0442',
      u'important': u'\u0412\u0430\u0436\u043d\u043e',
      u'note':
          u'\u041f\u0440\u0438\u043c\u0435\u0447\u0430\u043d\u0438\u0435',
      u'organization':
          u'\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446'
          u'\u0438\u044f',
      u'revision':
          u'\u0420\u0435\u0434\u0430\u043a\u0446\u0438\u044f',
      u'status': u'\u0421\u0442\u0430\u0442\u0443\u0441',
      u'tip':
          u'\u041f\u043e\u0434\u0441\u043a\u0430\u0437\u043a\u0430',
      u'version': u'\u0412\u0435\u0440\u0441\u0438\u044f',
      u'warning': u'\u041f\u0440\u0435\u0434\u0443\u043f\u0440\u0435\u0436'
                  u'\u0434\u0435\u043d\u0438\u0435'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      u'\u0410\u043d\u043d\u043e\u0442\u0430\u0446\u0438\u044f':
          u'abstract',
      u'\u0410\u0434\u0440\u0435\u0441': u'address',
      u'\u0410\u0432\u0442\u043e\u0440': u'author',
      u'\u0410\u0432\u0442\u043e\u0440\u044b': u'authors',
      u'\u041a\u043e\u043d\u0442\u0430\u043a\u0442': u'contact',
      # NOTE(review): space between the two words assumed, as above.
      u'\u041f\u0440\u0430\u0432\u0430 \u043a\u043e\u043f\u0438\u0440\u043e'
      u'\u0432\u0430\u043d\u0438\u044f': u'copyright',
      u'\u0414\u0430\u0442\u0430': u'date',
      u'\u041f\u043e\u0441\u0432\u044f\u0449\u0435\u043d\u0438\u0435':
          u'dedication',
      u'\u041e\u0440\u0433\u0430\u043d\u0438\u0437\u0430\u0446\u0438\u044f':
          u'organization',
      u'\u0420\u0435\u0434\u0430\u043a\u0446\u0438\u044f': u'revision',
      u'\u0421\u0442\u0430\u0442\u0443\u0441': u'status',
      u'\u0412\u0435\u0440\u0441\u0438\u044f': u'version'}
"""Russian (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/sk.py
deleted
100644 → 0
View file @
e1142d2d
# :Author: Miroslav Vasko
# :Contact: zemiak@zoznam.sk
# :Revision: $Revision: 1.3 $
# :Date: $Date: 2003/07/10 15:49:34 $
# :Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Slovak-language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      'author': u'Autor',
      'authors': u'Autori',
      'organization': u'Organiz\u00E1cia',
      'address': u'Adresa',
      'contact': u'Kontakt',
      'version': u'Verzia',
      'revision': u'Rev\u00EDzia',
      'status': u'Stav',
      'date': u'D\u00E1tum',
      'copyright': u'Copyright',
      'dedication': u'Venovanie',
      'abstract': u'Abstraktne',
      'attention': u'Pozor!',
      'caution': u'Opatrne!',
      'danger': u'!NEBEZPE\u010cENSTVO!',
      'error': u'Chyba',
      'hint': u'Rada',
      'important': u'D\u00F4le\u017Eit\u00E9',
      'note': u'Pozn\u00E1mka',
      'tip': u'Tip',
      'warning': u'Varovanie',
      'contents': u'Obsah'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      u'autor': 'author',
      u'autori': 'authors',
      u'organiz\u00E1cia': 'organization',
      u'adresa': 'address',
      u'kontakt': 'contact',
      u'verzia': 'version',
      u'rev\u00EDzia': 'revision',
      u'stav': 'status',
      u'd\u00E1tum': 'date',
      u'copyright': 'copyright',
      u'venovanie': 'dedication',
      u'abstraktne': 'abstract'}
"""Slovak (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/languages/sv.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:34 $
# Copyright: This module has been placed in the public domain.

# New language mappings are welcome.  Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>.  Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.

"""
Swedish language mappings for language-dependent features of Docutils.
"""

__docformat__ = 'reStructuredText'

labels = {
      'author': u'F\u00f6rfattare',
      'authors': u'F\u00f6rfattare',
      'organization': u'Organisation',
      'address': u'Adress',
      'contact': u'Kontakt',
      'version': u'Version',
      'revision': u'Revision',
      'status': u'Status',
      'date': u'Datum',
      'copyright': u'Copyright',
      'dedication': u'Dedikation',
      'abstract': u'Sammanfattning',
      'attention': u'Observera!',
      'caution': u'Varning!',
      'danger': u'FARA!',
      'error': u'Fel',
      'hint': u'V\u00e4gledning',
      'important': u'Viktigt',
      'note': u'Notera',
      'tip': u'Tips',
      'warning': u'Varning',
      'contents': u'Inneh\u00e5ll'}
"""Mapping of node class name to label text."""

bibliographic_fields = {
      # 'Author' and 'Authors' identical in Swedish; assume the plural:
      u'f\u00f6rfattare': 'authors',
      u' n/a': 'author',
      u'organisation': 'organization',
      u'adress': 'address',
      u'kontakt': 'contact',
      u'version': 'version',
      u'revision': 'revision',
      u'status': 'status',
      u'datum': 'date',
      u'copyright': 'copyright',
      u'dedikation': 'dedication',
      u'sammanfattning': 'abstract'}
"""Swedish (lowcased) to canonical name mapping for bibliographic fields."""

author_separators = [';', ',']
"""List of separator strings for the 'Authors' bibliographic field. Tried in
order."""
lib/python/docutils/parsers/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:39 $
# Copyright: This module has been placed in the public domain.

"""
This package contains Docutils parser modules.
"""

__docformat__ = 'reStructuredText'

from docutils import Component


class Parser(Component):

    """Abstract base class for Docutils parsers."""

    component_type = 'parser'

    def parse(self, inputstring, document):
        """Override to parse `inputstring` into document tree `document`."""
        raise NotImplementedError('subclass must override this method')

    def setup_parse(self, inputstring, document):
        """Initial parse setup.  Call at start of `self.parse()`."""
        self.inputstring = inputstring
        self.document = document
        # Route parser-generated system messages into the document.
        document.reporter.attach_observer(document.note_parse_message)

    def finish_parse(self):
        """Finalize parse details.  Call at end of `self.parse()`."""
        self.document.reporter.detach_observer(
            self.document.note_parse_message)


# Alternate spellings accepted by `get_parser_class`, all mapping to the
# reStructuredText parser module name.
_parser_aliases = {
      'restructuredtext': 'rst',
      'rest': 'rst',
      'restx': 'rst',
      'rtxt': 'rst'}


def get_parser_class(parser_name):
    """Return the Parser class from the `parser_name` module."""
    parser_name = parser_name.lower()
    # `in` replaces the removed-in-Python-3 dict.has_key().
    if parser_name in _parser_aliases:
        parser_name = _parser_aliases[parser_name]
    module = __import__(parser_name, globals(), locals())
    return module.Parser
lib/python/docutils/parsers/rst/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:40 $
# Copyright: This module has been placed in the public domain.

"""
This is ``docutils.parsers.rst`` package. It exports a single class, `Parser`,
the reStructuredText parser.

Usage
=====

1. Create a parser::

       parser = docutils.parsers.rst.Parser()

   Several optional arguments may be passed to modify the parser's behavior.
   Please see `Customizing the Parser`_ below for details.

2. Gather input (a multi-line string), by reading a file or the standard
   input::

       input = sys.stdin.read()

3. Create a new empty `docutils.nodes.document` tree::

       document = docutils.utils.new_document(source, settings)

   See `docutils.utils.new_document()` for parameter details.

4. Run the parser, populating the document tree::

       parser.parse(input, document)

Parser Overview
===============

The reStructuredText parser is implemented as a state machine, examining its
input one line at a time.  To understand how the parser works, please first
become familiar with the `docutils.statemachine` module, then see the
`states` module.

Customizing the Parser
----------------------

Anything that isn't already customizable is that way simply because that type
of customizability hasn't been implemented yet.  Patches welcome!

When instantiating an object of the `Parser` class, two parameters may be
passed: ``rfc2822`` and ``inliner``.  Pass ``rfc2822=1`` to enable an initial
RFC-2822 style header block, parsed as a "field_list" element (with "class"
attribute set to "rfc2822").  Currently this is the only body-level element
which is customizable without subclassing.  (Tip: subclass `Parser` and change
its "state_classes" and "initial_state" attributes to refer to new classes.
Contact the author if you need more details.)

The ``inliner`` parameter takes an instance of `states.Inliner` or a subclass.
It handles inline markup recognition.  A common extension is the addition of
further implicit hyperlinks, like "RFC 2822".  This can be done by subclassing
`states.Inliner`, adding a new method for the implicit markup, and adding a
``(pattern, method)`` pair to the "implicit_dispatch" attribute of the
subclass.  See `states.Inliner.implicit_inline()` for details.  Explicit
inline markup can be customized in a `states.Inliner` subclass via the
``patterns.initial`` and ``dispatch`` attributes (and new methods as
appropriate).
"""

__docformat__ = 'reStructuredText'

import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states


class Parser(docutils.parsers.Parser):

    """The reStructuredText parser."""

    supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx')
    """Aliases this parser supports."""

    settings_spec = (
        'reStructuredText Parser Options',
        None,
        (('Recognize and link to PEP references (like "PEP 258").',
          ['--pep-references'],
          {'action': 'store_true'}),
         ('Recognize and link to RFC references (like "RFC 822").',
          ['--rfc-references'],
          {'action': 'store_true'}),
         ('Set number of spaces for tab expansion (default 8).',
          ['--tab-width'],
          {'metavar': '<width>', 'type': 'int', 'default': 8}),
         ('Remove spaces before footnote references.',
          ['--trim-footnote-reference-space'],
          {'action': 'store_true'}),))

    def __init__(self, rfc2822=None, inliner=None):
        # An RFC-2822 header block, when enabled, is parsed by a dedicated
        # initial state; otherwise parsing starts in the ordinary body state.
        if rfc2822:
            self.initial_state = 'RFC2822Body'
        else:
            self.initial_state = 'Body'
        self.state_classes = states.state_classes
        self.inliner = inliner

    def parse(self, inputstring, document):
        """Parse `inputstring` and populate `document`, a document tree."""
        self.setup_parse(inputstring, document)
        debug = document.reporter[''].debug
        self.statemachine = states.RSTStateMachine(
              state_classes=self.state_classes,
              initial_state=self.initial_state,
              debug=debug)
        inputlines = docutils.statemachine.string2lines(
              inputstring, tab_width=document.settings.tab_width,
              convert_whitespace=1)
        self.statemachine.run(inputlines, document, inliner=self.inliner)
        self.finish_parse()
lib/python/docutils/parsers/rst/directives/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.4 $
# Date: $Date: 2003/09/29 20:09:25 $
# Copyright: This module has been placed in the public domain.
"""
This package contains directive implementation modules.
The interface for directive functions is as follows::
def directive_fn(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
code...
# Set function attributes:
directive_fn.arguments = ...
directive_fn.options = ...
directive_fn.content = ...
Parameters:
- ``name`` is the directive type or name.
- ``arguments`` is a list of positional arguments.
- ``options`` is a dictionary mapping option names to values.
- ``content`` is a list of strings, the directive content.
- ``lineno`` is the line number of the first line of the directive.
- ``content_offset`` is the line offset of the first line of the content from
the beginning of the current input. Used when initiating a nested parse.
- ``block_text`` is a string containing the entire directive. Include it as
the content of a literal block in a system message if there is a problem.
- ``state`` is the state which called the directive function.
- ``state_machine`` is the state machine which controls the state which called
the directive function.
Function attributes, interpreted by the directive parser (which calls the
directive function):
- ``arguments``: A 3-tuple specifying the expected positional arguments, or
``None`` if the directive has no arguments. The 3 items in the tuple are
``(required, optional, whitespace OK in last argument)``:
1. The number of required arguments.
2. The number of optional arguments.
3. A boolean, indicating if the final argument may contain whitespace.
Arguments are normally single whitespace-separated words. The final
argument may contain whitespace if the third item in the argument spec tuple
is 1/True. If the form of the arguments is more complex, specify only one
argument (either required or optional) and indicate that final whitespace is
OK; the client code must do any context-sensitive parsing.
- ``options``: A dictionary, mapping known option names to conversion
functions such as `int` or `float`. ``None`` or an empty dict implies no
options to parse. Several directive option conversion functions are defined
in this module.
- ``content``: A boolean; true if content is allowed. Client code must handle
the case where content is required but not supplied (an empty content list
will be supplied).
Directive functions return a list of nodes which will be inserted into the
document tree at the point where the directive was encountered (can be an
empty list).
See `Creating reStructuredText Directives`_ for more information.
.. _Creating reStructuredText Directives:
http://docutils.sourceforge.net/spec/howto/rst-directives.html
"""
__docformat__
=
'reStructuredText'
from
docutils
import
nodes
from
docutils.parsers.rst.languages
import
en
as
_fallback_language_module
# Registry of built-in directives.  Keys are canonical (English, lowercase)
# directive names; values name the implementation: (submodule of this
# package, function within that submodule).  Commented-out entries are
# planned or disabled directives.
_directive_registry = {
      'attention': ('admonitions', 'attention'),
      'caution': ('admonitions', 'caution'),
      'danger': ('admonitions', 'danger'),
      'error': ('admonitions', 'error'),
      'important': ('admonitions', 'important'),
      'note': ('admonitions', 'note'),
      'tip': ('admonitions', 'tip'),
      'hint': ('admonitions', 'hint'),
      'warning': ('admonitions', 'warning'),
      'admonition': ('admonitions', 'admonition'),
      'sidebar': ('body', 'sidebar'),
      'topic': ('body', 'topic'),
      'line-block': ('body', 'line_block'),
      'parsed-literal': ('body', 'parsed_literal'),
      'rubric': ('body', 'rubric'),
      'epigraph': ('body', 'epigraph'),
      'highlights': ('body', 'highlights'),
      'pull-quote': ('body', 'pull_quote'),
      #'questions': ('body', 'question_list'),
      'image': ('images', 'image'),
      'figure': ('images', 'figure'),
      'contents': ('parts', 'contents'),
      'sectnum': ('parts', 'sectnum'),
      #'footnotes': ('parts', 'footnotes'),
      #'citations': ('parts', 'citations'),
      'target-notes': ('references', 'target_notes'),
      'meta': ('html', 'meta'),
      #'imagemap': ('html', 'imagemap'),
      'raw': ('misc', 'raw'),
      # 'include': ('misc', 'include'),
      'replace': ('misc', 'replace'),
      'unicode': ('misc', 'unicode_directive'),
      'class': ('misc', 'class_directive'),
      'restructuredtext-test-directive': ('misc', 'directive_test_function'),}
"""Mapping of directive name to (module name, function name).  The directive
name is canonical & must be lowercase.  Language-dependent names are defined
in the ``language`` subpackage."""

_modules = {}
"""Cache of imported directive modules."""

_directives = {}
"""Cache of imported directive functions."""
def
directive
(
directive_name
,
language_module
,
document
):
"""
Locate and return a directive function from its language-dependent name.
If not found in the current language, check English. Return None if the
named directive cannot be found.
"""
normname
=
directive_name
.
lower
()
messages
=
[]
msg_text
=
[]
if
_directives
.
has_key
(
normname
):
return
_directives
[
normname
],
messages
canonicalname
=
None
try
:
canonicalname
=
language_module
.
directives
[
normname
]
except
AttributeError
,
error
:
msg_text
.
append
(
'Problem retrieving directive entry from language '
'module %r: %s.'
%
(
language_module
,
error
))
except
KeyError
:
msg_text
.
append
(
'No directive entry for "%s" in module "%s".'
%
(
directive_name
,
language_module
.
__name__
))
if
not
canonicalname
:
try
:
canonicalname
=
_fallback_language_module
.
directives
[
normname
]
msg_text
.
append
(
'Using English fallback for directive "%s".'
%
directive_name
)
except
KeyError
:
msg_text
.
append
(
'Trying "%s" as canonical directive name.'
%
directive_name
)
# The canonical name should be an English name, but just in case:
canonicalname
=
normname
if
msg_text
:
message
=
document
.
reporter
.
info
(
'
\
n
'
.
join
(
msg_text
),
line
=
document
.
current_line
)
messages
.
append
(
message
)
try
:
modulename
,
functionname
=
_directive_registry
[
canonicalname
]
except
KeyError
:
return
None
,
messages
if
_modules
.
has_key
(
modulename
):
module
=
_modules
[
modulename
]
else
:
try
:
module
=
__import__
(
modulename
,
globals
(),
locals
())
except
ImportError
:
return
None
,
messages
try
:
function
=
getattr
(
module
,
functionname
)
_directives
[
normname
]
=
function
except
AttributeError
:
return
None
,
messages
return
function
,
messages
def register_directive(name, directive):
    """Register a nonstandard application-defined directive function."""
    # Stored directly in the function cache, so it shadows any registry
    # entry with the same (lowercase) name.
    _directives[name] = directive
def flag(argument):
    """
    Check for a valid flag option (no argument) and return ``None``.
    (Directive option conversion function.)

    Raise ``ValueError`` if an argument is found.
    """
    # A flag is valid only when the option carried no (non-blank) text.
    if not (argument and argument.strip()):
        return None
    raise ValueError('no argument is allowed; "%s" supplied' % argument)
def unchanged_required(argument):
    """
    Return the argument text, unchanged.
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    return argument  # unchanged!
def unchanged(argument):
    """
    Return the argument text, unchanged.
    (Directive option conversion function.)

    No argument implies empty string ("").
    """
    if argument is None:
        return u''
    return argument  # unchanged!
def path(argument):
    """
    Return the path argument unwrapped (with newlines removed).
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found or if the path contains
    internal whitespace.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    # Re-join a wrapped path: strip each physical line, then concatenate.
    joined = ''.join([piece.strip() for piece in argument.splitlines()])
    if joined.find(' ') != -1:
        raise ValueError('path contains whitespace')
    return joined
def nonnegative_int(argument):
    """
    Check for a nonnegative integer argument; raise ``ValueError`` if not.
    (Directive option conversion function.)
    """
    converted = int(argument)
    if converted >= 0:
        return converted
    raise ValueError('negative value; must be positive or zero')
def class_option(argument):
    """
    Convert the argument into an ID-compatible string and return it.
    (Directive option conversion function.)

    Raise ``ValueError`` if no argument is found.
    """
    if argument is None:
        raise ValueError('argument required but none supplied')
    # Normalization (lowercasing, hyphenation, etc.) is delegated to
    # `nodes.make_id`.
    return nodes.make_id(argument)
def format_values(values):
    """
    Return `values` as a quoted, comma-separated list ending in 'or "last"',
    for use in option-conversion error messages.
    """
    quoted = ['"%s"' % item for item in values[:-1]]
    return '%s, or "%s"' % (', '.join(quoted), values[-1])
def choice(argument, values):
    """
    Directive option utility function, supplied to enable options whose
    argument must be a member of a finite set of possible values (must be
    lower case).  A custom conversion function must be written to use it.
    For example::

        from docutils.parsers.rst import directives

        def yesno(argument):
            return directives.choice(argument, ('yes', 'no'))

    Raise ``ValueError`` if no argument is found or if the argument's value
    is not valid (not an entry in the supplied list).
    """
    try:
        normalized = argument.lower().strip()
    except AttributeError:
        # A missing argument arrives as None, which has no .lower().
        raise ValueError('must supply an argument; choose from %s'
                         % format_values(values))
    if normalized not in values:
        raise ValueError('"%s" unknown; choose from %s'
                         % (argument, format_values(values)))
    return normalized
lib/python/docutils/parsers/rst/directives/admonitions.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
Admonition directives.
"""
__docformat__
=
'reStructuredText'
from
docutils.parsers.rst
import
states
,
directives
from
docutils
import
nodes
def make_admonition(node_class, name, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    """
    Build an admonition node of `node_class` from a directive invocation.

    Shared implementation for all admonition directives; returns a one-item
    node list, or a list containing a single error system message when the
    directive has no content.
    """
    if not content:
        error = state_machine.reporter.error(
            'The "%s" admonition is empty; content required.' % (name),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    text = '\n'.join(content)
    admonition_node = node_class(text)
    if arguments:
        # Only the generic "admonition" directive supplies a title argument;
        # parse it for inline markup and attach title + any parse messages.
        title_text = arguments[0]
        textnodes, messages = state.inline_text(title_text, lineno)
        admonition_node += nodes.title(title_text, '', *textnodes)
        admonition_node += messages
        # Explicit :class: option wins; otherwise derive a CSS class from
        # the title (e.g. "admonition-my-title").
        if options.has_key('class'):
            class_value = options['class']
        else:
            class_value = 'admonition-' + nodes.make_id(title_text)
        admonition_node.set_class(class_value)
    # Parse the directive body as nested reStructuredText into the node.
    state.nested_parse(content, content_offset, admonition_node)
    return [admonition_node]
def admonition(*args):
    """Generic titled admonition directive (title is the one argument)."""
    return make_admonition(nodes.admonition, *args)

# One required argument (the title), no optional ones; whitespace allowed
# in the final argument.
admonition.arguments = (1, 0, 1)
admonition.options = {'class': directives.class_option}
admonition.content = 1
# Specific admonition directives.  Each simply delegates to
# `make_admonition` with the matching node class from `docutils.nodes`;
# none of them takes a title argument, and all require content.

def attention(*args):
    return make_admonition(nodes.attention, *args)

attention.content = 1

def caution(*args):
    return make_admonition(nodes.caution, *args)

caution.content = 1

def danger(*args):
    return make_admonition(nodes.danger, *args)

danger.content = 1

def error(*args):
    return make_admonition(nodes.error, *args)

error.content = 1

def hint(*args):
    return make_admonition(nodes.hint, *args)

hint.content = 1

def important(*args):
    return make_admonition(nodes.important, *args)

important.content = 1

def note(*args):
    return make_admonition(nodes.note, *args)

note.content = 1

def tip(*args):
    return make_admonition(nodes.tip, *args)

tip.content = 1

def warning(*args):
    return make_admonition(nodes.warning, *args)

warning.content = 1
lib/python/docutils/parsers/rst/directives/body.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
Directives for additional body elements.
"""
__docformat__
=
'reStructuredText'
import
sys
from
docutils
import
nodes
from
docutils.parsers.rst
import
directives
def topic(name, arguments, options, content, lineno,
          content_offset, block_text, state, state_machine,
          node_class=nodes.topic):
    """
    Build a titled `node_class` element (topic by default; `sidebar` reuses
    this with ``node_class=nodes.sidebar``) from the directive's one title
    argument and its content block.
    """
    # Topics may only appear at the document or section level; when titles
    # cannot match, we are inside a body element and must refuse.
    if not state_machine.match_titles:
        error = state_machine.reporter.error(
            'The "%s" directive may not be used within topics, sidebars, '
            'or body elements.' % name,
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    if not content:
        warning = state_machine.reporter.warning(
            'Content block expected for the "%s" directive; none found.'
            % name, nodes.literal_block(block_text, block_text),
            line=lineno)
        return [warning]
    title_text = arguments[0]
    textnodes, messages = state.inline_text(title_text, lineno)
    titles = [nodes.title(title_text, '', *textnodes)]
    # Optional :subtitle: (used by the sidebar directive's option spec).
    if options.has_key('subtitle'):
        textnodes, more_messages = state.inline_text(options['subtitle'],
                                                     lineno)
        titles.append(nodes.subtitle(options['subtitle'], '', *textnodes))
        messages.extend(more_messages)
    text = '\n'.join(content)
    node = node_class(text, *(titles + messages))
    if options.has_key('class'):
        node.set_class(options['class'])
    if text:
        # Parse the body as nested reStructuredText into the new node.
        state.nested_parse(content, content_offset, node)
    return [node]

topic.arguments = (1, 0, 1)
topic.options = {'class': directives.class_option}
topic.content = 1
def sidebar(name, arguments, options, content, lineno,
            content_offset, block_text, state, state_machine):
    """Sidebar directive: a topic rendered as a `nodes.sidebar` element."""
    return topic(name, arguments, options, content, lineno,
                 content_offset, block_text, state, state_machine,
                 node_class=nodes.sidebar)

sidebar.arguments = (1, 0, 1)
# Unlike plain topics, sidebars also accept a :subtitle: option.
sidebar.options = {'subtitle': directives.unchanged_required,
                   'class': directives.class_option}
sidebar.content = 1
def line_block(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine,
               node_class=nodes.line_block):
    """
    Build a `node_class` element (line block by default; `parsed_literal`
    reuses this with ``node_class=nodes.literal_block``) whose content is
    parsed for inline markup but not for block structure.
    """
    if not content:
        warning = state_machine.reporter.warning(
            'Content block expected for the "%s" directive; none found.'
            % name, nodes.literal_block(block_text, block_text),
            line=lineno)
        return [warning]
    text = '\n'.join(content)
    # Inline-only parse: line/literal blocks preserve the line structure.
    text_nodes, messages = state.inline_text(text, lineno)
    node = node_class(text, '', *text_nodes, **options)
    return [node] + messages

line_block.options = {'class': directives.class_option}
line_block.content = 1
def parsed_literal(name, arguments, options, content, lineno,
                   content_offset, block_text, state, state_machine):
    """Literal block whose content is still parsed for inline markup."""
    return line_block(name, arguments, options, content, lineno,
                      content_offset, block_text, state, state_machine,
                      node_class=nodes.literal_block)

parsed_literal.options = {'class': directives.class_option}
parsed_literal.content = 1
def rubric(name, arguments, options, content, lineno,
           content_offset, block_text, state, state_machine):
    """Rubric (informal heading) directive: one argument, inline-parsed."""
    rubric_text = arguments[0]
    textnodes, messages = state.inline_text(rubric_text, lineno)
    # Local name shadows the function only inside this scope; the
    # module-level attribute assignments below still see the function.
    rubric = nodes.rubric(rubric_text, '', *textnodes, **options)
    return [rubric] + messages

rubric.arguments = (1, 0, 1)
rubric.options = {'class': directives.class_option}
# The three directives below are block quotes distinguished only by the
# CSS class set on the resulting `block_quote` node.

def epigraph(name, arguments, options, content, lineno,
             content_offset, block_text, state, state_machine):
    block_quote, messages = state.block_quote(content, content_offset)
    block_quote.set_class('epigraph')
    return [block_quote] + messages

epigraph.content = 1

def highlights(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    block_quote, messages = state.block_quote(content, content_offset)
    block_quote.set_class('highlights')
    return [block_quote] + messages

highlights.content = 1

def pull_quote(name, arguments, options, content, lineno,
               content_offset, block_text, state, state_machine):
    block_quote, messages = state.block_quote(content, content_offset)
    block_quote.set_class('pull-quote')
    return [block_quote] + messages

pull_quote.content = 1
lib/python/docutils/parsers/rst/directives/html.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
Directives for typically HTML-specific constructs.
"""
__docformat__
=
'reStructuredText'
import
sys
from
docutils
import
nodes
,
utils
from
docutils.parsers.rst
import
states
from
docutils.transforms
import
components
def meta(name, arguments, options, content, lineno,
         content_offset, block_text, state, state_machine):
    """
    HTML "meta" directive: parse the content as a field list of meta tags
    using the specialized `MetaBody` state (via `metaSMkwargs`, defined at
    the bottom of this module) and return the resulting child nodes.
    """
    node = nodes.Element()
    if content:
        new_line_offset, blank_finish = state.nested_list_parse(
              content, content_offset, node, initial_state='MetaBody',
              blank_finish=1, state_machine_kwargs=metaSMkwargs)
        if (new_line_offset - content_offset) != len(content):
            # incomplete parse of block?
            error = state_machine.reporter.error(
                  'Invalid meta directive.',
                  nodes.literal_block(block_text, block_text), line=lineno)
            node += error
    else:
        error = state_machine.reporter.error(
              'Empty meta directive.',
              nodes.literal_block(block_text, block_text), line=lineno)
        node += error
    # The anonymous container is discarded; only its children are returned.
    return node.get_children()

meta.content = 1
def imagemap(name, arguments, options, content, lineno,
             content_offset, block_text, state, state_machine):
    # Unimplemented stub (the 'imagemap' registry entry is commented out);
    # produces no nodes.
    return []
class MetaBody(states.SpecializedBody):

    """Parser state that recognizes only meta field-list entries."""

    class meta(nodes.Special, nodes.PreBibliographic, nodes.Element):
        """HTML-specific "meta" element."""
        pass

    def field_marker(self, match, context, next_state):
        """Meta element."""
        node, blank_finish = self.parsemeta(match)
        self.parent += node
        return [], next_state, []

    def parsemeta(self, match):
        """
        Parse one ``:name attr=val ...: content`` field into a `meta` node
        wrapped in a writer-filtering `pending` node (HTML output only).
        Returns ``(node_or_message, blank_finish)``.
        """
        name = self.parse_field_marker(match)
        indented, indent, line_offset, blank_finish = \
              self.state_machine.get_first_known_indented(match.end())
        node = self.meta()
        # Wrap in a Filter transform so the meta element only survives
        # into HTML writer output.
        pending = nodes.pending(components.Filter,
                                {'component': 'writer',
                                 'format': 'html',
                                 'nodes': [node]})
        node['content'] = ' '.join(indented)
        if not indented:
            line = self.state_machine.line
            msg = self.reporter.info(
                  'No content for meta tag "%s".' % name,
                  nodes.literal_block(line, line),
                  line=self.state_machine.abs_line_number())
            return msg, blank_finish
        tokens = name.split()
        try:
            # First token may be a name="value" pair or a bare name.
            attname, val = utils.extract_name_value(tokens[0])[0]
            node[attname.lower()] = val
        except utils.NameValueError:
            node['name'] = tokens[0]
        for token in tokens[1:]:
            # Remaining tokens must all be name="value" attributes.
            try:
                attname, val = utils.extract_name_value(token)[0]
                node[attname.lower()] = val
            except utils.NameValueError, detail:
                line = self.state_machine.line
                msg = self.reporter.error(
                      'Error parsing meta tag attribute "%s": %s.'
                      % (token, detail), nodes.literal_block(line, line),
                      line=self.state_machine.abs_line_number())
                return msg, blank_finish
        self.document.note_pending(pending)
        return pending, blank_finish

# State-machine kwargs handed to `nested_list_parse` by the meta directive.
metaSMkwargs = {'state_classes': (MetaBody,)}
lib/python/docutils/parsers/rst/directives/images.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
Directives for figures and simple images.
"""
__docformat__
=
'reStructuredText'
import
sys
from
docutils
import
nodes
,
utils
from
docutils.parsers.rst
import
directives
try
:
import
Image
# PIL
except
ImportError
:
Image
=
None
# Valid values for the image :align: option.
align_values = ('top', 'middle', 'bottom', 'left', 'center', 'right')

def align(argument):
    """Conversion function for the image :align: option."""
    return directives.choice(argument, align_values)
def image(name, arguments, options, content, lineno,
          content_offset, block_text, state, state_machine):
    """
    Image directive: build a `nodes.image` from the URI argument and the
    converted options.  Returns a one-element node list (or an error).
    """
    # Re-join a URI that was wrapped across lines.
    reference = ''.join(arguments[0].split('\n'))
    if reference.find(' ') != -1:
        error = state_machine.reporter.error(
              'Image URI contains whitespace.',
              nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    options['uri'] = reference
    image_node = nodes.image(block_text, **options)
    return [image_node]

image.arguments = (1, 0, 1)
image.options = {'alt': directives.unchanged,
                 'height': directives.nonnegative_int,
                 'width': directives.nonnegative_int,
                 'scale': directives.nonnegative_int,
                 'align': align,
                 'class': directives.class_option}
def figure(name, arguments, options, content, lineno,
           content_offset, block_text, state, state_machine):
    """
    Figure directive: an image wrapped in a `figure` node, with an optional
    caption (first content paragraph) and legend (remaining content).
    """
    # Pop figure-only options before delegating the rest to `image`.
    # setdefault(...) returns None when the key is absent, so these two
    # locals default to None.
    figwidth = options.setdefault('figwidth')
    figclass = options.setdefault('figclass')
    del options['figwidth']
    del options['figclass']
    (image_node,) = image(name, arguments, options, content, lineno,
                          content_offset, block_text, state, state_machine)
    if isinstance(image_node, nodes.system_message):
        # `image` returned an error message instead of an image node.
        return [image_node]
    figure_node = nodes.figure('', image_node)
    if figwidth == 'image':
        # :figwidth: image -> measure the actual image file, if PIL is
        # available (Image is None otherwise).
        if Image:
            # PIL doesn't like Unicode paths:
            try:
                i = Image.open(str(image_node['uri']))
            except (IOError, UnicodeError):
                pass
            else:
                figure_node['width'] = i.size[0]
    elif figwidth is not None:
        figure_node['width'] = figwidth
    if figclass:
        figure_node.set_class(figclass)
    if content:
        node = nodes.Element()          # anonymous container for parsing
        state.nested_parse(content, content_offset, node)
        first_node = node[0]
        if isinstance(first_node, nodes.paragraph):
            caption = nodes.caption(first_node.rawsource, '',
                                    *first_node.children)
            figure_node += caption
        elif not (isinstance(first_node, nodes.comment)
                  and len(first_node) == 0):
            # An empty comment is the documented way to skip the caption.
            error = state_machine.reporter.error(
                  'Figure caption must be a paragraph or empty comment.',
                  nodes.literal_block(block_text, block_text), line=lineno)
            return [figure_node, error]
        if len(node) > 1:
            figure_node += nodes.legend('', *node[1:])
    return [figure_node]

def figwidth_value(argument):
    """:figwidth: accepts the literal 'image' or a nonnegative integer."""
    if argument.lower() == 'image':
        return 'image'
    else:
        return directives.nonnegative_int(argument)

figure.arguments = (1, 0, 1)
figure.options = {'figwidth': figwidth_value,
                  'figclass': directives.class_option}
# Figures accept all image options as well.
figure.options.update(image.options)
figure.content = 1
lib/python/docutils/parsers/rst/directives/misc.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger, Dethe Elza
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""Miscellaneous directives."""
__docformat__
=
'reStructuredText'
import
sys
import
os.path
import
re
from
urllib2
import
urlopen
,
URLError
from
docutils
import
io
,
nodes
,
statemachine
,
utils
from
docutils.parsers.rst
import
directives
,
states
from
docutils.transforms
import
misc
def include(name, arguments, options, content, lineno,
            content_offset, block_text, state, state_machine):
    """Include a reST file as part of the content of this reST file."""
    # Resolve the included path relative to the file containing the
    # directive (not the process's working directory).
    source = state_machine.input_lines.source(
        lineno - state_machine.input_offset - 1)
    source_dir = os.path.dirname(os.path.abspath(source))
    path = ''.join(arguments[0].splitlines())
    if path.find(' ') != -1:
        error = state_machine.reporter.error(
              '"%s" directive path contains whitespace.' % name,
              nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    path = os.path.normpath(os.path.join(source_dir, path))
    path = utils.relative_path(None, path)
    try:
        # handle_io_errors=None: let the IOError propagate so we can
        # report it as a directive-level severe message below.
        include_file = io.FileInput(
            source_path=path,
            encoding=state.document.settings.input_encoding,
            handle_io_errors=None)
    except IOError, error:
        severe = state_machine.reporter.severe(
              'Problems with "%s" directive path:\n%s: %s.'
              % (name, error.__class__.__name__, error),
              nodes.literal_block(block_text, block_text), line=lineno)
        return [severe]
    include_text = include_file.read()
    if options.has_key('literal'):
        literal_block = nodes.literal_block(include_text, include_text,
                                            source=path)
        literal_block.line = 1
        # NOTE(review): returns the node itself, not a one-element list as
        # the directive API documents; Element is iterable so callers that
        # iterate the result see its children — confirm intended.
        return literal_block
    else:
        # Splice the included lines into the state machine's input so they
        # are parsed as if they appeared here.
        include_lines = statemachine.string2lines(include_text,
                                                  convert_whitespace=1)
        state_machine.insert_input(include_lines, path)
        return []

include.arguments = (1, 0, 1)
include.options = {'literal': directives.flag}
def
raw
(
name
,
arguments
,
options
,
content
,
lineno
,
content_offset
,
block_text
,
state
,
state_machine
):
"""
Pass through content unchanged
Content is included in output based on type argument
Content may be included inline (content section of directive) or
imported from a file or url.
"""
attributes
=
{
'format'
:
arguments
[
0
]}
if
content
:
if
options
.
has_key
(
'file'
)
or
options
.
has_key
(
'url'
):
error
=
state_machine
.
reporter
.
error
(
'"%s" directive may not both specify an external file and '
'have content.'
%
name
,
nodes
.
literal_block
(
block_text
,
block_text
),
line
=
lineno
)
return
[
error
]
text
=
'
\
n
'
.
join
(
content
)
elif
options
.
has_key
(
'file'
):
if
options
.
has_key
(
'url'
):
error
=
state_machine
.
reporter
.
error
(
'The "file" and "url" options may not be simultaneously '
'specified for the "%s" directive.'
%
name
,
nodes
.
literal_block
(
block_text
,
block_text
),
line
=
lineno
)
return
[
error
]
source_dir
=
os
.
path
.
dirname
(
os
.
path
.
abspath
(
state
.
document
.
current_source
))
path
=
os
.
path
.
normpath
(
os
.
path
.
join
(
source_dir
,
options
[
'file'
]))
path
=
utils
.
relative_path
(
None
,
path
)
try
:
raw_file
=
open
(
path
)
except
IOError
,
error
:
severe
=
state_machine
.
reporter
.
severe
(
'Problems with "%s" directive path:
\
n
%s.'
%
(
name
,
error
),
nodes
.
literal_block
(
block_text
,
block_text
),
line
=
lineno
)
return
[
severe
]
text
=
raw_file
.
read
()
raw_file
.
close
()
attributes
[
'source'
]
=
path
elif
options
.
has_key
(
'url'
):
try
:
raw_file
=
urlopen
(
options
[
'url'
])
except
(
URLError
,
IOError
,
OSError
),
error
:
severe
=
state_machine
.
reporter
.
severe
(
'Problems with "%s" directive URL "%s":
\
n
%s.'
%
(
name
,
options
[
'url'
],
error
),
nodes
.
literal_block
(
block_text
,
block_text
),
line
=
lineno
)
return
[
severe
]
text
=
raw_file
.
read
()
raw_file
.
close
()
attributes
[
'source'
]
=
options
[
'file'
]
else
:
error
=
state_machine
.
reporter
.
warning
(
'The "%s" directive requires content; none supplied.'
%
(
name
),
nodes
.
literal_block
(
block_text
,
block_text
),
line
=
lineno
)
return
[
error
]
raw_node
=
nodes
.
raw
(
''
,
text
,
**
attributes
)
return
[
raw_node
]
raw
.
arguments
=
(
1
,
0
,
1
)
raw
.
options
=
{
'file'
:
directives
.
path
,
'url'
:
directives
.
path
}
raw
.
content
=
1
def replace(name, arguments, options, content, lineno,
            content_offset, block_text, state, state_machine):
    """
    Substitution-definition directive: parse the content and return the
    children of its single paragraph as the substitution's replacement.
    """
    # Only legal inside a substitution definition.
    if not isinstance(state, states.SubstitutionDef):
        error = state_machine.reporter.error(
              'Invalid context: the "%s" directive can only be used within a '
              'substitution definition.' % (name),
              nodes.literal_block(block_text, block_text), line=lineno)
        return [error]
    text = '\n'.join(content)
    element = nodes.Element(text)
    if text:
        state.nested_parse(content, content_offset, element)
        if len(element) != 1 or not isinstance(element[0], nodes.paragraph):
            # Content parsed to something other than exactly one paragraph:
            # forward any system messages (minus backrefs, which would
            # dangle once the container is discarded) plus our own error.
            messages = []
            for node in element:
                if isinstance(node, nodes.system_message):
                    if node.has_key('backrefs'):
                        del node['backrefs']
                    messages.append(node)
            error = state_machine.reporter.error(
                  'Error in "%s" directive: may contain a single paragraph '
                  'only.' % (name), line=lineno)
            messages.append(error)
            return messages
        else:
            # The replacement is the paragraph's inline children.
            return element[0].children
    else:
        error = state_machine.reporter.error(
              'The "%s" directive is empty; content required.' % (name),
              line=lineno)
        return [error]

replace.content = 1
def
unicode_directive
(
name
,
arguments
,
options
,
content
,
lineno
,
content_offset
,
block_text
,
state
,
state_machine
):
r"""
Convert Unicode character codes (numbers) to characters. Codes may be
decimal numbers, hexadecimal numbers (prefixed by ``0x``, ``x``, ``\
x``,
``U+``, ``u``, or ``\
u``; e.g. ``U+
262E``), or XML-style numeric character
entities (e.g. ``☮``). Text following ".." is a comment and is
ignored. Spaces are ignored, and any other text remains as-is.
"""
if
not
isinstance
(
state
,
states
.
SubstitutionDef
):
error
=
state_machine
.
reporter
.
error
(
'Invalid context: the "%s" directive can only be used within a '
'substitution definition.'
%
(
name
),
nodes
.
literal_block
(
block_text
,
block_text
),
line
=
lineno
)
return
[
error
]
codes
=
arguments
[
0
].
split
(
'.. '
)[
0
].
split
()
element
=
nodes
.
Element
()
for
code
in
codes
:
try
:
if
code
.
isdigit
():
element
+=
nodes
.
Text
(
unichr
(
int
(
code
)))
else
:
match
=
unicode_pattern
.
match
(
code
)
if
match
:
value
=
match
.
group
(
1
)
or
match
.
group
(
2
)
element
+=
nodes
.
Text
(
unichr
(
int
(
value
,
16
)))
else
:
element
+=
nodes
.
Text
(
code
)
except
ValueError
,
err
:
error
=
state_machine
.
reporter
.
error
(
'Invalid character code: %s
\
n
%s'
%
(
code
,
err
),
nodes
.
literal_block
(
block_text
,
block_text
),
line
=
lineno
)
return
[
error
]
return
element
.
children
unicode_directive
.
arguments
=
(
1
,
0
,
1
)
unicode_pattern
=
re
.
compile
(
r'(?:0x|x|\x00x|U\
+?|
\x00u)([0-9a-f]+)$|&#x([0-9a-f]+);$'
,
re
.
IGNORECASE
)
def class_directive(name, arguments, options, content, lineno,
                    content_offset, block_text, state, state_machine):
    """
    Set a "class" attribute on the next document element, via a pending
    `misc.ClassAttribute` transform.
    """
    class_value = nodes.make_id(arguments[0])
    if class_value:
        pending = nodes.pending(misc.ClassAttribute,
                                {'class': class_value, 'directive': name},
                                block_text)
        state_machine.document.note_pending(pending)
        return [pending]
    else:
        # make_id returned an empty string: the argument had no
        # ID-compatible characters.
        error = state_machine.reporter.error(
            'Invalid class attribute value for "%s" directive: %s'
            % (name, arguments[0]),
            nodes.literal_block(block_text, block_text), line=lineno)
        return [error]

class_directive.arguments = (1, 0, 0)
class_directive.content = 1
def directive_test_function(name, arguments, options, content, lineno,
                            content_offset, block_text, state, state_machine):
    """
    Test directive: report the received name, arguments, options, and
    content as an informational system message.
    """
    if content:
        text = '\n'.join(content)
        info = state_machine.reporter.info(
            'Directive processed. Type="%s", arguments=%r, options=%r, '
            'content:' % (name, arguments, options),
            nodes.literal_block(text, text), line=lineno)
    else:
        info = state_machine.reporter.info(
            'Directive processed. Type="%s", arguments=%r, options=%r, '
            'content: None' % (name, arguments, options), line=lineno)
    return [info]

directive_test_function.arguments = (0, 1, 1)
directive_test_function.options = {'option': directives.unchanged_required}
directive_test_function.content = 1
lib/python/docutils/parsers/rst/directives/parts.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
Directives for document parts.
"""
__docformat__
=
'reStructuredText'
from
docutils
import
nodes
from
docutils.transforms
import
parts
from
docutils.parsers.rst
import
directives
# Valid values for the contents directive's :backlinks: option.
backlinks_values = ('top', 'entry', 'none')

def backlinks(arg):
    """Validate :backlinks:; the literal 'none' is converted to None."""
    value = directives.choice(arg, backlinks_values)
    if value == 'none':
        return None
    else:
        return value
def contents(name, arguments, options, content, lineno,
             content_offset, block_text, state, state_machine):
    """Table of contents."""
    if arguments:
        # Optional explicit ToC title.
        title_text = arguments[0]
        text_nodes, messages = state.inline_text(title_text, lineno)
        title = nodes.title(title_text, '', *text_nodes)
    else:
        messages = []
        title = None
    # The actual table is generated later by the `parts.Contents` transform.
    pending = nodes.pending(parts.Contents, {'title': title}, block_text)
    pending.details.update(options)
    state_machine.document.note_pending(pending)
    return [pending] + messages

contents.arguments = (0, 1, 1)
contents.options = {'depth': directives.nonnegative_int,
                    'local': directives.flag,
                    'backlinks': backlinks,
                    'class': directives.class_option}
def sectnum(name, arguments, options, content, lineno,
            content_offset, block_text, state, state_machine):
    """Automatic section numbering.

    Schedules a pending node handled later by the parts.SectNum
    transform; options (e.g. ``:depth:``) are stored in its details.
    """
    sectnum_pending = nodes.pending(parts.SectNum)
    sectnum_pending.details.update(options)
    state_machine.document.note_pending(sectnum_pending)
    return [sectnum_pending]

# Registration metadata: only the ``:depth:`` option, an integer.
sectnum.options = {'depth': int}
lib/python/docutils/parsers/rst/directives/references.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:44 $
# Copyright: This module has been placed in the public domain.
"""
Directives for references and targets.
"""
__docformat__
=
'reStructuredText'
from
docutils
import
nodes
from
docutils.transforms
import
references
def target_notes(name, arguments, options, content, lineno,
                 content_offset, block_text, state, state_machine):
    """Target footnote generation.

    Schedules a pending node resolved by the references.TargetNotes
    transform.
    """
    notes_pending = nodes.pending(references.TargetNotes)
    state_machine.document.note_pending(notes_pending)
    return [notes_pending]
lib/python/docutils/parsers/rst/languages/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# Internationalization details are documented in
# <http://docutils.sf.net/spec/howto/i18n.html>.
"""
This package contains modules for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
_languages
=
{}
def
get_language
(
language_code
):
if
_languages
.
has_key
(
language_code
):
return
_languages
[
language_code
]
try
:
module
=
__import__
(
language_code
,
globals
(),
locals
())
except
ImportError
:
return
None
_languages
[
language_code
]
=
module
return
module
lib/python/docutils/parsers/rst/languages/af.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Jannie Hofmeyr
# Contact: jhsh@sun.ac.za
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Afrikaans-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
'aandag'
:
'attention'
,
'versigtig'
:
'caution'
,
'gevaar'
:
'danger'
,
'fout'
:
'error'
,
'wenk'
:
'hint'
,
'belangrik'
:
'important'
,
'nota'
:
'note'
,
'tip'
:
'tip'
,
# hint and tip both have the same translation: wenk
'waarskuwing'
:
'warning'
,
'vermaning'
:
'admonition'
,
'kantstreep'
:
'sidebar'
,
'onderwerp'
:
'topic'
,
'lynblok'
:
'line-block'
,
'parsed-literal (translation required)'
:
'parsed-literal'
,
'rubriek'
:
'rubric'
,
'epigraaf'
:
'epigraph'
,
'hoogtepunte'
:
'highlights'
,
'pull-quote (translation required)'
:
'pull-quote'
,
#'vrae': 'questions',
#'qa': 'questions',
#'faq': 'questions',
'meta'
:
'meta'
,
#'beeldkaart': 'imagemap',
'beeld'
:
'image'
,
'figuur'
:
'figure'
,
'insluiting'
:
'include'
,
'rou'
:
'raw'
,
'vervang'
:
'replace'
,
'unicode'
:
'unicode'
,
# should this be translated? unikode
'klas'
:
'class'
,
'inhoud'
:
'contents'
,
'sectnum'
:
'sectnum'
,
'section-numbering'
:
'sectnum'
,
#'voetnote': 'footnotes',
#'aanhalings': 'citations',
'teikennotas'
:
'target-notes'
,
'restructuredtext-test-directive'
:
'restructuredtext-test-directive'
}
"""Afrikaans name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
'afkorting'
:
'abbreviation'
,
'ab'
:
'abbreviation'
,
'akroniem'
:
'acronym'
,
'ac'
:
'acronym'
,
'indeks'
:
'index'
,
'i'
:
'index'
,
'voetskrif'
:
'subscript'
,
'sub'
:
'subscript'
,
'boskrif'
:
'superscript'
,
'sup'
:
'superscript'
,
'titelverwysing'
:
'title-reference'
,
'titel'
:
'title-reference'
,
't'
:
'title-reference'
,
'pep-verwysing'
:
'pep-reference'
,
'pep'
:
'pep-reference'
,
'rfc-verwysing'
:
'rfc-reference'
,
'rfc'
:
'rfc-reference'
,
'nadruk'
:
'emphasis'
,
'sterk'
:
'strong'
,
'literal (translation required)'
:
'literal'
,
'benoemde verwysing'
:
'named-reference'
,
'anonieme verwysing'
:
'anonymous-reference'
,
'voetnootverwysing'
:
'footnote-reference'
,
'aanhalingverwysing'
:
'citation-reference'
,
'vervangingsverwysing'
:
'substitution-reference'
,
'teiken'
:
'target'
,
'uri-verwysing'
:
'uri-reference'
,
'uri'
:
'uri-reference'
,
'url'
:
'uri-reference'
,}
"""Mapping of Afrikaans role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/de.py
deleted
100644 → 0
View file @
e1142d2d
# -*- coding: iso-8859-1 -*-
# Author: Engelbert Gruber
# Contact: grubert@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
German-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
'achtung'
:
'attention'
,
'vorsicht'
:
'caution'
,
'gefahr'
:
'danger'
,
'fehler'
:
'error'
,
'hinweis'
:
'hint'
,
'wichtig'
:
'important'
,
'notiz'
:
'note'
,
'tip'
:
'tip'
,
'warnung'
:
'warning'
,
'ermahnung'
:
'admonition'
,
'kasten'
:
'sidebar'
,
# seitenkasten ?
'thema'
:
'topic'
,
'line-block'
:
'line-block'
,
'parsed-literal'
:
'parsed-literal'
,
'rubrik'
:
'rubric'
,
'epigraph (translation required)'
:
'epigraph'
,
'highlights (translation required)'
:
'highlights'
,
'pull-quote (translation required)'
:
'pull-quote'
,
# kasten too ?
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
'meta'
:
'meta'
,
#'imagemap': 'imagemap',
'bild'
:
'image'
,
'abbildung'
:
'figure'
,
'raw'
:
'raw'
,
# unbearbeitet
'include'
:
'include'
,
# einfügen, "füge ein" would be more like a command.
# einfügung would be the noun.
'ersetzung'
:
'replace'
,
# ersetzen, ersetze
'unicode'
:
'unicode'
,
'klasse'
:
'class'
,
# offer class too ?
'inhalt'
:
'contents'
,
'sectnum'
:
'sectnum'
,
'section-numbering'
:
'sectnum'
,
'target-notes'
:
'target-notes'
,
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive'
:
'restructuredtext-test-directive'
}
"""German name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
'abbreviation (translation required)'
:
'abbreviation'
,
'acronym (translation required)'
:
'acronym'
,
'index (translation required)'
:
'index'
,
'subscript (translation required)'
:
'subscript'
,
'superscript (translation required)'
:
'superscript'
,
'title-reference (translation required)'
:
'title-reference'
,
'pep-reference (translation required)'
:
'pep-reference'
,
'rfc-reference (translation required)'
:
'rfc-reference'
,
'emphasis (translation required)'
:
'emphasis'
,
'strong (translation required)'
:
'strong'
,
'literal (translation required)'
:
'literal'
,
'named-reference (translation required)'
:
'named-reference'
,
'anonymous-reference (translation required)'
:
'anonymous-reference'
,
'footnote-reference (translation required)'
:
'footnote-reference'
,
'citation-reference (translation required)'
:
'citation-reference'
,
'substitution-reference (translation required)'
:
'substitution-reference'
,
'target (translation required)'
:
'target'
,
'uri-reference (translation required)'
:
'uri-reference'
,}
"""Mapping of German role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/en.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
English-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__ = 'reStructuredText'

# English directive names map to themselves; this module is the
# canonical reference the other language modules translate from.
directives = {
      # language-dependent: fixed
      'attention': 'attention',
      'caution': 'caution',
      'danger': 'danger',
      'error': 'error',
      'hint': 'hint',
      'important': 'important',
      'note': 'note',
      'tip': 'tip',
      'warning': 'warning',
      'admonition': 'admonition',
      'sidebar': 'sidebar',
      'topic': 'topic',
      'line-block': 'line-block',
      'parsed-literal': 'parsed-literal',
      'rubric': 'rubric',
      'epigraph': 'epigraph',
      'highlights': 'highlights',
      'pull-quote': 'pull-quote',
      #'questions': 'questions',
      #'qa': 'questions',
      #'faq': 'questions',
      'meta': 'meta',
      #'imagemap': 'imagemap',
      'image': 'image',
      'figure': 'figure',
      'include': 'include',
      'raw': 'raw',
      'replace': 'replace',
      'unicode': 'unicode',
      'class': 'class',
      'contents': 'contents',
      'sectnum': 'sectnum',
      'section-numbering': 'sectnum',
      #'footnotes': 'footnotes',
      #'citations': 'citations',
      'target-notes': 'target-notes',
      'restructuredtext-test-directive': 'restructuredtext-test-directive'}
"""English name to registered (in directives/__init__.py) directive name
mapping."""

# Several short aliases ('ab', 'i', 't', ...) map to the same canonical
# role name as their long forms.
roles = {
    # language-dependent: fixed
    'abbreviation': 'abbreviation',
    'ab': 'abbreviation',
    'acronym': 'acronym',
    'ac': 'acronym',
    'index': 'index',
    'i': 'index',
    'subscript': 'subscript',
    'sub': 'subscript',
    'superscript': 'superscript',
    'sup': 'superscript',
    'title-reference': 'title-reference',
    'title': 'title-reference',
    't': 'title-reference',
    'pep-reference': 'pep-reference',
    'pep': 'pep-reference',
    'rfc-reference': 'rfc-reference',
    'rfc': 'rfc-reference',
    'emphasis': 'emphasis',
    'strong': 'strong',
    'literal': 'literal',
    'named-reference': 'named-reference',
    'anonymous-reference': 'anonymous-reference',
    'footnote-reference': 'footnote-reference',
    'citation-reference': 'citation-reference',
    'substitution-reference': 'substitution-reference',
    'target': 'target',
    'uri-reference': 'uri-reference',
    'uri': 'uri-reference',
    'url': 'uri-reference',}
"""Mapping of English role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/es.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Marcelo Huerta San Martn
# Contact: mghsm@uol.com.ar
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Spanish-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
u'atenci
\
u00f3
n'
:
'attention'
,
u'atencion'
:
'attention'
,
u'precauci
\
u00f3
n'
:
'caution'
,
u'precaucion'
:
'caution'
,
u'peligro'
:
'danger'
,
u'error'
:
'error'
,
u'sugerencia'
:
'hint'
,
u'importante'
:
'important'
,
u'nota'
:
'note'
,
u'consejo'
:
'tip'
,
u'advertencia'
:
'warning'
,
u'exhortacion'
:
'admonition'
,
u'exhortaci
\
u00f3
n'
:
'admonition'
,
u'nota-al-margen'
:
'sidebar'
,
u'tema'
:
'topic'
,
u'bloque-de-lineas'
:
'line-block'
,
u'bloque-de-l
\
u00ed
neas'
:
'line-block'
,
u'literal-evaluado'
:
'parsed-literal'
,
u'firma'
:
'rubric'
,
u'ep
\
u00ed
grafe'
:
'epigraph'
,
u'epigrafe'
:
'epigraph'
,
u'destacado'
:
'highlights'
,
u'cita-destacada'
:
'pull-quote'
,
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
u'meta'
:
'meta'
,
#'imagemap': 'imagemap',
u'imagen'
:
'image'
,
u'figura'
:
'figure'
,
u'incluir'
:
'include'
,
u'raw'
:
'raw'
,
u'reemplazar'
:
'replace'
,
u'unicode'
:
'unicode'
,
u'clase'
:
'class'
,
u'contenido'
:
'contents'
,
u'numseccion'
:
'sectnum'
,
u'numsecci
\
u00f3
n'
:
'sectnum'
,
u'numeracion-seccion'
:
'sectnum'
,
u'numeraci
\
u00f3
n-secci
\
u00f3
n'
:
'sectnum'
,
u'notas-destino'
:
'target-notes'
,
#'footnotes': 'footnotes',
#'citations': 'citations',
u'restructuredtext-test-directive'
:
'restructuredtext-test-directive'
}
"""Spanish name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
u'abreviatura'
:
'abbreviation'
,
u'ab'
:
'abbreviation'
,
u'acronimo'
:
'acronym'
,
u'acronimo'
:
'acronym'
,
u'ac'
:
'acronym'
,
u'indice'
:
'index'
,
u'i'
:
'index'
,
u'subscript (translation required)'
:
'subscript'
,
u'superscript (translation required)'
:
'superscript'
,
u'referencia-titulo'
:
'title-reference'
,
u'titulo'
:
'title-reference'
,
u't'
:
'title-reference'
,
u'referencia-pep'
:
'pep-reference'
,
u'pep'
:
'pep-reference'
,
u'referencia-rfc'
:
'rfc-reference'
,
u'rfc'
:
'rfc-reference'
,
u'enfasis'
:
'emphasis'
,
u'
\
u00e9
nfasis'
:
'emphasis'
,
u'destacado'
:
'strong'
,
u'literal'
:
'literal'
,
u'referencia-con-nombre'
:
'named-reference'
,
u'referencia-anonima'
:
'anonymous-reference'
,
u'referencia-an
\
u00f3
nima'
:
'anonymous-reference'
,
u'referencia-nota-al-pie'
:
'footnote-reference'
,
u'referencia-cita'
:
'citation-reference'
,
u'referencia-sustitucion'
:
'substitution-reference'
,
u'referencia-sustituci
\
u00f3
n'
:
'substitution-reference'
,
u'destino'
:
'target'
,
u'referencia-uri'
:
'uri-reference'
,
u'uri'
:
'uri-reference'
,
u'url'
:
'uri-reference'
,
}
"""Mapping of Spanish role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/fr.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger; William Dode
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
French-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
u'attention'
:
'attention'
,
u'pr
\
u00E9
caution'
:
'caution'
,
u'danger'
:
'danger'
,
u'erreur'
:
'error'
,
u'conseil'
:
'hint'
,
u'important'
:
'important'
,
u'note'
:
'note'
,
u'astuce'
:
'tip'
,
u'avertissement'
:
'warning'
,
u'admonition'
:
'admonition'
,
u'encadr
\
u00E9
'
:
'sidebar'
,
u'sujet'
:
'topic'
,
u'bloc-textuel'
:
'line-block'
,
u'bloc-interpr
\
u00E9
t
\
u00E9
'
:
'parsed-literal'
,
u'code-interpr
\
u00E9
t
\
u00E9
'
:
'parsed-literal'
,
u'intertitre'
:
'rubric'
,
u'exergue'
:
'epigraph'
,
u'
\
u00E9
pigraphe'
:
'epigraph'
,
u'chapeau'
:
'highlights'
,
u'accroche'
:
'pull-quote'
,
#u'questions': 'questions',
#u'qr': 'questions',
#u'faq': 'questions',
u'm
\
u00E9
ta'
:
'meta'
,
#u'imagemap (translation required)': 'imagemap',
u'image'
:
'image'
,
u'figure'
:
'figure'
,
u'inclure'
:
'include'
,
u'brut'
:
'raw'
,
u'remplacer'
:
'replace'
,
u'remplace'
:
'replace'
,
u'unicode'
:
'unicode'
,
u'classe'
:
'class'
,
u'sommaire'
:
'contents'
,
u'table-des-mati
\
u00E8
res'
:
'contents'
,
u'sectnum'
:
'sectnum'
,
u'section-num
\
u00E9
rot
\
u00E9
e'
:
'sectnum'
,
u'liens'
:
'target-notes'
,
#u'footnotes (translation required)': 'footnotes',
#u'citations (translation required)': 'citations',
}
"""French name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
u'abr
\
u00E9
viation'
:
'abbreviation'
,
u'acronyme'
:
'acronym'
,
u'sigle'
:
'acronym'
,
u'index'
:
'index'
,
u'indice'
:
'subscript'
,
u'ind'
:
'subscript'
,
u'exposant'
:
'superscript'
,
u'exp'
:
'superscript'
,
u'titre-r
\
u00E9
f
\
u00E9
rence'
:
'title-reference'
,
u'titre'
:
'title-reference'
,
u'pep-r
\
u00E9
f
\
u00E9
rence'
:
'pep-reference'
,
u'rfc-r
\
u00E9
f
\
u00E9
rence'
:
'rfc-reference'
,
u'emphase'
:
'emphasis'
,
u'fort'
:
'strong'
,
u'litt
\
u00E9
ral'
:
'literal'
,
u'nomm
\
u00E9
e-r
\
u00E9
f
\
u00E9
rence'
:
'named-reference'
,
u'anonyme-r
\
u00E9
f
\
u00E9
rence'
:
'anonymous-reference'
,
u'note-r
\
u00E9
f
\
u00E9
rence'
:
'footnote-reference'
,
u'citation-r
\
u00E9
f
\
u00E9
rence'
:
'citation-reference'
,
u'substitution-r
\
u00E9
f
\
u00E9
rence'
:
'substitution-reference'
,
u'lien'
:
'target'
,
u'uri-r
\
u00E9
f
\
u00E9
rence'
:
'uri-reference'
,}
"""Mapping of French role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/it.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Nicola Larosa
# Contact: docutils@tekNico.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Italian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
'attenzione'
:
'attention'
,
'cautela'
:
'caution'
,
'pericolo'
:
'danger'
,
'errore'
:
'error'
,
'suggerimento'
:
'hint'
,
'importante'
:
'important'
,
'nota'
:
'note'
,
'consiglio'
:
'tip'
,
'avvertenza'
:
'warning'
,
'admonition (translation required)'
:
'admonition'
,
'sidebar (translation required)'
:
'sidebar'
,
'argomento'
:
'topic'
,
'blocco di linee'
:
'line-block'
,
'parsed-literal'
:
'parsed-literal'
,
'rubric (translation required)'
:
'rubric'
,
'epigraph (translation required)'
:
'epigraph'
,
'highlights (translation required)'
:
'highlights'
,
'pull-quote (translation required)'
:
'pull-quote'
,
#'questions': 'questions',
#'qa': 'questions',
#'faq': 'questions',
'meta'
:
'meta'
,
#'imagemap': 'imagemap',
'immagine'
:
'image'
,
'figura'
:
'figure'
,
'includi'
:
'include'
,
'grezzo'
:
'raw'
,
'sostituisci'
:
'replace'
,
'unicode'
:
'unicode'
,
'class (translation required)'
:
'class'
,
'indice'
:
'contents'
,
'seznum'
:
'sectnum'
,
'section-numbering'
:
'sectnum'
,
'target-notes'
:
'target-notes'
,
#'footnotes': 'footnotes',
#'citations': 'citations',
'restructuredtext-test-directive'
:
'restructuredtext-test-directive'
}
"""Italian name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
'abbreviation (translation required)'
:
'abbreviation'
,
'acronym (translation required)'
:
'acronym'
,
'index (translation required)'
:
'index'
,
'subscript (translation required)'
:
'subscript'
,
'superscript (translation required)'
:
'superscript'
,
'title-reference (translation required)'
:
'title-reference'
,
'pep-reference (translation required)'
:
'pep-reference'
,
'rfc-reference (translation required)'
:
'rfc-reference'
,
'emphasis (translation required)'
:
'emphasis'
,
'strong (translation required)'
:
'strong'
,
'literal (translation required)'
:
'literal'
,
'named-reference (translation required)'
:
'named-reference'
,
'anonymous-reference (translation required)'
:
'anonymous-reference'
,
'footnote-reference (translation required)'
:
'footnote-reference'
,
'citation-reference (translation required)'
:
'citation-reference'
,
'substitution-reference (translation required)'
:
'substitution-reference'
,
'target (translation required)'
:
'target'
,
'uri-reference (translation required)'
:
'uri-reference'
,}
"""Mapping of Italian role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/ru.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Roman Suzi
# Contact: rnd@onego.ru
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Russian-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
u'
\
u0431
\
u043b
\
u043e
\
u043a
-
\
u0441
\
u0442
\
u0440
\
u043e
\
u043a
'
:
u'line-block'
,
u'meta'
:
u'meta'
,
u'
\
u043e
\
u0431
\
u0440
\
u0430
\
u0431
\
u043e
\
u0442
\
u0430
\
u043d
\
u043d
\
u044b
\
u0439
-
\
u043b
\
u0438
\
u0442
\
u0435
\
u0440
\
u0430
\
u043b
'
:
u'parsed-literal'
,
u'
\
u0432
\
u044b
\
u0434
\
u0435
\
u043b
\
u0435
\
u043d
\
u043d
\
u0430
\
u044f
-
\
u0446
\
u0438
\
u0442
\
u0430
\
u0442
\
u0430
'
:
u'pull-quote'
,
u'
\
u0441
\
u044b
\
u0440
\
u043e
\
u0439
'
:
u'raw'
,
u'
\
u0437
\
u0430
\
u043c
\
u0435
\
u043d
\
u0430
'
:
u'replace'
,
u'
\
u0442
\
u0435
\
u0441
\
u0442
\
u043e
\
u0432
\
u0430
\
u044f
-
\
u0434
\
u0438
\
u0440
\
u0435
\
u043a
\
u0442
\
u0438
\
u0432
\
u0430
-restructuredtext'
:
u'restructuredtext-test-directive'
,
u'
\
u0446
\
u0435
\
u043b
\
u0435
\
u0432
\
u044b
\
u0435
-
\
u0441
\
u043d
\
u043e
\
u0441
\
u043a
\
u0438
'
:
u'target-notes'
,
u'unicode'
:
u'unicode'
,
u'
\
u0431
\
u043e
\
u043a
\
u043e
\
u0432
\
u0430
\
u044f
-
\
u043f
\
u043e
\
u043b
\
u043e
\
u0441
\
u0430
'
:
u'sidebar'
,
u'
\
u0432
\
u0430
\
u0436
\
u043d
\
u043e
'
:
u'important'
,
u'
\
u0432
\
u043a
\
u043b
\
u044e
\
u0447
\
u0430
\
u0442
\
u044c
'
:
u'include'
,
u'
\
u0432
\
u043d
\
u0438
\
u043c
\
u0430
\
u043d
\
u0438
\
u0435
'
:
u'attention'
,
u'
\
u0432
\
u044b
\
u0434
\
u0435
\
u043b
\
u0435
\
u043d
\
u0438
\
u0435
'
:
u'highlights'
,
u'
\
u0437
\
u0430
\
u043c
\
u0435
\
u0447
\
u0430
\
u043d
\
u0438
\
u0435
'
:
u'admonition'
,
u'
\
u0438
\
u0437
\
u043e
\
u0431
\
u0440
\
u0430
\
u0436
\
u0435
\
u043d
\
u0438
\
u0435
'
:
u'image'
,
u'
\
u043a
\
u043b
\
u0430
\
u0441
\
u0441
'
:
u'class'
,
u'
\
u043d
\
u043e
\
u043c
\
u0435
\
u0440
-
\
u0440
\
u0430
\
u0437
\
u0434
\
u0435
\
u043b
\
u0430
'
:
u'sectnum'
,
u'
\
u043d
\
u0443
\
u043c
\
u0435
\
u0440
\
u0430
\
u0446
\
u0438
\
u044f
-
\
u0440
\
u0430
\
u0437
'
u'
\
u0434
\
u0435
\
u043b
\
u043e
\
u0432
'
:
u'sectnum'
,
u'
\
u043e
\
u043f
\
u0430
\
u0441
\
u043d
\
u043e
'
:
u'danger'
,
u'
\
u043e
\
u0441
\
u0442
\
u043e
\
u0440
\
u043e
\
u0436
\
u043d
\
u043e
'
:
u'caution'
,
u'
\
u043e
\
u0448
\
u0438
\
u0431
\
u043a
\
u0430
'
:
u'error'
,
u'
\
u043f
\
u043e
\
u0434
\
u0441
\
u043a
\
u0430
\
u0437
\
u043a
\
u0430
'
:
u'tip'
,
u'
\
u043f
\
u0440
\
u0435
\
u0434
\
u0443
\
u043f
\
u0440
\
u0435
\
u0436
\
u0434
\
u0435
\
u043d
'
u'
\
u0438
\
u0435
'
:
u'warning'
,
u'
\
u043f
\
u0440
\
u0438
\
u043c
\
u0435
\
u0447
\
u0430
\
u043d
\
u0438
\
u0435
'
:
u'note'
,
u'
\
u0440
\
u0438
\
u0441
\
u0443
\
u043d
\
u043e
\
u043a
'
:
u'figure'
,
u'
\
u0440
\
u0443
\
u0431
\
u0440
\
u0438
\
u043a
\
u0430
'
:
u'rubric'
,
u'
\
u0441
\
u043e
\
u0432
\
u0435
\
u0442
'
:
u'hint'
,
u'
\
u0441
\
u043e
\
u0434
\
u0435
\
u0440
\
u0436
\
u0430
\
u043d
\
u0438
\
u0435
'
:
u'contents'
,
u'
\
u0442
\
u0435
\
u043c
\
u0430
'
:
u'topic'
,
u'
\
u044d
\
u043f
\
u0438
\
u0433
\
u0440
\
u0430
\
u0444
'
:
u'epigraph'
}
"""Russian name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
u'
\
u0430
\
u043a
\
u0440
\
u043e
\
u043d
\
u0438
\
u043c
'
:
'acronym'
,
u'
\
u0430
\
u043d
\
u043e
\
u043d
\
u0438
\
u043c
\
u043d
\
u0430
\
u044f
-
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
'
:
'anonymous-reference'
,
u'
\
u0431
\
u0443
\
u043a
\
u0432
\
u0430
\
u043b
\
u044c
\
u043d
\
u043e
'
:
'literal'
,
u'
\
u0432
\
u0435
\
u0440
\
u0445
\
u043d
\
u0438
\
u0439
-
\
u0438
\
u043d
\
u0434
\
u0435
\
u043a
\
u0441
'
:
'superscript'
,
u'
\
u0432
\
u044b
\
u0434
\
u0435
\
u043b
\
u0435
\
u043d
\
u0438
\
u0435
'
:
'emphasis'
,
u'
\
u0438
\
u043c
\
u0435
\
u043d
\
u043e
\
u0432
\
u0430
\
u043d
\
u043d
\
u0430
\
u044f
-
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
'
:
'named-reference'
,
u'
\
u0438
\
u043d
\
u0434
\
u0435
\
u043a
\
u0441
'
:
'index'
,
u'
\
u043d
\
u0438
\
u0436
\
u043d
\
u0438
\
u0439
-
\
u0438
\
u043d
\
u0434
\
u0435
\
u043a
\
u0441
'
:
'subscript'
,
u'
\
u0441
\
u0438
\
u043b
\
u044c
\
u043d
\
u043e
\
u0435
-
\
u0432
\
u044b
\
u0434
\
u0435
\
u043b
\
u0435
\
u043d
\
u0438
\
u0435
'
:
'strong'
,
u'
\
u0441
\
u043e
\
u043a
\
u0440
\
u0430
\
u0449
\
u0435
\
u043d
\
u0438
\
u0435
'
:
'abbreviation'
,
u'
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
-
\
u0437
\
u0430
\
u043c
\
u0435
\
u043d
\
u0430
'
:
'substitution-reference'
,
u'
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
-
\
u043d
\
u0430
-pep'
:
'pep-reference'
,
u'
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
-
\
u043d
\
u0430
-rfc'
:
'rfc-reference'
,
u'
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
-
\
u043d
\
u0430
-uri'
:
'uri-reference'
,
u'
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
-
\
u043d
\
u0430
-
\
u0437
\
u0430
\
u0433
\
u043b
\
u0430
\
u0432
\
u0438
\
u0435
'
:
'title-reference'
,
u'
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
-
\
u043d
\
u0430
-
\
u0441
\
u043d
\
u043e
\
u0441
\
u043a
\
u0443
'
:
'footnote-reference'
,
u'
\
u0446
\
u0438
\
u0442
\
u0430
\
u0442
\
u043d
\
u0430
\
u044f
-
\
u0441
\
u0441
\
u044b
\
u043b
\
u043a
\
u0430
'
:
'citation-reference'
,
u'
\
u0446
\
u0435
\
u043b
\
u044c
'
:
'target'
}
"""Mapping of Russian role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/sk.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Miroslav Vasko
# Contact: zemiak@zoznam.sk
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Slovak-language mappings for language-dependent features of
reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
u'pozor'
:
'attention'
,
u'opatrne'
:
'caution'
,
u'nebezpe
\
xe8
enstvo'
:
'danger'
,
u'chyba'
:
'error'
,
u'rada'
:
'hint'
,
u'd
\
xf4
le
\
x9e
it
\
xe9
'
:
'important'
,
u'pozn
\
xe1
mka'
:
'note'
,
u'tip'
:
'tip'
,
u'varovanie'
:
'warning'
,
u'admonition (translation required)'
:
'admonition'
,
u'sidebar (translation required)'
:
'sidebar'
,
u't
\
xe9
ma'
:
'topic'
,
u'blok-riadkov'
:
'line-block'
,
u'parsed-literal'
:
'parsed-literal'
,
u'rubric (translation required)'
:
'rubric'
,
u'epigraph (translation required)'
:
'epigraph'
,
u'highlights (translation required)'
:
'highlights'
,
u'pull-quote (translation required)'
:
'pull-quote'
,
#u'questions': 'questions',
#u'qa': 'questions',
#u'faq': 'questions',
u'meta'
:
'meta'
,
#u'imagemap': 'imagemap',
u'obr
\
xe1
zok'
:
'image'
,
u'tvar'
:
'figure'
,
u'vlo
\
x9e
i
\
x9d
'
:
'include'
,
u'raw'
:
'raw'
,
u'nahradi
\
x9d
'
:
'replace'
,
u'unicode'
:
'unicode'
,
u'class (translation required)'
:
'class'
,
u'obsah'
:
'contents'
,
u'
\
xe8
as
\
x9d
'
:
'sectnum'
,
u'
\
xe8
as
\
x9d
-
\
xe8
\
xed
slovanie'
:
'sectnum'
,
u'cie
\
xbe
ov
\
xe9
-pozn
\
xe1
mky'
:
'target-notes'
,
#u'footnotes': 'footnotes',
#u'citations': 'citations',
}
"""Slovak name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
u'abbreviation (translation required)'
:
'abbreviation'
,
u'acronym (translation required)'
:
'acronym'
,
u'index (translation required)'
:
'index'
,
u'subscript (translation required)'
:
'subscript'
,
u'superscript (translation required)'
:
'superscript'
,
u'title-reference (translation required)'
:
'title-reference'
,
u'pep-reference (translation required)'
:
'pep-reference'
,
u'rfc-reference (translation required)'
:
'rfc-reference'
,
u'emphasis (translation required)'
:
'emphasis'
,
u'strong (translation required)'
:
'strong'
,
u'literal (translation required)'
:
'literal'
,
u'named-reference (translation required)'
:
'named-reference'
,
u'anonymous-reference (translation required)'
:
'anonymous-reference'
,
u'footnote-reference (translation required)'
:
'footnote-reference'
,
u'citation-reference (translation required)'
:
'citation-reference'
,
u'substitution-reference (translation required)'
:
'substitution-reference'
,
u'target (translation required)'
:
'target'
,
u'uri-reference (translation required)'
:
'uri-reference'
,}
"""Mapping of Slovak role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/languages/sv.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Adam Chodorowski
# Contact: chodorowski@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:49 $
# Copyright: This module has been placed in the public domain.
# New language mappings are welcome. Before doing a new translation, please
# read <http://docutils.sf.net/spec/howto/i18n.html>. Two files must be
# translated for each language: one in docutils/languages, the other in
# docutils/parsers/rst/languages.
"""
Swedish language mappings for language-dependent features of reStructuredText.
"""
__docformat__
=
'reStructuredText'
directives
=
{
u'observera'
:
'attention'
,
u'caution (translation required)'
:
'caution'
,
u'fara'
:
'danger'
,
u'fel'
:
'error'
,
u'v
\
u00e4
gledning'
:
'hint'
,
u'viktigt'
:
'important'
,
u'notera'
:
'note'
,
u'tips'
:
'tip'
,
u'varning'
:
'warning'
,
u'admonition (translation required)'
:
'admonition'
,
u'sidebar (translation required)'
:
'sidebar'
,
u'
\
u00e4
mne'
:
'topic'
,
u'line-block (translation required)'
:
'line-block'
,
u'parsed-literal (translation required)'
:
'parsed-literal'
,
u'mellanrubrik'
:
'rubric'
,
u'epigraph (translation required)'
:
'epigraph'
,
u'highlights (translation required)'
:
'highlights'
,
u'pull-quote (translation required)'
:
'pull-quote'
,
# u'fr\u00e5gor': 'questions',
# NOTE: A bit long, but recommended by http://www.nada.kth.se/dataterm/:
# u'fr\u00e5gor-och-svar': 'questions',
# u'vanliga-fr\u00e5gor': 'questions',
u'meta'
:
'meta'
,
# u'bildkarta': 'imagemap', # FIXME: Translation might be too literal.
u'bild'
:
'image'
,
u'figur'
:
'figure'
,
u'inkludera'
:
'include'
,
u'r
\
u00e5
'
:
'raw'
,
# FIXME: Translation might be too literal.
u'ers
\
u00e4
tt'
:
'replace'
,
u'unicode'
:
'unicode'
,
u'class (translation required)'
:
'class'
,
u'inneh
\
u00e5
ll'
:
'contents'
,
u'sektionsnumrering'
:
'sectnum'
,
u'target-notes (translation required)'
:
'target-notes'
,
# u'fotnoter': 'footnotes',
# u'citeringar': 'citations',
}
"""Swedish name to registered (in directives/__init__.py) directive name
mapping."""
roles
=
{
u'abbreviation (translation required)'
:
'abbreviation'
,
u'acronym (translation required)'
:
'acronym'
,
u'index (translation required)'
:
'index'
,
u'subscript (translation required)'
:
'subscript'
,
u'superscript (translation required)'
:
'superscript'
,
u'title-reference (translation required)'
:
'title-reference'
,
u'pep-reference (translation required)'
:
'pep-reference'
,
u'rfc-reference (translation required)'
:
'rfc-reference'
,
u'emphasis (translation required)'
:
'emphasis'
,
u'strong (translation required)'
:
'strong'
,
u'literal (translation required)'
:
'literal'
,
u'named-reference (translation required)'
:
'named-reference'
,
u'anonymous-reference (translation required)'
:
'anonymous-reference'
,
u'footnote-reference (translation required)'
:
'footnote-reference'
,
u'citation-reference (translation required)'
:
'citation-reference'
,
u'substitution-reference (translation required)'
:
'substitution-reference'
,
u'target (translation required)'
:
'target'
,
u'uri-reference (translation required)'
:
'uri-reference'
,}
"""Mapping of Swedish role names to canonical role names for interpreted text.
"""
lib/python/docutils/parsers/rst/roman.py
deleted
100644 → 0
View file @
e1142d2d
"""Convert to and from Roman numerals"""
__author__
=
"Mark Pilgrim (f8dy@diveintopython.org)"
__version__
=
"1.4"
__date__
=
"8 August 2001"
__copyright__
=
"""Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import
re
# Exception hierarchy: every error raised by this module derives from
# RomanError, so callers can catch the whole family with one clause.
class RomanError(Exception):
    """Base class for all errors raised by the roman module."""
    pass

class OutOfRangeError(RomanError):
    """Raised for numbers outside the representable range 1..4999."""
    pass

class NotIntegerError(RomanError):
    """Raised when a value with a fractional part is given."""
    pass

class InvalidRomanNumeralError(RomanError):
    """Raised for empty or malformed Roman numeral strings."""
    pass
#Define digit mapping
# Ordered greedily: the subtractive pairs ('CM', 'CD', 'XC', ...) must
# precede the single letters they begin with, or conversion would match
# the wrong digit first.
romanNumeralMap = (('M',  1000),
                   ('CM', 900),
                   ('D',  500),
                   ('CD', 400),
                   ('C',  100),
                   ('XC', 90),
                   ('L',  50),
                   ('XL', 40),
                   ('X',  10),
                   ('IX', 9),
                   ('V',  5),
                   ('IV', 4),
                   ('I',  1))

def toRoman(n):
    """Convert integer to Roman numeral.

    :param n: an integral value in 1..4999 (a float is accepted only if
        it has no fractional part).
    :returns: the Roman numeral as a string.
    :raises OutOfRangeError: if `n` is outside 1..4999.
    :raises NotIntegerError: if `n` has a fractional part.

    NOTE(fix): the original used the Python 2-only forms
    ``raise Error, "msg"`` and ``<>``; both rewritten in syntax that is
    valid in Python 2 and 3 alike.
    """
    if not (0 < n < 5000):
        # Roman numerals have no zero or negatives, and M{0,4} caps at 4999.
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:
        raise NotIntegerError("decimals can not be converted")

    result = ""
    # Greedy subtraction: repeatedly take the largest digit that fits.
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result

#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile('''
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    $                   # end of string
    ''', re.VERBOSE)

def fromRoman(s):
    """Convert Roman numeral to integer.

    :param s: a well-formed Roman numeral string (uppercase).
    :returns: the integer value.
    :raises InvalidRomanNumeralError: if `s` is blank or malformed.

    NOTE(fix): Python 2-only ``raise Error, "msg"`` statements rewritten
    as ``raise Error("msg")``, valid in Python 2 and 3.
    """
    if not s:
        raise InvalidRomanNumeralError('Input can not be blank')
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)

    result = 0
    index = 0
    # Greedy scan mirroring toRoman: consume each digit string as many
    # times as it occurs at the current position.
    for numeral, integer in romanNumeralMap:
        while s[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
lib/python/docutils/parsers/rst/states.py
deleted
100644 → 0
View file @
e1142d2d
This source diff could not be displayed because it is too large. You can
view the blob
instead.
lib/python/docutils/parsers/rst/tableparser.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:41 $
# Copyright: This module has been placed in the public domain.
"""
This module defines table parser classes,which parse plaintext-graphic tables
and produce a well-formed data structure suitable for building a CALS table.
:Classes:
- `GridTableParser`: Parse fully-formed tables represented with a grid.
- `SimpleTableParser`: Parse simple tables, delimited by top & bottom
borders.
:Exception class: `TableMarkupError`
:Function:
`update_dict_of_lists()`: Merge two dictionaries containing list values.
"""
__docformat__
=
'reStructuredText'
import
re
import
sys
from
docutils
import
DataError
class TableMarkupError(DataError):
    """Raised when a plaintext table's markup is malformed."""
class TableParser:

    """
    Abstract superclass for the common parts of the syntax-specific parsers.
    """

    head_body_separator_pat = None
    """Matches the row separator between head rows and body rows."""

    def parse(self, block):
        """
        Analyze the text `block` and return a table data structure.

        `block` is a plaintext-graphic table as a list of text lines with no
        whitespace padding.  The return value holds everything needed to
        build a CALS table or equivalent.

        Raise `TableMarkupError` if there is any problem with the markup.
        """
        self.setup(block)
        self.find_head_body_sep()
        self.parse_table()
        return self.structure_from_cells()

    def find_head_body_sep(self):
        """Look for a head/body row separator line; store the line index."""
        for index, line in enumerate(self.block):
            if not self.head_body_separator_pat.match(line):
                continue
            if self.head_body_sep:
                raise TableMarkupError(
                    'Multiple head/body row separators in table (at line '
                    'offset %s and %s); only one allowed.'
                    % (self.head_body_sep, index))
            self.head_body_sep = index
            # Rewrite the '=' separator as an ordinary '-' row boundary so
            # later scanning treats it uniformly.
            self.block[index] = line.replace('=', '-')
        if self.head_body_sep == 0 \
              or self.head_body_sep == (len(self.block) - 1):
            raise TableMarkupError('The head/body row separator may not be '
                                   'the first or last line of the table.')
class GridTableParser(TableParser):

    """
    Parse a grid table using `parse()`.

    Here's an example of a grid table::

        +------------------------+------------+----------+----------+
        | Header row, column 1   | Header 2   | Header 3 | Header 4 |
        +========================+============+==========+==========+
        | body row 1, column 1   | column 2   | column 3 | column 4 |
        +------------------------+------------+----------+----------+
        | body row 2             | Cells may span columns.          |
        +------------------------+------------+---------------------+
        | body row 3             | Cells may  | - Table cells       |
        +------------------------+ span rows. | - contain           |
        | body row 4             |            | - body elements.    |
        +------------------------+------------+---------------------+

    Intersections use '+', row separators use '-' (except for one optional
    head/body row separator, which uses '='), and column separators use '|'.

    Passing the above table to the `parse()` method will result in the
    following data structure::

        ([24, 12, 10, 10],
         [[(0, 0, 1, ['Header row, column 1']),
           (0, 0, 1, ['Header 2']),
           (0, 0, 1, ['Header 3']),
           (0, 0, 1, ['Header 4'])]],
         [[(0, 0, 3, ['body row 1, column 1']),
           (0, 0, 3, ['column 2']),
           (0, 0, 3, ['column 3']),
           (0, 0, 3, ['column 4'])],
          [(0, 0, 5, ['body row 2']),
           (0, 2, 5, ['Cells may span columns.']),
           None,
           None],
          [(0, 0, 7, ['body row 3']),
           (1, 0, 7, ['Cells may', 'span rows.', '']),
           (1, 1, 7, ['- Table cells', '- contain', '- body elements.']),
           None],
          [(0, 0, 9, ['body row 4']), None, None, None]])

    The first item is a list containing column widths (colspecs).  The second
    item is a list of head rows, and the third is a list of body rows.  Each
    row contains a list of cells.  Each cell is either None (for a cell unused
    because of another cell's span), or a tuple.  A cell tuple contains four
    items: the number of extra rows used by the cell in a vertical span
    (morerows); the number of extra columns used by the cell in a horizontal
    span (morecols); the line offset of the first line of the cell contents;
    and the cell contents, a list of lines of text.
    """

    # Matches a row of '+' and '=' only: the head/body separator line.
    head_body_separator_pat = re.compile(r'\+=[=+]+=\+ *$')

    def setup(self, block):
        # Per-parse scratch state; `block` is expected to behave like a
        # docutils StringList (supports disconnect()/get_2D_block()).
        self.block = block[:]               # make a copy; it may be modified
        self.block.disconnect()             # don't propagate changes to parent
        self.bottom = len(block) - 1        # index of the last line
        self.right = len(block[0]) - 1      # index of the last column
        self.head_body_sep = None
        self.done = [-1] * len(block[0])    # per-column progress markers
        self.cells = []
        self.rowseps = {0: [0]}             # row boundary -> columns seen at
        self.colseps = {0: [0]}             # column boundary -> rows seen at

    def parse_table(self):
        """
        Start with a queue of upper-left corners, containing the upper-left
        corner of the table itself.  Trace out one rectangular cell, remember
        it, and add its upper-right and lower-left corners to the queue of
        potential upper-left corners of further cells.  Process the queue in
        top-to-bottom order, keeping track of how much of each text column has
        been seen.

        We'll end up knowing all the row and column boundaries, cell positions
        and their dimensions.
        """
        corners = [(0, 0)]
        while corners:
            top, left = corners.pop(0)
            # Skip corners on the table's bottom/right edge, or already
            # inside a scanned cell.
            if top == self.bottom or left == self.right \
                  or top <= self.done[left]:
                continue
            result = self.scan_cell(top, left)
            if not result:
                continue
            bottom, right, rowseps, colseps = result
            update_dict_of_lists(self.rowseps, rowseps)
            update_dict_of_lists(self.colseps, colseps)
            self.mark_done(top, left, bottom, right)
            # Cell contents exclude the border rows/columns themselves.
            cellblock = self.block.get_2D_block(top + 1, left + 1,
                                                bottom, right)
            cellblock.disconnect()      # lines in cell can't sync with parent
            self.cells.append((top, left, bottom, right, cellblock))
            corners.extend([(top, right), (bottom, left)])
            corners.sort()
        if not self.check_parse_complete():
            raise TableMarkupError('Malformed table; parse incomplete.')

    def mark_done(self, top, left, bottom, right):
        """For keeping track of how much of each text column has been seen."""
        before = top - 1
        after = bottom - 1
        for col in range(left, right):
            assert self.done[col] == before
            self.done[col] = after

    def check_parse_complete(self):
        """Each text column should have been completely seen."""
        last = self.bottom - 1
        for col in range(self.right):
            if self.done[col] != last:
                return None
        return 1

    def scan_cell(self, top, left):
        """Starting at the top-left corner, start tracing out a cell."""
        assert self.block[top][left] == '+'
        result = self.scan_right(top, left)
        return result

    def scan_right(self, top, left):
        """
        Look for the top-right corner of the cell, and make note of all column
        boundaries ('+').
        """
        colseps = {}
        line = self.block[top]
        for i in range(left + 1, self.right + 1):
            if line[i] == '+':
                colseps[i] = [top]
                result = self.scan_down(top, left, i)
                if result:
                    bottom, rowseps, newcolseps = result
                    update_dict_of_lists(colseps, newcolseps)
                    return bottom, i, rowseps, colseps
            elif line[i] != '-':
                # Anything but '-' or '+' along the top edge is not a cell.
                return None
        return None

    def scan_down(self, top, left, right):
        """
        Look for the bottom-right corner of the cell, making note of all row
        boundaries.
        """
        rowseps = {}
        for i in range(top + 1, self.bottom + 1):
            if self.block[i][right] == '+':
                rowseps[i] = [right]
                result = self.scan_left(top, left, i, right)
                if result:
                    newrowseps, colseps = result
                    update_dict_of_lists(rowseps, newrowseps)
                    return i, rowseps, colseps
            elif self.block[i][right] != '|':
                # The right edge must consist of '|' with '+' intersections.
                return None
        return None

    def scan_left(self, top, left, bottom, right):
        """
        Noting column boundaries, look for the bottom-left corner of the cell.
        It must line up with the starting point.
        """
        colseps = {}
        line = self.block[bottom]
        for i in range(right - 1, left, -1):
            if line[i] == '+':
                colseps[i] = [bottom]
            elif line[i] != '-':
                return None
        if line[left] != '+':
            return None
        result = self.scan_up(top, left, bottom, right)
        if result is not None:
            rowseps = result
            return rowseps, colseps
        return None

    def scan_up(self, top, left, bottom, right):
        """
        Noting row boundaries, see if we can return to the starting point.
        """
        rowseps = {}
        for i in range(bottom - 1, top, -1):
            if self.block[i][left] == '+':
                rowseps[i] = [left]
            elif self.block[i][left] != '|':
                return None
        return rowseps

    def structure_from_cells(self):
        """
        From the data collected by `scan_cell()`, convert to the final data
        structure.
        """
        rowseps = self.rowseps.keys()   # list of row boundaries
        rowseps.sort()
        rowindex = {}
        for i in range(len(rowseps)):
            rowindex[rowseps[i]] = i    # row boundary -> row number mapping
        colseps = self.colseps.keys()   # list of column boundaries
        colseps.sort()
        colindex = {}
        for i in range(len(colseps)):
            colindex[colseps[i]] = i    # column boundary -> col number map
        colspecs = [(colseps[i] - colseps[i - 1] - 1)
                    for i in range(1, len(colseps))] # list of column widths
        # prepare an empty table with the correct number of rows & columns
        onerow = [None for i in range(len(colseps) - 1)]
        rows = [onerow[:] for i in range(len(rowseps) - 1)]
        # keep track of # of cells remaining; should reduce to zero
        remaining = (len(rowseps) - 1) * (len(colseps) - 1)
        for top, left, bottom, right, block in self.cells:
            rownum = rowindex[top]
            colnum = colindex[left]
            assert rows[rownum][colnum] is None, (
                  'Cell (row %s, column %s) already used.'
                  % (rownum + 1, colnum + 1))
            morerows = rowindex[bottom] - rownum - 1
            morecols = colindex[right] - colnum - 1
            remaining -= (morerows + 1) * (morecols + 1)
            # write the cell into the table
            rows[rownum][colnum] = (morerows, morecols, top + 1, block)
        assert remaining == 0, 'Unused cells remaining.'
        if self.head_body_sep:          # separate head rows from body rows
            numheadrows = rowindex[self.head_body_sep]
            headrows = rows[:numheadrows]
            bodyrows = rows[numheadrows:]
        else:
            headrows = []
            bodyrows = rows
        return (colspecs, headrows, bodyrows)
class SimpleTableParser(TableParser):

    """
    Parse a simple table using `parse()`.

    Here's an example of a simple table::

        =====  =====
        col 1  col 2
        =====  =====
        1      Second column of row 1.
        2      Second column of row 2.
               Second line of paragraph.
        3      - Second column of row 3.

               - Second item in bullet
                 list (row 3, column 2).
        4 is a span
        ------------
        5
        =====  =====

    Top and bottom borders use '=', column span underlines use '-', column
    separation is indicated with spaces.

    Passing the above table to the `parse()` method will result in the
    following data structure, whose interpretation is the same as for
    `GridTableParser`::

        ([5, 25],
         [[(0, 0, 1, ['col 1']),
           (0, 0, 1, ['col 2'])]],
         [[(0, 0, 3, ['1']),
           (0, 0, 3, ['Second column of row 1.'])],
          [(0, 0, 4, ['2']),
           (0, 0, 4, ['Second column of row 2.',
                      'Second line of paragraph.'])],
          [(0, 0, 6, ['3']),
           (0, 0, 6, ['- Second column of row 3.',
                      '',
                      '- Second item in bullet',
                      '  list (row 3, column 2).'])],
          [(0, 1, 10, ['4 is a span'])],
          [(0, 0, 12, ['5']),
           (0, 0, 12, [''])]])
    """

    # A line of '=' and spaces: top/bottom border or head/body separator.
    head_body_separator_pat = re.compile('=[ =]*$')
    # A line of '-' and spaces: a column span underline.
    span_pat = re.compile('-[ -]*$')

    def setup(self, block):
        self.block = block[:]           # make a copy; it will be modified
        self.block.disconnect()         # don't propagate changes to parent
        # Convert top & bottom borders to column span underlines:
        self.block[0] = self.block[0].replace('=', '-')
        self.block[-1] = self.block[-1].replace('=', '-')
        self.head_body_sep = None
        self.columns = []               # list of (start, end) column spans
        self.border_end = None          # end offset of the top border
        self.table = []                 # accumulated parsed rows
        self.done = [-1] * len(block[0])
        self.rowseps = {0: [0]}
        self.colseps = {0: [0]}

    def parse_table(self):
        """
        First determine the column boundaries from the top border, then
        process rows.  Each row may consist of multiple lines; accumulate
        lines until a row is complete.  Call `self.parse_row` to finish the
        job.
        """
        # Top border must fully describe all table columns.
        self.columns = self.parse_columns(self.block[0], 0)
        self.border_end = self.columns[-1][1]
        firststart, firstend = self.columns[0]
        offset = 1                      # skip top border
        start = 1
        text_found = None
        while offset < len(self.block):
            line = self.block[offset]
            if self.span_pat.match(line):
                # Column span underline or border; row is complete.
                self.parse_row(self.block[start:offset], start,
                               (line.rstrip(), offset))
                start = offset + 1
                text_found = None
            elif line[firststart:firstend].strip():
                # First column not blank, therefore it's a new row.
                if text_found and offset != start:
                    self.parse_row(self.block[start:offset], start)
                start = offset
                text_found = 1
            elif not text_found:
                # Blank first column before any text: keep skipping.
                start = offset + 1
            offset += 1

    def parse_columns(self, line, offset):
        """
        Given a column span underline, return a list of (begin, end) pairs.
        """
        cols = []
        end = 0
        while 1:
            begin = line.find('-', end)
            end = line.find(' ', begin)
            if begin < 0:
                break
            if end < 0:
                end = len(line)
            cols.append((begin, end))
        if self.columns:
            # A span underline must end exactly at the table border...
            if cols[-1][1] != self.border_end:
                raise TableMarkupError('Column span incomplete at line '
                                       'offset %s.' % offset)
            # Allow for an unbounded rightmost column:
            cols[-1] = (cols[-1][0], self.columns[-1][1])
        return cols

    def init_row(self, colspec, offset):
        # Build empty cell records [morerows, morecols, offset, lines],
        # computing horizontal spans by walking the master column list.
        i = 0
        cells = []
        for start, end in colspec:
            morecols = 0
            try:
                assert start == self.columns[i][0]
                while end != self.columns[i][1]:
                    i += 1
                    morecols += 1
            except (AssertionError, IndexError):
                raise TableMarkupError('Column span alignment problem at '
                                       'line offset %s.' % (offset + 1))
            cells.append([0, morecols, offset, []])
            i += 1
        return cells

    def parse_row(self, lines, start, spanline=None):
        """
        Given the text `lines` of a row, parse it and append to `self.table`.

        The row is parsed according to the current column spec (either
        `spanline` if provided or `self.columns`).  For each column, extract
        text from each line, and check for text in column margins.  Finally,
        adjust for insignificant whitespace.
        """
        if not (lines or spanline):
            # No new row, just blank lines.
            return
        if spanline:
            columns = self.parse_columns(*spanline)
            span_offset = spanline[1]
        else:
            columns = self.columns[:]
            span_offset = start
        self.check_columns(lines, start, columns)
        row = self.init_row(columns, start)
        for i in range(len(columns)):
            start, end = columns[i]
            cellblock = lines.get_2D_block(0, start, len(lines), end)
            cellblock.disconnect()      # lines in cell can't sync with parent
            row[i][3] = cellblock
        self.table.append(row)

    def check_columns(self, lines, first_line, columns):
        """
        Check for text in column margins and text overflow in the last column.
        Raise TableMarkupError if anything but whitespace is in column margins.
        Adjust the end value for the last column if there is text overflow.
        """
        # "Infinite" value for a dummy last column's beginning, used to
        # check for text overflow:
        columns.append((sys.maxint, None))
        lastcol = len(columns) - 2
        for i in range(len(columns) - 1):
            start, end = columns[i]
            nextstart = columns[i + 1][0]
            offset = 0
            for line in lines:
                if i == lastcol and line[end:].strip():
                    # Last column may overflow; widen it (and the master
                    # column list) to cover the actual text.
                    text = line[start:].rstrip()
                    new_end = start + len(text)
                    columns[i] = (start, new_end)
                    main_start, main_end = self.columns[-1]
                    if new_end > main_end:
                        self.columns[-1] = (main_start, new_end)
                elif line[end:nextstart].strip():
                    raise TableMarkupError('Text in column margin at line '
                                           'offset %s.' % (first_line + offset))
                offset += 1
        columns.pop()

    def structure_from_cells(self):
        colspecs = [end - start for start, end in self.columns]
        first_body_row = 0
        if self.head_body_sep:
            # First row whose line offset is past the separator starts
            # the body.
            for i in range(len(self.table)):
                if self.table[i][0][2] > self.head_body_sep:
                    first_body_row = i
                    break
        return (colspecs, self.table[:first_body_row],
                self.table[first_body_row:])
def update_dict_of_lists(master, newdata):
    """
    Extend the list values of `master` with those from `newdata`.

    Both parameters must be dictionaries containing list values.
    """
    for key, values in newdata.items():
        if key in master:
            master[key].extend(values)
        else:
            # New key: start its list from a copy of the incoming values.
            master[key] = list(values)
lib/python/docutils/readers/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger; Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:54 $
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Reader modules.
"""
__docformat__
=
'reStructuredText'
import
sys
from
docutils
import
utils
,
parsers
,
Component
from
docutils.transforms
import
universal
class Reader(Component):

    """
    Abstract base class for docutils Readers.

    Each reader module or package must export a subclass also called 'Reader'.

    The three steps of a Reader's responsibility are defined: `scan()`,
    `parse()`, and `transform()`.  Call `read()` to process a document.
    """

    component_type = 'reader'

    def __init__(self, parser=None, parser_name='restructuredtext'):
        """
        Initialize the Reader instance.

        Several instance attributes are defined with dummy initial values.
        Subclasses may use these attributes as they wish.
        """
        self.parser = parser
        """A `parsers.Parser` instance shared by all doctrees.  May be left
        unspecified if the document source determines the parser."""

        if parser is None and parser_name:
            # Resolve the parser lazily by name when no instance was given.
            self.set_parser(parser_name)

        self.source = None
        """`docutils.io` IO object, source of input data."""

        self.input = None
        """Raw text input; either a single string or, for more complex cases,
        a collection of strings."""

    def set_parser(self, parser_name):
        """Set `self.parser` by name."""
        parser_class = parsers.get_parser_class(parser_name)
        self.parser = parser_class()

    def read(self, source, parser, settings):
        # `source` must supply a read() method; `parser` is only used when
        # no parser was established by __init__/set_parser.
        self.source = source
        if not self.parser:
            self.parser = parser
        self.settings = settings
        self.input = self.source.read()
        self.parse()
        return self.document

    def parse(self):
        """Parse `self.input` into a document tree."""
        self.document = document = self.new_document()
        self.parser.parse(self.input, document)
        # Clear position bookkeeping once parsing is finished.
        document.current_source = document.current_line = None

    def new_document(self):
        """Create and return a new empty document tree (root node)."""
        document = utils.new_document(self.source.source_path, self.settings)
        return document
# Maps lower-cased alias names to canonical reader module names.
_reader_aliases = {}

def get_reader_class(reader_name):
    """Return the Reader class from the `reader_name` module.

    The name is lower-cased and resolved through `_reader_aliases` before
    the module is imported; the module must export a `Reader` attribute.

    NOTE(fix): the original used ``dict.has_key()``, which was removed in
    Python 3; replaced with the ``in`` operator (valid in Python 2 too).
    """
    reader_name = reader_name.lower()
    if reader_name in _reader_aliases:
        reader_name = _reader_aliases[reader_name]
    module = __import__(reader_name, globals(), locals())
    return module.Reader
lib/python/docutils/readers/pep.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:54 $
# Copyright: This module has been placed in the public domain.
"""
Python Enhancement Proposal (PEP) Reader.
"""
__docformat__
=
'reStructuredText'
from
docutils.readers
import
standalone
from
docutils.transforms
import
peps
,
references
from
docutils.parsers
import
rst
class Inliner(rst.states.Inliner):

    """
    Extend `rst.Inliner` for local PEP references.
    """

    # Use relative ("local") PEP URLs instead of the absolute default.
    pep_url = rst.states.Inliner.pep_url_local
class Reader(standalone.Reader):

    supported = ('pep',)
    """Contexts this reader supports."""

    settings_spec = (
        'PEP Reader Option Defaults',
        'The --pep-references and --rfc-references options (for the '
        'reStructuredText parser) are on by default.',
        ())

    # Transforms applied to every PEP document, in order; includes the
    # PEP-specific header/contents/target-note transforms.
    default_transforms = (references.Substitutions,
                          peps.Headers,
                          peps.Contents,
                          references.ChainedTargets,
                          references.AnonymousHyperlinks,
                          references.IndirectHyperlinks,
                          peps.TargetNotes,
                          references.Footnotes,
                          references.ExternalTargets,
                          references.InternalTargets,)

    # PEP and RFC auto-linking are enabled by default for PEPs.
    settings_default_overrides = {'pep_references': 1, 'rfc_references': 1}

    inliner_class = Inliner

    def __init__(self, parser=None, parser_name=None):
        """`parser` should be ``None``."""
        if parser is None:
            # PEPs carry RFC-2822 headers; build an rst parser configured
            # for them, with local PEP links via `inliner_class`.
            parser = rst.Parser(rfc2822=1, inliner=self.inliner_class())
        standalone.Reader.__init__(self, parser, '')
lib/python/docutils/readers/python/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:49:58 $
# Copyright: This module has been placed in the public domain.
"""
This package contains the Python Source Reader modules.
"""
__docformat__
=
'reStructuredText'
import
sys
import
docutils.readers
class Reader(docutils.readers.Reader):

    # Placeholder: the Python source reader currently inherits all
    # behavior unchanged from the generic docutils Reader.
    pass
lib/python/docutils/readers/python/moduleparser.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.1 $
# Date: $Date: 2003/07/10 15:48:20 $
# Copyright: This module has been placed in the public domain.
"""
Parser for Python modules.
The `parse_module()` function takes a module's text and file name, runs it
through the module parser (using compiler.py and tokenize.py) and produces a
"module documentation tree": a high-level AST full of nodes that are
interesting from an auto-documentation standpoint. For example, given this
module (x.py)::
# comment
'''Docstring'''
'''Additional docstring'''
__docformat__ = 'reStructuredText'
a = 1
'''Attribute docstring'''
class C(Super):
'''C's docstring'''
class_attribute = 1
'''class_attribute's docstring'''
def __init__(self, text=None):
'''__init__'s docstring'''
self.instance_attribute = (text * 7
+ ' whaddyaknow')
'''instance_attribute's docstring'''
def f(x, # parameter x
y=a*5, # parameter y
*args): # parameter args
'''f's docstring'''
return [x + item for item in args]
f.function_attribute = 1
'''f.function_attribute's docstring'''
The module parser will produce this module documentation tree::
<Module filename="test data">
<Comment lineno=1>
comment
<Docstring>
Docstring
<Docstring lineno="5">
Additional docstring
<Attribute lineno="7" name="__docformat__">
<Expression lineno="7">
'reStructuredText'
<Attribute lineno="9" name="a">
<Expression lineno="9">
1
<Docstring lineno="10">
Attribute docstring
<Class bases="Super" lineno="12" name="C">
<Docstring lineno="12">
C's docstring
<Attribute lineno="16" name="class_attribute">
<Expression lineno="16">
1
<Docstring lineno="17">
class_attribute's docstring
<Method lineno="19" name="__init__">
<Docstring lineno="19">
__init__'s docstring
<ParameterList lineno="19">
<Parameter lineno="19" name="self">
<Parameter lineno="19" name="text">
<Default lineno="19">
None
<Attribute lineno="22" name="self.instance_attribute">
<Expression lineno="22">
(text * 7 + ' whaddyaknow')
<Docstring lineno="24">
instance_attribute's docstring
<Function lineno="27" name="f">
<Docstring lineno="27">
f's docstring
<ParameterList lineno="27">
<Parameter lineno="27" name="x">
<Comment>
# parameter x
<Parameter lineno="27" name="y">
<Default lineno="27">
a * 5
<Comment>
# parameter y
<ExcessPositionalArguments lineno="27" name="args">
<Comment>
# parameter args
<Attribute lineno="33" name="f.function_attribute">
<Expression lineno="33">
1
<Docstring lineno="34">
f.function_attribute's docstring
(Comments are not implemented yet.)
compiler.parse() provides most of what's needed for this doctree, and
"tokenize" can be used to get the rest. We can determine the line number from
the compiler.parse() AST, and the TokenParser.rhs(lineno) method provides the
rest.
The Docutils Python reader component will transform this module doctree into a
Python-specific Docutils doctree, and then a `stylist transform`_ will
further transform it into a generic doctree. Namespaces will have to be
compiled for each of the scopes, but I'm not certain at what stage of
processing.
It's very important to keep all docstring processing out of this, so that it's
a completely generic and not tool-specific.
> Why perform all of those transformations? Why not go from the AST to a
> generic doctree? Or, even from the AST to the final output?
I want the docutils.readers.python.moduleparser.parse_module() function to
produce a standard documentation-oriented tree that can be used by any tool.
We can develop it together without having to compromise on the rest of our
design (i.e., HappyDoc doesn't have to be made to work like Docutils, and
vice-versa). It would be a higher-level version of what compiler.py provides.
The Python reader component transforms this generic AST into a Python-specific
doctree (it knows about modules, classes, functions, etc.), but this is
specific to Docutils and cannot be used by HappyDoc or others. The stylist
transform does the final layout, converting Python-specific structures
("class" sections, etc.) into a generic doctree using primitives (tables,
sections, lists, etc.). This generic doctree does *not* know about Python
structures any more. The advantage is that this doctree can be handed off to
any of the output writers to create any output format we like.
The latter two transforms are separate because I want to be able to have
multiple independent layout styles (multiple runtime-selectable "stylist
transforms"). Each of the existing tools (HappyDoc, pydoc, epydoc, Crystal,
etc.) has its own fixed format. I personally don't like the tables-based
format produced by these tools, and I'd like to be able to customize the
format easily. That's the goal of stylist transforms, which are independent
from the Reader component itself. One stylist transform could produce
HappyDoc-like output, another could produce output similar to module docs in
the Python library reference manual, and so on.
It's for exactly this reason:
>> It's very important to keep all docstring processing out of this, so that
>> it's a completely generic and not tool-specific.
... but it goes past docstring processing. It's also important to keep style
decisions and tool-specific data transforms out of this module parser.
Issues
======
* At what point should namespaces be computed? Should they be part of the
basic AST produced by the ASTVisitor walk, or generated by another tree
traversal?
* At what point should a distinction be made between local variables &
instance attributes in __init__ methods?
* Docstrings are getting their lineno from their parents. Should the
TokenParser find the real line no's?
* Comments: include them? How and when? Only full-line comments, or
parameter comments too? (See function "f" above for an example.)
* Module could use more docstrings & refactoring in places.
"""
__docformat__
=
'reStructuredText'
import
sys
import
compiler
import
compiler.ast
import
tokenize
import
token
from
compiler.consts
import
OP_ASSIGN
from
compiler.visitor
import
ASTVisitor
from
types
import
StringType
,
UnicodeType
,
TupleType
def parse_module(module_text, filename):
    """Return a module documentation tree from `module_text`."""
    # Build the standard compiler-package AST, then walk it with our own
    # visitor, which consults a TokenParser for details (e.g. right-hand
    # sides) that the AST does not retain.
    ast = compiler.parse(module_text)
    token_parser = TokenParser(module_text)
    visitor = ModuleVisitor(filename, token_parser)
    compiler.walk(ast, visitor, walker=visitor)
    return visitor.module
class Node:

    """
    Base class for module documentation tree nodes.
    """

    def __init__(self, node):
        # `node` is a compiler.ast node; only its line number is kept.
        self.children = []
        """List of child nodes."""

        self.lineno = node.lineno
        """Line number of this node (or ``None``)."""

    def __str__(self, indent='    ', level=0):
        # Render self plus all children, one per line, indented by depth.
        # NOTE(review): the scrape collapsed the whitespace in the default
        # `indent`; four spaces matches upstream docutils — confirm.
        return ''.join(['%s%s\n' % (indent * level, repr(self))] +
                       [child.__str__(indent, level + 1)
                        for child in self.children])

    def __repr__(self):
        parts = [self.__class__.__name__]
        for name, value in self.attlist():
            parts.append('%s="%s"' % (name, value))
        return '<%s>' % ' '.join(parts)

    def attlist(self, **atts):
        # Subclasses pass extra attributes as keywords; `lineno` is added
        # automatically when known.  Sorted for stable rendering.
        if self.lineno is not None:
            atts['lineno'] = self.lineno
        attlist = atts.items()
        attlist.sort()
        return attlist

    def append(self, node):
        self.children.append(node)

    def extend(self, node_list):
        self.children.extend(node_list)
class TextNode(Node):

    # A node that carries a block of text (docstring, comment, expression
    # source); subclasses control rendering.

    def __init__(self, node, text):
        Node.__init__(self, node)
        # Normalize the raw (doc)string's indentation before storing.
        self.text = trim_docstring(text)

    def __str__(self, indent='    ', level=0):
        # Text lines are rendered one level deeper than the node tag.
        prefix = indent * (level + 1)
        text = '\n'.join([prefix + line
                          for line in self.text.splitlines()])
        return Node.__str__(self, indent, level) + text + '\n'
class Module(Node):

    # Root of the module documentation tree; identified by file name.

    def __init__(self, node, filename):
        Node.__init__(self, node)
        self.filename = filename

    def attlist(self):
        return Node.attlist(self, filename=self.filename)
class Docstring(TextNode):
    # A docstring's text; rendering is inherited from TextNode.
    pass


class Comment(TextNode):
    # A source comment's text; rendering is inherited from TextNode.
    pass
class Import(Node):

    # Represents an ``import ...`` or ``from ... import ...`` statement.
    # `names` is a list of (name, alias) pairs; alias may be None.

    def __init__(self, node, names, from_name=None):
        Node.__init__(self, node)
        self.names = names
        self.from_name = from_name

    def __str__(self, indent='    ', level=0):
        prefix = indent * (level + 1)
        lines = []
        # NOTE(fix): the original loop variable was named ``as``, which
        # became a reserved keyword in Python 2.6; renamed to ``alias``
        # (a purely local rename — no interface change).
        for name, alias in self.names:
            if alias:
                lines.append('%s%s as %s' % (prefix, name, alias))
            else:
                lines.append('%s%s' % (prefix, name))
        text = '\n'.join(lines)
        return Node.__str__(self, indent, level) + text + '\n'

    def attlist(self):
        if self.from_name:
            atts = {'from': self.from_name}
        else:
            atts = {}
        return Node.attlist(self, **atts)
class Attribute(Node):

    # A single assigned name (module/class/instance attribute).

    def __init__(self, node, name):
        Node.__init__(self, node)
        self.name = name

    def attlist(self):
        return Node.attlist(self, name=self.name)
class AttributeTuple(Node):

    # A tuple-unpacking assignment target: several names at once.

    def __init__(self, node, names):
        Node.__init__(self, node)
        self.names = names

    def attlist(self):
        # Render the target names as one space-separated attribute value.
        return Node.attlist(self, names=' '.join(self.names))
class Expression(TextNode):

    # Source text of an expression (e.g. an assignment's right-hand side).

    def __str__(self, indent='    ', level=0):
        prefix = indent * (level + 1)
        # Escape non-ASCII and control characters so the expression always
        # renders on a single line.
        return '%s%s%s\n' % (Node.__str__(self, indent, level),
                             prefix, self.text.encode('unicode-escape'))
class Function(Attribute):
    # A function definition; behaves like a named attribute node.
    pass


class ParameterList(Node):
    # Container for a function's Parameter children.
    pass


class Parameter(Attribute):
    # A single (positional or keyword) parameter.
    pass


class ParameterTuple(AttributeTuple):

    # A tuple parameter (Python 2 syntax), rendered in source form.

    def attlist(self):
        return Node.attlist(self,
                            names=normalize_parameter_name(self.names))


class ExcessPositionalArguments(Parameter):
    # A ``*args`` parameter.
    pass


class ExcessKeywordArguments(Parameter):
    # A ``**kwargs`` parameter.
    pass


class Default(Expression):
    # A parameter's default-value expression.
    pass
class Class(Node):

    # A class definition, with optional base-class names.

    def __init__(self, node, name, bases=None):
        Node.__init__(self, node)
        self.name = name
        self.bases = bases or []

    def attlist(self):
        atts = {'name': self.name}
        if self.bases:
            # Base names are rendered as one space-separated value.
            atts['bases'] = ' '.join(self.bases)
        return Node.attlist(self, **atts)
class Method(Function):
    # A function defined inside a class body.
    pass
class BaseVisitor(ASTVisitor):

    # Common state for all AST walkers in this module.

    def __init__(self, token_parser):
        ASTVisitor.__init__(self)
        self.token_parser = token_parser
        # Stack of open container nodes (module, class, ...).
        self.context = []
        # The node a subsequent bare-string docstring would attach to
        # (or None when no attachment point is active).
        self.documentable = None

    def default(self, node, *args):
        # Any unhandled statement breaks the docstring attachment chain.
        self.documentable = None
        #print 'in default (%s)' % node.__class__.__name__
        #ASTVisitor.default(self, node, *args)

    def default_visit(self, node, *args):
        # Recurse into children without resetting `documentable`.
        #print 'in default_visit (%s)' % node.__class__.__name__
        ASTVisitor.default(self, node, *args)
class DocstringVisitor(BaseVisitor):

    """Collects bare string statements as docstrings for `documentable`."""

    def visitDiscard(self, node):
        # A bare expression statement; only interesting if something is
        # currently documentable (its docstring may follow).
        if self.documentable:
            self.visit(node.expr)

    def visitConst(self, node):
        if self.documentable:
            # Only string constants count as (attribute) docstrings;
            # any other constant ends the documentable chain.
            if type(node.value) in (StringType, UnicodeType):
                self.documentable.append(Docstring(node, node.value))
            else:
                self.documentable = None

    def visitStmt(self, node):
        # Statement lists are transparent: traverse into children.
        self.default_visit(node)
class AssignmentVisitor(DocstringVisitor):

    """Handles assignments by delegating to an `AttributeVisitor`."""

    def visitAssign(self, node):
        # Walk the assignment with a dedicated visitor that collects the
        # bound names and the right-hand-side source text.
        visitor = AttributeVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        if visitor.attributes:
            self.context[-1].extend(visitor.attributes)
        # Only a single bound attribute can receive a following
        # "attribute docstring"; multiple targets cannot.
        if len(visitor.attributes) == 1:
            self.documentable = visitor.attributes[0]
        else:
            self.documentable = None
class ModuleVisitor(AssignmentVisitor):

    """
    Top-level visitor: builds a `Module` documentation tree from a
    module's AST, delegating functions and classes to sub-visitors.
    """

    def __init__(self, filename, token_parser):
        AssignmentVisitor.__init__(self, token_parser)
        # Source file name, recorded on the Module node.
        self.filename = filename
        # Root of the resulting documentation tree (set by visitModule).
        self.module = None

    def visitModule(self, node):
        self.module = module = Module(node, self.filename)
        if node.doc is not None:
            module.append(Docstring(node, node.doc))
        self.context.append(module)
        # The module itself is documentable until something else is seen.
        self.documentable = module
        self.visit(node.node)
        self.context.pop()

    def visitImport(self, node):
        self.context[-1].append(Import(node, node.names))
        # Imports cannot carry attribute docstrings.
        self.documentable = None

    def visitFrom(self, node):
        self.context[-1].append(
            Import(node, node.names, from_name=node.modname))
        self.documentable = None

    def visitFunction(self, node):
        # Functions get their own visitor; the result is appended whole.
        visitor = FunctionVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        self.context[-1].append(visitor.function)

    def visitClass(self, node):
        # Classes likewise get a dedicated visitor.
        visitor = ClassVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        self.context[-1].append(visitor.klass)
class AttributeVisitor(BaseVisitor):

    """
    Collects the target name(s) of one assignment statement, plus an
    `Expression` node holding the right-hand side's source text.
    """

    def __init__(self, token_parser):
        BaseVisitor.__init__(self, token_parser)
        # Attribute/AttributeTuple nodes for the assignment's targets.
        self.attributes = []

    def visitAssign(self, node):
        # Don't visit the expression itself, just the attribute nodes:
        for child in node.nodes:
            self.dispatch(child)
        # Recover the RHS text from the token stream (the compiler AST
        # does not keep the original source).
        expression_text = self.token_parser.rhs(node.lineno)
        expression = Expression(node, expression_text)
        for attribute in self.attributes:
            attribute.append(expression)

    def visitAssName(self, node):
        # Simple target:  name = ...
        self.attributes.append(Attribute(node, node.name))

    def visitAssTuple(self, node):
        # Tuple target:  a, b = ...  Collect child names into a fresh
        # list, then fold them into a single AttributeTuple.
        attributes = self.attributes
        self.attributes = []
        self.default_visit(node)
        names = [attribute.name for attribute in self.attributes]
        att_tuple = AttributeTuple(node, names)
        att_tuple.lineno = self.attributes[0].lineno
        self.attributes = attributes
        self.attributes.append(att_tuple)

    def visitAssAttr(self, node):
        # Dotted target:  obj.attr = ...  Pass the trailing attribute
        # name down so nested Getattr/Name visits can prepend to it.
        self.default_visit(node, node.attrname)

    def visitGetattr(self, node, suffix):
        self.default_visit(node, node.attrname + '.' + suffix)

    def visitName(self, node, suffix):
        # Innermost name of a dotted target; assemble the full path.
        self.attributes.append(Attribute(node, node.name + '.' + suffix))
class FunctionVisitor(DocstringVisitor):

    """Builds a `Function` node (docstring + parameter list) from an AST."""

    # Guard flag: nested function definitions are not documented.
    in_function = 0
    # Node class to instantiate; MethodVisitor overrides this with Method.
    function_class = Function

    def visitFunction(self, node):
        if self.in_function:
            self.documentable = None
            # Don't bother with nested function definitions.
            return
        self.in_function = 1
        self.function = function = self.function_class(node, node.name)
        if node.doc is not None:
            function.append(Docstring(node, node.doc))
        self.context.append(function)
        self.documentable = function
        self.parse_parameter_list(node)
        self.visit(node.code)
        self.context.pop()

    def parse_parameter_list(self, node):
        """Convert `node.argnames`/`node.defaults` into parameter nodes,
        recovering default-value source text via the token parser."""
        parameters = []
        special = []
        argnames = list(node.argnames)
        # **kwargs and *args occupy the trailing argname slots, in that
        # order; peel them off before pairing names with defaults.
        if node.kwargs:
            special.append(ExcessKeywordArguments(node, argnames[-1]))
            argnames.pop()
        if node.varargs:
            special.append(ExcessPositionalArguments(node, argnames[-1]))
            argnames.pop()
        defaults = list(node.defaults)
        # Left-pad with None so zip() pairs defaults with the rightmost
        # parameters (defaults apply from the right).
        defaults = [None] * (len(argnames) - len(defaults)) + defaults
        function_parameters = self.token_parser.function_parameters(
            node.lineno)
        #print >>sys.stderr, function_parameters
        for argname, default in zip(argnames, defaults):
            if type(argname) is TupleType:
                # Tuple parameter, e.g. def f((a, b)): ...
                parameter = ParameterTuple(node, argname)
                argname = normalize_parameter_name(argname)
            else:
                parameter = Parameter(node, argname)
            if default:
                # The token parser keys default text by normalized name.
                parameter.append(Default(node, function_parameters[argname]))
            parameters.append(parameter)
        if parameters or special:
            # *args precedes **kwargs in the signature; reverse restores
            # that order after the pops above.
            special.reverse()
            parameters.extend(special)
            parameter_list = ParameterList(node)
            parameter_list.extend(parameters)
            self.function.append(parameter_list)
class ClassVisitor(AssignmentVisitor):

    """Builds a `Class` node: bases, docstring, attributes, and methods."""

    # Guard flag: nested class definitions are not documented.
    in_class = 0

    def __init__(self, token_parser):
        AssignmentVisitor.__init__(self, token_parser)
        # Textual (possibly dotted) names of the class's bases.
        self.bases = []

    def visitClass(self, node):
        if self.in_class:
            self.documentable = None
            # Don't bother with nested class definitions.
            return
        self.in_class = 1
        #import mypdb as pdb
        #pdb.set_trace()
        # Collect base-class names first (fills self.bases via
        # visitName/visitGetattr below).
        for base in node.bases:
            self.visit(base)
        self.klass = klass = Class(node, node.name, self.bases)
        if node.doc is not None:
            klass.append(Docstring(node, node.doc))
        self.context.append(klass)
        self.documentable = klass
        self.visit(node.code)
        self.context.pop()

    def visitGetattr(self, node, suffix=None):
        # Part of a dotted base name; accumulate right-to-left.
        if suffix:
            name = node.attrname + '.' + suffix
        else:
            name = node.attrname
        self.default_visit(node, name)

    def visitName(self, node, suffix=None):
        # Innermost name of a (possibly dotted) base; record it.
        if suffix:
            name = node.name + '.' + suffix
        else:
            name = node.name
        self.bases.append(name)

    def visitFunction(self, node):
        # __init__ gets a visitor that also records self.* assignments.
        if node.name == '__init__':
            visitor = InitMethodVisitor(self.token_parser)
        else:
            visitor = MethodVisitor(self.token_parser)
        compiler.walk(node, visitor, walker=visitor)
        self.context[-1].append(visitor.function)
# Same as FunctionVisitor but produces `Method` nodes.
class MethodVisitor(FunctionVisitor):

    function_class = Method


# __init__ methods additionally record instance-attribute assignments
# (via the AssignmentVisitor mix-in).
class InitMethodVisitor(MethodVisitor, AssignmentVisitor):
    pass
class TokenParser:

    """
    Walk a module's token stream (Python 2 `tokenize`) in parallel with
    its AST, recovering source text the AST discards: assignment
    right-hand sides and function parameter defaults, whitespace-
    normalized.
    """

    def __init__(self, text):
        # Trailing newlines guarantee the tokenizer terminates cleanly.
        self.text = text + '\n\n'
        self.lines = self.text.splitlines(1)
        self.generator = tokenize.generate_tokens(iter(self.lines).next)
        self.next()

    def __iter__(self):
        return self

    def next(self):
        """Advance one token; unpack it into convenience attributes."""
        self.token = self.generator.next()
        self.type, self.string, self.start, self.end, self.line = self.token
        return self.token

    def goto_line(self, lineno):
        """Advance the stream to the first token at or after `lineno`."""
        while self.start[0] < lineno:
            self.next()
        # Bug fix: previously ``return token`` returned the imported
        # `token` *module* rather than the current token.  Callers
        # ignore the return value, so this was latent.
        return self.token

    def rhs(self, lineno):
        """
        Return a whitespace-normalized expression string from the right-hand
        side of an assignment at line `lineno`.
        """
        self.goto_line(lineno)
        while self.string != '=':
            self.next()
        self.stack = None
        while self.type != token.NEWLINE and self.string != ';':
            if self.string == '=' and not self.stack:
                # Each top-level '=' restarts collection, so for chained
                # assignments (a = b = expr) only the final RHS is kept.
                self.tokens = []
                self.stack = []
                self._type = None
                self._string = None
                self._backquote = 0
            else:
                self.note_token()
            self.next()
        self.next()
        text = ''.join(self.tokens)
        return text.strip()

    # Bracket bookkeeping tables for whitespace normalization:
    closers = {')': '(', ']': '[', '}': '{'}
    openers = {'(': 1, '[': 1, '{': 1}
    # Tokens that delete a preceding space / suppress a following space.
    del_ws_prefix = {'.': 1, '=': 1, ')': 1, ']': 1, '}': 1, ':': 1,
                     ',': 1}
    no_ws_suffix = {'.': 1, '=': 1, '(': 1, '[': 1, '{': 1}

    def note_token(self):
        """Append the current token to `self.tokens`, normalizing
        whitespace and tracking bracket/backquote nesting."""
        if self.type == tokenize.NL:
            return
        del_ws = self.del_ws_prefix.has_key(self.string)
        append_ws = not self.no_ws_suffix.has_key(self.string)
        if self.openers.has_key(self.string):
            self.stack.append(self.string)
            if (self._type == token.NAME
                or self.closers.has_key(self._string)):
                # A call or subscript: no space before the open bracket.
                del_ws = 1
        elif self.closers.has_key(self.string):
            assert self.stack[-1] == self.closers[self.string]
            self.stack.pop()
        elif self.string == '`':
            # Python 2 backquote repr syntax; toggles open/close state.
            if self._backquote:
                del_ws = 1
                assert self.stack[-1] == '`'
                self.stack.pop()
            else:
                append_ws = 0
                self.stack.append('`')
            self._backquote = not self._backquote
        if del_ws and self.tokens and self.tokens[-1] == ' ':
            del self.tokens[-1]
        self.tokens.append(self.string)
        self._type = self.type
        self._string = self.string
        if append_ws:
            self.tokens.append(' ')

    def function_parameters(self, lineno):
        """
        Return a dictionary mapping parameters to defaults
        (whitespace-normalized strings).
        """
        self.goto_line(lineno)
        while self.string != 'def':
            self.next()
        while self.string != '(':
            self.next()
        name = None
        default = None
        parameter_tuple = None
        self.tokens = []
        parameters = {}
        self.stack = [self.string]
        self.next()
        while 1:
            # Depth 1 means we are at the top level of the parameter
            # list; deeper tokens belong to a default value or a tuple
            # parameter and are just accumulated.
            if len(self.stack) == 1:
                if parameter_tuple:
                    # Just encountered ")".
                    name = ''.join(self.tokens).strip()
                    self.tokens = []
                    parameter_tuple = None
                if self.string in (')', ','):
                    if name:
                        if self.tokens:
                            default_text = ''.join(self.tokens).strip()
                        else:
                            default_text = None
                        parameters[name] = default_text
                        self.tokens = []
                        name = None
                        default = None
                    if self.string == ')':
                        break
                elif self.type == token.NAME:
                    if name and default:
                        # Part of a default expression, e.g. a NAME.
                        self.note_token()
                    else:
                        assert name is None, (
                            'token=%r name=%r parameters=%r stack=%r'
                            % (self.token, name, parameters, self.stack))
                        name = self.string
                elif self.string == '=':
                    assert name is not None, 'token=%r' % (self.token,)
                    assert default is None, 'token=%r' % (self.token,)
                    assert self.tokens == [], 'token=%r' % (self.token,)
                    default = 1
                    self._type = None
                    self._string = None
                    self._backquote = 0
                elif name:
                    self.note_token()
                elif self.string == '(':
                    # Start of a tuple parameter, e.g. def f((a, b)):
                    parameter_tuple = 1
                    self._type = None
                    self._string = None
                    self._backquote = 0
                    self.note_token()
                else:
                    # ignore these tokens:
                    assert (self.string in ('*', '**', '\n')
                            or self.type == tokenize.COMMENT), (
                        'token=%r' % (self.token,))
            else:
                self.note_token()
            self.next()
        return parameters
def trim_docstring(text):
    """
    Trim indentation and blank lines from docstring text & return it.

    See PEP 257.  Empty or false `text` is returned unchanged.
    """
    if not text:
        return text
    # Convert tabs to spaces (following the normal Python rules)
    # and split into a list of lines:
    lines = text.expandtabs().splitlines()
    # Determine minimum indentation (first line doesn't count).  Use a
    # None sentinel instead of the original `sys.maxint`, which no
    # longer exists in Python 3; behavior is identical.
    indent = None
    for line in lines[1:]:
        stripped = line.lstrip()
        if stripped:
            margin = len(line) - len(stripped)
            if indent is None or margin < indent:
                indent = margin
    # Remove indentation (first line is special):
    trimmed = [lines[0].strip()]
    if indent is not None:
        for line in lines[1:]:
            trimmed.append(line[indent:].rstrip())
    # Strip off trailing and leading blank lines:
    while trimmed and not trimmed[-1]:
        trimmed.pop()
    while trimmed and not trimmed[0]:
        trimmed.pop(0)
    # Return a single string:
    return '\n'.join(trimmed)
def normalize_parameter_name(name):
    """
    Convert a tuple like ``('a', ('b', 'c'), 'd')`` into ``'(a, (b, c), d)'``.

    Plain string names are returned unchanged.  Recurses to handle
    arbitrarily nested tuple parameters.
    """
    # isinstance() replaces the original ``type(name) is TupleType``:
    # idiomatic, and independent of the deprecated `types.TupleType`.
    if isinstance(name, tuple):
        return '(%s)' % ', '.join(
            [normalize_parameter_name(n) for n in name])
    else:
        return name
lib/python/docutils/readers/standalone.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:49:54 $
# Copyright: This module has been placed in the public domain.
"""
Standalone file Reader for the reStructuredText markup syntax.
"""
__docformat__
=
'reStructuredText'
import
sys
from
docutils
import
readers
from
docutils.transforms
import
frontmatter
,
references
from
docutils.parsers.rst
import
Parser
class Reader(readers.Reader):

    """Standalone-file Reader for the reStructuredText markup syntax."""

    supported = ('standalone',)
    """Contexts this reader supports."""

    document = None
    """A single document tree."""

    # Command-line settings in the (title, description, option_spec)
    # shape expected by docutils' OptionParser.  Both transforms are
    # enabled by default and disabled by the corresponding flag.
    settings_spec = (
        'Standalone Reader',
        None,
        (('Disable the promotion of a lone top-level section title to '
          'document title (and subsequent section title to document '
          'subtitle promotion; enabled by default).',
          ['--no-doc-title'],
          {'dest': 'doctitle_xform', 'action': 'store_false',
           'default': 1}),
         ('Disable the bibliographic field list transform (enabled by '
          'default).',
          ['--no-doc-info'],
          {'dest': 'docinfo_xform', 'action': 'store_false',
           'default': 1}),))

    # Transforms applied to every document this reader produces,
    # in addition to the universal ones.
    default_transforms = (references.Substitutions,
                          frontmatter.DocTitle,
                          frontmatter.DocInfo,
                          references.ChainedTargets,
                          references.AnonymousHyperlinks,
                          references.IndirectHyperlinks,
                          references.Footnotes,
                          references.ExternalTargets,
                          references.InternalTargets,)
lib/python/docutils/transforms/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
This package contains modules for standard tree transforms available
to Docutils components. Tree transforms serve a variety of purposes:
- To tie up certain syntax-specific "loose ends" that remain after the
initial parsing of the input plaintext. These transforms are used to
supplement a limited syntax.
- To automate the internal linking of the document tree (hyperlink
references, footnote references, etc.).
- To extract useful information from the document tree. These
transforms may be used to construct (for example) indexes and tables
of contents.
Each transform is an optional step that a Docutils Reader may choose to
perform on the parsed document, depending on the input context. A Docutils
Reader may also perform Reader-specific transforms before or after performing
these standard transforms.
"""
__docformat__
=
'reStructuredText'
from
docutils
import
languages
,
ApplicationError
,
TransformSpec
# Raised by transforms to signal an unprocessable construct; callers
# within this package catch it and fall back gracefully.
class TransformError(ApplicationError):
    pass
class Transform:

    """
    Docutils transform component abstract base class.
    """

    # Numerical priority of this transform, 0 through 999 (override).
    default_priority = None

    def __init__(self, document, startnode=None):
        """
        Initial setup for in-place document transforms.

        `document` is the tree to transform; `startnode`, when given,
        is the node at which to begin (None for whole-document
        transforms).
        """
        # The document tree to transform.
        self.document = document
        # Node from which to begin the transform, or None.
        self.startnode = startnode
        # Language module local to this document.
        self.language = languages.get_language(
            document.settings.language_code)

    def apply(self):
        """Override to apply the transform to the document tree."""
        raise NotImplementedError('subclass must override this method')
class Transformer(TransformSpec):

    """
    Stores transforms (`Transform` classes) and applies them to document
    trees.  Also keeps track of components by component type name.
    """

    from docutils.transforms import universal

    default_transforms = (universal.Decorations,
                          universal.FinalChecks,
                          universal.Messages)
    """These transforms are applied to all document trees."""

    def __init__(self, document):
        self.transforms = []
        """List of transforms to apply.  Each item is a 3-tuple:
        ``(priority string, transform class, pending node or None)``."""

        self.document = document
        """The `nodes.document` object this Transformer is attached to."""

        self.applied = []
        """Transforms already applied, in order."""

        self.sorted = 0
        """Boolean: is `self.transforms` sorted?"""

        self.components = {}
        """Mapping of component type name to component object.  Set by
        `self.populate_from_components()`."""

        self.serialno = 0
        """Internal serial number to keep track of the add order of
        transforms."""

    def add_transform(self, transform_class, priority=None):
        """
        Store a single transform.  Use `priority` to override the default.
        """
        if priority is None:
            priority = transform_class.default_priority
        priority_string = self.get_priority_string(priority)
        self.transforms.append((priority_string, transform_class, None))
        # Adding invalidates any previous sort.
        self.sorted = 0

    def add_transforms(self, transform_list):
        """Store multiple transforms, with default priorities."""
        for transform_class in transform_list:
            priority_string = self.get_priority_string(
                transform_class.default_priority)
            self.transforms.append((priority_string, transform_class, None))
        self.sorted = 0

    def add_pending(self, pending, priority=None):
        """Store a transform with an associated `pending` node."""
        transform_class = pending.transform
        if priority is None:
            priority = transform_class.default_priority
        priority_string = self.get_priority_string(priority)
        self.transforms.append((priority_string, transform_class, pending))
        self.sorted = 0

    def get_priority_string(self, priority):
        """
        Return a string, `priority` combined with `self.serialno`.

        This ensures FIFO order on transforms with identical priority.
        """
        self.serialno += 1
        return '%03d-%03d' % (priority, self.serialno)

    def populate_from_components(self, components):
        """
        Store each component's default transforms, with default priorities.
        Also, store components by type name in a mapping for later lookup.
        """
        self.add_transforms(self.default_transforms)
        for component in components:
            if component is None:
                continue
            self.add_transforms(component.default_transforms)
            self.components[component.component_type] = component
        self.sorted = 0

    def apply_transforms(self):
        """Apply all of the stored transforms, in priority order."""
        self.document.reporter.attach_observer(
            self.document.note_transform_message)
        while self.transforms:
            if not self.sorted:
                # Unsorted initially, and whenever a transform is added.
                # Sort ascending then reverse so pop() (from the end)
                # yields the lowest priority string first.
                self.transforms.sort()
                self.transforms.reverse()
                self.sorted = 1
            priority, transform_class, pending = self.transforms.pop()
            transform = transform_class(self.document, startnode=pending)
            transform.apply()
            self.applied.append((priority, transform_class, pending))
lib/python/docutils/transforms/components.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
Docutils component-related transforms.
"""
__docformat__
=
'reStructuredText'
import
sys
import
os
import
re
import
time
from
docutils
import
nodes
,
utils
from
docutils
import
ApplicationError
,
DataError
from
docutils.transforms
import
Transform
,
TransformError
class Filter(Transform):

    """
    Include or exclude elements which depend on a specific Docutils
    component.

    For use with `nodes.pending` elements.  A "pending" element's
    ``details`` dictionary must contain the keys "component" and
    "format".  ``details['component']`` names the component type the
    elements depend on (e.g. "writer"); ``details['format']`` names a
    specific format or context of that component (e.g. "html").  If the
    matching Docutils component supports that format, the "pending"
    element is replaced by the contents of ``details['nodes']`` (a list
    of nodes); otherwise the "pending" element is removed.

    For example, the reStructuredText "meta" directive creates a
    "pending" element containing a "meta" element (in
    ``pending.details['nodes']``).  Only writers supporting the "html"
    format will include the "meta" element; it is deleted from the
    output of all other writers.
    """

    default_priority = 780

    def apply(self):
        pending = self.startnode
        # Which component type ('reader'/'writer') and which of its
        # formats this pending element is conditional on.
        dependent_type = pending.details['component']
        target_format = pending.details['format']
        handler = self.document.transformer.components[dependent_type]
        if handler.supports(target_format):
            # Supported: splice the stored nodes in place of "pending".
            pending.parent.replace(pending, pending.details['nodes'])
        else:
            # Unsupported: drop the pending element entirely.
            pending.parent.remove(pending)
lib/python/docutils/transforms/frontmatter.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
Transforms related to the front matter of a document (information
found before the main text):
- `DocTitle`: Used to transform a lone top level section's title to
the document title, and promote a remaining lone top-level section's
title to the document subtitle.
- `DocInfo`: Used to transform a bibliographic field list into docinfo
elements.
"""
__docformat__
=
'reStructuredText'
import
re
from
docutils
import
nodes
,
utils
from
docutils.transforms
import
TransformError
,
Transform
class DocTitle(Transform):

    """
    In reStructuredText_, there is no way to specify a document title
    and subtitle explicitly.  Instead, we can supply the document title
    (and possibly the subtitle as well) implicitly, and use this
    two-step transform to "raise" or "promote" the title(s) (and their
    corresponding section contents) to the document level.

    1. If the document contains a single top-level section as its first
       non-comment element, the top-level section's title becomes the
       document's title, and the top-level section's contents become
       the document's immediate contents.  The lone top-level section
       header must be the first non-comment element in the document.

       For example, take this input text::

           =================
            Top-Level Title
           =================

           A paragraph.

       Once parsed, it looks like this::

           <document>
               <section name="top-level title">
                   <title>
                       Top-Level Title
                   <paragraph>
                       A paragraph.

       After running the DocTitle transform, we have::

           <document name="top-level title">
               <title>
                   Top-Level Title
               <paragraph>
                   A paragraph.

    2. If step 1 successfully determines the document title, we
       continue by checking for a subtitle.

       If the lone top-level section itself contains a single
       second-level section as its first non-comment element, that
       section's title is promoted to the document's subtitle, and that
       section's contents become the document's immediate contents.
       Given this input text::

           =================
            Top-Level Title
           =================

           Second-Level Title
           ~~~~~~~~~~~~~~~~~~

           A paragraph.

       After parsing and running the Section Promotion transform, the
       result is::

           <document name="top-level title">
               <title>
                   Top-Level Title
               <subtitle name="second-level title">
                   Second-Level Title
               <paragraph>
                   A paragraph.

       (Note that the implicit hyperlink target generated by the
       "Second-Level Title" is preserved on the "subtitle" element
       itself.)

    Any comment elements occurring before the document title or
    subtitle are accumulated and inserted as the first body elements
    after the title(s).
    """

    default_priority = 320

    def apply(self):
        # The 'doctitle_xform' setting (default on) gates this transform.
        if not getattr(self.document.settings, 'doctitle_xform', 1):
            return
        # Only look for a subtitle if a title was actually promoted.
        if self.promote_document_title():
            self.promote_document_subtitle()

    def promote_document_title(self):
        """Promote a lone top-level section's title to document title.
        Return 1 on success, None otherwise."""
        section, index = self.candidate_index()
        if index is None:
            return None
        document = self.document
        # Transfer the section's attributes to the document element (at root):
        document.attributes.update(section.attributes)
        document[:] = (section[:1]        # section title
                       + document[:index] # everything that was in the
                                          # document before the section
                       + section[1:])     # everything that was in the section
        return 1

    def promote_document_subtitle(self):
        """Promote a lone second-level section's title to document
        subtitle.  Return 1 on success, None otherwise."""
        subsection, index = self.candidate_index()
        if index is None:
            return None
        subtitle = nodes.subtitle()
        # Transfer the subsection's attributes to the new subtitle:
        subtitle.attributes.update(subsection.attributes)
        # Transfer the contents of the subsection's title to the subtitle:
        subtitle[:] = subsection[0][:]
        document = self.document
        document[:] = (document[:1]       # document title
                       + [subtitle]
                       # everything that was before the section:
                       + document[1:index]
                       # everything that was in the subsection:
                       + subsection[1:])
        return 1

    def candidate_index(self):
        """
        Find and return the promotion candidate and its index.

        Return (None, None) if no valid candidate was found.
        """
        document = self.document
        index = document.first_child_not_matching_class(
            nodes.PreBibliographic)
        # The candidate must be a section AND the last child; otherwise
        # there is no *lone* top-level section to promote.
        if index is None or len(document) > (index + 1) or \
              not isinstance(document[index], nodes.section):
            return None, None
        else:
            return document[index], index
class DocInfo(Transform):

    """
    This transform is specific to the reStructuredText_ markup syntax;
    see "Bibliographic Fields" in the `reStructuredText Markup
    Specification`_ for a high-level description.  This transform
    should be run *after* the `DocTitle` transform.

    Given a field list as the first non-comment element after the
    document title and subtitle (if present), registered bibliographic
    field names are transformed to the corresponding DTD elements,
    becoming child elements of the "docinfo" element (except for a
    dedication and/or an abstract, which become "topic" elements after
    "docinfo").

    For example, given this document fragment after parsing::

        <document>
            <title>
                Document Title
            <field_list>
                <field>
                    <field_name>
                        Author
                    <field_body>
                        <paragraph>
                            A. Name
                <field>
                    <field_name>
                        Status
                    <field_body>
                        <paragraph>
                            $RCSfile: frontmatter.py,v $
            ...

    After running the bibliographic field list transform, the
    resulting document tree would look like this::

        <document>
            <title>
                Document Title
            <docinfo>
                <author>
                    A. Name
                <status>
                    frontmatter.py
            ...

    The "Status" field contained an expanded RCS keyword, which is
    normally (but optionally) cleaned up by the transform.  The sole
    contents of the field body must be a paragraph containing an
    expanded RCS keyword of the form "$keyword: expansion text $".  Any
    RCS keyword can be processed in any bibliographic field.  The
    dollar signs and leading RCS keyword name are removed.  Extra
    processing is done for the following RCS keywords:

    - "RCSfile" expands to the name of the file in the RCS or CVS
      repository, which is the name of the source file with a ",v"
      suffix appended.  The transform will remove the ",v" suffix.

    - "Date" expands to the format "YYYY/MM/DD hh:mm:ss" (in the UTC
      time zone).  The RCS Keywords transform will extract just the
      date itself and transform it to an ISO 8601 format date, as in
      "2000-12-31".

      (Since the source file for this text is itself stored under CVS,
      we can't show an example of the "Date" RCS keyword because we
      can't prevent any RCS keywords used in this explanation from
      being expanded.  Only the "RCSfile" keyword is stable; its
      expansion text changes only if the file name changes.)
    """

    default_priority = 340

    biblio_nodes = {
          'author': nodes.author,
          'authors': nodes.authors,
          'organization': nodes.organization,
          'address': nodes.address,
          'contact': nodes.contact,
          'version': nodes.version,
          'revision': nodes.revision,
          'status': nodes.status,
          'date': nodes.date,
          'copyright': nodes.copyright,
          'dedication': nodes.topic,
          'abstract': nodes.topic}
    """Canonical field name (lowcased) to node class name mapping for
    bibliographic fields (field_list)."""

    def apply(self):
        # The 'docinfo_xform' setting (default on) gates this transform.
        if not getattr(self.document.settings, 'docinfo_xform', 1):
            return
        document = self.document
        index = document.first_child_not_matching_class(
              nodes.PreBibliographic)
        if index is None:
            return
        candidate = document[index]
        if isinstance(candidate, nodes.field_list):
            # Insert the extracted docinfo right after title/subtitle.
            biblioindex = document.first_child_not_matching_class(
                  nodes.Titular)
            nodelist = self.extract_bibliographic(candidate)
            del document[index]         # untransformed field list (candidate)
            document[biblioindex:biblioindex] = nodelist
        return

    def extract_bibliographic(self, field_list):
        """Convert recognized fields of `field_list` into docinfo/topic
        nodes; unrecognized fields are kept verbatim inside docinfo."""
        docinfo = nodes.docinfo()
        bibliofields = self.language.bibliographic_fields
        labels = self.language.labels
        topics = {'dedication': None, 'abstract': None}
        for field in field_list:
            try:
                name = field[0][0].astext()
                normedname = nodes.fully_normalize_name(name)
                # Unknown or malformed fields raise TransformError and
                # fall through to the verbatim-copy handler below.
                if not (len(field) == 2 and bibliofields.has_key(normedname)
                        and self.check_empty_biblio_field(field, name)):
                    raise TransformError
                canonical = bibliofields[normedname]
                biblioclass = self.biblio_nodes[canonical]
                if issubclass(biblioclass, nodes.TextElement):
                    # Simple one-paragraph fields (author, date, ...).
                    if not self.check_compound_biblio_field(field, name):
                        raise TransformError
                    utils.clean_rcs_keywords(
                          field[1][0], self.rcs_keyword_substitutions)
                    docinfo.append(biblioclass('', '', *field[1][0]))
                elif issubclass(biblioclass, nodes.authors):
                    self.extract_authors(field, name, docinfo)
                elif issubclass(biblioclass, nodes.topic):
                    # Dedication/abstract become standalone topics; only
                    # one of each is allowed.
                    if topics[canonical]:
                        field[-1] += self.document.reporter.warning(
                            'There can only be one "%s" field.' % name,
                            base_node=field)
                        raise TransformError
                    title = nodes.title(name, labels[canonical])
                    topics[canonical] = biblioclass(
                        '', title, CLASS=canonical, *field[1].children)
                else:
                    docinfo.append(biblioclass('', *field[1].children))
            except TransformError:
                # Keep the field as-is, but still clean RCS keywords
                # when the body is a single paragraph.
                if len(field[-1]) == 1 \
                       and isinstance(field[-1][0], nodes.paragraph):
                    utils.clean_rcs_keywords(
                        field[-1][0], self.rcs_keyword_substitutions)
                docinfo.append(field)
        nodelist = []
        if len(docinfo) != 0:
            nodelist.append(docinfo)
        for name in ('dedication', 'abstract'):
            if topics[name]:
                nodelist.append(topics[name])
        return nodelist

    def check_empty_biblio_field(self, field, name):
        """Warn and return None when the field body is empty; else 1."""
        if len(field[-1]) < 1:
            field[-1] += self.document.reporter.warning(
                  'Cannot extract empty bibliographic field "%s".' % name,
                  base_node=field)
            return None
        return 1

    def check_compound_biblio_field(self, field, name):
        """Warn and return None unless the field body is exactly one
        paragraph; else 1."""
        if len(field[-1]) > 1:
            field[-1] += self.document.reporter.warning(
                  'Cannot extract compound bibliographic field "%s".' % name,
                  base_node=field)
            return None
        if not isinstance(field[-1][0], nodes.paragraph):
            field[-1] += self.document.reporter.warning(
                  'Cannot extract bibliographic field "%s" containing '
                  'anything other than a single paragraph.' % name,
                  base_node=field)
            return None
        return 1

    # (pattern, replacement) pairs for cleaning expanded RCS/CVS
    # keywords: "$Date: YYYY/MM/DD hh:mm:ss $" -> ISO date,
    # "$RCSfile: name,v $" -> name, any other "$Keyword: text $" -> text.
    # The "$" literals are split across adjacent string literals so the
    # patterns themselves survive RCS keyword expansion.
    rcs_keyword_substitutions = [
          (re.compile(r'\$' r'Date: (\d\d\d\d)/(\d\d)/(\d\d) [\d:]+ \$$',
                      re.IGNORECASE), r'\1-\2-\3'),
          (re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE),
           r'\1'),
          (re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),]

    def extract_authors(self, field, name, docinfo):
        """Parse an "Authors" field body (one paragraph with separators,
        several paragraphs, or a bullet list) into author nodes."""
        try:
            if len(field[1]) == 1:
                if isinstance(field[1][0], nodes.paragraph):
                    authors = self.authors_from_one_paragraph(field)
                elif isinstance(field[1][0], nodes.bullet_list):
                    authors = self.authors_from_bullet_list(field)
                else:
                    raise TransformError
            else:
                authors = self.authors_from_paragraphs(field)
            authornodes = [nodes.author('', '', *author)
                           for author in authors if author]
            if len(authornodes) > 1:
                docinfo.append(nodes.authors('', *authornodes))
            elif len(authornodes) == 1:
                docinfo.append(authornodes[0])
            else:
                raise TransformError
        except TransformError:
            field[-1] += self.document.reporter.warning(
                  'Bibliographic field "%s" incompatible with extraction: '
                  'it must contain either a single paragraph (with authors '
                  'separated by one of "%s"), multiple paragraphs (one per '
                  'author), or a bullet list with one paragraph (one author) '
                  'per item.'
                  % (name, ''.join(self.language.author_separators)),
                  base_node=field)
            raise

    def authors_from_one_paragraph(self, field):
        """Split a single paragraph on the first language-specific
        separator that yields more than one author."""
        text = field[1][0].astext().strip()
        if not text:
            raise TransformError
        for authorsep in self.language.author_separators:
            authornames = text.split(authorsep)
            if len(authornames) > 1:
                break
        authornames = [author.strip() for author in authornames]
        authors = [[nodes.Text(author)] for author in authornames if author]
        return authors

    def authors_from_bullet_list(self, field):
        """One author per bullet item; each item must be one paragraph."""
        authors = []
        for item in field[1][0]:
            if len(item) != 1 or not isinstance(item[0], nodes.paragraph):
                raise TransformError
            authors.append(item[0].children)
        if not authors:
            raise TransformError
        return authors

    def authors_from_paragraphs(self, field):
        """One author per paragraph; any non-paragraph child is an error."""
        for item in field[1]:
            if not isinstance(item, nodes.paragraph):
                raise TransformError
        authors = [item.children for item in field[1]]
        return authors
lib/python/docutils/transforms/misc.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
Miscellaneous transforms.
"""
__docformat__
=
'reStructuredText'
from
docutils
import
nodes
from
docutils.transforms
import
Transform
,
TransformError
class CallBack(Transform):

    """
    Inserts a callback into a document.  The callback is called when the
    transform is applied, which is determined by its priority.

    For use with `nodes.pending` elements.  Requires a ``details['callback']``
    entry, a bound method or function which takes one parameter: the pending
    node.  Other data can be stored in the ``details`` attribute or in the
    object hosting the callback method.
    """

    default_priority = 990

    def apply(self):
        pending = self.startnode
        # Invoke the stored callback with the pending node itself, then
        # remove the placeholder from the tree.
        pending.details['callback'](pending)
        pending.parent.remove(pending)
class ClassAttribute(Transform):

    """
    Move the "class" attribute stored in a pending node onto the first
    suitable (non-comment) element that follows it, searching upward
    through enclosing parents if necessary.
    """

    default_priority = 210

    def apply(self):
        pending = self.startnode
        class_value = pending.details['class']
        parent = pending.parent
        child = pending
        while parent:
            # Look at siblings following `child` within `parent`.
            for index in range(parent.index(child) + 1, len(parent)):
                element = parent[index]
                if isinstance(element, nodes.comment):
                    continue
                element.set_class(class_value)
                pending.parent.remove(pending)
                return
            else:
                # No suitable sibling at this level; climb one level up.
                child = parent
                parent = parent.parent
        error = self.document.reporter.error(
            'No suitable element following "%s" directive'
            % pending.details['directive'],
            nodes.literal_block(pending.rawsource, pending.rawsource),
            line=pending.line)
        pending.parent.replace(pending, error)
lib/python/docutils/transforms/parts.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger, Ueli Schlaepfer, Dmitry Jemerov
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
Transforms related to document parts.
"""
__docformat__
=
'reStructuredText'
import
re
import
sys
from
docutils
import
nodes
,
utils
from
docutils.transforms
import
TransformError
,
Transform
class SectNum(Transform):

    """
    Automatically assigns numbers to the titles of document sections.

    It is possible to limit the maximum section level for which the numbers
    are added.  For those sections that are auto-numbered, the "autonum"
    attribute is set, informing the contents table generator that a different
    form of the TOC should be used.
    """

    default_priority = 710
    """Should be applied before `Contents`."""

    def apply(self):
        # Depth limit comes from the directive's details; default unlimited.
        self.maxdepth = self.startnode.details.get('depth', sys.maxint)
        self.startnode.parent.remove(self.startnode)
        self.update_section_numbers(self.document)

    def update_section_numbers(self, node, prefix=(), depth=0):
        """Recursively number child sections of `node` (e.g. "1.2.3")."""
        depth += 1
        sectnum = 1
        for child in node:
            if isinstance(child, nodes.section):
                numbers = prefix + (str(sectnum),)
                title = child[0]
                # Use &nbsp; (no-break spaces) after the number for spacing:
                generated = nodes.generated(
                    '', '.'.join(numbers) + u'\u00a0' * 3, CLASS='sectnum')
                title.insert(0, generated)
                title['auto'] = 1
                if depth < self.maxdepth:
                    self.update_section_numbers(child, numbers, depth)
                sectnum += 1
class Contents(Transform):

    """
    This transform generates a table of contents from the entire document tree
    or from a single branch.  It locates "section" elements and builds them
    into a nested bullet list, which is placed within a "topic".  A title is
    either explicitly specified, taken from the appropriate language module,
    or omitted (local table of contents).  The depth may be specified.
    Two-way references between the table of contents and section titles are
    generated (requires Writer support).

    This transform requires a startnode, which contains generation
    options and provides the location for the generated table of contents (the
    startnode is replaced by the table of contents "topic").
    """

    default_priority = 720

    def apply(self):
        topic = nodes.topic(CLASS='contents')
        details = self.startnode.details
        if details.has_key('class'):
            topic.set_class(details['class'])
        title = details['title']
        if details.has_key('local'):
            startnode = self.startnode.parent
            # @@@ generate an error if the startnode (directive) not at
            # section/document top-level?  Drag it up until it is?
            while not isinstance(startnode, nodes.Structural):
                startnode = startnode.parent
        else:
            startnode = self.document
            if not title:
                title = nodes.title('', self.language.labels['contents'])
        if title:
            name = title.astext()
            topic += title
        else:
            name = self.language.labels['contents']
        name = nodes.fully_normalize_name(name)
        if not self.document.has_name(name):
            topic['name'] = name
        self.document.note_implicit_target(topic)
        self.toc_id = topic['id']
        if details.has_key('backlinks'):
            self.backlinks = details['backlinks']
        else:
            self.backlinks = self.document.settings.toc_backlinks
        contents = self.build_contents(startnode)
        if len(contents):
            topic += contents
            self.startnode.parent.replace(self.startnode, topic)
        else:
            # Empty TOC: drop the placeholder entirely.
            self.startnode.parent.remove(self.startnode)

    def build_contents(self, node, level=0):
        """Build a (possibly nested) bullet list of TOC entries for `node`."""
        level += 1
        sections = []
        # Collect the trailing run of child sections (scan from the end).
        i = len(node) - 1
        while i >= 0 and isinstance(node[i], nodes.section):
            sections.append(node[i])
            i -= 1
        sections.reverse()
        entries = []
        autonum = 0
        depth = self.startnode.details.get('depth', sys.maxint)
        for section in sections:
            title = section[0]
            auto = title.get('auto')    # May be set by SectNum.
            entrytext = self.copy_and_filter(title)
            reference = nodes.reference('', '', refid=section['id'],
                                        *entrytext)
            ref_id = self.document.set_id(reference)
            entry = nodes.paragraph('', '', reference)
            item = nodes.list_item('', entry)
            if self.backlinks == 'entry':
                title['refid'] = ref_id
            elif self.backlinks == 'top':
                title['refid'] = self.toc_id
            if level < depth:
                subsects = self.build_contents(section, level)
                item += subsects
            entries.append(item)
        if entries:
            contents = nodes.bullet_list('', *entries)
            if auto:
                contents.set_class('auto-toc')
            return contents
        else:
            return []

    def copy_and_filter(self, node):
        """Return a copy of a title, with references, images, etc. removed."""
        visitor = ContentsFilter(self.document)
        node.walkabout(visitor)
        return visitor.get_entry_text()
class ContentsFilter(nodes.TreeCopyVisitor):

    """
    Tree-copy visitor that filters a title for use as a TOC entry:
    citation/footnote references are dropped, images are replaced by their
    "alt" text, and wrapper inlines (references, targets, etc.) are
    flattened into their children.
    """

    def get_entry_text(self):
        return self.get_tree_copy().get_children()

    def visit_citation_reference(self, node):
        raise nodes.SkipNode

    def visit_footnote_reference(self, node):
        raise nodes.SkipNode

    def visit_image(self, node):
        # Keep only the alternate text, if any.
        if node.hasattr('alt'):
            self.parent.append(nodes.Text(node['alt']))
        raise nodes.SkipNode

    def ignore_node_but_process_children(self, node):
        raise nodes.SkipDeparture

    visit_interpreted = ignore_node_but_process_children
    visit_problematic = ignore_node_but_process_children
    visit_reference = ignore_node_but_process_children
    visit_target = ignore_node_but_process_children
lib/python/docutils/transforms/peps.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
Transforms for PEP processing.
- `Headers`: Used to transform a PEP's initial RFC-2822 header. It remains a
field list, but some entries get processed.
- `Contents`: Auto-inserts a table of contents.
- `PEPZero`: Special processing for PEP 0.
"""
__docformat__
=
'reStructuredText'
import
sys
import
os
import
re
import
time
from
docutils
import
nodes
,
utils
from
docutils
import
ApplicationError
,
DataError
from
docutils.transforms
import
Transform
,
TransformError
from
docutils.transforms
import
parts
,
references
,
misc
class Headers(Transform):

    """
    Process fields in a PEP's initial RFC-2822 header.
    """

    default_priority = 360

    pep_url = 'pep-%04d.html'
    pep_cvs_url = ('http://cvs.sourceforge.net/cgi-bin/viewcvs.cgi/python/'
                   'python/nondist/peps/pep-%04d.txt')
    # RCS keyword stripping; r'\$' is a separate literal so RCS does not
    # expand the keyword in this file itself.
    rcs_keyword_substitutions = (
          (re.compile(r'\$' r'RCSfile: (.+),v \$$', re.IGNORECASE), r'\1'),
          (re.compile(r'\$[a-zA-Z]+: (.+) \$$'), r'\1'),)

    def apply(self):
        if not len(self.document):
            # @@@ replace these DataErrors with proper system messages
            raise DataError('Document tree is empty.')
        header = self.document[0]
        if not isinstance(header, nodes.field_list) or \
              header.get('class') != 'rfc2822':
            raise DataError('Document does not begin with an RFC-2822 '
                            'header; it is not a PEP.')
        pep = None
        for field in header:
            if field[0].astext().lower() == 'pep': # should be the first field
                value = field[1].astext()
                try:
                    pep = int(value)
                    cvs_url = self.pep_cvs_url % pep
                except ValueError:
                    pep = value
                    cvs_url = None
                    msg = self.document.reporter.warning(
                        '"PEP" header must contain an integer; "%s" is an '
                        'invalid value.' % pep, base_node=field)
                    msgid = self.document.set_id(msg)
                    prb = nodes.problematic(value, value or '(none)',
                                            refid=msgid)
                    prbid = self.document.set_id(prb)
                    msg.add_backref(prbid)
                    if len(field[1]):
                        field[1][0][:] = [prb]
                    else:
                        field[1] += nodes.paragraph('', '', prb)
                break
        if pep is None:
            raise DataError('Document does not contain an RFC-2822 "PEP" '
                            'header.')
        if pep == 0:
            # Special processing for PEP 0.
            pending = nodes.pending(PEPZero)
            self.document.insert(1, pending)
            self.document.note_pending(pending)
        if len(header) < 2 or header[1][0].astext().lower() != 'title':
            raise DataError('No title!')
        # NOTE(review): `cvs_url` is only bound in the "pep" branch above;
        # reaching here guarantees that branch ran (pep is not None).
        for field in header:
            name = field[0].astext().lower()
            body = field[1]
            if len(body) > 1:
                raise DataError('PEP header field body contains multiple '
                                'elements:\n%s' % field.pformat(level=1))
            elif len(body) == 1:
                if not isinstance(body[0], nodes.paragraph):
                    raise DataError('PEP header field body may only contain '
                                    'a single paragraph:\n%s'
                                    % field.pformat(level=1))
            elif name == 'last-modified':
                # Empty Last-Modified: synthesize the date from the source
                # file's modification time (os.stat index 8 == st_mtime).
                date = time.strftime(
                      '%d-%b-%Y',
                      time.localtime(os.stat(self.document['source'])[8]))
                if cvs_url:
                    body += nodes.paragraph(
                        '', '', nodes.reference('', date, refuri=cvs_url))
            else:
                # empty
                continue
            para = body[0]
            if name == 'author':
                for node in para:
                    if isinstance(node, nodes.reference):
                        node.parent.replace(node, mask_email(node))
            elif name == 'discussions-to':
                for node in para:
                    if isinstance(node, nodes.reference):
                        node.parent.replace(node, mask_email(node, pep))
            elif name in ('replaces', 'replaced-by', 'requires'):
                newbody = []
                space = nodes.Text(' ')
                for refpep in re.split(',?\s+', body.astext()):
                    pepno = int(refpep)
                    newbody.append(nodes.reference(
                        refpep, refpep, refuri=self.pep_url % pepno))
                    newbody.append(space)
                para[:] = newbody[:-1] # drop trailing space
            elif name == 'last-modified':
                utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
                if cvs_url:
                    date = para.astext()
                    para[:] = [nodes.reference('', date, refuri=cvs_url)]
            elif name == 'content-type':
                # Link the Content-Type value to PEP 12.
                pep_type = para.astext()
                uri = self.pep_url % 12
                para[:] = [nodes.reference('', pep_type, refuri=uri)]
            elif name == 'version' and len(body):
                utils.clean_rcs_keywords(para, self.rcs_keyword_substitutions)
class Contents(Transform):

    """
    Insert a table of contents transform placeholder into the document after
    the RFC 2822 header.
    """

    default_priority = 380

    def apply(self):
        pending = nodes.pending(parts.Contents, {'title': None})
        self.document.insert(1, pending)
        self.document.note_pending(pending)
class TargetNotes(Transform):

    """
    Locate the "References" section, insert a placeholder for an external
    target footnote insertion transform at the end, and schedule the
    transform to run immediately.
    """

    default_priority = 520

    def apply(self):
        doc = self.document
        i = len(doc) - 1
        refsect = copyright = None
        # Scan trailing sections backwards for "References"/"Copyright".
        while i >= 0 and isinstance(doc[i], nodes.section):
            title_words = doc[i][0].astext().lower().split()
            if 'references' in title_words:
                refsect = doc[i]
                break
            elif 'copyright' in title_words:
                copyright = i
            i -= 1
        if not refsect:
            refsect = nodes.section()
            refsect += nodes.title('', 'References')
            doc.set_id(refsect)
            if copyright:
                # Put the new "References" section before "Copyright":
                doc.insert(copyright, refsect)
            else:
                # Put the new "References" section at end of doc:
                doc.append(refsect)
        pending = nodes.pending(references.TargetNotes)
        refsect.append(pending)
        self.document.note_pending(pending, 0)
        pending = nodes.pending(misc.CallBack,
                                details={'callback': self.cleanup_callback})
        refsect.append(pending)
        self.document.note_pending(pending, 1)

    def cleanup_callback(self, pending):
        """
        Remove an empty "References" section.

        Called after the `references.TargetNotes` transform is complete.
        """
        if len(pending.parent) == 2:    # <title> and <pending>
            pending.parent.parent.remove(pending.parent)
class PEPZero(Transform):

    """
    Special processing for PEP 0.
    """

    default_priority = 760

    def apply(self):
        visitor = PEPZeroSpecial(self.document)
        self.document.walk(visitor)
        # Remove the pending placeholder that scheduled this transform.
        self.startnode.parent.remove(self.startnode)
class PEPZeroSpecial(nodes.SparseNodeVisitor):

    """
    Perform the special processing needed by PEP 0:

    - Mask email addresses.

    - Link PEP numbers in the second column of 4-column tables to the PEPs
      themselves.
    """

    pep_url = Headers.pep_url

    def unknown_visit(self, node):
        pass

    def visit_reference(self, node):
        node.parent.replace(node, mask_email(node))

    def visit_field_list(self, node):
        # Skip the RFC-2822 header; it is handled by the Headers transform.
        if node.hasattr('class') and node['class'] == 'rfc2822':
            raise nodes.SkipNode

    def visit_tgroup(self, node):
        # Only 4-column tables get PEP-number linking treatment.
        self.pep_table = node['cols'] == 4
        self.entry = 0

    def visit_colspec(self, node):
        self.entry += 1
        if self.pep_table and self.entry == 2:
            node['class'] = 'num'

    def visit_row(self, node):
        self.entry = 0

    def visit_entry(self, node):
        self.entry += 1
        if self.pep_table and self.entry == 2 and len(node) == 1:
            node['class'] = 'num'
            p = node[0]
            if isinstance(p, nodes.paragraph) and len(p) == 1:
                text = p.astext()
                try:
                    pep = int(text)
                    ref = self.pep_url % pep
                    p[0] = nodes.reference(text, text, refuri=ref)
                except ValueError:
                    # Not a PEP number; leave the cell alone.
                    pass
# Well-known list addresses that are published as-is (not masked).
non_masked_addresses = ('peps@python.org',
                        'python-list@python.org',
                        'python-dev@python.org')


def mask_email(ref, pepno=None):
    """
    Mask the email address in `ref` and return a replacement node.

    `ref` is returned unchanged if it contains no email address.

    For email addresses such as "user@host", mask the address as "user at
    host" (text) to thwart simple email address harvesters (except for those
    listed in `non_masked_addresses`).  If a PEP number (`pepno`) is given,
    return a reference including a default email subject.
    """
    if ref.hasattr('refuri') and ref['refuri'].startswith('mailto:'):
        # BUG FIX: the original sliced with [8:], but 'mailto:' is only 7
        # characters, so the first character of the address was dropped and
        # whitelisted addresses never matched `non_masked_addresses`.
        address = ref['refuri'][len('mailto:'):]
        if address in non_masked_addresses:
            replacement = ref[0]
        else:
            replacement_text = ref.astext().replace('@', '&#32;at&#32;')
            replacement = nodes.raw('', replacement_text, format='html')
        if pepno is None:
            return replacement
        else:
            # Add a default subject line; '%%20' keeps a literal '%20'
            # through the %-formatting.
            ref['refuri'] += '?subject=PEP%%20%s' % pepno
            ref[:] = [replacement]
            return ref
    else:
        return ref
lib/python/docutils/transforms/references.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
Transforms for resolving references.
"""
__docformat__
=
'reStructuredText'
import
sys
import
re
from
docutils
import
nodes
,
utils
from
docutils.transforms
import
TransformError
,
Transform
# A practically-infinite lazy sequence of indices (Python 2 ``xrange``).
# NOTE(review): not referenced anywhere in the visible portion of this
# module — possibly vestigial; confirm before removing.
indices = xrange(sys.maxint)
class ChainedTargets(Transform):

    """
    Attributes "refuri" and "refname" are migrated from the final direct
    target up the chain of contiguous adjacent internal targets, using
    `ChainedTargetResolver`.
    """

    default_priority = 420

    def apply(self):
        visitor = ChainedTargetResolver(self.document)
        self.document.walk(visitor)
class ChainedTargetResolver(nodes.SparseNodeVisitor):

    """
    Copy reference attributes up the length of a hyperlink target chain.

    "Chained targets" are multiple adjacent internal hyperlink targets which
    "point to" an external or indirect target.  After the transform, all
    chained targets will effectively point to the same place: the reference
    attribute ("refuri", "refname" or "refid") of the final direct target is
    copied onto every contiguous attribute-less target immediately preceding
    it.
    """

    def unknown_visit(self, node):
        pass

    def visit_target(self, node):
        # Pick the attribute to propagate, plus the document bookkeeping
        # callback to invoke for named siblings (if any).
        if node.hasattr('refuri'):
            attname = 'refuri'
            call_if_named = self.document.note_external_target
        elif node.hasattr('refname'):
            attname = 'refname'
            call_if_named = self.document.note_indirect_target
        elif node.hasattr('refid'):
            attname = 'refid'
            call_if_named = None
        else:
            return
        attval = node[attname]
        index = node.parent.index(node)
        # Walk backwards over immediately preceding plain targets.
        for i in range(index - 1, -1, -1):
            sibling = node.parent[i]
            if not isinstance(sibling, nodes.target) \
                  or sibling.hasattr('refuri') \
                  or sibling.hasattr('refname') \
                  or sibling.hasattr('refid'):
                break
            sibling[attname] = attval
            if sibling.hasattr('name') and call_if_named:
                call_if_named(sibling)
class AnonymousHyperlinks(Transform):

    """
    Link anonymous references to anonymous targets, in document order.
    References are linked via "refid" (internal targets) or resolved via
    "refuri" (external targets).  A count mismatch between references and
    targets is reported as an error and every anonymous reference is
    replaced by a "problematic" node.
    """

    default_priority = 440

    def apply(self):
        refs = self.document.anonymous_refs
        targets = self.document.anonymous_targets
        if len(refs) != len(targets):
            msg = self.document.reporter.error(
                  'Anonymous hyperlink mismatch: %s references but %s '
                  'targets.\nSee "backrefs" attribute for IDs.'
                  % (len(refs), len(targets)))
            msgid = self.document.set_id(msg)
            for ref in refs:
                prb = nodes.problematic(
                      ref.rawsource, ref.rawsource, refid=msgid)
                prbid = self.document.set_id(prb)
                msg.add_backref(prbid)
                ref.parent.replace(ref, prb)
            return
        for ref, target in zip(refs, targets):
            if target.hasattr('refuri'):
                ref['refuri'] = target['refuri']
                ref.resolved = 1
            else:
                ref['refid'] = target['id']
                self.document.note_refid(ref)
            target.referenced = 1
class
IndirectHyperlinks
(
Transform
):
"""
a) Indirect external references::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refname="direct external">
The "refuri" attribute is migrated back to all indirect targets
from the final direct target (i.e. a target not referring to
another indirect target)::
<paragraph>
<reference refname="indirect external">
indirect external
<target id="id1" name="direct external"
refuri="http://indirect">
<target id="id2" name="indirect external"
refuri="http://indirect">
Once the attribute is migrated, the preexisting "refname" attribute
is dropped.
b) Indirect internal references::
<target id="id1" name="final target">
<paragraph>
<reference refname="indirect internal">
indirect internal
<target id="id2" name="indirect internal 2"
refname="final target">
<target id="id3" name="indirect internal"
refname="indirect internal 2">
Targets which indirectly refer to an internal target become one-hop
indirect (their "refid" attributes are directly set to the internal
target's "id"). References which indirectly refer to an internal
target become direct internal references::
<target id="id1" name="final target">
<paragraph>
<reference refid="id1">
indirect internal
<target id="id2" name="indirect internal 2" refid="id1">
<target id="id3" name="indirect internal" refid="id1">
"""
default_priority
=
460
def
apply
(
self
):
for
target
in
self
.
document
.
indirect_targets
:
if
not
target
.
resolved
:
self
.
resolve_indirect_target
(
target
)
self
.
resolve_indirect_references
(
target
)
def
resolve_indirect_target
(
self
,
target
):
refname
=
target
[
'refname'
]
reftarget_id
=
self
.
document
.
nameids
.
get
(
refname
)
if
not
reftarget_id
:
self
.
nonexistent_indirect_target
(
target
)
return
reftarget
=
self
.
document
.
ids
[
reftarget_id
]
if
isinstance
(
reftarget
,
nodes
.
target
)
\
and
not
reftarget
.
resolved
and
reftarget
.
hasattr
(
'refname'
):
if
hasattr
(
target
,
'multiply_indirect'
):
#and target.multiply_indirect):
#del target.multiply_indirect
self
.
circular_indirect_reference
(
target
)
return
target
.
multiply_indirect
=
1
self
.
resolve_indirect_target
(
reftarget
)
# multiply indirect
del
target
.
multiply_indirect
if
reftarget
.
hasattr
(
'refuri'
):
target
[
'refuri'
]
=
reftarget
[
'refuri'
]
if
target
.
hasattr
(
'name'
):
self
.
document
.
note_external_target
(
target
)
elif
reftarget
.
hasattr
(
'refid'
):
target
[
'refid'
]
=
reftarget
[
'refid'
]
self
.
document
.
note_refid
(
target
)
else
:
try
:
target
[
'refid'
]
=
reftarget
[
'id'
]
self
.
document
.
note_refid
(
target
)
except
KeyError
:
self
.
nonexistent_indirect_target
(
target
)
return
del
target
[
'refname'
]
target
.
resolved
=
1
reftarget
.
referenced
=
1
def
nonexistent_indirect_target
(
self
,
target
):
self
.
indirect_target_error
(
target
,
'which does not exist'
)
def
circular_indirect_reference
(
self
,
target
):
self
.
indirect_target_error
(
target
,
'forming a circular reference'
)
def
indirect_target_error
(
self
,
target
,
explanation
):
naming
=
''
if
target
.
hasattr
(
'name'
):
naming
=
'"%s" '
%
target
[
'name'
]
reflist
=
self
.
document
.
refnames
.
get
(
target
[
'name'
],
[])
else
:
reflist
=
self
.
document
.
refids
.
get
(
target
[
'id'
],
[])
naming
+=
'(id="%s")'
%
target
[
'id'
]
msg
=
self
.
document
.
reporter
.
error
(
'Indirect hyperlink target %s refers to target "%s", %s.'
%
(
naming
,
target
[
'refname'
],
explanation
),
base_node
=
target
)
msgid
=
self
.
document
.
set_id
(
msg
)
for
ref
in
reflist
:
prb
=
nodes
.
problematic
(
ref
.
rawsource
,
ref
.
rawsource
,
refid
=
msgid
)
prbid
=
self
.
document
.
set_id
(
prb
)
msg
.
add_backref
(
prbid
)
ref
.
parent
.
replace
(
ref
,
prb
)
target
.
resolved
=
1
def
resolve_indirect_references
(
self
,
target
):
if
target
.
hasattr
(
'refid'
):
attname
=
'refid'
call_if_named
=
0
call_method
=
self
.
document
.
note_refid
elif
target
.
hasattr
(
'refuri'
):
attname
=
'refuri'
call_if_named
=
1
call_method
=
self
.
document
.
note_external_target
else
:
return
attval
=
target
[
attname
]
if
target
.
hasattr
(
'name'
):
name
=
target
[
'name'
]
try
:
reflist
=
self
.
document
.
refnames
[
name
]
except
KeyError
,
instance
:
if
target
.
referenced
:
return
msg
=
self
.
document
.
reporter
.
info
(
'Indirect hyperlink target "%s" is not referenced.'
%
name
,
base_node
=
target
)
target
.
referenced
=
1
return
delatt
=
'refname'
else
:
id
=
target
[
'id'
]
try
:
reflist
=
self
.
document
.
refids
[
id
]
except
KeyError
,
instance
:
if
target
.
referenced
:
return
msg
=
self
.
document
.
reporter
.
info
(
'Indirect hyperlink target id="%s" is not referenced.'
%
id
,
base_node
=
target
)
target
.
referenced
=
1
return
delatt
=
'refid'
for
ref
in
reflist
:
if
ref
.
resolved
:
continue
del
ref
[
delatt
]
ref
[
attname
]
=
attval
if
not
call_if_named
or
ref
.
hasattr
(
'name'
):
call_method
(
ref
)
ref
.
resolved
=
1
if
isinstance
(
ref
,
nodes
.
target
):
self
.
resolve_indirect_references
(
ref
)
target
.
referenced
=
1
class
ExternalTargets
(
Transform
):
"""
Given::
<paragraph>
<reference refname="direct external">
direct external
<target id="id1" name="direct external" refuri="http://direct">
The "refname" attribute is replaced by the direct "refuri" attribute::
<paragraph>
<reference refuri="http://direct">
direct external
<target id="id1" name="direct external" refuri="http://direct">
"""
default_priority
=
640
def
apply
(
self
):
for
target
in
self
.
document
.
external_targets
:
if
target
.
hasattr
(
'refuri'
)
and
target
.
hasattr
(
'name'
):
name
=
target
[
'name'
]
refuri
=
target
[
'refuri'
]
try
:
reflist
=
self
.
document
.
refnames
[
name
]
except
KeyError
,
instance
:
if
target
.
referenced
:
continue
msg
=
self
.
document
.
reporter
.
info
(
'External hyperlink target "%s" is not referenced.'
%
name
,
base_node
=
target
)
target
.
referenced
=
1
continue
for
ref
in
reflist
:
if
ref
.
resolved
:
continue
del
ref
[
'refname'
]
ref
[
'refuri'
]
=
refuri
ref
.
resolved
=
1
target
.
referenced
=
1
class
InternalTargets
(
Transform
):
"""
Given::
<paragraph>
<reference refname="direct internal">
direct internal
<target id="id1" name="direct internal">
The "refname" attribute is replaced by "refid" linking to the target's
"id"::
<paragraph>
<reference refid="id1">
direct internal
<target id="id1" name="direct internal">
"""
default_priority
=
660
def
apply
(
self
):
for
target
in
self
.
document
.
internal_targets
:
if
target
.
hasattr
(
'refuri'
)
or
target
.
hasattr
(
'refid'
)
\
or
not
target
.
hasattr
(
'name'
):
continue
name
=
target
[
'name'
]
refid
=
target
[
'id'
]
try
:
reflist
=
self
.
document
.
refnames
[
name
]
except
KeyError
,
instance
:
if
target
.
referenced
:
continue
msg
=
self
.
document
.
reporter
.
info
(
'Internal hyperlink target "%s" is not referenced.'
%
name
,
base_node
=
target
)
target
.
referenced
=
1
continue
for
ref
in
reflist
:
if
ref
.
resolved
:
continue
del
ref
[
'refname'
]
ref
[
'refid'
]
=
refid
ref
.
resolved
=
1
target
.
referenced
=
1
class
Footnotes
(
Transform
):
"""
Assign numbers to autonumbered footnotes, and resolve links to footnotes,
citations, and their references.
Given the following ``document`` as input::
<document>
<paragraph>
A labeled autonumbered footnote referece:
<footnote_reference auto="1" id="id1" refname="footnote">
<paragraph>
An unlabeled autonumbered footnote referece:
<footnote_reference auto="1" id="id2">
<footnote auto="1" id="id3">
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote">
<paragraph>
Labeled autonumbered footnote.
Auto-numbered footnotes have attribute ``auto="1"`` and no label.
Auto-numbered footnote_references have no reference text (they're
empty elements). When resolving the numbering, a ``label`` element
is added to the beginning of the ``footnote``, and reference text
to the ``footnote_reference``.
The transformed result will be::
<document>
<paragraph>
A labeled autonumbered footnote referece:
<footnote_reference auto="1" id="id1" refid="footnote">
2
<paragraph>
An unlabeled autonumbered footnote referece:
<footnote_reference auto="1" id="id2" refid="id3">
1
<footnote auto="1" id="id3" backrefs="id2">
<label>
1
<paragraph>
Unlabeled autonumbered footnote.
<footnote auto="1" id="footnote" name="footnote" backrefs="id1">
<label>
2
<paragraph>
Labeled autonumbered footnote.
Note that the footnotes are not in the same order as the references.
The labels and reference text are added to the auto-numbered ``footnote``
and ``footnote_reference`` elements. Footnote elements are backlinked to
their references via "refids" attributes. References are assigned "id"
and "refid" attributes.
After adding labels and reference text, the "auto" attributes can be
ignored.
"""
default_priority
=
620
autofootnote_labels
=
None
"""Keep track of unlabeled autonumbered footnotes."""
symbols
=
[
# Entries 1-4 and 6 below are from section 12.51 of
# The Chicago Manual of Style, 14th edition.
'*'
,
# asterisk/star
u'
\
u2020
'
,
# dagger †
u'
\
u2021
'
,
# double dagger ‡
u'
\
u00A7
'
,
# section mark §
u'
\
u00B6
'
,
# paragraph mark (pilcrow) ¶
# (parallels ['||'] in CMoS)
'#'
,
# number sign
# The entries below were chosen arbitrarily.
u'
\
u2660
'
,
# spade suit ♠
u'
\
u2665
'
,
# heart suit ♥
u'
\
u2666
'
,
# diamond suit ♦
u'
\
u2663
'
,
# club suit ♣
]
def
apply
(
self
):
self
.
autofootnote_labels
=
[]
startnum
=
self
.
document
.
autofootnote_start
self
.
document
.
autofootnote_start
=
self
.
number_footnotes
(
startnum
)
self
.
number_footnote_references
(
startnum
)
self
.
symbolize_footnotes
()
self
.
resolve_footnotes_and_citations
()
def
number_footnotes
(
self
,
startnum
):
"""
Assign numbers to autonumbered footnotes.
For labeled autonumbered footnotes, copy the number over to
corresponding footnote references.
"""
for
footnote
in
self
.
document
.
autofootnotes
:
while
1
:
label
=
str
(
startnum
)
startnum
+=
1
if
not
self
.
document
.
nameids
.
has_key
(
label
):
break
footnote
.
insert
(
0
,
nodes
.
label
(
''
,
label
))
if
footnote
.
hasattr
(
'dupname'
):
continue
if
footnote
.
hasattr
(
'name'
):
name
=
footnote
[
'name'
]
for
ref
in
self
.
document
.
footnote_refs
.
get
(
name
,
[]):
ref
+=
nodes
.
Text
(
label
)
ref
.
delattr
(
'refname'
)
ref
[
'refid'
]
=
footnote
[
'id'
]
footnote
.
add_backref
(
ref
[
'id'
])
self
.
document
.
note_refid
(
ref
)
ref
.
resolved
=
1
else
:
footnote
[
'name'
]
=
label
self
.
document
.
note_explicit_target
(
footnote
,
footnote
)
self
.
autofootnote_labels
.
append
(
label
)
return
startnum
    def number_footnote_references(self, startnum):
        """Assign numbers to autonumbered footnote references."""
        i = 0
        for ref in self.document.autofootnote_refs:
            if ref.resolved or ref.hasattr('refid'):
                continue
            try:
                label = self.autofootnote_labels[i]
            except IndexError:
                # More references than footnotes: report once, then turn
                # every remaining unresolved reference into a
                # ``problematic`` node linked to the error message.
                msg = self.document.reporter.error(
                      'Too many autonumbered footnote references: only %s '
                      'corresponding footnotes available.'
                      % len(self.autofootnote_labels), base_node=ref)
                msgid = self.document.set_id(msg)
                for ref in self.document.autofootnote_refs[i:]:
                    if ref.resolved or ref.hasattr('refname'):
                        continue
                    prb = nodes.problematic(
                          ref.rawsource, ref.rawsource, refid=msgid)
                    prbid = self.document.set_id(prb)
                    msg.add_backref(prbid)
                    ref.parent.replace(ref, prb)
                break
            ref += nodes.Text(label)
            id = self.document.nameids[label]
            footnote = self.document.ids[id]
            # Cross-link reference <-> footnote by id.
            ref['refid'] = id
            self.document.note_refid(ref)
            footnote.add_backref(ref['id'])
            ref.resolved = 1
            i += 1
    def symbolize_footnotes(self):
        """Add symbols indexes to "[*]"-style footnotes and references."""
        labels = []
        for footnote in self.document.symbol_footnotes:
            # Cycle through the symbol set, doubling/tripling the symbol
            # on each full pass (*, **, *** ...).
            reps, index = divmod(self.document.symbol_footnote_start,
                                 len(self.symbols))
            labeltext = self.symbols[index] * (reps + 1)
            labels.append(labeltext)
            footnote.insert(0, nodes.label('', labeltext))
            self.document.symbol_footnote_start += 1
            self.document.set_id(footnote)
        i = 0
        for ref in self.document.symbol_footnote_refs:
            try:
                ref += nodes.Text(labels[i])
            except IndexError:
                # More references than symbol footnotes: report once, then
                # replace every remaining unresolved reference with a
                # ``problematic`` node linked to the error message.
                msg = self.document.reporter.error(
                      'Too many symbol footnote references: only %s '
                      'corresponding footnotes available.' % len(labels),
                      base_node=ref)
                msgid = self.document.set_id(msg)
                for ref in self.document.symbol_footnote_refs[i:]:
                    if ref.resolved or ref.hasattr('refid'):
                        continue
                    prb = nodes.problematic(
                          ref.rawsource, ref.rawsource, refid=msgid)
                    prbid = self.document.set_id(prb)
                    msg.add_backref(prbid)
                    ref.parent.replace(ref, prb)
                break
            footnote = self.document.symbol_footnotes[i]
            ref['refid'] = footnote['id']
            self.document.note_refid(ref)
            footnote.add_backref(ref['id'])
            i += 1
def
resolve_footnotes_and_citations
(
self
):
"""
Link manually-labeled footnotes and citations to/from their
references.
"""
for
footnote
in
self
.
document
.
footnotes
:
label
=
footnote
[
'name'
]
if
self
.
document
.
footnote_refs
.
has_key
(
label
):
reflist
=
self
.
document
.
footnote_refs
[
label
]
self
.
resolve_references
(
footnote
,
reflist
)
for
citation
in
self
.
document
.
citations
:
label
=
citation
[
'name'
]
if
self
.
document
.
citation_refs
.
has_key
(
label
):
reflist
=
self
.
document
.
citation_refs
[
label
]
self
.
resolve_references
(
citation
,
reflist
)
def
resolve_references
(
self
,
note
,
reflist
):
id
=
note
[
'id'
]
for
ref
in
reflist
:
if
ref
.
resolved
:
continue
ref
.
delattr
(
'refname'
)
ref
[
'refid'
]
=
id
note
.
add_backref
(
ref
[
'id'
])
ref
.
resolved
=
1
note
.
resolved
=
1
class Substitutions(Transform):

    """
    Given the following ``document`` as input::

        <document>
            <paragraph>
                The
                <substitution_reference refname="biohazard">
                    biohazard
                 symbol is deservedly scary-looking.
            <substitution_definition name="biohazard">
                <image alt="biohazard" uri="biohazard.png">

    The ``substitution_reference`` will simply be replaced by the
    contents of the corresponding ``substitution_definition``.

    The transformed result will be::

        <document>
            <paragraph>
                The
                <image alt="biohazard" uri="biohazard.png">
                 symbol is deservedly scary-looking.
            <substitution_definition name="biohazard">
                <image alt="biohazard" uri="biohazard.png">
    """

    default_priority = 220
    """The Substitutions transform has to be applied very early, before
    `docutils.transforms.frontmatter.DocTitle` and others."""

    def apply(self):
        defs = self.document.substitution_defs
        normed = self.document.substitution_names
        for refname, refs in self.document.substitution_refs.items():
            for ref in refs:
                key = None
                # Exact-name match first, then case-normalized match.
                if defs.has_key(refname):
                    key = refname
                else:
                    normed_name = refname.lower()
                    if normed.has_key(normed_name):
                        key = normed[normed_name]
                if key is None:
                    # No definition: replace the reference with a
                    # ``problematic`` node linked to an error message.
                    msg = self.document.reporter.error(
                          'Undefined substitution referenced: "%s".'
                          % refname, base_node=ref)
                    msgid = self.document.set_id(msg)
                    prb = nodes.problematic(
                          ref.rawsource, ref.rawsource, refid=msgid)
                    prbid = self.document.set_id(prb)
                    msg.add_backref(prbid)
                    ref.parent.replace(ref, prb)
                else:
                    # Replace the reference with the definition's children.
                    ref.parent.replace(ref, defs[key].get_children())
        self.document.substitution_refs = None  # release replaced references
class TargetNotes(Transform):

    """
    Creates a footnote for each external target in the text, and corresponding
    footnote references after each reference.
    """

    default_priority = 540
    """The TargetNotes transform has to be applied after `IndirectHyperlinks`
    but before `Footnotes`."""

    def apply(self):
        notes = {}
        nodelist = []
        for target in self.document.external_targets:
            name = target.get('name')
            if not name:
                # NOTE(review): debug output to stderr left in the code;
                # consider routing through self.document.reporter — confirm.
                print >>sys.stderr, 'no name on target: %r' % target
                continue
            refs = self.document.refnames.get(name, [])
            if not refs:
                continue
            footnote = self.make_target_footnote(target, refs, notes)
            if not notes.has_key(target['refuri']):
                notes[target['refuri']] = footnote
                nodelist.append(footnote)
        # Anonymous targets/references pair up positionally, so they can
        # only be processed when the counts match.
        if len(self.document.anonymous_targets) \
               == len(self.document.anonymous_refs):
            for target, ref in zip(self.document.anonymous_targets,
                                   self.document.anonymous_refs):
                if target.hasattr('refuri'):
                    footnote = self.make_target_footnote(target, [ref],
                                                         notes)
                    if not notes.has_key(target['refuri']):
                        notes[target['refuri']] = footnote
                        nodelist.append(footnote)
        # Replace the pending "target-notes" placeholder node with the
        # generated footnotes.
        self.startnode.parent.replace(self.startnode, nodelist)

    def make_target_footnote(self, target, refs, notes):
        """Return a footnote for `target`, creating it on first use."""
        refuri = target['refuri']
        if notes.has_key(refuri):  # duplicate?
            footnote = notes[refuri]
            footnote_name = footnote['name']
        else:                      # original
            footnote = nodes.footnote()
            footnote_id = self.document.set_id(footnote)
            # Use a colon; they can't be produced inside names by the parser:
            footnote_name = 'target_note: ' + footnote_id
            footnote['auto'] = 1
            footnote['name'] = footnote_name
            footnote_paragraph = nodes.paragraph()
            footnote_paragraph += nodes.reference('', refuri, refuri=refuri)
            footnote += footnote_paragraph
            self.document.note_autofootnote(footnote)
            self.document.note_explicit_target(footnote, footnote)
        for ref in refs:
            if isinstance(ref, nodes.target):
                continue
            refnode = nodes.footnote_reference(
                refname=footnote_name, auto=1)
            self.document.note_autofootnote_ref(refnode)
            self.document.note_footnote_ref(refnode)
            # Insert the footnote reference immediately after `ref`,
            # optionally preceded by a space.
            index = ref.parent.index(ref) + 1
            reflist = [refnode]
            if not self.document.settings.trim_footnote_reference_space:
                reflist.insert(0, nodes.Text(' '))
            ref.parent.insert(index, reflist)
        return footnote
lib/python/docutils/transforms/universal.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger, Ueli Schlaepfer
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:00 $
# Copyright: This module has been placed in the public domain.
"""
Transforms needed by most or all documents:
- `Decorations`: Generate a document's header & footer.
- `Messages`: Placement of system messages stored in
`nodes.document.transform_messages`.
- `TestMessages`: Like `Messages`, used on test runs.
- `FinalReferences`: Resolve remaining references.
"""
__docformat__
=
'reStructuredText'
import
re
import
sys
import
time
from
docutils
import
nodes
,
utils
from
docutils.transforms
import
TransformError
,
Transform
class Decorations(Transform):

    """
    Populate a document's decoration element (header, footer).
    """

    default_priority = 820

    def apply(self):
        header = self.generate_header()
        footer = self.generate_footer()
        if header or footer:
            decoration = nodes.decoration()
            decoration += header
            decoration += footer
            document = self.document
            # Insert the decoration before the first non-PreDecorative
            # child, or append it if every child is PreDecorative.
            index = document.first_child_not_matching_class(
                nodes.PreDecorative)
            if index is None:
                document += decoration
            else:
                document[index:index] = [decoration]

    def generate_header(self):
        # No header is generated by default; subclasses may override.
        return None

    def generate_footer(self):
        # @@@ Text is hard-coded for now.
        # Should be made dynamic (language-dependent).
        settings = self.document.settings
        if settings.generator or settings.datestamp or settings.source_link \
               or settings.source_url:
            text = []
            if settings.source_link and settings._source \
                   or settings.source_url:
                if settings.source_url:
                    source = settings.source_url
                else:
                    source = utils.relative_path(settings._destination,
                                                 settings._source)
                text.extend([
                    nodes.reference('', 'View document source',
                                    refuri=source),
                    nodes.Text('.\n')])
            if settings.datestamp:
                # `settings.datestamp` doubles as the strftime format.
                datestamp = time.strftime(settings.datestamp, time.gmtime())
                text.append(nodes.Text('Generated on: ' + datestamp + '.\n'))
            if settings.generator:
                text.extend([
                    nodes.Text('Generated by '),
                    nodes.reference(
                        '', 'Docutils',
                        refuri='http://docutils.sourceforge.net/'),
                    nodes.Text(' from '),
                    nodes.reference(
                        '', 'reStructuredText',
                        refuri='http://'
                        'docutils.sourceforge.net/rst.html'),
                    nodes.Text(' source.\n')])
            footer = nodes.footer()
            footer += nodes.paragraph('', '', *text)
            return footer
        else:
            return None
class Messages(Transform):

    """
    Place any system messages generated after parsing into a dedicated section
    of the document.
    """

    default_priority = 860

    def apply(self):
        threshold = self.document.reporter['writer'].report_level
        # Collect loose (unattached) messages at or above the writer's
        # report level.
        loose = [msg for msg in self.document.transform_messages
                 if msg['level'] >= threshold and not msg.parent]
        if loose:
            section = nodes.section(CLASS='system-messages')
            # @@@ get this from the language module?
            section += nodes.title('', 'Docutils System Messages')
            section += loose
            self.document.transform_messages[:] = []
            self.document += section
class TestMessages(Transform):

    """
    Append all post-parse system messages to the end of the document.
    """

    default_priority = 890

    def apply(self):
        # Attach every message that has not yet been placed in the tree.
        orphans = [msg for msg in self.document.transform_messages
                   if not msg.parent]
        for orphan in orphans:
            self.document += orphan
class FinalChecks(Transform):

    """
    Perform last-minute checks.

    - Check for dangling references (incl. footnote & citation).
    """

    default_priority = 840

    def apply(self):
        document = self.document
        document.walk(FinalCheckVisitor(document))
        if document.settings.expose_internals:
            document.walk(InternalAttributeExposer(document))
class FinalCheckVisitor(nodes.SparseNodeVisitor):

    """Resolve remaining name-based references; flag dangling ones."""

    def unknown_visit(self, node):
        pass

    def visit_reference(self, node):
        if node.resolved or not node.hasattr('refname'):
            return
        refname = node['refname']
        id = self.document.nameids.get(refname)
        if id is None:
            # Dangling reference: replace with a ``problematic`` node
            # linked to an error message.
            msg = self.document.reporter.error(
                  'Unknown target name: "%s".' % (node['refname']),
                  base_node=node)
            msgid = self.document.set_id(msg)
            prb = nodes.problematic(
                  node.rawsource, node.rawsource, refid=msgid)
            prbid = self.document.set_id(prb)
            msg.add_backref(prbid)
            node.parent.replace(node, prb)
        else:
            # Resolve by converting the name reference to an id reference.
            del node['refname']
            node['refid'] = id
            self.document.ids[id].referenced = 1
            node.resolved = 1

    # Footnote and citation references resolve identically.
    visit_footnote_reference = visit_citation_reference = visit_reference
class InternalAttributeExposer(nodes.GenericNodeVisitor):

    """Expose selected internal Python attributes as node attributes."""

    def __init__(self, document):
        nodes.GenericNodeVisitor.__init__(self, document)
        # Attribute names requested via the ``expose_internals`` setting.
        self.internal_attributes = document.settings.expose_internals

    def default_visit(self, node):
        for name in self.internal_attributes:
            exposed = getattr(node, name, None)
            if exposed is not None:
                node['internal:' + name] = exposed
lib/python/docutils/writers/__init__.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
This package contains Docutils Writer modules.
"""
__docformat__
=
'reStructuredText'
import
sys
import
docutils
from
docutils
import
languages
,
Component
from
docutils.transforms
import
universal
class Writer(Component):

    """
    Abstract base class for docutils Writers.

    Each writer module or package must export a subclass also called 'Writer'.
    Each writer must support all standard node types listed in
    `docutils.nodes.node_class_names`.

    Call `write()` to process a document.
    """

    component_type = 'writer'

    document = None
    """The document to write."""

    language = None
    """Language module for the document."""

    destination = None
    """`docutils.io` IO object; where to write the document."""

    def __init__(self):
        """Initialize the Writer instance."""

    def write(self, document, destination):
        # Record the inputs, run the subclass translation, then push the
        # translated text (``self.output``, set by translate()) to the
        # destination.
        self.document = document
        self.language = languages.get_language(
            document.settings.language_code)
        self.destination = destination
        self.translate()
        output = self.destination.write(self.output)
        return output

    def translate(self):
        """
        Override to do final document tree translation.

        This is usually done with a `docutils.nodes.NodeVisitor` subclass, in
        combination with a call to `docutils.nodes.Node.walk()` or
        `docutils.nodes.Node.walkabout()`.  The ``NodeVisitor`` subclass must
        support all standard elements (listed in
        `docutils.nodes.node_class_names`) and possibly non-standard elements
        used by the current Reader as well.
        """
        raise NotImplementedError('subclass must override this method')
# Short alias -> canonical writer module name.
_writer_aliases = {
      'html': 'html4css1',
      'latex': 'latex2e',
      'pprint': 'pseudoxml',
      'pformat': 'pseudoxml',
      'pdf': 'rlpdf',
      'xml': 'docutils_xml',}

def get_writer_class(writer_name):
    """Return the Writer class from the `writer_name` module."""
    name = writer_name.lower()
    # Expand a known alias; otherwise use the name as the module name.
    name = _writer_aliases.get(name, name)
    module = __import__(name, globals(), locals())
    return module.Writer
lib/python/docutils/writers/docutils_xml.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes Docutils XML.
"""
__docformat__
=
'reStructuredText'
import
docutils
from
docutils
import
writers
class Writer(writers.Writer):

    supported = ('xml',)
    """Formats this writer supports."""

    settings_spec = (
        '"Docutils XML" Writer Options',
        'Warning: the --newlines and --indents options may adversely affect '
        'whitespace; use them only for reading convenience.',
        (('Generate XML with newlines before and after tags.',
          ['--newlines'], {'action': 'store_true'}),
         ('Generate XML with indents and newlines.',
          ['--indents'], {'action': 'store_true'}),
         ('Omit the XML declaration.  Use with caution.',
          ['--no-xml-declaration'], {'dest': 'xml_declaration',
                                     'default': 1,
                                     'action': 'store_false'}),
         ('Omit the DOCTYPE declaration.',
          ['--no-doctype'], {'dest': 'doctype_declaration', 'default': 1,
                             'action': 'store_false'}),))

    output = None
    """Final translated form of `document`."""

    xml_declaration = '<?xml version="1.0" encoding="%s"?>\n'
    #xml_stylesheet = '<?xml-stylesheet type="text/xsl" href="%s"?>\n'
    doctype = (
        '<!DOCTYPE document PUBLIC'
        ' "+//IDN docutils.sourceforge.net//DTD Docutils Generic//EN//XML"'
        ' "http://docutils.sourceforge.net/spec/docutils.dtd">\n')
    generator = '<!-- Generated by Docutils %s -->\n'

    def translate(self):
        settings = self.document.settings
        indent = newline = ''
        if settings.newlines:
            newline = '\n'
        if settings.indents:
            # --indents implies --newlines.
            newline = '\n'
            indent = '    '
        output_prefix = []
        if settings.xml_declaration:
            output_prefix.append(
                self.xml_declaration % settings.output_encoding)
        if settings.doctype_declaration:
            output_prefix.append(self.doctype)
        output_prefix.append(self.generator % docutils.__version__)
        # Serialize the document tree via its DOM representation.
        docnode = self.document.asdom().childNodes[0]
        self.output = (''.join(output_prefix)
                       + docnode.toprettyxml(indent, newline))
lib/python/docutils/writers/html4css1.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
Simple HyperText Markup Language document tree Writer.
The output conforms to the HTML 4.01 Transitional DTD and to the Extensible
HTML version 1.0 Transitional DTD (*almost* strict). The output contains a
minimum of formatting information. A cascading style sheet ("default.css" by
default) is required for proper viewing with a modern graphical browser.
"""
__docformat__
=
'reStructuredText'
import
sys
import
os
import
os.path
import
time
import
re
from
types
import
ListType
import
docutils
from
docutils
import
nodes
,
utils
,
writers
,
languages
class Writer(writers.Writer):

    supported = ('html', 'html4css1', 'xhtml')
    """Formats this writer supports."""

    settings_spec = (
        'HTML-Specific Options',
        None,
        (('Specify a stylesheet URL, used verbatim.  Default is '
          '"default.css".  Overridden by --stylesheet-path.',
          ['--stylesheet'],
          {'default': 'default.css', 'metavar': '<URL>'}),
         ('Specify a stylesheet file, relative to the current working '
          'directory.  The path is adjusted relative to the output HTML '
          'file.  Overrides --stylesheet.',
          ['--stylesheet-path'],
          {'metavar': '<file>'}),
         ('Link to the stylesheet in the output HTML file.  This is the '
          'default.',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Embed the stylesheet in the output HTML file.  The stylesheet '
          'file must be accessible during processing (--stylesheet-path is '
          'recommended).  The stylesheet is embedded inside a comment, so it '
          'must not contain the text "--" (two hyphens).  Default: link the '
          'stylesheet, do not embed it.',
          ['--embed-stylesheet'],
          {'action': 'store_true'}),
         ('Format for footnote references: one of "superscript" or '
          '"brackets".  Default is "superscript".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'superscript',
           'metavar': '<format>'}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none".  Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Remove extra vertical whitespace between items of bullet lists '
          'and enumerated lists, when list items are "simple" (i.e., all '
          'items each contain one paragraph and/or one "simple" sublist '
          'only).  Default: enabled.',
          ['--compact-lists'],
          {'default': 1, 'action': 'store_true'}),
         ('Disable compact simple bullet and enumerated lists.',
          ['--no-compact-lists'],
          {'dest': 'compact_lists', 'action': 'store_false'}),
         ('Omit the XML declaration.  Use with caution.',
          ['--no-xml-declaration'],
          {'dest': 'xml_declaration', 'default': 1,
           'action': 'store_false'}),))

    relative_path_settings = ('stylesheet_path',)

    output = None
    """Final translated form of `document`."""

    def __init__(self):
        writers.Writer.__init__(self)
        # Indirection point so subclasses can substitute another translator.
        self.translator_class = HTMLTranslator

    def translate(self):
        visitor = self.translator_class(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
        # Expose the individual document fragments for template-style reuse.
        self.head_prefix = visitor.head_prefix
        self.stylesheet = visitor.stylesheet
        self.head = visitor.head
        self.body_prefix = visitor.body_prefix
        self.body_pre_docinfo = visitor.body_pre_docinfo
        self.docinfo = visitor.docinfo
        self.body = visitor.body
        self.body_suffix = visitor.body_suffix
class
HTMLTranslator
(
nodes
.
NodeVisitor
):
"""
This HTML writer has been optimized to produce visually compact
lists (less vertical whitespace). HTML's mixed content models
allow list items to contain "<li><p>body elements</p></li>" or
"<li>just text</li>" or even "<li>text<p>and body
elements</p>combined</li>", each with different effects. It would
be best to stick with strict body elements in list items, but they
affect vertical spacing in browsers (although they really
shouldn't).
Here is an outline of the optimization:
- Check for and omit <p> tags in "simple" lists: list items
contain either a single paragraph, a nested simple list, or a
paragraph followed by a nested simple list. This means that
this list can be compact:
- Item 1.
- Item 2.
But this list cannot be compact:
- Item 1.
This second paragraph forces space between list items.
- Item 2.
- In non-list contexts, omit <p> tags on a paragraph if that
paragraph is the only child of its parent (footnotes & citations
are allowed a label first).
- Regardless of the above, in definitions, table cells, field bodies,
option descriptions, and list items, mark the first child with
'class="first"' and the last child with 'class="last"'. The stylesheet
sets the margins (top & bottom respecively) to 0 for these elements.
The ``no_compact_lists`` setting (``--no-compact-lists`` command-line
option) disables list whitespace optimization.
"""
    # Boilerplate fragments for the generated document head.
    xml_declaration = '<?xml version="1.0" encoding="%s" ?>\n'
    doctype = ('<!DOCTYPE html'
               ' PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
               ' "http://www.w3.org/TR/xhtml1/DTD/'
               'xhtml1-transitional.dtd">\n')
    html_head = ('<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="%s" '
                 'lang="%s">\n<head>\n')
    content_type = ('<meta http-equiv="Content-Type" content="text/html; '
                    'charset=%s" />\n')
    generator = ('<meta name="generator" content="Docutils %s: '
                 'http://docutils.sourceforge.net/" />\n')
    stylesheet_link = '<link rel="stylesheet" href="%s" type="text/css" />\n'
    embedded_stylesheet = '<style type="text/css"><!--\n\n%s\n--></style>\n'
    # Tags that take a "name" attribute in addition to "id" (for
    # compatibility with old browsers; see starttag()).
    named_tags = {'a': 1, 'applet': 1, 'form': 1, 'frame': 1, 'iframe': 1,
                  'img': 1, 'map': 1}
    # Matches a word, a run of spaces, or a newline (used for wrapping).
    words_and_spaces = re.compile(r'\S+| +|\n')
    def __init__(self, document):
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        lcode = settings.language_code
        self.language = languages.get_language(lcode)
        # Document head boilerplate; an XML declaration may be prepended.
        self.head_prefix = [
              self.doctype,
              self.html_head % (lcode, lcode),
              self.content_type % settings.output_encoding,
              self.generator % docutils.__version__]
        if settings.xml_declaration:
            self.head_prefix.insert(
                0, self.xml_declaration % settings.output_encoding)
        self.head = []
        if settings.embed_stylesheet:
            # Resolve the stylesheet path relative to the current working
            # directory ('dummy' stands in for the output file name).
            stylesheet = self.get_stylesheet_reference(
                os.path.join(os.getcwd(), 'dummy'))
            stylesheet_text = open(stylesheet).read()
            self.stylesheet = [self.embedded_stylesheet % stylesheet_text]
        else:
            stylesheet = self.get_stylesheet_reference()
            if stylesheet:
                self.stylesheet = [self.stylesheet_link % stylesheet]
            else:
                self.stylesheet = []
        self.body_prefix = ['</head>\n<body>\n']
        self.body_pre_docinfo = []
        self.docinfo = []
        self.body = []
        self.body_suffix = ['</body>\n</html>\n']
        # Translation state.
        self.section_level = 0
        self.context = []          # ad-hoc stack used by visit/depart pairs
        self.topic_class = ''
        self.colspecs = []
        self.compact_p = 1
        self.compact_simple = None
        self.in_docinfo = None
        self.in_sidebar = None
def
get_stylesheet_reference
(
self
,
relative_to
=
None
):
settings
=
self
.
settings
if
settings
.
stylesheet_path
:
if
relative_to
==
None
:
relative_to
=
settings
.
_destination
return
utils
.
relative_path
(
relative_to
,
settings
.
stylesheet_path
)
else
:
return
settings
.
stylesheet
def
astext
(
self
):
return
''
.
join
(
self
.
head_prefix
+
self
.
head
+
self
.
stylesheet
+
self
.
body_prefix
+
self
.
body_pre_docinfo
+
self
.
docinfo
+
self
.
body
+
self
.
body_suffix
)
def
encode
(
self
,
text
):
"""Encode special characters in `text` & return."""
# @@@ A codec to do these and all other HTML entities would be nice.
text
=
text
.
replace
(
"&"
,
"&"
)
text
=
text
.
replace
(
"<"
,
"<"
)
text
=
text
.
replace
(
'"'
,
"""
)
text
=
text
.
replace
(
">"
,
">"
)
text
=
text
.
replace
(
"@"
,
"@"
)
# may thwart some address harvesters
return
text
    def attval(self, text,
               whitespace=re.compile('[\n\r\t\v\f]')):
        """Cleanse, HTML encode, and return attribute value text."""
        # Collapse control whitespace to single spaces before encoding.
        return self.encode(whitespace.sub(' ', text))
    def starttag(self, node, tagname, suffix='\n', infix='', **attributes):
        """
        Construct and return a start tag given a node (id & class attributes
        are extracted), tag name, and optional attributes.
        """
        tagname = tagname.lower()
        atts = {}
        for (name, value) in attributes.items():
            atts[name.lower()] = value
        for att in ('class',):          # append to node attribute
            if node.has_key(att) or atts.has_key(att):
                atts[att] = \
                      (node.get(att, '') + ' ' + atts.get(att, '')).strip()
        for att in ('id',):             # node attribute overrides
            if node.has_key(att):
                atts[att] = node[att]
        if atts.has_key('id') and self.named_tags.has_key(tagname):
            atts['name'] = atts['id']   # for compatibility with old browsers
        attlist = atts.items()
        attlist.sort()
        parts = [tagname]
        for name, value in attlist:
            if value is None:           # boolean attribute
                # According to the HTML spec, ``<element boolean>`` is good,
                # ``<element boolean="boolean">`` is bad.
                # (But the XHTML (XML) spec says the opposite.  <sigh>)
                parts.append(name.lower())
            elif isinstance(value, ListType):
                # Multi-valued attribute: space-separate the values.
                values = [unicode(v) for v in value]
                parts.append('%s="%s"' % (name.lower(),
                                          self.attval(' '.join(values))))
            else:
                parts.append('%s="%s"' % (name.lower(),
                                          self.attval(unicode(value))))
        return '<%s%s>%s' % (' '.join(parts), infix, suffix)
    def emptytag(self, node, tagname, suffix='\n', **attributes):
        """Construct and return an XML-compatible empty tag."""
        # Delegate to starttag() with a self-closing " /" infix.
        return self.starttag(node, tagname, suffix, infix=' /', **attributes)
    def visit_Text(self, node):
        # All text content is HTML-escaped on the way out.
        self.body.append(self.encode(node.astext()))

    def depart_Text(self, node):
        pass

    def visit_abbreviation(self, node):
        # @@@ implementation incomplete ("title" attribute)
        self.body.append(self.starttag(node, 'abbr', ''))

    def depart_abbreviation(self, node):
        self.body.append('</abbr>')

    def visit_acronym(self, node):
        # @@@ implementation incomplete ("title" attribute)
        self.body.append(self.starttag(node, 'acronym', ''))

    def depart_acronym(self, node):
        self.body.append('</acronym>')

    def visit_address(self, node):
        # Addresses are docinfo items rendered in a <pre> to keep line
        # breaks.
        self.visit_docinfo_item(node, 'address', meta=None)
        self.body.append(self.starttag(node, 'pre', CLASS='address'))

    def depart_address(self, node):
        self.body.append('\n</pre>\n')
        self.depart_docinfo_item()

    def visit_admonition(self, node, name=''):
        # Generic admonition <div>; a known `name` adds a localized title.
        self.body.append(self.starttag(node, 'div',
                                       CLASS=(name or 'admonition')))
        if name:
            self.body.append('<p class="admonition-title">'
                             + self.language.labels[name] + '</p>\n')

    def depart_admonition(self, node=None):
        self.body.append('</div>\n')

    def visit_attention(self, node):
        self.visit_admonition(node, 'attention')

    def depart_attention(self, node):
        self.depart_admonition()
    # (prefix, suffix) pairs keyed by the --attribution setting.
    attribution_formats = {'dash': ('&mdash;', ''),
                           'parentheses': ('(', ')'),
                           'parens': ('(', ')'),
                           'none': ('', '')}

    def visit_attribution(self, node):
        prefix, suffix = self.attribution_formats[self.settings.attribution]
        self.context.append(suffix)  # popped in depart_attribution()
        self.body.append(
            self.starttag(node, 'p', prefix, CLASS='attribution'))

    def depart_attribution(self, node):
        self.body.append(self.context.pop() + '</p>\n')

    def visit_author(self, node):
        self.visit_docinfo_item(node, 'author')

    def depart_author(self, node):
        self.depart_docinfo_item()

    def visit_authors(self, node):
        pass

    def depart_authors(self, node):
        pass

    def visit_block_quote(self, node):
        self.body.append(self.starttag(node, 'blockquote'))

    def depart_block_quote(self, node):
        self.body.append('</blockquote>\n')

    def check_simple_list(self, node):
        """Check for a simple list that can be rendered compactly."""
        visitor = SimpleListChecker(self.document)
        try:
            node.walk(visitor)
        except nodes.NodeFound:
            # The checker raises NodeFound as soon as it sees anything
            # that disqualifies the list from compact rendering.
            return None
        else:
            return 1
    def visit_bullet_list(self, node):
        atts = {}
        old_compact_simple = self.compact_simple
        # Save compaction state; restored in depart_bullet_list().
        self.context.append((self.compact_simple, self.compact_p))
        self.compact_p = None
        self.compact_simple = (self.settings.compact_lists and
                               (self.compact_simple
                                or self.topic_class == 'contents'
                                or self.check_simple_list(node)))
        if self.compact_simple and not old_compact_simple:
            atts['class'] = 'simple'
        self.body.append(self.starttag(node, 'ul', **atts))

    def depart_bullet_list(self, node):
        self.compact_simple, self.compact_p = self.context.pop()
        self.body.append('</ul>\n')

    def visit_caption(self, node):
        self.body.append(self.starttag(node, 'p', '', CLASS='caption'))

    def depart_caption(self, node):
        self.body.append('</p>\n')

    def visit_caution(self, node):
        self.visit_admonition(node, 'caution')

    def depart_caution(self, node):
        self.depart_admonition()

    def visit_citation(self, node):
        # Citations are rendered as borderless two-column tables:
        # label | body.
        self.body.append(self.starttag(node, 'table', CLASS='citation',
                                       frame="void", rules="none"))
        self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
                         '<col />\n'
                         '<tbody valign="top">\n'
                         '<tr>')
        self.footnote_backrefs(node)

    def depart_citation(self, node):
        self.body.append('</td></tr>\n'
                         '</tbody>\n</table>\n')

    def visit_citation_reference(self, node):
        href = ''
        if node.has_key('refid'):
            href = '#' + node['refid']
        elif node.has_key('refname'):
            href = '#' + self.document.nameids[node['refname']]
        self.body.append(self.starttag(node, 'a', '[', href=href,
                                       CLASS='citation-reference'))

    def depart_citation_reference(self, node):
        self.body.append(']</a>')
    def visit_classifier(self, node):
        self.body.append(' <span class="classifier-delimiter">:</span> ')
        self.body.append(self.starttag(node, 'span', '', CLASS='classifier'))

    def depart_classifier(self, node):
        self.body.append('</span>')

    def visit_colspec(self, node):
        # Column specs are buffered; write_colspecs() emits them once the
        # total table width is known.
        self.colspecs.append(node)

    def depart_colspec(self, node):
        pass

    def write_colspecs(self):
        # Emit one <col> per buffered colspec, with widths converted to
        # rounded percentages of the total.
        width = 0
        for node in self.colspecs:
            width += node['colwidth']
        for node in self.colspecs:
            colwidth = int(node['colwidth'] * 100.0 / width + 0.5)
            self.body.append(self.emptytag(node, 'col',
                                           width='%i%%' % colwidth))
        self.colspecs = []

    def visit_comment(self, node,
                      sub=re.compile('-(?=-)').sub):
        """Escape double-dashes in comment text."""
        self.body.append('<!-- %s -->\n' % sub('- ', node.astext()))
        # Content already processed:
        raise nodes.SkipNode
def
visit_contact
(
self
,
node
):
self
.
visit_docinfo_item
(
node
,
'contact'
,
meta
=
None
)
def
depart_contact
(
self
,
node
):
self
.
depart_docinfo_item
()
def
visit_copyright
(
self
,
node
):
self
.
visit_docinfo_item
(
node
,
'copyright'
)
def
depart_copyright
(
self
,
node
):
self
.
depart_docinfo_item
()
def
visit_danger
(
self
,
node
):
self
.
visit_admonition
(
node
,
'danger'
)
def
depart_danger
(
self
,
node
):
self
.
depart_admonition
()
def
visit_date
(
self
,
node
):
self
.
visit_docinfo_item
(
node
,
'date'
)
def
depart_date
(
self
,
node
):
self
.
depart_docinfo_item
()
def
visit_decoration
(
self
,
node
):
pass
def
depart_decoration
(
self
,
node
):
pass
def visit_definition(self, node):
    """Close the pending <dt> and open the <dd> for this definition."""
    self.body.extend(['</dt>\n', self.starttag(node, 'dd', '')])
    if len(node):
        node[0].set_class('first')
        node[-1].set_class('last')

def depart_definition(self, node):
    self.body.append('</dd>\n')

def visit_definition_list(self, node):
    self.body.append(self.starttag(node, 'dl'))

def depart_definition_list(self, node):
    self.body.append('</dl>\n')

def visit_definition_list_item(self, node):
    # No markup of its own; <dt>/<dd> come from term/definition.
    pass

def depart_definition_list_item(self, node):
    pass
def visit_description(self, node):
    """Open the option-list description cell."""
    self.body.append(self.starttag(node, 'td', ''))
    if len(node):
        first, last = node[0], node[-1]
        first.set_class('first')
        last.set_class('last')

def depart_description(self, node):
    self.body.append('</td>')
def visit_docinfo(self, node):
    """Open the docinfo table; remember where it starts in self.body."""
    self.context.append(len(self.body))
    self.body.append(self.starttag(node, 'table', CLASS='docinfo',
                                   frame="void", rules="none"))
    self.body.append('<col class="docinfo-name" />\n'
                     '<col class="docinfo-content" />\n'
                     '<tbody valign="top">\n')
    self.in_docinfo = 1

def depart_docinfo(self, node):
    """Close the table and split self.body around the docinfo section."""
    self.body.append('</tbody>\n</table>\n')
    self.in_docinfo = None
    start = self.context.pop()
    self.body_pre_docinfo = self.body[:start]
    self.docinfo = self.body[start:]
    self.body = []
def visit_docinfo_item(self, node, name, meta=1):
    """Render one docinfo field as a table row (and optionally a <meta>)."""
    if meta:
        tag = ('<meta name="%s" content="%s" />\n'
               % (name, self.attval(node.astext())))
        self.head.append(tag)
    self.body.append(self.starttag(node, 'tr', ''))
    label = self.language.labels[name]
    self.body.append('<th class="docinfo-name">%s:</th>\n<td>' % label)
    if len(node):
        # Only Element children carry classes.
        if isinstance(node[0], nodes.Element):
            node[0].set_class('first')
        if isinstance(node[-1], nodes.Element):
            node[-1].set_class('last')

def depart_docinfo_item(self):
    self.body.append('</td></tr>\n')
def visit_doctest_block(self, node):
    self.body.append(self.starttag(node, 'pre', CLASS='doctest-block'))

def depart_doctest_block(self, node):
    self.body.append('\n</pre>\n')

def visit_document(self, node):
    self.body.append(self.starttag(node, 'div', CLASS='document'))

def depart_document(self, node):
    self.body.append('</div>\n')

def visit_emphasis(self, node):
    self.body.append('<em>')

def depart_emphasis(self, node):
    self.body.append('</em>')
def visit_entry(self, node):
    """Open a table cell: <th> inside a thead, <td> elsewhere."""
    in_head = isinstance(node.parent.parent, nodes.thead)
    if in_head:
        tagname = 'th'
    else:
        tagname = 'td'
    atts = {}
    if node.has_key('morerows'):
        atts['rowspan'] = node['morerows'] + 1
    if node.has_key('morecols'):
        atts['colspan'] = node['morecols'] + 1
    self.body.append(self.starttag(node, tagname, '', **atts))
    self.context.append('</%s>\n' % tagname.lower())
    if len(node) == 0:
        # empty cell
        self.body.append('&nbsp;')
    else:
        node[0].set_class('first')
        node[-1].set_class('last')

def depart_entry(self, node):
    self.body.append(self.context.pop())
def visit_enumerated_list(self, node):
    """
    The 'start' attribute does not conform to HTML 4.01's strict.dtd, but
    CSS1 doesn't help. CSS2 isn't widely enough supported yet to be
    usable.
    """
    atts = {}
    if node.has_key('start'):
        atts['start'] = node['start']
    if node.has_key('enumtype'):
        atts['class'] = node['enumtype']
    # @@@ To do: prefix, suffix. How? Change prefix/suffix to a
    # single "format" attribute? Use CSS2?
    was_compact = self.compact_simple
    self.context.append((self.compact_simple, self.compact_p))
    self.compact_p = None
    self.compact_simple = (self.settings.compact_lists and
                           (self.compact_simple
                            or self.topic_class == 'contents'
                            or self.check_simple_list(node)))
    if self.compact_simple and not was_compact:
        atts['class'] = (atts.get('class', '') + ' simple').strip()
    self.body.append(self.starttag(node, 'ol', **atts))

def depart_enumerated_list(self, node):
    # Restore the compaction state saved in visit_enumerated_list().
    self.compact_simple, self.compact_p = self.context.pop()
    self.body.append('</ol>\n')
def visit_error(self, node):
    # 'error' admonition via the shared handler.
    self.visit_admonition(node, 'error')

def depart_error(self, node):
    self.depart_admonition()

def visit_field(self, node):
    self.body.append(self.starttag(node, 'tr', '', CLASS='field'))

def depart_field(self, node):
    self.body.append('</tr>\n')
def visit_field_body(self, node):
    """Open the field-body cell of a field-list row."""
    self.body.append(self.starttag(node, 'td', '', CLASS='field-body'))
    if len(node):
        node[0].set_class('first')
        node[-1].set_class('last')

def depart_field_body(self, node):
    self.body.append('</td>\n')

def visit_field_list(self, node):
    """Field lists are rendered as a two-column borderless table."""
    self.body.append(self.starttag(node, 'table', frame='void',
                                   rules='none', CLASS='field-list'))
    self.body.append('<col class="field-name" />\n'
                     '<col class="field-body" />\n'
                     '<tbody valign="top">\n')

def depart_field_list(self, node):
    self.body.append('</tbody>\n</table>\n')
def visit_field_name(self, node):
    """Open the field-name cell; long names span both columns."""
    atts = {}
    if self.in_docinfo:
        atts['class'] = 'docinfo-name'
    else:
        atts['class'] = 'field-name'
    # A name longer than 14 characters gets the whole row; the body
    # then starts on a fresh row with an empty name cell.
    if len(node.astext()) > 14:
        atts['colspan'] = 2
        self.context.append('</tr>\n<tr><td>&nbsp;</td>')
    else:
        self.context.append('')
    self.body.append(self.starttag(node, 'th', '', **atts))

def depart_field_name(self, node):
    self.body.append(':</th>')
    self.body.append(self.context.pop())
def visit_figure(self, node):
    """Open a figure <div>; an explicit width becomes an inline style."""
    atts = {'class': 'figure'}
    width = node.get('width')
    if width:
        atts['style'] = 'width: %spx' % node['width']
    self.body.append(self.starttag(node, 'div', **atts))

def depart_figure(self, node):
    self.body.append('</div>\n')
def visit_footer(self, node):
    # Remember where the footer's content starts in self.body.
    self.context.append(len(self.body))

def depart_footer(self, node):
    """Move everything rendered since visit_footer into body_suffix."""
    start = self.context.pop()
    rendered = self.body[start:]
    opening = ['<hr class="footer"/>\n',
               self.starttag(node, 'div', CLASS='footer')]
    self.body_suffix[:0] = opening + rendered + ['</div>\n']
    del self.body[start:]
def visit_footnote(self, node):
    """Render a footnote as a small two-column table (label, body)."""
    self.body.append(self.starttag(node, 'table', CLASS='footnote',
                                   frame="void", rules="none"))
    self.body.append('<colgroup><col class="label" /><col /></colgroup>\n'
                     '<tbody valign="top">\n'
                     '<tr>')
    self.footnote_backrefs(node)
def footnote_backrefs(self, node):
    """Push backlink markup (or '') and the label anchor on self.context."""
    backlinked = (self.settings.footnote_backlinks
                  and node.hasattr('backrefs'))
    if not backlinked:
        self.context.append('')
        self.context.append('<a name="%s">' % node['id'])
        return
    backrefs = node['backrefs']
    if len(backrefs) == 1:
        # Single backref: the label itself becomes the backlink.
        self.context.append('')
        self.context.append('<a class="fn-backref" href="#%s" '
                            'name="%s">' % (backrefs[0], node['id']))
    else:
        links = []
        i = 1
        for backref in backrefs:
            links.append('<a class="fn-backref" href="#%s">%s</a>'
                         % (backref, i))
            i += 1
        self.context.append('<em>(%s)</em> ' % ', '.join(links))
        self.context.append('<a name="%s">' % node['id'])
def depart_footnote(self, node):
    # Close the cell, row and the footnote table itself.
    self.body.append('</td></tr>\n'
                     '</tbody>\n</table>\n')
def visit_footnote_reference(self, node):
    """Open the <a> for a footnote reference.

    The opening markup for the chosen format ('brackets' or
    'superscript') is emitted here; the matching closing markup is
    pushed on ``self.context`` for depart_footnote_reference() to pop.
    """
    href = ''
    if node.has_key('refid'):
        href = '#' + node['refid']
    elif node.has_key('refname'):
        href = '#' + self.document.nameids[node['refname']]
    format = self.settings.footnote_references
    if format == 'brackets':
        suffix = '['
        self.context.append(']')
    elif format == 'superscript':
        suffix = '<sup>'
        self.context.append('</sup>')
    else:
        # shouldn't happen
        suffix = '???'
        # Bug fix: was ``self.content.append('???')`` -- there is no
        # ``content`` attribute, so this branch raised AttributeError
        # and left nothing on self.context for the depart method to pop.
        self.context.append('???')
    self.body.append(self.starttag(node, 'a', suffix, href=href,
                                   CLASS='footnote-reference'))

def depart_footnote_reference(self, node):
    # Pop the closing markup pushed by the visit method.
    self.body.append(self.context.pop() + '</a>')
def visit_generated(self, node):
    # Generated text needs no wrapper markup.
    pass

def depart_generated(self, node):
    pass

def visit_header(self, node):
    # Remember where the header's content starts in self.body.
    self.context.append(len(self.body))

def depart_header(self, node):
    """Move everything rendered since visit_header into body_prefix."""
    start = self.context.pop()
    self.body_prefix.append(self.starttag(node, 'div', CLASS='header'))
    self.body_prefix.extend(self.body[start:])
    self.body_prefix.append('<hr />\n</div>\n')
    del self.body[start:]

def visit_hint(self, node):
    self.visit_admonition(node, 'hint')

def depart_hint(self, node):
    self.depart_admonition()
def visit_image(self, node):
    """Emit an <img>, wrapping it in <p> when not inside text."""
    atts = node.attributes.copy()
    if atts.has_key('class'):
        # prevent duplication with node attrs
        del atts['class']
    atts['src'] = atts['uri']
    del atts['uri']
    if not atts.has_key('alt'):
        atts['alt'] = atts['src']
    inline = isinstance(node.parent, nodes.TextElement)
    if inline:
        self.context.append('')
    else:
        self.body.append('<p>')
        self.context.append('</p>\n')
    self.body.append(self.emptytag(node, 'img', '', **atts))

def depart_image(self, node):
    self.body.append(self.context.pop())
def visit_important(self, node):
    self.visit_admonition(node, 'important')

def depart_important(self, node):
    self.depart_admonition()

def visit_inline(self, node):
    # Generic inline container.
    self.body.append(self.starttag(node, 'span', ''))

def depart_inline(self, node):
    self.body.append('</span>')
def visit_label(self, node):
    """Open the label cell of a footnote/citation row.

    The anchor/backref markup pushed by footnote_backrefs() is consumed
    from self.context here and in depart_label().
    """
    prefix = self.context.pop()
    self.body.append(self.starttag(node, 'td', '%s[' % prefix,
                                   CLASS='label'))

def depart_label(self, node):
    self.body.append(']</a></td><td>%s' % self.context.pop())

def visit_legend(self, node):
    self.body.append(self.starttag(node, 'div', CLASS='legend'))

def depart_legend(self, node):
    self.body.append('</div>\n')
def visit_line_block(self, node):
    self.body.append(self.starttag(node, 'pre', CLASS='line-block'))

def depart_line_block(self, node):
    self.body.append('\n</pre>\n')

def visit_list_item(self, node):
    self.body.append(self.starttag(node, 'li', ''))
    if len(node):
        node[0].set_class('first')

def depart_list_item(self, node):
    self.body.append('</li>\n')
def visit_literal(self, node):
    """Process text to prevent tokens from wrapping."""
    self.body.append(self.starttag(node, 'tt', '', CLASS='literal'))
    text = node.astext()
    for token in self.words_and_spaces.findall(text):
        stripped = token.strip()
        if stripped:
            # Protect text like "--an-option" from bad line wrapping:
            encoded = self.encode(token)
            self.body.append('<span class="pre">%s</span>' % encoded)
        elif token in ('\n', ' '):
            # Allow breaks at whitespace:
            self.body.append(token)
        else:
            # Protect runs of multiple spaces; the last space can wrap:
            self.body.append('&nbsp;' * (len(token) - 1) + ' ')
    self.body.append('</tt>')
    # Content already processed:
    raise nodes.SkipNode
def visit_literal_block(self, node):
    self.body.append(self.starttag(node, 'pre', CLASS='literal-block'))

def depart_literal_block(self, node):
    self.body.append('\n</pre>\n')

def visit_meta(self, node):
    # Meta elements go into the document head, not the body.
    self.head.append(self.emptytag(node, 'meta', **node.attributes))

def depart_meta(self, node):
    pass

def visit_note(self, node):
    self.visit_admonition(node, 'note')

def depart_note(self, node):
    self.depart_admonition()
def visit_option(self, node):
    # The counter on top of self.context tracks how many options have
    # been emitted in the current group; separate them with ', '.
    if self.context[-1]:
        self.body.append(', ')

def depart_option(self, node):
    self.context[-1] += 1

def visit_option_argument(self, node):
    delim = node.get('delimiter', ' ')
    self.body.append(delim)
    self.body.append(self.starttag(node, 'var', ''))

def depart_option_argument(self, node):
    self.body.append('</var>')
def visit_option_group(self, node):
    """Open the option cell; long groups span both table columns."""
    atts = {}
    if len(node.astext()) > 14:
        atts['colspan'] = 2
        self.context.append('</tr>\n<tr><td>&nbsp;</td>')
    else:
        self.context.append('')
    self.body.append(self.starttag(node, 'td', **atts))
    self.body.append('<kbd>')
    # count number of options
    self.context.append(0)

def depart_option_group(self, node):
    self.context.pop()
    self.body.append('</kbd></td>\n')
    self.body.append(self.context.pop())
def visit_option_list(self, node):
    """Option lists render as a two-column borderless table."""
    self.body.append(self.starttag(node, 'table', CLASS='option-list',
                                   frame="void", rules="none"))
    self.body.append('<col class="option" />\n'
                     '<col class="description" />\n'
                     '<tbody valign="top">\n')

def depart_option_list(self, node):
    self.body.append('</tbody>\n</table>\n')

def visit_option_list_item(self, node):
    self.body.append(self.starttag(node, 'tr', ''))

def depart_option_list_item(self, node):
    self.body.append('</tr>\n')
def visit_option_string(self, node):
    self.body.append(self.starttag(node, 'span', '', CLASS='option'))

def depart_option_string(self, node):
    self.body.append('</span>')

def visit_organization(self, node):
    self.visit_docinfo_item(node, 'organization')

def depart_organization(self, node):
    self.depart_docinfo_item()
def visit_paragraph(self, node):
    """Open a <p>, unless this paragraph is an optimizable only child."""
    siblings = len(node.parent)
    labelled_pair = (siblings == 2
                     and isinstance(node.parent[0], nodes.label))
    only_child = siblings == 1 or labelled_pair
    if self.compact_simple or (self.compact_p and only_child):
        # Omit <p> tags entirely.
        self.context.append('')
    else:
        self.body.append(self.starttag(node, 'p', ''))
        self.context.append('</p>\n')

def depart_paragraph(self, node):
    self.body.append(self.context.pop())
def visit_problematic(self, node):
    """Wrap problematic text in a span, linked to its system message."""
    if node.hasattr('refid'):
        self.body.append('<a href="#%s" name="%s">'
                         % (node['refid'], node['id']))
        self.context.append('</a>')
    else:
        self.context.append('')
    self.body.append(self.starttag(node, 'span', '', CLASS='problematic'))

def depart_problematic(self, node):
    self.body.append('</span>')
    self.body.append(self.context.pop())
def visit_raw(self, node):
    """Pass raw HTML through verbatim; drop raw text in other formats."""
    if node.get('format') == 'html':
        self.body.append(node.astext())
    # Keep non-HTML raw text out of output:
    raise nodes.SkipNode
def visit_reference(self, node):
    """Open an <a> for an external (refuri) or internal reference."""
    if node.has_key('refuri'):
        href = node['refuri']
    elif node.has_key('refid'):
        href = '#' + node['refid']
    elif node.has_key('refname'):
        # Resolve an unresolved name via the document's name->id map.
        href = '#' + self.document.nameids[node['refname']]
    self.body.append(self.starttag(node, 'a', '', href=href,
                                   CLASS='reference'))

def depart_reference(self, node):
    self.body.append('</a>')
def visit_revision(self, node):
    self.visit_docinfo_item(node, 'revision', meta=None)

def depart_revision(self, node):
    self.depart_docinfo_item()

def visit_row(self, node):
    self.body.append(self.starttag(node, 'tr', ''))

def depart_row(self, node):
    self.body.append('</tr>\n')
def visit_rubric(self, node):
    self.body.append(self.starttag(node, 'p', '', CLASS='rubric'))

def depart_rubric(self, node):
    self.body.append('</p>\n')

def visit_section(self, node):
    # Track nesting depth; visit_title uses it to pick <h1>..<h6>.
    self.section_level += 1
    self.body.append(self.starttag(node, 'div', CLASS='section'))

def depart_section(self, node):
    self.section_level -= 1
    self.body.append('</div>\n')
def visit_sidebar(self, node):
    self.body.append(self.starttag(node, 'div', CLASS='sidebar'))
    # Flag consulted by (sub)title handlers.
    self.in_sidebar = 1

def depart_sidebar(self, node):
    self.body.append('</div>\n')
    self.in_sidebar = None

def visit_status(self, node):
    self.visit_docinfo_item(node, 'status', meta=None)

def depart_status(self, node):
    self.depart_docinfo_item()
def visit_strong(self, node):
    self.body.append('<strong>')

def depart_strong(self, node):
    self.body.append('</strong>')

def visit_subscript(self, node):
    self.body.append(self.starttag(node, 'sub', ''))

def depart_subscript(self, node):
    self.body.append('</sub>')

def visit_substitution_definition(self, node):
    """Internal only."""
    raise nodes.SkipNode

def visit_substitution_reference(self, node):
    # Should have been resolved by a transform before writing.
    self.unimplemented_visit(node)
def visit_subtitle(self, node):
    """Sidebar subtitles become <p>; document subtitles become <h2>."""
    in_sidebar = isinstance(node.parent, nodes.sidebar)
    if in_sidebar:
        self.body.append(self.starttag(node, 'p', '',
                                       CLASS='sidebar-subtitle'))
        self.context.append('</p>\n')
    else:
        self.body.append(self.starttag(node, 'h2', '', CLASS='subtitle'))
        self.context.append('</h2>\n')

def depart_subtitle(self, node):
    self.body.append(self.context.pop())
def visit_superscript(self, node):
    self.body.append(self.starttag(node, 'sup', ''))

def depart_superscript(self, node):
    self.body.append('</sup>')
def visit_system_message(self, node):
    """Render a docutils system message as a titled <div>.

    Messages below the writer's report level are suppressed entirely.
    The title line carries the message type/level, source (with line
    number when available), and backlinks to the offending text.
    """
    if node['level'] < self.document.reporter['writer'].report_level:
        # Level is too low to display:
        raise nodes.SkipNode
    self.body.append(self.starttag(node, 'div', CLASS='system-message'))
    self.body.append('<p class="system-message-title">')
    attr = {}
    backref_text = ''
    if node.hasattr('id'):
        # The message's own anchor, so problematic text can link here.
        attr['name'] = node['id']
    if node.hasattr('backrefs'):
        backrefs = node['backrefs']
        if len(backrefs) == 1:
            backref_text = ('; <em><a href="#%s">backlink</a></em>'
                            % backrefs[0])
        else:
            # Multiple backrefs: enumerate them 1, 2, 3, ...
            i = 1
            backlinks = []
            for backref in backrefs:
                backlinks.append('<a href="#%s">%s</a>' % (backref, i))
                i += 1
            backref_text = ('; <em>backlinks: %s</em>'
                            % ', '.join(backlinks))
    if node.hasattr('line'):
        line = ', line %s' % node['line']
    else:
        line = ''
    if attr:
        # Wrap the message type/level in an anchor carrying the name.
        a_start = self.starttag({}, 'a', '', **attr)
        a_end = '</a>'
    else:
        a_start = a_end = ''
    self.body.append('System Message: %s%s/%s%s (<tt>%s</tt>%s)%s</p>\n'
                     % (a_start, node['type'], node['level'], a_end,
                        self.encode(node['source']), line, backref_text))

def depart_system_message(self, node):
    self.body.append('</div>\n')
def visit_table(self, node):
    """Open a bordered table with rules between all cells."""
    self.body.append(self.starttag(node, 'table', CLASS="table",
                                   frame='border', rules='all'))

def depart_table(self, node):
    self.body.append('</table>\n')
def visit_target(self, node):
    """Emit an anchor only for internal targets without a reference."""
    is_reference = (node.has_key('refuri') or node.has_key('refid')
                    or node.has_key('refname'))
    if is_reference:
        self.context.append('')
    else:
        self.body.append(self.starttag(node, 'a', '', CLASS='target'))
        self.context.append('</a>')

def depart_target(self, node):
    self.body.append(self.context.pop())
def visit_tbody(self, node):
    self.write_colspecs()
    # '</colgroup>\n' or '' -- pushed by visit_tgroup/visit_thead.
    self.body.append(self.context.pop())
    self.body.append(self.starttag(node, 'tbody', valign='top'))

def depart_tbody(self, node):
    self.body.append('</tbody>\n')

def visit_term(self, node):
    self.body.append(self.starttag(node, 'dt', ''))

def depart_term(self, node):
    """
    Leave the end tag to `self.visit_definition()`, in case there's a
    classifier.
    """
    pass
def visit_tgroup(self, node):
    # Mozilla needs <colgroup>:
    self.body.append(self.starttag(node, 'colgroup'))
    # Appended by thead or tbody:
    self.context.append('</colgroup>\n')

def depart_tgroup(self, node):
    pass

def visit_thead(self, node):
    self.write_colspecs()
    # '</colgroup>\n'
    self.body.append(self.context.pop())
    # There may or may not be a <thead>; this is for <tbody> to use:
    self.context.append('')
    self.body.append(self.starttag(node, 'thead', valign='bottom'))

def depart_thead(self, node):
    self.body.append('</thead>\n')
# 'tip' admonition, rendered via the shared admonition handler.
def visit_tip(self, node):
    self.visit_admonition(node, 'tip')

def depart_tip(self, node):
    self.depart_admonition()
def visit_title(self, node):
    """Only 6 section levels are supported by HTML."""
    # check_id is set for the <p>-style titles (topic/sidebar/admonition)
    # that may still need a named anchor for their parent's id.
    check_id = 0
    if isinstance(node.parent, nodes.topic):
        self.body.append(
              self.starttag(node, 'p', '', CLASS='topic-title'))
        check_id = 1
    elif isinstance(node.parent, nodes.sidebar):
        self.body.append(
              self.starttag(node, 'p', '', CLASS='sidebar-title'))
        check_id = 1
    elif isinstance(node.parent, nodes.admonition):
        self.body.append(
              self.starttag(node, 'p', '', CLASS='admonition-title'))
        check_id = 1
    elif self.section_level == 0:
        # document title
        self.head.append('<title>%s</title>\n'
                         % self.encode(node.astext()))
        self.body.append(self.starttag(node, 'h1', '', CLASS='title'))
        self.context.append('</h1>\n')
    else:
        # Section title: <hN> by nesting depth, wrapping an anchor that
        # names the section and optionally links back to the ToC.
        self.body.append(
              self.starttag(node, 'h%s' % self.section_level, ''))
        atts = {}
        if node.parent.hasattr('id'):
            atts['name'] = node.parent['id']
        if node.hasattr('refid'):
            atts['class'] = 'toc-backref'
            atts['href'] = '#' + node['refid']
        self.body.append(self.starttag({}, 'a', '', **atts))
        self.context.append('</a></h%s>\n' % (self.section_level))
    if check_id:
        if node.parent.hasattr('id'):
            self.body.append(
                self.starttag({}, 'a', '', name=node.parent['id']))
            self.context.append('</a></p>\n')
        else:
            self.context.append('</p>\n')

def depart_title(self, node):
    # Pop the closing markup chosen by visit_title.
    self.body.append(self.context.pop())
def visit_title_reference(self, node):
    self.body.append(self.starttag(node, 'cite', ''))

def depart_title_reference(self, node):
    self.body.append('</cite>')

def visit_topic(self, node):
    self.body.append(self.starttag(node, 'div', CLASS='topic'))
    # Remembered so enumerated/bullet lists can detect a 'contents' topic.
    self.topic_class = node.get('class')

def depart_topic(self, node):
    self.body.append('</div>\n')
    self.topic_class = ''

def visit_transition(self, node):
    self.body.append(self.emptytag(node, 'hr'))

def depart_transition(self, node):
    pass
def visit_version(self, node):
    # Docinfo 'version' row; no <meta> tag is emitted for it.
    self.visit_docinfo_item(node, 'version', meta=None)

def depart_version(self, node):
    self.depart_docinfo_item()

def visit_warning(self, node):
    # 'warning' admonition via the shared admonition handler.
    self.visit_admonition(node, 'warning')

def depart_warning(self, node):
    self.depart_admonition()

def unimplemented_visit(self, node):
    # Catch-all for node types this writer does not support.
    raise NotImplementedError('visiting unimplemented node type: %s'
                              % node.__class__.__name__)
class SimpleListChecker(nodes.GenericNodeVisitor):

    """
    Raise `nodes.SkipNode` if non-simple list item is encountered.

    Here "simple" means a list item containing nothing other than a single
    paragraph, a simple list, or a paragraph followed by a simple list.
    """

    def default_visit(self, node):
        # Any node type without an explicit handler disqualifies the list.
        raise nodes.NodeFound

    def visit_bullet_list(self, node):
        pass

    def visit_enumerated_list(self, node):
        pass

    def visit_list_item(self, node):
        # Judge simplicity by the visible children only.
        children = [child for child in node.get_children()
                    if not isinstance(child, nodes.Invisible)]
        if (children
            and isinstance(children[0], nodes.paragraph)
            and (isinstance(children[-1], nodes.bullet_list)
                 or isinstance(children[-1], nodes.enumerated_list))):
            # A trailing sub-list after a paragraph is allowed.
            children.pop()
        if len(children) <= 1:
            return
        else:
            raise nodes.NodeFound

    def visit_paragraph(self, node):
        raise nodes.SkipNode

    def invisible_visit(self, node):
        """Invisible nodes should be ignored."""
        pass

    visit_comment = invisible_visit
    visit_substitution_definition = invisible_visit
    visit_target = invisible_visit
    visit_pending = invisible_visit
lib/python/docutils/writers/html4zope.py
deleted
100644 → 0
View file @
e1142d2d
# Author: Andreas Jung
# Contact: andreas@andreas-jung.com
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
Writer module to integrate reST into Zope. This writer subclasses the standard
html4css1 writer and changes the starting level for <H> elements from 1 to 3
(default behaviour inside Zope).
"""
__docformat__
=
'reStructuredText'
from
docutils
import
nodes
from
html4css1
import
Writer
as
CSS1Writer
,
HTMLTranslator
as
CSS1HTMLTranslator
import
os
# Starting heading level for reST sections rendered inside Zope
# (default 3, per the module docstring); override with the
# STX_DEFAULT_LEVEL environment variable.
default_level = int(os.environ.get('STX_DEFAULT_LEVEL', 3))
class Writer(CSS1Writer):
    """html4css1 Writer that substitutes the Zope-aware translator."""

    def __init__(self):
        CSS1Writer.__init__(self)
        # Use the heading-level-shifting translator defined below.
        self.translator_class = HTMLTranslator
class HTMLTranslator(CSS1HTMLTranslator):
    """html4css1 translator that starts <H> elements at `default_level`."""

    def astext(self):
        # Return only the body fragment (no full-page head/prefix),
        # suitable for embedding in a Zope page.
        return ''.join(self.body)

    def visit_title(self, node):
        """Only 6 section levels are supported by HTML."""
        if isinstance(node.parent, nodes.topic):
            # Topic titles render as a styled paragraph, optionally
            # wrapping a named anchor for the parent's id.
            self.body.append(
                  self.starttag(node, 'p', '', CLASS='topic-title'))
            if node.parent.hasattr('id'):
                self.body.append(
                    self.starttag({}, 'a', '', name=node.parent['id']))
                self.context.append('</a></p>\n')
            else:
                self.context.append('</p>\n')
        elif self.section_level == 0:
            # document title: rendered at `default_level` rather than h1.
            self.head.append('<title>%s</title>\n'
                             % self.encode(node.astext()))
            self.body.append(self.starttag(node, 'h%d' % default_level,
                                           '', CLASS='title'))
            self.context.append('</h%d>\n' % default_level)
        else:
            # Section titles are shifted so a first-level section uses
            # <h{default_level}> instead of <h1>.
            self.body.append(
                  self.starttag(node, 'h%s' % (
                default_level+self.section_level-1), ''))
            atts = {}
            if node.parent.hasattr('id'):
                atts['name'] = node.parent['id']
            if node.hasattr('refid'):
                atts['class'] = 'toc-backref'
                atts['href'] = '#' + node['refid']
            self.body.append(self.starttag({}, 'a', '', **atts))
            self.context.append('</a></h%s>\n' % (
                (default_level+self.section_level-1)))
lib/python/docutils/writers/latex2e.py
deleted
100644 → 0
View file @
e1142d2d
"""
:Author: Engelbert Gruber
:Contact: grubert@users.sourceforge.net
:Revision: $Revision: 1.1 $
:Date: $Date: 2003/07/10 15:50:05 $
:Copyright: This module has been placed in the public domain.
LaTeX2e document tree Writer.
"""
__docformat__
=
'reStructuredText'
# code contributions from several people included, thanks to all.
# some named: David Abrahams, Julien Letessier, who is missing.
#
# convention deactivate code by two # e.g. ##.
import
sys
import
time
import
re
import
string
from
types
import
ListType
from
docutils
import
writers
,
nodes
,
languages
class Writer(writers.Writer):

    # Output format names this writer answers to.
    supported = ('latex', 'latex2e')
    """Formats this writer supports."""

    # Docutils settings_spec: (group title, group description, options).
    settings_spec = (
        'LaTeX-Specific Options',
        'The LaTeX "--output-encoding" default is "latin-1:strict".',
        (('Specify documentclass. Default is "article".',
          ['--documentclass'],
          {'default': 'article', }),
         ('Format for footnote references: one of "superscript" or '
          '"brackets". Default is "brackets".',
          ['--footnote-references'],
          {'choices': ['superscript', 'brackets'], 'default': 'brackets',
           'metavar': '<format>'}),
         ('Format for block quote attributions: one of "dash" (em-dash '
          'prefix), "parentheses"/"parens", or "none". Default is "dash".',
          ['--attribution'],
          {'choices': ['dash', 'parentheses', 'parens', 'none'],
           'default': 'dash', 'metavar': '<format>'}),
         ('Specify a stylesheet file. The file will be "input" by latex '
          'in the document header. Default is "style.tex". '
          'If this is set to "" disables input.'
          'Overridden by --stylesheet-path.',
          ['--stylesheet'],
          {'default': 'style.tex', 'metavar': '<file>'}),
         ('Specify a stylesheet file, relative to the current working '
          'directory.'
          'Overrides --stylesheet.',
          ['--stylesheet-path'],
          {'metavar': '<file>'}),
         ('Link to the stylesheet in the output LaTeX file. This is the '
          'default.',
          ['--link-stylesheet'],
          {'dest': 'embed_stylesheet', 'action': 'store_false'}),
         ('Embed the stylesheet in the output LaTeX file. The stylesheet '
          'file must be accessible during processing (--stylesheet-path is '
          'recommended).',
          ['--embed-stylesheet'],
          {'action': 'store_true'}),
         ('Table of contents by docutils (default) or latex. Latex(writer) '
          'supports only one ToC per document, but docutils does not write '
          'pagenumbers.',
          ['--use-latex-toc'],
          {'default': 0}),
         ('Color of any hyperlinks embedded in text '
          '(default: "blue", "0" to disable).',
          ['--hyperlink-color'],
          {'default': 'blue'}),))

    settings_defaults = {'output_encoding': 'latin-1'}

    output = None
    """Final translated form of `document`."""

    def translate(self):
        # Walk the document tree with the LaTeX-producing visitor, then
        # copy the accumulated document parts onto this writer.
        visitor = LaTeXTranslator(self.document)
        self.document.walkabout(visitor)
        self.output = visitor.astext()
        self.head_prefix = visitor.head_prefix
        self.head = visitor.head
        self.body_prefix = visitor.body_prefix
        self.body = visitor.body
        self.body_suffix = visitor.body_suffix
"""
Notes on LaTeX
--------------
* latex does not support multiple tocs in one document.
(might be no limitation except for docutils documentation)
* width
* linewidth - width of a line in the local environment
* textwidth - the width of text on the page
Maybe always use linewidth ?
"""
class Babel:
    """Language specifics for LaTeX (babel name lookup and quoting)."""

    # country code by a.schlock.
    # partly manually converted from iso and babel stuff, dialects and some
    # Maps ISO 639 codes to babel language names; entries marked XXX were
    # added by hand.  Fix: the original literal repeated the keys 'hu'
    # ('magyar', then 'hungarian') and 'pt' ('portuguese' twice); since the
    # later occurrence of a duplicate key wins, the shadowed entries were
    # dead code and have been removed -- effective values are unchanged.
    _ISO639_TO_BABEL = {
        'no': 'norsk',     #XXX added by hand ( forget about nynorsk?)
        'gd': 'scottish',  #XXX added by hand
        'sl': 'slovenian',
        'af': 'afrikaans',
        'bg': 'bulgarian',
        'br': 'breton',
        'ca': 'catalan',
        'cs': 'czech',
        'cy': 'welsh',
        'da': 'danish',
        'fr': 'french',
        # french, francais, canadien, acadian
        'de': 'ngerman',  #XXX rather than german
        # ngerman, naustrian, german, germanb, austrian
        'el': 'greek',
        'en': 'english',
        # english, USenglish, american, UKenglish, british, canadian
        'eo': 'esperanto',
        'es': 'spanish',
        'et': 'estonian',
        'eu': 'basque',
        'fi': 'finnish',
        'ga': 'irish',
        'gl': 'galician',
        'he': 'hebrew',
        'hr': 'croatian',
        'hu': 'hungarian',
        'is': 'icelandic',
        'it': 'italian',
        'la': 'latin',
        'nl': 'dutch',
        'pl': 'polish',
        'pt': 'portuguese',
        'ro': 'romanian',
        'ru': 'russian',
        'sk': 'slovak',
        'sr': 'serbian',
        'sv': 'swedish',
        'tr': 'turkish',
        'uk': 'ukrainian'
        }

    def __init__(self, lang):
        self.language = lang
        # pdflatex does not produce double quotes for ngerman in tt.
        self.double_quote_replacment = None
        if re.search('^de', self.language):
            # maybe use: {\glqq} {\grqq}.
            self.quotes = ("\"`", "\"'")
            self.double_quote_replacment = "{\\dq}"
        else:
            self.quotes = ("``", "''")
        self.quote_index = 0

    def next_quote(self):
        """Return the next quote string, alternating open/close."""
        q = self.quotes[self.quote_index]
        self.quote_index = (self.quote_index + 1) % 2
        return q

    def quote_quotes(self, text):
        """Replace each '"' in `text` with alternating language quotes."""
        t = None
        for part in text.split('"'):
            # Idiom fix: was ``t == None``; identity test is correct here.
            if t is None:
                t = part
            else:
                t += self.next_quote() + part
        return t

    def double_quotes_in_tt(self, text):
        """Replace '"' for typewriter text, when a replacement is set."""
        if not self.double_quote_replacment:
            return text
        return text.replace('"', self.double_quote_replacment)

    def get_language(self):
        """Return the babel name for self.language, or None if unknown."""
        # ``in`` replaces the Python-2-only dict.has_key() (same semantics).
        if self.language in self._ISO639_TO_BABEL:
            return self._ISO639_TO_BABEL[self.language]
        else:
            # support dialects, e.g. 'pt_BR' -> 'pt'.
            l = self.language.split("_")[0]
            if l in self._ISO639_TO_BABEL:
                return self._ISO639_TO_BABEL[l]
        return None
# Reusable LaTeX preamble fragments, extended into head_prefix by the
# translator.  Pure data: each value is a list of preamble strings.
# NOTE(review): internal whitespace of these multi-line LaTeX strings was
# reconstructed; it only affects .tex source cosmetics, not typeset output.
latex_headings = {
    'optionlist_environment': [
        # custom list environment used for option lists (label column width
        # is the environment's argument).
        '\\newcommand{\\optionlistlabel}[1]{\\bf #1 \\hfill}\n'
        '\\newenvironment{optionlist}[1]\n'
        '{\\begin{list}{}\n'
        '  {\\setlength{\\labelwidth}{#1}\n'
        '   \\setlength{\\rightmargin}{1cm}\n'
        '   \\setlength{\\leftmargin}{\\rightmargin}\n'
        '   \\addtolength{\\leftmargin}{\\labelwidth}\n'
        '   \\addtolength{\\leftmargin}{\\labelsep}\n'
        '   \\renewcommand{\\makelabel}{\\optionlistlabel}}\n'
        '}{\\end{list}}\n',
        ],
    'footnote_floats': [
        # tweak float parameters so bottom-floats (used for footnotes)
        # are placed generously.
        '% begin: floats for footnotes tweaking.\n',
        '\\setlength{\\floatsep}{0.5em}\n',
        '\\setlength{\\textfloatsep}{\\fill}\n',
        '\\addtolength{\\textfloatsep}{3em}\n',
        '\\renewcommand{\\textfraction}{0.5}\n',
        '\\renewcommand{\\topfraction}{0.5}\n',
        '\\renewcommand{\\bottomfraction}{0.5}\n',
        '\\setcounter{totalnumber}{50}\n',
        '\\setcounter{topnumber}{50}\n',
        '\\setcounter{bottomnumber}{50}\n',
        '% end floats for footnotes\n',
        ],
    'some_commands': [
        '% some commands, that could be overwritten in the style file.\n'
        '\\newcommand{\\rubric}[1]'
        '{\\subsection*{~\\hfill {\\it #1} \\hfill ~}}\n'
        '% end of "some commands"\n',
        ]
    }
class
LaTeXTranslator
(
nodes
.
NodeVisitor
):
    # When options are given to the documentclass, latex will pass them
    # to other packages, as done with babel.
    # Dummy settings might be taken from document settings

    # papersize, fontsize
    d_options = '10pt'
    d_paper = 'a4paper'
    d_margins = '2cm'

    # preamble line templates, filled in __init__.
    latex_head = '\\documentclass[%s]{%s}\n'
    encoding = '\\usepackage[%s]{inputenc}\n'
    linking = '\\usepackage[colorlinks=%s,linkcolor=%s,urlcolor=%s]{hyperref}\n'
    geometry = '\\usepackage[%s,margin=%s,nohead]{geometry}\n'
    stylesheet = '\\input{%s}\n'
    # add a generated on day , machine by user using docutils version.
    generator = '%% generator Docutils: http://docutils.sourceforge.net/\n'

    # use latex tableofcontents or let docutils do it.
    use_latex_toc = 0
    # table kind: if 0 tabularx (single page), 1 longtable
    # maybe should be decided on row count.
    use_longtable = 1
    # TODO: use mixins for different implementations.
    # list environment for option-list. else tabularx
    use_optionlist_for_option_list = 1
    # list environment for docinfo. else tabularx
    use_optionlist_for_docinfo = 0  # NOT YET IN USE

    # default link color
    hyperlink_color = "blue"
    def __init__(self, document):
        """Set up translator state and assemble the LaTeX preamble.

        Reads writer settings from ``document.settings`` (use_latex_toc,
        hyperlink_color, language_code, documentclass, output_encoding).
        """
        nodes.NodeVisitor.__init__(self, document)
        self.settings = settings = document.settings
        self.use_latex_toc = settings.use_latex_toc
        self.hyperlink_color = settings.hyperlink_color
        if self.hyperlink_color == '0':
            # '0' disables colored links: render them black, colorlinks off.
            self.hyperlink_color = 'black'
            self.colorlinks = 'false'
        else:
            self.colorlinks = 'true'

        # language: labels, bibliographic_fields, and author_separators.
        # to allow writing labes for specific languages.
        self.language = languages.get_language(settings.language_code)
        self.babel = Babel(settings.language_code)
        self.author_separator = self.language.author_separators[0]
        if self.babel.get_language():
            # pass the babel language as a documentclass option.
            self.d_options += ',%s' % self.babel.get_language()

        self.head_prefix = [
              self.latex_head % (self.d_options, self.settings.documentclass),
              '\\usepackage{babel}\n',     # language is in documents settings.
              '\\usepackage{shortvrb}\n',  # allows verb in footnotes.
              self.encoding % self.to_latex_encoding(settings.output_encoding),
              # * tabularx: for docinfo, automatic width of columns, always on one page.
              '\\usepackage{tabularx}\n',
              '\\usepackage{longtable}\n',
              # possible other packages.
              # * fancyhdr
              # * ltxtable is a combination of tabularx and longtable (pagebreaks).
              #   but ??
              #
              # extra space between text in tables and the line above them
              '\\setlength{\\extrarowheight}{2pt}\n',
              '\\usepackage{amsmath}\n',   # what fore amsmath.
              '\\usepackage{graphicx}\n',
              '\\usepackage{color}\n',
              '\\usepackage{multirow}\n',
              self.linking % (self.colorlinks, self.hyperlink_color,
                              self.hyperlink_color),
              # geometry and fonts might go into style.tex.
              self.geometry % (self.d_paper, self.d_margins),
              #
              self.generator,
              # latex lengths
              '\\newlength{\\admonitionwidth}\n',
              # NOTE: the next two literals concatenate (no comma) — one
              # list element carries both setlength and newlength lines.
              '\\setlength{\\admonitionwidth}{0.9\\textwidth}\n'
              # width for docinfo tablewidth
              '\\newlength{\\docinfowidth}\n',
              '\\setlength{\\docinfowidth}{0.9\\textwidth}\n'
              ]
        self.head_prefix.extend( latex_headings['optionlist_environment'] )
        self.head_prefix.extend( latex_headings['footnote_floats'] )
        self.head_prefix.extend( latex_headings['some_commands'] )
        ## stylesheet is last: so it might be possible to overwrite defaults.
        stylesheet = self.get_stylesheet_reference()
        if stylesheet:
            self.head_prefix.append(self.stylesheet % (stylesheet))

        if self.linking:  # and maybe check for pdf
            self.pdfinfo = [ ]
            self.pdfauthor = None
            # pdftitle, pdfsubject, pdfauthor, pdfkeywords, pdfcreator, pdfproducer
        else:
            self.pdfinfo = None

        # NOTE: Latex wants a date and an author, rst puts this into
        # docinfo, so normally we donot want latex author/date handling.
        # latex article has its own handling of date and author, deactivate.
        self.latex_docinfo = 0
        self.head = [ ]
        if not self.latex_docinfo:
            # suppress latex's own author/date output.
            self.head.extend( [ '\\author{}\n', '\\date{}\n', ] )
        self.body_prefix = ['\\raggedbottom\n']
        # separate title, so we can appen subtitle.
        self.title = ""
        self.body = []
        self.body_suffix = ['\n']
        self.section_level = 0
        self.context = []
        self.topic_class = ''
        # column specification for tables
        self.colspecs = []
        # Flags to encode
        # ---------------
        # verbatim: to tell encode not to encode.
        self.verbatim = 0
        # insert_newline: to tell encode to replace blanks by "~".
        self.insert_none_breaking_blanks = 0
        # insert_newline: to tell encode to add latex newline.
        self.insert_newline = 0
        # mbox_newline: to tell encode to add mbox and newline.
        self.mbox_newline = 0
        # enumeration is done by list environment.
        self._enum_cnt = 0
        # docinfo.
        self.docinfo = None
        # inside literal block: no quote mangling.
        self.literal_block = 0
        self.literal = 0
def
get_stylesheet_reference
(
self
):
if
self
.
settings
.
stylesheet_path
:
return
self
.
settings
.
stylesheet_path
else
:
return
self
.
settings
.
stylesheet
def
to_latex_encoding
(
self
,
docutils_encoding
):
"""
Translate docutils encoding name into latex's.
Default fallback method is remove "-" and "_" chars from docutils_encoding.
"""
tr
=
{
"iso-8859-1"
:
"latin1"
,
# west european
"iso-8859-2"
:
"latin2"
,
# east european
"iso-8859-3"
:
"latin3"
,
# esperanto, maltese
"iso-8859-4"
:
"latin4"
,
# north european,scandinavian, baltic
"iso-8859-5"
:
"iso88595"
,
# cyrillic (ISO)
"iso-8859-9"
:
"latin5"
,
# turkish
"iso-8859-15"
:
"latin9"
,
# latin9, update to latin1.
"mac_cyrillic"
:
"maccyr"
,
# cyrillic (on Mac)
"windows-1251"
:
"cp1251"
,
# cyrillic (on Windows)
"koi8-r"
:
"koi8-r"
,
# cyrillic (Russian)
"koi8-u"
:
"koi8-u"
,
# cyrillic (Ukrainian)
"windows-1250"
:
"cp1250"
,
#
"windows-1252"
:
"cp1252"
,
#
"us-ascii"
:
"ascii"
,
# ASCII (US)
# unmatched encodings
#"": "applemac",
#"": "ansinew", # windows 3.1 ansi
#"": "ascii", # ASCII encoding for the range 32--127.
#"": "cp437", # dos latine us
#"": "cp850", # dos latin 1
#"": "cp852", # dos latin 2
#"": "decmulti",
#"": "latin10",
#"iso-8859-6": "" # arabic
#"iso-8859-7": "" # greek
#"iso-8859-8": "" # hebrew
#"iso-8859-10": "" # latin6, more complete iso-8859-4
}
if
tr
.
has_key
(
docutils_encoding
.
lower
()):
return
tr
[
docutils_encoding
.
lower
()]
return
docutils_encoding
.
translate
(
string
.
maketrans
(
""
,
""
),
"_-"
).
lower
()
def
language_label
(
self
,
docutil_label
):
return
self
.
language
.
labels
[
docutil_label
]
    def encode(self, text):
        """
        Encode special characters in `text` & return.

            # $ % & ~ _ ^ \ { }
        Escaping with a backslash does not help with backslashes, ~ and ^.
            < > are only available in math-mode (really ?)
            $ starts math- mode.
        AND quotes:

        The replacement order below is significant: braces first, then
        backslashes (excluding the just-produced '{\\{}'/'{\\}}' forms),
        then everything else; flag attributes (verbatim, literal[_block],
        insert_newline, mbox_newline, insert_none_breaking_blanks) alter
        the treatment.
        """
        if self.verbatim:
            # inside a verbatim environment nothing must be escaped.
            return text
        # compile the regexps once. do it here so one can see them.
        #
        # first the braces.
        if not self.__dict__.has_key('encode_re_braces'):
            self.encode_re_braces = re.compile(r'([{}])')
        text = self.encode_re_braces.sub(r'{\\\1}', text)
        if not self.__dict__.has_key('encode_re_bslash'):
            # find backslash: except in the form '{\{}' or '{\}}'.
            self.encode_re_bslash = re.compile(r'(?<!{)(\\)(?![{}]})')
        # then the backslash: except in the form from line above:
        # either '{\{}' or '{\}}'.
        text = self.encode_re_bslash.sub(r'{\\textbackslash}', text)
        # then dollar
        text = text.replace("$", '{\\$}')
        # then all that needs math mode
        text = text.replace("<", '{$<$}')
        text = text.replace(">", '{$>$}')
        # then
        text = text.replace("&", '{\\&}')
        text = text.replace("_", '{\\_}')
        # the ^:
        # * verb|^| does not work in mbox.
        # * mathmode has wedge. hat{~} would also work.
        text = text.replace("^", '{\\ensuremath{^\\wedge}}')
        text = text.replace("%", '{\\%}')
        text = text.replace("#", '{\\#}')
        text = text.replace("~", '{\\~{ }}')
        if self.literal_block or self.literal:
            # pdflatex does not produce doublequotes for ngerman.
            text = self.babel.double_quotes_in_tt(text)
        else:
            text = self.babel.quote_quotes(text)
        if self.insert_newline:
            # HACK: insert a blank before the newline, to avoid
            # ! LaTeX Error: There's no line here to end.
            text = text.replace("\n", '~\\\\\n')
        elif self.mbox_newline:
            # close the current mbox, break the line, open a new mbox.
            text = text.replace("\n", '}\\\\\n\\mbox{')
        if self.insert_none_breaking_blanks:
            text = text.replace(' ', '~')
        # unicode !!!
        text = text.replace(u'\u2020', '{$\\dagger$}')
        return text
def
attval
(
self
,
text
,
whitespace
=
re
.
compile
(
'[
\
n
\
r
\
t
\
v
\
f
]'
)):
"""Cleanse, encode, and return attribute value text."""
return
self
.
encode
(
whitespace
.
sub
(
' '
,
text
))
def
astext
(
self
):
if
self
.
pdfinfo
:
if
self
.
pdfauthor
:
self
.
pdfinfo
.
append
(
'pdfauthor={%s}'
%
self
.
pdfauthor
)
pdfinfo
=
'
\
\
hypersetup{
\
n
'
+
',
\
n
'
.
join
(
self
.
pdfinfo
)
+
'
\
n
}
\
n
'
else
:
pdfinfo
=
''
title
=
'
\
\
title{%s}
\
n
'
%
self
.
title
return
''
.
join
(
self
.
head_prefix
+
[
title
]
+
self
.
head
+
[
pdfinfo
]
+
self
.
body_prefix
+
self
.
body
+
self
.
body_suffix
)
    def visit_Text(self, node):
        # every text node passes through encode() for LaTeX escaping.
        self.body.append(self.encode(node.astext()))

    def depart_Text(self, node):
        pass

    def visit_address(self, node):
        self.visit_docinfo_item(node, 'address')

    def depart_address(self, node):
        self.depart_docinfo_item(node)

    def visit_admonition(self, node, name):
        # admonitions render as a centered, framed sans-serif parbox with
        # the language-specific label in bold; closed by depart_admonition.
        self.body.append('\\begin{center}\\begin{sffamily}\n')
        self.body.append('\\fbox{\\parbox{\\admonitionwidth}{\n')
        self.body.append('\\textbf{\\large '
                         + self.language.labels[name] + '}\n');
        self.body.append('\\vspace{2mm}\n')

    def depart_admonition(self):
        # NOTE: takes no `node` argument (callers invoke it without one).
        self.body.append('}}\n')  # end parbox fbox
        self.body.append('\\end{sffamily}\n\\end{center}\n');
    # -- simple admonition/docinfo delegations and inline wrappers --

    def visit_attention(self, node):
        self.visit_admonition(node, 'attention')

    def depart_attention(self, node):
        self.depart_admonition()

    def visit_author(self, node):
        self.visit_docinfo_item(node, 'author')

    def depart_author(self, node):
        self.depart_docinfo_item(node)

    def visit_authors(self, node):
        # ignore. visit_author is called for each one
        # self.visit_docinfo_item(node, 'author')
        pass

    def depart_authors(self, node):
        # self.depart_docinfo_item(node)
        pass

    def visit_block_quote(self, node):
        self.body.append('\\begin{quote}\n')

    def depart_block_quote(self, node):
        self.body.append('\\end{quote}\n')

    def visit_bullet_list(self, node):
        # a docutils-generated "contents" topic becomes a plain list
        # (unless latex builds the toc itself).
        if not self.use_latex_toc and self.topic_class == 'contents':
            self.body.append('\\begin{list}{}{}\n')
        else:
            self.body.append('\\begin{itemize}\n')

    def depart_bullet_list(self, node):
        if not self.use_latex_toc and self.topic_class == 'contents':
            self.body.append('\\end{list}\n')
        else:
            self.body.append('\\end{itemize}\n')

    def visit_caption(self, node):
        self.body.append('\\caption{')

    def depart_caption(self, node):
        self.body.append('}')

    def visit_caution(self, node):
        self.visit_admonition(node, 'caution')

    def depart_caution(self, node):
        self.depart_admonition()

    def visit_citation(self, node):
        # citations share the footnote implementation (bottom floats).
        self.visit_footnote(node)

    def depart_citation(self, node):
        self.depart_footnote(node)

    def visit_title_reference(self, node):
        # BUG title-references are what?
        pass

    def depart_title_reference(self, node):
        pass

    def visit_citation_reference(self, node):
        # emit "[\hyperlink{target}{...}]" around the reference text.
        href = ''
        if node.has_key('refid'):
            href = node['refid']
        elif node.has_key('refname'):
            href = self.document.nameids[node['refname']]
        self.body.append('[\\hyperlink{%s}{' % href)

    def depart_citation_reference(self, node):
        self.body.append('}]')
    def visit_classifier(self, node):
        self.body.append( '(\\textbf{' )

    def depart_classifier(self, node):
        self.body.append( '})\n' )

    def visit_colspec(self, node):
        if self.use_longtable:
            # collect the specs; consumed when the table is emitted.
            self.colspecs.append(node)
        else:
            # count columns in the context slot pushed by the table visitor.
            self.context[-1] += 1

    def depart_colspec(self, node):
        pass

    def visit_comment(self, node,
                      sub=re.compile('\n').sub):
        """Escape end of line by a new comment start in comment text."""
        self.body.append('%% %s\n' % sub('\n% ', node.astext()))
        # comments produce no further output: skip children.
        raise nodes.SkipNode
    # -- docinfo delegations and definition-list markup --

    def visit_contact(self, node):
        self.visit_docinfo_item(node, 'contact')

    def depart_contact(self, node):
        self.depart_docinfo_item(node)

    def visit_copyright(self, node):
        self.visit_docinfo_item(node, 'copyright')

    def depart_copyright(self, node):
        self.depart_docinfo_item(node)

    def visit_danger(self, node):
        self.visit_admonition(node, 'danger')

    def depart_danger(self, node):
        self.depart_admonition()

    def visit_date(self, node):
        self.visit_docinfo_item(node, 'date')

    def depart_date(self, node):
        self.depart_docinfo_item(node)

    def visit_decoration(self, node):
        pass

    def depart_decoration(self, node):
        pass

    def visit_definition(self, node):
        # the '%[...]' strings are LaTeX comments left as source markers.
        self.body.append('%[visit_definition]\n')

    def depart_definition(self, node):
        self.body.append('\n')
        self.body.append('%[depart_definition]\n')

    def visit_definition_list(self, node):
        self.body.append( '\\begin{description}\n' )

    def depart_definition_list(self, node):
        self.body.append( '\\end{description}\n' )

    def visit_definition_list_item(self, node):
        self.body.append('%[visit_definition_list_item]\n')

    def depart_definition_list_item(self, node):
        self.body.append('%[depart_definition_list_item]\n')

    def visit_description(self, node):
        # in tabularx mode the description is the second column (' & ').
        if self.use_optionlist_for_option_list:
            self.body.append( ' ' )
        else:
            self.body.append( ' & ' )

    def depart_description(self, node):
        pass
    def visit_docinfo(self, node):
        # collect docinfo output separately (a two-column tabularx);
        # it is prepended to the body in depart_docinfo.
        self.docinfo = []
        self.docinfo.append('%' + '_'*75 + '\n')
        self.docinfo.append('\\begin{center}\n')
        self.docinfo.append('\\begin{tabularx}{\\docinfowidth}{lX}\n')

    def depart_docinfo(self, node):
        self.docinfo.append('\\end{tabularx}\n')
        self.docinfo.append('\\end{center}\n')
        self.body = self.docinfo + self.body
        # clear docinfo, so field names are no longer appended.
        self.docinfo = None
        if self.use_latex_toc:
            # latex-built toc goes right after the docinfo table.
            self.body.append('\\tableofcontents\n\n\\bigskip\n')
    def visit_docinfo_item(self, node, name):
        """Start a docinfo row for field `name`; push (tail, dest, start)
        markers on self.context so depart_docinfo_item can move the
        produced body text into self.docinfo."""
        if not self.latex_docinfo:
            self.docinfo.append('\\textbf{%s}: &\n\t'
                                % self.language_label(name))
        if name == 'author':
            if not self.pdfinfo == None:
                # accumulate authors for the pdfauthor hypersetup entry.
                if not self.pdfauthor:
                    self.pdfauthor = self.attval(node.astext())
                else:
                    self.pdfauthor += (self.author_separator
                                       + self.attval(node.astext()))
            if self.latex_docinfo:
                self.head.append('\\author{%s}\n'
                                 % self.attval(node.astext()))
                raise nodes.SkipNode
        elif name == 'date':
            if self.latex_docinfo:
                self.head.append('\\date{%s}\n'
                                 % self.attval(node.astext()))
                raise nodes.SkipNode
        if name == 'address':
            # BUG will fail if latex_docinfo is set.
            self.insert_newline = 1
            self.docinfo.append('{\\raggedright\n')
            self.context.append(' }\\\\\n')
        else:
            self.context.append('\\\\\n')
        self.context.append(self.docinfo)
        self.context.append(len(self.body))

    def depart_docinfo_item(self, node):
        # pop in reverse push order: start index, destination list, tail.
        size = self.context.pop()
        dest = self.context.pop()
        tail = self.context.pop()
        tail = self.body[size:] + [tail]
        del self.body[size:]
        dest.extend(tail)
        # for address we did set insert_newline
        self.insert_newline = 0
    def visit_doctest_block(self, node):
        self.body.append( '\\begin{verbatim}' )
        # verbatim flag makes encode() pass text through untouched.
        self.verbatim = 1

    def depart_doctest_block(self, node):
        self.body.append( '\\end{verbatim}\n' )
        self.verbatim = 0

    def visit_document(self, node):
        self.body_prefix.append('\\begin{document}\n')
        self.body_prefix.append('\\maketitle\n\n')
        # alternative use titlepage environment.
        # \begin{titlepage}

    def depart_document(self, node):
        self.body_suffix.append('\\end{document}\n')

    def visit_emphasis(self, node):
        self.body.append('\\emph{')

    def depart_emphasis(self, node):
        self.body.append('}')
    def visit_entry(self, node):
        """Open a table cell; pushes one context entry for the multirow/
        multicolumn closer and one for the header-bold closer."""
        # cell separation: context[-1] is the per-row cell counter
        # (pushed by visit_row).
        column_one = 1
        if self.context[-1] > 0:
            column_one = 0
        if not column_one:
            self.body.append(' & ')
        # multi{row,column}
        if node.has_key('morerows') and node.has_key('morecols'):
            raise NotImplementedError('LaTeX can\'t handle cells that '
                'span multiple rows *and* columns, sorry.')
        atts = {}
        if node.has_key('morerows'):
            count = node['morerows'] + 1
            self.body.append('\\multirow{%d}*{' % count)
            self.context.append('}')
        elif node.has_key('morecols'):
            # the vertical bar before column is missing if it is the first column.
            # the one after always.
            if column_one:
                bar = '|'
            else:
                bar = ''
            count = node['morecols'] + 1
            self.body.append('\\multicolumn{%d}{%sl|}{' % (count, bar))
            self.context.append('}')
        else:
            self.context.append('')
        # header / not header
        if isinstance(node.parent.parent, nodes.thead):
            self.body.append('\\textbf{')
            self.context.append('}')
        else:
            self.context.append('')

    def depart_entry(self, node):
        self.body.append(self.context.pop())  # header / not header
        self.body.append(self.context.pop())  # multirow/column
        # advance the per-row cell counter.
        self.context[-1] += 1
    def visit_enumerated_list(self, node):
        # We create our own enumeration list environment.
        # This allows to set the style and starting value
        # and unlimited nesting.
        self._enum_cnt += 1

        # docutils enumtype -> latex counter-formatting command.
        enum_style = {'arabic':'arabic',
                'loweralpha':'alph',
                'upperalpha':'Alph',
                'lowerroman':'roman',
                'upperroman':'Roman' }
        enum_suffix = ""
        if node.has_key('suffix'):
            enum_suffix = node['suffix']
        enum_prefix = ""
        if node.has_key('prefix'):
            enum_prefix = node['prefix']
        enum_type = "arabic"
        if node.has_key('enumtype'):
            enum_type = node['enumtype']
        if enum_style.has_key(enum_type):
            enum_type = enum_style[enum_type]
        # one fresh counter per list so nesting cannot clash.
        counter_name = "listcnt%d" % self._enum_cnt;
        self.body.append('\\newcounter{%s}\n' % counter_name)
        self.body.append('\\begin{list}{%s\\%s{%s}%s}\n' % \
            (enum_prefix, enum_type, counter_name, enum_suffix))
        self.body.append('{\n')
        self.body.append('\\usecounter{%s}\n' % counter_name)
        # set start after usecounter, because it initializes to zero.
        if node.has_key('start'):
            self.body.append('\\addtocounter{%s}{%d}\n' \
                % (counter_name, node['start'] - 1))
        ## set rightmargin equal to leftmargin
        self.body.append('\\setlength{\\rightmargin}{\\leftmargin}\n')
        self.body.append('}\n')

    def depart_enumerated_list(self, node):
        self.body.append('\\end{list}\n')

    def visit_error(self, node):
        self.visit_admonition(node, 'error')

    def depart_error(self, node):
        self.depart_admonition()
    def visit_field(self, node):
        # real output is done in siblings: _argument, _body, _name
        pass

    def depart_field(self, node):
        self.body.append('\n')
        ##self.body.append('%[depart_field]\n')

    def visit_field_argument(self, node):
        self.body.append('%[visit_field_argument]\n')

    def depart_field_argument(self, node):
        self.body.append('%[depart_field_argument]\n')

    def visit_field_body(self, node):
        # BUG by attach as text we loose references.
        if self.docinfo:
            # inside the docinfo table: second column ends the row.
            self.docinfo.append('%s \\\\\n' % node.astext())
            raise nodes.SkipNode
        # BUG: what happens if not docinfo

    def depart_field_body(self, node):
        self.body.append( '\n' )

    def visit_field_list(self, node):
        # field lists outside docinfo become a quoted description list.
        if not self.docinfo:
            self.body.append('\\begin{quote}\n')
            self.body.append('\\begin{description}\n')

    def depart_field_list(self, node):
        if not self.docinfo:
            self.body.append('\\end{description}\n')
            self.body.append('\\end{quote}\n')

    def visit_field_name(self, node):
        # BUG this duplicates docinfo_item
        if self.docinfo:
            self.docinfo.append('\\textbf{%s}: &\n\t' % node.astext())
            raise nodes.SkipNode
        else:
            self.body.append('\\item [')

    def depart_field_name(self, node):
        if not self.docinfo:
            self.body.append(':]')
    def visit_figure(self, node):
        self.body.append( '\\begin{figure}\n' )

    def depart_figure(self, node):
        self.body.append( '\\end{figure}\n' )

    def visit_footer(self, node):
        # remember where footer content starts in the body.
        self.context.append(len(self.body))

    def depart_footer(self, node):
        # move the produced footer text into body_suffix, centered/small.
        start = self.context.pop()
        footer = (['\n\\begin{center}\\small\n']
                  + self.body[start:]
                  + ['\n\\end{center}\n'])
        self.body_suffix[:0] = footer
        del self.body[start:]

    def visit_footnote(self, node):
        # footnotes are emitted as bottom floats with a hypertarget.
        notename = node['id']
        self.body.append('\\begin{figure}[b]')
        self.body.append('\\hypertarget{%s}' % notename)

    def depart_footnote(self, node):
        self.body.append('\\end{figure}\n')
    def visit_footnote_reference(self, node):
        """Open a hyperlinked footnote marker, bracketed or superscript
        per the footnote_references setting; the closer is pushed on
        self.context."""
        href = ''
        if node.has_key('refid'):
            href = node['refid']
        elif node.has_key('refname'):
            href = self.document.nameids[node['refname']]
        format = self.settings.footnote_references
        if format == 'brackets':
            suffix = '['
            self.context.append(']')
        elif format == 'superscript':
            suffix = '\\raisebox{.5em}[0em]{\\scriptsize'
            self.context.append('}')
        else:
            # shouldn't happen
            raise AssertionError('Illegal footnote reference format.')
        self.body.append('%s\\hyperlink{%s}{' % (suffix, href))

    def depart_footnote_reference(self, node):
        self.body.append('}%s' % self.context.pop())
    def visit_generated(self, node):
        pass

    def depart_generated(self, node):
        pass

    def visit_header(self, node):
        # remember where header content starts in the body.
        self.context.append(len(self.body))

    def depart_header(self, node):
        # move the produced header text into body_prefix, marked with
        # verb delimiters.
        start = self.context.pop()
        self.body_prefix.append('\n\\verb|begin_header|\n')
        self.body_prefix.extend(self.body[start:])
        self.body_prefix.append('\n\\verb|end_header|\n')
        del self.body[start:]

    def visit_hint(self, node):
        self.visit_admonition(node, 'hint')

    def depart_hint(self, node):
        self.depart_admonition()

    def visit_image(self, node):
        atts = node.attributes.copy()
        href = atts['uri']
        ##self.body.append('\\begin{center}\n')
        self.body.append('\n\\includegraphics{%s}\n' % href)
        ##self.body.append('\\end{center}\n')

    def depart_image(self, node):
        pass

    def visit_important(self, node):
        self.visit_admonition(node, 'important')

    def depart_important(self, node):
        self.depart_admonition()
    def visit_interpreted(self, node):
        # @@@ Incomplete, pending a proper implementation on the
        # Parser/Reader end.
        self.visit_literal(node)

    def depart_interpreted(self, node):
        self.depart_literal(node)

    def visit_label(self, node):
        # footnote/citation label
        self.body.append('[')

    def depart_label(self, node):
        self.body.append(']')

    def visit_legend(self, node):
        self.body.append('{\\small ')

    def depart_legend(self, node):
        self.body.append('}')

    def visit_line_block(self, node):
        """line-block:
        * whitespace (including linebreaks) is significant
        * inline markup is supported.
        * serif typeface
        """
        self.body.append('\\begin{flushleft}\n')
        self.insert_none_breaking_blanks = 1
        # hard-wired choice between plain newlines and mbox-wrapped lines.
        self.line_block_without_mbox = 1
        if self.line_block_without_mbox:
            self.insert_newline = 1
        else:
            self.mbox_newline = 1
            self.body.append('\\mbox{')

    def depart_line_block(self, node):
        if self.line_block_without_mbox:
            self.insert_newline = 0
        else:
            self.body.append('}')
            self.mbox_newline = 0
        self.insert_none_breaking_blanks = 0
        self.body.append('\n\\end{flushleft}\n')

    def visit_list_item(self, node):
        self.body.append('\\item ')

    def depart_list_item(self, node):
        self.body.append('\n')

    def visit_literal(self, node):
        # literal flag switches encode() to tt-safe quote handling.
        self.literal = 1
        self.body.append('\\texttt{')

    def depart_literal(self, node):
        self.body.append('}')
        self.literal = 0
    def visit_literal_block(self, node):
        """
        .. parsed-literal::
        """
        # typically in a typewriter/monospaced typeface.
        # care must be taken with the text, because inline markup is recognized.
        #
        # possibilities:
        # * verbatim: is no possibility, as inline markup does not work.
        # * obey..: is from julien and never worked for me (grubert).
        self.use_for_literal_block = "mbox"
        self.literal_block = 1
        if (self.use_for_literal_block == "mbox"):
            self.mbox_newline = 1
            self.insert_none_breaking_blanks = 1
            self.body.append('\\begin{ttfamily}\\begin{flushleft}\n\\mbox{')
        else:
            self.body.append('{\\obeylines\\obeyspaces\\ttfamily\n')

    def depart_literal_block(self, node):
        if (self.use_for_literal_block == "mbox"):
            self.body.append('}\n\\end{flushleft}\\end{ttfamily}\n')
            self.insert_none_breaking_blanks = 0
            self.mbox_newline = 0
        else:
            self.body.append('}\n')
        self.literal_block = 0
    def visit_meta(self, node):
        self.body.append('[visit_meta]\n')
        # BUG maybe set keywords for pdf
        ##self.head.append(self.starttag(node, 'meta', **node.attributes))

    def depart_meta(self, node):
        self.body.append('[depart_meta]\n')

    def visit_note(self, node):
        self.visit_admonition(node, 'note')

    def depart_note(self, node):
        self.depart_admonition()

    def visit_option(self, node):
        # context[-1] is the option counter pushed by visit_option_group.
        if self.context[-1]:
            # this is not the first option
            self.body.append(', ')

    def depart_option(self, node):
        # flag that the first option is done.
        self.context[-1] += 1

    def visit_option_argument(self, node):
        """The delimiter between an option and its argument."""
        self.body.append(node.get('delimiter', ' '))

    def depart_option_argument(self, node):
        pass
    def visit_option_group(self, node):
        """Open an option group; pushes a closer (tabularx mode only)
        and a first-option counter on self.context."""
        if self.use_optionlist_for_option_list:
            self.body.append('\\item [')
        else:
            atts = {}
            if len(node.astext()) > 14:
                # long option text gets its own spanning row.
                self.body.append('\\multicolumn{2}{l}{')
                self.context.append('}\\\\\n')
            else:
                self.context.append('')
            self.body.append('\\texttt{')
        # flag for first option
        self.context.append(0)

    def depart_option_group(self, node):
        self.context.pop()  # the flag
        if self.use_optionlist_for_option_list:
            self.body.append('] ')
        else:
            self.body.append('}')
            self.body.append(self.context.pop())

    def visit_option_list(self, node):
        self.body.append('% [option list]\n')
        if self.use_optionlist_for_option_list:
            self.body.append('\\begin{optionlist}{3cm}\n')
        else:
            self.body.append('\\begin{center}\n')
            # BUG: use admwidth or make it relative to textwidth ?
            self.body.append('\\begin{tabularx}{.9\\linewidth}{lX}\n')

    def depart_option_list(self, node):
        if self.use_optionlist_for_option_list:
            self.body.append('\\end{optionlist}\n')
        else:
            self.body.append('\\end{tabularx}\n')
            self.body.append('\\end{center}\n')

    def visit_option_list_item(self, node):
        pass

    def depart_option_list_item(self, node):
        if not self.use_optionlist_for_option_list:
            self.body.append('\\\\\n')
    def visit_option_string(self, node):
        ##self.body.append(self.starttag(node, 'span', '', CLASS='option'))
        pass

    def depart_option_string(self, node):
        ##self.body.append('</span>')
        pass

    def visit_organization(self, node):
        self.visit_docinfo_item(node, 'organization')

    def depart_organization(self, node):
        self.depart_docinfo_item(node)

    def visit_paragraph(self, node):
        # no leading blank line inside a "contents" topic.
        if not self.topic_class == 'contents':
            self.body.append('\n')

    def depart_paragraph(self, node):
        # NOTE(review): both branches append the same '\n' — the
        # distinction looks vestigial; kept as-is.
        if self.topic_class == 'contents':
            self.body.append('\n')
        else:
            self.body.append('\n')
def
visit_problematic
(
self
,
node
):
self
.
body
.
append
(
'{
\
\
color{red}
\
\
bfseries{}'
)
def
depart_problematic
(
self
,
node
):
self
.
body
.
append
(
'}'
)
def visit_raw(self, node):
    # Pass raw content through verbatim, but only when it is marked for
    # the 'latex' format; raw content for other formats is dropped.
    # Children are never processed (SkipNode).
    if node.has_key('format') and node['format'].lower() == 'latex':
        self.body.append(node.astext())
    raise nodes.SkipNode
def visit_reference(self, node):
    # for pdflatex hyperrefs might be supported
    # Resolve the link target in precedence order: external URI,
    # direct internal id, then an indirect name looked up in the
    # document's name->id map.
    if node.has_key('refuri'):
        href = node['refuri']
    elif node.has_key('refid'):
        href = '#' + node['refid']
    elif node.has_key('refname'):
        href = '#' + self.document.nameids[node['refname']]
    # NOTE(review): if none of the three keys is present, ``href`` is
    # unbound and the append below raises NameError -- presumably the
    # parser guarantees one of them; confirm.
    ##self.body.append('[visit_reference]')
    self.body.append('\\href{%s}{' % href)

def depart_reference(self, node):
    # Close the \href group opened in visit_reference.
    self.body.append('}')
    ##self.body.append('[depart_reference]')
def visit_revision(self, node):
    """Render the docinfo field 'revision'."""
    self.visit_docinfo_item(node, 'revision')

def depart_revision(self, node):
    """Close the docinfo field 'revision'."""
    self.depart_docinfo_item(node)
def visit_row(self, node):
    """Start a table row: push a fresh cell counter onto context."""
    self.context.append(0)

def depart_row(self, node):
    """Finish a row: drop the cell counter, end the row, draw a rule."""
    self.context.pop()  # remove cell counter
    self.body.append('\\\\\\hline\n')
def visit_section(self, node):
    """Track nesting depth on the way into a section."""
    self.section_level = self.section_level + 1

def depart_section(self, node):
    """Track nesting depth on the way out of a section."""
    self.section_level = self.section_level - 1
def visit_sidebar(self, node):
    """Open a sidebar: centered sans-serif text in a grey, framed box."""
    # BUG: this is just a hack to make sidebars render something
    opening = [
        '\\begin{center}\\begin{sffamily}\n',
        '\\fbox{\\colorbox[gray]{0.80}{\\parbox{\\admonitionwidth}{\n',
    ]
    self.body.extend(opening)
def depart_sidebar(self, node):
    """Close the parbox/colorbox/fbox, then sffamily and center."""
    self.body.append('}}}\n')  # end parbox colorbox fbox
    self.body.append('\\end{sffamily}\n\\end{center}\n')
# Mapping of the ``attribution`` setting to the (prefix, suffix) pair
# wrapped around an attribution's text by visit/depart_attribution.
attribution_formats = {'dash': ('---', ''),
                       'parentheses': ('(', ')'),
                       'parens': ('(', ')'),
                       'none': ('', '')}
def visit_attribution(self, node):
    """Start a right-aligned attribution block.

    The (prefix, suffix) pair comes from ``attribution_formats`` keyed
    by the ``attribution`` setting; the suffix is saved for depart.
    """
    opener, closer = self.attribution_formats[self.settings.attribution]
    self.body.append('\n\\begin{flushright}\n')
    self.body.append(opener)
    self.context.append(closer)

def depart_attribution(self, node):
    """Emit the saved suffix and close the flushright environment."""
    closer = self.context.pop()
    self.body.append(closer + '\n')
    self.body.append('\\end{flushright}\n')
def visit_status(self, node):
    """Render the docinfo field 'status'."""
    self.visit_docinfo_item(node, 'status')

def depart_status(self, node):
    """Close the docinfo field 'status'."""
    self.depart_docinfo_item(node)
def visit_strong(self, node):
    """Strong emphasis renders as \\textbf."""
    self.body.append('\\textbf{')

def depart_strong(self, node):
    """Close the \\textbf group."""
    self.body.append('}')
def visit_substitution_definition(self, node):
    # Substitution definitions produce no output of their own; skip
    # the node together with all of its children.
    raise nodes.SkipNode
def visit_substitution_reference(self, node):
    # Substitution references should have been resolved before the
    # writer runs; reaching this is an internal error.
    self.unimplemented_visit(node)
def visit_subtitle(self, node):
    # Sidebar subtitles are rendered inline in bold with a closer left
    # on context; a document subtitle is folded into the stored title
    # (used later when building the preamble) and its children skipped.
    if isinstance(node.parent, nodes.sidebar):
        self.body.append('~\\\\\n\\textbf{')
        self.context.append('}\n\\smallskip\n')
    else:
        self.title = self.title + \
                '\\\\\n\\large{%s}\n' % self.encode(node.astext())
        raise nodes.SkipNode
def depart_subtitle(self, node):
    # Only the sidebar branch of visit_subtitle pushed a closer; the
    # document-subtitle branch raised SkipNode and never gets here.
    if isinstance(node.parent, nodes.sidebar):
        self.body.append(self.context.pop())
def visit_system_message(self, node):
    # Suppress system messages below the writer's report threshold.
    if node['level'] < self.document.reporter['writer'].report_level:
        raise nodes.SkipNode

def depart_system_message(self, node):
    self.body.append('\n')
def get_colspecs(self):
    """
    Return column specification for longtable.

    Assumes reST line length being 80 characters.
    """
    width = 80
    # Shrink slightly so the table never spans the full line width;
    # shrink further if the declared column widths overflow the line.
    factor = 0.93
    total_width = 0.0
    for node in self.colspecs:
        total_width = total_width + float(node['colwidth']) / width
    if total_width > 1.0:
        factor = factor / total_width
    parts = []
    for node in self.colspecs:
        fraction = factor * float(node['colwidth']) / width
        parts.append("|p{%.2f\\linewidth}" % fraction)
    # Consume the collected colspecs so the next table starts fresh.
    self.colspecs = []
    return "".join(parts) + "|"
def visit_table(self, node):
    """Open a longtable or tabularx and seed context with the sentinel
    and a column counter."""
    if self.use_longtable:
        env_open = '\n\\begin{longtable}[c]'
    else:
        env_open = '\n\\begin{tabularx}{\\linewidth}'
    self.body.append(env_open)
    self.context.append('table_sentinel')  # sentinel
    self.context.append(0)  # column counter
def depart_table(self, node):
    # Close the table environment, then pop context down to the
    # sentinel pushed by visit_table; anything else signals unbalanced
    # context pushes somewhere in the table handling.
    if self.use_longtable:
        self.body.append('\\end{longtable}\n')
    else:
        self.body.append('\\end{tabularx}\n')
    sentinel = self.context.pop()
    if sentinel != 'table_sentinel':
        print 'context:', self.context + [sentinel]
        raise AssertionError
def table_preamble(self):
    """Emit the table's column specification.

    longtable gets computed p{} widths; tabularx gets '|X' columns
    derived from the column counter left on context (unless only the
    sentinel remains, in which case the preamble was already written).
    """
    if self.use_longtable:
        self.body.append('{%s}\n' % self.get_colspecs())
        return
    if self.context[-1] != 'table_sentinel':
        columns = self.context.pop()
        self.body.append('{%s}' % ('|X' * columns + '|'))
        self.body.append('\n\\hline')
def visit_target(self, node):
    # Only explicit (non-reference) targets produce a \hypertarget;
    # indirect targets emit nothing but must still push a closer for
    # depart_target to pop.
    if not (node.has_key('refuri') or node.has_key('refid')
            or node.has_key('refname')):
        self.body.append('\\hypertarget{%s}{' % node['name'])
        self.context.append('}')
    else:
        self.context.append('')
def depart_target(self, node):
    """Emit whatever closer visit_target stored ('}' or '')."""
    closer = self.context.pop()
    self.body.append(closer)
def visit_tbody(self, node):
    """Start the table body; synthesize an empty header pass when no
    thead consumed the collected colspecs."""
    # BUG write preamble if not yet done (colspecs not [])
    # for tables without heads.
    if self.colspecs:
        self.visit_thead(None)
        self.depart_thead(None)
    self.body.append('%[visit_tbody]\n')

def depart_tbody(self, node):
    """Leave a marker comment for the end of the table body."""
    self.body.append('%[depart_tbody]\n')
def visit_term(self, node):
    """Open the \\item label of a definition-list entry."""
    self.body.append('\\item[')

def depart_term(self, node):
    """Close the definition list term's label; the colon mirrors the
    reST source."""
    self.body.append(':]\n')
def visit_tgroup(self, node):
    # Column geometry is handled via colspec/thead; nothing to emit.
    # The disabled lines are leftover HTML-writer code.
    #self.body.append(self.starttag(node, 'colgroup'))
    #self.context.append('</colgroup>\n')
    pass

def depart_tgroup(self, node):
    pass
def visit_thead(self, node):
    # Emit the column spec and the rule above the header row.
    # number_of_columns will be zero after get_colspecs.
    # BUG ! push onto context for depart to pop it.
    # (number_of_columns is currently unused -- kept as the placeholder
    # the BUG note refers to.)
    number_of_columns = len(self.colspecs)
    self.table_preamble()
    #BUG longtable needs firstpage and lastfooter too.
    self.body.append('\\hline\n')
def depart_thead(self, node):
    # For longtable, mark the header rows so they repeat on every page.
    if self.use_longtable:
        # the table header written should be on every page
        # => \endhead
        self.body.append('\\endhead\n')
        # and the firsthead => \endfirsthead
        # BUG i want a "continued from previous page" on every not
        # firsthead, but then we need the header twice.
        #
        # there is a \endfoot and \endlastfoot too.
        # but we need the number of columns to
        # self.body.append('\\multicolumn{%d}{c}{"..."}\n' % number_of_columns)
        # self.body.append('\\hline\n\\endfoot\n')
        # self.body.append('\\hline\n')
        # self.body.append('\\endlastfoot\n')
def visit_tip(self, node):
    """Render a tip as an admonition box."""
    self.visit_admonition(node, 'tip')

def depart_tip(self, node):
    """Close the tip admonition box."""
    self.depart_admonition()
def visit_title(self, node):
    """Only 3 section levels are supported by LaTeX article (AFAIR).

    Four cases: a topic's title (e.g. before the table of contents),
    a sidebar title, the document title (level 0, folded into
    ``self.title`` / pdfinfo and skipped), and ordinary section titles.
    Every non-skipping branch pushes the closing markup onto context
    for depart_title to pop.
    """
    if isinstance(node.parent, nodes.topic):
        # section titles before the table of contents.
        if node.parent.hasattr('id'):
            self.body.append('\\hypertarget{%s}{}' % node.parent['id'])
        # BUG: latex chokes on center environment with "perhaps a missing item".
        # so we use hfill.
        self.body.append('\\subsection*{~\\hfill ')
        # the closing brace for subsection.
        self.context.append('\\hfill ~}\n')
    elif isinstance(node.parent, nodes.sidebar):
        self.body.append('\\textbf{\\large ')
        self.context.append('}\n\\smallskip\n')
    elif self.section_level == 0:
        # document title
        self.title = self.encode(node.astext())
        if not self.pdfinfo == None:
            self.pdfinfo.append('pdftitle={%s}' % self.encode(node.astext()))
        raise nodes.SkipNode
    else:
        self.body.append('\n\n')
        self.body.append('%' + '_' * 75)
        self.body.append('\n\n')
        if node.parent.hasattr('id'):
            self.body.append('\\hypertarget{%s}{}\n' % node.parent['id'])
        # section_level 0 is title and handled above.
        # BUG: latex has no deeper sections (actually paragrah is no section either).
        if self.use_latex_toc:
            section_star = ""
        else:
            section_star = "*"
        if (self.section_level <= 3):  # 1,2,3
            self.body.append('\\%ssection%s{' %
                             ('sub' * (self.section_level - 1), section_star))
        elif (self.section_level == 4):
            # deeper levels are clamped to subsubsection.
            #self.body.append('\\paragraph*{')
            self.body.append('\\subsubsection%s{' % (section_star))
        else:
            #self.body.append('\\subparagraph*{')
            self.body.append('\\subsubsection%s{' % (section_star))
        # BUG: self.body.append( '\\label{%s}\n' % name)
        self.context.append('}\n')
def depart_title(self, node):
    # Pop and emit the closer pushed by visit_title, then, for normal
    # sections without LaTeX's own ToC, add a PDF bookmark.
    self.body.append(self.context.pop())
    if isinstance(node.parent, nodes.sidebar):
        return
    # BUG level depends on style.
    elif node.parent.hasattr('id') and not self.use_latex_toc:
        # pdflatex allows level 0 to 3
        # ToC would be the only on level 0 so i choose to decrement the rest.
        # "Table of contents" bookmark to see the ToC. To avoid this
        # we set all zeroes to one.
        l = self.section_level
        if l > 0:
            l = l - 1
        self.body.append('\\pdfbookmark[%d]{%s}{%s}\n' % \
            (l, node.astext(), node.parent['id']))
def visit_topic(self, node):
    # Remember the topic class so paragraph handling can special-case
    # the 'contents' topic.  When LaTeX generates its own ToC the
    # topic's content is skipped entirely.
    self.topic_class = node.get('class')
    if self.use_latex_toc:
        self.topic_class = ''
        raise nodes.SkipNode

def depart_topic(self, node):
    # Reset the topic class and separate from following material.
    self.topic_class = ''
    self.body.append('\n')
def visit_rubric(self, node):
    """Open a \\rubric group; the closer is stored on context."""
    # self.body.append('\\hfill {\\color{red}\\bfseries{}')
    # self.context.append('} \\hfill ~\n')
    self.body.append('\\rubric{')
    self.context.append('}\n')

def depart_rubric(self, node):
    """Emit the closer stored by ``visit_rubric``."""
    self.body.append(self.context.pop())
def visit_transition(self, node):
    """Render a transition as a comment rule plus a centered
    horizontal rule."""
    for chunk in ('\n\n',
                  '%' + '_' * 75,
                  '\n\\hspace*{\\fill}\\hrulefill\\hspace*{\\fill}',
                  '\n\n'):
        self.body.append(chunk)

def depart_transition(self, node):
    """Transitions need no closing markup."""
    #self.body.append('[depart_transition]')
    pass
def visit_version(self, node):
    """Render the docinfo field 'version'."""
    self.visit_docinfo_item(node, 'version')

def depart_version(self, node):
    """Close the docinfo field 'version'."""
    self.depart_docinfo_item(node)
def visit_warning(self, node):
    """Render a warning as an admonition box."""
    self.visit_admonition(node, 'warning')

def depart_warning(self, node):
    """Close the warning admonition box."""
    self.depart_admonition()
def unimplemented_visit(self, node):
    """Fail loudly for node types this writer does not handle."""
    kind = node.__class__.__name__
    raise NotImplementedError(
        'visiting unimplemented node type: %s' % kind)
# def unknown_visit(self, node):
# def default_visit(self, node):
# vim: set ts=4 et ai :
lib/python/docutils/writers/pep_html.py
deleted
100644 → 0
View file @
e1142d2d
# Author: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
PEP HTML Writer.
"""
__docformat__
=
'reStructuredText'
import
sys
import
docutils
from
docutils
import
nodes
,
frontend
,
utils
from
docutils.writers
import
html4css1
class Writer(html4css1.Writer):

    """PEP-flavored HTML writer: wraps html4css1 output in a PEP page
    template with header, banner and index links substituted in."""

    settings_spec = html4css1.Writer.settings_spec + (
        'PEP/HTML-Specific Options',
        'The HTML --footnote-references option is set to "brackets" by '
        'default.',
        (('Specify a PEP stylesheet URL, used verbatim. Default is '
          '--stylesheet\'s value. If given, --pep-stylesheet overrides '
          '--stylesheet.',
          ['--pep-stylesheet'],
          {'metavar': '<URL>'}),
         ('Specify a PEP stylesheet file, relative to the current working '
          'directory. The path is adjusted relative to the output HTML '
          'file. Overrides --pep-stylesheet and --stylesheet-path.',
          ['--pep-stylesheet-path'],
          {'metavar': '<path>'}),
         ('Specify a template file. Default is "pep-html-template".',
          ['--pep-template'],
          {'default': 'pep-html-template', 'metavar': '<file>'}),
         ('Python\'s home URL. Default is ".." (parent directory).',
          ['--python-home'],
          {'default': '..', 'metavar': '<URL>'}),
         ('Home URL prefix for PEPs. Default is "." (current directory).',
          ['--pep-home'],
          {'default': '.', 'metavar': '<URL>'}),
         # Workaround for SourceForge's broken Python
         # (``import random`` causes a segfault).
         (frontend.SUPPRESS_HELP,
          ['--no-random'],
          {'action': 'store_true'}),))

    settings_default_overrides = {'footnote_references': 'brackets'}

    relative_path_settings = ('pep_stylesheet_path', 'pep_template')

    def __init__(self):
        html4css1.Writer.__init__(self)
        self.translator_class = HTMLTranslator

    def translate(self):
        """Run the HTML translation, then fill the PEP page template."""
        html4css1.Writer.translate(self)
        settings = self.document.settings
        # Close the template file explicitly instead of leaking the
        # handle (was ``open(...).read()``).
        template_file = open(settings.pep_template)
        try:
            template = template_file.read()
        finally:
            template_file.close()
        # Substitutions dict for template:
        subs = {}
        subs['encoding'] = settings.output_encoding
        subs['version'] = docutils.__version__
        subs['stylesheet'] = ''.join(self.stylesheet)
        pyhome = settings.python_home
        subs['pyhome'] = pyhome
        subs['pephome'] = settings.pep_home
        if pyhome == '..':
            subs['pepindex'] = '.'
        else:
            subs['pepindex'] = pyhome + '/peps/'
        # The document's first field list is the RFC 2822 header:
        # field 0 holds the PEP number, field 1 the title.
        index = self.document.first_child_matching_class(nodes.field_list)
        header = self.document[index]
        pepnum = header[0][1].astext()
        subs['pep'] = pepnum
        if settings.no_random:
            subs['banner'] = 0
        else:
            import random
            subs['banner'] = random.randrange(64)
        # Zero-pad numeric PEP numbers.  The original bare ``except:``
        # also hid unrelated errors; narrowed to conversion failures.
        try:
            subs['pepnum'] = '%04i' % int(pepnum)
        except (ValueError, TypeError):
            subs['pepnum'] = pepnum
        subs['title'] = header[1][1].astext()
        subs['body'] = ''.join(
            self.body_pre_docinfo + self.docinfo + self.body)
        subs['body_suffix'] = ''.join(self.body_suffix)
        self.output = template % subs
class HTMLTranslator(html4css1.HTMLTranslator):

    def get_stylesheet_reference(self, relative_to=None):
        # Stylesheet precedence: --pep-stylesheet-path, --pep-stylesheet,
        # then the generic stylesheet settings.
        settings = self.settings
        if relative_to == None:
            relative_to = settings._destination
        if settings.pep_stylesheet_path:
            return utils.relative_path(relative_to,
                                       settings.pep_stylesheet_path)
        elif settings.pep_stylesheet:
            return settings.pep_stylesheet
        elif settings._stylesheet_path:
            # NOTE(review): tests ``_stylesheet_path`` but returns
            # ``stylesheet_path`` -- presumably the frontend sets both
            # together; confirm.
            return utils.relative_path(relative_to, settings.stylesheet_path)
        else:
            return settings.stylesheet

    def depart_field_list(self, node):
        # Standard field-list handling, plus a horizontal rule after the
        # RFC 2822 header block.
        html4css1.HTMLTranslator.depart_field_list(self, node)
        if node.get('class') == 'rfc2822':
            self.body.append('<hr />\n')
lib/python/docutils/writers/pseudoxml.py
deleted
100644 → 0
View file @
e1142d2d
# Authors: David Goodger
# Contact: goodger@users.sourceforge.net
# Revision: $Revision: 1.3 $
# Date: $Date: 2003/07/10 15:50:05 $
# Copyright: This module has been placed in the public domain.
"""
Simple internal document tree Writer, writes indented pseudo-XML.
"""
__docformat__
=
'reStructuredText'
from
docutils
import
writers
class Writer(writers.Writer):

    # Output format names accepted for this writer.
    supported = ('pprint', 'pformat', 'pseudoxml')
    """Formats this writer supports."""

    output = None
    """Final translated form of `document`."""

    def translate(self):
        # Pseudo-XML output is simply the document tree's pformat dump.
        self.output = self.document.pformat()

    def supports(self, format):
        """This writer supports all format-specific elements."""
        return 1
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment