author     Karl Berry <karl@freefriends.org>  2016-07-21 21:18:20 +0000
committer  Karl Berry <karl@freefriends.org>  2016-07-21 21:18:20 +0000
commit     24289a6c830263430a9ff06f2dd20d6ec3482079 (patch)
tree       de97f9224a4eae78a947c52df01eb7d1b63c37b0 /Master/texmf-dist/scripts
parent     75ba721d706ce924641fd8f7715a83809b757812 (diff)
pythontex (21jul16)
git-svn-id: svn://tug.org/texlive/trunk@41746 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Master/texmf-dist/scripts')
-rwxr-xr-x  Master/texmf-dist/scripts/pythontex/depythontex.py          8
-rwxr-xr-x  Master/texmf-dist/scripts/pythontex/depythontex2.py       490
-rwxr-xr-x  Master/texmf-dist/scripts/pythontex/depythontex3.py       490
-rwxr-xr-x  Master/texmf-dist/scripts/pythontex/pythontex.py           20
-rwxr-xr-x  Master/texmf-dist/scripts/pythontex/pythontex2.py         747
-rwxr-xr-x  Master/texmf-dist/scripts/pythontex/pythontex3.py         747
-rwxr-xr-x  Master/texmf-dist/scripts/pythontex/pythontex_engines.py  706
7 files changed, 1784 insertions, 1424 deletions
diff --git a/Master/texmf-dist/scripts/pythontex/depythontex.py b/Master/texmf-dist/scripts/pythontex/depythontex.py
index 2aae645084f..30256c6d1c3 100755
--- a/Master/texmf-dist/scripts/pythontex/depythontex.py
+++ b/Master/texmf-dist/scripts/pythontex/depythontex.py
@@ -3,13 +3,13 @@
'''
This is the depythontex wrapper script. It automatically detects the version
-of Python, and then imports the correct code from depythontex2.py or
-depythontex3.py. It is intended for use with the default Python installation
-on your system. If you wish to use a different version of Python, you could
+of Python, and then imports the correct code from depythontex2.py or
+depythontex3.py. It is intended for use with the default Python installation
+on your system. If you wish to use a different version of Python, you could
launch depythontex2.py or depythontex3.py directly. The version of Python
does not matter for depythontex, since no code is executed.
-Copyright (c) 2013-2014, Geoffrey M. Poore
+Copyright (c) 2013-2016, Geoffrey M. Poore
All rights reserved.
Licensed under the BSD 3-Clause License:
http://www.opensource.org/licenses/BSD-3-Clause
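
[Editor's illustration] The wrapper docstring above describes a simple dispatch: detect the running interpreter and load the matching implementation from depythontex2.py or depythontex3.py. A minimal sketch of that pattern, assuming the module layout named in the docstring (this is an illustration, not the literal body of depythontex.py, which is not shown in this hunk):

    import sys

    # Sketch only: dispatch to the implementation matching the interpreter
    if sys.version_info[0] == 2:
        # Running under Python 2: use the Python 2 implementation
        from depythontex2 import *
    else:
        # Running under Python 3: use the Python 3 implementation
        from depythontex3 import *
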
diff --git a/Master/texmf-dist/scripts/pythontex/depythontex2.py b/Master/texmf-dist/scripts/pythontex/depythontex2.py
index 155df48d510..0975b9b8ca4 100755
--- a/Master/texmf-dist/scripts/pythontex/depythontex2.py
+++ b/Master/texmf-dist/scripts/pythontex/depythontex2.py
@@ -4,50 +4,50 @@
'''
PythonTeX depythontex script.
-This script takes a LaTeX document that uses the PythonTeX package and
-creates a new document that does not depend on PythonTeX. It substitutes all
-externally generated content into a copy of the original LaTeX document.
-This is useful when you need a document that relies on few external packages
-or custom macros (for example, for submission to a journal or conversion to
+This script takes a LaTeX document that uses the PythonTeX package and
+creates a new document that does not depend on PythonTeX. It substitutes all
+externally generated content into a copy of the original LaTeX document.
+This is useful when you need a document that relies on few external packages
+or custom macros (for example, for submission to a journal or conversion to
another document format).
-If you just want to share a document that uses PythonTeX, keep in mind that
-the document can be modified and compiled just like a regular LaTeX document,
-without needing Python or any other external tools, so long as the following
+If you just want to share a document that uses PythonTeX, keep in mind that
+the document can be modified and compiled just like a regular LaTeX document,
+without needing Python or any other external tools, so long as the following
conditions are met:
* A copy of pythontex.sty is included with the document.
* The pythontex-files-<name> directory is included with the document.
* The PythonTeX-specific parts of the document are not modified.
-To work, this script requires that the original LaTeX document be compiled
-with the package option `depythontex`. That creates an auxiliary file with
+To work, this script requires that the original LaTeX document be compiled
+with the package option `depythontex`. That creates an auxiliary file with
the extension .depytx that contains information about all content that needs
to be substituted.
-This script is purposely written in a simple, largely linear form to
-facilitate customization. Most of the key substitutions are performed by a
+This script is purposely written in a simple, largely linear form to
+facilitate customization. Most of the key substitutions are performed by a
few functions defined near the beginning of the script, so if you need custom
-substitutions, you should begin there. By default, all typeset code is
-wrapped in `\verb` commands and verbatim environments, since these have the
-greatest generality. However, the command-line option --listing allows code
-to be typeset with the fancyvrb, listings, minted, or PythonTeX packages
+substitutions, you should begin there. By default, all typeset code is
+wrapped in `\verb` commands and verbatim environments, since these have the
+greatest generality. However, the command-line option --listing allows code
+to be typeset with the fancyvrb, listings, minted, or PythonTeX packages
instead.
-The script automatically extracts all arguments of all commands and
-environments that it replaces, so that these are available if desired for
-customized substitution. Two additional pieces of information are also
-available for any typeset code: the Pygments lexer (often the same as the
+The script automatically extracts all arguments of all commands and
+environments that it replaces, so that these are available if desired for
+customized substitution. Two additional pieces of information are also
+available for any typeset code: the Pygments lexer (often the same as the
language) and the starting line number (if line numbering was used).
Keep in mind that some manual adjustments may be required after a document is
-depythontex'ed. While depythontex attempts to create an exact copy of the
-original document, in many cases an identical copy is impossible. For
-example, typeset code may have a different appearance or layout when it is
+depythontex'ed. While depythontex attempts to create an exact copy of the
+original document, in many cases an identical copy is impossible. For
+example, typeset code may have a different appearance or layout when it is
typeset with a different package.
-Copyright (c) 2013-2014, Geoffrey M. Poore
+Copyright (c) 2013-2016, Geoffrey M. Poore
All rights reserved.
Licensed under the BSD 3-Clause License:
http://www.opensource.org/licenses/BSD-3-Clause
@@ -86,7 +86,7 @@ import codecs
# Script parameters
# Version
-__version__ = '0.14'
+__version__ = '0.15'
# Functions and parameters for customizing the script output
@@ -103,77 +103,77 @@ listing = None #'verbatim', 'fancyvrb', 'listings', 'minted', 'pythontex'
preamble_additions = list()
# Lexer dict
-# If you are using Pygments lexers that don't directly correspond to the
-# languages used by the listings package, you can submit replacements via the
-# command line option --lexer-dict, or edit this dict manually here. When
-# listings is used, all lexers are checked against this dict to see if a
-# substitution should be made. This approach could easily be modified to
+# If you are using Pygments lexers that don't directly correspond to the
+# languages used by the listings package, you can submit replacements via the
+# command line option --lexer-dict, or edit this dict manually here. When
+# listings is used, all lexers are checked against this dict to see if a
+# substitution should be made. This approach could easily be modified to
# work with another, non-Pygments highlighting package.
lexer_dict = dict()
-def replace_code_cmd(name, arglist, linenum, code_replacement,
+def replace_code_cmd(name, arglist, linenum, code_replacement,
code_replacement_mode, after, lexer, firstnumber):
'''
Typeset code from a command with a command.
-
+
It is only ever called if there is indeed code to typeset.
-
- Usually, code from a command is also typeset with a command. This
- function primarily deals with that case. In cases where code from a
+
+ Usually, code from a command is also typeset with a command. This
+ function primarily deals with that case. In cases where code from a
command is typeset with an environment (for example, `\inputpygments`),
- this function performs some preprocessing and then uses
+ this function performs some preprocessing and then uses
replace_code_env() to do the real work. This approach prevents the two
functions from unnecessarily duplicating each other, while still giving
the desired output.
-
+
Args:
name (str): name of the command
- arglist (list, of str/None): all arguments given to the original
- command; the last argument is what is typeset, unless a
+ arglist (list, of str/None): all arguments given to the original
+ command; the last argument is what is typeset, unless a
code_replacement is specified or other instructions are given
linenum (int): line number in the original TeX document
code_replacement (str/None): replacement for the code; usually None
- for commands, because typically the code to be typeset is the
- last argument passed to the command, rather than something
+ for commands, because typically the code to be typeset is the
+ last argument passed to the command, rather than something
captured elsewhere (like the body of an environment) or something
preprocessed (like a console environment's content)
- code_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX; generally unused for code),
+ code_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX; generally unused for code),
verb (inline), or verbatim (environment)
- after (str): text immediately following the command; usually
+ after (str): text immediately following the command; usually
shouldn't be needed
lexer (str/None): Pygments lexer
Returns:
(replacement, after) (tuple, of str)
-
+
'''
# Get the correct replacement
if code_replacement is None:
code_replacement = arglist[-1]
-
+
# We only consider two possible modes of typesetting, verbatim and inline
# verbatim
if code_replacement_mode == 'verbatim':
- # Sometimes we must replace a command with an environment, for
+ # Sometimes we must replace a command with an environment, for
# example, for `\inputpygments`
-
- # Make sure the introduction of an environment where a command was
- # previously won't produce errors with following content; make sure
+
+ # Make sure the introduction of an environment where a command was
+ # previously won't produce errors with following content; make sure
# that any following content is on a separate line
if bool(match('[ \t]*\S', after)):
after = '\n' + after
# Rather than duplicating much of replace_code_env(), just use it
- return replace_code_env(name, arglist, linenum, code_replacement,
+ return replace_code_env(name, arglist, linenum, code_replacement,
code_replacement_mode, after, lexer, firstnumber)
else:
# Usually, we're replacing a command with a command
-
+
# Wrap the replacement in appropriate delimiters
- if (listing in ('verbatim', 'fancyvrb', 'minted') or
- (listing in ('listings', 'pythontex') and
+ if (listing in ('verbatim', 'fancyvrb', 'minted') or
+ (listing in ('listings', 'pythontex') and
('{' in code_replacement or '}' in code_replacement))):
- for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
+ for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
'=', '+', '-', '^', '_', '?', ';'):
if delim not in code_replacement:
break
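
[Editor's illustration] The lexer dict comment near the top of this hunk explains that Pygments lexer names can be remapped to the language names that the listings package expects, either via the --lexer-dict command-line option or by editing lexer_dict directly. A sketch of such a manual edit, with mappings that are assumptions for illustration rather than values shipped with the script:

    # Map Pygments lexer names to listings language names, consulted when
    # the listings package is selected via --listing; entries are illustrative.
    lexer_dict = {
        'pycon': 'Python',      # Python console sessions -> listings' Python
        'ipython3': 'Python',   # IPython sessions -> listings' Python
    }
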
@@ -198,40 +198,40 @@ def replace_code_cmd(name, arglist, linenum, code_replacement,
else:
code_replacement = r'\pygment{' + lexer + '}' + code_replacement
return (code_replacement, after)
-
-def replace_code_env(name, arglist, linenum, code_replacement,
+
+def replace_code_env(name, arglist, linenum, code_replacement,
code_replacement_mode, after, lexer, firstnumber):
'''
Typeset code from an environment with an environment.
-
+
It is only ever called if there is indeed code to typeset.
-
+
Usually it is only used to typeset code from an environment. However,
some commands bring in code that must be typeset as an environment. In
- those cases, replace_code_cmd() is called initially, and after it
+ those cases, replace_code_cmd() is called initially, and after it
performs some preprocessing, this function is called. This approach
avoids unnecessary duplication between the two functions.
-
+
Args:
name (str): name of the environment
- arglist (list, of str/None): all arguments given to the original
+ arglist (list, of str/None): all arguments given to the original
environment
- linenum (int): line number in the original TeX document where
+ linenum (int): line number in the original TeX document where
the environment began
code_replacement (str): replacement for the code; unlike the case of
commands, this is always not None if the function is called
- code_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX; generally unused for code),
+ code_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX; generally unused for code),
verb (inline), or verbatim (environment)
- after (str): text immediately following the environment; usually
+ after (str): text immediately following the environment; usually
shouldn't be needed
lexer (str/None): Pygments lexer
firstnumber (str/None): the first number of the listing, if the listing
had numbered lines
Returns:
(replacement, after) (tuple, of str)
-
+
'''
# Currently, there is no need to test for code_replacement_mode, because
# this function is only ever called if the mode is 'verbatim'. That may
@@ -245,7 +245,7 @@ def replace_code_env(name, arglist, linenum, code_replacement,
pre = '\\begin{Verbatim}'
else:
pre = '\\begin{{Verbatim}}[numbers=left,firstnumber={0}]'.format(firstnumber)
- post = '\\end{Verbatim}'
+ post = '\\end{Verbatim}'
elif listing == 'listings':
if lexer is None:
if firstnumber is None:
@@ -300,26 +300,26 @@ def replace_print_cmd(name, arglist, linenum,
after):
'''
Typeset printed content from a command.
-
+
It is only ever called if there is indeed printed content to typeset.
-
+
Args:
name (str): name of the command
- arglist (list, of str/None): all arguments given to the original
+ arglist (list, of str/None): all arguments given to the original
command
linenum (int): line number in the original TeX document
print_replacement (str): printed content, read directly from file
into a single string
- print_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
+ print_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
or verb (as environment)
source (str/None): source of the replacement content
after (str): text immediately following the command; important in
some situations, because spacing can depend on what's next
Returns:
(replacement, after) (tuple, of str)
-
- '''
+
+ '''
if print_replacement_mode == 'verb':
if print_replacement.count('\n') > 1:
print('* DePythonTeX error:')
@@ -327,7 +327,7 @@ def replace_print_cmd(name, arglist, linenum,
print(' This is not possible in inline verbatim mode')
sys.exit(1)
print_replacement = print_replacement.rstrip('\n')
- for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
+ for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
'=', '+', '-', '^', '_', '?', ';'):
if delim not in print_replacement:
break
@@ -335,71 +335,71 @@ def replace_print_cmd(name, arglist, linenum,
elif print_replacement_mode == 'verbatim':
if bool(match('\s*?\n', after)):
# Usually, we would end the verbatim environment with a newline.
- # This is fine if there is content in `after` before the next
+ # This is fine if there is content in `after` before the next
# newline---in fact, it's desirable, because the verbatim package
- # doesn't allow for content on the same line as the end of the
- # environment. But if `after` is an empty line, then adding a
+ # doesn't allow for content on the same line as the end of the
+ # environment. But if `after` is an empty line, then adding a
# newline will throw off spacing and must be avoided
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}'
else:
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}\n'
else:
- # When printed content from a file is included as LaTeX code, we have
- # to be particularly careful to ensure that the content produces the
- # same output when substituted as when brought in by `\input`. In
- # particular, `\input` strips newlines from each line of content and
+ # When printed content from a file is included as LaTeX code, we have
+ # to be particularly careful to ensure that the content produces the
+ # same output when substituted as when brought in by `\input`. In
+ # particular, `\input` strips newlines from each line of content and
# adds a space at the end of each line. This space is inside the
- # `\input`, so it will not merge with following spaces. So when we
- # substitute the content, sometimes we need to replace the final
+ # `\input`, so it will not merge with following spaces. So when we
+ # substitute the content, sometimes we need to replace the final
# newline with a space that cannot be gobbled.
#
- # It gets more complicated. This final space is often not
- # desirable. It can be prevented by either printing an `\endinput`
- # command, to terminate the `\input`, or printing a percent
+ # It gets more complicated. This final space is often not
+ # desirable. It can be prevented by either printing an `\endinput`
+ # command, to terminate the `\input`, or printing a percent
# character % in the last line of the content, which comments out the
- # final newline. So we must check for `\endinput` anywhere in
- # printed content, and % in the final line, and remove any content
- # after them. It's also possible that the print is followed by
+ # final newline. So we must check for `\endinput` anywhere in
+ # printed content, and % in the final line, and remove any content
+ # after them. It's also possible that the print is followed by
# an `\unskip` that eats the space, so we need to check for that too.
#
- # It turns out that the same approach is needed when a command like
+ # It turns out that the same approach is needed when a command like
# `\py` brings in content ending in a newline
- if (print_replacement.endswith('\\endinput\n') and
+ if (print_replacement.endswith('\\endinput\n') and
not print_replacement.endswith('\\string\\endinput\n')):
- # If `\endinput` is present, everything from it on should be
+ # If `\endinput` is present, everything from it on should be
# discarded, unless the `\endinput` is not actually a command
- # but rather a typeset name (for example, `\string\endinput` or
- # `\verb|\endinput|`). It's impossible to check for all cases in
- # which `\endinput` is not a command (at least, without actually
- # using LaTeX), and even checking for most of them would require
- # a good bit of parsing. We assume that `\endinput`, as a
- # command, will only ever occur at the immediate end of the
- # printed content. Later, we issue a warning in case it appears
+ # but rather a typeset name (for example, `\string\endinput` or
+ # `\verb|\endinput|`). It's impossible to check for all cases in
+ # which `\endinput` is not a command (at least, without actually
+ # using LaTeX), and even checking for most of them would require
+ # a good bit of parsing. We assume that `\endinput`, as a
+ # command, will only ever occur at the immediate end of the
+ # printed content. Later, we issue a warning in case it appears
# anywhere else.
print_replacement = print_replacement.rsplit(r'\endinput', 1)[0]
- elif (print_replacement.endswith('%\n') and
- not print_replacement.endswith('\\%\n') and
+ elif (print_replacement.endswith('%\n') and
+ not print_replacement.endswith('\\%\n') and
not print_replacement.endswith('\\string%\n')):
# Perform an analogous check for a terminating percent characer %.
- # This case would be a bit easier to parse fully, since a percent
- # that comments out the last newline would have to be in the
- # final line of the replacement. But it would still be
- # very difficult to perform a complete check. Later, we issue a
- # warning if there is reason to think that a percent character
+ # This case would be a bit easier to parse fully, since a percent
+ # that comments out the last newline would have to be in the
+ # final line of the replacement. But it would still be
+ # very difficult to perform a complete check. Later, we issue a
+ # warning if there is reason to think that a percent character
# was active in the last line.
print_replacement = print_replacement.rsplit(r'%', 1)[0]
elif print_replacement.endswith('\n'):
# We can't just use `else` because that would catch content
# from `\py` and similar
- # By default, LaTeX strips newlines and adds a space at the end
- # of each line of content that is brought in by `\input`. This
- # may or may not be desirable, but we replicate the effect here
- # for consistency with the original document. We use `\space{}`
- # because plain `\space` would gobble a following space, which
+ # By default, LaTeX strips newlines and adds a space at the end
+ # of each line of content that is brought in by `\input`. This
+ # may or may not be desirable, but we replicate the effect here
+ # for consistency with the original document. We use `\space{}`
+ # because plain `\space` would gobble a following space, which
# isn't consistent with the `\input` behavior being replicated.
if bool(match(r'\\unskip\s+\S', after)):
- # If there's an `\unskip`, fix the spacing and remove the
- # `\unskip`. Since this is inline, the `\unskip` must
+ # If there's an `\unskip`, fix the spacing and remove the
+ # `\unskip`. Since this is inline, the `\unskip` must
# immediately follow the command to do any good; otherwise,
# it eliminates spaces that precede it, but doesn't get into
# the `\input` content.
@@ -410,9 +410,9 @@ def replace_print_cmd(name, arglist, linenum,
# the `\n`, and it will yield a space.
pass
elif bool(match('\s*$', after)):
- # If the rest of the current line, and the next line, are
+ # If the rest of the current line, and the next line, are
# whitespace, we will get the correct spacing without needing
- # `\space{}`. We could leave `\n`, but it would be
+ # `\space{}`. We could leave `\n`, but it would be
# extraneous whitespace.
print_replacement = print_replacement[:-1]
else:
@@ -431,7 +431,7 @@ def replace_print_cmd(name, arglist, linenum,
after = sub('^\s+', '\n', after)
# Issue warnings, if warranted
# Warn about `\endinput`
- if (r'\endinput' in print_replacement and
+ if (r'\endinput' in print_replacement and
print_replacement.count(r'\endinput') != print_replacement.count(r'\string\endinput')):
print('* DePythonTeX warning:')
print(' "\\endinput" was present in printed content near line ' + str(linenum))
@@ -441,7 +441,7 @@ def replace_print_cmd(name, arglist, linenum,
# Warn if it looks like there are active `%` that could comment
# out part of the original document. We only need to check the
# last line of printed content, because only there could
- # percent characters escape from their original confines within
+ # percent characters escape from their original confines within
# `\input`, and comment out part of the document.
if print_replacement.endswith('\n'):
if print_replacement.count('\n') > 1:
@@ -471,42 +471,42 @@ def replace_print_cmd(name, arglist, linenum,
print(' If it should have adjusted the spacing of printed content')
print(' you should double-check the spacing')
return (print_replacement, after)
-
+
def replace_print_env(name, arglist, linenum,
print_replacement, print_replacement_mode, source,
after):
'''
Typeset printed content from an environment.
-
+
It is only ever called if there is indeed printed content to typeset.
-
+
This should be similar to replace_print_cmd(). The main difference is
- that the environment context typically ends with a newline, so
+ that the environment context typically ends with a newline, so
substitution has to be a little different to ensure that spacing after
the environment isn't modified.
-
+
Args:
name (str): name of the environment
- arglist (list, of str/None): all arguments given to the original
+ arglist (list, of str/None): all arguments given to the original
environment
- linenum (int): line number in the original TeX document where the
+ linenum (int): line number in the original TeX document where the
environment began
print_replacement (str): printed content, read directly from file
into a single string
- print_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
+ print_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
or verb (as environment)
source (str/None): source of the replacement content
after (str): text immediately following the command; important in
some situations, because spacing can depend on what's next
Returns:
(replacement, after) (tuple, of str)
-
+
#### The inlineverb and verb modes should work, but haven't been tested
since there are currently no environments that use them; they are only
used by `\printpythontex`, which is a command.
- '''
+ '''
if print_replacement_mode == 'verb':
if print_replacement.count('\n') > 1:
print('* DePythonTeX error:')
@@ -514,119 +514,119 @@ def replace_print_env(name, arglist, linenum,
print(' This is not possible in inline verbatim mode')
sys.exit(1)
print_replacement = print_replacement.rstrip('\n')
- for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
+ for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
'=', '+', '-', '^', '_', '?', ';'):
if delim not in print_replacement:
break
print_replacement = r'\verb' + delim + print_replacement + delim
if not bool(match('[ \t]+\S', after)):
- # If there is text on the same line as the end of the
- # environment, we're fine (this is unusual). Otherwise,
+ # If there is text on the same line as the end of the
+ # environment, we're fine (this is unusual). Otherwise,
# we need to toss the newline at the end of the environment
- # and gobble leading spaces. Leading spaces need to be
- # gobbled because previously they were at the beginning of a
+ # and gobble leading spaces. Leading spaces need to be
+ # gobbled because previously they were at the beginning of a
# line, where they would have been discarded.
if not bool(match('\s*$', after)):
after = sub('^\s*?\n\s*', '', after)
elif print_replacement_mode == 'verbatim':
if bool(match('\s*?\n', after)):
# Usually, we would end the verbatim environment with a newline.
- # This is fine if there is content in `after` before the next
+ # This is fine if there is content in `after` before the next
# newline---in fact, it's desirable, because the verbatim package
- # doesn't allow for content on the same line as the end of the
- # environment. But if `after` is an empty line, then adding a
+ # doesn't allow for content on the same line as the end of the
+ # environment. But if `after` is an empty line, then adding a
# newline will throw off spacing and must be avoided
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}'
else:
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}\n'
else:
- # When printed content is included as LaTeX code, we have to be
- # particularly careful to ensure that the content produces the same
- # output when substituted as when brought in by `\input`. In
- # particular, `\input` strips newlines from each line of content and
+ # When printed content is included as LaTeX code, we have to be
+ # particularly careful to ensure that the content produces the same
+ # output when substituted as when brought in by `\input`. In
+ # particular, `\input` strips newlines from each line of content and
# adds a space at the end of each line. This space is inside the
- # `\input`, so it will not merge with following spaces. So when we
- # substitute the content, sometimes we need to replace the final
+ # `\input`, so it will not merge with following spaces. So when we
+ # substitute the content, sometimes we need to replace the final
# newline with a space that cannot be gobbled.
#
- # It gets more complicated. This final space is often not
- # desirable. It can be prevented by either printing an `\endinput`
- # command, to terminate the `\input`, or printing a percent
+ # It gets more complicated. This final space is often not
+ # desirable. It can be prevented by either printing an `\endinput`
+ # command, to terminate the `\input`, or printing a percent
# character % in the last line of the content, which comments out the
- # final newline. So we must check for `\endinput` anywhere in
- # printed content, and % in the final line, and remove any content
- # after them. It's also possible that the print is followed by
+ # final newline. So we must check for `\endinput` anywhere in
+ # printed content, and % in the final line, and remove any content
+ # after them. It's also possible that the print is followed by
# an `\unskip` that eats the space, so we need to check for that too.
- if (print_replacement.endswith('\\endinput\n') and
+ if (print_replacement.endswith('\\endinput\n') and
not print_replacement.endswith('\\string\\endinput\n')):
- # If `\endinput` is present, everything from it on should be
+ # If `\endinput` is present, everything from it on should be
# discarded, unless the `\endinput` is not actually a command
- # but rather a typeset name (for example, `\string\endinput` or
- # `\verb|\endinput|`). It's impossible to check for all cases in
- # which `\endinput` is not a command (at least, without actually
- # using LaTeX), and even checking for most of them would require
- # a good bit of parsing. We assume that `\endinput`, as a
- # command, will only ever occur at the immediate end of the
- # printed content. Later, we issue a warning in case it appears
+ # but rather a typeset name (for example, `\string\endinput` or
+ # `\verb|\endinput|`). It's impossible to check for all cases in
+ # which `\endinput` is not a command (at least, without actually
+ # using LaTeX), and even checking for most of them would require
+ # a good bit of parsing. We assume that `\endinput`, as a
+ # command, will only ever occur at the immediate end of the
+ # printed content. Later, we issue a warning in case it appears
# anywhere else.
print_replacement = print_replacement.rsplit(r'\endinput', 1)[0]
if not bool(match('[ \t]+\S', after)):
- # If there is text on the same line as the end of the
- # environment, we're fine (this is unusual). Otherwise,
+ # If there is text on the same line as the end of the
+ # environment, we're fine (this is unusual). Otherwise,
# we need to toss the newline at the end of the environment
- # and gobble leading spaces. Leading spaces need to be
- # gobbled because previously they were at the beginning of a
+ # and gobble leading spaces. Leading spaces need to be
+ # gobbled because previously they were at the beginning of a
# line, where they would have been discarded.
if not bool(match('\s*$', after)):
after = sub('^\s*?\n\s*', '', after)
- elif (print_replacement.endswith('%\n') and
- not print_replacement.endswith('\\%\n') and
+ elif (print_replacement.endswith('%\n') and
+ not print_replacement.endswith('\\%\n') and
not print_replacement.endswith('\\string%\n')):
# Perform an analogous check for a terminating percent characer %.
- # This case would be a bit easier to parse fully, since a percent
- # that comments out the last newline would have to be in the
- # final line of the replacement. But it would still be
- # very difficult to perform a complete check. Later, we issue a
- # warning if there is reason to think that a percent character
+ # This case would be a bit easier to parse fully, since a percent
+ # that comments out the last newline would have to be in the
+ # final line of the replacement. But it would still be
+ # very difficult to perform a complete check. Later, we issue a
+ # warning if there is reason to think that a percent character
# was active in the last line.
print_replacement = print_replacement.rsplit(r'%', 1)[0]
if not bool(match('[ \t]+\S', after)):
- # If there is text on the same line as the end of the
- # environment, we're fine (this is unusual). Otherwise,
+ # If there is text on the same line as the end of the
+ # environment, we're fine (this is unusual). Otherwise,
# we need to toss the newline at the end of the environment
- # and gobble leading spaces. Leading spaces need to be
- # gobbled because previously they were at the beginning of a
+ # and gobble leading spaces. Leading spaces need to be
+ # gobbled because previously they were at the beginning of a
# line, where they would have been discarded.
if not bool(match('\s*$', after)):
after = sub('^\s*?\n\s*', '', after)
else:
- # By default, LaTeX strips newlines and adds a space at the end
- # of each line of content that is brought in by `\input`. This
- # may or may not be desirable, but we replicate the effect here
- # for consistency with the original document. We use `\space{}`
- # because plain `\space` would gobble a following space, which
+ # By default, LaTeX strips newlines and adds a space at the end
+ # of each line of content that is brought in by `\input`. This
+ # may or may not be desirable, but we replicate the effect here
+ # for consistency with the original document. We use `\space{}`
+ # because plain `\space` would gobble a following space, which
# isn't consistent with the `\input` behavior being replicated.
if bool(match(r'\s*\\unskip\s+\S', after)):
- # If there's an `\unskip`, fix the spacing and remove the
+ # If there's an `\unskip`, fix the spacing and remove the
# `\unskip`
print_replacement = print_replacement.rstrip(' \t\n')
after = sub(r'^\s*\\unskip\s+', '', after)
elif bool(match('[ \t]+\S', after)):
- # If the next character after the end of the environment is
+ # If the next character after the end of the environment is
# not whitespace (usually not allowed), we can just leave
- # the `\n` in printed content, and it will yield a space.
- # So we need do nothing. But if there is text on that line
+ # the `\n` in printed content, and it will yield a space.
+ # So we need do nothing. But if there is text on that line
# we need `\space{}`.
after = sub('^\s+', '\\space', after)
forced_double_space_list.append((name, linenum))
else:
# If the line at the end of the environment is blank,
- # we can just discard it and keep the newline at the end of
+ # we can just discard it and keep the newline at the end of
# the printed content; the newline gives us the needed space
after = after.split('\n', 1)[1]
# Issue warnings, if warranted
# Warn about `\endinput`
- if (r'\endinput' in print_replacement and
+ if (r'\endinput' in print_replacement and
print_replacement.count(r'\endinput') != print_replacement.count(r'\string\endinput')):
print('* DePythonTeX warning:')
print(' "\\endinput" was present in printed content near line ' + str(linenum))
@@ -636,7 +636,7 @@ def replace_print_env(name, arglist, linenum,
# Warn if it looks like there are active `%` that could comment
# out part of the original document. We only need to check the
# last line of printed content, because only there could
- # percent characters escape from their original confines within
+ # percent characters escape from their original confines within
# `\input`, and comment out part of the document.
if print_replacement.endswith('\n'):
if print_replacement.count('\n') > 1:
@@ -673,9 +673,9 @@ def replace_print_env(name, arglist, linenum,
# Deal with argv
# Parse argv
parser = argparse.ArgumentParser()
-parser.add_argument('--version', action='version',
+parser.add_argument('--version', action='version',
version='DePythonTeX {0}'.format(__version__))
-parser.add_argument('--encoding', default='utf-8',
+parser.add_argument('--encoding', default='utf-8',
help='encoding for all text files (see codecs module for encodings)')
parser.add_argument('--overwrite', default=False, action='store_true',
help='overwrite existing output, if it exists (off by default)')
@@ -755,7 +755,7 @@ if args.output is not None:
ans = input(' Do you want to overwrite this file? [y,n]\n ')
if ans != 'y':
sys.exit(1)
-# Make sure the .depytx file exists
+# Make sure the .depytx file exists
depytxfile_name = texfile_name.rsplit('.')[0] + '.depytx'
if not os.path.isfile(depytxfile_name):
print('* DePythonTeX error:')
@@ -768,7 +768,7 @@ if not os.path.isfile(depytxfile_name):
# Start opening files and loading data
# Read in the LaTeX file
-# We read into a list with an empty first entry, so that we don't have to
+# We read into a list with an empty first entry, so that we don't have to
# worry about zero indexing when comparing list index to file line number
f = open(texfile_name, 'r', encoding=encoding)
tex = ['']
@@ -798,7 +798,7 @@ if settings['version'] != __version__:
print(' Do a complete compile cycle to update the auxiliary file')
print(' Attempting to proceed')
# Go ahead and open the outfile, even though we don't need it until the end
-# This lets us change working directories for convenience without worrying
+# This lets us change working directories for convenience without worrying
# about having to modify the outfile path
if args.output is not None:
outfile = open(outfile_name, 'w', encoding=encoding)
@@ -807,15 +807,15 @@ if args.output is not None:
# Change working directory to the document directory
-# Technically, we could get by without this, but that would require a lot of
-# path modification. This way, we can just use all paths straight out of the
+# Technically, we could get by without this, but that would require a lot of
+# path modification. This way, we can just use all paths straight out of the
# .depytx without any modification, which is much simpler and less error-prone.
if os.path.split(texfile_name)[0] != '':
os.chdir(os.path.split(texfile_name)[0])
-
+
# Open and process the file of macros
# Read in the macros
if os.path.isfile(os.path.expanduser(os.path.normcase(settings['macrofile']))):
@@ -830,7 +830,7 @@ else:
sys.exit(1)
# Create a dict for storing macros
macrodict = defaultdict(list)
-# Create variables for keeping track of whether we're inside a macro or
+# Create variables for keeping track of whether we're inside a macro or
# environment
# These must exist before we begin processing
inside_macro = False
@@ -842,13 +842,13 @@ for line in macros:
if inside_macro:
# If we're in a macro, look for the end-of-macro command
if r'\endpytx@SVMCR' in line:
- # If the current line contains the end-of-macro command, split
- # off any content that comes before it. Also reset
+ # If the current line contains the end-of-macro command, split
+ # off any content that comes before it. Also reset
# `inside_macro`.
macrodict[current_macro].append(line.rsplit(r'\endpytx@SVMCR', 1)[0])
inside_macro = False
else:
- # If the current line doesn't end the macro, we add the whole
+ # If the current line doesn't end the macro, we add the whole
# line to the macro dict
macrodict[current_macro].append(line)
elif inside_environment:
@@ -856,7 +856,7 @@ for line in macros:
# If the environment is ending, we reset inside_environment
inside_environment = False
else:
- # If we're still in the environment, add the current line to the
+ # If we're still in the environment, add the current line to the
# macro dict
macrodict[current_macro].append(line)
else:
@@ -865,10 +865,10 @@ for line in macros:
# file to increase readability). Once we've determined which one,
# we need to get its name and extract any content.
if line.startswith(r'\begin{'):
- # Any \begin will indicate a use of fancyvrb to save verbatim
- # content, since that is the only time an environment is used in
+ # Any \begin will indicate a use of fancyvrb to save verbatim
+ # content, since that is the only time an environment is used in
# the macro file. All other content is saved in a standard macro.
- # We extract the name of the macro in which the verbatim content
+ # We extract the name of the macro in which the verbatim content
# is saved.
current_macro = line.rsplit('{', 1)[1].rstrip('}\n')
inside_environment = True
@@ -894,8 +894,8 @@ for line in macros:
# Start at 1, since the first entry in the tex list is `''`
texlinenum = 1
# Create a variable for storing the current line(s) we are processing.
-# This contains all lines from immediately after the last successfully
-# processed line up to and including texlinenum. We may have to process
+# This contains all lines from immediately after the last successfully
+# processed line up to and including texlinenum. We may have to process
# multiple lines at once if a macro is split over multiple lines, etc.
texcontent = tex[texlinenum]
# Create a list for storing processed content.
@@ -908,21 +908,21 @@ for n, depytxline in enumerate(depytx):
depy_type, depy_name, depy_args, depy_typeset, depy_linenum, depy_lexer = depytxcontent.split(':')
if depy_lexer == '':
depy_lexer = None
-
+
# Do a quick check on validity of info
# #### Eventually add 'cp' and 'pc'
if not (depy_type in ('cmd', 'env') and
all([letter in ('o', 'm', 'v', 'n', '|') for letter in depy_args]) and
('|' not in depy_args or (depy_args.count('|') == 1 and depy_args.endswith('|'))) and
- depy_typeset in ('c', 'p', 'n')):
+ depy_typeset in ('c', 'p', 'n')):
print('* PythonTeX error:')
print(' Invalid \\Depythontex string for operation on line ' + str(depy_linenum))
print(' The offending string was ' + depytxcontent)
sys.exit(1)
- # If depy_args contains a `|` to indicate `\obeylines`, strip it and
- # store in a variable. Create a bool to keep track of obeylines
- # status, which governs whether we can look on the next line for
- # arguments. (If obeylines is active, a newline terminates the
+ # If depy_args contains a `|` to indicate `\obeylines`, strip it and
+ # store in a variable. Create a bool to keep track of obeylines
+ # status, which governs whether we can look on the next line for
+ # arguments. (If obeylines is active, a newline terminates the
# argument search.)
if depy_args.endswith('|'):
obeylines = True
@@ -932,8 +932,8 @@ for n, depytxline in enumerate(depytx):
# Get the line number as an integer
# We don't have to adjust for zero indexing in tex
depy_linenum = int(depy_linenum)
-
-
+
+
# Check for information passed from LaTeX
# This will be extra listings information, or replacements to plug in
code_replacement = None
@@ -983,13 +983,13 @@ for n, depytxline in enumerate(depytx):
elif nextdepytxline.startswith('FILE:'):
source = 'file'
try:
- typeset, f_name = nextdepytxline.rstrip('\n').split(':', 2)[1:]
+ typeset, f_name = nextdepytxline.rstrip('\n').split(':', 2)[1:]
except:
print('* DePythonTeX error:')
print(' Improperly formatted file information on line ' + str(depy_linenum))
print(' The file information was "' + nextdepytxline + '"')
sys.exit(1)
- # Files that are brought in have an optional mode that
+ # Files that are brought in have an optional mode that
# determines if they need special handling (for example, verbatim)
if ':mode=' in f_name:
f_name, mode = f_name.split(':mode=')
@@ -1002,32 +1002,32 @@ for n, depytxline in enumerate(depytx):
code_replacement_mode = mode
if depy_type == 'cmd' and code_replacement_mode != 'verbatim':
# Usually, code from commands is typeset with commands
- # and code from environments is typeset in
- # environments. The except is code from commands
+ # and code from environments is typeset in
+ # environments. The except is code from commands
# that bring in external files, like `\inputpygments`
code_replacement = replacement
else:
# If we're replacing an environment of code with a
# file, then we lose the newline at the beginning
# of the environment, and need to get it back.
- code_replacement = '\n' + replacement
+ code_replacement = '\n' + replacement
elif typeset == 'p':
print_replacement_mode = mode
- print_replacement = replacement
+ print_replacement = replacement
else:
print('* DePythonTeX error:')
print(' Improper typesetting information for file information on line ' + str(depy_linenum))
print(' The file information was "' + nextdepytxline + '"')
sys.exit(1)
- # Increment the line in depytx to check for more information
+ # Increment the line in depytx to check for more information
# from LaTeX
scan_ahead_line += 1
if scan_ahead_line == len(depytx):
break
else:
nextdepytxline = depytx[scan_ahead_line]
-
-
+
+
# If the line we're looking for is within the range currently held by
# texcontent, do nothing. Otherwise, transfer content from tex
# to texout until we get to the line of tex that we're looking for
@@ -1039,12 +1039,12 @@ for n, depytxline in enumerate(depytx):
texlinenum += 1
texcontent = tex[texlinenum]
-
+
# Deal with arguments
- # All arguments are parsed and stored in a list variables, even if
- # they are not used, for completeness; this makes it easy to add
+ # All arguments are parsed and stored in a list variables, even if
+ # they are not used, for completeness; this makes it easy to add
# functionality
- # Start by splitting the current line into what comes before the
+ # Start by splitting the current line into what comes before the
# command or environment, and what is after it
if depy_type == 'cmd':
try:
@@ -1060,10 +1060,10 @@ for n, depytxline in enumerate(depytx):
print('* DePythonTeX error:')
print(' Could not find environment "' + depy_name + '" on line ' + str(depy_linenum))
sys.exit(1)
- # We won't need the content from before the command or environment
+ # We won't need the content from before the command or environment
# again, so we go ahead and store it
texout.append(before)
-
+
# Parse the arguments
# Create a list for storing the recovered arguments
arglist = list()
@@ -1075,7 +1075,7 @@ for n, depytxline in enumerate(depytx):
# Account for possible line breaks before end of arg
while ']' not in after:
texlinenum += 1
- after += tex[texlinenum]
+ after += tex[texlinenum]
optarg, after = after[1:].split(']', 1)
else:
if obeylines:
@@ -1084,7 +1084,7 @@ for n, depytxline in enumerate(depytx):
after = after.split('[', 1)[1]
while ']' not in after:
texlinenum += 1
- after += tex[texlinenum]
+ after += tex[texlinenum]
optarg, after = after.split(']', 1)
else:
optarg = None
@@ -1102,7 +1102,7 @@ for n, depytxline in enumerate(depytx):
after = after.split('[', 1)[1]
while ']' not in after:
texlinenum += 1
- after += tex[texlinenum]
+ after += tex[texlinenum]
optarg, after = after.split(']', 1)
else:
optarg = None
@@ -1141,8 +1141,8 @@ for n, depytxline in enumerate(depytx):
# Go through the argument character by character to find the
# closing brace.
# If possible, use a very simple approach
- if (r'\{' not in after and r'\}' not in after and
- r'\string' not in after and
+ if (r'\{' not in after and r'\}' not in after and
+ r'\string' not in after and
after.count('{') + 1 == after.count('}')):
pos = 0
lbraces = 1
@@ -1158,7 +1158,7 @@ for n, depytxline in enumerate(depytx):
if pos == len(after):
texlinenum += 1
after += tex[texlinenum]
- # If a simple parsing approach won't work, parse in much
+ # If a simple parsing approach won't work, parse in much
# greater depth
else:
pos = 0
@@ -1183,7 +1183,7 @@ for n, depytxline in enumerate(depytx):
# First, jump ahead to after `\string`
pos += 7 #+= len(r'\string')
# See if `\string` is followed by a regular macro
- # If so, jump past it; otherwise, figure out if a
+ # If so, jump past it; otherwise, figure out if a
# single-character macro, or just a single character, is next,
# and jump past it
standard_macro = match(r'\\[a-zA-Z]+', line[pos:])
@@ -1194,7 +1194,7 @@ for n, depytxline in enumerate(depytx):
else:
pos += 1
elif line[pos] == '\\':
- # If the current position is a backslash, figure out what
+ # If the current position is a backslash, figure out what
# macro is used, and jump past it
# The macro must either be a standard alphabetic macro,
# or a single-character macro
@@ -1239,8 +1239,8 @@ for n, depytxline in enumerate(depytx):
after += tex[texlinenum]
mainarg, after = after[1:].split(delim, 1)
arglist.append(mainarg)
-
-
+
+
# Do substitution, depending on what is required
# Need a variable for processed content to be added to texout
processed = None
@@ -1251,8 +1251,8 @@ for n, depytxline in enumerate(depytx):
if after.count('\n') < 2:
texlinenum += 1
after += tex[texlinenum]
- processed, texcontent = replace_code_cmd(depy_name, arglist,
- depy_linenum,
+ processed, texcontent = replace_code_cmd(depy_name, arglist,
+ depy_linenum,
code_replacement,
code_replacement_mode,
after, depy_lexer,
@@ -1291,7 +1291,7 @@ for n, depytxline in enumerate(depytx):
depy_linenum,
code_replacement,
code_replacement_mode,
- after, depy_lexer,
+ after, depy_lexer,
firstnumber)
elif depy_typeset == 'p' and print_replacement is not None:
if depy_type == 'cmd':
@@ -1300,7 +1300,7 @@ for n, depytxline in enumerate(depytx):
if after.count('\n') < 2:
texlinenum += 1
after += tex[texlinenum]
- processed, texcontent = replace_print_cmd(depy_name, arglist,
+ processed, texcontent = replace_print_cmd(depy_name, arglist,
depy_linenum,
print_replacement,
print_replacement_mode,
@@ -1320,9 +1320,9 @@ for n, depytxline in enumerate(depytx):
if after.count('\n') < 2:
texlinenum += 1
after += tex[texlinenum]
- processed, texcontent = replace_print_env(depy_name, arglist,
+ processed, texcontent = replace_print_env(depy_name, arglist,
depy_linenum,
- print_replacement,
+ print_replacement,
print_replacement_mode,
source,
after)
@@ -1346,8 +1346,8 @@ for n, depytxline in enumerate(depytx):
texcontent = after
# #### Once it's supported on the TeX side, need to add support for
# pc and cp
-
-
+
+
# Store any processed content
if processed is not None:
texout.append(processed)
diff --git a/Master/texmf-dist/scripts/pythontex/depythontex3.py b/Master/texmf-dist/scripts/pythontex/depythontex3.py
index 7fb97cce61d..567363eeaa0 100755
--- a/Master/texmf-dist/scripts/pythontex/depythontex3.py
+++ b/Master/texmf-dist/scripts/pythontex/depythontex3.py
@@ -4,50 +4,50 @@
'''
PythonTeX depythontex script.
-This script takes a LaTeX document that uses the PythonTeX package and
-creates a new document that does not depend on PythonTeX. It substitutes all
-externally generated content into a copy of the original LaTeX document.
-This is useful when you need a document that relies on few external packages
-or custom macros (for example, for submission to a journal or conversion to
+This script takes a LaTeX document that uses the PythonTeX package and
+creates a new document that does not depend on PythonTeX. It substitutes all
+externally generated content into a copy of the original LaTeX document.
+This is useful when you need a document that relies on few external packages
+or custom macros (for example, for submission to a journal or conversion to
another document format).
-If you just want to share a document that uses PythonTeX, keep in mind that
-the document can be modified and compiled just like a regular LaTeX document,
-without needing Python or any other external tools, so long as the following
+If you just want to share a document that uses PythonTeX, keep in mind that
+the document can be modified and compiled just like a regular LaTeX document,
+without needing Python or any other external tools, so long as the following
conditions are met:
* A copy of pythontex.sty is included with the document.
* The pythontex-files-<name> directory is included with the document.
* The PythonTeX-specific parts of the document are not modified.
-To work, this script requires that the original LaTeX document be compiled
-with the package option `depythontex`. That creates an auxiliary file with
+To work, this script requires that the original LaTeX document be compiled
+with the package option `depythontex`. That creates an auxiliary file with
the extension .depytx that contains information about all content that needs
to be substituted.
-This script is purposely written in a simple, largely linear form to
-facilitate customization. Most of the key substitutions are performed by a
+This script is purposely written in a simple, largely linear form to
+facilitate customization. Most of the key substitutions are performed by a
few functions defined near the beginning of the script, so if you need custom
-substitutions, you should begin there. By default, all typeset code is
-wrapped in `\verb` commands and verbatim environments, since these have the
-greatest generality. However, the command-line option --listing allows code
-to be typeset with the fancyvrb, listings, minted, or PythonTeX packages
+substitutions, you should begin there. By default, all typeset code is
+wrapped in `\verb` commands and verbatim environments, since these have the
+greatest generality. However, the command-line option --listing allows code
+to be typeset with the fancyvrb, listings, minted, or PythonTeX packages
instead.
-The script automatically extracts all arguments of all commands and
-environments that it replaces, so that these are available if desired for
-customized substitution. Two additional pieces of information are also
-available for any typeset code: the Pygments lexer (often the same as the
+The script automatically extracts all arguments of all commands and
+environments that it replaces, so that these are available if desired for
+customized substitution. Two additional pieces of information are also
+available for any typeset code: the Pygments lexer (often the same as the
language) and the starting line number (if line numbering was used).
Keep in mind that some manual adjustments may be required after a document is
-depythontex'ed. While depythontex attempts to create an exact copy of the
-original document, in many cases an identical copy is impossible. For
-example, typeset code may have a different appearance or layout when it is
+depythontex'ed. While depythontex attempts to create an exact copy of the
+original document, in many cases an identical copy is impossible. For
+example, typeset code may have a different appearance or layout when it is
typeset with a different package.
-Copyright (c) 2013-2014, Geoffrey M. Poore
+Copyright (c) 2013-2016, Geoffrey M. Poore
All rights reserved.
Licensed under the BSD 3-Clause License:
http://www.opensource.org/licenses/BSD-3-Clause
@@ -86,7 +86,7 @@ import codecs
# Script parameters
# Version
-__version__ = '0.14'
+__version__ = '0.15'
# Functions and parameters for customizing the script output
@@ -103,77 +103,77 @@ listing = None #'verbatim', 'fancyvrb', 'listings', 'minted', 'pythontex'
preamble_additions = list()
# Lexer dict
-# If you are using Pygments lexers that don't directly correspond to the
-# languages used by the listings package, you can submit replacements via the
-# command line option --lexer-dict, or edit this dict manually here. When
-# listings is used, all lexers are checked against this dict to see if a
-# substitution should be made. This approach could easily be modified to
+# If you are using Pygments lexers that don't directly correspond to the
+# languages used by the listings package, you can submit replacements via the
+# command line option --lexer-dict, or edit this dict manually here. When
+# listings is used, all lexers are checked against this dict to see if a
+# substitution should be made. This approach could easily be modified to
# work with another, non-Pygments highlighting package.
lexer_dict = dict()
-def replace_code_cmd(name, arglist, linenum, code_replacement,
+def replace_code_cmd(name, arglist, linenum, code_replacement,
code_replacement_mode, after, lexer, firstnumber):
'''
Typeset code from a command with a command.
-
+
It is only ever called if there is indeed code to typeset.
-
- Usually, code from a command is also typeset with a command. This
- function primarily deals with that case. In cases where code from a
+
+ Usually, code from a command is also typeset with a command. This
+ function primarily deals with that case. In cases where code from a
command is typeset with an environment (for example, `\inputpygments`),
- this function performs some preprocessing and then uses
+ this function performs some preprocessing and then uses
replace_code_env() to do the real work. This approach prevents the two
functions from unnecessarily duplicating each other, while still giving
the desired output.
-
+
Args:
name (str): name of the command
- arglist (list, of str/None): all arguments given to the original
- command; the last argument is what is typeset, unless a
+ arglist (list, of str/None): all arguments given to the original
+ command; the last argument is what is typeset, unless a
code_replacement is specified or other instructions are given
linenum (int): line number in the original TeX document
code_replacement (str/None): replacement for the code; usually None
- for commands, because typically the code to be typeset is the
- last argument passed to the command, rather than something
+ for commands, because typically the code to be typeset is the
+ last argument passed to the command, rather than something
captured elsewhere (like the body of an environment) or something
preprocessed (like a console environment's content)
- code_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX; generally unused for code),
+ code_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX; generally unused for code),
verb (inline), or verbatim (environment)
- after (str): text immediately following the command; usually
+ after (str): text immediately following the command; usually
shouldn't be needed
lexer (str/None): Pygments lexer
Returns:
(replacement, after) (tuple, of str)
-
+
'''
# Get the correct replacement
if code_replacement is None:
code_replacement = arglist[-1]
-
+
# We only consider two possible modes of typesetting, verbatim and inline
# verbatim
if code_replacement_mode == 'verbatim':
- # Sometimes we must replace a command with an environment, for
+ # Sometimes we must replace a command with an environment, for
# example, for `\inputpygments`
-
- # Make sure the introduction of an environment where a command was
- # previously won't produce errors with following content; make sure
+
+ # Make sure the introduction of an environment where a command was
+ # previously won't produce errors with following content; make sure
# that any following content is on a separate line
if bool(match('[ \t]*\S', after)):
after = '\n' + after
# Rather than duplicating much of replace_code_env(), just use it
- return replace_code_env(name, arglist, linenum, code_replacement,
+ return replace_code_env(name, arglist, linenum, code_replacement,
code_replacement_mode, after, lexer, firstnumber)
else:
# Usually, we're replacing a command with a command
-
+
# Wrap the replacement in appropriate delimiters
- if (listing in ('verbatim', 'fancyvrb', 'minted') or
- (listing in ('listings', 'pythontex') and
+ if (listing in ('verbatim', 'fancyvrb', 'minted') or
+ (listing in ('listings', 'pythontex') and
('{' in code_replacement or '}' in code_replacement))):
- for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
+ for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
'=', '+', '-', '^', '_', '?', ';'):
if delim not in code_replacement:
break
@@ -198,40 +198,40 @@ def replace_code_cmd(name, arglist, linenum, code_replacement,
else:
code_replacement = r'\pygment{' + lexer + '}' + code_replacement
return (code_replacement, after)
-
-def replace_code_env(name, arglist, linenum, code_replacement,
+
+def replace_code_env(name, arglist, linenum, code_replacement,
code_replacement_mode, after, lexer, firstnumber):
'''
Typeset code from an environment with an environment.
-
+
It is only ever called if there is indeed code to typeset.
-
+
Usually it is only used to typeset code from an environment. However,
some commands bring in code that must be typeset as an environment. In
- those cases, replace_code_cmd() is called initially, and after it
+ those cases, replace_code_cmd() is called initially, and after it
performs some preprocessing, this function is called. This approach
avoids unnecessary duplication between the two functions.
-
+
Args:
name (str): name of the environment
- arglist (list, of str/None): all arguments given to the original
+ arglist (list, of str/None): all arguments given to the original
environment
- linenum (int): line number in the original TeX document where
+ linenum (int): line number in the original TeX document where
the environment began
code_replacement (str): replacement for the code; unlike the case of
commands, this is always not None if the function is called
- code_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX; generally unused for code),
+ code_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX; generally unused for code),
verb (inline), or verbatim (environment)
- after (str): text immediately following the environment; usually
+ after (str): text immediately following the environment; usually
shouldn't be needed
lexer (str/None): Pygments lexer
firstnumber (str/None): the first number of the listing, if the listing
had numbered lines
Returns:
(replacement, after) (tuple, of str)
-
+
'''
# Currently, there is no need to test for code_replacement_mode, because
# this function is only ever called if the mode is 'verbatim'. That may
@@ -245,7 +245,7 @@ def replace_code_env(name, arglist, linenum, code_replacement,
pre = '\\begin{Verbatim}'
else:
pre = '\\begin{{Verbatim}}[numbers=left,firstnumber={0}]'.format(firstnumber)
- post = '\\end{Verbatim}'
+ post = '\\end{Verbatim}'
elif listing == 'listings':
if lexer is None:
if firstnumber is None:
@@ -300,26 +300,26 @@ def replace_print_cmd(name, arglist, linenum,
after):
'''
Typeset printed content from a command.
-
+
It is only ever called if there is indeed printed content to typeset.
-
+
Args:
name (str): name of the command
- arglist (list, of str/None): all arguments given to the original
+ arglist (list, of str/None): all arguments given to the original
command
linenum (int): line number in the original TeX document
print_replacement (str): printed content, read directly from file
into a single string
- print_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
+ print_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
or verb (as environment)
source (str/None): source of the replacement content
after (str): text immediately following the command; important in
some situations, because spacing can depend on what's next
Returns:
(replacement, after) (tuple, of str)
-
- '''
+
+ '''
if print_replacement_mode == 'verb':
if print_replacement.count('\n') > 1:
print('* DePythonTeX error:')
@@ -327,7 +327,7 @@ def replace_print_cmd(name, arglist, linenum,
print(' This is not possible in inline verbatim mode')
sys.exit(1)
print_replacement = print_replacement.rstrip('\n')
- for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
+ for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
'=', '+', '-', '^', '_', '?', ';'):
if delim not in print_replacement:
break
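
The loop above picks the first delimiter that does not occur in the content, so that the generated `\verb` cannot terminate early. A self-contained sketch of the same idea (the helper name and the fallback error are illustrative, not part of the script):

def wrap_verb(content, candidates='|/`!&#@:%~$=+-^_?;'):
    # Choose a delimiter that never appears in the content, then wrap it.
    for delim in candidates:
        if delim not in content:
            return r'\verb' + delim + content + delim
    # Every candidate appears in the content; extremely unlikely in practice.
    raise ValueError('no usable \\verb delimiter found')

# wrap_verb('x | y')  ->  '\\verb/x | y/'
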
@@ -335,71 +335,71 @@ def replace_print_cmd(name, arglist, linenum,
elif print_replacement_mode == 'verbatim':
if bool(match('\s*?\n', after)):
# Usually, we would end the verbatim environment with a newline.
- # This is fine if there is content in `after` before the next
+ # This is fine if there is content in `after` before the next
# newline---in fact, it's desirable, because the verbatim package
- # doesn't allow for content on the same line as the end of the
- # environment. But if `after` is an empty line, then adding a
+ # doesn't allow for content on the same line as the end of the
+ # environment. But if `after` is an empty line, then adding a
# newline will throw off spacing and must be avoided
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}'
else:
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}\n'
else:
- # When printed content from a file is included as LaTeX code, we have
- # to be particularly careful to ensure that the content produces the
- # same output when substituted as when brought in by `\input`. In
- # particular, `\input` strips newlines from each line of content and
+ # When printed content from a file is included as LaTeX code, we have
+ # to be particularly careful to ensure that the content produces the
+ # same output when substituted as when brought in by `\input`. In
+ # particular, `\input` strips newlines from each line of content and
# adds a space at the end of each line. This space is inside the
- # `\input`, so it will not merge with following spaces. So when we
- # substitute the content, sometimes we need to replace the final
+ # `\input`, so it will not merge with following spaces. So when we
+ # substitute the content, sometimes we need to replace the final
# newline with a space that cannot be gobbled.
#
- # It gets more complicated. This final space is often not
- # desirable. It can be prevented by either printing an `\endinput`
- # command, to terminate the `\input`, or printing a percent
+ # It gets more complicated. This final space is often not
+ # desirable. It can be prevented by either printing an `\endinput`
+ # command, to terminate the `\input`, or printing a percent
# character % in the last line of the content, which comments out the
- # final newline. So we must check for `\endinput` anywhere in
- # printed content, and % in the final line, and remove any content
- # after them. It's also possible that the print is followed by
+ # final newline. So we must check for `\endinput` anywhere in
+ # printed content, and % in the final line, and remove any content
+ # after them. It's also possible that the print is followed by
# an `\unskip` that eats the space, so we need to check for that too.
#
- # It turns out that the same approach is needed when a command like
+ # It turns out that the same approach is needed when a command like
# `\py` brings in content ending in a newline
- if (print_replacement.endswith('\\endinput\n') and
+ if (print_replacement.endswith('\\endinput\n') and
not print_replacement.endswith('\\string\\endinput\n')):
- # If `\endinput` is present, everything from it on should be
+ # If `\endinput` is present, everything from it on should be
# discarded, unless the `\endinput` is not actually a command
- # but rather a typeset name (for example, `\string\endinput` or
- # `\verb|\endinput|`). It's impossible to check for all cases in
- # which `\endinput` is not a command (at least, without actually
- # using LaTeX), and even checking for most of them would require
- # a good bit of parsing. We assume that `\endinput`, as a
- # command, will only ever occur at the immediate end of the
- # printed content. Later, we issue a warning in case it appears
+ # but rather a typeset name (for example, `\string\endinput` or
+ # `\verb|\endinput|`). It's impossible to check for all cases in
+ # which `\endinput` is not a command (at least, without actually
+ # using LaTeX), and even checking for most of them would require
+ # a good bit of parsing. We assume that `\endinput`, as a
+ # command, will only ever occur at the immediate end of the
+ # printed content. Later, we issue a warning in case it appears
# anywhere else.
print_replacement = print_replacement.rsplit(r'\endinput', 1)[0]
- elif (print_replacement.endswith('%\n') and
- not print_replacement.endswith('\\%\n') and
+ elif (print_replacement.endswith('%\n') and
+ not print_replacement.endswith('\\%\n') and
not print_replacement.endswith('\\string%\n')):
# Perform an analogous check for a terminating percent character %.
- # This case would be a bit easier to parse fully, since a percent
- # that comments out the last newline would have to be in the
- # final line of the replacement. But it would still be
- # very difficult to perform a complete check. Later, we issue a
- # warning if there is reason to think that a percent character
+ # This case would be a bit easier to parse fully, since a percent
+ # that comments out the last newline would have to be in the
+ # final line of the replacement. But it would still be
+ # very difficult to perform a complete check. Later, we issue a
+ # warning if there is reason to think that a percent character
# was active in the last line.
print_replacement = print_replacement.rsplit(r'%', 1)[0]
elif print_replacement.endswith('\n'):
# We can't just use `else` because that would catch content
# from `\py` and similar
- # By default, LaTeX strips newlines and adds a space at the end
- # of each line of content that is brought in by `\input`. This
- # may or may not be desirable, but we replicate the effect here
- # for consistency with the original document. We use `\space{}`
- # because plain `\space` would gobble a following space, which
+ # By default, LaTeX strips newlines and adds a space at the end
+ # of each line of content that is brought in by `\input`. This
+ # may or may not be desirable, but we replicate the effect here
+ # for consistency with the original document. We use `\space{}`
+ # because plain `\space` would gobble a following space, which
# isn't consistent with the `\input` behavior being replicated.
if bool(match(r'\\unskip\s+\S', after)):
- # If there's an `\unskip`, fix the spacing and remove the
- # `\unskip`. Since this is inline, the `\unskip` must
+ # If there's an `\unskip`, fix the spacing and remove the
+ # `\unskip`. Since this is inline, the `\unskip` must
# immediately follow the command to do any good; otherwise,
# it eliminates spaces that precede it, but doesn't get into
# the `\input` content.
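
The branches above boil down to three cases for content that was written for `\input`: a trailing `\endinput` cuts off everything after it, a trailing active `%` comments out the final newline, and otherwise the final newline becomes a non-gobbling space. A simplified standalone sketch of that decision (the real code additionally inspects the text that follows the command, which this helper ignores):

def finalize_input_replacement(text):
    # Decide how to treat the end of printed content when it is substituted
    # inline instead of being brought in by \input.
    if text.endswith('\\endinput\n') and not text.endswith('\\string\\endinput\n'):
        # \endinput terminates the \input early; drop it and anything after it.
        return text.rsplit(r'\endinput', 1)[0]
    if (text.endswith('%\n') and not text.endswith('\\%\n')
            and not text.endswith('\\string%\n')):
        # An active % in the last line comments out the final newline.
        return text.rsplit('%', 1)[0]
    if text.endswith('\n'):
        # \input would turn the final newline into a space; \space{} keeps
        # that space from being gobbled by whatever follows.
        return text[:-1] + '\\space{}'
    return text
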
@@ -410,9 +410,9 @@ def replace_print_cmd(name, arglist, linenum,
# the `\n`, and it will yield a space.
pass
elif bool(match('\s*$', after)):
- # If the rest of the current line, and the next line, are
+ # If the rest of the current line, and the next line, are
# whitespace, we will get the correct spacing without needing
- # `\space{}`. We could leave `\n`, but it would be
+ # `\space{}`. We could leave `\n`, but it would be
# extraneous whitespace.
print_replacement = print_replacement[:-1]
else:
@@ -431,7 +431,7 @@ def replace_print_cmd(name, arglist, linenum,
after = sub('^\s+', '\n', after)
# Issue warnings, if warranted
# Warn about `\endinput`
- if (r'\endinput' in print_replacement and
+ if (r'\endinput' in print_replacement and
print_replacement.count(r'\endinput') != print_replacement.count(r'\string\endinput')):
print('* DePythonTeX warning:')
print(' "\\endinput" was present in printed content near line ' + str(linenum))
@@ -441,7 +441,7 @@ def replace_print_cmd(name, arglist, linenum,
# Warn if it looks like there are active `%` that could comment
# out part of the original document. We only need to check the
# last line of printed content, because only there could
- # percent characters escape from their original confines within
+ # percent characters escape from their original confines within
# `\input`, and comment out part of the document.
if print_replacement.endswith('\n'):
if print_replacement.count('\n') > 1:
@@ -471,42 +471,42 @@ def replace_print_cmd(name, arglist, linenum,
print(' If it should have adjusted the spacing of printed content')
print(' you should double-check the spacing')
return (print_replacement, after)
-
+
def replace_print_env(name, arglist, linenum,
print_replacement, print_replacement_mode, source,
after):
'''
Typeset printed content from an environment.
-
+
It is only ever called if there is indeed printed content to typeset.
-
+
This should be similar to replace_print_cmd(). The main difference is
- that the environment context typically ends with a newline, so
+ that the environment context typically ends with a newline, so
substitution has to be a little different to ensure that spacing after
the environment isn't modified.
-
+
Args:
name (str): name of the environment
- arglist (list, of str/None): all arguments given to the original
+ arglist (list, of str/None): all arguments given to the original
environment
- linenum (int): line number in the original TeX document where the
+ linenum (int): line number in the original TeX document where the
environment began
print_replacement (str): printed content, read directly from file
into a single string
- print_replacement_mode (str/None): mode in which the replacement is
- to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
+ print_replacement_mode (str/None): mode in which the replacement is
+ to be typeset; raw/None (as TeX), inlineverb (or v) (as inline),
or verb (as environment)
source (str/None): source of the replacement content
after (str): text immediately following the command; important in
some situations, because spacing can depend on what's next
Returns:
(replacement, after) (tuple, of str)
-
+
#### The inlineverb and verb modes should work, but haven't been tested
since there are currently no environments that use them; they are only
used by `\printpythontex`, which is a command.
- '''
+ '''
if print_replacement_mode == 'verb':
if print_replacement.count('\n') > 1:
print('* DePythonTeX error:')
@@ -514,119 +514,119 @@ def replace_print_env(name, arglist, linenum,
print(' This is not possible in inline verbatim mode')
sys.exit(1)
print_replacement = print_replacement.rstrip('\n')
- for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
+ for delim in ('|', '/', '`', '!', '&', '#', '@', ':', '%', '~', '$',
'=', '+', '-', '^', '_', '?', ';'):
if delim not in print_replacement:
break
print_replacement = r'\verb' + delim + print_replacement + delim
if not bool(match('[ \t]+\S', after)):
- # If there is text on the same line as the end of the
- # environment, we're fine (this is unusual). Otherwise,
+ # If there is text on the same line as the end of the
+ # environment, we're fine (this is unusual). Otherwise,
# we need to toss the newline at the end of the environment
- # and gobble leading spaces. Leading spaces need to be
- # gobbled because previously they were at the beginning of a
+ # and gobble leading spaces. Leading spaces need to be
+ # gobbled because previously they were at the beginning of a
# line, where they would have been discarded.
if not bool(match('\s*$', after)):
after = sub('^\s*?\n\s*', '', after)
elif print_replacement_mode == 'verbatim':
if bool(match('\s*?\n', after)):
# Usually, we would end the verbatim environment with a newline.
- # This is fine if there is content in `after` before the next
+ # This is fine if there is content in `after` before the next
# newline---in fact, it's desirable, because the verbatim package
- # doesn't allow for content on the same line as the end of the
- # environment. But if `after` is an empty line, then adding a
+ # doesn't allow for content on the same line as the end of the
+ # environment. But if `after` is an empty line, then adding a
# newline will throw off spacing and must be avoided
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}'
else:
print_replacement = '\\begin{verbatim}\n' + print_replacement + '\\end{verbatim}\n'
else:
- # When printed content is included as LaTeX code, we have to be
- # particularly careful to ensure that the content produces the same
- # output when substituted as when brought in by `\input`. In
- # particular, `\input` strips newlines from each line of content and
+ # When printed content is included as LaTeX code, we have to be
+ # particularly careful to ensure that the content produces the same
+ # output when substituted as when brought in by `\input`. In
+ # particular, `\input` strips newlines from each line of content and
# adds a space at the end of each line. This space is inside the
- # `\input`, so it will not merge with following spaces. So when we
- # substitute the content, sometimes we need to replace the final
+ # `\input`, so it will not merge with following spaces. So when we
+ # substitute the content, sometimes we need to replace the final
# newline with a space that cannot be gobbled.
#
- # It gets more complicated. This final space is often not
- # desirable. It can be prevented by either printing an `\endinput`
- # command, to terminate the `\input`, or printing a percent
+ # It gets more complicated. This final space is often not
+ # desirable. It can be prevented by either printing an `\endinput`
+ # command, to terminate the `\input`, or printing a percent
# character % in the last line of the content, which comments out the
- # final newline. So we must check for `\endinput` anywhere in
- # printed content, and % in the final line, and remove any content
- # after them. It's also possible that the print is followed by
+ # final newline. So we must check for `\endinput` anywhere in
+ # printed content, and % in the final line, and remove any content
+ # after them. It's also possible that the print is followed by
# an `\unskip` that eats the space, so we need to check for that too.
- if (print_replacement.endswith('\\endinput\n') and
+ if (print_replacement.endswith('\\endinput\n') and
not print_replacement.endswith('\\string\\endinput\n')):
- # If `\endinput` is present, everything from it on should be
+ # If `\endinput` is present, everything from it on should be
# discarded, unless the `\endinput` is not actually a command
- # but rather a typeset name (for example, `\string\endinput` or
- # `\verb|\endinput|`). It's impossible to check for all cases in
- # which `\endinput` is not a command (at least, without actually
- # using LaTeX), and even checking for most of them would require
- # a good bit of parsing. We assume that `\endinput`, as a
- # command, will only ever occur at the immediate end of the
- # printed content. Later, we issue a warning in case it appears
+ # but rather a typeset name (for example, `\string\endinput` or
+ # `\verb|\endinput|`). It's impossible to check for all cases in
+ # which `\endinput` is not a command (at least, without actually
+ # using LaTeX), and even checking for most of them would require
+ # a good bit of parsing. We assume that `\endinput`, as a
+ # command, will only ever occur at the immediate end of the
+ # printed content. Later, we issue a warning in case it appears
# anywhere else.
print_replacement = print_replacement.rsplit(r'\endinput', 1)[0]
if not bool(match('[ \t]+\S', after)):
- # If there is text on the same line as the end of the
- # environment, we're fine (this is unusual). Otherwise,
+ # If there is text on the same line as the end of the
+ # environment, we're fine (this is unusual). Otherwise,
# we need to toss the newline at the end of the environment
- # and gobble leading spaces. Leading spaces need to be
- # gobbled because previously they were at the beginning of a
+ # and gobble leading spaces. Leading spaces need to be
+ # gobbled because previously they were at the beginning of a
# line, where they would have been discarded.
if not bool(match('\s*$', after)):
after = sub('^\s*?\n\s*', '', after)
- elif (print_replacement.endswith('%\n') and
- not print_replacement.endswith('\\%\n') and
+ elif (print_replacement.endswith('%\n') and
+ not print_replacement.endswith('\\%\n') and
not print_replacement.endswith('\\string%\n')):
# Perform an analogous check for a terminating percent character %.
- # This case would be a bit easier to parse fully, since a percent
- # that comments out the last newline would have to be in the
- # final line of the replacement. But it would still be
- # very difficult to perform a complete check. Later, we issue a
- # warning if there is reason to think that a percent character
+ # This case would be a bit easier to parse fully, since a percent
+ # that comments out the last newline would have to be in the
+ # final line of the replacement. But it would still be
+ # very difficult to perform a complete check. Later, we issue a
+ # warning if there is reason to think that a percent character
# was active in the last line.
print_replacement = print_replacement.rsplit(r'%', 1)[0]
if not bool(match('[ \t]+\S', after)):
- # If there is text on the same line as the end of the
- # environment, we're fine (this is unusual). Otherwise,
+ # If there is text on the same line as the end of the
+ # environment, we're fine (this is unusual). Otherwise,
# we need to toss the newline at the end of the environment
- # and gobble leading spaces. Leading spaces need to be
- # gobbled because previously they were at the beginning of a
+ # and gobble leading spaces. Leading spaces need to be
+ # gobbled because previously they were at the beginning of a
# line, where they would have been discarded.
if not bool(match('\s*$', after)):
after = sub('^\s*?\n\s*', '', after)
else:
- # By default, LaTeX strips newlines and adds a space at the end
- # of each line of content that is brought in by `\input`. This
- # may or may not be desirable, but we replicate the effect here
- # for consistency with the original document. We use `\space{}`
- # because plain `\space` would gobble a following space, which
+ # By default, LaTeX strips newlines and adds a space at the end
+ # of each line of content that is brought in by `\input`. This
+ # may or may not be desirable, but we replicate the effect here
+ # for consistency with the original document. We use `\space{}`
+ # because plain `\space` would gobble a following space, which
# isn't consistent with the `\input` behavior being replicated.
if bool(match(r'\s*\\unskip\s+\S', after)):
- # If there's an `\unskip`, fix the spacing and remove the
+ # If there's an `\unskip`, fix the spacing and remove the
# `\unskip`
print_replacement = print_replacement.rstrip(' \t\n')
after = sub(r'^\s*\\unskip\s+', '', after)
elif bool(match('[ \t]+\S', after)):
- # If the next character after the end of the environment is
+ # If the next character after the end of the environment is
# not whitespace (usually not allowed), we can just leave
- # the `\n` in printed content, and it will yield a space.
- # So we need do nothing. But if there is text on that line
+ # the `\n` in printed content, and it will yield a space.
+ # So we need do nothing. But if there is text on that line
# we need `\space{}`.
after = sub('^\s+', '\\space', after)
forced_double_space_list.append((name, linenum))
else:
# If the line at the end of the environment is blank,
- # we can just discard it and keep the newline at the end of
+ # we can just discard it and keep the newline at the end of
# the printed content; the newline gives us the needed space
after = after.split('\n', 1)[1]
# Issue warnings, if warranted
# Warn about `\endinput`
- if (r'\endinput' in print_replacement and
+ if (r'\endinput' in print_replacement and
print_replacement.count(r'\endinput') != print_replacement.count(r'\string\endinput')):
print('* DePythonTeX warning:')
print(' "\\endinput" was present in printed content near line ' + str(linenum))
@@ -636,7 +636,7 @@ def replace_print_env(name, arglist, linenum,
# Warn if it looks like there are active `%` that could comment
# out part of the original document. We only need to check the
# last line of printed content, because only there could
- # percent characters escape from their original confines within
+ # percent characters escape from their original confines within
# `\input`, and comment out part of the document.
if print_replacement.endswith('\n'):
if print_replacement.count('\n') > 1:
@@ -673,9 +673,9 @@ def replace_print_env(name, arglist, linenum,
# Deal with argv
# Parse argv
parser = argparse.ArgumentParser()
-parser.add_argument('--version', action='version',
+parser.add_argument('--version', action='version',
version='DePythonTeX {0}'.format(__version__))
-parser.add_argument('--encoding', default='utf-8',
+parser.add_argument('--encoding', default='utf-8',
help='encoding for all text files (see codecs module for encodings)')
parser.add_argument('--overwrite', default=False, action='store_true',
help='overwrite existing output, if it exists (off by default)')
@@ -755,7 +755,7 @@ if args.output is not None:
ans = input(' Do you want to overwrite this file? [y,n]\n ')
if ans != 'y':
sys.exit(1)
-# Make sure the .depytx file exists
+# Make sure the .depytx file exists
depytxfile_name = texfile_name.rsplit('.')[0] + '.depytx'
if not os.path.isfile(depytxfile_name):
print('* DePythonTeX error:')
@@ -768,7 +768,7 @@ if not os.path.isfile(depytxfile_name):
# Start opening files and loading data
# Read in the LaTeX file
-# We read into a list with an empty first entry, so that we don't have to
+# We read into a list with an empty first entry, so that we don't have to
# worry about zero indexing when comparing list index to file line number
f = open(texfile_name, 'r', encoding=encoding)
tex = ['']
@@ -798,7 +798,7 @@ if settings['version'] != __version__:
print(' Do a complete compile cycle to update the auxiliary file')
print(' Attempting to proceed')
# Go ahead and open the outfile, even though we don't need it until the end
-# This lets us change working directories for convenience without worrying
+# This lets us change working directories for convenience without worrying
# about having to modify the outfile path
if args.output is not None:
outfile = open(outfile_name, 'w', encoding=encoding)
@@ -807,15 +807,15 @@ if args.output is not None:
# Change working directory to the document directory
-# Technically, we could get by without this, but that would require a lot of
-# path modification. This way, we can just use all paths straight out of the
+# Technically, we could get by without this, but that would require a lot of
+# path modification. This way, we can just use all paths straight out of the
# .depytx without any modification, which is much simpler and less error-prone.
if os.path.split(texfile_name)[0] != '':
os.chdir(os.path.split(texfile_name)[0])
-
+
# Open and process the file of macros
# Read in the macros
if os.path.isfile(os.path.expanduser(os.path.normcase(settings['macrofile']))):
@@ -830,7 +830,7 @@ else:
sys.exit(1)
# Create a dict for storing macros
macrodict = defaultdict(list)
-# Create variables for keeping track of whether we're inside a macro or
+# Create variables for keeping track of whether we're inside a macro or
# environment
# These must exist before we begin processing
inside_macro = False
@@ -842,13 +842,13 @@ for line in macros:
if inside_macro:
# If we're in a macro, look for the end-of-macro command
if r'\endpytx@SVMCR' in line:
- # If the current line contains the end-of-macro command, split
- # off any content that comes before it. Also reset
+ # If the current line contains the end-of-macro command, split
+ # off any content that comes before it. Also reset
# `inside_macro`.
macrodict[current_macro].append(line.rsplit(r'\endpytx@SVMCR', 1)[0])
inside_macro = False
else:
- # If the current line doesn't end the macro, we add the whole
+ # If the current line doesn't end the macro, we add the whole
# line to the macro dict
macrodict[current_macro].append(line)
elif inside_environment:
@@ -856,7 +856,7 @@ for line in macros:
# If the environment is ending, we reset inside_environment
inside_environment = False
else:
- # If we're still in the environment, add the current line to the
+ # If we're still in the environment, add the current line to the
# macro dict
macrodict[current_macro].append(line)
else:
@@ -865,10 +865,10 @@ for line in macros:
# file to increase readability). Once we've determined which one,
# we need to get its name and extract any content.
if line.startswith(r'\begin{'):
- # Any \begin will indicate a use of fancyvrb to save verbatim
- # content, since that is the only time an environment is used in
+ # Any \begin will indicate a use of fancyvrb to save verbatim
+ # content, since that is the only time an environment is used in
# the macro file. All other content is saved in a standard macro.
- # We extract the name of the macro in which the verbatim content
+ # We extract the name of the macro in which the verbatim content
# is saved.
current_macro = line.rsplit('{', 1)[1].rstrip('}\n')
inside_environment = True
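
The macro-file loop above is a small two-state scanner that accumulates lines into a dict keyed by macro name. A generic version of the same pattern is sketched below; the begin/end marker strings are parameters here, standing in for the actual `\pytx@SVMCR`/fancyvrb markers, whose exact forms are not all shown in this hunk:

from collections import defaultdict

def collect_blocks(lines, begin_marker, end_marker):
    # Accumulate the lines between begin_marker{name} and end_marker into
    # a dict keyed by name, mirroring the macrodict construction above.
    blocks = defaultdict(list)
    current = None
    for line in lines:
        if current is not None:
            if line.startswith(end_marker):
                current = None
            else:
                blocks[current].append(line)
        elif line.startswith(begin_marker):
            current = line.rsplit('{', 1)[1].rstrip('}\n')
    return blocks
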
@@ -894,8 +894,8 @@ for line in macros:
# Start at 1, since the first entry in the tex list is `''`
texlinenum = 1
# Create a variable for storing the current line(s) we are processing.
-# This contains all lines from immediately after the last successfully
-# processed line up to and including texlinenum. We may have to process
+# This contains all lines from immediately after the last successfully
+# processed line up to and including texlinenum. We may have to process
# multiple lines at once if a macro is split over multiple lines, etc.
texcontent = tex[texlinenum]
# Create a list for storing processed content.
@@ -908,21 +908,21 @@ for n, depytxline in enumerate(depytx):
depy_type, depy_name, depy_args, depy_typeset, depy_linenum, depy_lexer = depytxcontent.split(':')
if depy_lexer == '':
depy_lexer = None
-
+
# Do a quick check on validity of info
# #### Eventually add 'cp' and 'pc'
if not (depy_type in ('cmd', 'env') and
all([letter in ('o', 'm', 'v', 'n', '|') for letter in depy_args]) and
('|' not in depy_args or (depy_args.count('|') == 1 and depy_args.endswith('|'))) and
- depy_typeset in ('c', 'p', 'n')):
+ depy_typeset in ('c', 'p', 'n')):
print('* PythonTeX error:')
print(' Invalid \\Depythontex string for operation on line ' + str(depy_linenum))
print(' The offending string was ' + depytxcontent)
sys.exit(1)
- # If depy_args contains a `|` to indicate `\obeylines`, strip it and
- # store in a variable. Create a bool to keep track of obeylines
- # status, which governs whether we can look on the next line for
- # arguments. (If obeylines is active, a newline terminates the
+ # If depy_args contains a `|` to indicate `\obeylines`, strip it and
+ # store in a variable. Create a bool to keep track of obeylines
+ # status, which governs whether we can look on the next line for
+ # arguments. (If obeylines is active, a newline terminates the
# argument search.)
if depy_args.endswith('|'):
obeylines = True
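
For reference, the record parsed above is a single colon-delimited string whose six fields were unpacked a few lines earlier. A minimal parse of a hypothetical record (the field values below are invented for illustration):

# Hypothetical .depytx record: a command named 'py' with one mandatory
# argument, typeset as printed content, from line 42, with no lexer.
record = 'cmd:py:m:p:42:'
depy_type, depy_name, depy_args, depy_typeset, depy_linenum, depy_lexer = record.split(':')
depy_lexer = depy_lexer or None      # empty field means no lexer
obeylines = depy_args.endswith('|')  # a trailing '|' flags \obeylines
depy_args = depy_args.rstrip('|')
depy_linenum = int(depy_linenum)
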
@@ -932,8 +932,8 @@ for n, depytxline in enumerate(depytx):
# Get the line number as an integer
# We don't have to adjust for zero indexing in tex
depy_linenum = int(depy_linenum)
-
-
+
+
# Check for information passed from LaTeX
# This will be extra listings information, or replacements to plug in
code_replacement = None
@@ -983,13 +983,13 @@ for n, depytxline in enumerate(depytx):
elif nextdepytxline.startswith('FILE:'):
source = 'file'
try:
- typeset, f_name = nextdepytxline.rstrip('\n').split(':', 2)[1:]
+ typeset, f_name = nextdepytxline.rstrip('\n').split(':', 2)[1:]
except:
print('* DePythonTeX error:')
print(' Improperly formatted file information on line ' + str(depy_linenum))
print(' The file information was "' + nextdepytxline + '"')
sys.exit(1)
- # Files that are brought in have an optional mode that
+ # Files that are brought in have an optional mode that
# determines if they need special handling (for example, verbatim)
if ':mode=' in f_name:
f_name, mode = f_name.split(':mode=')
@@ -1002,32 +1002,32 @@ for n, depytxline in enumerate(depytx):
code_replacement_mode = mode
if depy_type == 'cmd' and code_replacement_mode != 'verbatim':
# Usually, code from commands is typeset with commands
- # and code from environments is typeset in
- # environments. The except is code from commands
+ # and code from environments is typeset in
+ # environments. The exception is code from commands
# that bring in external files, like `\inputpygments`
code_replacement = replacement
else:
# If we're replacing an environment of code with a
# file, then we lose the newline at the beginning
# of the environment, and need to get it back.
- code_replacement = '\n' + replacement
+ code_replacement = '\n' + replacement
elif typeset == 'p':
print_replacement_mode = mode
- print_replacement = replacement
+ print_replacement = replacement
else:
print('* DePythonTeX error:')
print(' Improper typesetting information for file information on line ' + str(depy_linenum))
print(' The file information was "' + nextdepytxline + '"')
sys.exit(1)
- # Increment the line in depytx to check for more information
+ # Increment the line in depytx to check for more information
# from LaTeX
scan_ahead_line += 1
if scan_ahead_line == len(depytx):
break
else:
nextdepytxline = depytx[scan_ahead_line]
-
-
+
+
# If the line we're looking for is within the range currently held by
# texcontent, do nothing. Otherwise, transfer content from tex
# to texout until we get to the line of tex that we're looking for
@@ -1039,12 +1039,12 @@ for n, depytxline in enumerate(depytx):
texlinenum += 1
texcontent = tex[texlinenum]
-
+
# Deal with arguments
- # All arguments are parsed and stored in a list variables, even if
- # they are not used, for completeness; this makes it easy to add
+ # All arguments are parsed and stored in a list variable, even if
+ # they are not used, for completeness; this makes it easy to add
# functionality
- # Start by splitting the current line into what comes before the
+ # Start by splitting the current line into what comes before the
# command or environment, and what is after it
if depy_type == 'cmd':
try:
@@ -1060,10 +1060,10 @@ for n, depytxline in enumerate(depytx):
print('* DePythonTeX error:')
print(' Could not find environment "' + depy_name + '" on line ' + str(depy_linenum))
sys.exit(1)
- # We won't need the content from before the command or environment
+ # We won't need the content from before the command or environment
# again, so we go ahead and store it
texout.append(before)
-
+
# Parse the arguments
# Create a list for storing the recovered arguments
arglist = list()
@@ -1075,7 +1075,7 @@ for n, depytxline in enumerate(depytx):
# Account for possible line breaks before end of arg
while ']' not in after:
texlinenum += 1
- after += tex[texlinenum]
+ after += tex[texlinenum]
optarg, after = after[1:].split(']', 1)
else:
if obeylines:
@@ -1084,7 +1084,7 @@ for n, depytxline in enumerate(depytx):
after = after.split('[', 1)[1]
while ']' not in after:
texlinenum += 1
- after += tex[texlinenum]
+ after += tex[texlinenum]
optarg, after = after.split(']', 1)
else:
optarg = None
@@ -1102,7 +1102,7 @@ for n, depytxline in enumerate(depytx):
after = after.split('[', 1)[1]
while ']' not in after:
texlinenum += 1
- after += tex[texlinenum]
+ after += tex[texlinenum]
optarg, after = after.split(']', 1)
else:
optarg = None
@@ -1141,8 +1141,8 @@ for n, depytxline in enumerate(depytx):
# Go through the argument character by character to find the
# closing brace.
# If possible, use a very simple approach
- if (r'\{' not in after and r'\}' not in after and
- r'\string' not in after and
+ if (r'\{' not in after and r'\}' not in after and
+ r'\string' not in after and
after.count('{') + 1 == after.count('}')):
pos = 0
lbraces = 1
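
The "simple approach" above is plain brace counting over the remaining text. A self-contained version of that scan (simplified: it ignores the escaped-brace and `\string` cases that push the script into the deeper parser):

def find_matching_brace(text):
    # Index of the '}' closing an argument whose opening '{' has already
    # been consumed; -1 if the braces never balance.
    depth = 1
    for pos, char in enumerate(text):
        if char == '{':
            depth += 1
        elif char == '}':
            depth -= 1
            if depth == 0:
                return pos
    return -1

# find_matching_brace('a{b}c} rest')  ->  5
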
@@ -1158,7 +1158,7 @@ for n, depytxline in enumerate(depytx):
if pos == len(after):
texlinenum += 1
after += tex[texlinenum]
- # If a simple parsing approach won't work, parse in much
+ # If a simple parsing approach won't work, parse in much
# greater depth
else:
pos = 0
@@ -1183,7 +1183,7 @@ for n, depytxline in enumerate(depytx):
# First, jump ahead to after `\string`
pos += 7 #+= len(r'\string')
# See if `\string` is followed by a regular macro
- # If so, jump past it; otherwise, figure out if a
+ # If so, jump past it; otherwise, figure out if a
# single-character macro, or just a single character, is next,
# and jump past it
standard_macro = match(r'\\[a-zA-Z]+', line[pos:])
@@ -1194,7 +1194,7 @@ for n, depytxline in enumerate(depytx):
else:
pos += 1
elif line[pos] == '\\':
- # If the current position is a backslash, figure out what
+ # If the current position is a backslash, figure out what
# macro is used, and jump past it
# The macro must either be a standard alphabetic macro,
# or a single-character macro
@@ -1239,8 +1239,8 @@ for n, depytxline in enumerate(depytx):
after += tex[texlinenum]
mainarg, after = after[1:].split(delim, 1)
arglist.append(mainarg)
-
-
+
+
# Do substitution, depending on what is required
# Need a variable for processed content to be added to texout
processed = None
@@ -1251,8 +1251,8 @@ for n, depytxline in enumerate(depytx):
if after.count('\n') < 2:
texlinenum += 1
after += tex[texlinenum]
- processed, texcontent = replace_code_cmd(depy_name, arglist,
- depy_linenum,
+ processed, texcontent = replace_code_cmd(depy_name, arglist,
+ depy_linenum,
code_replacement,
code_replacement_mode,
after, depy_lexer,
@@ -1291,7 +1291,7 @@ for n, depytxline in enumerate(depytx):
depy_linenum,
code_replacement,
code_replacement_mode,
- after, depy_lexer,
+ after, depy_lexer,
firstnumber)
elif depy_typeset == 'p' and print_replacement is not None:
if depy_type == 'cmd':
@@ -1300,7 +1300,7 @@ for n, depytxline in enumerate(depytx):
if after.count('\n') < 2:
texlinenum += 1
after += tex[texlinenum]
- processed, texcontent = replace_print_cmd(depy_name, arglist,
+ processed, texcontent = replace_print_cmd(depy_name, arglist,
depy_linenum,
print_replacement,
print_replacement_mode,
@@ -1320,9 +1320,9 @@ for n, depytxline in enumerate(depytx):
if after.count('\n') < 2:
texlinenum += 1
after += tex[texlinenum]
- processed, texcontent = replace_print_env(depy_name, arglist,
+ processed, texcontent = replace_print_env(depy_name, arglist,
depy_linenum,
- print_replacement,
+ print_replacement,
print_replacement_mode,
source,
after)
@@ -1346,8 +1346,8 @@ for n, depytxline in enumerate(depytx):
texcontent = after
# #### Once it's supported on the TeX side, need to add support for
# pc and cp
-
-
+
+
# Store any processed content
if processed is not None:
texout.append(processed)
diff --git a/Master/texmf-dist/scripts/pythontex/pythontex.py b/Master/texmf-dist/scripts/pythontex/pythontex.py
index 68814f54884..0c6e5c70bb1 100755
--- a/Master/texmf-dist/scripts/pythontex/pythontex.py
+++ b/Master/texmf-dist/scripts/pythontex/pythontex.py
@@ -3,20 +3,20 @@
'''
This is the PythonTeX wrapper script. It automatically detects the version
-of Python, and then imports the correct code from pythontex2.py or
-pythontex3.py. It is intended for use with the default Python installation
-on your system. If you wish to use a different version of Python, you could
-launch pythontex2.py or pythontex3.py directly. You should also consider the
+of Python, and then imports the correct code from pythontex2.py or
+pythontex3.py. It is intended for use with the default Python installation
+on your system. If you wish to use a different version of Python, you could
+launch pythontex2.py or pythontex3.py directly. You should also consider the
command-line option `--interpreter`. This allows you to specify the command
-that is actually used to execute the code from your LaTeX documents. Except
-for Python console content, it doesn't matter which version of Python is used
+that is actually used to execute the code from your LaTeX documents. Except
+for Python console content, it doesn't matter which version of Python is used
to launch pythontex.py; pythontex.py just manages the execution of code from
-your LaTeX document. The interpreter setting is what determines the version
+your LaTeX document. The interpreter setting is what determines the version
under which your code is actually executed.
Licensed under the BSD 3-Clause License:
-Copyright (c) 2012-2014, Geoffrey M. Poore
+Copyright (c) 2012-2016, Geoffrey M. Poore
All rights reserved.
@@ -55,8 +55,8 @@ elif sys.version_info.major == 3:
import pythontex3 as pythontex
else:
sys.exit('PythonTeX requires Python 3.2+; you are using 3.{0}'.format(sys.version_info.minor))
-
-# The "if" statement is needed for multiprocessing under Windows; see the
+
+# The "if" statement is needed for multiprocessing under Windows; see the
# multiprocessing documentation.
if __name__ == '__main__':
pythontex.main()
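
Condensed, the wrapper's dispatch is a check on `sys.version_info` followed by an aliased import (the minor-version checks and exact error messages from the full script are omitted here):

import sys

if sys.version_info.major == 2:
    import pythontex2 as pythontex
elif sys.version_info.major == 3:
    import pythontex3 as pythontex
else:
    sys.exit('Unsupported Python version')

if __name__ == '__main__':
    # The guard is required for multiprocessing under Windows
    pythontex.main()
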
diff --git a/Master/texmf-dist/scripts/pythontex/pythontex2.py b/Master/texmf-dist/scripts/pythontex/pythontex2.py
index f0d509f0d7b..519b36f3066 100755
--- a/Master/texmf-dist/scripts/pythontex/pythontex2.py
+++ b/Master/texmf-dist/scripts/pythontex/pythontex2.py
@@ -4,16 +4,16 @@
'''
This is the main PythonTeX script. It should be launched via pythontex.py.
-Two versions of this script are provided. One, with name ending in "2", runs
+Two versions of this script are provided. One, with name ending in "2", runs
under Python 2.7. The other, with name ending in "3", runs under Python 3.2+.
-This script needs to be able to import pythontex_engines.py; in general it
+This script needs to be able to import pythontex_engines.py; in general it
should be in the same directory.
Licensed under the BSD 3-Clause License:
-Copyright (c) 2012-2014, Geoffrey M. Poore
+Copyright (c) 2012-2016, Geoffrey M. Poore
All rights reserved.
@@ -77,7 +77,7 @@ else:
# Script parameters
# Version
-__version__ = '0.14'
+__version__ = '0.15'
@@ -86,7 +86,7 @@ class Pytxcode(object):
def __init__(self, data, gobble):
self.delims, self.code = data.split('#\n', 1)
self.family, self.session, self.restart, self.instance, self.command, self.context, self.args_run, self.args_prettyprint, self.input_file, self.line = self.delims.split('#')
- self.instance_int = int(self.instance)
+ self.instance_int = int(self.instance)
self.line_int = int(self.line)
self.key_run = self.family + '#' + self.session + '#' + self.restart
self.key_typeset = self.key_run + '#' + self.instance
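
Each chunk handed to `Pytxcode` is a '#'-delimited header terminated by '#\n', followed by the code itself. A toy record for illustration (the field values are made up; the field order is the one unpacked above):

data = ('py#default#default#0#block#ctx#args#pargs#doc.tex#10#\n'
        'print(1 + 1)\n')
delims, code = data.split('#\n', 1)
family, session, restart, instance = delims.split('#')[:4]
key_run = family + '#' + session + '#' + restart   # 'py#default#default'
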
@@ -118,41 +118,43 @@ class Pytxcode(object):
self.is_typeset = False
else:
self.is_typeset = True
-
+
if gobble == 'auto':
self.code = textwrap.dedent(self.code)
-
+
+ self.sub_template = None
+
def process_argv(data, temp_data):
'''
Process command line options using the argparse module.
-
+
Most options are passed via the file of code, rather than via the command
line.
'''
-
+
# Create a command line argument parser
parser = argparse.ArgumentParser()
parser.add_argument('TEXNAME',
help='LaTeX file, with or without .tex extension')
- parser.add_argument('--version', action='version',
- version='PythonTeX {0}'.format(data['version']))
- parser.add_argument('--encoding', default='UTF-8',
+ parser.add_argument('--version', action='version',
+ version='PythonTeX {0}'.format(data['version']))
+ parser.add_argument('--encoding', default='UTF-8',
help='encoding for all text files (see codecs module for encodings)')
- parser.add_argument('--error-exit-code', default='true',
- choices=('true', 'false'),
+ parser.add_argument('--error-exit-code', default='true',
+ choices=('true', 'false'),
help='return exit code of 1 if there are errors (not desirable with some TeX editors and workflows)')
group_run = parser.add_mutually_exclusive_group()
group_run.add_argument('--runall', nargs='?', default='false',
const='true', choices=('true', 'false'),
help='run ALL code; equivalent to package option')
- group_run.add_argument('--rerun', default='errors',
+ group_run.add_argument('--rerun', default='errors',
choices=('never', 'modified', 'errors', 'warnings', 'always'),
help='set conditions for rerunning code; equivalent to package option')
- parser.add_argument('--hashdependencies', nargs='?', default='false',
- const='true', choices=('true', 'false'),
+ parser.add_argument('--hashdependencies', nargs='?', default='false',
+ const='true', choices=('true', 'false'),
help='hash dependencies (such as external data) to check for modification, rather than using mtime; equivalent to package option')
parser.add_argument('-j', '--jobs', metavar='N', default=None, type=int,
help='Allow N jobs at once; defaults to cpu_count().')
@@ -160,17 +162,17 @@ def process_argv(data, temp_data):
help='verbose output')
parser.add_argument('--interpreter', default=None, help='set a custom interpreter; argument should be in the form "<interpreter>:<command>, <interp>:<cmd>, ..." where <interpreter> is "python", "ruby", etc., and <command> is the command for invoking the interpreter; argument may also be in the form of a Python dictionary')
group_debug = parser.add_mutually_exclusive_group()
- group_debug.add_argument('--debug', nargs='?', default=None,
+ group_debug.add_argument('--debug', nargs='?', default=None,
const='default',
metavar='<family>:<session>:<restart>',
help='Run the specified session (or default session) with the default debugger, if available. If there is only one session, it need not be specified. If the session name is unambiguous, it is sufficient. The full <family>:<session>:<restart> (for example, py:default:default) is only needed when the session name alone would be ambiguous.')
- group_debug.add_argument('--interactive', nargs='?', default=None,
+ group_debug.add_argument('--interactive', nargs='?', default=None,
const='default',
metavar='<family>:<session>:<restart>',
help='Run the specified session (or default session) in interactive mode. If there is only one session, it need not be specified. If the session name is unambiguous, it is sufficient. The full <family>:<session>:<restart> (for example, py:default:default) is only needed when the session name alone would be ambiguous.')
args = parser.parse_args()
-
- # Store the parsed argv in data and temp_data
+
+ # Store the parsed argv in data and temp_data
data['encoding'] = args.encoding
if args.error_exit_code == 'true':
temp_data['error_exit_code'] = True
@@ -205,7 +207,7 @@ def process_argv(data, temp_data):
for interp in interp_list:
if interp:
try:
- k, v = interp.split(':')
+ k, v = interp.split(':', 1)
k = k.strip(' \'"')
v = v.strip(' \'"')
interpreter_dict[k] = v
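
The switch to `split(':', 1)` above lets interpreter commands that themselves contain colons (a full Windows path, say) pass through intact. A quick illustration with a hypothetical --interpreter value:

arg = 'python:C:\\Python27\\python.exe, ruby:ruby'   # hypothetical --interpreter value
interpreter_dict = {}
for interp in arg.split(','):
    interp = interp.strip()
    if interp:
        k, v = interp.split(':', 1)   # maxsplit=1: the drive-letter colon survives
        interpreter_dict[k.strip(' \'"')] = v.strip(' \'"')
# interpreter_dict -> {'python': 'C:\\Python27\\python.exe', 'ruby': 'ruby'}
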
@@ -215,7 +217,7 @@ def process_argv(data, temp_data):
print('Invalid --interpreter argument')
return sys.exit(2)
# If the Python interpreter wasn't set, then try to set an appropriate
- # default value, based on how PythonTeX was launched (pythontex.py,
+ # default value, based on how PythonTeX was launched (pythontex.py,
# pythontex2.py, or pythontex3.py).
if not set_python_interpreter:
if temp_data['python'] == 2:
@@ -230,12 +232,12 @@ def process_argv(data, temp_data):
directly. This should only be done when you want
to use Python version {0}, but have a different
version installed as the default. (Otherwise, you
- should start PythonTeX with pythontex.py.) For
+ should start PythonTeX with pythontex.py.) For
this to work correctly, you should install Python
version 3.3+, which has a Windows wrapper (py) that
- PythonTeX can use to run the correct version of
+ PythonTeX can use to run the correct version of
Python. If you do not want to install Python 3.3+,
- you can also use the --interpreter command-line
+ you can also use the --interpreter command-line
option to tell PythonTeX how to access the version
of Python you wish to use.
'''.format(temp_data['python'])
@@ -255,12 +257,12 @@ def process_argv(data, temp_data):
directly. This should only be done when you want
to use Python version {0}, but have a different
version installed as the default. (Otherwise, you
- should start PythonTeX with pythontex.py.) For
+ should start PythonTeX with pythontex.py.) For
this to work correctly, you should install Python
version 3.3+, which has a Windows wrapper (py) that
- PythonTeX can use to run the correct version of
+ PythonTeX can use to run the correct version of
Python. If you do not want to install Python 3.3+,
- you can also use the --interpreter command-line
+ you can also use the --interpreter command-line
option to tell PythonTeX how to access the version
of Python you wish to use.
'''.format(temp_data['python'])
@@ -268,10 +270,10 @@ def process_argv(data, temp_data):
return sys.exit(2)
else:
interpreter_dict['python'] = 'python3'
-
+
if args.TEXNAME is not None:
- # Determine if we a dealing with just a filename, or a name plus
- # path. If there's a path, we need to make the document directory
+ # Determine if we are dealing with just a filename, or a name plus
+ # path. If there's a path, we need to make the document directory
# the current working directory.
dir, raw_jobname = os.path.split(args.TEXNAME)
dir = os.path.expanduser(os.path.normcase(dir))
@@ -287,30 +289,30 @@ def process_argv(data, temp_data):
print(' Code file ' + raw_jobname + '.pytxcode does not exist.')
print(' Run LaTeX to create it.')
return sys.exit(1)
-
- # We need a "sanitized" version of the jobname, with spaces and
- # asterisks replaced with hyphens. This is done to avoid TeX issues
- # with spaces in file names, paralleling the approach taken in
- # pythontex.sty. From now on, we will use the sanitized version every
- # time we create a file that contains the jobname string. The raw
- # version will only be used in reference to pre-existing files created
+
+ # We need a "sanitized" version of the jobname, with spaces and
+ # asterisks replaced with hyphens. This is done to avoid TeX issues
+ # with spaces in file names, paralleling the approach taken in
+ # pythontex.sty. From now on, we will use the sanitized version every
+ # time we create a file that contains the jobname string. The raw
+ # version will only be used in reference to pre-existing files created
# on the TeX side, such as the .pytxcode file.
jobname = raw_jobname.replace(' ', '-').replace('"', '').replace('*', '-')
# Store the results in data
data['raw_jobname'] = raw_jobname
data['jobname'] = jobname
-
- # We need to check to make sure that the "sanitized" jobname doesn't
- # lead to a collision with a file that already has that name, so that
+
+ # We need to check to make sure that the "sanitized" jobname doesn't
+ # lead to a collision with a file that already has that name, so that
# two files attempt to use the same PythonTeX folder.
- #
+ #
# If <jobname>.<ext> and <raw_jobname>.<ext> both exist, where <ext>
- # is a common LaTeX extension, we exit. We operate under the
- # assumption that there should be only a single file <jobname> in the
- # document root directory that has a common LaTeX extension. That
- # could be false, but if so, the user probably has worse things to
+ # is a common LaTeX extension, we exit. We operate under the
+ # assumption that there should be only a single file <jobname> in the
+ # document root directory that has a common LaTeX extension. That
+ # could be false, but if so, the user probably has worse things to
# worry about than a potential PythonTeX output collision.
- # If <jobname>* and <raw_jobname>* both exist, we issue a warning but
+ # If <jobname>* and <raw_jobname>* both exist, we issue a warning but
# attempt to proceed.
if jobname != raw_jobname:
resolved = False
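
The sanitization itself is just the chain of str.replace calls shown above; for a hypothetical raw jobname:

raw_jobname = 'my "draft" paper*v2'   # hypothetical jobname reported by TeX
jobname = raw_jobname.replace(' ', '-').replace('"', '').replace('*', '-')
# jobname -> 'my-draft-paper-v2'
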
@@ -335,9 +337,9 @@ def process_argv(data, temp_data):
print(' ' + jobname + '*')
print(' Attempting to proceed.')
temp_data['warnings'] += 1
- break
-
-
+ break
+
+
def load_code_get_settings(data, temp_data):
@@ -357,7 +359,7 @@ def load_code_get_settings(data, temp_data):
print(' Code file ' + raw_jobname + '.pytxcode does not exist.')
print(' Run LaTeX to create it.')
return sys.exit(1)
-
+
# Split code and settings
try:
pytxcode, pytxsettings = pytxcode.rsplit('=>PYTHONTEX:SETTINGS#', 1)
@@ -365,8 +367,8 @@ def load_code_get_settings(data, temp_data):
print('The .pytxcode file appears to have an outdated format or be invalid')
print('Run LaTeX to make sure the file is current')
return sys.exit(1)
-
-
+
+
# Prepare to process settings
#
# Create a dict for storing settings.
@@ -374,7 +376,7 @@ def load_code_get_settings(data, temp_data):
# Create a dict for storing Pygments settings.
# Each dict entry will itself be a dict.
pygments_settings = defaultdict(dict)
-
+
# Create a dict of processing functions, and generic processing functions
settings_func = dict()
def set_kv_data(k, v):
@@ -396,7 +398,7 @@ def load_code_get_settings(data, temp_data):
def set_kv_data_fvextfile(k, v):
# Error checking on TeX side should be enough, but be careful anyway
try:
- v = int(v)
+ v = int(v)
except ValueError:
print('* PythonTeX error')
print(' Unable to parse package option fvextfile.')
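
Settings handling is a dispatch dict of per-key handler functions, as set up above. A stripped-down sketch of the pattern follows; the sample keys, handler bodies, and the key=value split are assumptions for illustration, since this hunk does not show them:

settings = {}

def set_kv_data(k, v):
    # Assumed generic handler: booleans arrive as 'true'/'false' strings
    settings[k] = {'true': True, 'false': False}.get(v, v)

def set_kv_int(k, v):
    settings[k] = int(v)

settings_func = {'makestderr': set_kv_data, 'fvextfile': set_kv_int}

for line in 'makestderr=false\nfvextfile=250'.split('\n'):
    if line:
        k, v = line.split('=', 1)
        settings_func[k](k, v)   # dispatch on the setting name
# settings -> {'makestderr': False, 'fvextfile': 250}
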
@@ -468,7 +470,7 @@ def load_code_get_settings(data, temp_data):
settings_func['pyconbanner'] = set_kv_data
settings_func['pyconfilename'] = set_kv_data
settings_func['depythontex'] = set_kv_data
-
+
# Process settings
for line in pytxsettings.split('\n'):
if line:
@@ -486,33 +488,33 @@ def load_code_get_settings(data, temp_data):
print(' The version of the PythonTeX scripts does not match the last code')
print(' saved by the document--run LaTeX to create an updated version.\n')
sys.exit(1)
-
+
# Store all results that haven't already been stored.
data['settings'] = settings
data['pygments_settings'] = pygments_settings
-
+
# Create a tuple of vital quantities that invalidate old saved data
# Don't need to include outputdir, because if that changes, no old output
# fvextfile could be checked on a case-by-case basis, which would result
- # in faster output, but that would involve a good bit of additional
+ # in faster output, but that would involve a good bit of additional
# logic, which probably isn't worth it for a feature that will rarely be
# changed.
- data['vitals'] = (data['version'], data['encoding'],
+ data['vitals'] = (data['version'], data['encoding'],
settings['gobble'], settings['fvextfile'])
-
+
# Create tuples of vital quantities
data['code_vitals'] = (settings['workingdir'], settings['keeptemps'],
settings['makestderr'], settings['stderrfilename'])
data['cons_vitals'] = (settings['workingdir'])
data['typeset_vitals'] = ()
-
+
# Pass any customizations to types
for k in engine_dict:
engine_dict[k].customize(pyfuture=settings['pyfuture'],
pyconfuture=settings['pyconfuture'],
pyconbanner=settings['pyconbanner'],
pyconfilename=settings['pyconfilename'])
-
+
# Store code
# Do this last, so that Pygments settings are available
if pytxcode.startswith('=>PYTHONTEX#'):
@@ -529,12 +531,12 @@ def set_upgrade_compatibility(data, old, temp_data):
When upgrading, modify settings to maintain backward compatibility when
possible and important
'''
- if (old['version'].startswith('v') and
+ if (old['version'].startswith('v') and
not data['settings']['workingdirset'] and
data['settings']['outputdir'] != '.'):
old['compatibility'] = '0.13'
do_upgrade_compatibility(data, old, temp_data)
-
+
def do_upgrade_compatibility(data, old_data, temp_data):
if 'compatibility' in old_data:
@@ -549,10 +551,10 @@ def do_upgrade_compatibility(data, old_data, temp_data):
directory rather than the output directory. PythonTeX has detected
that you have been using the output directory as the working directory.
It will continue to use the output directory for now. To keep your
- current settings long-term and avoid seeing this message in the future,
- add the following command to the preamble of your document, right after
+ current settings long-term and avoid seeing this message in the future,
+ add the following command to the preamble of your document, right after
the "\\usepackage{pythontex}": "\setpythontexworkingdir{<outputdir>}".
- If you wish to continue with the new settings instead, simply delete
+ If you wish to continue with the new settings instead, simply delete
the file with extension .pkl in the output directory, and run PythonTeX.
**** End PythonTeX upgrade message ****
'''
@@ -561,29 +563,29 @@ def do_upgrade_compatibility(data, old_data, temp_data):
def get_old_data(data, old_data, temp_data):
'''
- Load data from the last run, if it exists, into the dict old_data.
- Determine the path to the PythonTeX scripts, either by using a previously
+ Load data from the last run, if it exists, into the dict old_data.
+ Determine the path to the PythonTeX scripts, either by using a previously
found, saved path or via kpsewhich.
-
- The old data is used for determining when PythonTeX has been upgraded,
- when any settings have changed, when code has changed (via hashes), and
- what files may need to be cleaned up. The location of the PythonTeX
- scripts is needed so that they can be imported by the scripts created by
- PythonTeX. The location of the scripts is confirmed even if they were
- previously located, to make sure that the path is still valid. Finding
- the scripts depends on having a TeX installation that includes the
+
+ The old data is used for determining when PythonTeX has been upgraded,
+ when any settings have changed, when code has changed (via hashes), and
+ what files may need to be cleaned up. The location of the PythonTeX
+ scripts is needed so that they can be imported by the scripts created by
+ PythonTeX. The location of the scripts is confirmed even if they were
+ previously located, to make sure that the path is still valid. Finding
+ the scripts depends on having a TeX installation that includes the
Kpathsea library (TeX Live and MiKTeX, possibly others).
-
+
All code that relies on old_data is written based on the assumption that
- if old_data exists and has the current PythonTeX version, then it
+ if old_data exists and has the current PythonTeX version, then it
contains all needed information. Thus, all code relying on old_data must
- check that it was loaded and that it has the current version. If not,
+ check that it was loaded and that it has the current version. If not,
code should adapt gracefully.
'''
# Create a string containing the name of the data file
pythontex_data_file = os.path.expanduser(os.path.normcase(os.path.join(data['settings']['outputdir'], 'pythontex_data.pkl')))
-
+
# Load the old data if it exists (read as binary pickle)
if os.path.isfile(pythontex_data_file):
f = open(pythontex_data_file, 'rb')
@@ -613,9 +615,9 @@ def get_old_data(data, old_data, temp_data):
os.remove(f)
else:
temp_data['loaded_old_data'] = False
-
+
# Set the utilspath
- # Assume that if the utils aren't in the same location as
+ # Assume that if the utils aren't in the same location as
# `pythontex.py`, then they are somewhere else on `sys.path` that
# will always be available (for example, installed as a Python module),
# and thus specifying a path isn't necessary.
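
A sketch, not the actual PythonTeX logic, of the load-and-verify pattern the docstring above describes: cached data is trusted only if it exists, unpickles cleanly, and matches the current version; otherwise the caller falls back to a full run.

import os
import pickle

def load_old_data(data_file, current_version):
    # Return the cached dict only if it is present, readable, and current
    if not os.path.isfile(data_file):
        return None
    try:
        with open(data_file, 'rb') as f:
            old = pickle.load(f)
    except Exception:
        return None
    if old.get('version') != current_version:
        return None
    return old
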
@@ -636,9 +638,9 @@ def modified_dependencies(key, data, old_data, temp_data):
old_dep_hash_dict = old_data['dependencies'][key]
workingdir = data['settings']['workingdir']
for dep in old_dep_hash_dict.keys():
- # We need to know if the path is relative (based off the
- # working directory) or absolute. We can't use
- # os.path.isabs() alone for determining the distinction,
+ # We need to know if the path is relative (based off the
+ # working directory) or absolute. We can't use
+ # os.path.isabs() alone for determining the distinction,
# because we must take into account the possibility of an
# initial ~ (tilde) standing for the home directory.
dep_file = os.path.expanduser(os.path.normcase(dep))
@@ -650,15 +652,15 @@ def modified_dependencies(key, data, old_data, temp_data):
print(' It belongs to ' + key.replace('#', ':'))
print(' Relative paths to dependencies must be specified from the working directory.')
temp_data['errors'] += 1
- # A removed dependency should trigger an error, but it
- # shouldn't cause code to execute. Running the code
- # again would just give more errors when it can't find
- # the dependency. (There won't be issues when a
- # dependency is added or removed, because that would
- # involve modifying code, which would trigger
+ # A removed dependency should trigger an error, but it
+ # shouldn't cause code to execute. Running the code
+ # again would just give more errors when it can't find
+ # the dependency. (There won't be issues when a
+ # dependency is added or removed, because that would
+ # involve modifying code, which would trigger
# re-execution.)
elif hashdependencies:
- # Read and hash the file in binary. Opening in text mode
+ # Read and hash the file in binary. Opening in text mode
# would require an unnecessary decoding and encoding cycle.
f = open(dep_file, 'rb')
hasher = sha1()
@@ -707,28 +709,28 @@ def should_rerun(hash, old_hash, old_exit_status, key, rerun, data, old_data, te
def hash_all(data, temp_data, old_data, engine_dict):
'''
Hash the code to see what has changed and needs to be updated.
-
- Save the hashes in hashdict. Create update_code, a list of bools
- regarding whether code should be executed. Create update_pygments, a
- list of bools determining what needs updated Pygments highlighting.
- Update pygments_settings to account for Pygments (as opposed to PythonTeX)
+
+ Save the hashes in hashdict. Create update_code, a list of bools
+ regarding whether code should be executed. Create update_pygments, a
+ list of bools determining what needs updated Pygments highlighting.
+ Update pygments_settings to account for Pygments (as opposed to PythonTeX)
commands and environments.
'''
- # Note that the PythonTeX information that accompanies code must be
- # hashed in addition to the code itself; the code could stay the same,
- # but its context or args could change, which might require that code be
- # executed. All of the PythonTeX information is hashed except for the
- # input line number. Context-dependent code is going too far if
+ # Note that the PythonTeX information that accompanies code must be
+ # hashed in addition to the code itself; the code could stay the same,
+ # but its context or args could change, which might require that code be
+ # executed. All of the PythonTeX information is hashed except for the
+ # input line number. Context-dependent code is going too far if
# it depends on that.
-
+
# Create variables to more easily access parts of data
pytxcode = temp_data['pytxcode']
encoding = data['encoding']
loaded_old_data = temp_data['loaded_old_data']
rerun = temp_data['rerun']
pygments_settings = data['pygments_settings']
-
+
# Calculate cumulative hashes for all code that is executed
# Calculate individual hashes for all code that will be typeset
code_hasher = defaultdict(sha1)
@@ -759,31 +761,31 @@ def hash_all(data, temp_data, old_data, engine_dict):
typeset_hasher[c.key_typeset].update(c.hashable_delims_typeset.encode(encoding))
typeset_hasher[c.key_typeset].update(c.code.encode(encoding))
typeset_hasher[c.key_typeset].update(c.args_prettyprint.encode(encoding))
-
+
# Store hashes
code_hash_dict = {}
for key in code_hasher:
family = key.split('#', 1)[0]
- code_hash_dict[key] = (code_hasher[key].hexdigest(),
+ code_hash_dict[key] = (code_hasher[key].hexdigest(),
cc_hasher[family].hexdigest(),
engine_dict[family].get_hash())
data['code_hash_dict'] = code_hash_dict
-
+
cons_hash_dict = {}
for key in cons_hasher:
family = key.split('#', 1)[0]
- cons_hash_dict[key] = (cons_hasher[key].hexdigest(),
+ cons_hash_dict[key] = (cons_hasher[key].hexdigest(),
cc_hasher[family].hexdigest(),
engine_dict[family].get_hash())
data['cons_hash_dict'] = cons_hash_dict
-
+
typeset_hash_dict = {}
for key in typeset_hasher:
typeset_hash_dict[key] = typeset_hasher[key].hexdigest()
data['typeset_hash_dict'] = typeset_hash_dict
-
-
+
+
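
A simplified sketch of the per-session cumulative hashing idea described in the docstring above: code and its accompanying context both feed a single hash per key, so a context change alone is enough to force re-execution. The keys and snippets are illustrative.

from collections import defaultdict
from hashlib import sha1

code_hasher = defaultdict(sha1)
snippets = [('py#session#default', 'x = 1', 'context=a'),
            ('py#session#default', 'print(x)', '')]
for key, code, context in snippets:
    code_hasher[key].update(context.encode('utf8'))
    code_hasher[key].update(code.encode('utf8'))

code_hash_dict = dict((k, h.hexdigest()) for k, h in code_hasher.items())
print(code_hash_dict)
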
# See what needs to be updated.
# In the process, copy over macros and files that may be reused.
code_update = {}
@@ -810,27 +812,27 @@ def hash_all(data, temp_data, old_data, engine_dict):
old_typeset_hash_dict = old_data['typeset_hash_dict']
old_pygments_settings = old_data['pygments_settings']
for s in pygments_settings:
- if (s in old_pygments_settings and
+ if (s in old_pygments_settings and
pygments_settings[s] == old_pygments_settings[s]):
pygments_settings_changed[s] = False
else:
pygments_settings_changed[s] = True
- # If old data was loaded (and thus is compatible) determine what has
- # changed so that only
+ # If old data was loaded (and thus is compatible) determine what has
+ # changed so that only
# modified code may be executed. Otherwise, execute everything.
# We don't have to worry about checking for changes in pyfuture, because
# custom code and default code are hashed. The treatment of keeptemps
# could be made more efficient (if changed to 'none', just delete old temp
- # files rather than running everything again), but given that it is
+ # files rather than running everything again), but given that it is
    # intended as a debugging aid, that probably isn't worth it.
- # We don't have to worry about hashdependencies changing, because if it
+ # We don't have to worry about hashdependencies changing, because if it
# does the hashes won't match (file contents vs. mtime) and thus code will
# be re-executed.
if loaded_old_data and data['code_vitals'] == old_data['code_vitals']:
# Compare the hash values, and set which code needs to be run
for key in code_hash_dict:
- if (key in old_code_hash_dict and
+ if (key in old_code_hash_dict and
not should_rerun(code_hash_dict[key], old_code_hash_dict[key], old_exit_status[key], key, rerun, data, old_data, temp_data)):
code_update[key] = False
macros[key] = old_macros[key]
@@ -838,15 +840,15 @@ def hash_all(data, temp_data, old_data, engine_dict):
dependencies[key] = old_dependencies[key]
exit_status[key] = old_exit_status[key]
else:
- code_update[key] = True
- else:
+ code_update[key] = True
+ else:
for key in code_hash_dict:
code_update[key] = True
-
+
if loaded_old_data and data['cons_vitals'] == old_data['cons_vitals']:
# Compare the hash values, and set which code needs to be run
for key in cons_hash_dict:
- if (key in old_cons_hash_dict and
+ if (key in old_cons_hash_dict and
not should_rerun(cons_hash_dict[key], old_cons_hash_dict[key], old_exit_status[key], key, rerun, data, old_data, temp_data)):
cons_update[key] = False
macros[key] = old_macros[key]
@@ -855,17 +857,17 @@ def hash_all(data, temp_data, old_data, engine_dict):
dependencies[key] = old_dependencies[key]
exit_status[key] = old_exit_status[key]
else:
- cons_update[key] = True
- else:
+ cons_update[key] = True
+ else:
for key in cons_hash_dict:
cons_update[key] = True
-
+
if loaded_old_data and data['typeset_vitals'] == old_data['typeset_vitals']:
for key in typeset_hash_dict:
family = key.split('#', 1)[0]
if family in pygments_settings:
if (not pygments_settings_changed[family] and
- key in old_typeset_hash_dict and
+ key in old_typeset_hash_dict and
typeset_hash_dict[key] == old_typeset_hash_dict[key]):
pygments_update[key] = False
if key in old_pygments_macros:
@@ -902,7 +904,7 @@ def hash_all(data, temp_data, old_data, engine_dict):
for s in pygments_style_list:
formatter = LatexFormatter(style=s, commandprefix='PYG'+s)
pygments_style_defs[s] = formatter.get_style_defs()
-
+
# Save to data
temp_data['code_update'] = code_update
temp_data['cons_update'] = cons_update
@@ -916,9 +918,9 @@ def hash_all(data, temp_data, old_data, engine_dict):
data['typeset_cache'] = typeset_cache
data['dependencies'] = dependencies
data['exit_status'] = exit_status
-
-
- # Clean up for code that will be run again, and for code that no longer
+
+
+ # Clean up for code that will be run again, and for code that no longer
# exists.
if loaded_old_data:
# Take care of code files
@@ -948,8 +950,8 @@ def hash_all(data, temp_data, old_data, engine_dict):
if os.path.isfile(f):
os.remove(f)
# Take care of old Pygments files
- # The approach here is a little different since there isn't a
- # Pygments-specific hash dict, but there is a Pygments-specific
+ # The approach here is a little different since there isn't a
+ # Pygments-specific hash dict, but there is a Pygments-specific
# dict of lists of files.
for key in pygments_update:
if pygments_update[key] and key in old_pygments_files:
@@ -989,7 +991,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
files = data['files']
debug = temp_data['debug']
interactive = temp_data['interactive']
-
+
# Tweak the update dicts to work with debug command-line option.
# #### This should probably be refactored later, once the debug interface
# stabilizes
@@ -1035,12 +1037,12 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
temp_data['debug_key'] = arg_key
else:
temp_data['interactive_key'] = arg_key
-
-
-
- # We need to keep track of the last instance for each session, so
- # that duplicates can be eliminated. Some LaTeX environments process
- # their content multiple times and thus will create duplicates. We
+
+
+
+ # We need to keep track of the last instance for each session, so
+ # that duplicates can be eliminated. Some LaTeX environments process
+ # their content multiple times and thus will create duplicates. We
# need to initialize everything at -1, since instances begin at zero.
def negative_one():
return -1
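
A toy version of the duplicate-elimination logic this comment introduces; a named default factory (rather than a lambda) is presumably used so the dict remains picklable, and the keys below are illustrative.

from collections import defaultdict

def negative_one():
    return -1

last_instance = defaultdict(negative_one)
kept = []
for key, instance in [('py#default#default', 0),
                      ('py#default#default', 0),   # duplicate from re-processing
                      ('py#default#default', 1)]:
    if instance > last_instance[key]:
        last_instance[key] = instance
        kept.append((key, instance))
print(kept)  # the repeated instance 0 appears only once
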
@@ -1054,7 +1056,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
if c.is_typeset and pygments_update[c.key_typeset]:
pygments_list.append(c)
elif c.is_cons:
- # Only append to Pygments if not run, since Pygments is
+ # Only append to Pygments if not run, since Pygments is
# automatically taken care of during run for console content
if cons_update[c.key_run]:
cons_dict[c.key_run].append(c)
@@ -1067,7 +1069,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
cc_dict_begin[c.cc_type].append(c)
else:
cc_dict_end[c.cc_type].append(c)
-
+
# Save
temp_data['code_dict'] = code_dict
temp_data['cc_dict_begin'] = cc_dict_begin
@@ -1100,7 +1102,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
sessionfile.close()
code_index_dict[key] = code_index
temp_data['code_index_dict'] = code_index_dict
-
+
# Write synchronization file if in debug mode
if debug is not None:
# Might improve tracking/cleanup of syncdb files
@@ -1150,7 +1152,7 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pygments_settings = data['pygments_settings']
jobs = temp_data['jobs']
verbose = temp_data['verbose']
-
+
code_dict = temp_data['code_dict']
cons_dict = temp_data['cons_dict']
cc_dict_begin = temp_data['cc_dict_begin']
@@ -1163,21 +1165,21 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pygments_files = data['pygments_files']
pygments_macros = data['pygments_macros']
typeset_cache = data['typeset_cache']
-
+
errors = temp_data['errors']
warnings = temp_data['warnings']
-
+
makestderr = data['settings']['makestderr']
stderrfilename = data['settings']['stderrfilename']
code_index_dict = temp_data['code_index_dict']
-
+
hashdependencies = temp_data['hashdependencies']
dependencies = data['dependencies']
exit_status = data['exit_status']
start_time = data['start_time']
debug = temp_data['debug']
interactive = temp_data['interactive']
-
+
# If in debug or interactive mode, short-circuit the whole process
# #### This should probably be refactored later, once debugging is more
# mature
@@ -1185,17 +1187,17 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
import shlex
if debug is not None:
print('Entering debug mode for "{0}"\n'.format(debug) + '-'*20 + '\n')
- key = temp_data['debug_key']
+ key = temp_data['debug_key']
else:
print('Entering interactive mode for "{0}"\n'.format(interactive) + '-'*20 + '\n')
- key = temp_data['interactive_key']
+ key = temp_data['interactive_key']
basename = key.replace('#', '_')
family, session, restart = key.split('#')
# #### Revise as debugging is expanded
if debug is not None and engine_dict[family].language != 'python':
return sys.exit('Currently, debug only supports Python')
if debug is not None:
- # #### Eventually, should move to pythontex_engines.py and
+ # #### Eventually, should move to pythontex_engines.py and
# provide means for customization
command = '{python} {debug} {file}.py --interactive'
command = command.replace('{python}', interpreter_dict['python'])
@@ -1222,23 +1224,23 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
exec_cmd = shlex.split(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\')))
try:
proc = subprocess.Popen(exec_cmd)
- except WindowsError as e:
- if e.errno == 2:
+ except (WindowsError, FileNotFoundError) as e:
+ if platform.system() == 'Windows' and e.errno == 2:
# Batch files won't be found when called without extension. They
# would be found if `shell=True`, but then getting the right
# exit code is tricky. So we perform some `cmd` trickery that
- # is essentially equivalent to `shell=True`, but gives correct
+ # is essentially equivalent to `shell=True`, but gives correct
# exit codes. Note that `subprocess.Popen()` works with strings
# under Windows; a list is not required.
exec_cmd_string = ' '.join(exec_cmd)
exec_cmd_string = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(exec_cmd_string)
proc = subprocess.Popen(exec_cmd_string)
else:
- raise
+ raise
proc.wait()
os.chdir(orig_cwd)
# Do a basic update of pickled data
- # This is only really needed for tracking the code file and the
+ # This is only really needed for tracking the code file and the
# synchronization file (if it was created)
if temp_data['loaded_old_data'] and key in old_data['exit_status']:
exit_status[key] = old_data['exit_status'][key]
@@ -1253,29 +1255,29 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pickle.dump(data, f, -1)
f.close()
return
-
-
- # Create a pool for multiprocessing. Set the maximum number of
+
+
+ # Create a pool for multiprocessing. Set the maximum number of
# concurrent processes to a user-specified value for jobs. If the user
- # has not specified a value, then it will be None, and
+ # has not specified a value, then it will be None, and
# multiprocessing.Pool() will use cpu_count().
pool = multiprocessing.Pool(jobs)
tasks = []
-
+
# If verbose, print a list of processes
if verbose:
print('\n* PythonTeX will run the following processes')
print(' with working directory {0}'.format(workingdir))
print(' (maximum concurrent processes = {0})'.format(jobs))
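
A minimal sketch of the pool pattern used in this function: queue tasks with apply_async, close and join the pool, then harvest each returned dict. The worker and its payload are placeholders for run_code, python_console, and do_pygments.

import multiprocessing

def worker(key):
    # Stand-in for the real task functions
    return {'key': key, 'errors': 0, 'warnings': 0}

if __name__ == '__main__':
    pool = multiprocessing.Pool(None)   # None -> use cpu_count()
    tasks = [pool.apply_async(worker, [k])
             for k in ('py#a#default', 'py#b#default')]
    pool.close()
    pool.join()
    for task in tasks:
        result = task.get()
        print(result['key'], result['errors'], result['warnings'])
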
-
- # Add code processes. Note that everything placed in the codedict
+
+ # Add code processes. Note that everything placed in the codedict
# needs to be executed, based on previous testing, except for custom code.
for key in code_dict:
family = key.split('#')[0]
# Uncomment the following for debugging, and comment out what follows
'''run_code(encoding, outputdir, workingdir, code_dict[key],
engine_dict[family].language,
- engine_dict[family].command,
+ engine_dict[family].commands,
engine_dict[family].created,
engine_dict[family].extension,
makestderr, stderrfilename,
@@ -1285,10 +1287,10 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
engine_dict[family].linenumbers,
engine_dict[family].lookbehind,
keeptemps, hashdependencies)'''
- tasks.append(pool.apply_async(run_code, [encoding, outputdir,
+ tasks.append(pool.apply_async(run_code, [encoding, outputdir,
workingdir, code_dict[key],
engine_dict[family].language,
- engine_dict[family].command,
+ engine_dict[family].commands,
engine_dict[family].created,
engine_dict[family].extension,
makestderr, stderrfilename,
@@ -1300,18 +1302,18 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
keeptemps, hashdependencies]))
if verbose:
print(' - Code process ' + key.replace('#', ':'))
-
+
# Add console processes
for key in cons_dict:
family = key.split('#')[0]
if engine_dict[family].language.startswith('python'):
if family in pygments_settings:
# Uncomment the following for debugging
- '''python_console(jobname, encoding, outputdir, workingdir,
+ '''python_console(jobname, encoding, outputdir, workingdir,
fvextfile, pygments_settings[family],
cc_dict_begin[family], cons_dict[key],
cc_dict_end[family], engine_dict[family].startup,
- engine_dict[family].banner,
+ engine_dict[family].banner,
engine_dict[family].filename)'''
tasks.append(pool.apply_async(python_console, [jobname, encoding,
outputdir, workingdir,
@@ -1333,31 +1335,31 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
cc_dict_end[family],
engine_dict[family].startup,
engine_dict[family].banner,
- engine_dict[family].filename]))
+ engine_dict[family].filename]))
else:
print('* PythonTeX error')
print(' Currently, non-Python consoles are not supported')
errors += 1
if verbose:
print(' - Console process ' + key.replace('#', ':'))
-
+
# Add a Pygments process
if pygments_list:
- tasks.append(pool.apply_async(do_pygments, [encoding, outputdir,
+ tasks.append(pool.apply_async(do_pygments, [encoding, outputdir,
fvextfile,
pygments_list,
pygments_settings,
typeset_cache]))
if verbose:
print(' - Pygments process')
-
+
# Execute the processes
pool.close()
pool.join()
-
+
# Get the outputs of processes
# Get the files and macros created. Get the number of errors and warnings
- # produced. Get any messages returned. Get the exit_status, which is a
+ # produced. Get any messages returned. Get the exit_status, which is a
# dictionary of code that failed and thus must be run again (its hash is
# set to a null string). Keep track of whether there were any new files,
# so that the last time of file creation in .pytxmcr can be updated.
@@ -1375,7 +1377,7 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
errors += result['errors']
warnings += result['warnings']
exit_status[key] = (result['errors'], result['warnings'])
- messages.extend(result['messages'])
+ messages.extend(result['messages'])
elif result['process'] == 'console':
key = result['key']
files[key].extend(result['files'])
@@ -1399,8 +1401,8 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pygments_macros.update(result['pygments_macros'])
errors += result['errors']
warnings += result['warnings']
- messages.extend(result['messages'])
-
+ messages.extend(result['messages'])
+
# Do a quick check to see if any dependencies were modified since the
# beginning of the run. If so, reset them so they will run next time and
# issue a warning
@@ -1419,11 +1421,11 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
for s in set(unresolved_sessions):
print(' - ' + s)
warnings += 1
-
-
+
+
# Save all content (only needs to be done if code was indeed run).
# Save a commented-out time corresponding to the last time PythonTeX ran
- # and created files, so that tools like latexmk can easily detect when
+ # and created files, so that tools like latexmk can easily detect when
# another run is needed.
if tasks:
if new_files or not temp_data['loaded_old_data']:
@@ -1431,28 +1433,33 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
else:
last_new_file_time = old_data['last_new_file_time']
data['last_new_file_time'] = last_new_file_time
-
+
macro_file = open(os.path.expanduser(os.path.normcase(os.path.join(outputdir, jobname + '.pytxmcr'))), 'w', encoding=encoding)
macro_file.write('%Last time of file creation: ' + str(last_new_file_time) + '\n\n')
for key in macros:
macro_file.write(''.join(macros[key]))
macro_file.close()
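
The timestamp-comment idea in isolation: a LaTeX comment recording when new files were last created, which tools such as latexmk can read to decide whether another run is needed. The file name here is illustrative.

import time

last_new_file_time = time.time()
with open('example.pytxmcr', 'w') as macro_file:
    macro_file.write('%Last time of file creation: ' + str(last_new_file_time) + '\n\n')
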
-
+
pygments_macro_file = open(os.path.expanduser(os.path.normcase(os.path.join(outputdir, jobname + '.pytxpyg'))), 'w', encoding=encoding)
# Only save Pygments styles that are used
style_set = set([pygments_settings[k]['formatter_options']['style'] for k in pygments_settings if k != ':GLOBAL'])
+ if style_set:
+ from pygments.formatters import LatexFormatter
+ formatter = LatexFormatter(style='default', commandprefix='PYG')
+ PYG_style_defs = formatter.get_style_defs()
+ pygments_macro_file.write(PYG_style_defs)
for key in pygments_style_defs:
if key in style_set:
pygments_macro_file.write(''.join(pygments_style_defs[key]))
for key in pygments_macros:
pygments_macro_file.write(''.join(pygments_macros[key]))
pygments_macro_file.close()
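
A standalone illustration of generating the LaTeX style definitions written above; it requires Pygments, and the output file name and style list are illustrative.

from pygments.formatters import LatexFormatter

with open('pygments_styles.tex', 'w') as f:
    # Default-prefix macros, written unconditionally when any style is used
    f.write(LatexFormatter(style='default', commandprefix='PYG').get_style_defs())
    # Style-specific macros use a per-style command prefix
    for style in ('default', 'friendly'):
        f.write(LatexFormatter(style=style, commandprefix='PYG' + style).get_style_defs())
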
-
+
pythontex_data_file = os.path.expanduser(os.path.normcase(os.path.join(outputdir, 'pythontex_data.pkl')))
f = open(pythontex_data_file, 'wb')
pickle.dump(data, f, -1)
f.close()
-
+
# Print any errors and warnings.
if messages:
print('\n'.join(messages))
@@ -1465,15 +1472,15 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
-def run_code(encoding, outputdir, workingdir, code_list, language, command,
- command_created, extension, makestderr, stderrfilename,
- code_index, errorsig, warningsig, linesig, stderrlookbehind,
+def run_code(encoding, outputdir, workingdir, code_list, language, commands,
+ command_created, extension, makestderr, stderrfilename,
+ code_index, errorsig, warningsig, linesig, stderrlookbehind,
keeptemps, hashdependencies):
'''
Function for multiprocessing code files
'''
import shlex
-
+
# Create what's needed for storing results
family = code_list[0].family
session = code_list[0].session
@@ -1485,64 +1492,68 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
warnings = 0
unknowns = 0
messages = []
-
- # Create message lists only for stderr, one for undelimited stderr and
- # one for delimited, so it's easy to keep track of if there is any
+
+ # Create message lists only for stderr, one for undelimited stderr and
+    # one for delimited, so it's easy to keep track of whether there is any
# stderr. These are added onto messages at the end.
err_messages_ud = []
err_messages_d = []
-
+
# We need to let the user know we are switching code files
# We check at the end to see if there were indeed any errors and warnings
# and if not, clear messages.
messages.append('\n---- Messages for ' + key_run.replace('#', ':') + ' ----')
-
+
# Open files for stdout and stderr, run the code, then close the files
basename = key_run.replace('#', '_')
out_file_name = os.path.expanduser(os.path.normcase(os.path.join(outputdir, basename + '.out')))
err_file_name = os.path.expanduser(os.path.normcase(os.path.join(outputdir, basename + '.err')))
out_file = open(out_file_name, 'w', encoding=encoding)
err_file = open(err_file_name, 'w', encoding=encoding)
- # Note that command is a string, which must be converted to list
- # Must double-escape any backslashes so that they survive `shlex.split()`
script = os.path.expanduser(os.path.normcase(os.path.join(outputdir, basename)))
if os.path.isabs(script):
script_full = script
else:
script_full = os.path.expanduser(os.path.normcase(os.path.join(os.getcwd(), outputdir, basename)))
- # `shlex.split()` only works with Unicode after 2.7.2
- if (sys.version_info.major == 2 and sys.version_info.micro < 3):
- exec_cmd = shlex.split(bytes(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\'))))
- exec_cmd = [unicode(elem) for elem in exec_cmd]
- else:
- exec_cmd = shlex.split(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\')))
- # Add any created files due to the command
- # This needs to be done before attempts to execute, to prevent orphans
+ # #### Need to revise so that intermediate files can be detected and cleaned up
for f in command_created:
- files.append(f.format(file=script))
- try:
- proc = subprocess.Popen(exec_cmd, stdout=out_file, stderr=err_file)
- except WindowsError as e:
- if e.errno == 2:
- # Batch files won't be found when called without extension. They
- # would be found if `shell=True`, but then getting the right
- # exit code is tricky. So we perform some `cmd` trickery that
- # is essentially equivalent to `shell=True`, but gives correct
- # exit codes. Note that `subprocess.Popen()` works with strings
- # under Windows; a list is not required.
- exec_cmd_string = ' '.join(exec_cmd)
- exec_cmd_string = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(exec_cmd_string)
- proc = subprocess.Popen(exec_cmd_string, stdout=out_file, stderr=err_file)
+ files.append(f.format(file=script, File=script_full))
+ for command in commands:
+ # Note that command is a string, which must be converted to list
+ # Must double-escape any backslashes so that they survive `shlex.split()`
+ # `shlex.split()` only works with Unicode after 2.7.2
+ if (sys.version_info.major == 2 and sys.version_info.micro < 3):
+ exec_cmd = shlex.split(bytes(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\'), workingdir=workingdir.replace('\\', '\\\\'))))
+ exec_cmd = [unicode(elem) for elem in exec_cmd]
else:
- raise
-
- proc.wait()
+ exec_cmd = shlex.split(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\'), workingdir=workingdir.replace('\\', '\\\\')))
+ # Add any created files due to the command
+ # This needs to be done before attempts to execute, to prevent orphans
+ try:
+ proc = subprocess.Popen(exec_cmd, stdout=out_file, stderr=err_file)
+ except WindowsError as e:
+ if e.errno == 2:
+ # Batch files won't be found when called without extension. They
+ # would be found if `shell=True`, but then getting the right
+ # exit code is tricky. So we perform some `cmd` trickery that
+ # is essentially equivalent to `shell=True`, but gives correct
+ # exit codes. Note that `subprocess.Popen()` works with strings
+ # under Windows; a list is not required.
+ exec_cmd_string = ' '.join(exec_cmd)
+ exec_cmd_string = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(exec_cmd_string)
+ proc = subprocess.Popen(exec_cmd_string, stdout=out_file, stderr=err_file)
+ else:
+ raise
+
+ proc.wait()
+ if proc.returncode != 0:
+ break
out_file.close()
err_file.close()
-
+
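
A self-contained sketch of the per-command execution step above: the template is formatted with backslashes doubled so Windows paths survive shlex.split(), the result is launched, and on Windows a batch file called without its extension is retried through cmd so that a nonzero exit code still propagates. OSError is the portable parent of the WindowsError caught in the script; the template and script used below are illustrative.

import platform
import shlex
import subprocess
import sys

def run_command(command_template, script):
    # Double backslashes so Windows paths survive shlex.split()
    exec_cmd = shlex.split(command_template.format(file=script.replace('\\', '\\\\')))
    try:
        return subprocess.Popen(exec_cmd)
    except OSError as e:
        if platform.system() == 'Windows' and e.errno == 2:
            # Batch file called without extension: route through cmd while
            # still getting a meaningful exit code
            cmd_string = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(' '.join(exec_cmd))
            return subprocess.Popen(cmd_string)
        raise

proc = run_command('"{file}" --version', sys.executable)
proc.wait()
print(proc.returncode)
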
# Process saved stdout into file(s) that are included in the TeX document.
#
- # Go through the saved output line by line, and save any printed content
+ # Go through the saved output line by line, and save any printed content
# to its own file, named based on instance.
#
# The very end of the stdout lists dependencies, if any, so we start by
@@ -1563,7 +1574,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
valid_stdout = False
if proc.returncode == 0:
raise ValueError('Missing "created" and/or "dependencies" delims in stdout; invalid template?')
-
+
if valid_stdout:
# Add created files to created list
for c in created.splitlines():
@@ -1571,11 +1582,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
files.append(c)
else:
files.append(os.path.join(workingdir, c))
-
+
# Create a set of dependencies, to eliminate duplicates in the event
# that there are any. This is mainly useful when dependencies are
- # automatically determined (for example, through redefining open()),
- # may be specified multiple times as a result, and are hashed (and
+ # automatically determined (for example, through redefining open()),
+ # may be specified multiple times as a result, and are hashed (and
# of a large enough size that hashing time is non-negligible).
deps = set([dep for dep in deps.splitlines()])
# Process dependencies; get mtimes and (if specified) hashes
@@ -1584,21 +1595,21 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
if not os.path.isabs(dep_file):
dep_file = os.path.expanduser(os.path.normcase(os.path.join(workingdir, dep_file)))
if not os.path.isfile(dep_file):
- # If we can't find the file, we return a null hash and issue
- # an error. We don't need to change the exit status. If the
- # code does depend on the file, there will be a separate
- # error when the code attempts to use the file. If the code
- # doesn't really depend on the file, then the error will be
- # raised again anyway the next time PythonTeX runs when the
+ # If we can't find the file, we return a null hash and issue
+ # an error. We don't need to change the exit status. If the
+ # code does depend on the file, there will be a separate
+ # error when the code attempts to use the file. If the code
+ # doesn't really depend on the file, then the error will be
+ # raised again anyway the next time PythonTeX runs when the
# dependency is listed but not found.
dependencies[dep] = (None, None)
messages.append('* PythonTeX error')
messages.append(' Cannot find dependency "' + dep + '"')
messages.append(' It belongs to ' + key_run.replace('#', ':'))
messages.append(' Relative paths to dependencies must be specified from the working directory.')
- errors += 1
+ errors += 1
elif hashdependencies:
- # Read and hash the file in binary. Opening in text mode
+ # Read and hash the file in binary. Opening in text mode
# would require an unnecessary decoding and encoding cycle.
hasher = sha1()
f = open(dep_file, 'rb')
@@ -1607,12 +1618,12 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
dependencies[dep] = (os.path.getmtime(dep_file), hasher.hexdigest())
else:
dependencies[dep] = (os.path.getmtime(dep_file), '')
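
A standalone illustration of the mtime-plus-hash record built here; the chunked binary read is an addition to keep memory use bounded and is not how the script itself reads the file.

import os
from hashlib import sha1

def dependency_record(path, hashdependencies=True):
    # Hash in binary to avoid a pointless decode/encode cycle
    digest = ''
    if hashdependencies:
        hasher = sha1()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(1 << 16), b''):
                hasher.update(chunk)
        digest = hasher.hexdigest()
    return (os.path.getmtime(path), digest)
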
-
+
for block in out.split('=>PYTHONTEX:STDOUT#')[1:]:
if block:
delims, content = block.split('#\n', 1)
- if content:
- instance, command = delims.split('#')
+ instance, command = delims.split('#')
+ if content or command in ('s', 'sub'):
if instance.endswith('CC'):
messages.append('* PythonTeX warning')
messages.append(' Custom code for "' + family + '" attempted to print or write to stdout')
@@ -1627,6 +1638,17 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
fname = os.path.join(outputdir, basename + '_' + instance + '.stdout')
f = open(os.path.expanduser(os.path.normcase(fname)), 'w', encoding=encoding)
+ if command in ('s', 'sub'):
+ if content:
+ fields = [x.split('\n', 1)[1].rsplit('\n', 1)[0] for x in content.split('=>PYTHONTEX:FIELD_DELIM#')[1:]]
+ content = code_list[int(instance)].sub_template.format(*fields)
+ else:
+ # If no replacement fields, de-templatize
+ content = code_list[int(instance)].sub_template.replace('{{', '{').replace('}}', '}')
+ if command == 's':
+                        # Remove the newline added by printing, to prevent
+                        # LaTeX from adding a space after the content
+ content = content.rsplit('\n', 1)[0] + '\\endinput\n'
f.write(content)
f.close()
files.append(fname)
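
A toy version of the substitution handling added above: printed field values are spliced into the stored template, and doubled braces are collapsed when there are no fields. The delimiter matches the one in the diff; the template and values are made up.

content = ('=>PYTHONTEX:FIELD_DELIM#\n42\n'
           '=>PYTHONTEX:FIELD_DELIM#\n3.14\n')
sub_template = 'The answer is {0} and pi is roughly {1}.'
fields = [x.split('\n', 1)[1].rsplit('\n', 1)[0]
          for x in content.split('=>PYTHONTEX:FIELD_DELIM#')[1:]]
print(sub_template.format(*fields))

empty_template = 'Literal braces: {{x}}'
print(empty_template.replace('{{', '{').replace('}}', '}'))
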
@@ -1657,8 +1679,8 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
# Create the full basename that will be replaced in stderr
# We need two versions, one with the correct slashes for the OS,
# and one with the opposite slashes. This is needed when a language
- # doesn't obey the OS's slash convention in paths given in stderr.
- # For example, Windows uses backslashes, but Ruby under Windows uses
+ # doesn't obey the OS's slash convention in paths given in stderr.
+ # For example, Windows uses backslashes, but Ruby under Windows uses
# forward in paths given in stderr.
# #### Consider os.path.normcase(), making search case-insensitive
outputdir_exp = os.path.expanduser(outputdir)
@@ -1667,7 +1689,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
fullbasename_reslashed = fullbasename_correct.replace('\\', '/')
else:
fullbasename_reslashed = fullbasename_correct.replace('/', '\\')
-
+
if err_ud:
it = iter(code_index.items())
index_now = next(it)
@@ -1675,7 +1697,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
start_errgobble = None
for n, line in enumerate(err_ud):
if basename in line:
- # Get the gobbleation. This is used to determine if
+ # Get the gobbleation. This is used to determine if
# other lines containing the basename are a continuation,
# or separate messages.
errgobble = match('(\s*)', line).groups()[0]
@@ -1683,7 +1705,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
start_errgobble = errgobble
                 # Only issue a message and track down the line number if
# this is indeed the start of a new message, rather than
- # a continuation of an old message that happens to
+ # a continuation of an old message that happens to
# contain the basename
if errgobble == start_errgobble:
# Determine the corresponding line number in the document
@@ -1709,15 +1731,15 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
doclinenum = '??'
input_file = '??'
-
- # Try to determine if we are dealing with an error or a
+
+ # Try to determine if we are dealing with an error or a
# warning.
found = False
index = n
if stderrlookbehind:
while index >= 0:
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
past_line = err_ud[index]
if (index < n and basename in past_line):
@@ -1739,11 +1761,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
index -= 1
else:
while index < len(err_ud):
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
future_line = err_ud[index]
- if (index > n and basename in future_line and
+ if (index > n and basename in future_line and
future_line.startswith(start_errgobble)):
break
for pattern in warningsig:
@@ -1773,7 +1795,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
err_messages_ud.append(' ' + line.replace(outputdir_exp, '<outputdir>').rstrip('\n'))
else:
err_messages_ud.append(' ' + line.rstrip('\n'))
-
+
# Create .stderr
if makestderr and err_messages_ud:
process = False
@@ -1822,13 +1844,14 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
codelinenum = '1'
else:
+ errlinenum = '??'
codelinenum = '??'
messages.append('* PythonTeX error')
messages.append(' Line number ' + str(errlinenum) + ' could not be synced with the document')
messages.append(' Content from stderr is not delimited, and cannot be resolved')
errors += 1
process = False
-
+
if process:
if int(index_now[0]) > err_key_last_int:
err_key = basename + '_' + index_now[0]
@@ -1849,7 +1872,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
err_dict[err_key].append(line)
elif process:
err_dict[err_key].append(line)
-
+
if err_d:
start_errgobble = None
msg = []
@@ -1858,11 +1881,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
if line.startswith('=>PYTHONTEX:STDERR#'):
# Store the last group of messages. Messages
# can't be directly appended to the main list, because
- # a PythonTeX message must be inserted at the beginning
+ # a PythonTeX message must be inserted at the beginning
# of each chunk of stderr that never references
# the script that was executed. If the script is never
- # referenced, then line numbers aren't automatically
- # synced. These types of situations are created by
+ # referenced, then line numbers aren't automatically
+ # synced. These types of situations are created by
# warnings.warn() etc.
if msg:
if not found_basename:
@@ -1872,10 +1895,10 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
doclinenum = str(code_index[instance].line_int)
input_file = code_index[instance].input_file
# Try to identify alert. We have to parse all
- # lines for signs of errors and warnings. This
+ # lines for signs of errors and warnings. This
# may result in overcounting, but it's the best
- # we can do--otherwise, we could easily
- # undercount, or, finding a warning, miss a
+ # we can do--otherwise, we could easily
+ # undercount, or, finding a warning, miss a
# subsequent error. When this code is actually
# used, it's already a sign that normal parsing
# has failed.
@@ -1909,7 +1932,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
last_delim = line
elif basename in line:
found_basename = True
- # Get the gobbleation. This is used to determine if
+ # Get the gobbleation. This is used to determine if
# other lines containing the basename are a continuation,
# or separate messages.
errgobble = match('(\s*)', line).groups()[0]
@@ -1917,7 +1940,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
start_errgobble = errgobble
                 # Only issue a message and track down the line number if
# this is indeed the start of a new message, rather than
- # a continuation of an old message that happens to
+ # a continuation of an old message that happens to
# contain the basename
if errgobble == start_errgobble:
# Determine the corresponding line number in the document
@@ -1942,18 +1965,18 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
doclinenum = '??'
input_file = '??'
-
- # Try to determine if we are dealing with an error or a
+
+ # Try to determine if we are dealing with an error or a
# warning.
found = False
index = n
if stderrlookbehind:
while index >= 0:
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
past_line = err_d[index]
- if (past_line.startswith('=>PYTHONTEX:STDERR#') or
+ if (past_line.startswith('=>PYTHONTEX:STDERR#') or
(index < n and basename in past_line)):
break
for pattern in warningsig:
@@ -1973,11 +1996,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
index -= 1
else:
while index < len(err_d):
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
future_line = err_d[index]
- if (future_line.startswith('=>PYTHONTEX:STDERR#') or
+ if (future_line.startswith('=>PYTHONTEX:STDERR#') or
(index > n and basename in future_line and future_line.startswith(start_errgobble))):
break
for pattern in warningsig:
@@ -2020,10 +2043,10 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
doclinenum = str(code_index[instance].line_int)
input_file = code_index[instance].input_file
# Try to identify alert. We have to parse all
- # lines for signs of errors and warnings. This
+ # lines for signs of errors and warnings. This
# may result in overcounting, but it's the best
- # we can do--otherwise, we could easily
- # undercount, or, finding a warning, miss a
+ # we can do--otherwise, we could easily
+ # undercount, or, finding a warning, miss a
# subsequent error. When this code is actually
# used, it's already a sign that normal parsing
# has failed.
@@ -2050,7 +2073,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
err_messages_d.append('* PythonTeX stderr - {0} near line {1}:'.format(alert_type, doclinenum))
err_messages_d.extend(msg)
-
+
# Create .stderr
if makestderr and err_messages_d:
process = False
@@ -2075,17 +2098,17 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
# Calculate the line number in the document
# Account for inline
ei = code_index[instance]
- # Store the `instance` in case it's
+ # Store the `instance` in case it's
# incremented later
last_instance = instance
# If the error or warning was actually triggered
# later on (for example, multiline string with
- # missing final delimiter), look ahead and
+ # missing final delimiter), look ahead and
# determine the correct instance, so that
# we get the correct line number. We don't
# associate the created stderr with this later
# instance, however, but rather with the instance
- # in which the error began. Doing that might
+ # in which the error began. Doing that might
# possibly be preferable in some cases, but would
# also require that the current stderr be split
# between multiple instances, requiring extra
@@ -2112,9 +2135,10 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
instance = last_instance
else:
codelinenum = '??'
+ errlinenum = '??'
messages.append('* PythonTeX notice')
messages.append(' Line number ' + str(errlinenum) + ' could not be synced with the document')
-
+
line = line.replace(str(errlinenum), str(codelinenum), 1)
if fullbasename_correct in line:
fullbasename = fullbasename_correct
@@ -2138,7 +2162,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
f.write(''.join(err_dict[err_key]))
f.close()
files.append(stderr_file_name)
-
+
# Clean up temp files, and update the list of existing files
if keeptemps == 'none':
for ext in [extension, 'pytxmcr', 'out', 'err']:
@@ -2157,7 +2181,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
# Take care of any unknowns, based on exit code
# Interpret the exit code as an indicator of whether there were errors,
- # and treat unknowns accordingly. This will cause all warnings to be
+ # and treat unknowns accordingly. This will cause all warnings to be
# misinterpreted as errors if warnings trigger a nonzero exit code.
# It will also cause all warnings to be misinterpreted as errors if there
# is a single error that causes a nonzero exit code. That isn't ideal,
@@ -2176,7 +2200,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
{0} message(s) could not be classified
Interpreted as {1}, based on the return code(s)'''
messages[0] += textwrap.dedent(unknowns_message.format(unknowns, unknowns_type))
-
+
# Take care of anything that has escaped detection thus far.
if proc.returncode == 1 and not errors:
errors += 1
@@ -2187,7 +2211,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
The following command was executed:
"{0}"'''
messages[0] += textwrap.dedent(command_message.format(' '.join(exec_cmd)))
-
+
# Add any stderr messages; otherwise, clear the default message header
if err_messages_ud:
messages.extend(err_messages_ud)
@@ -2195,7 +2219,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
messages.extend(err_messages_d)
if len(messages) == 1:
messages = []
-
+
# Return a dict of dicts of results
return {'process': 'code',
'key': key_run,
@@ -2209,18 +2233,18 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
-def do_pygments(encoding, outputdir, fvextfile, pygments_list,
+def do_pygments(encoding, outputdir, fvextfile, pygments_list,
pygments_settings, typeset_cache):
'''
Create Pygments content.
-
+
To be run during multiprocessing.
'''
# Lazy import
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import LatexFormatter
-
+
# Create what's needed for storing results
pygments_files = defaultdict(list)
pygments_macros = defaultdict(list)
@@ -2228,16 +2252,17 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
warnings = 0
messages = []
messages.append('\n---- Messages for Pygments ----')
-
+
# Create dicts of formatters and lexers.
formatter = dict()
lexer = dict()
for codetype in pygments_settings:
if codetype != ':GLOBAL':
- formatter[codetype] = LatexFormatter(**pygments_settings[codetype]['formatter_options'])
- lexer[codetype] = get_lexer_by_name(pygments_settings[codetype]['lexer'],
- **pygments_settings[codetype]['lexer_options'])
-
+ p = pygments_settings[codetype]['formatter_options'].copy()
+ p['commandprefix'] = 'PYG'
+ formatter[codetype] = LatexFormatter(**p)
+ lexer[codetype] = get_lexer_by_name(pygments_settings[codetype]['lexer'], **p)
+
# Actually parse and highlight the code.
for c in pygments_list:
if c.is_cons:
@@ -2258,19 +2283,19 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
if c.is_inline or content.count('\n') < fvextfile:
# Highlighted code brought in via macros needs SaveVerbatim
if c.args_prettyprint:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@SaveVerbatim}}[\1, {4}]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance, c.args_prettyprint), processed, count=1)
else:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@SaveVerbatim}}[\1]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance), processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@SaveVerbatim}\n\n'
pygments_macros[c.key_typeset].append(processed)
else:
if c.args_prettyprint:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@Verbatim}}[\1, {4}]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance, c.args_prettyprint), processed, count=1)
else:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@Verbatim}}[\1]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance), processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@Verbatim}\n\n'
fname = os.path.join(outputdir, c.key_typeset.replace('#', '_') + '.pygtex')
@@ -2278,7 +2303,7 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
f.write(processed)
f.close()
pygments_files[c.key_typeset].append(fname)
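
A standalone sketch of the rewrapping done in this loop: Pygments emits a Verbatim environment, which is converted into a named SaveVerbatim so the highlighted code can later be recalled by macro. It requires Pygments; the key embedded in the environment name is illustrative.

from re import sub
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import LatexFormatter

processed = highlight('print("hi")', get_lexer_by_name('python'),
                      LatexFormatter(commandprefix='PYG'))
processed = sub(r'\\begin{Verbatim}\[(.+)\]',
                r'\\begin{pytx@SaveVerbatim}[\1]{pytx@py@default@default@0}',
                processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@SaveVerbatim}\n\n'
print(processed)
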
-
+
if len(messages) == 1:
messages = []
# Return a dict of dicts of results
@@ -2287,7 +2312,7 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
'pygments_macros': pygments_macros,
'errors': errors,
'warnings': warnings,
- 'messages': messages}
+ 'messages': messages}
@@ -2296,7 +2321,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
pygments_settings, cc_begin_list, cons_list, cc_end_list,
startup, banner, filename):
'''
- Use Python's ``code`` module to typeset emulated Python interactive
+ Use Python's ``code`` module to typeset emulated Python interactive
sessions, optionally highlighting with Pygments.
'''
# Create what's needed for storing results
@@ -2311,7 +2336,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
warnings = 0
messages = []
messages.append('\n---- Messages for ' + key_run.replace('#', ':') + ' ----')
-
+
# Lazy import what's needed
import code
from collections import deque
@@ -2324,14 +2349,14 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
self._orig_write(unicode(s))
else:
from io import StringIO
-
+
# Create a custom console class
class Console(code.InteractiveConsole):
'''
A subclass of code.InteractiveConsole that takes a list and treats it
as a series of console input.
'''
-
+
def __init__(self, banner, filename):
if banner == 'none':
self.banner = 'NULL BANNER'
@@ -2350,7 +2375,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
self.filename = None
code.InteractiveConsole.__init__(self, filename=self.filename)
self.iostdout = StringIO()
-
+
def consolize(self, startup, cons_list):
self.console_code = deque()
# Delimiters are passed straight through and need newlines
@@ -2365,10 +2390,10 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
sys.path.append(os.getcwd())
else:
sys.exit('Cannot find directory "{workingdir}"')
-
+
if docdir not in sys.path:
sys.path.append(docdir)
-
+
del docdir
'''
cons_config = cons_config.format(workingdir=os.path.expanduser(os.path.normcase(workingdir)))[1:]
@@ -2378,21 +2403,24 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
for c in cons_list:
self.console_code.append('=>PYTHONTEX#{0}#{1}#\n'.format(c.instance, c.command))
self.console_code.extend(c.code.splitlines())
+ # Reset sys.excepthook to its default, to prevent apport systems
+ # in some Linux distributions from breaking exception handling
+ sys.excepthook = sys.__excepthook__
old_stdout = sys.stdout
sys.stdout = self.iostdout
self.interact(self.banner)
sys.stdout = old_stdout
self.session_log = self.iostdout.getvalue()
-
+
def raw_input(self, prompt):
- # Have to do a lot of looping and trying to make sure we get
+ # Have to do a lot of looping and trying to make sure we get
# something valid to execute
try:
line = self.console_code.popleft()
except IndexError:
raise EOFError
while line.startswith('=>PYTHONTEX#'):
- # Get new lines until we get one that doesn't begin with a
+ # Get new lines until we get one that doesn't begin with a
# delimiter. Then write the last delimited line.
old_line = line
try:
@@ -2405,19 +2433,19 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
else:
self.write('\n')
return line
-
+
def write(self, data):
self.iostdout.write(data)
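
A much-reduced sketch of the console-emulation idea: subclass code.InteractiveConsole, feed it canned input through raw_input(), and capture everything it writes. This version assumes Python 3.6 or later; the real class also handles Python 2 and the PythonTeX delimiters.

import code
import sys
from collections import deque
from io import StringIO

class MiniConsole(code.InteractiveConsole):
    def __init__(self, lines):
        code.InteractiveConsole.__init__(self)
        self.lines = deque(lines)
        self.log = StringIO()

    def raw_input(self, prompt=''):
        # Echo the prompt and the canned input so the log reads like a session
        try:
            line = self.lines.popleft()
        except IndexError:
            raise EOFError
        self.write(prompt + line + '\n')
        return line

    def write(self, data):
        self.log.write(data)

con = MiniConsole(['x = 2 + 2', 'print(x)'])
old_stdout = sys.stdout
sys.stdout = con.log          # capture output of the executed code
con.interact(banner='', exitmsg='')
sys.stdout = old_stdout
print(con.log.getvalue())
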
-
+
# Need to combine all custom code and user code to pass to consolize
cons_list = cc_begin_list + cons_list + cc_end_list
- # Create a dict for looking up exceptions. This is needed for startup
+ # Create a dict for looking up exceptions. This is needed for startup
# commands and for code commands and environments, since their output
# isn't typeset
cons_index = {}
for c in cons_list:
- cons_index[c.instance] = c.line
-
+ cons_index[c.instance] = c.line
+
# Consolize the code
# If the working directory is changed as part of the console code,
# then we need to get back to where we were.
@@ -2425,7 +2453,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
cwd = os.getcwd()
con.consolize(startup, cons_list)
os.chdir(cwd)
-
+
# Set up Pygments, if applicable
if pygments_settings is not None:
pygmentize = True
@@ -2433,12 +2461,13 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import LatexFormatter
- formatter = LatexFormatter(**pygments_settings['formatter_options'])
- lexer = get_lexer_by_name(pygments_settings['lexer'],
- **pygments_settings['lexer_options'])
+ p = pygments_settings['formatter_options'].copy()
+ p['commandprefix'] = 'PYG'
+ formatter = LatexFormatter(**p)
+ lexer = get_lexer_by_name(pygments_settings['lexer'], **p)
else:
pygmentize = False
-
+
# Process the console output
output = con.session_log.split('=>PYTHONTEX#')
# Extract banner
@@ -2455,8 +2484,8 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
exception = False
console_content_lines = console_content.splitlines()
for line in console_content_lines:
- if (not line.startswith(sys.ps1) and
- not line.startswith(sys.ps2) and
+ if (not line.startswith(sys.ps1) and
+ not line.startswith(sys.ps2) and
line and not line.isspace()):
exception = True
break
@@ -2477,8 +2506,8 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
exception = False
console_content_lines = console_content.splitlines()
for line in console_content_lines:
- if (line and not line.startswith(sys.ps1) and
- not line.startswith(sys.ps2) and
+ if (line and not line.startswith(sys.ps1) and
+ not line.startswith(sys.ps2) and
not line.isspace()):
exception = True
break
@@ -2502,7 +2531,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
else:
if command == 'i':
# Currently, there isn't any error checking for invalid
- # content; it is assumed that a single line of commands
+ # content; it is assumed that a single line of commands
# was entered, producing one or more lines of output.
# Given that the current ``\pycon`` command doesn't
# allow line breaks to be written to the .pytxcode, that
@@ -2521,14 +2550,16 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
# Process for LaTeX
if pygmentize:
processed = highlight(console_content, lexer, formatter)
- if console_content.count('\n') < fvextfile:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ # #### Need to add wrapping:
+ #processed = highlight('\n'.join([textwrap.fill(x) for x in console_content.splitlines(True)]), lexer, formatter)
+ if console_content.count('\n') < fvextfile:
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@SaveVerbatim}}[\1]{{pytx@{0}}}'.format(key_typeset.replace('#', '@')),
processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@SaveVerbatim}\n\n'
pygments_macros[key_typeset].append(processed)
else:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@Verbatim}}[\1]{{pytx@{0}}}'.format(key_typeset.replace('#', '@')),
processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@Verbatim}\n\n'
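
A small sketch of the substitution performed above, using a hypothetical typeset key: the \begin{Verbatim}[...] line that Pygments emits is rewritten so the highlighted content ends up in a pytx@SaveVerbatim (or pytx@Verbatim) environment named after the key, which the LaTeX side can then recall.

from re import sub

key_typeset = 'py#default#default#0'    # hypothetical key
processed = '\\begin{Verbatim}[commandchars=\\\\\\{\\}]\nx = 1\n\\end{Verbatim}\n'

processed = sub(r'\\begin{Verbatim}\[(.+)\]',
                r'\\begin{{pytx@SaveVerbatim}}[\1]{{pytx@{0}}}'.format(key_typeset.replace('#', '@')),
                processed, count=1)
# Swap the closing environment as well, mirroring the rsplit() above
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@SaveVerbatim}\n'
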
@@ -2536,10 +2567,10 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
f = open(os.path.expanduser(os.path.normcase(fname)), 'w', encoding=encoding)
f.write(processed)
f.close()
- pygments_files[key_typeset].append(fname)
+ pygments_files[key_typeset].append(fname)
else:
if console_content.count('\n') < fvextfile:
- processed = ('\\begin{{pytx@SaveVerbatim}}{{pytx@{0}}}\n'.format(key_typeset.replace('#', '@')) +
+ processed = ('\\begin{{pytx@SaveVerbatim}}{{pytx@{0}}}\n'.format(key_typeset.replace('#', '@')) +
console_content + '\\end{pytx@SaveVerbatim}\n\n')
macros.append(processed)
else:
@@ -2550,10 +2581,10 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
f.write(processed)
f.close()
files.append(fname)
-
+
if len(messages) == 1:
messages = []
-
+
# Return a dict of dicts of results
return {'process': 'console',
'key': key_run,
@@ -2565,7 +2596,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
'dependencies': dependencies,
'errors': errors,
'warnings': warnings,
- 'messages': messages}
+ 'messages': messages}
@@ -2586,25 +2617,25 @@ def main(python=None):
temp_data = {'errors': 0, 'warnings': 0, 'python': python}
old_data = dict()
-
+
# Process command-line options.
#
- # This gets the raw_jobname (actual job name), jobname (a sanitized job
+ # This gets the raw_jobname (actual job name), jobname (a sanitized job
# name, used for creating files named after the jobname), and any options.
process_argv(data, temp_data)
- # If there aren't errors in argv, and the program is going to run
- # (rather than just exit due to --version or --help command-line options),
- # print PythonTeX version. Flush to make the message go out immediately,
+ # If there aren't errors in argv, and the program is going to run
+ # (rather than just exit due to --version or --help command-line options),
+ # print PythonTeX version. Flush to make the message go out immediately,
# so that the user knows PythonTeX has started.
print('This is PythonTeX {0}'.format(__version__))
sys.stdout.flush()
- # Once we have the encoding (from argv), we set stdout and stderr to use
- # this encoding. Later, we will parse the saved stderr of scripts
- # executed via multiprocessing subprocesses, and print the parsed results
- # to stdout. The saved stderr uses the same encoding that was used
- # for the files that created it (this is important for code containing
+ # Once we have the encoding (from argv), we set stdout and stderr to use
+ # this encoding. Later, we will parse the saved stderr of scripts
+ # executed via multiprocessing subprocesses, and print the parsed results
+ # to stdout. The saved stderr uses the same encoding that was used
+ # for the files that created it (this is important for code containing
# unicode characters), so we also need stdout for the main PythonTeX
- # script to support this encoding. Setting stderr encoding is primarily
+ # script to support this encoding. Setting stderr encoding is primarily
# a matter of symmetry. Ideally, pythontex*.py will be bug-free,
# and stderr won't be needed!
if sys.version_info[0] == 2:
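
A sketch of the re-encoding described in the comment above, assuming a UTF-8 target (the exact wrapping in pythontex*.py may differ): on Python 2 the byte streams are wrapped with codecs writers, while on Python 3 the underlying binary buffers are rewrapped.

import sys

def set_io_encoding(encoding='UTF-8'):
    if sys.version_info[0] == 2:
        import codecs
        sys.stdout = codecs.getwriter(encoding)(sys.stdout, 'strict')
        sys.stderr = codecs.getwriter(encoding)(sys.stderr, 'strict')
    else:
        import io
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding=encoding)
        sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding=encoding)
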
@@ -2617,7 +2648,7 @@ def main(python=None):
# Load the code and process the settings it passes from the TeX side.
#
- # This gets a list containing the code (the part of the code file
+ # This gets a list containing the code (the part of the code file
# containing the settings is removed) and the processed settings.
load_code_get_settings(data, temp_data)
# Now that the settings are loaded, check if outputdir exits.
@@ -2628,19 +2659,19 @@ def main(python=None):
# Load/create old_data
get_old_data(data, old_data, temp_data)
-
-
+
+
# Hash the code. Determine what needs to be executed. Determine whether
- # Pygments should be used. Update pygments_settings to account for
- # Pygments commands and environments (as opposed to PythonTeX commands
+ # Pygments should be used. Update pygments_settings to account for
+ # Pygments commands and environments (as opposed to PythonTeX commands
# and environments).
hash_all(data, temp_data, old_data, engine_dict)
-
-
+
+
# Parse the code and write scripts for execution.
parse_code_write_scripts(data, temp_data, engine_dict)
-
-
+
+
# Execute the code and perform Pygments highlighting via multiprocessing.
do_multiprocessing(data, temp_data, old_data, engine_dict)
@@ -2650,11 +2681,11 @@ def main(python=None):
if 'upgrade_message' in temp_data:
print(temp_data['upgrade_message'])
sys.exit()
-
+
# Print exit message
print('\n--------------------------------------------------')
- # If some rerun settings are used, there may be unresolved errors or
- # warnings; if so, print a summary of those along with the current
+ # If some rerun settings are used, there may be unresolved errors or
+ # warnings; if so, print a summary of those along with the current
# error and warning summary
unresolved_errors = 0
unresolved_warnings = 0
@@ -2669,13 +2700,13 @@ def main(python=None):
if unresolved_warnings != 0 or unresolved_errors != 0:
print('PythonTeX: {0}'.format(data['raw_jobname']))
        print('    - Old:      {0} error(s), {1} warning(s)'.format(unresolved_errors, unresolved_warnings))
- print(' - Current: {0} error(s), {1} warnings(s)'.format(temp_data['errors'], temp_data['warnings']))
+        print('    - Current:  {0} error(s), {1} warning(s)'.format(temp_data['errors'], temp_data['warnings']))
else:
print('PythonTeX: {0} - {1} error(s), {2} warning(s)\n'.format(data['raw_jobname'], temp_data['errors'], temp_data['warnings']))
-
+
if 'upgrade_message' in temp_data:
print(temp_data['upgrade_message'])
-
+
# Exit with appropriate exit code based on user settings.
if temp_data['error_exit_code'] and temp_data['errors'] > 0:
sys.exit(1)
@@ -2684,8 +2715,8 @@ def main(python=None):
-# The "if" statement is needed for multiprocessing under Windows; see the
-# multiprocessing documentation. It is also needed in this case when the
+# The "if" statement is needed for multiprocessing under Windows; see the
+# multiprocessing documentation. It is also needed in this case when the
# script is invoked via the wrapper.
if __name__ == '__main__':
#// Python 2
diff --git a/Master/texmf-dist/scripts/pythontex/pythontex3.py b/Master/texmf-dist/scripts/pythontex/pythontex3.py
index 83d744adeee..b872dbaa2c4 100755
--- a/Master/texmf-dist/scripts/pythontex/pythontex3.py
+++ b/Master/texmf-dist/scripts/pythontex/pythontex3.py
@@ -4,16 +4,16 @@
'''
This is the main PythonTeX script. It should be launched via pythontex.py.
-Two versions of this script are provided. One, with name ending in "2", runs
+Two versions of this script are provided. One, with name ending in "2", runs
under Python 2.7. The other, with name ending in "3", runs under Python 3.2+.
-This script needs to be able to import pythontex_engines.py; in general it
+This script needs to be able to import pythontex_engines.py; in general it
should be in the same directory.
Licensed under the BSD 3-Clause License:
-Copyright (c) 2012-2014, Geoffrey M. Poore
+Copyright (c) 2012-2016, Geoffrey M. Poore
All rights reserved.
@@ -77,7 +77,7 @@ else:
# Script parameters
# Version
-__version__ = '0.14'
+__version__ = '0.15'
@@ -86,7 +86,7 @@ class Pytxcode(object):
def __init__(self, data, gobble):
self.delims, self.code = data.split('#\n', 1)
self.family, self.session, self.restart, self.instance, self.command, self.context, self.args_run, self.args_prettyprint, self.input_file, self.line = self.delims.split('#')
- self.instance_int = int(self.instance)
+ self.instance_int = int(self.instance)
self.line_int = int(self.line)
self.key_run = self.family + '#' + self.session + '#' + self.restart
self.key_typeset = self.key_run + '#' + self.instance
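
To make the record parsing above easier to follow, here is a sketch with a made-up .pytxcode record (the real records are written by pythontex.sty): the delimiter fields and the code are separated by '#\n', and the ten fields themselves by '#'.

# Hypothetical record, for illustration only
data = 'py#default#default#0#c#{}##-#doc.tex#12#\nx = 1\n'

delims, code = data.split('#\n', 1)
(family, session, restart, instance, command, context,
 args_run, args_prettyprint, input_file, line) = delims.split('#')

key_run = family + '#' + session + '#' + restart
key_typeset = key_run + '#' + instance
# key_run     -> 'py#default#default'
# key_typeset -> 'py#default#default#0'
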
@@ -118,41 +118,43 @@ class Pytxcode(object):
self.is_typeset = False
else:
self.is_typeset = True
-
+
if gobble == 'auto':
self.code = textwrap.dedent(self.code)
-
+
+ self.sub_template = None
+
def process_argv(data, temp_data):
'''
Process command line options using the argparse module.
-
+
Most options are passed via the file of code, rather than via the command
line.
'''
-
+
# Create a command line argument parser
parser = argparse.ArgumentParser()
parser.add_argument('TEXNAME',
help='LaTeX file, with or without .tex extension')
- parser.add_argument('--version', action='version',
- version='PythonTeX {0}'.format(data['version']))
- parser.add_argument('--encoding', default='UTF-8',
+ parser.add_argument('--version', action='version',
+ version='PythonTeX {0}'.format(data['version']))
+ parser.add_argument('--encoding', default='UTF-8',
help='encoding for all text files (see codecs module for encodings)')
- parser.add_argument('--error-exit-code', default='true',
- choices=('true', 'false'),
+ parser.add_argument('--error-exit-code', default='true',
+ choices=('true', 'false'),
help='return exit code of 1 if there are errors (not desirable with some TeX editors and workflows)')
group_run = parser.add_mutually_exclusive_group()
group_run.add_argument('--runall', nargs='?', default='false',
const='true', choices=('true', 'false'),
help='run ALL code; equivalent to package option')
- group_run.add_argument('--rerun', default='errors',
+ group_run.add_argument('--rerun', default='errors',
choices=('never', 'modified', 'errors', 'warnings', 'always'),
help='set conditions for rerunning code; equivalent to package option')
- parser.add_argument('--hashdependencies', nargs='?', default='false',
- const='true', choices=('true', 'false'),
+ parser.add_argument('--hashdependencies', nargs='?', default='false',
+ const='true', choices=('true', 'false'),
help='hash dependencies (such as external data) to check for modification, rather than using mtime; equivalent to package option')
parser.add_argument('-j', '--jobs', metavar='N', default=None, type=int,
help='Allow N jobs at once; defaults to cpu_count().')
@@ -160,17 +162,17 @@ def process_argv(data, temp_data):
help='verbose output')
parser.add_argument('--interpreter', default=None, help='set a custom interpreter; argument should be in the form "<interpreter>:<command>, <interp>:<cmd>, ..." where <interpreter> is "python", "ruby", etc., and <command> is the command for invoking the interpreter; argument may also be in the form of a Python dictionary')
group_debug = parser.add_mutually_exclusive_group()
- group_debug.add_argument('--debug', nargs='?', default=None,
+ group_debug.add_argument('--debug', nargs='?', default=None,
const='default',
metavar='<family>:<session>:<restart>',
help='Run the specified session (or default session) with the default debugger, if available. If there is only one session, it need not be specified. If the session name is unambiguous, it is sufficient. The full <family>:<session>:<restart> (for example, py:default:default) is only needed when the session name alone would be ambiguous.')
- group_debug.add_argument('--interactive', nargs='?', default=None,
+ group_debug.add_argument('--interactive', nargs='?', default=None,
const='default',
metavar='<family>:<session>:<restart>',
help='Run the specified session (or default session) in interactive mode. If there is only one session, it need not be specified. If the session name is unambiguous, it is sufficient. The full <family>:<session>:<restart> (for example, py:default:default) is only needed when the session name alone would be ambiguous.')
args = parser.parse_args()
-
- # Store the parsed argv in data and temp_data
+
+ # Store the parsed argv in data and temp_data
data['encoding'] = args.encoding
if args.error_exit_code == 'true':
temp_data['error_exit_code'] = True
@@ -205,7 +207,7 @@ def process_argv(data, temp_data):
for interp in interp_list:
if interp:
try:
- k, v = interp.split(':')
+ k, v = interp.split(':', 1)
k = k.strip(' \'"')
v = v.strip(' \'"')
interpreter_dict[k] = v
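
The switch to split(':', 1) above matters whenever the interpreter command itself contains a colon, most obviously a full Windows path. A minimal sketch with hypothetical input:

interpreter_dict = {}
arg = "python:C:\\Python34\\python.exe, ruby:ruby2.1"   # hypothetical --interpreter value

for interp in arg.split(','):
    interp = interp.strip()
    if interp:
        # maxsplit=1 keeps everything after the first colon intact,
        # so the drive letter in "C:\..." survives
        k, v = interp.split(':', 1)
        interpreter_dict[k.strip(' \'"')] = v.strip(' \'"')

# {'python': 'C:\\Python34\\python.exe', 'ruby': 'ruby2.1'}
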
@@ -215,7 +217,7 @@ def process_argv(data, temp_data):
print('Invalid --interpreter argument')
return sys.exit(2)
# If the Python interpreter wasn't set, then try to set an appropriate
- # default value, based on how PythonTeX was launched (pythontex.py,
+ # default value, based on how PythonTeX was launched (pythontex.py,
# pythontex2.py, or pythontex3.py).
if not set_python_interpreter:
if temp_data['python'] == 2:
@@ -230,12 +232,12 @@ def process_argv(data, temp_data):
directly. This should only be done when you want
to use Python version {0}, but have a different
version installed as the default. (Otherwise, you
- should start PythonTeX with pythontex.py.) For
+ should start PythonTeX with pythontex.py.) For
this to work correctly, you should install Python
version 3.3+, which has a Windows wrapper (py) that
- PythonTeX can use to run the correct version of
+ PythonTeX can use to run the correct version of
Python. If you do not want to install Python 3.3+,
- you can also use the --interpreter command-line
+ you can also use the --interpreter command-line
option to tell PythonTeX how to access the version
of Python you wish to use.
'''.format(temp_data['python'])
@@ -255,12 +257,12 @@ def process_argv(data, temp_data):
directly. This should only be done when you want
to use Python version {0}, but have a different
version installed as the default. (Otherwise, you
- should start PythonTeX with pythontex.py.) For
+ should start PythonTeX with pythontex.py.) For
this to work correctly, you should install Python
version 3.3+, which has a Windows wrapper (py) that
- PythonTeX can use to run the correct version of
+ PythonTeX can use to run the correct version of
Python. If you do not want to install Python 3.3+,
- you can also use the --interpreter command-line
+ you can also use the --interpreter command-line
option to tell PythonTeX how to access the version
of Python you wish to use.
'''.format(temp_data['python'])
@@ -268,10 +270,10 @@ def process_argv(data, temp_data):
return sys.exit(2)
else:
interpreter_dict['python'] = 'python3'
-
+
if args.TEXNAME is not None:
- # Determine if we a dealing with just a filename, or a name plus
- # path. If there's a path, we need to make the document directory
+        # Determine if we are dealing with just a filename, or a name plus
+ # path. If there's a path, we need to make the document directory
# the current working directory.
dir, raw_jobname = os.path.split(args.TEXNAME)
dir = os.path.expanduser(os.path.normcase(dir))
@@ -287,30 +289,30 @@ def process_argv(data, temp_data):
print(' Code file ' + raw_jobname + '.pytxcode does not exist.')
print(' Run LaTeX to create it.')
return sys.exit(1)
-
- # We need a "sanitized" version of the jobname, with spaces and
- # asterisks replaced with hyphens. This is done to avoid TeX issues
- # with spaces in file names, paralleling the approach taken in
- # pythontex.sty. From now on, we will use the sanitized version every
- # time we create a file that contains the jobname string. The raw
- # version will only be used in reference to pre-existing files created
+
+ # We need a "sanitized" version of the jobname, with spaces and
+ # asterisks replaced with hyphens. This is done to avoid TeX issues
+ # with spaces in file names, paralleling the approach taken in
+ # pythontex.sty. From now on, we will use the sanitized version every
+ # time we create a file that contains the jobname string. The raw
+ # version will only be used in reference to pre-existing files created
# on the TeX side, such as the .pytxcode file.
jobname = raw_jobname.replace(' ', '-').replace('"', '').replace('*', '-')
# Store the results in data
data['raw_jobname'] = raw_jobname
data['jobname'] = jobname
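
A sketch of the sanitization itself, with a hypothetical job name: spaces and asterisks become hyphens and double quotes are dropped, so every file PythonTeX creates gets a TeX-friendly name, while the raw name is still used to read files that LaTeX already wrote.

def sanitize_jobname(raw_jobname):
    # Same replacements as above
    return raw_jobname.replace(' ', '-').replace('"', '').replace('*', '-')

raw_jobname = 'my paper *draft*'          # hypothetical
jobname = sanitize_jobname(raw_jobname)   # 'my-paper--draft-'
# New files use `jobname`; '<raw_jobname>.pytxcode' is still read under
# the raw name, since the TeX side created it.
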
-
- # We need to check to make sure that the "sanitized" jobname doesn't
- # lead to a collision with a file that already has that name, so that
+
+ # We need to check to make sure that the "sanitized" jobname doesn't
+ # lead to a collision with a file that already has that name, so that
# two files attempt to use the same PythonTeX folder.
- #
+ #
# If <jobname>.<ext> and <raw_jobname>.<ext> both exist, where <ext>
- # is a common LaTeX extension, we exit. We operate under the
- # assumption that there should be only a single file <jobname> in the
- # document root directory that has a common LaTeX extension. That
- # could be false, but if so, the user probably has worse things to
+ # is a common LaTeX extension, we exit. We operate under the
+ # assumption that there should be only a single file <jobname> in the
+ # document root directory that has a common LaTeX extension. That
+ # could be false, but if so, the user probably has worse things to
# worry about than a potential PythonTeX output collision.
- # If <jobname>* and <raw_jobname>* both exist, we issue a warning but
+ # If <jobname>* and <raw_jobname>* both exist, we issue a warning but
# attempt to proceed.
if jobname != raw_jobname:
resolved = False
@@ -335,9 +337,9 @@ def process_argv(data, temp_data):
print(' ' + jobname + '*')
print(' Attempting to proceed.')
temp_data['warnings'] += 1
- break
-
-
+ break
+
+
def load_code_get_settings(data, temp_data):
@@ -357,7 +359,7 @@ def load_code_get_settings(data, temp_data):
print(' Code file ' + raw_jobname + '.pytxcode does not exist.')
print(' Run LaTeX to create it.')
return sys.exit(1)
-
+
# Split code and settings
try:
pytxcode, pytxsettings = pytxcode.rsplit('=>PYTHONTEX:SETTINGS#', 1)
@@ -365,8 +367,8 @@ def load_code_get_settings(data, temp_data):
print('The .pytxcode file appears to have an outdated format or be invalid')
print('Run LaTeX to make sure the file is current')
return sys.exit(1)
-
-
+
+
# Prepare to process settings
#
# Create a dict for storing settings.
@@ -374,7 +376,7 @@ def load_code_get_settings(data, temp_data):
# Create a dict for storing Pygments settings.
# Each dict entry will itself be a dict.
pygments_settings = defaultdict(dict)
-
+
# Create a dict of processing functions, and generic processing functions
settings_func = dict()
def set_kv_data(k, v):
@@ -396,7 +398,7 @@ def load_code_get_settings(data, temp_data):
def set_kv_data_fvextfile(k, v):
# Error checking on TeX side should be enough, but be careful anyway
try:
- v = int(v)
+ v = int(v)
except ValueError:
print('* PythonTeX error')
print(' Unable to parse package option fvextfile.')
@@ -468,7 +470,7 @@ def load_code_get_settings(data, temp_data):
settings_func['pyconbanner'] = set_kv_data
settings_func['pyconfilename'] = set_kv_data
settings_func['depythontex'] = set_kv_data
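
The settings_func mapping built above is a plain dispatch table: each key from the settings block in the .pytxcode file is routed to a small handler. A reduced sketch (assuming a key=value line format; the handler set here is illustrative, not complete):

settings = {}

def set_kv_data(k, v):
    settings[k] = v            # generic handler: store as-is

def set_kv_bool(k, v):
    settings[k] = (v == 'true')

settings_func = {'version': set_kv_data,
                 'keeptemps': set_kv_data,
                 'makestderr': set_kv_bool}

for line in 'version=0.15\nmakestderr=true'.splitlines():   # hypothetical block
    if line:
        k, v = line.split('=', 1)
        settings_func.get(k, set_kv_data)(k, v)

# settings == {'version': '0.15', 'makestderr': True}
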
-
+
# Process settings
for line in pytxsettings.split('\n'):
if line:
@@ -486,33 +488,33 @@ def load_code_get_settings(data, temp_data):
print(' The version of the PythonTeX scripts does not match the last code')
print(' saved by the document--run LaTeX to create an updated version.\n')
sys.exit(1)
-
+
# Store all results that haven't already been stored.
data['settings'] = settings
data['pygments_settings'] = pygments_settings
-
+
# Create a tuple of vital quantities that invalidate old saved data
# Don't need to include outputdir, because if that changes, no old output
# fvextfile could be checked on a case-by-case basis, which would result
- # in faster output, but that would involve a good bit of additional
+ # in faster output, but that would involve a good bit of additional
# logic, which probably isn't worth it for a feature that will rarely be
# changed.
- data['vitals'] = (data['version'], data['encoding'],
+ data['vitals'] = (data['version'], data['encoding'],
settings['gobble'], settings['fvextfile'])
-
+
# Create tuples of vital quantities
data['code_vitals'] = (settings['workingdir'], settings['keeptemps'],
settings['makestderr'], settings['stderrfilename'])
data['cons_vitals'] = (settings['workingdir'])
data['typeset_vitals'] = ()
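
These "vitals" tuples are compared against the values saved by the previous run to decide whether cached results can be reused at all; a mismatch in any element invalidates the corresponding cache. A minimal sketch with hypothetical values (note that a single-element tuple needs a trailing comma, so the bare parentheses around settings['workingdir'] above simply yield the string, which still compares consistently from run to run):

old_code_vitals = ('.', 'none', False, 'full')      # from the last run (hypothetical)
new_code_vitals = ('.', 'all', False, 'full')       # keeptemps changed

reuse_code_cache = (new_code_vitals == old_code_vitals)
if not reuse_code_cache:
    # Everything governed by these vitals must be executed again
    pass
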
-
+
# Pass any customizations to types
for k in engine_dict:
engine_dict[k].customize(pyfuture=settings['pyfuture'],
pyconfuture=settings['pyconfuture'],
pyconbanner=settings['pyconbanner'],
pyconfilename=settings['pyconfilename'])
-
+
# Store code
# Do this last, so that Pygments settings are available
if pytxcode.startswith('=>PYTHONTEX#'):
@@ -529,12 +531,12 @@ def set_upgrade_compatibility(data, old, temp_data):
When upgrading, modify settings to maintain backward compatibility when
possible and important
'''
- if (old['version'].startswith('v') and
+ if (old['version'].startswith('v') and
not data['settings']['workingdirset'] and
data['settings']['outputdir'] != '.'):
old['compatibility'] = '0.13'
do_upgrade_compatibility(data, old, temp_data)
-
+
def do_upgrade_compatibility(data, old_data, temp_data):
if 'compatibility' in old_data:
@@ -549,10 +551,10 @@ def do_upgrade_compatibility(data, old_data, temp_data):
directory rather than the output directory. PythonTeX has detected
that you have been using the output directory as the working directory.
It will continue to use the output directory for now. To keep your
- current settings long-term and avoid seeing this message in the future,
- add the following command to the preamble of your document, right after
+ current settings long-term and avoid seeing this message in the future,
+ add the following command to the preamble of your document, right after
the "\\usepackage{pythontex}": "\setpythontexworkingdir{<outputdir>}".
- If you wish to continue with the new settings instead, simply delete
+ If you wish to continue with the new settings instead, simply delete
the file with extension .pkl in the output directory, and run PythonTeX.
**** End PythonTeX upgrade message ****
'''
@@ -561,29 +563,29 @@ def do_upgrade_compatibility(data, old_data, temp_data):
def get_old_data(data, old_data, temp_data):
'''
- Load data from the last run, if it exists, into the dict old_data.
- Determine the path to the PythonTeX scripts, either by using a previously
+ Load data from the last run, if it exists, into the dict old_data.
+ Determine the path to the PythonTeX scripts, either by using a previously
found, saved path or via kpsewhich.
-
- The old data is used for determining when PythonTeX has been upgraded,
- when any settings have changed, when code has changed (via hashes), and
- what files may need to be cleaned up. The location of the PythonTeX
- scripts is needed so that they can be imported by the scripts created by
- PythonTeX. The location of the scripts is confirmed even if they were
- previously located, to make sure that the path is still valid. Finding
- the scripts depends on having a TeX installation that includes the
+
+ The old data is used for determining when PythonTeX has been upgraded,
+ when any settings have changed, when code has changed (via hashes), and
+ what files may need to be cleaned up. The location of the PythonTeX
+ scripts is needed so that they can be imported by the scripts created by
+ PythonTeX. The location of the scripts is confirmed even if they were
+ previously located, to make sure that the path is still valid. Finding
+ the scripts depends on having a TeX installation that includes the
Kpathsea library (TeX Live and MiKTeX, possibly others).
-
+
All code that relies on old_data is written based on the assumption that
- if old_data exists and has the current PythonTeX version, then it
+ if old_data exists and has the current PythonTeX version, then it
contains all needed information. Thus, all code relying on old_data must
- check that it was loaded and that it has the current version. If not,
+ check that it was loaded and that it has the current version. If not,
code should adapt gracefully.
'''
# Create a string containing the name of the data file
pythontex_data_file = os.path.expanduser(os.path.normcase(os.path.join(data['settings']['outputdir'], 'pythontex_data.pkl')))
-
+
# Load the old data if it exists (read as binary pickle)
if os.path.isfile(pythontex_data_file):
f = open(pythontex_data_file, 'rb')
@@ -613,9 +615,9 @@ def get_old_data(data, old_data, temp_data):
os.remove(f)
else:
temp_data['loaded_old_data'] = False
-
+
# Set the utilspath
- # Assume that if the utils aren't in the same location as
+ # Assume that if the utils aren't in the same location as
# `pythontex.py`, then they are somewhere else on `sys.path` that
# will always be available (for example, installed as a Python module),
# and thus specifying a path isn't necessary.
@@ -636,9 +638,9 @@ def modified_dependencies(key, data, old_data, temp_data):
old_dep_hash_dict = old_data['dependencies'][key]
workingdir = data['settings']['workingdir']
for dep in old_dep_hash_dict.keys():
- # We need to know if the path is relative (based off the
- # working directory) or absolute. We can't use
- # os.path.isabs() alone for determining the distinction,
+ # We need to know if the path is relative (based off the
+ # working directory) or absolute. We can't use
+ # os.path.isabs() alone for determining the distinction,
# because we must take into account the possibility of an
# initial ~ (tilde) standing for the home directory.
dep_file = os.path.expanduser(os.path.normcase(dep))
@@ -650,15 +652,15 @@ def modified_dependencies(key, data, old_data, temp_data):
print(' It belongs to ' + key.replace('#', ':'))
print(' Relative paths to dependencies must be specified from the working directory.')
temp_data['errors'] += 1
- # A removed dependency should trigger an error, but it
- # shouldn't cause code to execute. Running the code
- # again would just give more errors when it can't find
- # the dependency. (There won't be issues when a
- # dependency is added or removed, because that would
- # involve modifying code, which would trigger
+ # A removed dependency should trigger an error, but it
+ # shouldn't cause code to execute. Running the code
+ # again would just give more errors when it can't find
+ # the dependency. (There won't be issues when a
+ # dependency is added or removed, because that would
+ # involve modifying code, which would trigger
# re-execution.)
elif hashdependencies:
- # Read and hash the file in binary. Opening in text mode
+ # Read and hash the file in binary. Opening in text mode
# would require an unnecessary decoding and encoding cycle.
f = open(dep_file, 'rb')
hasher = sha1()
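
A sketch of the two modification checks described above, wrapped in a hypothetical helper: a cheap mtime comparison by default, or a binary SHA-1 of the file contents when hashdependencies is enabled.

import os
from hashlib import sha1

def dependency_signature(path, hashdependencies=False, blocksize=1024 * 1024):
    mtime = os.path.getmtime(path)
    if not hashdependencies:
        return (mtime, '')
    hasher = sha1()
    # Read in binary chunks; text mode would only add a decode/encode cycle
    with open(path, 'rb') as f:
        for block in iter(lambda: f.read(blocksize), b''):
            hasher.update(block)
    return (mtime, hasher.hexdigest())
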
@@ -707,28 +709,28 @@ def should_rerun(hash, old_hash, old_exit_status, key, rerun, data, old_data, te
def hash_all(data, temp_data, old_data, engine_dict):
'''
Hash the code to see what has changed and needs to be updated.
-
- Save the hashes in hashdict. Create update_code, a list of bools
- regarding whether code should be executed. Create update_pygments, a
- list of bools determining what needs updated Pygments highlighting.
- Update pygments_settings to account for Pygments (as opposed to PythonTeX)
+
+ Save the hashes in hashdict. Create update_code, a list of bools
+ regarding whether code should be executed. Create update_pygments, a
+ list of bools determining what needs updated Pygments highlighting.
+ Update pygments_settings to account for Pygments (as opposed to PythonTeX)
commands and environments.
'''
- # Note that the PythonTeX information that accompanies code must be
- # hashed in addition to the code itself; the code could stay the same,
- # but its context or args could change, which might require that code be
- # executed. All of the PythonTeX information is hashed except for the
- # input line number. Context-dependent code is going too far if
+ # Note that the PythonTeX information that accompanies code must be
+ # hashed in addition to the code itself; the code could stay the same,
+ # but its context or args could change, which might require that code be
+ # executed. All of the PythonTeX information is hashed except for the
+ # input line number. Context-dependent code is going too far if
# it depends on that.
-
+
# Create variables to more easily access parts of data
pytxcode = temp_data['pytxcode']
encoding = data['encoding']
loaded_old_data = temp_data['loaded_old_data']
rerun = temp_data['rerun']
pygments_settings = data['pygments_settings']
-
+
# Calculate cumulative hashes for all code that is executed
# Calculate individual hashes for all code that will be typeset
code_hasher = defaultdict(sha1)
@@ -759,31 +761,31 @@ def hash_all(data, temp_data, old_data, engine_dict):
typeset_hasher[c.key_typeset].update(c.hashable_delims_typeset.encode(encoding))
typeset_hasher[c.key_typeset].update(c.code.encode(encoding))
typeset_hasher[c.key_typeset].update(c.args_prettyprint.encode(encoding))
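
A reduced sketch of the cumulative hashing above: defaultdict(sha1) gives every session key its own running hash object, and the delimiter metadata is fed in alongside the code so that changed args or context, not just changed code, trigger re-execution. The records below are hypothetical.

from collections import defaultdict
from hashlib import sha1

encoding = 'UTF-8'
code_hasher = defaultdict(sha1)

records = [('py#default#default', 'c#0#{}#', 'x = 1\n'),
           ('py#default#default', 'c#1#{}#', 'print(x)\n')]

for key, delims, code in records:
    code_hasher[key].update(delims.encode(encoding))
    code_hasher[key].update(code.encode(encoding))

digests = {key: h.hexdigest() for key, h in code_hasher.items()}
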
-
+
# Store hashes
code_hash_dict = {}
for key in code_hasher:
family = key.split('#', 1)[0]
- code_hash_dict[key] = (code_hasher[key].hexdigest(),
+ code_hash_dict[key] = (code_hasher[key].hexdigest(),
cc_hasher[family].hexdigest(),
engine_dict[family].get_hash())
data['code_hash_dict'] = code_hash_dict
-
+
cons_hash_dict = {}
for key in cons_hasher:
family = key.split('#', 1)[0]
- cons_hash_dict[key] = (cons_hasher[key].hexdigest(),
+ cons_hash_dict[key] = (cons_hasher[key].hexdigest(),
cc_hasher[family].hexdigest(),
engine_dict[family].get_hash())
data['cons_hash_dict'] = cons_hash_dict
-
+
typeset_hash_dict = {}
for key in typeset_hasher:
typeset_hash_dict[key] = typeset_hasher[key].hexdigest()
data['typeset_hash_dict'] = typeset_hash_dict
-
-
+
+
# See what needs to be updated.
# In the process, copy over macros and files that may be reused.
code_update = {}
@@ -810,27 +812,27 @@ def hash_all(data, temp_data, old_data, engine_dict):
old_typeset_hash_dict = old_data['typeset_hash_dict']
old_pygments_settings = old_data['pygments_settings']
for s in pygments_settings:
- if (s in old_pygments_settings and
+ if (s in old_pygments_settings and
pygments_settings[s] == old_pygments_settings[s]):
pygments_settings_changed[s] = False
else:
pygments_settings_changed[s] = True
- # If old data was loaded (and thus is compatible) determine what has
- # changed so that only
+ # If old data was loaded (and thus is compatible) determine what has
+ # changed so that only
# modified code may be executed. Otherwise, execute everything.
# We don't have to worry about checking for changes in pyfuture, because
# custom code and default code are hashed. The treatment of keeptemps
# could be made more efficient (if changed to 'none', just delete old temp
- # files rather than running everything again), but given that it is
+ # files rather than running everything again), but given that it is
    # intended as a debugging aid, that probably isn't worth it.
- # We don't have to worry about hashdependencies changing, because if it
+ # We don't have to worry about hashdependencies changing, because if it
# does the hashes won't match (file contents vs. mtime) and thus code will
# be re-executed.
if loaded_old_data and data['code_vitals'] == old_data['code_vitals']:
# Compare the hash values, and set which code needs to be run
for key in code_hash_dict:
- if (key in old_code_hash_dict and
+ if (key in old_code_hash_dict and
not should_rerun(code_hash_dict[key], old_code_hash_dict[key], old_exit_status[key], key, rerun, data, old_data, temp_data)):
code_update[key] = False
macros[key] = old_macros[key]
@@ -838,15 +840,15 @@ def hash_all(data, temp_data, old_data, engine_dict):
dependencies[key] = old_dependencies[key]
exit_status[key] = old_exit_status[key]
else:
- code_update[key] = True
- else:
+ code_update[key] = True
+ else:
for key in code_hash_dict:
code_update[key] = True
-
+
if loaded_old_data and data['cons_vitals'] == old_data['cons_vitals']:
# Compare the hash values, and set which code needs to be run
for key in cons_hash_dict:
- if (key in old_cons_hash_dict and
+ if (key in old_cons_hash_dict and
not should_rerun(cons_hash_dict[key], old_cons_hash_dict[key], old_exit_status[key], key, rerun, data, old_data, temp_data)):
cons_update[key] = False
macros[key] = old_macros[key]
@@ -855,17 +857,17 @@ def hash_all(data, temp_data, old_data, engine_dict):
dependencies[key] = old_dependencies[key]
exit_status[key] = old_exit_status[key]
else:
- cons_update[key] = True
- else:
+ cons_update[key] = True
+ else:
for key in cons_hash_dict:
cons_update[key] = True
-
+
if loaded_old_data and data['typeset_vitals'] == old_data['typeset_vitals']:
for key in typeset_hash_dict:
family = key.split('#', 1)[0]
if family in pygments_settings:
if (not pygments_settings_changed[family] and
- key in old_typeset_hash_dict and
+ key in old_typeset_hash_dict and
typeset_hash_dict[key] == old_typeset_hash_dict[key]):
pygments_update[key] = False
if key in old_pygments_macros:
@@ -902,7 +904,7 @@ def hash_all(data, temp_data, old_data, engine_dict):
for s in pygments_style_list:
formatter = LatexFormatter(style=s, commandprefix='PYG'+s)
pygments_style_defs[s] = formatter.get_style_defs()
-
+
# Save to data
temp_data['code_update'] = code_update
temp_data['cons_update'] = cons_update
@@ -916,9 +918,9 @@ def hash_all(data, temp_data, old_data, engine_dict):
data['typeset_cache'] = typeset_cache
data['dependencies'] = dependencies
data['exit_status'] = exit_status
-
-
- # Clean up for code that will be run again, and for code that no longer
+
+
+ # Clean up for code that will be run again, and for code that no longer
# exists.
if loaded_old_data:
# Take care of code files
@@ -948,8 +950,8 @@ def hash_all(data, temp_data, old_data, engine_dict):
if os.path.isfile(f):
os.remove(f)
# Take care of old Pygments files
- # The approach here is a little different since there isn't a
- # Pygments-specific hash dict, but there is a Pygments-specific
+ # The approach here is a little different since there isn't a
+ # Pygments-specific hash dict, but there is a Pygments-specific
# dict of lists of files.
for key in pygments_update:
if pygments_update[key] and key in old_pygments_files:
@@ -989,7 +991,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
files = data['files']
debug = temp_data['debug']
interactive = temp_data['interactive']
-
+
# Tweak the update dicts to work with debug command-line option.
# #### This should probably be refactored later, once the debug interface
# stabilizes
@@ -1035,12 +1037,12 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
temp_data['debug_key'] = arg_key
else:
temp_data['interactive_key'] = arg_key
-
-
-
- # We need to keep track of the last instance for each session, so
- # that duplicates can be eliminated. Some LaTeX environments process
- # their content multiple times and thus will create duplicates. We
+
+
+
+ # We need to keep track of the last instance for each session, so
+ # that duplicates can be eliminated. Some LaTeX environments process
+ # their content multiple times and thus will create duplicates. We
# need to initialize everything at -1, since instances begin at zero.
def negative_one():
return -1
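
A sketch of the duplicate filtering this enables (instances here are hypothetical): tracking the highest instance seen so far for each session lets repeated instances, produced by environments that evaluate their contents more than once, be skipped. One practical reason to prefer a named module-level function over a lambda as the default factory is that it keeps the defaultdict picklable.

from collections import defaultdict

def negative_one():
    # Instances count from zero, so "nothing seen yet" must be -1
    return -1

last_instance = defaultdict(negative_one)

kept = []
for key_run, instance in [('py#default#default', 0),
                          ('py#default#default', 1),
                          ('py#default#default', 1)]:   # duplicate
    if instance > last_instance[key_run]:
        last_instance[key_run] = instance
        kept.append((key_run, instance))

# kept contains each instance exactly once
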
@@ -1054,7 +1056,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
if c.is_typeset and pygments_update[c.key_typeset]:
pygments_list.append(c)
elif c.is_cons:
- # Only append to Pygments if not run, since Pygments is
+ # Only append to Pygments if not run, since Pygments is
# automatically taken care of during run for console content
if cons_update[c.key_run]:
cons_dict[c.key_run].append(c)
@@ -1067,7 +1069,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
cc_dict_begin[c.cc_type].append(c)
else:
cc_dict_end[c.cc_type].append(c)
-
+
# Save
temp_data['code_dict'] = code_dict
temp_data['cc_dict_begin'] = cc_dict_begin
@@ -1100,7 +1102,7 @@ def parse_code_write_scripts(data, temp_data, engine_dict):
sessionfile.close()
code_index_dict[key] = code_index
temp_data['code_index_dict'] = code_index_dict
-
+
# Write synchronization file if in debug mode
if debug is not None:
# Might improve tracking/cleanup of syncdb files
@@ -1150,7 +1152,7 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pygments_settings = data['pygments_settings']
jobs = temp_data['jobs']
verbose = temp_data['verbose']
-
+
code_dict = temp_data['code_dict']
cons_dict = temp_data['cons_dict']
cc_dict_begin = temp_data['cc_dict_begin']
@@ -1163,21 +1165,21 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pygments_files = data['pygments_files']
pygments_macros = data['pygments_macros']
typeset_cache = data['typeset_cache']
-
+
errors = temp_data['errors']
warnings = temp_data['warnings']
-
+
makestderr = data['settings']['makestderr']
stderrfilename = data['settings']['stderrfilename']
code_index_dict = temp_data['code_index_dict']
-
+
hashdependencies = temp_data['hashdependencies']
dependencies = data['dependencies']
exit_status = data['exit_status']
start_time = data['start_time']
debug = temp_data['debug']
interactive = temp_data['interactive']
-
+
# If in debug or interactive mode, short-circuit the whole process
# #### This should probably be refactored later, once debugging is more
# mature
@@ -1185,17 +1187,17 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
import shlex
if debug is not None:
print('Entering debug mode for "{0}"\n'.format(debug) + '-'*20 + '\n')
- key = temp_data['debug_key']
+ key = temp_data['debug_key']
else:
print('Entering interactive mode for "{0}"\n'.format(interactive) + '-'*20 + '\n')
- key = temp_data['interactive_key']
+ key = temp_data['interactive_key']
basename = key.replace('#', '_')
family, session, restart = key.split('#')
# #### Revise as debugging is expanded
if debug is not None and engine_dict[family].language != 'python':
return sys.exit('Currently, debug only supports Python')
if debug is not None:
- # #### Eventually, should move to pythontex_engines.py and
+ # #### Eventually, should move to pythontex_engines.py and
# provide means for customization
command = '{python} {debug} {file}.py --interactive'
command = command.replace('{python}', interpreter_dict['python'])
@@ -1222,23 +1224,23 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
exec_cmd = shlex.split(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\')))
try:
proc = subprocess.Popen(exec_cmd)
- except WindowsError as e:
- if e.errno == 2:
+ except (WindowsError, FileNotFoundError) as e:
+ if platform.system() == 'Windows' and e.errno == 2:
# Batch files won't be found when called without extension. They
# would be found if `shell=True`, but then getting the right
# exit code is tricky. So we perform some `cmd` trickery that
- # is essentially equivalent to `shell=True`, but gives correct
+ # is essentially equivalent to `shell=True`, but gives correct
# exit codes. Note that `subprocess.Popen()` works with strings
# under Windows; a list is not required.
exec_cmd_string = ' '.join(exec_cmd)
exec_cmd_string = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(exec_cmd_string)
proc = subprocess.Popen(exec_cmd_string)
else:
- raise
+ raise
proc.wait()
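
A condensed sketch of the fallback used above (and again in run_code() further down): when a Windows batch file is invoked without its extension, the first Popen() fails with errno 2, and the command is rerun through cmd so that a failing errorlevel still surfaces as a nonzero exit code.

import platform
import subprocess

def popen_with_bat_fallback(exec_cmd, **kwargs):
    try:
        return subprocess.Popen(exec_cmd, **kwargs)
    except OSError as e:
        # WindowsError and FileNotFoundError are both OSError subclasses
        if platform.system() == 'Windows' and e.errno == 2:
            # `call` runs the batch file; `if errorlevel 1 exit 1` converts a
            # failing errorlevel into a real exit code, much like shell=True
            # but without losing the return code.
            cmd = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(' '.join(exec_cmd))
            return subprocess.Popen(cmd, **kwargs)
        raise

# proc = popen_with_bat_fallback(['script.bat'])   # hypothetical batch file
# proc.wait()
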
os.chdir(orig_cwd)
# Do a basic update of pickled data
- # This is only really needed for tracking the code file and the
+ # This is only really needed for tracking the code file and the
# synchronization file (if it was created)
if temp_data['loaded_old_data'] and key in old_data['exit_status']:
exit_status[key] = old_data['exit_status'][key]
@@ -1253,29 +1255,29 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pickle.dump(data, f, -1)
f.close()
return
-
-
- # Create a pool for multiprocessing. Set the maximum number of
+
+
+ # Create a pool for multiprocessing. Set the maximum number of
# concurrent processes to a user-specified value for jobs. If the user
- # has not specified a value, then it will be None, and
+ # has not specified a value, then it will be None, and
# multiprocessing.Pool() will use cpu_count().
pool = multiprocessing.Pool(jobs)
tasks = []
-
+
# If verbose, print a list of processes
if verbose:
print('\n* PythonTeX will run the following processes')
print(' with working directory {0}'.format(workingdir))
print(' (maximum concurrent processes = {0})'.format(jobs))
-
- # Add code processes. Note that everything placed in the codedict
+
+ # Add code processes. Note that everything placed in the codedict
# needs to be executed, based on previous testing, except for custom code.
for key in code_dict:
family = key.split('#')[0]
# Uncomment the following for debugging, and comment out what follows
'''run_code(encoding, outputdir, workingdir, code_dict[key],
engine_dict[family].language,
- engine_dict[family].command,
+ engine_dict[family].commands,
engine_dict[family].created,
engine_dict[family].extension,
makestderr, stderrfilename,
@@ -1285,10 +1287,10 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
engine_dict[family].linenumbers,
engine_dict[family].lookbehind,
keeptemps, hashdependencies)'''
- tasks.append(pool.apply_async(run_code, [encoding, outputdir,
+ tasks.append(pool.apply_async(run_code, [encoding, outputdir,
workingdir, code_dict[key],
engine_dict[family].language,
- engine_dict[family].command,
+ engine_dict[family].commands,
engine_dict[family].created,
engine_dict[family].extension,
makestderr, stderrfilename,
@@ -1300,18 +1302,18 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
keeptemps, hashdependencies]))
if verbose:
print(' - Code process ' + key.replace('#', ':'))
-
+
# Add console processes
for key in cons_dict:
family = key.split('#')[0]
if engine_dict[family].language.startswith('python'):
if family in pygments_settings:
# Uncomment the following for debugging
- '''python_console(jobname, encoding, outputdir, workingdir,
+ '''python_console(jobname, encoding, outputdir, workingdir,
fvextfile, pygments_settings[family],
cc_dict_begin[family], cons_dict[key],
cc_dict_end[family], engine_dict[family].startup,
- engine_dict[family].banner,
+ engine_dict[family].banner,
engine_dict[family].filename)'''
tasks.append(pool.apply_async(python_console, [jobname, encoding,
outputdir, workingdir,
@@ -1333,31 +1335,31 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
cc_dict_end[family],
engine_dict[family].startup,
engine_dict[family].banner,
- engine_dict[family].filename]))
+ engine_dict[family].filename]))
else:
print('* PythonTeX error')
print(' Currently, non-Python consoles are not supported')
errors += 1
if verbose:
print(' - Console process ' + key.replace('#', ':'))
-
+
# Add a Pygments process
if pygments_list:
- tasks.append(pool.apply_async(do_pygments, [encoding, outputdir,
+ tasks.append(pool.apply_async(do_pygments, [encoding, outputdir,
fvextfile,
pygments_list,
pygments_settings,
typeset_cache]))
if verbose:
print(' - Pygments process')
-
+
# Execute the processes
pool.close()
pool.join()
-
+
# Get the outputs of processes
# Get the files and macros created. Get the number of errors and warnings
- # produced. Get any messages returned. Get the exit_status, which is a
+ # produced. Get any messages returned. Get the exit_status, which is a
# dictionary of code that failed and thus must be run again (its hash is
# set to a null string). Keep track of whether there were any new files,
# so that the last time of file creation in .pytxmcr can be updated.
@@ -1375,7 +1377,7 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
errors += result['errors']
warnings += result['warnings']
exit_status[key] = (result['errors'], result['warnings'])
- messages.extend(result['messages'])
+ messages.extend(result['messages'])
elif result['process'] == 'console':
key = result['key']
files[key].extend(result['files'])
@@ -1399,8 +1401,8 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
pygments_macros.update(result['pygments_macros'])
errors += result['errors']
warnings += result['warnings']
- messages.extend(result['messages'])
-
+ messages.extend(result['messages'])
+
# Do a quick check to see if any dependencies were modified since the
# beginning of the run. If so, reset them so they will run next time and
# issue a warning
@@ -1419,11 +1421,11 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
for s in set(unresolved_sessions):
print(' - ' + s)
warnings += 1
-
-
+
+
# Save all content (only needs to be done if code was indeed run).
# Save a commented-out time corresponding to the last time PythonTeX ran
- # and created files, so that tools like latexmk can easily detect when
+ # and created files, so that tools like latexmk can easily detect when
# another run is needed.
if tasks:
if new_files or not temp_data['loaded_old_data']:
@@ -1431,28 +1433,33 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
else:
last_new_file_time = old_data['last_new_file_time']
data['last_new_file_time'] = last_new_file_time
-
+
macro_file = open(os.path.expanduser(os.path.normcase(os.path.join(outputdir, jobname + '.pytxmcr'))), 'w', encoding=encoding)
macro_file.write('%Last time of file creation: ' + str(last_new_file_time) + '\n\n')
for key in macros:
macro_file.write(''.join(macros[key]))
macro_file.close()
-
+
pygments_macro_file = open(os.path.expanduser(os.path.normcase(os.path.join(outputdir, jobname + '.pytxpyg'))), 'w', encoding=encoding)
# Only save Pygments styles that are used
style_set = set([pygments_settings[k]['formatter_options']['style'] for k in pygments_settings if k != ':GLOBAL'])
+ if style_set:
+ from pygments.formatters import LatexFormatter
+ formatter = LatexFormatter(style='default', commandprefix='PYG')
+ PYG_style_defs = formatter.get_style_defs()
+ pygments_macro_file.write(PYG_style_defs)
for key in pygments_style_defs:
if key in style_set:
pygments_macro_file.write(''.join(pygments_style_defs[key]))
for key in pygments_macros:
pygments_macro_file.write(''.join(pygments_macros[key]))
pygments_macro_file.close()
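
For reference, a sketch of how the style-definition macros written above can be generated: one LatexFormatter per style, each with its own command prefix, so differently styled snippets can coexist in one document, plus a plain 'PYG' set matching the commandprefix forced elsewhere. The style names are only examples.

from pygments.formatters import LatexFormatter

pygments_style_defs = {}
for style in ('default', 'monokai'):                 # hypothetical used styles
    # Distinct prefixes (e.g. \PYGmonokai...) avoid macro name clashes
    formatter = LatexFormatter(style=style, commandprefix='PYG' + style)
    pygments_style_defs[style] = formatter.get_style_defs()

plain_defs = LatexFormatter(style='default', commandprefix='PYG').get_style_defs()
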
-
+
pythontex_data_file = os.path.expanduser(os.path.normcase(os.path.join(outputdir, 'pythontex_data.pkl')))
f = open(pythontex_data_file, 'wb')
pickle.dump(data, f, -1)
f.close()
-
+
# Print any errors and warnings.
if messages:
print('\n'.join(messages))
@@ -1465,15 +1472,15 @@ def do_multiprocessing(data, temp_data, old_data, engine_dict):
-def run_code(encoding, outputdir, workingdir, code_list, language, command,
- command_created, extension, makestderr, stderrfilename,
- code_index, errorsig, warningsig, linesig, stderrlookbehind,
+def run_code(encoding, outputdir, workingdir, code_list, language, commands,
+ command_created, extension, makestderr, stderrfilename,
+ code_index, errorsig, warningsig, linesig, stderrlookbehind,
keeptemps, hashdependencies):
'''
Function for multiprocessing code files
'''
import shlex
-
+
# Create what's needed for storing results
family = code_list[0].family
session = code_list[0].session
@@ -1485,64 +1492,68 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
warnings = 0
unknowns = 0
messages = []
-
- # Create message lists only for stderr, one for undelimited stderr and
- # one for delimited, so it's easy to keep track of if there is any
+
+ # Create message lists only for stderr, one for undelimited stderr and
+ # one for delimited, so it's easy to keep track of if there is any
# stderr. These are added onto messages at the end.
err_messages_ud = []
err_messages_d = []
-
+
# We need to let the user know we are switching code files
# We check at the end to see if there were indeed any errors and warnings
# and if not, clear messages.
messages.append('\n---- Messages for ' + key_run.replace('#', ':') + ' ----')
-
+
# Open files for stdout and stderr, run the code, then close the files
basename = key_run.replace('#', '_')
out_file_name = os.path.expanduser(os.path.normcase(os.path.join(outputdir, basename + '.out')))
err_file_name = os.path.expanduser(os.path.normcase(os.path.join(outputdir, basename + '.err')))
out_file = open(out_file_name, 'w', encoding=encoding)
err_file = open(err_file_name, 'w', encoding=encoding)
- # Note that command is a string, which must be converted to list
- # Must double-escape any backslashes so that they survive `shlex.split()`
script = os.path.expanduser(os.path.normcase(os.path.join(outputdir, basename)))
if os.path.isabs(script):
script_full = script
else:
script_full = os.path.expanduser(os.path.normcase(os.path.join(os.getcwd(), outputdir, basename)))
- # `shlex.split()` only works with Unicode after 2.7.2
- if (sys.version_info.major == 2 and sys.version_info.micro < 3):
- exec_cmd = shlex.split(bytes(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\'))))
- exec_cmd = [unicode(elem) for elem in exec_cmd]
- else:
- exec_cmd = shlex.split(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\')))
- # Add any created files due to the command
- # This needs to be done before attempts to execute, to prevent orphans
+ # #### Need to revise so that intermediate files can be detected and cleaned up
for f in command_created:
- files.append(f.format(file=script))
- try:
- proc = subprocess.Popen(exec_cmd, stdout=out_file, stderr=err_file)
- except WindowsError as e:
- if e.errno == 2:
- # Batch files won't be found when called without extension. They
- # would be found if `shell=True`, but then getting the right
- # exit code is tricky. So we perform some `cmd` trickery that
- # is essentially equivalent to `shell=True`, but gives correct
- # exit codes. Note that `subprocess.Popen()` works with strings
- # under Windows; a list is not required.
- exec_cmd_string = ' '.join(exec_cmd)
- exec_cmd_string = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(exec_cmd_string)
- proc = subprocess.Popen(exec_cmd_string, stdout=out_file, stderr=err_file)
+ files.append(f.format(file=script, File=script_full))
+ for command in commands:
+ # Note that command is a string, which must be converted to list
+ # Must double-escape any backslashes so that they survive `shlex.split()`
+ # `shlex.split()` only works with Unicode after 2.7.2
+ if (sys.version_info.major == 2 and sys.version_info.micro < 3):
+ exec_cmd = shlex.split(bytes(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\'), workingdir=workingdir.replace('\\', '\\\\'))))
+ exec_cmd = [unicode(elem) for elem in exec_cmd]
else:
- raise
-
- proc.wait()
+ exec_cmd = shlex.split(command.format(file=script.replace('\\', '\\\\'), File=script_full.replace('\\', '\\\\'), workingdir=workingdir.replace('\\', '\\\\')))
+ # Add any created files due to the command
+ # This needs to be done before attempts to execute, to prevent orphans
+ try:
+ proc = subprocess.Popen(exec_cmd, stdout=out_file, stderr=err_file)
+ except WindowsError as e:
+ if e.errno == 2:
+ # Batch files won't be found when called without extension. They
+ # would be found if `shell=True`, but then getting the right
+ # exit code is tricky. So we perform some `cmd` trickery that
+ # is essentially equivalent to `shell=True`, but gives correct
+ # exit codes. Note that `subprocess.Popen()` works with strings
+ # under Windows; a list is not required.
+ exec_cmd_string = ' '.join(exec_cmd)
+ exec_cmd_string = 'cmd /C "@echo off & call {0} & if errorlevel 1 exit 1"'.format(exec_cmd_string)
+ proc = subprocess.Popen(exec_cmd_string, stdout=out_file, stderr=err_file)
+ else:
+ raise
+
+ proc.wait()
+ if proc.returncode != 0:
+ break
out_file.close()
err_file.close()
-
+
# Process saved stdout into file(s) that are included in the TeX document.
#
- # Go through the saved output line by line, and save any printed content
+ # Go through the saved output line by line, and save any printed content
# to its own file, named based on instance.
#
# The very end of the stdout lists dependencies, if any, so we start by
@@ -1563,7 +1574,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
valid_stdout = False
if proc.returncode == 0:
raise ValueError('Missing "created" and/or "dependencies" delims in stdout; invalid template?')
-
+
if valid_stdout:
# Add created files to created list
for c in created.splitlines():
@@ -1571,11 +1582,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
files.append(c)
else:
files.append(os.path.join(workingdir, c))
-
+
# Create a set of dependencies, to eliminate duplicates in the event
# that there are any. This is mainly useful when dependencies are
- # automatically determined (for example, through redefining open()),
- # may be specified multiple times as a result, and are hashed (and
+ # automatically determined (for example, through redefining open()),
+ # may be specified multiple times as a result, and are hashed (and
# of a large enough size that hashing time is non-negligible).
deps = set([dep for dep in deps.splitlines()])
# Process dependencies; get mtimes and (if specified) hashes
@@ -1584,21 +1595,21 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
if not os.path.isabs(dep_file):
dep_file = os.path.expanduser(os.path.normcase(os.path.join(workingdir, dep_file)))
if not os.path.isfile(dep_file):
- # If we can't find the file, we return a null hash and issue
- # an error. We don't need to change the exit status. If the
- # code does depend on the file, there will be a separate
- # error when the code attempts to use the file. If the code
- # doesn't really depend on the file, then the error will be
- # raised again anyway the next time PythonTeX runs when the
+ # If we can't find the file, we return a null hash and issue
+ # an error. We don't need to change the exit status. If the
+ # code does depend on the file, there will be a separate
+ # error when the code attempts to use the file. If the code
+ # doesn't really depend on the file, then the error will be
+ # raised again anyway the next time PythonTeX runs when the
# dependency is listed but not found.
dependencies[dep] = (None, None)
messages.append('* PythonTeX error')
messages.append(' Cannot find dependency "' + dep + '"')
messages.append(' It belongs to ' + key_run.replace('#', ':'))
messages.append(' Relative paths to dependencies must be specified from the working directory.')
- errors += 1
+ errors += 1
elif hashdependencies:
- # Read and hash the file in binary. Opening in text mode
+ # Read and hash the file in binary. Opening in text mode
# would require an unnecessary decoding and encoding cycle.
hasher = sha1()
f = open(dep_file, 'rb')
@@ -1607,12 +1618,12 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
dependencies[dep] = (os.path.getmtime(dep_file), hasher.hexdigest())
else:
dependencies[dep] = (os.path.getmtime(dep_file), '')
-
+
for block in out.split('=>PYTHONTEX:STDOUT#')[1:]:
if block:
delims, content = block.split('#\n', 1)
- if content:
- instance, command = delims.split('#')
+ instance, command = delims.split('#')
+ if content or command in ('s', 'sub'):
if instance.endswith('CC'):
messages.append('* PythonTeX warning')
messages.append(' Custom code for "' + family + '" attempted to print or write to stdout')
@@ -1627,6 +1638,17 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
fname = os.path.join(outputdir, basename + '_' + instance + '.stdout')
f = open(os.path.expanduser(os.path.normcase(fname)), 'w', encoding=encoding)
+ if command in ('s', 'sub'):
+ if content:
+ fields = [x.split('\n', 1)[1].rsplit('\n', 1)[0] for x in content.split('=>PYTHONTEX:FIELD_DELIM#')[1:]]
+ content = code_list[int(instance)].sub_template.format(*fields)
+ else:
+ # If no replacement fields, de-templatize
+ content = code_list[int(instance)].sub_template.replace('{{', '{').replace('}}', '}')
+ if command == 's':
+ # Remove newline added by printing, prevent
+ # LaTeX from adding a space after content
+ content = content.rsplit('\n', 1)[0] + '\\endinput\n'
f.write(content)
f.close()
files.append(fname)
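
A sketch of the new 's'/'sub' handling above, with a hypothetical template and stdout: each printed field is wrapped in FIELD_DELIM markers, the delimiter lines are stripped off, and the values are inserted into the saved template with str.format(); with no fields, the doubled braces are simply unescaped.

sub_template = 'The answer is {0} and the run date is {1}.'     # hypothetical
content = ('=>PYTHONTEX:FIELD_DELIM#0#\n42\n'
           '=>PYTHONTEX:FIELD_DELIM#1#\n2016-07-21\n')          # hypothetical stdout

fields = [x.split('\n', 1)[1].rsplit('\n', 1)[0]
          for x in content.split('=>PYTHONTEX:FIELD_DELIM#')[1:]]
result = sub_template.format(*fields)
# 'The answer is 42 and the run date is 2016-07-21.'

# De-templatizing when there are no replacement fields:
plain = 'literal {{braces}}'.replace('{{', '{').replace('}}', '}')
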
@@ -1657,8 +1679,8 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
# Create the full basename that will be replaced in stderr
# We need two versions, one with the correct slashes for the OS,
# and one with the opposite slashes. This is needed when a language
- # doesn't obey the OS's slash convention in paths given in stderr.
- # For example, Windows uses backslashes, but Ruby under Windows uses
+ # doesn't obey the OS's slash convention in paths given in stderr.
+ # For example, Windows uses backslashes, but Ruby under Windows uses
     # forward slashes in paths given in stderr.
# #### Consider os.path.normcase(), making search case-insensitive
outputdir_exp = os.path.expanduser(outputdir)
@@ -1667,7 +1689,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
fullbasename_reslashed = fullbasename_correct.replace('\\', '/')
else:
fullbasename_reslashed = fullbasename_correct.replace('/', '\\')
-
+
if err_ud:
it = iter(code_index.items())
index_now = next(it)
@@ -1675,7 +1697,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
start_errgobble = None
for n, line in enumerate(err_ud):
if basename in line:
- # Get the gobbleation. This is used to determine if
+ # Get the gobbleation. This is used to determine if
# other lines containing the basename are a continuation,
# or separate messages.
errgobble = match('(\s*)', line).groups()[0]
@@ -1683,7 +1705,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
start_errgobble = errgobble
                 # Only issue a message and track down the line number if
# this is indeed the start of a new message, rather than
- # a continuation of an old message that happens to
+ # a continuation of an old message that happens to
# contain the basename
if errgobble == start_errgobble:
# Determine the corresponding line number in the document
@@ -1709,15 +1731,15 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
doclinenum = '??'
input_file = '??'
-
- # Try to determine if we are dealing with an error or a
+
+ # Try to determine if we are dealing with an error or a
# warning.
found = False
index = n
if stderrlookbehind:
while index >= 0:
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
past_line = err_ud[index]
if (index < n and basename in past_line):
@@ -1739,11 +1761,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
index -= 1
else:
while index < len(err_ud):
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
future_line = err_ud[index]
- if (index > n and basename in future_line and
+ if (index > n and basename in future_line and
future_line.startswith(start_errgobble)):
break
for pattern in warningsig:
@@ -1773,7 +1795,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
err_messages_ud.append(' ' + line.replace(outputdir_exp, '<outputdir>').rstrip('\n'))
else:
err_messages_ud.append(' ' + line.rstrip('\n'))
-
+
# Create .stderr
if makestderr and err_messages_ud:
process = False
@@ -1822,13 +1844,14 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
codelinenum = '1'
else:
+ errlinenum = '??'
codelinenum = '??'
messages.append('* PythonTeX error')
messages.append(' Line number ' + str(errlinenum) + ' could not be synced with the document')
messages.append(' Content from stderr is not delimited, and cannot be resolved')
errors += 1
process = False
-
+
if process:
if int(index_now[0]) > err_key_last_int:
err_key = basename + '_' + index_now[0]
@@ -1849,7 +1872,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
err_dict[err_key].append(line)
elif process:
err_dict[err_key].append(line)
-
+
if err_d:
start_errgobble = None
msg = []
@@ -1858,11 +1881,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
if line.startswith('=>PYTHONTEX:STDERR#'):
# Store the last group of messages. Messages
# can't be directly appended to the main list, because
- # a PythonTeX message must be inserted at the beginning
+ # a PythonTeX message must be inserted at the beginning
# of each chunk of stderr that never references
# the script that was executed. If the script is never
- # referenced, then line numbers aren't automatically
- # synced. These types of situations are created by
+ # referenced, then line numbers aren't automatically
+ # synced. These types of situations are created by
# warnings.warn() etc.
if msg:
if not found_basename:
@@ -1872,10 +1895,10 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
doclinenum = str(code_index[instance].line_int)
input_file = code_index[instance].input_file
# Try to identify alert. We have to parse all
- # lines for signs of errors and warnings. This
+ # lines for signs of errors and warnings. This
# may result in overcounting, but it's the best
- # we can do--otherwise, we could easily
- # undercount, or, finding a warning, miss a
+ # we can do--otherwise, we could easily
+ # undercount, or, finding a warning, miss a
# subsequent error. When this code is actually
# used, it's already a sign that normal parsing
# has failed.
@@ -1909,7 +1932,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
last_delim = line
elif basename in line:
found_basename = True
- # Get the gobbleation. This is used to determine if
+ # Get the gobbleation. This is used to determine if
# other lines containing the basename are a continuation,
# or separate messages.
errgobble = match('(\s*)', line).groups()[0]
@@ -1917,7 +1940,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
start_errgobble = errgobble
                 # Only issue a message and track down the line number if
# this is indeed the start of a new message, rather than
- # a continuation of an old message that happens to
+ # a continuation of an old message that happens to
# contain the basename
if errgobble == start_errgobble:
# Determine the corresponding line number in the document
@@ -1942,18 +1965,18 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
doclinenum = '??'
input_file = '??'
-
- # Try to determine if we are dealing with an error or a
+
+ # Try to determine if we are dealing with an error or a
# warning.
found = False
index = n
if stderrlookbehind:
while index >= 0:
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
past_line = err_d[index]
- if (past_line.startswith('=>PYTHONTEX:STDERR#') or
+ if (past_line.startswith('=>PYTHONTEX:STDERR#') or
(index < n and basename in past_line)):
break
for pattern in warningsig:
@@ -1973,11 +1996,11 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
index -= 1
else:
while index < len(err_d):
- # The order here is important. If a line matches
- # both the error and warning patterns, default to
+ # The order here is important. If a line matches
+ # both the error and warning patterns, default to
# error.
future_line = err_d[index]
- if (future_line.startswith('=>PYTHONTEX:STDERR#') or
+ if (future_line.startswith('=>PYTHONTEX:STDERR#') or
(index > n and basename in future_line and future_line.startswith(start_errgobble))):
break
for pattern in warningsig:
@@ -2020,10 +2043,10 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
doclinenum = str(code_index[instance].line_int)
input_file = code_index[instance].input_file
# Try to identify alert. We have to parse all
- # lines for signs of errors and warnings. This
+ # lines for signs of errors and warnings. This
# may result in overcounting, but it's the best
- # we can do--otherwise, we could easily
- # undercount, or, finding a warning, miss a
+ # we can do--otherwise, we could easily
+ # undercount, or, finding a warning, miss a
# subsequent error. When this code is actually
# used, it's already a sign that normal parsing
# has failed.
@@ -2050,7 +2073,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
else:
err_messages_d.append('* PythonTeX stderr - {0} near line {1}:'.format(alert_type, doclinenum))
err_messages_d.extend(msg)
-
+
# Create .stderr
if makestderr and err_messages_d:
process = False
@@ -2075,17 +2098,17 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
# Calculate the line number in the document
# Account for inline
ei = code_index[instance]
- # Store the `instance` in case it's
+ # Store the `instance` in case it's
# incremented later
last_instance = instance
# If the error or warning was actually triggered
# later on (for example, multiline string with
- # missing final delimiter), look ahead and
+ # missing final delimiter), look ahead and
# determine the correct instance, so that
# we get the correct line number. We don't
# associate the created stderr with this later
# instance, however, but rather with the instance
- # in which the error began. Doing that might
+ # in which the error began. Doing that might
# possibly be preferable in some cases, but would
# also require that the current stderr be split
# between multiple instances, requiring extra
@@ -2112,9 +2135,10 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
instance = last_instance
else:
codelinenum = '??'
+ errlinenum = '??'
messages.append('* PythonTeX notice')
messages.append(' Line number ' + str(errlinenum) + ' could not be synced with the document')
-
+
line = line.replace(str(errlinenum), str(codelinenum), 1)
if fullbasename_correct in line:
fullbasename = fullbasename_correct
@@ -2138,7 +2162,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
f.write(''.join(err_dict[err_key]))
f.close()
files.append(stderr_file_name)
-
+
# Clean up temp files, and update the list of existing files
if keeptemps == 'none':
for ext in [extension, 'pytxmcr', 'out', 'err']:
@@ -2157,7 +2181,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
# Take care of any unknowns, based on exit code
# Interpret the exit code as an indicator of whether there were errors,
- # and treat unknowns accordingly. This will cause all warnings to be
+ # and treat unknowns accordingly. This will cause all warnings to be
# misinterpreted as errors if warnings trigger a nonzero exit code.
# It will also cause all warnings to be misinterpreted as errors if there
# is a single error that causes a nonzero exit code. That isn't ideal,
@@ -2176,7 +2200,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
{0} message(s) could not be classified
Interpreted as {1}, based on the return code(s)'''
messages[0] += textwrap.dedent(unknowns_message.format(unknowns, unknowns_type))
-
+
# Take care of anything that has escaped detection thus far.
if proc.returncode == 1 and not errors:
errors += 1
@@ -2187,7 +2211,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
The following command was executed:
"{0}"'''
messages[0] += textwrap.dedent(command_message.format(' '.join(exec_cmd)))
-
+
# Add any stderr messages; otherwise, clear the default message header
if err_messages_ud:
messages.extend(err_messages_ud)
@@ -2195,7 +2219,7 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
messages.extend(err_messages_d)
if len(messages) == 1:
messages = []
-
+
# Return a dict of dicts of results
return {'process': 'code',
'key': key_run,
@@ -2209,18 +2233,18 @@ def run_code(encoding, outputdir, workingdir, code_list, language, command,
-def do_pygments(encoding, outputdir, fvextfile, pygments_list,
+def do_pygments(encoding, outputdir, fvextfile, pygments_list,
pygments_settings, typeset_cache):
'''
Create Pygments content.
-
+
To be run during multiprocessing.
'''
# Lazy import
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import LatexFormatter
-
+
# Create what's needed for storing results
pygments_files = defaultdict(list)
pygments_macros = defaultdict(list)
@@ -2228,16 +2252,17 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
warnings = 0
messages = []
messages.append('\n---- Messages for Pygments ----')
-
+
# Create dicts of formatters and lexers.
formatter = dict()
lexer = dict()
for codetype in pygments_settings:
if codetype != ':GLOBAL':
- formatter[codetype] = LatexFormatter(**pygments_settings[codetype]['formatter_options'])
- lexer[codetype] = get_lexer_by_name(pygments_settings[codetype]['lexer'],
- **pygments_settings[codetype]['lexer_options'])
-
+ p = pygments_settings[codetype]['formatter_options'].copy()
+ p['commandprefix'] = 'PYG'
+ formatter[codetype] = LatexFormatter(**p)
+ lexer[codetype] = get_lexer_by_name(pygments_settings[codetype]['lexer'], **p)
+
# Actually parse and highlight the code.
for c in pygments_list:
if c.is_cons:
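For context on the Pygments calls being rearranged above: `get_lexer_by_name`, `LatexFormatter`, and `highlight` are standard Pygments APIs, and the `commandprefix` option controls the prefix of the `\PYG...` macros in the generated LaTeX. A standalone sketch with an invented snippet:

    from pygments import highlight
    from pygments.lexers import get_lexer_by_name
    from pygments.formatters import LatexFormatter

    lexer = get_lexer_by_name('python')
    formatter = LatexFormatter(commandprefix='PYG', style='default')
    latex = highlight("print('hello')", lexer, formatter)
    # `latex` is a \begin{Verbatim}[...] ... \end{Verbatim} block built from
    # \PYG* macros; the code below rewrites its first line into the
    # pytx@SaveVerbatim / pytx@Verbatim environments.
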
@@ -2258,19 +2283,19 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
if c.is_inline or content.count('\n') < fvextfile:
# Highlighted code brought in via macros needs SaveVerbatim
if c.args_prettyprint:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@SaveVerbatim}}[\1, {4}]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance, c.args_prettyprint), processed, count=1)
else:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@SaveVerbatim}}[\1]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance), processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@SaveVerbatim}\n\n'
pygments_macros[c.key_typeset].append(processed)
else:
if c.args_prettyprint:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@Verbatim}}[\1, {4}]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance, c.args_prettyprint), processed, count=1)
else:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@Verbatim}}[\1]{{pytx@{0}@{1}@{2}@{3}}}'.format(c.family, c.session, c.restart, c.instance), processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@Verbatim}\n\n'
fname = os.path.join(outputdir, c.key_typeset.replace('#', '_') + '.pygtex')
@@ -2278,7 +2303,7 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
f.write(processed)
f.close()
pygments_files[c.key_typeset].append(fname)
-
+
if len(messages) == 1:
messages = []
# Return a dict of dicts of results
@@ -2287,7 +2312,7 @@ def do_pygments(encoding, outputdir, fvextfile, pygments_list,
'pygments_macros': pygments_macros,
'errors': errors,
'warnings': warnings,
- 'messages': messages}
+ 'messages': messages}
@@ -2296,7 +2321,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
pygments_settings, cc_begin_list, cons_list, cc_end_list,
startup, banner, filename):
'''
- Use Python's ``code`` module to typeset emulated Python interactive
+ Use Python's ``code`` module to typeset emulated Python interactive
sessions, optionally highlighting with Pygments.
'''
# Create what's needed for storing results
@@ -2311,7 +2336,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
warnings = 0
messages = []
messages.append('\n---- Messages for ' + key_run.replace('#', ':') + ' ----')
-
+
# Lazy import what's needed
import code
from collections import deque
@@ -2324,14 +2349,14 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
self._orig_write(unicode(s))
else:
from io import StringIO
-
+
# Create a custom console class
class Console(code.InteractiveConsole):
'''
A subclass of code.InteractiveConsole that takes a list and treats it
as a series of console input.
'''
-
+
def __init__(self, banner, filename):
if banner == 'none':
self.banner = 'NULL BANNER'
@@ -2350,7 +2375,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
self.filename = None
code.InteractiveConsole.__init__(self, filename=self.filename)
self.iostdout = StringIO()
-
+
def consolize(self, startup, cons_list):
self.console_code = deque()
# Delimiters are passed straight through and need newlines
@@ -2365,10 +2390,10 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
sys.path.append(os.getcwd())
else:
sys.exit('Cannot find directory "{workingdir}"')
-
+
if docdir not in sys.path:
sys.path.append(docdir)
-
+
del docdir
'''
cons_config = cons_config.format(workingdir=os.path.expanduser(os.path.normcase(workingdir)))[1:]
@@ -2378,21 +2403,24 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
for c in cons_list:
self.console_code.append('=>PYTHONTEX#{0}#{1}#\n'.format(c.instance, c.command))
self.console_code.extend(c.code.splitlines())
+ # Reset sys.excepthook to its default, to prevent apport systems
+ # in some Linux distributions from breaking exception handling
+ sys.excepthook = sys.__excepthook__
old_stdout = sys.stdout
sys.stdout = self.iostdout
self.interact(self.banner)
sys.stdout = old_stdout
self.session_log = self.iostdout.getvalue()
-
+
def raw_input(self, prompt):
- # Have to do a lot of looping and trying to make sure we get
+ # Have to do a lot of looping and trying to make sure we get
# something valid to execute
try:
line = self.console_code.popleft()
except IndexError:
raise EOFError
while line.startswith('=>PYTHONTEX#'):
- # Get new lines until we get one that doesn't begin with a
+ # Get new lines until we get one that doesn't begin with a
# delimiter. Then write the last delimited line.
old_line = line
try:
@@ -2405,19 +2433,19 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
else:
self.write('\n')
return line
-
+
def write(self, data):
self.iostdout.write(data)
-
+
# Need to combine all custom code and user code to pass to consolize
cons_list = cc_begin_list + cons_list + cc_end_list
- # Create a dict for looking up exceptions. This is needed for startup
+ # Create a dict for looking up exceptions. This is needed for startup
# commands and for code commands and environments, since their output
# isn't typeset
cons_index = {}
for c in cons_list:
- cons_index[c.instance] = c.line
-
+ cons_index[c.instance] = c.line
+
# Consolize the code
# If the working directory is changed as part of the console code,
# then we need to get back to where we were.
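The `Console` class adjusted in these hunks is a `code.InteractiveConsole` subclass that replays pre-collected lines through `raw_input()` and captures the transcript in a `StringIO`. A toy version of that pattern, assuming Python 3.6+ (the class and variable names are invented):

    import code
    import sys
    from collections import deque
    from io import StringIO

    class ScriptedConsole(code.InteractiveConsole):
        def __init__(self, lines):
            super().__init__()
            self.pending = deque(lines)
            self.log = StringIO()

        def raw_input(self, prompt=''):
            try:
                line = self.pending.popleft()
            except IndexError:
                raise EOFError          # ends interact()
            self.write(prompt + line + '\n')   # echo the input into the transcript
            return line

        def write(self, data):
            self.log.write(data)

    con = ScriptedConsole(['x = 2 + 2', 'print(x)'])
    old_stdout, sys.stdout = sys.stdout, con.log   # capture print() output too
    con.interact(banner='', exitmsg='')
    sys.stdout = old_stdout
    print(con.log.getvalue())
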
@@ -2425,7 +2453,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
cwd = os.getcwd()
con.consolize(startup, cons_list)
os.chdir(cwd)
-
+
# Set up Pygments, if applicable
if pygments_settings is not None:
pygmentize = True
@@ -2433,12 +2461,13 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import LatexFormatter
- formatter = LatexFormatter(**pygments_settings['formatter_options'])
- lexer = get_lexer_by_name(pygments_settings['lexer'],
- **pygments_settings['lexer_options'])
+ p = pygments_settings['formatter_options'].copy()
+ p['commandprefix'] = 'PYG'
+ formatter = LatexFormatter(**p)
+ lexer = get_lexer_by_name(pygments_settings['lexer'], **p)
else:
pygmentize = False
-
+
# Process the console output
output = con.session_log.split('=>PYTHONTEX#')
# Extract banner
@@ -2455,8 +2484,8 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
exception = False
console_content_lines = console_content.splitlines()
for line in console_content_lines:
- if (not line.startswith(sys.ps1) and
- not line.startswith(sys.ps2) and
+ if (not line.startswith(sys.ps1) and
+ not line.startswith(sys.ps2) and
line and not line.isspace()):
exception = True
break
@@ -2477,8 +2506,8 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
exception = False
console_content_lines = console_content.splitlines()
for line in console_content_lines:
- if (line and not line.startswith(sys.ps1) and
- not line.startswith(sys.ps2) and
+ if (line and not line.startswith(sys.ps1) and
+ not line.startswith(sys.ps2) and
not line.isspace()):
exception = True
break
@@ -2502,7 +2531,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
else:
if command == 'i':
# Currently, there isn't any error checking for invalid
- # content; it is assumed that a single line of commands
+ # content; it is assumed that a single line of commands
# was entered, producing one or more lines of output.
# Given that the current ``\pycon`` command doesn't
# allow line breaks to be written to the .pytxcode, that
@@ -2521,14 +2550,16 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
# Process for LaTeX
if pygmentize:
processed = highlight(console_content, lexer, formatter)
- if console_content.count('\n') < fvextfile:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ # #### Need to add wrapping:
+ #processed = highlight('\n'.join([textwrap.fill(x) for x in console_content.splitlines(True)]), lexer, formatter)
+ if console_content.count('\n') < fvextfile:
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@SaveVerbatim}}[\1]{{pytx@{0}}}'.format(key_typeset.replace('#', '@')),
processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@SaveVerbatim}\n\n'
pygments_macros[key_typeset].append(processed)
else:
- processed = sub(r'\\begin{Verbatim}\[(.+)\]',
+ processed = sub(r'\\begin{Verbatim}\[(.+)\]',
r'\\begin{{pytx@Verbatim}}[\1]{{pytx@{0}}}'.format(key_typeset.replace('#', '@')),
processed, count=1)
processed = processed.rsplit('\\', 1)[0] + '\\end{pytx@Verbatim}\n\n'
@@ -2536,10 +2567,10 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
f = open(os.path.expanduser(os.path.normcase(fname)), 'w', encoding=encoding)
f.write(processed)
f.close()
- pygments_files[key_typeset].append(fname)
+ pygments_files[key_typeset].append(fname)
else:
if console_content.count('\n') < fvextfile:
- processed = ('\\begin{{pytx@SaveVerbatim}}{{pytx@{0}}}\n'.format(key_typeset.replace('#', '@')) +
+ processed = ('\\begin{{pytx@SaveVerbatim}}{{pytx@{0}}}\n'.format(key_typeset.replace('#', '@')) +
console_content + '\\end{pytx@SaveVerbatim}\n\n')
macros.append(processed)
else:
@@ -2550,10 +2581,10 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
f.write(processed)
f.close()
files.append(fname)
-
+
if len(messages) == 1:
messages = []
-
+
# Return a dict of dicts of results
return {'process': 'console',
'key': key_run,
@@ -2565,7 +2596,7 @@ def python_console(jobname, encoding, outputdir, workingdir, fvextfile,
'dependencies': dependencies,
'errors': errors,
'warnings': warnings,
- 'messages': messages}
+ 'messages': messages}
@@ -2586,25 +2617,25 @@ def main(python=None):
temp_data = {'errors': 0, 'warnings': 0, 'python': python}
old_data = dict()
-
+
# Process command-line options.
#
- # This gets the raw_jobname (actual job name), jobname (a sanitized job
+ # This gets the raw_jobname (actual job name), jobname (a sanitized job
# name, used for creating files named after the jobname), and any options.
process_argv(data, temp_data)
- # If there aren't errors in argv, and the program is going to run
- # (rather than just exit due to --version or --help command-line options),
- # print PythonTeX version. Flush to make the message go out immediately,
+ # If there aren't errors in argv, and the program is going to run
+ # (rather than just exit due to --version or --help command-line options),
+ # print PythonTeX version. Flush to make the message go out immediately,
# so that the user knows PythonTeX has started.
print('This is PythonTeX {0}'.format(__version__))
sys.stdout.flush()
- # Once we have the encoding (from argv), we set stdout and stderr to use
- # this encoding. Later, we will parse the saved stderr of scripts
- # executed via multiprocessing subprocesses, and print the parsed results
- # to stdout. The saved stderr uses the same encoding that was used
- # for the files that created it (this is important for code containing
+ # Once we have the encoding (from argv), we set stdout and stderr to use
+ # this encoding. Later, we will parse the saved stderr of scripts
+ # executed via multiprocessing subprocesses, and print the parsed results
+ # to stdout. The saved stderr uses the same encoding that was used
+ # for the files that created it (this is important for code containing
# unicode characters), so we also need stdout for the main PythonTeX
- # script to support this encoding. Setting stderr encoding is primarily
+ # script to support this encoding. Setting stderr encoding is primarily
# a matter of symmetry. Ideally, pythontex*.py will be bug-free,
# and stderr won't be needed!
if sys.version_info[0] == 2:
@@ -2617,7 +2648,7 @@ def main(python=None):
# Load the code and process the settings it passes from the TeX side.
#
- # This gets a list containing the code (the part of the code file
+ # This gets a list containing the code (the part of the code file
# containing the settings is removed) and the processed settings.
load_code_get_settings(data, temp_data)
# Now that the settings are loaded, check if outputdir exits.
@@ -2628,19 +2659,19 @@ def main(python=None):
# Load/create old_data
get_old_data(data, old_data, temp_data)
-
-
+
+
# Hash the code. Determine what needs to be executed. Determine whether
- # Pygments should be used. Update pygments_settings to account for
- # Pygments commands and environments (as opposed to PythonTeX commands
+ # Pygments should be used. Update pygments_settings to account for
+ # Pygments commands and environments (as opposed to PythonTeX commands
# and environments).
hash_all(data, temp_data, old_data, engine_dict)
-
-
+
+
# Parse the code and write scripts for execution.
parse_code_write_scripts(data, temp_data, engine_dict)
-
-
+
+
# Execute the code and perform Pygments highlighting via multiprocessing.
do_multiprocessing(data, temp_data, old_data, engine_dict)
@@ -2650,11 +2681,11 @@ def main(python=None):
if 'upgrade_message' in temp_data:
print(temp_data['upgrade_message'])
sys.exit()
-
+
# Print exit message
print('\n--------------------------------------------------')
- # If some rerun settings are used, there may be unresolved errors or
- # warnings; if so, print a summary of those along with the current
+ # If some rerun settings are used, there may be unresolved errors or
+ # warnings; if so, print a summary of those along with the current
# error and warning summary
unresolved_errors = 0
unresolved_warnings = 0
@@ -2669,13 +2700,13 @@ def main(python=None):
if unresolved_warnings != 0 or unresolved_errors != 0:
print('PythonTeX: {0}'.format(data['raw_jobname']))
print(' - Old: {0} error(s), {1} warnings(s)'.format(unresolved_errors, unresolved_warnings))
- print(' - Current: {0} error(s), {1} warnings(s)'.format(temp_data['errors'], temp_data['warnings']))
+ print(' - Current: {0} error(s), {1} warnings(s)'.format(temp_data['errors'], temp_data['warnings']))
else:
print('PythonTeX: {0} - {1} error(s), {2} warning(s)\n'.format(data['raw_jobname'], temp_data['errors'], temp_data['warnings']))
-
+
if 'upgrade_message' in temp_data:
print(temp_data['upgrade_message'])
-
+
# Exit with appropriate exit code based on user settings.
if temp_data['error_exit_code'] and temp_data['errors'] > 0:
sys.exit(1)
@@ -2684,8 +2715,8 @@ def main(python=None):
-# The "if" statement is needed for multiprocessing under Windows; see the
-# multiprocessing documentation. It is also needed in this case when the
+# The "if" statement is needed for multiprocessing under Windows; see the
+# multiprocessing documentation. It is also needed in this case when the
# script is invoked via the wrapper.
if __name__ == '__main__':
#// Python 2
diff --git a/Master/texmf-dist/scripts/pythontex/pythontex_engines.py b/Master/texmf-dist/scripts/pythontex/pythontex_engines.py
index 83ae0acd50e..6bcbd1690db 100755
--- a/Master/texmf-dist/scripts/pythontex/pythontex_engines.py
+++ b/Master/texmf-dist/scripts/pythontex/pythontex_engines.py
@@ -4,8 +4,8 @@ PythonTeX code engines.
Provides a class for managing the different languages/types of code
that may be executed. A class instance is created for each language/type of
-code. The class provides a method for assembling the scripts that are
-executed, combining user code with templates. It also creates the records
+code. The class provides a method for assembling the scripts that are
+executed, combining user code with templates. It also creates the records
needed to synchronize `stderr` with the document.
Each instance of the class is automatically added to the `engines_dict` upon
@@ -17,7 +17,7 @@ document (script for execution).
-Copyright (c) 2012-2014, Geoffrey M. Poore
+Copyright (c) 2012-2016, Geoffrey M. Poore
All rights reserved.
Licensed under the BSD 3-Clause License:
http://www.opensource.org/licenses/BSD-3-Clause
@@ -28,88 +28,109 @@ Licensed under the BSD 3-Clause License:
import os
import sys
import textwrap
+import re
from hashlib import sha1
from collections import OrderedDict, namedtuple
-interpreter_dict = {k:k for k in ('python', 'ruby', 'julia', 'octave')}
-# The {file} field needs to be replaced by itself, since the actual
+interpreter_dict = {k:k for k in ('python', 'ruby', 'julia', 'octave', 'bash', 'sage', 'rustc')}
+# The {file} field needs to be replaced by itself, since the actual
# substitution of the real file can only be done at runtime, whereas the
-# substitution for the interpreter should be done when the engine is
+# substitution for the interpreter should be done when the engine is
# initialized.
interpreter_dict['file'] = '{file}'
interpreter_dict['File'] = '{File}'
+interpreter_dict['workingdir'] = '{workingdir}'
engine_dict = {}
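The `{file}`, `{File}`, and `{workingdir}` entries above map each field to itself so that a first `str.format()` pass can resolve the interpreter name while leaving those fields intact for substitution at run time. A two-step illustration (the command string and values are made up):

    command = '{python} "{file}.py"'
    # Engine initialization: pick the interpreter, keep {file} for later.
    command = command.format(python='python3', file='{file}')
    # -> 'python3 "{file}.py"'
    # Run time: substitute the real script name.
    exec_cmd = command.format(file='jobname_py_default')
    # -> 'python3 "jobname_py_default.py"'
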
-CodeIndex = namedtuple('CodeIndex', ['input_file', 'command',
- 'line_int', 'lines_total',
+CodeIndex = namedtuple('CodeIndex', ['input_file', 'command',
+ 'line_int', 'lines_total',
'lines_user', 'lines_input',
'inline_count'])
class CodeEngine(object):
'''
- The base class that is used for defining language engines. Each command
+ The base class that is used for defining language engines. Each command
and environment family is based on an engine.
-
+
The class assembles the individual scripts that PythonTeX executes, using
- templates and user code. It also creates the records needed for
+ templates and user code. It also creates the records needed for
synchronizing `stderr` with the document.
'''
- def __init__(self, name, language, extension, command, template, wrapper,
- formatter, errors=None, warnings=None,
- linenumbers=None, lookbehind=False,
+ def __init__(self, name, language, extension, commands, template, wrapper,
+ formatter, sub=None, errors=None, warnings=None,
+ linenumbers=None, lookbehind=False,
console=False, startup=None, created=None):
# Save raw arguments so that they may be reused by subtypes
- self._rawargs = (name, language, extension, command, template, wrapper,
- formatter, errors, warnings,
+ self._rawargs = (name, language, extension, commands, template, wrapper,
+ formatter, sub, errors, warnings,
linenumbers, lookbehind, console, startup, created)
-
+
# Type check all strings, and make sure everything is Unicode
if sys.version_info[0] == 2:
- if (not isinstance(name, basestring) or
- not isinstance(language, basestring) or
- not isinstance(extension, basestring) or
- not isinstance(command, basestring) or
+ if (not isinstance(name, basestring) or
+ not isinstance(language, basestring) or
+ not isinstance(extension, basestring) or
not isinstance(template, basestring) or
not isinstance(wrapper, basestring) or
- not isinstance(formatter, basestring)):
+ not isinstance(formatter, basestring) or
+ not isinstance(sub, basestring)):
raise TypeError('CodeEngine needs string in initialization')
self.name = unicode(name)
self.language = unicode(language)
self.extension = unicode(extension)
- self.command = unicode(command)
self.template = unicode(template)
self.wrapper = unicode(wrapper)
self.formatter = unicode(formatter)
+ self.sub = unicode(sub)
else:
- if (not isinstance(name, str) or
- not isinstance(language, str) or
- not isinstance(extension, str) or
- not isinstance(command, str) or
+ if (not isinstance(name, str) or
+ not isinstance(language, str) or
+ not isinstance(extension, str) or
not isinstance(template, str) or
not isinstance(wrapper, str) or
- not isinstance(formatter, str)):
+ not isinstance(formatter, str) or
+ not isinstance(sub, str)):
raise TypeError('CodeEngine needs string in initialization')
self.name = name
self.language = language
self.extension = extension
- self.command = command
self.template = template
self.wrapper = wrapper
self.formatter = formatter
+ self.sub = sub
# Perform some additional formatting on some strings.
self.extension = self.extension.lstrip('.')
self.template = self._dedent(self.template)
self.wrapper = self._dedent(self.wrapper)
+ # Deal with commands
+ if sys.version_info.major == 2:
+ if isinstance(commands, basestring):
+ commands = [commands]
+ elif not isinstance(commands, list) and not isinstance(commands, tuple):
+ raise TypeError('CodeEngine needs "commands" to be a string, list, or tuple')
+ for c in commands:
+ if not isinstance(c, basestring):
+ raise TypeError('CodeEngine needs "commands" to contain strings')
+ commands = [unicode(c) for c in commands]
+ else:
+ if isinstance(commands, str):
+ commands = [commands]
+ elif not isinstance(commands, list) and not isinstance(commands, tuple):
+ raise TypeError('CodeEngine needs "commands" to be a string, list, or tuple')
+ for c in commands:
+ if not isinstance(c, str):
+ raise TypeError('CodeEngine needs "commands" to contain strings')
+ self.commands = commands
# Make sure formatter string ends with a newline
if not self.formatter.endswith('\n'):
self.formatter = self.formatter + '\n'
-
+
# Type check errors, warnings, and linenumbers
if errors is None:
errors = []
@@ -180,12 +201,12 @@ class CodeEngine(object):
if not isinstance(lookbehind, bool):
raise TypeError('CodeEngine needs "lookbehind" to be bool')
self.lookbehind = lookbehind
-
+
# Type check console
if not isinstance(console, bool):
raise TypeError('CodeEngine needs "console" to be bool')
self.console = console
-
+
# Type check startup
if startup is None:
startup = ''
@@ -203,7 +224,7 @@ class CodeEngine(object):
if not startup.endswith('\n'):
startup += '\n'
self.startup = self._dedent(startup)
-
+
# Type check created; make sure it is an iterable and contains Unicode
if created is None:
created = []
@@ -226,19 +247,24 @@ class CodeEngine(object):
if not isinstance(f, str):
raise TypeError('CodeEngine needs "created" to contain strings')
self.created = created
-
- # The base PythonTeX type does not support extend; it is used in
+
+ # The base PythonTeX type does not support extend; it is used in
# subtyping. But a dummy extend is needed to fill the extend field
# in templates, if it is provided.
self.extend = ''
-
+
# Create dummy variables for console
self.banner = ''
self.filename = ''
-
+
# Each type needs to add itself to a dict, for later access by name
self._register()
-
+
+ # Regex for working with `sub` commands and environments
+ # Generated if used
+ self.sub_field_re = None
+
+
def _dedent(self, s):
'''
Dedent and strip leading newlines
@@ -247,29 +273,29 @@ class CodeEngine(object):
while s.startswith('\n'):
s = s[1:]
return s
-
+
def _register(self):
'''
Add instance to a dict for later access by name
'''
engine_dict[self.name] = self
-
+
def customize(self, **kwargs):
'''
Customize the template on the fly.
-
- This provides customization based on command line arguments
+
+ This provides customization based on command line arguments
(`--interpreter`) and customization from the TeX side (imports from
- `__future__`). Ideally, this function should be restricted to this
- and similar cases. The custom code command and environment are
+ `__future__`). Ideally, this function should be restricted to this
+ and similar cases. The custom code command and environment are
insufficient for such cases, because the command is at a level above
- that of code and because of the requirement that imports from
+ that of code and because of the requirement that imports from
`__future__` be at the very beginning of a script.
'''
# Take care of `--interpreter`
# The `interpreter_dict` has entries that allow `{file}` and
# `{outputdir}` fields to be replaced with themselves
- self.command = self.command.format(**interpreter_dict)
+ self.commands = [c.format(**interpreter_dict) for c in self.commands]
# Take care of `__future__`
if self.language.startswith('python'):
if sys.version_info[0] == 2 and 'pyfuture' in kwargs:
@@ -317,7 +343,7 @@ class CodeEngine(object):
self.filename = kwargs['pyconfilename']
_hash = None
-
+
def get_hash(self):
'''
Return a hash of all vital type information (template, etc.). Create
@@ -328,7 +354,8 @@ class CodeEngine(object):
# the user, since a unique hash is all that's needed.
if self._hash is None:
hasher = sha1()
- hasher.update(self.command.encode('utf8'))
+ for c in self.commands:
+ hasher.update(c.encode('utf8'))
hasher.update(self.template.encode('utf8'))
hasher.update(self.wrapper.encode('utf8'))
hasher.update(self.formatter.encode('utf8'))
@@ -338,13 +365,13 @@ class CodeEngine(object):
hasher.update(self.filename.encode('utf8'))
self._hash = hasher.hexdigest()
return self._hash
-
+
def _process_future(self, code_list):
'''
- Go through a given list of code and extract all imports from
- `__future__`, so that they can be relocated to the beginning of the
+ Go through a given list of code and extract all imports from
+ `__future__`, so that they can be relocated to the beginning of the
script.
-
+
The approach isn't foolproof and doesn't support compound statements.
'''
done = False
@@ -355,8 +382,8 @@ class CodeEngine(object):
code = c.code.split('\n')
for l, line in enumerate(code):
# Detect __future__ imports
- if (line.startswith('from __future__') or
- line.startswith('import __future__') and
+ if (line.startswith('from __future__') or
+ line.startswith('import __future__') and
not in_triplequote):
changed = True
if ';' in line:
@@ -365,15 +392,15 @@ class CodeEngine(object):
future_imports.append(line)
code[l] = ''
# Ignore comments, empty lines, and lines with complete docstrings
- elif (line.startswith('\n') or line.startswith('#') or
+ elif (line.startswith('\n') or line.startswith('#') or
line.isspace() or
- ('"""' in line and line.count('"""')%2 == 0) or
+ ('"""' in line and line.count('"""')%2 == 0) or
("'''" in line and line.count("'''")%2 == 0)):
pass
# Detect if entering or leaving a docstring
elif line.count('"""')%2 == 1 or line.count("'''")%2 == 1:
in_triplequote = not in_triplequote
- # Stop looking for future imports as soon as a non-comment,
+ # Stop looking for future imports as soon as a non-comment,
# non-empty, non-docstring, non-future import line is found
elif not in_triplequote:
done = True
@@ -386,7 +413,7 @@ class CodeEngine(object):
return '\n'.join(future_imports)
else:
return ''
-
+
def _get_future(self, cc_list_begin, code_list):
'''
Process custom code and user code for imports from `__future__`
@@ -397,31 +424,31 @@ class CodeEngine(object):
return cc_future + '\n' + code_future
else:
return cc_future + code_future
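`_process_future()` above scans Python code for `__future__` imports so they can be moved to the top of the assembled script. A heavily simplified line-based version, which skips the docstring and semicolon handling that the real method performs:

    def hoist_future_imports(code):
        # Collect `__future__` import lines and move them ahead of everything else.
        future, rest = [], []
        for line in code.splitlines():
            if line.startswith('from __future__') or line.startswith('import __future__'):
                future.append(line)
            else:
                rest.append(line)
        return '\n'.join(future + rest)
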
-
- def get_script(self, encoding, utilspath, outputdir, workingdir,
+
+ def get_script(self, encoding, utilspath, outputdir, workingdir,
cc_list_begin, code_list, cc_list_end, debug, interactive):
'''
Assemble the script that will be executed. In the process, assemble
an index of line numbers that may be used to correlate script line
- numbers with document line numbers and user code line numbers in the
+ numbers with document line numbers and user code line numbers in the
event of errors or warnings.
'''
lines_total = 0
script = []
code_index = OrderedDict()
-
+
# Take care of future
if self.language.startswith('python'):
future = self._get_future(cc_list_begin, code_list)
else:
future = ''
-
+
# Split template into beginning and ending segments
try:
script_begin, script_end = self.template.split('{body}')
except:
raise ValueError('Template for ' + self.name + ' is missing {body}')
-
+
# Add beginning to script
if os.path.isabs(os.path.expanduser(os.path.normcase(workingdir))):
workingdir_full = workingdir
@@ -429,12 +456,12 @@ class CodeEngine(object):
workingdir_full = os.path.join(os.getcwd(), workingdir).replace('\\', '/')
# Correct workingdir if in debug or interactive mode, so that it's
# relative to the script path
- # #### May refactor this once debugging functionality is more
+ # #### May refactor this once debugging functionality is more
# fully implemented
if debug is not None or interactive is not None:
if not os.path.isabs(os.path.expanduser(os.path.normcase(workingdir))):
workingdir = os.path.relpath(workingdir, outputdir)
- script_begin = script_begin.format(encoding=encoding, future=future,
+ script_begin = script_begin.format(encoding=encoding, future=future,
utilspath=utilspath,
workingdir=os.path.expanduser(os.path.normcase(workingdir)),
Workingdir=workingdir_full,
@@ -446,7 +473,7 @@ class CodeEngine(object):
created_delim='=>PYTHONTEX:CREATED#')
script.append(script_begin)
lines_total += script_begin.count('\n')
-
+
# Prep wrapper
try:
wrapper_begin, wrapper_end = self.wrapper.split('{code}')
@@ -457,7 +484,7 @@ class CodeEngine(object):
# (and perhaps others) will use the line number from the NEXT
# line of code that is non-empty, not from the line of code where
# the error started. In these cases, it's important
- # to make sure that the line number is triggered immediately
+ # to make sure that the line number is triggered immediately
# after user code, so that the line number makes sense. Hence,
# we need to strip all whitespace from the part of the wrapper
         # that follows user code. For symmetry, we do the same for both
@@ -469,9 +496,9 @@ class CodeEngine(object):
wrapper_begin = wrapper_begin.replace('{stdoutdelim}', stdoutdelim).replace('{stderrdelim}', stderrdelim)
wrapper_begin_offset = wrapper_begin.count('\n')
wrapper_end_offset = wrapper_end.count('\n')
-
+
# Take care of custom code
- # Line counters must be reset for cc begin, code, and cc end, since
+ # Line counters must be reset for cc begin, code, and cc end, since
# all three are separate
lines_user = 0
inline_count = 0
@@ -483,7 +510,7 @@ class CodeEngine(object):
args=c.args_run,
instance=c.instance,
line=c.line))
-
+
# Actual code
lines_input = c.code.count('\n')
code_index[c.instance] = CodeIndex(c.input_file, c.command, c.line_int, lines_total, lines_user, lines_input, inline_count)
@@ -492,11 +519,11 @@ class CodeEngine(object):
inline_count += 1
lines_total += lines_input
lines_user += lines_input
-
+
# Wrapper after
script.append(wrapper_end)
lines_total += wrapper_end_offset
-
+
# Take care of user code
lines_user = 0
inline_count = 0
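The counters threaded through these hunks (`lines_total`, `lines_user`, `lines_input`, `inline_count`) feed the `CodeIndex` entries that later let stderr line numbers be traced back to the document. A compressed sketch of that bookkeeping pattern, with invented sample chunks:

    from collections import OrderedDict, namedtuple

    CodeIndex = namedtuple('CodeIndex', ['input_file', 'command', 'line_int',
                                         'lines_total', 'lines_user',
                                         'lines_input', 'inline_count'])

    script, code_index = [], OrderedDict()
    lines_total = lines_user = inline_count = 0
    chunks = [('doc.tex', 'c', 10, 'x = 1\ny = 2\n'),   # (input file, command, doc line, code)
              ('doc.tex', 'i', 12, 'x + y\n')]
    for n, (input_file, cmd, line_int, chunk) in enumerate(chunks):
        lines_input = chunk.count('\n')
        code_index[str(n)] = CodeIndex(input_file, cmd, line_int, lines_total,
                                       lines_user, lines_input, inline_count)
        script.append(chunk)
        if cmd == 'i':
            inline_count += 1
        lines_total += lines_input
        lines_user += lines_input
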
@@ -508,22 +535,33 @@ class CodeEngine(object):
args=c.args_run,
instance=c.instance,
line=c.line))
-
+
# Actual code
- lines_input = c.code.count('\n')
- code_index[c.instance] = CodeIndex(c.input_file, c.command, c.line_int, lines_total, lines_user, lines_input, inline_count)
- if c.command == 'i':
- script.append(self.formatter.format(code=c.code.rstrip('\n')))
- inline_count += 1
+ if c.command in ('s', 'sub'):
+ field_list = self.process_sub(c)
+ code = ''.join(self.sub.format(field_delim='=>PYTHONTEX:FIELD_DELIM#', field=field) for field in field_list)
+ lines_input = code.count('\n')
+ code_index[c.instance] = CodeIndex(c.input_file, c.command, c.line_int, lines_total, lines_user, lines_input, inline_count)
+ script.append(code)
+ # #### The traceback system will need to be redone to give
+ # better line numbers
+ lines_total += lines_input
+ lines_user += lines_input
else:
- script.append(c.code)
- lines_total += lines_input
- lines_user += lines_input
-
+ lines_input = c.code.count('\n')
+ code_index[c.instance] = CodeIndex(c.input_file, c.command, c.line_int, lines_total, lines_user, lines_input, inline_count)
+ if c.command == 'i':
+ script.append(self.formatter.format(code=c.code.rstrip('\n')))
+ inline_count += 1
+ else:
+ script.append(c.code)
+ lines_total += lines_input
+ lines_user += lines_input
+
# Wrapper after
script.append(wrapper_end)
lines_total += wrapper_end_offset
-
+
# Take care of custom code
lines_user = 0
inline_count = 0
@@ -535,7 +573,7 @@ class CodeEngine(object):
args=c.args_run,
instance=c.instance,
line=c.line))
-
+
# Actual code
lines_input = c.code.count('\n')
code_index[c.instance] = CodeIndex(c.input_file, c.command, c.line_int, lines_total, lines_user, lines_input, inline_count)
@@ -544,32 +582,105 @@ class CodeEngine(object):
inline_count += 1
lines_total += lines_input
lines_user += lines_input
-
+
# Wrapper after
script.append(wrapper_end)
lines_total += wrapper_end_offset
-
+
# Finish script
script.append(script_end.format(dependencies_delim='=>PYTHONTEX:DEPENDENCIES#', created_delim='=>PYTHONTEX:CREATED#'))
-
+
return script, code_index
-
+ def process_sub(self, pytxcode):
+ '''
+ Take the code part of a `sub` command or environment, which is
+ essentially an interpolation string, and extract the replacement
+ fields. Process the replacement fields into a form suitable for
+ execution and process the string into a template into which the output
+ may be substituted.
+ '''
+ start = '!'
+ open_delim = '{'
+ close_delim = '}'
+ if self.sub_field_re is None:
+ field_pattern_list = []
+
+ # {s}: start, {o}: open_delim, {c}: close_delim
+ field_content_1_recursive = r'(?:[^{o}{c}\n]*|{o}R{c})+'
+ field_content_1_final_inner = r'[^{o}{c}\n]*'
+ field_1 = '{s}{o}(?!{o})' + field_content_1_recursive + '(?<!{c}){c}'
+ for n in range(5): # Want to allow 5 levels inside
+ field_1 = field_1.replace('R', field_content_1_recursive)
+ field_1 = field_1.replace('R', field_content_1_final_inner)
+ field_1 = field_1.format(s=re.escape(start), o=re.escape(open_delim), c=re.escape(close_delim))
+ field_pattern_list.append(field_1)
+
+ for n in range(2, 6+1): # Want to allow 5 levels inside
+ field_n = '{s}' + '{o}'*n + '(?!{o})F(?<!{c})' + '{c}'*n
+ field_n = field_n.replace('F', '(?:[^{o}{c}\n]*|{o}{{1,{n_minus}}}(?!{o})|{c}{{1,{n_minus}}}(?!{c}))+')
+ field_n = field_n.format(s=re.escape(start), o=re.escape(open_delim), c=re.escape(close_delim), n_minus=n-1)
+ field_pattern_list.append(field_n)
+
+ field = '|'.join(field_pattern_list)
+
+ escaped_start = '(?<!{s})(?:{s}{s})+(?={s}{o}|{o})'.format(s=re.escape(start), o=re.escape(open_delim))
+
+ pattern = '''
+ (?P<escaped>{es}) |
+ (?P<field>{f}) |
+ (?P<invalid>{so}) |
+ (?P<text_literal_start>{s}+) |
+ (?P<text_general>[^{s}]+)
+ '''.format(es=escaped_start, f=field, so=re.escape(start + open_delim), s=re.escape(start))
+ self.sub_field_re = re.compile(pattern, re.VERBOSE)
+
+ template_list = []
+ field_list = []
+ field_number = 0
+ for m in self.sub_field_re.finditer(pytxcode.code):
+ if m.lastgroup == 'escaped':
+ template_list.append(m.group().replace(start+start, start))
+ elif m.lastgroup == 'field':
+ template_list.append('{{{0}}}'.format(field_number))
+ field_list.append(m.group()[1:].lstrip(open_delim).rstrip(close_delim).strip())
+ field_number += 1
+ elif m.lastgroup.startswith('text'):
+ template_list.append(m.group().replace('{', '{{').replace('}', '}}'))
+ else:
+ msg = '''\
+ * PythonTeX error:
+ Invalid "sub" command or environment. Invalid replacement fields.
+ {0}on or after line {1}
+ '''.format(pytxcode.input_file + ': ' if pytxcode.input_file else '', pytxcode.line)
+ msg = textwrap.dedent(msg)
+ sys.exit(msg)
+
+ pytxcode.sub_template = ''.join(template_list)
+
+ return field_list
+
+
+
+
+
+
+
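`process_sub()` above turns the body of a `sub` command or environment into a list of replacement-field expressions plus a `str.format()` template in which literal braces are doubled. A much simplified stand-in that only handles single-level `!{...}` fields and `!!` escapes (the regex and helper below are illustrative, not the full pattern built above):

    import re

    # '!' starts a field, braces delimit it, '!!' escapes a literal '!'.
    field_re = re.compile(r'(?P<escaped>!!)|(?P<field>!\{[^{}\n]*\})|(?P<text>[^!]+|!)')

    def split_sub(source):
        template_parts, fields = [], []
        for m in field_re.finditer(source):
            if m.lastgroup == 'escaped':
                template_parts.append('!')
            elif m.lastgroup == 'field':
                fields.append(m.group()[2:-1].strip())
                template_parts.append('{' + str(len(fields) - 1) + '}')
            else:
                # Literal text: double braces so str.format() leaves them alone.
                template_parts.append(m.group().replace('{', '{{').replace('}', '}}'))
        return ''.join(template_parts), fields

    template, fields = split_sub(r'$x = !{x}$ and $y = !{f(x)}$')
    # template == '$x = {0}$ and $y = {1}$', fields == ['x', 'f(x)']
    print(template.format(1, 4))    # -> '$x = 1$ and $y = 4$'
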
class SubCodeEngine(CodeEngine):
'''
Create Engine instances that inherit from existing instances.
'''
- def __init__(self, base, name, language=None, extension=None, command=None,
- template=None, wrapper=None, formatter=None, errors=None,
- warnings=None, linenumbers=None, lookbehind=False,
+ def __init__(self, base, name, language=None, extension=None, commands=None,
+ template=None, wrapper=None, formatter=None, sub=None,
+ errors=None, warnings=None, linenumbers=None, lookbehind=False,
console=None, created=None, startup=None, extend=None):
-
- self._rawargs = (name, language, extension, command, template, wrapper,
- formatter, errors, warnings,
+
+ self._rawargs = (name, language, extension, commands, template, wrapper,
+ formatter, sub, errors, warnings,
linenumbers, lookbehind, console, startup, created)
-
+
base_rawargs = engine_dict[base]._rawargs
args = []
for n, arg in enumerate(self._rawargs):
@@ -577,11 +688,11 @@ class SubCodeEngine(CodeEngine):
args.append(base_rawargs[n])
else:
args.append(arg)
-
+
CodeEngine.__init__(self, *args)
-
+
self.extend = engine_dict[base].extend
-
+
if extend is not None:
if sys.version_info[0] == 2:
if not isinstance(extend, basestring):
@@ -601,15 +712,15 @@ class PythonConsoleEngine(CodeEngine):
'''
This uses the Engine class to store information needed for emulating
Python interactive consoles.
-
- In the current form, it isn't used as a real engine, but rather as a
- convenient storage class that keeps the treatment of all languages/code
+
+ In the current form, it isn't used as a real engine, but rather as a
+ convenient storage class that keeps the treatment of all languages/code
types uniform.
'''
def __init__(self, name, startup=None):
- CodeEngine.__init__(self, name=name, language='python',
- extension='', command='', template='',
- wrapper='', formatter='', errors=None,
+ CodeEngine.__init__(self, name=name, language='python',
+ extension='', commands='', template='',
+ wrapper='', formatter='', sub='', errors=None,
warnings=None, linenumbers=None, lookbehind=False,
console=True, startup=startup, created=None)
@@ -618,13 +729,13 @@ class PythonConsoleEngine(CodeEngine):
python_template = '''
# -*- coding: {encoding} -*-
-
+
{future}
import os
import sys
import codecs
-
+
if '--interactive' not in sys.argv[1:]:
if sys.version_info[0] == 2:
sys.stdout = codecs.getwriter('{encoding}')(sys.stdout, 'strict')
@@ -632,12 +743,12 @@ python_template = '''
else:
sys.stdout = codecs.getwriter('{encoding}')(sys.stdout.buffer, 'strict')
sys.stderr = codecs.getwriter('{encoding}')(sys.stderr.buffer, 'strict')
-
+
if '{utilspath}' and '{utilspath}' not in sys.path:
- sys.path.append('{utilspath}')
+ sys.path.append('{utilspath}')
from pythontex_utils import PythonTeXUtils
pytex = PythonTeXUtils()
-
+
pytex.docdir = os.getcwd()
if os.path.isdir('{workingdir}'):
os.chdir('{workingdir}')
@@ -648,14 +759,14 @@ python_template = '''
sys.exit('Cannot find directory {workingdir}')
if pytex.docdir not in sys.path:
sys.path.append(pytex.docdir)
-
+
{extend}
-
+
pytex.id = '{family}_{session}_{restart}'
pytex.family = '{family}'
pytex.session = '{session}'
pytex.restart = '{restart}'
-
+
{body}
pytex.cleanup()
@@ -667,25 +778,34 @@ python_wrapper = '''
pytex.args = '{args}'
pytex.instance = '{instance}'
pytex.line = '{line}'
-
+
print('{stdoutdelim}')
sys.stderr.write('{stderrdelim}\\n')
pytex.before()
-
+
{code}
-
+
pytex.after()
'''
+python_sub = '''print('{field_delim}')\nprint({field})\n'''
+
CodeEngine('python', 'python', '.py', '{python} {file}.py',
- python_template, python_wrapper, 'print(pytex.formatter({code}))',
- 'Error:', 'Warning:', ['line {number}', ':{number}:'])
+ python_template, python_wrapper, 'print(pytex.formatter({code}))',
+ python_sub, 'Error:', 'Warning:', ['line {number}', ':{number}:'])
SubCodeEngine('python', 'py')
SubCodeEngine('python', 'pylab', extend='from pylab import *')
+
+SubCodeEngine('python', 'sage', language='sage', extension='.sage',
+ template=python_template.replace('{future}', ''),
+ extend = 'pytex.formatter = latex',
+ commands='{sage} {file}.sage')
+
+
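On the interpreter side, `get_script()` wraps each extracted field in the engine's `sub` template, so for the Python engine registered above every field becomes a delimiter line followed by a `print()` of the field's value (the field expression here is invented):

    python_sub = "print('{field_delim}')\nprint({field})\n"
    snippet = python_sub.format(field_delim='=>PYTHONTEX:FIELD_DELIM#', field='2 + 2')
    print(snippet, end='')
    # print('=>PYTHONTEX:FIELD_DELIM#')
    # print(2 + 2)
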
sympy_extend = '''
from sympy import *
pytex.set_formatter('sympy_latex')
@@ -707,15 +827,15 @@ PythonConsoleEngine('sympycon', startup='from sympy import *')
ruby_template = '''
# -*- coding: {encoding} -*-
-
+
unless ARGV.include?('--interactive')
$stdout.set_encoding('{encoding}')
$stderr.set_encoding('{encoding}')
end
-
+
class RubyTeXUtils
- attr_accessor :id, :family, :session, :restart,
- :command, :context, :args,
+ attr_accessor :id, :family, :session, :restart,
+ :command, :context, :args,
:instance, :line, :dependencies, :created,
:docdir, :_context_raw
def initialize
@@ -770,11 +890,11 @@ ruby_template = '''
if @created
@created.each {{ |x| puts x }}
end
- end
+ end
end
-
+
rbtex = RubyTeXUtils.new
-
+
rbtex.docdir = Dir.pwd
if File.directory?('{workingdir}')
Dir.chdir('{workingdir}')
@@ -783,14 +903,14 @@ ruby_template = '''
abort('Cannot change to directory {workingdir}')
end
$LOAD_PATH.push(rbtex.docdir) unless $LOAD_PATH.include?(rbtex.docdir)
-
+
{extend}
-
+
rbtex.id = '{family}_{session}_{restart}'
rbtex.family = '{family}'
rbtex.session = '{session}'
rbtex.restart = '{restart}'
-
+
{body}
rbtex.cleanup
@@ -802,18 +922,21 @@ ruby_wrapper = '''
rbtex.args = '{args}'
rbtex.instance = '{instance}'
rbtex.line = '{line}'
-
+
puts '{stdoutdelim}'
$stderr.puts '{stderrdelim}'
rbtex.before
-
+
{code}
-
+
rbtex.after
'''
-CodeEngine('ruby', 'ruby', '.rb', '{ruby} {file}.rb', ruby_template,
- ruby_wrapper, 'puts rbtex.formatter({code})',
+ruby_sub = '''puts '{field_delim}'\nputs {field}\n'''
+
+
+CodeEngine('ruby', 'ruby', '.rb', '{ruby} {file}.rb', ruby_template,
+ ruby_wrapper, 'puts rbtex.formatter({code})', ruby_sub,
['Error)', '(Errno', 'error'], 'warning:', ':{number}:')
SubCodeEngine('ruby', 'rb')
@@ -823,26 +946,26 @@ SubCodeEngine('ruby', 'rb')
julia_template = '''
# -*- coding: UTF-8 -*-
-
+
# Currently, Julia only supports UTF-8
# So can't set stdout and stderr encoding
-
+
type JuliaTeXUtils
- id::String
- family::String
- session::String
- restart::String
- command::String
+ id::AbstractString
+ family::AbstractString
+ session::AbstractString
+ restart::AbstractString
+ command::AbstractString
context::Dict
- args::String
- instance::String
- line::String
-
- _dependencies::Array{{String}}
- _created::Array{{String}}
- docdir::String
- _context_raw::String
-
+ args::AbstractString
+ instance::AbstractString
+ line::AbstractString
+
+ _dependencies::Array{{AbstractString}}
+ _created::Array{{AbstractString}}
+ docdir::AbstractString
+ _context_raw::AbstractString
+
formatter::Function
before::Function
after::Function
@@ -854,26 +977,26 @@ julia_template = '''
pt_to_mm::Function
pt_to_bp::Function
cleanup::Function
-
+
self::JuliaTeXUtils
-
+
function JuliaTeXUtils()
self = new()
self.self = self
- self._dependencies = Array(String, 0)
- self._created = Array(String, 0)
+ self._dependencies = Array(AbstractString, 0)
+ self._created = Array(AbstractString, 0)
self._context_raw = ""
-
+
function formatter(expr)
string(expr)
end
self.formatter = formatter
-
+
function null()
end
self.before = null
self.after = null
-
+
function add_dependencies(files...)
for file in files
push!(self._dependencies, file)
@@ -886,17 +1009,17 @@ julia_template = '''
end
end
self.add_created = add_created
-
+
function set_context(expr)
if expr != "" && expr != self._context_raw
- self.context = {{strip(x[1]) => strip(x[2]) for x in map(x -> split(x, "="), split(expr, ","))}}
+ self.context = Dict{{Any, Any}}([ strip(x[1]) => strip(x[2]) for x in map(x -> split(x, "="), split(expr, ",")) ])
self._context_raw = expr
end
end
self.set_context = set_context
-
+
function pt_to_in(expr)
- if isa(expr, String)
+ if isa(expr, AbstractString)
if sizeof(expr) > 2 && expr[end-1:end] == "pt"
expr = expr[1:end-2]
end
@@ -906,22 +1029,22 @@ julia_template = '''
end
end
self.pt_to_in = pt_to_in
-
+
function pt_to_cm(expr)
return self.pt_to_in(expr)*2.54
end
self.pt_to_cm = pt_to_cm
-
+
function pt_to_mm(expr)
return self.pt_to_in(expr)*25.4
end
self.pt_to_mm = pt_to_mm
-
+
function pt_to_bp(expr)
return self.pt_to_in(expr)*72
end
self.pt_to_bp = pt_to_bp
-
+
function cleanup()
println("{dependencies_delim}")
for f in self._dependencies
@@ -933,13 +1056,13 @@ julia_template = '''
end
end
self.cleanup = cleanup
-
+
return self
end
end
-
+
jltex = JuliaTeXUtils()
-
+
jltex.docdir = pwd()
try
cd("{workingdir}")
@@ -950,17 +1073,17 @@ julia_template = '''
end
if !(in(jltex.docdir, LOAD_PATH))
push!(LOAD_PATH, jltex.docdir)
- end
-
+ end
+
{extend}
-
+
jltex.id = "{family}_{session}_{restart}"
jltex.family = "{family}"
jltex.session = "{session}"
jltex.restart = "{restart}"
-
+
{body}
-
+
jltex.cleanup()
'''
@@ -968,29 +1091,34 @@ julia_wrapper = '''
jltex.command = "{command}"
jltex.set_context("{context}")
jltex.args = "{args}"
- jltex.instance = "{instance}"
+ jltex.instance = "{instance}"
jltex.line = "{line}"
-
+
println("{stdoutdelim}")
write(STDERR, "{stderrdelim}\\n")
- jltex.before()
-
+ jltex.before()
+
{code}
-
+
jltex.after()
'''
-CodeEngine('julia', 'julia', '.jl', '{julia} "{file}.jl"', julia_template,
- julia_wrapper, 'println(jltex.formatter({code}))',
+julia_sub = '''println("{field_delim}")\nprintln({field})\n'''
+
+
+CodeEngine('julia', 'julia', '.jl', '{julia} "{file}.jl"', julia_template,
+ julia_wrapper, 'println(jltex.formatter({code}))', julia_sub,
'ERROR:', 'WARNING:', ':{number}', True)
SubCodeEngine('julia', 'jl')
+
+
octave_template = '''
# Octave only supports @CLASS, not classdef
# So use a struct plus functions as a substitute for a utilities class
-
+
global octavetex = struct();
octavetex.docdir = pwd();
try
@@ -1002,30 +1130,30 @@ octave_template = '''
error("Could not find directory {workingdir}");
end
end
- if find_dir_in_path(octavetex.docdir)
+ if dir_in_loadpath(octavetex.docdir)
else
addpath(octavetex.docdir);
end
-
+
{extend}
-
+
octavetex.dependencies = {{}};
octavetex.created = {{}};
octavetex._context_raw = '';
-
+
function octavetex_formatter(argin)
disp(argin);
end
octavetex.formatter = @(argin) octavetex_formatter(argin);
-
+
function octavetex_before()
end
octavetex.before = @() octavetex_before();
-
+
function octavetex_after()
end
octavetex.after = @() octavetex_after();
-
+
function octavetex_add_dependencies(varargin)
global octavetex;
for i = 1:length(varargin)
@@ -1033,7 +1161,7 @@ octave_template = '''
end
end
octavetex.add_dependencies = @(varargin) octavetex_add_dependencies(varargin{{:}});
-
+
function octavetex_add_created(varargin)
global octavetex;
for i = 1:length(varargin)
@@ -1041,7 +1169,7 @@ octave_template = '''
end
end
octavetex.add_created = @(varargin) octavetex_add_created(varargin{{:}});
-
+
function octavetex_set_context(argin)
global octavetex;
if ~strcmp(argin, octavetex._context_raw)
@@ -1058,7 +1186,7 @@ octave_template = '''
end
end
octavetex.set_context = @(argin) octavetex_set_context(argin);
-
+
function out = octavetex_pt_to_in(argin)
if ischar(argin)
if length(argin) > 2 && argin(end-1:end) == 'pt'
@@ -1071,22 +1199,22 @@ octave_template = '''
end
end
octavetex.pt_to_in = @(argin) octavetex_pt_to_in(argin);
-
+
function out = octavetex_pt_to_cm(argin)
out = octavetex_pt_to_in(argin)*2.54;
end
octavetex.pt_to_cm = @(argin) octavetex_pt_to_cm(argin);
-
+
function out = octavetex_pt_to_mm(argin)
out = octavetex_pt_to_in(argin)*25.4;
end
octavetex.pt_to_mm = @(argin) octavetex_pt_to_mm(argin);
-
+
function out = octavetex_pt_to_bp(argin)
out = octavetex_pt_to_in(argin)*72;
end
octavetex.pt_to_bp = @(argin) octavetex_pt_to_bp(argin);
-
+
function octavetex_cleanup()
global octavetex;
fprintf(strcat('{dependencies_delim}', "\\n"));
@@ -1096,18 +1224,18 @@ octave_template = '''
fprintf(strcat('{created_delim}', "\\n"));
for i = 1:length(octavetex.created)
fprintf(strcat(octavetex.created{{i}}, "\\n"));
- end
+ end
end
octavetex.cleanup = @() octavetex_cleanup();
-
+
octavetex.id = '{family}_{session}_{restart}';
octavetex.family = '{family}';
octavetex.session = '{session}';
octavetex.restart = '{restart}';
-
+
{body}
- octavetex.cleanup()
+ octavetex.cleanup()
'''
octave_wrapper = '''
@@ -1116,17 +1244,187 @@ octave_wrapper = '''
octavetex.args = '{args}';
octavetex.instance = '{instance}';
octavetex.line = '{line}';
-
- octavetex.before()
-
+
+ octavetex.before()
+
fprintf(strcat('{stdoutdelim}', "\\n"));
fprintf(stderr, strcat('{stderrdelim}', "\\n"));
{code}
-
+
octavetex.after()
'''
+octave_sub = '''disp("{field_delim}")\ndisp({field})\n'''
+
CodeEngine('octave', 'octave', '.m',
- '{octave} -q "{File}.m"',
- octave_template, octave_wrapper, 'disp({code})',
+ '{octave} -q "{File}.m"',
+ octave_template, octave_wrapper, 'disp({code})', octave_sub,
'error', 'warning', 'line {number}')
+
+bash_template = '''
+ cd "{workingdir}"
+ {body}
+ echo "{dependencies_delim}"
+ echo "{created_delim}"
+ '''
+
+bash_wrapper = '''
+ echo "{stdoutdelim}"
+ >&2 echo "{stderrdelim}"
+ {code}
+ '''
+
+bash_sub = '''echo "{field_delim}"\necho {field}\n'''
+
+CodeEngine('bash', 'bash', '.sh',
+ '{bash} "{file}.sh"',
+ bash_template, bash_wrapper, '{code}', bash_sub,
+ ['error', 'Error'], ['warning', 'Warning'],
+ 'line {number}')
+
+
+rust_template = '''
+ // -*- coding: utf-8 -*-
+ #![allow(dead_code)]
+ #![allow(unused_imports)]
+
+
+ mod rust_tex_utils {{
+ use std::fmt;
+ use std::collections;
+ use std::io::prelude::*;
+
+ pub struct RustTeXUtils {{
+ _formatter: Box<FnMut(&fmt::Display) -> String>,
+ _before: Box<FnMut()>,
+ _after: Box<FnMut()>,
+ pub family: &'static str,
+ pub session: &'static str,
+ pub restart: &'static str,
+ pub dependencies: Vec<String>,
+ pub created: Vec<String>,
+ pub command: &'static str,
+ pub context: collections::HashMap<&'static str, &'static str>,
+ pub args: collections::HashMap<&'static str, &'static str>,
+ pub instance: &'static str,
+ pub line: &'static str,
+ }}
+
+ impl RustTeXUtils {{
+ pub fn new() -> Self {{
+ RustTeXUtils {{
+ _formatter: Box::new(|x: &fmt::Display| format!("{{}}", x)),
+ _before: Box::new(|| {{}}),
+ _after: Box::new(|| {{}}),
+ family: "{family}",
+ session: "{session}",
+ restart: "{restart}",
+ dependencies: Vec::new(),
+ created: Vec::new(),
+ command: "",
+ context: collections::HashMap::new(),
+ args: collections::HashMap::new(),
+ instance: "",
+ line: "",
+ }}
+ }}
+
+
+ pub fn formatter<A: fmt::Display>(&mut self, x: A) -> String {{
+ (*self._formatter)(&x)
+ }}
+ pub fn set_formatter<F: FnMut(&fmt::Display) -> String + 'static>(&mut self, f: F) {{
+ self._formatter = Box::new(f);
+ }}
+
+ pub fn before(&mut self) {{
+ (*self._before)();
+ }}
+ pub fn set_before<F: FnMut() + 'static>(&mut self, f: F) {{
+ self._before = Box::new(f);
+ }}
+
+ pub fn after(&mut self) {{
+ (*self._after)();
+ }}
+ pub fn set_after<F: FnMut() + 'static>(&mut self, f: F) {{
+ self._after = Box::new(f);
+ }}
+
+ pub fn add_dependencies<SS: IntoIterator>(&mut self, deps: SS) where SS::Item: Into<String> {{
+ self.dependencies.append(&mut deps.into_iter().map(|x| x.into()).collect());
+ }}
+
+ pub fn add_created<SS: IntoIterator>(&mut self, crts: SS) where SS::Item: Into<String> {{
+ self.created.append(&mut crts.into_iter().map(|x| x.into()).collect());
+ }}
+
+ pub fn cleanup(self) {{
+ println!("{{}}", "{dependencies_delim}");
+ for x in self.dependencies {{
+ println!("{{}}", x);
+ }}
+ println!("{{}}", "{created_delim}");
+ for x in self.created {{
+ println!("{{}}", x);
+ }}
+ }}
+
+ pub fn setup_wrapper(&mut self, cmd: &'static str, cxt: &'static str, ags: &'static str, ist: &'static str, lne: &'static str) {{
+ fn parse_map(kvs: &'static str) -> collections::HashMap<&'static str, &'static str> {{
+ kvs.split(',').filter(|s| !s.is_empty()).map(|kv| {{
+ let (k, v) = kv.split_at(kv.find('=').expect(&format!("Error parsing supposed key-value pair ({{}})", kv)));
+ (k.trim(), v[1..].trim())
+ }}).collect()
+ }}
+ self.command = cmd;
+ self.context = parse_map(cxt);
+ self.args = parse_map(ags);
+ self.instance = ist;
+ self.line = lne;
+ }}
+ }}
+ }}
+
+
+ use std::{{io, fmt, env, path, ffi, collections}};
+ use std::io::prelude::*;
+
+
+ #[allow(unused_mut)]
+ fn main() {{
+ let mut rstex = rust_tex_utils::RustTeXUtils::new();
+ if env::set_current_dir(ffi::OsString::from("{workingdir}".to_string())).is_err() && env::args().all(|x| x != "--manual") {{
+ panic!("Could not change to the specified working directory ({workingdir})");
+ }}
+
+ {extend}
+
+ {body}
+
+ rstex.cleanup();
+ }}
+ '''
+
+rust_wrapper = '''
+ rstex.setup_wrapper("{command}", "{context}", "{args}", "{instance}", "{line}");
+ println!("{stdoutdelim}");
+ writeln!(io::stderr(), "{stderrdelim}").unwrap();
+ rstex.before();
+
+ {code}
+
+ rstex.after();
+ '''
+
+rust_sub = '''println!("{field_delim}");\nprintln!("{{}}", {field});\n'''
+
+CodeEngine('rust', 'rust', '.rs',
+ # The full script name has to be used in order to make Windows and Unix behave nicely
+ # together when naming executables. Despite appearances, using `.exe` works on Unix too.
+ ['{rustc} --crate-type bin -o {File}.exe -L {workingdir} {file}.rs', '{File}.exe'],
+ rust_template, rust_wrapper, 'println!("{{}}", rstex.formatter({code}));', rust_sub,
+ errors='error:', warnings='warning:', linenumbers='.rs:{number}',
+ created='{File}.exe')
+
+SubCodeEngine('rust', 'rs')
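Unlike the other engines, the rust engine's command is a two-element list: compile the generated source with rustc, then run the produced executable, which is also registered through created='{File}.exe' so it can be cleaned up. A sketch of how such a list-valued command could be driven, assuming each entry is formatted and run in order (the field values below are invented and are not PythonTeX's actual substitutions):

    import shlex
    import subprocess

    command = ['{rustc} --crate-type bin -o {File}.exe -L {workingdir} {file}.rs',
               '{File}.exe']
    fields = {'rustc': 'rustc', 'workingdir': '.',
              'File': './rust_default_default_0', 'file': './rust_default_default_0'}
    for cmd in command:
        # Fill in the placeholders, then run: first the compile step, then the binary.
        subprocess.check_call(shlex.split(cmd.format(**fields)))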