Diffstat (limited to 'Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx')
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx  612
1 files changed, 396 insertions, 216 deletions
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx
index c3310545ac0..ca9ac5399b0 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx
@@ -1,6 +1,6 @@
% \iffalse meta-comment
%
-%% File: l3tl-analysis.dtx Copyright (C) 2011 The LaTeX3 Project
+%% File: l3tl-analysis.dtx Copyright (C) 2011-2012 The LaTeX3 Project
%%
%% It may be distributed and/or modified under the conditions of the
%% LaTeX Project Public License (LPPL), either version 1.3c of this
@@ -88,22 +88,83 @@
%
% \subsection{Internal}
%
+% \begin{variable}{\s_tl}
+% The format used to store token lists internally uses
+% the scan mark \cs{s_tl} as a delimiter.
+% \end{variable}
+%
+% The task of the \pkg{l3tl-analysis} module is to convert
+% token lists to an internal format which allows us to extract
+% all the relevant information about individual tokens
+% (category code, character code), as well as reconstruct
+% the token list quickly. This internal format is used in
+% \pkg{l3regex} where we need to support arbitrary tokens,
+% and it is used in conversion functions in \pkg{l3str},
+% where we wish to support clusters of characters instead
+% of single characters.
+%
+% The internal format thus takes the form of a succession
+% of items of the form
+% \begin{quote}
+% \meta{tokens} \cs{s_tl} \meta{catcode} \meta{char code} \cs{s_tl}
+% \end{quote}
+% Here, the \meta{tokens} \texttt{o}- and \texttt{x}-expand
+% to the original token in the token list (even when that token
+% has a special category code), or to the cluster of tokens
+% corresponding to one Unicode character in the given encoding.
+% The \meta{catcode} is given as a single hexadecimal digit,
+% $0$ for control sequences. The \meta{char code} is given
+% as a decimal number, $-1$ for control sequences.
+%
+% Using delimited arguments lets us build the \meta{tokens}
+% progressively when doing an encoding conversion in \pkg{l3str}.
+% On the other hand, the delimiter \cs{s_tl} may not appear
+% unbraced in \meta{tokens}. This is not a problem because we
+% are careful to wrap control sequences in braces (as an argument
+% to \cs{exp_not:n}) when converting from a general token list
+% to the internal format.
+%
% \begin{function}{\tl_set_analysis:Nn}
% \begin{syntax}
-% \cs{tl_analysis:Nn} \meta{tl var} \Arg{token list}
+% \cs{tl_set_analysis:Nn} \meta{tl var} \Arg{token list}
% \end{syntax}
-% This function analysiss each token in the \meta{token list},
-% converting each \meta{token} to
-% \begin{quote}
-% \Arg{tokens} \cs{q_mark} \meta{catcode} \meta{char code} |,|
-% \end{quote}
-% where \meta{tokens} expands in one step to the \meta{token},
-% and \meta{catcode} and \meta{char code} are the category code
-% and character code of the token if it is a character token,
-% and otherwise are $0$ and $-1$ respectively.
-% The result of the conversion is then stored in \meta{tl var}.
+% This function analyzes each \meta{token} in the \meta{token list},
+% converting each \meta{token} to one item in the internal format.
+% The result of the analysis is then stored in \meta{tl var}.
+% The following transformation is performed:
+% \begin{itemize}
+% \item a control sequence |\cs| becomes
+% |\exp_not:n { \cs } \s_tl 0 -1 \s_tl|;
+% \item a begin-group character |{| becomes
+% |\exp_after:wN { \if_false: } \fi: \s_tl 1| \meta{char code} \cs{s_tl};
+% \item an end-group character |}| becomes
+% |\if_false: { \fi: } \s_tl 2| \meta{char code} \cs{s_tl};
+% \item a character with any other category code becomes
+% |\exp_not:n| \Arg{character} \cs{s_tl} \meta{hex catcode}
+% \meta{char code} \cs{s_tl}.
+% \end{itemize}
+% We can note that in every case the part before \cs{s_tl}
+% both \texttt{o}-expands and \texttt{x}-expands to the token
+% which is represented.
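+% For instance (an illustrative sketch, with spacing added for
+% readability), analyzing the balanced token list |\par A{}| would
+% produce the four items
+% \begin{quote}
+% |\exp_not:n { \par } \s_tl 0 -1 \s_tl| \\
+% |\exp_not:n { A } \s_tl B 65 \s_tl| \\
+% |\exp_after:wN { \if_false: } \fi: \s_tl 1 123 \s_tl| \\
+% |\if_false: { \fi: } \s_tl 2 125 \s_tl|
+% \end{quote}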
% \end{function}
%
+% \begin{function}{\tl_set_analysis_from_str:Nn}
+% \begin{syntax}
+% \cs{tl_set_analysis_from_str:Nn} \meta{tl var} \Arg{token list}
+% \end{syntax}
+% This function analyzes each \meta{token} in the \meta{token list},
+% after conversion to a string.
+% The result of the analysis is then stored in \meta{tl var}.
+% \begin{itemize}
+% \item Spaces become \verb*| |\cs{s_tl} |A32| \cs{s_tl}.
+% \item Other characters become
+% \meta{character} \cs{s_tl} |C| \meta{char code} \cs{s_tl}.
+% \end{itemize}
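+% For instance (an illustrative sketch, with spacing added for
+% readability), the three-character string |a b| would be stored as
+% \begin{quote}
+% |a| \cs{s_tl} |C97| \cs{s_tl}
+% \verb*| |\cs{s_tl} |A32| \cs{s_tl}
+% |b| \cs{s_tl} |C98| \cs{s_tl}
+% \end{quote}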
+% \end{function}
+%
+% ^^A todo: ask LuaTeX list for an \ifx\undefined <active char>
+% ^^A which does not add the <active char> in memory.
+%
% \end{documentation}
%
% \begin{implementation}
@@ -122,6 +183,33 @@
%
% \subsection{Variables and helper functions}
%
+% \begin{variable}{\s_tl}
+% The scan mark \cs{s_tl} is used as a delimiter in the internal
+% format. This is more practical than using a quark, because we
+% would then need to control expansion much more carefully: compare
+% \cs{int_value:w} |`#1| \cs{s_tl} with
+% \cs{int_value:w} |`#1| \cs{exp_stop_f:} \cs{exp_not:N} \cs{q_mark}
+% to extract a character code followed by the delimiter in an
+% \texttt{x}-expansion.
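+% For instance (a minimal sketch; the name \cs{foo_grab:w} is purely
+% illustrative and not part of the module), a delimited auxiliary
+% \begin{quote}
+% |\cs_new:Npn \foo_grab:w #1 \s_tl { ... }|
+% \end{quote}
+% can later pick up a character code produced within an
+% \texttt{x}-expansion simply as \cs{int_value:w} |`#1| \cs{s_tl},
+% since the scan mark both terminates the number and survives the
+% expansion unchanged.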
+% \begin{macrocode}
+\scan_new:N \s_tl
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{variable}{\l_tl_analysis_token}
+% \begin{variable}{\l_tl_analysis_char_token}
+% The tokens in the token list are probed with the \TeX{}
+% primitive \tn{futurelet}. We use \cs{l_tl_analysis_token}
+% in that construction. In some cases, we convert the
+% following token to a string before probing it: then the
+% token variable used is \cs{l_tl_analysis_char_token}.
+% \begin{macrocode}
+\cs_new_eq:NN \l_tl_analysis_token ?
+\cs_new_eq:NN \l_tl_analysis_char_token ?
+% \end{macrocode}
+% \end{variable}
+% \end{variable}
+%
% \begin{variable}{\l_tl_analysis_normal_int}
% The number of normal (\texttt{N}-type argument) tokens
% since the last special token.
@@ -148,14 +236,6 @@
% \end{macrocode}
% \end{variable}
%
-% \begin{variable}{\l_tl_analysis_char_int}
-% Holds the character code of the token currently considered,
-% in cases where it is a character.
-% \begin{macrocode}
-\int_new:N \l_tl_analysis_char_int
-% \end{macrocode}
-% \end{variable}
-%
% \begin{variable}{\l_tl_analysis_type_int}
% When encountering special characters, we record their \enquote{type}
% in this integer.
@@ -168,36 +248,40 @@
% The result of the conversion is stored in this token list,
% with a succession of items of the form
% \begin{quote}
-% \meta{tokens} \cs{q_mark} \meta{catcode} \meta{char code} |,|
+% \meta{tokens} \cs{s_tl} \meta{catcode} \meta{char code} \cs{s_tl}
% \end{quote}
% \begin{macrocode}
\tl_new:N \g_tl_analysis_result_tl
% \end{macrocode}
% \end{variable}
%
-% \begin{macro}[int]{\tl_analysis_extract_charcode:}
-% \begin{macro}[aux]{\tl_analysis_extract_charcode_aux:w}
-% Extracting the character code from the meaning of \cs{l_tl_analysis_token}.
-% This has no error checking, and should only be assumed to work
-% for begin-group and end-group tokens.
+% \begin{macro}[int, EXP]{\tl_analysis_extract_charcode:}
+% \begin{macro}[aux, EXP]{\tl_analysis_extract_charcode_aux:w}
+% Extracting the character code from the meaning of
+% \cs{l_tl_analysis_token}. This has no error checking,
+% and should only be assumed to work for begin-group
+% and end-group character tokens. It produces a number
+% in the form |`|\meta{char}.
% \begin{macrocode}
-\cs_new_protected_nopar:Npn \tl_analysis_extract_charcode:
+\cs_new_nopar:Npn \tl_analysis_extract_charcode:
{
\exp_after:wN \tl_analysis_extract_charcode_aux:w
\token_to_meaning:N \l_tl_analysis_token
}
-\cs_new_protected_nopar:Npn \tl_analysis_extract_charcode_aux:w #1 ~ #2 ~ { ` }
+\cs_new:Npn \tl_analysis_extract_charcode_aux:w #1 ~ #2 ~ { ` }
% \end{macrocode}
% \end{macro}
% \end{macro}
%
-% \begin{macro}[int]{\tl_analysis_cs_space_count:NN}
-% \begin{macro}[aux]{\tl_analysis_cs_space_count:w}
-% \begin{macro}[aux]{\tl_analysis_cs_space_count_end:w}
+% \begin{macro}[int, EXP]{\tl_analysis_cs_space_count:NN}
+% \begin{macro}[aux, EXP]{\tl_analysis_cs_space_count:w}
+% \begin{macro}[aux, EXP]{\tl_analysis_cs_space_count_end:w}
% Counts the number of spaces in the string representation of its
% second argument, as well as the number of characters following
% the last space in that representation, and feeds the two numbers
% as semicolon-delimited arguments to the first argument.
+% When this function is used, the escape character is printable
+% and non-space.
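+% For instance (an illustrative sketch, assuming a backslash escape
+% character and a hypothetical receiver \cs{foo_aux:ww}), the call
+% \begin{quote}
+% |\tl_analysis_cs_space_count:NN \foo_aux:ww \cs_new:Npn|
+% \end{quote}
+% would leave roughly |\foo_aux:ww 0 ; 11 ;| in the input stream,
+% since the string representation |\cs_new:Npn| contains no space
+% and is eleven characters long.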
% \begin{macrocode}
\cs_new:Npn \tl_analysis_cs_space_count:NN #1 #2
{
@@ -224,10 +308,10 @@
%
% Our goal is to produce a token list of the form roughly
% \begin{quote}
-% \Arg{token 1} \cs{q_mark} \meta{catcode 1} \meta{char code 1} |,| \\
-% \Arg{token 2} \cs{q_mark} \meta{catcode 2} \meta{char code 2} |,| \\
+% \meta{token 1} \cs{s_tl} \meta{catcode 1} \meta{char code 1} \cs{s_tl} \\
+% \meta{token 2} \cs{s_tl} \meta{catcode 2} \meta{char code 2} \cs{s_tl} \\
% \ldots{}
-% \Arg{token N} \cs{q_mark} \meta{catcode N} \meta{char code N} |,|
+% \meta{token N} \cs{s_tl} \meta{catcode N} \meta{char code N} \cs{s_tl}
% \end{quote}
% Most but not all tokens can be grabbed as an undelimited
% (\texttt{N}-type) argument by \TeX{}.
@@ -239,12 +323,12 @@
% registers, and removed from the input stream by some means.
%
% To ease the difficult first pass, we first do some setup
-% with \cs{tl_analysis_i_setup:n}.
+% with \cs{tl_analysis_setup:n}.
% Active characters set equal to non-active characters
% cause trouble, so we disable all active characters by
-% setting them equal to \texttt{undefined}.
+% setting them equal to \texttt{undefined} locally.
% We also set there the escape character to be printable
-% (backslash, but this later changes to a forward slash):
+% (backslash, but this later oscillates between solidus and backslash):
% this makes it possible to distinguish characters from
% control sequences.
%
@@ -259,10 +343,6 @@
% \item an end-group token (category code $2$),
% either space (character code $32$), or non-space;
% \item a space token (category code $10$, character code $32$);
-%^^A \item a character with category code $3$, $4$, $6$, $7$, $8$, $11$,
-%^^A or $12$, or with category code $10$ but character code $\neq 32$;
-%^^A \item a non-character (primitive, macro, register, font,
-%^^A undefined, \emph{etc.}).
% \item anything else (then the token is always an \texttt{N}-type argument).
% \end{itemize}
% The token itself can \enquote{look like} one of the following
@@ -270,19 +350,18 @@
% \item a non-active character, in which case its meaning
% is automatically that associated to its character code
% and category code, we call it \enquote{true} character;
-% \item an active character;
+% \item an active character (we eliminate those in the setup step);
% \item a control sequence.
% \end{itemize}
% The only tokens which are not valid \texttt{N}-type arguments are
% true begin-group characters, true end-group characters, and true spaces.
% We will detect those characters by scanning ahead with \tn{futurelet},
-% then distinguishing true characters from tokens set equal to them
-% using the \tn{string} representation.
+% then distinguishing true characters from control sequences set equal
+% to them using the \tn{string} representation.
%
% The second pass is a simple exercise in expandable loops.
%
% \begin{macro}[int]{\tl_analysis:n}
-% \begin{macro}[int]{\tl_set_analysis:Nn}
% Everything is done within a group, and all definitions will be local.
% We use \cs{group_align_safe_begin/end:} to avoid problems in case
% \cs{tl_analysis:n} is used within an alignment and its argument
@@ -298,6 +377,13 @@
\group_align_safe_end:
\group_end:
}
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[int]{\tl_set_analysis:Nn}
+% This function is used in \pkg{l3regex}. Simply copy the result
+% of \cs{tl_analysis:n} into the given token list.
+% \begin{macrocode}
\cs_new_protected:Npn \tl_set_analysis:Nn #1#2
{
\tl_analysis:n {#2}
@@ -305,6 +391,41 @@
}
% \end{macrocode}
% \end{macro}
+%
+% \begin{macro}[int]{\tl_set_analysis_from_str:Nn}
+% \begin{macro}[aux, EXP]{\tl_analysis_from_str_loop:N}
+% This function, used in \pkg{l3str}, could be emulated by passing the
+% token list through \cs{tl_to_str:n} and using
+% \cs{tl_set_analysis:Nn}, but this would be terribly inefficient.
+% Instead, convert to a string with spaces of category code other,
+% and for each character output one item of the analysis:
+% \meta{character} \cs{s_tl} \meta{catcode} \meta{char code} \cs{s_tl}
+% where the \meta{catcode} is either |A| (for spaces) or |C|
+% (for other characters).
+% \begin{macrocode}
+\cs_new_protected:Npn \tl_set_analysis_from_str:Nn #1#2
+ {
+ \str_gset_other:Nn \g_tl_analysis_result_tl {#2}
+ \tl_set:Nx #1
+ {
+ \exp_after:wN \tl_analysis_from_str_loop:N
+ \g_tl_analysis_result_tl \prg_map_break:
+ \prg_break_point:n { }
+ }
+ }
+\group_begin:
+ \char_set_catcode_other:N A
+ \char_set_catcode_other:N C
+ \cs_new_protected:Npn \tl_analysis_from_str_loop:N #1
+ {
+ #1 \s_tl
+ \if_charcode:w #1 ~ A \else: C \fi:
+ \int_value:w `#1 \s_tl
+ \tl_analysis_from_str_loop:N
+ }
+\group_end:
+% \end{macrocode}
+% \end{macro}
% \end{macro}
%
% \subsection{Setup}
@@ -316,20 +437,22 @@
% \texttt{undefined}. Since Unicode contains too many characters
% to loop over all of them, we instead loop over the input token
% list as a string: any active character in the token list
-% must appear in its string representation.
-% ^^A todo: ask LuaTeX list for an \ifx\undefined <active char>
-% ^^A which does not add the <active char> in memory.
+% must appear in its string representation. The string is shortened
+% a little by making the escape character unprintable. The active
+% space must be disabled separately (the loop skips over it otherwise),
+% and we end the loop by feeding an odd non-\texttt{N}-type
+% argument to the looping macro.
% \begin{macrocode}
\cs_new_protected:Npn \tl_analysis_setup:n #1
{
- \int_set:Nn \tex_escapechar:D { 92 }
+ \int_set_eq:NN \tex_escapechar:D \c_minus_one
\exp_after:wN \tl_analysis_disable_loop:N
\tl_to_str:n {#1} { ~ } { ? ~ \prg_map_break: }
\prg_break_point:n { }
}
\group_begin:
\char_set_catcode_active:N \^^@
- \cs_new_protected_nopar:Npn \tl_analysis_disable_loop:N #1
+ \cs_new_protected:Npn \tl_analysis_disable_loop:N #1
{
\tex_lccode:D \c_zero `#1 ~
\tl_to_lowercase:n { \tex_let:D ^^@ } \c_undefined:D
@@ -349,22 +472,22 @@
%
% After the setup step, we have $11$ types of tokens:
% \begin{itemize}
-% \item[1] a true non-space begin-group character;
-% \item[2] a true space begin-group character;
-% \item[3] a true non-space end-group character;
-% \item[4] a true space end-group character;
-% \item[5] a true space blank space character;
-% \item[6] an undefined active character;
-% \item[7] any other true character;
-% \item[8] a control sequence equal to a begin-group token (category code $1$);
-% \item[9] a control sequence equal to an end-group token (category code $2$);
-% \item[10] a control sequence equal to a space token
+% \item[1.] a true non-space begin-group character;
+% \item[2.] a true space begin-group character;
+% \item[3.] a true non-space end-group character;
+% \item[4.] a true space end-group character;
+% \item[5.] a true space blank space character;
+% \item[6.] an undefined active character;
+% \item[7.] any other true character;
+% \item[8.] a control sequence equal to a begin-group token (category code $1$);
+% \item[9.] a control sequence equal to an end-group token (category code $2$);
+% \item[10.] a control sequence equal to a space token
% (character code $32$, category code $10$);
-% \item[11] any other control sequence.
+% \item[11.] any other control sequence.
% \end{itemize}
% Our first tool is \tn{futurelet}. This cannot distinguish
-% cases $1$ and $2$ from $8$, nor cases $3$ and $4$ from $9$,
-% nor case $5$ from case $10$. Those cases will be distinguished
+% case $8$ from $1$ or $2$, nor case $9$ from $3$ or $4$,
+% nor case $10$ from case $5$. Those cases will be distinguished
% by applying the \tn{string} primitive to the following token,
% after possibly changing the escape character to ensure that
% a control sequence's string representation cannot be mistaken
@@ -376,7 +499,7 @@
% \cs{str_tail:n} \Arg{token} is non-empty, because the
% escape character is printable.
%
-% \begin{macro}{\tl_analysis_i:n}
+% \begin{macro}[int]{\tl_analysis_i:n}
% We read tokens one by one using \tn{futurelet}.
% While performing the loop, we keep track of the number of
% true begin-group characters minus the number of
@@ -385,6 +508,7 @@
% \begin{macrocode}
\cs_new_protected:Npn \tl_analysis_i:n #1
{
+ \int_set:Nn \tex_escapechar:D { 92 }
\int_zero:N \l_tl_analysis_normal_int
\int_zero:N \l_tl_analysis_index_int
\int_zero:N \l_tl_analysis_nesting_int
@@ -403,17 +527,21 @@
% \end{macro}
%
% \begin{macro}[int]{\tl_analysis_i_type:w}
-% At this point, \cs{l_tl_analysis_token} holds the meaning of the following
-% token. We store in \cs{l_tl_analysis_type_int} the meaning of the token ahead:
+% At this point, \cs{l_tl_analysis_token} holds the meaning
+% of the following token. We store in \cs{l_tl_analysis_type_int}
+% the meaning of the token ahead:
% \begin{itemize}
-% \item[0] space token;
-% \item[1] begin-group token;
-% \item[-1] end-group token;
-% \item[2] other.
+% \item 0 space token;
+% \item 1 begin-group token;
+% \item -1 end-group token;
+% \item 2 other.
% \end{itemize}
-% The values $0$, $1$, $-1$ correspond to how much a true such character
-% changes the nesting level ($2$ is used only here, and is irrelevant later).
-% Then call the auxiliary for each case.
+% The values $0$, $1$, $-1$ correspond to how much a true such
+% character changes the nesting level ($2$ is used only here,
+% and is irrelevant later). Then call the auxiliary for each case.
+% Note that nesting conditionals here is safe because we only skip
+% over \cs{l_tl_analysis_token} if it matches one of the
+% character tokens (hence it is not a primitive conditional).
% \begin{macrocode}
\cs_new_protected_nopar:Npn \tl_analysis_i_type:w
{
@@ -441,65 +569,38 @@
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[int]{\tl_analysis_i_safe:N}
-% \begin{macro}[aux]{\tl_analysis_i_cs:ww}
-% The upcoming token is safe: we can simply grab it in a second pass.
-% Since other branches of the code must pass their tokens through
-% \tn{string}, we do it here as well. It's been optimized in an evil way,
-% sorry.
-%^^A todo: document
-% \begin{macrocode}
-\cs_new_protected:Npn \tl_analysis_i_safe:N #1
- {
- \if_charcode:w
- \scan_stop:
- \exp_after:wN \use_none:n \token_to_str:N #1 \prg_do_nothing:
- \scan_stop:
- \int_incr:N \l_tl_analysis_normal_int
- \else:
- \tl_analysis_cs_space_count:NN \tl_analysis_i_cs:ww #1
- \fi:
- \tl_analysis_i_loop:w
- }
-\cs_new_protected:Npn \tl_analysis_i_cs:ww #1; #2;
- {
- \if_num:w #1 > \c_zero
- \tex_skip:D \l_tl_analysis_index_int
- = \int_eval:w \l_tl_analysis_normal_int + \c_one sp \scan_stop:
- \tex_advance:D \l_tl_analysis_index_int #1 \exp_stop_f:
- \l_tl_analysis_normal_int #2 \exp_stop_f:
- \else:
- \tex_advance:D \l_tl_analysis_normal_int #2 \exp_stop_f:
- \fi:
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[aux]{\tl_analysis_i_space:w}
+% \begin{macro}[int]{\tl_analysis_i_space:w}
% \begin{macro}[aux]{\tl_analysis_i_space_test:w}
% In this branch, the following token's meaning is a blank space.
% Apply \tn{string} to that token: if it is a control sequence
% the result starts with the escape character; otherwise it is
% a true blank space, whose string representation is also a blank space.
% We test for that in \cs{tl_analysis_i_space_test:w},
-% after grabbing the first character of the string representation
-% as \cs{l_tl_analysis_token}.
+% after grabbing as \cs{l_tl_analysis_char_token} the first character
+% of the string representation.
% Also, since \cs{tl_analysis_i_store:} expects the special token to be
% stored in the relevant \tn{toks} register, we do that. The extra
% \cs{exp_not:n} is unnecessary of course, but it makes the treatment
% of all tokens more homogeneous.
+% If we discover that the next token was actually a control sequence
+% instead of a true space, then we increment the counter of normal tokens.
+% We now have in front of us the whole string representation of
+% the control sequence, including potential spaces; those will appear
+% to be true spaces later in this pass. Hence, all other branches of
+% the code in this first pass need to consider the string representation,
+% so that the second pass does not need to test the meaning of tokens,
+% only strings.
% \begin{macrocode}
\cs_new_protected_nopar:Npn \tl_analysis_i_space:w
{
\tex_afterassignment:D \tl_analysis_i_space_test:w
\exp_after:wN \cs_set_eq:NN
- \exp_after:wN \l_tl_analysis_token
+ \exp_after:wN \l_tl_analysis_char_token
\token_to_str:N
}
\cs_new_protected_nopar:Npn \tl_analysis_i_space_test:w
{
- \if_meaning:w \l_tl_analysis_token \c_space_token
+ \if_meaning:w \l_tl_analysis_char_token \c_space_token
\tex_toks:D \l_tl_analysis_index_int { \exp_not:n { ~ } }
\tl_analysis_i_store:
\else:
@@ -511,17 +612,20 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}[int]{\tl_analysis_i_bgroup:w}
-% \begin{macro}[int]{\tl_analysis_i_egroup:w}
+% \begin{macro}[int]{\tl_analysis_i_bgroup:w, \tl_analysis_i_egroup:w}
% \begin{macro}[aux]{\tl_analysis_i_group:nw}
% \begin{macro}[aux]{\tl_analysis_i_group_test:w}
% The token might be either a true character token with
% catcode $1$ or $2$, or it could be a control sequence.
% The only tricky case is if the character code happens
% to be equal to the escape character: then we change
-% the escape character, so that the string representation
-% of the true character and of a control sequence set equal
-% to it start differently.
+% the escape character from backslash to solidus or back,
+% so that the string representation of the true character
+% and of a control sequence set equal to it start differently.
+% Then probe what the first character of that string
+% representation is: this is the place where we need
+% \cs{l_tl_analysis_char_token} to be a separate control
+% sequence from \cs{l_tl_analysis_token}, to compare them.
% \begin{macrocode}
\group_begin:
\char_set_catcode_group_begin:N \^^@
@@ -533,22 +637,21 @@
\cs_new_protected_nopar:Npn \tl_analysis_i_egroup:w
{ \tl_analysis_i_group:nw { \if_false: ^^B \fi: ^^@ } }
\group_end:
-\cs_new_protected_nopar:Npn \tl_analysis_i_group:nw #1
+\cs_new_protected:Npn \tl_analysis_i_group:nw #1
{
- \l_tl_analysis_char_int = \tl_analysis_extract_charcode: \scan_stop:
- \tex_lccode:D \c_zero \l_tl_analysis_char_int
+ \tex_lccode:D \c_zero = \tl_analysis_extract_charcode: \scan_stop:
\tl_to_lowercase:n { \tex_toks:D \l_tl_analysis_index_int {#1} }
- \if_num:w \l_tl_analysis_char_int = \tex_escapechar:D
+ \if_num:w \tex_lccode:D \c_zero = \tex_escapechar:D
\int_set:Nn \tex_escapechar:D { 139 - \tex_escapechar:D }
\fi:
\tex_afterassignment:D \tl_analysis_i_group_test:w
\exp_after:wN \cs_set_eq:NN
- \exp_after:wN \l_tl_analysis_token
+ \exp_after:wN \l_tl_analysis_char_token
\token_to_str:N
}
\cs_new_protected_nopar:Npn \tl_analysis_i_group_test:w
{
- \if_num:w \tl_analysis_extract_charcode: = \l_tl_analysis_char_int
+ \if_charcode:w \l_tl_analysis_token \l_tl_analysis_char_token
\tl_analysis_i_store:
\else:
\int_incr:N \l_tl_analysis_normal_int
@@ -559,37 +662,41 @@
% \end{macro}
% \end{macro}
% \end{macro}
-% \end{macro}
%
% \begin{macro}[int]{\tl_analysis_i_store:}
-% This function is called each time we meet a special token,
-% and the value of \cs{l_tl_analysis_type_int} indicates which case
+% This function is called each time we meet a special token;
+% at this point, the \tn{toks} register \cs{l_tl_analysis_index_int}
+% holds a token list which expands to the given special token.
+% Also, the value of \cs{l_tl_analysis_type_int} indicates which case
% we are in:
% \begin{itemize}
-% \item[-1] end-group character;
-% \item[0] space character;
-% \item[1] begin-group character.
+% \item -1 end-group character;
+% \item 0 space character;
+% \item 1 begin-group character.
% \end{itemize}
% We need to distinguish further the case of a space character
% (code $32$) from other character codes, because those will
-% behave differently in the second pass. Namely, we change the
-% cases above to
+% behave differently in the second pass. Namely, after testing
+% the \tn{lccode} of $0$ (which holds the present character code)
+% we change the cases above to
% \begin{itemize}
-% \item[-2] space end-group character;
-% \item[-1] non-space end-group character;
-% \item[0] space blank space character;
-% \item[1] non-space begin-group character;
-% \item[2] space begin-group character.
+% \item -2 space end-group character;
+% \item -1 non-space end-group character;
+% \item 0 space blank space character;
+% \item 1 non-space begin-group character;
+% \item 2 space begin-group character.
% \end{itemize}
% This has the property that non-space characters correspond to odd
% values of \cs{l_tl_analysis_type_int}.
-% Also, the \tn{toks} register number \cs{l_tl_analysis_index_int}
-% holds a token list which expands to the given special token.
+% The number of normal tokens, and the type of special token,
+% are packed into a \tn{skip} register.
+% Finally, we check whether we reached the last closing brace, in which
+% case we stop by disabling the looping function (locally).
% \begin{macrocode}
\cs_new_protected_nopar:Npn \tl_analysis_i_store:
{
\tex_advance:D \l_tl_analysis_nesting_int \l_tl_analysis_type_int
- \if_num:w \l_tl_analysis_char_int = \c_thirty_two
+ \if_num:w \tex_lccode:D \c_zero = \c_thirty_two
\tex_multiply:D \l_tl_analysis_type_int \c_two
\fi:
\tex_skip:D \l_tl_analysis_index_int
@@ -603,12 +710,61 @@
% \end{macrocode}
% \end{macro}
%
+% \begin{macro}[int]{\tl_analysis_i_safe:N}
+% \begin{macro}[aux]{\tl_analysis_i_cs:ww}
+% This should be the simplest case: since the upcoming token is safe,
+% we can simply grab it in a second pass. However, other branches of
+% the code must pass their tokens through \tn{string}, hence we do it
+% here as well, with some optimizations. If the token is a single
+% character (including space), the \cs{if_charcode:w} test yields
+% true, and we simply count one \enquote{normal} token. On the other
+% hand, if the token is a control sequence, we should replace it by
+% its string representation for compatibility with other code
+% branches. Instead of slowly looping through the characters with
+% the main code, we use the knowledge of how the second pass works:
+% if the control sequence name contains no space, count that token
+% as a number of normal tokens equal to its string length. If the
+% control sequence contains spaces, they should be registered as
+% special characters by increasing \cs{l_tl_analysis_index_int}
+% (no need to carefully count characters between each space), and
+% all characters after the last space should be counted in the
+% following sequence of \enquote{normal} tokens.
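+% For example (assuming a printable backslash escape character), the
+% safe token |\relax| has the six-character string representation
+% |\relax| with no space, so it simply counts as six
+% \enquote{normal} tokens; a control sequence whose name does contain
+% spaces additionally advances \cs{l_tl_analysis_index_int} by the
+% number of spaces.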
+% \begin{macrocode}
+\cs_new_protected:Npn \tl_analysis_i_safe:N #1
+ {
+ \if_charcode:w
+ \scan_stop:
+ \exp_after:wN \use_none:n \token_to_str:N #1 \prg_do_nothing:
+ \scan_stop:
+ \int_incr:N \l_tl_analysis_normal_int
+ \else:
+ \tl_analysis_cs_space_count:NN \tl_analysis_i_cs:ww #1
+ \fi:
+ \tl_analysis_i_loop:w
+ }
+\cs_new_protected:Npn \tl_analysis_i_cs:ww #1; #2;
+ {
+ \if_num:w #1 > \c_zero
+ \tex_skip:D \l_tl_analysis_index_int
+ = \int_eval:w \l_tl_analysis_normal_int + \c_one sp \scan_stop:
+ \tex_advance:D \l_tl_analysis_index_int #1 \exp_stop_f:
+ \l_tl_analysis_normal_int #2 \exp_stop_f:
+ \else:
+ \tex_advance:D \l_tl_analysis_normal_int #2 \exp_stop_f:
+ \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
% \subsection{Second pass}
%
% The second pass is an exercise in expandable loops.
+% All the necessary information is stored in \tn{skip}
+% and \tn{toks} registers.
%
% \begin{macro}[int]{\tl_analysis_ii:n}
-% \begin{macro}[aux]{\tl_analysis_ii_loop:w}
+% \begin{macro}[int, EXP]{\tl_analysis_ii_loop:w}
% Start the loop with the index $0$. No need for an end-marker:
% the loop will stop by itself when the last index is read.
% We will repeatedly oscillate between reading long stretches
@@ -631,16 +787,18 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}[aux]{\tl_analysis_ii_normals:ww}
-% \begin{macro}[aux]{\tl_analysis_ii_normal:wwN}
+% \begin{macro}[int, EXP]{\tl_analysis_ii_normals:ww}
+% \begin{macro}[aux, EXP]{\tl_analysis_ii_normal:wwN}
% The first argument is the number of normal tokens which remain
% to be read, and the second argument is the index in the array
% produced in the first step.
% A character's string representation is always one character long,
% while a control sequence is always longer (we have set the escape
% character to a printable value). In both cases, we leave
-% \cs{exp_not:n} \Arg{token} \cs{q_mark} in the input stream
-% (after \texttt{x}-expansion).
+% \cs{exp_not:n} \Arg{token} \cs{s_tl} in the input stream
+% (after \texttt{x}-expansion). Here, \cs{exp_not:n} is used
+% rather than \cs{exp_not:N} because |#3| could be \cs{s_tl},
+% hence must be hidden behind braces in the result.
% \begin{macrocode}
\cs_new:Npn \tl_analysis_ii_normals:ww #1;
{
@@ -651,7 +809,7 @@
}
\cs_new:Npn \tl_analysis_ii_normal:wwN #1; #2; #3
{
- \exp_not:n { \exp_not:n { #3 } } \exp_not:N \q_mark
+ \exp_not:n { \exp_not:n { #3 } } \s_tl
\if_charcode:w
\scan_stop:
\exp_after:wN \use_none:n \token_to_str:N #3 \prg_do_nothing:
@@ -666,41 +824,53 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}[aux]{\tl_analysis_ii_char:Nww}
+% \begin{macro}[int, EXP]{\tl_analysis_ii_char:Nww}
% If the normal token we grab is a character, leave
-% \meta{catcode} \meta{charcode} followed by a comma
+% \meta{catcode} \meta{charcode} followed by \cs{s_tl}
% in the input stream, and call \cs{tl_analysis_ii_normals:ww}
% with its first argument decremented.
% \begin{macrocode}
-\cs_new:Npn \tl_analysis_ii_char:Nww #1
- {
- \if_meaning:w #1 \c_undefined:D D \else:
- \if_catcode:w #1 \c_catcode_other_token C \else:
- \if_catcode:w #1 \c_catcode_letter_token B \else:
- \if_catcode:w #1 \c_math_toggle_token 3 \else:
- \if_catcode:w #1 \c_alignment_token 4 \else:
- \if_catcode:w #1 \c_math_superscript_token 7 \else:
- \if_catcode:w #1 \c_math_subscript_token 8 \else:
- 6
- \fi: \fi: \fi: \fi: \fi: \fi: \fi:
- \int_value:w `#1 ,
- \exp_after:wN \tl_analysis_ii_normals:ww
- \int_use:N \int_eval:w \c_minus_one +
- }
+\group_begin:
+ \char_set_catcode_other:N A
+ \char_set_catcode_other:N B
+ \char_set_catcode_other:N C
+ \char_set_uccode:nn { `? } { `D }
+ \tl_to_uppercase:n
+ {
+ \cs_new:Npn \tl_analysis_ii_char:Nww #1
+ {
+ \if_meaning:w #1 \c_undefined:D ? \else:
+ \if_catcode:w #1 \c_catcode_other_token C \else:
+ \if_catcode:w #1 \c_catcode_letter_token B \else:
+ \if_catcode:w #1 \c_math_toggle_token 3 \else:
+ \if_catcode:w #1 \c_alignment_token 4 \else:
+ \if_catcode:w #1 \c_math_superscript_token 7 \else:
+ \if_catcode:w #1 \c_math_subscript_token 8 \else:
+ \if_catcode:w #1 \c_space_token A \else:
+ 6
+ \fi: \fi: \fi: \fi: \fi: \fi: \fi: \fi:
+ \int_value:w `#1 \s_tl
+ \exp_after:wN \tl_analysis_ii_normals:ww
+ \int_use:N \int_eval:w \c_minus_one +
+ }
+ }
+\group_end:
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux]{\tl_analysis_ii_cs:Nww}
+% \begin{macro}[int, EXP]{\tl_analysis_ii_cs:Nww}
+% \begin{macro}[aux, EXP]{\tl_analysis_ii_cs_test:ww}
% If the token we grab is a control sequence, leave
-% |0-1,| (as category code and character code) in the input stream,
+% |0 -1| (as category code and character code) in the input stream,
+% followed by \cs{s_tl},
% and call \cs{tl_analysis_ii_normals:ww} with updated arguments.
% \begin{macrocode}
\cs_new:Npn \tl_analysis_ii_cs:Nww #1
{
- 0 -1 ,
+ 0 -1 \s_tl
\tl_analysis_cs_space_count:NN \tl_analysis_ii_cs_test:ww #1
}
-\cs_new:Npn \tl_analysis_ii_cs_test:ww #1 ; #2 ; #3; #4;
+\cs_new:Npn \tl_analysis_ii_cs_test:ww #1 ; #2 ; #3 ; #4 ;
{
\exp_after:wN \tl_analysis_ii_normals:ww
\int_use:N \int_eval:w
@@ -711,16 +881,17 @@
\fi:
- #2
\exp_after:wN ;
- \int_use:N \int_eval:w #4 + #1;
+ \int_use:N \int_eval:w #4 + #1 ;
}
% \end{macrocode}
% \end{macro}
+% \end{macro}
%
-% \begin{macro}[aux]{\tl_analysis_ii_special:w}
-% \begin{macro}[aux]{\tl_analysis_ii_special_char:wN}
-% \begin{macro}[aux]{\tl_analysis_ii_special_space:w}
+% \begin{macro}[int, EXP]{\tl_analysis_ii_special:w}
+% \begin{macro}[aux, EXP]{\tl_analysis_ii_special_char:wN}
+% \begin{macro}[aux, EXP]{\tl_analysis_ii_special_space:w}
% Here, |#1| is the current index in the array built in the first pass.
-% Check whether we reached the end (we shouldn't insert the trailing
+% Check now whether we reached the end (we shouldn't keep the trailing
% end-group character that marked the end of the token list in the
% first pass).
% Unpack the \tn{toks} register: when \texttt{x}-expanding again,
@@ -728,35 +899,39 @@
% Then leave the category code in the input stream, followed by
% the character code, and call \cs{tl_analysis_ii_loop:w} with the next index.
% \begin{macrocode}
-\cs_new:Npn \tl_analysis_ii_special:w \fi: \tl_analysis_ii_normal:wwN 0; #1;
- {
- \fi:
- \if_num:w #1 = \l_tl_analysis_index_int
- \exp_after:wN \prg_map_break:
- \fi:
- \tex_the:D \tex_toks:D #1 \exp_stop_f: \exp_not:N \q_mark
- \if_case:w \etex_gluestretch:D \tex_skip:D #1 \exp_stop_f:
- A
- \or: 1
- \or: 1
- \else: 2
- \fi:
- \if_int_odd:w \etex_gluestretch:D \tex_skip:D #1 \exp_stop_f:
- \exp_after:wN \tl_analysis_ii_special_char:wN \int_use:N
- \else:
- \exp_after:wN \tl_analysis_ii_special_space:w \int_use:N
- \fi:
- \int_eval:w \c_one + #1 \exp_after:wN ;
- \token_to_str:N
- }
+\group_begin:
+ \char_set_catcode_other:N A
+ \cs_new:Npn \tl_analysis_ii_special:w
+ \fi: \tl_analysis_ii_normal:wwN 0 ; #1 ;
+ {
+ \fi:
+ \if_num:w #1 = \l_tl_analysis_index_int
+ \exp_after:wN \prg_map_break:
+ \fi:
+ \tex_the:D \tex_toks:D #1 \s_tl
+ \if_case:w \etex_gluestretch:D \tex_skip:D #1 \exp_stop_f:
+ A
+ \or: 1
+ \or: 1
+ \else: 2
+ \fi:
+ \if_int_odd:w \etex_gluestretch:D \tex_skip:D #1 \exp_stop_f:
+ \exp_after:wN \tl_analysis_ii_special_char:wN \int_use:N
+ \else:
+ \exp_after:wN \tl_analysis_ii_special_space:w \int_use:N
+ \fi:
+ \int_eval:w \c_one + #1 \exp_after:wN ;
+ \token_to_str:N
+ }
+\group_end:
\cs_new:Npn \tl_analysis_ii_special_char:wN #1 ; #2
{
- \int_value:w `#2 ,
+ \int_value:w `#2 \s_tl
\tl_analysis_ii_loop:w #1 ;
}
\cs_new:Npn \tl_analysis_ii_special_space:w #1 ; ~
{
- 32 ,
+ 32 \s_tl
\tl_analysis_ii_loop:w #1 ;
}
% \end{macrocode}
@@ -777,7 +952,7 @@
{ tl-analysis }
{
\exp_after:wN \tl_show_analysis_loop:wNw \g_tl_analysis_result_tl
- \q_mark { ? \prg_map_break: } ,
+ \s_tl { ? \prg_map_break: } \s_tl
\prg_break_point:n { }
}
}
@@ -790,16 +965,16 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\tl_show_analysis_loop:wNw}
-% Here, |#1| \texttt{o}-expands to the token;
-% |#2| is the category code (one hexadecimal digit),
+% \begin{macro}[aux, rEXP]{\tl_show_analysis_loop:wNw}
+% Here, |#1| \texttt{o}- and \texttt{x}-expands to the token;
+% |#2| is the category code (one uppercase hexadecimal digit),
% $0$ for control sequences;
% |#3| is the character code, which we ignore.
% In the cases of control sequences and active characters,
% the meaning may overflow one line, and we want to truncate
% it. Those cases are thus separated out.
% \begin{macrocode}
-\cs_new:Npn \tl_show_analysis_loop:wNw #1 \q_mark #2 #3,
+\cs_new:Npn \tl_show_analysis_loop:wNw #1 \s_tl #2 #3 \s_tl
{
\use_none:n #2
\iow_newline: > \c_space_tl \c_space_tl
@@ -820,9 +995,9 @@
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux]{\tl_show_analysis_normal:n}
+% \begin{macro}[aux, rEXP]{\tl_show_analysis_normal:n}
% Non-active characters are a simple matter of printing
-% the character, and its meaning. One can check that
+% the character, and its meaning. Our test suite checks that
% begin-group and end-group characters do not mess up
% \TeX{}'s alignment status.
% \begin{macrocode}
@@ -834,10 +1009,10 @@
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux]{\tl_show_analysis_cs:n}
-% \begin{macro}[aux]{\tl_show_analysis_active:n}
-% \begin{macro}[aux]{\tl_show_analysis_long:nn}
-% \begin{macro}[aux]{\tl_show_analysis_long_aux:nnn}
+% \begin{macro}[aux, rEXP]{\tl_show_analysis_cs:n}
+% \begin{macro}[aux, rEXP]{\tl_show_analysis_active:n}
+% \begin{macro}[aux, rEXP]{\tl_show_analysis_long:nn}
+% \begin{macro}[aux, rEXP]{\tl_show_analysis_long_aux:nnn}
% Control sequences and active characters are printed in the same way,
% making sure not to go beyond the \cs{l_iow_line_length_int}. In case
% of an overflow, we replace the last characters by
@@ -853,17 +1028,20 @@
{ \token_to_str:N #1 }
{ \token_to_meaning:N #1 }
}
-\cs_new_nopar:Npn \tl_show_analysis_long_aux:nnn #1#2#3
+\cs_new:Npn \tl_show_analysis_long_aux:nnn #1#2#3
{
\int_compare:nNnTF
- { \str_length:n { >>> #1 ~ ( #3 #2 ) } }
- > { \l_iow_line_length_int }
+ { \str_length:n { #1 ~ ( #3 #2 ) } }
+ > { \l_iow_line_length_int - \c_three }
{
- \str_substr:nnn { >>> #1 ~ ( #3 #2 ) } \c_three
- { \l_iow_line_length_int - \str_length:N \c_tl_show_analysis_etc_str }
+ \str_substr:nnn { #1 ~ ( #3 #2 ) } \c_zero
+ {
+ \l_iow_line_length_int - \c_three
+ - \str_length:N \c_tl_show_analysis_etc_str
+ }
\c_tl_show_analysis_etc_str
}
- {#1~(#3#2)}
+ { #1 ~ ( #3 #2 ) }
}
% \end{macrocode}
% \end{macro}
@@ -871,21 +1049,23 @@
% \end{macro}
% \end{macro}
%
+% \subsection{Messages}
+%
% \begin{variable}{\c_tl_show_analysis_etc_str}
% When a control sequence (or active character)
% and its meaning are too long to fit in one line
% of the terminal, the end is replaced by this token list.
% \begin{macrocode}
-\str_const:Nx \c_tl_show_analysis_etc_str % (
- { \iow_char:N \\ETC. ) }
+\tl_const:Nx \c_tl_show_analysis_etc_str % (
+ { \token_to_str:N \ETC.) }
% \end{macrocode}
% \end{variable}
%
% \begin{macrocode}
\msg_kernel_new:nnn { tl-analysis } { show }
{
- Token~list~
- \str_if_eq:nnF {#1} { \l_tl_tmpa_tl } { \token_to_str:N #1~}
+ The~token~list~
+ \str_if_eq:nnF {#1} { \l_tl_tmpa_tl } { \token_to_str:N #1 ~ }
\tl_if_empty:NTF #1
{ is~empty }
{ contains~the~tokens: }