author    Karl Berry <karl@freefriends.org>  2013-03-12 22:38:22 +0000
committer Karl Berry <karl@freefriends.org>  2013-03-12 22:38:22 +0000
commit    574c4946ce553944c797750da25ba8844775996f (patch)
tree      9ea4729ca09c49a4564d99d9134a5417eb7b2af3 /Master/texmf-dist/source/latex/l3experimental
parent    5c30ca128a813719715e012a0d72c1ccaba2d811 (diff)
latex3 (12mar13)
git-svn-id: svn://tug.org/texlive/trunk@29361 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Master/texmf-dist/source/latex/l3experimental')
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3dt/l3dt.dtx            |    8
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3flag.dtx         |    2
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx        |   59
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3str-convert.dtx  | 3623
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3str-format.dtx   |  389
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx          | 4140
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins          |   47
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx  |   12
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/xgalley/l3galley.dtx     |    4
9 files changed, 4250 insertions, 4034 deletions
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3dt/l3dt.dtx b/Master/texmf-dist/source/latex/l3experimental/l3dt/l3dt.dtx
index 984739d1efd..63a3218f06a 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3dt/l3dt.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3dt/l3dt.dtx
@@ -1,6 +1,6 @@
% \iffalse meta-comment
%
-%% File l3dt.dtx Copyright (C) 2011,2012 The LaTeX3 Project
+%% File l3dt.dtx Copyright (C) 2011-2013 The LaTeX3 Project
%%
%% It may be distributed and/or modified under the conditions of the
%% LaTeX Project Public License (LPPL), either version 1.3c of this
@@ -36,7 +36,7 @@
%
%<*driver|package>
\RequirePackage{expl3}
-\GetIdInfo$Id: l3dt.dtx 4190 2012-09-03 00:23:31Z bruno $
+\GetIdInfo$Id: l3dt.dtx 4420 2013-01-08 20:00:04Z joseph $
{L3 Experimental data tables}
%</driver|package>
%<*driver>
@@ -1195,7 +1195,7 @@
}
% \end{macrocode}
% The second iteration is along the row. This is basically the same as
-% \cs{prop_if_in:NnTF} with the \cs{q_@@} in place of \cs{q_prop}.
+% \cs{prop_if_in:NnTF} with the \cs{q_@@} in place of \cs{q__prop}.
% \begin{macrocode}
\cs_new:Npn \@@_if_in_row:nn #1#2
{
@@ -1204,7 +1204,7 @@
}
\cs_new:Npn \@@_if_in_row:nwn #1 \q_@@ #2 \q_@@ #3
{
- \str_if_eq:xxTF {#1} {#2}
+ \str_if_eq_x:nnTF {#1} {#2}
{ \@@_if_in_row:N }
{ \@@_if_in_row:nwn {#1} }
}
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3flag.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3flag.dtx
index d3b17183671..64d23d43896 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3flag.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3flag.dtx
@@ -210,7 +210,7 @@
\else:
\exp_after:wN \use_none_delimit_by_q_stop:w
\fi:
- \cs_set_eq:cN { @@_#2_#1: } \c_undefined:D
+ \cs_set_eq:cN { @@_#2_#1: } \tex_undefined:D
\exp_after:wN \@@_clear:ww
\int_use:N \__int_eval:w \c_one + #1 ;
#2 \q_stop
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx
index 3d25a841a80..7260ca7667f 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx
@@ -1,6 +1,6 @@
% \iffalse meta-comment
%
-%% File: l3regex.dtx Copyright (C) 2011-2012 The LaTeX3 Project
+%% File: l3regex.dtx Copyright (C) 2011-2013 The LaTeX3 Project
%%
%% It may be distributed and/or modified under the conditions of the
%% LaTeX Project Public License (LPPL), either version 1.3c of this
@@ -35,7 +35,7 @@
%
%<*driver|package>
\RequirePackage{expl3}
-\GetIdInfo$Id: l3regex.dtx 4341 2012-11-27 08:39:42Z bruno $
+\GetIdInfo$Id: l3regex.dtx 4455 2013-01-22 20:38:19Z bruno $
{L3 Experimental regular expressions}
%</driver|package>
%<*driver>
@@ -320,7 +320,11 @@
% still be escaped without harm. Any escape sequence which matches a
% single character (|\d|, |\D|, \emph{etc.}) is supported in character
% classes. If the first character is |^|, then
-% the meaning of the character class is inverted. Ranges of characters
+% the meaning of the character class is inverted; |^| appearing anywhere
+% else in the range is not special. If the first character (possibly
+% following a leading |^|) is |]| then it does not need to be escaped
+% since ending the range there would make it empty.
+% Ranges of characters
% can be expressed using |-|, for instance, |[\D 0-5]| and |[^6-9]| are
% equivalent.
%
@@ -712,7 +716,7 @@
%<*package>
\ProvidesExplPackage
{\ExplFileName}{\ExplFileDate}{\ExplFileVersion}{\ExplFileDescription}
-\RequirePackage{l3tl-build, l3tl-analysis, l3flag, l3str}
+\RequirePackage{l3tl-build, l3tl-analysis, l3flag, l3str, l3str-convert}
%</package>
% \end{macrocode}
%
@@ -911,7 +915,7 @@
% This function makes showing regular expressions easier, and lets us
% define |\D| in terms of |\d| for instance. There is a subtlety: the
% end of the query is marked by $-2$, and will thus match |\D| and
-% other negated properties; this case is catched by another part of
+% other negated properties; this case is caught by another part of
% the code.
% \begin{macrocode}
\cs_new_protected:Npn \@@_item_reverse:n #1
@@ -1375,7 +1379,7 @@
{
\__msg_kernel_error:nnx { regex } { x-overflow } {#1}
\tl_set:Nx \l_@@_internal_b_tl
- { \if_false: } \fi: \@@_escape_loop:N
+ { \if_false: } \fi:
}
{
\char_set_lccode:nn { \c_zero } {#1}
@@ -1384,7 +1388,6 @@
\tl_set:Nx \l_@@_internal_b_tl
{ \if_false: } \fi:
\@@_escape_raw:N ^^@
- \@@_escape_loop:N
}
}
}
@@ -1414,7 +1417,7 @@
}
\cs_new:Npn \@@_escape_x_test_two:N #1
{
- \if_charcode:w \c_lbrace_str #1
+ \if_charcode:w \c_left_brace_str #1
\exp_after:wN \@@_escape_x_loop:N
\else:
\__str_hexadecimal_use:NTF #1
@@ -1454,7 +1457,7 @@
{ \@@_escape_x_loop:N }
{
;
- \exp_after:wN \token_if_eq_charcode:NNTF \c_rbrace_str #1
+ \exp_after:wN \token_if_eq_charcode:NNTF \c_right_brace_str #1
{ \@@_escape_loop:N }
{
\if_false: { \fi: }
@@ -1487,9 +1490,7 @@
% \end{itemize}
% The code is ugly, and highly based on magic numbers and the ascii
% codes of characters. This is mostly unavoidable for performance
-% reasons: testing for instance with \cs{__str_if_contains_char:nN}
-% would be much slower. Maybe the tests can be optimized a little
-% bit more.
+% reasons. Maybe the tests can be optimized a little bit more.
% Here, \enquote{alphanumeric} means \texttt{0}--\texttt{9},
% \texttt{A}--\texttt{Z}, \texttt{a}--\texttt{z};
% \enquote{special} character means non-alphanumeric
@@ -2111,17 +2112,17 @@
% brace, leading to the range $[a,\infty]$ or $[a,b]$, encoded as
% $\{a\}\{-1\}$ and $\{a\}\{b-a\}$.
% \begin{macrocode}
-\cs_new_protected:cpn { @@_compile_quantifier_ \c_lbrace_str :w }
+\cs_new_protected:cpn { @@_compile_quantifier_ \c_left_brace_str :w }
{
\@@_get_digits:NTFw \l_@@_internal_a_int
{ \@@_compile_quantifier_braced_auxi:w }
- { \@@_compile_quantifier_abort:xNN { \c_lbrace_str } }
+ { \@@_compile_quantifier_abort:xNN { \c_left_brace_str } }
}
\cs_new_protected:Npn \@@_compile_quantifier_braced_auxi:w #1#2
{
\str_case_x:nnn { #1 #2 }
{
- { \@@_compile_special:N \c_rbrace_str }
+ { \@@_compile_special:N \c_right_brace_str }
{
\exp_args:No \@@_compile_quantifier_lazyness:nnNN
{ \int_use:N \l_@@_internal_a_int } { 0 }
@@ -2135,28 +2136,28 @@
}
{
\@@_compile_quantifier_abort:xNN
- { \c_lbrace_str \int_use:N \l_@@_internal_a_int }
+ { \c_left_brace_str \int_use:N \l_@@_internal_a_int }
#1 #2
}
}
\cs_new_protected:Npn \@@_compile_quantifier_braced_auxii:w #1#2
{
\str_if_eq_x:nnTF
- { #1 #2 } { \@@_compile_special:N \c_rbrace_str }
+ { #1 #2 } { \@@_compile_special:N \c_right_brace_str }
{
\exp_args:No \@@_compile_quantifier_lazyness:nnNN
{ \int_use:N \l_@@_internal_a_int } { -1 }
}
{
\@@_compile_quantifier_abort:xNN
- { \c_lbrace_str \int_use:N \l_@@_internal_a_int , }
+ { \c_left_brace_str \int_use:N \l_@@_internal_a_int , }
#1 #2
}
}
\cs_new_protected:Npn \@@_compile_quantifier_braced_auxiii:w #1#2
{
\str_if_eq_x:nnTF
- { #1 #2 } { \@@_compile_special:N \c_rbrace_str }
+ { #1 #2 } { \@@_compile_special:N \c_right_brace_str }
{
\if_int_compare:w \l_@@_internal_a_int > \l_@@_internal_b_int
\__msg_kernel_error:nnxx { regex } { backwards-quantifier }
@@ -2173,7 +2174,7 @@
{
\@@_compile_quantifier_abort:xNN
{
- \c_lbrace_str
+ \c_left_brace_str
\int_use:N \l_@@_internal_a_int ,
\int_use:N \l_@@_internal_b_int
}
@@ -2856,7 +2857,7 @@
% to forbid nesting |\c|. Additionally, disable submatch tracking
% since groups don't escape the scope of |\c{...}|.
% \begin{macrocode}
-\cs_new_protected_nopar:cpn { @@_compile_c_ \c_lbrace_str :w }
+\cs_new_protected_nopar:cpn { @@_compile_c_ \c_left_brace_str :w }
{
\@@_compile:w
\@@_disable_submatches:
@@ -2875,7 +2876,7 @@
% dangling class or group). Then insert the corresponding test in the
% outer regex.
% \begin{macrocode}
-\cs_new_protected:cpn { @@_compile_ \c_rbrace_str : }
+\cs_new_protected:cpn { @@_compile_ \c_right_brace_str : }
{
\@@_if_in_cs:TF
{
@@ -2883,7 +2884,7 @@
\@@_compile_one:x
{ \@@_item_cs:n { \exp_not:o \l_@@_internal_regex } }
}
- { \exp_after:wN \@@_compile_raw:N \c_rbrace_str }
+ { \exp_after:wN \@@_compile_raw:N \c_right_brace_str }
}
% \end{macrocode}
% \end{macro}
@@ -2907,7 +2908,7 @@
\@@_if_in_class_or_catcode:TF
{ \@@_compile_raw_error:N u #1 #2 }
{
- \str_if_eq_x:nnTF {#1#2} { \@@_compile_special:N \c_lbrace_str }
+ \str_if_eq_x:nnTF {#1#2} { \@@_compile_special:N \c_left_brace_str }
{
\tl_set:Nx \l_@@_internal_a_tl { \if_false: } \fi:
\@@_compile_u_loop:NN
@@ -2925,7 +2926,7 @@
{
\token_if_eq_meaning:NNTF #1 \@@_compile_special:N
{
- \exp_after:wN \token_if_eq_charcode:NNTF \c_rbrace_str #2
+ \exp_after:wN \token_if_eq_charcode:NNTF \c_right_brace_str #2
{ \if_false: { \fi: } \@@_compile_u_end: }
{ #2 \@@_compile_u_loop:NN }
}
@@ -4733,7 +4734,7 @@
\tl_clear:N \l_@@_balance_tl
\@@_escape_use:nnnn
{
- \if_charcode:w \c_rbrace_str ##1
+ \if_charcode:w \c_right_brace_str ##1
\@@_replacement_rbrace:N \else: \__tl_build_one:n \fi: ##1
}
{ \@@_replacement_escaped:N ##1 }
@@ -4820,7 +4821,7 @@
% \begin{macrocode}
\cs_new_protected:Npn \@@_replacement_g:w #1#2
{
- \str_if_eq_x:nnTF { #1#2 } { \__tl_build_one:n \c_lbrace_str }
+ \str_if_eq_x:nnTF { #1#2 } { \__tl_build_one:n \c_left_brace_str }
{
\int_zero:N \l_@@_internal_a_int
\@@_replacement_g_digits:NN
@@ -4877,7 +4878,7 @@
}
{ \@@_replacement_error:NNN c #1#2 }
}
-\cs_new_protected_nopar:cpn { @@_replacement_c_ \c_lbrace_str :w }
+\cs_new_protected_nopar:cpn { @@_replacement_c_ \c_left_brace_str :w }
{
\if_case:w \l_@@_replacement_csnames_int
\__tl_build_one:n
@@ -4898,7 +4899,7 @@
% \begin{macrocode}
\cs_new_protected:Npn \@@_replacement_u:w #1#2
{
- \str_if_eq_x:nnTF { #1#2 } { \__tl_build_one:n \c_lbrace_str }
+ \str_if_eq_x:nnTF { #1#2 } { \__tl_build_one:n \c_left_brace_str }
{
\if_case:w \l_@@_replacement_csnames_int
\__tl_build_one:n { \exp_not:n { \exp_after:wN \exp_not:V \cs:w } }
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-convert.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-convert.dtx
new file mode 100644
index 00000000000..1810468ff7d
--- /dev/null
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-convert.dtx
@@ -0,0 +1,3623 @@
+% \iffalse meta-comment
+%
+%% File: l3str-convert.dtx Copyright (C) 2013 The LaTeX3 Project
+%%
+%% It may be distributed and/or modified under the conditions of the
+%% LaTeX Project Public License (LPPL), either version 1.3c of this
+%% license or (at your option) any later version. The latest version
+%% of this license is in the file
+%%
+%% http://www.latex-project.org/lppl.txt
+%%
+%% This file is part of the "l3experimental bundle" (The Work in LPPL)
+%% and all files in that bundle must be distributed together.
+%%
+%% The released version of this bundle is available from CTAN.
+%%
+%% -----------------------------------------------------------------------
+%%
+%% The development version of the bundle can be found at
+%%
+%% http://www.latex-project.org/svnroot/experimental/trunk/
+%%
+%% for those people who are interested.
+%%
+%%%%%%%%%%%
+%% NOTE: %%
+%%%%%%%%%%%
+%%
+%% Snapshots taken from the repository represent work in progress and may
+%% not work or may contain conflicting material! We therefore ask
+%% people _not_ to put them into distributions, archives, etc. without
+%% prior consultation with the LaTeX3 Project.
+%%
+%% -----------------------------------------------------------------------
+%
+%<*driver|package>
+\RequirePackage{expl3}
+\GetIdInfo$Id: l3str-convert.dtx 4339 2013-01-08 10:22:00Z bruno $
+ {L3 Experimental string encoding conversions}
+%</driver|package>
+%<*driver>
+\documentclass[full]{l3doc}
+\usepackage{amsmath}
+\begin{document}
+ \tableofcontents
+ \DocInput{\jobname.dtx}
+\end{document}
+%</driver>
+% \fi
+%
+%
+% \title{^^A
+% The \textsf{l3str-convert} package: string encoding conversions^^A
+% \thanks{This file describes v\ExplFileVersion,
+% last revised \ExplFileDate.}^^A
+% }
+%
+% \author{^^A
+% The \LaTeX3 Project\thanks
+% {^^A
+% E-mail:
+% \href{mailto:latex-team@latex-project.org}
+% {latex-team@latex-project.org}^^A
+% }^^A
+% }
+%
+% \date{Released \ExplFileDate}
+%
+% \maketitle
+%
+% \newcommand{\hexnum}[1]{\text{\texttt{\char`\"}#1}}
+% \begin{documentation}
+%
+% \section{Encoding and escaping schemes}
+%
+% Traditionally, string encodings only specify how strings of characters
+% should be stored as bytes. However, the resulting lists of bytes are
+% often to be used in contexts where only a restricted subset of bytes
+% are permitted (\emph{e.g.}, \textsc{pdf} string objects,
+% \textsc{url}s). Hence, storing a string of characters is done in two
+% steps.
+% \begin{itemize}
+% \item The code points (\enquote{character codes}) are expressed as
+% bytes following a given \enquote{encoding}. This can be
+% \textsc{utf-16}, \textsc{iso 8859-1}, \emph{etc.} See
+% Table~\ref{tab:encodings} for a list of supported
+% encodings.\footnote{Encodings and escapings will be added as they
+% are requested.}
+% \item Bytes are translated to \TeX{} tokens through a given
+% \enquote{escaping}. Those are defined for the most part by the
+% \texttt{pdf} file format. See Table~\ref{tab:escapings} for a
+% list of escaping methods supported.\footnotemark
+% \end{itemize}
+%
+% \begin{table}\centering
+% \caption{\label{tab:encodings}Supported encodings.
+% Non-alphanumeric characters are ignored,
+% and capital letters are lower-cased
+% before searching for the encoding in this list.}
+% \begin{tabular}{cc}
+% \toprule
+% \meta{Encoding} & description \\
+% \midrule
+% \texttt{utf8} & \textsc{utf-8} \\
+% \texttt{utf16} & \textsc{utf-16}, with byte-order mark \\
+% \texttt{utf16be} & \textsc{utf-16}, big-endian \\
+% \texttt{utf16le} & \textsc{utf-16}, little-endian \\
+% \texttt{utf32} & \textsc{utf-32}, with byte-order mark \\
+% \texttt{utf32be} & \textsc{utf-32}, big-endian \\
+% \texttt{utf32le} & \textsc{utf-32}, little-endian \\
+% \midrule
+% \texttt{iso88591}, \texttt{latin1} & \textsc{iso 8859-1} \\
+% \texttt{iso88592}, \texttt{latin2} & \textsc{iso 8859-2} \\
+% \texttt{iso88593}, \texttt{latin3} & \textsc{iso 8859-3} \\
+% \texttt{iso88594}, \texttt{latin4} & \textsc{iso 8859-4} \\
+% \texttt{iso88595} & \textsc{iso 8859-5} \\
+% \texttt{iso88596} & \textsc{iso 8859-6} \\
+% \texttt{iso88597} & \textsc{iso 8859-7} \\
+% \texttt{iso88598} & \textsc{iso 8859-8} \\
+% \texttt{iso88599}, \texttt{latin5} & \textsc{iso 8859-9} \\
+% \texttt{iso885910}, \texttt{latin6} & \textsc{iso 8859-10} \\
+% \texttt{iso885911} & \textsc{iso 8859-11} \\
+% \texttt{iso885913}, \texttt{latin7} & \textsc{iso 8859-13} \\
+% \texttt{iso885914}, \texttt{latin8} & \textsc{iso 8859-14} \\
+% \texttt{iso885915}, \texttt{latin9} & \textsc{iso 8859-15} \\
+% \texttt{iso885916}, \texttt{latin10} & \textsc{iso 8859-16} \\
+% \midrule
+% Empty & Native (Unicode) string. \\
+% \bottomrule
+% \end{tabular}
+% \end{table}
+%
+% \begin{table}\centering
+% \caption{\label{tab:escapings}Supported escapings.
+% Non-alphanumeric characters are ignored,
+% and capital letters are lower-cased
+% before searching for the escaping in this list.}
+% \begin{tabular}{cc}
+% \toprule
+% \meta{Escaping} & description \\
+% \midrule
+% \texttt{bytes}, or empty
+% & arbitrary bytes \\
+% \texttt{hex}, \texttt{hexadecimal}
+% & byte $=$ two hexadecimal digits \\
+% \texttt{name}
+% & see \tn{pdfescapename} \\
+% \texttt{string}
+% & see \tn{pdfescapestring} \\
+% \texttt{url}
+% & encoding used in \textsc{url}s \\
+% \bottomrule
+% \end{tabular}
+% \end{table}
+%
+% \section{Conversion functions}
+%
+% \begin{function}{\str_set_convert:Nnnn, \str_gset_convert:Nnnn}
+% \begin{syntax}
+% \cs{str_set_convert:Nnnn} \meta{str~var} \Arg{string} \Arg{name~1} \Arg{name~2}
+% \end{syntax}
+% This function converts the \meta{string} from the encoding given by
+% \meta{name~1} to the encoding given by \meta{name~2}, and stores the
+% result in the \meta{str~var}. Each \meta{name} can have the form
+% \meta{encoding} or \meta{encoding}\texttt{/}\meta{escaping}, where
+% the possible values of \meta{encoding} and \meta{escaping} are given
+% in Tables~\ref{tab:encodings} and~\ref{tab:escapings}, respectively.
+% The default escaping is to input and output bytes directly. The
+% special case of an empty \meta{name} indicates the use of
+% \enquote{native} strings, 8-bit for pdf\TeX{}, and Unicode strings
+%   for \XeTeX{} and \LuaTeX{}.
+%
+% For example,
+% \begin{verbatim}
+% \str_set_convert:Nnnn \l_foo_str { Hello! } { } { utf16/hex }
+% \end{verbatim}
+% results in the variable \cs{l_foo_str} holding the string
+% \texttt{FEFF00480065006C006C006F0021}. This is obtained by
+% converting each character in the (native) string \texttt{Hello!} to
+% the \textsc{utf-16} encoding, and expressing each byte as a pair of
+% hexadecimal digits. Note the presence of a (big-endian) byte order
+% mark \hexnum{FEFF}, which can be avoided by specifying the encoding
+% \texttt{utf16be/hex}.
+%
+% An error is raised if the \meta{string} is not valid according to
+% the \meta{escaping~1} and \meta{encoding~1}, or if it cannot be
+% reencoded in the \meta{encoding~2} and \meta{escaping~2} (for
+% instance, if a character does not exist in the \meta{encoding~2}).
+% Erroneous input is replaced by the Unicode replacement character
+% \hexnum{FFFD}, and characters which cannot be reencoded are replaced
+% by either the replacement character \hexnum{FFFD} if it exists in
+% the \meta{encoding~2}, or an encoding-specific replacement
+% character, or the question mark character.
+% \end{function}
+%
+% \begin{function}[TF]{\str_set_convert:Nnnn, \str_gset_convert:Nnnn}
+% \begin{syntax}
+% \cs{str_set_convert:NnnnTF} \meta{str~var} \Arg{string} \Arg{name~1} \Arg{name~2} \Arg{true code} \Arg{false code}
+% \end{syntax}
+% As \cs{str_set_convert:Nnnn}, converts the \meta{string} from the
+% encoding given by \meta{name~1} to the encoding given by
+%   \meta{name~2}, and assigns the result to \meta{str~var}. Unlike
+%   \cs{str_set_convert:Nnnn}, the conditional variant does not raise
+%   errors when the \meta{string} is not valid according to the
+% \meta{name~1} encoding, or cannot be expressed in the \meta{name~2}
+% encoding. Instead, the \meta{false code} is performed.
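+%
+%   For example, a minimal sketch (the variable name and the fallback
+%   code here are purely illustrative):
+% \begin{verbatim}
+%   \str_set_convert:NnnnTF \l_foo_str { Hello! } { } { utf16/hex }
+%     { } { \tl_clear:N \l_foo_str }
+% \end{verbatim}
+%   Here the \meta{false code} simply empties \cs{l_foo_str} whenever
+%   the conversion cannot be carried out.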
+% \end{function}
+%
+% \begin{variable}{\c_max_char_int}
+% The maximum valid character code, $255$ for pdf\TeX{}, and $1114111$
+% for \XeTeX{} and \LuaTeX{}.
+% \end{variable}
+%
+% \section{Internal string functions}
+%
+% \begin{function}{\__str_gset_other:Nn}
+% \begin{syntax}
+% \cs{__str_gset_other:Nn} \meta{tl~var} \Arg{token list}
+% \end{syntax}
+% Converts the \meta{token list} to an \meta{other string}, where
+% spaces have category code \enquote{other}, and assigns the result to
+% the \meta{tl~var}, globally.
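+%   A minimal sketch of a call (the target variable is merely a
+%   scratch variable chosen for illustration):
+% \begin{verbatim}
+%   \__str_gset_other:Nn \g_tmpa_tl { Hello,~world! }
+% \end{verbatim}
+%   after which \cs{g_tmpa_tl} holds the characters of
+%   \texttt{Hello, world!} with category code \enquote{other},
+%   including the space.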
+% \end{function}
+%
+% \begin{function}{\__str_hexadecimal_use:NTF}
+% \begin{syntax}
+% \cs{__str_hexadecimal_use:NTF} \meta{token} \Arg{true code} \Arg{false code}
+% \end{syntax}
+% If the \meta{token} is a hexadecimal digit (upper case or lower
+% case), its upper-case version is left in the input stream,
+% \emph{followed} by the \meta{true code}. Otherwise, the \meta{false
+% code} is left in the input stream.
+% \begin{texnote}
+% This function fails on some inputs if the escape character is a
+% hexadecimal digit. We are thus careful to set the escape
+% character to a known (safe) value before using it.
+% \end{texnote}
+% \end{function}
+%
+% \begin{function}[EXP]{\__str_output_byte:n}
+% \begin{syntax}
+% \cs{__str_output_byte:n} \Arg{intexpr}
+% \end{syntax}
+% Expands to a character token with category other and character code
+% equal to the value of \meta{intexpr}. The value of \meta{intexpr}
+% must be in the range $[-1, 255]$, and any value outside this range
+% results in undefined behaviour. The special value $-1$ is used to
+% produce an empty result.
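+%   As a small illustration (the input values are chosen arbitrarily):
+% \begin{verbatim}
+%   \__str_output_byte:n { 65 }  % a category-other "A"
+%   \__str_output_byte:n { -1 }  % empty result
+% \end{verbatim}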
+% \end{function}
+%
+% \section{Possibilities, and things to do}
+%
+% Encoding/escaping-related tasks.
+% \begin{itemize}
+% \item Describe the internal format in the code comments. Refuse code
+% points in $[\hexnum{D800}, \hexnum{DFFF}]$ in the internal
+% representation?
+% \item Add documentation about each encoding and escaping method, and
+% add examples.
+% \item The \texttt{hex} unescaping should raise an error for
+%     strings with an odd number of tokens.
+% \item Decide what bytes should be escaped in the \texttt{url}
+% escaping. Perhaps |!'()*-./0123456789_| are safe, and all other
+% characters should be escaped?
+% \item Automate generation of 8-bit mapping files.
+% \item Change the framework for 8-bit encodings: for decoding from
+% 8-bit to Unicode, use $256$ integer registers; for encoding, use a
+% tree-box.
+% \item More encodings (see Heiko's \pkg{stringenc}). CESU?
+% \item More escapings: shell escapes, lua escapes, etc?
+% \end{itemize}
+%
+% \end{documentation}
+%
+% \begin{implementation}
+%
+% \section{\pkg{l3str} implementation}
+%
+% \begin{macrocode}
+%<*initex|package>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<@@=str>
+% \end{macrocode}
+%
+% \begin{macrocode}
+\ProvidesExplPackage
+ {\ExplFileName}{\ExplFileDate}{\ExplFileVersion}{\ExplFileDescription}
+\RequirePackage{l3str,l3tl-analysis,l3tl-build,l3flag}
+% \end{macrocode}
+%
+% \subsection{Helpers}
+%
+% \subsubsection{A function unrelated to strings}
+%
+% \begin{macro}[EXP,aux]{\use_ii_i:nn}
+% A function used to swap its arguments.
+% \begin{macrocode}
+\cs_if_exist:NF \use_ii_i:nn
+ { \cs_new:Npn \use_ii_i:nn #1#2 { #2 #1 } }
+% \end{macrocode}
+% \end{macro}
+%
+% \subsubsection{Variables and constants}
+%
+% \begin{variable}{\c_max_char_int}
+% The maximum valid character code is $255$ for pdf\TeX{}, and
+% $1114111$ for other engines.
+% \begin{macrocode}
+\int_const:Nn \c_max_char_int
+ { \pdftex_if_engine:TF { "FF } { "10FFFF } }
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{macro}{\@@_tmp:w}
+% \begin{variable}{\l_@@_internal_int}
+% \begin{variable}{\l_@@_internal_tl}
+% Internal scratch space for some functions.
+% \begin{macrocode}
+\cs_new_protected_nopar:Npn \@@_tmp:w { }
+\tl_new:N \l_@@_internal_tl
+\int_new:N \l_@@_internal_int
+% \end{macrocode}
+% \end{variable}
+% \end{variable}
+% \end{macro}
+%
+% \begin{variable}{\g_@@_result_tl}
+% The \cs{g_@@_result_tl} variable is used to hold the result of
+% various internal string operations (mostly conversions) which are
+% typically performed in a group. The variable is global so that it
+% remains defined outside the group, to be assigned to a user-provided
+% variable.
+% \begin{macrocode}
+\tl_new:N \g_@@_result_tl
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{variable}{\c_@@_replacement_char_int}
+% When converting, invalid bytes are replaced by the Unicode
+% replacement character \hexnum{FFFD}.
+% \begin{macrocode}
+\int_const:Nn \c_@@_replacement_char_int { "FFFD }
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{variable}
+% {
+% \c_forty_eight, \c_fifty_eight, \c_sixty_five, \c_ninety_one,
+% \c_ninety_seven, \c_one_hundred_twenty_three,
+% \c_one_hundred_twenty_seven
+% }
+% We declare here some integer values which delimit ranges of \textsc{ascii}
+% characters of various types. This is mostly used in \pkg{l3regex}.
+% \begin{macrocode}
+\int_const:Nn \c_forty_eight { 48 }
+\int_const:Nn \c_fifty_eight { 58 }
+\int_const:Nn \c_sixty_five { 65 }
+\int_const:Nn \c_ninety_one { 91 }
+\int_const:Nn \c_ninety_seven { 97 }
+\int_const:Nn \c_one_hundred_twenty_three { 123 }
+\int_const:Nn \c_one_hundred_twenty_seven { 127 }
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{variable}{\g_@@_alias_prop}
+% To avoid needing one file per encoding/escaping alias, we keep track
+% of those in a property list.
+% \begin{macrocode}
+\prop_new:N \g_@@_alias_prop
+\prop_gput:Nnn \g_@@_alias_prop { latin1 } { iso88591 }
+\prop_gput:Nnn \g_@@_alias_prop { latin2 } { iso88592 }
+\prop_gput:Nnn \g_@@_alias_prop { latin3 } { iso88593 }
+\prop_gput:Nnn \g_@@_alias_prop { latin4 } { iso88594 }
+\prop_gput:Nnn \g_@@_alias_prop { latin5 } { iso88599 }
+\prop_gput:Nnn \g_@@_alias_prop { latin6 } { iso885910 }
+\prop_gput:Nnn \g_@@_alias_prop { latin7 } { iso885913 }
+\prop_gput:Nnn \g_@@_alias_prop { latin8 } { iso885914 }
+\prop_gput:Nnn \g_@@_alias_prop { latin9 } { iso885915 }
+\prop_gput:Nnn \g_@@_alias_prop { latin10 } { iso885916 }
+\prop_gput:Nnn \g_@@_alias_prop { utf16le } { utf16 }
+\prop_gput:Nnn \g_@@_alias_prop { utf16be } { utf16 }
+\prop_gput:Nnn \g_@@_alias_prop { utf32le } { utf32 }
+\prop_gput:Nnn \g_@@_alias_prop { utf32be } { utf32 }
+\prop_gput:Nnn \g_@@_alias_prop { hexadecimal } { hex }
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{variable}{\g_@@_error_bool}
+% In conversion functions with a built-in conditional, errors are not
+% reported directly to the user, but the information is collected in
+% this boolean, used at the end to decide on which branch of the
+% conditional to take.
+% \begin{macrocode}
+\bool_new:N \g_@@_error_bool
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{variable}{str_byte, str_error}
+% Conversions from one \meta{encoding}/\meta{escaping} pair to another
+% are done within \texttt{x}-expanding assignments. Errors are
+% signalled by raising the relevant flag.
+% \begin{macrocode}
+\flag_new:n { str_byte }
+\flag_new:n { str_error }
+% \end{macrocode}
+% \end{variable}
+%
+% \subsubsection{Escaping spaces}
+%
+% \begin{macro}[int]{\@@_gset_other:Nn}
+% \begin{macro}[aux,EXP]{\@@_gset_other_loop:w}
+% \begin{macro}[aux,EXP]{\@@_gset_other_end:w}
+% This function could be done by using \cs{@@_to_other:n} within
+% an \texttt{x}-expansion, but that would take time quadratic in the
+% size of the string. Instead, we can \enquote{leave the result behind
+% us} in the input stream, to be captured by the expanding
+% assignment. This gives us linear time.
+% \begin{macrocode}
+\group_begin:
+\char_set_lccode:nn { `\* } { `\ }
+\char_set_lccode:nn { `\A } { `\A }
+\tl_to_lowercase:n
+ {
+ \group_end:
+ \cs_new_protected:Npn \@@_gset_other:Nn #1#2
+ {
+ \tl_gset:Nx #1
+ {
+ \exp_after:wN \@@_gset_other_loop:w \tl_to_str:n {#2} ~ %
+ A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ \q_stop
+ }
+ }
+ \cs_new:Npn \@@_gset_other_loop:w
+ #1 ~ #2 ~ #3 ~ #4 ~ #5 ~ #6 ~ #7 ~ #8 ~ #9 ~
+ {
+ \if_meaning:w A #9
+ \@@_gset_other_end:w
+ \fi:
+ #1 * #2 * #3 * #4 * #5 * #6 * #7 * #8 * #9
+ \@@_gset_other_loop:w *
+ }
+ \cs_new:Npn \@@_gset_other_end:w \fi: #1 * A #2 \q_stop
+ { \fi: #1 }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{String conditionals}
+%
+% \begin{macro}[EXP]{\@@_if_contains_char:NNT, \@@_if_contains_char:NNTF}
+% \begin{macro}[EXP]{\@@_if_contains_char:nNTF}
+% \begin{macro}[EXP,aux]{\@@_if_contains_char_aux:NN}
+% \begin{macro}[EXP,aux]{\@@_if_contains_char_true:}
+% \begin{syntax}
+% \cs{@@_if_contains_char:nNTF} \Arg{token list} \meta{char}
+% \end{syntax}
+% Expects the \meta{token list} to be an \meta{other string}: the
+% caller is responsible for ensuring that no (too-)special catcodes
+% remain. Spaces with catcode $10$ are ignored.
+% Loop over the characters of the string, comparing character codes.
+% The loop is broken if character codes match. Otherwise we return
+% \enquote{false}.
+% \begin{macrocode}
+\prg_new_conditional:Npnn \@@_if_contains_char:NN #1#2 { T , TF }
+ {
+ \exp_after:wN \@@_if_contains_char_aux:NN \exp_after:wN #2
+ #1 { \__prg_break:n { ? \fi: } }
+ \__prg_break_point:
+ \prg_return_false:
+ }
+\prg_new_conditional:Npnn \@@_if_contains_char:nN #1#2 { TF }
+ {
+ \@@_if_contains_char_aux:NN #2 #1 { \__prg_break:n { ? \fi: } }
+ \__prg_break_point:
+ \prg_return_false:
+ }
+\cs_new:Npn \@@_if_contains_char_aux:NN #1#2
+ {
+ \if_charcode:w #1 #2
+ \exp_after:wN \@@_if_contains_char_true:
+ \fi:
+ \@@_if_contains_char_aux:NN #1
+ }
+\cs_new_nopar:Npn \@@_if_contains_char_true:
+ { \__prg_break:n { \prg_return_true: \use_none:n } }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[aux, rEXP]{\@@_octal_use:NTF}
+% \begin{syntax}
+% \cs{@@_octal_use:NTF} \meta{token} \Arg{true code} \Arg{false code}
+% \end{syntax}
+% If the \meta{token} is an octal digit, it is left in the input
+% stream, \emph{followed} by the \meta{true code}. Otherwise, the
+% \meta{false code} is left in the input stream.
+% \begin{texnote}
+% This function will fail if the escape character is an octal
+% digit. We are thus careful to set the escape character to a known
+% value before using it.
+% \end{texnote}
+% \TeX{} dutifully detects octal digits for us: if |#1| is an octal
+% digit, then the right-hand side of the comparison is |'1#1|, greater
+% than $1$. Otherwise, the right-hand side stops as |'1|, and the
+% conditional takes the \texttt{false} branch.
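+% For instance, |7| yields the comparison |1 < '17|, that is $1<15$,
+% which is true; a non-octal token such as |x| leaves only |'1|, and
+% $1<1$ is false.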
+% \begin{macrocode}
+\prg_new_conditional:Npnn \@@_octal_use:N #1 { TF }
+ {
+ \if_int_compare:w \c_one < '1 \token_to_str:N #1 \exp_stop_f:
+ #1 \prg_return_true:
+ \else:
+ \prg_return_false:
+ \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[aux, rEXP]{\@@_hexadecimal_use:NTF}
+% \TeX{} detects uppercase hexadecimal digits for us (see
+% \cs{@@_octal_use:NTF}), but not the lowercase letters, which we
+% need to detect and replace by their uppercase counterpart.
+% \begin{macrocode}
+\prg_new_conditional:Npnn \@@_hexadecimal_use:N #1 { TF }
+ {
+ \if_int_compare:w \c_two < "1 \token_to_str:N #1 \exp_stop_f:
+ #1 \prg_return_true:
+ \else:
+ \if_case:w \__int_eval:w
+ \exp_after:wN ` \token_to_str:N #1 - `a
+ \__int_eval_end:
+ A
+ \or: B
+ \or: C
+ \or: D
+ \or: E
+ \or: F
+ \else:
+ \prg_return_false:
+ \exp_after:wN \use_none:n
+ \fi:
+ \prg_return_true:
+ \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+%
+% \subsection{Conversions}
+%
+% \subsubsection{Producing one byte or character}
+%
+% \begin{variable}{\c_@@_byte_0_tl, \c_@@_byte_1_tl, \c_@@_byte_255_tl}
+% \begin{variable}{\c_@@_byte_-1_tl}
+% For each integer $N$ in the range $[0,255]$, we create a constant
+% token list which holds three character tokens with category code
+% other: the character with character code $N$, followed by the
+% representation of $N$ as two hexadecimal digits. The value $-1$ is
+% given a default token list which ensures that later functions give
+% an empty result for the input $-1$.
+% \begin{macrocode}
+\group_begin:
+ \char_set_catcode_other:n { \c_zero }
+ \tl_set:Nx \l_@@_internal_tl { \tl_to_str:n { 0123456789ABCDEF } }
+ \exp_args:No \tl_map_inline:nn { \l_@@_internal_tl " }
+ { \char_set_lccode:nn {`#1} { \c_zero } }
+ \tl_map_inline:Nn \l_@@_internal_tl
+ {
+ \tl_map_inline:Nn \l_@@_internal_tl
+ {
+ \char_set_lccode:nn { \c_zero } {"#1##1}
+ \tl_to_lowercase:n
+ {
+ \tl_const:cx
+ { c_@@_byte_ \int_eval:n {"#1##1} _tl }
+ { ^^@ #1 ##1 }
+ }
+ }
+ }
+\group_end:
+\tl_const:cn { c_@@_byte_-1_tl } { { } \use_none:n { } }
+% \end{macrocode}
+% \end{variable}
+% \end{variable}
+%
+% \begin{macro}[int, EXP]{\@@_output_byte:n}
+% \begin{macro}[int, EXP]{\@@_output_byte:w}
+% \begin{macro}[int, EXP]{\@@_output_hexadecimal:n}
+% \begin{macro}[int, EXP]{\@@_output_hexadecimal:w}
+% \begin{macro}[int, EXP]{\@@_output_end:}
+% Those functions must be used carefully: feeding them a value outside
+% the range $[-1,255]$ will attempt to use the undefined token list
+% variable \cs{c_@@_byte_\meta{number}_tl}. Assuming that the
+% argument is in the right range, we expand the corresponding token
+% list, and pick either the byte (first token) or the hexadecimal
+% representations (second and third tokens). The value $-1$ produces
+% an empty result in both cases.
+% \begin{macrocode}
+\cs_new:Npn \@@_output_byte:n #1
+ { \@@_output_byte:w #1 \@@_output_end: }
+\cs_new_nopar:Npn \@@_output_byte:w
+ {
+ \exp_after:wN \exp_after:wN
+ \exp_after:wN \use_i:nnn
+ \cs:w c_@@_byte_ \int_use:N \__int_eval:w
+ }
+\cs_new:Npn \@@_output_hexadecimal:n #1
+ { \@@_output_hexadecimal:w #1 \@@_output_end: }
+\cs_new_nopar:Npn \@@_output_hexadecimal:w
+ {
+ \exp_after:wN \exp_after:wN
+ \exp_after:wN \use_none:n
+ \cs:w c_@@_byte_ \int_use:N \__int_eval:w
+ }
+\cs_new_nopar:Npn \@@_output_end:
+ { \__int_eval_end: _tl \cs_end: }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int, rEXP]{\@@_output_byte_pair_be:n}
+% \begin{macro}[int, rEXP]{\@@_output_byte_pair_le:n}
+% \begin{macro}[aux, rEXP]{\@@_output_byte_pair:nnN}
+% Convert a number in the range $[0,65535]$ to a pair of bytes, either
+% big-endian or little-endian.
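+% For instance, the input $\hexnum{4142}$ yields the bytes
+% $\hexnum{41}$~$\hexnum{42}$ (the characters |AB|) with the
+% big-endian variant, and $\hexnum{42}$~$\hexnum{41}$ (|BA|) with the
+% little-endian one.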
+% \begin{macrocode}
+\cs_new:Npn \@@_output_byte_pair_be:n #1
+ {
+ \exp_args:Nf \@@_output_byte_pair:nnN
+ { \int_div_truncate:nn { #1 } { "100 } } {#1} \use:nn
+ }
+\cs_new:Npn \@@_output_byte_pair_le:n #1
+ {
+ \exp_args:Nf \@@_output_byte_pair:nnN
+ { \int_div_truncate:nn { #1 } { "100 } } {#1} \use_ii_i:nn
+ }
+\cs_new:Npn \@@_output_byte_pair:nnN #1#2#3
+ {
+ #3
+ { \@@_output_byte:n { #1 } }
+ { \@@_output_byte:n { #2 - #1 * "100 } }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsubsection{Mapping functions for conversions}
+%
+% \begin{macro}{\@@_convert_gmap:N}
+% \begin{macro}[aux, rEXP]{\@@_convert_gmap_loop:NN}
+% This maps the function |#1| over all characters in
+% \cs{g_@@_result_tl}, which should be a byte string in most cases,
+% sometimes a native string.
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_convert_gmap:N #1
+ {
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN \@@_convert_gmap_loop:NN
+ \exp_after:wN #1
+ \g_@@_result_tl { ? \__prg_break: }
+ \__prg_break_point:
+ }
+ }
+\cs_new:Npn \@@_convert_gmap_loop:NN #1#2
+ {
+ \use_none:n #2
+ #1#2
+ \@@_convert_gmap_loop:NN #1
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}{\@@_convert_gmap_internal:N}
+% \begin{macro}[aux, rEXP]{\@@_convert_gmap_internal_loop:Nw}
+% This maps the function |#1| over all character codes in
+% \cs{g_@@_result_tl}, which must be in the internal representation.
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_convert_gmap_internal:N #1
+ {
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN \@@_convert_gmap_internal_loop:Nww
+ \exp_after:wN #1
+ \g_@@_result_tl \s__tl \q_stop \__prg_break: \s__tl
+ \__prg_break_point:
+ }
+ }
+\cs_new:Npn \@@_convert_gmap_internal_loop:Nww #1 #2 \s__tl #3 \s__tl
+ {
+ \use_none_delimit_by_q_stop:w #3 \q_stop
+ #1 {#3}
+ \@@_convert_gmap_internal_loop:Nww #1
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \subsubsection{Error-reporting during conversion}
+%
+% \begin{macro}[int]{\@@_if_flag_error:nnx}
+% \begin{macro}[aux]{\@@_if_flag_no_error:nnx}
+% When converting using the function \cs{str_set_convert:Nnnn}, errors
+% should be reported to the user after each step in the
+% conversion. Errors are signalled by raising some flag (typically
+% \texttt{@@_error}), so here we test that flag: if it is raised,
+% give the user an error, otherwise remove the arguments. On the other
+% hand, in the conditional functions \cs{str_set_convert:NnnnTF},
+% errors should be suppressed. This is done by changing
+% \cs{@@_if_flag_error:nnx} into \cs{@@_if_flag_no_error:nnx}
+% locally.
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_if_flag_error:nnx #1
+ {
+ \flag_if_raised:nTF {#1}
+ { \__msg_kernel_error:nnx { str } }
+ { \use_none:nn }
+ }
+\cs_new_protected:Npn \@@_if_flag_no_error:nnx #1#2#3
+ { \flag_if_raised:nT {#1} { \bool_gset_true:N \g_@@_error_bool } }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_if_flag_times:nT}
+% At the end of each conversion step, we raise all relevant errors as
+% one error message, built on the fly. The height of each flag
+% indicates how many times a given error was encountered. This
+% function prints |#2| followed by the number of occurrences of an
+% error if it occurred, nothing otherwise.
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_if_flag_times:nT #1#2
+ { \flag_if_raised:nT {#1} { #2~(x \flag_height:n {#1} ) } }
+% \end{macrocode}
+% \end{macro}
+%
+% \subsubsection{Framework for conversions}
+%
+% Most functions in this module expect to be working with
+% \enquote{native} strings. Strings can also be stored as bytes, in one
+% of many encodings, for instance \textsc{utf8}. The bytes themselves
+% can be expressed in various ways in terms of \TeX{} tokens, for
+% instance as pairs of hexadecimal digits. The questions of going from
+% arbitrary Unicode code points to bytes, and from bytes to tokens are
+% mostly independent.
+%
+% Conversions are done in four steps:
+% \begin{itemize}
+% \item \enquote{unescape} produces a string of bytes;
+% \item \enquote{decode} takes in a string of bytes, and converts it
+% to a list of Unicode characters in an internal representation,
+% with items of the form
+% \begin{quote}
+% \meta{bytes} \cs{s__tl} \meta{Unicode code point} \cs{s__tl}
+% \end{quote}
+% where we have collected the \meta{bytes} which combined to form
+% this particular Unicode character, and the \meta{Unicode code
+% point} is in the range $[0,\hexnum{10FFFF}]$.
+% \item \enquote{encode} encodes the internal list of code points as a
+% byte string in the new encoding;
+% \item \enquote{escape} escapes bytes as requested.
+% \end{itemize}
+% The process is modified in case one of the encodings is empty (or the
+% conversion function has been set equal to the empty encoding because
+% it was not found): then the unescape or escape step is ignored, and
+% the decode or encode steps work on tokens instead of bytes. Otherwise,
+% each step must ensure that it passes a correct byte string or internal
+% string to the next step.
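+%
+% As a concrete illustration (using the \enquote{decode} step for
+% native strings described below), the native string |ab| is
+% represented internally as
+% \begin{verbatim}
+% a \s__tl 97 \s__tl b \s__tl 98 \s__tl
+% \end{verbatim}
+% where $97$ and $98$ are the code points of |a| and |b|.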
+%
+% \begin{macro}{\str_set_convert:Nnnn, \str_gset_convert:Nnnn}
+% \begin{macro}[TF]{\str_set_convert:Nnnn, \str_gset_convert:Nnnn}
+% \begin{macro}[aux]{\@@_convert:nNNnnn}
+% The input string is stored in \cs{g_@@_result_tl}, then we:
+% unescape and decode; encode and escape; exit the group and store the
+% result in the user's variable. The various conversion functions all
+% act on \cs{g_@@_result_tl}. Errors are silenced for the conditional
+% functions by redefining \cs{@@_if_flag_error:nnx} locally.
+% \begin{macrocode}
+\cs_new_protected_nopar:Npn \str_set_convert:Nnnn
+ { \@@_convert:nNNnnn { } \tl_set_eq:NN }
+\cs_new_protected_nopar:Npn \str_gset_convert:Nnnn
+ { \@@_convert:nNNnnn { } \tl_gset_eq:NN }
+\prg_new_protected_conditional:Npnn
+ \str_set_convert:Nnnn #1#2#3#4 { T , F , TF }
+ {
+ \bool_gset_false:N \g_@@_error_bool
+ \@@_convert:nNNnnn
+ { \cs_set_eq:NN \@@_if_flag_error:nnx \@@_if_flag_no_error:nnx }
+ \tl_set_eq:NN #1 {#2} {#3} {#4}
+ \bool_if:NTF \g_@@_error_bool \prg_return_false: \prg_return_true:
+ }
+\prg_new_protected_conditional:Npnn
+ \str_gset_convert:Nnnn #1#2#3#4 { T , F , TF }
+ {
+ \bool_gset_false:N \g_@@_error_bool
+ \@@_convert:nNNnnn
+ { \cs_set_eq:NN \@@_if_flag_error:nnx \@@_if_flag_no_error:nnx }
+ \tl_gset_eq:NN #1 {#2} {#3} {#4}
+ \bool_if:NTF \g_@@_error_bool \prg_return_false: \prg_return_true:
+ }
+\cs_new_protected:Npn \@@_convert:nNNnnn #1#2#3#4#5#6
+ {
+ \group_begin:
+ #1
+ \@@_gset_other:Nn \g_@@_result_tl {#4}
+ \exp_after:wN \@@_convert:wwwnn
+ \tl_to_str:n {#5} /// \q_stop
+ { decode } { unescape }
+ \prg_do_nothing:
+ \@@_convert_decode_:
+ \exp_after:wN \@@_convert:wwwnn
+ \tl_to_str:n {#6} /// \q_stop
+ { encode } { escape }
+ \use_ii_i:nn
+ \@@_convert_encode_:
+ \group_end:
+ #2 #3 \g_@@_result_tl
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[aux]{\@@_convert:wwwnn}
+% \begin{macro}[aux]{\@@_convert:NNnNN}
+% The task of \cs{@@_convert:wwwnn} is to split
+% \meta{encoding}/\meta{escaping} pairs into their components, |#1|
+% and |#2|. Calls to \cs{@@_convert:nnn} ensure that the
+% corresponding conversion functions are defined. The third auxiliary
+% does the main work.
+% \begin{itemize}
+% \item |#1| is the encoding conversion function;
+% \item |#2| is the escaping function;
+% \item |#3| is the escaping name for use in an error message;
+% \item |#4| is \cs{prg_do_nothing:} for unescaping/decoding, and
+% \cs{use_ii_i:nn} for encoding/escaping;
+% \item |#5| is the default encoding function (either
+% \enquote{decode} or \enquote{encode}), for which there should be
+% no escaping.
+% \end{itemize}
+% Let us ignore the native encoding for a second. In the
+% unescaping/decoding phase, we want to do |#2#1| in this order, and
+% in the encoding/escaping phase, the order should be reversed:
+% |#4#2#1| does exactly that. If one of the encodings is the default
+% (native), then the escaping should be ignored, with an error if any
+% was given, and only the encoding, |#1|, should be performed.
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_convert:wwwnn
+ #1 / #2 // #3 \q_stop #4#5
+ {
+ \@@_convert:nnn {enc} {#4} {#1}
+ \@@_convert:nnn {esc} {#5} {#2}
+ \exp_args:Ncc \@@_convert:NNnNN
+ { @@_convert_#4_#1: } { @@_convert_#5_#2: } {#2}
+ }
+\cs_new_protected:Npn \@@_convert:NNnNN #1#2#3#4#5
+ {
+ \if_meaning:w #1 #5
+ \tl_if_empty:nF {#3}
+ { \__msg_kernel_error:nnx { str } { native-escaping } {#3} }
+ #1
+ \else:
+ #4 #2 #1
+ \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[aux]{\@@_convert:nnn}
+% \begin{macro}[aux]{\@@_convert:nnnn}
+% The arguments of \cs{@@_convert:nnn} are: \texttt{enc} or
+% \texttt{esc}, used to build filenames, the type of the conversion
+% (unescape, decode, encode, escape), and the encoding or escaping
+% name. If the function is already defined, no need to do anything.
+% Otherwise, filter out all non-alphanumerics in the name, and
+% lowercase it. Feed that, and the same three arguments, to
+% \cs{@@_convert:nnnn}. The task is then to make sure that the
+% conversion function |#3_#1| corresponding to the type |#3| and
+% filtered name |#1| is defined, then set our initial conversion
+% function |#3_#4| equal to that.
+%
+% How do we get the |#3_#1| conversion to be defined if it isn't?
+% Two main cases.
+%
+% First, if |#1| is a key in \cs{g_@@_alias_prop}, then the value
+% \cs{l_@@_internal_tl} tells us what file to load. Loading is
+% skipped if the file was already read, \emph{i.e.}, if the conversion
+% command based on \cs{l_@@_internal_tl} already exists. Otherwise,
+% try to load the file; if that fails, there is an error, use the
+% default empty name instead.
+%
+% Second, |#1| may be absent from the property list. The
+% \cs{cs_if_exist:cF} test is automatically false, and we search for a
+% file defining the encoding or escaping |#1| (this should allow
+% third-party \texttt{.def} files). If the file is not found, there is
+% an error, use the default empty name instead.
+%
+% In all cases, the conversion based on \cs{l_@@_internal_tl} is
+% defined, so we can set the |#3_#1| function equal to that. In some
+% cases (\emph{e.g.}, \texttt{utf16be}), the |#3_#1| function is
+% actually defined within the file we just loaded, and it is different
+% from the \cs{l_@@_internal_tl}-based function: we mustn't clobber
+% that different definition.
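+% For instance, requesting the \texttt{latin1} encoding leads, via the
+% alias property list, to an attempt to load the file
+% \texttt{l3str-enc-iso88591.def}, unless the corresponding conversion
+% functions are already defined.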
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_convert:nnn #1#2#3
+ {
+ \cs_if_exist:cF { @@_convert_#2_#3: }
+ {
+ \exp_args:Nx \@@_convert:nnnn
+ { \@@_convert_lowercase_alphanum:n {#3} }
+ {#1} {#2} {#3}
+ }
+ }
+\cs_new_protected:Npn \@@_convert:nnnn #1#2#3#4
+ {
+ \cs_if_exist:cF { @@_convert_#3_#1: }
+ {
+ \prop_get:NnNF \g_@@_alias_prop {#1} \l_@@_internal_tl
+ { \tl_set:Nn \l_@@_internal_tl {#1} }
+ \cs_if_exist:cF { @@_convert_#3_ \l_@@_internal_tl : }
+ {
+ \file_if_exist:nTF { l3str-#2- \l_@@_internal_tl .def }
+ {
+ \group_begin:
+ \@@_load_catcodes:
+ \file_input:n { l3str-#2- \l_@@_internal_tl .def }
+ \group_end:
+ }
+ {
+ \tl_clear:N \l_@@_internal_tl
+ \__msg_kernel_error:nnxx { str } { unknown-#2 } {#4} {#1}
+ }
+ }
+ \cs_if_exist:cF { @@_convert_#3_#1: }
+ {
+ \cs_gset_eq:cc { @@_convert_#3_#1: }
+ { @@_convert_#3_ \l_@@_internal_tl : }
+ }
+ }
+ \cs_gset_eq:cc { @@_convert_#3_#4: } { @@_convert_#3_#1: }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int, rEXP]{\@@_convert_lowercase_alphanum:n}
+% \begin{macro}[aux, rEXP]{\@@_convert_lowercase_alphanum_loop:N}
+% This function keeps only letters and digits, with upper case letters
+% converted to lower case.
+% \begin{macrocode}
+\cs_new:Npn \@@_convert_lowercase_alphanum:n #1
+ {
+ \exp_after:wN \@@_convert_lowercase_alphanum_loop:N
+ \tl_to_str:n {#1} { ? \__prg_break: }
+ \__prg_break_point:
+ }
+\cs_new:Npn \@@_convert_lowercase_alphanum_loop:N #1
+ {
+ \use_none:n #1
+ \if_int_compare:w `#1 < \c_ninety_one
+ \if_int_compare:w `#1 < \c_sixty_five
+ \if_int_compare:w \c_one < 1#1 \exp_stop_f:
+ #1
+ \fi:
+ \else:
+ \@@_output_byte:n { `#1 + \c_thirty_two }
+ \fi:
+ \else:
+ \if_int_compare:w `#1 < \c_one_hundred_twenty_three
+ \if_int_compare:w `#1 < \c_ninety_seven
+ \else:
+ #1
+ \fi:
+ \fi:
+ \fi:
+ \@@_convert_lowercase_alphanum_loop:N
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_load_catcodes:}
+% Since encoding files may be loaded at arbitrary places in a \TeX{}
+% document, including within verbatim mode, we set the catcodes of all
+% characters appearing in any encoding definition file.
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_load_catcodes:
+ {
+ \char_set_catcode_escape:N \\
+ \char_set_catcode_group_begin:N \{
+ \char_set_catcode_group_end:N \}
+ \char_set_catcode_math_toggle:N \$
+ \char_set_catcode_alignment:N \&
+ \char_set_catcode_parameter:N \#
+ \char_set_catcode_math_superscript:N \^
+ \char_set_catcode_ignore:N \ %
+ \char_set_catcode_space:N \~
+ \tl_map_function:nN { abcdefghijklmnopqrstuvwxyz_:ABCDEFILNPSTUX }
+ \char_set_catcode_letter:N
+ \tl_map_function:nN { 0123456789"'?*+-.(),`!/<>[];= }
+ \char_set_catcode_other:N
+ \char_set_catcode_comment:N \%
+ \int_set:Nn \tex_endlinechar:D {32}
+ }
+% \end{macrocode}
+% \end{macro}
+%
+% \subsubsection{Byte unescape and escape}
+%
+% Strings of bytes may need to be stored in auxiliary files in safe
+% \enquote{escaping} formats. Each such escaping is only loaded as
+% needed. By default, on input any non-byte is filtered out, while the
+% output step simply lets bytes through unchanged.
+%
+% \begin{macro}[int, rEXP]{\@@_filter_bytes:n}
+% \begin{macro}[aux, rEXP]{\@@_filter_bytes_aux:N}
+% In the case of pdf\TeX{}, every character is a byte. For
+% Unicode-aware engines, test the character code; non-bytes cause us
+% to raise the flag \texttt{str_byte}. Spaces have already been given
+% the correct category code when this function is called.
+% \begin{macrocode}
+\pdftex_if_engine:TF
+ { \cs_new_eq:NN \@@_filter_bytes:n \use:n }
+ {
+ \cs_new:Npn \@@_filter_bytes:n #1
+ {
+ \@@_filter_bytes_aux:N #1
+ { ? \__prg_break: }
+ \__prg_break_point:
+ }
+ \cs_new:Npn \@@_filter_bytes_aux:N #1
+ {
+ \use_none:n #1
+ \if_int_compare:w `#1 < 256 \exp_stop_f:
+ #1
+ \else:
+ \flag_raise:n { str_byte }
+ \fi:
+ \@@_filter_bytes_aux:N
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_unescape_:}
+% \begin{macro}[int]{\@@_convert_unescape_bytes:}
+% The simplest unescaping method removes non-bytes from
+% \cs{g_@@_result_tl}.
+% \begin{macrocode}
+\pdftex_if_engine:TF
+ { \cs_new_protected_nopar:Npn \@@_convert_unescape_: { } }
+ {
+ \cs_new_protected_nopar:Npn \@@_convert_unescape_:
+ {
+ \flag_clear:n { str_byte }
+ \tl_gset:Nx \g_@@_result_tl
+ { \exp_args:No \@@_filter_bytes:n \g_@@_result_tl }
+ \@@_if_flag_error:nnx { str_byte } { non-byte } { bytes }
+ }
+ }
+\cs_new_eq:NN \@@_convert_unescape_bytes: \@@_convert_unescape_:
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_escape_:}
+% \begin{macro}[int]{\@@_convert_escape_bytes:}
+% The simplest form of escape leaves the bytes from the previous step
+% of the conversion unchanged.
+% \begin{macrocode}
+\cs_new_protected_nopar:Npn \@@_convert_escape_: { }
+\cs_new_eq:NN \@@_convert_escape_bytes: \@@_convert_escape_:
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \subsubsection{Native strings}
+%
+% \begin{macro}[int]{\@@_convert_decode_:}
+% \begin{macro}[aux, rEXP]{\@@_decode_native_char:N}
+% Convert each character to its character code, one at a time.
+% \begin{macrocode}
+\cs_new_protected_nopar:Npn \@@_convert_decode_:
+ { \@@_convert_gmap:N \@@_decode_native_char:N }
+\cs_new:Npn \@@_decode_native_char:N #1
+ { #1 \s__tl \__int_value:w `#1 \s__tl }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_encode_:}
+% The conversion from an internal string to native character tokens is
+% very different in pdf\TeX{} and in other engines. For Unicode-aware
+% engines, we need the definitions to be read when the null byte has
+% category code $12$, so we set that inside a group.
+% \begin{macrocode}
+\group_begin:
+ \char_set_catcode_other:n { 0 }
+ \pdftex_if_engine:TF
+% \end{macrocode}
+% \begin{macro}[aux, EXP]{\@@_encode_native_char:n}
+% Since pdf\TeX{} only supports 8-bit characters, and we have a table
+% of all bytes, the conversion can be done in linear time within an
+% \texttt{x}-expanding assignment. Look out for character codes larger
+% than $255$: those characters are replaced by |?| and raise a flag,
+% which then triggers a pdf\TeX{}-specific error.
+% \begin{macrocode}
+ {
+ \cs_new_protected_nopar:Npn \@@_convert_encode_:
+ {
+ \flag_clear:n { str_error }
+ \@@_convert_gmap_internal:N \@@_encode_native_char:n
+ \@@_if_flag_error:nnx { str_error }
+ { pdfTeX-native-overflow } { }
+ }
+ \cs_new:Npn \@@_encode_native_char:n #1
+ {
+ \if_int_compare:w #1 < \c_two_hundred_fifty_six
+ \@@_output_byte:n {#1}
+ \else:
+ \flag_raise:n { str_error }
+ ?
+ \fi:
+ }
+ \__msg_kernel_new:nnnn { str } { pdfTeX-native-overflow }
+ { Character~code~too~large~for~pdfTeX. }
+ {
+ The~pdfTeX~engine~only~supports~8-bit~characters:~
+ valid~character~codes~are~in~the~range~[0,255].~
+ To~manipulate~arbitrary~Unicode,~use~LuaTeX~or~XeTeX.
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \begin{macro}[aux]{\@@_encode_native_loop:w}
+% \begin{macro}[aux]{\@@_encode_native_flush:}
+% \begin{macro}[aux, rEXP]{\@@_encode_native_filter:N}
+% In Unicode-aware engines, since building particular characters
+% cannot be done expandably in \TeX{}, we cannot hope to get a
+% linear-time function. However, we get quite close using the
+% \pkg{l3tl-build} module, which abuses \tn{toks} to reach an almost
+% linear time. Use the standard lowercase trick to produce an
+% arbitrary character from the null character, and add that character
+% to the end of the token list being built. At the end of the loop,
+% put the token list together with \cs{__tl_build_end:}. Note that we
+% use an \texttt{x}-expanding assignment because it is slightly
+% faster. Unicode-aware engines will never incur an overflow because
+% the internal string is guaranteed to only contain code points in
+% $[0,\hexnum{10FFFF}]$.
+% \begin{macrocode}
+ {
+ \cs_new_protected_nopar:Npn \@@_convert_encode_:
+ {
+ \int_zero:N \l__tl_build_offset_int
+ \__tl_gbuild_x:Nw \g_@@_result_tl
+ \exp_after:wN \@@_encode_native_loop:w
+ \g_@@_result_tl \s__tl { \q_stop \__prg_break: } \s__tl
+ \__prg_break_point:
+ \__tl_build_end:
+ }
+ \cs_new_protected:Npn \@@_encode_native_loop:w #1 \s__tl #2 \s__tl
+ {
+ \use_none_delimit_by_q_stop:w #2 \q_stop
+ \tex_lccode:D \l_@@_internal_int \__int_eval:w #2 \__int_eval_end:
+ \tl_to_lowercase:n { \__tl_build_one:n { ^^@ } }
+ \@@_encode_native_loop:w
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% End the group to restore the catcode of the null byte.
+% \begin{macrocode}
+\group_end:
+% \end{macrocode}
+% \end{macro}
+%
+% \subsubsection{8-bit encodings}
+%
+% This section will be entirely rewritten: it is not yet clear in what
+% situations 8-bit encodings are used, hence I don't know what exactly
+% should be optimized. The current approach is reasonably efficient to
+% convert long strings, and it scales well when using many different
+% encodings. An approach based on csnames would have a smaller constant
+% load time for each individual conversion, but has a large hash table
+% cost. Using a range of \tn{count} registers works for decoding, but
+% not for encoding: one possibility there would be to use a binary tree
+% for the mapping of Unicode characters to bytes, stored as a box, one
+% per encoding.
+%
+% Since the section is going to be rewritten, documentation here is sparse.
+%
+% All the 8-bit encodings which \pkg{l3str} supports rely on the same
+% internal functions.
+%
+% \begin{macro}[int]{\@@_declare_eight_bit_encoding:nnn}
+% \begin{syntax}
+% \cs{@@_declare_eight_bit_encoding:nnn} \Arg{name} \Arg{mapping} \Arg{missing}
+% \end{syntax}
+% This declares the encoding \meta{name} to map bytes to Unicode
+% characters according to the \meta{mapping}, and map those bytes
+% which are not mentioned in the \meta{mapping} either to the
+% replacement character (if they appear in \meta{missing}), or to
+% themselves.
+%
+% All the 8-bit encoding definition files start with
+% \cs{@@_declare_eight_bit_encoding:nnn} \Arg{encoding name}
+% \Arg{mapping} \Arg{missing bytes}. The \meta{mapping} argument is a
+% token list of pairs \Arg{byte} \Arg{Unicode} expressed in uppercase
+% hexadecimal notation. The \meta{missing} argument is a token list
+% of \Arg{byte}. Every \meta{byte} which appears in neither the
+% \meta{mapping} nor the \meta{missing} list maps to the same code
+% point in Unicode.
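+%
+% A hypothetical declaration (the encoding name and all values below
+% are made up purely for illustration) could read
+% \begin{verbatim}
+% \@@_declare_eight_bit_encoding:nnn { myenc }
+%   { {A3} {0141} {A9} {0104} }
+%   { {A5} {AA} }
+% \end{verbatim}
+% mapping byte \hexnum{A3} to code point \hexnum{0141} and byte
+% \hexnum{A9} to \hexnum{0104}, mapping bytes \hexnum{A5} and
+% \hexnum{AA} to the replacement character, and leaving every other
+% byte at the code point with the same number.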
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_declare_eight_bit_encoding:nnn #1#2#3
+ {
+ \tl_set:Nn \l_@@_internal_tl {#1}
+ \cs_new_protected_nopar:cpn { @@_convert_decode_#1: }
+ { \@@_convert_decode_eight_bit:n {#1} }
+ \cs_new_protected_nopar:cpn { @@_convert_encode_#1: }
+ { \@@_convert_encode_eight_bit:n {#1} }
+ \tl_const:cn { c_@@_encoding_#1_tl } {#2}
+ \tl_const:cn { c_@@_encoding_#1_missing_tl } {#3}
+ }
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_decode_eight_bit:n}
+% \begin{macro}[aux]{\@@_decode_eight_bit_load:nn}
+% \begin{macro}[aux]{\@@_decode_eight_bit_load_missing:n}
+% \begin{macro}[aux, EXP]{\@@_decode_eight_bit_char:N}
+%^^A todo: document
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_convert_decode_eight_bit:n #1
+ {
+ \group_begin:
+ \int_zero:N \l_@@_internal_int
+ \exp_last_unbraced:Nx \@@_decode_eight_bit_load:nn
+ { \tl_use:c { c_@@_encoding_#1_tl } }
+ { \q_stop \__prg_break: } { }
+ \__prg_break_point:
+ \exp_last_unbraced:Nx \@@_decode_eight_bit_load_missing:n
+ { \tl_use:c { c_@@_encoding_#1_missing_tl } }
+ { \q_stop \__prg_break: }
+ \__prg_break_point:
+ \flag_clear:n { str_error }
+ \@@_convert_gmap:N \@@_decode_eight_bit_char:N
+ \@@_if_flag_error:nnx { str_error } { decode-8-bit } {#1}
+ \group_end:
+ }
+\cs_new_protected:Npn \@@_decode_eight_bit_load:nn #1#2
+ {
+ \use_none_delimit_by_q_stop:w #1 \q_stop
+ \tex_dimen:D "#1 = \l_@@_internal_int sp \scan_stop:
+ \tex_skip:D \l_@@_internal_int = "#1 sp \scan_stop:
+ \tex_toks:D \l_@@_internal_int \exp_after:wN { \__int_value:w "#2 }
+ \tex_advance:D \l_@@_internal_int \c_one
+ \@@_decode_eight_bit_load:nn
+ }
+\cs_new_protected:Npn \@@_decode_eight_bit_load_missing:n #1
+ {
+ \use_none_delimit_by_q_stop:w #1 \q_stop
+ \tex_dimen:D "#1 = \l_@@_internal_int sp \scan_stop:
+ \tex_skip:D \l_@@_internal_int = "#1 sp \scan_stop:
+ \tex_toks:D \l_@@_internal_int \exp_after:wN
+ { \int_use:N \c_@@_replacement_char_int }
+ \tex_advance:D \l_@@_internal_int \c_one
+ \@@_decode_eight_bit_load_missing:n
+ }
+\cs_new:Npn \@@_decode_eight_bit_char:N #1
+ {
+ #1 \s__tl
+ \if_int_compare:w \tex_dimen:D `#1 < \l_@@_internal_int
+ \if_int_compare:w \tex_skip:D \tex_dimen:D `#1 = `#1 \exp_stop_f:
+ \tex_the:D \tex_toks:D \tex_dimen:D
+ \fi:
+ \fi:
+ \__int_value:w `#1 \s__tl
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_encode_eight_bit:n}
+% \begin{macro}[aux]{\@@_encode_eight_bit_load:nn}
+% \begin{macro}[aux, rEXP]{\@@_encode_eight_bit_char:n}
+% \begin{macro}[aux, rEXP]{\@@_encode_eight_bit_char_aux:n}
+%^^A todo: document
+% \begin{macrocode}
+\cs_new_protected:Npn \@@_convert_encode_eight_bit:n #1
+ {
+ \group_begin:
+ \int_zero:N \l_@@_internal_int
+ \exp_last_unbraced:Nx \@@_encode_eight_bit_load:nn
+ { \tl_use:c { c_@@_encoding_#1_tl } }
+ { \q_stop \__prg_break: } { }
+ \__prg_break_point:
+ \flag_clear:n { str_error }
+ \@@_convert_gmap_internal:N \@@_encode_eight_bit_char:n
+ \@@_if_flag_error:nnx { str_error } { encode-8-bit } {#1}
+ \group_end:
+ }
+\cs_new_protected:Npn \@@_encode_eight_bit_load:nn #1#2
+ {
+ \use_none_delimit_by_q_stop:w #1 \q_stop
+ \tex_dimen:D "#2 = \l_@@_internal_int sp \scan_stop:
+ \tex_skip:D \l_@@_internal_int = "#2 sp \scan_stop:
+ \exp_args:NNf \tex_toks:D \l_@@_internal_int
+ { \@@_output_byte:n { "#1 } }
+ \tex_advance:D \l_@@_internal_int \c_one
+ \@@_encode_eight_bit_load:nn
+ }
+\cs_new:Npn \@@_encode_eight_bit_char:n #1
+ {
+ \if_int_compare:w #1 > \c_max_register_int
+ \flag_raise:n { str_error }
+ \else:
+ \if_int_compare:w \tex_dimen:D #1 < \l_@@_internal_int
+ \if_int_compare:w \tex_skip:D \tex_dimen:D #1 = #1 \exp_stop_f:
+ \tex_the:D \tex_toks:D \tex_dimen:D #1 \exp_stop_f:
+ \exp_after:wN \exp_after:wN \exp_after:wN \use_none:nn
+ \fi:
+ \fi:
+ \@@_encode_eight_bit_char_aux:n {#1}
+ \fi:
+ }
+\cs_new:Npn \@@_encode_eight_bit_char_aux:n #1
+ {
+ \if_int_compare:w #1 < \c_two_hundred_fifty_six
+ \@@_output_byte:n {#1}
+ \else:
+ \flag_raise:n { str_error }
+ \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{Messages}
+%
+% General messages, and messages for the encodings and escapings loaded
+% by default (\enquote{native}, and \enquote{bytes}).
+% \begin{macrocode}
+\__msg_kernel_new:nnn { str } { unknown-esc }
+ { Escaping~scheme~'#1'~(filtered:~'#2')~unknown. }
+\__msg_kernel_new:nnn { str } { unknown-enc }
+ { Encoding~scheme~'#1'~(filtered:~'#2')~unknown. }
+\__msg_kernel_new:nnnn { str } { native-escaping }
+ { The~'native'~encoding~scheme~does~not~support~any~escaping. }
+ {
+    Since~native~strings~do~not~consist~of~bytes,~
+    none~of~the~escaping~methods~make~sense.~
+    The~specified~escaping,~'#1',~will~be~ignored.
+ }
+\__msg_kernel_new:nnn { str } { file-not-found }
+ { File~'l3str-#1.def'~not~found. }
+% \end{macrocode}
+%
+% Message used when the \enquote{bytes} unescaping fails because the
+% string given to \cs{str_set_convert:Nnnn} contains a non-byte. This
+% cannot happen for the pdf\TeX{} engine, since that engine only
+% supports 8-bit characters. Messages used for other escapings and
+% encodings are defined in each definition file.
+% \begin{macrocode}
+\pdftex_if_engine:F
+ {
+ \__msg_kernel_new:nnnn { str } { non-byte }
+ { String~invalid~in~escaping~'#1':~it~may~only~contain~bytes. }
+ {
+ Some~characters~in~the~string~you~asked~to~convert~are~not~
+ 8-bit~characters.~Perhaps~the~string~is~a~'native'~Unicode~string?~
+ If~it~is,~try~using\\
+ \\
+ \iow_indent:n
+ {
+ \iow_char:N\\str_set_convert:Nnnn \\
+ \ \ <str~var>~\{~<string>~\}~\{~native~\}~\{~<target~encoding>~\}
+ }
+ }
+ }
+% \end{macrocode}
+%
+% Those messages are used when converting to and from 8-bit encodings.
+% \begin{macrocode}
+\__msg_kernel_new:nnnn { str } { decode-8-bit }
+ { Invalid~string~in~encoding~'#1'. }
+ {
+ LaTeX~came~across~a~byte~which~is~not~defined~to~represent~
+ any~character~in~the~encoding~'#1'.
+ }
+\__msg_kernel_new:nnnn { str } { encode-8-bit }
+ { Unicode~string~cannot~be~converted~to~encoding~'#1'. }
+ {
+ The~encoding~'#1'~only~contains~a~subset~of~all~Unicode~characters.~
+ LaTeX~was~asked~to~convert~a~string~to~that~encoding,~but~that~
+ string~contains~a~character~that~'#1'~does~not~support.
+ }
+% \end{macrocode}
+%
+% \begin{macrocode}
+%</initex|package>
+% \end{macrocode}
+%
+% \subsection{Escaping definition files}
+%
+% Several of those escapings are defined by the \textsc{pdf} file
+% format. The following byte storage methods are defined:
+% \begin{itemize}
+% \item \texttt{bytes} (the default): non-bytes are filtered out, and
+%   bytes are left untouched (this escaping is defined by default);
+% \item \texttt{hex} or \texttt{hexadecimal}, as per the pdf\TeX{}
+%   primitive \tn{pdfescapehex};
+% \item \texttt{name}, as per the pdf\TeX{} primitive
+%   \tn{pdfescapename};
+% \item \texttt{string}, as per the pdf\TeX{} primitive
+%   \tn{pdfescapestring};
+% \item \texttt{url}, as per the percent encoding of urls.
+% \end{itemize}
+%
+% \subsubsection{Unescape methods}
+%
+% \begin{macro}[int]{\@@_convert_unescape_hex:}
+% \begin{macro}[aux, rEXP]{\@@_unescape_hex_auxi:N}
+% \begin{macro}[aux, rEXP]{\@@_unescape_hex_auxii:N}
+% Take characters two at a time, and interpret each pair as the
+% hexadecimal code for a byte. Anything other than a hexadecimal digit
+% is ignored, raising the flag. A string which contains an odd number
+% of hexadecimal digits gets |0| appended to it: this is equivalent to
+% appending a |0| in all cases, and dropping it if it is alone.
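+%
+% For instance, unescaping |4A4B| produces the two bytes \hexnum{4A}
+% and \hexnum{4B}, \emph{i.e.}, the string |JK|, while the odd-length
+% |4A4| is treated as |4A40|, producing the bytes \hexnum{4A} and
+% \hexnum{40}.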
+% \begin{macrocode}
+%<*hex>
+\cs_new_protected_nopar:Npn \@@_convert_unescape_hex:
+ {
+ \group_begin:
+ \flag_clear:n { str_error }
+ \int_set:Nn \tex_escapechar:D { 92 }
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \@@_output_byte:w "
+ \exp_last_unbraced:Nf \@@_unescape_hex_auxi:N
+ { \tl_to_str:N \g_@@_result_tl }
+ 0 { ? 0 - \c_one \__prg_break: }
+ \__prg_break_point:
+ \@@_output_end:
+ }
+ \@@_if_flag_error:nnx { str_error } { unescape-hex } { }
+ \group_end:
+ }
+\cs_new:Npn \@@_unescape_hex_auxi:N #1
+ {
+ \use_none:n #1
+ \@@_hexadecimal_use:NTF #1
+ { \@@_unescape_hex_auxii:N }
+ {
+ \flag_raise:n { str_error }
+ \@@_unescape_hex_auxi:N
+ }
+ }
+\cs_new:Npn \@@_unescape_hex_auxii:N #1
+ {
+ \use_none:n #1
+ \@@_hexadecimal_use:NTF #1
+ {
+ \@@_output_end:
+ \@@_output_byte:w " \@@_unescape_hex_auxi:N
+ }
+ {
+ \flag_raise:n { str_error }
+ \@@_unescape_hex_auxii:N
+ }
+ }
+\__msg_kernel_new:nnnn { str } { unescape-hex }
+ { String~invalid~in~escaping~'hex':~only~hexadecimal~digits~allowed. }
+ {
+ Some~characters~in~the~string~you~asked~to~convert~are~not~
+ hexadecimal~digits~(0-9,~A-F,~a-f)~nor~spaces.
+ }
+%</hex>
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_unescape_name:}
+% \begin{macro}[aux, rEXP]{\@@_unescape_name_loop:wNN}
+% \begin{macro}[int]{\@@_convert_unescape_url:}
+% \begin{macro}[aux, rEXP]{\@@_unescape_url_loop:wNN}
+% The \cs{@@_convert_unescape_name:} function replaces each
+% occurrence of |#| followed by two hexadecimal digits in
+% \cs{g_@@_result_tl} by the corresponding byte. The \texttt{url}
+% function is identical, with escape character |%| instead of |#|.
+% Thus we define the two together. The arguments of \cs{@@_tmp:w} are
+% the escape character |#| or |%|, the name of the escaping (used to
+% build the relevant function names), and the name of the auxiliary
+% which performs the loop.
+%
+% The looping auxiliary |#3| finds the next escape character, reads
+% the following two characters, and tests them. The test
+% \cs{@@_hexadecimal_use:NTF} leaves the upper-case digit in the
+% input stream, hence we surround the test with
+% \cs{@@_output_byte:w}~|"| and \cs{@@_output_end:}. If both
+% characters are hexadecimal digits, they should be removed before
+% looping: this is done by \cs{use_i:nnn}. If one of the characters
+% is not a hexadecimal digit, then feed |"#1| to
+% \cs{@@_output_byte:w} to produce the escape character, raise the
+% flag, and call the looping function followed by the two characters
+% (remove \cs{use_i:nnn}).
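+%
+% For instance, in the \texttt{name} escaping, |A#2Fb| unescapes to
+% |A/b| since \hexnum{2F} is the code of |/|, whereas in |A#b| the
+% |#| is not followed by two hexadecimal digits: the |#| is kept as a
+% byte, the flag is raised, and parsing resumes at |b|.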
+% \begin{macrocode}
+%<*name|url>
+\cs_set_protected:Npn \@@_tmp:w #1#2#3
+ {
+ \cs_new_protected:cpn { @@_convert_unescape_#2: }
+ {
+ \group_begin:
+ \flag_clear:n { str_byte }
+ \flag_clear:n { str_error }
+ \int_set:Nn \tex_escapechar:D { 92 }
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN #3 \g_@@_result_tl
+ #1 ? { ? \__prg_break: }
+ \__prg_break_point:
+ }
+ \@@_if_flag_error:nnx { str_byte } { non-byte } { #2 }
+ \@@_if_flag_error:nnx { str_error } { unescape-#2 } { }
+ \group_end:
+ }
+ \cs_new:Npn #3 ##1#1##2##3
+ {
+ \@@_filter_bytes:n {##1}
+ \use_none:n ##3
+ \@@_output_byte:w "
+ \@@_hexadecimal_use:NTF ##2
+ {
+ \@@_hexadecimal_use:NTF ##3
+ { }
+ {
+ \flag_raise:n { str_error }
+ * \c_zero + `#1 \use_i:nn
+ }
+ }
+ {
+ \flag_raise:n { str_error }
+ 0 + `#1 \use_i:nn
+ }
+ \@@_output_end:
+ \use_i:nnn #3 ##2##3
+ }
+ \__msg_kernel_new:nnnn { str } { unescape-#2 }
+ { String~invalid~in~escaping~'#2'. }
+ {
+ LaTeX~came~across~the~escape~character~'#1'~not~followed~by~
+ two~hexadecimal~digits.~This~is~invalid~in~the~escaping~'#2'.
+ }
+ }
+%</name|url>
+%<*name>
+\exp_after:wN \@@_tmp:w \c_hash_str { name }
+ \@@_unescape_name_loop:wNN
+%</name>
+%<*url>
+\exp_after:wN \@@_tmp:w \c_percent_str { url }
+ \@@_unescape_url_loop:wNN
+%</url>
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_unescape_string:}
+% \begin{macro}[aux, rEXP]{\@@_unescape_string_newlines:wN}
+% \begin{macro}[aux, rEXP]{\@@_unescape_string_loop:wNNN}
+% \begin{macro}[aux, rEXP]{\@@_unescape_string_repeat:NNNNNN}
+% The \texttt{string} escaping is somewhat similar to the
+% \texttt{name} and \texttt{url} escapings, with escape character |\|.
+% The first step is to convert all three line endings, |^^J|, |^^M|,
+% and |^^M^^J| to the common |^^J|, as per the \textsc{pdf}
+% specification. This step cannot raise the flag.
+%
+% Then the following escape sequences are decoded.
+% \begin{itemize}\def\makelabel#1{\hss\llap{\ttfamily\string#1}}
+% \item[\n] Line feed ($10$)
+% \item[\r] Carriage return ($13$)
+% \item[\t] Horizontal tab ($9$)
+% \item[\b] Backspace ($8$)
+% \item[\f] Form feed ($12$)
+% \item[\(] Left parenthesis
+% \item[\)] Right parenthesis
+% \item[\\] Backslash
+% \item[\ddd] (backslash followed by $1$ to $3$ octal digits) Byte
+% \texttt{ddd} (octal), subtracting $256$ in case of overflow.
+% \end{itemize}
+% If followed by an end-of-line character, the backslash and the
+% end-of-line are ignored. If followed by anything else, the backslash
+% is ignored, raising the error flag.
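+%
+% As an example, |\101| yields the byte $65$ (|A|), |\(| yields a
+% plain left parenthesis, and |\400| yields the byte $0$, since octal
+% $400$ is $256$, which overflows and gets $256$ subtracted.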
+%^^A Be paranoid: \tl_to_lowercase:n is unsafe.
+% \begin{macrocode}
+%<*string>
+\group_begin:
+ \char_set_lccode:nn {`\*} {`\\}
+ \char_set_catcode_other:N \^^J
+ \char_set_catcode_other:N \^^M
+ \tl_to_lowercase:n
+ {
+ \cs_new_protected_nopar:Npn \@@_convert_unescape_string:
+ {
+ \group_begin:
+ \flag_clear:n { str_byte }
+ \flag_clear:n { str_error }
+ \int_set:Nn \tex_escapechar:D { 92 }
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN \@@_unescape_string_newlines:wN
+ \g_@@_result_tl \__prg_break: ^^M ?
+ \__prg_break_point:
+ }
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN \@@_unescape_string_loop:wNNN
+ \g_@@_result_tl * ?? { ? \__prg_break: }
+ \__prg_break_point:
+ }
+ \@@_if_flag_error:nnx { str_byte } { non-byte } { string }
+ \@@_if_flag_error:nnx { str_error } { unescape-string } { }
+ \group_end:
+ }
+ \cs_new:Npn \@@_unescape_string_loop:wNNN #1 *#2#3#4
+ }
+ {
+ \@@_filter_bytes:n {#1}
+ \use_none:n #4
+ \@@_output_byte:w '
+ \@@_octal_use:NTF #2
+ {
+ \@@_octal_use:NTF #3
+ {
+ \@@_octal_use:NTF #4
+ {
+ \if_int_compare:w #2 > \c_three
+ - 256
+ \fi:
+ \@@_unescape_string_repeat:NNNNNN
+ }
+ { \@@_unescape_string_repeat:NNNNNN ? }
+ }
+ { \@@_unescape_string_repeat:NNNNNN ?? }
+ }
+ {
+ \str_case_x:nnn {#2}
+ {
+ { \c_backslash_str } { 134 }
+ { ( } { 50 }
+ { ) } { 51 }
+ { r } { 15 }
+ { f } { 14 }
+ { n } { 12 }
+ { t } { 11 }
+ { b } { 10 }
+ { ^^J } { 0 - \c_one }
+ }
+ {
+ \flag_raise:n { str_error }
+ 0 - \c_one \use_i:nn
+ }
+ }
+ \@@_output_end:
+ \use_i:nn \@@_unescape_string_loop:wNNN #2#3#4
+ }
+ \cs_new:Npn \@@_unescape_string_repeat:NNNNNN #1#2#3#4#5#6
+ { \@@_output_end: \@@_unescape_string_loop:wNNN }
+ \cs_new:Npn \@@_unescape_string_newlines:wN #1 ^^M #2
+ {
+ #1
+ \if_charcode:w ^^J #2 \else: ^^J \fi:
+ \@@_unescape_string_newlines:wN #2
+ }
+ \__msg_kernel_new:nnnn { str } { unescape-string }
+ { String~invalid~in~escaping~'string'. }
+ {
+ LaTeX~came~across~an~escape~character~'\c_backslash_str'~
+ not~followed~by~any~of:~'n',~'r',~'t',~'b',~'f',~'(',~')',~
+ '\c_backslash_str',~one~to~three~octal~digits,~or~the~end~
+ of~a~line.
+ }
+\group_end:
+%</string>
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsubsection{Escape methods}
+%
+% Currently, none of the escape methods can lead to errors, assuming
+% that their input is made out of bytes.
+%
+% \begin{macro}[int]{\@@_convert_escape_hex:}
+% \begin{macro}[aux, rEXP]{\@@_escape_hex_char:N}
+% Loop and convert each byte to hexadecimal.
+% \begin{macrocode}
+%<*hex>
+\cs_new_protected_nopar:Npn \@@_convert_escape_hex:
+ { \@@_convert_gmap:N \@@_escape_hex_char:N }
+\cs_new:Npn \@@_escape_hex_char:N #1
+ { \@@_output_hexadecimal:n { `#1 } }
+%</hex>
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_escape_name:}
+% \begin{macro}[aux, rEXP]{\@@_escape_name_char:N}
+% \begin{macro}[aux, rEXP]{\@@_if_escape_name:NTF}
+% \begin{variable}{\c_@@_escape_name_str}
+% \begin{variable}{\c_@@_escape_name_not_str}
+% For each byte, test whether it should be output as is, or be
+% \enquote{hash-encoded}. Roughly, bytes outside the range
+% $[\hexnum{2A},\hexnum{7E}]$ are hash-encoded. We keep two lists of
+% exceptions: characters in \cs{c_@@_escape_name_not_str} are not
+% hash-encoded, and characters in the \cs{c_@@_escape_name_str} are
+% encoded.
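+%
+% For instance, |!| (code \hexnum{21}, in the first exception list) is
+% left as is, |/| (in the second exception list) becomes |#2F|, and a
+% space (code \hexnum{20}) becomes |#20|.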
+% \begin{macrocode}
+%<*name>
+\str_const:Nn \c_@@_escape_name_not_str { ! " $ & ' } %$
+\str_const:Nn \c_@@_escape_name_str { {}/<>[] }
+\cs_new_protected_nopar:Npn \@@_convert_escape_name:
+ { \@@_convert_gmap:N \@@_escape_name_char:N }
+\cs_new:Npn \@@_escape_name_char:N #1
+ {
+ \@@_if_escape_name:NTF #1 {#1}
+ { \c_hash_str \@@_output_hexadecimal:n {`#1} }
+ }
+\prg_new_conditional:Npnn \@@_if_escape_name:N #1 { TF }
+ {
+ \if_int_compare:w `#1 < "2A \exp_stop_f:
+ \@@_if_contains_char:NNTF \c_@@_escape_name_not_str #1
+ \prg_return_true: \prg_return_false:
+ \else:
+ \if_int_compare:w `#1 > "7E \exp_stop_f:
+ \prg_return_false:
+ \else:
+ \@@_if_contains_char:NNTF \c_@@_escape_name_str #1
+ \prg_return_false: \prg_return_true:
+ \fi:
+ \fi:
+ }
+%</name>
+% \end{macrocode}
+% \end{variable}
+% \end{variable}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_escape_string:}
+% \begin{macro}[aux, rEXP]{\@@_escape_string_char:N}
+% \begin{macro}[aux, rEXP]{\@@_if_escape_string:NTF}
+% \begin{variable}{\c_@@_escape_string_str}
+% Any character below (and including) space, and any character above
+% (and including) \texttt{del}, are converted to octal. One backslash
+% is added before each parenthesis and backslash.
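+%
+% For instance, a line feed (code $10$) is output as |\012|, since
+% $\lfloor 10/64 \rfloor = 0$, $\lfloor 10/8 \rfloor \bmod 8 = 1$ and
+% $10 \bmod 8 = 2$, while |(| is output as |\(|.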
+% \begin{macrocode}
+%<*string>
+\str_const:Nx \c_@@_escape_string_str
+ { \c_backslash_str ( ) }
+\cs_new_protected_nopar:Npn \@@_convert_escape_string:
+ { \@@_convert_gmap:N \@@_escape_string_char:N }
+\cs_new:Npn \@@_escape_string_char:N #1
+ {
+ \@@_if_escape_string:NTF #1
+ {
+ \@@_if_contains_char:NNT
+ \c_@@_escape_string_str #1
+ { \c_backslash_str }
+ #1
+ }
+ {
+ \c_backslash_str
+ \int_div_truncate:nn {`#1} {64}
+ \int_mod:nn { \int_div_truncate:nn {`#1} \c_eight } \c_eight
+ \int_mod:nn {`#1} \c_eight
+ }
+ }
+\prg_new_conditional:Npnn \@@_if_escape_string:N #1 { TF }
+ {
+ \if_int_compare:w `#1 < "21 \exp_stop_f:
+ \prg_return_false:
+ \else:
+ \if_int_compare:w `#1 > "7E \exp_stop_f:
+ \prg_return_false:
+ \else:
+ \prg_return_true:
+ \fi:
+ \fi:
+ }
+%</string>
+% \end{macrocode}
+% \end{variable}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int]{\@@_convert_escape_url:}
+% \begin{macro}[aux, rEXP]{\@@_escape_url_char:N}
+% \begin{macro}[aux, rEXP]{\@@_if_escape_url:NTF}
+% This function is similar to \cs{@@_convert_escape_name:}, escaping
+% different characters.
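+%
+% For instance, letters are left unchanged, while |/| (code
+% \hexnum{2F}) and a space (code \hexnum{20}) are output as |%2F| and
+% |%20| respectively.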
+% \begin{macrocode}
+%<*url>
+\cs_new_protected_nopar:Npn \@@_convert_escape_url:
+ { \@@_convert_gmap:N \@@_escape_url_char:N }
+\cs_new:Npn \@@_escape_url_char:N #1
+ {
+ \@@_if_escape_url:NTF #1 {#1}
+ { \c_percent_str \@@_output_hexadecimal:n { `#1 } }
+ }
+\prg_new_conditional:Npnn \@@_if_escape_url:N #1 { TF }
+ {
+ \if_int_compare:w `#1 < "41 \exp_stop_f:
+ \@@_if_contains_char:nNTF { "-.<> } #1
+ \prg_return_true: \prg_return_false:
+ \else:
+ \if_int_compare:w `#1 > "7E \exp_stop_f:
+ \prg_return_false:
+ \else:
+ \@@_if_contains_char:nNTF { [ ] } #1
+ \prg_return_false: \prg_return_true:
+ \fi:
+ \fi:
+ }
+%</url>
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{Encoding definition files}
+%
+% The \texttt{native} encoding is automatically defined. Other encodings
+% are loaded as needed. The following encodings are supported:
+% \begin{itemize}
+% \item \textsc{utf-8};
+% \item \textsc{utf-16}, big-, little-endian, or with byte order mark;
+% \item \textsc{utf-32}, big-, little-endian, or with byte order mark;
+% \item the \textsc{iso 8859} code pages, numbered from $1$ to $16$,
+%     skipping the nonexistent \textsc{iso 8859-12}.
+% \end{itemize}
+%
+% \subsubsection{\textsc{utf-8} support}
+%
+% \begin{macrocode}
+%<*utf8>
+% \end{macrocode}
+%
+% \begin{macro}[int]{\@@_convert_encode_utf8:}
+% \begin{macro}[aux, rEXP]{\@@_encode_utf_viii_char:n}
+% \begin{macro}[aux, rEXP]{\@@_encode_utf_viii_loop:wwnnw}
+% Loop through the internal string, and convert each character to its
+% \textsc{utf-8} representation. The representation is built from the
+% right-most (least significant) byte to the left-most (most
+% significant) byte. Continuation bytes are in the range $[128,191]$,
+% taking $64$ different values, hence we roughly want to express the
+% character code in base $64$, shifting the first digit in the
+% representation by some number depending on how many continuation
+% bytes there are. In the range $[0,127]$, output the corresponding
+% byte directly. In the range $[128,2047]$, output the remainder
+% modulo $64$, plus $128$ as a continuation byte, then output the
+% quotient (which is in the range $[0,31]$), shifted by $192$. In the
+% next range, $[2048,65535]$, split the character code into residue
+% and quotient modulo $64$, output the residue as a first continuation
+% byte, then repeat; this leaves us with a quotient in the range
+% $[0,15]$, which we output shifted by $224$. The last range,
+% $[65536,1114111]$, follows the same pattern: once we realize that
+% dividing twice by $64$ leaves us with a number larger than $15$, we
+% repeat, producing a last continuation byte, and offset the quotient
+% by $240$ for the leading byte.
+%
+% How is that implemented? \cs{@@_encode_utf_viii_loop:wwnnw} takes
+% successive quotients as its first argument, the quotient from the
+% previous step as its second argument (except in step~$1$), the bound
+% for quotients that trigger one more step or not, and finally the
+% offset used if this step should produce the leading byte. Leading
+% bytes can be in the ranges $[0,127]$, $[192,223]$, $[224,239]$, and
+% $[240,247]$ (really, that last limit should be $244$ because Unicode
+% stops at the code point $1114111$). At each step, if the quotient
+% |#1| is less than the limit |#3| for that range, output the leading
+% byte (|#1| shifted by |#4|) and stop. Otherwise, we need one more
+% step: use the quotient of |#1| by $64$, and |#1| as arguments for
+% the looping auxiliary, and output the continuation byte
+% corresponding to the remainder $|#2|-64|#1|+128$. The bizarre
+% construction |\c_minus_one + \c_zero *| removes the spurious initial
+% continuation byte (better methods welcome).
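+%
+% As a sanity check on this scheme, take U+00E9 ($233$): one step
+% gives the continuation byte $233 \bmod 64 + 128 = 169 = \hexnum{A9}$
+% and the quotient $3$, which is below $32$, so the leading byte is
+% $3 + 192 = 195 = \hexnum{C3}$, producing the usual \hexnum{C3}
+% \hexnum{A9}. Similarly U+20AC ($8364$) produces \hexnum{E2}
+% \hexnum{82} \hexnum{AC}.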
+% \begin{macrocode}
+\cs_new_protected_nopar:cpn { @@_convert_encode_utf8: }
+ { \@@_convert_gmap_internal:N \@@_encode_utf_viii_char:n }
+\cs_new:Npn \@@_encode_utf_viii_char:n #1
+ {
+ \@@_encode_utf_viii_loop:wwnnw #1 ; \c_minus_one + \c_zero * ;
+ { 128 } { \c_zero }
+ { 32 } { 192 }
+ { 16 } { 224 }
+ { 8 } { 240 }
+ \q_stop
+ }
+\cs_new:Npn \@@_encode_utf_viii_loop:wwnnw #1; #2; #3#4 #5 \q_stop
+ {
+ \if_int_compare:w #1 < #3 \exp_stop_f:
+ \@@_output_byte:n { #1 + #4 }
+ \exp_after:wN \use_none_delimit_by_q_stop:w
+ \fi:
+ \exp_after:wN \@@_encode_utf_viii_loop:wwnnw
+ \__int_value:w \int_div_truncate:nn {#1} {64} ; #1 ;
+ #5 \q_stop
+ \@@_output_byte:n { #2 - 64 * ( #1 - \c_two ) }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{variable}
+% {
+% \l_@@_missing_flag ,
+% \l_@@_extra_flag ,
+% \l_@@_overlong_flag ,
+% \l_@@_overflow_flag ,
+% }
+% When decoding a string that is purportedly in the \textsc{utf-8}
+% encoding, four different errors can occur, signalled by a specific
+% flag for each (we define those flags using \cs{flag_clear_new:n}
+% rather than \cs{flag_new:n}, because they are shared with other
+% encoding definition files).
+% \begin{itemize}
+% \item \enquote{Missing continuation byte}: a leading byte is not
+% followed by the right number of continuation bytes.
+% \item \enquote{Extra continuation byte}: a continuation byte
+% appears where it was not expected, \emph{i.e.}, not after an
+% appropriate leading byte.
+% \item \enquote{Overlong}: a Unicode character is expressed using
+% more bytes than necessary, for instance, \hexnum{C0}\hexnum{80}
+% for the code point $0$, instead of a single null byte.
+% \item \enquote{Overflow}: this occurs when decoding produces
+% Unicode code points greater than $1114111$.
+% \end{itemize}
+% We only raise one \LaTeX3 error message, combining all the errors
+% which occurred. In the short message, the leading comma must be
+% removed to get a grammatically correct sentence. In the long text,
+% first remind the user what a correct \textsc{utf-8} string should
+% look like, then add error-specific information.
+% \begin{macrocode}
+\flag_clear_new:n { str_missing }
+\flag_clear_new:n { str_extra }
+\flag_clear_new:n { str_overlong }
+\flag_clear_new:n { str_overflow }
+\__msg_kernel_new:nnnn { str } { utf8-decode }
+ {
+ Invalid~UTF-8~string: \exp_last_unbraced:Nf \use_none:n
+ \@@_if_flag_times:nT { str_missing } { ,~missing~continuation~byte }
+ \@@_if_flag_times:nT { str_extra } { ,~extra~continuation~byte }
+ \@@_if_flag_times:nT { str_overlong } { ,~overlong~form }
+ \@@_if_flag_times:nT { str_overflow } { ,~code~point~too~large }
+ .
+ }
+ {
+    In~the~UTF-8~encoding,~each~Unicode~character~consists~of~
+ 1~to~4~bytes,~with~the~following~bit~pattern: \\
+ \iow_indent:n
+ {
+ Code~point~\ \ \ \ <~128:~0xxxxxxx \\
+ Code~point~\ \ \ <~2048:~110xxxxx~10xxxxxx \\
+ Code~point~\ \ <~65536:~1110xxxx~10xxxxxx~10xxxxxx \\
+ Code~point~ <~1114112:~11110xxx~10xxxxxx~10xxxxxx~10xxxxxx \\
+ }
+ Bytes~of~the~form~10xxxxxx~are~called~continuation~bytes.
+ \flag_if_raised:nT { str_missing }
+ {
+ \\\\
+ A~leading~byte~(in~the~range~[192,255])~was~not~followed~by~
+ the~appropriate~number~of~continuation~bytes.
+ }
+ \flag_if_raised:nT { str_extra }
+ {
+ \\\\
+ LaTeX~came~across~a~continuation~byte~when~it~was~not~expected.
+ }
+ \flag_if_raised:nT { str_overlong }
+ {
+ \\\\
+ Every~Unicode~code~point~must~be~expressed~in~the~shortest~
+ possible~form.~For~instance,~'0xC0'~'0x83'~is~not~a~valid~
+ representation~for~the~code~point~3.
+ }
+ \flag_if_raised:nT { str_overflow }
+ {
+ \\\\
+ Unicode~limits~code~points~to~the~range~[0,1114111].
+ }
+ }
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{macro}[int]{\@@_convert_decode_utf8:}
+% \begin{macro}[aux, rEXP]
+% {
+% \@@_decode_utf_viii_start:N,
+% \@@_decode_utf_viii_continuation:wwN,
+% \@@_decode_utf_viii_aux:wNnnwN
+% }
+% \begin{macro}[aux, rEXP]
+% {\@@_decode_utf_viii_overflow:w, \@@_decode_utf_viii_end:}
+% Decoding is significantly harder than encoding. As before, lower
+% some flags, which are tested at the end (in bulk, to trigger at most
+% one \LaTeX3 error, as explained above). We expect successive
+% multi-byte sequences of the form \meta{start byte}
+% \meta{continuation bytes}. The \texttt{_start} auxiliary tests the
+% first byte:
+% \begin{itemize}
+% \item $[0,\hexnum{7F}]$: the byte stands alone, and is converted
+% to its own character code;
+% \item $[\hexnum{80}, \hexnum{BF}]$: unexpected continuation byte,
+% raise the appropriate flag, and convert that byte to the
+% replacement character \hexnum{FFFD};
+% \item $[\hexnum{C0}, \hexnum{FF}]$: this byte should be followed
+% by some continuation byte(s).
+% \end{itemize}
+% In the first two cases, \cs{use_none_delimit_by_q_stop:w} removes
+% data that only the third case requires, namely the limits of ranges
+% of Unicode characters which can be expressed with $1$, $2$, $3$, or
+% $4$ bytes.
+%
+% We can now concentrate on the multi-byte case and the
+% \texttt{_continuation} auxiliary. We expect |#3| to be in the range
+% $[\hexnum{80}, \hexnum{BF}]$. The test for this goes as follows: if
+% the character code is less than \hexnum{80}, we compare it to
+% $-\hexnum{C0}$, yielding \texttt{false}; otherwise to \hexnum{C0},
+% yielding \texttt{true} in the range $[\hexnum{80}, \hexnum{BF}]$ and
+% \texttt{false} otherwise. If we find that the byte is not a
+% continuation range, stop the current slew of bytes, output the
+% replacement character, and continue parsing with the \texttt{_start}
+% auxiliary, starting at the byte we just tested. Once we know that
+% the byte is a continuation byte, leave it behind us in the input
+% stream, compute what code point the bytes read so far would produce,
+% and feed that number to the \texttt{_aux} function.
+%
+% The \texttt{_aux} function tests whether we should look for more
+% continuation bytes or not. If the number it receives as |#1| is less
+% than the maximum |#4| for the current range, then we are done: check
+% for an overlong representation by comparing |#1| with the maximum
+% |#3| for the previous range. Otherwise, we call the
+% \texttt{_continuation} auxiliary again, after shifting the
+% \enquote{current code point} by |#4| (maximum from the range we just
+% checked).
+%
+% Two additional tests are needed: if we reach the end of the list of
+% range maxima and we are still not done, then we are faced with an
+% overflow. Clean up, and again insert the code point \hexnum{FFFD}
+% for the replacement character. Also, every time we read a byte, we
+% need to check whether we reached the end of the string. In a correct
+% \textsc{utf-8} string, this happens automatically when the
+% \texttt{_start} auxiliary leaves its first argument in the input
+% stream: the end-marker begins with \cs{__prg_break:}, which ends
+% the loop. On the other hand, if the end is reached when looking for
+% a continuation byte, the \cs{use_none:n} |#3| construction removes
+% the first token from the end-marker, and leaves the \texttt{_end}
+% auxiliary, which raises the appropriate error flag before ending the
+% mapping.
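+%
+% For instance, decoding the bytes \hexnum{C3} \hexnum{A9}: the start
+% byte yields $\hexnum{C3} - \hexnum{C0} = 3$, the continuation byte
+% gives $3 \times 64 + \hexnum{A9} - \hexnum{80} = 233$, which is less
+% than the two-byte limit \hexnum{800} and not less than the previous
+% limit \hexnum{80}, hence the code point U+00E9 is accepted as is.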
+% \begin{macrocode}
+\cs_new_protected_nopar:cpn { @@_convert_decode_utf8: }
+ {
+ \flag_clear:n { str_error }
+ \flag_clear:n { str_missing }
+ \flag_clear:n { str_extra }
+ \flag_clear:n { str_overlong }
+ \flag_clear:n { str_overflow }
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN \@@_decode_utf_viii_start:N \g_@@_result_tl
+ { \__prg_break: \@@_decode_utf_viii_end: }
+ \__prg_break_point:
+ }
+ \@@_if_flag_error:nnx { str_error } { utf8-decode } { }
+ }
+\cs_new:Npn \@@_decode_utf_viii_start:N #1
+ {
+ #1
+ \if_int_compare:w `#1 < "C0 \exp_stop_f:
+ \s__tl
+ \if_int_compare:w `#1 < "80 \exp_stop_f:
+ \__int_value:w `#1
+ \else:
+ \flag_raise:n { str_extra }
+ \flag_raise:n { str_error }
+ \int_use:N \c_@@_replacement_char_int
+ \fi:
+ \else:
+ \exp_after:wN \@@_decode_utf_viii_continuation:wwN
+ \int_use:N \__int_eval:w `#1 - "C0 \exp_after:wN \__int_eval_end:
+ \fi:
+ \s__tl
+ \use_none_delimit_by_q_stop:w {"80} {"800} {"10000} {"110000} \q_stop
+ \@@_decode_utf_viii_start:N
+ }
+\cs_new:Npn \@@_decode_utf_viii_continuation:wwN
+ #1 \s__tl #2 \@@_decode_utf_viii_start:N #3
+ {
+ \use_none:n #3
+ \if_int_compare:w `#3 <
+ \if_int_compare:w `#3 < "80 \exp_stop_f: - \fi:
+ "C0 \exp_stop_f:
+ #3
+ \exp_after:wN \@@_decode_utf_viii_aux:wNnnwN
+ \int_use:N \__int_eval:w
+ #1 * "40 + `#3 - "80
+ \exp_after:wN \__int_eval_end:
+ \else:
+ \s__tl
+ \flag_raise:n { str_missing }
+ \flag_raise:n { str_error }
+ \int_use:N \c_@@_replacement_char_int
+ \fi:
+ \s__tl
+ #2
+ \@@_decode_utf_viii_start:N #3
+ }
+\cs_new:Npn \@@_decode_utf_viii_aux:wNnnwN
+ #1 \s__tl #2#3#4 #5 \@@_decode_utf_viii_start:N #6
+ {
+ \if_int_compare:w #1 < #4 \exp_stop_f:
+ \s__tl
+ \if_int_compare:w #1 < #3 \exp_stop_f:
+ \flag_raise:n { str_overlong }
+ \flag_raise:n { str_error }
+ \int_use:N \c_@@_replacement_char_int
+ \else:
+ #1
+ \fi:
+ \else:
+ \if_meaning:w \q_stop #5
+ \@@_decode_utf_viii_overflow:w #1
+ \fi:
+ \exp_after:wN \@@_decode_utf_viii_continuation:wwN
+ \int_use:N \__int_eval:w #1 - #4 \exp_after:wN \__int_eval_end:
+ \fi:
+ \s__tl
+ #2 {#4} #5
+ \@@_decode_utf_viii_start:N
+ }
+\cs_new:Npn \@@_decode_utf_viii_overflow:w #1 \fi: #2 \fi:
+ {
+ \fi: \fi:
+ \flag_raise:n { str_overflow }
+ \flag_raise:n { str_error }
+ \int_use:N \c_@@_replacement_char_int
+ }
+\cs_new_nopar:Npn \@@_decode_utf_viii_end:
+ {
+ \s__tl
+ \flag_raise:n { str_missing }
+ \flag_raise:n { str_error }
+ \int_use:N \c_@@_replacement_char_int \s__tl
+ \__prg_break:
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macrocode}
+%</utf8>
+% \end{macrocode}
+%
+% \subsubsection{\textsc{utf-16} support}
+%
+% The definitions are done in a category code regime where the bytes
+% $254$ and $255$ used by the byte order mark have catcode~$12$.
+% \begin{macrocode}
+%<*utf16>
+\group_begin:
+ \char_set_catcode_other:N \^^fe
+ \char_set_catcode_other:N \^^ff
+% \end{macrocode}
+%
+% \begin{macro}[int]
+% {
+% \@@_convert_encode_utf16: ,
+% \@@_convert_encode_utf16be: ,
+% \@@_convert_encode_utf16le: ,
+% }
+% \begin{macro}[aux, rEXP]
+% {
+% \@@_encode_utf_xvi_aux:N ,
+% \@@_encode_utf_xvi_char:n ,
+% }
+% When the endianness is not specified, it is big-endian by default,
+% and we add a byte-order mark. Convert characters one by one in a
+% loop, with different behaviours depending on the character code.
+% \begin{itemize}
+% \item $[0, \hexnum{D7FF}]$: converted to two bytes;
+% \item $[\hexnum{D800}, \hexnum{DFFF}]$ are used as surrogates:
+% they cannot be converted and are replaced by the replacement
+% character;
+% \item $[\hexnum{E000}, \hexnum{FFFF}]$: converted to two bytes;
+% \item $[\hexnum{10000}, \hexnum{10FFFF}]$: converted to a pair of
+% surrogates, each two bytes. The magic \hexnum{D7C0} is
+% $\hexnum{D800}-\hexnum{10000}/\hexnum{400}$.
+% \end{itemize}
+% For the duration of this operation, \cs{@@_tmp:w} is defined as a
+% function to convert a number in the range $[0, \hexnum{FFFF}]$ to a
+% pair of bytes (either big endian or little endian), by feeding the
+% quotient of the division of |#1| by \hexnum{100}, followed by |#1|
+% to \cs{@@_encode_utf_xvi_be:nn} or its \texttt{le} analog: those
+% compute the remainder, and output two bytes for the quotient and
+% remainder.
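+%
+% For instance, U+1F600 is split as
+% $\lfloor \hexnum{1F600} / \hexnum{400} \rfloor + \hexnum{D7C0}
+% = \hexnum{7D} + \hexnum{D7C0} = \hexnum{D83D}$ for the lead
+% surrogate and
+% $\hexnum{1F600} \bmod \hexnum{400} + \hexnum{DC00} = \hexnum{DE00}$
+% for the trail surrogate, each output as two bytes.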
+% \begin{macrocode}
+ \cs_new_protected_nopar:cpn { @@_convert_encode_utf16: }
+ {
+ \@@_encode_utf_xvi_aux:N \@@_output_byte_pair_be:n
+ \tl_gput_left:Nx \g_@@_result_tl { ^^fe ^^ff }
+ }
+ \cs_new_protected_nopar:cpn { @@_convert_encode_utf16be: }
+ { \@@_encode_utf_xvi_aux:N \@@_output_byte_pair_be:n }
+ \cs_new_protected_nopar:cpn { @@_convert_encode_utf16le: }
+ { \@@_encode_utf_xvi_aux:N \@@_output_byte_pair_le:n }
+ \cs_new_protected:Npn \@@_encode_utf_xvi_aux:N #1
+ {
+ \flag_clear:n { str_error }
+ \cs_set_eq:NN \@@_tmp:w #1
+ \@@_convert_gmap_internal:N \@@_encode_utf_xvi_char:n
+ \@@_if_flag_error:nnx { str_error } { utf16-encode } { }
+ }
+ \cs_new:Npn \@@_encode_utf_xvi_char:n #1
+ {
+ \if_int_compare:w #1 < "D800 \exp_stop_f:
+ \@@_tmp:w {#1}
+ \else:
+ \if_int_compare:w #1 < "10000 \exp_stop_f:
+ \if_int_compare:w #1 < "E000 \exp_stop_f:
+ \flag_raise:n { str_error }
+ \@@_tmp:w { \c_@@_replacement_char_int }
+ \else:
+ \@@_tmp:w {#1}
+ \fi:
+ \else:
+ \exp_args:Nf \@@_tmp:w { \int_div_truncate:nn {#1} {"400} + "D7C0 }
+ \exp_args:Nf \@@_tmp:w { \int_mod:nn {#1} {"400} + "DC00 }
+ \fi:
+ \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{variable}
+% {
+% \l_@@_missing_flag ,
+% \l_@@_extra_flag ,
+% \l_@@_end_flag ,
+% }
+% When encoding a Unicode string to \textsc{utf-16}, only one error
+% can occur: code points in the range $[\hexnum{D800},
+% \hexnum{DFFF}]$, corresponding to surrogates, cannot be encoded. We
+% use the all-purpose flag \texttt{str_error} to signal that error.
+%
+% When decoding a Unicode string which is purportedly in
+% \textsc{utf-16}, three errors can occur: a missing trail surrogate,
+% an unexpected trail surrogate, and a string containing an odd number
+% of bytes.
+% \begin{macrocode}
+ \flag_clear_new:n { str_missing }
+ \flag_clear_new:n { str_extra }
+ \flag_clear_new:n { str_end }
+ \__msg_kernel_new:nnnn { str } { utf16-encode }
+ { Unicode~string~cannot~be~expressed~in~UTF-16:~surrogate. }
+ {
+ Surrogate~code~points~(in~the~range~[U+D800,~U+DFFF])~
+ can~be~expressed~in~the~UTF-8~and~UTF-32~encodings,~
+ but~not~in~the~UTF-16~encoding.
+ }
+ \__msg_kernel_new:nnnn { str } { utf16-decode }
+ {
+ Invalid~UTF-16~string: \exp_last_unbraced:Nf \use_none:n
+ \@@_if_flag_times:nT { str_missing } { ,~missing~trail~surrogate }
+ \@@_if_flag_times:nT { str_extra } { ,~extra~trail~surrogate }
+ \@@_if_flag_times:nT { str_end } { ,~odd~number~of~bytes }
+ .
+ }
+ {
+ In~the~UTF-16~encoding,~each~Unicode~character~is~encoded~as~
+ 2~or~4~bytes: \\
+ \iow_indent:n
+ {
+ Code~point~in~[U+0000,~U+D7FF]:~two~bytes \\
+ Code~point~in~[U+D800,~U+DFFF]:~illegal \\
+ Code~point~in~[U+E000,~U+FFFF]:~two~bytes \\
+ Code~point~in~[U+10000,~U+10FFFF]:~
+ a~lead~surrogate~and~a~trail~surrogate \\
+ }
+ Lead~surrogates~are~pairs~of~bytes~in~the~range~[0xD800,~0xDBFF],~
+ and~trail~surrogates~are~in~the~range~[0xDC00,~0xDFFF].
+ \flag_if_raised:nT { str_missing }
+ {
+ \\\\
+ A~lead~surrogate~was~not~followed~by~a~trail~surrogate.
+ }
+ \flag_if_raised:nT { str_extra }
+ {
+ \\\\
+ LaTeX~came~across~a~trail~surrogate~when~it~was~not~expected.
+ }
+ \flag_if_raised:nT { str_end }
+ {
+ \\\\
+ The~string~contained~an~odd~number~of~bytes.~This~is~invalid:~
+ the~basic~code~unit~for~UTF-16~is~16~bits~(2~bytes).
+ }
+ }
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{macro}[int]
+% {
+% \@@_convert_decode_utf16: ,
+% \@@_convert_decode_utf16be: ,
+% \@@_convert_decode_utf16le: ,
+% }
+% \begin{macro}[aux]{\@@_decode_utf_xvi_bom:NN, \@@_decode_utf_xvi:Nw}
+% As for \textsc{utf-8}, decoding \textsc{utf-16} is harder than
+% encoding it. If the endianness is unknown, check the first two
+% bytes: if those are \hexnum{FE} and \hexnum{FF} in either order,
+% remove them and use the corresponding endianness, otherwise assume
+% big-endianness. The three endianness cases are based on a common
+% auxiliary whose first argument is $1$ for big-endian and $2$ for
+% little-endian, and whose second argument, delimited by the scan mark
+% \cs{s__stop}, is expanded once (the string may be long; passing
+% \cs{g_@@_result_tl} as an argument before expansion is cheaper).
+%
+% The \cs{@@_decode_utf_xvi:Nw} function defines \cs{@@_tmp:w} to
+% take two arguments and return the character code of the first one if
+% the string is big-endian, and the second one if the string is
+% little-endian, then loops over the string using
+% \cs{@@_decode_utf_xvi_pair:NN} described below.
+% \begin{macrocode}
+ \cs_new_protected_nopar:cpn { @@_convert_decode_utf16be: }
+ { \@@_decode_utf_xvi:Nw 1 \g_@@_result_tl \s__stop }
+ \cs_new_protected_nopar:cpn { @@_convert_decode_utf16le: }
+ { \@@_decode_utf_xvi:Nw 2 \g_@@_result_tl \s__stop }
+ \cs_new_protected_nopar:cpn { @@_convert_decode_utf16: }
+ {
+ \exp_after:wN \@@_decode_utf_xvi_bom:NN
+ \g_@@_result_tl \s__stop \s__stop \s__stop
+ }
+ \cs_new_protected:Npn \@@_decode_utf_xvi_bom:NN #1#2
+ {
+ \str_if_eq_x:nnTF { #1#2 } { ^^ff ^^fe }
+ { \@@_decode_utf_xvi:Nw 2 }
+ {
+ \str_if_eq_x:nnTF { #1#2 } { ^^fe ^^ff }
+ { \@@_decode_utf_xvi:Nw 1 }
+ { \@@_decode_utf_xvi:Nw 1 #1#2 }
+ }
+ }
+ \cs_new_protected:Npn \@@_decode_utf_xvi:Nw #1#2 \s__stop
+ {
+ \flag_clear:n { str_error }
+ \flag_clear:n { str_missing }
+ \flag_clear:n { str_extra }
+ \flag_clear:n { str_end }
+ \cs_set:Npn \@@_tmp:w ##1 ##2 { ` ## #1 }
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN \@@_decode_utf_xvi_pair:NN
+ #2 \q_nil \q_nil
+ \__prg_break_point:
+ }
+ \@@_if_flag_error:nnx { str_error } { utf16-decode } { }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[aux, rEXP]
+% {
+% \@@_decode_utf_xvi_pair:NN ,
+% \@@_decode_utf_xvi_quad:NNwNN ,
+% \@@_decode_utf_xvi_pair_end:Nw ,
+% }
+% \begin{macro}[aux, rEXP]
+% {
+% \@@_decode_utf_xvi_error:nNN ,
+% \@@_decode_utf_xvi_extra:NNw ,
+% }
+% Bytes are read two at a time. At this stage, |\@@_tmp:w #1#2|
+% expands to the character code of the most significant byte, and we
+% distinguish cases depending on which range it lies in:
+% \begin{itemize}
+% \item $[\hexnum{D8}, \hexnum{DB}]$ signals a lead surrogate, and
+% the integer expression yields $1$ (\eTeX{} rounds ties away from
+% zero);
+% \item $[\hexnum{DC}, \hexnum{DF}]$ signals a trail surrogate,
+% unexpected here, and the integer expression yields $2$;
+% \item any other value signals a code point in the Basic
+% Multilingual Plane, which stands for itself, and the
+% \cs{if_case:w} construction expands to nothing (cases other than
+% $1$ or $2$), leaving the relevant material in the input stream,
+% followed by another call to the \texttt{_pair} auxiliary.
+% \end{itemize}
+% The case of a lead surrogate is treated by the \texttt{_quad}
+% auxiliary, whose arguments |#1|, |#2|, |#4| and |#5| are the four
+% bytes. We expect the most significant byte of |#4#5| to be in the
+% range $[\hexnum{DC}, \hexnum{DF}]$ (trail surrogate). The test is
+% similar to the test used for continuation bytes in the
+% \textsc{utf-8} decoding functions. In the case where |#4#5| is
+% indeed a trail surrogate, leave |#1#2#4#5| \cs{s__tl}
+% \meta{code~point} \cs{s__tl}, and remove the pair |#4#5| before
+% looping with \cs{@@_decode_utf_xvi_pair:NN}. Otherwise, of course,
+% complain about the missing surrogate.
+%
+% The magic number \hexnum{D7F7} is such that
+% $\hexnum{D7F7}*\hexnum{400} = \hexnum{D800}*\hexnum{400} +
+% \hexnum{DC00} - \hexnum{10000}$.
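+%
+% As a check, for U+1F600 (lead surrogate \hexnum{D83D}, trail
+% surrogate \hexnum{DE00}) the formula gives
+% $(\hexnum{D83D} - \hexnum{D7F7}) \times \hexnum{400} + \hexnum{DE00}
+% = \hexnum{11800} + \hexnum{DE00} = \hexnum{1F600}$, as expected.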
+%
+% Every time we read a pair of bytes, we test for the end-marker
+% \cs{q_nil}. When reaching the end, we additionally check that the
+% string had an even length. Also, if the end is reached when
+% expecting a trail surrogate, we treat that as a missing surrogate.
+% \begin{macrocode}
+ \cs_new:Npn \@@_decode_utf_xvi_pair:NN #1#2
+ {
+ \if_meaning:w \q_nil #2
+ \@@_decode_utf_xvi_pair_end:Nw #1
+ \fi:
+ \if_case:w
+ \__int_eval:w ( \@@_tmp:w #1#2 - "D6 ) / \c_four \__int_eval_end:
+ \or: \exp_after:wN \@@_decode_utf_xvi_quad:NNwNN
+ \or: \exp_after:wN \@@_decode_utf_xvi_extra:NNw
+ \fi:
+ #1#2 \s__tl
+ \int_eval:n { "100 * \@@_tmp:w #1#2 + \@@_tmp:w #2#1 } \s__tl
+ \@@_decode_utf_xvi_pair:NN
+ }
+ \cs_new:Npn \@@_decode_utf_xvi_quad:NNwNN
+ #1#2 #3 \@@_decode_utf_xvi_pair:NN #4#5
+ {
+ \if_meaning:w \q_nil #5
+ \@@_decode_utf_xvi_error:nNN { missing } #1#2
+ \@@_decode_utf_xvi_pair_end:Nw #4
+ \fi:
+ \if_int_compare:w
+ \if_int_compare:w \@@_tmp:w #4#5 < "DC \exp_stop_f:
+ \c_zero = \c_one
+ \else:
+ \@@_tmp:w #4#5 < "E0 \exp_stop_f:
+ \fi:
+ #1 #2 #4 #5 \s__tl
+ \int_eval:n
+ {
+ ( "100 * \@@_tmp:w #1#2 + \@@_tmp:w #2#1 - "D7F7 ) * "400
+ + "100 * \@@_tmp:w #4#5 + \@@_tmp:w #5#4
+ }
+ \s__tl
+ \exp_after:wN \use_i:nnn
+ \else:
+ \@@_decode_utf_xvi_error:nNN { missing } #1#2
+ \fi:
+ \@@_decode_utf_xvi_pair:NN #4#5
+ }
+ \cs_new:Npn \@@_decode_utf_xvi_pair_end:Nw #1 \fi:
+ {
+ \fi:
+ \if_meaning:w \q_nil #1
+ \else:
+ \@@_decode_utf_xvi_error:nNN { end } #1 \prg_do_nothing:
+ \fi:
+ \__prg_break:
+ }
+ \cs_new:Npn \@@_decode_utf_xvi_extra:NNw #1#2 \s__tl #3 \s__tl
+ { \@@_decode_utf_xvi_error:nNN { extra } #1#2 }
+ \cs_new:Npn \@@_decode_utf_xvi_error:nNN #1#2#3
+ {
+ \flag_raise:n { str_error }
+ \flag_raise:n { str_#1 }
+ #2 #3 \s__tl
+ \int_use:N \c_@@_replacement_char_int \s__tl
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% Restore the original catcodes of bytes $254$ and $255$.
+% \begin{macrocode}
+\group_end:
+%</utf16>
+% \end{macrocode}
+%
+% \subsubsection{\textsc{utf-32} support}
+%
+% The definitions are done in a category code regime where the bytes
+% $0$, $254$ and $255$ used by the byte order mark have catcode
+% \enquote{other}.
+% \begin{macrocode}
+%<*utf32>
+\group_begin:
+ \char_set_catcode_other:N \^^00
+ \char_set_catcode_other:N \^^fe
+ \char_set_catcode_other:N \^^ff
+% \end{macrocode}
+%
+% \begin{macro}[int]
+% {
+% \@@_convert_encode_utf32: ,
+% \@@_convert_encode_utf32be: ,
+% \@@_convert_encode_utf32le: ,
+% }
+% \begin{macro}[aux, rEXP]
+% {
+% \@@_encode_utf_xxxii_be:n ,
+% \@@_encode_utf_xxxii_be_aux:nn ,
+% \@@_encode_utf_xxxii_le:n ,
+% \@@_encode_utf_xxxii_le_aux:nn ,
+% }
+% Convert each integer in the comma-list \cs{g_@@_result_tl} to a
+% sequence of four bytes. The functions for big-endian and
+% little-endian encodings are very similar, but the
+% \cs{@@_output_byte:n} instructions are reversed.
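+%
+% For instance, U+1F600 is output big-endian as \hexnum{00}
+% \hexnum{01} \hexnum{F6} \hexnum{00}: the quotient by \hexnum{100}
+% is \hexnum{1F6}, output as a null byte followed by a big-endian
+% byte pair, and the remainder \hexnum{00} is output last.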
+% \begin{macrocode}
+ \cs_new_protected_nopar:cpn { @@_convert_encode_utf32: }
+ {
+ \@@_convert_gmap_internal:N \@@_encode_utf_xxxii_be:n
+ \tl_gput_left:Nx \g_@@_result_tl { ^^00 ^^00 ^^fe ^^ff }
+ }
+ \cs_new_protected_nopar:cpn { @@_convert_encode_utf32be: }
+ { \@@_convert_gmap_internal:N \@@_encode_utf_xxxii_be:n }
+ \cs_new_protected_nopar:cpn { @@_convert_encode_utf32le: }
+ { \@@_convert_gmap_internal:N \@@_encode_utf_xxxii_le:n }
+ \cs_new:Npn \@@_encode_utf_xxxii_be:n #1
+ {
+ \exp_args:Nf \@@_encode_utf_xxxii_be_aux:nn
+ { \int_div_truncate:nn {#1} { "100 } } {#1}
+ }
+ \cs_new:Npn \@@_encode_utf_xxxii_be_aux:nn #1#2
+ {
+ ^^00
+ \@@_output_byte_pair_be:n {#1}
+ \@@_output_byte:n { #2 - #1 * "100 }
+ }
+ \cs_new:Npn \@@_encode_utf_xxxii_le:n #1
+ {
+ \exp_args:Nf \@@_encode_utf_xxxii_le_aux:nn
+ { \int_div_truncate:nn {#1} { "100 } } {#1}
+ }
+ \cs_new:Npn \@@_encode_utf_xxxii_le_aux:nn #1#2
+ {
+ \@@_output_byte:n { #2 - #1 * "100 }
+ \@@_output_byte_pair_le:n {#1}
+ ^^00
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{variable}{str_overflow, str_end}
+% There can be no error when encoding in \textsc{utf-32}. When
+% decoding, the string may not have length $4n$, or it may contain
+% code points larger than \hexnum{10FFFF}. The latter case often
+% happens if the encoding was in fact not \textsc{utf-32}, because
+% most arbitrary strings are not valid in \textsc{utf-32}.
+% \begin{macrocode}
+ \flag_clear_new:n { str_overflow }
+ \flag_clear_new:n { str_end }
+ \__msg_kernel_new:nnnn { str } { utf32-decode }
+ {
+ Invalid~UTF-32~string: \exp_last_unbraced:Nf \use_none:n
+ \@@_if_flag_times:nT { str_overflow } { ,~code~point~too~large }
+ \@@_if_flag_times:nT { str_end } { ,~truncated~string }
+ .
+ }
+ {
+ In~the~UTF-32~encoding,~every~Unicode~character~
+ (in~the~range~[U+0000,~U+10FFFF])~is~encoded~as~4~bytes.
+ \flag_if_raised:nT { str_overflow }
+ {
+ \\\\
+ LaTeX~came~across~a~code~point~larger~than~1114111,~
+ the~maximum~code~point~defined~by~Unicode.~
+ Perhaps~the~string~was~not~encoded~in~the~UTF-32~encoding?
+ }
+ \flag_if_raised:nT { str_end }
+ {
+ \\\\
+ The~length~of~the~string~is~not~a~multiple~of~4.~
+ Perhaps~the~string~was~truncated?
+ }
+ }
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{macro}[int]
+% {
+% \@@_convert_decode_utf32: ,
+% \@@_convert_decode_utf32be: ,
+% \@@_convert_decode_utf32le: ,
+% }
+% \begin{macro}[aux]
+% {\@@_decode_utf_xxxii_bom:NNNN, \@@_decode_utf_xxxii:Nw}
+% \begin{macro}[aux, rEXP]
+% {\@@_decode_utf_xxxii_loop:NNNN, \@@_decode_utf_xxxii_end:w}
+%
+% The structure is similar to \textsc{utf-16} decoding functions. If
+% the endianness is not given, test the first $4$ bytes of the string
+% (possibly \cs{s__stop} if the string is too short) for the presence
+% of a byte-order mark. If there is a byte-order mark, use that
+% endianness, and remove the $4$ bytes, otherwise default to
+% big-endian, and leave the $4$ bytes in place. The
+%   \cs{@@_decode_utf_xxxii:Nw} auxiliary receives $1$ or $2$ as its
+% first argument indicating endianness, and the string to convert as
+% its second argument (expanded or not). It sets \cs{@@_tmp:w} to
+% expand to the character code of either of its two arguments
+% depending on endianness, then triggers the \texttt{_loop} auxiliary
+% inside an \texttt{x}-expanding assignment to \cs{g_@@_result_tl}.
+%
+% The \texttt{_loop} auxiliary first checks for the end-of-string
+% marker \cs{s__stop}, calling the \texttt{_end} auxiliary if
+% appropriate. Otherwise, leave the \meta{4~bytes} \cs{s__tl} behind,
+% then check that the code point is not overflowing: the leading byte
+% must be $0$, and the following byte at most $16$.
+%
+% In the ending code, we check that there remains no byte: there
+% should be nothing left until the first \cs{s__stop}. Break the map.
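+%
+% For instance, in the big-endian case, the four bytes \hexnum{00}
+% \hexnum{01} \hexnum{F6} \hexnum{00} pass both checks and produce
+% $1 \times \hexnum{10000} + \hexnum{F6} \times \hexnum{100}
+% + \hexnum{00} = \hexnum{1F600}$.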
+% \begin{macrocode}
+ \cs_new_protected_nopar:cpn { @@_convert_decode_utf32be: }
+ { \@@_decode_utf_xxxii:Nw 1 \g_@@_result_tl \s__stop }
+ \cs_new_protected_nopar:cpn { @@_convert_decode_utf32le: }
+ { \@@_decode_utf_xxxii:Nw 2 \g_@@_result_tl \s__stop }
+ \cs_new_protected_nopar:cpn { @@_convert_decode_utf32: }
+ {
+ \exp_after:wN \@@_decode_utf_xxxii_bom:NNNN \g_@@_result_tl
+ \s__stop \s__stop \s__stop \s__stop \s__stop
+ }
+ \cs_new_protected:Npn \@@_decode_utf_xxxii_bom:NNNN #1#2#3#4
+ {
+ \str_if_eq_x:nnTF { #1#2#3#4 } { ^^ff ^^fe ^^00 ^^00 }
+ { \@@_decode_utf_xxxii:Nw 2 }
+ {
+ \str_if_eq_x:nnTF { #1#2#3#4 } { ^^00 ^^00 ^^fe ^^ff }
+ { \@@_decode_utf_xxxii:Nw 1 }
+ { \@@_decode_utf_xxxii:Nw 1 #1#2#3#4 }
+ }
+ }
+ \cs_new_protected:Npn \@@_decode_utf_xxxii:Nw #1#2 \s__stop
+ {
+ \flag_clear:n { str_overflow }
+ \flag_clear:n { str_end }
+ \flag_clear:n { str_error }
+ \cs_set:Npn \@@_tmp:w ##1 ##2 { ` ## #1 }
+ \tl_gset:Nx \g_@@_result_tl
+ {
+ \exp_after:wN \@@_decode_utf_xxxii_loop:NNNN
+ #2 \s__stop \s__stop \s__stop \s__stop
+ \__prg_break_point:
+ }
+ \@@_if_flag_error:nnx { str_error } { utf32-decode } { }
+ }
+ \cs_new:Npn \@@_decode_utf_xxxii_loop:NNNN #1#2#3#4
+ {
+ \if_meaning:w \s__stop #4
+ \exp_after:wN \@@_decode_utf_xxxii_end:w
+ \fi:
+ #1#2#3#4 \s__tl
+ \if_int_compare:w \@@_tmp:w #1#4 > \c_zero
+ \flag_raise:n { str_overflow }
+ \flag_raise:n { str_error }
+ \int_use:N \c_@@_replacement_char_int
+ \else:
+ \if_int_compare:w \@@_tmp:w #2#3 > \c_sixteen
+ \flag_raise:n { str_overflow }
+ \flag_raise:n { str_error }
+ \int_use:N \c_@@_replacement_char_int
+ \else:
+ \int_eval:n
+ { \@@_tmp:w #2#3*"10000 + \@@_tmp:w #3#2*"100 + \@@_tmp:w #4#1 }
+ \fi:
+ \fi:
+ \s__tl
+ \@@_decode_utf_xxxii_loop:NNNN
+ }
+ \cs_new:Npn \@@_decode_utf_xxxii_end:w #1 \s__stop
+ {
+ \tl_if_empty:nF {#1}
+ {
+ \flag_raise:n { str_end }
+ \flag_raise:n { str_error }
+ #1 \s__tl
+ \int_use:N \c_@@_replacement_char_int \s__tl
+ }
+ \__prg_break:
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% Restore the original catcodes of bytes $0$, $254$ and $255$.
+% \begin{macrocode}
+\group_end:
+%</utf32>
+% \end{macrocode}
+%
+% \subsubsection{\textsc{iso 8859} support}
+%
+% The \textsc{iso-8859-1} encoding exactly matches the first $256$
+% Unicode characters. For other 8-bit encodings of the \textsc{iso-8859}
+% family, we keep track only of differences, and of unassigned bytes.
+% \begin{macrocode}
+%<*iso88591>
+\@@_declare_eight_bit_encoding:nnn { iso88591 }
+ {
+ }
+ {
+ }
+%</iso88591>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88592>
+\@@_declare_eight_bit_encoding:nnn { iso88592 }
+ {
+ { A1 } { 0104 }
+ { A2 } { 02D8 }
+ { A3 } { 0141 }
+ { A5 } { 013D }
+ { A6 } { 015A }
+ { A9 } { 0160 }
+ { AA } { 015E }
+ { AB } { 0164 }
+ { AC } { 0179 }
+ { AE } { 017D }
+ { AF } { 017B }
+ { B1 } { 0105 }
+ { B2 } { 02DB }
+ { B3 } { 0142 }
+ { B5 } { 013E }
+ { B6 } { 015B }
+ { B7 } { 02C7 }
+ { B9 } { 0161 }
+ { BA } { 015F }
+ { BB } { 0165 }
+ { BC } { 017A }
+ { BD } { 02DD }
+ { BE } { 017E }
+ { BF } { 017C }
+ { C0 } { 0154 }
+ { C3 } { 0102 }
+ { C5 } { 0139 }
+ { C6 } { 0106 }
+ { C8 } { 010C }
+ { CA } { 0118 }
+ { CC } { 011A }
+ { CF } { 010E }
+ { D0 } { 0110 }
+ { D1 } { 0143 }
+ { D2 } { 0147 }
+ { D5 } { 0150 }
+ { D8 } { 0158 }
+ { D9 } { 016E }
+ { DB } { 0170 }
+ { DE } { 0162 }
+ { E0 } { 0155 }
+ { E3 } { 0103 }
+ { E5 } { 013A }
+ { E6 } { 0107 }
+ { E8 } { 010D }
+ { EA } { 0119 }
+ { EC } { 011B }
+ { EF } { 010F }
+ { F0 } { 0111 }
+ { F1 } { 0144 }
+ { F2 } { 0148 }
+ { F5 } { 0151 }
+ { F8 } { 0159 }
+ { F9 } { 016F }
+ { FB } { 0171 }
+ { FE } { 0163 }
+ { FF } { 02D9 }
+ }
+ {
+ }
+%</iso88592>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88593>
+\@@_declare_eight_bit_encoding:nnn { iso88593 }
+ {
+ { A1 } { 0126 }
+ { A2 } { 02D8 }
+ { A6 } { 0124 }
+ { A9 } { 0130 }
+ { AA } { 015E }
+ { AB } { 011E }
+ { AC } { 0134 }
+ { AF } { 017B }
+ { B1 } { 0127 }
+ { B6 } { 0125 }
+ { B9 } { 0131 }
+ { BA } { 015F }
+ { BB } { 011F }
+ { BC } { 0135 }
+ { BF } { 017C }
+ { C5 } { 010A }
+ { C6 } { 0108 }
+ { D5 } { 0120 }
+ { D8 } { 011C }
+ { DD } { 016C }
+ { DE } { 015C }
+ { E5 } { 010B }
+ { E6 } { 0109 }
+ { F5 } { 0121 }
+ { F8 } { 011D }
+ { FD } { 016D }
+ { FE } { 015D }
+ { FF } { 02D9 }
+ }
+ {
+ { A5 }
+ { AE }
+ { BE }
+ { C3 }
+ { D0 }
+ { E3 }
+ { F0 }
+ }
+%</iso88593>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88594>
+\@@_declare_eight_bit_encoding:nnn { iso88594 }
+ {
+ { A1 } { 0104 }
+ { A2 } { 0138 }
+ { A3 } { 0156 }
+ { A5 } { 0128 }
+ { A6 } { 013B }
+ { A9 } { 0160 }
+ { AA } { 0112 }
+ { AB } { 0122 }
+ { AC } { 0166 }
+ { AE } { 017D }
+ { B1 } { 0105 }
+ { B2 } { 02DB }
+ { B3 } { 0157 }
+ { B5 } { 0129 }
+ { B6 } { 013C }
+ { B7 } { 02C7 }
+ { B9 } { 0161 }
+ { BA } { 0113 }
+ { BB } { 0123 }
+ { BC } { 0167 }
+ { BD } { 014A }
+ { BE } { 017E }
+ { BF } { 014B }
+ { C0 } { 0100 }
+ { C7 } { 012E }
+ { C8 } { 010C }
+ { CA } { 0118 }
+ { CC } { 0116 }
+ { CF } { 012A }
+ { D0 } { 0110 }
+ { D1 } { 0145 }
+ { D2 } { 014C }
+ { D3 } { 0136 }
+ { D9 } { 0172 }
+ { DD } { 0168 }
+ { DE } { 016A }
+ { E0 } { 0101 }
+ { E7 } { 012F }
+ { E8 } { 010D }
+ { EA } { 0119 }
+ { EC } { 0117 }
+ { EF } { 012B }
+ { F0 } { 0111 }
+ { F1 } { 0146 }
+ { F2 } { 014D }
+ { F3 } { 0137 }
+ { F9 } { 0173 }
+ { FD } { 0169 }
+ { FE } { 016B }
+ { FF } { 02D9 }
+ }
+ {
+ }
+%</iso88594>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88595>
+\@@_declare_eight_bit_encoding:nnn { iso88595 }
+ {
+ { A1 } { 0401 }
+ { A2 } { 0402 }
+ { A3 } { 0403 }
+ { A4 } { 0404 }
+ { A5 } { 0405 }
+ { A6 } { 0406 }
+ { A7 } { 0407 }
+ { A8 } { 0408 }
+ { A9 } { 0409 }
+ { AA } { 040A }
+ { AB } { 040B }
+ { AC } { 040C }
+ { AE } { 040E }
+ { AF } { 040F }
+ { B0 } { 0410 }
+ { B1 } { 0411 }
+ { B2 } { 0412 }
+ { B3 } { 0413 }
+ { B4 } { 0414 }
+ { B5 } { 0415 }
+ { B6 } { 0416 }
+ { B7 } { 0417 }
+ { B8 } { 0418 }
+ { B9 } { 0419 }
+ { BA } { 041A }
+ { BB } { 041B }
+ { BC } { 041C }
+ { BD } { 041D }
+ { BE } { 041E }
+ { BF } { 041F }
+ { C0 } { 0420 }
+ { C1 } { 0421 }
+ { C2 } { 0422 }
+ { C3 } { 0423 }
+ { C4 } { 0424 }
+ { C5 } { 0425 }
+ { C6 } { 0426 }
+ { C7 } { 0427 }
+ { C8 } { 0428 }
+ { C9 } { 0429 }
+ { CA } { 042A }
+ { CB } { 042B }
+ { CC } { 042C }
+ { CD } { 042D }
+ { CE } { 042E }
+ { CF } { 042F }
+ { D0 } { 0430 }
+ { D1 } { 0431 }
+ { D2 } { 0432 }
+ { D3 } { 0433 }
+ { D4 } { 0434 }
+ { D5 } { 0435 }
+ { D6 } { 0436 }
+ { D7 } { 0437 }
+ { D8 } { 0438 }
+ { D9 } { 0439 }
+ { DA } { 043A }
+ { DB } { 043B }
+ { DC } { 043C }
+ { DD } { 043D }
+ { DE } { 043E }
+ { DF } { 043F }
+ { E0 } { 0440 }
+ { E1 } { 0441 }
+ { E2 } { 0442 }
+ { E3 } { 0443 }
+ { E4 } { 0444 }
+ { E5 } { 0445 }
+ { E6 } { 0446 }
+ { E7 } { 0447 }
+ { E8 } { 0448 }
+ { E9 } { 0449 }
+ { EA } { 044A }
+ { EB } { 044B }
+ { EC } { 044C }
+ { ED } { 044D }
+ { EE } { 044E }
+ { EF } { 044F }
+ { F0 } { 2116 }
+ { F1 } { 0451 }
+ { F2 } { 0452 }
+ { F3 } { 0453 }
+ { F4 } { 0454 }
+ { F5 } { 0455 }
+ { F6 } { 0456 }
+ { F7 } { 0457 }
+ { F8 } { 0458 }
+ { F9 } { 0459 }
+ { FA } { 045A }
+ { FB } { 045B }
+ { FC } { 045C }
+ { FD } { 00A7 }
+ { FE } { 045E }
+ { FF } { 045F }
+ }
+ {
+ }
+%</iso88595>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88596>
+\@@_declare_eight_bit_encoding:nnn { iso88596 }
+ {
+ { AC } { 060C }
+ { BB } { 061B }
+ { BF } { 061F }
+ { C1 } { 0621 }
+ { C2 } { 0622 }
+ { C3 } { 0623 }
+ { C4 } { 0624 }
+ { C5 } { 0625 }
+ { C6 } { 0626 }
+ { C7 } { 0627 }
+ { C8 } { 0628 }
+ { C9 } { 0629 }
+ { CA } { 062A }
+ { CB } { 062B }
+ { CC } { 062C }
+ { CD } { 062D }
+ { CE } { 062E }
+ { CF } { 062F }
+ { D0 } { 0630 }
+ { D1 } { 0631 }
+ { D2 } { 0632 }
+ { D3 } { 0633 }
+ { D4 } { 0634 }
+ { D5 } { 0635 }
+ { D6 } { 0636 }
+ { D7 } { 0637 }
+ { D8 } { 0638 }
+ { D9 } { 0639 }
+ { DA } { 063A }
+ { E0 } { 0640 }
+ { E1 } { 0641 }
+ { E2 } { 0642 }
+ { E3 } { 0643 }
+ { E4 } { 0644 }
+ { E5 } { 0645 }
+ { E6 } { 0646 }
+ { E7 } { 0647 }
+ { E8 } { 0648 }
+ { E9 } { 0649 }
+ { EA } { 064A }
+ { EB } { 064B }
+ { EC } { 064C }
+ { ED } { 064D }
+ { EE } { 064E }
+ { EF } { 064F }
+ { F0 } { 0650 }
+ { F1 } { 0651 }
+ { F2 } { 0652 }
+ }
+ {
+ { A1 }
+ { A2 }
+ { A3 }
+ { A5 }
+ { A6 }
+ { A7 }
+ { A8 }
+ { A9 }
+ { AA }
+ { AB }
+ { AE }
+ { AF }
+ { B0 }
+ { B1 }
+ { B2 }
+ { B3 }
+ { B4 }
+ { B5 }
+ { B6 }
+ { B7 }
+ { B8 }
+ { B9 }
+ { BA }
+ { BC }
+ { BD }
+ { BE }
+ { C0 }
+ { DB }
+ { DC }
+ { DD }
+ { DE }
+ { DF }
+ }
+%</iso88596>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88597>
+\@@_declare_eight_bit_encoding:nnn { iso88597 }
+ {
+ { A1 } { 2018 }
+ { A2 } { 2019 }
+ { A4 } { 20AC }
+ { A5 } { 20AF }
+ { AA } { 037A }
+ { AF } { 2015 }
+ { B4 } { 0384 }
+ { B5 } { 0385 }
+ { B6 } { 0386 }
+ { B8 } { 0388 }
+ { B9 } { 0389 }
+ { BA } { 038A }
+ { BC } { 038C }
+ { BE } { 038E }
+ { BF } { 038F }
+ { C0 } { 0390 }
+ { C1 } { 0391 }
+ { C2 } { 0392 }
+ { C3 } { 0393 }
+ { C4 } { 0394 }
+ { C5 } { 0395 }
+ { C6 } { 0396 }
+ { C7 } { 0397 }
+ { C8 } { 0398 }
+ { C9 } { 0399 }
+ { CA } { 039A }
+ { CB } { 039B }
+ { CC } { 039C }
+ { CD } { 039D }
+ { CE } { 039E }
+ { CF } { 039F }
+ { D0 } { 03A0 }
+ { D1 } { 03A1 }
+ { D3 } { 03A3 }
+ { D4 } { 03A4 }
+ { D5 } { 03A5 }
+ { D6 } { 03A6 }
+ { D7 } { 03A7 }
+ { D8 } { 03A8 }
+ { D9 } { 03A9 }
+ { DA } { 03AA }
+ { DB } { 03AB }
+ { DC } { 03AC }
+ { DD } { 03AD }
+ { DE } { 03AE }
+ { DF } { 03AF }
+ { E0 } { 03B0 }
+ { E1 } { 03B1 }
+ { E2 } { 03B2 }
+ { E3 } { 03B3 }
+ { E4 } { 03B4 }
+ { E5 } { 03B5 }
+ { E6 } { 03B6 }
+ { E7 } { 03B7 }
+ { E8 } { 03B8 }
+ { E9 } { 03B9 }
+ { EA } { 03BA }
+ { EB } { 03BB }
+ { EC } { 03BC }
+ { ED } { 03BD }
+ { EE } { 03BE }
+ { EF } { 03BF }
+ { F0 } { 03C0 }
+ { F1 } { 03C1 }
+ { F2 } { 03C2 }
+ { F3 } { 03C3 }
+ { F4 } { 03C4 }
+ { F5 } { 03C5 }
+ { F6 } { 03C6 }
+ { F7 } { 03C7 }
+ { F8 } { 03C8 }
+ { F9 } { 03C9 }
+ { FA } { 03CA }
+ { FB } { 03CB }
+ { FC } { 03CC }
+ { FD } { 03CD }
+ { FE } { 03CE }
+ }
+ {
+ { AE }
+ { D2 }
+ }
+%</iso88597>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88598>
+\@@_declare_eight_bit_encoding:nnn { iso88598 }
+ {
+ { AA } { 00D7 }
+ { BA } { 00F7 }
+ { DF } { 2017 }
+ { E0 } { 05D0 }
+ { E1 } { 05D1 }
+ { E2 } { 05D2 }
+ { E3 } { 05D3 }
+ { E4 } { 05D4 }
+ { E5 } { 05D5 }
+ { E6 } { 05D6 }
+ { E7 } { 05D7 }
+ { E8 } { 05D8 }
+ { E9 } { 05D9 }
+ { EA } { 05DA }
+ { EB } { 05DB }
+ { EC } { 05DC }
+ { ED } { 05DD }
+ { EE } { 05DE }
+ { EF } { 05DF }
+ { F0 } { 05E0 }
+ { F1 } { 05E1 }
+ { F2 } { 05E2 }
+ { F3 } { 05E3 }
+ { F4 } { 05E4 }
+ { F5 } { 05E5 }
+ { F6 } { 05E6 }
+ { F7 } { 05E7 }
+ { F8 } { 05E8 }
+ { F9 } { 05E9 }
+ { FA } { 05EA }
+ { FD } { 200E }
+ { FE } { 200F }
+ }
+ {
+ { A1 }
+ { BF }
+ { C0 }
+ { C1 }
+ { C2 }
+ { C3 }
+ { C4 }
+ { C5 }
+ { C6 }
+ { C7 }
+ { C8 }
+ { C9 }
+ { CA }
+ { CB }
+ { CC }
+ { CD }
+ { CE }
+ { CF }
+ { D0 }
+ { D1 }
+ { D2 }
+ { D3 }
+ { D4 }
+ { D5 }
+ { D6 }
+ { D7 }
+ { D8 }
+ { D9 }
+ { DA }
+ { DB }
+ { DC }
+ { DD }
+ { DE }
+ { FB }
+ { FC }
+ }
+%</iso88598>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso88599>
+\@@_declare_eight_bit_encoding:nnn { iso88599 }
+ {
+ { D0 } { 011E }
+ { DD } { 0130 }
+ { DE } { 015E }
+ { F0 } { 011F }
+ { FD } { 0131 }
+ { FE } { 015F }
+ }
+ {
+ }
+%</iso88599>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso885910>
+\@@_declare_eight_bit_encoding:nnn { iso885910 }
+ {
+ { A1 } { 0104 }
+ { A2 } { 0112 }
+ { A3 } { 0122 }
+ { A4 } { 012A }
+ { A5 } { 0128 }
+ { A6 } { 0136 }
+ { A8 } { 013B }
+ { A9 } { 0110 }
+ { AA } { 0160 }
+ { AB } { 0166 }
+ { AC } { 017D }
+ { AE } { 016A }
+ { AF } { 014A }
+ { B1 } { 0105 }
+ { B2 } { 0113 }
+ { B3 } { 0123 }
+ { B4 } { 012B }
+ { B5 } { 0129 }
+ { B6 } { 0137 }
+ { B8 } { 013C }
+ { B9 } { 0111 }
+ { BA } { 0161 }
+ { BB } { 0167 }
+ { BC } { 017E }
+ { BD } { 2015 }
+ { BE } { 016B }
+ { BF } { 014B }
+ { C0 } { 0100 }
+ { C7 } { 012E }
+ { C8 } { 010C }
+ { CA } { 0118 }
+ { CC } { 0116 }
+ { D1 } { 0145 }
+ { D2 } { 014C }
+ { D7 } { 0168 }
+ { D9 } { 0172 }
+ { E0 } { 0101 }
+ { E7 } { 012F }
+ { E8 } { 010D }
+ { EA } { 0119 }
+ { EC } { 0117 }
+ { F1 } { 0146 }
+ { F2 } { 014D }
+ { F7 } { 0169 }
+ { F9 } { 0173 }
+ { FF } { 0138 }
+ }
+ {
+ }
+%</iso885910>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso885911>
+\@@_declare_eight_bit_encoding:nnn { iso885911 }
+ {
+ { A1 } { 0E01 }
+ { A2 } { 0E02 }
+ { A3 } { 0E03 }
+ { A4 } { 0E04 }
+ { A5 } { 0E05 }
+ { A6 } { 0E06 }
+ { A7 } { 0E07 }
+ { A8 } { 0E08 }
+ { A9 } { 0E09 }
+ { AA } { 0E0A }
+ { AB } { 0E0B }
+ { AC } { 0E0C }
+ { AD } { 0E0D }
+ { AE } { 0E0E }
+ { AF } { 0E0F }
+ { B0 } { 0E10 }
+ { B1 } { 0E11 }
+ { B2 } { 0E12 }
+ { B3 } { 0E13 }
+ { B4 } { 0E14 }
+ { B5 } { 0E15 }
+ { B6 } { 0E16 }
+ { B7 } { 0E17 }
+ { B8 } { 0E18 }
+ { B9 } { 0E19 }
+ { BA } { 0E1A }
+ { BB } { 0E1B }
+ { BC } { 0E1C }
+ { BD } { 0E1D }
+ { BE } { 0E1E }
+ { BF } { 0E1F }
+ { C0 } { 0E20 }
+ { C1 } { 0E21 }
+ { C2 } { 0E22 }
+ { C3 } { 0E23 }
+ { C4 } { 0E24 }
+ { C5 } { 0E25 }
+ { C6 } { 0E26 }
+ { C7 } { 0E27 }
+ { C8 } { 0E28 }
+ { C9 } { 0E29 }
+ { CA } { 0E2A }
+ { CB } { 0E2B }
+ { CC } { 0E2C }
+ { CD } { 0E2D }
+ { CE } { 0E2E }
+ { CF } { 0E2F }
+ { D0 } { 0E30 }
+ { D1 } { 0E31 }
+ { D2 } { 0E32 }
+ { D3 } { 0E33 }
+ { D4 } { 0E34 }
+ { D5 } { 0E35 }
+ { D6 } { 0E36 }
+ { D7 } { 0E37 }
+ { D8 } { 0E38 }
+ { D9 } { 0E39 }
+ { DA } { 0E3A }
+ { DF } { 0E3F }
+ { E0 } { 0E40 }
+ { E1 } { 0E41 }
+ { E2 } { 0E42 }
+ { E3 } { 0E43 }
+ { E4 } { 0E44 }
+ { E5 } { 0E45 }
+ { E6 } { 0E46 }
+ { E7 } { 0E47 }
+ { E8 } { 0E48 }
+ { E9 } { 0E49 }
+ { EA } { 0E4A }
+ { EB } { 0E4B }
+ { EC } { 0E4C }
+ { ED } { 0E4D }
+ { EE } { 0E4E }
+ { EF } { 0E4F }
+ { F0 } { 0E50 }
+ { F1 } { 0E51 }
+ { F2 } { 0E52 }
+ { F3 } { 0E53 }
+ { F4 } { 0E54 }
+ { F5 } { 0E55 }
+ { F6 } { 0E56 }
+ { F7 } { 0E57 }
+ { F8 } { 0E58 }
+ { F9 } { 0E59 }
+ { FA } { 0E5A }
+ { FB } { 0E5B }
+ }
+ {
+ { DB }
+ { DC }
+ { DD }
+ { DE }
+ }
+%</iso885911>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso885913>
+\@@_declare_eight_bit_encoding:nnn { iso885913 }
+ {
+ { A1 } { 201D }
+ { A5 } { 201E }
+ { A8 } { 00D8 }
+ { AA } { 0156 }
+ { AF } { 00C6 }
+ { B4 } { 201C }
+ { B8 } { 00F8 }
+ { BA } { 0157 }
+ { BF } { 00E6 }
+ { C0 } { 0104 }
+ { C1 } { 012E }
+ { C2 } { 0100 }
+ { C3 } { 0106 }
+ { C6 } { 0118 }
+ { C7 } { 0112 }
+ { C8 } { 010C }
+ { CA } { 0179 }
+ { CB } { 0116 }
+ { CC } { 0122 }
+ { CD } { 0136 }
+ { CE } { 012A }
+ { CF } { 013B }
+ { D0 } { 0160 }
+ { D1 } { 0143 }
+ { D2 } { 0145 }
+ { D4 } { 014C }
+ { D8 } { 0172 }
+ { D9 } { 0141 }
+ { DA } { 015A }
+ { DB } { 016A }
+ { DD } { 017B }
+ { DE } { 017D }
+ { E0 } { 0105 }
+ { E1 } { 012F }
+ { E2 } { 0101 }
+ { E3 } { 0107 }
+ { E6 } { 0119 }
+ { E7 } { 0113 }
+ { E8 } { 010D }
+ { EA } { 017A }
+ { EB } { 0117 }
+ { EC } { 0123 }
+ { ED } { 0137 }
+ { EE } { 012B }
+ { EF } { 013C }
+ { F0 } { 0161 }
+ { F1 } { 0144 }
+ { F2 } { 0146 }
+ { F4 } { 014D }
+ { F8 } { 0173 }
+ { F9 } { 0142 }
+ { FA } { 015B }
+ { FB } { 016B }
+ { FD } { 017C }
+ { FE } { 017E }
+ { FF } { 2019 }
+ }
+ {
+ }
+%</iso885913>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso885914>
+\@@_declare_eight_bit_encoding:nnn { iso885914 }
+ {
+ { A1 } { 1E02 }
+ { A2 } { 1E03 }
+ { A4 } { 010A }
+ { A5 } { 010B }
+ { A6 } { 1E0A }
+ { A8 } { 1E80 }
+ { AA } { 1E82 }
+ { AB } { 1E0B }
+ { AC } { 1EF2 }
+ { AF } { 0178 }
+ { B0 } { 1E1E }
+ { B1 } { 1E1F }
+ { B2 } { 0120 }
+ { B3 } { 0121 }
+ { B4 } { 1E40 }
+ { B5 } { 1E41 }
+ { B7 } { 1E56 }
+ { B8 } { 1E81 }
+ { B9 } { 1E57 }
+ { BA } { 1E83 }
+ { BB } { 1E60 }
+ { BC } { 1EF3 }
+ { BD } { 1E84 }
+ { BE } { 1E85 }
+ { BF } { 1E61 }
+ { D0 } { 0174 }
+ { D7 } { 1E6A }
+ { DE } { 0176 }
+ { F0 } { 0175 }
+ { F7 } { 1E6B }
+ { FE } { 0177 }
+ }
+ {
+ }
+%</iso885914>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso885915>
+\@@_declare_eight_bit_encoding:nnn { iso885915 }
+ {
+ { A4 } { 20AC }
+ { A6 } { 0160 }
+ { A8 } { 0161 }
+ { B4 } { 017D }
+ { B8 } { 017E }
+ { BC } { 0152 }
+ { BD } { 0153 }
+ { BE } { 0178 }
+ }
+ {
+ }
+%</iso885915>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<*iso885916>
+\@@_declare_eight_bit_encoding:nnn { iso885916 }
+ {
+ { A1 } { 0104 }
+ { A2 } { 0105 }
+ { A3 } { 0141 }
+ { A4 } { 20AC }
+ { A5 } { 201E }
+ { A6 } { 0160 }
+ { A8 } { 0161 }
+ { AA } { 0218 }
+ { AC } { 0179 }
+ { AE } { 017A }
+ { AF } { 017B }
+ { B2 } { 010C }
+ { B3 } { 0142 }
+ { B4 } { 017D }
+ { B5 } { 201D }
+ { B8 } { 017E }
+ { B9 } { 010D }
+ { BA } { 0219 }
+ { BC } { 0152 }
+ { BD } { 0153 }
+ { BE } { 0178 }
+ { BF } { 017C }
+ { C3 } { 0102 }
+ { C5 } { 0106 }
+ { D0 } { 0110 }
+ { D1 } { 0143 }
+ { D5 } { 0150 }
+ { D7 } { 015A }
+ { D8 } { 0170 }
+ { DD } { 0118 }
+ { DE } { 021A }
+ { E3 } { 0103 }
+ { E5 } { 0107 }
+ { F0 } { 0111 }
+ { F1 } { 0144 }
+ { F5 } { 0151 }
+ { F7 } { 015B }
+ { F8 } { 0171 }
+ { FD } { 0119 }
+ { FE } { 021B }
+ }
+ {
+ }
+%</iso885916>
+% \end{macrocode}
+%
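Each pair in the tables above maps a byte (two hexadecimal digits) to the Unicode code point it decodes to; bytes absent from the first group appear to keep their byte value as code point, and the second group lists bytes left undefined by the encoding. A rough sketch of exercising such a declaration through the converter interface (the variable name \l_tmpa_str and the expected output are illustrative assumptions, and a Unicode engine is assumed):
\begin{verbatim}
% Convert Greek capital Psi (U+03A8) from the native encoding to
% ISO 8859-7, expressing the bytes as hexadecimal digits.
\str_set_convert:Nnnn \l_tmpa_str { ^^^^03a8 } { } { iso88597/hex }
% \l_tmpa_str should then hold "D8", matching the { D8 } { 03A8 }
% pair declared above.
\end{verbatim}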
+% \end{implementation}
+%
+% \PrintIndex
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-format.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-format.dtx
index ea6c38e6564..7d42697daab 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-format.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-format.dtx
@@ -1,6 +1,6 @@
% \iffalse meta-comment
%
-%% File: l3str-format.dtx Copyright (C) 2012 The LaTeX3 Project
+%% File: l3str-format.dtx Copyright (C) 2012-2013 The LaTeX3 Project
%%
%% It may be distributed and/or modified under the conditions of the
%% LaTeX Project Public License (LPPL), either version 1.3c of this
@@ -100,7 +100,9 @@
% data should be formatted. The list of allowed \meta{styles}
% depends on the type.
% \end{itemize}
-% The choice of \meta{alignment} |=| is not implemented yet.
+% The choice of \meta{alignment} |=| is only valid for numeric types: in
+% this case the padding is inserted between the sign and the rest of the
+% number.
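A minimal sketch of the intended behaviour (the format string and the expected result are illustrative, deduced from the description rather than taken from a test file):
\begin{verbatim}
\int_format:nn { -42 } { 0=8d }
% fill "0", alignment "=", width 8, style "d": the expected result
% is "-0000042", with the padding inserted between the sign and the
% digits rather than in front of the sign.
\end{verbatim}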
%
% \section{Formatting various data-types}
%
@@ -173,7 +175,7 @@
% \end{macrocode}
%
% \begin{macrocode}
-%<@@=str>
+%<@@=str_format>
% \end{macrocode}
%
% \begin{macrocode}
@@ -201,11 +203,11 @@
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_if_digit:NTF}
+% \begin{macro}[aux, EXP]{\@@_if_digit:NTF}
% Here we expect |#1| to be a character with category other, or
% \cs{s__stop}.
% \begin{macrocode}
-\prg_new_conditional:Npnn \@@_format_if_digit:N #1 { TF }
+\prg_new_conditional:Npnn \@@_if_digit:N #1 { TF }
{
\if_int_compare:w \c_nine < 1 #1 \exp_stop_f:
\prg_return_true: \else: \prg_return_false: \fi:
@@ -214,27 +216,50 @@
% \end{macro}
%
% \begin{macro}[aux, EXP]
-% {\@@_format_put:nw, \@@_format_put:ow, \@@_format_put:fw}
+% {\@@_put:nw, \@@_put:ow, \@@_put:fw}
% Put |#1| after an \cs{s__stop} delimiter.
% \begin{macrocode}
-\cs_new:Npn \@@_format_put:nw #1 #2 \s__stop { #2 \s__stop #1 }
-\cs_generate_variant:Nn \@@_format_put:nw { o , f }
+\cs_new:Npn \@@_put:nw #1 #2 \s__stop { #2 \s__stop #1 }
+\cs_generate_variant:Nn \@@_put:nw { o , f }
% \end{macrocode}
% \end{macro}
%
+% \begin{macro}[aux, EXP, TF]{\@@_if_in:nN}
+% \begin{macro}[aux, EXP]{\@@_if_in_aux:NN}
+% A copy of \cs{__str_if_contains_char:nNTF} to avoid relying on
+% this weird internal string function.
+% \begin{macrocode}
+\prg_new_conditional:Npnn \@@_if_in:nN #1#2 { TF }
+ {
+ \@@_if_in_aux:NN #2 #1
+ { #2 \prg_return_false: \exp_after:wN \__prg_break: \else: }
+ \__prg_break_point:
+ }
+\cs_new:Npn \@@_if_in_aux:NN #1#2
+ {
+ \if_charcode:w #1 #2
+ \prg_return_true:
+ \exp_after:wN \__prg_break:
+ \fi:
+ \@@_if_in_aux:NN #1
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
% \subsection{Parsing a format specification}
%
-% \begin{macro}[aux, EXP]{\@@_format_parse:n}
+% \begin{macro}[aux, EXP]{\@@_parse:n}
% \begin{macro}[aux, EXP]
% {
-% \@@_format_parse_auxi:NN,
-% \@@_format_parse_auxii:nN,
-% \@@_format_parse_auxiii:nN,
-% \@@_format_parse_auxiv:nwN,
-% \@@_format_parse_auxv:nN,
-% \@@_format_parse_auxvi:nwN,
-% \@@_format_parse_auxvii:nN,
-% \@@_format_parse_end:nwn,
+% \@@_parse_auxi:NN,
+% \@@_parse_auxii:nN,
+% \@@_parse_auxiii:nN,
+% \@@_parse_auxiv:nwN,
+% \@@_parse_auxv:nN,
+% \@@_parse_auxvi:nwN,
+% \@@_parse_auxvii:nN,
+% \@@_parse_end:nwn,
% }
% The goal is to parse
% \begin{equation*}
@@ -242,59 +267,59 @@
% [\meta{sign}] [\meta{width}] [.\meta{precision}] [\meta{style}]
% \end{equation*}
% \begin{macrocode}
-\cs_new:Npn \@@_format_parse:n #1
+\cs_new:Npn \@@_parse:n #1
{
- \exp_last_unbraced:Nf \@@_format_parse_auxi:NN
- \@@_to_other:n {#1} \s__stop \s__stop {#1}
+ \exp_last_unbraced:Nf \@@_parse_auxi:NN
+ \__str_to_other:n {#1} \s__stop \s__stop {#1}
}
-\cs_new:Npx \@@_format_parse_auxi:NN #1#2
+\cs_new:Npx \@@_parse_auxi:NN #1#2
{
- \exp_not:N \@@_if_contains_char:nNTF { < > = ^ } #2
- { \exp_not:N \@@_format_parse_auxiii:nN { #1 #2 } }
+ \exp_not:N \@@_if_in:nNTF { < > = ^ } #2
+ { \exp_not:N \@@_parse_auxiii:nN { #1 #2 } }
{
- \exp_not:N \@@_format_parse_auxii:nN
+ \exp_not:N \@@_parse_auxii:nN
{ \c_catcode_other_space_tl } #1 #2
}
}
-\cs_new:Npn \@@_format_parse_auxii:nN #1#2
+\cs_new:Npn \@@_parse_auxii:nN #1#2
{
- \@@_if_contains_char:nNTF { < > = ^ } #2
- { \@@_format_parse_auxiii:nN { #1 #2 } }
- { \@@_format_parse_auxiii:nN { #1 ? } #2 }
+ \@@_if_in:nNTF { < > = ^ } #2
+ { \@@_parse_auxiii:nN { #1 #2 } }
+ { \@@_parse_auxiii:nN { #1 ? } #2 }
}
-\cs_new:Npx \@@_format_parse_auxiii:nN #1#2
+\cs_new:Npx \@@_parse_auxiii:nN #1#2
{
- \exp_not:N \@@_if_contains_char:nNTF
+ \exp_not:N \@@_if_in:nNTF
{ + - \c_catcode_other_space_tl }
#2
- { \exp_not:N \@@_format_parse_auxiv:nwN { #1 #2 } ; }
- { \exp_not:N \@@_format_parse_auxiv:nwN { #1 ? } ; #2 }
+ { \exp_not:N \@@_parse_auxiv:nwN { #1 #2 } ; }
+ { \exp_not:N \@@_parse_auxiv:nwN { #1 ? } ; #2 }
}
-\cs_new:Npn \@@_format_parse_auxiv:nwN #1#2; #3
+\cs_new:Npn \@@_parse_auxiv:nwN #1#2; #3
{
- \@@_format_if_digit:NTF #3
- { \@@_format_parse_auxiv:nwN {#1} #2 #3 ; }
- { \@@_format_parse_auxv:nN { #1 {#2} } #3 }
+ \@@_if_digit:NTF #3
+ { \@@_parse_auxiv:nwN {#1} #2 #3 ; }
+ { \@@_parse_auxv:nN { #1 {#2} } #3 }
}
-\cs_new:Npn \@@_format_parse_auxv:nN #1#2
+\cs_new:Npn \@@_parse_auxv:nN #1#2
{
\token_if_eq_charcode:NNTF . #2
- { \@@_format_parse_auxvi:nwN {#1} 0 ; }
- { \@@_format_parse_auxvii:nN { #1 { } } #2 }
+ { \@@_parse_auxvi:nwN {#1} 0 ; }
+ { \@@_parse_auxvii:nN { #1 { } } #2 }
}
-\cs_new:Npn \@@_format_parse_auxvi:nwN #1#2; #3
+\cs_new:Npn \@@_parse_auxvi:nwN #1#2; #3
{
- \@@_format_if_digit:NTF #3
- { \@@_format_parse_auxvi:nwN {#1} #2 #3 ; }
- { \@@_format_parse_auxvii:nN { #1 {#2} } #3 }
+ \@@_if_digit:NTF #3
+ { \@@_parse_auxvi:nwN {#1} #2 #3 ; }
+ { \@@_parse_auxvii:nN { #1 {#2} } #3 }
}
-\cs_new:Npn \@@_format_parse_auxvii:nN #1#2
+\cs_new:Npn \@@_parse_auxvii:nN #1#2
{
\token_if_eq_meaning:NNTF \s__stop #2
- { \@@_format_parse_end:nwn { #1 ? } #2 }
- { \@@_format_parse_end:nwn { #1 #2 } }
+ { \@@_parse_end:nwn { #1 ? } #2 }
+ { \@@_parse_end:nwn { #1 #2 } }
}
-\cs_new:Npn \@@_format_parse_end:nwn #1 #2 \s__stop \s__stop #3
+\cs_new:Npn \@@_parse_end:nwn #1 #2 \s__stop \s__stop #3
{
\tl_if_empty:nF {#2}
{ \__msg_kernel_expandable_error:nnn { str } { invalid-format } {#3} }
@@ -316,40 +341,40 @@
% padding between the \meta{sign} and the \meta{body}, hence the need to
% keep those separate.
%
-% \begin{macro}[aux, EXP]{\@@_format_align_<:nnnN}
+% \begin{macro}[aux, EXP]{\@@_align_<:nnnN}
% \begin{quote}
-% \cs{@@_format_align_<:nnnN} \Arg{body} \Arg{sign} \Arg{width}
+% \cs{@@_align_<:nnnN} \Arg{body} \Arg{sign} \Arg{width}
% \meta{fill}
% \end{quote}
% Aligning \enquote{\meta{sign} \meta{body}} to the left
% entails appending |#4| the correct number of times. Then convert
% the result to a string.
% \begin{macrocode}
-\cs_new:cpn { @@_format_align_<:nnnN } #1#2#3#4
+\cs_new:cpn { @@_align_<:nnnN } #1#2#3#4
{
\use:nf { #2 #1 }
{
\prg_replicate:nn
- { \int_max:nn { #3 - \@@_count_unsafe:n { #2 #1 } } { 0 } }
+ { \int_max:nn { #3 - \__str_count_unsafe:n { #2 #1 } } { 0 } }
{#4}
}
}
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_align_>:nnnN}
+% \begin{macro}[aux, EXP]{\@@_align_>:nnnN}
% \begin{quote}
-% \cs{@@_format_align_>:nnnN} \Arg{body} \Arg{sign} \Arg{width}
+% \cs{@@_align_>:nnnN} \Arg{body} \Arg{sign} \Arg{width}
% \meta{fill}
% \end{quote}
% Aligning \enquote{\meta{sign} \meta{body}} to the right
% entails prepending |#4| the correct number of times. Then convert
% the result to a string.
% \begin{macrocode}
-\cs_new:cpn { @@_format_align_>:nnnN } #1#2#3#4
+\cs_new:cpn { @@_align_>:nnnN } #1#2#3#4
{
\prg_replicate:nn
- { \int_max:nn { #3 - \@@_count_unsafe:n { #2 #1 } } { 0 } }
+ { \int_max:nn { #3 - \__str_count_unsafe:n { #2 #1 } } { 0 } }
{#4}
#2 #1
}
@@ -357,9 +382,9 @@
% \end{macro}
%
% \begingroup\catcode`\^=12
-% \begin{macro}[aux, EXP]{\@@_format_align_^:nnnN}
+% \begin{macro}[aux, EXP]{\@@_align_^:nnnN}
% \begin{quote}
-% \cs{@@_format_align_^:nnnN} \Arg{body} \Arg{sign} \Arg{width}
+% \cs{@@_align_^:nnnN} \Arg{body} \Arg{sign} \Arg{width}
% \meta{fill}
% \end{quote}
% Centering \enquote{\meta{sign} \meta{body}} entails
@@ -367,14 +392,14 @@
% number of |#4| to be added is odd, we add one more after than
% before.
% \begin{macrocode}
-\cs_new:cpn { @@_format_align_^:nnnN } #1#2#3#4
+\cs_new:cpn { @@_align_^:nnnN } #1#2#3#4
{
\use:fnf
{
\prg_replicate:nn
{
\int_max:nn \c_zero
- { #3 - \@@_count_unsafe:n { #2 #1 } - \c_one }
+ { #3 - \__str_count_unsafe:n { #2 #1 } - \c_one }
/ \c_two
}
{#4}
@@ -384,7 +409,7 @@
\prg_replicate:nn
{
\int_max:nn \c_zero
- { #3 - \@@_count_unsafe:n { #2 #1 } }
+ { #3 - \__str_count_unsafe:n { #2 #1 } }
/ \c_two
}
{#4}
@@ -394,21 +419,21 @@
% \end{macro}
% \endgroup
%
-% \begin{macro}[aux, EXP]{\@@_format_align_=:nnnN}
+% \begin{macro}[aux, EXP]{\@@_align_=:nnnN}
% \begin{quote}
-% \cs{@@_format_align_=:nnnN} \Arg{body} \Arg{sign} \Arg{width}
+% \cs{@@_align_=:nnnN} \Arg{body} \Arg{sign} \Arg{width}
% \meta{fill}
% \end{quote}
% The special numeric alignment |=| means that we insert the
% appropriate number of copies of |#4| between the \meta{sign} and the
% \meta{body}. Then convert the result to a string.
% \begin{macrocode}
-\cs_new:cpn { @@_format_align_=:nnnN } #1#2#3#4
+\cs_new:cpn { @@_align_=:nnnN } #1#2#3#4
{
\use:nf {#2}
{
\prg_replicate:nn
- { \int_max:nn { #3 - \@@_count_unsafe:n { #2 #1 } } { 0 } }
+ { \int_max:nn { #3 - \__str_count_unsafe:n { #2 #1 } } { 0 } }
{#4}
}
#1
@@ -420,7 +445,7 @@
% \subsection{Formatting token lists}
%
% \begin{macro}[EXP]{\tl_format:Nn, \tl_format:cn, \tl_format:nn}
-% Call \cs{@@_format_tl:NNNnnNn} to read the parsed \meta{format
+% Call \cs{@@_tl:NNNnnNn} to read the parsed \meta{format
% specification}. Then convert the result to a string.
% \begin{macrocode}
\cs_new_nopar:Npn \tl_format:Nn { \exp_args:No \tl_format:nn }
@@ -429,52 +454,52 @@
{
\tl_to_str:f
{
- \exp_last_unbraced:Nf \@@_format_tl:NNNnnNn
- { \@@_format_parse:n {#2} }
+ \exp_last_unbraced:Nf \@@_tl:NNNnnNn
+ { \@@_parse:n {#2} }
{#1}
}
}
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_tl:NNNnnNn}
+% \begin{macro}[aux, EXP]{\@@_tl:NNNnnNn}
% \begin{quote}
-% \cs{@@_format_tl:NNNnnNn} \meta{fill} \meta{alignment} \meta{sign}
+% \cs{@@_tl:NNNnnNn} \meta{fill} \meta{alignment} \meta{sign}
% \Arg{width} \Arg{precision} \meta{style} \Arg{token list}
% \end{quote}
% First check that the \meta{alignment} is not |=|, and set the
% default alignment |?| to |<|. Place the modified information after
% a trailing \cs{s__stop} for later retrieval. Then check that there
% was no \meta{sign}. The width will be useful later, store it after
-% \cs{s__stop}. Afterwards, check the \meta{precision}: if it is
-% empty, we will eventually use the whole string, otherwise we will
-% only use a substring, starting at the index $1$, and ending at |#5|.
+% \cs{s__stop}. Afterwards, store the precision, and the function
+% \cs{__str_range_unsafe:nnn} that will be used to extract the first
+% |#5| characters of the string.
% There is a need to use the \enquote{unsafe} function, as otherwise
% leading spaces would get stripped by |f|-expansion. Finally, check
% that the \meta{style} is |?| or |s|.
% \begin{macrocode}
-\cs_new:Npn \@@_format_tl:NNNnnNn #1#2#3#4#5#6
+\cs_new:Npn \@@_tl:NNNnnNn #1#2#3#4#5#6
{
\token_if_eq_charcode:NNTF #2 =
{
\__msg_kernel_expandable_error:nnnn
{ str } { invalid-align-format } {#2} {tl}
- \@@_format_put:nw { #1 < }
+ \@@_put:nw { #1 < }
}
{
\token_if_eq_charcode:NNTF #2 ?
- { \@@_format_put:nw { #1 < } }
- { \@@_format_put:nw { #1 #2 } }
+ { \@@_put:nw { #1 < } }
+ { \@@_put:nw { #1 #2 } }
}
\token_if_eq_charcode:NNF #3 ?
{
\__msg_kernel_expandable_error:nnnn
{ str } { invalid-sign-format } {#3} {tl}
}
- \@@_format_put:nw { {#4} }
+ \@@_put:nw { {#4} }
\tl_if_empty:nTF {#5}
- { \@@_format_put:nw { \use:n { } } }
- { \@@_format_put:nw { \@@_substr_unsafe:nnn { {1} {#5} } } }
+ { \@@_put:nw { \__str_range_unsafe:nnn { {1} {-1} } } }
+ { \@@_put:nw { \__str_range_unsafe:nnn { {1} {#5} } } }
\token_if_eq_charcode:NNF #6 s
{
\token_if_eq_charcode:NNF #6 ?
@@ -483,15 +508,15 @@
{ str } { invalid-style-format } {#6} {tl}
}
}
- \@@_format_tl_s:NNnnNNn
+ \@@_tl_s:NNnnNNn
\s__stop
}
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_tl_s:NNnnNNn}
+% \begin{macro}[aux, EXP]{\@@_tl_s:NNnnNNn}
% \begin{quote}
-% \cs{@@_format_tl_s:NNnnNNn} \cs{s__stop} \meta{function}
+% \cs{@@_tl_s:NNnnNNn} \cs{s__stop} \meta{function}
% \Arg{arguments} \Arg{width} \meta{fill} \meta{alignment}
% \Arg{token list}
% \end{quote}
@@ -499,13 +524,13 @@
% that |f|-expanding \meta{function} \Arg{other string}
% \meta{arguments} yields the piece of the \meta{other string} that we
% want to output. The \meta{other string} is built from the
-% \meta{token list} by |f|-expanding \cs{@@_to_other:n}.
+% \meta{token list} by |f|-expanding \cs{__str_to_other:n}.
% \begin{macrocode}
-\cs_new:Npn \@@_format_tl_s:NNnnNNn #1#2#3#4#5#6#7
+\cs_new:Npn \@@_tl_s:NNnnNNn #1#2#3#4#5#6#7
{
\exp_args:Nc \exp_args:Nf
- { @@_format_align_#6:nnnN }
- { \exp_args:Nf #2 { \@@_to_other:n {#7} } #3 }
+ { @@_align_#6:nnnN }
+ { \exp_args:Nf #2 { \__str_to_other:n {#7} } #3 }
{ }
{#4} #5
}
@@ -522,29 +547,29 @@
\cs_new:Npn \seq_format:Nn #1#2
{
\tl_to_str:f
- { \@@_format_seq:of {#1} { \@@_format_parse:n {#2} } }
+ { \@@_seq:of {#1} { \@@_parse:n {#2} } }
}
\cs_generate_variant:Nn \seq_format:Nn { c }
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_seq:nn, \@@_format_seq:of}
+% \begin{macro}[aux, EXP]{\@@_seq:nn, \@@_seq:of}
% The first argument is the contents of a \texttt{seq} variable. The
% second is a parsed \meta{format specification}. Set up the loop.
% \begin{macrocode}
-\cs_new:Npn \@@_format_seq:nn #1#2
+\cs_new:Npn \@@_seq:nn #1#2
{
- \@@_format_seq_loop:nnNn { } {#2}
+ \@@_seq_loop:nnNn { } {#2}
#1
- { ? \@@_format_seq_end:w } { }
+ { ? \@@_seq_end:w } { }
}
-\cs_generate_variant:Nn \@@_format_seq:nn { of }
+\cs_generate_variant:Nn \@@_seq:nn { of }
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_seq_loop:nnNn}
+% \begin{macro}[aux, EXP]{\@@_seq_loop:nnNn}
% \begin{quote}
-% \cs{@@_format_seq_loop:nnNn} \Arg{done} \Arg{parsed format}
+% \cs{@@_seq_loop:nnNn} \Arg{done} \Arg{parsed format}
% \cs{__seq_item:n} \Arg{item}
% \end{quote}
% The first argument is the result of formatting the items read so
@@ -552,52 +577,52 @@
% until we reach the end of the sequence, where |\use_none:n #3| ends
% the loop.
% \begin{macrocode}
-\cs_new:Npn \@@_format_seq_loop:nnNn #1#2#3#4
+\cs_new:Npn \@@_seq_loop:nnNn #1#2#3#4
{
\use_none:n #3
- \exp_args:Nf \@@_format_seq_loop:nnNn
- { \use:nf {#1} { \@@_format_tl:NNNnnNn #2 {#4} } }
+ \exp_args:Nf \@@_seq_loop:nnNn
+ { \use:nf {#1} { \@@_tl:NNNnnNn #2 {#4} } }
{#2}
}
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_seq_end:w}
+% \begin{macro}[aux, EXP]{\@@_seq_end:w}
% Pick the right piece in the loop above.
% \begin{macrocode}
-\cs_new:Npn \@@_format_seq_end:w #1#2#3#4 { \use_ii:nnn #3 }
+\cs_new:Npn \@@_seq_end:w #1#2#3#4 { \use_ii:nnn #3 }
% \end{macrocode}
% \end{macro}
%
% \subsection{Formatting integers}
%
% \begin{macro}[EXP]{\int_format:nn}
-% Evalute the first argument and feed it to \cs{@@_format_int:nn}.
+% Evaluate the first argument and feed it to \cs{@@_int:nn}.
% \begin{macrocode}
\cs_new:Npn \int_format:nn #1
- { \exp_args:Nf \@@_format_int:nn { \int_eval:n {#1} } }
+ { \exp_args:Nf \@@_int:nn { \int_eval:n {#1} } }
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_int:nn}
+% \begin{macro}[aux, EXP]{\@@_int:nn}
% Parse the \meta{format specification} and feed it to
-% \cs{@@_format_int:NNNnnNn}. Then convert the result to a string
+% \cs{@@_int:NNNnnNn}. Then convert the result to a string
% \begin{macrocode}
-\cs_new:Npn \@@_format_int:nn #1#2
+\cs_new:Npn \@@_int:nn #1#2
{
\tl_to_str:f
{
- \exp_last_unbraced:Nf \@@_format_int:NNNnnNn
- { \@@_format_parse:n {#2} }
+ \exp_last_unbraced:Nf \@@_int:NNNnnNn
+ { \@@_parse:n {#2} }
{#1}
}
}
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_int:NNNnnNn}
+% \begin{macro}[aux, EXP]{\@@_int:NNNnnNn}
% \begin{quote}
-% \cs{@@_format_int:NNNnnNn} \meta{fill} \meta{alignment}
+% \cs{@@_int:NNNnnNn} \meta{fill} \meta{alignment}
% \meta{sign} \Arg{width} \Arg{precision} \meta{style} \Arg{integer}
% \end{quote}
% First set the
@@ -611,22 +636,22 @@
% the \meta{precision} was absent. Finally, dispatch depending on the
% \meta{style}.
% \begin{macrocode}
-\cs_new:Npn \@@_format_int:NNNnnNn #1#2#3#4#5#6#7
+\cs_new:Npn \@@_int:NNNnnNn #1#2#3#4#5#6#7
{
\token_if_eq_charcode:NNTF #2 ?
- { \@@_format_put:nw { #1 > } }
- { \@@_format_put:nw { #1 #2 } }
+ { \@@_put:nw { #1 > } }
+ { \@@_put:nw { #1 #2 } }
\int_compare:nNnTF {#7} < \c_zero
- { \@@_format_put:nw { - } }
+ { \@@_put:nw { - } }
{
\str_case:nnn {#3}
{
- { ~ } { \@@_format_put:ow { \c_catcode_other_space_tl } }
- { + } { \@@_format_put:nw { + } }
+ { ~ } { \@@_put:ow { \c_catcode_other_space_tl } }
+ { + } { \@@_put:nw { + } }
}
- { \@@_format_put:nw { { } } }
+ { \@@_put:nw { { } } }
}
- \@@_format_put:nw { {#4} }
+ \@@_put:nw { {#4} }
\tl_if_empty:nF {#5}
{
\__msg_kernel_expandable_error:nnnn
@@ -634,25 +659,25 @@
}
\str_case:nnn {#6}
{
- { ? } { \@@_format_int:NwnnNNn \use:n }
- { d } { \@@_format_int:NwnnNNn \use:n }
- { b } { \@@_format_int:NwnnNNn \int_to_binary:n }
- { o } { \@@_format_int:NwnnNNn \int_to_octal:n }
- { X } { \@@_format_int:NwnnNNn \int_to_hexadecimal:n }
+ { ? } { \@@_int:NwnnNNn \use:n }
+ { d } { \@@_int:NwnnNNn \use:n }
+ { b } { \@@_int:NwnnNNn \int_to_binary:n }
+ { o } { \@@_int:NwnnNNn \int_to_octal:n }
+ { X } { \@@_int:NwnnNNn \int_to_hexadecimal:n }
}
{
\__msg_kernel_expandable_error:nnnn
{ str } { invalid-style-format } {#6} { int }
- \@@_format_int:NwnnNNn \use:n
+ \@@_int:NwnnNNn \use:n
}
\s__stop {#7}
}
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_int:NwnnNNn}
+% \begin{macro}[aux, EXP]{\@@_int:NwnnNNn}
% \begin{quote}
-% \cs{@@_format_int:NwnnNNn} \meta{function} \cs{s__stop}
+% \cs{@@_int:NwnnNNn} \meta{function} \cs{s__stop}
% \Arg{width} \Arg{sign} \meta{fill} \meta{alignment} \Arg{integer}
% \end{quote}
% Use the |format_align| function corresponding to the
@@ -665,48 +690,48 @@
% \item the \meta{fill} character.
% \end{itemize}
% \begin{macrocode}
-\cs_new:Npn \@@_format_int:NwnnNNn #1#2 \s__stop #3#4#5#6#7
+\cs_new:Npn \@@_int:NwnnNNn #1#2 \s__stop #3#4#5#6#7
{
\exp_args:Nc \exp_args:Nf
- { @@_format_align_#6:nnnN }
+ { @@_align_#6:nnnN }
{ #1 { \int_abs:n {#7} } }
{#4}
{#3} #5
}
% \end{macrocode}
-% ^^A todo: note similarity with \@@_format_tl_s:NNnnNNn
+% ^^A todo: note similarity with \@@_tl_s:NNnnNNn
% \end{macro}
%
% \subsection{Formatting floating points}
%
% \begin{macro}[EXP]{\fp_format:nn}
% Evaluate the first argument to an internal floating point number, and
-% feed it to \cs{@@_format_fp:nn}.
+% feed it to \cs{@@_fp:nn}.
% \begin{macrocode}
\cs_new:Npn \fp_format:nn #1
- { \exp_args:Nf \@@_format_fp:nn { \__fp_parse:n {#1} } }
+ { \exp_args:Nf \@@_fp:nn { \__fp_parse:n {#1} } }
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_fp:nn}
+% \begin{macro}[aux, EXP]{\@@_fp:nn}
% Parse the \meta{format specification} and feed it to
-% \cs{@@_format_fp:NNNnnNn}. Then convert the result to a string
+% \cs{@@_fp:NNNnnNn}. Then convert the result to a string
% \begin{macrocode}
-\cs_new:Npn \@@_format_fp:nn #1#2
+\cs_new:Npn \@@_fp:nn #1#2
{
\tl_to_str:f
{
- \exp_last_unbraced:Nf \@@_format_fp:NNNnnNw
- { \@@_format_parse:n {#2} }
+ \exp_last_unbraced:Nf \@@_fp:NNNnnNw
+ { \@@_parse:n {#2} }
#1
}
}
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_fp:NNNnnNw}
+% \begin{macro}[aux, EXP]{\@@_fp:NNNnnNw}
% \begin{quote}
-% \cs{@@_format_fp:NNNnnNw} \meta{fill} \meta{alignment}
+% \cs{@@_fp:NNNnnNw} \meta{fill} \meta{alignment}
% \meta{format sign} \Arg{width} \Arg{precision} \meta{style}
% \cs{s__fp} \cs{__fp_chk:w} \meta{fp type} \meta{fp sign} \meta{fp
% body} |;|
@@ -723,37 +748,37 @@
% (default precision). Finally, dispatch depending on the
% \meta{style}.
% \begin{macrocode}
-\cs_new:Npn \@@_format_fp:NNNnnNw
+\cs_new:Npn \@@_fp:NNNnnNw
#1#2#3#4#5#6 \s__fp \__fp_chk:w #7 #8
{
\token_if_eq_charcode:NNTF #2 ?
- { \@@_format_put:nw { #1 > } }
- { \@@_format_put:nw { #1 #2 } }
+ { \@@_put:nw { #1 > } }
+ { \@@_put:nw { #1 #2 } }
\token_if_eq_meaning:NNTF 2 #8
- { \@@_format_put:nw { - } }
+ { \@@_put:nw { - } }
{
\str_case:nnn {#3}
{
- { ~ } { \@@_format_put:ow { \c_catcode_other_space_tl } }
- { + } { \@@_format_put:nw { + } }
+ { ~ } { \@@_put:ow { \c_catcode_other_space_tl } }
+ { + } { \@@_put:nw { + } }
}
- { \@@_format_put:nw { { } } }
+ { \@@_put:nw { { } } }
}
- \@@_format_put:nw { {#4} }
+ \@@_put:nw { {#4} }
\tl_if_empty:nTF {#5}
- { \@@_format_put:nw { { 6} } }
- { \@@_format_put:nw { {#5} } }
+ { \@@_put:nw { { 6} } }
+ { \@@_put:nw { {#5} } }
\str_case:nnn {#6}
{
- { e } { \@@_format_fp:wnnnNNw \@@_format_fp_e:wn }
- { f } { \@@_format_fp:wnnnNNw \@@_format_fp_f:wn }
- { g } { \@@_format_fp:wnnnNNw \@@_format_fp_g:wn }
- { ? } { \@@_format_fp:wnnnNNw \@@_format_fp_g:wn }
+ { e } { \@@_fp:wnnnNNw \@@_fp_e:wn }
+ { f } { \@@_fp:wnnnNNw \@@_fp_f:wn }
+ { g } { \@@_fp:wnnnNNw \@@_fp_g:wn }
+ { ? } { \@@_fp:wnnnNNw \@@_fp_g:wn }
}
{
\__msg_kernel_expandable_error:nnnn
{ str } { invalid-style-format } {#6} { fp }
- \@@_format_fp:wnnnNNw \@@_format_fp_g:wn
+ \@@_fp:wnnnNNw \@@_fp_g:wn
}
\s__stop
\s__fp \__fp_chk:w #7 #8
@@ -761,19 +786,19 @@
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_fp:wnnnNNw}
+% \begin{macro}[aux, EXP]{\@@_fp:wnnnNNw}
% \begin{quote}
-% \cs{@@_format_fp:wnnnNNw} \meta{formatting function} \cs{s__stop}
+% \cs{@@_fp:wnnnNNw} \meta{formatting function} \cs{s__stop}
% \Arg{precision} \Arg{width} \Arg{sign} \meta{fill}
% \meta{alignment} \cs{s__fp} \cs{__fp_chk:w} \meta{fp type}
% \meta{fp sign} \meta{fp body} |;|
% \end{quote}
% \begin{macrocode}
-\cs_new:Npn \@@_format_fp:wnnnNNw
+\cs_new:Npn \@@_fp:wnnnNNw
#1 \s__stop #2 #3 #4 #5#6 #7 ;
{
\exp_args:Nc \exp_args:Nf
- { @@_format_align_#6:nnnN }
+ { @@_align_#6:nnnN }
{ #1 #7 ; {#2} }
{#4}
{#3} #5
@@ -781,17 +806,17 @@
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_fp_round:wn}
+% \begin{macro}[aux, EXP]{\@@_fp_round:wn}
% Round the given floating point (not its absolute value, to play
% nicely with unusual rounding modes).
% \begin{macrocode}
-\cs_new:Npn \@@_format_fp_round:wn #1 ; #2
+\cs_new:Npn \@@_fp_round:wn #1 ; #2
{ \__fp_parse:n { round ( #1; , #2 - \__fp_exponent:w #1; ) } }
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_fp_e:wn}
-% \begin{macro}[aux, EXP]{\@@_format_fp_e_aux:wn}
+% \begin{macro}[aux, EXP]{\@@_fp_e:wn}
+% \begin{macro}[aux, EXP]{\@@_fp_e_aux:wn}
% With the |e| type, first filter out special cases. In the normal
% case, round to |#4+1| significant figures (one before the decimal
% separator, |#4| after).
@@ -801,7 +826,7 @@
\tl_to_lowercase:n
{
\group_end:
- \cs_new:Npn \@@_format_fp_e:wn \s__fp \__fp_chk:w #1#2#3 ; #4
+ \cs_new:Npn \@@_fp_e:wn \s__fp \__fp_chk:w #1#2#3 ; #4
{
\int_case:nnn {#1}
{
@@ -810,26 +835,26 @@
{3} { nan }
}
{
- \exp_last_unbraced:Nf \@@_format_fp_e_aux:wn
- \@@_format_fp_round:wn \s__fp \__fp_chk:w #1#2#3 ; { #4 + 1 }
+ \exp_last_unbraced:Nf \@@_fp_e_aux:wn
+ \@@_fp_round:wn \s__fp \__fp_chk:w #1#2#3 ; { #4 + 1 }
{#4}
}
}
- \cs_new:Npn \@@_format_fp_e_aux:wn
+ \cs_new:Npn \@@_fp_e_aux:wn
\s__fp \__fp_chk:w #1#2 #3 #4#5#6#7 ; #8
{
- \@@_format_put:fw { \int_eval:n { #3 - 1 } }
- \@@_format_put:nw { e }
+ \@@_put:fw { \int_eval:n { #3 - 1 } }
+ \@@_put:nw { e }
\int_compare:nNnTF {#8} > \c_sixteen
{
- \@@_format_put:fw { \prg_replicate:nn { #8 - \c_fifteen } {0} }
- \@@_format_put:fw { \use_none:n #4#5#6#7 }
+ \@@_put:fw { \prg_replicate:nn { #8 - \c_fifteen } {0} }
+ \@@_put:fw { \use_none:n #4#5#6#7 }
}
{
- \@@_format_put:fw
- { \str_substr:nnn { #4#5#6#7 0 } { 2 } { #8 + 1 } }
+ \@@_put:fw
+ { \str_range:nnn { #4#5#6#7 0 } { 2 } { #8 + 1 } }
}
- \@@_format_put:fw { \use_i:nnnn #4 . }
+ \@@_put:fw { \use_i:nnnn #4 . }
\use_none:n \s__stop
}
}
@@ -837,12 +862,12 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_fp_f:wn}
-% \begin{macro}[aux, EXP]{\@@_format_fp_f_aux:wwwn}
+% \begin{macro}[aux, EXP]{\@@_fp_f:wn}
+% \begin{macro}[aux, EXP]{\@@_fp_f_aux:wwwn}
% With the |f| type, first filter out special cases. In the normal
% case, round to |#4| (absolute) decimal places.
% \begin{macrocode}
-\cs_new:Npn \@@_format_fp_f:wn \s__fp \__fp_chk:w #1#2#3 ; #4
+\cs_new:Npn \@@_fp_f:wn \s__fp \__fp_chk:w #1#2#3 ; #4
{
\int_case:nnn {#1}
{
@@ -851,25 +876,25 @@
{3} { nan }
}
{
- \exp_last_unbraced:Nf \@@_format_fp_f_aux:wwwn
+ \exp_last_unbraced:Nf \@@_fp_f_aux:wwwn
\fp_to_decimal:n
{ abs ( round ( \s__fp \__fp_chk:w #1#2#3 ; , #4 ) ) }
. . ;
{#4}
}
}
-\cs_new:Npn \@@_format_fp_f_aux:wwwn #1 . #2 . #3 ; #4
+\cs_new:Npn \@@_fp_f_aux:wwwn #1 . #2 . #3 ; #4
{
\use:nf
{ #1 . #2 }
- { \prg_replicate:nn { #4 - \@@_count_unsafe:n {#2} } {0} }
+ { \prg_replicate:nn { #4 - \__str_count_unsafe:n {#2} } {0} }
}
% \end{macrocode}
% \end{macro}
% \end{macro}
%
-% \begin{macro}[aux, EXP]{\@@_format_fp_g:wn}
-% \begin{macro}[aux, EXP]{\@@_format_fp_g_aux:wn}
+% \begin{macro}[aux, EXP]{\@@_fp_g:wn}
+% \begin{macro}[aux, EXP]{\@@_fp_g_aux:wn}
% With the |g| type, first filter out special cases. In the normal
% case, round to |#4| significant figures, then test the exponent: if
% $-4\leq \meta{exponent} < \meta{precision}$, use the presentation
@@ -880,7 +905,7 @@
% \cs{fp_to_decimal:n} and \cs{fp_to_scientific:n}, acting on the
% (absolute value of the) rounded value.
% \begin{macrocode}
-\cs_new:Npn \@@_format_fp_g:wn \s__fp \__fp_chk:w #1#2 ; #3
+\cs_new:Npn \@@_fp_g:wn \s__fp \__fp_chk:w #1#2 ; #3
{
\int_case:nnn {#1}
{
@@ -889,13 +914,13 @@
{3} { nan }
}
{
- \exp_last_unbraced:Nf \@@_format_fp_g_aux:wn
- \@@_format_fp_round:wn \s__fp \__fp_chk:w #1#2 ;
+ \exp_last_unbraced:Nf \@@_fp_g_aux:wn
+ \@@_fp_round:wn \s__fp \__fp_chk:w #1#2 ;
{ \int_max:nn {1} {#3} }
{ \int_max:nn {1} {#3} }
}
}
-\cs_new:Npn \@@_format_fp_g_aux:wn #1; #2
+\cs_new:Npn \@@_fp_g_aux:wn #1; #2
{
\int_compare:nNnTF { \__fp_exponent:w #1; } < { -3 }
{ \fp_to_scientific:n }
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx
index 301682c64be..0a4d72afd2f 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx
@@ -1,6 +1,6 @@
% \iffalse meta-comment
%
-%% File: l3str.dtx Copyright (C) 2011-2012 The LaTeX3 Project
+%% File: l3str.dtx Copyright (C) 2011-2013 The LaTeX3 Project
%%
%% It may be distributed and/or modified under the conditions of the
%% LaTeX Project Public License (LPPL), either version 1.3c of this
@@ -35,14 +35,13 @@
%
%<*driver|package>
\RequirePackage{expl3}
-\GetIdInfo$Id: l3str.dtx 4339 2012-11-24 19:16:43Z joseph $
+\GetIdInfo$Id: l3str.dtx 4452 2013-01-20 10:18:32Z joseph $
{L3 Experimental strings}
%</driver|package>
%<*driver>
\documentclass[full]{l3doc}
\usepackage{amsmath}
\begin{document}
- \tableofcontents
\DocInput{\jobname.dtx}
\end{document}
%</driver>
@@ -67,8 +66,9 @@
% \date{Released \ExplFileDate}
%
% \maketitle
+%
+% \tableofcontents
%
-% \newcommand{\hexnum}[1]{\text{\texttt{\char`\"}#1}}
% \begin{documentation}
%
% \LaTeX3 provides a set of functions to manipulate token lists
@@ -76,42 +76,39 @@
% characters.
%
% String variables are simply specialised token lists, but by convention
-% should be named with the suffix \texttt{\ldots str}. Such variables
+% should be named with the suffix \texttt{\ldots{}str}. Such variables
% should contain characters with category code $12$ (other), except
-% spaces, which have category code $10$ (blank space). All the functions
-% in this module first convert their argument to a string for internal
-% processing, and will not treat a token list or the corresponding
-% string representation differently.
+% spaces, which have category code $10$ (blank space). All the
+% functions in this module which accept a token list argument first
+% convert it to a string using \cs{tl_to_str:n} for internal processing,
+% and will not treat a token list or the corresponding string
+% representation differently.
%
-% Most functions in this module come in three flavours:
+% Most expandable functions in this module come in three flavours:
% \begin{itemize}
-% \item \cs{str_...:N...}, which expect a token list or string
+% \item \cs{str_...:N}, which expect a token list or string
% variable as their argument;
-% \item \cs{str_...:n...}, taking any token list (or string) as an
+% \item \cs{str_...:n}, taking any token list (or string) as an
% argument;
-% \item \cs{str_..._ignore_spaces:n...}, which ignores any space
-% encountered during the operation: these functions are faster than
-% those which take care of escaping spaces appropriately;
+% \item \cs{str_..._ignore_spaces:n}, which ignores any space
+% encountered during the operation: these functions are typically
+% faster than those which take care of escaping spaces
+% appropriately.
% \end{itemize}
%
% \section{Building strings}
%
-% \begin{variable}{\c_max_char_int}
-% The maximum valid character code, $255$ for pdf\TeX{}, and $1114111$
-% for \XeTeX{} and \LuaTeX{}.
-% \end{variable}
-%
% \begin{variable}
% {
% \c_backslash_str,
-% \c_lbrace_str,
-% \c_rbrace_str,
+% \c_left_brace_str,
+% \c_right_brace_str,
% \c_hash_str,
% \c_tilde_str,
% \c_percent_str
% }
-% Constant strings, containing a single character, with category code
-% $12$. Any character can be accessed as \cs{iow_char:N}
+% Constant strings, containing a single character token, with category
+% code $12$. Any character can be accessed as \cs{iow_char:N}
% |\|\meta{character}.
% \end{variable}
%
@@ -121,16 +118,31 @@
% \cs{tl_to_str:n} \Arg{token list}
% \end{syntax}
% Converts the \meta{token list} to a \meta{string}, leaving the
-% resulting tokens in the input stream.
+% resulting character tokens in the input stream.
% \begin{texnote}
-% The string representation of a token list may depend on the
-% category codes in effect when it is evaluated, and the value of
-% the \tn{escapechar}: for instance |\tl_to_str:n {\a}| normally
-% produces the three character \enquote{backslash},
-% \enquote{lower-case a}, \enquote{space}, but it may also produce
-% $1$ or $2$ characters depending on the escape character, and the
-% category code of \texttt{a}. This impacts almost all functions in
-% the module, which use \cs{tl_to_str:n} internally.
+% Converting a \meta{token list} to a \meta{string} yields a
+% concatenation of the string representations of every token in the
+% \meta{token list}.
+% The string representation of a control sequence is
+% \begin{itemize}
+% \item an escape character, whose character code is given by the
+% internal parameter \tn{escapechar}, absent if the
+% \tn{escapechar} is negative;
+% \item the control sequence name, as defined by \cs{cs_to_str:N};
+% \item a space, unless the control sequence name is a single
+% character whose category at the time of expansion of
+% \cs{tl_to_str:n} is not \enquote{letter}.
+% \end{itemize}
+% The string representation of an explicit character token is that
+% character, doubled in the case of (explicit) macro parameter
+% characters (normally |#|).
+% In particular, the string representation of a token list may
+% depend on the category codes in effect when it is evaluated, and
+% the value of the \tn{escapechar}: for instance |\tl_to_str:n {\a}|
+% normally produces the three character \enquote{backslash},
+% \enquote{lower-case a}, \enquote{space}, but it may also produce a
+% single \enquote{lower-case a} if the escape character is negative
+% and \texttt{a} is currently not a letter.
% \end{texnote}
% \end{function}
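For instance (assuming standard category codes and the default \escapechar; \foo is an arbitrary, possibly undefined, name used purely for illustration):
\begin{verbatim}
\iow_term:x { \tl_to_str:n { \foo \% } }
% expected terminal output:  \foo \%
% "\foo" gains a trailing space because its name has several
% characters, while "\%" does not, its name being a single
% non-letter character.
\end{verbatim}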
%
@@ -139,7 +151,7 @@
% \cs{str_new:N} \meta{str~var}
% \end{syntax}
% Creates a new \meta{str~var} or raises an error if the name is
-% already taken. The declaration is global. The \meta{str~var} will
+% already taken. The declaration is global. The \meta{str~var} will
% initially be empty.
% \end{function}
%
@@ -148,7 +160,7 @@
% \cs{str_const:Nn} \meta{str~var} \Arg{token list}
% \end{syntax}
% Creates a new constant \meta{str~var} or raises an error if the name
-% is already taken. The value of the \meta{str~var} will be set
+% is already taken. The value of the \meta{str~var} will be set
% globally to the \meta{token list}, converted to a string.
% \end{function}
%
@@ -201,13 +213,12 @@
% \begin{syntax}
% \cs{str_count:n} \Arg{token list}
% \end{syntax}
-% Leaves the number of tokens in the string representation of
-% \meta{token list}
-% in the input stream as an integer denotation. The functions differ
-% in their treatment of spaces. In the case of \cs{str_count:N} and
-% \cs{str_count:n}, all characters including spaces are counted. The
-% \cs{str_count_ignore_spaces:n} leaves the number of non-space
-% characters in the input stream.
+% Leaves in the input stream the number of characters in the string
+% representation of \meta{token list}, as an integer denotation. The
+% functions differ in their treatment of spaces. In the case of
+% \cs{str_count:N} and \cs{str_count:n}, all characters including
+% spaces are counted. The \cs{str_count_ignore_spaces:n} function
+% leaves the number of non-space characters in the input stream.
% \end{function}
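A small illustration of the difference (expected values follow from the description above):
\begin{verbatim}
\str_count:n { a~b~c }                % expected to yield 5
\str_count_ignore_spaces:n { a~b~c }  % expected to yield 3
\end{verbatim}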
%
% \begin{function}[EXP]{\str_count_spaces:N, \str_count_spaces:n}
@@ -226,14 +237,14 @@
% \end{syntax}
% Converts the \meta{token list} into a \meta{string}. The first
% character in the \meta{string} is then left in the input stream,
-% with category code \enquote{other}. The functions differ in their
-% treatment of spaces. In the case of \cs{str_head:N} and
-% \cs{str_head:n}, a leading space is returned with category code $10$
-% (blank space). The \cs{str_head_ignore_spaces:n} function leaves
-% the first non-space character in the input stream. If the
-% \meta{token list} is empty (or blank in the case of the
-% \texttt{_ignore_spaces} variant), then nothing is left on the input
-% stream.
+% with category code \enquote{other}. The functions differ if the
+% first character is a space: \cs{str_head:N} and \cs{str_head:n}
+% return a space token with category code~$10$ (blank space), while
+% the \cs{str_head_ignore_spaces:n} function ignores this space
+% character and leaves the first non-space character in the input
+% stream. If the \meta{string} is empty (or only contains spaces in
+% the case of the \texttt{_ignore_spaces} function), then nothing is
+% left on the input stream.
% \end{function}
%
% \begin{function}[EXP]{\str_tail:N, \str_tail:n, \str_tail_ignore_spaces:n}
@@ -257,35 +268,35 @@
% \end{syntax}
% Converts the \meta{token list} to a \meta{string}, and leaves in the
% input stream the character in position \meta{integer expression} of
-% the \meta{string}. In the case of \cs{str_item:Nn} and
-% \cs{str_item:nn}, all characters including spaces are taken into
-% account. The \cs{str_item_ignore_spaces:nn} function skips spaces
-% in its argument. If the \meta{integer expression} is negative,
+% the \meta{string}, starting at $1$ for the first (left-most)
+% character. In the case of \cs{str_item:Nn} and \cs{str_item:nn},
+% all characters including spaces are taken into account. The
+% \cs{str_item_ignore_spaces:nn} function skips spaces when counting
+% characters. If the \meta{integer expression} is negative,
% characters are counted from the end of the \meta{string}. Hence,
-% $-1$ is the right-most character, \emph{etc.}, while $1$ is the
-% first (left-most) character.
+% $-1$ is the right-most character, \emph{etc.}
% \end{function}
%
% \begin{function}[EXP]
-% {\str_substr:Nnn, \str_substr:nnn, \str_substr_ignore_spaces:nnn}
+% {\str_range:Nnn, \str_range:nnn, \str_range_ignore_spaces:nnn}
% \begin{syntax}
-% \cs{str_substr:nnn} \Arg{token list} \Arg{start index} \Arg{end index}
+% \cs{str_range:nnn} \Arg{token list} \Arg{start index} \Arg{end index}
% \end{syntax}
% Converts the \meta{token list} to a \meta{string}, and leaves in the
-% input stream the characters from the \meta{start index} inclusive to
-% the \meta{end index} exclusive. Note that the token count of the
-% substring is equal to the difference between the two \meta{indices}.
-% If either of \meta{start index} or \meta{end index} is negative,
-% then it is incremented by the token count of the list. If either of
-% \meta{start index} or \meta{end index} is empty, it is replaced by
-% the corresponding end-point of the string. Both \meta{start index}
-% and \meta{end index} count from $1$ for the first (left most)
-% character. For instance,
+% input stream the characters from the \meta{start index} to the
+% \meta{end index} inclusive. Positive \meta{indices} are counted
+% from the start of the string, $1$~being the first character, and
+% negative \meta{indices} are counted from the end of the string,
+% $-1$~being the last character. If either of \meta{start index} or
+% \meta{end index} is~$0$, the result is empty. For instance,
% \begin{verbatim}
-% \iow_term:x { \str_substr:nnn { abcdef } { 2 } { 5 } }
-% \iow_term:x { \str_substr:nnn { abcdef } { -4 } { } }
+% \iow_term:x { \str_range:nnn { abcdef } { 2 } { 5 } }
+% \iow_term:x { \str_range:nnn { abcdef } { -4 } { -1 } }
+% \iow_term:x { \str_range:nnn { abcdef } { -2 } { -1 } }
+% \iow_term:x { \str_range:nnn { abcdef } { 0 } { -1 } }
% \end{verbatim}
-% will print \texttt{bcd} and \texttt{cdef} to the terminal.
+% will print \texttt{bcd}, \texttt{cdef}, \texttt{ef}, and an empty
+% line to the terminal.
% \end{function}
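To illustrate the indexing convention shared with \str_item:nn (a sketch; the printed characters follow from the description above):
\begin{verbatim}
\iow_term:x { \str_item:nn { abcdef } { 3 } }   % expected: c
\iow_term:x { \str_item:nn { abcdef } { -2 } }  % expected: e
\end{verbatim}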
%
% \section{String conditionals}
@@ -299,9 +310,10 @@
% \cs{str_if_eq_p:nn} \Arg{tl_1} \Arg{tl_2}
% \cs{str_if_eq:nnTF} \Arg{tl_1} \Arg{tl_2} \Arg{true code} \Arg{false code}
% \end{syntax}
-% Compares the two \meta{token lists} on a character by character
-% basis, and is \texttt{true} if the two lists contain the same
-% characters in the same order. Thus for example
+% Compares the string representations of the two \meta{token lists} on
+% a character by character basis, and is \texttt{true} if the two
+% lists contain the same characters in the same order. Thus for
+% example
% \begin{verbatim}
% \str_if_eq_p:no { abc } { \tl_to_str:n { abc } }
% \end{verbatim}
@@ -387,211 +399,63 @@
% code and so should only be used for short-term storage.
% \end{variable}
%
-% \section{Encoding functions}
-%
-% Traditionally, string encodings only specify how strings of characters
-% should be stored as bytes. However, the resulting lists of bytes are
-% often to be used in contexts where only a restricted subset of bytes
-% are permitted (\emph{e.g.}, \textsc{pdf} string objects,
-% \textsc{url}s). Hence, storing a string of characters is done in two
-% steps.
-% \begin{itemize}
-% \item The code points (\enquote{character codes}) are expressed as
-% bytes following a given \enquote{encoding}. This can be
-% \textsc{utf-16}, \textsc{iso 8859-1}, \emph{etc.} See
-% Table~\ref{tab:encodings} for a list of supported
-% encodings.\footnote{Encodings and escapings will be added as they
-% are requested.}
-% \item Bytes are translated to \TeX{} tokens through a given
-% \enquote{escaping}. Those are defined for the most part by the
-% \texttt{pdf} file format. See Table~\ref{tab:escapings} for a
-% list of escaping methods supported.\footnotemark
-% \end{itemize}
-%
-% \begin{table}\centering
-% \caption{\label{tab:encodings}Supported encodings.
-% Non-alphanumeric characters are ignored,
-% and capital letters are lower-cased
-% before searching for the encoding in this list.}
-% \begin{tabular}{cc}
-% \toprule
-% \meta{Encoding} & description \\
-% \midrule
-% \texttt{utf8} & \textsc{utf-8} \\
-% \texttt{utf16} & \textsc{utf-16}, with byte-order mark \\
-% \texttt{utf16be} & \textsc{utf-16}, big-endian \\
-% \texttt{utf16le} & \textsc{utf-16}, little-endian \\
-% \texttt{utf32} & \textsc{utf-32}, with byte-order mark \\
-% \texttt{utf32be} & \textsc{utf-32}, big-endian \\
-% \texttt{utf32le} & \textsc{utf-32}, little-endian \\
-% \texttt{iso88591}, \texttt{latin1} & \textsc{iso 8859-1} \\
-% \texttt{iso88592}, \texttt{latin2} & \textsc{iso 8859-2} \\
-% \texttt{iso88593}, \texttt{latin3} & \textsc{iso 8859-3} \\
-% \texttt{iso88594}, \texttt{latin4} & \textsc{iso 8859-4} \\
-% \texttt{iso88595} & \textsc{iso 8859-5} \\
-% \texttt{iso88596} & \textsc{iso 8859-6} \\
-% \texttt{iso88597} & \textsc{iso 8859-7} \\
-% \texttt{iso88598} & \textsc{iso 8859-8} \\
-% \texttt{iso88599}, \texttt{latin5} & \textsc{iso 8859-9} \\
-% \texttt{iso885910}, \texttt{latin6} & \textsc{iso 8859-10} \\
-% \texttt{iso885911} & \textsc{iso 8859-11} \\
-% \texttt{iso885913}, \texttt{latin7} & \textsc{iso 8859-13} \\
-% \texttt{iso885914}, \texttt{latin8} & \textsc{iso 8859-14} \\
-% \texttt{iso885915}, \texttt{latin9} & \textsc{iso 8859-15} \\
-% \texttt{iso885916}, \texttt{latin10} & \textsc{iso 8859-16} \\
-% \midrule
-% Empty & Native (Unicode) string. \\
-% \bottomrule
-% \end{tabular}
-% \end{table}
-%
-% \begin{table}\centering
-% \caption{\label{tab:escapings}Supported escapings.
-% Non-alphanumeric characters are ignored,
-% and capital letters are lower-cased
-% before searching for the escaping in this list.}
-% \begin{tabular}{cc}
-% \toprule
-% \meta{Escaping} & description \\
-% \midrule
-% \texttt{bytes}, or empty
-% & arbitrary bytes \\
-% \texttt{hex}, \texttt{hexadecimal}
-% & byte $=$ two hexadecimal digits \\
-% \texttt{name}
-% & see \tn{pdfescapename} \\
-% \texttt{string}
-% & see \tn{pdfescapestring} \\
-% \texttt{url}
-% & encoding used in \textsc{url}s \\
-% \bottomrule
-% \end{tabular}
-% \end{table}
-%
-% \begin{function}{\str_set_convert:Nnnn,\str_gset_convert:Nnnn}
-% \begin{syntax}
-% \cs{str_set_convert:Nnnn} \meta{str~var} \Arg{string} \Arg{name~1} \Arg{name~2}
-% \end{syntax}
-% This function converts the \meta{string} from the encoding given by
-% \meta{name~1} to the encoding given by \meta{name~2}, and stores the
-% result in the \meta{str~var}. Each \meta{name} can have the form
-% \meta{encoding} or \meta{encoding}\texttt{/}\meta{escaping}, where
-% the possible values of \meta{encoding} and \meta{escaping} are given
-% in Tables~\ref{tab:encodings} and~\ref{tab:escapings}, respectively.
-% The default escaping is to input and output bytes directly. The
-% special case of an empty \meta{name} indicates the use of
-% \enquote{native} strings, 8-bit for pdf\TeX{}, and Unicode strings
-% for the other two engines.
-%
-% For example,
-% \begin{verbatim}
-% \str_set_convert:Nnnn \l_foo_str { Hello! } { } { utf16/hex }
-% \end{verbatim}
-% results in the variable \cs{l_foo_str} holding the string
-% \texttt{FEFF00480065006C006C006F0021}. This is obtained by
-% converting each character in the (native) string \texttt{Hello!} to
-% the \textsc{utf-16} encoding, and expressing each byte as a pair of
-% hexadecimal digits. Note the presence of a (big-endian) byte order
-% mark \hexnum{FEFF}, which can be avoided by specifying the encoding
-% \texttt{utf16be/hex}.
-%
-% An error is raised if the \meta{string} is not valid according to
-% the \meta{escaping~1} and \meta{encoding~1}, or if it cannot be
-% reencoded in the \meta{encoding~2} and \meta{escaping~2} (for
-% instance, if a character does not exist in the \meta{encoding~2}).
-% Erroneous input is replaced by the Unicode replacement character
-% \hexnum{FFFD}, and characters which cannot be reencoded are replaced
-% by either the replacement character \hexnum{FFFD} if it exists in
-% the \meta{encoding~2}, or an encoding-specific replacement
-% character, or the question mark character.
-% \end{function}
-%
-% \begin{function}[TF]{\str_set_convert:Nnnn,\str_gset_convert:Nnnn}
-% \begin{syntax}
-% \cs{str_set_convert:NnnnTF} \meta{str~var} \Arg{string} \Arg{name~1} \Arg{name~2} \Arg{true code} \Arg{false code}
-% \end{syntax}
-% As \cs{str_set_convert:Nnnn}, converts the \meta{string} from the
-% encoding given by \meta{name~1} to the encoding given by
-% \meta{name~2}, and assigns the result to \meta{str~var}. Contrarily
-% to \cs{str_set_convert:Nnnn}, the conditional variant does not raise
-% errors in case the \meta{string} is not valid according to the
-% \meta{name~1} encoding, or cannot be expressed in the \meta{name~2}
-% encoding. Instead, the \meta{false code} is performed.
-% \end{function}
-%
-% \section{Internal string functions}
+% \section{Internal \pkg{l3str} functions}
%
-% \begin{function}{\__str_gset_other:Nn}
+% \begin{function}[EXP]{\__str_to_other:n}
% \begin{syntax}
-% \cs{__str_gset_other:Nn} \meta{tl~var} \Arg{token list}
+% \cs{__str_to_other:n} \Arg{token list}
% \end{syntax}
-% Converts the \meta{token list} to an \meta{other string}, where
-% spaces have category code \enquote{other}, and assigns the result to
-% the \meta{tl~var}, globally.
+% Converts the \meta{token list} to an \meta{other string}, where
+% spaces have category code \enquote{other}. This function can be
+% \texttt{f}-expanded without fear of losing a leading space, since
+% spaces do not have category code $10$ in its result. It takes
+% time quadratic in the character count of the string, but there exist
+% non-expandable ways to reach linear time.
% \end{function}
%
-% \begin{function}{\__str_hexadecimal_use:NTF}
+% \begin{function}[EXP]{\__str_count_unsafe:n}
% \begin{syntax}
-% \cs{__str_hexadecimal_use:NTF} \meta{token} \Arg{true code} \Arg{false code}
+% \cs{__str_count_unsafe:n} \Arg{other string}
% \end{syntax}
-% If the \meta{token} is a hexadecimal digit (upper case or lower
-% case), its upper-case version is left in the input stream,
-% \emph{followed} by the \meta{true code}. Otherwise, the \meta{false
-% code} is left in the input stream.
-% \begin{texnote}
-% This function fails on some inputs if the escape character is a
-% hexadecimal digit. We are thus careful to set the escape
-% character to a known (safe) value before using it.
-% \end{texnote}
+% This function expects an argument that is entirely made of
+% characters with category \enquote{other}, as produced by
+% \cs{__str_to_other:n}. It leaves in the input stream the number of
+% character tokens in the \meta{other string}, faster than the
+% analogous \cs{str_count:n} function.
% \end{function}
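A sketch of why the category code of spaces matters here (behaviour assumed from the descriptions of the two internal functions above):
\begin{verbatim}
\iow_term:x
  { \exp_args:Nf \__str_count_unsafe:n { \__str_to_other:n { ~abc } } }
% expected to print 4: the leading space survives the f-expansion,
% since it has category code "other" in the result of
% \__str_to_other:n.
\end{verbatim}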
%
-% \begin{function}[EXP]{\__str_output_byte:n}
+% \begin{function}[EXP]{\__str_range_unsafe:nnn}
% \begin{syntax}
-% \cs{__str_output_byte:n} \Arg{intexpr}
+% \cs{__str_range_unsafe:nnn} \Arg{other string} \Arg{start index} \Arg{end index}
% \end{syntax}
-% Expands to a character token with category other and character code
-% equal to the value of \meta{intexpr}. The value of \meta{intexpr}
-% must be in the range $[-1, 255]$, and any value outside this range
-% results in undefined behaviour. The special value $-1$ is used to
-% produce an empty result.
+% Identical to \cs{str_range:nnn} except that the first argument is
+% expected to be entirely made of characters with category
+% \enquote{other}, as produced by \cs{__str_to_other:n}, and the
+% result is also an \meta{other string}.
% \end{function}
%
-% \section{Possibilities, and things to do}
+% \section{Possible additions to \pkg{l3str}}
%
-% Encoding/escaping-related tasks.
+% Semantically correct copies of some \texttt{tl} functions.
% \begin{itemize}
-% \item Describe the internal format in the code comments. Refuse code
-% points in $[\hexnum{D800}, \hexnum{DFFF}]$ in the internal
-% representation?
-% \item Add documentation about each encoding and escaping method, and
-% add examples.
-% \item The \texttt{hex} unescaping should raise an error for
-% odd-token count strings.
-% \item Decide what bytes should be escaped in the \texttt{url}
-% escaping. Perhaps |!'()*-./0123456789_| are safe, and all other
-% characters should be escaped?
-% \item Automate generation of 8-bit mapping files.
-% \item Change the framework for 8-bit encodings: for decoding from
-% 8-bit to Unicode, use $256$ integer registers; for encoding, use a
-% tree-box.
-% \item More encodings (see Heiko's \pkg{stringenc}). CESU?
-% \item More escapings: shell escapes, lua escapes, etc?
+% \item \cs{c_space_str}
+% \item \cs{str_clear:N}, \cs{str_gclear:N}, \cs{str_clear_new:N},
+% \cs{str_gclear_new:N}.
+% \item \cs{str_concat:NNN}, \cs{str_gconcat:NNN}
+% \item \cs{str_set_eq:NN}, \cs{str_gset_eq:NN}
+% \item \cs{str_if_empty:NTF}, \cs{str_if_empty_p:N}
+% \item \cs{str_if_exist:NTF}, \cs{str_if_exist_p:N}
+% \item \cs{str_use:N}
% \end{itemize}
%
-% Other string tasks.
+% Some functions that are not copies of \texttt{tl} functions.
% \begin{itemize}
+% \item \cs{str_if_blank:NTF}, \cs{str_if_blank_p:N}.
+% \item \cs{str_map_inline:Nn}, \cs{str_map_function:NN},
+% \cs{str_map_variable:NNn}, and \texttt{:n} analogs.
% \item Expandable \cs{str_if_in:nnTF}?
-% \item \cs{str_if_head_eq:nN}
+% \item \cs{str_if_head_eq:nNTF}, \cs{str_if_head_eq_p:nN}
% \item \cs{str_if_numeric/decimal/integer:n}, perhaps in \pkg{l3fp}?
-% \item Should \cs{str_item:Nn} be \cs{str_char:Nn}?
-% \item Should \cs{str_substr:Nnn} be \cs{str_range:Nnn}?
-% \item Introduce \cs{str_slice:Nnnn} with a third \enquote{step}
-% argument? Or should we simply have \cs{str_slice:Nn}
-% \meta{string} \Arg{clist}, where the \meta{clist}'s items are
-% either one integer expression, two integer expressions separated
-% by |:|, or three integer expressions separated by |:|, \emph{cf.}
-% Python's extended slice syntax?
-% \item Analog of \texttt{printf}?
% \end{itemize}
%
% \end{documentation}
@@ -611,7 +475,6 @@
% \begin{macrocode}
\ProvidesExplPackage
{\ExplFileName}{\ExplFileDate}{\ExplFileVersion}{\ExplFileDescription}
-\RequirePackage{l3tl-analysis,l3tl-build,l3flag}
% \end{macrocode}
%
% The following string-related functions are currently defined in
@@ -623,23 +486,11 @@
% \item \cs{token_to_str:N}, \cs{cs_to_str:N}
% \item \cs{str_head:n}, \cs{__str_head:w}, (copied here)
% \item \cs{str_tail:n}, \cs{__str_tail:w}, (copied here)
-% \item \cs{str_count_ignore_spaces} (unchanged)
-% \item \cs{str_count_loop:NNNNNNNNN} (unchanged)
+% \item \cs{__str_count_ignore_spaces} (unchanged)
+% \item \cs{__str_count_loop:NNNNNNNNN} (unchanged)
% \end{itemize}
%
-% \subsection{Helpers}
-%
-% \subsubsection{A function unrelated to strings}
-%
-% \begin{macro}[EXP,aux]{\use_ii_i:nn}
-% A function used to swap its arguments.
-% \begin{macrocode}
-\cs_if_exist:NF \use_ii_i:nn
- { \cs_new:Npn \use_ii_i:nn #1#2 { #2 #1 } }
-% \end{macrocode}
-% \end{macro}
-%
-% \subsubsection{Assigning strings}
+% \subsection{String assignments}
%
% \begin{macro}{\str_new:N, \str_new:c}
% A string is simply a token list.
@@ -686,73 +537,13 @@
% \end{macrocode}
% \end{macro}
%
-% \subsubsection{Variables and constants}
-%
-% \begin{macro}{\@@_tmp:w}
-% \begin{variable}{\l_@@_internal_int}
-% \begin{variable}{\l_@@_internal_tl}
-% Internal scratch space for some functions.
-% \begin{macrocode}
-\cs_new_protected_nopar:Npn \@@_tmp:w { }
-\tl_new:N \l_@@_internal_tl
-\int_new:N \l_@@_internal_int
-% \end{macrocode}
-% \end{variable}
-% \end{variable}
-% \end{macro}
-%
-% \begin{variable}{\g_@@_result_tl}
-% The \cs{g_@@_result_tl} variable is used to hold the result of
-% various internal string operations (mostly conversions) which are
-% typically performed in a group. The variable is global so that it
-% remains defined outside the group, to be assigned to a user-provided
-% variable.
-% \begin{macrocode}
-\tl_new:N \g_@@_result_tl
-% \end{macrocode}
-% \end{variable}
-%
-% \begin{variable}
-% {
-% \c_forty_eight, \c_fifty_eight, \c_sixty_five, \c_ninety_one,
-% \c_ninety_seven, \c_one_hundred_twenty_three,
-% \c_one_hundred_twenty_seven
-% }
-% We declare here some integer values which delimit ranges of ASCII
-% characters of various types. This is mostly used in \pkg{l3regex}.
-% \begin{macrocode}
-\int_const:Nn \c_forty_eight { 48 }
-\int_const:Nn \c_fifty_eight { 58 }
-\int_const:Nn \c_sixty_five { 65 }
-\int_const:Nn \c_ninety_one { 91 }
-\int_const:Nn \c_ninety_seven { 97 }
-\int_const:Nn \c_one_hundred_twenty_three { 123 }
-\int_const:Nn \c_one_hundred_twenty_seven { 127 }
-% \end{macrocode}
-% \end{variable}
-%
-% \begin{variable}{\c_max_char_int}
-% The maximum valid character code is $255$ for pdf\TeX{}, and
-% $1114111$ for other engines.
-% \begin{macrocode}
-\int_const:Nn \c_max_char_int
- { \pdftex_if_engine:TF { "FF } { "10FFFF } }
-% \end{macrocode}
-% \end{variable}
-%
-% \begin{variable}{\c_@@_replacement_char_int}
-% When converting, invalid bytes are replaced by the Unicode
-% replacement character \hexnum{FFFD}.
-% \begin{macrocode}
-\int_const:Nn \c_@@_replacement_char_int { "FFFD }
-% \end{macrocode}
-% \end{variable}
+% \subsection{String variables and constants}
%
% \begin{variable}
% {
% \c_backslash_str,
-% \c_lbrace_str,
-% \c_rbrace_str,
+% \c_left_brace_str,
+% \c_right_brace_str,
% \c_hash_str,
% \c_tilde_str,
% \c_percent_str
@@ -760,153 +551,36 @@
% For all of those strings, use \cs{cs_to_str:N} to get characters with
% the correct category code.
% \begin{macrocode}
-\tl_const:Nx \c_backslash_str { \cs_to_str:N \\ }
-\tl_const:Nx \c_lbrace_str { \cs_to_str:N \{ }
-\tl_const:Nx \c_rbrace_str { \cs_to_str:N \} }
-\tl_const:Nx \c_hash_str { \cs_to_str:N \# }
-\tl_const:Nx \c_tilde_str { \cs_to_str:N \~ }
-\tl_const:Nx \c_percent_str { \cs_to_str:N \% }
-% \end{macrocode}
-% \end{variable}
-%
-% \begin{variable}{\g_@@_alias_prop}
-% To avoid needing one file per encoding/escaping alias, we keep track
-% of those in a property list.
-% \begin{macrocode}
-\prop_new:N \g_@@_alias_prop
-\prop_gput:Nnn \g_@@_alias_prop { latin1 } { iso88591 }
-\prop_gput:Nnn \g_@@_alias_prop { latin2 } { iso88592 }
-\prop_gput:Nnn \g_@@_alias_prop { latin3 } { iso88593 }
-\prop_gput:Nnn \g_@@_alias_prop { latin4 } { iso88594 }
-\prop_gput:Nnn \g_@@_alias_prop { latin5 } { iso88599 }
-\prop_gput:Nnn \g_@@_alias_prop { latin6 } { iso885910 }
-\prop_gput:Nnn \g_@@_alias_prop { latin7 } { iso885913 }
-\prop_gput:Nnn \g_@@_alias_prop { latin8 } { iso885914 }
-\prop_gput:Nnn \g_@@_alias_prop { latin9 } { iso885915 }
-\prop_gput:Nnn \g_@@_alias_prop { latin10 } { iso885916 }
-\prop_gput:Nnn \g_@@_alias_prop { utf16le } { utf16 }
-\prop_gput:Nnn \g_@@_alias_prop { utf16be } { utf16 }
-\prop_gput:Nnn \g_@@_alias_prop { utf32le } { utf32 }
-\prop_gput:Nnn \g_@@_alias_prop { utf32be } { utf32 }
-\prop_gput:Nnn \g_@@_alias_prop { hexadecimal } { hex }
+\str_const:Nx \c_backslash_str { \cs_to_str:N \\ }
+\str_const:Nx \c_left_brace_str { \cs_to_str:N \{ }
+\str_const:Nx \c_right_brace_str { \cs_to_str:N \} }
+\str_const:Nx \c_hash_str { \cs_to_str:N \# }
+\str_const:Nx \c_tilde_str { \cs_to_str:N \~ }
+\str_const:Nx \c_percent_str { \cs_to_str:N \% }
% \end{macrocode}
% \end{variable}
%
-% \begin{variable}{\g_@@_error_bool}
-% In conversion functions with a built-in conditional, errors are not
-% reported directly to the user, but the information is collected in
-% this boolean, used at the end to decide on which branch of the
-% conditional to take.
+% \begin{variable}{\l_tmpa_str, \l_tmpb_str, \g_tmpa_str, \g_tmpb_str}
+% Scratch strings.
% \begin{macrocode}
-\bool_new:N \g_@@_error_bool
+\str_new:N \l_tmpa_str
+\str_new:N \l_tmpb_str
+\str_new:N \g_tmpa_str
+\str_new:N \g_tmpb_str
% \end{macrocode}
% \end{variable}
%
-% \begin{variable}{str_byte, str_error}
-% Conversions from one \meta{encoding}/\meta{escaping} pair to another
-% are done within \texttt{x}-expanding assignments. Errors are
-% signalled by raising the relevant flag.
-% \begin{macrocode}
-\flag_new:n { str_byte }
-\flag_new:n { str_error }
-% \end{macrocode}
-% \end{variable}
-%
-% \subsubsection{Escaping spaces}
+% \subsection{Counting characters}
%
-% \begin{macro}[EXP]{\@@_to_other:n}
-% \begin{macro}[EXP, aux]{\@@_to_other_loop:w, \@@_to_other_end:w}
-% \begin{syntax}
-% \cs{@@_to_other:n} \Arg{token list}
-% \end{syntax}
-% Converts the \meta{token list} to a \meta{other string}, where
-% spaces have category code \enquote{other}. First apply
-% \cs{tl_to_str:n}, then replace all spaces by \enquote{other} spaces,
-% $8$ at a time, storing the converted part of the string between the
-% \cs{q_mark} and \cs{q_stop} markers. This function can be
-% \texttt{f}-expanded without fear of losing a leading space, since
-% spaces do not have category code $10$ in its result. This function
-% takes a time quadratic in the token count of the string;
-% \cs{@@_gset_other:Nn} is faster but not expandable.
+% \begin{macro}[EXP]{\str_count_spaces:N, \str_count_spaces:n}
+% \begin{macro}[EXP, aux]{\@@_count_spaces_loop:wwwwwwwww}
+% To speed up this function, we grab and discard $9$ space-delimited
+% arguments in each iteration of the loop. The loop stops when the
+% last argument is one of the trailing |X|\meta{number}, and that
+% \meta{number} is added to the sum of $9$s that precedes it, to
+% adjust the result.
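+% For example, assuming the behaviour just described,
+% |\str_count_spaces:n { one two three }| should expand to |2|, one
+% for each space in the argument.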
% \begin{macrocode}
-\group_begin:
-\char_set_lccode:nn { `\* } { `\ }
-\char_set_lccode:nn { `\A } { `\A }
-\tl_to_lowercase:n
- {
- \group_end:
- \cs_new:Npn \@@_to_other:n #1
- {
- \exp_after:wN \@@_to_other_loop:w \tl_to_str:n {#1} ~ %
- A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ \q_mark \q_stop
- }
- \cs_new:Npn \@@_to_other_loop:w
- #1 ~ #2 ~ #3 ~ #4 ~ #5 ~ #6 ~ #7 ~ #8 ~ #9 \q_stop
- {
- \if_meaning:w A #8
- \@@_to_other_end:w
- \fi:
- \@@_to_other_loop:w
- #9 #1 * #2 * #3 * #4 * #5 * #6 * #7 * #8 * \q_stop
- }
- \cs_new:Npn \@@_to_other_end:w \fi: #1 \q_mark #2 * A #3 \q_stop
- { \fi: #2 }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_gset_other:Nn}
-% \begin{macro}[aux,EXP]{\@@_gset_other_loop:w}
-% \begin{macro}[aux,EXP]{\@@_gset_other_end:w}
-% This function could be done by using \cs{@@_to_other:n} within
-% an \texttt{x}-expansion, but that would take a time quadratic in the
-% size of the string. Instead, we can \enquote{leave the result behind
-% us} in the input stream, to be captured into the expanding
-% assignment. This gives us a linear time.
-% \begin{macrocode}
-\group_begin:
-\char_set_lccode:nn { `\* } { `\ }
-\char_set_lccode:nn { `\A } { `\A }
-\tl_to_lowercase:n
- {
- \group_end:
- \cs_new_protected:Npn \@@_gset_other:Nn #1#2
- {
- \tl_gset:Nx #1
- {
- \exp_after:wN \@@_gset_other_loop:w \tl_to_str:n {#2} ~ %
- A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ \q_stop
- }
- }
- \cs_new:Npn \@@_gset_other_loop:w
- #1 ~ #2 ~ #3 ~ #4 ~ #5 ~ #6 ~ #7 ~ #8 ~ #9 ~
- {
- \if_meaning:w A #9
- \@@_gset_other_end:w
- \fi:
- #1 * #2 * #3 * #4 * #5 * #6 * #7 * #8 * #9
- \@@_gset_other_loop:w *
- }
- \cs_new:Npn \@@_gset_other_end:w \fi: #1 * A #2 \q_stop
- { \fi: #1 }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \subsection{Characters given by their position}
-%
-% \begin{macro}[EXP]{\str_count_spaces:N}
-% \begin{macro}[EXP]{\str_count_spaces:n}
-% \begin{macro}[EXP,aux]{\@@_count_spaces_loop:wwwwwwwww}
-% To speed up this function, we grab $9$ spaces in each step. The
-% loop stops when the last argument is one of the trailing
-% |X|\meta{number}, and that \meta{number} is added to the sum of $9$
-% that preceeds, to adjust the result.
-% \begin{macrocode}
-\cs_new:Npn \str_count_spaces:N
+\cs_new_nopar:Npn \str_count_spaces:N
{ \exp_args:No \str_count_spaces:n }
\cs_new:Npn \str_count_spaces:n #1
{
@@ -921,30 +595,29 @@
\cs_new:Npn \@@_count_spaces_loop:wwwwwwwww #1~#2~#3~#4~#5~#6~#7~#8~#9~
{
\if_meaning:w X #9
- \exp_after:wN \use_none_delimit_by_q_stop:w
+ \use_i_delimit_by_q_stop:nw
\fi:
\c_nine + \@@_count_spaces_loop:wwwwwwwww
}
% \end{macrocode}
% \end{macro}
% \end{macro}
-% \end{macro}
%
-% \begin{macro}[EXP]{\str_count:N}
-% \begin{macro}[EXP]{\str_count:n}
-% \begin{macro}[EXP]{\@@_count_unsafe:n}
-% \begin{macro}[EXP]{\str_count_ignore_spaces:n}
-% \begin{macro}[EXP,aux]{\@@_count:n, \@@_count_loop:NNNNNNNNN}
-% To measure the token count of a string we could first escape all spaces
-% using \cs{@@_to_other:o}, then measure the count of this token
-% list. However, this would be quadratic in the length of the string,
-% and we can do better. Namely, add the number of spaces (counted
-% using the functions defined above) to the length ignoring spaces.
-% To measure the length ignoring spaces we use the same technique as
-% for counting spaces: loop, grabbing $9$ characters at each step, and
-% end as soon as we reach one of the $9$ trailing items. The
-% \texttt{_unsafe} variant expects a token list consisting entirely of
-% category code $12$ characters.
+% \begin{macro}[EXP]{\str_count:N, \str_count:n, \str_count_ignore_spaces:n}
+% \begin{macro}[EXP, int]{\@@_count_unsafe:n}
+% \begin{macro}[EXP, aux]{\@@_count:n, \@@_count_loop:NNNNNNNNN}
+% To count characters in a string we could first escape all spaces
+% using \cs{@@_to_other:n}, then pass the result to \cs{tl_count:n}.
+% However, the escaping step would be quadratic in the number of
+% characters in the string, and we can do better. Namely, sum the
+% number of spaces (\cs{str_count_spaces:n}) and the result of
+% \cs{tl_count:n}, which ignores spaces. Since strings tend to be
+% longer than token lists, we use specialized functions to count
+% characters ignoring spaces.  These loop, grabbing $9$ non-space
+% characters at each step, and end as soon as they reach one of the
+% $9$ trailing items.  The \texttt{_unsafe} variant expects a token list
+% already converted to category code $12$ characters, and is used by
+% \cs{str_item:nn} and \cs{str_range:nnn}.
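+% For example, under this scheme |\str_count:n { one two }| should
+% give $7$ (six letters plus one space), while
+% |\str_count_ignore_spaces:n { one two }| should give $6$.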
% \begin{macrocode}
\cs_new_nopar:Npn \str_count:N { \exp_args:No \str_count:n }
\cs_new:Npn \str_count:n #1
@@ -987,22 +660,20 @@
% \end{macro}
% \end{macro}
% \end{macro}
-% \end{macro}
-% \end{macro}
%
-% \begin{macro}[EXP]{\str_head:N}
-% \begin{macro}[EXP]{\str_head:n}
-% \begin{macro}[EXP]{\str_head_ignore_spaces:n}
-% \begin{macro}[EXP,aux]{\@@_head:w}
+% \subsection{Head and tail of a string}
+%
+% \begin{macro}[EXP]{\str_head:N, \str_head:n, \str_head_ignore_spaces:n}
+% \begin{macro}[EXP, aux]{\@@_head:w}
% The \texttt{_ignore_spaces} variant is almost identical to
% \cs{tl_head:n}. As usual, \cs{str_head:N} expands its argument and
-% hands it to \cs{str_head:n}. To circumvent the fact that \TeX{}
+% hands it to \cs{str_head:n}. To circumvent the fact that \TeX{}
% skips spaces when grabbing undelimited macro parameters,
% \cs{@@_head:w} takes an argument delimited by a space. If |#1|
% starts with a non-space character, \cs{use_i_delimit_by_q_stop:nw}
% leaves that in the input stream. On the other hand, if |#1| starts
-% with a space, the \cs{@@_head:w} takes an empty argument, and
-% the single (braced) space in the definition of \cs{@@_head:w}
+% with a space, \cs{@@_head:w} takes an empty argument, and the
+% single (initially braced) space in the definition of \cs{@@_head:w}
% makes its way to the output. Finally, for an empty argument, the
% (braced) empty brace group in the definition of \cs{str_head:n}
% gives an empty result after passing through
@@ -1025,23 +696,23 @@
% \end{macrocode}
% \end{macro}
% \end{macro}
-% \end{macro}
-% \end{macro}
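+%
+% For example, |\str_head:n { hello }| should leave |h| in the input
+% stream; for an argument starting with a space, that (category
+% code~$10$) space is returned, whereas the \texttt{_ignore_spaces}
+% variant would skip it.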
%
-% \begin{macro}[EXP]{\str_tail:N}
-% \begin{macro}[EXP]{\str_tail:n}
-% \begin{macro}[EXP]{\str_tail_ignore_spaces:n}
-% \begin{macro}[EXP,aux]{\@@_tail_auxi:w}
-% \begin{macro}[EXP,aux]{\@@_tail_auxii:w}
-% As when fetching the head of a string, the \texttt{_ignore_spaces}
-% variant is similar to \cs{tl_tail:n}. The more commonly used
-% \cs{str_tail:n} function is a little bit more convoluted: hitting
-% the front of the string with \cs{reverse_if:N} \cs{if_charcode:w}
-% \cs{scan_stop:} removes the first character (which necessarily makes
-% the test true, since it cannot match \cs{scan_stop:}). The auxiliary
-% function inserts the required \cs{fi:} to close the conditional, and
-% leaves the tail of the string in the input string. The details are
-% such that an empty string has an empty tail.
+% \begin{macro}[EXP]{\str_tail:N, \str_tail:n, \str_tail_ignore_spaces:n}
+% \begin{macro}[EXP, aux]{\@@_tail_auxi:w, \@@_tail_auxii:w}
+% Getting the tail is a little bit more convoluted than getting the
+% head of a string.  We hit the front of the string with
+% \cs{reverse_if:N}
+% \cs{if_charcode:w} \cs{scan_stop:}. This removes the first
+% character, and necessarily makes the test true, since the character
+% cannot match \cs{scan_stop:}. The auxiliary function then inserts
+% the required \cs{fi:} to close the conditional, and leaves the tail
+% of the string in the input stream. The details are such that an
+% empty string has an empty tail (this requires in particular that the
+% end-marker |X| be unexpandable and not a control sequence). The
+% \texttt{_ignore_spaces} variant is rather simpler: after converting the
+% input to a string, \cs{@@_tail_auxii:w} removes one undelimited
+% argument and leaves everything else until an end-marker \cs{q_mark}.
+% One can check that an empty (or blank) string yields an empty
+% tail.
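+% For example, |\str_tail:n { hello }| should leave |ello| in the
+% input stream, while an empty argument should give an empty result.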
% \begin{macrocode}
\cs_new_nopar:Npn \str_tail:N { \exp_args:No \str_tail:n }
\cs_set:Npn \str_tail:n #1
@@ -1054,26 +725,63 @@
\cs_new:Npn \str_tail_ignore_spaces:n #1
{
\exp_after:wN \@@_tail_auxii:w
- \tl_to_str:n {#1} X X \q_stop
+ \tl_to_str:n {#1} \q_mark \q_mark \q_stop
}
-\cs_new:Npn \@@_tail_auxii:w #1 #2 X #3 \q_stop { #2 }
+\cs_new:Npn \@@_tail_auxii:w #1 #2 \q_mark #3 \q_stop { #2 }
% \end{macrocode}
% \end{macro}
% \end{macro}
-% \end{macro}
+%
+% \subsection{Accessing specific characters in a string}
+%
+% \begin{macro}[EXP, int]{\@@_to_other:n}
+% \begin{macro}[EXP, aux]{\@@_to_other_loop:w, \@@_to_other_end:w}
+% First apply \cs{tl_to_str:n}, then replace all spaces by
+% \enquote{other} spaces, $8$ at a time, storing the converted part of
+% the string between the \cs{q_mark} and \cs{q_stop} markers. The end
+% is detected when \cs{@@_to_other_loop:w} finds one of the trailing
+% |A|, distinguished from any contents of the initial token list by
+% its category code.  Then \cs{@@_to_other_end:w} is called, and finds
+% the result between \cs{q_mark} and the first |A| (a spurious
+% trailing space is also removed at this stage).
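+% For instance, assuming this description, applying \cs{@@_to_other:n}
+% to |{ a b }| should produce the three characters |a|, space, |b|,
+% all with category code~$12$, so the result can safely be grabbed by
+% \texttt{f}-expansion without losing a leading space.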
+% \begin{macrocode}
+\group_begin:
+\char_set_lccode:nn { `\* } { `\ }
+\char_set_lccode:nn { `\A } { `\A }
+\tl_to_lowercase:n
+ {
+ \group_end:
+ \cs_new:Npn \@@_to_other:n #1
+ {
+ \exp_after:wN \@@_to_other_loop:w \tl_to_str:n {#1} ~ %
+ A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ \q_mark \q_stop
+ }
+ \cs_new:Npn \@@_to_other_loop:w
+ #1 ~ #2 ~ #3 ~ #4 ~ #5 ~ #6 ~ #7 ~ #8 ~ #9 \q_stop
+ {
+ \if_meaning:w A #8
+ \@@_to_other_end:w
+ \fi:
+ \@@_to_other_loop:w
+ #9 #1 * #2 * #3 * #4 * #5 * #6 * #7 * #8 * \q_stop
+ }
+ \cs_new:Npn \@@_to_other_end:w \fi: #1 \q_mark #2 * A #3 \q_stop
+ { \fi: #2 }
+ }
+% \end{macrocode}
% \end{macro}
% \end{macro}
%
-% \begin{macro}[EXP, int]{\@@_skip_c_zero:w}
-% \begin{macro}[EXP, aux]{\@@_skip_loop:wNNNNNNNN}
-% \begin{macro}[EXP, aux]{\@@_skip_end:w, \@@_skip_end:NNNNNNNN}
+% \begin{macro}[EXP, aux]{\@@_skip_c_zero:w}
+% \begin{macro}[EXP, aux]
+% {\@@_skip_loop:wNNNNNNNN, \@@_skip_end:w, \@@_skip_end:NNNNNNNN}
% Removes |max(#1,0)| characters from the input stream, and then
-% leaves \cs{c_zero}. This should be expanded using
+% leaves \cs{c_zero}. This should be expanded using
% \cs{tex_romannumeral:D}. We remove characters $8$ at a time until
-% there are at most $8$ to remove. Then we do a dirty trick: the
+% there are at most $8$ to remove. Then we do a dirty trick: the
% \cs{if_case:w} construction leaves between $0$ and $8$ times the
% \cs{or:} control sequence, and those \cs{or:} become arguments of
-% \cs{@@_skip_end:NNNNNNNN}. If the number of characters to remove
+% \cs{@@_skip_end:NNNNNNNN}. If the number of characters to remove
% is $6$, say, then there are two \cs{or:} left, and the $8$ arguments
% of \cs{@@_skip_end:NNNNNNNN} are the two \cs{or:}, and $6$
% characters from the input stream, exactly what we wanted to
@@ -1083,7 +791,7 @@
% \begin{macrocode}
\cs_new:Npn \@@_skip_c_zero:w #1;
{
- \if_int_compare:w \__int_eval:w #1 > \c_eight
+ \if_int_compare:w #1 > \c_eight
\exp_after:wN \@@_skip_loop:wNNNNNNNN
\else:
\exp_after:wN \@@_skip_end:w
@@ -1096,34 +804,31 @@
\cs_new:Npn \@@_skip_end:w #1 ;
{
\exp_after:wN \@@_skip_end:NNNNNNNN
- \if_case:w \if_int_compare:w #1 > \c_zero #1 \else: 0 \fi: \exp_stop_f:
- \or: \or: \or: \or: \or: \or: \or: \or:
+ \if_case:w #1 \exp_stop_f: \or: \or: \or: \or: \or: \or: \or: \or:
}
\cs_new:Npn \@@_skip_end:NNNNNNNN #1#2#3#4#5#6#7#8 { \fi: \c_zero }
% \end{macrocode}
% \end{macro}
% \end{macro}
-% \end{macro}
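+%
+% For instance, assuming the behaviour described above, expanding
+% \cs{tex_romannumeral:D} \cs{@@_skip_c_zero:w} |3 ; abcdef| should
+% leave |def| in the input stream: three characters are removed, and
+% the trailing \cs{c_zero} ends the \cs{tex_romannumeral:D} expansion
+% without producing any character.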
%
-% \begin{macro}[EXP,int]{\@@_collect_delimit_by_q_stop:w}
-% \begin{macro}[EXP,aux]{\@@_collect_loop:wn, \@@_collect_loop:wnNNNNNNN}
-% \begin{macro}[EXP,aux]
-% {\@@_collect_end:wn, \@@_collect_end:nnnnnnnnw} Collects
-% |max(#1,0)| characters, and removes everything else until
+% \begin{macro}[EXP, aux]{\@@_collect_delimit_by_q_stop:w}
+% \begin{macro}[EXP, aux]
+% {
+% \@@_collect_loop:wn, \@@_collect_loop:wnNNNNNNN,
+% \@@_collect_end:wn, \@@_collect_end:nnnnnnnnw
+% }
+% Collects |max(#1,0)| characters, and removes everything else until
% \cs{q_stop}. This is somewhat similar to \cs{@@_skip_c_zero:w}, but
-% this time we can only grab $7$ characters at a time. At the end, we
-% use an \cs{if_case:w} trick again, so that the $8$ first arguments
-% of \cs{@@_collect_end:nnnnnnnnw} are some \cs{or:}, followed by
-% an \cs{fi:}, followed by |#1| characters from the input
-% stream. Simply leaving this in the input stream will close the
-% conditional properly and the \cs{or:} disappear.
+% accepts integer expression arguments. This time we can only grab
+% $7$ characters at a time. At the end, we use an \cs{if_case:w}
+% trick again, so that the $8$ first arguments of
+% \cs{@@_collect_end:nnnnnnnnw} are some \cs{or:}, followed by an
+% \cs{fi:}, followed by |#1| characters from the input stream. Simply
+% leaving this in the input stream will close the conditional properly
+% and the \cs{or:} disappear.
% \begin{macrocode}
\cs_new:Npn \@@_collect_delimit_by_q_stop:w #1;
- {
- \exp_after:wN \@@_collect_loop:wn
- \int_use:N \__int_eval:w #1 ;
- { }
- }
+ { \@@_collect_loop:wn #1 ; { } }
\cs_new:Npn \@@_collect_loop:wn #1 ;
{
\if_int_compare:w #1 > \c_seven
@@ -1150,20 +855,25 @@
% \end{macrocode}
% \end{macro}
% \end{macro}
-% \end{macro}
%
-% \begin{macro}[EXP]{\str_item:Nn}
-% \begin{macro}[EXP]{\str_item:nn}
-% \begin{macro}[EXP]{\str_item_ignore_spaces:nn}
-% \begin{macro}[EXP]{\@@_item_unsafe:nn}
-% \begin{macro}[EXP,aux]{\@@_item:ww}
-% This is mostly shuffling arguments around to avoid measuring the
-% length of the string more than once, and make sure that the
-% parameters given to \cs{@@_skip_c_zero:w} are necessarily within
-% the bounds of the length of the string. The \texttt{_ignore_spaces}
-% function cheats a little bit in that it doesn't hand to
-% \cs{@@_item_unsafe:nn} an \enquote{other string}. This is alright,
-% as everything else is done with undelimited arguments.
+% \begin{macro}[EXP]{\str_item:Nn, \str_item:nn, \str_item_ignore_spaces:nn}
+% \begin{macro}[EXP, aux]{\@@_item_unsafe:nn, \@@_item:ww}
+% \cs{str_item:nn} hands its argument, with spaces escaped, to
+% \cs{@@_item_unsafe:nn}, and makes sure to turn the result back into
+% a proper string (with category code~$10$ spaces) eventually. The
+% \cs{str_item_ignore_spaces:nn} function cheats a little bit in that
+% it doesn't hand to \cs{@@_item_unsafe:nn} an \enquote{other string}.
+% This is safe, as everything else is done with undelimited arguments.
+% Then evaluate the \meta{index} argument and count characters in the
+% string, passing those two numbers (as |#1| and |#2| respectively) to
+% \cs{@@_item:ww} for further analysis.  If the \meta{index} is
+% negative, shift it by the character count |#2|, and remove that
+% number of characters before returning the next item in the input
+% stream (and if |#1| is smaller than $-|#2|$, nothing is returned).
+% If the \meta{index} is positive, ignore that number
+% (minus one) of characters before returning the next one. The shift
+% by one is obtained by inserting an empty brace group before the
+% string in that case: that brace group also covers the case where the
+% \meta{index} is zero.
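+% For example, with these conventions |\str_item:nn { abcd } { 3 }|
+% should give |c|, |\str_item:nn { abcd } { -1 }| should give |d|, and
+% an out-of-range \meta{index} should give an empty result.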
% \begin{macrocode}
\cs_new_nopar:Npn \str_item:Nn { \exp_args:No \str_item:nn }
\cs_new:Npn \str_item:nn #1#2
@@ -1181,8 +891,7 @@
\exp_after:wN \@@_item:ww
\int_use:N \__int_eval:w #2 \exp_after:wN ;
\__int_value:w \@@_count_unsafe:n {#1} ;
- { } #1
- \q_stop
+ #1 \q_stop
}
\cs_new:Npn \@@_item:ww #1; #2;
{
@@ -1192,7 +901,8 @@
{ \use_none_delimit_by_q_stop:w }
{
\exp_after:wN \use_i_delimit_by_q_stop:nw
- \tex_romannumeral:D \@@_skip_c_zero:w #1 + #2 + \c_one ;
+ \tex_romannumeral:D \exp_after:wN \@@_skip_c_zero:w
+ \int_use:N \__int_eval:w #1 + #2 ;
}
}
{
@@ -1200,99 +910,97 @@
{ \use_none_delimit_by_q_stop:w }
{
\exp_after:wN \use_i_delimit_by_q_stop:nw
- \tex_romannumeral:D \@@_skip_c_zero:w #1 ;
+ \tex_romannumeral:D \@@_skip_c_zero:w #1 ; { }
}
}
}
% \end{macrocode}
% \end{macro}
% \end{macro}
-% \end{macro}
-% \end{macro}
+%
+% \begin{macro}[EXP, aux]{\@@_range_normalize:nn}
+% This function converts an \meta{index} argument into an explicit
+% position in the string (a result of $0$ denoting \enquote{out of
+% bounds}).  It expects two explicit integer arguments: the
+% \meta{index} |#1| and the string count~|#2|. If |#1| is negative,
+% replace it by $|#1| + |#2| + 1$, then limit to the range $[0,
+% |#2|]$.
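+% For instance, with a string count of $5$, an \meta{index} of $-1$
+% should normalize to $5$, an \meta{index} of $-7$ to $0$ (out of
+% bounds), and an \meta{index} of $7$ to $5$.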
+% \begin{macrocode}
+\cs_new:Npn \@@_range_normalize:nn #1#2
+ {
+ \int_eval:n
+ {
+ \if_int_compare:w #1 < \c_zero
+ \if_int_compare:w #1 < -#2 \exp_stop_f:
+ \c_zero
+ \else:
+ #1 + #2 + \c_one
+ \fi:
+ \else:
+ \if_int_compare:w #1 < #2 \exp_stop_f:
+ #1
+ \else:
+ #2
+ \fi:
+ \fi:
+ }
+ }
+% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[EXP]{\str_substr:Nnn}
-% \begin{macro}[EXP]{\str_substr:nnn}
-% \begin{macro}[EXP]{\str_substr_ignore_spaces:nnn}
-% \begin{macro}[EXP,aux]
-% {
-% \@@_substr_unsafe:nnn,
-% \@@_substr:nN,
-% \@@_substr:www,
-% \@@_substr:nnw,
-% \@@_substr_normalize_range:nn
-% }
-% Sanitize the string. Then evaluate the arguments, replacing them by
-% \cs{c_zero} or \cs{c_max_int} if they are empty. Then limit the
-% range to be at most the length of the string (this avoids needing to
-% check for the end of the string when grabbing characters).
+% \begin{macro}[EXP]
+% {\str_range:Nnn, \str_range:nnn, \str_range_ignore_spaces:nnn}
+% \begin{macro}[EXP, int]{\@@_range_unsafe:nnn}
+% \begin{macro}[EXP, aux]{\@@_range:www, \@@_range:nnw}
+% Sanitize the string. Then evaluate the arguments. At this stage we
+% also decrement the \meta{start index}, since our goal is to know how
+% many characters should be removed. Then limit the range to be
+% non-negative and at most the length of the string (this avoids
+% needing to check for the end of the string when grabbing
+% characters), shifting negative numbers by the appropriate amount.
% Afterwards, skip the characters before the range, collect those
% within the range, and finally drop the end of the string.
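+% For example, under this scheme |\str_range:nnn { abcdef } { 2 } { 4 }|
+% should produce |bcd|, and |\str_range:nnn { abcdef } { 2 } { -1 }|
+% should produce |bcdef|, negative indices counting from the end of
+% the string.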
% \begin{macrocode}
-\cs_new_nopar:Npn \str_substr:Nnn { \exp_args:No \str_substr:nnn }
-\cs_new:Npn \str_substr:nnn #1#2#3
+\cs_new_nopar:Npn \str_range:Nnn { \exp_args:No \str_range:nnn }
+\cs_new:Npn \str_range:nnn #1#2#3
{
\exp_args:Nf \tl_to_str:n
{
- \exp_args:Nf \@@_substr_unsafe:nnn
+ \exp_args:Nf \@@_range_unsafe:nnn
{ \@@_to_other:n {#1} } {#2} {#3}
}
}
-\cs_new:Npn \str_substr_ignore_spaces:nnn #1
- { \exp_args:No \@@_substr_unsafe:nnn { \tl_to_str:n {#1} } }
-\cs_new:Npn \@@_substr_unsafe:nnn #1#2#3
+\cs_new:Npn \str_range_ignore_spaces:nnn #1
+ { \exp_args:No \@@_range_unsafe:nnn { \tl_to_str:n {#1} } }
+\cs_new:Npn \@@_range_unsafe:nnn #1#2#3
{
- \exp_after:wN \@@_substr:www
+ \exp_after:wN \@@_range:www
\__int_value:w \@@_count_unsafe:n {#1} \exp_after:wN ;
- \int_use:N \__int_eval:w #2 + \c_zero \exp_after:wN ;
- \int_use:N \__int_eval:w
- \exp_args:Nf \@@_substr:nN {#3} \c_max_int ;
- { } #1
- \q_stop
+ \int_use:N \__int_eval:w #2 - \c_one \exp_after:wN ;
+ \int_use:N \__int_eval:w #3 ;
+ #1 \q_stop
}
-\cs_new:Npn \@@_substr:nN #1 #2
- { \tl_if_empty:nTF {#1} {#2} {#1} }
-\cs_new:Npn \@@_substr:www #1; #2; #3;
+\cs_new:Npn \@@_range:www #1; #2; #3;
{
- \exp_args:Nf \@@_substr:nnw
- { \@@_substr_normalize_range:nn {#2} {#1} }
- { \@@_substr_normalize_range:nn {#3} {#1} }
+ \exp_args:Nf \@@_range:nnw
+ { \@@_range_normalize:nn {#2} {#1} }
+ { \@@_range_normalize:nn {#3} {#1} }
}
-\cs_new:Npn \@@_substr:nnw #1#2
+\cs_new:Npn \@@_range:nnw #1#2
{
\exp_after:wN \@@_collect_delimit_by_q_stop:w
- \int_use:N \__int_eval:w #2 + \c_one - #1 \exp_after:wN ;
+ \int_use:N \__int_eval:w #2 - #1 \exp_after:wN ;
\tex_romannumeral:D \@@_skip_c_zero:w #1 ;
}
-\cs_new:Npn \@@_substr_normalize_range:nn #1#2
- {
- \int_eval:n
- {
- \if_int_compare:w #1 < \c_zero
- \if_int_compare:w #1 < - #2 \exp_stop_f:
- \c_zero
- \else:
- #1 + #2 + \c_one
- \fi:
- \else:
- \if_int_compare:w #1 > #2 \exp_stop_f:
- #2
- \else:
- #1
- \fi:
- \fi:
- }
- }
% \end{macrocode}
% \end{macro}
% \end{macro}
% \end{macro}
-% \end{macro}
%
% \subsection{String conditionals}
%
-% \begin{macro}[EXP,pTF]{\str_if_eq:NN}
-% \begin{macro}[EXP,pTF]{\str_if_eq:nn,\str_if_eq_x:nn}
+% \begin{macro}[EXP, pTF]{\str_if_eq:NN}
+% \begin{macro}[EXP, pTF]{\str_if_eq:nn, \str_if_eq_x:nn}
% Note that \cs{str_if_eq:NN} is different from
% \cs{tl_if_eq:NN} because it needs to ignore category codes.
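+% For example, a token list variable containing |abc| with letter
+% category codes and one containing |abc| with \enquote{other}
+% category codes (\emph{e.g.}, from \cs{tl_to_str:n}) should compare
+% equal under \cs{str_if_eq:NN} but not under \cs{tl_if_eq:NN}.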
% \begin{macrocode}
@@ -1305,109 +1013,10 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}{\str_case:nnn, \str_case:onn, \str_case_x:nnn}
+% \begin{macro}[EXP]{\str_case:nnn, \str_case:onn, \str_case_x:nnn}
% Defined in \pkg{l3basics} at present.
% \end{macro}
%
-% \begin{macro}[EXP]{\@@_if_contains_char:NNT, \@@_if_contains_char:NNTF}
-% \begin{macro}[EXP]{\@@_if_contains_char:nNTF}
-% \begin{macro}[EXP,aux]{\@@_if_contains_char_aux:NN}
-% \begin{macro}[EXP,aux]{\@@_if_contains_char_true:}
-% \begin{syntax}
-% \cs{@@_if_contains_char:nNTF} \Arg{token list} \meta{char}
-% \end{syntax}
-% Expects the \meta{token list} to be an \meta{other string}: the
-% caller is responsible for ensuring that no (too-)special catcodes
-% remain. Spaces with catcode $10$ are ignored.
-% Loop over the characters of the string, comparing character codes.
-% The loop is broken if character codes match. Otherwise we return
-% \enquote{false}.
-% \begin{macrocode}
-\prg_new_conditional:Npnn \@@_if_contains_char:NN #1#2 { T , TF }
- {
- \exp_after:wN \@@_if_contains_char_aux:NN \exp_after:wN #2
- #1 { \__prg_break:n { ? \fi: } }
- \__prg_break_point:
- \prg_return_false:
- }
-\prg_new_conditional:Npnn \@@_if_contains_char:nN #1#2 { TF }
- {
- \@@_if_contains_char_aux:NN #2 #1 { \__prg_break:n { ? \fi: } }
- \__prg_break_point:
- \prg_return_false:
- }
-\cs_new:Npn \@@_if_contains_char_aux:NN #1#2
- {
- \if_charcode:w #1 #2
- \exp_after:wN \@@_if_contains_char_true:
- \fi:
- \@@_if_contains_char_aux:NN #1
- }
-\cs_new_nopar:Npn \@@_if_contains_char_true:
- { \__prg_break:n { \prg_return_true: \use_none:n } }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[aux, rEXP]{\@@_octal_use:NTF}
-% \begin{syntax}
-% \cs{@@_octal_use:NTF} \meta{token} \Arg{true code} \Arg{false code}
-% \end{syntax}
-% If the \meta{token} is an octal digit, it is left in the input
-% stream, \emph{followed} by the \meta{true code}. Otherwise, the
-% \meta{false code} is left in the input stream.
-% \begin{texnote}
-% This function will fail if the escape character is an octal
-% digit. We are thus careful to set the escape character to a known
-% value before using it.
-% \end{texnote}
-% \TeX{} dutifully detects octal digits for us: if |#1| is an octal
-% digit, then the right-hand side of the comparison is |'1#1|, greater
-% than $1$. Otherwise, the right-hand side stops as |'1|, and the
-% conditional takes the \texttt{false} branch.
-% \begin{macrocode}
-\prg_new_conditional:Npnn \@@_octal_use:N #1 { TF }
- {
- \if_int_compare:w \c_one < '1 \token_to_str:N #1 \exp_stop_f:
- #1 \prg_return_true:
- \else:
- \prg_return_false:
- \fi:
- }
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macro}[aux, rEXP]{\@@_hexadecimal_use:NTF}
-% \TeX{} detects uppercase hexadecimal digits for us (see
-% \cs{@@_octal_use:NTF}), but not the lowercase letters, which we
-% need to detect and replace by their uppercase counterpart.
-% \begin{macrocode}
-\prg_new_conditional:Npnn \@@_hexadecimal_use:N #1 { TF }
- {
- \if_int_compare:w \c_two < "1 \token_to_str:N #1 \exp_stop_f:
- #1 \prg_return_true:
- \else:
- \if_case:w \__int_eval:w
- \exp_after:wN ` \token_to_str:N #1 - `a
- \__int_eval_end:
- A
- \or: B
- \or: C
- \or: D
- \or: E
- \or: F
- \else:
- \prg_return_false:
- \exp_after:wN \use_none:n
- \fi:
- \prg_return_true:
- \fi:
- }
-% \end{macrocode}
-% \end{macro}
-%
% \subsection{Viewing strings}
%
% \begin{macro}{\str_show:n, \str_show:N, \str_show:c}
@@ -1419,3084 +1028,41 @@
% \end{macrocode}
% \end{macro}
%
-% \subsection{Conversions}
-%
-% \subsubsection{Producing one byte or character}
-%
-% \begin{variable}{\c_@@_byte_0_tl, \c_@@_byte_1_tl, \c_@@_byte_255_tl}
-% \begin{variable}{\c_@@_byte_-1_tl}
-% For each integer $N$ in the range $[0,255]$, we create a constant
-% token list which holds three character tokens with category code
-% other: the character with character code $N$, followed by the
-% representation of $N$ as two hexadecimal digits. The value $-1$ is
-% given a default token list which ensures that later functions give
-% an empty result for the input $-1$.
-% \begin{macrocode}
-\group_begin:
- \char_set_catcode_other:n { \c_zero }
- \tl_set:Nx \l_@@_internal_tl { \tl_to_str:n { 0123456789ABCDEF } }
- \exp_args:No \tl_map_inline:nn { \l_@@_internal_tl " }
- { \char_set_lccode:nn {`#1} { \c_zero } }
- \tl_map_inline:Nn \l_@@_internal_tl
- {
- \tl_map_inline:Nn \l_@@_internal_tl
- {
- \char_set_lccode:nn { \c_zero } {"#1##1}
- \tl_to_lowercase:n
- {
- \tl_const:cx
- { c_@@_byte_ \int_eval:n {"#1##1} _tl }
- { ^^@ #1 ##1 }
- }
- }
- }
-\group_end:
-\tl_const:cn { c_@@_byte_-1_tl } { { } \use_none:n { } }
-% \end{macrocode}
-% \end{variable}
-% \end{variable}
-%
-% \begin{macro}[int, EXP]{\@@_output_byte:n}
-% \begin{macro}[int, EXP]{\@@_output_byte:w}
-% \begin{macro}[int, EXP]{\@@_output_hexadecimal:n}
-% \begin{macro}[int, EXP]{\@@_output_hexadecimal:w}
-% \begin{macro}[int, EXP]{\@@_output_end:}
-% Those functions must be used carefully: feeding them a value outside
-% the range $[-1,255]$ will attempt to use the undefined token list
-% variable \cs{c_@@_byte_\meta{number}_tl}. Assuming that the
-% argument is in the right range, we expand the corresponding token
-% list, and pick either the byte (first token) or the hexadecimal
-% representations (second and third tokens). The value $-1$ produces
-% an empty result in both cases.
-% \begin{macrocode}
-\cs_new:Npn \@@_output_byte:n #1
- { \@@_output_byte:w #1 \@@_output_end: }
-\cs_new_nopar:Npn \@@_output_byte:w
- {
- \exp_after:wN \exp_after:wN
- \exp_after:wN \use_i:nnn
- \cs:w c_@@_byte_ \int_use:N \__int_eval:w
- }
-\cs_new:Npn \@@_output_hexadecimal:n #1
- { \@@_output_hexadecimal:w #1 \@@_output_end: }
-\cs_new_nopar:Npn \@@_output_hexadecimal:w
- {
- \exp_after:wN \exp_after:wN
- \exp_after:wN \use_none:n
- \cs:w c_@@_byte_ \int_use:N \__int_eval:w
- }
-\cs_new_nopar:Npn \@@_output_end:
- { \__int_eval_end: _tl \cs_end: }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int, rEXP]{\@@_output_byte_pair_be:n}
-% \begin{macro}[int, rEXP]{\@@_output_byte_pair_le:n}
-% \begin{macro}[aux, rEXP]{\@@_output_byte_pair:nnN}
-% Convert a number in the range $[0,65535]$ to a pair of bytes, either
-% big-endian or little-endian.
-% \begin{macrocode}
-\cs_new:Npn \@@_output_byte_pair_be:n #1
- {
- \exp_args:Nf \@@_output_byte_pair:nnN
- { \int_div_truncate:nn { #1 } { "100 } } {#1} \use:nn
- }
-\cs_new:Npn \@@_output_byte_pair_le:n #1
- {
- \exp_args:Nf \@@_output_byte_pair:nnN
- { \int_div_truncate:nn { #1 } { "100 } } {#1} \use_ii_i:nn
- }
-\cs_new:Npn \@@_output_byte_pair:nnN #1#2#3
- {
- #3
- { \@@_output_byte:n { #1 } }
- { \@@_output_byte:n { #2 - #1 * "100 } }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \subsubsection{Mapping functions for conversions}
-%
-% \begin{macro}{\@@_convert_gmap:N}
-% \begin{macro}[aux, rEXP]{\@@_convert_gmap_loop:NN}
-% This maps the function |#1| over all characters in
-% \cs{g_@@_result_tl}, which should be a byte string in most cases,
-% sometimes a native string.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_convert_gmap:N #1
- {
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN \@@_convert_gmap_loop:NN
- \exp_after:wN #1
- \g_@@_result_tl { ? \__prg_break: }
- \__prg_break_point:
- }
- }
-\cs_new:Npn \@@_convert_gmap_loop:NN #1#2
- {
- \use_none:n #2
- #1#2
- \@@_convert_gmap_loop:NN #1
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}{\@@_convert_gmap_internal:N}
-% \begin{macro}[aux, rEXP]{\@@_convert_gmap_internal_loop:Nw}
-% This maps the function |#1| over all character codes in
-% \cs{g_@@_result_tl}, which must be in the internal representation.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_convert_gmap_internal:N #1
- {
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN \@@_convert_gmap_internal_loop:Nww
- \exp_after:wN #1
- \g_@@_result_tl \s__tl \q_stop \__prg_break: \s__tl
- \__prg_break_point:
- }
- }
-\cs_new:Npn \@@_convert_gmap_internal_loop:Nww #1 #2 \s__tl #3 \s__tl
- {
- \use_none_delimit_by_q_stop:w #3 \q_stop
- #1 {#3}
- \@@_convert_gmap_internal_loop:Nww #1
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \subsubsection{Error-reporting during conversion}
-%
-% \begin{macro}[int]{\@@_if_flag_error:nnx}
-% \begin{macro}[aux]{\@@_if_flag_no_error:nnx}
-% When converting using the function \cs{str_set_convert:Nnnn}, errors
-% should be reported to the user after each step in the
-% conversion. Errors are signalled by raising some flag (typically
-% \texttt{@@_error}), so here we test that flag: if it is raised,
-% give the user an error, otherwise remove the arguments. On the other
-% hand, in the conditional functions \cs{str_set_convert:NnnnTF},
-% errors should be suppressed. This is done by changing
-% \cs{@@_if_flag_error:nnx} into \cs{@@_if_flag_no_error:nnx}
-% locally.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_if_flag_error:nnx #1
- {
- \flag_if_raised:nTF {#1}
- { \__msg_kernel_error:nnx { str } }
- { \use_none:nn }
- }
-\cs_new_protected:Npn \@@_if_flag_no_error:nnx #1#2#3
- { \flag_if_raised:nT {#1} { \bool_gset_true:N \g_@@_error_bool } }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_if_flag_times:nT}
-% At the end of each conversion step, we raise all relevant errors as
-% one error message, built on the fly. The height of each flag
-% indicates how many times a given error was encountered. This
-% function prints |#2| followed by the number of occurrences of an
-% error if it occurred, nothing otherwise.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_if_flag_times:nT #1#2
- { \flag_if_raised:nT {#1} { #2~(x \flag_height:n {#1} ) } }
-% \end{macrocode}
-% \end{macro}
-%
-% \subsubsection{Framework for conversions}
-%
-% Most functions in this module expect to be working with
-% \enquote{native} strings. Strings can also be stored as bytes, in one
-% of many encodings, for instance \textsc{utf8}. The bytes themselves
-% can be expressed in various ways in terms of \TeX{} tokens, for
-% instance as pairs of hexadecimal digits. The questions of going from
-% arbitrary Unicode code points to bytes, and from bytes to tokens are
-% mostly independent.
-%
-% Conversions are done in four steps:
-% \begin{itemize}
-% \item \enquote{unescape} produces a string of bytes;
-% \item \enquote{decode} takes in a string of bytes, and converts it
-% to a list of Unicode characters in an internal representation,
-% with items of the form
-% \begin{quote}
-% \meta{bytes} \cs{s__tl} \meta{Unicode code point} \cs{s__tl}
-% \end{quote}
-% where we have collected the \meta{bytes} which combined to form
-% this particular Unicode character, and the \meta{Unicode code
-% point} is in the range $[0,\hexnum{10FFFF}]$.
-% \item \enquote{encode} encodes the internal list of code points as a
-% byte string in the new encoding;
-% \item \enquote{escape} escapes bytes as requested.
-% \end{itemize}
-% The process is modified in case one of the encoding is empty (or the
-% conversion function has been set equal to the empty encoding because
-% it was not found): then the unescape or escape step is ignored, and
-% the decode or encode steps work on tokens instead of bytes. Otherwise,
-% each step must ensure that it passes a correct byte string or internal
-% string to the next step.
-%
-% \begin{macro}{\str_set_convert:Nnnn, \str_gset_convert:Nnnn}
-% \begin{macro}[TF]{\str_set_convert:Nnnn, \str_gset_convert:Nnnn}
-% \begin{macro}[aux]{\@@_convert:nNNnnn}
-% The input string is stored in \cs{g_@@_result_tl}, then we:
-% unescape and decode; encode and escape; exit the group and store the
-% result in the user's variable. The various conversion functions all
-% act on \cs{g_@@_result_tl}. Errors are silenced for the conditional
-% functions by redefining \cs{@@_if_flag_error:nnx} locally.
-% \begin{macrocode}
-\cs_new_protected_nopar:Npn \str_set_convert:Nnnn
- { \@@_convert:nNNnnn { } \tl_set_eq:NN }
-\cs_new_protected_nopar:Npn \str_gset_convert:Nnnn
- { \@@_convert:nNNnnn { } \tl_gset_eq:NN }
-\prg_new_protected_conditional:Npnn
- \str_set_convert:Nnnn #1#2#3#4 { T , F , TF }
- {
- \bool_gset_false:N \g_@@_error_bool
- \@@_convert:nNNnnn
- { \cs_set_eq:NN \@@_if_flag_error:nnx \@@_if_flag_no_error:nnx }
- \tl_set_eq:NN #1 {#2} {#3} {#4}
- \bool_if:NTF \g_@@_error_bool \prg_return_false: \prg_return_true:
- }
-\prg_new_protected_conditional:Npnn
- \str_gset_convert:Nnnn #1#2#3#4 { T , F , TF }
- {
- \bool_gset_false:N \g_@@_error_bool
- \@@_convert:nNNnnn
- { \cs_set_eq:NN \@@_if_flag_error:nnx \@@_if_flag_no_error:nnx }
- \tl_gset_eq:NN #1 {#2} {#3} {#4}
- \bool_if:NTF \g_@@_error_bool \prg_return_false: \prg_return_true:
- }
-\cs_new_protected:Npn \@@_convert:nNNnnn #1#2#3#4#5#6
- {
- \group_begin:
- #1
- \@@_gset_other:Nn \g_@@_result_tl {#4}
- \exp_after:wN \@@_convert:wwwnn
- \tl_to_str:n {#5} /// \q_stop
- { decode } { unescape }
- \prg_do_nothing:
- \@@_convert_decode_:
- \exp_after:wN \@@_convert:wwwnn
- \tl_to_str:n {#6} /// \q_stop
- { encode } { escape }
- \use_ii_i:nn
- \@@_convert_encode_:
- \group_end:
- #2 #3 \g_@@_result_tl
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[aux]{\@@_convert:wwwnn}
-% \begin{macro}[aux]{\@@_convert:NNnNN}
-% The task of \cs{@@_convert:wwwnn} is to split
-% \meta{encoding}/\meta{escaping} pairs into their components, |#1|
-% and |#2|. Calls to \cs{@@_convert:nnn} ensure that the
-% corresponding conversion functions are defined. The third auxiliary
-% does the main work.
-% \begin{itemize}
-% \item |#1| is the encoding conversion function;
-% \item |#2| is the escaping function;
-% \item |#3| is the escaping name for use in an error message;
-% \item |#4| is \cs{prg_do_nothing:} for unescaping/decoding, and
-% \cs{use_ii_i:nn} for encoding/escaping;
-% \item |#5| is the default encoding function (either
-% \enquote{decode} or \enquote{encode}), for which there should be
-% no escaping.
-% \end{itemize}
-% Let us ignore the native encoding for a second. In the
-% unescaping/decoding phase, we want to do |#2#1| in this order, and
-% in the encoding/escaping phase, the order should be reversed:
-% |#4#2#1| does exactly that. If one of the encodings is the default
-% (native), then the escaping should be ignored, with an error if any
-% was given, and only the encoding, |#1|, should be performed.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_convert:wwwnn
- #1 / #2 // #3 \q_stop #4#5
- {
- \@@_convert:nnn {enc} {#4} {#1}
- \@@_convert:nnn {esc} {#5} {#2}
- \exp_args:Ncc \@@_convert:NNnNN
- { @@_convert_#4_#1: } { @@_convert_#5_#2: } {#2}
- }
-\cs_new_protected:Npn \@@_convert:NNnNN #1#2#3#4#5
- {
- \if_meaning:w #1 #5
- \tl_if_empty:nF {#3}
- { \__msg_kernel_error:nnx { str } { native-escaping } {#3} }
- #1
- \else:
- #4 #2 #1
- \fi:
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[aux]{\@@_convert:nnn}
-% \begin{macro}[aux]{\@@_convert:nnnn}
-% The arguments of \cs{@@_convert:nnn} are: \texttt{enc} or
-% \texttt{esc}, used to build filenames, the type of the conversion
-% (unescape, decode, encode, escape), and the encoding or escaping
-% name. If the function is already defined, no need to do anything.
-% Otherwise, filter out all non-alphanumerics in the name, and
-% lowercase it. Feed that, and the same three arguments, to
-% \cs{@@_convert:nnnn}. The task is then to make sure that the
-% conversion function |#3_#1| corresponding to the type |#3| and
-% filtered name |#1| is defined, then set our initial conversion
-% function |#3_#4| equal to that.
-%
-% How do we get the |#3_#1| conversion to be defined if it isn't?
-% Two main cases.
-%
-% First, if |#1| is a key in \cs{g_@@_alias_prop}, then the value
-% \cs{l_@@_internal_tl} tells us what file to load. Loading is
-% skipped if the file was already read, \emph{i.e.}, if the conversion
-% command based on \cs{l_@@_internal_tl} already exists. Otherwise,
-% try to load the file; if that fails, there is an error, use the
-% default empty name instead.
-%
-% Second, |#1| may be absent from the property list. The
-% \cs{cs_if_exist:cF} test is automatically false, and we search for a
-% file defining the encoding or escaping |#1| (this should allow
-% third-party \texttt{.def} files). If the file is not found, there is
-% an error, use the default empty name instead.
-%
-% In all cases, the conversion based on \cs{l_@@_internal_tl} is
-% defined, so we can set the |#3_#1| function equal to that. In some
-% cases (\emph{e.g.}, \texttt{utf16be}), the |#3_#1| function is
-% actually defined within the file we just loaded, and it is different
-% from the \cs{l_@@_internal_tl}-based function: we mustn't clobber
-% that different definition.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_convert:nnn #1#2#3
- {
- \cs_if_exist:cF { @@_convert_#2_#3: }
- {
- \exp_args:Nx \@@_convert:nnnn
- { \@@_convert_lowercase_alphanum:n {#3} }
- {#1} {#2} {#3}
- }
- }
-\cs_new_protected:Npn \@@_convert:nnnn #1#2#3#4
- {
- \cs_if_exist:cF { @@_convert_#3_#1: }
- {
- \prop_get:NnNF \g_@@_alias_prop {#1} \l_@@_internal_tl
- { \tl_set:Nn \l_@@_internal_tl {#1} }
- \cs_if_exist:cF { @@_convert_#3_ \l_@@_internal_tl : }
- {
- \file_if_exist:nTF { l3str-#2- \l_@@_internal_tl .def }
- {
- \group_begin:
- \@@_load_catcodes:
- \file_input:n { l3str-#2- \l_@@_internal_tl .def }
- \group_end:
- }
- {
- \tl_clear:N \l_@@_internal_tl
- \__msg_kernel_error:nnxx { str } { unknown-#2 } {#4} {#1}
- }
- }
- \cs_if_exist:cF { @@_convert_#3_#1: }
- {
- \cs_gset_eq:cc { @@_convert_#3_#1: }
- { @@_convert_#3_ \l_@@_internal_tl : }
- }
- }
- \cs_gset_eq:cc { @@_convert_#3_#4: } { @@_convert_#3_#1: }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int, rEXP]{\@@_convert_lowercase_alphanum:n}
-% \begin{macro}[aux, rEXP]{\@@_convert_lowercase_alphanum_loop:N}
-% This function keeps only letters and digits, with upper case letters
-% converted to lower case.
-% \begin{macrocode}
-\cs_new:Npn \@@_convert_lowercase_alphanum:n #1
- {
- \exp_after:wN \@@_convert_lowercase_alphanum_loop:N
- \tl_to_str:n {#1} { ? \__prg_break: }
- \__prg_break_point:
- }
-\cs_new:Npn \@@_convert_lowercase_alphanum_loop:N #1
- {
- \use_none:n #1
- \if_int_compare:w `#1 < \c_ninety_one
- \if_int_compare:w `#1 < \c_sixty_five
- \if_int_compare:w \c_one < 1#1 \exp_stop_f:
- #1
- \fi:
- \else:
- \@@_output_byte:n { `#1 + \c_thirty_two }
- \fi:
- \else:
- \if_int_compare:w `#1 < \c_one_hundred_twenty_three
- \if_int_compare:w `#1 < \c_ninety_seven
- \else:
- #1
- \fi:
- \fi:
- \fi:
- \@@_convert_lowercase_alphanum_loop:N
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_load_catcodes:}
-% Since encoding files may be loaded at arbitrary places in a \TeX{}
-% document, including within verbatim mode, we set the catcodes of all
-% characters appearing in any encoding definition file.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_load_catcodes:
- {
- \char_set_catcode_escape:N \\
- \char_set_catcode_group_begin:N \{
- \char_set_catcode_group_end:N \}
- \char_set_catcode_math_toggle:N \$
- \char_set_catcode_alignment:N \&
- \char_set_catcode_parameter:N \#
- \char_set_catcode_math_superscript:N \^
- \char_set_catcode_ignore:N \ %
- \char_set_catcode_space:N \~
- \tl_map_function:nN { abcdefghijklmnopqrstuvwxyz_:ABCDEFILNPSTUX }
- \char_set_catcode_letter:N
- \tl_map_function:nN { 0123456789"'?*+-.(),`!/<>[];= }
- \char_set_catcode_other:N
- \char_set_catcode_comment:N \%
- \int_set:Nn \tex_endlinechar:D {32}
- }
-% \end{macrocode}
-% \end{macro}
-%
-% \subsubsection{Byte unescape and escape}
-%
-% Strings of bytes may need to be stored in auxiliary files in safe
-% \enquote{escaping} formats. Each such escaping is only loaded as
-% needed. By default, on input any non-byte is filtered out, while the
-% output simply consists in letting bytes through.
-%
-% \begin{macro}[int, rEXP]{\@@_filter_bytes:n}
-% \begin{macro}[aux, rEXP]{\@@_filter_bytes_aux:N}
-% In the case of pdf\TeX{}, every character is a byte. For
-% Unicode-aware engines, test the character code; non-bytes cause us
-% to raise the flag \texttt{str_byte}. Spaces have already been given
-% the correct category code when this function is called.
-% \begin{macrocode}
-\pdftex_if_engine:TF
- { \cs_new_eq:NN \@@_filter_bytes:n \use:n }
- {
- \cs_new:Npn \@@_filter_bytes:n #1
- {
- \@@_filter_bytes_aux:N #1
- { ? \__prg_break: }
- \__prg_break_point:
- }
- \cs_new:Npn \@@_filter_bytes_aux:N #1
- {
- \use_none:n #1
- \if_int_compare:w `#1 < 256 \exp_stop_f:
- #1
- \else:
- \flag_raise:n { str_byte }
- \fi:
- \@@_filter_bytes_aux:N
- }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_unescape_:}
-% \begin{macro}[int]{\@@_convert_unescape_bytes:}
-% The simplest unescaping method removes non-bytes from
-% \cs{g_@@_result_tl}.
-% \begin{macrocode}
-\pdftex_if_engine:TF
- { \cs_new_protected_nopar:Npn \@@_convert_unescape_: { } }
- {
- \cs_new_protected_nopar:Npn \@@_convert_unescape_:
- {
- \flag_clear:n { str_byte }
- \tl_gset:Nx \g_@@_result_tl
- { \exp_args:No \@@_filter_bytes:n \g_@@_result_tl }
- \@@_if_flag_error:nnx { str_byte } { non-byte } { bytes }
- }
- }
-\cs_new_eq:NN \@@_convert_unescape_bytes: \@@_convert_unescape_:
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_escape_:}
-% \begin{macro}[int]{\@@_convert_escape_bytes:}
-% The simplest form of escape leaves the bytes from the previous step
-% of the conversion unchanged.
-% \begin{macrocode}
-\cs_new_protected_nopar:Npn \@@_convert_escape_: { }
-\cs_new_eq:NN \@@_convert_escape_bytes: \@@_convert_escape_:
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \subsubsection{Native strings}
-%
-% \begin{macro}[int]{\@@_convert_decode_:}
-% \begin{macro}[aux, rEXP]{\@@_decode_native_char:N}
-% Convert each character to its character code, one at a time.
-% \begin{macrocode}
-\cs_new_protected_nopar:Npn \@@_convert_decode_:
- { \@@_convert_gmap:N \@@_decode_native_char:N }
-\cs_new:Npn \@@_decode_native_char:N #1
- { #1 \s__tl \__int_value:w `#1 \s__tl }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_encode_:}
-% The conversion from an internal string to native character tokens is
-% very different in pdf\TeX{} and in other engines. For Unicode-aware
-% engines, we need the definitions to be read when the null byte has
-% category code $12$, so we set that inside a group.
-% \begin{macrocode}
-\group_begin:
- \char_set_catcode_other:n { 0 }
- \pdftex_if_engine:TF
-% \end{macrocode}
-% \begin{macro}[aux, EXP]{\@@_encode_native_char:n}
-% Since pdf\TeX{} only supports 8-bit characters, and we have a table
-% of all bytes, the conversion can be done in linear time within an
-% \texttt{x}-expanding assignment. Look out for character codes larger
-% than $255$, those characters are replaced by |?|, and raise a flag,
-% which then triggers a pdf\TeX{}-specific error.
-% \begin{macrocode}
- {
- \cs_new_protected_nopar:Npn \@@_convert_encode_:
- {
- \flag_clear:n { str_error }
- \@@_convert_gmap_internal:N \@@_encode_native_char:n
- \@@_if_flag_error:nnx { str_error }
- { pdfTeX-native-overflow } { }
- }
- \cs_new:Npn \@@_encode_native_char:n #1
- {
- \if_int_compare:w #1 < \c_two_hundred_fifty_six
- \@@_output_byte:n {#1}
- \else:
- \flag_raise:n { str_error }
- ?
- \fi:
- }
- \__msg_kernel_new:nnnn { str } { pdfTeX-native-overflow }
- { Character~code~too~large~for~pdfTeX. }
- {
- The~pdfTeX~engine~only~supports~8-bit~characters:~
- valid~character~codes~are~in~the~range~[0,255].~
- To~manipulate~arbitrary~Unicode,~use~LuaTeX~or~XeTeX.
- }
- }
-% \end{macrocode}
-% \end{macro}
-% \begin{macro}[aux]{\@@_encode_native_loop:w}
-% \begin{macro}[aux]{\@@_encode_native_flush:}
-% \begin{macro}[aux, rEXP]{\@@_encode_native_filter:N}
-% In Unicode-aware engines, since building particular characters
-% cannot be done expandably in \TeX{}, we cannot hope to get a
-% linear-time function. However, we get quite close using the
-% \pkg{l3tl-build} module, which abuses \tn{toks} to reach an almost
-% linear time. Use the standard lowercase trick to produce an
-% arbitrary character from the null character, and add that character
-% to the end of the token list being built. At the end of the loop,
-% put the token list together with \cs{__tl_build_end:}. Note that we
-% use an \texttt{x}-expanding assignment because it is slightly
-% faster. Unicode-aware engines will never incur an overflow because
-% the internal string is guaranteed to only contain code points in
-% $[0,\hexnum{10FFFF}]$.
-% \begin{macrocode}
- {
- \cs_new_protected_nopar:Npn \@@_convert_encode_:
- {
- \int_zero:N \l__tl_build_offset_int
- \__tl_gbuild_x:Nw \g_@@_result_tl
- \exp_after:wN \@@_encode_native_loop:w
- \g_@@_result_tl \s__tl { \q_stop \__prg_break: } \s__tl
- \__prg_break_point:
- \__tl_build_end:
- }
- \cs_new_protected:Npn \@@_encode_native_loop:w #1 \s__tl #2 \s__tl
- {
- \use_none_delimit_by_q_stop:w #2 \q_stop
- \tex_lccode:D \l_@@_internal_int \__int_eval:w #2 \__int_eval_end:
- \tl_to_lowercase:n { \__tl_build_one:n { ^^@ } }
- \@@_encode_native_loop:w
- }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% End the group to restore the catcode of the null byte.
-% \begin{macrocode}
-\group_end:
-% \end{macrocode}
-% \end{macro}
-%
-% \subsubsection{8-bit encodings}
-%
-% This section will be entirely rewritten: it is not yet clear in what
-% situations 8-bit encodings are used, hence I don't know what exactly
-% should be optimized. The current approach is reasonably efficient to
-% convert long strings, and it scales well when using many different
-% encodings. An approach based on csnames would have a smaller constant
-% load time for each individual conversion, but has a large hash table
-% cost. Using a range of \tn{count} registers works for decoding, but
-% not for encoding: one possibility there would be to use a binary tree
-% for the mapping of Unicode characters to bytes, stored as a box, one
-% per encoding.
-%
-% Since the section is going to be rewritten, documentation lacks.
-%
-% All the 8-bit encodings which \pkg{l3str} supports rely on the same
-% internal functions.
-%
-% \begin{macro}[int]{\@@_declare_eight_bit_encoding:nnn}
-% \begin{syntax}
-% \cs{@@_declare_eight_bit_encoding:nnn} \Arg{name} \Arg{mapping} \Arg{missing}
-% \end{syntax}
-% This declares the encoding \meta{name} to map bytes to Unicode
-% characters according to the \meta{mapping}, and map those bytes
-% which are not mentionned in the \meta{mapping} either to the
-% replacement character (if they appear in \meta{missing}), or to
-% themselves.
-%
-% All the 8-bit encoding definition file start with
-% \cs{@@_declare_eight_bit_encoding:nnn} \Arg{encoding name}
-% \Arg{mapping} \Arg{missing bytes}. The \meta{mapping} argument is a
-% token list of pairs \Arg{byte} \Arg{Unicode} expressed in uppercase
-% hexadecimal notation. The \meta{missing} argument is a token list
-% of \Arg{byte}. Every \meta{byte} which does not appear in the
-% \meta{mapping} nor the \meta{missing} lists maps to the same code
-% point in Unicode.
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_declare_eight_bit_encoding:nnn #1#2#3
- {
- \tl_set:Nn \l_@@_internal_tl {#1}
- \cs_new_protected_nopar:cpn { @@_convert_decode_#1: }
- { \@@_convert_decode_eight_bit:n {#1} }
- \cs_new_protected_nopar:cpn { @@_convert_encode_#1: }
- { \@@_convert_encode_eight_bit:n {#1} }
- \tl_const:cn { c_@@_encoding_#1_tl } {#2}
- \tl_const:cn { c_@@_encoding_#1_missing_tl } {#3}
- }
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_decode_eight_bit:n}
-% \begin{macro}[aux]{\@@_decode_eight_bit_load:nn}
-% \begin{macro}[aux]{\@@_decode_eight_bit_load_missing:n}
-% \begin{macro}[aux, EXP]{\@@_decode_eight_bit_char:N}
-%^^A todo: document
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_convert_decode_eight_bit:n #1
- {
- \group_begin:
- \int_zero:N \l_@@_internal_int
- \exp_last_unbraced:Nx \@@_decode_eight_bit_load:nn
- { \tl_use:c { c_@@_encoding_#1_tl } }
- { \q_stop \__prg_break: } { }
- \__prg_break_point:
- \exp_last_unbraced:Nx \@@_decode_eight_bit_load_missing:n
- { \tl_use:c { c_@@_encoding_#1_missing_tl } }
- { \q_stop \__prg_break: }
- \__prg_break_point:
- \flag_clear:n { str_error }
- \@@_convert_gmap:N \@@_decode_eight_bit_char:N
- \@@_if_flag_error:nnx { str_error } { decode-8-bit } {#1}
- \group_end:
- }
-\cs_new_protected:Npn \@@_decode_eight_bit_load:nn #1#2
- {
- \use_none_delimit_by_q_stop:w #1 \q_stop
- \tex_dimen:D "#1 = \l_@@_internal_int sp \scan_stop:
- \tex_skip:D \l_@@_internal_int = "#1 sp \scan_stop:
- \tex_toks:D \l_@@_internal_int \exp_after:wN { \__int_value:w "#2 }
- \tex_advance:D \l_@@_internal_int \c_one
- \@@_decode_eight_bit_load:nn
- }
-\cs_new_protected:Npn \@@_decode_eight_bit_load_missing:n #1
- {
- \use_none_delimit_by_q_stop:w #1 \q_stop
- \tex_dimen:D "#1 = \l_@@_internal_int sp \scan_stop:
- \tex_skip:D \l_@@_internal_int = "#1 sp \scan_stop:
- \tex_toks:D \l_@@_internal_int \exp_after:wN
- { \int_use:N \c_@@_replacement_char_int }
- \tex_advance:D \l_@@_internal_int \c_one
- \@@_decode_eight_bit_load_missing:n
- }
-\cs_new:Npn \@@_decode_eight_bit_char:N #1
- {
- #1 \s__tl
- \if_int_compare:w \tex_dimen:D `#1 < \l_@@_internal_int
- \if_int_compare:w \tex_skip:D \tex_dimen:D `#1 = `#1 \exp_stop_f:
- \tex_the:D \tex_toks:D \tex_dimen:D
- \fi:
- \fi:
- \__int_value:w `#1 \s__tl
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_encode_eight_bit:n}
-% \begin{macro}[aux]{\@@_encode_eight_bit_load:nn}
-% \begin{macro}[aux, rEXP]{\@@_encode_eight_bit_char:n}
-% \begin{macro}[aux, rEXP]{\@@_encode_eight_bit_char_aux:n}
-%^^A todo: document
-% \begin{macrocode}
-\cs_new_protected:Npn \@@_convert_encode_eight_bit:n #1
- {
- \group_begin:
- \int_zero:N \l_@@_internal_int
- \exp_last_unbraced:Nx \@@_encode_eight_bit_load:nn
- { \tl_use:c { c_@@_encoding_#1_tl } }
- { \q_stop \__prg_break: } { }
- \__prg_break_point:
- \flag_clear:n { str_error }
- \@@_convert_gmap_internal:N \@@_encode_eight_bit_char:n
- \@@_if_flag_error:nnx { str_error } { encode-8-bit } {#1}
- \group_end:
- }
-\cs_new_protected:Npn \@@_encode_eight_bit_load:nn #1#2
- {
- \use_none_delimit_by_q_stop:w #1 \q_stop
- \tex_dimen:D "#2 = \l_@@_internal_int sp \scan_stop:
- \tex_skip:D \l_@@_internal_int = "#2 sp \scan_stop:
- \exp_args:NNf \tex_toks:D \l_@@_internal_int
- { \@@_output_byte:n { "#1 } }
- \tex_advance:D \l_@@_internal_int \c_one
- \@@_encode_eight_bit_load:nn
- }
-\cs_new:Npn \@@_encode_eight_bit_char:n #1
- {
- \if_int_compare:w #1 > \c_max_register_int
- \flag_raise:n { str_error }
- \else:
- \if_int_compare:w \tex_dimen:D #1 < \l_@@_internal_int
- \if_int_compare:w \tex_skip:D \tex_dimen:D #1 = #1 \exp_stop_f:
- \tex_the:D \tex_toks:D \tex_dimen:D #1 \exp_stop_f:
- \exp_after:wN \exp_after:wN \exp_after:wN \use_none:nn
- \fi:
- \fi:
- \@@_encode_eight_bit_char_aux:n {#1}
- \fi:
- }
-\cs_new:Npn \@@_encode_eight_bit_char_aux:n #1
- {
- \if_int_compare:w #1 < \c_two_hundred_fifty_six
- \@@_output_byte:n {#1}
- \else:
- \flag_raise:n { str_error }
- \fi:
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \subsection{Messages}
-%
-% General messages, and messages for the encodings and escapings loaded
-% by default (\enquote{native}, and \enquote{bytes}).
-% \begin{macrocode}
-\__msg_kernel_new:nnn { str } { unknown-esc }
- { Escaping~scheme~'#1'~(filtered:~'#2')~unknown. }
-\__msg_kernel_new:nnn { str } { unknown-enc }
- { Encoding~scheme~'#1'~(filtered:~'#2')~unknown. }
-\__msg_kernel_new:nnnn { str } { native-escaping }
- { The~'native'~encoding~scheme~does~not~support~any~escaping. }
- {
-    Since~native~strings~do~not~consist~of~bytes,~
-    none~of~the~escaping~methods~make~sense.~
-    The~specified~escaping,~'#1',~will~be~ignored.
- }
-\__msg_kernel_new:nnn { str } { file-not-found }
- { File~'l3str-#1.def'~not~found. }
-% \end{macrocode}
-%
-% Message used when the \enquote{bytes} unescaping fails because the
-% string given to \cs{str_set_convert:Nnnn} contains a non-byte. This
-% cannot happen for the pdf\TeX{} engine, since that engine only
-% supports 8-bit characters. Messages used for other escapings and
-% encodings are defined in each definition file.
-% \begin{macrocode}
-\pdftex_if_engine:F
- {
- \__msg_kernel_new:nnnn { str } { non-byte }
- { String~invalid~in~escaping~'#1':~it~may~only~contain~bytes. }
- {
- Some~characters~in~the~string~you~asked~to~convert~are~not~
- 8-bit~characters.~Perhaps~the~string~is~a~'native'~Unicode~string?~
- If~it~is,~try~using\\
- \\
- \iow_indent:n
- {
- \iow_char:N\\str_set_convert:Nnnn \\
- \ \ <str~var>~\{~<string>~\}~\{~native~\}~\{~<target~encoding>~\}
- }
- }
- }
-% \end{macrocode}
-%
-% Those messages are used when converting to and from 8-bit encodings.
-% \begin{macrocode}
-\__msg_kernel_new:nnnn { str } { decode-8-bit }
- { Invalid~string~in~encoding~'#1'. }
- {
- LaTeX~came~across~a~byte~which~is~not~defined~to~represent~
- any~character~in~the~encoding~'#1'.
- }
-\__msg_kernel_new:nnnn { str } { encode-8-bit }
- { Unicode~string~cannot~be~converted~to~encoding~'#1'. }
- {
- The~encoding~'#1'~only~contains~a~subset~of~all~Unicode~characters.~
- LaTeX~was~asked~to~convert~a~string~to~that~encoding,~but~that~
- string~contains~a~character~that~'#1'~does~not~support.
- }
-% \end{macrocode}
-%
% \subsection{Deprecated string functions}
-%
-% Deprecated 2012-05-13 for removal by 2012-08-31.
-%
-% \begin{macro}{\str_length:N, \str_length:n, \str_length_ignore_spaces:n}
-% \begin{macrocode}
-\cs_new_eq:NN \str_length:N \str_count:N
-\cs_new_eq:NN \str_length:n \str_count:n
-\cs_new_eq:NN \str_length_ignore_spaces:n \str_count_ignore_spaces:n
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macrocode}
-%</initex|package>
-% \end{macrocode}
-%
-% \subsection{Escaping definition files}
-%
-% Several of these escapings are defined by the \textsc{pdf} file
-% format. The following byte storage methods are defined:
-% \begin{itemize}
-% \item \texttt{bytes} (default), non-bytes are filtered out, and
-% bytes are left untouched (this is defined by default);
-%   \item \texttt{hex} or \texttt{hexadecimal}, as per the pdf\TeX{}
-%     primitive \tn{pdfescapehex};
-%   \item \texttt{name}, as per the pdf\TeX{} primitive
-%     \tn{pdfescapename};
-%   \item \texttt{string}, as per the pdf\TeX{} primitive
-%     \tn{pdfescapestring};
-%   \item \texttt{url}, as per the percent encoding of urls.
-% \end{itemize}
-%
-% \subsubsection{Unescape methods}
-%
-% \begin{macro}[int]{\@@_convert_unescape_hex:}
-% \begin{macro}[aux, rEXP]{\@@_unescape_hex_auxi:N}
-% \begin{macro}[aux, rEXP]{\@@_unescape_hex_auxii:N}
-% Take characters two by two, and interpret each pair as the hexadecimal
-% code for a byte. Anything other than hexadecimal digits is ignored,
-% raising the flag. A string which contains an odd number of
-% hexadecimal digits gets |0| appended to it: this is equivalent to
-% appending a |0| in all cases, and dropping it if it is alone.
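-%
-% For example, unescaping |48656C6C6F| produces the five bytes of
-% \enquote{Hello}, while the odd-length |414| is read as |4140|,
-% \emph{i.e.}, the two bytes \hexnum{41} \hexnum{40}.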
-% \begin{macrocode}
-%<*hex>
-\cs_new_protected_nopar:Npn \@@_convert_unescape_hex:
- {
- \group_begin:
- \flag_clear:n { str_error }
- \int_set:Nn \tex_escapechar:D { 92 }
- \tl_gset:Nx \g_@@_result_tl
- {
- \@@_output_byte:w "
- \exp_last_unbraced:Nf \@@_unescape_hex_auxi:N
- { \tl_to_str:N \g_@@_result_tl }
- 0 { ? 0 - \c_one \__prg_break: }
- \__prg_break_point:
- \@@_output_end:
- }
- \@@_if_flag_error:nnx { str_error } { unescape-hex } { }
- \group_end:
- }
-\cs_new:Npn \@@_unescape_hex_auxi:N #1
- {
- \use_none:n #1
- \@@_hexadecimal_use:NTF #1
- { \@@_unescape_hex_auxii:N }
- {
- \flag_raise:n { str_error }
- \@@_unescape_hex_auxi:N
- }
- }
-\cs_new:Npn \@@_unescape_hex_auxii:N #1
- {
- \use_none:n #1
- \@@_hexadecimal_use:NTF #1
- {
- \@@_output_end:
- \@@_output_byte:w " \@@_unescape_hex_auxi:N
- }
- {
- \flag_raise:n { str_error }
- \@@_unescape_hex_auxii:N
- }
- }
-\__msg_kernel_new:nnnn { str } { unescape-hex }
- { String~invalid~in~escaping~'hex':~only~hexadecimal~digits~allowed. }
- {
- Some~characters~in~the~string~you~asked~to~convert~are~not~
- hexadecimal~digits~(0-9,~A-F,~a-f)~nor~spaces.
- }
-%</hex>
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_unescape_name:}
-% \begin{macro}[aux, rEXP]{\@@_unescape_name_loop:wNN}
-% \begin{macro}[int]{\@@_convert_unescape_url:}
-% \begin{macro}[aux, rEXP]{\@@_unescape_url_loop:wNN}
-% The \cs{@@_convert_unescape_name:} function replaces each
-% occurrence of |#| followed by two hexadecimal digits in
-% \cs{g_@@_result_tl} by the corresponding byte. The \texttt{url}
-% function is identical, with escape character |%| instead of |#|.
-% Thus we define the two together. The arguments of \cs{@@_tmp:w} are
-% the character code of |#| or |%| in hexadecimal, the name of the
-% main function to define, and the name of the auxiliary which
-% performs the loop.
-%
-% The looping auxiliary |#3| finds the next escape character, reads
-% the following two characters, and tests them. The test
-% \cs{@@_hexadecimal_use:NTF} leaves the upper-case digit in the
-% input stream, hence we surround the test with
-% \cs{@@_output_byte:w}~|"| and \cs{@@_output_end:}. If both
-% characters are hexadecimal digits, they should be removed before
-% looping: this is done by \cs{use_i:nnn}. If one of the characters
-% is not a hexadecimal digit, then feed |"#1| to
-% \cs{@@_output_byte:w} to produce the escape character, raise the
-% flag, and call the looping function followed by the two characters
-% (remove \cs{use_i:nnn}).
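-%
-% For example, in the \texttt{name} escaping, |A#2FB| unescapes to
-% the three bytes |A|, \hexnum{2F}, |B|; the \texttt{url} escaping
-% behaves in the same way with |%| in place of |#|. An escape
-% character not followed by two hexadecimal digits is kept as is,
-% and the flag is raised.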
-% \begin{macrocode}
-%<*name|url>
-\cs_set_protected:Npn \@@_tmp:w #1#2#3
- {
- \cs_new_protected:cpn { @@_convert_unescape_#2: }
- {
- \group_begin:
- \flag_clear:n { str_byte }
- \flag_clear:n { str_error }
- \int_set:Nn \tex_escapechar:D { 92 }
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN #3 \g_@@_result_tl
- #1 ? { ? \__prg_break: }
- \__prg_break_point:
- }
- \@@_if_flag_error:nnx { str_byte } { non-byte } { #2 }
- \@@_if_flag_error:nnx { str_error } { unescape-#2 } { }
- \group_end:
- }
- \cs_new:Npn #3 ##1#1##2##3
- {
- \@@_filter_bytes:n {##1}
- \use_none:n ##3
- \@@_output_byte:w "
- \@@_hexadecimal_use:NTF ##2
- {
- \@@_hexadecimal_use:NTF ##3
- { }
- {
- \flag_raise:n { str_error }
- * \c_zero + `#1 \use_i:nn
- }
- }
- {
- \flag_raise:n { str_error }
- 0 + `#1 \use_i:nn
- }
- \@@_output_end:
- \use_i:nnn #3 ##2##3
- }
- \__msg_kernel_new:nnnn { str } { unescape-#2 }
- { String~invalid~in~escaping~'#2'. }
- {
- LaTeX~came~across~the~escape~character~'#1'~not~followed~by~
- two~hexadecimal~digits.~This~is~invalid~in~the~escaping~'#2'.
- }
- }
-%</name|url>
-%<*name>
-\exp_after:wN \@@_tmp:w \c_hash_str { name }
- \@@_unescape_name_loop:wNN
-%</name>
-%<*url>
-\exp_after:wN \@@_tmp:w \c_percent_str { url }
- \@@_unescape_url_loop:wNN
-%</url>
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_unescape_string:}
-% \begin{macro}[aux, rEXP]{\@@_unescape_string_newlines:wN}
-% \begin{macro}[aux, rEXP]{\@@_unescape_string_loop:wNNN}
-% \begin{macro}[aux, rEXP]{\@@_unescape_string_repeat:NNNNNN}
-% The \texttt{string} escaping is somewhat similar to the
-% \texttt{name} and \texttt{url} escapings, with escape character |\|.
-% The first step is to convert all three line endings, |^^J|, |^^M|,
-% and |^^M^^J| to the common |^^J|, as per the \textsc{pdf}
-% specification. This step cannot raise the flag.
-%
-% Then the following escape sequences are decoded.
-% \begin{itemize}\def\makelabel#1{\hss\llap{\ttfamily\string#1}}
-% \item[\n] Line feed ($10$)
-% \item[\r] Carriage return ($13$)
-% \item[\t] Horizontal tab ($9$)
-% \item[\b] Backspace ($8$)
-% \item[\f] Form feed ($12$)
-% \item[\(] Left parenthesis
-% \item[\)] Right parenthesis
-% \item[\\] Backslash
-% \item[\ddd] (backslash followed by $1$ to $3$ octal digits) Byte
-% \texttt{ddd} (octal), subtracting $256$ in case of overflow.
-% \end{itemize}
-% If followed by an end-of-line character, the backslash and the
-% end-of-line are ignored. If followed by anything else, the backslash
-% is ignored, raising the error flag.
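-%
-% For example, |\n| unescapes to the byte $10$, |\(| to the byte $40$
-% (a left parenthesis), |\101| to the byte $65$ (octal |101|), and
-% |\400| to the byte $0$ (octal |400|, that is $256$, reduced by
-% $256$).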
-%^^A Be paranoid: \tl_to_lowercase:n is unsafe.
-% \begin{macrocode}
-%<*string>
-\group_begin:
- \char_set_lccode:nn {`\*} {`\\}
- \char_set_catcode_other:N \^^J
- \char_set_catcode_other:N \^^M
- \tl_to_lowercase:n
- {
- \cs_new_protected_nopar:Npn \@@_convert_unescape_string:
- {
- \group_begin:
- \flag_clear:n { str_byte }
- \flag_clear:n { str_error }
- \int_set:Nn \tex_escapechar:D { 92 }
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN \@@_unescape_string_newlines:wN
- \g_@@_result_tl \__prg_break: ^^M ?
- \__prg_break_point:
- }
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN \@@_unescape_string_loop:wNNN
- \g_@@_result_tl * ?? { ? \__prg_break: }
- \__prg_break_point:
- }
- \@@_if_flag_error:nnx { str_byte } { non-byte } { string }
- \@@_if_flag_error:nnx { str_error } { unescape-string } { }
- \group_end:
- }
- \cs_new:Npn \@@_unescape_string_loop:wNNN #1 *#2#3#4
- }
- {
- \@@_filter_bytes:n {#1}
- \use_none:n #4
- \@@_output_byte:w '
- \@@_octal_use:NTF #2
- {
- \@@_octal_use:NTF #3
- {
- \@@_octal_use:NTF #4
- {
- \if_int_compare:w #2 > \c_three
- - 256
- \fi:
- \@@_unescape_string_repeat:NNNNNN
- }
- { \@@_unescape_string_repeat:NNNNNN ? }
- }
- { \@@_unescape_string_repeat:NNNNNN ?? }
- }
- {
- \str_case_x:nnn {#2}
- {
- { \c_backslash_str } { 134 }
- { ( } { 50 }
- { ) } { 51 }
- { r } { 15 }
- { f } { 14 }
- { n } { 12 }
- { t } { 11 }
- { b } { 10 }
- { ^^J } { 0 - \c_one }
- }
- {
- \flag_raise:n { str_error }
- 0 - \c_one \use_i:nn
- }
- }
- \@@_output_end:
- \use_i:nn \@@_unescape_string_loop:wNNN #2#3#4
- }
- \cs_new:Npn \@@_unescape_string_repeat:NNNNNN #1#2#3#4#5#6
- { \@@_output_end: \@@_unescape_string_loop:wNNN }
- \cs_new:Npn \@@_unescape_string_newlines:wN #1 ^^M #2
- {
- #1
- \if_charcode:w ^^J #2 \else: ^^J \fi:
- \@@_unescape_string_newlines:wN #2
- }
- \__msg_kernel_new:nnnn { str } { unescape-string }
- { String~invalid~in~escaping~'string'. }
- {
- LaTeX~came~across~an~escape~character~'\c_backslash_str'~
- not~followed~by~any~of:~'n',~'r',~'t',~'b',~'f',~'(',~')',~
- '\c_backslash_str',~one~to~three~octal~digits,~or~the~end~
- of~a~line.
- }
-\group_end:
-%</string>
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \subsubsection{Escape methods}
-%
-% Currently, none of the escape methods can lead to errors, assuming
-% that their input is made out of bytes.
-%
-% \begin{macro}[int]{\@@_convert_escape_hex:}
-% \begin{macro}[aux, rEXP]{\@@_escape_hex_char:N}
-% Loop and convert each byte to hexadecimal.
-% \begin{macrocode}
-%<*hex>
-\cs_new_protected_nopar:Npn \@@_convert_escape_hex:
- { \@@_convert_gmap:N \@@_escape_hex_char:N }
-\cs_new:Npn \@@_escape_hex_char:N #1
- { \@@_output_hexadecimal:n { `#1 } }
-%</hex>
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_escape_name:}
-% \begin{macro}[aux, rEXP]{\@@_escape_name_char:N}
-% \begin{macro}[aux, rEXP]{\@@_if_escape_name:NTF}
-% \begin{variable}{\c_@@_escape_name_str}
-% \begin{variable}{\c_@@_escape_name_not_str}
-% For each byte, test whether it should be output as is, or be
-% \enquote{hash-encoded}. Roughly, bytes outside the range
-% $[\hexnum{2A},\hexnum{7E}]$ are hash-encoded. We keep two lists of
-% exceptions: characters in \cs{c_@@_escape_name_not_str} are not
-% hash-encoded, and characters in the \cs{c_@@_escape_name_str} are
-% encoded.
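-%
-% For example, |!| (code \hexnum{21}, in \cs{c_@@_escape_name_not_str})
-% is kept as is, a space (code \hexnum{20}) becomes |#20|, and |/|
-% (in \cs{c_@@_escape_name_str}) becomes |#2F|.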
-% \begin{macrocode}
-%<*name>
-\str_const:Nn \c_@@_escape_name_not_str { ! " $ & ' } %$
-\str_const:Nn \c_@@_escape_name_str { {}/<>[] }
-\cs_new_protected_nopar:Npn \@@_convert_escape_name:
- { \@@_convert_gmap:N \@@_escape_name_char:N }
-\cs_new:Npn \@@_escape_name_char:N #1
- {
- \@@_if_escape_name:NTF #1 {#1}
- { \c_hash_str \@@_output_hexadecimal:n {`#1} }
- }
-\prg_new_conditional:Npnn \@@_if_escape_name:N #1 { TF }
- {
- \if_int_compare:w `#1 < "2A \exp_stop_f:
- \@@_if_contains_char:NNTF \c_@@_escape_name_not_str #1
- \prg_return_true: \prg_return_false:
- \else:
- \if_int_compare:w `#1 > "7E \exp_stop_f:
- \prg_return_false:
- \else:
- \@@_if_contains_char:NNTF \c_@@_escape_name_str #1
- \prg_return_false: \prg_return_true:
- \fi:
- \fi:
- }
-%</name>
-% \end{macrocode}
-% \end{variable}
-% \end{variable}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_escape_string:}
-% \begin{macro}[aux, rEXP]{\@@_escape_string_char:N}
-% \begin{macro}[aux, rEXP]{\@@_if_escape_string:NTF}
-% \begin{variable}{\c_@@_escape_string_str}
-% Any character below (and including) space, and any character above
-% (and including) \texttt{del}, are converted to octal. One backslash
-% is added before each parenthesis and backslash.
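-%
-% For example, a space (code $32$) becomes |\040|, a line feed
-% (code $10$) becomes |\012|, and |(| becomes |\(|.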
-% \begin{macrocode}
-%<*string>
-\str_const:Nx \c_@@_escape_string_str
- { \c_backslash_str ( ) }
-\cs_new_protected_nopar:Npn \@@_convert_escape_string:
- { \@@_convert_gmap:N \@@_escape_string_char:N }
-\cs_new:Npn \@@_escape_string_char:N #1
- {
- \@@_if_escape_string:NTF #1
- {
- \@@_if_contains_char:NNT
- \c_@@_escape_string_str #1
- { \c_backslash_str }
- #1
- }
- {
- \c_backslash_str
- \int_div_truncate:nn {`#1} {64}
- \int_mod:nn { \int_div_truncate:nn {`#1} \c_eight } \c_eight
- \int_mod:nn {`#1} \c_eight
- }
- }
-\prg_new_conditional:Npnn \@@_if_escape_string:N #1 { TF }
- {
- \if_int_compare:w `#1 < "21 \exp_stop_f:
- \prg_return_false:
- \else:
- \if_int_compare:w `#1 > "7E \exp_stop_f:
- \prg_return_false:
- \else:
- \prg_return_true:
- \fi:
- \fi:
- }
-%</string>
-% \end{macrocode}
-% \end{variable}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[int]{\@@_convert_escape_url:}
-% \begin{macro}[aux, rEXP]{\@@_escape_url_char:N}
-% \begin{macro}[aux, rEXP]{\@@_if_escape_url:NTF}
-% This function is similar to \cs{@@_convert_escape_name:}, escaping
-% different characters.
-% \begin{macrocode}
-%<*url>
-\cs_new_protected_nopar:Npn \@@_convert_escape_url:
- { \@@_convert_gmap:N \@@_escape_url_char:N }
-\cs_new:Npn \@@_escape_url_char:N #1
- {
- \@@_if_escape_url:NTF #1 {#1}
- { \c_percent_str \@@_output_hexadecimal:n { `#1 } }
- }
-\prg_new_conditional:Npnn \@@_if_escape_url:N #1 { TF }
- {
- \if_int_compare:w `#1 < "41 \exp_stop_f:
- \@@_if_contains_char:nNTF { "-.<> } #1
- \prg_return_true: \prg_return_false:
- \else:
- \if_int_compare:w `#1 > "7E \exp_stop_f:
- \prg_return_false:
- \else:
- \@@_if_contains_char:nNTF { [ ] } #1
- \prg_return_false: \prg_return_true:
- \fi:
- \fi:
- }
-%</url>
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \subsection{Encoding definition files}
-%
-% The \texttt{native} encoding is automatically defined. Other encodings
-% are loaded as needed. The following encodings are supported:
-% \begin{itemize}
-% \item \textsc{utf-8};
-% \item \textsc{utf-16}, big-, little-endian, or with byte order mark;
-% \item \textsc{utf-32}, big-, little-endian, or with byte order mark;
-% \item the \textsc{iso 8859} code pages, numbered from $1$ to $16$,
-%   skipping the nonexistent \textsc{iso 8859-12}.
-% \end{itemize}
-%
-% \subsubsection{\textsc{utf-8} support}
-%
-% \begin{macrocode}
-%<*utf8>
-% \end{macrocode}
-%
-% \begin{macro}[int]{\@@_convert_encode_utf8:}
-% \begin{macro}[aux, rEXP]{\@@_encode_utf_viii_char:n}
-% \begin{macro}[aux, rEXP]{\@@_encode_utf_viii_loop:wwnnw}
-% Loop through the internal string, and convert each character to its
-% \textsc{utf-8} representation. The representation is built from the
-% right-most (least significant) byte to the left-most (most
-% significant) byte. Continuation bytes are in the range $[128,191]$,
-% taking $64$ different values, hence we roughly want to express the
-% character code in base $64$, shifting the first digit in the
-% representation by some number depending on how many continuation
-% bytes there are. In the range $[0,127]$, output the corresponding
-% byte directly. In the range $[128,2047]$, output the remainder
-% modulo $64$, plus $128$ as a continuation byte, then output the
-% quotient (which is in the range $[0,31]$), shifted by $192$. In the
-% next range, $[2048,65535]$, split the character code into residue
-% and quotient modulo $64$, output the residue as a first continuation
-% byte, then repeat; this leaves us with a quotient in the range
-% $[0,15]$, which we output shifted by $224$. The last range,
-% $[65536,1114111]$, follows the same pattern: once we realize that
-% dividing twice by $64$ leaves us with a number larger than $15$, we
-% repeat, producing a last continuation byte, and offset the quotient
-% by $240$ for the leading byte.
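-%
-% For example, the character \hexnum{E9} (code $233$) lies in the
-% range $[128,2047]$: the remainder modulo $64$ is $41$, giving the
-% continuation byte $41 + 128 = 169$ (\hexnum{A9}), and the quotient
-% $3$, shifted by $192$, gives the leading byte \hexnum{C3}; hence
-% the two bytes \hexnum{C3} \hexnum{A9}. Similarly, \hexnum{20AC}
-% produces the three bytes \hexnum{E2} \hexnum{82} \hexnum{AC}.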
-%
-% How is that implemented? \cs{@@_encode_utf_viii_loop:wwnnw} takes
-% successive quotients as its first argument, the quotient from the
-% previous step as its second argument (except in step~$1$), the bound
-% on quotients below which no further step is needed, and finally the
-% offset used if this step should produce the leading byte. Leading
-% bytes can be in the ranges $[0,127]$, $[192,223]$, $[224,239]$, and
-% $[240,247]$ (really, that last limit should be $244$ because Unicode
-% stops at the code point $1114111$). At each step, if the quotient
-% |#1| is less than the limit |#3| for that range, output the leading
-% byte (|#1| shifted by |#4|) and stop. Otherwise, we need one more
-% step: use the quotient of |#1| by $64$, and |#1| as arguments for
-% the looping auxiliary, and output the continuation byte
-% corresponding to the remainder $|#2|-64|#1|+128$. The bizarre
-% construction |\c_minus_one + \c_zero *| removes the spurious initial
-% continuation byte (better methods welcome).
-% \begin{macrocode}
-\cs_new_protected_nopar:cpn { @@_convert_encode_utf8: }
- { \@@_convert_gmap_internal:N \@@_encode_utf_viii_char:n }
-\cs_new:Npn \@@_encode_utf_viii_char:n #1
- {
- \@@_encode_utf_viii_loop:wwnnw #1 ; \c_minus_one + \c_zero * ;
- { 128 } { \c_zero }
- { 32 } { 192 }
- { 16 } { 224 }
- { 8 } { 240 }
- \q_stop
- }
-\cs_new:Npn \@@_encode_utf_viii_loop:wwnnw #1; #2; #3#4 #5 \q_stop
- {
- \if_int_compare:w #1 < #3 \exp_stop_f:
- \@@_output_byte:n { #1 + #4 }
- \exp_after:wN \use_none_delimit_by_q_stop:w
- \fi:
- \exp_after:wN \@@_encode_utf_viii_loop:wwnnw
- \__int_value:w \int_div_truncate:nn {#1} {64} ; #1 ;
- #5 \q_stop
- \@@_output_byte:n { #2 - 64 * ( #1 - \c_two ) }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{variable}
-% {
-% \l_@@_missing_flag ,
-% \l_@@_extra_flag ,
-% \l_@@_overlong_flag ,
-% \l_@@_overflow_flag ,
-% }
-% When decoding a string that is purportedly in the \textsc{utf-8}
-% encoding, four different errors can occur, signalled by a specific
-% flag for each (we define those flags using \cs{flag_clear_new:n}
-% rather than \cs{flag_new:n}, because they are shared with other
-% encoding definition files).
-% \begin{itemize}
-% \item \enquote{Missing continuation byte}: a leading byte is not
-% followed by the right number of continuation bytes.
-% \item \enquote{Extra continuation byte}: a continuation byte
-% appears where it was not expected, \emph{i.e.}, not after an
-% appropriate leading byte.
-% \item \enquote{Overlong}: a Unicode character is expressed using
-% more bytes than necessary, for instance, \hexnum{C0}\hexnum{80}
-% for the code point $0$, instead of a single null byte.
-% \item \enquote{Overflow}: this occurs when decoding produces
-% Unicode code points greater than $1114111$.
-% \end{itemize}
-% We only raise one \LaTeX3 error message, combining all the errors
-% which occurred. In the short message, the leading comma must be
-% removed to get a grammatically correct sentence. In the long text,
-% first remind the user what a correct \textsc{utf-8} string should
-% look like, then add error-specific information.
-% \begin{macrocode}
-\flag_clear_new:n { str_missing }
-\flag_clear_new:n { str_extra }
-\flag_clear_new:n { str_overlong }
-\flag_clear_new:n { str_overflow }
-\__msg_kernel_new:nnnn { str } { utf8-decode }
- {
- Invalid~UTF-8~string: \exp_last_unbraced:Nf \use_none:n
- \@@_if_flag_times:nT { str_missing } { ,~missing~continuation~byte }
- \@@_if_flag_times:nT { str_extra } { ,~extra~continuation~byte }
- \@@_if_flag_times:nT { str_overlong } { ,~overlong~form }
- \@@_if_flag_times:nT { str_overflow } { ,~code~point~too~large }
- .
- }
- {
-    In~the~UTF-8~encoding,~each~Unicode~character~consists~of~
- 1~to~4~bytes,~with~the~following~bit~pattern: \\
- \iow_indent:n
- {
- Code~point~\ \ \ \ <~128:~0xxxxxxx \\
- Code~point~\ \ \ <~2048:~110xxxxx~10xxxxxx \\
- Code~point~\ \ <~65536:~1110xxxx~10xxxxxx~10xxxxxx \\
- Code~point~ <~1114112:~11110xxx~10xxxxxx~10xxxxxx~10xxxxxx \\
- }
- Bytes~of~the~form~10xxxxxx~are~called~continuation~bytes.
- \flag_if_raised:nT { str_missing }
- {
- \\\\
- A~leading~byte~(in~the~range~[192,255])~was~not~followed~by~
- the~appropriate~number~of~continuation~bytes.
- }
- \flag_if_raised:nT { str_extra }
- {
- \\\\
- LaTeX~came~across~a~continuation~byte~when~it~was~not~expected.
- }
- \flag_if_raised:nT { str_overlong }
- {
- \\\\
- Every~Unicode~code~point~must~be~expressed~in~the~shortest~
- possible~form.~For~instance,~'0xC0'~'0x83'~is~not~a~valid~
- representation~for~the~code~point~3.
- }
- \flag_if_raised:nT { str_overflow }
- {
- \\\\
- Unicode~limits~code~points~to~the~range~[0,1114111].
- }
- }
-% \end{macrocode}
-% \end{variable}
%
-% \begin{macro}[int]{\@@_convert_decode_utf8:}
-% \begin{macro}[aux, rEXP]
-% {
-% \@@_decode_utf_viii_start:N,
-% \@@_decode_utf_viii_continuation:wwN,
-% \@@_decode_utf_viii_aux:wNnnwN
-% }
-% \begin{macro}[aux, rEXP]
-% {\@@_decode_utf_viii_overflow:w, \@@_decode_utf_viii_end:}
-% Decoding is significantly harder than encoding. As before, lower
-% some flags, which are tested at the end (in bulk, to trigger at most
-% one \LaTeX3 error, as explained above). We expect successive
-% multi-byte sequences of the form \meta{start byte}
-% \meta{continuation bytes}. The \texttt{_start} auxiliary tests the
-% first byte:
-% \begin{itemize}
-% \item $[0,\hexnum{7F}]$: the byte stands alone, and is converted
-% to its own character code;
-% \item $[\hexnum{80}, \hexnum{BF}]$: unexpected continuation byte,
-% raise the appropriate flag, and convert that byte to the
-% replacement character \hexnum{FFFD};
-% \item $[\hexnum{C0}, \hexnum{FF}]$: this byte should be followed
-% by some continuation byte(s).
-% \end{itemize}
-% In the first two cases, \cs{use_none_delimit_by_q_stop:w} removes
-% data that only the third case requires, namely the limits of ranges
-% of Unicode characters which can be expressed with $1$, $2$, $3$, or
-% $4$ bytes.
-%
-% We can now concentrate on the multi-byte case and the
-% \texttt{_continuation} auxiliary. We expect |#3| to be in the range
-% $[\hexnum{80}, \hexnum{BF}]$. The test for this goes as follows: if
-% the character code is less than \hexnum{80}, we compare it to
-% $-\hexnum{C0}$, yielding \texttt{false}; otherwise to \hexnum{C0},
-% yielding \texttt{true} in the range $[\hexnum{80}, \hexnum{BF}]$ and
-% \texttt{false} otherwise. If we find that the byte is not a
-% continuation byte, stop the current multi-byte sequence, output the
-% replacement character, and continue parsing with the \texttt{_start}
-% auxiliary, starting at the byte we just tested. Once we know that
-% the byte is a continuation byte, leave it behind us in the input
-% stream, compute what code point the bytes read so far would produce,
-% and feed that number to the \texttt{_aux} function.
-%
-% The \texttt{_aux} function tests whether we should look for more
-% continuation bytes or not. If the number it receives as |#1| is less
-% than the maximum |#4| for the current range, then we are done: check
-% for an overlong representation by comparing |#1| with the maximum
-% |#3| for the previous range. Otherwise, we call the
-% \texttt{_continuation} auxiliary again, after shifting the
-% \enquote{current code point} by |#4| (maximum from the range we just
-% checked).
-%
-% Two additional tests are needed: if we reach the end of the list of
-% range maxima and we are still not done, then we are faced with an
-% overflow. Clean up, and again insert the code point \hexnum{FFFD}
-% for the replacement character. Also, every time we read a byte, we
-% need to check whether we reached the end of the string. In a correct
-% \textsc{utf-8} string, this happens automatically when the
-% \texttt{_start} auxiliary leaves its first argument in the input
-% stream: the end-marker begins with \cs{__prg_break:}, which ends
-% the loop. On the other hand, if the end is reached when looking for
-% a continuation byte, the \cs{use_none:n} |#3| construction removes
-% the first token from the end-marker, and leaves the \texttt{_end}
-% auxiliary, which raises the appropriate error flag before ending the
-% mapping.
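-%
-% For example, when decoding the two bytes \hexnum{C3} \hexnum{A9},
-% the \texttt{_start} auxiliary computes $\hexnum{C3} - \hexnum{C0}
-% = 3$, the \texttt{_continuation} auxiliary then computes
-% $3 \times \hexnum{40} + \hexnum{A9} - \hexnum{80} = \hexnum{E9}$,
-% and the \texttt{_aux} function accepts this value since it is less
-% than \hexnum{800} but not less than \hexnum{80}: the code point
-% \hexnum{E9} is output.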
-% \begin{macrocode}
-\cs_new_protected_nopar:cpn { @@_convert_decode_utf8: }
- {
- \flag_clear:n { str_error }
- \flag_clear:n { str_missing }
- \flag_clear:n { str_extra }
- \flag_clear:n { str_overlong }
- \flag_clear:n { str_overflow }
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN \@@_decode_utf_viii_start:N \g_@@_result_tl
- { \__prg_break: \@@_decode_utf_viii_end: }
- \__prg_break_point:
- }
- \@@_if_flag_error:nnx { str_error } { utf8-decode } { }
- }
-\cs_new:Npn \@@_decode_utf_viii_start:N #1
- {
- #1
- \if_int_compare:w `#1 < "C0 \exp_stop_f:
- \s__tl
- \if_int_compare:w `#1 < "80 \exp_stop_f:
- \__int_value:w `#1
- \else:
- \flag_raise:n { str_extra }
- \flag_raise:n { str_error }
- \int_use:N \c_@@_replacement_char_int
- \fi:
- \else:
- \exp_after:wN \@@_decode_utf_viii_continuation:wwN
- \int_use:N \__int_eval:w `#1 - "C0 \exp_after:wN \__int_eval_end:
- \fi:
- \s__tl
- \use_none_delimit_by_q_stop:w {"80} {"800} {"10000} {"110000} \q_stop
- \@@_decode_utf_viii_start:N
- }
-\cs_new:Npn \@@_decode_utf_viii_continuation:wwN
- #1 \s__tl #2 \@@_decode_utf_viii_start:N #3
- {
- \use_none:n #3
- \if_int_compare:w `#3 <
- \if_int_compare:w `#3 < "80 \exp_stop_f: - \fi:
- "C0 \exp_stop_f:
- #3
- \exp_after:wN \@@_decode_utf_viii_aux:wNnnwN
- \int_use:N \__int_eval:w
- #1 * "40 + `#3 - "80
- \exp_after:wN \__int_eval_end:
- \else:
- \s__tl
- \flag_raise:n { str_missing }
- \flag_raise:n { str_error }
- \int_use:N \c_@@_replacement_char_int
- \fi:
- \s__tl
- #2
- \@@_decode_utf_viii_start:N #3
- }
-\cs_new:Npn \@@_decode_utf_viii_aux:wNnnwN
- #1 \s__tl #2#3#4 #5 \@@_decode_utf_viii_start:N #6
- {
- \if_int_compare:w #1 < #4 \exp_stop_f:
- \s__tl
- \if_int_compare:w #1 < #3 \exp_stop_f:
- \flag_raise:n { str_overlong }
- \flag_raise:n { str_error }
- \int_use:N \c_@@_replacement_char_int
- \else:
- #1
- \fi:
- \else:
- \if_meaning:w \q_stop #5
- \@@_decode_utf_viii_overflow:w #1
- \fi:
- \exp_after:wN \@@_decode_utf_viii_continuation:wwN
- \int_use:N \__int_eval:w #1 - #4 \exp_after:wN \__int_eval_end:
- \fi:
- \s__tl
- #2 {#4} #5
- \@@_decode_utf_viii_start:N
- }
-\cs_new:Npn \@@_decode_utf_viii_overflow:w #1 \fi: #2 \fi:
- {
- \fi: \fi:
- \flag_raise:n { str_overflow }
- \flag_raise:n { str_error }
- \int_use:N \c_@@_replacement_char_int
- }
-\cs_new_nopar:Npn \@@_decode_utf_viii_end:
+% Deprecated 2013-01-20 for removal by 2013-04-30.
+% \begin{macro}[EXP]
+% {\str_substr:Nnn, \str_substr:nnn, \str_substr_ignore_spaces:nnn}
+% \begin{macro}[EXP, aux]{\@@_substr:nnn}
+% These functions used to allow for an empty argument to denote the
+% start/end of the string. We reimplement them here by first checking
+% for an empty argument, and only then calling the appropriate version of
+% the \cs{str_range:nnn} function.
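+%
+% For instance, \cs{str_substr:nnn} |{abcdef} {} {3}| is turned into
+% \cs{str_range:nnn} |{abcdef} {1} {3}|, and an empty end argument is
+% similarly mapped to |-1|.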
+% \begin{macrocode}
+\cs_new:Npn \str_substr:Nnn #1 { \@@_substr:nnn { \str_range:Nnn #1 } }
+\cs_new:Npn \str_substr:nnn #1 { \@@_substr:nnn { \str_range:nnn {#1} } }
+\cs_new:Npn \str_substr_ignore_spaces:nnn #1
+ { \@@_substr:nnn { \str_range_ignore_spaces:nnn {#1} } }
+\cs_new:Npn \@@_substr:nnn #1#2#3
{
- \s__tl
- \flag_raise:n { str_missing }
- \flag_raise:n { str_error }
- \int_use:N \c_@@_replacement_char_int \s__tl
- \__prg_break:
+ \tl_if_empty:nTF {#2}
+ { \tl_if_empty:nTF {#3} { #1 { 1} { -1 } } { #1 { 1} {#3} } }
+ { \tl_if_empty:nTF {#3} { #1 {#2} { -1 } } { #1 {#2} {#3} } }
}
% \end{macrocode}
% \end{macro}
% \end{macro}
-% \end{macro}
%
+% Deprecated 2013-01-20 for removal by 2013-04-30.
+% \begin{variable}{\c_lbrace_str, \c_rbrace_str}
% \begin{macrocode}
-%</utf8>
-% \end{macrocode}
-%
-% \subsubsection{\textsc{utf-16} support}
-%
-% The definitions are done in a category code regime where the bytes
-% $254$ and $255$ used by the byte order mark have catcode~$12$.
-% \begin{macrocode}
-%<*utf16>
-\group_begin:
- \char_set_catcode_other:N \^^fe
- \char_set_catcode_other:N \^^ff
-% \end{macrocode}
-%
-% \begin{macro}[int]
-% {
-% \@@_convert_encode_utf16: ,
-% \@@_convert_encode_utf16be: ,
-% \@@_convert_encode_utf16le: ,
-% }
-% \begin{macro}[aux, rEXP]
-% {
-% \@@_encode_utf_xvi_aux:N ,
-% \@@_encode_utf_xvi_char:n ,
-% }
-% When the endianness is not specified, it is big-endian by default,
-% and we add a byte-order mark. Convert characters one by one in a
-% loop, with different behaviours depending on the character code.
-% \begin{itemize}
-% \item $[0, \hexnum{D7FF}]$: converted to two bytes;
-% \item $[\hexnum{D800}, \hexnum{DFFF}]$ are used as surrogates:
-% they cannot be converted and are replaced by the replacement
-% character;
-% \item $[\hexnum{E000}, \hexnum{FFFF}]$: converted to two bytes;
-% \item $[\hexnum{10000}, \hexnum{10FFFF}]$: converted to a pair of
-% surrogates, each two bytes. The magic \hexnum{D7C0} is
-% $\hexnum{D800}-\hexnum{10000}/\hexnum{400}$.
-% \end{itemize}
-% For the duration of this operation, \cs{@@_tmp:w} is defined as a
-% function to convert a number in the range $[0, \hexnum{FFFF}]$ to a
-% pair of bytes (either big endian or little endian), by feeding the
-% quotient of the division of |#1| by \hexnum{100}, followed by |#1|
-% to \cs{@@_encode_utf_xvi_be:nn} or its \texttt{le} analog: those
-% compute the remainder, and output two bytes for the quotient and
-% remainder.
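-%
-% For example, the code point \hexnum{10000} gives the lead surrogate
-% $\hexnum{10000} / \hexnum{400} + \hexnum{D7C0} = \hexnum{D800}$ and
-% the trail surrogate $0 + \hexnum{DC00} = \hexnum{DC00}$, hence the
-% four bytes \hexnum{D8} \hexnum{00} \hexnum{DC} \hexnum{00} in the
-% big-endian case.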
-% \begin{macrocode}
- \cs_new_protected_nopar:cpn { @@_convert_encode_utf16: }
- {
- \@@_encode_utf_xvi_aux:N \@@_output_byte_pair_be:n
- \tl_gput_left:Nx \g_@@_result_tl { ^^fe ^^ff }
- }
- \cs_new_protected_nopar:cpn { @@_convert_encode_utf16be: }
- { \@@_encode_utf_xvi_aux:N \@@_output_byte_pair_be:n }
- \cs_new_protected_nopar:cpn { @@_convert_encode_utf16le: }
- { \@@_encode_utf_xvi_aux:N \@@_output_byte_pair_le:n }
- \cs_new_protected:Npn \@@_encode_utf_xvi_aux:N #1
- {
- \flag_clear:n { str_error }
- \cs_set_eq:NN \@@_tmp:w #1
- \@@_convert_gmap_internal:N \@@_encode_utf_xvi_char:n
- \@@_if_flag_error:nnx { str_error } { utf16-encode } { }
- }
- \cs_new:Npn \@@_encode_utf_xvi_char:n #1
- {
- \if_int_compare:w #1 < "D800 \exp_stop_f:
- \@@_tmp:w {#1}
- \else:
- \if_int_compare:w #1 < "10000 \exp_stop_f:
- \if_int_compare:w #1 < "E000 \exp_stop_f:
- \flag_raise:n { str_error }
- \@@_tmp:w { \c_@@_replacement_char_int }
- \else:
- \@@_tmp:w {#1}
- \fi:
- \else:
- \exp_args:Nf \@@_tmp:w { \int_div_truncate:nn {#1} {"400} + "D7C0 }
- \exp_args:Nf \@@_tmp:w { \int_mod:nn {#1} {"400} + "DC00 }
- \fi:
- \fi:
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{variable}
-% {
-% \l_@@_missing_flag ,
-% \l_@@_extra_flag ,
-% \l_@@_end_flag ,
-% }
-% When encoding a Unicode string to \textsc{utf-16}, only one error
-% can occur: code points in the range $[\hexnum{D800},
-% \hexnum{DFFF}]$, corresponding to surrogates, cannot be encoded. We
-% use the all-purpose flag \texttt{@@_error} to signal that error.
-%
-% When decoding a Unicode string which is purportedly in
-% \textsc{utf-16}, three errors can occur: a missing trail surrogate,
-% an unexpected trail surrogate, and a string containing an odd number
-% of bytes.
-% \begin{macrocode}
- \flag_clear_new:n { str_missing }
- \flag_clear_new:n { str_extra }
- \flag_clear_new:n { str_end }
- \__msg_kernel_new:nnnn { str } { utf16-encode }
- { Unicode~string~cannot~be~expressed~in~UTF-16:~surrogate. }
- {
- Surrogate~code~points~(in~the~range~[U+D800,~U+DFFF])~
- can~be~expressed~in~the~UTF-8~and~UTF-32~encodings,~
- but~not~in~the~UTF-16~encoding.
- }
- \__msg_kernel_new:nnnn { str } { utf16-decode }
- {
- Invalid~UTF-16~string: \exp_last_unbraced:Nf \use_none:n
- \@@_if_flag_times:nT { str_missing } { ,~missing~trail~surrogate }
- \@@_if_flag_times:nT { str_extra } { ,~extra~trail~surrogate }
- \@@_if_flag_times:nT { str_end } { ,~odd~number~of~bytes }
- .
- }
- {
- In~the~UTF-16~encoding,~each~Unicode~character~is~encoded~as~
- 2~or~4~bytes: \\
- \iow_indent:n
- {
- Code~point~in~[U+0000,~U+D7FF]:~two~bytes \\
- Code~point~in~[U+D800,~U+DFFF]:~illegal \\
- Code~point~in~[U+E000,~U+FFFF]:~two~bytes \\
- Code~point~in~[U+10000,~U+10FFFF]:~
- a~lead~surrogate~and~a~trail~surrogate \\
- }
- Lead~surrogates~are~pairs~of~bytes~in~the~range~[0xD800,~0xDBFF],~
- and~trail~surrogates~are~in~the~range~[0xDC00,~0xDFFF].
- \flag_if_raised:nT { str_missing }
- {
- \\\\
- A~lead~surrogate~was~not~followed~by~a~trail~surrogate.
- }
- \flag_if_raised:nT { str_extra }
- {
- \\\\
- LaTeX~came~across~a~trail~surrogate~when~it~was~not~expected.
- }
- \flag_if_raised:nT { str_end }
- {
- \\\\
- The~string~contained~an~odd~number~of~bytes.~This~is~invalid:~
- the~basic~code~unit~for~UTF-16~is~16~bits~(2~bytes).
- }
- }
+\cs_new_eq:NN \c_lbrace_str \c_left_brace_str
+\cs_new_eq:NN \c_rbrace_str \c_right_brace_str
% \end{macrocode}
% \end{variable}
%
-% \begin{macro}[int]
-% {
-% \@@_convert_decode_utf16: ,
-% \@@_convert_decode_utf16be: ,
-% \@@_convert_decode_utf16le: ,
-% }
-% \begin{macro}[aux]{\@@_decode_utf_xvi_bom:NN, \@@_decode_utf_xvi:Nw}
-% As for \textsc{utf-8}, decoding \textsc{utf-16} is harder than
-% encoding it. If the endianness is unknown, check the first two
-% bytes: if those are \hexnum{FE} and \hexnum{FF} in either order,
-% remove them and use the corresponding endianness, otherwise assume
-% big-endianness. The three endianness cases are based on a common
-% auxiliary whose first argument is $1$ for big-endian and $2$ for
-% little-endian, and whose second argument, delimited by the scan mark
-% \cs{s__stop}, is expanded once (the string may be long; passing
-% \cs{g_@@_result_tl} as an argument before expansion is cheaper).
-%
-% The \cs{@@_decode_utf_xvi:Nw} function defines \cs{@@_tmp:w} to
-% take two arguments and return the character code of the first one if
-% the string is big-endian, and the second one if the string is
-% little-endian, then loops over the string using
-% \cs{@@_decode_utf_xvi_pair:NN} described below.
-% \begin{macrocode}
- \cs_new_protected_nopar:cpn { @@_convert_decode_utf16be: }
- { \@@_decode_utf_xvi:Nw 1 \g_@@_result_tl \s__stop }
- \cs_new_protected_nopar:cpn { @@_convert_decode_utf16le: }
- { \@@_decode_utf_xvi:Nw 2 \g_@@_result_tl \s__stop }
- \cs_new_protected_nopar:cpn { @@_convert_decode_utf16: }
- {
- \exp_after:wN \@@_decode_utf_xvi_bom:NN
- \g_@@_result_tl \s__stop \s__stop \s__stop
- }
- \cs_new_protected:Npn \@@_decode_utf_xvi_bom:NN #1#2
- {
- \str_if_eq_x:nnTF { #1#2 } { ^^ff ^^fe }
- { \@@_decode_utf_xvi:Nw 2 }
- {
- \str_if_eq_x:nnTF { #1#2 } { ^^fe ^^ff }
- { \@@_decode_utf_xvi:Nw 1 }
- { \@@_decode_utf_xvi:Nw 1 #1#2 }
- }
- }
- \cs_new_protected:Npn \@@_decode_utf_xvi:Nw #1#2 \s__stop
- {
- \flag_clear:n { str_error }
- \flag_clear:n { str_missing }
- \flag_clear:n { str_extra }
- \flag_clear:n { str_end }
- \cs_set:Npn \@@_tmp:w ##1 ##2 { ` ## #1 }
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN \@@_decode_utf_xvi_pair:NN
- #2 \q_nil \q_nil
- \__prg_break_point:
- }
- \@@_if_flag_error:nnx { str_error } { utf16-decode } { }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[aux, rEXP]
-% {
-% \@@_decode_utf_xvi_pair:NN ,
-% \@@_decode_utf_xvi_quad:NNwNN ,
-% \@@_decode_utf_xvi_pair_end:Nw ,
-% }
-% \begin{macro}[aux, rEXP]
-% {
-% \@@_decode_utf_xvi_error:nNN ,
-% \@@_decode_utf_xvi_extra:NNw ,
-% }
-% Bytes are read two at a time. At this stage, |\@@_tmp:w #1#2|
-% expands to the character code of the most significant byte, and we
-% distinguish cases depending on which range it lies in:
-% \begin{itemize}
-% \item $[\hexnum{D8}, \hexnum{DB}]$ signals a lead surrogate, and
-% the integer expression yields $1$ (\eTeX{} rounds ties away from
-% zero);
-% \item $[\hexnum{DC}, \hexnum{DF}]$ signals a trail surrogate,
-% unexpected here, and the integer expression yields $2$;
-% \item any other value signals a code point in the Basic
-% Multilingual Plane, which stands for itself, and the
-% \cs{if_case:w} construction expands to nothing (cases other than
-% $1$ or $2$), leaving the relevant material in the input stream,
-% followed by another call to the \texttt{_pair} auxiliary.
-% \end{itemize}
-% The case of a lead surrogate is treated by the \texttt{_quad}
-% auxiliary, whose arguments |#1|, |#2|, |#4| and |#5| are the four
-% bytes. We expect the most significant byte of |#4#5| to be in the
-% range $[\hexnum{DC}, \hexnum{DF}]$ (trail surrogate). The test is
-% similar to the test used for continuation bytes in the
-% \textsc{utf-8} decoding functions. In the case where |#4#5| is
-% indeed a trail surrogate, leave |#1#2#4#5| \cs{s__tl}
-% \meta{code~point} \cs{s__tl}, and remove the pair |#4#5| before
-% looping with \cs{@@_decode_utf_xvi_pair:NN}. Otherwise, of course,
-% complain about the missing surrogate.
-%
-% The magic number \hexnum{D7F7} is such that
-% $\hexnum{D7F7}*\hexnum{400} = \hexnum{D800}*\hexnum{400} +
-% \hexnum{DC00} - \hexnum{10000}$.
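-%
-% As a check, the surrogate pair \hexnum{D83D} \hexnum{DE00} decodes
-% to $( \hexnum{D83D} - \hexnum{D7F7} ) \times \hexnum{400} +
-% \hexnum{DE00} = \hexnum{1F600}$.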
-%
-% Every time we read a pair of bytes, we test for the end-marker
-% \cs{q_nil}. When reaching the end, we additionally check that the
-% string had an even length. Also, if the end is reached when
-% expecting a trail surrogate, we treat that as a missing surrogate.
-% \begin{macrocode}
- \cs_new:Npn \@@_decode_utf_xvi_pair:NN #1#2
- {
- \if_meaning:w \q_nil #2
- \@@_decode_utf_xvi_pair_end:Nw #1
- \fi:
- \if_case:w
- \__int_eval:w ( \@@_tmp:w #1#2 - "D6 ) / \c_four \__int_eval_end:
- \or: \exp_after:wN \@@_decode_utf_xvi_quad:NNwNN
- \or: \exp_after:wN \@@_decode_utf_xvi_extra:NNw
- \fi:
- #1#2 \s__tl
- \int_eval:n { "100 * \@@_tmp:w #1#2 + \@@_tmp:w #2#1 } \s__tl
- \@@_decode_utf_xvi_pair:NN
- }
- \cs_new:Npn \@@_decode_utf_xvi_quad:NNwNN
- #1#2 #3 \@@_decode_utf_xvi_pair:NN #4#5
- {
- \if_meaning:w \q_nil #5
- \@@_decode_utf_xvi_error:nNN { missing } #1#2
- \@@_decode_utf_xvi_pair_end:Nw #4
- \fi:
- \if_int_compare:w
- \if_int_compare:w \@@_tmp:w #4#5 < "DC \exp_stop_f:
- \c_zero = \c_one
- \else:
- \@@_tmp:w #4#5 < "E0 \exp_stop_f:
- \fi:
- #1 #2 #4 #5 \s__tl
- \int_eval:n
- {
- ( "100 * \@@_tmp:w #1#2 + \@@_tmp:w #2#1 - "D7F7 ) * "400
- + "100 * \@@_tmp:w #4#5 + \@@_tmp:w #5#4
- }
- \s__tl
- \exp_after:wN \use_i:nnn
- \else:
- \@@_decode_utf_xvi_error:nNN { missing } #1#2
- \fi:
- \@@_decode_utf_xvi_pair:NN #4#5
- }
- \cs_new:Npn \@@_decode_utf_xvi_pair_end:Nw #1 \fi:
- {
- \fi:
- \if_meaning:w \q_nil #1
- \else:
- \@@_decode_utf_xvi_error:nNN { end } #1 \prg_do_nothing:
- \fi:
- \__prg_break:
- }
- \cs_new:Npn \@@_decode_utf_xvi_extra:NNw #1#2 \s__tl #3 \s__tl
- { \@@_decode_utf_xvi_error:nNN { extra } #1#2 }
- \cs_new:Npn \@@_decode_utf_xvi_error:nNN #1#2#3
- {
- \flag_raise:n { str_error }
- \flag_raise:n { str_#1 }
- #2 #3 \s__tl
- \int_use:N \c_@@_replacement_char_int \s__tl
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% Restore the original catcodes of bytes $254$ and $255$.
-% \begin{macrocode}
-\group_end:
-%</utf16>
-% \end{macrocode}
-%
-% \subsubsection{\textsc{utf-32} support}
-%
-% The definitions are done in a category code regime where the bytes
-% $0$, $254$ and $255$ used by the byte order mark have catcode
-% \enquote{other}.
-% \begin{macrocode}
-%<*utf32>
-\group_begin:
- \char_set_catcode_other:N \^^00
- \char_set_catcode_other:N \^^fe
- \char_set_catcode_other:N \^^ff
-% \end{macrocode}
-%
-% \begin{macro}[int]
-% {
-% \@@_convert_encode_utf32: ,
-% \@@_convert_encode_utf32be: ,
-% \@@_convert_encode_utf32le: ,
-% }
-% \begin{macro}[aux, rEXP]
-% {
-% \@@_encode_utf_xxxii_be:n ,
-% \@@_encode_utf_xxxii_be_aux:nn ,
-% \@@_encode_utf_xxxii_le:n ,
-% \@@_encode_utf_xxxii_le_aux:nn ,
-% }
-% Convert each integer in the comma-list \cs{g_@@_result_tl} to a
-% sequence of four bytes. The functions for big-endian and
-% little-endian encodings are very similar, but the
-% \cs{@@_output_byte:n} instructions are reversed.
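-%
-% For example, the code point \hexnum{1F600} becomes the four bytes
-% \hexnum{00} \hexnum{01} \hexnum{F6} \hexnum{00} in the big-endian
-% case and \hexnum{00} \hexnum{F6} \hexnum{01} \hexnum{00} in the
-% little-endian case.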
-% \begin{macrocode}
- \cs_new_protected_nopar:cpn { @@_convert_encode_utf32: }
- {
- \@@_convert_gmap_internal:N \@@_encode_utf_xxxii_be:n
- \tl_gput_left:Nx \g_@@_result_tl { ^^00 ^^00 ^^fe ^^ff }
- }
- \cs_new_protected_nopar:cpn { @@_convert_encode_utf32be: }
- { \@@_convert_gmap_internal:N \@@_encode_utf_xxxii_be:n }
- \cs_new_protected_nopar:cpn { @@_convert_encode_utf32le: }
- { \@@_convert_gmap_internal:N \@@_encode_utf_xxxii_le:n }
- \cs_new:Npn \@@_encode_utf_xxxii_be:n #1
- {
- \exp_args:Nf \@@_encode_utf_xxxii_be_aux:nn
- { \int_div_truncate:nn {#1} { "100 } } {#1}
- }
- \cs_new:Npn \@@_encode_utf_xxxii_be_aux:nn #1#2
- {
- ^^00
- \@@_output_byte_pair_be:n {#1}
- \@@_output_byte:n { #2 - #1 * "100 }
- }
- \cs_new:Npn \@@_encode_utf_xxxii_le:n #1
- {
- \exp_args:Nf \@@_encode_utf_xxxii_le_aux:nn
- { \int_div_truncate:nn {#1} { "100 } } {#1}
- }
- \cs_new:Npn \@@_encode_utf_xxxii_le_aux:nn #1#2
- {
- \@@_output_byte:n { #2 - #1 * "100 }
- \@@_output_byte_pair_le:n {#1}
- ^^00
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{variable}{str_overflow, str_end}
-% There can be no error when encoding in \textsc{utf-32}. When
-% decoding, the string may not have length $4n$, or it may contain
-% code points larger than \hexnum{10FFFF}. The latter case often
-% happens if the encoding was in fact not \textsc{utf-32}, because
-% most arbitrary strings are not valid in \textsc{utf-32}.
-% \begin{macrocode}
- \flag_clear_new:n { str_overflow }
- \flag_clear_new:n { str_end }
- \__msg_kernel_new:nnnn { str } { utf32-decode }
- {
- Invalid~UTF-32~string: \exp_last_unbraced:Nf \use_none:n
- \@@_if_flag_times:nT { str_overflow } { ,~code~point~too~large }
- \@@_if_flag_times:nT { str_end } { ,~truncated~string }
- .
- }
- {
- In~the~UTF-32~encoding,~every~Unicode~character~
- (in~the~range~[U+0000,~U+10FFFF])~is~encoded~as~4~bytes.
- \flag_if_raised:nT { str_overflow }
- {
- \\\\
- LaTeX~came~across~a~code~point~larger~than~1114111,~
- the~maximum~code~point~defined~by~Unicode.~
- Perhaps~the~string~was~not~encoded~in~the~UTF-32~encoding?
- }
- \flag_if_raised:nT { str_end }
- {
- \\\\
- The~length~of~the~string~is~not~a~multiple~of~4.~
- Perhaps~the~string~was~truncated?
- }
- }
-% \end{macrocode}
-% \end{variable}
-%
-% \begin{macro}[int]
-% {
-% \@@_convert_decode_utf32: ,
-% \@@_convert_decode_utf32be: ,
-% \@@_convert_decode_utf32le: ,
-% }
-% \begin{macro}[aux]
-% {\@@_decode_utf_xxxii_bom:NNNN, \@@_decode_utf_xxxii:Nw}
-% \begin{macro}[aux, rEXP]
-% {\@@_decode_utf_xxxii_loop:NNNN, \@@_decode_utf_xxxii_end:w}
-%
-% The structure is similar to \textsc{utf-16} decoding functions. If
-% the endianness is not given, test the first $4$ bytes of the string
-% (possibly \cs{s__stop} if the string is too short) for the presence
-% of a byte-order mark. If there is a byte-order mark, use that
-% endianness, and remove the $4$ bytes, otherwise default to
-% big-endian, and leave the $4$ bytes in place. The
-% \cs{@@_decode_utf_xxxii:Nw} auxiliary receives $1$ or $2$ as its
-% first argument indicating endianness, and the string to convert as
-% its second argument (expanded or not). It sets \cs{@@_tmp:w} to
-% expand to the character code of either of its two arguments
-% depending on endianness, then triggers the \texttt{_loop} auxiliary
-% inside an \texttt{x}-expanding assignment to \cs{g_@@_result_tl}.
-%
-% The \texttt{_loop} auxiliary first checks for the end-of-string
-% marker \cs{s__stop}, calling the \texttt{_end} auxiliary if
-% appropriate. Otherwise, leave the \meta{4~bytes} \cs{s__tl} behind,
-% then check that the code point is not overflowing: the leading byte
-% must be $0$, and the following byte at most $16$.
-%
-% In the ending code, we check that there remains no byte: there
-% should be nothing left until the first \cs{s__stop}. Break the map.
-% \begin{macrocode}
- \cs_new_protected_nopar:cpn { @@_convert_decode_utf32be: }
- { \@@_decode_utf_xxxii:Nw 1 \g_@@_result_tl \s__stop }
- \cs_new_protected_nopar:cpn { @@_convert_decode_utf32le: }
- { \@@_decode_utf_xxxii:Nw 2 \g_@@_result_tl \s__stop }
- \cs_new_protected_nopar:cpn { @@_convert_decode_utf32: }
- {
- \exp_after:wN \@@_decode_utf_xxxii_bom:NNNN \g_@@_result_tl
- \s__stop \s__stop \s__stop \s__stop \s__stop
- }
- \cs_new_protected:Npn \@@_decode_utf_xxxii_bom:NNNN #1#2#3#4
- {
- \str_if_eq_x:nnTF { #1#2#3#4 } { ^^ff ^^fe ^^00 ^^00 }
- { \@@_decode_utf_xxxii:Nw 2 }
- {
- \str_if_eq_x:nnTF { #1#2#3#4 } { ^^00 ^^00 ^^fe ^^ff }
- { \@@_decode_utf_xxxii:Nw 1 }
- { \@@_decode_utf_xxxii:Nw 1 #1#2#3#4 }
- }
- }
- \cs_new_protected:Npn \@@_decode_utf_xxxii:Nw #1#2 \s__stop
- {
- \flag_clear:n { str_overflow }
- \flag_clear:n { str_end }
- \flag_clear:n { str_error }
- \cs_set:Npn \@@_tmp:w ##1 ##2 { ` ## #1 }
- \tl_gset:Nx \g_@@_result_tl
- {
- \exp_after:wN \@@_decode_utf_xxxii_loop:NNNN
- #2 \s__stop \s__stop \s__stop \s__stop
- \__prg_break_point:
- }
- \@@_if_flag_error:nnx { str_error } { utf32-decode } { }
- }
- \cs_new:Npn \@@_decode_utf_xxxii_loop:NNNN #1#2#3#4
- {
- \if_meaning:w \s__stop #4
- \exp_after:wN \@@_decode_utf_xxxii_end:w
- \fi:
- #1#2#3#4 \s__tl
- \if_int_compare:w \@@_tmp:w #1#4 > \c_zero
- \flag_raise:n { str_overflow }
- \flag_raise:n { str_error }
- \int_use:N \c_@@_replacement_char_int
- \else:
- \if_int_compare:w \@@_tmp:w #2#3 > \c_sixteen
- \flag_raise:n { str_overflow }
- \flag_raise:n { str_error }
- \int_use:N \c_@@_replacement_char_int
- \else:
- \int_eval:n
- { \@@_tmp:w #2#3*"10000 + \@@_tmp:w #3#2*"100 + \@@_tmp:w #4#1 }
- \fi:
- \fi:
- \s__tl
- \@@_decode_utf_xxxii_loop:NNNN
- }
- \cs_new:Npn \@@_decode_utf_xxxii_end:w #1 \s__stop
- {
- \tl_if_empty:nF {#1}
- {
- \flag_raise:n { str_end }
- \flag_raise:n { str_error }
- #1 \s__tl
- \int_use:N \c_@@_replacement_char_int \s__tl
- }
- \__prg_break:
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% Restore the original catcodes of bytes $0$, $254$ and $255$.
-% \begin{macrocode}
-\group_end:
-%</utf32>
-% \end{macrocode}
-%
-% \subsubsection{\textsc{iso 8859} support}
-%
-% The \textsc{iso-8859-1} encoding exactly matches the first $256$
-% Unicode characters. For other 8-bit encodings of the \textsc{iso-8859}
-% family, we keep track only of differences, and of unassigned bytes.
-% \begin{macrocode}
-%<*iso88591>
-\@@_declare_eight_bit_encoding:nnn { iso88591 }
- {
- }
- {
- }
-%</iso88591>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso88592>
-\@@_declare_eight_bit_encoding:nnn { iso88592 }
- {
- { A1 } { 0104 }
- { A2 } { 02D8 }
- { A3 } { 0141 }
- { A5 } { 013D }
- { A6 } { 015A }
- { A9 } { 0160 }
- { AA } { 015E }
- { AB } { 0164 }
- { AC } { 0179 }
- { AE } { 017D }
- { AF } { 017B }
- { B1 } { 0105 }
- { B2 } { 02DB }
- { B3 } { 0142 }
- { B5 } { 013E }
- { B6 } { 015B }
- { B7 } { 02C7 }
- { B9 } { 0161 }
- { BA } { 015F }
- { BB } { 0165 }
- { BC } { 017A }
- { BD } { 02DD }
- { BE } { 017E }
- { BF } { 017C }
- { C0 } { 0154 }
- { C3 } { 0102 }
- { C5 } { 0139 }
- { C6 } { 0106 }
- { C8 } { 010C }
- { CA } { 0118 }
- { CC } { 011A }
- { CF } { 010E }
- { D0 } { 0110 }
- { D1 } { 0143 }
- { D2 } { 0147 }
- { D5 } { 0150 }
- { D8 } { 0158 }
- { D9 } { 016E }
- { DB } { 0170 }
- { DE } { 0162 }
- { E0 } { 0155 }
- { E3 } { 0103 }
- { E5 } { 013A }
- { E6 } { 0107 }
- { E8 } { 010D }
- { EA } { 0119 }
- { EC } { 011B }
- { EF } { 010F }
- { F0 } { 0111 }
- { F1 } { 0144 }
- { F2 } { 0148 }
- { F5 } { 0151 }
- { F8 } { 0159 }
- { F9 } { 016F }
- { FB } { 0171 }
- { FE } { 0163 }
- { FF } { 02D9 }
- }
- {
- }
-%</iso88592>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso88593>
-\@@_declare_eight_bit_encoding:nnn { iso88593 }
- {
- { A1 } { 0126 }
- { A2 } { 02D8 }
- { A6 } { 0124 }
- { A9 } { 0130 }
- { AA } { 015E }
- { AB } { 011E }
- { AC } { 0134 }
- { AF } { 017B }
- { B1 } { 0127 }
- { B6 } { 0125 }
- { B9 } { 0131 }
- { BA } { 015F }
- { BB } { 011F }
- { BC } { 0135 }
- { BF } { 017C }
- { C5 } { 010A }
- { C6 } { 0108 }
- { D5 } { 0120 }
- { D8 } { 011C }
- { DD } { 016C }
- { DE } { 015C }
- { E5 } { 010B }
- { E6 } { 0109 }
- { F5 } { 0121 }
- { F8 } { 011D }
- { FD } { 016D }
- { FE } { 015D }
- { FF } { 02D9 }
- }
- {
- { A5 }
- { AE }
- { BE }
- { C3 }
- { D0 }
- { E3 }
- { F0 }
- }
-%</iso88593>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso88594>
-\@@_declare_eight_bit_encoding:nnn { iso88594 }
- {
- { A1 } { 0104 }
- { A2 } { 0138 }
- { A3 } { 0156 }
- { A5 } { 0128 }
- { A6 } { 013B }
- { A9 } { 0160 }
- { AA } { 0112 }
- { AB } { 0122 }
- { AC } { 0166 }
- { AE } { 017D }
- { B1 } { 0105 }
- { B2 } { 02DB }
- { B3 } { 0157 }
- { B5 } { 0129 }
- { B6 } { 013C }
- { B7 } { 02C7 }
- { B9 } { 0161 }
- { BA } { 0113 }
- { BB } { 0123 }
- { BC } { 0167 }
- { BD } { 014A }
- { BE } { 017E }
- { BF } { 014B }
- { C0 } { 0100 }
- { C7 } { 012E }
- { C8 } { 010C }
- { CA } { 0118 }
- { CC } { 0116 }
- { CF } { 012A }
- { D0 } { 0110 }
- { D1 } { 0145 }
- { D2 } { 014C }
- { D3 } { 0136 }
- { D9 } { 0172 }
- { DD } { 0168 }
- { DE } { 016A }
- { E0 } { 0101 }
- { E7 } { 012F }
- { E8 } { 010D }
- { EA } { 0119 }
- { EC } { 0117 }
- { EF } { 012B }
- { F0 } { 0111 }
- { F1 } { 0146 }
- { F2 } { 014D }
- { F3 } { 0137 }
- { F9 } { 0173 }
- { FD } { 0169 }
- { FE } { 016B }
- { FF } { 02D9 }
- }
- {
- }
-%</iso88594>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso88595>
-\@@_declare_eight_bit_encoding:nnn { iso88595 }
- {
- { A1 } { 0401 }
- { A2 } { 0402 }
- { A3 } { 0403 }
- { A4 } { 0404 }
- { A5 } { 0405 }
- { A6 } { 0406 }
- { A7 } { 0407 }
- { A8 } { 0408 }
- { A9 } { 0409 }
- { AA } { 040A }
- { AB } { 040B }
- { AC } { 040C }
- { AE } { 040E }
- { AF } { 040F }
- { B0 } { 0410 }
- { B1 } { 0411 }
- { B2 } { 0412 }
- { B3 } { 0413 }
- { B4 } { 0414 }
- { B5 } { 0415 }
- { B6 } { 0416 }
- { B7 } { 0417 }
- { B8 } { 0418 }
- { B9 } { 0419 }
- { BA } { 041A }
- { BB } { 041B }
- { BC } { 041C }
- { BD } { 041D }
- { BE } { 041E }
- { BF } { 041F }
- { C0 } { 0420 }
- { C1 } { 0421 }
- { C2 } { 0422 }
- { C3 } { 0423 }
- { C4 } { 0424 }
- { C5 } { 0425 }
- { C6 } { 0426 }
- { C7 } { 0427 }
- { C8 } { 0428 }
- { C9 } { 0429 }
- { CA } { 042A }
- { CB } { 042B }
- { CC } { 042C }
- { CD } { 042D }
- { CE } { 042E }
- { CF } { 042F }
- { D0 } { 0430 }
- { D1 } { 0431 }
- { D2 } { 0432 }
- { D3 } { 0433 }
- { D4 } { 0434 }
- { D5 } { 0435 }
- { D6 } { 0436 }
- { D7 } { 0437 }
- { D8 } { 0438 }
- { D9 } { 0439 }
- { DA } { 043A }
- { DB } { 043B }
- { DC } { 043C }
- { DD } { 043D }
- { DE } { 043E }
- { DF } { 043F }
- { E0 } { 0440 }
- { E1 } { 0441 }
- { E2 } { 0442 }
- { E3 } { 0443 }
- { E4 } { 0444 }
- { E5 } { 0445 }
- { E6 } { 0446 }
- { E7 } { 0447 }
- { E8 } { 0448 }
- { E9 } { 0449 }
- { EA } { 044A }
- { EB } { 044B }
- { EC } { 044C }
- { ED } { 044D }
- { EE } { 044E }
- { EF } { 044F }
- { F0 } { 2116 }
- { F1 } { 0451 }
- { F2 } { 0452 }
- { F3 } { 0453 }
- { F4 } { 0454 }
- { F5 } { 0455 }
- { F6 } { 0456 }
- { F7 } { 0457 }
- { F8 } { 0458 }
- { F9 } { 0459 }
- { FA } { 045A }
- { FB } { 045B }
- { FC } { 045C }
- { FD } { 00A7 }
- { FE } { 045E }
- { FF } { 045F }
- }
- {
- }
-%</iso88595>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso88596>
-\@@_declare_eight_bit_encoding:nnn { iso88596 }
- {
- { AC } { 060C }
- { BB } { 061B }
- { BF } { 061F }
- { C1 } { 0621 }
- { C2 } { 0622 }
- { C3 } { 0623 }
- { C4 } { 0624 }
- { C5 } { 0625 }
- { C6 } { 0626 }
- { C7 } { 0627 }
- { C8 } { 0628 }
- { C9 } { 0629 }
- { CA } { 062A }
- { CB } { 062B }
- { CC } { 062C }
- { CD } { 062D }
- { CE } { 062E }
- { CF } { 062F }
- { D0 } { 0630 }
- { D1 } { 0631 }
- { D2 } { 0632 }
- { D3 } { 0633 }
- { D4 } { 0634 }
- { D5 } { 0635 }
- { D6 } { 0636 }
- { D7 } { 0637 }
- { D8 } { 0638 }
- { D9 } { 0639 }
- { DA } { 063A }
- { E0 } { 0640 }
- { E1 } { 0641 }
- { E2 } { 0642 }
- { E3 } { 0643 }
- { E4 } { 0644 }
- { E5 } { 0645 }
- { E6 } { 0646 }
- { E7 } { 0647 }
- { E8 } { 0648 }
- { E9 } { 0649 }
- { EA } { 064A }
- { EB } { 064B }
- { EC } { 064C }
- { ED } { 064D }
- { EE } { 064E }
- { EF } { 064F }
- { F0 } { 0650 }
- { F1 } { 0651 }
- { F2 } { 0652 }
- }
- {
- { A1 }
- { A2 }
- { A3 }
- { A5 }
- { A6 }
- { A7 }
- { A8 }
- { A9 }
- { AA }
- { AB }
- { AE }
- { AF }
- { B0 }
- { B1 }
- { B2 }
- { B3 }
- { B4 }
- { B5 }
- { B6 }
- { B7 }
- { B8 }
- { B9 }
- { BA }
- { BC }
- { BD }
- { BE }
- { C0 }
- { DB }
- { DC }
- { DD }
- { DE }
- { DF }
- }
-%</iso88596>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso88597>
-\@@_declare_eight_bit_encoding:nnn { iso88597 }
- {
- { A1 } { 2018 }
- { A2 } { 2019 }
- { A4 } { 20AC }
- { A5 } { 20AF }
- { AA } { 037A }
- { AF } { 2015 }
- { B4 } { 0384 }
- { B5 } { 0385 }
- { B6 } { 0386 }
- { B8 } { 0388 }
- { B9 } { 0389 }
- { BA } { 038A }
- { BC } { 038C }
- { BE } { 038E }
- { BF } { 038F }
- { C0 } { 0390 }
- { C1 } { 0391 }
- { C2 } { 0392 }
- { C3 } { 0393 }
- { C4 } { 0394 }
- { C5 } { 0395 }
- { C6 } { 0396 }
- { C7 } { 0397 }
- { C8 } { 0398 }
- { C9 } { 0399 }
- { CA } { 039A }
- { CB } { 039B }
- { CC } { 039C }
- { CD } { 039D }
- { CE } { 039E }
- { CF } { 039F }
- { D0 } { 03A0 }
- { D1 } { 03A1 }
- { D3 } { 03A3 }
- { D4 } { 03A4 }
- { D5 } { 03A5 }
- { D6 } { 03A6 }
- { D7 } { 03A7 }
- { D8 } { 03A8 }
- { D9 } { 03A9 }
- { DA } { 03AA }
- { DB } { 03AB }
- { DC } { 03AC }
- { DD } { 03AD }
- { DE } { 03AE }
- { DF } { 03AF }
- { E0 } { 03B0 }
- { E1 } { 03B1 }
- { E2 } { 03B2 }
- { E3 } { 03B3 }
- { E4 } { 03B4 }
- { E5 } { 03B5 }
- { E6 } { 03B6 }
- { E7 } { 03B7 }
- { E8 } { 03B8 }
- { E9 } { 03B9 }
- { EA } { 03BA }
- { EB } { 03BB }
- { EC } { 03BC }
- { ED } { 03BD }
- { EE } { 03BE }
- { EF } { 03BF }
- { F0 } { 03C0 }
- { F1 } { 03C1 }
- { F2 } { 03C2 }
- { F3 } { 03C3 }
- { F4 } { 03C4 }
- { F5 } { 03C5 }
- { F6 } { 03C6 }
- { F7 } { 03C7 }
- { F8 } { 03C8 }
- { F9 } { 03C9 }
- { FA } { 03CA }
- { FB } { 03CB }
- { FC } { 03CC }
- { FD } { 03CD }
- { FE } { 03CE }
- }
- {
- { AE }
- { D2 }
- }
-%</iso88597>
-% \end{macrocode}
-%
% \begin{macrocode}
-%<*iso88598>
-\@@_declare_eight_bit_encoding:nnn { iso88598 }
- {
- { AA } { 00D7 }
- { BA } { 00F7 }
- { DF } { 2017 }
- { E0 } { 05D0 }
- { E1 } { 05D1 }
- { E2 } { 05D2 }
- { E3 } { 05D3 }
- { E4 } { 05D4 }
- { E5 } { 05D5 }
- { E6 } { 05D6 }
- { E7 } { 05D7 }
- { E8 } { 05D8 }
- { E9 } { 05D9 }
- { EA } { 05DA }
- { EB } { 05DB }
- { EC } { 05DC }
- { ED } { 05DD }
- { EE } { 05DE }
- { EF } { 05DF }
- { F0 } { 05E0 }
- { F1 } { 05E1 }
- { F2 } { 05E2 }
- { F3 } { 05E3 }
- { F4 } { 05E4 }
- { F5 } { 05E5 }
- { F6 } { 05E6 }
- { F7 } { 05E7 }
- { F8 } { 05E8 }
- { F9 } { 05E9 }
- { FA } { 05EA }
- { FD } { 200E }
- { FE } { 200F }
- }
- {
- { A1 }
- { BF }
- { C0 }
- { C1 }
- { C2 }
- { C3 }
- { C4 }
- { C5 }
- { C6 }
- { C7 }
- { C8 }
- { C9 }
- { CA }
- { CB }
- { CC }
- { CD }
- { CE }
- { CF }
- { D0 }
- { D1 }
- { D2 }
- { D3 }
- { D4 }
- { D5 }
- { D6 }
- { D7 }
- { D8 }
- { D9 }
- { DA }
- { DB }
- { DC }
- { DD }
- { DE }
- { FB }
- { FC }
- }
-%</iso88598>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso88599>
-\@@_declare_eight_bit_encoding:nnn { iso88599 }
- {
- { D0 } { 011E }
- { DD } { 0130 }
- { DE } { 015E }
- { F0 } { 011F }
- { FD } { 0131 }
- { FE } { 015F }
- }
- {
- }
-%</iso88599>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso885910>
-\@@_declare_eight_bit_encoding:nnn { iso885910 }
- {
- { A1 } { 0104 }
- { A2 } { 0112 }
- { A3 } { 0122 }
- { A4 } { 012A }
- { A5 } { 0128 }
- { A6 } { 0136 }
- { A8 } { 013B }
- { A9 } { 0110 }
- { AA } { 0160 }
- { AB } { 0166 }
- { AC } { 017D }
- { AE } { 016A }
- { AF } { 014A }
- { B1 } { 0105 }
- { B2 } { 0113 }
- { B3 } { 0123 }
- { B4 } { 012B }
- { B5 } { 0129 }
- { B6 } { 0137 }
- { B8 } { 013C }
- { B9 } { 0111 }
- { BA } { 0161 }
- { BB } { 0167 }
- { BC } { 017E }
- { BD } { 2015 }
- { BE } { 016B }
- { BF } { 014B }
- { C0 } { 0100 }
- { C7 } { 012E }
- { C8 } { 010C }
- { CA } { 0118 }
- { CC } { 0116 }
- { D1 } { 0145 }
- { D2 } { 014C }
- { D7 } { 0168 }
- { D9 } { 0172 }
- { E0 } { 0101 }
- { E7 } { 012F }
- { E8 } { 010D }
- { EA } { 0119 }
- { EC } { 0117 }
- { F1 } { 0146 }
- { F2 } { 014D }
- { F7 } { 0169 }
- { F9 } { 0173 }
- { FF } { 0138 }
- }
- {
- }
-%</iso885910>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso885911>
-\@@_declare_eight_bit_encoding:nnn { iso885911 }
- {
- { A1 } { 0E01 }
- { A2 } { 0E02 }
- { A3 } { 0E03 }
- { A4 } { 0E04 }
- { A5 } { 0E05 }
- { A6 } { 0E06 }
- { A7 } { 0E07 }
- { A8 } { 0E08 }
- { A9 } { 0E09 }
- { AA } { 0E0A }
- { AB } { 0E0B }
- { AC } { 0E0C }
- { AD } { 0E0D }
- { AE } { 0E0E }
- { AF } { 0E0F }
- { B0 } { 0E10 }
- { B1 } { 0E11 }
- { B2 } { 0E12 }
- { B3 } { 0E13 }
- { B4 } { 0E14 }
- { B5 } { 0E15 }
- { B6 } { 0E16 }
- { B7 } { 0E17 }
- { B8 } { 0E18 }
- { B9 } { 0E19 }
- { BA } { 0E1A }
- { BB } { 0E1B }
- { BC } { 0E1C }
- { BD } { 0E1D }
- { BE } { 0E1E }
- { BF } { 0E1F }
- { C0 } { 0E20 }
- { C1 } { 0E21 }
- { C2 } { 0E22 }
- { C3 } { 0E23 }
- { C4 } { 0E24 }
- { C5 } { 0E25 }
- { C6 } { 0E26 }
- { C7 } { 0E27 }
- { C8 } { 0E28 }
- { C9 } { 0E29 }
- { CA } { 0E2A }
- { CB } { 0E2B }
- { CC } { 0E2C }
- { CD } { 0E2D }
- { CE } { 0E2E }
- { CF } { 0E2F }
- { D0 } { 0E30 }
- { D1 } { 0E31 }
- { D2 } { 0E32 }
- { D3 } { 0E33 }
- { D4 } { 0E34 }
- { D5 } { 0E35 }
- { D6 } { 0E36 }
- { D7 } { 0E37 }
- { D8 } { 0E38 }
- { D9 } { 0E39 }
- { DA } { 0E3A }
- { DF } { 0E3F }
- { E0 } { 0E40 }
- { E1 } { 0E41 }
- { E2 } { 0E42 }
- { E3 } { 0E43 }
- { E4 } { 0E44 }
- { E5 } { 0E45 }
- { E6 } { 0E46 }
- { E7 } { 0E47 }
- { E8 } { 0E48 }
- { E9 } { 0E49 }
- { EA } { 0E4A }
- { EB } { 0E4B }
- { EC } { 0E4C }
- { ED } { 0E4D }
- { EE } { 0E4E }
- { EF } { 0E4F }
- { F0 } { 0E50 }
- { F1 } { 0E51 }
- { F2 } { 0E52 }
- { F3 } { 0E53 }
- { F4 } { 0E54 }
- { F5 } { 0E55 }
- { F6 } { 0E56 }
- { F7 } { 0E57 }
- { F8 } { 0E58 }
- { F9 } { 0E59 }
- { FA } { 0E5A }
- { FB } { 0E5B }
- }
- {
- { DB }
- { DC }
- { DD }
- { DE }
- }
-%</iso885911>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso885913>
-\@@_declare_eight_bit_encoding:nnn { iso885913 }
- {
- { A1 } { 201D }
- { A5 } { 201E }
- { A8 } { 00D8 }
- { AA } { 0156 }
- { AF } { 00C6 }
- { B4 } { 201C }
- { B8 } { 00F8 }
- { BA } { 0157 }
- { BF } { 00E6 }
- { C0 } { 0104 }
- { C1 } { 012E }
- { C2 } { 0100 }
- { C3 } { 0106 }
- { C6 } { 0118 }
- { C7 } { 0112 }
- { C8 } { 010C }
- { CA } { 0179 }
- { CB } { 0116 }
- { CC } { 0122 }
- { CD } { 0136 }
- { CE } { 012A }
- { CF } { 013B }
- { D0 } { 0160 }
- { D1 } { 0143 }
- { D2 } { 0145 }
- { D4 } { 014C }
- { D8 } { 0172 }
- { D9 } { 0141 }
- { DA } { 015A }
- { DB } { 016A }
- { DD } { 017B }
- { DE } { 017D }
- { E0 } { 0105 }
- { E1 } { 012F }
- { E2 } { 0101 }
- { E3 } { 0107 }
- { E6 } { 0119 }
- { E7 } { 0113 }
- { E8 } { 010D }
- { EA } { 017A }
- { EB } { 0117 }
- { EC } { 0123 }
- { ED } { 0137 }
- { EE } { 012B }
- { EF } { 013C }
- { F0 } { 0161 }
- { F1 } { 0144 }
- { F2 } { 0146 }
- { F4 } { 014D }
- { F8 } { 0173 }
- { F9 } { 0142 }
- { FA } { 015B }
- { FB } { 016B }
- { FD } { 017C }
- { FE } { 017E }
- { FF } { 2019 }
- }
- {
- }
-%</iso885913>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso885914>
-\@@_declare_eight_bit_encoding:nnn { iso885914 }
- {
- { A1 } { 1E02 }
- { A2 } { 1E03 }
- { A4 } { 010A }
- { A5 } { 010B }
- { A6 } { 1E0A }
- { A8 } { 1E80 }
- { AA } { 1E82 }
- { AB } { 1E0B }
- { AC } { 1EF2 }
- { AF } { 0178 }
- { B0 } { 1E1E }
- { B1 } { 1E1F }
- { B2 } { 0120 }
- { B3 } { 0121 }
- { B4 } { 1E40 }
- { B5 } { 1E41 }
- { B7 } { 1E56 }
- { B8 } { 1E81 }
- { B9 } { 1E57 }
- { BA } { 1E83 }
- { BB } { 1E60 }
- { BC } { 1EF3 }
- { BD } { 1E84 }
- { BE } { 1E85 }
- { BF } { 1E61 }
- { D0 } { 0174 }
- { D7 } { 1E6A }
- { DE } { 0176 }
- { F0 } { 0175 }
- { F7 } { 1E6B }
- { FE } { 0177 }
- }
- {
- }
-%</iso885914>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso885915>
-\@@_declare_eight_bit_encoding:nnn { iso885915 }
- {
- { A4 } { 20AC }
- { A6 } { 0160 }
- { A8 } { 0161 }
- { B4 } { 017D }
- { B8 } { 017E }
- { BC } { 0152 }
- { BD } { 0153 }
- { BE } { 0178 }
- }
- {
- }
-%</iso885915>
-% \end{macrocode}
-%
-% \begin{macrocode}
-%<*iso885916>
-\@@_declare_eight_bit_encoding:nnn { iso885916 }
- {
- { A1 } { 0104 }
- { A2 } { 0105 }
- { A3 } { 0141 }
- { A4 } { 20AC }
- { A5 } { 201E }
- { A6 } { 0160 }
- { A8 } { 0161 }
- { AA } { 0218 }
- { AC } { 0179 }
- { AE } { 017A }
- { AF } { 017B }
- { B2 } { 010C }
- { B3 } { 0142 }
- { B4 } { 017D }
- { B5 } { 201D }
- { B8 } { 017E }
- { B9 } { 010D }
- { BA } { 0219 }
- { BC } { 0152 }
- { BD } { 0153 }
- { BE } { 0178 }
- { BF } { 017C }
- { C3 } { 0102 }
- { C5 } { 0106 }
- { D0 } { 0110 }
- { D1 } { 0143 }
- { D5 } { 0150 }
- { D7 } { 015A }
- { D8 } { 0170 }
- { DD } { 0118 }
- { DE } { 021A }
- { E3 } { 0103 }
- { E5 } { 0107 }
- { F0 } { 0111 }
- { F1 } { 0144 }
- { F5 } { 0151 }
- { F7 } { 015B }
- { F8 } { 0171 }
- { FD } { 0119 }
- { FE } { 021B }
- }
- {
- }
-%</iso885916>
+%</initex|package>
% \end{macrocode}
%
% \end{implementation}
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins
index 4605b50c851..8f6983abde3 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins
@@ -1,6 +1,6 @@
\iffalse meta-comment
-File l3str.ins Copyright (C) 2011 The LaTeX3 Project
+File l3str.ins Copyright (C) 2011, 2013 The LaTeX3 Project
It may be distributed and/or modified under the conditions of the
LaTeX Project Public License (LPPL), either version 1.3c of this
@@ -38,6 +38,7 @@ Do not distribute a modified version of this file.
\generate{\file{l3flag.sty} {\from{l3flag.dtx} {package}}}
\generate{\file{l3str.sty} {\from{l3str.dtx} {package}}}
\generate{\file{l3regex.sty} {\from{l3regex.dtx} {package}}}
+\generate{\file{l3str-convert.sty} {\from{l3str-convert.dtx} {package}}}
\generate{\file{l3str-format.sty} {\from{l3str-format.dtx} {package}}}
\generate{\file{l3tl-analysis.sty} {\from{l3tl-analysis.dtx} {package}}}
\generate{\file{l3tl-build.sty} {\from{l3tl-build.dtx} {package}}}
@@ -45,36 +46,36 @@ Do not distribute a modified version of this file.
% Escapings.
\generate{%
- \file{l3str-esc-hex.def} {\from{l3str.dtx}{hex}}%
- \file{l3str-esc-name.def} {\from{l3str.dtx}{name}}%
- \file{l3str-esc-string.def} {\from{l3str.dtx}{string}}%
- \file{l3str-esc-url.def} {\from{l3str.dtx}{url}}%
+ \file{l3str-esc-hex.def} {\from{l3str-convert.dtx}{hex}}%
+ \file{l3str-esc-name.def} {\from{l3str-convert.dtx}{name}}%
+ \file{l3str-esc-string.def} {\from{l3str-convert.dtx}{string}}%
+ \file{l3str-esc-url.def} {\from{l3str-convert.dtx}{url}}%
}
% UTF encodings.
\generate{%
- \file{l3str-enc-utf8.def} {\from{l3str.dtx}{utf8}}%
- \file{l3str-enc-utf16.def} {\from{l3str.dtx}{utf16}}%
- \file{l3str-enc-utf32.def} {\from{l3str.dtx}{utf32}}%
+ \file{l3str-enc-utf8.def} {\from{l3str-convert.dtx}{utf8}}%
+ \file{l3str-enc-utf16.def} {\from{l3str-convert.dtx}{utf16}}%
+ \file{l3str-enc-utf32.def} {\from{l3str-convert.dtx}{utf32}}%
}
% ISO-8859 encodings.
\generate{%
- \file{l3str-enc-iso88591.def} {\from{l3str.dtx}{iso88591}}%
- \file{l3str-enc-iso88592.def} {\from{l3str.dtx}{iso88592}}%
- \file{l3str-enc-iso88593.def} {\from{l3str.dtx}{iso88593}}%
- \file{l3str-enc-iso88594.def} {\from{l3str.dtx}{iso88594}}%
- \file{l3str-enc-iso88595.def} {\from{l3str.dtx}{iso88595}}%
- \file{l3str-enc-iso88596.def} {\from{l3str.dtx}{iso88596}}%
- \file{l3str-enc-iso88597.def} {\from{l3str.dtx}{iso88597}}%
- \file{l3str-enc-iso88598.def} {\from{l3str.dtx}{iso88598}}%
- \file{l3str-enc-iso88599.def} {\from{l3str.dtx}{iso88599}}%
- \file{l3str-enc-iso885910.def} {\from{l3str.dtx}{iso885910}}%
- \file{l3str-enc-iso885911.def} {\from{l3str.dtx}{iso885911}}%
- \file{l3str-enc-iso885913.def} {\from{l3str.dtx}{iso885913}}%
- \file{l3str-enc-iso885914.def} {\from{l3str.dtx}{iso885914}}%
- \file{l3str-enc-iso885915.def} {\from{l3str.dtx}{iso885915}}%
- \file{l3str-enc-iso885916.def} {\from{l3str.dtx}{iso885916}}%
+ \file{l3str-enc-iso88591.def} {\from{l3str-convert.dtx}{iso88591}}%
+ \file{l3str-enc-iso88592.def} {\from{l3str-convert.dtx}{iso88592}}%
+ \file{l3str-enc-iso88593.def} {\from{l3str-convert.dtx}{iso88593}}%
+ \file{l3str-enc-iso88594.def} {\from{l3str-convert.dtx}{iso88594}}%
+ \file{l3str-enc-iso88595.def} {\from{l3str-convert.dtx}{iso88595}}%
+ \file{l3str-enc-iso88596.def} {\from{l3str-convert.dtx}{iso88596}}%
+ \file{l3str-enc-iso88597.def} {\from{l3str-convert.dtx}{iso88597}}%
+ \file{l3str-enc-iso88598.def} {\from{l3str-convert.dtx}{iso88598}}%
+ \file{l3str-enc-iso88599.def} {\from{l3str-convert.dtx}{iso88599}}%
+ \file{l3str-enc-iso885910.def} {\from{l3str-convert.dtx}{iso885910}}%
+ \file{l3str-enc-iso885911.def} {\from{l3str-convert.dtx}{iso885911}}%
+ \file{l3str-enc-iso885913.def} {\from{l3str-convert.dtx}{iso885913}}%
+ \file{l3str-enc-iso885914.def} {\from{l3str-convert.dtx}{iso885914}}%
+ \file{l3str-enc-iso885915.def} {\from{l3str-convert.dtx}{iso885915}}%
+ \file{l3str-enc-iso885916.def} {\from{l3str-convert.dtx}{iso885916}}%
}
\endbatchfile
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx
index 4ceef260a99..b9e5bbd087e 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3tl-analysis.dtx
@@ -126,7 +126,7 @@
% information about individual tokens (category code, character code),
% as well as reconstruct the token list quickly. This internal format is
% used in \pkg{l3regex} where we need to support arbitrary tokens, and
-% it is used in conversion functions in \pkg{l3str}, where we wish to
+% it is used in conversion functions in \pkg{l3str-convert}, where we wish to
% support clusters of characters instead of single tokens.
%
% We thus need a way to encode any \meta{token} (even begin-group and
@@ -143,12 +143,12 @@
% The \meta{tokens} \texttt{o}- \emph{and} \texttt{x}-expand to the
% original token in the token list or to the cluster of tokens
% corresponding to one Unicode character in the given encoding (for
-% \pkg{l3str}). The \meta{catcode} is given as a single hexadecimal
+% \pkg{l3str-convert}). The \meta{catcode} is given as a single hexadecimal
% digit, $0$ for control sequences. The \meta{char code} is given as a
% decimal number, $-1$ for control sequences.
%
% Using delimited arguments lets us build the \meta{tokens}
-% progressively when doing an encoding conversion in \pkg{l3str}. On the
+% progressively when doing an encoding conversion in \pkg{l3str-convert}. On the
% other hand, the delimiter \cs{s__tl} may not appear unbraced in
% \meta{tokens}. This is not a problem because we are careful to wrap
% control sequences in braces (as an argument to \cs{exp_not:n}) when
@@ -423,7 +423,7 @@
\cs_new_protected:Npn \@@_disable_loop:N #1
{
\tex_lccode:D \c_zero `#1 ~
- \tl_to_lowercase:n { \tex_let:D ^^@ } \c_undefined:D
+ \tl_to_lowercase:n { \tex_let:D ^^@ } \tex_undefined:D
\@@_disable_loop:N
}
\group_end:
@@ -807,7 +807,7 @@
{
\cs_new:Npn \@@_b_char:Nww #1
{
- \if_meaning:w #1 \c_undefined:D ? \else:
+ \if_meaning:w #1 \tex_undefined:D ? \else:
\if_catcode:w #1 \c_catcode_other_token C \else:
\if_catcode:w #1 \c_catcode_letter_token B \else:
\if_catcode:w #1 \c_math_toggle_token 3 \else:
@@ -1073,7 +1073,7 @@
{ \str_count:n { #1 ~ ( #4 #2 #3 ) } }
> { \l_iow_line_count_int - \c_three }
{
- \str_substr:nnn { #1 ~ ( #4 #2 #3 ) } \c_one
+ \str_range:nnn { #1 ~ ( #4 #2 #3 ) } \c_one
{
\l_iow_line_count_int - \c_three
- \str_count:N \c_@@_show_etc_str
diff --git a/Master/texmf-dist/source/latex/l3experimental/xgalley/l3galley.dtx b/Master/texmf-dist/source/latex/l3experimental/xgalley/l3galley.dtx
index e2407f043a7..60ebd14993f 100644
--- a/Master/texmf-dist/source/latex/l3experimental/xgalley/l3galley.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/xgalley/l3galley.dtx
@@ -37,7 +37,7 @@
%
%<*driver|package>
\RequirePackage{xparse}
-\GetIdInfo$Id: l3galley.dtx 4388 2012-12-20 22:15:35Z joseph $
+\GetIdInfo$Id: l3galley.dtx 4402 2013-01-08 08:59:44Z bruno $
{L3 Experimental galley code}
%</driver|package>
%<*driver>
@@ -279,7 +279,7 @@
% \end{syntax}
% Adds a cutout section to the active paragraph shape, leaving
% \meta{unaltered lines} unchanged and then applying the \meta{indents}
-% (a comma list). The cutout will be places on the left or right as indicated
+% (a comma list). The cutout will be placed on the left or right as indicated
% by the function name, and will apply to exactly the number of lines
% specified (the total of the \meta{unaltered lines} and the number of
% entries in the \meta{indents} list). Several cutouts may be applied