author     Karl Berry <karl@freefriends.org>   2014-07-20 22:14:30 +0000
committer  Karl Berry <karl@freefriends.org>   2014-07-20 22:14:30 +0000
commit     6c2678db440b5260ec13602bde6efebdd00f8549 (patch)
tree       ed93fc7244f4e5ef1baf86ccb157597dabf6ac1e /Master/texmf-dist/source
parent     9790eead4cceaffc951c93887d06de70c02e196a (diff)
l3
git-svn-id: svn://tug.org/texlive/trunk@34671 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Master/texmf-dist/source')
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx     75
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3str-expl.dtx (renamed from Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx)     3
-rw-r--r--  Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins     6
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/expl3.dtx     54
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3.ins     4
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3basics.dtx     291
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3candidates.dtx     1529
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3clist.dtx     342
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3doc.dtx     11
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3drivers.dtx     14
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3expan.dtx     34
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3file.dtx     34
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3format.ins     1
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3fp-expo.dtx     16
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3fp-parse.dtx     15
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3fp.dtx     19
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3int.dtx     4
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3keys.dtx     3
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3prop.dtx     59
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3seq.dtx     190
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3skip.dtx     147
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3str.dtx     573
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3tl.dtx     151
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3unicode-data.def     378
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx     768
-rw-r--r--  Master/texmf-dist/source/latex/l3packages/l3keys2e/l3keys2e.dtx     6
-rw-r--r--  Master/texmf-dist/source/latex/l3packages/xparse/xparse.dtx     6
-rw-r--r--  Master/texmf-dist/source/latex/l3packages/xtemplate/xtemplate.dtx     6
28 files changed, 3522 insertions, 1217 deletions
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx
index 00eeafe26e1..182e09b4e51 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3regex.dtx
@@ -1,6 +1,6 @@
% \iffalse meta-comment
%
-%% File: l3regex.dtx Copyright (C) 2011-2013 The LaTeX3 Project
+%% File: l3regex.dtx Copyright (C) 2011-2014 The LaTeX3 Project
%%
%% It may be distributed and/or modified under the conditions of the
%% LaTeX Project Public License (LPPL), either version 1.3c of this
@@ -35,7 +35,7 @@
%
%<*driver|package>
\RequirePackage{expl3}
-\GetIdInfo$Id: l3regex.dtx 5067 2014-06-06 16:51:35Z bruno $
+\GetIdInfo$Id: l3regex.dtx 5218 2014-07-17 14:23:10Z bruno $
{L3 Experimental regular expressions}
%</driver|package>
%<*driver>
@@ -105,7 +105,7 @@
% it can be compiled once, and stored in a regex
% variable using \cs{regex_const:Nn}. For example,
% \begin{verbatim}
-% \regex_const:Nn \c_foo_regex { \c{begin} \cB. (\c[^BE].*) \cE. }
+% \regex_const:Nn \c_foo_regex { \c{begin} \cB. (\c[^BE].*) \cE. }
% \end{verbatim}
% stores in \cs{c_foo_regex} a regular expression which matches the
% starting marker for an environment: \cs{begin}, followed by a
@@ -314,6 +314,7 @@
% |(?i)[^aeiou]| matches any character which is not a vowel. Neither
% character properties, nor |\c{...}| nor |\u{...}| are affected by the
% |i| option.
+% ^^A \]
%
% In character classes, only |[|, |^|, |-|, |]|, |\| and spaces are
% special, and should be escaped. Other non-alphanumeric characters can
@@ -353,11 +354,13 @@
%
% \subsection{Syntax of the replacement text}
%
-% Most of the features described in regular expressions do not make sense
-% within the replacement text. Escaped characters are supported as inside
-% regular expressions. The whole match is accessed as |\0|, and the first
-% $9$ submatches are accessed as |\1|, \ldots{}, |\9|. Submatches with
-% numbers higher than $9$ are accessed as |\g{|\meta{number}|}| instead.
+% Most of the features described in regular expressions do not make
+% sense within the replacement text. Escaped characters are supported
+% as inside regular expressions. The whole match is accessed as~|\0|,
+% and the first~$9$ submatches are accessed as |\1|, \ldots{},~|\9|.
+% Further submatches are accessed through |\g{|\meta{number}|}| where
+% \meta{number} is any non-negative integer. If there are fewer than
+% \meta{number} capturing groups, the submatch is empty.
%
% For instance,
% \begin{verbatim}
@@ -366,18 +369,37 @@
% \end{verbatim}
% results in \cs{l_my_tl} holding |H(ell--el)(o,--o) w(or--o)(ld--l)!|
%
+% Submatches keep the same category codes as in the original token list.
% The characters inserted by the replacement have category code $12$
-% (other) by default. The escape sequence |\c| allows to insert characters
+% (other) by default, with the exception of space characters. Spaces
+% inserted through \verb*|\ | have category code $10$, while spaces
+% inserted through |\x20| or |\x{20}| have category code $12$.
+% The escape sequence |\c| allows to insert characters
% with arbitrary category codes, as well as control sequences.
% \begin{l3regex-syntax}
-% \item[\\cXY] Produces the character |Y| (which can be given as
-% an escape sequence such as |\t| for tab) with category code |X|,
-% which must be one of |CBEMTPUDSLOA|.
+% \item[\\cXY] Produces the character~|Y| (which can be given as an
+% escape sequence such as~|\t| for tab, or |\(| or~|\)| for a
+% parenthesis) with category code~|X|, which must be one of
+% |CBEMTPUDSLOA|.
% \item[\\c\Arg{text}] Produces the control sequence with csname
-% \meta{text}. The \meta{text} may contain references to the submatches
-% |\0|, |\1| \emph{etc.}
+% \meta{text}. The \meta{text} may contain references to the
+% submatches |\0|, |\1|, \emph{etc.}
% \end{l3regex-syntax}
%
+% The escape sequence |\u|\Arg{tl~var~name} allows the contents of the
+% token list with name \meta{tl~var~name} to be inserted directly into
+% the replacement, avoiding the need to escape special characters.
+% Within the construction |\c|\Arg{text}, the |\u|~escape sequence only
+% expands its argument once, in effect performing \cs{tl_to_str:v}.
+% Submatches can be used within the argument of |\u|. For instance,
+% \begin{verbatim}
+% \tl_set:Nn \l_my_one_tl { first }
+% \tl_set:Nn \l_my_two_tl { \emph{second} }
+% \tl_set:Nn \l_my_tl { one , two , one , one }
+% \regex_replace_all:nnN { [^,]+ } { \u{l_my_\0_tl} } \l_my_tl
+% \end{verbatim}
+% results in \cs{l_my_tl} holding |first,\emph{second},first,first|.
+%
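To illustrate the |\c| escapes described above, here is a minimal sketch (not part of the commit text; it reuses the \l_my_tl token list from the earlier examples):

    \tl_set:Nn \l_my_tl { Hello,~world! }
    \regex_replace_all:nnN { [A-Za-z]+ } { \c{emph}\cB\{ \0 \cE\} } \l_my_tl

This should leave \l_my_tl holding \emph{Hello}, \emph{world}!, with the braces inserted as explicit begin-group and end-group characters and \emph inserted as a control sequence.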
% \subsection{Pre-compiling regular expressions}
%
% If a regular expression is to be used several times,
@@ -888,6 +910,15 @@
% \end{macrocode}
% \end{variable}
%
+% \begin{variable}{\l_@@_cs_name_tl}
+% This variable is used in \cs{@@_item_cs:n} to store the csname of
+% the currently-tested token when the regex contains a sub-regex for
+% testing csnames.
+% \begin{macrocode}
+\tl_new:N \l_@@_cs_name_tl
+% \end{macrocode}
+% \end{variable}
+%
% \subsubsection{Testing characters}
%
% \begin{macro}[int]{\@@_break_point:TF}
@@ -1092,22 +1123,25 @@
% indeed matches. The three \cs{exp_after:wN} expand the contents
% of the \tn{toks}\meta{current position} (of the form \cs{exp_not:n}
% \Arg{control sequence}) to \meta{control sequence}.
+% We store the cs name before building states for the cs, as those
+% states may overlap with toks registers storing the user's input.
% \begin{macrocode}
\cs_new_protected:Npn \@@_item_cs:n #1
{
\int_compare:nNnT \l_@@_current_catcode_int = \c_zero
{
\group_begin:
- \@@_single_match:
- \@@_disable_submatches:
- \@@_build_for_cs:n {#1}
- \bool_set_eq:NN \l_@@_saved_success_bool \g_@@_success_bool
- \exp_args:Nx \@@_match:n
+ \tl_set:Nx \l_@@_cs_name_tl
{
\exp_after:wN \exp_after:wN
\exp_after:wN \cs_to_str:N
\tex_the:D \tex_toks:D \l_@@_current_pos_int
}
+ \@@_single_match:
+ \@@_disable_submatches:
+ \@@_build_for_cs:n {#1}
+ \bool_set_eq:NN \l_@@_saved_success_bool \g_@@_success_bool
+ \exp_args:NV \@@_match:n \l_@@_cs_name_tl
\if_meaning:w \c_true_bool \g_@@_success_bool
\group_insert_after:N \@@_break_true:w
\fi:
@@ -4787,6 +4821,7 @@
% As in parsing a regular expression, we use an auxiliary built from
% |#1| if defined. Otherwise, check for escaped digits (standing from
% submatches from $0$ to $9$): anything else is a raw character.
+% We use \cs{token_to_str:N} to give spaces the right category code.
% \begin{macrocode}
\cs_new_protected:Npn \@@_replacement_escaped:N #1
{
@@ -4795,7 +4830,7 @@
\if_int_compare:w \c_one < 1#1 \exp_stop_f:
\@@_replacement_put_submatch:n {#1}
\else:
- \__tl_build_one:n #1
+ \__tl_build_one:o { \token_to_str:N #1 }
\fi:
}
}
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-expl.dtx
index 09d44a5f28c..9305a8fe07c 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.dtx
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str-expl.dtx
@@ -35,8 +35,9 @@
%
%<*driver|package>
\RequirePackage{expl3}
-\GetIdInfo$Id: l3str.dtx 4889 2014-05-26 19:59:25Z joseph $
+\GetIdInfo$Id: l3str-expl.dtx 5226 2014-07-18 18:08:01Z joseph $
{L3 Experimental strings}
+\def\ExplSyntaxName{l3str}
%</driver|package>
%<*driver>
\documentclass[full]{l3doc}
diff --git a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins
index 8f6983abde3..0cb61fee4cc 100644
--- a/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins
+++ b/Master/texmf-dist/source/latex/l3experimental/l3str/l3str.ins
@@ -35,9 +35,9 @@ Do not distribute a modified version of this file.
\keepsilent
-\generate{\file{l3flag.sty} {\from{l3flag.dtx} {package}}}
-\generate{\file{l3str.sty} {\from{l3str.dtx} {package}}}
-\generate{\file{l3regex.sty} {\from{l3regex.dtx} {package}}}
+\generate{\file{l3flag.sty} {\from{l3flag.dtx} {package}}}
+\generate{\file{l3str.sty} {\from{l3str-expl.dtx}{package}}}
+\generate{\file{l3regex.sty} {\from{l3regex.dtx} {package}}}
\generate{\file{l3str-convert.sty} {\from{l3str-convert.dtx} {package}}}
\generate{\file{l3str-format.sty} {\from{l3str-format.dtx} {package}}}
\generate{\file{l3tl-analysis.sty} {\from{l3tl-analysis.dtx} {package}}}
diff --git a/Master/texmf-dist/source/latex/l3kernel/expl3.dtx b/Master/texmf-dist/source/latex/l3kernel/expl3.dtx
index a36906f5137..254a560cea4 100644
--- a/Master/texmf-dist/source/latex/l3kernel/expl3.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/expl3.dtx
@@ -36,8 +36,8 @@
%<*driver|generic|package>
\def\ExplFileName{expl3}
\def\ExplFileDescription{L3 programming layer}
-\def\ExplFileDate{2014/06/10}
-\def\ExplFileVersion{5105}
+\def\ExplFileDate{2014/07/20}
+\def\ExplFileVersion{5241}
%</driver|generic|package>
%<*driver>
\documentclass[full]{l3doc}
@@ -1387,6 +1387,30 @@
% \end{variable}
% \end{macro}
%
+% \begin{variable}[int]{\c__expl_def_ext_tl}
+% Set up to load Unicode data for various string/text manipulations.
+% Set up to load Unicode data for various string/text manipulations.
+% As the full mechanisms are still being constructed, this requires some
+% fiddling about with the \cs{ProvidesExplFile} command. A similar
+% requirement applies a little later when loading the native drivers, but
+% as that may not happen (depending on the options given) the same trick
+% has to be repeated!
+% \begin{macrocode}
+\tl_const:Nn \c__expl_def_ext_tl { def }
+\group_begin:
+\cs_set_protected:Npn \ProvidesExplFile
+ {
+ \char_set_catcode_space:n { `\ }
+ \ProvidesExplFileAux
+ }
+\cs_set_protected:Npn \ProvidesExplFileAux #1#2#3#4
+ {
+ \group_end:
+ \ProvidesFile {#1} [ #2~v#3~#4 ]
+ }
+\@onefilewithoptions { l3unicode-data } [ ] [ ] \c__expl_def_ext_tl
+% \end{macrocode}
+% \end{variable}
+%
% \begin{macro}
% {
% \box_rotate:Nn,
@@ -1455,34 +1479,25 @@
}
% \end{macrocode}
% \end{macro}
-% \begin{variable}[int]{\c__expl_def_ext_tl}
% For native drivers, just load the appropriate file. As \cs{expl3} syntax
% is already on and the full mechanism is only engaged at the end of the
% loader, \cs{ProvidesExplFile} is temporarily redefined here.
% \begin{macrocode}
{
- \cs_set_protected:Npn \ProvidesExplFile
+ \group_begin:
+ \cs_set_protected:Npn \ProvidesExplFile
{
- \group_begin:
- \char_set_catcode_space:n { `\ }
- \ProvidesExplFileAux
+ \char_set_catcode_space:n { `\ }
+ \ProvidesExplFileAux
}
\cs_set_protected:Npn \ProvidesExplFileAux #1#2#3#4
{
\group_end:
\ProvidesFile {#1} [ #2~v#3~#4 ]
}
- \tl_const:Nn \c__expl_def_ext_tl { def }
\@onefilewithoptions { l3 \l__expl_driver_tl } [ ] [ ] \c__expl_def_ext_tl
- \cs_set_protected:Npn \ProvidesExplFile #1#2#3#4
- {
- \ProvidesFile {#1} [ #2~v#3~#4 ]
- \ExplSyntaxOn
- }
- \cs_undefine:N \ProvidesExplFileAux
}
% \end{macrocode}
-% \end{variable}
%
% \begin{macro}{\@pushfilename, \@popfilename}
% \begin{macro}[aux]{\__expl_status_pop:w}
@@ -1658,7 +1673,7 @@
% appropriate case! To allow this loading to take place a temporary
% definition of \cs{ProvidesExplFile} is provided
% \begin{macrocode}
-\cs_set_protected:Npn \ProvidesExplFile
+\cs_set_protected:Npn \ProvidesExplFile
{
\group_begin:
\char_set_catcode_space:n { `\ }
@@ -1667,7 +1682,7 @@
\cs_set_protected:Npn \ProvidesExplFileAux #1#2#3#4
{
\group_end:
- \iow_log:x { File:~#1~#2~v#3~#4 }
+ \iow_log:x { File:~#1~#2~v#3~#4 }
}
\tex_input:D
l3
@@ -1683,6 +1698,11 @@
{ dvips }
}
.def \scan_stop:
+% \end{macrocode}
+% The same considerations apply to loading Unicode support data, so that too
+% is done here.
+% \begin{macrocode}
+\tex_input:D l3unicode-data.def \scan_stop:
\cs_undefine:N \ProvidesExplFile
\cs_undefine:N \ProvidesExplFileAux
% \end{macrocode}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3.ins b/Master/texmf-dist/source/latex/l3kernel/l3.ins
index 72b9fa583c3..3b99651a170 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3.ins
+++ b/Master/texmf-dist/source/latex/l3kernel/l3.ins
@@ -48,6 +48,7 @@ Do not distribute a modified version of this file.
\from{l3basics.dtx} {package}
\from{l3expan.dtx} {package}
\from{l3tl.dtx} {package}
+ \from{l3str.dtx} {package}
\from{l3seq.dtx} {package}
\from{l3int.dtx} {package}
\from{l3quark.dtx} {package}
@@ -82,7 +83,8 @@ Do not distribute a modified version of this file.
\generate{\file{expl3-generic.tex}{\from{expl3.dtx} {generic,loader}}}
\generate{\file{l3doc.cls} {\from{l3doc.dtx} {class}}}
-\generate{\file{l3doc.ist} {\from{l3doc.dtx} {docist}}}
+% not distributed:
+%\generate{\file{l3doc.ist} {\from{l3doc.dtx} {docist}}}
\generate{\file{l3dvipdfmx.def} {\from{l3drivers.dtx} {package,dvipdfmx} }}
\generate{\file{l3dvips.def} {\from{l3drivers.dtx} {package,dvips} }}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3basics.dtx b/Master/texmf-dist/source/latex/l3kernel/l3basics.dtx
index 339eb9eacb1..b36707ccc97 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3basics.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3basics.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3basics.dtx 5033 2014-06-04 05:32:44Z joseph $
+\GetIdInfo$Id: l3basics.dtx 5187 2014-07-09 12:02:17Z will $
{L3 Basic definitions}
%</driver|package>
%<*driver>
@@ -743,7 +743,7 @@
% \end{verbatim}
% after two expansions of \cs{use:c}.
%
-% \begin{function}[TF, added = 2012-11-10]
+% \begin{function}[EXP,added = 2012-11-10]
% {\cs_if_exist_use:N,\cs_if_exist_use:c}
% \begin{syntax}
% \cs{cs_if_exist_use:N} \meta{control sequence}
@@ -1088,89 +1088,6 @@
% \cs{cs_if_exist:N}).
% \end{function}
%
-% \subsection{Testing string equality}
-%
-% \begin{function}[EXP,pTF]
-% {
-% \str_if_eq:nn, \str_if_eq:Vn, \str_if_eq:on, \str_if_eq:no,
-% \str_if_eq:nV, \str_if_eq:VV
-% }
-% \begin{syntax}
-% \cs{str_if_eq_p:nn} \Arg{tl_1} \Arg{tl_2}
-% \cs{str_if_eq:nnTF} \Arg{tl_1} \Arg{tl_2} \Arg{true code} \Arg{false code}
-% \end{syntax}
-% Compares the two \meta{token lists} on a character by character
-% basis, and is \texttt{true} if the two lists contain the same
-% characters in the same order. Thus for example
-% \begin{verbatim}
-% \str_if_eq_p:no { abc } { \tl_to_str:n { abc } }
-% \end{verbatim}
-% is logically \texttt{true}.
-% \end{function}
-%
-% \begin{function}[EXP,pTF, added = 2012-06-05]{\str_if_eq_x:nn}
-% \begin{syntax}
-% \cs{str_if_eq_x_p:nn} \Arg{tl_1} \Arg{tl_2}
-% \cs{str_if_eq_x:nnTF} \Arg{tl_1} \Arg{tl_2} \Arg{true code} \Arg{false code}
-% \end{syntax}
-% Compares the full expansion of two \meta{token lists} on a character by
-% character basis, and is \texttt{true} if the two lists contain the same
-% characters in the same order. Thus for example
-% \begin{verbatim}
-% \str_if_eq_x_p:nn { abc } { \tl_to_str:n { abc } }
-% \end{verbatim}
-% is logically \texttt{true}.
-% \end{function}
-%
-% \begin{function}[added = 2013-07-24, EXP, TF]{\str_case:nn, \str_case:on}
-% \begin{syntax}
-% \cs{str_case:nnTF} \Arg{test string} \\
-% ~~|{| \\
-% ~~~~\Arg{string case_1} \Arg{code case_1} \\
-% ~~~~\Arg{string case_2} \Arg{code case_2} \\
-% ~~~~\ldots \\
-% ~~~~\Arg{string case_n} \Arg{code case_n} \\
-% ~~|}| \\
-% ~~\Arg{true code}
-% ~~\Arg{false code}
-% \end{syntax}
-% This function compares the \meta{test string} in turn with each
-% of the \meta{string cases}. If the two are equal (as described for
-% \cs{str_if_eq:nnTF} then the
-% associated \meta{code} is left in the input stream. If any of the
-% cases are matched, the \meta{true code} is also inserted into the
-% input stream (after the code for the appropriate case), while if none
-% match then the \meta{false code} is inserted. The function
-% \cs{str_case:nn}, which does nothing if there is no match, is also
-% available.
-% \end{function}
-%
-% \begin{function}[added = 2013-07-24, EXP, TF]{\str_case_x:nn}
-% \begin{syntax}
-% \cs{str_case_x:nnn} \Arg{test string} \\
-% ~~|{| \\
-% ~~~~\Arg{string case_1} \Arg{code case_1} \\
-% ~~~~\Arg{string case_2} \Arg{code case_2} \\
-% ~~~~\ldots \\
-% ~~~~\Arg{string case_n} \Arg{code case_n} \\
-% ~~|}| \\
-% ~~\Arg{true code}
-% ~~\Arg{false code}
-% \end{syntax}
-% This function compares the full expansion of the \meta{test string}
-% in turn with the full expansion of the \meta{string cases}. If the two
-% full expansions are equal (as described for \cs{str_if_eq:nnTF} then the
-% associated \meta{code} is left in the input stream. If any of the
-% cases are matched, the \meta{true code} is also inserted into the
-% input stream (after the code for the appropriate case), while if none
-% match then the \meta{false code} is inserted. The function
-% \cs{str_case_x:nn}, which does nothing if there is no match, is also
-% available.
-% The \meta{test string} is expanded in each comparison, and must
-% always yield the same result: for example, random numbers must
-% not be used within this string.
-% \end{function}
-%
% \subsection{Engine-specific conditionals}
%
% \begin{function}[updated = 2011-09-06,EXP,pTF]{\luatex_if_engine:}
@@ -1377,27 +1294,6 @@
% case (if one is found) and either the \texttt{true code} or
% \texttt{false code} for the over all outcome, as appropriate.
% \end{function}
-%
-% \begin{function}[EXP]{\__str_if_eq_x:nn}
-% \begin{syntax}
-% \cs{__str_if_eq_x:nn} \Arg{tl_1} \Arg{tl_2}
-% \end{syntax}
-% Compares the full expansion of two \meta{token lists} on a character by
-% character basis, and is \texttt{true} if the two lists contain the same
-% characters in the same order. Leaves |0| in the input stream if the
-% condition is true, and |+1| or |-1| otherwise.
-% \end{function}
-%
-% \begin{function}[EXP]{\__str_if_eq_x_return:nn}
-% \begin{syntax}
-% \cs{__str_if_eq_x_return:nn} \Arg{tl_1} \Arg{tl_2}
-% \end{syntax}
-% Compares the full expansion of two \meta{token lists} on a character by
-% character basis, and is \texttt{true} if the two lists contain the same
-% characters in the same order. Either \cs{prg_return_true:} or
-% \cs{prg_return_false:} is then left in the input stream. This is a version
-% of \cs{str_if_eq_x:nn(TF)} coded for speed.
-% \end{function}
%
% \end{documentation}
%
@@ -3074,179 +2970,6 @@
% \end{macrocode}
% \end{macro}
%
-% \subsection{String comparisons}
-%
-% \begin{macro}[int, EXP]{\__str_if_eq_x:nn}
-% \begin{macro}[aux, EXP]{\__str_escape_x:n}
-% String comparisons rely on the primitive \cs{(pdf)strcmp} if available:
-% \LuaTeX{} does not have it, so emulation is required. As the net result
-% is that we do not \emph{always} use the primitive, the correct approach
-% is to wrap up in a function with defined behaviour. That's done by
-% providing a wrapper and then redefining in the \LuaTeX{} case. Note that
-% the necessary Lua code is covered in \pkg{l3boostrap}: long-term this may
-% need to go into a separate Lua file, but at present it's somewhere that
-% spaces are not skipped for ease-of-input. The need to detokenize and force
-% expansion of input arises from the case where a |#| token is used in the
-% input, \emph{e.g.}~|\__str_if_eq_x:nn {#} { \tl_to_str:n {#} }|, which
-% otherwise will fail as \cs{luatex_luaescapestring:D} does not double
-% such tokens.
-% \begin{macrocode}
-\cs_new:Npn \__str_if_eq_x:nn #1#2 { \pdftex_strcmp:D {#1} {#2} }
-\luatex_if_engine:T
- {
- \cs_set:Npn \__str_if_eq_x:nn #1#2
- {
- \luatex_directlua:D
- {
- l3kernel.strcmp
- (
- " \__str_escape_x:n {#1} " ,
- " \__str_escape_x:n {#2} "
- )
- }
- }
- \cs_new:Npn \__str_escape_x:n #1
- {
- \luatex_luaescapestring:D
- {
- \etex_detokenize:D \exp_after:wN { \luatex_expanded:D {#1} }
- }
- }
- }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}[pTF, EXP]{\str_if_eq:nn, \str_if_eq_x:nn}
-% Modern engines provide a direct way of comparing two token lists,
-% but returning a number. This set of conditionals therefore make life
-% a bit clearer. The \texttt{nn} and \texttt{xx} versions are created
-% directly as this is most efficient. These should eventually
-% move somewhere else.
-% \begin{macrocode}
-\prg_new_conditional:Npnn \str_if_eq:nn #1#2 { p , T , F , TF }
- {
- \if_int_compare:w \__str_if_eq_x:nn { \exp_not:n {#1} } { \exp_not:n {#2} }
- = \c_zero
- \prg_return_true: \else: \prg_return_false: \fi:
- }
-\prg_new_conditional:Npnn \str_if_eq_x:nn #1#2 { p , T , F , TF }
- {
- \if_int_compare:w \__str_if_eq_x:nn {#1} {#2} = \c_zero
- \prg_return_true: \else: \prg_return_false: \fi:
- }
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macro}[int, EXP]{\__str_if_eq_x_return:nn}
-% It turns out that we often need to compare a token list
-% with the result of applying some function to it, and
-% return with \cs{prg_return_true/false:}. This test is
-% similar to \cs{str_if_eq:nnTF}, but hard-coded for speed.
-% \begin{macrocode}
-\cs_new:Npn \__str_if_eq_x_return:nn #1 #2
- {
- \if_int_compare:w \__str_if_eq_x:nn {#1} {#2} = \c_zero
- \prg_return_true:
- \else:
- \prg_return_false:
- \fi:
- }
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macro}[EXP]{\str_case:nn, \str_case_x:nn}
-% \begin{macro}[EXP, TF]{\str_case:nn, \str_case_x:nn}
-% \begin{macro}[EXP, aux]{\__str_case:nnTF, \__str_case_x:nnTF}
-% \begin{macro}[int, EXP]{\__prg_case_end:nw}
-% \begin{macro}[aux, EXP]
-% {\__str_case:nw, \__str_case_x:nw, \__str_case_end:nw}
-% The aim here is to allow the case statement to be evaluated
-% using a known number of expansion steps (two), and without
-% needing to use an explicit \enquote{end of recursion} marker.
-% That is achieved by using the test input as the final case,
-% as this will always be true. The trick is then to tidy up
-% the output such that the appropriate case code plus either
-% the \texttt{true} or \texttt{false} branch code is inserted.
-% \begin{macrocode}
-\cs_new:Npn \str_case:nn #1#2
- {
- \tex_romannumeral:D
- \__str_case:nnTF {#1} {#2} { } { }
- }
-\cs_new:Npn \str_case:nnT #1#2#3
- {
- \tex_romannumeral:D
- \__str_case:nnTF {#1} {#2} {#3} { }
- }
-\cs_new:Npn \str_case:nnF #1#2
- {
- \tex_romannumeral:D
- \__str_case:nnTF {#1} {#2} { }
- }
-\cs_new:Npn \str_case:nnTF #1#2
- {
- \tex_romannumeral:D
- \__str_case:nnTF {#1} {#2}
- }
-\cs_new:Npn \__str_case:nnTF #1#2#3#4
- { \__str_case:nw {#1} #2 {#1} { } \q_mark {#3} \q_mark {#4} \q_stop }
-\cs_new:Npn \__str_case:nw #1#2#3
- {
- \str_if_eq:nnTF {#1} {#2}
- { \__str_case_end:nw {#3} }
- { \__str_case:nw {#1} }
- }
-\cs_new:Npn \str_case_x:nn #1#2
- {
- \tex_romannumeral:D
- \__str_case_x:nnTF {#1} {#2} { } { }
- }
-\cs_new:Npn \str_case_x:nnT #1#2#3
- {
- \tex_romannumeral:D
- \__str_case_x:nnTF {#1} {#2} {#3} { }
- }
-\cs_new:Npn \str_case_x:nnF #1#2
- {
- \tex_romannumeral:D
- \__str_case_x:nnTF {#1} {#2} { }
- }
-\cs_new:Npn \str_case_x:nnTF #1#2
- {
- \tex_romannumeral:D
- \__str_case_x:nnTF {#1} {#2}
- }
-\cs_new:Npn \__str_case_x:nnTF #1#2#3#4
- { \__str_case_x:nw {#1} #2 {#1} { } \q_mark {#3} \q_mark {#4} \q_stop }
-\cs_new:Npn \__str_case_x:nw #1#2#3
- {
- \str_if_eq_x:nnTF {#1} {#2}
- { \__str_case_end:nw {#3} }
- { \__str_case_x:nw {#1} }
- }
-% \end{macrocode}
-% To tidy up the recursion, there are two outcomes. If there was a hit to
-% one of the cases searched for, then |#1| will be the code to insert,
-% |#2| will be the \emph{next} case to check on and |#3| will be all of
-% the rest of the cases code. That means that |#4| will be the \texttt{true}
-% branch code, and |#5| will be tidy up the spare \cs{q_mark} and the
-% \texttt{false} branch. On the other hand, if none of the cases matched
-% then we arrive here using the \enquote{termination} case of comparing
-% the search with itself. That means that |#1| will be empty, |#2| will be
-% the first \cs{q_mark} and so |#4| will be the \texttt{false} code (the
-% \texttt{true} code is mopped up by |#3|).
-% \begin{macrocode}
-\cs_new:Npn \__prg_case_end:nw #1#2#3 \q_mark #4#5 \q_stop
- { \c_zero #1 #4 }
-\cs_new_eq:NN \__str_case_end:nw \__prg_case_end:nw
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
% \subsection{Breaking out of mapping functions}
%
% \begin{macro}[EXP]{\__prg_break_point:Nn, \__prg_map_break:Nn}
@@ -3285,16 +3008,6 @@
% \end{macro}
% \end{macro}
%
-% \subsection{Deprecated functions}
-%
-% \begin{macro}{\str_case:nnn, \str_case_x:nnn}
-% Deprecated 2013-07-15.
-% \begin{macrocode}
-\cs_new_eq:NN \str_case:nnn \str_case:nnF
-\cs_new_eq:NN \str_case_x:nnn \str_case_x:nnF
-% \end{macrocode}
-% \end{macro}
-%
% \begin{macrocode}
%</initex|package>
% \end{macrocode}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3candidates.dtx b/Master/texmf-dist/source/latex/l3kernel/l3candidates.dtx
index a65e95aaca8..f722c6f0a1d 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3candidates.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3candidates.dtx
@@ -38,7 +38,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3candidates.dtx 4972 2014-05-31 16:42:18Z bruno $
+\GetIdInfo$Id: l3candidates.dtx 5232 2014-07-18 19:32:54Z joseph $
{L3 Experimental additions to l3kernel}
%</driver|package>
%<*driver>
@@ -70,14 +70,40 @@
%
% \begin{documentation}
%
+% \section{Important notice}
+%
% This module provides a space in which functions can be added to
-% \pkg{l3kernel} (\pkg{expl3}) while still being experimental. As such, the
+% \pkg{l3kernel} (\pkg{expl3}) while still being experimental.
+% \begin{quote}
+% \bfseries
+% As such, the
% functions here may not remain in their current form, or indeed at all,
-% in \pkg{l3kernel} in the future. In contrast to the material in
+% in \pkg{l3kernel} in the future.
+% \end{quote}
+% In contrast to the material in
% \pkg{l3experimental}, the functions here are all \emph{small} additions to
% the kernel. We encourage programmers to test them out and report back on
% the \texttt{LaTeX-L} mailing list.
%
+% \medskip
+%
+% Thus, if you intend to use any of these functions from the candidate module in a public package
+% offered to others for productive use (e.g., being placed on CTAN), please consider the following points carefully:
+% \begin{itemize}
+% \item Be prepared for your public packages to require updating when such functions
+% are finalized.
+% \item Consider informing us that you use a particular function in your public package, e.g., by
+% discussing this on the \texttt{LaTeX-L}
+% mailing list. This way it becomes easier to coordinate any necessary updates without issues
+% for the users of your package.
+% \item Discussing and understanding use cases for a particular addition or concept also helps to
+% ensure that we provide the right interfaces in the final version, so please give us feedback
+% if you consider a certain candidate function useful (or not).
+% \end{itemize}
+% We only add functions in this space if we consider them to be serious candidates for final inclusion
+% in the kernel. However, real use sometimes leads to better ideas, so functions from this module are
+% \textbf{not necessarily stable} and we may have to adjust them!
+%
% \section{Additions to \pkg{l3box}}
%
% \subsection{Affine transformations}
@@ -117,8 +143,27 @@
% of the \meta{box} before the resizing is applied. A negative size will
% cause the material in the \meta{box} to be reversed in direction, but the
% reference point of the \meta{box} will be unchanged.
-% Thus negative $y$-sizes will result in a box a depth dependent on the
-% height of the original box a height dependent on the depth.
+% Thus negative $y$-sizes will result in a box with depth dependent on the
+% height of the original box and height dependent on the depth of the original.
+% The resizing applies within the current \TeX{} group level.
+% \end{function}
+%
+% \begin{function}
+% {\box_resize_to_ht:Nn, \box_resize_to_ht:cn}
+% \begin{syntax}
+% \cs{box_resize_to_ht:Nn} \meta{box} \Arg{y-size}
+% \end{syntax}
+% Resize the \meta{box} to \meta{y-size} vertically, scaling the horizontal
+% size by the same amount (\meta{y-size} is a dimension expression).
+% The \meta{y-size} is the height only, not including depth, of
+% the box.
+% The updated \meta{box} will be an hbox, irrespective of the nature
+% of the \meta{box} before the resizing is applied.
+% A negative size will
+% cause the material in the \meta{box} to be reversed in direction, but the
+% reference point of the \meta{box} will be unchanged.
+% Thus negative $y$-sizes will result in a box with depth dependent on the
+% height of the original box and height dependent on the depth of the original.
% The resizing applies within the current \TeX{} group level.
% \end{function}
%
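A minimal usage sketch for the new \box_resize_to_ht:Nn interface documented above (illustrative only, using the standard scratch box \l_tmpa_box):

    \hbox_set:Nn \l_tmpa_box { Some~text }
    \box_resize_to_ht:Nn \l_tmpa_box { 2cm } % height becomes 2cm, width scales by the same factor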
@@ -137,6 +182,21 @@
% The resizing applies within the current \TeX{} group level.
% \end{function}
%
+% \begin{function}[added = 2014-07-03]
+% {\box_resize_to_wd_and_ht:Nnn, \box_resize_to_wd_and_ht:cnn}
+% \begin{syntax}
+% \cs{box_resize_to_wd_and_ht:Nnn} \meta{box} \Arg{x-size} \Arg{y-size}
+% \end{syntax}
+% Resize the \meta{box} to \meta{x-size} horizontally and \meta{y-size}
+% vertically (both of the sizes are dimension expressions).
+% The \meta{y-size} is the \emph{height} of the box, ignoring any depth.
+% The updated \meta{box} will be an hbox, irrespective of the nature
+% of the \meta{box} before the resizing is applied. Negative sizes will
+% cause the material in the \meta{box} to be reversed in direction, but the
+% reference point of the \meta{box} will be unchanged.
+% \end{function}
+%
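Similarly, a sketch for \box_resize_to_wd_and_ht:Nnn, where the two axes are scaled independently (again illustrative only):

    \box_resize_to_wd_and_ht:Nnn \l_tmpa_box { 4cm } { 1cm } % width 4cm; height (excluding depth) 1cm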
% \begin{function}{\box_rotate:Nn, \box_rotate:cn}
% \begin{syntax}
% \cs{box_rotate:Nn} \meta{box} \Arg{angle}
@@ -249,70 +309,6 @@
% correctly.
% \end{variable}
%
-% \section{Additions to \pkg{l3clist}}
-%
-% \begin{function}[EXP]{\clist_item:Nn, \clist_item:cn, \clist_item:nn}
-% \begin{syntax}
-% \cs{clist_item:Nn} \meta{comma list} \Arg{integer expression}
-% \end{syntax}
-% Indexing items in the \meta{comma list} from~$1$ at the top (left), this
-% function will evaluate the \meta{integer expression} and leave the
-% appropriate item from the comma list in the input stream. If the
-% \meta{integer expression} is negative, indexing occurs from the
-% bottom (right) of the comma list. When the \meta{integer expression}
-% is larger than the number of items in the \meta{comma list} (as
-% calculated by \cs{clist_count:N}) then the function will expand to
-% nothing.
-% \begin{texnote}
-% The result is returned within the \tn{unexpanded}
-% primitive (\cs{exp_not:n}), which means that the \meta{item}
-% will not expand further when appearing in an \texttt{x}-type
-% argument expansion.
-% \end{texnote}
-% \end{function}
-%
-% \begin{function}
-% {
-% \clist_set_from_seq:NN, \clist_set_from_seq:cN,
-% \clist_set_from_seq:Nc, \clist_set_from_seq:cc,
-% \clist_gset_from_seq:NN, \clist_gset_from_seq:cN,
-% \clist_gset_from_seq:Nc, \clist_gset_from_seq:cc
-% }
-% \begin{syntax}
-% \cs{clist_set_from_seq:NN} \meta{comma list} \meta{sequence}
-% \end{syntax}
-% Sets the \meta{comma list} to be equal to the content of the
-% \meta{sequence}.
-% Items which contain either spaces or commas are surrounded by braces.
-% \end{function}
-%
-% \begin{function}
-% {
-% \clist_const:Nn, \clist_const:Nx,
-% \clist_const:cn, \clist_const:cx
-% }
-% \begin{syntax}
-% \cs{clist_const:Nn} \meta{clist~var} \Arg{comma list}
-% \end{syntax}
-% Creates a new constant \meta{clist~var} or raises an error
-% if the name is already taken. The value of the
-% \meta{clist~var} will be set globally to the
-% \meta{comma list}.
-% \end{function}
-%
-% \begin{function}[EXP, pTF]{\clist_if_empty:n}
-% \begin{syntax}
-% \cs{clist_if_empty_p:n} \Arg{comma list}
-% \cs{clist_if_empty:nTF} \Arg{comma list} \Arg{true code} \Arg{false code}
-% \end{syntax}
-% Tests if the \meta{comma list} is empty (containing no items).
-% The rules for space trimming are as for other \texttt{n}-type
-% comma-list functions, hence the comma list |{~,~,,~}| (without
-% outer braces) is empty, while |{~,{},}| (without outer braces)
-% contains one element, which happens to be empty: the comma-list
-% is not empty.
-% \end{function}
-%
% \section{Additions to \pkg{l3coffins}}
%
% \begin{function}{\coffin_resize:Nnn, \coffin_resize:cnn}
@@ -344,6 +340,19 @@
%
% \section{Additions to \pkg{l3file}}
%
+% \begin{function}[TF, added = 2014-07-02]{\file_if_exist_input:n}
+% \begin{syntax}
+% \cs{file_if_exist_input:n} \Arg{file name}
+% \cs{file_if_exist_input:nTF} \Arg{file name} \Arg{true code} \Arg{false code}
+% \end{syntax}
+% Searches for \meta{file name} using the current \TeX{} search
+% path and the additional paths controlled by
+% \cs{file_path_include:n}. If found, inserts the \meta{true code} then
+% reads in the file as additional \LaTeX{} source as described for
+% \cs{file_input:n}. Note that \cs{file_if_exist_input:n} does not raise
+% an error if the file is not found, in contrast to \cs{file_input:n}.
+% \end{function}
+%
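A usage sketch for \file_if_exist_input:n as documented above; the file name here is purely hypothetical:

    \file_if_exist_input:nF { local-setup.tex }
      { \iow_term:n { No~local~setup~file~found. } }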
% \begin{function}[added = 2012-02-11]{\ior_map_inline:Nn}
% \begin{syntax}
% \cs{ior_map_inline:Nn} \meta{stream} \Arg{inline function}
@@ -427,22 +436,6 @@
% \end{texnote}
% \end{function}
%
-% \section{Additions to \pkg{l3fp}}
-%
-% \begin{function}
-% {
-% \fp_set_from_dim:Nn, \fp_set_from_dim:cn,
-% \fp_gset_from_dim:Nn, \fp_gset_from_dim:cn
-% }
-% \begin{syntax}
-% \cs{fp_set_from_dim:Nn} \meta{floating point variable} \Arg{dimexpr}
-% \end{syntax}
-% Sets the \meta{floating point variable} to the distance represented
-% by the \meta{dimension expression} in the units points. This means
-% that distances given in other units are first converted to points
-% before being assigned to the \meta{floating point variable}.
-% \end{function}
-%
% \section{Additions to \pkg{l3prop}}
%
% \begin{function}[rEXP]
@@ -460,48 +453,11 @@
% will expand to the value corresponding to \texttt{mykey}: for each
% pair in \cs{l_my_prop} the function \cs{str_if_eq:nnT} receives
% \texttt{mykey}, the \meta{key} and the \meta{value} as its three
-% arguments. For that specific task, \cs{prop_get:Nn} is faster.
-% \end{function}
-%
-% \begin{function}[EXP]{\prop_get:Nn, \prop_get:cn}
-% \begin{syntax}
-% \cs{prop_get:Nn} \meta{property list} \Arg{key}
-% \end{syntax}
-% Expands to the \meta{value} corresponding to the \meta{key} in
-% the \meta{property list}. If the \meta{key} is missing, this has
-% an empty expansion.
-% \begin{texnote}
-% This function is slower than the non-expandable analogue
-% \cs{prop_get:NnN}.
-% The result is returned within the \tn{unexpanded}
-% primitive (\cs{exp_not:n}), which means that the \meta{value}
-% will not expand further when appearing in an \texttt{x}-type
-% argument expansion.
-% \end{texnote}
+% arguments. For that specific task, \cs{prop_item:Nn} is faster.
% \end{function}
%
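As a sketch of the expandable look-up mentioned above (assuming a property list \l_my_prop and scratch token list \l_my_tl):

    \prop_new:N \l_my_prop
    \prop_put:Nnn \l_my_prop { mykey } { myvalue }
    \tl_set:Nx \l_my_tl { \prop_item:Nn \l_my_prop { mykey } } % \l_my_tl now holds "myvalue"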
% \section{Additions to \pkg{l3seq}}
%
-% \begin{function}[EXP]{\seq_item:Nn, \seq_item:cn}
-% \begin{syntax}
-% \cs{seq_item:Nn} \meta{sequence} \Arg{integer expression}
-% \end{syntax}
-% Indexing items in the \meta{sequence} from~$1$ at the top (left), this
-% function will evaluate the \meta{integer expression} and leave the
-% appropriate item from the sequence in the input stream. If the
-% \meta{integer expression} is negative, indexing occurs from the
-% bottom (right) of the sequence. When the \meta{integer expression}
-% is larger than the number of items in the \meta{sequence} (as
-% calculated by \cs{seq_count:N}) then the function will expand to
-% nothing.
-% \begin{texnote}
-% The result is returned within the \tn{unexpanded}
-% primitive (\cs{exp_not:n}), which means that the \meta{item}
-% will not expand further when appearing in an \texttt{x}-type
-% argument expansion.
-% \end{texnote}
-% \end{function}
-%
% \begin{function}[rEXP]
% {
% \seq_mapthread_function:NNN, \seq_mapthread_function:NcN,
@@ -520,31 +476,6 @@
% occur).
% \end{function}
%
-% \begin{function}
-% {
-% \seq_set_from_clist:NN, \seq_set_from_clist:cN,
-% \seq_set_from_clist:Nc, \seq_set_from_clist:cc,
-% \seq_set_from_clist:Nn, \seq_set_from_clist:cn,
-% \seq_gset_from_clist:NN, \seq_gset_from_clist:cN,
-% \seq_gset_from_clist:Nc, \seq_gset_from_clist:cc,
-% \seq_gset_from_clist:Nn, \seq_gset_from_clist:cn
-% }
-% \begin{syntax}
-% \cs{seq_set_from_clist:NN} \meta{sequence} \meta{comma-list}
-% \end{syntax}
-% Sets the \meta{sequence} within the current \TeX{} group to be equal
-% to the content of the \meta{comma-list}.
-% \end{function}
-%
-% \begin{function}{\seq_reverse:N, \seq_greverse:N}
-% \begin{syntax}
-% \cs{seq_reverse:N} \meta{sequence}
-% \end{syntax}
-% Reverses the order of items in the \meta{sequence}, and
-% assigns the result to \meta{sequence}, locally or globally
-% according to the variant chosen.
-% \end{function}
-%
% \begin{function}{\seq_set_filter:NNn, \seq_gset_filter:NNn}
% \begin{syntax}
% \cs{seq_set_filter:NNn} \meta{sequence_1} \meta{sequence_2} \Arg{inline boolexpr}
@@ -580,44 +511,6 @@
%
% \section{Additions to \pkg{l3skip}}
%
-% \begin{function}[added = 2013-05-06, updated=2014-05-31, EXP]
-% {\dim_to_pt:n}
-% \begin{syntax}
-% \cs{dim_to_pt:n} \Arg{dimexpr}
-% \end{syntax}
-% Evaluates the \meta{dimension expression}, and leaves the result,
-% expressed in points (\texttt{pt}) in the input stream, with \emph{no
-% units}. The result is rounded by \TeX{} to four or five decimal
-% places. If the decimal part of the result is zero, it is omitted,
-% together with the decimal marker.
-%
-% For example
-% \begin{verbatim}
-% \dim_to_pt:n { 1 bp }
-% \end{verbatim}
-% leaves |1.00374| in the input stream, \emph{i.e.}~the magnitude of
-% one \enquote{big point} when converted to points.
-% \end{function}
-%
-% \begin{function}[added = 2013-05-06, updated=2014-05-31, EXP]
-% {\dim_to_unit:nn}
-% \begin{syntax}
-% \cs{dim_to_unit:nn} \Arg{dimexpr_1} \Arg{dimexpr_2}
-% \end{syntax}
-% Evaluates the \meta{dimension expressions}, and leaves the value of
-% \meta{dimexpr_1}, expressed in a unit given by \meta{dimexpr_2}, in
-% the input stream. The result is a decimal number, rounded by \TeX{}
-% to four or five decimal places. If the decimal part of the result
-% is zero, it is omitted, together with the decimal marker.
-%
-% For example
-% \begin{verbatim}
-% \dim_to_unit:nn { 1 bp } { 1 mm }
-% \end{verbatim}
-% leaves |0.35277| in the input stream, \emph{i.e.}~the magnitude of
-% one \enquote{big point} when converted to millimeters.
-% \end{function}
-%
% \begin{function}{\skip_split_finite_else_action:nnNN}
% \begin{syntax}
% \cs{skip_split_finite_else_action:nnNN} \Arg{skipexpr} \Arg{action}
@@ -698,22 +591,129 @@
% \end{texnote}
% \end{function}
%
-% \begin{function}[EXP]{\tl_item:nn, \tl_item:Nn, \tl_item:cn}
+% \begin{function}[rEXP, added = 2014-06-30]^^A
+% {
+% \tl_lower_case:n, \tl_upper_case:n, \tl_mixed_case:n,
+% \tl_lower_case:nn, \tl_upper_case:nn, \tl_mixed_case:nn
+% }
% \begin{syntax}
-% \cs{tl_item:nn} \Arg{token list} \Arg{integer expression}
+% \cs{tl_upper_case:n} \Arg{tokens}
+% \cs{tl_upper_case:nn} \Arg{language} \Arg{tokens}
% \end{syntax}
-% Indexing items in the \meta{token list} from~$1$ on the left, this
-% function will evaluate the \meta{integer expression} and leave the
-% appropriate item from the \meta{token list} in the input stream.
-% If the \meta{integer expression} is negative, indexing occurs from
-% the right of the token list, starting at $-1$ for the right-most item.
-% If the index is out of bounds, then thr function expands to nothing.
-% \begin{texnote}
-% The result is returned within the \tn{unexpanded}
-% primitive (\cs{exp_not:n}), which means that the \meta{item}
-% will not expand further when appearing in an \texttt{x}-type
-% argument expansion.
-% \end{texnote}
+% These functions are intended to be applied to input which may be
+% regarded broadly as \enquote{text}. They traverse the \meta{tokens} and
+% change the case of characters as discussed below. The character code of
+% the characters replaced may be arbitrary: the replacement characters will
+% have standard document-level category codes ($11$ for letters, $12$ for
+% letter-like characters which can also be case-changed).
+% \end{function}
+%
+% The functions are \texttt{x}-type expandable: tokens are returned protected
+% from further expansion where appropriate. Begin-group and end-group
+% characters in the \meta{tokens} are normalized and become |{| and |}|,
+% respectively. Any tokens within such a group will \emph{not} be
+% case-changed, and thus for example
+% \begin{verbatim}
+% \tl_upper_case:n { Some~text~{$y = mx + c$}~with~{Protection} }
+% \end{verbatim}
+% will become
+% \begin{verbatim}
+% SOME~TEXT~{$y = mx + c$}~WITH~{Protection}
+% \end{verbatim}
+%
+% `Mixed' case conversion may be regarded informally as converting the first
+% character of the \meta{tokens} to upper case and the rest to lower case.
+% However, the process is more complex than this as there are some cases
+% where a single lower case character maps to a special form,
+% for example \texttt{ij} in Dutch which becomes \texttt{IJ}. As such,
+% \cs{tl_mixed_case:n(n)} implement a more sophisticated mapping which
+% accounts for this and for modifying accents on the first letter.
+% Spaces at the start of the \meta{tokens} are ignored when finding the
+% first \enquote{letter} for conversion, while a brace group will terminate
+% this search. For example
+% \begin{verbatim}
+% \tl_mixed_case:n { hello~WORLD } % => "Hello world"
+% \tl_mixed_case:n { ~hello~WORLD } % => " Hello world"
+% \tl_mixed_case:n { {hello}~WORLD } % => "{hello} world"
+% \end{verbatim}
+% where the brace group is retained. (Note that the Unicode Consortium
+% describe this as `title case', but that in English title case applies
+% on a word-by-word basis. The `mixed' case implemented here is a lower
+% level concept needed for both `title' and `sentence' casing of text.)
+%
+% As is generally true for \pkg{expl3}, these functions are designed to
+% work with engine-native input only. As such, when used with \pdfTeX{}
+% \emph{only} the characters \texttt{a}--\texttt{zA}--\texttt{Z} are
+% modified. When used with \XeTeX{} or \LuaTeX{} a full range of Unicode
+% transformations are enabled. Specifically, the standard mappings here
+% follow those defined by the \href{http://www.unicode.org}^^A
+% {Unicode Consortium} in \texttt{UnicodeData.txt} and
+% \texttt{SpecialCasing.txt}. Note that in some cases, \pdfTeX{} can
+% interpret the input to a case change but not generate the correct output
+% (for example in the mapping i to I-dot in Turkish): in these cases the
+% input is left unchanged.
+%
+% Context-sensitive mappings are enabled: language-dependent cases are
+% discussed below. The \enquote{final sigma} rule for Greek letters is
+% enabled and active for all inputs. It is implemented here in a modified
+% form which takes account of the requirements of the likely real use cases,
+% performance and expandability. Thus a capital sigma will map to a
+% final-sigma if it is followed by a space or one of the characters: ^^A{[(
+% |!'),.:;?]}|. (Feedback on this area is very welcome.)
+%
+% Language-sensitive conversions are enabled using the \meta{language}
+% argument, and follow Unicode Consortium guidelines. Currently, the
+% languages recognised for special handling are as follows.
+% \begin{itemize}
+% \item Azeri and Turkish (\texttt{az} and \texttt{tr}).
+% The case pairs I/i-dotless and I-dot/i are activated for these
+% languages. The combining dot mark is removed when lower
+% casing I-dot and introduced when upper casing i-dotless.
+% \item Lithuanian (\texttt{lt}).
+% The lower case letters i and j should retain a dot above when the
+% accents grave, acute or tilde are present. This is implemented for
+% lower casing of the relevant upper case letters both when input as
+% single Unicode codepoints and when using combining accents. The
+% combining dot is removed when upper casing in these cases. Note that
+% \emph{only} the accents used in Lithuanian are covered: the behaviour
+% of other accents is not modified.
+% \item Dutch (\texttt{nl}).
+% Capitalisation of \texttt{ij} at the beginning of mixed cased
+% input produces \texttt{IJ} rather than \texttt{Ij}. The output
+% retains two separate letters, thus this transformation \emph{is}
+% available using \pdfTeX{}.
+% \end{itemize}
+%
+% Creating additional context-sensitive mappings requires knowledge
+% of the underlying mapping implementation used here. The team are happy
+% to add these to the kernel where they are well-documented
+% (\emph{e.g.}~in Unicode Consortium or relevant government publications).
+%
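Some illustrative calls matching the behaviour documented above (outputs for the Turkish example assume a Unicode engine such as XeTeX or LuaTeX):

    \tl_upper_case:n { hello~world }        % => "HELLO WORLD"
    \tl_upper_case:nn { tr } { istanbul }   % => "İSTANBUL"
    \tl_mixed_case:nn { nl } { ijsberg }    % => "IJsberg"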
+% \begin{function}[added = 2014-06-25]^^A
+% {
+% \tl_set_from_file:Nnn, \tl_set_from_file:cnn,
+% \tl_gset_from_file:Nnn, \tl_gset_from_file:cnn
+% }
+% \begin{syntax}
+% \cs{tl_set_from_file:Nnn} \meta{tl} \Arg{setup} \Arg{filename}
+% \end{syntax}
+% Defines \meta{tl} to the contents of \meta{filename}.
+% Category codes may need to be set appropriately via the \meta{setup}
+% argument.
+% \end{function}
+%
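As an illustration of the interface above, a hedged sketch of reading an external file into a token list (the file name is hypothetical, and the setup code shown is only one possibility; the \meta{setup} argument might instead make other category-code adjustments):

    \tl_set_from_file:Nnn \l_my_tl { \ExplSyntaxOff } { external-data.tex }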
+% \begin{function}[added = 2014-06-25]^^A
+% {
+% \tl_set_from_file_x:Nnn, \tl_set_from_file_x:cnn,
+% \tl_gset_from_file_x:Nnn, \tl_gset_from_file_x:cnn
+% }
+% \begin{syntax}
+% \cs{tl_set_from_file_x:Nnn} \meta{tl} \Arg{setup} \Arg{filename}
+% \end{syntax}
+% Defines \meta{tl} to the contents of \meta{filename}, expanding
+% the contents of the file as it is read. Category codes and other
+% definitions may need to be set appropriately via the \meta{setup}
+% argument.
% \end{function}
%
% \section{Additions to \pkg{l3tokens}}
@@ -989,8 +989,8 @@
{
\fp_to_dim:n
{
- \l_@@_cos_fp * \dim_to_fp:n {#1}
- - ( \l_@@_sin_fp * \dim_to_fp:n {#2} )
+ \l_@@_cos_fp * \dim_to_fp:n {#1}
+ - \l_@@_sin_fp * \dim_to_fp:n {#2}
}
}
}
@@ -1071,7 +1071,8 @@
% \end{variable}
%
% \begin{macro}{\box_resize:Nnn, \box_resize:cnn}
-% \begin{macro}[aux]{\@@_resize:Nnn}
+% \begin{macro}[aux]{\@@_resize_set_corners:N}
+% \begin{macro}[aux]{\@@_resize:Nn}
% Resizing a box starts by working out the various dimensions of the
% existing box.
% \begin{macrocode}
@@ -1080,33 +1081,37 @@
\hbox_set:Nn #1
{
\group_begin:
- \dim_set:Nn \l_@@_top_dim { \box_ht:N #1 }
- \dim_set:Nn \l_@@_bottom_dim { -\box_dp:N #1 }
- \dim_set:Nn \l_@@_right_dim { \box_wd:N #1 }
- \dim_zero:N \l_@@_left_dim
+ \@@_resize_set_corners:N #1
% \end{macrocode}
% The $x$-scaling and resulting box size is easy enough to work
% out: the dimension is that given as |#2|, and the scale is simply the
% new width divided by the old one.
% \begin{macrocode}
\fp_set:Nn \l_@@_scale_x_fp
- { \dim_to_fp:n {#2} / ( \dim_to_fp:n \l_@@_right_dim ) }
+ { \dim_to_fp:n {#2} / \dim_to_fp:n { \l_@@_right_dim } }
% \end{macrocode}
% The $y$-scaling needs both the height and the depth of the current box.
% \begin{macrocode}
\fp_set:Nn \l_@@_scale_y_fp
{
- \dim_to_fp:n {#3} /
- ( \dim_to_fp:n { \l_@@_top_dim - \l_@@_bottom_dim } )
+ \dim_to_fp:n {#3}
+ / \dim_to_fp:n { \l_@@_top_dim - \l_@@_bottom_dim }
}
% \end{macrocode}
% Hand off to the auxiliary which does the work.
% \begin{macrocode}
- \@@_resize:Nnn #1 {#2} {#3}
+ \@@_resize:Nn #1 {#2}
\group_end:
}
}
\cs_generate_variant:Nn \box_resize:Nnn { c }
+\cs_new_protected:Npn \@@_resize_set_corners:N #1
+ {
+ \dim_set:Nn \l_@@_top_dim { \box_ht:N #1 }
+ \dim_set:Nn \l_@@_bottom_dim { -\box_dp:N #1 }
+ \dim_set:Nn \l_@@_right_dim { \box_wd:N #1 }
+ \dim_zero:N \l_@@_left_dim
+ }
% \end{macrocode}
% With at least one real scaling to do, the next phase is to find the new
% edge co-ordinates. In the $x$~direction this is relatively easy: just
@@ -1116,7 +1121,7 @@
% scale value. Once that is all done, the common resize/rescale code can
% be employed.
% \begin{macrocode}
-\cs_new_protected:Npn \@@_resize:Nnn #1#2#3
+\cs_new_protected:Npn \@@_resize:Nn #1#2
{
\dim_set:Nn \l_@@_right_new_dim { \dim_abs:n {#2} }
\dim_set:Nn \l_@@_bottom_new_dim
@@ -1128,31 +1133,48 @@
% \end{macrocode}
% \end{macro}
% \end{macro}
+% \end{macro}
%
+% \begin{macro}{\box_resize_to_ht:Nn, \box_resize_to_ht:cn}
% \begin{macro}{\box_resize_to_ht_plus_dp:Nn, \box_resize_to_ht_plus_dp:cn}
% \begin{macro}{\box_resize_to_wd:Nn, \box_resize_to_wd:cn}
-% Scaling to a total height or to a width is a simplified version of the main
+% \begin{macro}{\box_resize_to_wd_and_ht:Nnn, \box_resize_to_wd_and_ht:cnn}
+% Scaling to a (total) height or to a width is a simplified version of the main
% resizing operation, with the scale simply copied between the two parts. The
% internal auxiliary is called using the scaling value twice, as the sign for
% both parts is needed (as this allows the same internal code to be used as
% for the general case).
% \begin{macrocode}
+\cs_new_protected:Npn \box_resize_to_ht:Nn #1#2
+ {
+ \hbox_set:Nn #1
+ {
+ \group_begin:
+ \@@_resize_set_corners:N #1
+ \fp_set:Nn \l_@@_scale_y_fp
+ {
+ \dim_to_fp:n {#2}
+ / \dim_to_fp:n { \l_@@_top_dim }
+ }
+ \fp_set_eq:NN \l_@@_scale_x_fp \l_@@_scale_y_fp
+ \@@_resize:Nn #1 {#2}
+ \group_end:
+ }
+ }
+\cs_generate_variant:Nn \box_resize_to_ht:Nn { c }
\cs_new_protected:Npn \box_resize_to_ht_plus_dp:Nn #1#2
{
\hbox_set:Nn #1
{
\group_begin:
- \dim_set:Nn \l_@@_top_dim { \box_ht:N #1 }
- \dim_set:Nn \l_@@_bottom_dim { -\box_dp:N #1 }
- \dim_set:Nn \l_@@_right_dim { \box_wd:N #1 }
- \dim_zero:N \l_@@_left_dim
+ \@@_resize_set_corners:N #1
\fp_set:Nn \l_@@_scale_y_fp
{
- \dim_to_fp:n {#2} /
- ( \dim_to_fp:n { \l_@@_top_dim - \l_@@_bottom_dim } )
+ \dim_to_fp:n {#2}
+ / \dim_to_fp:n { \l_@@_top_dim - \l_@@_bottom_dim }
}
\fp_set_eq:NN \l_@@_scale_x_fp \l_@@_scale_y_fp
- \@@_resize:Nnn #1 {#2} {#2}
+ \@@_resize:Nn #1 {#2}
\group_end:
}
}
@@ -1162,21 +1184,38 @@
\hbox_set:Nn #1
{
\group_begin:
- \dim_set:Nn \l_@@_top_dim { \box_ht:N #1 }
- \dim_set:Nn \l_@@_bottom_dim { -\box_dp:N #1 }
- \dim_set:Nn \l_@@_right_dim { \box_wd:N #1 }
- \dim_zero:N \l_@@_left_dim
+ \@@_resize_set_corners:N #1
\fp_set:Nn \l_@@_scale_x_fp
- { \dim_to_fp:n {#2} / ( \dim_to_fp:n \l_@@_right_dim ) }
+ { \dim_to_fp:n {#2} / \dim_to_fp:n { \l_@@_right_dim } }
\fp_set_eq:NN \l_@@_scale_y_fp \l_@@_scale_x_fp
- \@@_resize:Nnn #1 {#2} {#2}
+ \@@_resize:Nn #1 {#2}
\group_end:
}
}
\cs_generate_variant:Nn \box_resize_to_wd:Nn { c }
+\cs_new_protected:Npn \box_resize_to_wd_and_ht:Nnn #1#2#3
+ {
+ \hbox_set:Nn #1
+ {
+ \group_begin:
+ \@@_resize_set_corners:N #1
+ \fp_set:Nn \l_@@_scale_x_fp
+ { \dim_to_fp:n {#2} / \dim_to_fp:n { \l_@@_right_dim } }
+ \fp_set:Nn \l_@@_scale_y_fp
+ {
+ \dim_to_fp:n {#3}
+ / \dim_to_fp:n { \l_@@_top_dim }
+ }
+ \@@_resize:Nn #1 {#2}
+ \group_end:
+ }
+ }
+\cs_generate_variant:Nn \box_resize_to_wd_and_ht:Nnn { c }
% \end{macrocode}
% \end{macro}
% \end{macro}
+% \end{macro}
+% \end{macro}
%
% \begin{macro}{\box_scale:Nnn, \box_scale:cnn}
% When scaling a box, setting the scaling itself is easy enough. The
@@ -1390,206 +1429,6 @@
% \end{macrocode}
% \end{macro}
%
-% \subsection{Additions to \pkg{l3clist}}
-%
-% \begin{macrocode}
-%<@@=clist>
-% \end{macrocode}
-%
-% \begin{macro}{\clist_item:Nn, \clist_item:cn}
-% \begin{macro}[aux]{\@@_item:nnNn}
-% \begin{macro}[aux]{\@@_item_N_loop:nw}
-% To avoid needing to test the end of the list at each step,
-% we first compute the \meta{length} of the list. If the item number
-% is~$0$, less than $-\meta{length}$, or more than $\meta{length}$,
-% the result is empty. If it is negative, but not less than $-\meta{length}$,
-% add $\meta{length}+1$ to the item number before performing the loop.
-% The loop itself is very simple, return the item if the counter
-% reached~$1$, otherwise, decrease the counter and repeat.
-% \begin{macrocode}
-\cs_new:Npn \clist_item:Nn #1#2
- {
- \exp_args:Nfo \@@_item:nnNn
- { \clist_count:N #1 }
- #1
- \@@_item_N_loop:nw
- {#2}
- }
-\cs_new:Npn \@@_item:nnNn #1#2#3#4
- {
- \int_compare:nNnTF {#4} < \c_zero
- {
- \int_compare:nNnTF {#4} < { - #1 }
- { \use_none_delimit_by_q_stop:w }
- { \exp_args:Nf #3 { \int_eval:n { #4 + \c_one + #1 } } }
- }
- {
- \int_compare:nNnTF {#4} > {#1}
- { \use_none_delimit_by_q_stop:w }
- { #3 {#4} }
- }
- { } , #2 , \q_stop
- }
-\cs_new:Npn \@@_item_N_loop:nw #1 #2,
- {
- \int_compare:nNnTF {#1} = \c_zero
- { \use_i_delimit_by_q_stop:nw { \exp_not:n {#2} } }
- { \exp_args:Nf \@@_item_N_loop:nw { \int_eval:n { #1 - 1 } } }
- }
-\cs_generate_variant:Nn \clist_item:Nn { c }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}{\clist_item:nn}
-% \begin{macro}[aux]{
-% \@@_item_n:nw,
-% \@@_item_n_loop:nw,
-% \@@_item_n_end:n,
-% \@@_item_n_strip:w}
-% This starts in the same way as \cs{clist_item:Nn} by counting the items
-% of the comma list. The final item should be space-trimmed before being
-% brace-stripped, hence we insert a couple of odd-looking
-% \cs{prg_do_nothing:} to avoid losing braces. Blank items are ignored.
-% \begin{macrocode}
-\cs_new:Npn \clist_item:nn #1#2
- {
- \exp_args:Nf \@@_item:nnNn
- { \clist_count:n {#1} }
- {#1}
- \@@_item_n:nw
- {#2}
- }
-\cs_new:Npn \@@_item_n:nw #1
- { \@@_item_n_loop:nw {#1} \prg_do_nothing: }
-\cs_new:Npn \@@_item_n_loop:nw #1 #2,
- {
- \exp_args:No \tl_if_blank:nTF {#2}
- { \@@_item_n_loop:nw {#1} \prg_do_nothing: }
- {
- \int_compare:nNnTF {#1} = \c_zero
- { \exp_args:No \@@_item_n_end:n {#2} }
- {
- \exp_args:Nf \@@_item_n_loop:nw
- { \int_eval:n { #1 - 1 } }
- \prg_do_nothing:
- }
- }
- }
-\cs_new:Npn \@@_item_n_end:n #1 #2 \q_stop
- {
- \__tl_trim_spaces:nn { \q_mark #1 }
- { \exp_last_unbraced:No \@@_item_n_strip:w } ,
- }
-\cs_new:Npn \@@_item_n_strip:w #1 , { \exp_not:n {#1} }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}
-% {
-% \clist_set_from_seq:NN, \clist_set_from_seq:cN,
-% \clist_set_from_seq:Nc, \clist_set_from_seq:cc
-% }
-% \UnitTested
-% \begin{macro}
-% {
-% \clist_gset_from_seq:NN, \clist_gset_from_seq:cN,
-% \clist_gset_from_seq:Nc, \clist_gset_from_seq:cc
-% }
-% \UnitTested
-% \begin{macro}[aux]{\@@_set_from_seq:NNNN}
-% \begin{macro}[aux]{\@@_wrap_item:n}
-% \begin{macro}[aux]{\@@_set_from_seq:w}
-% Setting a comma list from a comma-separated list is done using a simple
-% mapping. We wrap most items with \cs{exp_not:n}, and a comma. Items which
-% contain a comma or a space are surrounded by an extra set of braces. The
-% first comma must be removed, except in the case of an empty comma-list.
-% \begin{macrocode}
-\cs_new_protected:Npn \clist_set_from_seq:NN
- { \@@_set_from_seq:NNNN \clist_clear:N \tl_set:Nx }
-\cs_new_protected:Npn \clist_gset_from_seq:NN
- { \@@_set_from_seq:NNNN \clist_gclear:N \tl_gset:Nx }
-\cs_new_protected:Npn \@@_set_from_seq:NNNN #1#2#3#4
- {
- \seq_if_empty:NTF #4
- { #1 #3 }
- {
- #2 #3
- {
- \exp_last_unbraced:Nf \use_none:n
- { \seq_map_function:NN #4 \@@_wrap_item:n }
- }
- }
- }
-\cs_new:Npn \@@_wrap_item:n #1
- {
- ,
- \tl_if_empty:oTF { \@@_set_from_seq:w #1 ~ , #1 ~ }
- { \exp_not:n {#1} }
- { \exp_not:n { {#1} } }
- }
-\cs_new:Npn \@@_set_from_seq:w #1 , #2 ~ { }
-\cs_generate_variant:Nn \clist_set_from_seq:NN { Nc }
-\cs_generate_variant:Nn \clist_set_from_seq:NN { c , cc }
-\cs_generate_variant:Nn \clist_gset_from_seq:NN { Nc }
-\cs_generate_variant:Nn \clist_gset_from_seq:NN { c , cc }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}
-% {
-% \clist_const:Nn, \clist_const:cn,
-% \clist_const:Nx, \clist_const:cx
-% }
-% Creating and initializing a constant comma list is done in a way
-% similar to \cs{clist_set:Nn} and \cs{clist_gset:Nn}, being careful
-% to strip spaces.
-% \begin{macrocode}
-\cs_new_protected:Npn \clist_const:Nn #1#2
- { \tl_const:Nx #1 { \@@_trim_spaces:n {#2} } }
-\cs_generate_variant:Nn \clist_const:Nn { c , Nx , cx }
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macro}[EXP, pTF]{\clist_if_empty:n}
-% \begin{macro}[aux, EXP]{\@@_if_empty_n:w}
-% \begin{macro}[aux, EXP]{\@@_if_empty_n:wNw}
-% As usual, we insert a token (here |?|) before grabbing
-% any argument: this avoids losing braces. The argument
-% of \cs{tl_if_empty:oTF} is empty if |#1| is |?| followed
-% by blank spaces (besides, this particular variant of
-% the emptiness test is optimized). If the item of the
-% comma list is blank, grab the next one. As soon as one
-% item is non-blank, exit: the second auxiliary will grab
-% \cs{prg_return_false:} as |#2|, unless every item in
-% the comma list was blank and the loop actually got broken
-% by the trailing |\q_mark \prg_return_false:| item.
-% \begin{macrocode}
-\prg_new_conditional:Npnn \clist_if_empty:n #1 { p , T , F , TF }
- {
- \@@_if_empty_n:w ? #1
- , \q_mark \prg_return_false:
- , \q_mark \prg_return_true:
- \q_stop
- }
-\cs_new:Npn \@@_if_empty_n:w #1 ,
- {
- \tl_if_empty:oTF { \use_none:nn #1 ? }
- { \@@_if_empty_n:w ? }
- { \@@_if_empty_n:wNw }
- }
-\cs_new:Npn \@@_if_empty_n:wNw #1 \q_mark #2#3 \q_stop {#2}
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
% \subsection{Additions to \pkg{l3coffins}}
%
% \begin{macrocode}
@@ -1796,16 +1635,16 @@
{
\fp_to_dim:n
{
- \dim_to_fp:n {#1} * \l_@@_cos_fp
- - ( \dim_to_fp:n {#2} * \l_@@_sin_fp )
+ \dim_to_fp:n {#1} * \l_@@_cos_fp
+ - \dim_to_fp:n {#2} * \l_@@_sin_fp
}
}
\dim_set:Nn #4
{
\fp_to_dim:n
{
- \dim_to_fp:n {#1} * \l_@@_sin_fp
- + ( \dim_to_fp:n {#2} * \l_@@_cos_fp )
+ \dim_to_fp:n {#1} * \l_@@_sin_fp
+ + \dim_to_fp:n {#2} * \l_@@_cos_fp
}
}
}
@@ -1927,7 +1766,8 @@
{ \dim_to_fp:n {#2} / \dim_to_fp:n { \coffin_wd:N #1 } }
\fp_set:Nn \l_@@_scale_y_fp
{
- \dim_to_fp:n {#3} / \dim_to_fp:n { \coffin_ht:N #1 + \coffin_dp:N #1 }
+ \dim_to_fp:n {#3}
+ / \dim_to_fp:n { \coffin_ht:N #1 + \coffin_dp:N #1 }
}
\box_resize:Nnn #1 {#2} {#3}
\@@_resize_common:Nnn #1 {#2} {#3}
@@ -2052,6 +1892,43 @@
% \subsection{Additions to \pkg{l3file}}
%
% \begin{macrocode}
+%<@@=file>
+% \end{macrocode}
+%
+% \begin{macro}[TF]{\file_if_exist_input:n}
+% Input of a file with a test for existence cannot be done in the usual
+% way, as the tokens to insert are in an odd place.
+% \begin{macrocode}
+\cs_new_protected:Npn \file_if_exist_input:n #1
+ {
+ \file_if_exist:nT {#1}
+ { \@@_input:V \l_@@_internal_name_tl }
+ }
+\cs_new_protected:Npn \file_if_exist_input:nT #1#2
+ {
+ \file_if_exist:nT {#1}
+ {
+ #2
+ \@@_input:V \l_@@_internal_name_tl
+ }
+ }
+\cs_new_protected:Npn \file_if_exist_input:nF #1
+ {
+ \file_if_exist:nTF {#1}
+ { \@@_input:V \l_@@_internal_name_tl }
+ }
+\cs_new_protected:Npn \file_if_exist_input:nTF #1#2
+ {
+ \file_if_exist:nTF {#1}
+ {
+ #2
+ \@@_input:V \l_@@_internal_name_tl
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+%
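+% A usage sketch (the file name and the message text are purely
+% illustrative):
+% \begin{verbatim}
+%   \file_if_exist_input:nF { local-setup.tex }
+%     { \iow_term:n { No~local~setup~file~found. } }
+% \end{verbatim}
+%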
+% \begin{macrocode}
%<@@=ior>
% \end{macrocode}
%
@@ -2111,28 +1988,6 @@
% \end{macro}
% \end{macro}
%
-% \subsection{Additions to \pkg{l3fp}}
-%
-% \begin{macrocode}
-%<@@=fp>
-% \end{macrocode}
-%
-% \begin{macro}
-% {
-% \fp_set_from_dim:Nn, \fp_set_from_dim:cn,
-% \fp_gset_from_dim:Nn, \fp_gset_from_dim:cn
-% }
-% Use the appropriate function from \pkg{l3fp-convert}.
-% \begin{macrocode}
-\cs_new_protected:Npn \fp_set_from_dim:Nn #1#2
- { \tl_set:Nx #1 { \dim_to_fp:n {#2} } }
-\cs_new_protected:Npn \fp_gset_from_dim:Nn #1#2
- { \tl_gset:Nx #1 { \dim_to_fp:n {#2} } }
-\cs_generate_variant:Nn \fp_set_from_dim:Nn { c }
-\cs_generate_variant:Nn \fp_gset_from_dim:Nn { c }
-% \end{macrocode}
-% \end{macro}
-%
% \subsection{Additions to \pkg{l3prop}}
%
% \begin{macrocode}
@@ -2168,78 +2023,12 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}[EXP]{\prop_get:Nn, \prop_get:cn}
-% \begin{macro}[aux, EXP]{\@@_get_Nn:nwwn}
-% Getting the value corresponding to a key in a property list in an
-% expandable fashion is similar to mapping some tokens. Go through
-% the property list one \meta{key}--\meta{value} pair at a time: the
-% arguments of \cs{@@_get_Nn:nwn} are the \meta{key} we are looking
-% for, a \meta{key} of the property list, and its associated value.
-% The \meta{keys} are compared (as strings). If they match, the
-% \meta{value} is returned, within \cs{exp_not:n}. The loop
-% terminates even if the \meta{key} is missing, and yields an empty
-% value, because we have appended the appropriate
-% \meta{key}--\meta{empty value} pair to the property list.
-% \begin{macrocode}
-\cs_new:Npn \prop_get:Nn #1#2
- {
- \exp_last_unbraced:Noo \@@_get_Nn:nwwn { \tl_to_str:n {#2} } #1
- \@@_pair:wn \tl_to_str:n {#2} \s_@@ { }
- \__prg_break_point:
- }
-\cs_new:Npn \@@_get_Nn:nwwn #1#2 \@@_pair:wn #3 \s_@@ #4
- {
- \str_if_eq_x:nnTF {#1} {#3}
- { \__prg_break:n { \exp_not:n {#4} } }
- { \@@_get_Nn:nwwn {#1} }
- }
-\cs_generate_variant:Nn \prop_get:Nn { c }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
% \subsection{Additions to \pkg{l3seq}}
%
% \begin{macrocode}
%<@@=seq>
% \end{macrocode}
%
-% \begin{macro}{\seq_item:Nn, \seq_item:cn}
-% \begin{macro}[aux]{\@@_item:wNn, \@@_item:nnn}
-% The idea here is to find the offset of the item from the left, then use
-% a loop to grab the correct item. If the resulting offset is too large,
-% then the stop code |{ ? \__prg_break: } { }| will be used by the auxiliary,
-% terminating the loop and returning nothing at all.
-% \begin{macrocode}
-\cs_new:Npn \seq_item:Nn #1
- { \exp_after:wN \@@_item:wNn #1 \q_stop #1 }
-\cs_new:Npn \@@_item:wNn \s_@@ #1 \q_stop #2#3
- {
- \exp_args:Nf \@@_item:nnn
- {
- \int_eval:n
- {
- \int_compare:nNnT {#3} < \c_zero
- { \seq_count:N #2 + \c_one + }
- #3
- }
- }
- #1
- { ? \__prg_break: } { }
- \__prg_break_point:
- }
-\cs_new:Npn \@@_item:nnn #1#2#3
- {
- \use_none:n #2
- \int_compare:nNnTF {#1} = \c_one
- { \__prg_break:n { \exp_not:n {#3} } }
- { \exp_args:Nf \@@_item:nnn { \int_eval:n { #1 - 1 } } }
- }
-\cs_generate_variant:Nn \seq_item:Nn { c }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
% \begin{macro}
% {
% \seq_mapthread_function:NNN, \seq_mapthread_function:NcN,
@@ -2288,110 +2077,6 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}
-% {
-% \seq_set_from_clist:NN, \seq_set_from_clist:cN,
-% \seq_set_from_clist:Nc, \seq_set_from_clist:cc,
-% \seq_set_from_clist:Nn, \seq_set_from_clist:cn
-% }
-% \begin{macro}
-% {
-% \seq_gset_from_clist:NN, \seq_gset_from_clist:cN,
-% \seq_gset_from_clist:Nc, \seq_gset_from_clist:cc,
-% \seq_gset_from_clist:Nn, \seq_gset_from_clist:cn
-% }
-% Setting a sequence from a comma-separated list is done using a simple
-% mapping.
-% \begin{macrocode}
-\cs_new_protected:Npn \seq_set_from_clist:NN #1#2
- {
- \tl_set:Nx #1
- { \s_@@ \clist_map_function:NN #2 \@@_wrap_item:n }
- }
-\cs_new_protected:Npn \seq_set_from_clist:Nn #1#2
- {
- \tl_set:Nx #1
- { \s_@@ \clist_map_function:nN {#2} \@@_wrap_item:n }
- }
-\cs_new_protected:Npn \seq_gset_from_clist:NN #1#2
- {
- \tl_gset:Nx #1
- { \s_@@ \clist_map_function:NN #2 \@@_wrap_item:n }
- }
-\cs_new_protected:Npn \seq_gset_from_clist:Nn #1#2
- {
- \tl_gset:Nx #1
- { \s_@@ \clist_map_function:nN {#2} \@@_wrap_item:n }
- }
-\cs_generate_variant:Nn \seq_set_from_clist:NN { Nc }
-\cs_generate_variant:Nn \seq_set_from_clist:NN { c , cc }
-\cs_generate_variant:Nn \seq_set_from_clist:Nn { c }
-\cs_generate_variant:Nn \seq_gset_from_clist:NN { Nc }
-\cs_generate_variant:Nn \seq_gset_from_clist:NN { c , cc }
-\cs_generate_variant:Nn \seq_gset_from_clist:Nn { c }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}
-% {\seq_reverse:N, \seq_reverse:c, \seq_greverse:N, \seq_greverse:c}
-% \begin{macro}[aux]{\@@_reverse:NN}
-% \begin{macro}[aux, EXP]{\@@_reverse_item:nwn}
-% Previously, \cs{seq_reverse:N} was coded by collecting the items
-% in reverse order after an \cs{exp_stop_f:} marker.
-% \begin{verbatim}
-% \cs_new_protected:Npn \seq_reverse:N #1
-% {
-% \cs_set_eq:NN \@@_item:n \@@_reverse_item:nw
-% \tl_set:Nf #2 { #2 \exp_stop_f: }
-% }
-% \cs_new:Npn \@@_reverse_item:nw #1 #2 \exp_stop_f:
-% {
-% #2 \exp_stop_f:
-% \@@_item:n {#1}
-% }
-% \end{verbatim}
-% At first, this seems optimal, since we can forget about each item
-% as soon as it is placed after \cs{exp_stop_f:}. Unfortunately,
-% \TeX{}'s usual tail recursion does not take place in this case:
-% since the following \cs{@@_reverse_item:nw} only reads
-% tokens until \cs{exp_stop_f:}, and never reads the
-% |\@@_item:n {#1}| left by the previous call, \TeX{} cannot
-% remove that previous call from the stack, and in particular
-% must retain the various macro parameters in memory, until the
-% end of the replacement text is reached. The stack is thus
-% only flushed after all the \cs{@@_reverse_item:nw} are
-% expanded. Keeping track of the arguments of all those calls
-% uses up a memory quadratic in the length of the sequence.
-% \TeX{} can then not cope with more than a few thousand items.
-%
-% Instead, we collect the items in the argument
-% of \cs{exp_not:n}. The previous calls are cleanly removed
-% from the stack, and the memory consumption becomes linear.
-% \begin{macrocode}
-\cs_new_protected_nopar:Npn \seq_reverse:N
- { \@@_reverse:NN \tl_set:Nx }
-\cs_new_protected_nopar:Npn \seq_greverse:N
- { \@@_reverse:NN \tl_gset:Nx }
-\cs_new_protected:Npn \@@_reverse:NN #1 #2
- {
- \cs_set_eq:NN \@@_tmp:w \@@_item:n
- \cs_set_eq:NN \@@_item:n \@@_reverse_item:nwn
- #1 #2 { #2 \exp_not:n { } }
- \cs_set_eq:NN \@@_item:n \@@_tmp:w
- }
-\cs_new:Npn \@@_reverse_item:nwn #1 #2 \exp_not:n #3
- {
- #2
- \exp_not:n { \@@_item:n {#1} #3 }
- }
-\cs_generate_variant:Nn \seq_reverse:N { c }
-\cs_generate_variant:Nn \seq_greverse:N { c }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
% \begin{macro}{\seq_set_filter:NNn, \seq_gset_filter:NNn}
% \begin{macro}[aux]{\@@_set_filter:NNNn}
% Similar to \cs{seq_map_inline:Nn}, without a
@@ -2439,42 +2124,6 @@
% \subsection{Additions to \pkg{l3skip}}
%
% \begin{macrocode}
-%<@@=dim>
-% \end{macrocode}
-%
-% \begin{macro}[EXP]{\dim_to_pt:n}
-% A copy of the internal function \cs{@@_strip_pt:n}, which should
-% perhaps be eliminated in favor of \cs{dim_to_pt:n}.
-% \begin{macrocode}
-\cs_new_eq:NN \dim_to_pt:n \@@_strip_pt:n
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macro}[EXP]{\dim_to_unit:nn}
-% An analog of \cs{dim_ratio:nn} that produces a decimal number as its
-% result, rather than a rational fraction for use within dimension
-% expressions. The naive implementation as
-% \begin{verbatim}
-% \cs_new:Npn \dim_to_unit:nn #1#2
-% { \dim_to_pt:n { 1pt * \dim_ratio:nn {#1} {#2} } }
-% \end{verbatim}
-% would not ignore trailing tokens (see documentation), so we need a
-% bit more work.
-% \begin{macrocode}
-\cs_new:Npn \dim_to_unit:nn #1#2
- {
- \dim_to_pt:n
- {
- 1pt *
- \dim_ratio:nn
- { \dim_to_pt:n {#1} pt }
- { \dim_to_pt:n {#2} pt }
- }
- }
-% \end{macrocode}
-% \end{macro}
-%
-% \begin{macrocode}
%<@@=skip>
% \end{macrocode}
%
@@ -2667,37 +2316,537 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}{\tl_item:nn, \tl_item:Nn, \tl_item:cn}
-% \begin{macro}[aux]{\@@_item:nn}
-% The idea here is to find the offset of the item from the left, then use
-% a loop to grab the correct item. If the resulting offset is too large,
-% then \cs{quark_if_recursion_tail_stop:n} terminates the loop, and returns
-% nothing at all.
+% \begin{macro}^^A
+% {
+% \tl_set_from_file:Nnn, \tl_set_from_file:cnn,
+% \tl_gset_from_file:Nnn, \tl_gset_from_file:cnn
+% }
+% \begin{macro}[aux]{\@@_set_from_file:NNnn}
+% \begin{macro}[aux]{\@@_from_file_do:w}
+% The approach here is similar to that for doing a rescan, and so the same
+% internals can be reused. Thus the plan is to insert a pair of tokens of
+% the same charcode but different catcodes after the file has been read.
+% This plus \cs{exp_not:N} allows the primitive to be used to carry out
+% a set operation.
+% \begin{macrocode}
+\cs_new_protected_nopar:Npn \tl_set_from_file:Nnn
+ { \@@_set_from_file:NNnn \tl_set:Nn }
+\cs_new_protected_nopar:Npn \tl_gset_from_file:Nnn
+ { \@@_set_from_file:NNnn \tl_gset:Nn }
+\cs_generate_variant:Nn \tl_set_from_file:Nnn { c }
+\cs_generate_variant:Nn \tl_gset_from_file:Nnn { c }
+\cs_new_protected:Npn \@@_set_from_file:NNnn #1#2#3#4
+ {
+ \__file_if_exist:nT {#4}
+ {
+ \group_begin:
+ \exp_args:No \etex_everyeof:D
+ { \c_@@_rescan_marker_tl \exp_not:N }
+ #3 \scan_stop:
+ \exp_after:wN \@@_from_file_do:w
+ \exp_after:wN \prg_do_nothing:
+ \tex_input:D \l__file_internal_name_tl \scan_stop:
+ \exp_args:NNNo \group_end:
+ #1 #2 \l_@@_internal_a_tl
+ }
+ }
+\exp_args:Nno \use:nn
+ { \cs_set_protected:Npn \@@_from_file_do:w #1 }
+ { \c_@@_rescan_marker_tl }
+ { \tl_set:No \l_@@_internal_a_tl {#1} }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
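+% A usage sketch (the file name is illustrative; the second argument
+% holds any set-up, for example category code changes, to apply while
+% reading the file):
+% \begin{verbatim}
+%   \tl_set_from_file:Nnn \l_tmpa_tl { } { data.tex }
+% \end{verbatim}
+%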
+% \begin{macro}^^A
+% {
+% \tl_set_from_file_x:Nnn, \tl_set_from_file_x:cnn,
+% \tl_gset_from_file_x:Nnn, \tl_gset_from_file_x:cnn
+% }
+% \begin{macro}[aux]{\@@_set_from_file_x:NNnn}
+% When reading a file and allowing expansion of the content, the set up
+% only needs to prevent \TeX{} complaining about the end of the file. That
+% is done simply, with a group then used to trap the definition needed.
+% Once the business is done using some scratch space, the tokens can be
+% transferred to the real target.
+% \begin{macrocode}
+\cs_new_protected_nopar:Npn \tl_set_from_file_x:Nnn
+ { \@@_set_from_file_x:NNnn \tl_set:Nn }
+\cs_new_protected_nopar:Npn \tl_gset_from_file_x:Nnn
+ { \@@_set_from_file_x:NNnn \tl_gset:Nn }
+\cs_generate_variant:Nn \tl_set_from_file_x:Nnn { c }
+\cs_generate_variant:Nn \tl_gset_from_file_x:Nnn { c }
+\cs_new_protected:Npn \@@_set_from_file_x:NNnn #1#2#3#4
+ {
+ \__file_if_exist:nT {#4}
+ {
+ \group_begin:
+ \etex_everyeof:D { \exp_not:N }
+ #3 \scan_stop:
+ \tl_set:Nx \l_@@_internal_a_tl
+ { \tex_input:D \l__file_internal_name_tl \c_space_token }
+ \exp_args:NNNo \group_end:
+ #1 #2 \l_@@_internal_a_tl
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \subsubsection{Unicode case changing}
+%
+% The mechanisms needed for case changing are somewhat involved, particularly
+% to allow for all of the special cases. These functions also require the
+% appropriate data extracted from the Unicode documentation (either manually
+% or automatically), which is covered by \pkg{l3unicode-data}.
+%
+% \begin{macro}[rEXP]{\tl_lower_case:n, \tl_upper_case:n, \tl_mixed_case:n}
+% \begin{macro}[rEXP]{\tl_lower_case:nn, \tl_upper_case:nn, \tl_mixed_case:nn}
+% The user level functions here are all wrappers around the internal
+% functions for case changing. Note that \cs{tl_mixed_case:nn} could be
+% done without an internal, but this way the logic is slightly clearer as
+% everything essentially follows the same path.
+% \begin{macrocode}
+\cs_new_nopar:Npn \tl_lower_case:n { \@@_change_case:nnn { lower } { } }
+\cs_new_nopar:Npn \tl_upper_case:n { \@@_change_case:nnn { upper } { } }
+\cs_new_nopar:Npn \tl_mixed_case:n { \@@_mixed_case:nn { } }
+\cs_new_nopar:Npn \tl_lower_case:nn { \@@_change_case:nnn { lower } }
+\cs_new_nopar:Npn \tl_upper_case:nn { \@@_change_case:nnn { upper } }
+\cs_new_nopar:Npn \tl_mixed_case:nn { \@@_mixed_case:nn }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
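+% A brief sketch of intended use (the results indicated in the comments
+% are illustrative):
+% \begin{verbatim}
+%   \tl_set:Nx \l_tmpa_tl { \tl_upper_case:n { hello } }     % HELLO
+%   \tl_set:Nx \l_tmpb_tl { \tl_lower_case:nn { tr } { I } } % dotless i
+% \end{verbatim}
+%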
+% \begin{macro}[aux]{\@@_change_case:nnn}
+% \begin{macro}[aux]{\@@_change_case_loop:wnn}
+% \begin{macro}[aux]{\@@_change_case_N_type:Nwnn}
+% \begin{macro}[aux]{\@@_change_case_group:nwnn}
+% \begin{macro}[aux]{\@@_change_case_space:wnn}
+% \begin{macro}[aux]{\@@_change_case_char:NNNNNNNNn}
+% \begin{macro}[aux]^^A
+% {
+% \@@_change_case_lower_sigma:Nnn ,
+% \@@_change_case_upper_sigma:Nnn ,
+% \@@_change_case_mixed_sigma:Nnn
+% }
+% \begin{macro}[aux]{\@@_change_case_lower_sigma:Nw}
+% \begin{macro}[aux]{\@@_change_case_lower_sigma_loop:Nw}
+% The mechanism for the core conversion of case is based on the approach used
+% in \cs{@@_act:NNNnn}. Thus the idea is to use a loop which will grab
+% the entire token list plus a quark: the latter is used as an end marker and
+% to avoid any brace stripping. Depending on the nature of the first item
+% in the grabbed argument, it can either be processed as a single token,
+% treated as a group or treated as a space (the latter requires special
+% treatment). In contrast to \cs{@@_act:NNNnn}, there is no need for this
+% process to be \texttt{f}-type expandable: things are done using only
+% \texttt{x}-type requirements. Also, for \enquote{normal} tokens there is
+% a bit more work to do here: to allow selection of case matches using
+% character code, it's important that control sequences are filtered out
+% before doing the lookup.
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case:nnn #1#2#3
+ {
+ \@@_change_case_loop:wnn #3
+ \q_recursion_tail \q_recursion_stop {#1} {#2}
+ }
+\cs_new:Npn \@@_change_case_loop:wnn #1 \q_recursion_stop
+ {
+ \tl_if_head_is_N_type:nTF {#1}
+ { \@@_change_case_N_type:Nwnn }
+ {
+ \tl_if_head_is_group:nTF {#1}
+ { \@@_change_case_group:nwnn }
+ { \@@_change_case_space:wnn }
+ }
+ #1 \q_recursion_stop
+ }
+\cs_new:Npn \@@_change_case_N_type:Nwnn #1#2 \q_recursion_stop #3#4
+ {
+ \quark_if_recursion_tail_stop_do:Nn #1 { \use_none:nn }
+ \token_if_cs:NTF #1
+ { \exp_not:N #1 }
+ {
+ \cs_if_exist_use:cF { @@_change_case_ #3 _ #4 :Nnn }
+ { \use_iii:nnn }
+ #1 {#2}
+ {
+ \use:c { @@_change_case_ #3 _ sigma:Nnn } #1 {#2}
+ {
+ \exp_after:wN \@@_change_case_char:NNNNNNNNn
+ \int_use:N \__int_eval:w 1000000 + `#1 \__int_eval_end:
+ #1 {#3}
+ }
+ }
+ }
+ \@@_change_case_loop:wnn #2 \q_recursion_stop {#3} {#4}
+ }
+\cs_new:Npn \@@_change_case_group:nwnn #1#2 \q_recursion_stop
+ {
+ { \exp_not:n {#1} }
+ \@@_change_case_loop:wnn #2 \q_recursion_stop
+ }
+\exp_last_unbraced:NNo \cs_new:Npn \@@_change_case_space:wnn \c_space_tl
+ {
+ \c_space_tl
+ \@@_change_case_loop:wnn
+ }
+% \end{macrocode}
+% Actually look for the char in the appropriate table.
+% \begin{macrocode}
+\cs_new:Npn \__tl_change_case_char:NNNNNNNNn #1#2#3#4#5#6#7#8#9
+ {
+ \exp_args:NNv \str_case:nnF #8
+ { c_@@_ #9 _ #6 _ #7 _tl }
+ { \exp_not:N #8 }
+ }
+% \end{macrocode}
+% If the current char is an upper case sigma, a check is made on the next
+% item in the input. If it is another \texttt{N}-type token then further tests
+% are needed to decide what to do.
% \begin{macrocode}
-\cs_new:Npn \tl_item:nn #1#2
+\cs_new:Npn \@@_change_case_lower_sigma:Nnn #1#2
{
- \exp_args:Nf \@@_item:nn
+ \int_compare:nNnTF { `#1 } = { "03A3 }
{
- \int_eval:n
+ \tl_if_head_is_N_type:nTF {#2}
+ { \@@_change_case_lower_sigma:Nw #2 \q_recursion_stop }
{
- \int_compare:nNnT {#2} < \c_zero
- { \tl_count:n {#1} + \c_one + }
- #2
+ \tl_if_head_is_group:nTF {#2}
+ { \c_@@_std_sigma_tl }
+ { \c_@@_final_sigma_tl }
}
}
- #1
- \q_recursion_tail
- \__prg_break_point:
}
-\cs_new:Npn \@@_item:nn #1#2
+% \end{macrocode}
+% Assuming the next token is not a control sequence, a loop is used to test
+% if the next char is something that can be interpreted as the end of a word.
+% Rather than use all of the Unicode data for this, the simplifying
+% assumption is made that in real text the end of a word will be indicated by
+% a small number of chars. As this may have to be extended over time to other
+% cases, the easiest handling is offered by using the numerical values for
+% these chars.
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_lower_sigma:Nw #1#2 \q_recursion_stop
+ {
+ \token_if_cs:NTF #1
+ { \c_@@_std_sigma_tl }
+ {
+ \exp_after:wN \@@_change_case_lower_sigma_loop:Nw
+ \exp_after:wN #1 \c_@@_after_final_sigma_clist
+ , \q_recursion_tail , \q_recursion_stop
+ }
+ }
+\cs_new:Npn \@@_change_case_lower_sigma_loop:Nw #1#2 ,
+ {
+ \quark_if_recursion_tail_stop_do:nn {#2}
+ { \c_@@_std_sigma_tl }
+ \int_compare:nNnT { `#1 } = { "#2 }
+ { \use_i_delimit_by_q_recursion_stop:nw { \c_@@_final_sigma_tl } }
+ \@@_change_case_lower_sigma_loop:Nw #1
+ }
+\cs_new_eq:NN \@@_change_case_upper_sigma:Nnn \use_iii:nnn
+\cs_new_eq:NN \@@_change_case_mixed_sigma:Nnn \use_iii:nnn
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[aux]{\@@_mixed_case:nn}
+% \begin{macro}[aux]{\@@_mixed_case_loop:wn}
+% \begin{macro}[aux]{\@@_mixed_case_N_type:Nwn}
+% \begin{macro}[aux]{\@@_mixed_case_skip:Nwn}
+% \begin{macro}[aux]{\@@_mixed_case_skip_tidy:nNwn}
+% \begin{macro}[aux]{\@@_mixed_case_group:nwn}
+% \begin{macro}[aux]{\@@_mixed_case_space:wn}
+% Mixed (title) casing needs an entire set of functions to itself. These are
+% more or less the same as those for general case changes but the
+% requirements here are subtly different. What is needed is a loop which
+% looks for the first \enquote{real} char in the input (skipping any
+% pre-letter chars). Once one is found, it is case changed to upper case,
+% after first checking that there is not an entry in the exceptions list. Once that
+% process is done, all remaining chars are lower cased so there is a switch
+% to the normal system. Note that simply grabbing the first token in the
+% input is no good here: it can't handle pre-letter tokens or any special
+% treatment of the first letter found (\emph{e.g.}~words starting with
+% \texttt{i} in Turkish). Spaces at the start of the input are passed
+% through without counting as being the \enquote{start} of the first word,
+% while a brace group forces a switch to the standard lower casing routine.
+% \begin{macrocode}
+\cs_new:Npn \@@_mixed_case:nn #1#2
+ {
+ \@@_mixed_case_loop:wn #2
+ \q_recursion_tail \q_recursion_stop {#1}
+ }
+\cs_new:Npn \@@_mixed_case_loop:wn #1 \q_recursion_stop
+ {
+ \tl_if_head_is_N_type:nTF {#1}
+ { \@@_mixed_case_N_type:Nwn }
+ {
+ \tl_if_head_is_group:nTF {#1}
+ { \@@_mixed_case_group:nwn }
+ { \@@_mixed_case_space:wn }
+ }
+ #1 \q_recursion_stop
+ }
+\cs_new:Npn \@@_mixed_case_N_type:Nwn #1#2 \q_recursion_stop #3
+ {
+ \quark_if_recursion_tail_stop_do:Nn #1 { \use_none:nn }
+ \token_if_cs:NTF #1
+ { \exp_not:N #1 }
+ {
+ \cs_if_exist_use:cF { @@_change_case_mixed_ #3 :Nnn }
+ {
+ \cs_if_exist_use:cF { @@_change_case_upper_ #3 :Nnn }
+ { \use_iii:nnn }
+ }
+ #1 {#2}
+ {
+ \exp_after:wN \@@_mixed_case_skip:Nwn \exp_after:wN #1
+ \c__tl_mixed_skip_clist , \q_recursion_tail , \q_recursion_stop
+ {
+ \exp_args:NNV \str_case:nnF #1 \c_@@_mixed_exceptions_tl
+ {
+ \exp_after:wN \@@_change_case_char:NNNNNNNNn
+ \int_use:N \__int_eval:w 1000000 + `#1 \__int_eval_end:
+ #1 { upper }
+ }
+ }
+ }
+ }
+ \@@_change_case_loop:wnn #2 \q_recursion_stop { lower } {#3}
+ }
+% \end{macrocode}
+% Looking for chars to skip when title casing uses the standard \enquote{loop
+% around a list} approach. If there is a hit, there is a bit of tidying up
+% to do: retain the char and switch the looping system to stick with the
+% title loop rather than the lower case one. That means swapping an
+% auxiliary and removing a trailing |{ lower }|, which is easiest to do
+% with a dedicated function.
+% \begin{macrocode}
+\cs_new:Npn \@@_mixed_case_skip:Nwn #1#2 ,
+ {
+ \quark_if_recursion_tail_stop_do:nn {#2} { \use:n }
+ \int_compare:nNnT { `#1 } = { "#2 }
+ {
+ \use_i_delimit_by_q_recursion_stop:nw
+ {
+ #1
+ \@@_mixed_case_skip_tidy:nNwn
+ }
+ }
+ \@@_mixed_case_skip:Nwn #1
+ }
+\cs_new:Npn \@@_mixed_case_skip_tidy:nNwn #1#2#3 \q_recursion_stop #4
+ {
+ \@@_mixed_case_loop:wn #3 \q_recursion_stop
+ }
+\cs_new:Npn \@@_mixed_case_group:nwn #1#2 \q_recursion_stop
+ {
+ { \exp_not:n {#1} }
+ \@@_change_case_loop:wnn #2 \q_recursion_stop { lower }
+ }
+\exp_last_unbraced:NNo \cs_new:Npn \@@_mixed_case_space:wn \c_space_tl
+ {
+ \c_space_tl
+ \@@_mixed_case_loop:wn
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
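+% A short sketch of the behaviour described above (indicative only):
+% \begin{verbatim}
+%   \tl_set:Nx \l_tmpa_tl { \tl_mixed_case:n { hello~WORLD } } % Hello~world
+% \end{verbatim}
+%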
+% \begin{macro}[aux]^^A
+% {
+% \@@_change_case_lower_tr:Nnn ,
+% \@@_change_case_lower_az:Nnn
+% }
+% \begin{macro}[aux]{\@@_change_case_lower_tr:Nw}
+% \begin{macro}[aux]^^A
+% {
+% \@@_change_case_upper_tr:Nnn ,
+% \@@_change_case_upper_az:Nnn
+% }
+% The Turkic languages need special treatment for dotted-i and dotless-i.
+% The lower casing rule can be expressed in terms of searching first for
+% either a dotless-I or a dotted-I. In the latter case the mapping is
+% easy, but in the former there is a second stage search.
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_lower_tr:Nnn #1#2#3
+ {
+ \int_compare:nNnTF { `#1 } = { "0049 }
+ {
+ \tl_if_head_is_N_type:nTF {#2}
+ { \@@_change_case_lower_tr:Nw #2 \q_recursion_stop }
+ { \c_@@_dotless_i_tl }
+ }
+ {
+ \int_compare:nNnTF { `#1 } = { "0130 }
+ { i }
+ {#3}
+ }
+ }
+\cs_new_nopar:Npn \@@_change_case_lower_az:Nnn
+ { \@@_change_case_lower_tr:Nnn }
+% \end{macrocode}
+% After a dotless-I there may be a dot-above character. If there is then
+% a dotted-i should be produced, otherwise output a dotless-i. When the
+% combination is found both the dotless-I and the dot-above char have to
+% be removed from the input, which is done by the \cs{use_i:nn}
+% (it grabs \cs{@@_change_case_loop:wn} and the dot-above char and
+% discards the latter).
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_lower_tr:Nw #1#2 \q_recursion_stop
+ {
+ \bool_if:nTF
+ {
+ \token_if_cs_p:N #1
+ || ! ( \int_compare_p:nNn { `#1 } = { "0307 } )
+ }
+ { \c_@@_dotless_i_tl }
+ {
+ i
+ \use_i:nn
+ }
+ }
+% \end{macrocode}
+% Upper casing is easier: just one exception with no context.
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_upper_tr:Nnn #1#2#3
+ {
+ \int_compare:nNnTF { `#1 } = { "0069 }
+ { \c_@@_dotted_I_tl }
+ {#3}
+ }
+\cs_new_nopar:Npn \@@_change_case_upper_az:Nnn
+ { \@@_change_case_upper_tr:Nnn }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \begin{macro}[aux]{\@@_change_case_lower_lt:Nnn}
+% \begin{macro}[aux]{\@@_change_case_lower_lt:Nw}
+% \begin{macro}[aux]{\@@_change_case_upper_lt:Nnn}
+% \begin{macro}[aux]{\@@_change_case_upper_lt:Nw}
+% For Lithuanian, the issue to be dealt with is dots over lower case
+% letters: these should be present if there is another accent. That means
+% that there is some work to do when lower casing I and J. The first step
+% is a simple match attempt: \cs{c_@@_accents_lt_tl} contains
+% accented upper case letters which should gain a dot-above char in their
+% lower case form. The second stage is to check for I, J and I-ogonek, and
+% if the current char is a match to look for a following accent. As the
+% current char is still needed in case-changed form, the standard code is
+% inserted before hunting for an accent.
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_lower_lt:Nnn #1#2#3
+ {
+ \exp_args:NNV \str_case:nnF #1 \c_@@_accents_lt_tl
+ {
+ #3
+ \bool_if:nT
+ {
+ \int_compare_p:nNn { `#1 } = { "0049 }
+ || \int_compare_p:nNn { `#1 } = { "004A }
+ || \int_compare_p:nNn { `#1 } = { "012E }
+ }
+ {
+ \tl_if_head_is_N_type:nT {#2}
+ { \@@_change_case_lower_lt:Nw #2 \q_recursion_stop }
+ }
+ }
+ }
+% \end{macrocode}
+% Grab the next char and see if it is one of the accents used in Lithuanian:
+% if it is, add the dot-above char into the output.
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_lower_lt:Nw #1#2 \q_recursion_stop
+ {
+ \bool_if:nT
+ {
+ ! ( \token_if_cs_p:N #1 )
+ &&
+ (
+ \int_compare_p:nNn { `#1 } = { "0300 }
+ || \int_compare_p:nNn { `#1 } = { "0301 }
+ || \int_compare_p:nNn { `#1 } = { "0303 }
+ )
+ }
+ { \c_@@_dot_above_tl }
+ }
+% \end{macrocode}
+% For upper casing, the chars themselves are always converted as normal.
+% The test required here is for a dot-above char after an I, J or I-ogonek.
+% If there is one, it's removed using \cs{use_i:nn} (which preserves the
+% loop and discards the char).
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_upper_lt:Nnn #1#2#3
+ {
+ #3
+ \bool_if:nT
+ {
+ \tl_if_head_is_N_type_p:n {#2}
+ &&
+ (
+ \int_compare_p:nNn { `#1 } = { "0069 }
+ || \int_compare_p:nNn { `#1 } = { "006A }
+ || \int_compare_p:nNn { `#1 } = { "012F }
+ )
+ }
+ { \@@_change_case_upper_lt:Nw #2 \q_recursion_stop }
+ }
+\cs_new:Npn \@@_change_case_upper_lt:Nw #1#2 \q_recursion_stop
{
- \__quark_if_recursion_tail_break:nN {#2} \__prg_break:
- \int_compare:nNnTF {#1} = \c_one
- { \__prg_break:n { \exp_not:n {#2} } }
- { \exp_args:Nf \@@_item:nn { \int_eval:n { #1 - 1 } } }
+ \bool_if:nT
+ {
+ ! ( \token_if_cs_p:N #1 )
+ &&
+ \int_compare_p:nNn { `#1 } = { "0307 }
+ }
+ { \use_i:nn }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \begin{macro}[aux]{\@@_change_case_mixed_nl:Nnn}
+% \begin{macro}[aux]{\@@_change_case_mixed_nl:Nw}
+% For Dutch, there is a single look-ahead test for \texttt{ij} when
+% title casing. If the appropriate letters are found, produce \texttt{IJ}
+% and gobble the \texttt{j}.
+% \begin{macrocode}
+\cs_new:Npn \@@_change_case_mixed_nl:Nnn #1#2
+ {
+ \int_compare:nNnTF { `#1 } = { `i }
+ {
+ I
+ \tl_if_head_is_N_type:nT {#2}
+ { \@@_change_case_mixed_nl:Nw #2 \q_recursion_stop }
+ }
+ }
+\cs_new:Npn \@@_change_case_mixed_nl:Nw #1#2 \q_recursion_stop
+ {
+ \bool_if:nT
+ {
+ ! ( \token_if_cs_p:N #1 )
+ &&
+ \int_compare_p:nNn { `#1 } = { `j }
+ }
+ {
+ J
+ \use_i:nn
+ }
}
-\cs_new_nopar:Npn \tl_item:Nn { \exp_args:No \tl_item:nn }
-\cs_generate_variant:Nn \tl_item:Nn { c }
% \end{macrocode}
% \end{macro}
% \end{macro}
@@ -2812,6 +2961,24 @@
% \end{macro}
% \end{macro}
%
+% \subsection{Deprecated candidates}
+%
+% \begin{macro}
+% {
+% \fp_set_from_dim:Nn, \fp_set_from_dim:cn,
+% \fp_gset_from_dim:Nn, \fp_gset_from_dim:cn
+% }
+% Deprecated 2014-07-17.
+% \begin{macrocode}
+\cs_new_protected:Npn \fp_set_from_dim:Nn #1#2
+ { \fp_set:Nn #1 { \dim_to_fp:n {#2} } }
+\cs_new_protected:Npn \fp_gset_from_dim:Nn #1#2
+ { \fp_gset:Nn #1 { \dim_to_fp:n {#2} } }
+\cs_generate_variant:Nn \fp_set_from_dim:Nn { c }
+\cs_generate_variant:Nn \fp_gset_from_dim:Nn { c }
+% \end{macrocode}
+% \end{macro}
+%
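+% In new code the same effect is obtained directly with a floating point
+% assignment; a minimal sketch using a scratch variable:
+% \begin{verbatim}
+%   \fp_set:Nn \l_tmpa_fp { \dim_to_fp:n { 1cm } }
+% \end{verbatim}
+%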
% \begin{macrocode}
%</initex|package>
% \end{macrocode}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3clist.dtx b/Master/texmf-dist/source/latex/l3kernel/l3clist.dtx
index dcd332df4ae..d0389fc311c 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3clist.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3clist.dtx
@@ -39,7 +39,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3clist.dtx 4728 2014-05-04 13:25:37Z joseph $
+\GetIdInfo$Id: l3clist.dtx 5237 2014-07-19 15:09:54Z bruno $
{L3 Comma separated lists}
%</driver|package>
%<*driver>
@@ -104,6 +104,20 @@
% initially contain no items.
% \end{function}
%
+% \begin{function}[added = 2014-07-05]
+% {
+% \clist_const:Nn, \clist_const:Nx,
+% \clist_const:cn, \clist_const:cx
+% }
+% \begin{syntax}
+% \cs{clist_const:Nn} \meta{clist~var} \Arg{comma list}
+% \end{syntax}
+% Creates a new constant \meta{clist~var} or raises an error
+% if the name is already taken. The value of the
+% \meta{clist~var} will be set globally to the
+% \meta{comma list}.
+% \end{function}
+%
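+% A minimal sketch (the constant name is illustrative only):
+% \begin{verbatim}
+%   \clist_const:Nn \c_my_settings_clist { one , two , three }
+% \end{verbatim}
+%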
% \begin{function}
% {\clist_clear:N, \clist_clear:c, \clist_gclear:N, \clist_gclear:c}
% \begin{syntax}
@@ -139,6 +153,21 @@
% \meta{comma list_2}.
% \end{function}
%
+% \begin{function}[added = 2014-07-17]
+% {
+% \clist_set_from_seq:NN, \clist_set_from_seq:cN,
+% \clist_set_from_seq:Nc, \clist_set_from_seq:cc,
+% \clist_gset_from_seq:NN, \clist_gset_from_seq:cN,
+% \clist_gset_from_seq:Nc, \clist_gset_from_seq:cc
+% }
+% \begin{syntax}
+% \cs{clist_set_from_seq:NN} \meta{comma list} \meta{sequence}
+% \end{syntax}
+% Converts the data in the \meta{sequence} into a \meta{comma list}:
+% the original \meta{sequence} is unchanged.
+% Items which contain either spaces or commas are surrounded by braces.
+% \end{function}
+%
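+% A brief usage sketch (scratch variables are used for illustration):
+% \begin{verbatim}
+%   \seq_set_from_clist:Nn \l_tmpa_seq { a , {b,c} , d }
+%   \clist_set_from_seq:NN \l_tmpa_clist \l_tmpa_seq
+% \end{verbatim}
+% Here the item |b,c| ends up braced in the resulting comma list, as it
+% contains a comma.
+%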
% \begin{function}
% {
% \clist_concat:NNN, \clist_concat:ccc,
@@ -264,6 +293,30 @@
% \end{texnote}
% \end{function}
%
+% \begin{function}[added = 2014-07-18]
+% {
+% \clist_reverse:N, \clist_reverse:c,
+% \clist_greverse:N, \clist_greverse:c
+% }
+% \begin{syntax}
+% \cs{clist_reverse:N} \meta{comma list}
+% \end{syntax}
+% Reverses the order of items stored in the \meta{comma list}.
+% \end{function}
+%
+% \begin{function}[added = 2014-07-18]{\clist_reverse:n}
+% \begin{syntax}
+% \cs{clist_reverse:n} \Arg{comma list}
+% \end{syntax}
+% Leaves the items in the \meta{comma list} in the input stream in
+% reverse order. Braces and spaces are preserved by this process.
+% \begin{texnote}
+% The result is returned within \tn{exp_not:n}, which means that the
+% comma list will not expand further when appearing in an
+% \texttt{x}-type argument expansion.
+% \end{texnote}
+% \end{function}
+%
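+% For example, a minimal sketch:
+% \begin{verbatim}
+%   \clist_reverse:n { a , b , c }
+% \end{verbatim}
+% leaves the items in the order |c,b,a| in the input stream.
+%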
% \section{Comma list conditionals}
%
% \begin{function}[EXP,pTF]{\clist_if_empty:N, \clist_if_empty:c}
@@ -274,6 +327,19 @@
% Tests if the \meta{comma list} is empty (containing no items).
% \end{function}
%
+% \begin{function}[EXP, pTF, added = 2014-07-05]{\clist_if_empty:n}
+% \begin{syntax}
+% \cs{clist_if_empty_p:n} \Arg{comma list}
+% \cs{clist_if_empty:nTF} \Arg{comma list} \Arg{true code} \Arg{false code}
+% \end{syntax}
+% Tests if the \meta{comma list} is empty (containing no items).
+% The rules for space trimming are as for other \texttt{n}-type
+% comma-list functions, hence the comma list |{~,~,,~}| (without
+% outer braces) is empty, while |{~,{},}| (without outer braces)
+% contains one element, which happens to be empty: the comma-list
+% is not empty.
+% \end{function}
+%
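+% A short sketch of the two boundary cases described above:
+% \begin{verbatim}
+%   \clist_if_empty:nTF { ~ , ~ , , ~ } { true } { false } % true branch taken
+%   \clist_if_empty:nTF { ~ , {} , }    { true } { false } % false branch taken
+% \end{verbatim}
+%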
% \begin{function}[updated = 2011-09-06, TF]
% {
% \clist_if_in:Nn, \clist_if_in:NV, \clist_if_in:No,
@@ -573,6 +639,29 @@
% Spaces are removed from both sides of each item.
% \end{function}
%
+% \section{Using a single item}
+%
+% \begin{function}[added = 2014-07-17, EXP]
+% {\clist_item:Nn, \clist_item:cn, \clist_item:nn}
+% \begin{syntax}
+% \cs{clist_item:Nn} \meta{comma list} \Arg{integer expression}
+% \end{syntax}
+% Indexing items in the \meta{comma list} from~$1$ at the top (left), this
+% function will evaluate the \meta{integer expression} and leave the
+% appropriate item from the comma list in the input stream. If the
+% \meta{integer expression} is negative, indexing occurs from the
+% bottom (right) of the comma list. When the \meta{integer expression}
+% is larger than the number of items in the \meta{comma list} (as
+% calculated by \cs{clist_count:N}) then the function will expand to
+% nothing.
+% \begin{texnote}
+% The result is returned within the \tn{unexpanded}
+% primitive (\cs{exp_not:n}), which means that the \meta{item}
+% will not expand further when appearing in an \texttt{x}-type
+% argument expansion.
+% \end{texnote}
+% \end{function}
+%
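+% A brief sketch of the indexing behaviour:
+% \begin{verbatim}
+%   \clist_item:nn { a , b , c } { 2 }   % b
+%   \clist_item:nn { a , b , c } { -1 }  % c
+%   \clist_item:nn { a , b , c } { 4 }   % expands to nothing
+% \end{verbatim}
+%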
% \section{Viewing comma lists}
%
% \begin{function}[updated = 2012-09-09]{\clist_show:N, \clist_show:c}
@@ -658,6 +747,21 @@
% \end{macrocode}
% \end{macro}
%
+% \begin{macro}
+% {
+% \clist_const:Nn, \clist_const:cn,
+% \clist_const:Nx, \clist_const:cx
+% }
+% Creating and initializing a constant comma list is done in a way
+% similar to \cs{clist_set:Nn} and \cs{clist_gset:Nn}, being careful
+% to strip spaces.
+% \begin{macrocode}
+\cs_new_protected:Npn \clist_const:Nn #1#2
+ { \tl_const:Nx #1 { \@@_trim_spaces:n {#2} } }
+\cs_generate_variant:Nn \clist_const:Nn { c , Nx , cx }
+% \end{macrocode}
+% \end{macro}
+%
% \begin{macro}{\clist_clear:N, \clist_clear:c}
% \UnitTested
% \begin{macro}{\clist_gclear:N, \clist_gclear:c}
@@ -709,6 +813,61 @@
% \end{macro}
% \end{macro}
%
+% \begin{macro}
+% {
+% \clist_set_from_seq:NN, \clist_set_from_seq:cN,
+% \clist_set_from_seq:Nc, \clist_set_from_seq:cc
+% }
+% \UnitTested
+% \begin{macro}
+% {
+% \clist_gset_from_seq:NN, \clist_gset_from_seq:cN,
+% \clist_gset_from_seq:Nc, \clist_gset_from_seq:cc
+% }
+% \UnitTested
+% \begin{macro}[aux]{\@@_set_from_seq:NNNN}
+% \begin{macro}[aux]{\@@_wrap_item:n}
+% \begin{macro}[aux]{\@@_set_from_seq:w}
+% Setting a comma list from a sequence is done using a simple
+% mapping. We wrap most items with \cs{exp_not:n}, and a comma. Items which
+% contain a comma or a space are surrounded by an extra set of braces. The
+% first comma must be removed, except in the case of an empty comma-list.
+% \begin{macrocode}
+\cs_new_protected:Npn \clist_set_from_seq:NN
+ { \@@_set_from_seq:NNNN \clist_clear:N \tl_set:Nx }
+\cs_new_protected:Npn \clist_gset_from_seq:NN
+ { \@@_set_from_seq:NNNN \clist_gclear:N \tl_gset:Nx }
+\cs_new_protected:Npn \@@_set_from_seq:NNNN #1#2#3#4
+ {
+ \seq_if_empty:NTF #4
+ { #1 #3 }
+ {
+ #2 #3
+ {
+ \exp_last_unbraced:Nf \use_none:n
+ { \seq_map_function:NN #4 \@@_wrap_item:n }
+ }
+ }
+ }
+\cs_new:Npn \@@_wrap_item:n #1
+ {
+ ,
+ \tl_if_empty:oTF { \@@_set_from_seq:w #1 ~ , #1 ~ }
+ { \exp_not:n {#1} }
+ { \exp_not:n { {#1} } }
+ }
+\cs_new:Npn \@@_set_from_seq:w #1 , #2 ~ { }
+\cs_generate_variant:Nn \clist_set_from_seq:NN { Nc }
+\cs_generate_variant:Nn \clist_set_from_seq:NN { c , cc }
+\cs_generate_variant:Nn \clist_gset_from_seq:NN { Nc }
+\cs_generate_variant:Nn \clist_gset_from_seq:NN { c , cc }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
% \begin{macro}{\clist_concat:NNN, \clist_concat:ccc}
% \UnitTested
% \begin{macro}{\clist_gconcat:NNN, \clist_gconcat:ccc}
@@ -1154,6 +1313,61 @@
% \end{macro}
% \end{macro}
%
+% \begin{macro}
+% {
+% \clist_reverse:N, \clist_reverse:c,
+% \clist_greverse:N, \clist_greverse:c
+% }
+% Use \cs{clist_reverse:n} in an \texttt{x}-expanding assignment. The
+% extra work that \cs{clist_reverse:n} does to preserve braces and
+% spaces would not be needed for the well-controlled case of
+% \texttt{N}-type comma lists, but the slow-down is not too bad.
+% \begin{macrocode}
+\cs_new_protected:Npn \clist_reverse:N #1
+ { \tl_set:Nx #1 { \exp_args:No \clist_reverse:n {#1} } }
+\cs_new_protected:Npn \clist_greverse:N #1
+ { \tl_gset:Nx #1 { \exp_args:No \clist_reverse:n {#1} } }
+\cs_generate_variant:Nn \clist_reverse:N { c }
+\cs_generate_variant:Nn \clist_greverse:N { c }
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[EXP]{\clist_reverse:n}
+% \begin{macro}[aux, EXP]{\@@_reverse:wwNww, \@@_reverse_end:ww}
+% The reversed token list is built one item at a time, and stored
+% between \cs{q_stop} and \cs{q_mark}, in the form of |?| followed by
+% zero or more instances of ``\meta{item}|,|''. We start from a comma
+% list ``\meta{item_1}|,|\ldots|,|\meta{item_n}''. During the loop,
+% the auxiliary \cs{@@_reverse:wwNww} receives ``|?|\meta{item_i}'' as
+% |#1|, ``\meta{item_{i+1}}|,|\ldots|,|\meta{item_n}'' as |#2|,
+% \cs{@@_reverse:wwNww} as |#3|, what remains until \cs{q_stop} as
+% |#4|, and ``\meta{item_{i-1}}|,|\ldots|,|\meta{item_1}|,|'' as |#5|.
+% The auxiliary moves |#1| just before |#5|, with a comma, and calls
+% itself (|#3|). After the last item is moved, \cs{@@_reverse:wwNww}
+% receives ``\cs{q_mark} \cs{@@_reverse:wwNww} |!|'' as its argument
+% |#1|, thus \cs{@@_reverse_end:ww} as its argument |#3|. This second
+% auxiliary cleans up until the marker~|!|, removes the trailing comma
+% (introduced when the first item was moved after \cs{q_stop}), and
+% leaves its argument~|#2| within \cs{exp_not:n}. There is also a
+% need to remove a leading comma, hence \cs{exp_not:o} and
+% \cs{use_none:n}.
+% \begin{macrocode}
+\cs_new:Npn \clist_reverse:n #1
+ {
+ \@@_reverse:wwNww ? #1 ,
+ \q_mark \@@_reverse:wwNww ! ,
+ \q_mark \@@_reverse_end:ww
+ \q_stop ? \q_mark
+ }
+\cs_new:Npn \@@_reverse:wwNww
+ #1 , #2 \q_mark #3 #4 \q_stop ? #5 \q_mark
+ { #3 ? #2 \q_mark #3 #4 \q_stop #1 , #5 \q_mark }
+\cs_new:Npn \@@_reverse_end:ww #1 ! #2 , \q_mark
+ { \exp_not:o { \use_none:n #2 } }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
% \subsection{Comma list conditionals}
%
% \begin{macro}[pTF]{\clist_if_empty:N, \clist_if_empty:c}
@@ -1165,6 +1379,39 @@
% \end{macrocode}
% \end{macro}
%
+% \begin{macro}[EXP, pTF]{\clist_if_empty:n}
+% \begin{macro}[aux, EXP]{\@@_if_empty_n:w}
+% \begin{macro}[aux, EXP]{\@@_if_empty_n:wNw}
+% As usual, we insert a token (here |?|) before grabbing
+% any argument: this avoids losing braces. The argument
+% of \cs{tl_if_empty:oTF} is empty if |#1| is |?| followed
+% by blank spaces (besides, this particular variant of
+% the emptiness test is optimized). If the item of the
+% comma list is blank, grab the next one. As soon as one
+% item is non-blank, exit: the second auxiliary will grab
+% \cs{prg_return_false:} as |#2|, unless every item in
+% the comma list was blank and the loop actually got broken
+% by the trailing |\q_mark \prg_return_false:| item.
+% \begin{macrocode}
+\prg_new_conditional:Npnn \clist_if_empty:n #1 { p , T , F , TF }
+ {
+ \@@_if_empty_n:w ? #1
+ , \q_mark \prg_return_false:
+ , \q_mark \prg_return_true:
+ \q_stop
+ }
+\cs_new:Npn \@@_if_empty_n:w #1 ,
+ {
+ \tl_if_empty:oTF { \use_none:nn #1 ? }
+ { \@@_if_empty_n:w ? }
+ { \@@_if_empty_n:wNw }
+ }
+\cs_new:Npn \@@_if_empty_n:wNw #1 \q_mark #2#3 \q_stop {#2}
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
% \begin{macro}[TF]
% {
% \clist_if_in:Nn, \clist_if_in:NV, \clist_if_in:No,
@@ -1462,6 +1709,99 @@
% \end{macro}
% \end{macro}
%
+% \subsection{Using a single item}
+%
+% \begin{macro}{\clist_item:Nn, \clist_item:cn}
+% \begin{macro}[aux]{\@@_item:nnNn}
+% \begin{macro}[aux]{\@@_item_N_loop:nw}
+% To avoid needing to test the end of the list at each step,
+% we first compute the \meta{length} of the list. If the item number
+% is~$0$, less than $-\meta{length}$, or more than $\meta{length}$,
+% the result is empty. If it is negative, but not less than $-\meta{length}$,
+% add $\meta{length}+1$ to the item number before performing the loop.
+% The loop itself is very simple, return the item if the counter
+% reached~$1$, otherwise, decrease the counter and repeat.
+% \begin{macrocode}
+\cs_new:Npn \clist_item:Nn #1#2
+ {
+ \exp_args:Nfo \@@_item:nnNn
+ { \clist_count:N #1 }
+ #1
+ \@@_item_N_loop:nw
+ {#2}
+ }
+\cs_new:Npn \@@_item:nnNn #1#2#3#4
+ {
+ \int_compare:nNnTF {#4} < \c_zero
+ {
+ \int_compare:nNnTF {#4} < { - #1 }
+ { \use_none_delimit_by_q_stop:w }
+ { \exp_args:Nf #3 { \int_eval:n { #4 + \c_one + #1 } } }
+ }
+ {
+ \int_compare:nNnTF {#4} > {#1}
+ { \use_none_delimit_by_q_stop:w }
+ { #3 {#4} }
+ }
+ { } , #2 , \q_stop
+ }
+\cs_new:Npn \@@_item_N_loop:nw #1 #2,
+ {
+ \int_compare:nNnTF {#1} = \c_zero
+ { \use_i_delimit_by_q_stop:nw { \exp_not:n {#2} } }
+ { \exp_args:Nf \@@_item_N_loop:nw { \int_eval:n { #1 - 1 } } }
+ }
+\cs_generate_variant:Nn \clist_item:Nn { c }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}{\clist_item:nn}
+% \begin{macro}[aux]{
+% \@@_item_n:nw,
+% \@@_item_n_loop:nw,
+% \@@_item_n_end:n,
+% \@@_item_n_strip:w}
+% This starts in the same way as \cs{clist_item:Nn} by counting the items
+% of the comma list. The final item should be space-trimmed before being
+% brace-stripped, hence we insert a couple of odd-looking
+% \cs{prg_do_nothing:} to avoid losing braces. Blank items are ignored.
+% \begin{macrocode}
+\cs_new:Npn \clist_item:nn #1#2
+ {
+ \exp_args:Nf \@@_item:nnNn
+ { \clist_count:n {#1} }
+ {#1}
+ \@@_item_n:nw
+ {#2}
+ }
+\cs_new:Npn \@@_item_n:nw #1
+ { \@@_item_n_loop:nw {#1} \prg_do_nothing: }
+\cs_new:Npn \@@_item_n_loop:nw #1 #2,
+ {
+ \exp_args:No \tl_if_blank:nTF {#2}
+ { \@@_item_n_loop:nw {#1} \prg_do_nothing: }
+ {
+ \int_compare:nNnTF {#1} = \c_zero
+ { \exp_args:No \@@_item_n_end:n {#2} }
+ {
+ \exp_args:Nf \@@_item_n_loop:nw
+ { \int_eval:n { #1 - 1 } }
+ \prg_do_nothing:
+ }
+ }
+ }
+\cs_new:Npn \@@_item_n_end:n #1 #2 \q_stop
+ {
+ \__tl_trim_spaces:nn { \q_mark #1 }
+ { \exp_last_unbraced:No \@@_item_n_strip:w } ,
+ }
+\cs_new:Npn \@@_item_n_strip:w #1 , { \exp_not:n {#1} }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
% \subsection{Viewing comma lists}
%
% \begin{macro}{\clist_show:N, \clist_show:c}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3doc.dtx b/Master/texmf-dist/source/latex/l3kernel/l3doc.dtx
index 0419b628342..34acef1916e 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3doc.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3doc.dtx
@@ -56,7 +56,7 @@ Do not distribute a modified version of this file.
\postamble
\endpostamble
\generate{\file{l3doc.cls}{\from{l3doc.dtx}{class,cfg}}}
-\generate{\file{l3doc.ist}{\from{l3doc.dtx}{docist}}}
+%\generate{\file{l3doc.ist}{\from{l3doc.dtx}{docist}}}
\ifx\fmtname\nameofplainTeX
\expandafter\endbatchfile
\else
@@ -78,7 +78,7 @@ Do not distribute a modified version of this file.
%</driver|class>
%
%<*driver|class>
-\GetIdInfo$Id: l3doc.dtx 4728 2014-05-04 13:25:37Z joseph $
+\GetIdInfo$Id: l3doc.dtx 5204 2014-07-15 09:56:33Z mittelba $
{L3 Experimental documentation class}
%</driver|class>
%
@@ -2389,7 +2389,7 @@ Do not distribute a modified version of this file.
\msg_new:nnn {l3doc} {print-index-howto}
{
Generate~the~index~by~executing\\
- \iow_indent:n { makeindex~-s~l3doc.ist~-o~\c_job_name_tl .ind~\c_job_name_tl .idx }
+ \iow_indent:n { makeindex~-s~gind.ist~-o~\c_job_name_tl .ind~\c_job_name_tl .idx }
}
\tl_gput_right:Nn \PrintIndex { \AtEndDocument{ \msg_info:nn {l3doc} {print-index-howto} } }
% \end{macrocode}
@@ -2552,11 +2552,14 @@ Do not distribute a modified version of this file.
%
% Will: Do we need this?
%
+% Frank: at the moment we do not distribute or generate this file.
+% gind.ist is used instead.
+%
% \begin{macrocode}
%<*docist>
actual '='
quote '!'
-level '#'
+level '>'
preamble
"\n \\begin{theindex} \n \\makeatletter\\scan@allowedfalse\n"
postamble
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3drivers.dtx b/Master/texmf-dist/source/latex/l3kernel/l3drivers.dtx
index 9a4bdfece5e..941400bb69a 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3drivers.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3drivers.dtx
@@ -38,7 +38,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3drivers.dtx 4748 2014-05-06 10:57:07Z joseph $
+\GetIdInfo$Id: l3drivers.dtx 5203 2014-07-15 08:59:02Z joseph $
{L3 Experimental drivers}
%</driver|package>
%<*driver>
@@ -386,9 +386,9 @@
\@@_absolute_lengths:n
{
0~
- \__dim_strip_bp:n { \box_dp:N #1 } ~
- \__dim_strip_bp:n { \box_wd:N #1 } ~
- \__dim_strip_bp:n { - \box_ht:N #1 - \box_dp:N #1 } ~
+ \dim_to_decimal_in_bp:n { \box_dp:N #1 } ~
+ \dim_to_decimal_in_bp:n { \box_wd:N #1 } ~
+ \dim_to_decimal_in_bp:n { - \box_ht:N #1 - \box_dp:N #1 } ~
rectclip
}
}
@@ -397,9 +397,9 @@
\@@_literal:n
{
0~
- \__dim_strip_bp:n { - \box_dp:N #1 } ~
- \__dim_strip_bp:n { \box_wd:N #1 } ~
- \__dim_strip_bp:n { \box_ht:N #1 + \box_dp:N #1 } ~
+ \dim_to_decimal_in_bp:n { - \box_dp:N #1 } ~
+ \dim_to_decimal_in_bp:n { \box_wd:N #1 } ~
+ \dim_to_decimal_in_bp:n { \box_ht:N #1 + \box_dp:N #1 } ~
re~W~n
}
%</dvipdfmx|pdfmode|xdvipdfmx>
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3expan.dtx b/Master/texmf-dist/source/latex/l3kernel/l3expan.dtx
index 6d920d84956..658d8e1861c 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3expan.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3expan.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3expan.dtx 4712 2014-04-30 08:17:49Z joseph $
+\GetIdInfo$Id: l3expan.dtx 5146 2014-06-16 13:12:18Z joseph $
{L3 Argument expansion}
%</driver|package>
%<*driver>
@@ -1661,38 +1661,6 @@
% \end{macro}
% \end{macro}
%
-%\subsection{Variants which cannot be created earlier}
-%
-% \begin{macro}[EXP,pTF]
-% {\str_if_eq:Vn, \str_if_eq:on, \str_if_eq:nV, \str_if_eq:no, \str_if_eq:VV}
-% \begin{macro}[EXP]{\str_case:on}
-% \begin{macro}[EXP, TF]{\str_case:on}
-% These cannot come earlier as they need \cs{cs_generate_variant:Nn}.
-% \begin{macrocode}
-\cs_generate_variant:Nn \str_if_eq_p:nn { V , o }
-\cs_generate_variant:Nn \str_if_eq_p:nn { nV , no , VV }
-\cs_generate_variant:Nn \str_if_eq:nnT { V , o }
-\cs_generate_variant:Nn \str_if_eq:nnT { nV , no , VV }
-\cs_generate_variant:Nn \str_if_eq:nnF { V , o }
-\cs_generate_variant:Nn \str_if_eq:nnF { nV , no , VV }
-\cs_generate_variant:Nn \str_if_eq:nnTF { V , o }
-\cs_generate_variant:Nn \str_if_eq:nnTF { nV , no , VV }
-\cs_generate_variant:Nn \str_case:nn { o }
-\cs_generate_variant:Nn \str_case:nnT { o }
-\cs_generate_variant:Nn \str_case:nnF { o }
-\cs_generate_variant:Nn \str_case:nnTF { o }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% \begin{macro}{\str_case:onn}
-% Deprecated 2013-07-15.
-% \begin{macrocode}
-\cs_new_eq:NN \str_case:onn \str_case:onF
-% \end{macrocode}
-% \end{macro}
-%
% \begin{macrocode}
%</initex|package>
% \end{macrocode}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3file.dtx b/Master/texmf-dist/source/latex/l3kernel/l3file.dtx
index 2c9210f8d76..a6a43277ac0 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3file.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3file.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3file.dtx 4890 2014-05-26 20:59:31Z joseph $
+\GetIdInfo$Id: l3file.dtx 5188 2014-07-09 12:02:22Z will $
{L3 File and I/O operations}
%</driver|package>
%<*driver>
@@ -318,7 +318,7 @@
%
% \section{Writing to files}
%
-% \begin{function}[updated = 2012-06-05]{\iow_now:Nn, \iow_now:Nx}
+% \begin{function}[updated = 2012-06-05]{\iow_now:Nn, \iow_now:Nx, \iow_now:cn, \iow_now:cx}
% \begin{syntax}
% \cs{iow_now:Nn} \meta{stream} \Arg{tokens}
% \end{syntax}
@@ -343,7 +343,7 @@
% file immediately: it is a dedicated version of \cs{iow_now:Nn}.
% \end{function}
%
-% \begin{function}{\iow_shipout:Nn, \iow_shipout:Nx}
+% \begin{function}{\iow_shipout:Nn, \iow_shipout:Nx, \iow_shipout:cn, \iow_shipout:cx}
% \begin{syntax}
% \cs{iow_shipout:Nn} \meta{stream} \Arg{tokens}
% \end{syntax}
@@ -355,7 +355,7 @@
% (\emph{cf.}~\cs{iow_shipout_x:Nn}).
% \end{function}
%
-% \begin{function}[updated = 2012-09-08]{\iow_shipout_x:Nn, \iow_shipout_x:Nx}
+% \begin{function}[updated = 2012-09-08]{\iow_shipout_x:Nn, \iow_shipout_x:Nx, \iow_shipout_x:cn, \iow_shipout_x:cx}
% \begin{syntax}
% \cs{iow_shipout_x:Nn} \meta{stream} \Arg{tokens}
% \end{syntax}
@@ -506,7 +506,10 @@
% \end{variable}
%
% \begin{variable}{\l__file_internal_name_tl}
-% Used to return the full name of a file for internal use.
+% Used to return the full name of a file for internal use. This is
+% set by \cs{file_if_exist:n(TF)} and \cs{__file_if_exist:nT}, and
+% the value may then be used to load a file directly provided no
+% further operations intervene.
% \end{variable}
%
% \begin{function}[added = 2012-02-09]{\__file_name_sanitize:nn}
@@ -772,18 +775,17 @@
{ \@@_input:V \l_@@_internal_name_tl }
}
% \end{macrocode}
-% This code is spun out as a separate function so it is available
-% for other kernel file operations which have the same logic.
+% This code is spun out as a separate function to encapsulate the
+% error message into an easy-to-reuse form.
% \begin{macrocode}
\cs_new_protected:Npn \@@_if_exist:nT #1#2
{
- \file_add_path:nN {#1} \l_@@_internal_name_tl
- \quark_if_no_value:NTF \l_@@_internal_name_tl
+ \file_if_exist:nTF {#1}
+ {#2}
{
\@@_name_sanitize:nn {#1}
{ \__msg_kernel_error:nnx { kernel } { file-not-found } }
}
- { #2 }
}
\cs_new_protected:Npn \@@_input:n #1
{
@@ -1265,28 +1267,28 @@
%
% \subsubsection{Deferred writing}
%
-% \begin{macro}{\iow_shipout_x:Nn, \iow_shipout_x:Nx}
+% \begin{macro}{\iow_shipout_x:Nn, \iow_shipout_x:Nx, \iow_shipout_x:cn, \iow_shipout_x:cx}
% First the easy part, this is the primitive, which expects its
% argument to be braced.
% \begin{macrocode}
\cs_new_protected:Npn \iow_shipout_x:Nn #1#2
{ \tex_write:D #1 {#2} }
-\cs_generate_variant:Nn \iow_shipout_x:Nn { Nx }
+\cs_generate_variant:Nn \iow_shipout_x:Nn { c, Nx, cx }
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}{\iow_shipout:Nn, \iow_shipout:Nx}
+% \begin{macro}{\iow_shipout:Nn, \iow_shipout:Nx, \iow_shipout:cn, \iow_shipout:cx}
% With \eTeX{} available deferred writing without expansion is easy.
% \begin{macrocode}
\cs_new_protected:Npn \iow_shipout:Nn #1#2
{ \tex_write:D #1 { \exp_not:n {#2} } }
-\cs_generate_variant:Nn \iow_shipout:Nn { Nx }
+\cs_generate_variant:Nn \iow_shipout:Nn { c, Nx, cx }
% \end{macrocode}
% \end{macro}
%
% \subsubsection{Immediate writing}
%
-% \begin{macro}{\iow_now:Nn, \iow_now:Nx}
+% \begin{macro}{\iow_now:Nn,\iow_now:Nx,\iow_now:cn,\iow_now:cx}
% This routine writes the second argument onto the output stream without
% expansion. If this stream isn't open, the output goes to the terminal
% instead. If the first argument is no output stream at all, we get an
@@ -1297,7 +1299,7 @@
% \begin{macrocode}
\cs_new_protected:Npn \iow_now:Nn #1#2
{ \tex_immediate:D \tex_write:D #1 { \exp_not:n {#2} } }
-\cs_generate_variant:Nn \iow_now:Nn { Nx }
+\cs_generate_variant:Nn \iow_now:Nn { c, Nx, cx }
% \end{macrocode}
% \end{macro}
%
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3format.ins b/Master/texmf-dist/source/latex/l3kernel/l3format.ins
index defd3b714c3..9f0a08405a1 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3format.ins
+++ b/Master/texmf-dist/source/latex/l3kernel/l3format.ins
@@ -46,6 +46,7 @@ Do not distribute a modified version of this file.
\from{l3expan.dtx} {initex}
\from{l3tl.dtx} {initex}
\from{l3seq.dtx} {initex}
+ \from{l3str.dtx} {initex}
% ======== FORMAT ONLY =========
\from{l3alloc.dtx} {initex}
% ==============================
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3fp-expo.dtx b/Master/texmf-dist/source/latex/l3kernel/l3fp-expo.dtx
index ec7ac32602d..0bcc77eb831 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3fp-expo.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3fp-expo.dtx
@@ -36,7 +36,7 @@
%
%<*driver>
\documentclass[full]{l3doc}
-\GetIdInfo$Id: l3fp-expo.dtx 4889 2014-05-26 19:59:25Z joseph $
+\GetIdInfo$Id: l3fp-expo.dtx 5191 2014-07-10 17:31:12Z bruno $
{L3 Floating-point exponential-related functions}
\begin{document}
\DocInput{\jobname.dtx}
@@ -85,7 +85,8 @@
% number, which we write in the form $a\cdot 10^{b}$ with $a\in[0.1,1)$.
%
% \emph{The rest of this section is actually not in sync with the code.
-% Or is the code not in sync with the section?}
+% Or is the code not in sync with the section? In the current code,
+% $c\in [1,10]$ will be such that $0.7\leq ac < 1.4$.}
%
% We are given a positive normal number, of the form $a\cdot 10^{b}$
% with $a\in[0.1,1)$. To compute its logarithm, we find a small integer
@@ -229,7 +230,7 @@
% \end{macro}
%
% \begin{macro}[aux, EXP]{\@@_ln_x_ii:wnnnn}
-% We have thus found $c$. It is chosen such that $0.7\leq ac < 1.4$
+% We have thus found $c \in [1,10]$ such that $0.7\leq ac < 1.4$
% in all cases. Compute $ 1 + x = 1 + ac \in [1.7,2.4)$.
% \begin{macrocode}
\cs_new:Npn \@@_ln_x_ii:wnnnn #1; #2#3#4#5
@@ -240,12 +241,13 @@
\exp_after:wN \@@_ln_x_iv:wnnnnnnnn
\int_use:N \__int_eval:w
\exp_after:wN \@@_ln_x_iii_var:NNNNNw
- \int_use:N \__int_eval:w 9999 9999 + #1*#2#3 +
- \exp_after:wN \@@_ln_x_iii:NNNNNw
- \int_use:N \__int_eval:w 1 0000 0000 + #1*#4#5 ;
+ \int_use:N \__int_eval:w 9999 9990 + #1*#2#3 +
+ \exp_after:wN \@@_ln_x_iii:NNNNNNw
+ \int_use:N \__int_eval:w 10 0000 0000 + #1*#4#5 ;
{20000} {0000} {0000} {0000}
} %^^A todo: reoptimize (a generalization attempt failed).
-\cs_new:Npn \@@_ln_x_iii:NNNNNw #1 #2#3#4#5 #6; { #1; {#2#3#4#5} {#6} }
+\cs_new:Npn \@@_ln_x_iii:NNNNNNw #1#2 #3#4#5#6 #7;
+ { #1#2; {#3#4#5#6} {#7} }
\cs_new:Npn \@@_ln_x_iii_var:NNNNNw #1 #2#3#4#5 #6;
{
#1#2#3#4#5 + \c_one ;
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3fp-parse.dtx b/Master/texmf-dist/source/latex/l3kernel/l3fp-parse.dtx
index f0cdec7ffbb..88f30d3ce87 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3fp-parse.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3fp-parse.dtx
@@ -36,7 +36,7 @@
%
%<*driver>
\documentclass[full]{l3doc}
-\GetIdInfo$Id: l3fp-parse.dtx 4975 2014-05-31 19:26:32Z bruno $
+\GetIdInfo$Id: l3fp-parse.dtx 5223 2014-07-18 01:54:05Z bruno $
{L3 Floating-point expression parsing}
\begin{document}
\DocInput{\jobname.dtx}
@@ -107,12 +107,11 @@
% irrelevant for the order of evaluation, but serve as signals), from
% the tightest binding to the loosest binding.
% \begin{itemize}
-% \item[32] Juxtaposition for implicit multiplication.
% \item[16] Function calls with multiple arguments.
% \item[15] Function calls expecting exactly one argument.
% \item[14] Binary |**| and~|^| (right to left).
% \item[12] Unary |+|, |-|, |!| (right to left).
-% \item[10] Binary |*|, |/| and~|%|.
+% \item[10] Binary |*|, |/|, and juxtaposition (implicit~|*|).
% \item[9] Binary |+| and~|-|.
% \item[7] Comparisons.
% \item[5] Logical \texttt{and}, denoted by~|&&|.
@@ -412,7 +411,7 @@
%
% Functions are implemented as prefix operators with very high
% precedence, so that their argument is the first number that can
-% possibly be built, except for juxtaposition.
+% possibly be built.
%
% Note that contrary to the \texttt{infix} functions discussed
% earlier, the \texttt{prefix} functions do perform tests on the
@@ -472,7 +471,6 @@
% Once a number is found, \cs{@@_parse_one:Nw} also finds an infix
% operator. This goes as follows.
% \begin{itemize}
-% \item
% \item If the next token is a control sequence, it could be the
% special marker \cs{s_@@_mark}, and
% otherwise it is a case of juxtaposing numbers, such as
@@ -2310,22 +2308,23 @@
% \end{macrocode}
% \end{macro}
%
+% ^^A todo: can |...(1,2,3)pt| really occur? If not, simplify.
% \begin{macro}[aux, EXP]
% {\@@_parse_infix_juxtapose:N, \@@_parse_apply_juxtapose:NwwN}
% Juxtaposition follows the same scheme as other binary operations,
% but calls \cs{@@_parse_apply_juxtapose:NwwN} rather than directly
% calling \cs{@@_parse_apply_binary:NwNwN}. This lets us catch errors
-% such as |max(1,2,3)pt| where one operand of the juxtaposition is not
+% such as |...(1,2,3)pt| where one operand of the juxtaposition is not
% a single number: both |#3| and~|#5| of the \texttt{apply} auxiliary
% must be empty.
% \begin{macrocode}
\cs_new:Npn \@@_parse_infix_juxtapose:N #1
{
- \if_int_compare:w #1 < \c_thirty_two
+ \if_int_compare:w #1 < \c_ten
\exp_after:wN @
\exp_after:wN \@@_parse_apply_juxtapose:NwwN
\tex_romannumeral:D
- \@@_parse_operand:Nw \c_thirty_two
+ \@@_parse_operand:Nw \c_ten
\exp_after:wN \@@_parse_expand:w
\else:
\exp_after:wN @
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3fp.dtx b/Master/texmf-dist/source/latex/l3kernel/l3fp.dtx
index 6dff7765268..80e4bd46480 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3fp.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3fp.dtx
@@ -38,7 +38,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3fp.dtx 4712 2014-04-30 08:17:49Z joseph $
+\GetIdInfo$Id: l3fp.dtx 5223 2014-07-18 01:54:05Z bruno $
{L3 Floating points}
%</driver|package>
%<*driver>
@@ -762,12 +762,11 @@
% expressions, in order of decreasing precedence: operations listed
% earlier bind more tightly than operations listed below them.
% \begin{itemize}
-% \item Implicit multiplication by juxtaposition (\texttt{2pi},
-% \texttt{3(4+5)}, \emph{etc}).
% \item Function calls (\texttt{sin}, \texttt{ln}, \emph{etc}).
% \item Binary |**| and |^| (right associative).
% \item Unary |+|, |-|, |!|.
-% \item Binary |*|, |/| and |%|.
+% \item Binary |*|, |/|, and implicit multiplication by juxtaposition
+% (\texttt{2pi}, \texttt{3(4+5)}, \emph{etc}).
% \item Binary |+| and |-|.
% \item Comparisons |>=|, |!=|, |<?|, \emph{etc}.
% \item Logical \texttt{and}, denoted by |&&|.
@@ -1173,18 +1172,6 @@
% Other names for $1$ and $+0$.
% \end{variable}
%
-% \begin{function}[EXP, added = 2012-05-08, tested = m3fp-convert002]
-% {\dim_to_fp:n}
-% \begin{syntax}
-% \cs{dim_to_fp:n} \Arg{dimexpr}
-% \end{syntax}
-% Expands to an internal floating point number equal to the value of
-% the \meta{dimexpr} in \texttt{pt}. Since dimension expressions are
-% evaluated much faster than their floating point equivalent,
-% \cs{dim_to_fp:n} can be used to speed up parts of a computation
-% where a low precision is acceptable.
-% \end{function}
-%
% \begin{function}[EXP, added = 2012-05-14, updated = 2012-07-08,
% tested = m3fp-convert003]{\fp_abs:n}
% \begin{syntax}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3int.dtx b/Master/texmf-dist/source/latex/l3kernel/l3int.dtx
index 5cf8a36cd56..dd98220454f 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3int.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3int.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3int.dtx 4966 2014-05-31 00:44:34Z bruno $
+\GetIdInfo$Id: l3int.dtx 5197 2014-07-11 06:42:47Z joseph $
{L3 Integers}
%</driver|package>
%<*driver>
@@ -638,7 +638,7 @@
%
% \begin{function}[added = 2014-02-11, EXP]{\int_to_oct:n}
% \begin{syntax}
-% \cs{int_to_octal:n} \Arg{integer expression}
+% \cs{int_to_oct:n} \Arg{integer expression}
% \end{syntax}
% Calculates the value of the \meta{integer expression} and places
% the octal (base~$8$) representation of the result in the input
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3keys.dtx b/Master/texmf-dist/source/latex/l3kernel/l3keys.dtx
index b13c1a32ff0..648508d7777 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3keys.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3keys.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3keys.dtx 5093 2014-06-08 20:33:14Z joseph $
+\GetIdInfo$Id: l3keys.dtx 5121 2014-06-12 17:37:56Z joseph $
{L3 Key-value interfaces}
%</driver|package>
%<*driver>
@@ -1367,6 +1367,7 @@
% \end{macro}
% \end{macro}
% \end{macro}
+% \end{macro}
%
% \begin{macro}[int]{\@@_choices_make:nn, \@@_multichoices_make:nn}
% \begin{macro}[aux]{\@@_choices_make:Nnn}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3prop.dtx b/Master/texmf-dist/source/latex/l3kernel/l3prop.dtx
index 265833a8e10..6a74a896de7 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3prop.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3prop.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3prop.dtx 4712 2014-04-30 08:17:49Z joseph $
+\GetIdInfo$Id: l3prop.dtx 5215 2014-07-17 13:23:20Z joseph $
{L3 Property lists}
%</driver|package>
%<*driver>
@@ -217,6 +217,23 @@
% the \meta{token list variable} is local. See also \cs{prop_gpop:NnNTF}.
% \end{function}
%
+% \begin{function}[added = 2014-07-17, EXP]{\prop_item:Nn, \prop_item:cn}
+% \begin{syntax}
+% \cs{prop_item:Nn} \meta{property list} \Arg{key}
+% \end{syntax}
+% Expands to the \meta{value} corresponding to the \meta{key} in
+% the \meta{property list}. If the \meta{key} is missing, this has
+% an empty expansion.
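+%
+% For example (an illustrative sketch, not taken from the kernel sources),
+% assuming the scratch variable \cs{l_tmpa_prop} has been filled by
+% \begin{verbatim}
+% \prop_put:Nnn \l_tmpa_prop { tool } { scissors }
+% \prop_put:Nnn \l_tmpa_prop { colour } { red }
+% \end{verbatim}
+% then |\prop_item:Nn \l_tmpa_prop { colour }| should leave \texttt{red}
+% in the input stream, while |\prop_item:Nn \l_tmpa_prop { size }| should
+% leave nothing.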
+% \begin{texnote}
+% This function is slower than the non-expandable analogue
+% \cs{prop_get:NnN}.
+% The result is returned within the \tn{unexpanded}
+% primitive (\cs{exp_not:n}), which means that the \meta{value}
+% will not expand further when appearing in an \texttt{x}-type
+% argument expansion.
+% \end{texnote}
+% \end{function}
+%
% \section{Modifying property lists}
%
% \begin{function}[added = 2012-05-12]
@@ -741,6 +758,36 @@
% \end{macro}
% \end{macro}
%
+% \begin{macro}[EXP]{\prop_item:Nn, \prop_item:cn}
+% \begin{macro}[aux, EXP]{\@@_item_Nn:nwwn}
+% Getting the value corresponding to a key in a property list in an
+% expandable fashion is similar to mapping some tokens. Go through
+% the property list one \meta{key}--\meta{value} pair at a time: the
+% arguments of \cs{@@_item_Nn:nwwn} are the \meta{key} we are looking
+% for, a \meta{key} of the property list, and its associated value.
+% The \meta{keys} are compared (as strings). If they match, the
+% \meta{value} is returned, within \cs{exp_not:n}. The loop
+% terminates even if the \meta{key} is missing, and yields an empty
+% value, because we have appended the appropriate
+% \meta{key}--\meta{empty value} pair to the property list.
+% \begin{macrocode}
+\cs_new:Npn \prop_item:Nn #1#2
+ {
+ \exp_last_unbraced:Noo \@@_item_Nn:nwwn { \tl_to_str:n {#2} } #1
+ \@@_pair:wn \tl_to_str:n {#2} \s_@@ { }
+ \__prg_break_point:
+ }
+\cs_new:Npn \@@_item_Nn:nwwn #1#2 \@@_pair:wn #3 \s_@@ #4
+ {
+ \str_if_eq_x:nnTF {#1} {#3}
+ { \__prg_break:n { \exp_not:n {#4} } }
+ { \@@_item_Nn:nwwn {#1} }
+ }
+\cs_generate_variant:Nn \prop_item:Nn { c }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
% \begin{macro}[TF, tested = m3prop004]
% {\prop_pop:NnN, \prop_pop:cnN, \prop_gpop:NnN, \prop_gpop:cnN}
% Popping an item from a property list, keeping track of whether
@@ -1076,6 +1123,16 @@
% \end{macrocode}
% \end{macro}
%
+% \subsection{Deprecated functions}
+%
+% \begin{macro}{\prop_get:Nn, \prop_get:cn}
+% Deprecated 2014-07-17.
+% \begin{macrocode}
+\cs_new_eq:NN \prop_get:Nn \prop_item:Nn
+\cs_new_eq:NN \prop_get:cn \prop_item:cn
+% \end{macrocode}
+% \end{macro}
+%
% \begin{macrocode}
%</initex|package>
% \end{macrocode}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3seq.dtx b/Master/texmf-dist/source/latex/l3kernel/l3seq.dtx
index 6924433ab00..813aee51c33 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3seq.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3seq.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3seq.dtx 4712 2014-04-30 08:17:49Z joseph $
+\GetIdInfo$Id: l3seq.dtx 5232 2014-07-18 19:32:54Z joseph $
{L3 Sequences and stacks}
%</driver|package>
%<*driver>
@@ -116,6 +116,22 @@
% \meta{sequence_2}.
% \end{function}
%
+% \begin{function}[added = 2014-07-17]
+% {
+% \seq_set_from_clist:NN, \seq_set_from_clist:cN,
+% \seq_set_from_clist:Nc, \seq_set_from_clist:cc,
+% \seq_set_from_clist:Nn, \seq_set_from_clist:cn,
+% \seq_gset_from_clist:NN, \seq_gset_from_clist:cN,
+% \seq_gset_from_clist:Nc, \seq_gset_from_clist:cc,
+% \seq_gset_from_clist:Nn, \seq_gset_from_clist:cn
+% }
+% \begin{syntax}
+% \cs{seq_set_from_clist:NN} \meta{sequence} \meta{comma-list}
+% \end{syntax}
+% Converts the data in the \meta{comma list} into a \meta{sequence}:
+% the original \meta{comma list} is unchanged.
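+%
+% As a short illustration (not part of the kernel sources),
+% \begin{verbatim}
+% \seq_set_from_clist:Nn \l_tmpa_seq { a , b , c }
+% \end{verbatim}
+% should give the scratch variable \cs{l_tmpa_seq} the same content as
+% three successive \cs{seq_put_right:Nn} operations with the items
+% \texttt{a}, \texttt{b} and \texttt{c}.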
+% \end{function}
+%
% \begin{function}[added = 2011-08-15, updated = 2012-07-02]
% {
% \seq_set_split:Nnn , \seq_set_split:NnV ,
@@ -273,6 +289,26 @@
% contain the special marker \cs{q_no_value}.
% \end{function}
%
+% \begin{function}[added = 2014-07-17, EXP]{\seq_item:Nn, \seq_item:cn}
+% \begin{syntax}
+% \cs{seq_item:Nn} \meta{sequence} \Arg{integer expression}
+% \end{syntax}
+% Indexing items in the \meta{sequence} from~$1$ at the top (left), this
+% function will evaluate the \meta{integer expression} and leave the
+% appropriate item from the sequence in the input stream. If the
+% \meta{integer expression} is negative, indexing occurs from the
+% bottom (right) of the sequence. When the \meta{integer expression}
+% is larger than the number of items in the \meta{sequence} (as
+% calculated by \cs{seq_count:N}) then the function will expand to
+% nothing.
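+%
+% For illustration, if the scratch variable \cs{l_tmpa_seq} holds the
+% items \texttt{a}, \texttt{b} and \texttt{c} in that order, then
+% \begin{verbatim}
+% \seq_item:Nn \l_tmpa_seq { 2 }
+% \seq_item:Nn \l_tmpa_seq { -1 }
+% \end{verbatim}
+% should leave \texttt{b} and \texttt{c}, respectively, in the input
+% stream, while an index of $4$ should leave nothing.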
+% \begin{texnote}
+% The result is returned within the \tn{unexpanded}
+% primitive (\cs{exp_not:n}), which means that the \meta{item}
+% will not expand further when appearing in an \texttt{x}-type
+% argument expansion.
+% \end{texnote}
+% \end{function}
+%
% \section{Recovering values from sequences with branching}
%
% The functions in this section combine tests for non-empty sequences
@@ -407,6 +443,18 @@
% \cs{tl_if_eq:nn(TF)}.
% \end{function}
%
+%
+% \begin{function}[added = 2014-07-18]^^A
+% {^^A
+% \seq_reverse:N, \seq_reverse:c,
+% \seq_greverse:N, \seq_greverse:c
+% }
+% \begin{syntax}
+% \cs{seq_reverse:N} \meta{sequence}
+% \end{syntax}
+% Reverses the order of the items stored in the \meta{sequence}.
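+%
+% As an illustration (not taken from the kernel sources), if
+% \cs{l_tmpa_seq} holds the items \texttt{a}, \texttt{b} and \texttt{c},
+% then after
+% \begin{verbatim}
+% \seq_reverse:N \l_tmpa_seq
+% \end{verbatim}
+% mapping over \cs{l_tmpa_seq} should deliver the items in the order
+% \texttt{c}, \texttt{b}, \texttt{a}.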
+% \end{function}
+%
% \section{Sequence conditionals}
%
% \begin{function}[EXP,pTF]{\seq_if_empty:N, \seq_if_empty:c}
@@ -885,6 +933,51 @@
%
% \begin{macro}
% {
+% \seq_set_from_clist:NN, \seq_set_from_clist:cN,
+% \seq_set_from_clist:Nc, \seq_set_from_clist:cc,
+% \seq_set_from_clist:Nn, \seq_set_from_clist:cn
+% }
+% \begin{macro}
+% {
+% \seq_gset_from_clist:NN, \seq_gset_from_clist:cN,
+% \seq_gset_from_clist:Nc, \seq_gset_from_clist:cc,
+% \seq_gset_from_clist:Nn, \seq_gset_from_clist:cn
+% }
+% Setting a sequence from a comma-separated list is done using a simple
+% mapping.
+% \begin{macrocode}
+\cs_new_protected:Npn \seq_set_from_clist:NN #1#2
+ {
+ \tl_set:Nx #1
+ { \s_@@ \clist_map_function:NN #2 \@@_wrap_item:n }
+ }
+\cs_new_protected:Npn \seq_set_from_clist:Nn #1#2
+ {
+ \tl_set:Nx #1
+ { \s_@@ \clist_map_function:nN {#2} \@@_wrap_item:n }
+ }
+\cs_new_protected:Npn \seq_gset_from_clist:NN #1#2
+ {
+ \tl_gset:Nx #1
+ { \s_@@ \clist_map_function:NN #2 \@@_wrap_item:n }
+ }
+\cs_new_protected:Npn \seq_gset_from_clist:Nn #1#2
+ {
+ \tl_gset:Nx #1
+ { \s_@@ \clist_map_function:nN {#2} \@@_wrap_item:n }
+ }
+\cs_generate_variant:Nn \seq_set_from_clist:NN { Nc }
+\cs_generate_variant:Nn \seq_set_from_clist:NN { c , cc }
+\cs_generate_variant:Nn \seq_set_from_clist:Nn { c }
+\cs_generate_variant:Nn \seq_gset_from_clist:NN { Nc }
+\cs_generate_variant:Nn \seq_gset_from_clist:NN { c , cc }
+\cs_generate_variant:Nn \seq_gset_from_clist:Nn { c }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}
+% {
% \seq_set_split:Nnn , \seq_set_split:NnV ,
% \seq_gset_split:Nnn, \seq_gset_split:NnV
% }
@@ -1155,6 +1248,65 @@
% \end{macro}
% \end{macro}
%
+% \begin{macro}
+% {\seq_reverse:N, \seq_reverse:c, \seq_greverse:N, \seq_greverse:c}
+% \begin{macro}[aux]{\@@_reverse:NN}
+% \begin{macro}[aux, EXP]{\@@_reverse_item:nwn}
+% Previously, \cs{seq_reverse:N} was coded by collecting the items
+% in reverse order after an \cs{exp_stop_f:} marker.
+% \begin{verbatim}
+% \cs_new_protected:Npn \seq_reverse:N #1
+% {
+% \cs_set_eq:NN \@@_item:n \@@_reverse_item:nw
+%     \tl_set:Nf #1 { #1 \exp_stop_f: }
+% }
+% \cs_new:Npn \@@_reverse_item:nw #1 #2 \exp_stop_f:
+% {
+% #2 \exp_stop_f:
+% \@@_item:n {#1}
+% }
+% \end{verbatim}
+% At first, this seems optimal, since we can forget about each item
+% as soon as it is placed after \cs{exp_stop_f:}. Unfortunately,
+% \TeX{}'s usual tail recursion does not take place in this case:
+% since the following \cs{@@_reverse_item:nw} only reads
+% tokens until \cs{exp_stop_f:}, and never reads the
+% |\@@_item:n {#1}| left by the previous call, \TeX{} cannot
+% remove that previous call from the stack, and in particular
+% must retain the various macro parameters in memory, until the
+% end of the replacement text is reached. The stack is thus
+% only flushed after all the \cs{@@_reverse_item:nw} are
+% expanded. Keeping track of the arguments of all those calls
+% uses up memory quadratic in the length of the sequence.
+% \TeX{} can then not cope with more than a few thousand items.
+%
+% Instead, we collect the items in the argument
+% of \cs{exp_not:n}. The previous calls are cleanly removed
+% from the stack, and the memory consumption becomes linear.
+% \begin{macrocode}
+\cs_new_protected_nopar:Npn \seq_reverse:N
+ { \@@_reverse:NN \tl_set:Nx }
+\cs_new_protected_nopar:Npn \seq_greverse:N
+ { \@@_reverse:NN \tl_gset:Nx }
+\cs_new_protected:Npn \@@_reverse:NN #1 #2
+ {
+ \cs_set_eq:NN \@@_tmp:w \@@_item:n
+ \cs_set_eq:NN \@@_item:n \@@_reverse_item:nwn
+ #1 #2 { #2 \exp_not:n { } }
+ \cs_set_eq:NN \@@_item:n \@@_tmp:w
+ }
+\cs_new:Npn \@@_reverse_item:nwn #1 #2 \exp_not:n #3
+ {
+ #2
+ \exp_not:n { \@@_item:n {#1} #3 }
+ }
+\cs_generate_variant:Nn \seq_reverse:N { c }
+\cs_generate_variant:Nn \seq_greverse:N { c }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
% \subsection{Sequence conditionals}
%
% \begin{macro}[pTF]{\seq_if_empty:N, \seq_if_empty:c}
@@ -1434,6 +1586,42 @@
% \end{macro}
% \end{macro}
%
+% \begin{macro}{\seq_item:Nn, \seq_item:cn}
+% \begin{macro}[aux]{\@@_item:wNn, \@@_item:nnn}
+% The idea here is to find the offset of the item from the left, then use
+% a loop to grab the correct item. If the resulting offset is too large,
+% then the stop code |{ ? \__prg_break: } { }| will be used by the auxiliary,
+% terminating the loop and returning nothing at all.
+% \begin{macrocode}
+\cs_new:Npn \seq_item:Nn #1
+ { \exp_after:wN \@@_item:wNn #1 \q_stop #1 }
+\cs_new:Npn \@@_item:wNn \s_@@ #1 \q_stop #2#3
+ {
+ \exp_args:Nf \@@_item:nnn
+ {
+ \int_eval:n
+ {
+ \int_compare:nNnT {#3} < \c_zero
+ { \seq_count:N #2 + \c_one + }
+ #3
+ }
+ }
+ #1
+ { ? \__prg_break: } { }
+ \__prg_break_point:
+ }
+\cs_new:Npn \@@_item:nnn #1#2#3
+ {
+ \use_none:n #2
+ \int_compare:nNnTF {#1} = \c_one
+ { \__prg_break:n { \exp_not:n {#3} } }
+ { \exp_args:Nf \@@_item:nnn { \int_eval:n { #1 - 1 } } }
+ }
+\cs_generate_variant:Nn \seq_item:Nn { c }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
% \subsection{Mapping to sequences}
%
% \begin{macro}{\seq_map_break:}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3skip.dtx b/Master/texmf-dist/source/latex/l3kernel/l3skip.dtx
index f773f3e2867..d977bc4cebd 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3skip.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3skip.dtx
@@ -38,7 +38,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3skip.dtx 4972 2014-05-31 16:42:18Z bruno $
+\GetIdInfo$Id: l3skip.dtx 5211 2014-07-17 07:22:35Z joseph $
{L3 Dimensions and skips}
%</driver|package>
%<*driver>
@@ -437,6 +437,73 @@
% \end{texnote}
% \end{function}
%
+% \begin{function}[added = 2014-07-15, EXP]{\dim_to_decimal:n}
+% \begin{syntax}
+% \cs{dim_to_decimal:n} \Arg{dimexpr}
+% \end{syntax}
+% Evaluates the \meta{dimension expression}, and leaves the result,
+% expressed in points (\texttt{pt}) in the input stream, with \emph{no
+% units}. The result is rounded by \TeX{} to four or five decimal
+% places. If the decimal part of the result is zero, it is omitted,
+% together with the decimal marker.
+%
+% For example
+% \begin{verbatim}
+% \dim_to_decimal:n { 1bp }
+% \end{verbatim}
+% leaves |1.00374| in the input stream, \emph{i.e.}~the magnitude of
+% one \enquote{big point} when converted to (\TeX{}) points.
+% \end{function}
+%
+% \begin{function}[added = 2014-07-15, EXP]{\dim_to_decimal_in_bp:n}
+% \begin{syntax}
+% \cs{dim_to_decimal_in_bp:n} \Arg{dimexpr}
+% \end{syntax}
+% Evaluates the \meta{dimension expression}, and leaves the result,
+% expressed in big points (\texttt{bp}) in the input stream, with \emph{no
+% units}. The result is rounded by \TeX{} to four or five decimal
+% places. If the decimal part of the result is zero, it is omitted,
+% together with the decimal marker.
+%
+% For example
+% \begin{verbatim}
+% \dim_to_decimal_in_bp:n { 1pt }
+% \end{verbatim}
+% leaves |0.99628| in the input stream, \emph{i.e.}~the magnitude of
+% one (\TeX{}) point when converted to big points.
+% \end{function}
+%
+% \begin{function}[added = 2014-07-15, EXP]
+% {\dim_to_decimal_in_unit:nn}
+% \begin{syntax}
+% \cs{dim_to_decimal_in_unit:nn} \Arg{dimexpr_1} \Arg{dimexpr_2}
+% \end{syntax}
+% Evaluates the \meta{dimension expressions}, and leaves the value of
+% \meta{dimexpr_1}, expressed in a unit given by \meta{dimexpr_2}, in
+% the input stream. The result is a decimal number, rounded by \TeX{}
+% to four or five decimal places. If the decimal part of the result
+% is zero, it is omitted, together with the decimal marker.
+%
+% For example
+% \begin{verbatim}
+% \dim_to_decimal_in_unit:nn { 1bp } { 1mm }
+% \end{verbatim}
+% leaves |0.35277| in the input stream, \emph{i.e.}~the magnitude of
+% one big point when converted to millimetres.
+% \end{function}
+%
+% \begin{function}[EXP, added = 2012-05-08, tested = m3fp-convert002]
+% {\dim_to_fp:n}
+% \begin{syntax}
+% \cs{dim_to_fp:n} \Arg{dimexpr}
+% \end{syntax}
+% Expands to an internal floating point number equal to the value of
+% the \meta{dimexpr} in \texttt{pt}. Since dimension expressions are
+% evaluated much faster than their floating point equivalent,
+% \cs{dim_to_fp:n} can be used to speed up parts of a computation
+% where a low precision is acceptable.
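+%
+% For example, a sketch of typical (illustrative) use is
+% \begin{verbatim}
+% \fp_eval:n { \dim_to_fp:n { 1in } / \dim_to_fp:n { 1cm } }
+% \end{verbatim}
+% which should evaluate the ratio of the two lengths as a floating point
+% value, approximately $2.54$.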
+% \end{function}
+%
% \section{Viewing \texttt{dim} variables}
%
% \begin{function}{\dim_show:N, \dim_show:c}
@@ -900,25 +967,6 @@
% \end{texnote}
% \end{function}
%
-% \begin{function}[added = 2011-11-11, updated=2014-05-31, EXP]
-% {\__dim_strip_bp:n, \__dim_strip_pt:n}
-% \begin{syntax}
-% \cs{__dim_strip_bp:n} \Arg{dimension expression}
-% \cs{__dim_strip_pt:n} \Arg{dimension expression}
-% \end{syntax}
-% Evaluates the \meta{dimension expression}, expanding any
-% dimensions and token list variables within the \meta{expression}
-% to their content (without requiring \cs{dim_use:N}/\cs{tl_use:N})
-% and applying the standard mathematical rules. The magnitude of the
-% result, expressed in big points (\texttt{bp}) or points (\texttt{pt}),
-% will be left in the input stream with \emph{no units}. If the decimal
-% part of the magnitude is zero, it will be omitted. For example,
-% \begin{verbatim}
-% \__dim_strip_pt:n { 2.5 pt * 2 }
-% \end{verbatim}
-% will leave~|5| in the input stream.
-% \end{function}
-%
% \end{documentation}
%
% \begin{implementation}
@@ -1339,18 +1387,16 @@
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[EXP, int]{\@@_strip_bp:n}
-% Conversion to big points is done using a scaling inside \cs{__dim_eval:w}
-% as \eTeX{} does that using $64$-bit precision. Here, $800/803$ is the
-% integer fraction for $72/72.27$.
+% \begin{macro}{\dim_use:N, \dim_use:c}
+% Accessing a \meta{dim}.
% \begin{macrocode}
-\cs_new:Npn \@@_strip_bp:n #1
- { \@@_strip_pt:n { ( #1 ) * 800 / 803 } }
+\cs_new_eq:NN \dim_use:N \tex_the:D
+\cs_generate_variant:Nn \dim_use:N { c }
% \end{macrocode}
% \end{macro}
%
-% \begin{macro}[EXP]{\@@_strip_pt:n}
-% \begin{macro}[EXP, aux]{\@@_strip_pt:w}
+% \begin{macro}[EXP]{\dim_to_decimal:n}
+% \begin{macro}[EXP, aux]{\@@_to_decimal:w}
% A function which comes up often enough to deserve a place in the
% kernel. Evaluate the dimension expression~|#1| then remove the
% trailing \texttt{pt}. The argument is put in parentheses as this
@@ -1358,14 +1404,14 @@
% extra tokens lying around. This is used a lot by low-level
% manipulations.
% \begin{macrocode}
-\cs_new:Npn \@@_strip_pt:n #1
+\cs_new:Npn \dim_to_decimal:n #1
{
\exp_after:wN
- \@@_strip_pt:w \dim_use:N \@@_eval:w (#1) \@@_eval_end:
+ \@@_to_decimal:w \dim_use:N \@@_eval:w (#1) \@@_eval_end:
}
\use:x
{
- \cs_new:Npn \exp_not:N \@@_strip_pt:w
+ \cs_new:Npn \exp_not:N \@@_to_decimal:w
##1 . ##2 \tl_to_str:n { pt }
}
{
@@ -1377,14 +1423,37 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}{\dim_use:N, \dim_use:c}
-% Accessing a \meta{dim}.
+% \begin{macro}[EXP]{\dim_to_decimal_in_bp:n}
+% Conversion to big points is done using a scaling inside \cs{__dim_eval:w}
+% as \eTeX{} does that using $64$-bit precision. Here, $800/803$ is the
+% integer fraction for $72/72.27$. This is a common case so is hand-coded
+% for accuracy (and speed).
% \begin{macrocode}
-\cs_new_eq:NN \dim_use:N \tex_the:D
-\cs_generate_variant:Nn \dim_use:N { c }
+\cs_new:Npn \dim_to_decimal_in_bp:n #1
+ { \dim_to_decimal:n { ( #1 ) * 800 / 803 } }
% \end{macrocode}
% \end{macro}
%
+% \begin{macro}[EXP]{\dim_to_decimal_in_unit:nn}
+% An analogue of \cs{dim_ratio:nn} that produces a decimal number as its
+% result, rather than a rational fraction for use within dimension
+% expressions.
+% \begin{macrocode}
+\cs_new:Npn \dim_to_decimal_in_unit:nn #1#2
+ {
+ \dim_to_decimal:n
+ {
+ 1pt *
+ \dim_ratio:nn {#1} {#2}
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[EXP]{\dim_to_fp:n}
+% Defined in \pkg{l3fp-convert}, documented here.
+% \end{macro}
+%
% \subsection{Viewing \texttt{dim} variables}
%
% \begin{macro}{\dim_show:N, \dim_show:c}
@@ -1870,6 +1939,14 @@
% \end{macrocode}
% \end{macro}
%
+% \begin{macro}{\__dim_strip_bp:n, \__dim_strip_pt:n}
+% Deprecated 2014-07-15.
+% \begin{macrocode}
+\cs_new_eq:NN \__dim_strip_bp:n \dim_to_decimal_in_bp:n
+\cs_new_eq:NN \__dim_strip_pt:n \dim_to_decimal:n
+% \end{macrocode}
+% \end{macro}
+%
% \begin{macrocode}
%</initex|package>
% \end{macrocode}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3str.dtx b/Master/texmf-dist/source/latex/l3kernel/l3str.dtx
new file mode 100644
index 00000000000..c4460d5ae0b
--- /dev/null
+++ b/Master/texmf-dist/source/latex/l3kernel/l3str.dtx
@@ -0,0 +1,573 @@
+% \iffalse meta-comment
+%
+%% File: l3str.dtx Copyright (C) 2014 The LaTeX3 Project
+%%
+%% It may be distributed and/or modified under the conditions of the
+%% LaTeX Project Public License (LPPL), either version 1.3c of this
+%% license or (at your option) any later version. The latest version
+%% of this license is in the file
+%%
+%% http://www.latex-project.org/lppl.txt
+%%
+%% This file is part of the "l3kernel bundle" (The Work in LPPL)
+%% and all files in that bundle must be distributed together.
+%%
+%% The released version of this bundle is available from CTAN.
+%%
+%% -----------------------------------------------------------------------
+%%
+%% The development version of the bundle can be found at
+%%
+%% http://www.latex-project.org/svnroot/experimental/trunk/
+%%
+%% for those people who are interested.
+%%
+%%%%%%%%%%%
+%% NOTE: %%
+%%%%%%%%%%%
+%%
+%% Snapshots taken from the repository represent work in progress and may
+%% not work or may contain conflicting material! We therefore ask
+%% people _not_ to put them into distributions, archives, etc. without
+%% prior consultation with the LaTeX3 Project.
+%%
+%% -----------------------------------------------------------------------
+%
+%<*driver>
+\documentclass[full]{l3doc}
+%</driver>
+%<*driver|package>
+\GetIdInfo$Id: l3str.dtx 5158 2014-06-19 08:05:56Z joseph $
+ {L3 Strings}
+%</driver|package>
+%<*driver>
+\begin{document}
+ \DocInput{\jobname.dtx}
+\end{document}
+%</driver>
+% \fi
+%
+% \title{^^A
+% The \pkg{l3str} package\\Strings^^A
+% \thanks{This file describes v\ExplFileVersion,
+% last revised \ExplFileDate.}^^A
+% }
+%
+% \author{^^A
+% The \LaTeX3 Project\thanks
+% {^^A
+% E-mail:
+% \href{mailto:latex-team@latex-project.org}
+% {latex-team@latex-project.org}^^A
+% }^^A
+% }
+%
+% \date{Released \ExplFileDate}
+%
+% \maketitle
+%
+% \begin{documentation}
+%
+% \TeX{} associates each character with a category code: as such, there is no
+% concept of a \enquote{string} as commonly understood in many other
+% programming languages. However, there are places where we wish to manipulate
+% token lists while in some sense \enquote{ignoring} category codes: this is
+% done by treating token lists as strings in a \TeX{} sense.
+%
+% A \TeX{} string (and thus an \pkg{expl3} string) is a series of characters
+% which have category code $12$ (\enquote{other}) with the exception of
+% space characters which have category code $10$ (\enquote{space}). Thus
+% at a technical level, a \TeX{} string is a token list with the appropriate
+% category codes. In this documentation, these will simply be referred to as
+% strings: note that they can be stored in token lists as normal.
+%
+% The functions documented here take literal token lists,
+% convert to strings and then carry out manipulations. Thus they may
+% informally be described as \enquote{ignoring} category code. Note that
+% the functions \cs{cs_to_str:N}, \cs{tl_to_str:n}, \cs{tl_to_str:N} and
+% \cs{token_to_str:N} (and variants) will generate strings from the appropriate
+% input: these are documented in \pkg{l3basics}, \pkg{l3tl} and \pkg{l3token},
+% respectively.
+%
+% \section{The first character from a string}
+%
+% \begin{function}[added = 2011-08-10, EXP]{\str_head:n,\str_tail:n}
+% \begin{syntax}
+% \cs{str_head:n} \Arg{token list}
+% \cs{str_tail:n} \Arg{token list}
+% \end{syntax}
+% Converts the \meta{token list} into a string, as described for
+% \cs{tl_to_str:n}. The \cs{str_head:n} function then leaves
+% the first character of this string in the input stream.
+% The \cs{str_tail:n} function leaves all characters except
+% the first in the input stream. The first character may be
+% a space. If the \meta{token list} argument is entirely empty,
+% nothing is left in the input stream.
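+%
+% For example (illustration only),
+% \begin{verbatim}
+% \str_head:n { Hello }
+% \str_tail:n { Hello }
+% \end{verbatim}
+% should leave \texttt{H} and \texttt{ello}, respectively, in the input
+% stream, in each case as characters of category code $12$.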
+% \end{function}
+%
+% \section{Tests on strings}
+%
+% \begin{function}[EXP,pTF]
+% {
+% \str_if_eq:nn, \str_if_eq:Vn, \str_if_eq:on, \str_if_eq:no,
+% \str_if_eq:nV, \str_if_eq:VV
+% }
+% \begin{syntax}
+% \cs{str_if_eq_p:nn} \Arg{tl_1} \Arg{tl_2}
+% \cs{str_if_eq:nnTF} \Arg{tl_1} \Arg{tl_2} \Arg{true code} \Arg{false code}
+% \end{syntax}
+% Compares the two \meta{token lists} on a character by character
+% basis, and is \texttt{true} if the two lists contain the same
+% characters in the same order. Thus for example
+% \begin{verbatim}
+% \str_if_eq_p:no { abc } { \tl_to_str:n { abc } }
+% \end{verbatim}
+% is logically \texttt{true}.
+% \end{function}
+%
+% \begin{function}[EXP,pTF, added = 2012-06-05]{\str_if_eq_x:nn}
+% \begin{syntax}
+% \cs{str_if_eq_x_p:nn} \Arg{tl_1} \Arg{tl_2}
+% \cs{str_if_eq_x:nnTF} \Arg{tl_1} \Arg{tl_2} \Arg{true code} \Arg{false code}
+% \end{syntax}
+% Compares the full expansion of two \meta{token lists} on a character by
+% character basis, and is \texttt{true} if the two lists contain the same
+% characters in the same order. Thus for example
+% \begin{verbatim}
+% \str_if_eq_x_p:nn { abc } { \tl_to_str:n { abc } }
+% \end{verbatim}
+% is logically \texttt{true}.
+% \end{function}
+%
+% \begin{function}[added = 2013-07-24, EXP, TF]{\str_case:nn, \str_case:on}
+% \begin{syntax}
+% \cs{str_case:nnTF} \Arg{test string} \\
+% ~~|{| \\
+% ~~~~\Arg{string case_1} \Arg{code case_1} \\
+% ~~~~\Arg{string case_2} \Arg{code case_2} \\
+% ~~~~\ldots \\
+% ~~~~\Arg{string case_n} \Arg{code case_n} \\
+% ~~|}| \\
+% ~~\Arg{true code}
+% ~~\Arg{false code}
+% \end{syntax}
+% This function compares the \meta{test string} in turn with each
+% of the \meta{string cases}. If the two are equal (as described for
+% \cs{str_if_eq:nnTF}) then the
+% associated \meta{code} is left in the input stream. If any of the
+% cases are matched, the \meta{true code} is also inserted into the
+% input stream (after the code for the appropriate case), while if none
+% match then the \meta{false code} is inserted. The function
+% \cs{str_case:nn}, which does nothing if there is no match, is also
+% available.
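+%
+% An illustrative sketch of the syntax (not taken from the kernel sources) is
+% \begin{verbatim}
+% \str_case:nnF { bar }
+%   {
+%     { foo } { Saw~foo }
+%     { bar } { Saw~bar }
+%   }
+%   { Saw~nothing~known }
+% \end{verbatim}
+% which should leave \texttt{Saw bar} in the input stream.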
+% \end{function}
+%
+% \begin{function}[added = 2013-07-24, EXP, TF]{\str_case_x:nn}
+% \begin{syntax}
+% \cs{str_case_x:nnTF} \Arg{test string} \\
+% ~~|{| \\
+% ~~~~\Arg{string case_1} \Arg{code case_1} \\
+% ~~~~\Arg{string case_2} \Arg{code case_2} \\
+% ~~~~\ldots \\
+% ~~~~\Arg{string case_n} \Arg{code case_n} \\
+% ~~|}| \\
+% ~~\Arg{true code}
+% ~~\Arg{false code}
+% \end{syntax}
+% This function compares the full expansion of the \meta{test string}
+% in turn with the full expansion of the \meta{string cases}. If the two
+% full expansions are equal (as described for \cs{str_if_eq:nnTF}) then the
+% associated \meta{code} is left in the input stream. If any of the
+% cases are matched, the \meta{true code} is also inserted into the
+% input stream (after the code for the appropriate case), while if none
+% match then the \meta{false code} is inserted. The function
+% \cs{str_case_x:nn}, which does nothing if there is no match, is also
+% available.
+% The \meta{test string} is expanded in each comparison, and must
+% always yield the same result: for example, random numbers must
+% not be used within this string.
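+%
+% For illustration, with the scratch variable \cs{l_tmpa_tl} set to
+% \texttt{bar},
+% \begin{verbatim}
+% \str_case_x:nnF { \l_tmpa_tl }
+%   {
+%     { foo } { Saw~foo }
+%     { bar } { Saw~bar }
+%   }
+%   { Saw~nothing~known }
+% \end{verbatim}
+% should leave \texttt{Saw bar} in the input stream, as the \meta{test
+% string} is fully expanded before the comparison.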
+% \end{function}
+%
+% \section{String manipulation}
+%
+% \begin{function}[rEXP, added = 2014-06-19]{\str_fold_case:n}
+% \begin{syntax}
+% \cs{str_fold_case:n} \Arg{tokens}
+% \end{syntax}
+% Converts the input \meta{tokens} to their string representation, as
+% described for \cs{tl_to_str:n}, and then folds the case of the resulting
+% \meta{string} to remove case information. The result of this process is
+% left in the input stream.
+%
+% String folding is a process used for material such as identifiers rather
+% than for \enquote{text}. The folding provided by \cs{str_fold_case:n}
+% follows the mappings provided by the \href{http://www.unicode.org}^^A
+% {Unicode Consortium}, who
+% \href{http://www.unicode.org/faq/casemap_charprop.html#2}{state}:
+% \begin{quote}
+% Case folding is primarily used for caseless comparison of text, such
+% as identifiers in a computer program, rather than actual text
+% transformation. Case folding in Unicode is based on the lowercase
+% mapping, but includes additional changes to the source text to help make
+% it language-insensitive and consistent. As a result, case-folded text
+% should be used solely for internal processing and generally should not be
+% stored or displayed to the end user.
+% \end{quote}
+% The folding approach implemented by \cs{str_fold_case:n} follows the
+% \enquote{full} scheme defined by the Unicode Consortium
+% (\emph{e.g.}~\SS folds to \texttt{SS}). As case-folding is
+% a language-insensitive process, there is no special treatment of
+% Turkic input (\emph{i.e.}~\texttt{I} always folds to \texttt{i} and
+% not to \texttt{\i}).
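+%
+% For example (purely illustrative, not kernel test material), a caseless
+% comparison of two identifiers may be sketched as
+% \begin{verbatim}
+% \str_if_eq_x:nnTF
+%   { \str_fold_case:n { PDFtex } }
+%   { \str_fold_case:n { pdfTeX } }
+%   { TRUE } { FALSE }
+% \end{verbatim}
+% which should leave \texttt{TRUE} in the input stream.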
+%
+% \begin{texnote}
+% As with all \pkg{expl3} functions, the input supported by
+% \cs{str_fold_case:n} is \emph{engine-native} characters which are or
+% interoperate with \textsc{utf-8}. As such, when used with \pdfTeX{}
+% \emph{only} the Latin alphabet characters A--Z will be case-folded
+% (\emph{i.e.}~the \textsc{ascii} range which coincides with
+% \textsc{utf-8}). Full \textsc{utf-8} support is available with both
+% \XeTeX{} and \LuaTeX{}, subject only to the fact that \XeTeX{} in
+% particular has issues with characters of code above hexadecimal
+% $0\mathrm{xFFFF}$ when interacting with \cs{tl_to_str:n}.
+% \end{texnote}
+% \end{function}
+%
+% \subsection{Internal string functions}
+%
+% \begin{function}[EXP]{\__str_if_eq_x:nn}
+% \begin{syntax}
+% \cs{__str_if_eq_x:nn} \Arg{tl_1} \Arg{tl_2}
+% \end{syntax}
+% Compares the full expansion of two \meta{token lists} on a character by
+% character basis, and is \texttt{true} if the two lists contain the same
+% characters in the same order. Leaves |0| in the input stream if the
+% condition is true, and |+1| or |-1| otherwise.
+% \end{function}
+%
+% \begin{function}{\@@_if_eq_x_return:nn}
+% \begin{syntax}
+% \cs{@@_if_eq_x_return:nn} \Arg{tl_1} \Arg{tl_2}
+% \end{syntax}
+% Compares the full expansion of two \meta{token lists} on a character by
+% character basis, and is \texttt{true} if the two lists contain the same
+% characters in the same order. Either \cs{prg_return_true:} or
+% \cs{prg_return_false:} is then left in the input stream. This is a version
+% of \cs{str_if_eq_x:nn(TF)} coded for speed.
+% \end{function}
+%
+% \end{documentation}
+%
+% \begin{implementation}
+%
+% \section{\pkg{l3str} implementation}
+%
+% \begin{macrocode}
+%<*initex|package>
+% \end{macrocode}
+%
+% \begin{macrocode}
+%<@@=str>
+% \end{macrocode}
+%
+% \begin{macro}{\str_head:n, \str_tail:n}
+% \begin{macro}[aux]{\__str_head:w}
+% \begin{macro}[aux]{\__str_tail:w}
+% After \cs{tl_to_str:n}, we have a list of character tokens,
+% all with category code 12, except the space, which has category
+% code 10. Directly using \cs{tl_head:w} would thus lose leading spaces.
+% Instead, we take an argument delimited by an explicit space, and
+% then only use \cs{tl_head:w}. If the string started with a
+% space, then the argument of \cs{__str_head:w} is empty, and
+% the function correctly returns a space character. Otherwise,
+% it returns the first token of |#1|, which is the first token
+% of the string. If the string is empty, we return an empty result.
+%
+% To remove the first character of \cs{tl_to_str:n} |{#1}|,
+% we test it using \cs{if_charcode:w} \cs{scan_stop:},
+% always \texttt{false} for characters. If the argument was non-empty,
+% then \cs{__str_tail:w} returns everything until the first
+% \texttt{X} (with category code letter, no risk of confusing
+% with the user input). If the argument was empty, the first
+% \texttt{X} is taken by \cs{if_charcode:w}, and nothing
+% is returned. We use \texttt{X} as a \meta{marker}, rather than
+% a quark because the test \cs{if_charcode:w} \cs{scan_stop:}
+% \meta{marker} has to be \texttt{false}.
+% \begin{macrocode}
+\cs_new:Npn \str_head:n #1
+ {
+ \exp_after:wN \__str_head:w
+ \tl_to_str:n {#1}
+ { { } } ~ \q_stop
+ }
+\cs_new:Npn \__str_head:w #1 ~ %
+ { \tl_head:w #1 { ~ } }
+\cs_new:Npn \str_tail:n #1
+ {
+ \exp_after:wN \__str_tail:w
+ \reverse_if:N \if_charcode:w
+ \scan_stop: \tl_to_str:n {#1} X X \q_stop
+ }
+\cs_new:Npn \__str_tail:w #1 X #2 \q_stop { \fi: #1 }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{String comparisons}
+%
+% \begin{macro}[int, EXP]{\__str_if_eq_x:nn}
+% \begin{macro}[aux, EXP]{\__str_escape_x:n}
+% String comparisons rely on the primitive \cs{(pdf)strcmp} if available:
+% \LuaTeX{} does not have it, so emulation is required. As the net result
+% is that we do not \emph{always} use the primitive, the correct approach
+% is to wrap up in a function with defined behaviour. That's done by
+% providing a wrapper and then redefining in the \LuaTeX{} case. Note that
+% the necessary Lua code is covered in \pkg{l3bootstrap}: long-term this may
+% need to go into a separate Lua file, but at present it lives somewhere that
+% spaces are not skipped, which eases input. The need to detokenize and force
+% expansion of input arises from the case where a |#| token is used in the
+% input, \emph{e.g.}~|\__str_if_eq_x:nn {#} { \tl_to_str:n {#} }|, which
+% otherwise will fail as \cs{luatex_luaescapestring:D} does not double
+% such tokens.
+% \begin{macrocode}
+\cs_new:Npn \__str_if_eq_x:nn #1#2 { \pdftex_strcmp:D {#1} {#2} }
+\luatex_if_engine:T
+ {
+ \cs_set:Npn \__str_if_eq_x:nn #1#2
+ {
+ \luatex_directlua:D
+ {
+ l3kernel.strcmp
+ (
+ " \__str_escape_x:n {#1} " ,
+ " \__str_escape_x:n {#2} "
+ )
+ }
+ }
+ \cs_new:Npn \__str_escape_x:n #1
+ {
+ \luatex_luaescapestring:D
+ {
+ \etex_detokenize:D \exp_after:wN { \luatex_expanded:D {#1} }
+ }
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% \begin{macro}[int, EXP]{\__str_if_eq_x_return:nn}
+% It turns out that we often need to compare a token list
+% with the result of applying some function to it, and
+% return with \cs{prg_return_true/false:}. This test is
+% similar to \cs{str_if_eq:nnTF} (see \pkg{l3str}),
+% but is hard-coded for speed.
+% \begin{macrocode}
+\cs_new:Npn \__str_if_eq_x_return:nn #1 #2
+ {
+ \if_int_compare:w \__str_if_eq_x:nn {#1} {#2} = \c_zero
+ \prg_return_true:
+ \else:
+ \prg_return_false:
+ \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[pTF, EXP]
+% {
+% \str_if_eq:nn, \str_if_eq:Vn, \str_if_eq:on, \str_if_eq:nV,
+% \str_if_eq:no, \str_if_eq:VV,
+% \str_if_eq_x:nn
+% }
+% Modern engines provide a direct way of comparing two token lists,
+% but returning a number. This set of conditionals therefore makes life
+% a bit clearer. The \texttt{nn} and \texttt{x}-type versions are created
+% directly as this is most efficient.
+% \begin{macrocode}
+\prg_new_conditional:Npnn \str_if_eq:nn #1#2 { p , T , F , TF }
+ {
+ \if_int_compare:w \__str_if_eq_x:nn { \exp_not:n {#1} } { \exp_not:n {#2} }
+ = \c_zero
+ \prg_return_true: \else: \prg_return_false: \fi:
+ }
+\cs_generate_variant:Nn \str_if_eq_p:nn { V , o }
+\cs_generate_variant:Nn \str_if_eq_p:nn { nV , no , VV }
+\cs_generate_variant:Nn \str_if_eq:nnT { V , o }
+\cs_generate_variant:Nn \str_if_eq:nnT { nV , no , VV }
+\cs_generate_variant:Nn \str_if_eq:nnF { V , o }
+\cs_generate_variant:Nn \str_if_eq:nnF { nV , no , VV }
+\cs_generate_variant:Nn \str_if_eq:nnTF { V , o }
+\cs_generate_variant:Nn \str_if_eq:nnTF { nV , no , VV }
+\prg_new_conditional:Npnn \str_if_eq_x:nn #1#2 { p , T , F , TF }
+ {
+ \if_int_compare:w \__str_if_eq_x:nn {#1} {#2} = \c_zero
+ \prg_return_true: \else: \prg_return_false: \fi:
+ }
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[int, EXP]{\@@_if_eq_x_return:nn}
+% \end{macro}
+%
+% \begin{macro}[EXP]{\str_case:nn, \str_case:on, \str_case_x:nn}
+% \begin{macro}[EXP, TF]{\str_case:nn, \str_case:on, \str_case_x:nn}
+% \begin{macro}[EXP, aux]{\@@_case:nnTF, \@@_case_x:nnTF}
+% \begin{macro}[aux, EXP]
+% {\@@_case:nw, \@@_case_x:nw, \@@_case_end:nw}
+% Much the same as \cs{tl_case:nn(TF)} here: just a change in the internal
+% comparison.
+% \begin{macrocode}
+\cs_new:Npn \str_case:nn #1#2
+ {
+ \tex_romannumeral:D
+ \@@_case:nnTF {#1} {#2} { } { }
+ }
+\cs_new:Npn \str_case:nnT #1#2#3
+ {
+ \tex_romannumeral:D
+ \@@_case:nnTF {#1} {#2} {#3} { }
+ }
+\cs_new:Npn \str_case:nnF #1#2
+ {
+ \tex_romannumeral:D
+ \@@_case:nnTF {#1} {#2} { }
+ }
+\cs_new:Npn \str_case:nnTF #1#2
+ {
+ \tex_romannumeral:D
+ \@@_case:nnTF {#1} {#2}
+ }
+\cs_new:Npn \@@_case:nnTF #1#2#3#4
+ { \@@_case:nw {#1} #2 {#1} { } \q_mark {#3} \q_mark {#4} \q_stop }
+\cs_generate_variant:Nn \str_case:nn { o }
+\cs_generate_variant:Nn \str_case:nnT { o }
+\cs_generate_variant:Nn \str_case:nnF { o }
+\cs_generate_variant:Nn \str_case:nnTF { o }
+\cs_new:Npn \@@_case:nw #1#2#3
+ {
+ \str_if_eq:nnTF {#1} {#2}
+ { \@@_case_end:nw {#3} }
+ { \@@_case:nw {#1} }
+ }
+\cs_new:Npn \str_case_x:nn #1#2
+ {
+ \tex_romannumeral:D
+ \@@_case_x:nnTF {#1} {#2} { } { }
+ }
+\cs_new:Npn \str_case_x:nnT #1#2#3
+ {
+ \tex_romannumeral:D
+ \@@_case_x:nnTF {#1} {#2} {#3} { }
+ }
+\cs_new:Npn \str_case_x:nnF #1#2
+ {
+ \tex_romannumeral:D
+ \@@_case_x:nnTF {#1} {#2} { }
+ }
+\cs_new:Npn \str_case_x:nnTF #1#2
+ {
+ \tex_romannumeral:D
+ \@@_case_x:nnTF {#1} {#2}
+ }
+\cs_new:Npn \@@_case_x:nnTF #1#2#3#4
+ { \@@_case_x:nw {#1} #2 {#1} { } \q_mark {#3} \q_mark {#4} \q_stop }
+\cs_new:Npn \@@_case_x:nw #1#2#3
+ {
+ \str_if_eq_x:nnTF {#1} {#2}
+ { \@@_case_end:nw {#3} }
+ { \@@_case_x:nw {#1} }
+ }
+\cs_new_eq:NN \@@_case_end:nw \__prg_case_end:nw
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{String manipulation}
+%
+% \begin{macro}[EXP]{\str_fold_case:n}
+% \begin{macro}[aux, EXP]{\@@_fold_auxi:w}
+% \begin{macro}[aux, EXP]{\@@_fold_auxii:N}
+% \begin{macro}[aux, EXP]{\@@_fold_auxiii:NNNNNNNN}
+% \begin{macro}[aux, EXP]{\@@_fold_end:w}
+% To convert a string to the \enquote{caseless} form, the first stage is to
+% remove tokenization. Once that is done, provided the transformed chars are
+% also detokenized then there is no need to worry about category codes.
+% Spaces need to be retained as part of the mapping, so there is a little
+% work to do in the set up. Data to support this process is loaded later in
+% the \pkg{expl3} bundle.
+% \begin{macrocode}
+\cs_new:Npn \str_fold_case:n #1
+ {
+ \exp_after:wN \@@_fold_auxi:w \tl_to_str:n {#1}
+ { ~ \c_empty_tl } \@@_fold_end:w ? ~
+ }
+% \end{macrocode}
+% A loop using spaces as delimiters: done in this way there is no issue with
+% spaces in the input. Notice that there is a second inner loop with
+% \cs{@@_fold_auxii:N} for each \enquote{word}.
+% \begin{macrocode}
+\cs_new:Npn \@@_fold_auxi:w #1 ~
+ {
+ \@@_fold_auxii:N #1 { ~ \c_space_tl }
+ \@@_fold_auxi:w
+ }
+% \end{macrocode}
+% The idea here is to take a single token and convert it to its decimal
+% character code. This can then be used to split up the input into $100$
+% separate manageable lists for comparison on a case-by-case basis.
+% \begin{macrocode}
+\cs_new:Npn \@@_fold_auxii:N #1
+ {
+ \exp_after:wN \@@_fold_auxiii:NNNNNNNN
+ \int_use:N \__int_eval:w 1000000 + `#1 \__int_eval_end: #1
+ }
+% \end{macrocode}
+% At this stage, use a slow-but-expandable string case selection to look
+% for a matching char. If one is not found then retain the input as-is.
+% This also does some cleanup to allow a simple termination of the two
+% loops.
+% \begin{macrocode}
+\cs_new:Npn \@@_fold_auxiii:NNNNNNNN #1#2#3#4#5#6#7#8
+ {
+ \exp_args:NNv \str_case_x:nnF #8
+ { c_@@_fold_ #6 _ #7 _tl }
+ {
+ #8
+ \exp_after:wN \use_none:n #8
+ }
+ \@@_fold_auxii:N
+ }
+% \end{macrocode}
+% When the end is reached, clean everything up leaving the converted
+% string in the input stream.
+% \begin{macrocode}
+\cs_new:Npn \@@_fold_end:w ? #1 \@@_fold_auxi:w { }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{Deprecated functions}
+%
+% \begin{macro}{\str_case:nnn, \str_case:onn, \str_case_x:nnn}
+% Deprecated 2013-07-15.
+% \begin{macrocode}
+\cs_new_eq:NN \str_case:nnn \str_case:nnF
+\cs_new_eq:NN \str_case:onn \str_case:onF
+\cs_new_eq:NN \str_case_x:nnn \str_case_x:nnF
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macrocode}
+%</initex|package>
+% \end{macrocode}
+%
+% \end{implementation}
+%
+% \PrintIndex
\ No newline at end of file
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3tl.dtx b/Master/texmf-dist/source/latex/l3kernel/l3tl.dtx
index 88874c36241..d5798d44b3a 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3tl.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3tl.dtx
@@ -37,7 +37,7 @@
\documentclass[full]{l3doc}
%</driver>
%<*driver|package>
-\GetIdInfo$Id: l3tl.dtx 5070 2014-06-06 18:00:24Z bruno $
+\GetIdInfo$Id: l3tl.dtx 5214 2014-07-17 08:39:43Z joseph $
{L3 Token lists}
%</driver|package>
%<*driver>
@@ -894,20 +894,6 @@
% \end{texnote}
% \end{function}
%
-% \begin{function}[added = 2011-08-10, EXP]{\str_head:n,\str_tail:n}
-% \begin{syntax}
-% \cs{str_head:n} \Arg{token list}
-% \cs{str_tail:n} \Arg{token list}
-% \end{syntax}
-% Converts the \meta{token list} into a string, as described for
-% \cs{tl_to_str:n}. The \cs{str_head:n} function then leaves
-% the first character of this string in the input stream.
-% The \cs{str_tail:n} function leaves all characters except
-% the first in the input stream. The first character may be
-% a space. If the \meta{token list} argument is entirely empty,
-% nothing is left in the input stream.
-% \end{function}
-%
% \begin{function}[updated = 2012-07-09, EXP, pTF]{\tl_if_head_eq_catcode:nN}
% \begin{syntax}
% \cs{tl_if_head_eq_catcode_p:nN} \Arg{token list} \meta{test token}
@@ -989,6 +975,27 @@
% a token by token basis.
% \end{function}
%
+% \section{Using a single item}
+%
+% \begin{function}[added = 2014-07-17, EXP]
+% {\tl_item:nn, \tl_item:Nn, \tl_item:cn}
+% \begin{syntax}
+% \cs{tl_item:nn} \Arg{token list} \Arg{integer expression}
+% \end{syntax}
+% Indexing items in the \meta{token list} from~$1$ on the left, this
+% function will evaluate the \meta{integer expression} and leave the
+% appropriate item from the \meta{token list} in the input stream.
+% If the \meta{integer expression} is negative, indexing occurs from
+% the right of the token list, starting at $-1$ for the right-most item.
+% If the index is out of bounds, then the function expands to nothing.
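+%
+% For illustration (not taken from the kernel sources),
+% \begin{verbatim}
+% \tl_item:nn { abcd } { 3 }
+% \tl_item:nn { abcd } { -2 }
+% \end{verbatim}
+% should leave \texttt{c} in the input stream in both cases.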
+% \begin{texnote}
+% The result is returned within the \tn{unexpanded}
+% primitive (\cs{exp_not:n}), which means that the \meta{item}
+% will not expand further when appearing in an \texttt{x}-type
+% argument expansion.
+% \end{texnote}
+% \end{function}
+%
% \section{Viewing token lists}
%
% \begin{function}[updated = 2012-09-09]{\tl_show:N, \tl_show:c}
@@ -1471,6 +1478,7 @@
% by the \tn{endlinechar}, and an extra \tn{endlinechar} is
% added at the end, we need to set both of those to $-1$,
% \enquote{unprintable}.
+% To be safe, the \meta{setup} |#3| is followed by \cs{scan_stop:}.
% \begin{macrocode}
\cs_new_protected_nopar:Npn \tl_set_rescan:Nnn
{ \@@_set_rescan:NNnn \tl_set:Nn }
@@ -1484,7 +1492,7 @@
\exp_args:No \etex_everyeof:D { \c_@@_rescan_marker_tl \exp_not:N }
\tex_endlinechar:D \c_minus_one
\tex_newlinechar:D \c_minus_one
- #3
+ #3 \scan_stop:
\use:x
{
\group_end:
@@ -1892,8 +1900,15 @@
% \begin{macro}[EXP, TF]{\tl_case:Nn, \tl_case:cn}
% \begin{macro}[EXP, aux]{\@@_case:nnTF}
% \begin{macro}[aux, EXP]{\@@_case:Nw}
+% \begin{macro}[int, EXP]{\__prg_case_end:nw}
% \begin{macro}[aux, EXP]{\@@_case_end:nw}
-% Similar set up to \cs{str_case:nn(TF)} as described in \pkg{l3basics}.
+% The aim here is to allow the case statement to be evaluated
+% using a known number of expansion steps (two), and without
+% needing to use an explicit \enquote{end of recursion} marker.
+% That is achieved by using the test input as the final case,
+% as this will always be true. The trick is then to tidy up
+% the output such that the appropriate case code plus either
+% the \texttt{true} or \texttt{false} branch code is inserted.
% \begin{macrocode}
\cs_new:Npn \tl_case:Nn #1#2
{
@@ -1927,6 +1942,20 @@
\cs_generate_variant:Nn \tl_case:NnT { c }
\cs_generate_variant:Nn \tl_case:NnF { c }
\cs_generate_variant:Nn \tl_case:NnTF { c }
+% \end{macrocode}
+% To tidy up the recursion, there are two outcomes. If there was a hit to
+% one of the cases searched for, then |#1| will be the code to insert,
+% |#2| will be the \emph{next} case to check on and |#3| will be all of
+% the rest of the cases code. That means that |#4| will be the \texttt{true}
+% branch code, and |#5| will tidy up the spare \cs{q_mark} and the
+% \texttt{false} branch. On the other hand, if none of the cases matched
+% then we arrive here using the \enquote{termination} case of comparing
+% the search with itself. That means that |#1| will be empty, |#2| will be
+% the first \cs{q_mark} and so |#4| will be the \texttt{false} code (the
+% \texttt{true} code is mopped up by |#3|).
+% \begin{macrocode}
+\cs_new:Npn \__prg_case_end:nw #1#2#3 \q_mark #4#5 \q_stop
+ { \c_zero #1 #4 }
\cs_new_eq:NN \@@_case_end:nw \__prg_case_end:nw
% \end{macrocode}
% \end{macro}
@@ -1934,6 +1963,7 @@
% \end{macro}
% \end{macro}
% \end{macro}
+% \end{macro}
%
% \subsection{Mapping to token lists}
%
@@ -2356,7 +2386,7 @@
% \subsection{The first token from a token list}
%
% \begin{macro}{\tl_head:N, \tl_head:n, \tl_head:V, \tl_head:v, \tl_head:f}
-% \begin{macro}[aux]{\@@_head_auxi:nw, \@@_head_auxii:nw}
+% \begin{macro}[aux]{\@@_head_auxi:nw, \@@_head_auxii:n}
% \begin{macro}{\tl_head:w}
% \begin{macro}{\tl_tail:N, \tl_tail:n, \tl_tail:V, \tl_tail:v, \tl_tail:f}
% Finding the head of a token list expandably will always strip braces, which
@@ -2377,8 +2407,8 @@
\if_false: { \fi: \@@_head_auxi:nw #1 { } \q_stop }
}
\cs_new:Npn \@@_head_auxi:nw #1#2 \q_stop
- { \exp_after:wN \@@_head_auxii:nw \exp_after:wN { \if_false: } \fi: {#1} }
-\cs_new:Npn \@@_head_auxii:nw #1
+ { \exp_after:wN \@@_head_auxii:n \exp_after:wN { \if_false: } \fi: {#1} }
+\cs_new:Npn \@@_head_auxii:n #1
{
\exp_after:wN \if_meaning:w \exp_after:wN \q_nil
\tl_to_str:n \exp_after:wN { \use_none:n #1 } \q_nil
@@ -2423,50 +2453,6 @@
% \end{macro}
% \end{macro}
%
-% \begin{macro}{\str_head:n, \str_tail:n}
-% \begin{macro}[aux]{\__str_head:w}
-% \begin{macro}[aux]{\__str_tail:w}
-% After \cs{tl_to_str:n}, we have a list of character tokens,
-% all with category code 12, except the space, which has category
-% code 10. Directly using \cs{tl_head:w} would thus lose leading spaces.
-% Instead, we take an argument delimited by an explicit space, and
-% then only use \cs{tl_head:w}. If the string started with a
-% space, then the argument of \cs{__str_head:w} is empty, and
-% the function correctly returns a space character. Otherwise,
-% it returns the first token of |#1|, which is the first token
-% of the string. If the string is empty, we return an empty result.
-%
-% To remove the first character of \cs{tl_to_str:n} |{#1}|,
-% we test it using \cs{if_charcode:w} \cs{scan_stop:},
-% always \texttt{false} for characters. If the argument was non-empty,
-% then \cs{__str_tail:w} returns everything until the first
-% \texttt{X} (with category code letter, no risk of confusing
-% with the user input). If the argument was empty, the first
-% \texttt{X} is taken by \cs{if_charcode:w}, and nothing
-% is returned. We use \texttt{X} as a \meta{marker}, rather than
-% a quark because the test \cs{if_charcode:w} \cs{scan_stop:}
-% \meta{marker} has to be \texttt{false}.
-% \begin{macrocode}
-\cs_new:Npn \str_head:n #1
- {
- \exp_after:wN \__str_head:w
- \tl_to_str:n {#1}
- { { } } ~ \q_stop
- }
-\cs_new:Npn \__str_head:w #1 ~ %
- { \tl_head:w #1 { ~ } }
-\cs_new:Npn \str_tail:n #1
- {
- \exp_after:wN \__str_tail:w
- \reverse_if:N \if_charcode:w
- \scan_stop: \tl_to_str:n {#1} X X \q_stop
- }
-\cs_new:Npn \__str_tail:w #1 X #2 \q_stop { \fi: #1 }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
% \begin{macro}[pTF]{\tl_if_head_eq_meaning:nN}
% \begin{macro}[pTF]{\tl_if_head_eq_charcode:nN}
% \begin{macro}[pTF]{\tl_if_head_eq_charcode:fN}
@@ -2670,6 +2656,43 @@
% \end{macro}
% \end{macro}
%
+% \subsection{Using a single item}
+%
+% \begin{macro}{\tl_item:nn, \tl_item:Nn, \tl_item:cn}
+% \begin{macro}[aux]{\@@_item:nn}
+% The idea here is to find the offset of the item from the left, then use
+% a loop to grab the correct item. If the resulting offset is too large,
+% then \cs{quark_if_recursion_tail_stop:n} terminates the loop, and returns
+% nothing at all.
+% \begin{macrocode}
+\cs_new:Npn \tl_item:nn #1#2
+ {
+ \exp_args:Nf \@@_item:nn
+ {
+ \int_eval:n
+ {
+ \int_compare:nNnT {#2} < \c_zero
+ { \tl_count:n {#1} + \c_one + }
+ #2
+ }
+ }
+ #1
+ \q_recursion_tail
+ \__prg_break_point:
+ }
+\cs_new:Npn \@@_item:nn #1#2
+ {
+ \__quark_if_recursion_tail_break:nN {#2} \__prg_break:
+ \int_compare:nNnTF {#1} = \c_one
+ { \__prg_break:n { \exp_not:n {#2} } }
+ { \exp_args:Nf \@@_item:nn { \int_eval:n { #1 - 1 } } }
+ }
+\cs_new_nopar:Npn \tl_item:Nn { \exp_args:No \tl_item:nn }
+\cs_generate_variant:Nn \tl_item:Nn { c }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
% \subsection{Viewing token lists}
%
% \begin{macro}{\tl_show:N, \tl_show:c}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.def b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.def
new file mode 100644
index 00000000000..6d5625134b3
--- /dev/null
+++ b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.def
@@ -0,0 +1,378 @@
+\ProvidesExplFile {l3unicode-data.def} {2014/06/30} {5166} {L3 Unicode data}
+\clist_const:Nn \c__tl_after_final_sigma_clist
+ { 0021 , 0022 , 0029 , 002C , 002E , 003A , 003B , 003F , 005D , 007D }
+\clist_const:Nn \c__tl_mixed_skip_clist
+ { 0028 , 005B , 0060 , 007B }
+\pdftex_if_engine:T
+ {
+ \group_begin:
+ \cs_set_protected:Npn \__unicode_tmp:NN #1#2
+ {
+ \quark_if_recursion_tail_stop:N #1
+ \exp_after:wN \__unicode_tmp:NNNNNNN
+ \tex_number:D \__int_eval:w `#1 \exp_after:wN \__int_eval_end:
+ \tex_number:D \__int_eval:w 100 + `#2 \__int_eval_end:
+ #1 #2
+ \__unicode_tmp:NN
+ }
+ \cs_set_protected:Npn \__unicode_tmp:NNNNNNN #1#2#3#4#5#6#7
+ {
+ \tl_const:cx { c__str_fold_ #1 _ #2 _ tl }
+ { \tl_to_str:n { #6#7 } }
+ \tl_const:cn { c__tl_lower_ #1 _ #2 _ tl } { #6#7 }
+ \tl_const:cn { c__tl_upper_ #4 _ #5 _ tl } { #7#6 }
+ }
+ \__unicode_tmp:NN
+ AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz
+ \q_recursion_tail ? \q_recursion_stop
+ \group_end:
+ \int_step_inline:nnnn { 0 } { 1 } { 9 }
+ {
+ \int_step_inline:nnnn { 0 } { 1 } { 9 }
+ {
+ \tl_if_exist:cF { c__str_fold_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__str_fold_ #1 _ ##1 _ tl } { }
+ }
+ \tl_if_exist:cF { c__tl_lower_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__tl_lower_ #1 _ ##1 _ tl } { }
+ }
+ \tl_if_exist:cF { c__tl_upper_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__tl_upper_ #1 _ ##1 _ tl } { }
+ }
+ }
+ }
+ \tl_const:Nn \c__tl_mixed_exceptions_tl { }
+ \tl_const:Nn \c__tl_std_sigma_tl { }
+ \tl_const:Nn \c__tl_final_sigma_tl { }
+ \tl_const:Nn \c__tl_accents_lt_tl { }
+ \tl_const:Nn \c__tl_dot_above_tl { }
+ \tl_const:Nn \c__tl_dotless_i_tl { I }
+ \tl_const:Nn \c__tl_dotted_I_tl { i }
+ \tex_endinput:D
+ }
+\group_begin:
+ \cs_set_protected:Npn \__str_tmp:NNn #1#2#3
+ {
+ \tl_const:cx { c__str_fold_#1_#2_tl }
+ { \__str_tmp:Nw #3 \q_recursion_tail { } \q_recursion_stop }
+ }
+ \cs_set:Npn \__str_tmp:Nw #1#2
+ {
+ \quark_if_recursion_tail_stop:N #1
+ \tl_to_str:N #1
+ \tl_if_blank:oT { \use_none:n #2 }
+ { \use:n }
+ { \tl_to_str:n {#2} }
+ \__str_tmp:Nw
+ }
+ \__str_tmp:NNn 0 0 { ÈèĬĭƐɛǴǵϨϩҰұԔԕḔḕṸṹỜờᾤ{ὤι}ⒸⓒⰤⱔⲈⲉꙨꙩ }
+ \__str_tmp:NNn 0 1 { ÉéƑƒჍⴭᾥ{ὥι}ⒹⓓⰥⱕⳭⳮ }
+ \__str_tmp:NNn 0 2 { ÊêĮįǶƕΆάϪϫҲҳԖԗḖḗṺṻỞởᾦ{ὦι}ⒺⓔⰦⱖⲊⲋꙪꙫꜲꜳ }
+ \__str_tmp:NNn 0 3 { ËëƓɠǷƿᾧ{ὧι}ⒻⓕⰧⱗ }
+ \__str_tmp:NNn 0 4 { Ììİ{i̇}ƔɣǸǹΈέϬϭҴҵԘԙḘḙṼṽỠỡᾨ{ὠι}ⒼⓖⰨⱘⲌⲍꙬꙭꜴꜵ }
+ \__str_tmp:NNn 0 5 { ÍíΉήᾩ{ὡι}ⒽⓗⰩⱙ }
+ \__str_tmp:NNn 0 6 { ÎîIJijƖɩǺǻΊίϮϯҶҷԚԛḚḛṾṿỢợᾪ{ὢι}ⒾⓘⰪⱚⲎⲏⳲⳳꜶꜷ }
+ \__str_tmp:NNn 0 7 { ÏïƗɨᾫ{ὣι}ⒿⓙⰫⱛ }
+ \__str_tmp:NNn 0 8 { ÐðĴĵƘƙǼǽΌόϰκҸҹԜԝḜḝẀẁỤụὈὀᾬ{ὤι}ⓀⓚⰬⱜⲐⲑꜸꜹ }
+ \__str_tmp:NNn 0 9 { ÑñϱρὉὁᾭ{ὥι}ⓁⓛⰭⱝ }
+ \__str_tmp:NNn 1 0 { ÒòĶķǾǿΎύҺһԞԟḞḟẂẃỦủὊὂᾮ{ὦι}ⓂⓜⰮⱞⲒⲓꜺꜻ }
+ \__str_tmp:NNn 1 1 { ÓóΏώὋὃᾯ{ὧι}Ⓝⓝ }
+ \__str_tmp:NNn 1 2 { ÔôƜɯȀȁΐ{ΐ}ϴθҼҽԠԡḠḡẄẅỨứὌὄⓄⓞⲔⲕꜼꜽꞠꞡ }
+ \__str_tmp:NNn 1 3 { ÕõĹĺƝɲΑαϵεὍὅⓅⓟAa }
+ \__str_tmp:NNn 1 4 { ÖöȂȃΒβҾҿԢԣḢḣẆẇỪừᾲ{ὰι}ⓆⓠⲖⲗꜾꜿꞢꞣBb }
+ \__str_tmp:NNn 1 5 { ĻļƟɵΓγϷϸև{եւ}ᾳ{αι}ⓇⓡCc }
+ \__str_tmp:NNn 1 6 { ØøƠơȄȅΔδӀӏԤԥḤḥẈẉỬửὐ{ὐ}ᾴ{άι}ⓈⓢⲘⲙꝀꝁꞤꞥDd }
+ \__str_tmp:NNn 1 7 { ÙùĽľΕεϹϲӁӂⓉⓣEe }
+ \__str_tmp:NNn 1 8 { ÚúƢƣȆȇΖζϺϻԦԧḦḧẊẋỮữὒ{ὒ}ᾶ{ᾶ}ⓊⓤⲚⲛꝂꝃꞦꞧFf }
+ \__str_tmp:NNn 1 9 { ÛûĿŀΗηӃӄᾷ{ᾶι}ⓋⓥGg }
+ \__str_tmp:NNn 2 0 { ÜüƤƥȈȉΘθѠѡḨḩẌẍỰựὔ{ὔ}ᾸᾰⓌⓦⲜⲝꝄꝅꞨꞩHh }
+ \__str_tmp:NNn 2 1 { ÝýŁłΙιϽͻӅӆᾹᾱⓍⓧIi }
+ \__str_tmp:NNn 2 2 { ÞþƦʀȊȋΚκϾͼѢѣḪḫẎẏỲỳὖ{ὖ}ᾺὰⓎⓨⲞⲟꝆꝇꞪɦJj }
+ \__str_tmp:NNn 2 3 { ß{ss}ŃńƧƨΛλϿͽӇӈΆάⓏⓩKk }
+ \__str_tmp:NNn 2 4 { ȌȍΜμЀѐѤѥḬḭẐẑỴỵᾼ{αι}ⲠⲡꚀꚁꝈꝉLl }
+ \__str_tmp:NNn 2 5 { ŅņƩʃΝνЁёӉӊὙὑMm }
+ \__str_tmp:NNn 2 6 { ȎȏΞξЂђѦѧḮḯẒẓỶỷιιⲢⲣꚂꚃꝊꝋNn }
+ \__str_tmp:NNn 2 7 { ŇňΟοЃѓӋӌὛὓOo }
+ \__str_tmp:NNn 2 8 { ƬƭȐȑΠπЄєѨѩḰḱẔẕỸỹⲤⲥꚄꚅꝌꝍPp }
+ \__str_tmp:NNn 2 9 { ʼn{ʼn}ΡρЅѕӍӎԱաὝὕQq }
+ \__str_tmp:NNn 3 0 { ŊŋƮʈȒȓІіѪѫԲբḲḳẖ{ẖ}Ỻỻῂ{ὴι}ⲦⲧꚆꚇꝎꝏRr }
+ \__str_tmp:NNn 3 1 { ƯưΣσЇїԳգẗ{ẗ}Ὗὗῃ{ηι}Ss }
+ \__str_tmp:NNn 3 2 { ŌōȔȕΤτЈјѬѭӐӑԴդḴḵẘ{ẘ}Ỽỽῄ{ήι}ⲨⲩꚈꚉꝐꝑTt }
+ \__str_tmp:NNn 3 3 { ƱʊΥυЉљԵեẙ{ẙ}Uu }
+ \__str_tmp:NNn 3 4 { ŎŏƲʋȖȗΦφЊњѮѯӒӓԶզḶḷẚ{aʾ}Ỿỿῆ{ῆ}ⲪⲫꚊꚋꝒꝓVv }
+ \__str_tmp:NNn 3 5 { ƳƴΧχЋћԷէẛṡῇ{ῆι}Ww }
+ \__str_tmp:NNn 3 6 { ŐőȘșΨψЌќѰѱӔӕԸըḸḹῈὲⲬⲭꚌꚍꝔꝕXx }
+ \__str_tmp:NNn 3 7 { ƵƶͅιΩωЍѝԹթΈέYy }
+ \__str_tmp:NNn 3 8 { ŒœȚțΪϊЎўѲѳӖӗԺժḺḻẞ{ss}ῊὴⲮⲯꚎꚏꝖꝗZz }
+ \__str_tmp:NNn 3 9 { ƷʒΫϋЏџԻիΉή }
+ \__str_tmp:NNn 4 0 { ŔŕƸƹȜȝАаѴѵӘәԼլḼḽẠạὨὠῌ{ηι}ⲰⲱꚐꚑꝘꝙ }
+ \__str_tmp:NNn 4 1 { БбԽխὩὡ }
+ \__str_tmp:NNn 4 2 { ŖŗȞȟВвѶѷӚӛԾծḾḿẢảὪὢⲲⲳꚒꚓꝚꝛ }
+ \__str_tmp:NNn 4 3 { ГгԿկὫὣ }
+ \__str_tmp:NNn 4 4 { ŘřƼƽȠƞΰ{ΰ}ДдѸѹӜӝՀհṀṁẤấἈἀὬὤⅠⅰⲴⲵꚔꚕꝜꝝ }
+ \__str_tmp:NNn 4 5 { ЕеՁձἉἁὭὥⅡⅱ }
+ \__str_tmp:NNn 4 6 { ŚśȢȣЖжѺѻӞӟՂղṂṃẦầἊἂὮὦῒ{ῒ}ⅢⅲⲶⲷꚖꚗꝞꝟ }
+ \__str_tmp:NNn 4 7 { ЗзՃճἋἃὯὧΐ{ΐ}Ⅳⅳ }
+ \__str_tmp:NNn 4 8 { ŜŝȤȥИиѼѽӠӡՄմṄṅẨẩἌἄⅤⅴⲸⲹꝠꝡ }
+ \__str_tmp:NNn 4 9 { ЙйՅյἍἅⅥⅵ }
+ \__str_tmp:NNn 5 0 { ŞşȦȧКкѾѿӢӣՆնṆṇẪẫἎἆῖ{ῖ}ⅦⅶⲺⲻꝢꝣ }
+ \__str_tmp:NNn 5 1 { ЛлՇշἏἇῗ{ῗ}Ⅷⅷ }
+ \__str_tmp:NNn 5 2 { ŠšDŽdžȨȩМмҀҁӤӥՈոṈṉẬậῘῐⅨⅸⲼⲽꝤꝥ }
+ \__str_tmp:NNn 5 3 { DždžНнՉչῙῑⅩⅹ }
+ \__str_tmp:NNn 5 4 { ŢţȪȫОоӦӧՊպṊṋẮắῚὶⅪⅺⲾⲿꝦꝧ }
+ \__str_tmp:NNn 5 5 { LJljПпՋջΊίⅫⅻ }
+ \__str_tmp:NNn 5 6 { ĀāŤťLjljȬȭРрӨөՌռႠⴀṌṍẰằⅬⅼⳀⳁꝨꝩff{ff} }
+ \__str_tmp:NNn 5 7 { СсՍսႡⴁⅭⅽfi{fi} }
+ \__str_tmp:NNn 5 8 { ĂăŦŧNJnjȮȯТтӪӫՎվႢⴂṎṏẲẳⅮⅾⳂⳃꝪꝫfl{fl} }
+ \__str_tmp:NNn 5 9 { NjnjУуՏտႣⴃⅯⅿffi{ffi} }
+ \__str_tmp:NNn 6 0 { ĄąŨũȰȱФфӬӭՐրႤⴄṐṑẴẵἘἐⱠⱡⳄⳅꙀꙁꝬꝭffl{ffl}𐐀𐐨 }
+ \__str_tmp:NNn 6 1 { ǍǎХхՑցႥⴅἙἑſt{st}𐐁𐐩 }
+ \__str_tmp:NNn 6 2 { ĆćŪūȲȳςσЦцҊҋӮӯՒւႦⴆṒṓẶặἚἒῢ{ῢ}ⱢɫⳆⳇꙂꙃꝮꝯst{st}𐐂𐐪 }
+ \__str_tmp:NNn 6 3 { ǏǐЧчՓփႧⴇἛἓΰ{ΰ}Ᵽᵽ𐐃𐐫 }
+ \__str_tmp:NNn 6 4 { ĈĉŬŭШшҌҍӰӱՔքႨⴈṔṕẸẹἜἔᾀ{ἀι}ῤ{ῤ}ⰀⰰⱤɽⳈⳉꙄꙅ𐐄𐐬 }
+ \__str_tmp:NNn 6 5 { AaǑǒЩщՕօႩⴉἝἕᾁ{ἁι}Ⰱⰱ𐐅𐐭 }
+ \__str_tmp:NNn 6 6 { BbĊċŮůЪъҎҏӲӳՖֆႪⴊṖṗẺẻᾂ{ἂι}ῦ{ῦ}ⰂⰲⳊⳋꙆꙇ𐐆𐐮 }
+ \__str_tmp:NNn 6 7 { CcǓǔЫыႫⴋᾃ{ἃι}ῧ{ῧ}ⰃⰳⱧⱨ𐐇𐐯 }
+ \__str_tmp:NNn 6 8 { DdČčŰűЬьҐґӴӵႬⴌṘṙẼẽᾄ{ἄι}ῨῠⰄⰴⳌⳍꙈꙉ𐐈𐐰 }
+ \__str_tmp:NNn 6 9 { EeǕǖЭэႭⴍᾅ{ἅι}ῩῡⰅⰵⱩⱪ𐐉𐐱 }
+ \__str_tmp:NNn 7 0 { FfĎďŲųȺⱥЮюҒғӶӷႮⴎṚṛẾếᾆ{ἆι}ῪὺⰆⰶⳎⳏꙊꙋ𐐊𐐲 }
+ \__str_tmp:NNn 7 1 { GgǗǘȻȼЯяႯⴏᾇ{ἇι}ΎύⰇⰷⱫⱬ𐐋𐐳 }
+ \__str_tmp:NNn 7 2 { HhĐđŴŵҔҕӸӹႰⴐṜṝỀềᾈ{ἀι}ῬῥⰈⰸⳐⳑꙌꙍ𐐌𐐴 }
+ \__str_tmp:NNn 7 3 { IiǙǚȽƚႱⴑᾉ{ἁι}ⰉⰹⱭɑꝹꝺ𐐍𐐵 }
+ \__str_tmp:NNn 7 4 { JjĒēŶŷȾⱦҖҗӺӻႲⴒṞṟỂểᾊ{ἂι}ⰊⰺⱮɱⳒⳓꙎꙏ𐐎𐐶 }
+ \__str_tmp:NNn 7 5 { KkǛǜϏϗႳⴓᾋ{ἃι}ⰋⰻⱯɐꝻꝼﬓ{մն}𐐏𐐷 }
+ \__str_tmp:NNn 7 6 { LlĔĕŸÿϐβҘҙӼӽႴⴔṠṡỄễἨἠᾌ{ἄι}ⰌⰼⱰɒⳔⳕꙐꙑﬔ{մե}𐐐𐐸 }
+ \__str_tmp:NNn 7 7 { MmŹźɁɂϑθႵⴕἩἡᾍ{ἅι}ⰍⰽꝽᵹﬕ{մի}𐐑𐐹 }
+ \__str_tmp:NNn 7 8 { NnĖėǞǟҚқӾӿႶⴖṢṣỆệἪἢᾎ{ἆι}ῲ{ὼι}ⰎⰾⱲⱳⳖⳗꙒꙓꝾꝿﬖ{վն}𐐒𐐺 }
+ \__str_tmp:NNn 7 9 { OoŻżɃƀႷⴗἫἣᾏ{ἇι}ῳ{ωι}ↃↄⰏⰿﬗ{մխ}𐐓𐐻 }
+ \__str_tmp:NNn 8 0 { PpĘęǠǡɄʉͰͱҜҝԀԁႸⴘḀḁṤṥỈỉἬἤᾐ{ἠι}ῴ{ώι}ⰐⱀⳘⳙꙔꙕꞀꞁ𐐔𐐼 }
+ \__str_tmp:NNn 8 1 { QqµμŽžɅʌϕφႹⴙἭἥᾑ{ἡι}ⰑⱁⱵⱶ𐐕𐐽 }
+ \__str_tmp:NNn 8 2 { RrĚěǢǣɆɇͲͳϖπҞҟԂԃႺⴚḂḃṦṧỊịἮἦᾒ{ἢι}ῶ{ῶ}ⰒⱂⳚⳛꙖꙗꞂꞃ𐐖𐐾 }
+ \__str_tmp:NNn 8 3 { SsſsႻⴛἯἧᾓ{ἣι}ῷ{ῶι}Ⱃⱃ𐐗𐐿 }
+ \__str_tmp:NNn 8 4 { TtĜĝǤǥɈɉϘϙҠҡԄԅႼⴜḄḅṨṩỌọᾔ{ἤι}ῸὸⰔⱄⳜⳝꙘꙙꞄꞅ𐐘𐑀 }
+ \__str_tmp:NNn 8 5 { UuƁɓႽⴝᾕ{ἥι}ΌόⰕⱅ𐐙𐑁 }
+ \__str_tmp:NNn 8 6 { VvĞğƂƃǦǧɊɋͶͷϚϛҢңԆԇႾⴞḆḇṪṫỎỏᾖ{ἦι}ῺὼΩωⰖⱆⳞⳟꙚꙛꜢꜣꞆꞇ𐐚𐑂 }
+ \__str_tmp:NNn 8 7 { WwႿⴟᾗ{ἧι}ΏώⰗⱇ𐐛𐑃 }
+ \__str_tmp:NNn 8 8 { XxĠġƄƅǨǩɌɍϜϝҤҥԈԉჀⴠḈḉṬṭỐốᾘ{ἠι}ῼ{ωι}ⰘⱈⳠⳡꙜꙝꜤꜥ𐐜𐑄 }
+ \__str_tmp:NNn 8 9 { YyჁⴡᾙ{ἡι}Ⱉⱉ𐐝𐑅 }
+ \__str_tmp:NNn 9 0 { ZzĢģƆɔǪǫɎɏϞϟҦҧԊԋჂⴢḊḋṮṯỒồᾚ{ἢι}KkⰚⱊⱾȿⳢⳣꙞꙟꜦꜧ𐐞𐑆 }
+ \__str_tmp:NNn 9 1 { ƇƈჃⴣᾛ{ἣι}ÅåⰛⱋⱿɀꞋꞌ𐐟𐑇 }
+ \__str_tmp:NNn 9 2 { ÀàĤĥǬǭϠϡҨҩԌԍჄⴤḌḍṰṱỔổἸἰᾜ{ἤι}ⰜⱌⲀⲁꙠꙡꜨꜩ𐐠𐑈 }
+ \__str_tmp:NNn 9 3 { ÁáƉɖჅⴥἹἱᾝ{ἥι}ⰝⱍꞍɥ𐐡𐑉 }
+ \__str_tmp:NNn 9 4 { ÂâĦħƊɗǮǯϢϣҪҫԎԏḎḏṲṳỖỗἺἲᾞ{ἦι}ⰞⱎⲂⲃꙢꙣꜪꜫ𐐢𐑊 }
+ \__str_tmp:NNn 9 5 { ÃãƋƌჇⴧἻἳᾟ{ἧι}Ⱏⱏ𐐣𐑋 }
+ \__str_tmp:NNn 9 6 { ÄäĨĩǰ{ǰ}ϤϥҬҭԐԑḐḑṴṵỘộἼἴᾠ{ὠι}ⰠⱐⲄⲅꙤꙥꜬꜭꞐꞑ𐐤𐑌 }
+ \__str_tmp:NNn 9 7 { ÅåDZdzἽἵᾡ{ὡι}Ⱑⱑ𐐥𐑍 }
+ \__str_tmp:NNn 9 8 { ÆæĪīƎǝDzdzϦϧҮүԒԓḒḓṶṷỚớἾἶᾢ{ὢι}ℲⅎⒶⓐⰢⱒⲆⲇꙦꙧꜮꜯꞒꞓ𐐦𐑎 }
+ \__str_tmp:NNn 9 9 { ÇçƏəἿἷᾣ{ὣι}ⒷⓑⰣⱓⳫⳬ𐐧𐑏 }
+\group_end:
+\tl_const:cn { c__tl_upper_0_0_tl } { dDьЬոՈὀὈᾤ{ὬΙ}ⳬⳫ𐐨𐐀 }
+\tl_const:cn { c__tl_upper_0_1_tl } { eEĭĬǵǴəƏϩϨэЭұҰԕԔչՉḕḔṹṸờỜὁὉᾥ{ὭΙ}ⲉⲈꙩꙨ𐐩𐐁 }
+\tl_const:cn { c__tl_upper_0_2_tl } { fFƒƑюЮպՊὂὊᾦ{ὮΙ}ⳮⳭ𐐪𐐂 }
+\tl_const:cn { c__tl_upper_0_3_tl } { gGįĮɛƐϫϪяЯҳҲԗԖջՋḗḖṻṺởỞὃὋᾧ{ὯΙ}ⲋⲊꙫꙪꜳꜲ𐐫𐐃 }
+\tl_const:cn { c__tl_upper_0_4_tl } { hHѐЀռՌὄὌ𐐬𐐄 }
+\tl_const:cn { c__tl_upper_0_5_tl } { iIıIƕǶǹǸϭϬёЁҵҴԙԘսՍḙḘṽṼỡỠὅὍⲍⲌꙭꙬꜵꜴ𐐭𐐅 }
+\tl_const:cn { c__tl_upper_0_6_tl } { jJђЂվՎ𐐮𐐆 }
+\tl_const:cn { c__tl_upper_0_7_tl } { kKijIJǻǺϯϮѓЃҷҶԛԚտՏḛḚṿṾợỢⲏⲎⳳⳲꜷꜶ𐐯𐐇 }
+\tl_const:cn { c__tl_upper_0_8_tl } { lLɠƓϰΚєЄրՐ𐐰𐐈 }
+\tl_const:cn { c__tl_upper_0_9_tl } { mMĵĴƙƘǽǼϱΡѕЅҹҸԝԜցՑḝḜẁẀụỤⲑⲐꜹꜸ𐐱𐐉 }
+\tl_const:cn { c__tl_upper_1_0_tl } { nNƚȽϲϹіІւՒ𐐲𐐊 }
+\tl_const:cn { c__tl_upper_1_1_tl } { oOķĶǿǾɣƔїЇһҺԟԞփՓḟḞẃẂủỦⲓⲒꜻꜺ𐐳𐐋 }
+\tl_const:cn { c__tl_upper_1_2_tl } { pPјЈքՔᾰᾸⰰⰀ𐐴𐐌 }
+\tl_const:cn { c__tl_upper_1_3_tl } { qQȁȀɥꞍϵΕљЉҽҼԡԠօՕḡḠẅẄứỨᾱᾹⰱⰁⲕⲔꜽꜼꞡꞠ𐐵𐐍 }
+\tl_const:cn { c__tl_upper_1_4_tl } { rRĺĹƞȠɦꞪњЊֆՖⰲⰂ𐐶𐐎 }
+\tl_const:cn { c__tl_upper_1_5_tl } { sSȃȂћЋҿҾԣԢḣḢẇẆừỪᾳ{ΑΙ}ⰳⰃⲗⲖꜿꜾꞣꞢ𐐷𐐏 }
+\tl_const:cn { c__tl_upper_1_6_tl } { tTļĻɨƗϸϷќЌⰴⰄ𐐸𐐐 }
+\tl_const:cn { c__tl_upper_1_7_tl } { uUơƠȅȄɩƖѝЍԥԤḥḤẉẈửỬὑὙⰵⰅⲙⲘꝁꝀꞥꞤ𐐹𐐑 }
+\tl_const:cn { c__tl_upper_1_8_tl } { vVľĽўЎӂӁⰶⰆ𐐺𐐒 }
+\tl_const:cn { c__tl_upper_1_9_tl } { wWƣƢȇȆɫⱢϻϺџЏԧԦḧḦẋẊữỮὓὛⰷⰇⲛⲚꝃꝂꞧꞦ𐐻𐐓 }
+\tl_const:cn { c__tl_upper_2_0_tl } { xXŀĿӄӃⰸⰈⴀႠ𐐼𐐔 }
+\tl_const:cn { c__tl_upper_2_1_tl } { yYƥƤȉȈѡѠḩḨẍẌựỰὕὝⰹⰉⲝⲜⴁႡꝅꝄꞩꞨ𐐽𐐕 }
+\tl_const:cn { c__tl_upper_2_2_tl } { zZłŁӆӅⰺⰊⴂႢ𐐾𐐖 }
+\tl_const:cn { c__tl_upper_2_3_tl } { ȋȊɯƜѣѢḫḪẏẎỳỲὗὟⰻⰋⲟⲞⴃႣꝇꝆ𐐿𐐗 }
+\tl_const:cn { c__tl_upper_2_4_tl } { àÀńŃƨƧӈӇⓐⒶⰼⰌⴄႤ𐑀𐐘 }
+\tl_const:cn { c__tl_upper_2_5_tl } { áÁȍȌɱⱮѥѤḭḬẑẐỵỴⓑⒷⰽⰍⲡⲠⴅႥꚁꚀꝉꝈ𐑁𐐙 }
+\tl_const:cn { c__tl_upper_2_6_tl } { âÂņŅɲƝӊӉιΙⅎℲⓒⒸⰾⰎⴆႦ𐑂𐐚 }
+\tl_const:cn { c__tl_upper_2_7_tl } { ãÃȏȎѧѦḯḮẓẒỷỶⓓⒹⰿⰏⲣⲢⴇႧꚃꚂꝋꝊ𐑃𐐛 }
+\tl_const:cn { c__tl_upper_2_8_tl } { äÄňŇӌӋⓔⒺⱀⰐⴈႨ𐑄𐐜 }
+\tl_const:cn { c__tl_upper_2_9_tl } { åÅƭƬȑȐɵƟѩѨḱḰẕẔỹỸⓕⒻⱁⰑⲥⲤⴉႩꚅꚄꝍꝌ𐑅𐐝 }
+\tl_const:cn { c__tl_upper_3_0_tl } { æÆӎӍⓖⒼⱂⰒⴊႪ𐑆𐐞 }
+\tl_const:cn { c__tl_upper_3_1_tl } { çÇŋŊȓȒѫѪӏӀḳḲỻỺῃ{ΗΙ}ⓗⒽⱃⰓⲧⲦⴋႫꚇꚆꝏꝎ𐑇𐐟 }
+\tl_const:cn { c__tl_upper_3_2_tl } { èÈưƯὠὨⓘⒾⱄⰔⴌႬ𐑈𐐠 }
+\tl_const:cn { c__tl_upper_3_3_tl } { éÉōŌȕȔѭѬӑӐḵḴỽỼὡὩⓙⒿⱅⰕⲩⲨⴍႭꚉꚈꝑꝐ𐑉𐐡 }
+\tl_const:cn { c__tl_upper_3_4_tl } { êÊὢὪⓚⓀⱆⰖⴎႮ𐑊𐐢 }
+\tl_const:cn { c__tl_upper_3_5_tl } { ëËŏŎȗȖѯѮӓӒḷḶẛṠỿỾὣὫⓛⓁⱇⰗⲫⲪⴏႯꚋꚊꝓꝒ𐑋𐐣 }
+\tl_const:cn { c__tl_upper_3_6_tl } { ìÌƴƳἀἈὤὬⓜⓂⱈⰘⴐႰ𐑌𐐤 }
+\tl_const:cn { c__tl_upper_3_7_tl } { íÍőŐșȘɽⱤͅΙѱѰӕӔḹḸἁἉὥὭⓝⓃⱉⰙⲭⲬⴑႱꚍꚌꝕꝔ𐑍𐐥 }
+\tl_const:cn { c__tl_upper_3_8_tl } { îÎƶƵἂἊὦὮⓞⓄⱊⰚⴒႲ𐑎𐐦 }
+\tl_const:cn { c__tl_upper_3_9_tl } { ïÏœŒțȚѳѲӗӖḻḺἃἋὧὯⓟⓅⱋⰛⲯⲮⴓႳꚏꚎꝗꝖ𐑏𐐧 }
+\tl_const:cn { c__tl_upper_4_0_tl } { ðÐʀƦάΆἄἌⓠⓆⱌⰜⴔႴ }
+\tl_const:cn { c__tl_upper_4_1_tl } { ñÑŕŔƹƸȝȜέΈѵѴәӘḽḼạẠἅἍⓡⓇⱍⰝⲱⲰⴕႵꚑꚐꝙꝘ }
+\tl_const:cn { c__tl_upper_4_2_tl } { òÒήΉἆἎⓢⓈⱎⰞⴖႶ }
+\tl_const:cn { c__tl_upper_4_3_tl } { óÓŗŖȟȞʃƩίΊѷѶӛӚḿḾảẢἇἏⓣⓉⱏⰟⲳⲲⴗႷꚓꚒꝛꝚ }
+\tl_const:cn { c__tl_upper_4_4_tl } { ôÔῐῘⓤⓊⱐⰠⴘႸ }
+\tl_const:cn { c__tl_upper_4_5_tl } { õÕřŘƽƼαΑѹѸӝӜᵹꝽṁṀấẤῑῙⓥⓋⱑⰡⲵⲴⴙႹꚕꚔꝝꝜaA }
+\tl_const:cn { c__tl_upper_4_6_tl } { öÖβΒⓦⓌⱒⰢⴚႺbB }
+\tl_const:cn { c__tl_upper_4_7_tl } { śŚƿǷȣȢγΓѻѺӟӞṃṂầẦⓧⓍⱓⰣⲷⲶⴛႻꚗꚖꝟꝞcC }
+\tl_const:cn { c__tl_upper_4_8_tl } { øØʈƮδΔὰᾺⓨⓎⱔⰤⴜႼdD }
+\tl_const:cn { c__tl_upper_4_9_tl } { ùÙŝŜȥȤʉɄεΕѽѼӡӠᵽⱣṅṄẩẨάΆⓩⓏⱕⰥⲹⲸⴝႽꝡꝠeE }
+\tl_const:cn { c__tl_upper_5_0_tl } { úÚʊƱζΖὲῈⱖⰦⴞႾfF }
+\tl_const:cn { c__tl_upper_5_1_tl } { ûÛşŞȧȦʋƲηΗѿѾӣӢṇṆẫẪέΈⱗⰧⲻⲺⴟႿꝣꝢgG }
+\tl_const:cn { c__tl_upper_5_2_tl } { üÜʌɅθΘἐἘὴῊⱘⰨⴠჀhH }
+\tl_const:cn { c__tl_upper_5_3_tl } { ýÝšŠDžDŽȩȨιΙҁҀӥӤṉṈậẬἑἙήΉⱙⰩⲽⲼⴡჁꝥꝤiI }
+\tl_const:cn { c__tl_upper_5_4_tl } { þÞdžDŽκΚἒἚὶῚⱚⰪⴢჂjJ }
+\tl_const:cn { c__tl_upper_5_5_tl } { ÿŸţŢȫȪλΛӧӦṋṊắẮἓἛίΊⱛⰫⲿⲾⴣჃꝧꝦkK }
+\tl_const:cn { c__tl_upper_5_6_tl } { LjLJμΜἔἜὸῸⱜⰬⴤჄlL }
+\tl_const:cn { c__tl_upper_5_7_tl } { āĀťŤljLJȭȬνΝөӨṍṌằẰἕἝόΌⱝⰭⳁⳀⴥჅꝩꝨmM }
+\tl_const:cn { c__tl_upper_5_8_tl } { ʒƷξΞὺῪⱞⰮnN }
+\tl_const:cn { c__tl_upper_5_9_tl } { ăĂŧŦNjNJȯȮοΟӫӪṏṎẳẲύΎⳃⳂⴧჇꝫꝪoO }
+\tl_const:cn { c__tl_upper_6_0_tl } { njNJπΠὼῺῠῨⅰⅠpP }
+\tl_const:cn { c__tl_upper_6_1_tl } { ąĄũŨȱȰρΡӭӬṑṐẵẴώΏῡῩⅱⅡⱡⱠⳅⳄꙁꙀꝭꝬqQ }
+\tl_const:cn { c__tl_upper_6_2_tl } { ǎǍςΣⅲⅢrR }
+\tl_const:cn { c__tl_upper_6_3_tl } { ćĆūŪȳȲσΣҋҊӯӮṓṒặẶⅳⅣⳇⳆꙃꙂꝯꝮsS }
+\tl_const:cn { c__tl_upper_6_4_tl } { ǐǏτΤᾀ{ἈΙ}ⅴⅤtT }
+\tl_const:cn { c__tl_upper_6_5_tl } { ĉĈŭŬυΥҍҌӱӰṕṔẹẸᾁ{ἉΙ}ῥῬⅵⅥⱥȺⳉⳈⴭჍꙅꙄuU }
+\tl_const:cn { c__tl_upper_6_6_tl } { ǒǑφΦᾂ{ἊΙ}ⅶⅦⱦȾvV }
+\tl_const:cn { c__tl_upper_6_7_tl } { ċĊůŮχΧҏҎӳӲṗṖẻẺᾃ{ἋΙ}ⅷⅧⳋⳊꙇꙆwW }
+\tl_const:cn { c__tl_upper_6_8_tl } { ǔǓψΨἠἨᾄ{ἌΙ}ⅸⅨⱨⱧxX }
+\tl_const:cn { c__tl_upper_6_9_tl } { čČűŰωΩґҐӵӴṙṘẽẼἡἩᾅ{ἍΙ}ⅹⅩⳍⳌꙉꙈyY }
+\tl_const:cn { c__tl_upper_7_0_tl } { ǖǕϊΪἢἪᾆ{ἎΙ}ⅺⅪⱪⱩzZ }
+\tl_const:cn { c__tl_upper_7_1_tl } { ďĎųŲϋΫғҒӷӶṛṚếẾἣἫᾇ{ἏΙ}ⅻⅫⳏⳎꙋꙊ }
+\tl_const:cn { c__tl_upper_7_2_tl } { ǘǗȼȻόΌаАἤἬⅼⅬⱬⱫ }
+\tl_const:cn { c__tl_upper_7_3_tl } { đĐŵŴύΎбБҕҔӹӸṝṜềỀἥἭⅽⅭⳑⳐꙍꙌ }
+\tl_const:cn { c__tl_upper_7_4_tl } { ǚǙώΏвВἦἮⅾⅮꝺꝹ }
+\tl_const:cn { c__tl_upper_7_5_tl } { ēĒŷŶȿⱾгГҗҖӻӺṟṞểỂἧἯⅿⅯⳓⳒꙏꙎ }
+\tl_const:cn { c__tl_upper_7_6_tl } { ǜǛɀⱿϐΒдДꝼꝻ }
+\tl_const:cn { c__tl_upper_7_7_tl } { ĕĔǝƎϑΘеЕҙҘӽӼաԱṡṠễỄⳕⳔꙑꙐ }
+\tl_const:cn { c__tl_upper_7_8_tl } { źŹɂɁжЖբԲ }
+\tl_const:cn { c__tl_upper_7_9_tl } { ėĖǟǞзЗқҚӿӾգԳṣṢệỆῳ{ΩΙ}ⱳⱲⳗⳖꙓꙒꝿꝾ }
+\tl_const:cn { c__tl_upper_8_0_tl } { żŻиИդԴᾐ{ἨΙ}ↄↃ }
+\tl_const:cn { c__tl_upper_8_1_tl } { µΜęĘǡǠͱͰϕΦйЙҝҜԁԀեԵḁḀṥṤỉỈᾑ{ἩΙ}ⳙⳘꙕꙔꞁꞀ }
+\tl_const:cn { c__tl_upper_8_2_tl } { žŽϖΠкКզԶᾒ{ἪΙ}ⱶⱵ }
+\tl_const:cn { c__tl_upper_8_3_tl } { ěĚſSǣǢɇɆͳͲϗϏлЛҟҞԃԂէԷḃḂṧṦịỊᾓ{ἫΙ}ⳛⳚꙗꙖꞃꞂ }
+\tl_const:cn { c__tl_upper_8_4_tl } { ƀɃмМըԸἰἸᾔ{ἬΙ} }
+\tl_const:cn { c__tl_upper_8_5_tl } { ĝĜǥǤɉɈϙϘнНҡҠԅԄթԹḅḄṩṨọỌἱἹᾕ{ἭΙ}ⳝⳜꙙꙘꞅꞄ }
+\tl_const:cn { c__tl_upper_8_6_tl } { оОժԺἲἺᾖ{ἮΙ} }
+\tl_const:cn { c__tl_upper_8_7_tl } { ğĞƃƂǧǦɋɊͷͶϛϚпПңҢԇԆիԻḇḆṫṪỏỎἳἻᾗ{ἯΙ}ⳟⳞꙛꙚꜣꜢꞇꞆ }
+\tl_const:cn { c__tl_upper_8_8_tl } { рРլԼἴἼ }
+\tl_const:cn { c__tl_upper_8_9_tl } { ġĠƅƄǩǨɍɌϝϜсСҥҤԉԈխԽḉḈṭṬốỐἵἽⳡⳠꙝꙜꜥꜤ }
+\tl_const:cn { c__tl_upper_9_0_tl } { тТծԾἶἾ }
+\tl_const:cn { c__tl_upper_9_1_tl } { ģĢǫǪɏɎͻϽϟϞуУҧҦԋԊկԿḋḊṯṮồỒἷἿⳣⳢꙟꙞꜧꜦ }
+\tl_const:cn { c__tl_upper_9_2_tl } { ƈƇɐⱯͼϾфФհՀꞌꞋ }
+\tl_const:cn { c__tl_upper_9_3_tl } { ĥĤǭǬɑⱭͽϿϡϠхХҩҨԍԌձՁḍḌṱṰổỔⲁⲀꙡꙠꜩꜨ }
+\tl_const:cn { c__tl_upper_9_4_tl } { ɒⱰцЦղՂ }
+\tl_const:cn { c__tl_upper_9_5_tl } { ħĦǯǮɓƁϣϢчЧҫҪԏԎճՃḏḎṳṲỗỖⲃⲂꙣꙢꜫꜪ }
+\tl_const:cn { c__tl_upper_9_6_tl } { ƌƋɔƆшШմՄᾠ{ὨΙ} }
+\tl_const:cn { c__tl_upper_9_7_tl } { aAĩĨϥϤщЩҭҬԑԐյՅḑḐṵṴộỘᾡ{ὩΙ}ⲅⲄꙥꙤꜭꜬꞑꞐ }
+\tl_const:cn { c__tl_upper_9_8_tl } { bBDzDZɖƉъЪնՆᾢ{ὪΙ} }
+\tl_const:cn { c__tl_upper_9_9_tl } { cCīĪdzDZɗƊϧϦыЫүҮԓԒշՇḓḒṷṶớỚᾣ{ὫΙ}ⲇⲆꙧꙦꜯꜮꞓꞒ }
+\tl_const:cn { c__tl_lower_0_0_tl } { ÈèĬĭƐɛǴǵϨϩҰұԔԕḔḕṸṹỜờⒸⓒⰤⱔⲈⲉꙨꙩ }
+\tl_const:cn { c__tl_lower_0_1_tl } { ÉéƑƒჍⴭⒹⓓⰥⱕⳭⳮ }
+\tl_const:cn { c__tl_lower_0_2_tl } { ÊêĮįǶƕΆάϪϫҲҳԖԗḖḗṺṻỞởⒺⓔⰦⱖⲊⲋꙪꙫꜲꜳ }
+\tl_const:cn { c__tl_lower_0_3_tl } { ËëƓɠǷƿⒻⓕⰧⱗ }
+\tl_const:cn { c__tl_lower_0_4_tl } { Ììİ{i̇}ƔɣǸǹΈέϬϭҴҵԘԙḘḙṼṽỠỡᾨᾠⒼⓖⰨⱘⲌⲍꙬꙭꜴꜵ }
+\tl_const:cn { c__tl_lower_0_5_tl } { ÍíΉήᾩᾡⒽⓗⰩⱙ }
+\tl_const:cn { c__tl_lower_0_6_tl } { ÎîIJijƖɩǺǻΊίϮϯҶҷԚԛḚḛṾṿỢợᾪᾢⒾⓘⰪⱚⲎⲏⳲⳳꜶꜷ }
+\tl_const:cn { c__tl_lower_0_7_tl } { ÏïƗɨᾫᾣⒿⓙⰫⱛ }
+\tl_const:cn { c__tl_lower_0_8_tl } { ÐðĴĵƘƙǼǽΌόҸҹԜԝḜḝẀẁỤụὈὀᾬᾤⓀⓚⰬⱜⲐⲑꜸꜹ }
+\tl_const:cn { c__tl_lower_0_9_tl } { ÑñὉὁᾭᾥⓁⓛⰭⱝ }
+\tl_const:cn { c__tl_lower_1_0_tl } { ÒòĶķǾǿΎύҺһԞԟḞḟẂẃỦủὊὂᾮᾦⓂⓜⰮⱞⲒⲓꜺꜻ }
+\tl_const:cn { c__tl_lower_1_1_tl } { ÓóΏώὋὃᾯᾧⓃⓝ }
+\tl_const:cn { c__tl_lower_1_2_tl } { ÔôƜɯȀȁϴθҼҽԠԡḠḡẄẅỨứὌὄⓄⓞⲔⲕꜼꜽꞠꞡ }
+\tl_const:cn { c__tl_lower_1_3_tl } { ÕõĹĺƝɲΑαὍὅⓅⓟAa }
+\tl_const:cn { c__tl_lower_1_4_tl } { ÖöȂȃΒβҾҿԢԣḢḣẆẇỪừⓆⓠⲖⲗꜾꜿꞢꞣBb }
+\tl_const:cn { c__tl_lower_1_5_tl } { ĻļƟɵΓγϷϸⓇⓡCc }
+\tl_const:cn { c__tl_lower_1_6_tl } { ØøƠơȄȅΔδӀӏԤԥḤḥẈẉỬửⓈⓢⲘⲙꝀꝁꞤꞥDd }
+\tl_const:cn { c__tl_lower_1_7_tl } { ÙùĽľΕεϹϲӁӂⓉⓣEe }
+\tl_const:cn { c__tl_lower_1_8_tl } { ÚúƢƣȆȇΖζϺϻԦԧḦḧẊẋỮữⓊⓤⲚⲛꝂꝃꞦꞧFf }
+\tl_const:cn { c__tl_lower_1_9_tl } { ÛûĿŀΗηӃӄⓋⓥGg }
+\tl_const:cn { c__tl_lower_2_0_tl } { ÜüƤƥȈȉΘθѠѡḨḩẌẍỰựᾸᾰⓌⓦⲜⲝꝄꝅꞨꞩHh }
+\tl_const:cn { c__tl_lower_2_1_tl } { ÝýŁłΙιϽͻӅӆᾹᾱⓍⓧIi }
+\tl_const:cn { c__tl_lower_2_2_tl } { ÞþƦʀȊȋΚκϾͼѢѣḪḫẎẏỲỳᾺὰⓎⓨⲞⲟꝆꝇꞪɦJj }
+\tl_const:cn { c__tl_lower_2_3_tl } { ŃńƧƨΛλϿͽӇӈΆάⓏⓩKk }
+\tl_const:cn { c__tl_lower_2_4_tl } { ȌȍΜμЀѐѤѥḬḭẐẑỴỵᾼᾳⲠⲡꚀꚁꝈꝉLl }
+\tl_const:cn { c__tl_lower_2_5_tl } { ŅņƩʃΝνЁёӉӊὙὑMm }
+\tl_const:cn { c__tl_lower_2_6_tl } { ȎȏΞξЂђѦѧḮḯẒẓỶỷⲢⲣꚂꚃꝊꝋNn }
+\tl_const:cn { c__tl_lower_2_7_tl } { ŇňΟοЃѓӋӌὛὓOo }
+\tl_const:cn { c__tl_lower_2_8_tl } { ƬƭȐȑΠπЄєѨѩḰḱẔẕỸỹⲤⲥꚄꚅꝌꝍPp }
+\tl_const:cn { c__tl_lower_2_9_tl } { ΡρЅѕӍӎԱաὝὕQq }
+\tl_const:cn { c__tl_lower_3_0_tl } { ŊŋƮʈȒȓІіѪѫԲբḲḳỺỻⲦⲧꚆꚇꝎꝏRr }
+\tl_const:cn { c__tl_lower_3_1_tl } { ƯưΣσЇїԳգὟὗSs }
+\tl_const:cn { c__tl_lower_3_2_tl } { ŌōȔȕΤτЈјѬѭӐӑԴդḴḵỼỽⲨⲩꚈꚉꝐꝑTt }
+\tl_const:cn { c__tl_lower_3_3_tl } { ƱʊΥυЉљԵեUu }
+\tl_const:cn { c__tl_lower_3_4_tl } { ŎŏƲʋȖȗΦφЊњѮѯӒӓԶզḶḷỾỿⲪⲫꚊꚋꝒꝓVv }
+\tl_const:cn { c__tl_lower_3_5_tl } { ƳƴΧχЋћԷէWw }
+\tl_const:cn { c__tl_lower_3_6_tl } { ŐőȘșΨψЌќѰѱӔӕԸըḸḹῈὲⲬⲭꚌꚍꝔꝕXx }
+\tl_const:cn { c__tl_lower_3_7_tl } { ƵƶΩωЍѝԹթΈέYy }
+\tl_const:cn { c__tl_lower_3_8_tl } { ŒœȚțΪϊЎўѲѳӖӗԺժḺḻẞßῊὴⲮⲯꚎꚏꝖꝗZz }
+\tl_const:cn { c__tl_lower_3_9_tl } { ƷʒΫϋЏџԻիΉή }
+\tl_const:cn { c__tl_lower_4_0_tl } { ŔŕƸƹȜȝАаѴѵӘәԼլḼḽẠạὨὠῌῃⲰⲱꚐꚑꝘꝙ }
+\tl_const:cn { c__tl_lower_4_1_tl } { БбԽխὩὡ }
+\tl_const:cn { c__tl_lower_4_2_tl } { ŖŗȞȟВвѶѷӚӛԾծḾḿẢảὪὢⲲⲳꚒꚓꝚꝛ }
+\tl_const:cn { c__tl_lower_4_3_tl } { ГгԿկὫὣ }
+\tl_const:cn { c__tl_lower_4_4_tl } { ŘřƼƽȠƞДдѸѹӜӝՀհṀṁẤấἈἀὬὤⅠⅰⲴⲵꚔꚕꝜꝝ }
+\tl_const:cn { c__tl_lower_4_5_tl } { ЕеՁձἉἁὭὥⅡⅱ }
+\tl_const:cn { c__tl_lower_4_6_tl } { ŚśȢȣЖжѺѻӞӟՂղṂṃẦầἊἂὮὦⅢⅲⲶⲷꚖꚗꝞꝟ }
+\tl_const:cn { c__tl_lower_4_7_tl } { ЗзՃճἋἃὯὧⅣⅳ }
+\tl_const:cn { c__tl_lower_4_8_tl } { ŜŝȤȥИиѼѽӠӡՄմṄṅẨẩἌἄⅤⅴⲸⲹꝠꝡ }
+\tl_const:cn { c__tl_lower_4_9_tl } { ЙйՅյἍἅⅥⅵ }
+\tl_const:cn { c__tl_lower_5_0_tl } { ŞşȦȧКкѾѿӢӣՆնṆṇẪẫἎἆⅦⅶⲺⲻꝢꝣ }
+\tl_const:cn { c__tl_lower_5_1_tl } { ЛлՇշἏἇⅧⅷ }
+\tl_const:cn { c__tl_lower_5_2_tl } { ŠšDŽdžȨȩМмҀҁӤӥՈոṈṉẬậῘῐⅨⅸⲼⲽꝤꝥ }
+\tl_const:cn { c__tl_lower_5_3_tl } { DždžНнՉչῙῑⅩⅹ }
+\tl_const:cn { c__tl_lower_5_4_tl } { ŢţȪȫОоӦӧՊպṊṋẮắῚὶⅪⅺⲾⲿꝦꝧ }
+\tl_const:cn { c__tl_lower_5_5_tl } { LJljПпՋջΊίⅫⅻ }
+\tl_const:cn { c__tl_lower_5_6_tl } { ĀāŤťLjljȬȭРрӨөՌռႠⴀṌṍẰằⅬⅼⳀⳁꝨꝩ }
+\tl_const:cn { c__tl_lower_5_7_tl } { СсՍսႡⴁⅭⅽ }
+\tl_const:cn { c__tl_lower_5_8_tl } { ĂăŦŧNJnjȮȯТтӪӫՎվႢⴂṎṏẲẳⅮⅾⳂⳃꝪꝫ }
+\tl_const:cn { c__tl_lower_5_9_tl } { NjnjУуՏտႣⴃⅯⅿ }
+\tl_const:cn { c__tl_lower_6_0_tl } { ĄąŨũȰȱФфӬӭՐրႤⴄṐṑẴẵἘἐⱠⱡⳄⳅꙀꙁꝬꝭ𐐀𐐨 }
+\tl_const:cn { c__tl_lower_6_1_tl } { ǍǎХхՑցႥⴅἙἑ𐐁𐐩 }
+\tl_const:cn { c__tl_lower_6_2_tl } { ĆćŪūȲȳЦцҊҋӮӯՒւႦⴆṒṓẶặἚἒⱢɫⳆⳇꙂꙃꝮꝯ𐐂𐐪 }
+\tl_const:cn { c__tl_lower_6_3_tl } { ǏǐЧчՓփႧⴇἛἓⱣᵽ𐐃𐐫 }
+\tl_const:cn { c__tl_lower_6_4_tl } { ĈĉŬŭШшҌҍӰӱՔքႨⴈṔṕẸẹἜἔⰀⰰⱤɽⳈⳉꙄꙅ𐐄𐐬 }
+\tl_const:cn { c__tl_lower_6_5_tl } { AaǑǒЩщՕօႩⴉἝἕⰁⰱ𐐅𐐭 }
+\tl_const:cn { c__tl_lower_6_6_tl } { BbĊċŮůЪъҎҏӲӳՖֆႪⴊṖṗẺẻⰂⰲⳊⳋꙆꙇ𐐆𐐮 }
+\tl_const:cn { c__tl_lower_6_7_tl } { CcǓǔЫыႫⴋⰃⰳⱧⱨ𐐇𐐯 }
+\tl_const:cn { c__tl_lower_6_8_tl } { DdČčŰűЬьҐґӴӵႬⴌṘṙẼẽῨῠⰄⰴⳌⳍꙈꙉ𐐈𐐰 }
+\tl_const:cn { c__tl_lower_6_9_tl } { EeǕǖЭэႭⴍῩῡⰅⰵⱩⱪ𐐉𐐱 }
+\tl_const:cn { c__tl_lower_7_0_tl } { FfĎďŲųȺⱥЮюҒғӶӷႮⴎṚṛẾếῪὺⰆⰶⳎⳏꙊꙋ𐐊𐐲 }
+\tl_const:cn { c__tl_lower_7_1_tl } { GgǗǘȻȼЯяႯⴏΎύⰇⰷⱫⱬ𐐋𐐳 }
+\tl_const:cn { c__tl_lower_7_2_tl } { HhĐđŴŵҔҕӸӹႰⴐṜṝỀềᾈᾀῬῥⰈⰸⳐⳑꙌꙍ𐐌𐐴 }
+\tl_const:cn { c__tl_lower_7_3_tl } { IiǙǚȽƚႱⴑᾉᾁⰉⰹⱭɑꝹꝺ𐐍𐐵 }
+\tl_const:cn { c__tl_lower_7_4_tl } { JjĒēŶŷȾⱦҖҗӺӻႲⴒṞṟỂểᾊᾂⰊⰺⱮɱⳒⳓꙎꙏ𐐎𐐶 }
+\tl_const:cn { c__tl_lower_7_5_tl } { KkǛǜϏϗႳⴓᾋᾃⰋⰻⱯɐꝻꝼ𐐏𐐷 }
+\tl_const:cn { c__tl_lower_7_6_tl } { LlĔĕŸÿҘҙӼӽႴⴔṠṡỄễἨἠᾌᾄⰌⰼⱰɒⳔⳕꙐꙑ𐐐𐐸 }
+\tl_const:cn { c__tl_lower_7_7_tl } { MmŹźɁɂႵⴕἩἡᾍᾅⰍⰽꝽᵹ𐐑𐐹 }
+\tl_const:cn { c__tl_lower_7_8_tl } { NnĖėǞǟҚқӾӿႶⴖṢṣỆệἪἢᾎᾆⰎⰾⱲⱳⳖⳗꙒꙓꝾꝿ𐐒𐐺 }
+\tl_const:cn { c__tl_lower_7_9_tl } { OoŻżɃƀႷⴗἫἣᾏᾇↃↄⰏⰿ𐐓𐐻 }
+\tl_const:cn { c__tl_lower_8_0_tl } { PpĘęǠǡɄʉͰͱҜҝԀԁႸⴘḀḁṤṥỈỉἬἤⰐⱀⳘⳙꙔꙕꞀꞁ𐐔𐐼 }
+\tl_const:cn { c__tl_lower_8_1_tl } { QqŽžɅʌႹⴙἭἥⰑⱁⱵⱶ𐐕𐐽 }
+\tl_const:cn { c__tl_lower_8_2_tl } { RrĚěǢǣɆɇͲͳҞҟԂԃႺⴚḂḃṦṧỊịἮἦⰒⱂⳚⳛꙖꙗꞂꞃ𐐖𐐾 }
+\tl_const:cn { c__tl_lower_8_3_tl } { SsႻⴛἯἧⰓⱃ𐐗𐐿 }
+\tl_const:cn { c__tl_lower_8_4_tl } { TtĜĝǤǥɈɉϘϙҠҡԄԅႼⴜḄḅṨṩỌọῸὸⰔⱄⳜⳝꙘꙙꞄꞅ𐐘𐑀 }
+\tl_const:cn { c__tl_lower_8_5_tl } { UuƁɓႽⴝΌόⰕⱅ𐐙𐑁 }
+\tl_const:cn { c__tl_lower_8_6_tl } { VvĞğƂƃǦǧɊɋͶͷϚϛҢңԆԇႾⴞḆḇṪṫỎỏῺὼΩωⰖⱆⳞⳟꙚꙛꜢꜣꞆꞇ𐐚𐑂 }
+\tl_const:cn { c__tl_lower_8_7_tl } { WwႿⴟΏώⰗⱇ𐐛𐑃 }
+\tl_const:cn { c__tl_lower_8_8_tl } { XxĠġƄƅǨǩɌɍϜϝҤҥԈԉჀⴠḈḉṬṭỐốᾘᾐῼῳⰘⱈⳠⳡꙜꙝꜤꜥ𐐜𐑄 }
+\tl_const:cn { c__tl_lower_8_9_tl } { YyჁⴡᾙᾑⰙⱉ𐐝𐑅 }
+\tl_const:cn { c__tl_lower_9_0_tl } { ZzĢģƆɔǪǫɎɏϞϟҦҧԊԋჂⴢḊḋṮṯỒồᾚᾒKkⰚⱊⱾȿⳢⳣꙞꙟꜦꜧ𐐞𐑆 }
+\tl_const:cn { c__tl_lower_9_1_tl } { ƇƈჃⴣᾛᾓÅåⰛⱋⱿɀꞋꞌ𐐟𐑇 }
+\tl_const:cn { c__tl_lower_9_2_tl } { ÀàĤĥǬǭϠϡҨҩԌԍჄⴤḌḍṰṱỔổἸἰᾜᾔⰜⱌⲀⲁꙠꙡꜨꜩ𐐠𐑈 }
+\tl_const:cn { c__tl_lower_9_3_tl } { ÁáƉɖჅⴥἹἱᾝᾕⰝⱍꞍɥ𐐡𐑉 }
+\tl_const:cn { c__tl_lower_9_4_tl } { ÂâĦħƊɗǮǯϢϣҪҫԎԏḎḏṲṳỖỗἺἲᾞᾖⰞⱎⲂⲃꙢꙣꜪꜫ𐐢𐑊 }
+\tl_const:cn { c__tl_lower_9_5_tl } { ÃãƋƌჇⴧἻἳᾟᾗⰟⱏ𐐣𐑋 }
+\tl_const:cn { c__tl_lower_9_6_tl } { ÄäĨĩϤϥҬҭԐԑḐḑṴṵỘộἼἴⰠⱐⲄⲅꙤꙥꜬꜭꞐꞑ𐐤𐑌 }
+\tl_const:cn { c__tl_lower_9_7_tl } { ÅåDZdzἽἵⰡⱑ𐐥𐑍 }
+\tl_const:cn { c__tl_lower_9_8_tl } { ÆæĪīƎǝDzdzϦϧҮүԒԓḒḓṶṷỚớἾἶℲⅎⒶⓐⰢⱒⲆⲇꙦꙧꜮꜯꞒꞓ𐐦𐑎 }
+\tl_const:cn { c__tl_lower_9_9_tl } { ÇçƏəἿἷⒷⓑⰣⱓⳫⳬ𐐧𐑏 }
+\tl_const:Nn \c__tl_mixed_exceptions_tl { ß{Ss}ff{Ff}fi{Fi}fl{Fl}ffi{Ffi}ffl{Ffl}ſt{St}st{St}և{Եւ}ﬓ{Մն}ﬔ{Մե}ﬕ{Մի}ﬖ{Վն}ﬗ{Մխ}ᾲ{Ὰͅ}ᾴ{Άͅ}ῂ{Ὴͅ}ῄ{Ήͅ}ῲ{Ὼͅ}ῴ{Ώͅ}ᾷ{ᾼ͂}ῇ{ῌ͂}ῷ{ῼ͂}DžDždžDžLjLjljLjNjNjnjNjDzDzdzDz }
+\tl_const:Nn \c__tl_std_sigma_tl {σ}
+\tl_const:Nn \c__tl_final_sigma_tl {ς}
+\tl_const:Nn \c__tl_dotless_i_tl {ı}
+\tl_const:Nn \c__tl_dot_above_tl {̇}
+\tl_const:Nn \c__tl_dotted_I_tl {İ}
+\tl_const:Nn \c__tl_accents_lt_tl {Ì{i̇̀}Í{i̇́}Ĩ{i̇̃}}
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx
new file mode 100644
index 00000000000..8d24cae8f46
--- /dev/null
+++ b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx
@@ -0,0 +1,768 @@
+% \iffalse meta-comment
+%
+%% File: l3unicode-data.dtx Copyright(C) 2014 The LaTeX3 Project
+%%
+%% It may be distributed and/or modified under the conditions of the
+%% LaTeX Project Public License (LPPL), either version 1.3c of this
+%% license or (at your option) any later version. The latest version
+%% of this license is in the file
+%%
+%% http://www.latex-project.org/lppl.txt
+%%
+%% This file is part of the "l3kernel bundle" (The Work in LPPL)
+%% and all files in that bundle must be distributed together.
+%%
+%% The released version of this bundle is available from CTAN.
+%%
+%% -----------------------------------------------------------------------
+%%
+%% The development version of the bundle can be found at
+%%
+%% http://www.latex-project.org/svnroot/experimental/trunk/
+%%
+%% for those people who are interested.
+%%
+%%%%%%%%%%%
+%% NOTE: %%
+%%%%%%%%%%%
+%%
+%% Snapshots taken from the repository represent work in progress and may
+%% not work or may contain conflicting material! We therefore ask
+%% people _not_ to put them into distributions, archives, etc. without
+%% prior consultation with the LaTeX Project Team.
+%%
+%% -----------------------------------------------------------------------
+%%
+%
+% Both the driver and the script need \pkg{expl3}: as the script runs with
+% plain \TeX{}, it is loaded in generic mode.
+%<*driver|script>
+\input expl3-generic\relax
+\GetIdInfo$Id: l3unicode-data.dtx 5166 2014-06-30 07:48:12Z joseph $
+ {L3 Case data script}
+%</driver|script>
+%
+% The same approach as used in \pkg{DocStrip}: if \cs{documentclass}
+% is undefined then skip the driver, allowing the file to be used directly.
+% This works as the \cs{fi} is only seen if \LaTeX{} is not in use. The odd
+% \cs{jobname} business allows the extraction to work with \LaTeX{} provided
+% an appropriate \texttt{.ins} file is set up.
+%<*gobble>
+\ifx\jobname\relax
+ \let\documentclass\undefined
+\fi
+\begingroup\expandafter\expandafter\expandafter\endgroup
+\expandafter\ifx\csname documentclass\endcsname\relax
+\else
+ \csname fi\endcsname
+%</gobble>
+%
+%<*driver>
+ \documentclass[full]{l3doc}
+ \begin{document}
+ \DocInput{\jobname.dtx}
+ \end{document}
+%<*gobble>
+\fi
+%</gobble>
+%</driver>
+% \fi
+%
+% \title{^^A
+% The \textsf{l3unicode-data} script\\Unicode data script^^A
+% \thanks{This file describes v\ExplFileVersion,
+% last revised \ExplFileDate.}^^A
+% }
+%
+% \author{^^A
+% The \LaTeX3 Project\thanks
+% {^^A
+% E-mail:
+% \href{mailto:latex-team@latex-project.org}
+% {latex-team@latex-project.org}^^A
+% }^^A
+% }
+%
+% \date{Released \ExplFileDate}
+%
+% \maketitle
+%
+% \begin{documentation}
+%
+% The Unicode Consortium provide comprehensive data on the standard
+% mapping of characters (or more formally codepoints) when carrying
+% out various different case-changing functions:
+% \begin{itemize}
+% \item Uppercasing
+% \item Lowercasing
+% \item Titlecasing (used for the first codepoint of a word:
+% may be subtly different to uppercasing)
+% \item Folding (removing case for comparison purposes: close
+% but not identical to lowercasing)
+% \end{itemize}
+% This data is available in machine readable format, such that many of
+% the basics of case changing can be set up on an automated basis.
+%
+% This file provides a script which will read the raw Unicode files
+% and convert the material to a form which can be used by \pkg{expl3}.
+% As the conversions here cover the entire UTF-8 range, this cannot
+% be carried out by \pdfTeX{}: at present, the script works only
+% with Lua\TeX{}.
+%
+% Note that this file is designed such that running \LaTeX{} will typeset
+% the documentation using any engine: the script will be run if the file
+% is processed by plain \TeX{}, specifically the |luatex| command.
+% This process requires the files |CaseFolding.txt|, |SpecialCasing.txt|
+% and |UnicodeData.txt| from the \href{http://www.unicode.org/}^^A
+% {Unicode Consortium website}.
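+% As a concrete illustration (the exact invocation may vary with the local
+% setup), with those three files placed next to the source the extraction
+% can be run as
+% \begin{verbatim}
+%   luatex l3unicode-data.dtx
+% \end{verbatim}
+% which writes |l3unicode-data.def| to the current directory.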
+%
+% The file produced by this script, |l3unicode-data.def|, contains
+% appropriate definitions for all of the data structures used by \pkg{expl3}
+% for Unicode transformations. It also provides the alternative
+% definitions needed for use with \pdfTeX{}.
+%
+% \end{documentation}
+%
+% \begin{implementation}
+%
+% \section{\pkg{l3unicode-data} Implementation}
+%
+% \begin{macrocode}
+%<*script>
+% \end{macrocode}
+%
+% The driver part has loaded \pkg{expl3}: turn on the syntax environment.
+% \begin{macrocode}
+\ExplSyntaxOn
+% \end{macrocode}
+%
+% \subsection{Setup}
+%
+% \begin{macro}{\str_case_x:nvF}
+% One handy variant.
+% \begin{macrocode}
+\cs_generate_variant:Nn \str_case_x:nnF { nv }
+% \end{macrocode}
+% \end{macro}
+%
+% The first step is to generate a series of temporary variables to
+% contain the data as it's extracted. This requires a nested loop
+% to give a total of $100$ token lists. Two sets are generated for
+% use in the upper/lower case part of the script.
+% \begin{macrocode}
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \tl_new:c { l__unicode_a_ #1 _ ##1 _tl }
+ \tl_new:c { l__unicode_b_ #1 _ ##1 _tl }
+ }
+ }
+% \end{macrocode}
+%
+% \begin{variable}{\g__unicode_data_ior}
+% \begin{variable}{\g__unicode_result_iow}
+% Streams for reading and writing the data.
+% \begin{macrocode}
+\ior_new:N \g__unicode_data_ior
+\iow_new:N \g__unicode_result_iow
+% \end{macrocode}
+% \end{variable}
+% \end{variable}
+%
+% Open the data file for writing.
+% \begin{macrocode}
+\iow_open:Nn \g__unicode_result_iow { l3unicode-data.def }
+% \end{macrocode}
+%
+% Write an identification line to the file: the file data here can't be set
+% automatically and so will need to be edited by hand. As such, the data here
+% is the standard SVN filler.
+% \begin{macrocode}
+\iow_now:Nx \g__unicode_result_iow
+ {
+ \exp_not:N \ProvidesExplFile
+ { l3unicode-data.def } ~ { 0000/00/00 } ~ { -1 } ~ { L3~Unicode~data }
+ }
+% \end{macrocode}
+%
+% \subsection{Verbatim copying}
+%
+% \begin{macro}[int]{\__unicode_verb:}
+% \begin{macro}[aux]{\__unicode_verb_auxi:w, \__unicode_verb_auxii:w}
+% \begin{macro}[int]{\__unicode_verb_end:}
+% There are various bits of code which need to be transferred into the data
+% file from the source. This has to take place as part of the general writing
+% process so needs to be done without using DocStrip. That is achieved by
+% having a verbatim-copy mechanism available: this is all set up here.
+% As the line containing the \cs{__unicode_verb:} function will end up with a
+% (category code $12$) space at the start, there is a dedicated function to
+% clear this part up.
+% \begin{macrocode}
+\group_begin:
+ \char_set_catcode_other:n { `\^^M }%
+ \cs_new_protected:Npn \__unicode_verb:%
+ {%
+ \group_begin:%
+ \char_set_catcode_other:n { `\^^M }%
+ \tex_endlinechar:D = `\^^M%
+ \clist_map_inline:nn%
+ { \\ , \{ , \} , \# , \^ , \% , \ }%
+ { \char_set_catcode_other:n { `##1 } }%
+ \__unicode_verb_auxi:w%
+ }%
+ \cs_new_protected:Npn \__unicode_verb_auxi:w#1^^M%
+ {%
+ \exp_after:wN \__unicode_verb_auxii:w \use_none:n #1 ^^M
+ }%
+ \cs_new_protected:Npn \__unicode_verb_auxii:w#1^^M%
+ {%
+ \str_if_eq_x:nnTF {#1} { \token_to_str:N \__unicode_verb_end: }%
+ { \group_end: }%
+ {%
+ \iow_now:Nn \g__unicode_result_iow {#1}%
+ \__unicode_verb_auxii:w%
+ }%
+ }%
+\group_end:%
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{Shared data}
+%
+% There are some data items which can be stored as numbers rather than as
+% literal UTF-8 chars. These could go into the main source files, but as they
+% conceptually go with everything else here this makes more sense. They are
+% safe for use with \pdfTeX{} so are given first.
+% \begin{macrocode}
+\__unicode_verb:
+\clist_const:Nn \c__tl_after_final_sigma_clist
+ { 0021 , 0022 , 0029 , 002C , 002E , 003A , 003B , 003F , 005D , 007D }
+\clist_const:Nn \c__tl_mixed_skip_clist
+ { 0028 , 005B , 0060 , 007B }
+\__unicode_verb_end:
+% \end{macrocode}
+%
+% \subsection{\pdfTeX{} support}
+%
+% As \pdfTeX{} does not support UTF-8 input natively, most of the data
+% here will not be useful. Rather than use two separate mechanisms for
+% each function depending on the engine, the system is designed such that
+% \enquote{truncated} data structures are provided for \pdfTeX{}. These
+% are coded here for direct transfer to the |.def| file, which can then
+% abort loading when \pdfTeX{} is in use.
+%
+% The idea here is simple: map over all of the letters of the Latin
+% alphabet and create appropriate token lists, then add all of the rest
+% of the data structures. For case folding, the tokens are all stored as
+% strings. For the lower case letters, to ensure there are always three
+% digits a bit of maths is used.
+%
+% After the mapping, the small number of fixed data structures that are
+% used for the special case conversions are created. These are mainly empty,
+% but for cases where a match is possible (as the test char is in the \pdfTeX{}
+% range), no-op data is included (as the \emph{output} would be out-of-range).
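+% As a worked example of that arithmetic: for the pair |Aa| the two numbers
+% generated below are $65$ (from |`A|) and $197$ (from |100 + `a|), so the
+% pair ends up in |c__str_fold_6_5_tl| and |c__tl_lower_6_5_tl|, with the
+% reversed pair |aA| in |c__tl_upper_9_7_tl|, matching the names used for
+% the full Unicode data.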
+% \begin{macrocode}
+\__unicode_verb:
+\pdftex_if_engine:T
+ {
+ \group_begin:
+ \cs_set_protected:Npn \__unicode_tmp:NN #1#2
+ {
+ \quark_if_recursion_tail_stop:N #1
+ \exp_after:wN \__unicode_tmp:NNNNNNN
+ \tex_number:D \__int_eval:w `#1 \exp_after:wN \__int_eval_end:
+ \tex_number:D \__int_eval:w 100 + `#2 \__int_eval_end:
+ #1 #2
+ \__unicode_tmp:NN
+ }
+ \cs_set_protected:Npn \__unicode_tmp:NNNNNNN #1#2#3#4#5#6#7
+ {
+ \tl_const:cx { c__str_fold_ #1 _ #2 _ tl }
+ { \tl_to_str:n { #6#7 } }
+ \tl_const:cn { c__tl_lower_ #1 _ #2 _ tl } { #6#7 }
+ \tl_const:cn { c__tl_upper_ #4 _ #5 _ tl } { #7#6 }
+ }
+ \__unicode_tmp:NN
+ AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz
+ \q_recursion_tail ? \q_recursion_stop
+ \group_end:
+ \int_step_inline:nnnn { 0 } { 1 } { 9 }
+ {
+ \int_step_inline:nnnn { 0 } { 1 } { 9 }
+ {
+ \tl_if_exist:cF { c__str_fold_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__str_fold_ #1 _ ##1 _ tl } { }
+ }
+ \tl_if_exist:cF { c__tl_lower_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__tl_lower_ #1 _ ##1 _ tl } { }
+ }
+ \tl_if_exist:cF { c__tl_upper_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__tl_upper_ #1 _ ##1 _ tl } { }
+ }
+ }
+ }
+ \tl_const:Nn \c__tl_mixed_exceptions_tl { }
+ \tl_const:Nn \c__tl_std_sigma_tl { }
+ \tl_const:Nn \c__tl_final_sigma_tl { }
+ \tl_const:Nn \c__tl_accents_lt_tl { }
+ \tl_const:Nn \c__tl_dot_above_tl { }
+ \tl_const:Nn \c__tl_dotless_i_tl { I }
+ \tl_const:Nn \c__tl_dotted_I_tl { i }
+ \tex_endinput:D
+ }
+\__unicode_verb_end:
+% \end{macrocode}
+%
+% \subsection{Case folding}
+%
+% \begin{macro}{\__unicode_parse_line:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxi:Nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxv:wnn}
+% The format of |CaseFolding.txt| allows for both blank lines and
+% comment lines starting with |#|. Thus the first two steps of
+% the parsing routine are set up to deal with these cases.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line:w #1 \q_stop
+ {
+ \tl_if_blank:nF {#1}
+ { \__unicode_parse_line_auxi:Nw #1 \q_stop }
+ }
+\cs_new_protected:Npn \__unicode_parse_line_auxi:Nw #1#2 \q_stop
+ {
+ \str_if_eq_x:nnF { \exp_not:n {#1} } { \cs_to_str:N \# }
+ { \__unicode_parse_line_auxii:w #1#2 \q_stop }
+ }
+% \end{macrocode}
+% For lines actually containing data, there will be four entries separated by
+% |;| tokens: the hex code for the char itself, which folding regimes
+% the line applies to, the hex code(s) for the folded char and a
+% description. Of these, we need all but the last one. In the simple
+% case of core foldings, the mapping is one--one and this information
+% can be passed directly to the next stage. We also handle the full
+% mappings (dropping simple ones plus any Turkic variation): an additional
+% step is needed to parse this case.
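+% For reference, typical data lines in |CaseFolding.txt| look like the
+% following (reproduced here purely for illustration):
+% \begin{verbatim}
+%   0041; C; 0061; # LATIN CAPITAL LETTER A
+%   00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S
+% \end{verbatim}
+% The first line is a common (|C|) one--one folding, the second a full (|F|)
+% folding producing more than one char.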
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxii:w #1 ;~ #2 ; #3 ; #4 \q_stop
+ {
+ \str_if_eq:nnTF {#2} { C }
+ {
+ \__unicode_parse_line_auxiv:nn
+ {#1} { \luatex_Uchar:D "#3 \c_space_tl }
+ }
+ {
+ \str_if_eq:nnT {#2} { F }
+ { \__unicode_parse_line_auxiii:nw {#1} #3 ~ \q_stop }
+ }
+ }
+% \end{macrocode}
+% Full folding produces two or three Unicode code points from a single
+% input char. To deal with this, we split the relevant part of the input
+% and check how many chars to generate. The entire folding output is
+% braced so that when read back \TeX{} will see this as a group in our
+% replacement code.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiii:nw #1 ~ #2 ~ #3 ~ #4 \q_stop
+ {
+ \__unicode_parse_line_auxiv:nn
+ {#1}
+ {
+ {
+ \luatex_Uchar:D "#2 \c_space_tl
+ \luatex_Uchar:D "#3 \c_space_tl
+ \tl_if_empty:nF {#4}
+ { \luatex_Uchar:D "#4 \c_space_tl }
+ }
+ }
+ }
+% \end{macrocode}
+% The final stage of extracting the mapping is to split the various cases
+% up such that comparison and replacement does not need to check every
+% character. That is done by taking the charcode modulo $100$: this splits
+% the list of chars into $100$ much shorter lists. With that done, the
+% input and output chars are added to the appropriate token lists.
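+% For example, the char |00C0| (decimal $192$) gives $1000192$ after the
+% addition below, and the final two digits place it in the |9_2| list: that
+% is indeed the bucket in which this char appears in the generated |.def|
+% file.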
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nn #1#2
+ {
+ \exp_last_unbraced:Nf \__unicode_parse_line_auxv:wnn
+ { \int_eval:n { 1000000 + "#1 } } \q_stop
+ {#1} {#2}
+ }
+% \end{macrocode}
+% As the input is read in string mode, there is a need for a rescan
+% here since \tn{Uchar} requires letters for hexadecimal digits
+% beyond~$9$.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxv:wnn
+ #1#2#3#4#5#6#7 \q_stop #8#9
+ {
+ \tl_rescan:nn
+ { }
+ {
+ \tl_put_right:cx { l__unicode_a_ #6 _ #7 _tl }
+ {
+ \luatex_Uchar:D "#8 \c_space_tl
+ #9
+ }
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% The main loop can now take place, reading the source data and saving all of
+% the information in the token list array.
+% \begin{macrocode}
+\ior_open:Nn \g__unicode_data_ior { CaseFolding.txt }
+\ior_str_map_inline:Nn \g__unicode_data_ior
+ { \__unicode_parse_line:w #1 \q_stop }
+\ior_close:N \g__unicode_data_ior
+% \end{macrocode}
+%
+% \begin{macro}[aux]{\__str_tmp:NNn}
+% \begin{macro}[aux, EXP]{\__str_tmp:Nw}
+% To ensure that the output of the case-folding function is a string, all of
+% the stored results need to be detokenized. That is done by including a loop
+% in the |.def| file which will do the necessary change. To set that up, a slightly
+% complicated bit of secondary work is needed: write the functions which do
+% the job into the |.def| file itself, using a group to trap the temporary
+% code. There is also a test in the following so that the result only has
+% braces around items which need it: this is a slight performance tweak when
+% the code actually gets used. Notice that everything in the token list is
+% detokenized except for the |{| and |}| chars needed for grouping: if the
+% search part of the list is not detokenized there are issues with \XeTeX{}
+% and chars beyond $0\mathrm{xFFFF}$ (probably a bug, but can be worked
+% around!).
+% \begin{macrocode}
+\__unicode_verb:
+\group_begin:
+ \cs_set_protected:Npn \__str_tmp:NNn #1#2#3
+ {
+ \tl_const:cx { c__str_fold_#1_#2_tl }
+ { \__str_tmp:Nw #3 \q_recursion_tail { } \q_recursion_stop }
+ }
+ \cs_set:Npn \__str_tmp:Nw #1#2
+ {
+ \quark_if_recursion_tail_stop:N #1
+ \tl_to_str:N #1
+ \tl_if_blank:oT { \use_none:n #2 }
+ { \use:n }
+ { \tl_to_str:n {#2} }
+ \__str_tmp:Nw
+ }
+\__unicode_verb_end:
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% The write loop is simple: map over the array and write everything to the
+% output. The saved data is also cleared to save a second loop later on when
+% dealing with case mappings. The group used for the temporary stuff in the
+% |.def| file is also closed at this point.
+% \begin{macrocode}
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \c_space_tl \c_space_tl
+ \exp_not:N \__str_tmp:NNn #1 ~ ##1 ~
+ { ~ \exp_not:v { l__unicode_a_ #1 _ ##1 _tl } ~ }
+ }
+ \tl_clear:c { l__unicode_a_ #1 _ ##1 _tl }
+ }
+ }
+\iow_now:Nn \g__unicode_result_iow { \group_end: }
+% \end{macrocode}
+%
+% \subsection{Upper/lower/title casing}
+%
+% Unlike the case folding data, case changing data is split into two parts
+% which we need to combine into a single data structure. There are therefore
+% two parts to this process: first to read the exceptions, then to read the
+% main data and combine it.
+%
+% \begin{macro}^^A
+% {
+% \l__unicode_lower_exceptions_tl,
+% \l__unicode_title_exceptions_tl,
+% \l__unicode_upper_exceptions_tl
+% }
+% There are special cases for lower, title and uppercase changes: these
+% all get read into appropriate lists. Exceptions could be saved as
+% property lists but that would make life a bit more complex with the
+% titlecase exceptions and wouldn't really gain much (this is after all
+% \enquote{disposable} data). Note that for our purposes, what Unicode call
+% title case is stored in the output as `mixed' case.
+% \begin{macrocode}
+\tl_new:N \l__unicode_lower_exceptions_tl
+\tl_new:N \l__unicode_title_exceptions_tl
+\tl_new:N \l__unicode_upper_exceptions_tl
+% \end{macrocode}
+% \end{macro}
+%
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:nnn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nwn}
+% \begin{macro}[aux]{\__unicode_brace:n}
+% The format of the special cases data is similar to that of the folding
+% data: as such only some of the parsing is altered. This file has four
+% important data fields: the char and its lower, title and uppercase
+% equivalents. As most of the titlecase exceptions are also uppercase
+% exceptions, a test is made so that we are only storing truly useful
+% exceptions for titlecase.
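+% A representative line from |SpecialCasing.txt| (again shown purely for
+% illustration) is
+% \begin{verbatim}
+%   00DF; 00DF; 0053 0073; 0053 0053; # LATIN SMALL LETTER SHARP S
+% \end{verbatim}
+% giving, after the char itself, the lower, title and upper case mappings in
+% that order.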
+% \begin{macrocode}
+\cs_set_protected:Npn \__unicode_parse_line_auxii:w
+ #1 ;~ #2 ;~ #3 ;~ #4 ; #5 \q_stop
+ {
+ \__unicode_parse_line_auxiii:nnn {#1} {#2} { lower }
+ \str_if_eq:nnF {#3} {#4}
+ { \__unicode_parse_line_auxiii:nnn {#1} {#3} { title } }
+ \__unicode_parse_line_auxiii:nnn {#1} {#4} { upper }
+ }
+% \end{macrocode}
+% Unlike the folding data, the special cases file always has a value for
+% each of the three entries. Some of these contain only one hex number.
+% After a bit of a trick to allow for ease of parsing, we check if there
+% are at least two numbers for the case-changed char. If there are, then
+% save the exception. If not, then the value will also be in the main
+% table and we can ignore it here. There is also a test to see whether the
+% current value is a titlecase exception: titlecase exceptions do not need
+% the extra braces.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiii:nnn #1#2#3
+ { \use:n { \__unicode_parse_line_auxiv:nwn {#1} #2 ~ } ~ \q_stop {#3} }
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nwn #1#2 ~ #3 ~ #4 \q_stop #5
+ {
+ \tl_if_empty:nF {#3}
+ {
+ \str_if_eq:nnTF {#5} { title }
+ { \cs_set_eq:NN \__unicode_brace:n \use:n }
+ { \cs_set:Npn \__unicode_brace:n ##1 { { ##1 } } }
+ \tl_rescan:nn
+ { }
+ {
+ \tl_put_right:cx { l__unicode_ #5 _exceptions_tl }
+ {
+ \luatex_Uchar:D "#1 \c_space_tl
+ {
+ \__unicode_brace:n
+ {
+ \luatex_Uchar:D "#2 \c_space_tl
+ \luatex_Uchar:D "#3 \c_space_tl
+ \tl_if_empty:nF {#4}
+ { \luatex_Uchar:D "#4 \c_space_tl }
+ }
+ }
+ }
+ }
+ }
+ }
+\cs_new_eq:NN \__unicode_brace:n \use:n
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% With parsing set up, read the special cases file. The input contains both
+% general special cases and ones dependent on context. We only want to read
+% the former, so there is a check for the line that splits the two:
+% at that point, simply stop parsing.
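+% (Concretely, the check below looks for the marker line
+% |# Conditional Mappings| in the source file.)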
+% \begin{macrocode}
+\ior_open:Nn \g__unicode_data_ior { SpecialCasing.txt }
+\ior_str_map_inline:Nn \g__unicode_data_ior
+ {
+ \str_if_eq_x:nnTF {#1} { \cs_to_str:N \# \c_space_tl Conditional~Mappings }
+ { \ior_map_break: }
+ { \__unicode_parse_line:w #1 \q_stop }
+ }
+\ior_close:N \g__unicode_data_ior
+% \end{macrocode}
+%
+% \begin{macro}{\__unicode_parse_line:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxi:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:nnNn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:wnnNn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nnNNNn}
+% Much the same as for the case folding set up: parse the lines of
+% data. Here, the lines are longer but always have one--one mappings.
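+% Each line of |UnicodeData.txt| holds fifteen |;|-separated fields, for
+% example (shown for illustration only)
+% \begin{verbatim}
+%   0061;LATIN SMALL LETTER A;Ll;0;L;;;;;N;;;0041;;0041
+% \end{verbatim}
+% with the upper, lower and title case mappings in the last three fields.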
+% \begin{macrocode}
+\cs_set_protected:Npn \__unicode_parse_line:w
+ #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 ; #8 ; #9 ;
+ {
+ \__unicode_parse_line_auxi:w #1 ;
+ }
+% \end{macrocode}
+% With some data items removed, at this stage the hexadecimal
+% representation of the char is |#1|, the upper case char is |#5|,
+% the lower case one |#6| and the title case one |#7|. These may or
+% may not be present and the upper and titlecase values may be
+% identical. Where there are values for upper/lowercase, they are
+% saved into the arrays. For titlecase, since the number of exceptions
+% is small, they are added to the existing list of exceptions we've
+% already started.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxi:w
+ #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 \q_stop
+ {
+ \tl_if_empty:nF {#5}
+ {
+ \__unicode_parse_line_auxii:nnNn {#1} {#5} a { upper }
+ \str_if_eq:nnF {#5} {#7}
+ {
+ \tl_put_right:Nx \l__unicode_title_exceptions_tl
+ {
+ \luatex_Uchar:D "#1 \c_space_tl
+ \luatex_Uchar:D "#7 \c_space_tl
+ }
+ }
+ }
+ \tl_if_empty:nF {#6}
+ { \__unicode_parse_line_auxii:nnNn {#1} {#6} b { lower } }
+ }
+% \end{macrocode}
+% The array structure here is the same as before, except now there
+% are two separate ones to manage.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxii:nnNn #1#2#3
+ {
+ \exp_last_unbraced:Nf \__unicode_parse_line_auxiii:wnnNn
+ { \int_eval:n { 1000000 + "#1 } } \q_stop
+ {#1} {#2} #3
+ }
+\cs_new_protected:Npn \__unicode_parse_line_auxiii:wnnNn
+ #1#2#3#4#5#6#7 \q_stop #8#9
+ { \__unicode_parse_line_auxiv:nnNNNn {#8} {#9} #6 #7 }
+% \end{macrocode}
+% The final test required here is to look for the special cases and where
+% appropriate use that rather than the one--one mapping value.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nnNNNn #1#2#3#4#5#6
+ {
+ \tl_rescan:nn
+ { }
+ {
+ \tl_put_right:cx { l__unicode_ #5 _ #3 _ #4 _tl }
+ {
+ \luatex_Uchar:D "#1 \c_space_tl
+ \str_case_x:nvF
+ { \luatex_Uchar:D "#1 \c_space_tl }
+ { l__unicode_ #6 _exceptions_tl }
+ { \luatex_Uchar:D "#2 \c_space_tl }
+ }
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% Everything is set up and so the read loop can take place: this time
+% there are no comment chars to worry about and so normal category
+% codes apply.
+% \begin{macrocode}
+\ior_open:Nn \g__unicode_data_ior { UnicodeData.txt }
+\ior_map_inline:Nn \g__unicode_data_ior
+ { \__unicode_parse_line:w #1 \q_stop }
+\ior_close:N \g__unicode_data_ior
+% \end{macrocode}
+%
+% Saving the data uses a single file, with the uppercase array
+% followed by the lowercase one and finally the titlecase exceptions.
+% \begin{macrocode}
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:cn
+ { ~ c__tl_upper_ #1 _ ##1 _tl ~ } ~
+ { ~ \exp_not:v { l__unicode_a_ #1 _ ##1 _tl } ~ }
+ }
+ }
+ }
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:cn
+ { ~ c__tl_lower_ #1 _ ##1 _tl ~ } ~
+ { ~ \exp_not:v { l__unicode_b_ #1 _ ##1 _tl } ~ }
+ }
+ }
+ }
+\iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:Nn
+ \exp_not:N \c__tl_mixed_exceptions_tl \c_space_tl
+ { ~ \exp_not:V \l__unicode_title_exceptions_tl \c_space_tl }
+ }
+% \end{macrocode}
+%
+% Data for the special cases is now needed. This is mainly a series of simple
+% token lists with appropriate names and content, but there is also one place
+% where a small mapping list is required.
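+% (The simple token lists cover the sigma and dotted/dotless |i| data; the
+% mapping list is \cs{c__tl_accents_lt_tl}, which pairs accented capital |I|
+% chars with their dotted lower case equivalents.)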
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_special_case:nn #1#2
+ {
+ \quark_if_recursion_tail_stop:n {#1}
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:Nn \exp_not:c { c__tl_ #1 _tl } { \luatex_Uchar:D "#2 }
+ }
+ \__unicode_special_case:nn
+ }
+\__unicode_special_case:nn
+ { std_sigma } { 03C3 }
+ { final_sigma } { 03C2 }
+ { dotless_i } { 0131 }
+ { dot_above } { 0307 }
+ { dotted_I } { 0130 }
+ \q_recursion_tail { }
+ \q_recursion_stop
+\iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:Nn \exp_not:N \c__tl_accents_lt_tl
+ {
+ \luatex_Uchar:D "00CC
+ { \luatex_Uchar:D "0069 \luatex_Uchar:D "0307 \luatex_Uchar:D "0300 }
+ \luatex_Uchar:D "00CD
+ { \luatex_Uchar:D "0069 \luatex_Uchar:D "0307 \luatex_Uchar:D "0301 }
+ \luatex_Uchar:D "0128
+ { \luatex_Uchar:D "0069 \luatex_Uchar:D "0307 \luatex_Uchar:D "0303 }
+ }
+ }
+% \end{macrocode}
+%
+% Job done, end the \TeX{} run.
+% \begin{macrocode}
+\iow_close:N \g__unicode_result_iow
+\tex_end:D
+% \end{macrocode}
+%
+% \begin{macrocode}
+%</script>
+% \end{macrocode}
+%
+% \end{implementation}
+%
+% \PrintIndex \ No newline at end of file
diff --git a/Master/texmf-dist/source/latex/l3packages/l3keys2e/l3keys2e.dtx b/Master/texmf-dist/source/latex/l3packages/l3keys2e/l3keys2e.dtx
index 34be20b1580..3976b45c9c0 100644
--- a/Master/texmf-dist/source/latex/l3packages/l3keys2e/l3keys2e.dtx
+++ b/Master/texmf-dist/source/latex/l3packages/l3keys2e/l3keys2e.dtx
@@ -37,8 +37,8 @@
%<*driver|package>
% The version of expl3 required is tested as early as possible, as
% some really old versions do not define \ProvidesExplPackage.
-\RequirePackage{expl3}[2014/06/10]
-%<package>\@ifpackagelater{expl3}{2014/06/10}
+\RequirePackage{expl3}[2014/07/20]
+%<package>\@ifpackagelater{expl3}{2014/07/20}
%<package> {}
%<package> {%
%<package> \PackageError{l3keys2e}{Support package l3kernel too old}
@@ -50,7 +50,7 @@
%<package> }%
%<package> \endinput
%<package> }
-\GetIdInfo$Id: l3keys2e.dtx 5105 2014-06-10 07:28:49Z joseph $
+\GetIdInfo$Id: l3keys2e.dtx 5241 2014-07-20 09:44:41Z joseph $
{LaTeX2e option processing using LaTeX3 keys}
%</driver|package>
%<*driver>
diff --git a/Master/texmf-dist/source/latex/l3packages/xparse/xparse.dtx b/Master/texmf-dist/source/latex/l3packages/xparse/xparse.dtx
index fc736388407..27bb33f151d 100644
--- a/Master/texmf-dist/source/latex/l3packages/xparse/xparse.dtx
+++ b/Master/texmf-dist/source/latex/l3packages/xparse/xparse.dtx
@@ -41,8 +41,8 @@
%<*driver|package>
% The version of expl3 required is tested as early as possible, as
% some really old versions do not define \ProvidesExplPackage.
-\RequirePackage{expl3}[2014/06/10]
-%<package>\@ifpackagelater{expl3}{2014/06/10}
+\RequirePackage{expl3}[2014/07/20]
+%<package>\@ifpackagelater{expl3}{2014/07/20}
%<package> {}
%<package> {%
%<package> \PackageError{xparse}{Support package l3kernel too old}
@@ -54,7 +54,7 @@
%<package> }%
%<package> \endinput
%<package> }
-\GetIdInfo$Id: xparse.dtx 5105 2014-06-10 07:28:49Z joseph $
+\GetIdInfo$Id: xparse.dtx 5241 2014-07-20 09:44:41Z joseph $
{L3 Experimental document command parser}
%</driver|package>
%<*driver>
diff --git a/Master/texmf-dist/source/latex/l3packages/xtemplate/xtemplate.dtx b/Master/texmf-dist/source/latex/l3packages/xtemplate/xtemplate.dtx
index e3204cdfedd..f2c65ec055e 100644
--- a/Master/texmf-dist/source/latex/l3packages/xtemplate/xtemplate.dtx
+++ b/Master/texmf-dist/source/latex/l3packages/xtemplate/xtemplate.dtx
@@ -41,8 +41,8 @@
%<*driver|package>
% The version of expl3 required is tested as early as possible, as
% some really old versions do not define \ProvidesExplPackage.
-\RequirePackage{expl3}[2014/06/10]
-%<package>\@ifpackagelater{expl3}{2014/06/10}
+\RequirePackage{expl3}[2014/07/20]
+%<package>\@ifpackagelater{expl3}{2014/07/20}
%<package> {}
%<package> {%
%<package> \PackageError{xtemplate}{Support package l3kernel too old}
@@ -54,7 +54,7 @@
%<package> }%
%<package> \endinput
%<package> }
-\GetIdInfo$Id: xtemplate.dtx 5105 2014-06-10 07:28:49Z joseph $
+\GetIdInfo$Id: xtemplate.dtx 5241 2014-07-20 09:44:41Z joseph $
{L3 Experimental prototype document functions}
%</driver|package>
%<*driver>