% \iffalse meta-comment % %% File: l3str.dtx Copyright (C) 2011-2017 The LaTeX3 Project % % It may be distributed and/or modified under the conditions of the % LaTeX Project Public License (LPPL), either version 1.3c of this % license or (at your option) any later version. The latest version % of this license is in the file % % http://www.latex-project.org/lppl.txt % % This file is part of the "l3kernel bundle" (The Work in LPPL) % and all files in that bundle must be distributed together. % % ----------------------------------------------------------------------- % % The development version of the bundle can be found at % % https://github.com/latex3/latex3 % % for those people who are interested. % %<*driver> \documentclass[full]{l3doc} \begin{document} \DocInput{\jobname.dtx} \end{document} %</driver> % \fi % % \title{^^A % The \pkg{l3str} package\\Strings^^A % } % % \author{^^A % The \LaTeX3 Project\thanks % {^^A % E-mail: % \href{mailto:latex-team@latex-project.org} % {latex-team@latex-project.org}^^A % }^^A % } % % \date{Released 2017/05/29} % % \maketitle % % \begin{documentation} % % \TeX{} associates each character with a category code: as such, there is no % concept of a \enquote{string} as commonly understood in many other % programming languages. However, there are places where we wish to manipulate % token lists while in some sense \enquote{ignoring} category codes: this is % done by treating token lists as strings in a \TeX{} sense. % % A \TeX{} string (and thus an \pkg{expl3} string) is a series of characters % which have category code $12$ (\enquote{other}) with the exception of % space characters which have category code $10$ (\enquote{space}). Thus % at a technical level, a \TeX{} string is a token list with the appropriate % category codes. In this documentation, these will simply be referred to as % strings. % % String variables are simply specialised token lists, but by convention % should be named with the suffix \texttt{\ldots{}str}. Such variables % should contain characters with category code $12$ (other), except % spaces, which have category code $10$ (blank space). All the % functions in this module which accept a token list argument first % convert it to a string using \cs{tl_to_str:n} for internal processing, % and will not treat a token list or the corresponding string % representation differently. % % Note that as string variables are a special case of token list variables % the coverage of \cs[no-index]{str_\ldots{}:N} functions is somewhat smaller than % \cs[no-index]{tl_\ldots{}:N}. % % The functions \cs{cs_to_str:N}, \cs{tl_to_str:n}, \cs{tl_to_str:N} and % \cs{token_to_str:N} (and variants) will generate strings from the appropriate % input: these are documented in \pkg{l3basics}, \pkg{l3tl} and \pkg{l3token}, % respectively. % % Most expandable functions in this module come in three flavours: % \begin{itemize} % \item \cs[no-index]{str_\ldots{}:N}, which expect a token list or string % variable as their argument; % \item \cs[no-index]{str_\ldots{}:n}, taking any token list (or string) as an % argument; % \item \cs[no-index]{str_\ldots{}_ignore_spaces:n}, which ignores any space % encountered during the operation: these functions are typically % faster than those which take care of escaping spaces % appropriately.
% \end{itemize} % % \section{Building strings} % % \begin{function}[added = 2015-09-18]{\str_new:N, \str_new:c} % \begin{syntax} % \cs{str_new:N} \meta{str~var} % \end{syntax} % Creates a new \meta{str~var} or raises an error if the name is % already taken. The declaration is global. The \meta{str~var} will % initially be empty. % \end{function} % % \begin{function}[added = 2015-09-18] % {\str_const:Nn, \str_const:Nx, \str_const:cn, \str_const:cx} % \begin{syntax} % \cs{str_const:Nn} \meta{str~var} \Arg{token list} % \end{syntax} % Creates a new constant \meta{str~var} or raises an error if the name % is already taken. The value of the \meta{str~var} will be set % globally to the \meta{token list}, converted to a string. % \end{function} % % \begin{function}[added = 2015-09-18] % {\str_clear:N, \str_clear:c, \str_gclear:N, \str_gclear:c} % \begin{syntax} % \cs{str_clear:N} \meta{str~var} % \end{syntax} % Clears the content of the \meta{str~var}. % \end{function} % % \begin{function}[added = 2015-09-18]{\str_clear_new:N, \str_clear_new:c} % \begin{syntax} % \cs{str_clear_new:N} \meta{str~var} % \end{syntax} % Ensures that the \meta{str~var} exists globally by applying % \cs{str_new:N} if necessary, then applies % \cs[index=str_clear:N]{str_(g)clear:N} to leave % the \meta{str~var} empty. % \end{function} % % \begin{function}[added = 2015-09-18] % { % \str_set_eq:NN, \str_set_eq:cN, \str_set_eq:Nc, \str_set_eq:cc, % \str_gset_eq:NN, \str_gset_eq:cN, \str_gset_eq:Nc, \str_gset_eq:cc % } % \begin{syntax} % \cs{str_set_eq:NN} \meta{str~var_1} \meta{str~var_2} % \end{syntax} % Sets the content of \meta{str~var_1} equal to that of % \meta{str~var_2}. % \end{function} % % \section{Adding data to string variables} % % \begin{function}[added = 2015-09-18] % { % \str_set:Nn, \str_set:Nx, \str_set:cn, \str_set:cx, % \str_gset:Nn, \str_gset:Nx, \str_gset:cn, \str_gset:cx % } % \begin{syntax} % \cs{str_set:Nn} \meta{str var} \Arg{token list} % \end{syntax} % Converts the \meta{token list} to a \meta{string}, and stores the % result in \meta{str var}. % \end{function} % % \begin{function}[added = 2015-09-18] % { % \str_put_left:Nn, \str_put_left:Nx, % \str_put_left:cn, \str_put_left:cx, % \str_gput_left:Nn, \str_gput_left:Nx, % \str_gput_left:cn, \str_gput_left:cx % } % \begin{syntax} % \cs{str_put_left:Nn} \meta{str var} \Arg{token list} % \end{syntax} % Converts the \meta{token list} to a \meta{string}, and prepends the % result to \meta{str var}. The current contents of the \meta{str % var} are not automatically converted to a string. % \end{function} % % \begin{function}[added = 2015-09-18] % { % \str_put_right:Nn, \str_put_right:Nx, % \str_put_right:cn, \str_put_right:cx, % \str_gput_right:Nn, \str_gput_right:Nx, % \str_gput_right:cn, \str_gput_right:cx % } % \begin{syntax} % \cs{str_put_right:Nn} \meta{str var} \Arg{token list} % \end{syntax} % Converts the \meta{token list} to a \meta{string}, and appends the % result to \meta{str var}. The current contents of the \meta{str % var} are not automatically converted to a string. % \end{function} % % \subsection{String conditionals} % % \begin{function}[EXP, pTF, added = 2015-09-18] % {\str_if_exist:N, \str_if_exist:c} % \begin{syntax} % \cs{str_if_exist_p:N} \meta{str~var} % \cs{str_if_exist:NTF} \meta{str~var} \Arg{true code} \Arg{false code} % \end{syntax} % Tests whether the \meta{str~var} is currently defined. This does not % check that the \meta{str~var} really is a string. 
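% For example (the variable name here is purely illustrative), % \begin{verbatim} % \str_if_exist:NTF \l_my_str % { \str_use:N \l_my_str } % { \str_new:N \l_my_str } % \end{verbatim} % uses the content of |\l_my_str| if the variable exists, and declares it % otherwise.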
% \end{function} % % \begin{function}[EXP,pTF, added = 2015-09-18] % {\str_if_empty:N, \str_if_empty:c} % \begin{syntax} % \cs{str_if_empty_p:N} \meta{str~var} % \cs{str_if_empty:NTF} \meta{str~var} \Arg{true code} \Arg{false code} % \end{syntax} % Tests if the \meta{string variable} is entirely empty % (\emph{i.e.}~contains no characters at all). % \end{function} % % \begin{function}[EXP,pTF, added = 2015-09-18] % {\str_if_eq:NN, \str_if_eq:Nc, \str_if_eq:cN, \str_if_eq:cc} % \begin{syntax} % \cs{str_if_eq_p:NN} \meta{str~var_1} \meta{str~var_2} % \cs{str_if_eq:NNTF} \meta{str~var_1} \meta{str~var_2} \Arg{true code} \Arg{false code} % \end{syntax} % Compares the content of two \meta{str variables} and % is logically \texttt{true} if the two contain the same characters. % \end{function} % % \begin{function}[EXP,pTF] % { % \str_if_eq:nn, \str_if_eq:Vn, \str_if_eq:on, \str_if_eq:no, % \str_if_eq:nV, \str_if_eq:VV % } % \begin{syntax} % \cs{str_if_eq_p:nn} \Arg{tl_1} \Arg{tl_2} % \cs{str_if_eq:nnTF} \Arg{tl_1} \Arg{tl_2} \Arg{true code} \Arg{false code} % \end{syntax} % Compares the two \meta{token lists} on a character by character % basis, and is \texttt{true} if the two lists contain the same % characters in the same order. Thus for example % \begin{verbatim} % \str_if_eq_p:no { abc } { \tl_to_str:n { abc } } % \end{verbatim} % is logically \texttt{true}. % \end{function} % % \begin{function}[EXP,pTF, added = 2012-06-05]{\str_if_eq_x:nn} % \begin{syntax} % \cs{str_if_eq_x_p:nn} \Arg{tl_1} \Arg{tl_2} % \cs{str_if_eq_x:nnTF} \Arg{tl_1} \Arg{tl_2} \Arg{true code} \Arg{false code} % \end{syntax} % Compares the full expansion of two \meta{token lists} on a character by % character basis, and is \texttt{true} if the two lists contain the same % characters in the same order. Thus for example % \begin{verbatim} % \str_if_eq_x_p:nn { abc } { \tl_to_str:n { abc } } % \end{verbatim} % is logically \texttt{true}. % \end{function} % % \begin{function}[added = 2013-07-24, updated = 2015-02-28, EXP, noTF] % {\str_case:nn, \str_case:on, \str_case:nV, \str_case:nv} % \begin{syntax} % \cs{str_case:nnTF} \Arg{test string} \\ % ~~|{| \\ % ~~~~\Arg{string case_1} \Arg{code case_1} \\ % ~~~~\Arg{string case_2} \Arg{code case_2} \\ % ~~~~\ldots \\ % ~~~~\Arg{string case_n} \Arg{code case_n} \\ % ~~|}| \\ % ~~\Arg{true code} % ~~\Arg{false code} % \end{syntax} % This function compares the \meta{test string} in turn with each % of the \meta{string cases}. If the two are equal (as described for % \cs{str_if_eq:nnTF}) then the % associated \meta{code} is left in the input stream. If any of the % cases are matched, the \meta{true code} is also inserted into the % input stream (after the code for the appropriate case), while if none % match then the \meta{false code} is inserted. The function % \cs{str_case:nn}, which does nothing if there is no match, is also % available. % \end{function} % % \begin{function}[added = 2013-07-24, EXP, noTF]{\str_case_x:nn} % \begin{syntax} % \cs{str_case_x:nnTF} \Arg{test string} \\ % ~~|{| \\ % ~~~~\Arg{string case_1} \Arg{code case_1} \\ % ~~~~\Arg{string case_2} \Arg{code case_2} \\ % ~~~~\ldots \\ % ~~~~\Arg{string case_n} \Arg{code case_n} \\ % ~~|}| \\ % ~~\Arg{true code} % ~~\Arg{false code} % \end{syntax} % This function compares the full expansion of the \meta{test string} % in turn with the full expansion of the \meta{string cases}. If the two % full expansions are equal (as described for \cs{str_if_eq:nnTF}) then the % associated \meta{code} is left in the input stream.
If any of the % cases are matched, the \meta{true code} is also inserted into the % input stream (after the code for the appropriate case), while if none % match then the \meta{false code} is inserted. The function % \cs{str_case_x:nn}, which does nothing if there is no match, is also % available. % The \meta{test string} is expanded in each comparison, and must % always yield the same result: for example, random numbers must % not be used within this string. % \end{function} % % \section{Working with the content of strings} % % \begin{function}[EXP, added = 2015-09-18]{\str_use:N, \str_use:c} % \begin{syntax} % \cs{str_use:N} \meta{str~var} % \end{syntax} % Recovers the content of a \meta{str~var} and places it % directly in the input stream. An error will be raised if the variable % does not exist or if it is invalid. Note that it is possible to use % a \meta{str} directly without an accessor function. % \end{function} % % \begin{function}[EXP, added = 2015-09-18] % {\str_count:N, \str_count:c, \str_count:n, \str_count_ignore_spaces:n} % \begin{syntax} % \cs{str_count:n} \Arg{token list} % \end{syntax} % Leaves in the input stream the number of characters in the string % representation of \meta{token list}, as an integer denotation. The % functions differ in their treatment of spaces. In the case of % \cs{str_count:N} and \cs{str_count:n}, all characters including % spaces are counted. The \cs{str_count_ignore_spaces:n} function % leaves the number of non-space characters in the input stream. % \end{function} % % \begin{function}[EXP, added = 2015-09-18] % {\str_count_spaces:N, \str_count_spaces:c, \str_count_spaces:n} % \begin{syntax} % \cs{str_count_spaces:n} \Arg{token list} % \end{syntax} % Leaves in the input stream the number of space characters in the % string representation of \meta{token list}, as an integer % denotation. Of course, this function has no \texttt{_ignore_spaces} % variant. % \end{function} % % \begin{function}[EXP, added = 2015-09-18] % {\str_head:N, \str_head:c, \str_head:n, \str_head_ignore_spaces:n} % \begin{syntax} % \cs{str_head:n} \Arg{token list} % \end{syntax} % Converts the \meta{token list} into a \meta{string}. The first % character in the \meta{string} is then left in the input stream, % with category code \enquote{other}. The functions differ if the % first character is a space: \cs{str_head:N} and \cs{str_head:n} % return a space token with category code~$10$ (blank space), while % the \cs{str_head_ignore_spaces:n} function ignores this space % character and leaves the first non-space character in the input % stream. If the \meta{string} is empty (or only contains spaces in % the case of the \texttt{_ignore_spaces} function), then nothing is % left on the input stream. % \end{function} % % \begin{function}[EXP, added = 2015-09-18] % {\str_tail:N, \str_tail:c, \str_tail:n, \str_tail_ignore_spaces:n} % \begin{syntax} % \cs{str_tail:n} \Arg{token list} % \end{syntax} % Converts the \meta{token list} to a \meta{string}, removes the first % character, and leaves the remaining characters (if any) in the input % stream, with category codes $12$ and $10$ (for spaces). The % functions differ in the case where the first character is a space: % \cs{str_tail:N} and \cs{str_tail:n} will trim only that space, while % \cs{str_tail_ignore_spaces:n} removes the first non-space character % and any space before it. If the \meta{token list} is empty (or % blank in the case of the \texttt{_ignore_spaces} variant), then % nothing is left on the input stream. 
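% For example, % \begin{verbatim} % \str_tail:n { ~ hello } % \str_tail_ignore_spaces:n { ~ hello } % \end{verbatim} % leave |hello| and |ello| respectively in the input stream (the |~| here % denoting a space in the input).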
% \end{function} % % \begin{function}[EXP, added = 2015-09-18] % {\str_item:Nn, \str_item:nn, \str_item_ignore_spaces:nn} % \begin{syntax} % \cs{str_item:nn} \Arg{token list} \Arg{integer expression} % \end{syntax} % Converts the \meta{token list} to a \meta{string}, and leaves in the % input stream the character in position \meta{integer expression} of % the \meta{string}, starting at $1$ for the first (left-most) % character. In the case of \cs{str_item:Nn} and \cs{str_item:nn}, % all characters including spaces are taken into account. The % \cs{str_item_ignore_spaces:nn} function skips spaces when counting % characters. If the \meta{integer expression} is negative, % characters are counted from the end of the \meta{string}. Hence, % $-1$ is the right-most character, \emph{etc.} % \end{function} % % \begin{function}[EXP, added = 2015-09-18] % { % \str_range:Nnn, \str_range:cnn, \str_range:nnn, % \str_range_ignore_spaces:nnn % } % \begin{syntax} % \cs{str_range:nnn} \Arg{token list} \Arg{start index} \Arg{end index} % \end{syntax} % Converts the \meta{token list} to a \meta{string}, and leaves in the % input stream the characters from the \meta{start index} to the % \meta{end index} inclusive. Positive \meta{indices} are counted % from the start of the string, $1$~being the first character, and % negative \meta{indices} are counted from the end of the string, % $-1$~being the last character. If either of \meta{start index} or % \meta{end index} is~$0$, the result is empty. For instance, % \begin{verbatim} % \iow_term:x { \str_range:nnn { abcdef } { 2 } { 5 } } % \iow_term:x { \str_range:nnn { abcdef } { -4 } { -1 } } % \iow_term:x { \str_range:nnn { abcdef } { -2 } { -1 } } % \iow_term:x { \str_range:nnn { abcdef } { 0 } { -1 } } % \end{verbatim} % will print \texttt{bcde}, \texttt{cdef}, \texttt{ef}, and an empty % line to the terminal. The \meta{start index} must always be smaller than % or equal to the \meta{end index}: if this is not the case then no output % is generated. Thus % \begin{verbatim} % \iow_term:x { \str_range:nnn { abcdef } { 5 } { 2 } } % \iow_term:x { \str_range:nnn { abcdef } { -1 } { -4 } } % \end{verbatim} % both yield empty strings. % \end{function} % % \section{String manipulation} % % \begin{function}[EXP, added = 2015-03-01] % { % \str_lower_case:n, \str_lower_case:f, % \str_upper_case:n, \str_upper_case:f % } % \begin{syntax} % \cs{str_lower_case:n} \Arg{tokens} % \cs{str_upper_case:n} \Arg{tokens} % \end{syntax} % Converts the input \meta{tokens} to their string representation, as % described for \cs{tl_to_str:n}, and then to the lower or upper % case representation using a one-to-one mapping as described by the % Unicode Consortium file |UnicodeData.txt|. % % These functions are intended for case changing programmatic data in % places where upper/lower case distinctions are meaningful. One example % would be automatically generating a function name from user input where % some case changing is needed. In this situation the input is programmatic, % not textual, case does have meaning and a language-independent one-to-one % mapping is appropriate. For example % \begin{verbatim} % \cs_new_protected:Npn \myfunc:nn #1#2 % { % \cs_set_protected:cpn % { % user % \str_upper_case:f { \tl_head:n {#1} } % \str_lower_case:f { \tl_tail:n {#1} } % } % { #2 } % } % \end{verbatim} % would be used to generate a function with an auto-generated name consisting % of the upper case equivalent of the supplied name followed by the lower % case equivalent of the rest of the input. 
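% Thus, for example, |\myfunc:nn { stop } { code }| would define the protected % function |\userStop| with replacement text |code|.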
% % These functions should \emph{not} be used for % \begin{itemize} % \item Caseless comparisons: use \cs{str_fold_case:n} for this % situation (case folding is distinct from lower casing). % \item Case changing text for typesetting: see the % \cs[index=tl_lower_case:n]{tl_lower_case:n(n)}, % \cs[index=tl_upper_case:n]{tl_upper_case:n(n)} and % \cs[index=tl_mixed_case:n]{tl_mixed_case:n(n)} functions which % correctly deal with context-dependence and other factors appropriate % to text case changing. % \end{itemize} % % \begin{texnote} % As with all \pkg{expl3} functions, the input supported by % these functions is \emph{engine-native} characters which are or % interoperate with \textsc{utf-8}. As such, when used with \pdfTeX{} % \emph{only} the Latin alphabet characters A--Z will be case-changed % (\emph{i.e.}~the \textsc{ascii} range which coincides with % \textsc{utf-8}). Full \textsc{utf-8} support is available with both % \XeTeX{} and \LuaTeX{}, subject only to the fact that \XeTeX{} in % particular has issues with characters of code above hexadecimal % $0\mathrm{xFFFF}$ when interacting with \cs{tl_to_str:n}. % \end{texnote} % \end{function} % % \begin{function}[EXP, added = 2014-06-19, updated = 2016-03-07] % {\str_fold_case:n, \str_fold_case:V} % \begin{syntax} % \cs{str_fold_case:n} \Arg{tokens} % \end{syntax} % Converts the input \meta{tokens} to their string representation, as % described for \cs{tl_to_str:n}, and then folds the case of the resulting % \meta{string} to remove case information. The result of this process is % left in the input stream. % % String folding is a process used for material such as identifiers rather % than for \enquote{text}. The folding provided by \cs{str_fold_case:n} % follows the mappings provided by the \href{http://www.unicode.org}^^A % {Unicode Consortium}, who % \href{http://www.unicode.org/faq/casemap_charprop.html#2}{state}: % \begin{quote} % Case folding is primarily used for caseless comparison of text, such % as identifiers in a computer program, rather than actual text % transformation. Case folding in Unicode is based on the lowercase % mapping, but includes additional changes to the source text to help make % it language-insensitive and consistent. As a result, case-folded text % should be used solely for internal processing and generally should not be % stored or displayed to the end user. % \end{quote} % The folding approach implemented by \cs{str_fold_case:n} follows the % \enquote{full} scheme defined by the Unicode Consortium % (\emph{e.g.}~\SS folds to \texttt{ss}). As case-folding is % a language-insensitive process, there is no special treatment of % Turkic input (\emph{i.e.}~\texttt{I} always folds to \texttt{i} and % not to \texttt{\i}). % % \begin{texnote} % As with all \pkg{expl3} functions, the input supported by % \cs{str_fold_case:n} is \emph{engine-native} characters which are or % interoperate with \textsc{utf-8}. As such, when used with \pdfTeX{} % \emph{only} the Latin alphabet characters A--Z will be case-folded % (\emph{i.e.}~the \textsc{ascii} range which coincides with % \textsc{utf-8}). Full \textsc{utf-8} support is available with both % \XeTeX{} and \LuaTeX{}, subject only to the fact that \XeTeX{} in % particular has issues with characters of code above hexadecimal % $0\mathrm{xFFFF}$ when interacting with \cs{tl_to_str:n}.
% \end{texnote} % \end{function} % % \section{Viewing strings} % % \begin{function}[added = 2015-09-18] % {\str_show:N, \str_show:c, \str_show:n} % \begin{syntax} % \cs{str_show:N} \meta{str~var} % \end{syntax} % Displays the content of the \meta{str~var} on the terminal. % \end{function} % % \section{Constant token lists} % % \begin{variable}[added = 2015-09-19] % { % \c_ampersand_str, % \c_atsign_str, % \c_backslash_str, % \c_left_brace_str, % \c_right_brace_str, % \c_circumflex_str, % \c_colon_str, % \c_dollar_str, % \c_hash_str, % \c_percent_str, % \c_tilde_str, % \c_underscore_str % } % Constant strings, containing a single character token, with category % code $12$. % \end{variable} % % \section{Scratch strings} % % \begin{variable}{\l_tmpa_str, \l_tmpb_str} % Scratch strings for local assignment. These are never used by % the kernel code, and so are safe for use with any \LaTeX3-defined % function. However, they may be overwritten by other non-kernel % code and so should only be used for short-term storage. % \end{variable} % % \begin{variable}{\g_tmpa_str, \g_tmpb_str} % Scratch strings for global assignment. These are never used by % the kernel code, and so are safe for use with any \LaTeX3-defined % function. However, they may be overwritten by other non-kernel % code and so should only be used for short-term storage. % \end{variable} % % \subsection{Internal string functions} % % \begin{function}[EXP]{\__str_if_eq_x:nn} % \begin{syntax} % \cs{__str_if_eq_x:nn} \Arg{tl_1} \Arg{tl_2} % \end{syntax} % Compares the full expansion of two \meta{token lists} on a character by % character basis, and is \texttt{true} if the two lists contain the same % characters in the same order. Leaves |0| in the input stream if the % condition is true, and |+1| or |-1| otherwise. % \end{function} % % \begin{function}{\__str_if_eq_x_return:nn} % \begin{syntax} % \cs{__str_if_eq_x_return:nn} \Arg{tl_1} \Arg{tl_2} % \end{syntax} % Compares the full expansion of two \meta{token lists} on a character by % character basis, and is \texttt{true} if the two lists contain the same % characters in the same order. Either \cs{prg_return_true:} or % \cs{prg_return_false:} is then left in the input stream. This is a version % of \cs{str_if_eq_x:nnTF} coded for speed. % \end{function} % % \begin{function}[EXP]{\__str_to_other:n} % \begin{syntax} % \cs{__str_to_other:n} \Arg{token list} % \end{syntax} % Converts the \meta{token list} to an \meta{other string}, where % spaces have category code \enquote{other}. This function can be % \texttt{f}-expanded without fear of losing a leading space, since % spaces do not have category code $10$ in its result. It takes % time quadratic in the character count of the string. % \end{function} % % \begin{function}[rEXP]{\__str_to_other_fast:n} % \begin{syntax} % \cs{__str_to_other_fast:n} \Arg{token list} % \end{syntax} % Same behaviour as \cs{__str_to_other:n} but only restricted-expandable. % It takes time linear in the character count of the string. % It is used for \cs{iow_wrap:nnnN}. % \end{function} % % \begin{function}[EXP]{\__str_count:n} % \begin{syntax} % \cs{__str_count:n} \Arg{other string} % \end{syntax} % This function expects an argument that is entirely made of % characters with category \enquote{other}, as produced by % \cs{__str_to_other:n}. It leaves in the input stream the number of % character tokens in the \meta{other string}, faster than the % analogous \cs{str_count:n} function.
% \end{function} % % \begin{function}[EXP]{\__str_range:nnn} % \begin{syntax} % \cs{__str_range:nnn} \Arg{other string} \Arg{start index} \Arg{end index} % \end{syntax} % Identical to \cs{str_range:nnn} except that the first argument is % expected to be entirely made of characters with category % \enquote{other}, as produced by \cs{__str_to_other:n}, and the % result is also an \meta{other string}. % \end{function} % % \end{documentation} % % \begin{implementation} % % \section{\pkg{l3str} implementation} % % \begin{macrocode} %<*initex|package> % \end{macrocode} % % \begin{macrocode} %<@@=str> % \end{macrocode} % % \subsection{Creating and setting string variables} % % \begin{macro} % { % \str_new:N, \str_new:c, % \str_use:N, \str_use:c, % \str_clear:N, \str_clear:c, % \str_gclear:N,\str_gclear:c, % \str_clear_new:N, \str_clear_new:c, % \str_gclear_new:N, \str_gclear_new:c % } % \begin{macro} % { % \str_set_eq:NN, \str_set_eq:cN, \str_set_eq:Nc, \str_set_eq:cc, % \str_gset_eq:NN, \str_gset_eq:cN, \str_gset_eq:Nc, \str_gset_eq:cc % } % A string is simply a token list. The full mapping system isn't set up % yet so do things by hand. % \begin{macrocode} \group_begin: \cs_set_protected:Npn \@@_tmp:n #1 { \tl_if_blank:nF {#1} { \cs_new_eq:cc { str_ #1 :N } { tl_ #1 :N } \exp_args:Nc \cs_generate_variant:Nn { str_ #1 :N } { c } \@@_tmp:n } } \@@_tmp:n { new } { use } { clear } { gclear } { clear_new } { gclear_new } { } \group_end: \cs_new_eq:NN \str_set_eq:NN \tl_set_eq:NN \cs_new_eq:NN \str_gset_eq:NN \tl_gset_eq:NN \cs_generate_variant:Nn \str_set_eq:NN { c , Nc , cc } \cs_generate_variant:Nn \str_gset_eq:NN { c , Nc , cc } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro} % { % \str_set:Nn, \str_set:Nx, % \str_set:cn, \str_set:cx, % \str_gset:Nn, \str_gset:Nx, % \str_gset:cn, \str_gset:cx, % \str_const:Nn, \str_const:Nx, % \str_const:cn, \str_const:cx, % \str_put_left:Nn, \str_put_left:Nx, % \str_put_left:cn, \str_put_left:cx, % \str_gput_left:Nn, \str_gput_left:Nx, % \str_gput_left:cn, \str_gput_left:cx, % \str_put_right:Nn, \str_put_right:Nx, % \str_put_right:cn, \str_put_right:cx, % \str_gput_right:Nn, \str_gput_right:Nx, % \str_gput_right:cn, \str_gput_right:cx, % } % Simply convert the token list inputs to \meta{strings}. % \begin{macrocode} \group_begin: \cs_set_protected:Npn \@@_tmp:n #1 { \tl_if_blank:nF {#1} { \cs_new_protected:cpx { str_ #1 :Nn } ##1##2 { \exp_not:c { tl_ #1 :Nx } ##1 { \exp_not:N \tl_to_str:n {##2} } } \exp_args:Nc \cs_generate_variant:Nn { str_ #1 :Nn } { Nx , cn , cx } \@@_tmp:n } } \@@_tmp:n { set } { gset } { const } { put_left } { gput_left } { put_right } { gput_right } { } \group_end: % \end{macrocode} % \end{macro} % % \subsection{String comparisons} % % \begin{macro}[pTF, EXP] % { % \str_if_empty:N, \str_if_empty:c, % \str_if_exist:N, \str_if_exist:c % } % More copy-paste! % \begin{macrocode} \prg_new_eq_conditional:NNn \str_if_exist:N \tl_if_exist:N { p , T , F , TF } \prg_new_eq_conditional:NNn \str_if_exist:c \tl_if_exist:c { p , T , F , TF } \prg_new_eq_conditional:NNn \str_if_empty:N \tl_if_empty:N { p , T , F , TF } \prg_new_eq_conditional:NNn \str_if_empty:c \tl_if_empty:c { p , T , F , TF } % \end{macrocode} % \end{macro} % % \begin{macro}[int, EXP]{\@@_if_eq_x:nn} % \begin{macro}[aux, EXP]{\@@_escape_x:n} % String comparisons rely on the primitive \cs[index=pdfstrcmp]{(pdf)strcmp} if available: % \LuaTeX{} does not have it, so emulation is required. 
As the net result % is that we do not \emph{always} use the primitive, the correct approach % is to wrap up in a function with defined behaviour. That's done by % providing a wrapper and then redefining in the \LuaTeX{} case. Note that % the necessary Lua code is covered in \pkg{l3bootstrap}: long-term this may % need to go into a separate Lua file, but at present it's somewhere that % spaces are not skipped for ease-of-input. The need to detokenize and force % expansion of input arises from the case where a |#| token is used in the % input, \emph{e.g.}~|\__str_if_eq_x:nn {#} { \tl_to_str:n {#} }|, which % would otherwise fail as \cs{luatex_luaescapestring:D} does not double % such tokens. % \begin{macrocode} \cs_new:Npn \@@_if_eq_x:nn #1#2 { \pdftex_strcmp:D {#1} {#2} } \cs_if_exist:NT \luatex_luatexversion:D { \cs_set:Npn \@@_if_eq_x:nn #1#2 { \luatex_directlua:D { l3kernel.strcmp ( " \@@_escape_x:n {#1} " , " \@@_escape_x:n {#2} " ) } } \cs_new:Npn \@@_escape_x:n #1 { \luatex_luaescapestring:D { \etex_detokenize:D \exp_after:wN { \luatex_expanded:D {#1} } } } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[int, EXP]{\@@_if_eq_x_return:nn} % It turns out that we often need to compare a token list % with the result of applying some function to it, and % return with \cs{prg_return_true/false:}. This test is % similar to \cs{str_if_eq:nnTF} (see \pkg{l3str}), % but is hard-coded for speed. % \begin{macrocode} \cs_new:Npn \@@_if_eq_x_return:nn #1 #2 { \if_int_compare:w \@@_if_eq_x:nn {#1} {#2} = 0 \exp_stop_f: \prg_return_true: \else: \prg_return_false: \fi: } % \end{macrocode} % \end{macro} % % \begin{macro}[pTF, EXP] % { % \str_if_eq:nn, \str_if_eq:Vn, \str_if_eq:on, \str_if_eq:nV, % \str_if_eq:no, \str_if_eq:VV, % \str_if_eq_x:nn % } % Modern engines provide a direct way of comparing two token lists, % but returning a number. This set of conditionals therefore makes life % a bit clearer. The \texttt{nn} and \texttt{xx} versions are created % directly as this is most efficient. % \begin{macrocode} \prg_new_conditional:Npnn \str_if_eq:nn #1#2 { p , T , F , TF } { \if_int_compare:w \@@_if_eq_x:nn { \exp_not:n {#1} } { \exp_not:n {#2} } = 0 \exp_stop_f: \prg_return_true: \else: \prg_return_false: \fi: } \cs_generate_variant:Nn \str_if_eq_p:nn { V , o } \cs_generate_variant:Nn \str_if_eq_p:nn { nV , no , VV } \cs_generate_variant:Nn \str_if_eq:nnT { V , o } \cs_generate_variant:Nn \str_if_eq:nnT { nV , no , VV } \cs_generate_variant:Nn \str_if_eq:nnF { V , o } \cs_generate_variant:Nn \str_if_eq:nnF { nV , no , VV } \cs_generate_variant:Nn \str_if_eq:nnTF { V , o } \cs_generate_variant:Nn \str_if_eq:nnTF { nV , no , VV } \prg_new_conditional:Npnn \str_if_eq_x:nn #1#2 { p , T , F , TF } { \if_int_compare:w \@@_if_eq_x:nn {#1} {#2} = 0 \exp_stop_f: \prg_return_true: \else: \prg_return_false: \fi: } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP, pTF] % {\str_if_eq:NN, \str_if_eq:Nc, \str_if_eq:cN, \str_if_eq:cc} % Note that \cs{str_if_eq:NN} is different from % \cs{tl_if_eq:NN} because it needs to ignore category codes.
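% For example, a string variable containing |abc| (category code $12$ characters) and a token list variable containing |abc| (category code $11$ characters) are equal for \cs{str_if_eq:NN} but would be distinguished by \cs{tl_if_eq:NN}.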
% \begin{macrocode} \prg_new_conditional:Npnn \str_if_eq:NN #1#2 { p , TF , T , F } { \if_int_compare:w \@@_if_eq_x:nn { \tl_to_str:N #1 } { \tl_to_str:N #2 } = 0 \exp_stop_f: \prg_return_true: \else: \prg_return_false: \fi: } \cs_generate_variant:Nn \str_if_eq:NNT { c , Nc , cc } \cs_generate_variant:Nn \str_if_eq:NNF { c , Nc , cc } \cs_generate_variant:Nn \str_if_eq:NNTF { c , Nc , cc } \cs_generate_variant:Nn \str_if_eq_p:NN { c , Nc , cc } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP, noTF] % {\str_case:nn, \str_case:on, \str_case:nV, \str_case:nv, \str_case_x:nn} % \begin{macro}[EXP, aux]{\@@_case:nnTF, \@@_case_x:nnTF} % \begin{macro}[aux, EXP] % {\@@_case:nw, \@@_case_x:nw, \@@_case_end:nw} % Much the same as \cs[index=tl_case:nn]{tl_case:nn(TF)} here: % just a change in the internal comparison. % \begin{macrocode} \cs_new:Npn \str_case:nn #1#2 { \exp:w \@@_case:nnTF {#1} {#2} { } { } } \cs_new:Npn \str_case:nnT #1#2#3 { \exp:w \@@_case:nnTF {#1} {#2} {#3} { } } \cs_new:Npn \str_case:nnF #1#2 { \exp:w \@@_case:nnTF {#1} {#2} { } } \cs_new:Npn \str_case:nnTF #1#2 { \exp:w \@@_case:nnTF {#1} {#2} } \cs_new:Npn \@@_case:nnTF #1#2#3#4 { \@@_case:nw {#1} #2 {#1} { } \q_mark {#3} \q_mark {#4} \q_stop } \cs_generate_variant:Nn \str_case:nn { o , nV , nv } \cs_generate_variant:Nn \str_case:nnT { o , nV , nv } \cs_generate_variant:Nn \str_case:nnF { o , nV , nv } \cs_generate_variant:Nn \str_case:nnTF { o , nV , nv } \cs_new:Npn \@@_case:nw #1#2#3 { \str_if_eq:nnTF {#1} {#2} { \@@_case_end:nw {#3} } { \@@_case:nw {#1} } } \cs_new:Npn \str_case_x:nn #1#2 { \exp:w \@@_case_x:nnTF {#1} {#2} { } { } } \cs_new:Npn \str_case_x:nnT #1#2#3 { \exp:w \@@_case_x:nnTF {#1} {#2} {#3} { } } \cs_new:Npn \str_case_x:nnF #1#2 { \exp:w \@@_case_x:nnTF {#1} {#2} { } } \cs_new:Npn \str_case_x:nnTF #1#2 { \exp:w \@@_case_x:nnTF {#1} {#2} } \cs_new:Npn \@@_case_x:nnTF #1#2#3#4 { \@@_case_x:nw {#1} #2 {#1} { } \q_mark {#3} \q_mark {#4} \q_stop } \cs_new:Npn \@@_case_x:nw #1#2#3 { \str_if_eq_x:nnTF {#1} {#2} { \@@_case_end:nw {#3} } { \@@_case_x:nw {#1} } } \cs_new_eq:NN \@@_case_end:nw \__prg_case_end:nw % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % % \subsection{Accessing specific characters in a string} % % \begin{macro}[EXP, int]{\@@_to_other:n} % \begin{macro}[EXP, aux]{\@@_to_other_loop:w, \@@_to_other_end:w} % First apply \cs{tl_to_str:n}, then replace all spaces by % \enquote{other} spaces, $8$ at a time, storing the converted part of % the string between the \cs{q_mark} and \cs{q_stop} markers. The end % is detected when \cs{@@_to_other_loop:w} finds one of the trailing % |A|, distinguished from any contents of the initial token list by % their category. Then \cs{@@_to_other_end:w} is called, and finds % the result between \cs{q_mark} and the first |A| (well, there is % also the need to remove a space). 
% \begin{macrocode} \cs_new:Npn \@@_to_other:n #1 { \exp_after:wN \@@_to_other_loop:w \tl_to_str:n {#1} ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ \q_mark \q_stop } \group_begin: \tex_lccode:D `\* = `\ % \tex_lccode:D `\A = `\A \tex_lowercase:D { \group_end: \cs_new:Npn \@@_to_other_loop:w #1 ~ #2 ~ #3 ~ #4 ~ #5 ~ #6 ~ #7 ~ #8 ~ #9 \q_stop { \if_meaning:w A #8 \@@_to_other_end:w \fi: \@@_to_other_loop:w #9 #1 * #2 * #3 * #4 * #5 * #6 * #7 * #8 * \q_stop } \cs_new:Npn \@@_to_other_end:w \fi: #1 \q_mark #2 * A #3 \q_stop { \fi: #2 } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[rEXP, int]{\@@_to_other_fast:n} % \begin{macro}[rEXP, aux]{\@@_to_other_fast_loop:w, \@@_to_other_fast_end:w} % The difference with \cs{@@_to_other:n} is that the converted part is % left in the input stream, making these commands only % restricted-expandable. % \begin{macrocode} \cs_new:Npn \@@_to_other_fast:n #1 { \exp_after:wN \@@_to_other_fast_loop:w \tl_to_str:n {#1} ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ A ~ \q_stop } \group_begin: \tex_lccode:D `\* = `\ % \tex_lccode:D `\A = `\A \tex_lowercase:D { \group_end: \cs_new:Npn \@@_to_other_fast_loop:w #1 ~ #2 ~ #3 ~ #4 ~ #5 ~ #6 ~ #7 ~ #8 ~ #9 ~ { \if_meaning:w A #9 \@@_to_other_fast_end:w \fi: #1 * #2 * #3 * #4 * #5 * #6 * #7 * #8 * #9 \@@_to_other_fast_loop:w * } \cs_new:Npn \@@_to_other_fast_end:w #1 * A #2 \q_stop {#1} } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[EXP] % {\str_item:Nn, \str_item:cn, \str_item:nn, \str_item_ignore_spaces:nn} % \begin{macro}[EXP, aux]{\@@_item:nn, \@@_item:w} % The \cs{str_item:nn} hands its argument with spaces escaped to % \cs{@@_item:nn}, and makes sure to turn the result back into % a proper string (with category code~$10$ spaces) eventually. The % \cs{str_item_ignore_spaces:nn} function does not escape spaces, % which are thus ignored by \cs{@@_item:nn} since % everything else is done with undelimited arguments. % Evaluate the \meta{index} argument~|#2| and count characters in % the string, passing those two numbers to \cs{@@_item:w} for % further analysis. If the \meta{index} is negative, shift it by % the \meta{count} to know how many characters to discard, and if % that is still negative give an empty result. If the \meta{index} % is larger than the \meta{count}, give an empty result, and % otherwise discard $\meta{index}-1$ characters before returning the % following one. The shift by $-1$ is obtained by inserting an empty % brace group before the string in that case: that brace group also % covers the case where the \meta{index} is zero.
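% For example, for |\str_item:nn { abc } { -1 }| the \meta{count} is $3$, so the shifted \meta{index} is $-1 + 3 = 2$: two characters are discarded and |c| is returned.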
% \begin{macrocode} \cs_new:Npn \str_item:Nn { \exp_args:No \str_item:nn } \cs_generate_variant:Nn \str_item:Nn { c } \cs_new:Npn \str_item:nn #1#2 { \exp_args:Nf \tl_to_str:n { \exp_args:Nf \@@_item:nn { \@@_to_other:n {#1} } {#2} } } \cs_new:Npn \str_item_ignore_spaces:nn #1 { \exp_args:No \@@_item:nn { \tl_to_str:n {#1} } } \cs_new:Npn \@@_item:nn #1#2 { \exp_after:wN \@@_item:w \__int_value:w \__int_eval:w #2 \exp_after:wN ; \__int_value:w \@@_count:n {#1} ; #1 \q_stop } \cs_new:Npn \@@_item:w #1; #2; { \int_compare:nNnTF {#1} < 0 { \int_compare:nNnTF {#1} < {-#2} { \use_none_delimit_by_q_stop:w } { \exp_after:wN \use_i_delimit_by_q_stop:nw \exp:w \exp_after:wN \@@_skip_exp_end:w \__int_value:w \__int_eval:w #1 + #2 ; } } { \int_compare:nNnTF {#1} > {#2} { \use_none_delimit_by_q_stop:w } { \exp_after:wN \use_i_delimit_by_q_stop:nw \exp:w \@@_skip_exp_end:w #1 ; { } } } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[EXP, aux]{\@@_skip_exp_end:w} % \begin{macro}[EXP, aux] % {\@@_skip_loop:wNNNNNNNN, \@@_skip_end:w, \@@_skip_end:NNNNNNNN} % Removes |max(#1,0)| characters from the input stream, and then % leaves \cs{exp_end:}. This should be expanded using % \cs{exp:w}. We remove characters $8$ at a time until % there are at most $8$ to remove. Then we do a dirty trick: the % \cs{if_case:w} construction leaves between $0$ and $8$ times the % \cs{or:} control sequence, and those \cs{or:} become arguments of % \cs{@@_skip_end:NNNNNNNN}. If the number of characters to remove % is $6$, say, then there are two \cs{or:} left, and the $8$ arguments % of \cs{@@_skip_end:NNNNNNNN} are the two \cs{or:}, and $6$ % characters from the input stream, exactly what we wanted to % remove. Then close the \cs{if_case:w} conditional with \cs{fi:}, and % stop the initial expansion with \cs{exp_end:} (see places where % \cs{@@_skip_exp_end:w} is called). % \begin{macrocode} \cs_new:Npn \@@_skip_exp_end:w #1; { \if_int_compare:w #1 > 8 \exp_stop_f: \exp_after:wN \@@_skip_loop:wNNNNNNNN \else: \exp_after:wN \@@_skip_end:w \__int_value:w \__int_eval:w \fi: #1 ; } \cs_new:Npn \@@_skip_loop:wNNNNNNNN #1; #2#3#4#5#6#7#8#9 { \exp_after:wN \@@_skip_exp_end:w \__int_value:w \__int_eval:w #1 - 8 ; } \cs_new:Npn \@@_skip_end:w #1 ; { \exp_after:wN \@@_skip_end:NNNNNNNN \if_case:w #1 \exp_stop_f: \or: \or: \or: \or: \or: \or: \or: \or: } \cs_new:Npn \@@_skip_end:NNNNNNNN #1#2#3#4#5#6#7#8 { \fi: \exp_end: } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[EXP] % {\str_range:Nnn, \str_range:nnn, \str_range_ignore_spaces:nnn} % \begin{macro}[EXP, int]{\@@_range:nnn} % \begin{macro}[EXP, aux]{\@@_range:w, \@@_range:nnw} % Sanitize the string. Then evaluate the arguments. At this stage we % also decrement the \meta{start index}, since our goal is to know how % many characters should be removed. Then limit the range to be % non-negative and at most the length of the string (this avoids % needing to check for the end of the string when grabbing % characters), shifting negative numbers by the appropriate amount. % Afterwards, skip characters, then keep some more, and finally drop % the end of the string. 
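% For example, for |\str_range:nnn { abcdef } { -3 } { -1 }| the count is $6$: the \meta{start index} $-3$ is decremented to $-4$ and normalizes to $3$, the \meta{end index} $-1$ normalizes to $6$, so three characters are skipped, $6 - 3 = 3$ characters are collected, and |def| is left in the input stream.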
% \begin{macrocode} \cs_new:Npn \str_range:Nnn { \exp_args:No \str_range:nnn } \cs_generate_variant:Nn \str_range:Nnn { c } \cs_new:Npn \str_range:nnn #1#2#3 { \exp_args:Nf \tl_to_str:n { \exp_args:Nf \@@_range:nnn { \@@_to_other:n {#1} } {#2} {#3} } } \cs_new:Npn \str_range_ignore_spaces:nnn #1 { \exp_args:No \@@_range:nnn { \tl_to_str:n {#1} } } \cs_new:Npn \@@_range:nnn #1#2#3 { \exp_after:wN \@@_range:w \__int_value:w \@@_count:n {#1} \exp_after:wN ; \__int_value:w \__int_eval:w #2 - 1 \exp_after:wN ; \__int_value:w \__int_eval:w #3 ; #1 \q_stop } \cs_new:Npn \@@_range:w #1; #2; #3; { \exp_args:Nf \@@_range:nnw { \@@_range_normalize:nn {#2} {#1} } { \@@_range_normalize:nn {#3} {#1} } } \cs_new:Npn \@@_range:nnw #1#2 { \exp_after:wN \@@_collect_delimit_by_q_stop:w \__int_value:w \__int_eval:w #2 - #1 \exp_after:wN ; \exp:w \@@_skip_exp_end:w #1 ; } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \begin{macro}[EXP, aux]{\@@_range_normalize:nn} % This function converts an \meta{index} argument into an explicit % position in the string (a result of $0$ denoting \enquote{out of % bounds}). Expects two explicit integer arguments: the % \meta{index} |#1| and the string count~|#2|. If |#1| is negative, % replace it by $|#1| + |#2| + 1$, then limit to the range $[0, % |#2|]$. % \begin{macrocode} \cs_new:Npn \@@_range_normalize:nn #1#2 { \int_eval:n { \if_int_compare:w #1 < 0 \exp_stop_f: \if_int_compare:w #1 < -#2 \exp_stop_f: 0 \else: #1 + #2 + 1 \fi: \else: \if_int_compare:w #1 < #2 \exp_stop_f: #1 \else: #2 \fi: \fi: } } % \end{macrocode} % \end{macro} % \begin{macro}[EXP, aux]{\@@_collect_delimit_by_q_stop:w} % \begin{macro}[EXP, aux] % { % \@@_collect_loop:wn, \@@_collect_loop:wnNNNNNNN, % \@@_collect_end:wn, \@@_collect_end:nnnnnnnnw % } % Collects |max(#1,0)| characters, and removes everything else until % \cs{q_stop}. This is somewhat similar to \cs{@@_skip_exp_end:w}, but % accepts integer expression arguments. This time we can only grab % $7$ characters at a time. At the end, we use an \cs{if_case:w} % trick again, so that the $8$ first arguments of % \cs{@@_collect_end:nnnnnnnnw} are some \cs{or:}, followed by an % \cs{fi:}, followed by |#1| characters from the input stream. Simply % leaving this in the input stream will close the conditional properly % and the \cs{or:} disappear. % \begin{macrocode} \cs_new:Npn \@@_collect_delimit_by_q_stop:w #1; { \@@_collect_loop:wn #1 ; { } } \cs_new:Npn \@@_collect_loop:wn #1 ; { \if_int_compare:w #1 > 7 \exp_stop_f: \exp_after:wN \@@_collect_loop:wnNNNNNNN \else: \exp_after:wN \@@_collect_end:wn \fi: #1 ; } \cs_new:Npn \@@_collect_loop:wnNNNNNNN #1; #2 #3#4#5#6#7#8#9 { \exp_after:wN \@@_collect_loop:wn \__int_value:w \__int_eval:w #1 - 7 ; { #2 #3#4#5#6#7#8#9 } } \cs_new:Npn \@@_collect_end:wn #1 ; { \exp_after:wN \@@_collect_end:nnnnnnnnw \if_case:w \if_int_compare:w #1 > 0 \exp_stop_f: #1 \else: 0 \fi: \exp_stop_f: \or: \or: \or: \or: \or: \or: \fi: } \cs_new:Npn \@@_collect_end:nnnnnnnnw #1#2#3#4#5#6#7#8 #9 \q_stop { #1#2#3#4#5#6#7#8 } % \end{macrocode} % \end{macro} % \end{macro} % % \subsection{Counting characters} % % \begin{macro}[EXP] % {\str_count_spaces:N, \str_count_spaces:c, \str_count_spaces:n} % \begin{macro}[EXP, aux]{\@@_count_spaces_loop:w} % To speed up this function, we grab and discard $9$ space-delimited % arguments in each iteration of the loop. 
The loop stops when the % last argument is one of the trailing |X|\meta{number}, and that % \meta{number} is added to the sum of $9$ that precedes, to adjust % the result. % \begin{macrocode} \cs_new:Npn \str_count_spaces:N { \exp_args:No \str_count_spaces:n } \cs_generate_variant:Nn \str_count_spaces:N { c } \cs_new:Npn \str_count_spaces:n #1 { \int_eval:n { \exp_after:wN \@@_count_spaces_loop:w \tl_to_str:n {#1} ~ X 7 ~ X 6 ~ X 5 ~ X 4 ~ X 3 ~ X 2 ~ X 1 ~ X 0 ~ X -1 ~ \q_stop } } \cs_new:Npn \@@_count_spaces_loop:w #1~#2~#3~#4~#5~#6~#7~#8~#9~ { \if_meaning:w X #9 \use_i_delimit_by_q_stop:nw \fi: 9 + \@@_count_spaces_loop:w } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[EXP] % {\str_count:N, \str_count:c, \str_count:n, \str_count_ignore_spaces:n} % \begin{macro}[EXP, int]{\@@_count:n} % \begin{macro}[EXP, aux]{\@@_count_aux:n, \@@_count_loop:NNNNNNNNN} % To count characters in a string we could first escape all spaces % using \cs{@@_to_other:n}, then pass the result to \cs{tl_count:n}. % However, the escaping step would be quadratic in the number of % characters in the string, and we can do better. Namely, sum the % number of spaces (\cs{str_count_spaces:n}) and the result of % \cs{tl_count:n}, which ignores spaces. Since strings tend to be % longer than token lists, we use specialized functions to count % characters ignoring spaces. Namely, loop, grabbing $9$ non-space % characters at each step, and end as soon as we reach one of the $9$ % trailing items. The internal function \cs{@@_count:n}, used in % \cs{str_item:nn} and \cs{str_range:nnn}, is similar to % \cs{str_count_ignore_spaces:n} but expects its argument to already % be a string or a string with spaces escaped. % \begin{macrocode} \cs_new:Npn \str_count:N { \exp_args:No \str_count:n } \cs_generate_variant:Nn \str_count:N { c } \cs_new:Npn \str_count:n #1 { \@@_count_aux:n { \str_count_spaces:n {#1} + \exp_after:wN \@@_count_loop:NNNNNNNNN \tl_to_str:n {#1} } } \cs_new:Npn \@@_count:n #1 { \@@_count_aux:n { \@@_count_loop:NNNNNNNNN #1 } } \cs_new:Npn \str_count_ignore_spaces:n #1 { \@@_count_aux:n { \exp_after:wN \@@_count_loop:NNNNNNNNN \tl_to_str:n {#1} } } \cs_new:Npn \@@_count_aux:n #1 { \int_eval:n { #1 { X 8 } { X 7 } { X 6 } { X 5 } { X 4 } { X 3 } { X 2 } { X 1 } { X 0 } \q_stop } } \cs_new:Npn \@@_count_loop:NNNNNNNNN #1#2#3#4#5#6#7#8#9 { \if_meaning:w X #9 \exp_after:wN \use_none_delimit_by_q_stop:w \fi: 9 + \@@_count_loop:NNNNNNNNN } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % % \subsection{The first character in a string} % % \begin{macro}[EXP] % {\str_head:N, \str_head:c, \str_head:n, \str_head_ignore_spaces:n} % \begin{macro}[EXP, aux]{\@@_head:w} % The \texttt{_ignore_spaces} variant applies \cs{tl_to_str:n} then % grabs the first item, thus skipping spaces. % As usual, \cs{str_head:N} expands its argument and % hands it to \cs{str_head:n}. To circumvent the fact that \TeX{} % skips spaces when grabbing undelimited macro parameters, % \cs{@@_head:w} takes an argument delimited by a space. If |#1| % starts with a non-space character, \cs{use_i_delimit_by_q_stop:nw} % leaves that in the input stream. On the other hand, if |#1| starts % with a space, the \cs{@@_head:w} takes an empty argument, and the % single (initially braced) space in the definition of \cs{@@_head:w} % makes its way to the output. Finally, for an empty argument, the % (braced) empty brace group in the definition of \cs{str_head:n} % gives an empty result after passing through % \cs{use_i_delimit_by_q_stop:nw}. 
% \begin{macrocode} \cs_new:Npn \str_head:N { \exp_args:No \str_head:n } \cs_generate_variant:Nn \str_head:N { c } \cs_new:Npn \str_head:n #1 { \exp_after:wN \@@_head:w \tl_to_str:n {#1} { { } } ~ \q_stop } \cs_new:Npn \@@_head:w #1 ~ % { \use_i_delimit_by_q_stop:nw #1 { ~ } } \cs_new:Npn \str_head_ignore_spaces:n #1 { \exp_after:wN \use_i_delimit_by_q_stop:nw \tl_to_str:n {#1} { } \q_stop } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[EXP] % {\str_tail:N, \str_tail:c, \str_tail:n, \str_tail_ignore_spaces:n} % \begin{macro}[EXP, aux]{\@@_tail_auxi:w, \@@_tail_auxii:w} % Getting the tail is a little bit more convoluted than the head of a % string. We hit the front of the string with \cs{reverse_if:N} % \cs{if_charcode:w} \cs{scan_stop:}. This removes the first % character, and necessarily makes the test true, since the character % cannot match \cs{scan_stop:}. The auxiliary function then inserts % the required \cs{fi:} to close the conditional, and leaves the tail % of the string in the input stream. The details are such that an % empty string has an empty tail (this requires in particular that the % end-marker |X| be unexpandable and not a control sequence). The % \texttt{_ignore_spaces} variant is rather simpler: after converting the % input to a string, \cs{@@_tail_auxii:w} removes one undelimited % argument and leaves everything else until an end-marker \cs{q_mark}. % One can check that an empty (or blank) string yields an empty % tail. % \begin{macrocode} \cs_new:Npn \str_tail:N { \exp_args:No \str_tail:n } \cs_generate_variant:Nn \str_tail:N { c } \cs_new:Npn \str_tail:n #1 { \exp_after:wN \@@_tail_auxi:w \reverse_if:N \if_charcode:w \scan_stop: \tl_to_str:n {#1} X X \q_stop } \cs_new:Npn \@@_tail_auxi:w #1 X #2 \q_stop { \fi: #1 } \cs_new:Npn \str_tail_ignore_spaces:n #1 { \exp_after:wN \@@_tail_auxii:w \tl_to_str:n {#1} \q_mark \q_mark \q_stop } \cs_new:Npn \@@_tail_auxii:w #1 #2 \q_mark #3 \q_stop { #2 } % \end{macrocode} % \end{macro} % \end{macro} % % \subsection{String manipulation} % % \begin{macro}[EXP] % { % \str_fold_case:n, \str_fold_case:V, % \str_lower_case:n, \str_lower_case:f, % \str_upper_case:n, \str_upper_case:f % } % \begin{macro}[aux, EXP]{\@@_change_case:nn} % \begin{macro}[aux, EXP]{\@@_change_case_aux:nn} % \begin{macro}[aux, EXP]{\@@_change_case_result:n} % \begin{macro}[aux, EXP]{\@@_change_case_output:nw, \@@_change_case_output:fw} % \begin{macro}[aux, EXP]{\@@_change_case_end:wn} % \begin{macro}[aux, EXP]{\@@_change_case_loop:nw} % \begin{macro}[aux, EXP]{\@@_change_case_space:n} % \begin{macro}[aux, EXP]{\@@_change_case_char:nN} % \begin{macro}[aux] % {\@@_lookup_lower:N, \@@_lookup_upper:N, \@@_lookup_fold:N} % Case changing for programmatic reasons is done by first detokenizing % input then doing a simple loop that only has to worry about spaces % and everything else. The output is detokenized to allow data sharing % with text-based case changing.
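% For example, |\str_upper_case:n { abc }| first detokenizes its argument; the loop then handles each character in turn, leaving |ABC| in the input stream.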
% \begin{macrocode} \cs_new:Npn \str_fold_case:n #1 { \@@_change_case:nn {#1} { fold } } \cs_new:Npn \str_lower_case:n #1 { \@@_change_case:nn {#1} { lower } } \cs_new:Npn \str_upper_case:n #1 { \@@_change_case:nn {#1} { upper } } \cs_generate_variant:Nn \str_fold_case:n { V } \cs_generate_variant:Nn \str_lower_case:n { f } \cs_generate_variant:Nn \str_upper_case:n { f } \cs_new:Npn \@@_change_case:nn #1 { \exp_after:wN \@@_change_case_aux:nn \exp_after:wN { \tl_to_str:n {#1} } } \cs_new:Npn \@@_change_case_aux:nn #1#2 { \@@_change_case_loop:nw {#2} #1 \q_recursion_tail \q_recursion_stop \@@_change_case_result:n { } } \cs_new:Npn \@@_change_case_output:nw #1#2 \@@_change_case_result:n #3 { #2 \@@_change_case_result:n { #3 #1 } } \cs_generate_variant:Nn \@@_change_case_output:nw { f } \cs_new:Npn \@@_change_case_end:wn #1 \@@_change_case_result:n #2 { #2 } \cs_new:Npn \@@_change_case_loop:nw #1#2 \q_recursion_stop { \tl_if_head_is_space:nTF {#2} { \@@_change_case_space:n } { \@@_change_case_char:nN } {#1} #2 \q_recursion_stop } \use:x { \cs_new:Npn \exp_not:N \@@_change_case_space:n ##1 \c_space_tl } { \@@_change_case_output:nw { ~ } \@@_change_case_loop:nw {#1} } \cs_new:Npn \@@_change_case_char:nN #1#2 { \quark_if_recursion_tail_stop_do:Nn #2 { \@@_change_case_end:wn } \cs_if_exist:cTF { c__unicode_ #1 _ #2 _tl } { \@@_change_case_output:fw { \tl_to_str:c { c__unicode_ #1 _ #2 _tl } } } { \@@_change_case_char_aux:nN {#1} #2 } \@@_change_case_loop:nw {#1} } % \end{macrocode} % For Unicode engines there's a look up to see if the current character % has a valid one-to-one case change mapping. That's not needed for $8$-bit % engines: as they don't have \cs{utex_char:D} all of the changes they can % make are hard-coded and so already picked up above. % \begin{macrocode} \cs_if_exist:NTF \utex_char:D { \cs_new:Npn \@@_change_case_char_aux:nN #1#2 { \int_compare:nNnTF { \use:c { __str_lookup_ #1 :N } #2 } = { 0 } { \@@_change_case_output:nw {#2} } { \@@_change_case_output:fw { \utex_char:D \use:c { __str_lookup_ #1 :N } #2 ~ } } } \cs_new_protected:Npn \@@_lookup_lower:N #1 { \tex_lccode:D `#1 } \cs_new_protected:Npn \@@_lookup_upper:N #1 { \tex_uccode:D `#1 } \cs_new_eq:NN \@@_lookup_fold:N \@@_lookup_lower:N } { \cs_new:Npn \@@_change_case_char_aux:nN #1#2 { \@@_change_case_output:nw {#2} } } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % % \begin{variable} % { % \c_ampersand_str, % \c_atsign_str, % \c_backslash_str, % \c_left_brace_str, % \c_right_brace_str, % \c_circumflex_str, % \c_colon_str, % \c_dollar_str, % \c_hash_str, % \c_percent_str, % \c_tilde_str, % \c_underscore_str % } % For all of those strings, use \cs{cs_to_str:N} to get characters with % the correct category code without worries % \begin{macrocode} \str_const:Nx \c_ampersand_str { \cs_to_str:N \& } \str_const:Nx \c_atsign_str { \cs_to_str:N \@ } \str_const:Nx \c_backslash_str { \cs_to_str:N \\ } \str_const:Nx \c_left_brace_str { \cs_to_str:N \{ } \str_const:Nx \c_right_brace_str { \cs_to_str:N \} } \str_const:Nx \c_circumflex_str { \cs_to_str:N \^ } \str_const:Nx \c_colon_str { \cs_to_str:N \: } \str_const:Nx \c_dollar_str { \cs_to_str:N \$ } \str_const:Nx \c_hash_str { \cs_to_str:N \# } \str_const:Nx \c_percent_str { \cs_to_str:N \% } \str_const:Nx \c_tilde_str { \cs_to_str:N \~ } \str_const:Nx \c_underscore_str { \cs_to_str:N \_ } % \end{macrocode} % \end{variable} % % \begin{variable}{\l_tmpa_str, \l_tmpb_str, 
\g_tmpa_str, \g_tmpb_str} % Scratch strings. % \begin{macrocode} \str_new:N \l_tmpa_str \str_new:N \l_tmpb_str \str_new:N \g_tmpa_str \str_new:N \g_tmpb_str % \end{macrocode} % \end{variable} % % \subsection{Viewing strings} % % \begin{macro}{\str_show:n, \str_show:N, \str_show:c} % Displays a string on the terminal. % \begin{macrocode} \cs_new_eq:NN \str_show:n \tl_show:n \cs_new_eq:NN \str_show:N \tl_show:N \cs_generate_variant:Nn \str_show:N { c } % \end{macrocode} % \end{macro} % % \subsection{Unicode data for case changing} % % \begin{macrocode} %<@@=unicode> % \end{macrocode} % % Case changing both for strings and \enquote{text} requires data from % the Unicode Consortium. Some of this is built into the format (as % \tn{lccode} and \tn{uccode} values) but this covers only the simple % one-to-one situations and does not fully handle, for example, case folding. % % The data required for cross-module manipulations is loaded here: currently % this means for |str| and |tl| functions. As such, the prefix used is not % |str| but rather |unicode|. For performance (as the entire data set must % be read during each run) and as this code comes somewhat early in the % load process, there is quite a bit of low-level code here. % % As only the data needs to remain at the end of this process, everything % is set up inside a group. % \begin{macrocode} \group_begin: % \end{macrocode} % A read stream is needed. The I/O module is not yet in place \emph{and} % we do not want to use up a stream. We therefore use a known free one in % format mode or look for the next free one in package mode (covers plain, % \LaTeXe{} and Con\TeX{}t MkII and MkIV). % \begin{macrocode} %<*initex> \tex_chardef:D \g_@@_data_ior = 0 \scan_stop: %</initex> %<*package> \tex_chardef:D \g_@@_data_ior \etex_numexpr:D \cs_if_exist:NTF \lastallocatedread { \lastallocatedread } { \cs_if_exist:NTF \c_syst_last_allocated_read { \c_syst_last_allocated_read } { \tex_count:D 16 ~ } } + 1 \scan_stop: %</package> % \end{macrocode} % Set up to read each file. As they use |#| to mark comments, there is a need to % deal with |#|. At the same time, spaces are significant so they need to be % picked up. Beyond that, the current category code % scheme works fine. With no I/O loop available, hard-code one that will work % quickly. % \begin{macrocode} \cs_set_protected:Npn \@@_map_inline:n #1 { \group_begin: \tex_catcode:D `\# = 12 \scan_stop: \tex_catcode:D `\ = 10 \scan_stop: \tex_openin:D \g_@@_data_ior = #1 \scan_stop: \cs_if_exist:NT \utex_char:D { \@@_map_loop: } \tex_closein:D \g_@@_data_ior \group_end: } \cs_set_protected:Npn \@@_map_loop: { \tex_ifeof:D \g_@@_data_ior \exp_after:wN \use_none:n \else: \exp_after:wN \use:n \fi: { \tex_read:D \g_@@_data_ior to \l_@@_tmp_tl \if_meaning:w \c_empty_tl \l_@@_tmp_tl \else: \exp_after:wN \@@_parse:w \l_@@_tmp_tl \q_stop \fi: \@@_map_loop: } } % \end{macrocode} % The lead-off parser for each line is common for all of the files. If % the line starts with a |#| it's a comment. There's one special comment % line to look out for in \texttt{SpecialCasing.txt} as we want to ignore % everything after it. As this line does not appear in any other sources % and the test is quite quick (there are relatively few comment lines), it % can be present in all of the passes.
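% (The special comment line in question is |# Conditional Mappings|: once it has been seen, the remainder of \texttt{SpecialCasing.txt} is ignored.)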
% \begin{macrocode} \cs_set_protected:Npn \@@_parse:w #1#2 \q_stop { \reverse_if:N \if:w \c_hash_str #1 \@@_parse_auxi:w #1#2 \q_stop \else: \if_int_compare:w \__str_if_eq_x:nn { \exp_not:n {#2} } { ~Conditional~Mappings~ } = 0 \exp_stop_f: \cs_set_protected:Npn \@@_parse:w ##1 \q_stop { } \fi: \fi: } % \end{macrocode} % Storing each exception is always done in the same way: create a constant % token list which expands to exactly the mapping. These will have the % category codes \enquote{now} (so should be letters) but will be detokenized % for string use. % \begin{macrocode} \cs_set_protected:Npn \@@_store:nnnnn #1#2#3#4#5 { \tl_const:cx { c_@@_ #2 _ \utex_char:D "#1 _tl } { \utex_char:D "#3 ~ \utex_char:D "#4 ~ \tl_if_blank:nF {#5} { \utex_char:D "#5 } } } % \end{macrocode} % Parse the main Unicode data file for title case exceptions (the one-to-one % lower and upper case mappings it contains will all be covered by the \TeX{} % data). % \begin{macrocode} \cs_set_protected:Npn \@@_parse_auxi:w #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 ; #8 ; #9 ; { \@@_parse_auxii:w #1 ; } \cs_set_protected:Npn \@@_parse_auxii:w #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 \q_stop { \tl_if_blank:nF {#7} { \if_int_compare:w \__str_if_eq_x:nn { #5 ~ } {#7} = 0 \exp_stop_f: \else: \tl_const:cx { c_@@_title_ \utex_char:D "#1 _tl } { \utex_char:D "#7 } \fi: } } \@@_map_inline:n { UnicodeData.txt } % \end{macrocode} % The set up for case folding is in two parts. For the basic (core) mappings, % folding is the same as lower casing in most positions so only store % the differences. For the more complex foldings, always store the result, % splitting up the two or three code points in the input as required. % \begin{macrocode} \cs_set_protected:Npn \@@_parse_auxi:w #1 ;~ #2 ;~ #3 ; #4 \q_stop { \if_int_compare:w \__str_if_eq_x:nn {#2} { C } = 0 \exp_stop_f: \if_int_compare:w \tex_lccode:D "#1 = "#3 \scan_stop: \else: \tl_const:cx { c_@@_fold_ \utex_char:D "#1 _tl } { \utex_char:D "#3 ~ } \fi: \else: \if_int_compare:w \__str_if_eq_x:nn {#2} { F } = 0 \exp_stop_f: \@@_parse_auxii:w #1 ~ #3 ~ \q_stop \fi: \fi: } \cs_set_protected:Npn \@@_parse_auxii:w #1 ~ #2 ~ #3 ~ #4 \q_stop { \@@_store:nnnnn {#1} { fold } {#2} {#3} {#4} } \@@_map_inline:n { CaseFolding.txt } % \end{macrocode} % For upper and lower casing special situations, there is a bit more to % do as we also have title casing to consider. % \begin{macrocode} \cs_set_protected:Npn \@@_parse_auxi:w #1 ;~ #2 ;~ #3 ;~ #4 ; #5 \q_stop { \use:n { \@@_parse_auxii:w #1 ~ lower ~ #2 ~ } ~ \q_stop \use:n { \@@_parse_auxii:w #1 ~ upper ~ #4 ~ } ~ \q_stop \if_int_compare:w \__str_if_eq_x:nn {#3} {#4} = 0 \exp_stop_f: \else: \use:n { \@@_parse_auxii:w #1 ~ title ~ #3 ~ } ~ \q_stop \fi: } \cs_set_protected:Npn \@@_parse_auxii:w #1 ~ #2 ~ #3 ~ #4 ~ #5 \q_stop { \tl_if_empty:nF {#4} { \@@_store:nnnnn {#1} {#2} {#3} {#4} {#5} } } \@@_map_inline:n { SpecialCasing.txt } % \end{macrocode} % For the $8$-bit engines, the above does nothing but there is some set % up needed. There is no expandable character generator primitive so some % alternative is needed. As we've not used up hash space for the above, we % can go for the fast approach here of one name per letter. Keeping folding % and lower casing separate makes the use later a bit easier. 
% \begin{macrocode} \cs_if_exist:NF \utex_char:D { \cs_set_protected:Npn \@@_tmp:NN #1#2 { \if_meaning:w \q_recursion_tail #2 \exp_after:wN \use_none_delimit_by_q_recursion_stop:w \fi: \tl_const:cn { c_@@_fold_ #1 _tl } {#2} \tl_const:cn { c_@@_lower_ #1 _tl } {#2} \tl_const:cn { c_@@_upper_ #2 _tl } {#1} \@@_tmp:NN } \@@_tmp:NN AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz ? \q_recursion_tail \q_recursion_stop } % \end{macrocode} % % All done: tidy up. % \begin{macrocode} \group_end: % \end{macrocode} % % \begin{macrocode} %</initex|package> % \end{macrocode} % % \end{implementation} % % \PrintIndex