% \iffalse meta-comment % %% File: l3candidates.dtx Copyright (C) 2012-2017 The LaTeX3 Project % % It may be distributed and/or modified under the conditions of the % LaTeX Project Public License (LPPL), either version 1.3c of this % license or (at your option) any later version. The latest version % of this license is in the file % % http://www.latex-project.org/lppl.txt % % This file is part of the "l3kernel bundle" (The Work in LPPL) % and all files in that bundle must be distributed together. % % ----------------------------------------------------------------------- % % The development version of the bundle can be found at % % https://github.com/latex3/latex3 % % for those people who are interested. % %<*driver> \documentclass[full]{l3doc} \begin{document} \DocInput{\jobname.dtx} \end{document} % % \fi % % \title{^^A % The \textsf{l3candidates} package\\ Experimental additions to % \pkg{l3kernel}^^A % } % % \author{^^A % The \LaTeX3 Project\thanks % {^^A % E-mail: % \href{mailto:latex-team@latex-project.org} % {latex-team@latex-project.org}^^A % }^^A % } % % \date{Released 2017/09/18} % % \maketitle % % \begin{documentation} % % \section{Important notice} % % This module provides a space in which functions can be added to % \pkg{l3kernel} (\pkg{expl3}) while still being experimental. % \begin{quote} % \bfseries % As such, the % functions here may not remain in their current form, or indeed at all, % in \pkg{l3kernel} in the future. % \end{quote} % In contrast to the material in % \pkg{l3experimental}, the functions here are all \emph{small} additions to % the kernel. We encourage programmers to test them out and report back on % the \texttt{LaTeX-L} mailing list. % % \medskip % % Thus, if you intend to use any of these functions from the candidate module in a public package % offered to others for productive use (e.g., being placed on CTAN) please consider the following points carefully: % \begin{itemize} % \item Be prepared that your public packages might require updating when such functions % are being finalized. % \item Consider informing us that you use a particular function in your public package, e.g., by % discussing this on the \texttt{LaTeX-L} % mailing list. This way it becomes easier to coordinate any updates necessary without issues % for the users of your package. % \item Discussing and understanding use cases for a particular addition or concept also helps to % ensure that we provide the right interfaces in the final version so please give us feedback % if you consider a certain candidate function useful (or not). % \end{itemize} % We only add functions in this space if we consider them being serious candidates for a final inclusion % into the kernel. However, real use sometimes leads to better ideas, so functions from this module are % \textbf{not necessarily stable} and we may have to adjust them! % % \section{Additions to \pkg{l3basics}} % % \begin{function}[added = 2017-07-16, updated = 2017-08-02]{\debug_on:n, \debug_off:n} % \begin{syntax} % \cs{debug_on:n} |{| \meta{comma-separated list} |}| % \cs{debug_off:n} |{| \meta{comma-separated list} |}| % \end{syntax} % Turn on and off within a group various debugging code, some of which % is also available as \pkg{expl3} load-time options. 
The items that % can be used in the \meta{list} are % \begin{itemize} % \item \texttt{check-declarations} that checks that all \pkg{expl3} % variables used were previously declared; % \item \texttt{check-expressions} that checks that integer, dimension, % skip, and muskip expressions are not terminated prematurely; % \item \texttt{deprecation} that makes soon-to-be-deprecated commands produce errors; % \item \texttt{log-functions} that logs function definitions; % \end{itemize} % Providing these as switches rather than options allows testing code % even if it relies on other packages: load all other packages, call % \cs{debug_on:n}, and load the code that one is interested in % testing. These functions can only be used in \LaTeXe{} package mode % loaded with \texttt{enable-debug} or another option implying it. % \end{function} % % \begin{function}[added = 2017-07-04]{\mode_leave_vertical:} % \begin{syntax} % \cs{mode_leave_vertical:} % \end{syntax} % Ensures that \TeX{} is not in vertical (inter-paragraph) mode. In % horizontal or math mode this command has no effect; in vertical mode it % switches to horizontal mode and inserts a box of width % \tn{parindent}, followed by the \tn{everypar} token list. % \begin{texnote} % This results in the contents of the \tn{everypar} token register being % inserted after \cs{mode_leave_vertical:} is complete. Notice that in % contrast to the \LaTeXe{} \tn{leavevmode} approach, no box is used % by the method implemented here. % \end{texnote} % \end{function} % % \section{Additions to \pkg{l3box}} % % \subsection{Viewing part of a box} % % \begin{function}{\box_clip:N, \box_clip:c} % \begin{syntax} % \cs{box_clip:N} \meta{box} % \end{syntax} % Clips the \meta{box} so that only material inside the % bounding box is displayed in the output. The updated \meta{box} is an % hbox, irrespective of the nature of the \meta{box} before the clipping is % applied. The clipping applies within the current \TeX{} group level. % % \textbf{These functions require the \LaTeX3 native drivers: they do % not work with the \LaTeXe{} \pkg{graphics} drivers!} % % \begin{texnote} % Clipping is implemented by the driver, and as such the full content of % the box is placed in the output file. Thus clipping does not remove % any information from the raw output, and hidden material can therefore % be viewed by direct examination of the file. % \end{texnote} % \end{function} % % \begin{function}{\box_trim:Nnnnn, \box_trim:cnnnn} % \begin{syntax} % \cs{box_trim:Nnnnn} \meta{box} \Arg{left} \Arg{bottom} \Arg{right} \Arg{top} % \end{syntax} % Adjusts the bounding box of the \meta{box}: \meta{left} is removed from % the left-hand edge of the bounding box, \meta{right} from the right-hand % edge and so forth. All adjustments are \meta{dimension expressions}. % Material outside of the bounding box is still displayed in the output % unless \cs{box_clip:N} is subsequently applied. % The updated \meta{box} is an % hbox, irrespective of the nature of the \meta{box} before the trim % operation is applied. The adjustment applies within the current \TeX{} % group level. The behavior of the operation when the requested trims are % greater than the size of the box is undefined.
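% As an illustration only (a minimal sketch using the scratch box
% \cs{l_tmpa_box}), the following trims $2$\,pt from every edge of a
% box and then clips away the trimmed material:
% \begin{verbatim}
% \hbox_set:Nn \l_tmpa_box { Some~content }
% \box_trim:Nnnnn \l_tmpa_box { 2pt } { 2pt } { 2pt } { 2pt }
% \box_clip:N \l_tmpa_box % requires the LaTeX3 native drivers
% \box_use:N \l_tmpa_box
% \end{verbatim}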
% \end{function} % % \begin{function}{\box_viewport:Nnnnn, \box_viewport:cnnnn} % \begin{syntax} % \cs{box_viewport:Nnnnn} \meta{box} \Arg{llx} \Arg{lly} \Arg{urx} \Arg{ury} % \end{syntax} % Adjusts the bounding box of the \meta{box} such that it has lower-left % co-ordinates (\meta{llx}, \meta{lly}) and upper-right co-ordinates % (\meta{urx}, \meta{ury}). All four co-ordinate positions are % \meta{dimension expressions}. Material outside of the bounding box is % still displayed in the output unless \cs{box_clip:N} is % subsequently applied. % The updated \meta{box} is an % hbox, irrespective of the nature of the \meta{box} before the viewport % operation is applied. The adjustment applies within the current \TeX{} % group level. % \end{function} % % \section{Additions to \pkg{l3clist}} % % \begin{function}[EXP, added = 2016-12-06] % {\clist_rand_item:N, \clist_rand_item:n, \clist_rand_item:c} % \begin{syntax} % \cs{clist_rand_item:N} \meta{clist~var} % \cs{clist_rand_item:n} \Arg{comma list} % \end{syntax} % Selects a pseudo-random item of the \meta{comma list}. If the % \meta{comma list} has no items, the result is empty. This is only % available in \pdfTeX{} and \LuaTeX{}. % \begin{texnote} % The result is returned within the \tn{unexpanded} % primitive (\cs{exp_not:n}), which means that the \meta{item} % does not expand further when appearing in an \texttt{x}-type % argument expansion. % \end{texnote} % \end{function} % % \section{Additions to \pkg{l3coffins}} % % \begin{function}{\coffin_resize:Nnn, \coffin_resize:cnn} % \begin{syntax} % \cs{coffin_resize:Nnn} \meta{coffin} \Arg{width} \Arg{total-height} % \end{syntax} % Resizes the \meta{coffin} to \meta{width} and \meta{total-height}, % both of which should be given as dimension expressions. % \end{function} % % \begin{function}{\coffin_rotate:Nn, \coffin_rotate:cn} % \begin{syntax} % \cs{coffin_rotate:Nn} \meta{coffin} \Arg{angle} % \end{syntax} % Rotates the \meta{coffin} by the \meta{angle} (given in % degrees counter-clockwise). This process rotates both the % coffin content and poles. Multiple rotations do not result in % the bounding box of the coffin growing unnecessarily. % \end{function} % % \begin{function}{\coffin_scale:Nnn, \coffin_scale:cnn} % \begin{syntax} % \cs{coffin_scale:Nnn} \meta{coffin} \Arg{x-scale} \Arg{y-scale} % \end{syntax} % Scales the \meta{coffin} by the factors \meta{x-scale} and % \meta{y-scale} in the horizontal and vertical directions, % respectively. The two scale factors should be given as real numbers. % \end{function} % % \section{Additions to \pkg{l3file}} % % \begin{function}[added = 2017-07-11]{\file_get_mdfive_hash:nN} % \begin{syntax} % \cs{file_get_mdfive_hash:nN} \Arg{file name} \meta{str var} % \end{syntax} % Searches for \meta{file name} using the current \TeX{} search % path and the additional paths controlled by \cs{file_path_include:n}. % If found, sets the \meta{str var} to the MD5 sum generated from the % content of the file. % Where the file is not found, the \meta{str var} will be empty. % \end{function} % % \begin{function}[added = 2017-07-09]{\file_get_size:nN} % \begin{syntax} % \cs{file_get_size:nN} \Arg{file name} \meta{str var} % \end{syntax} % Searches for \meta{file name} using the current \TeX{} search % path and the additional paths controlled by \cs{file_path_include:n}. % If found, sets the \meta{str var} to the size of the file in bytes. % Where the file is not found, the \meta{str var} will be empty.
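% As a short sketch (the file name here is purely a placeholder), the
% result may be tested for emptiness to detect a missing file:
% \begin{verbatim}
% \file_get_size:nN { myfile.tex } \l_tmpa_str
% \tl_if_empty:NTF \l_tmpa_str
%   { \iow_term:n { File~not~found. } }
%   { \iow_term:x { Size:~\l_tmpa_str~bytes. } }
% \end{verbatim}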
% \begin{texnote} % The \XeTeX{} engine provides no way to implement this function. % \end{texnote} % \end{function} % % \begin{function}[added = 2017-07-09]{\file_get_timestamp:nN} % \begin{syntax} % \cs{file_get_timestamp:nN} \Arg{file name} \meta{str var} % \end{syntax} % Searches for \meta{file name} using the current \TeX{} search % path and the additional paths controlled by \cs{file_path_include:n}. % If found, sets the \meta{str var} to the modification timestamp of % the file in the form |D:|\meta{year}\meta{month}\meta{day}\meta{hour}^^A % \meta{minute}\meta{second}\meta{offset}, where the latter may be |Z| % (UTC) or \meta{plus-minus}\meta{hours}|'|\meta{minutes}|'|. % Where the file is not found, the \meta{str var} will be empty. % \begin{texnote} % The \XeTeX{} engine provides no way to implement this function. % \end{texnote} % \end{function} % % \begin{function}[added = 2014-07-02]{\file_if_exist_input:n, \file_if_exist_input:nF} % \begin{syntax} % \cs{file_if_exist_input:n} \Arg{file name} % \cs{file_if_exist_input:nF} \Arg{file name} \Arg{false code} % \end{syntax} % Searches for \meta{file name} using the current \TeX{} search % path and the additional paths controlled by % \cs{file_path_include:n}. If found then % reads in the file as additional \LaTeX{} source as described for % \cs{file_input:n}, otherwise inserts the \meta{false code}. % Note that these functions do not raise % an error if the file is not found, in contrast to \cs{file_input:n}. % \end{function} % % \begin{function}[added = 2017-07-07]{\file_input_stop:} % \begin{syntax} % \cs{file_input_stop:} % \end{syntax} % Ends the reading of a file started by \cs{file_input:n} or similar before % the end of the file is reached. Where the file reading is being terminated % due to an error, \cs{msg_critical:nn(nn)} should be preferred. % \begin{texnote} % This function must be used on a line on its own: \TeX{} reads files % line-by-line and so any additional tokens in the \enquote{current} line % will still be read. % % This is also true if the function is hidden inside another function % (which will be the normal case), i.e., all tokens on the same line % in the source file are still processed. Putting it on a line by itself % in the definition doesn't help as it is the line where it is used that % counts! % \end{texnote} % \end{function} % % \section{Additions to \pkg{l3int}} % % \begin{function}[EXP, added = 2016-12-06]{\int_rand:nn} % \begin{syntax} % \cs{int_rand:nn} \Arg{intexpr_1} \Arg{intexpr_2} % \end{syntax} % Evaluates the two \meta{integer expressions} and produces a % pseudo-random number between the two (with bounds included). This % is only available in \pdfTeX{} and \LuaTeX{}. % \end{function} % % \section{Additions to \pkg{l3msg}} % % In very rare cases it may be necessary to produce errors in an % expansion-only context. The functions in this section should only be % used if there is no alternative approach using \cs{msg_error:nnnnnn} % or other non-expandable commands from the previous section. Despite % having a similar interface as non-expandable messages, expandable % errors must be handled internally very differently from normal error % messages, as none of the tools to print to the terminal or the log % file are expandable. As a result, the message text and arguments are % not expanded, and messages must be very short (with default settings, % they are truncated after approximately 50 characters). It is % advisable to ensure that the message is understandable even when % truncated. 
Another particularity of expandable messages is that they % cannot be redirected or turned off by the user. % % \begin{function}[EXP, added = 2015-08-06] % { % \msg_expandable_error:nnnnnn , % \msg_expandable_error:nnnnn , % \msg_expandable_error:nnnn , % \msg_expandable_error:nnn , % \msg_expandable_error:nn , % \msg_expandable_error:nnffff , % \msg_expandable_error:nnfff , % \msg_expandable_error:nnff , % \msg_expandable_error:nnf , % } % \begin{syntax} % \cs{msg_expandable_error:nnnnnn} \Arg{module} \Arg{message} \Arg{arg one} \Arg{arg two} \Arg{arg three} \Arg{arg four} % \end{syntax} % Issues an \enquote{Undefined error} message from \TeX{} itself % using the undefined control sequence \cs{::error} then prints % \enquote{! \meta{module}: }\meta{error message}, which should be % short. With default settings, anything beyond approximately $60$ % characters long (or bytes in some engines) is cropped. A leading % space might be removed as well. % \end{function} % % \section{Additions to \pkg{l3prop}} % % \begin{function}[EXP]{\prop_count:N, \prop_count:c} % \begin{syntax} % \cs{prop_count:N} \meta{property list} % \end{syntax} % Leaves the number of key--value pairs in the \meta{property list} in % the input stream as an \meta{integer denotation}. % \end{function} % % \begin{function}[rEXP] % {\prop_map_tokens:Nn, \prop_map_tokens:cn} % \begin{syntax} % \cs{prop_map_tokens:Nn} \meta{property list} \Arg{code} % \end{syntax} % Analogue of \cs{prop_map_function:NN} which maps several tokens % instead of a single function. The \meta{code} receives each % key--value pair in the \meta{property list} as two trailing brace % groups. For instance, % \begin{verbatim} % \prop_map_tokens:Nn \l_my_prop { \str_if_eq:nnT { mykey } } % \end{verbatim} % expands to the value corresponding to \texttt{mykey}: for each % pair in |\l_my_prop| the function \cs{str_if_eq:nnT} receives % \texttt{mykey}, the \meta{key} and the \meta{value} as its three % arguments. For that specific task, \cs{prop_item:Nn} is faster. % \end{function} % % \begin{function}[EXP, added = 2016-12-06] % {\prop_rand_key_value:N, \prop_rand_key_value:c} % \begin{syntax} % \cs{prop_rand_key_value:N} \meta{prop~var} % \end{syntax} % Selects a pseudo-random key--value pair in the \meta{property list} % and returns \Arg{key}\Arg{value}. If the \meta{property list} is % empty the result is empty. This is only available in \pdfTeX{} and % \LuaTeX{}. % \begin{texnote} % The result is returned within the \tn{unexpanded} % primitive (\cs{exp_not:n}), which means that the \meta{value} % does not expand further when appearing in an \texttt{x}-type % argument expansion. % \end{texnote} % \end{function} % % \section{Additions to \pkg{l3seq}} % % \begin{function}[rEXP] % { % \seq_mapthread_function:NNN, \seq_mapthread_function:NcN, % \seq_mapthread_function:cNN, \seq_mapthread_function:ccN % } % \begin{syntax} % \cs{seq_mapthread_function:NNN} \meta{seq_1} \meta{seq_2} \meta{function} % \end{syntax} % Applies \meta{function} to every pair of items % \meta{seq_1-item}--\meta{seq_2-item} from the two sequences, returning % items from both sequences from left to right. The \meta{function} % receives two \texttt{n}-type arguments for each iteration. The mapping % terminates when % the end of either sequence is reached (\emph{i.e.}~whichever sequence has % fewer items determines how many iterations % occur). 
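% As a brief sketch, assuming a (hypothetical) user-defined two-argument
% function \cs[no-index]{my_pair:nn}, the code
% \begin{verbatim}
% \seq_set_from_clist:Nn \l_tmpa_seq { a , b , c }
% \seq_set_from_clist:Nn \l_tmpb_seq { 1 , 2 }
% \seq_mapthread_function:NNN \l_tmpa_seq \l_tmpb_seq \my_pair:nn
% \end{verbatim}
% applies \cs[no-index]{my_pair:nn} to |{a}| |{1}| and then to |{b}| |{2}|:
% the third item of the first sequence is not used, as the second
% sequence has only two items.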
% \end{function} % % \begin{function}{\seq_set_filter:NNn, \seq_gset_filter:NNn} % \begin{syntax} % \cs{seq_set_filter:NNn} \meta{sequence_1} \meta{sequence_2} \Arg{inline boolexpr} % \end{syntax} % Evaluates the \meta{inline boolexpr} for every \meta{item} stored % within the \meta{sequence_2}. The \meta{inline boolexpr} % receives the \meta{item} as |#1|. The sequence of all \meta{items} % for which the \meta{inline boolexpr} evaluated to \texttt{true} % is assigned to \meta{sequence_1}. % \begin{texnote} % Contrarily to other mapping functions, \cs{seq_map_break:} cannot % be used in this function, and would lead to low-level \TeX{} errors. % \end{texnote} % \end{function} % % \begin{function}[added = 2011-12-22] % {\seq_set_map:NNn, \seq_gset_map:NNn} % \begin{syntax} % \cs{seq_set_map:NNn} \meta{sequence_1} \meta{sequence_2} \Arg{inline function} % \end{syntax} % Applies \meta{inline function} to every \meta{item} stored % within the \meta{sequence_2}. The \meta{inline function} should % consist of code which will receive the \meta{item} as |#1|. % The sequence resulting from \texttt{x}-expanding % \meta{inline function} applied to each \meta{item} % is assigned to \meta{sequence_1}. As such, the code % in \meta{inline function} should be expandable. % \begin{texnote} % Contrarily to other mapping functions, \cs{seq_map_break:} cannot % be used in this function, and would lead to low-level \TeX{} errors. % \end{texnote} % \end{function} % % \begin{function}[EXP, added = 2016-12-06]{\seq_rand_item:N, \seq_rand_item:c} % \begin{syntax} % \cs{seq_rand_item:N} \meta{seq~var} % \end{syntax} % Selects a pseudo-random item of the \meta{sequence}. If the % \meta{sequence} is empty the result is empty. This is only % available in \pdfTeX{} and \LuaTeX{}. % \begin{texnote} % The result is returned within the \tn{unexpanded} % primitive (\cs{exp_not:n}), which means that the \meta{item} % does not expand further when appearing in an \texttt{x}-type % argument expansion. % \end{texnote} % \end{function} % % \section{Additions to \pkg{l3skip}} % % \begin{function}{\skip_split_finite_else_action:nnNN} % \begin{syntax} % \cs{skip_split_finite_else_action:nnNN} \Arg{skipexpr} \Arg{action} % ~~\meta{dimen_1} \meta{dimen_2} % \end{syntax} % Checks if the \meta{skipexpr} contains finite glue. If it does then it % assigns % \meta{dimen_1} the stretch component and \meta{dimen_2} the shrink % component. If % it contains infinite glue set \meta{dimen_1} and \meta{dimen_2} to $0$\,pt % and place |#2| into the input stream: this is usually an error or % warning message of some sort. % \end{function} % % \section{Additions to \pkg{l3sys}} % % \begin{function}[added = 2017-05-27, EXP, pTF]{\sys_if_rand_exist:} % \begin{syntax} % \cs{sys_if_rand_exist_p:} % \cs{sys_if_rand_exist:TF} \Arg{true code} \Arg{false code} % \end{syntax} % Tests if the engine has a pseudo-random number generator. Currently % this is the case in \pdfTeX{} and \LuaTeX{}. % \end{function} % % \begin{function}[added = 2017-05-27, EXP]{\sys_rand_seed:} % \begin{syntax} % \cs{sys_rand_seed:} % \end{syntax} % Expands to the current value of the engine's random seed, a % non-negative integer. In engines without random number support this % expands to $0$. % \end{function} % % \begin{function}[added = 2017-05-27]{\sys_gset_rand_seed:n} % \begin{syntax} % \cs{sys_gset_rand_seed:n} \Arg{intexpr} % \end{syntax} % Sets the seed for the engine's pseudo-random number generator to the % \meta{integer expression}. The assignment is global. 
This random % seed affects all \cs[no-index]{\ldots{}_rand} functions (such as % \cs{int_rand:nn} or \cs{clist_rand_item:n}) as well as other % packages relying on the engine's random number generator. Currently % only the absolute value of the seed is used. In engines without % random number support this produces an error. % \end{function} % % \begin{variable}[added = 2017-05-27]{\c_sys_shell_escape_int} % This variable exposes the internal triple of the shell escape % status. The possible values are % \begin{description} % \item[0] Shell escape is disabled % \item[1] Unrestricted shell escape is enabled % \item[2] Restricted shell escape is enabled % \end{description} % \end{variable} % % \begin{function}[added = 2017-05-27, EXP, pTF]{\sys_if_shell:} % \begin{syntax} % \cs{sys_if_shell_p:} % \cs{sys_if_shell:TF} \Arg{true code} \Arg{false code} % \end{syntax} % Performs a check for whether shell escape is enabled. This % returns true if either of restricted or unrestricted shell escape % is enabled. % \end{function} % % \begin{function}[added = 2017-05-27, EXP, pTF]{\sys_if_shell_unrestricted:} % \begin{syntax} % \cs{sys_if_shell_unrestricted_p:} % \cs{sys_if_shell_unrestricted:TF} \Arg{true code} \Arg{false code} % \end{syntax} % Performs a check for whether \emph{unrestricted} shell escape is % enabled. % \end{function} % % \begin{function}[added = 2017-05-27, EXP, pTF]{\sys_if_shell_restricted:} % \begin{syntax} % \cs{sys_if_shell_restricted_p:} % \cs{sys_if_shell_restricted:TF} \Arg{true code} \Arg{false code} % \end{syntax} % Performs a check for whether \emph{restricted} shell escape is % enabled. This returns false if unrestricted shell escape is % enabled. Unrestricted shell escape is not considered a superset % of restricted shell escape in this case. To find whether any % shell escape is enabled use \cs{sys_if_shell:}. % \end{function} % % \begin{function}[added = 2017-05-27]{\sys_shell_now:n, \sys_shell_now:x} % \begin{syntax} % \cs{sys_shell_now:n} \Arg{tokens} % \end{syntax} % Execute \meta{tokens} through shell escape immediately. % \end{function} % % \begin{function}[added = 2017-05-27]{\sys_shell_shipout:n, \sys_shell_shipout:x} % \begin{syntax} % \cs{sys_shell_shipout:n} \Arg{tokens} % \end{syntax} % Execute \meta{tokens} through shell escape at shipout. % \end{function} % % \section{Additions to \pkg{l3tl}} % % \begin{function}[EXP,pTF]{\tl_if_single_token:n} % \begin{syntax} % \cs{tl_if_single_token_p:n} \Arg{token list} % \cs{tl_if_single_token:nTF} \Arg{token list} \Arg{true code} \Arg{false code} % \end{syntax} % Tests if the token list consists of exactly one token, \emph{i.e.}~is % either a single space character or a single \enquote{normal} token. % Token groups (|{|\ldots|}|) are not single tokens. % \end{function} % % \begin{function}[EXP]{\tl_reverse_tokens:n} % \begin{syntax} % \cs{tl_reverse_tokens:n} \Arg{tokens} % \end{syntax} % This function, which works directly on \TeX{} tokens, reverses % the order of the \meta{tokens}: the first becomes the last and % the last becomes first. Spaces are preserved. The reversal % also operates within brace groups, but the braces themselves % are not exchanged, as this would lead to an unbalanced token % list. For instance, \cs{tl_reverse_tokens:n} |{a~{b()}}| % leaves |{)(b}~a| in the input stream. This function requires % two steps of expansion. 
% \begin{texnote} % The result is returned within the \tn{unexpanded} % primitive (\cs{exp_not:n}), which means that the token % list does not expand further when appearing in an \texttt{x}-type % argument expansion. % \end{texnote} % \end{function} % % \begin{function}[EXP]{\tl_count_tokens:n} % \begin{syntax} % \cs{tl_count_tokens:n} \Arg{tokens} % \end{syntax} % Counts the number of \TeX{} tokens in the \meta{tokens} and leaves % this information in the input stream. Every token, including spaces and % braces, contributes one to the total; thus, for instance, the token count of % |a~{bc}| is $6$. % This function requires three expansions, % giving an \meta{integer denotation}. % \end{function} % % \begin{function}[EXP, added = 2014-06-30, updated = 2016-01-12] % { % \tl_lower_case:n, \tl_upper_case:n, \tl_mixed_case:n, % \tl_lower_case:nn, \tl_upper_case:nn, \tl_mixed_case:nn % } % \begin{syntax} % \cs{tl_upper_case:n} \Arg{tokens} % \cs{tl_upper_case:nn} \Arg{language} \Arg{tokens} % \end{syntax} % These functions are intended to be applied to input which may be % regarded broadly as \enquote{text}. They traverse the \meta{tokens} and % change the case of characters as discussed below. The character code of % the characters replaced may be arbitrary: the replacement characters % have standard document-level category codes ($11$ for letters, $12$ for % letter-like characters which can also be case-changed). Begin-group and % end-group characters in the \meta{tokens} are normalized and become |{| % and |}|, respectively. % % Importantly, notice that these functions are intended for working with % user text for typesetting. For case changing programmatic data, see the % \pkg{l3str} module and the discussion there of \cs{str_lower_case:n}, % \cs{str_upper_case:n} and \cs{str_fold_case:n}. % \end{function} % % The functions perform expansion on the input in most cases. In particular, % input in the form of token lists or expandable functions is expanded % \emph{unless} it falls within one of the special handling classes described % below. This expansion approach means that in general the result of case % changing matches the \enquote{natural} outcome expected from a % \enquote{functional} approach to case modification. For example % \begin{verbatim} % \tl_set:Nn \l_tmpa_tl { hello } % \tl_upper_case:n { \l_tmpa_tl \c_space_tl world } % \end{verbatim} % produces % \begin{verbatim} % HELLO WORLD % \end{verbatim} % The expansion approach taken means that in package mode any \LaTeXe{} % \enquote{robust} commands which may appear in the input should be converted % to engine-protected versions using, for example, the \tn{robustify} command % from the \pkg{etoolbox} package. % % \begin{variable}{\l_tl_case_change_math_tl} % Case changing does not take place within math mode material, so for example % \begin{verbatim} % \tl_upper_case:n { Some~text~$y = mx + c$~with~{Braces} } % \end{verbatim} % becomes % \begin{verbatim} % SOME TEXT $y = mx + c$ WITH {BRACES} % \end{verbatim} % Material inside math mode is left entirely unchanged: in particular, no % expansion is undertaken. % % Detection of math mode is controlled by the list of tokens in % \cs{l_tl_case_change_math_tl}, which should be in open--close pairs. In % package mode the standard setting is % \begin{verbatim} % $ $ \( \) % \end{verbatim} % % Note that while expansion occurs when searching the text, it does not % apply to math mode material (which should be unaffected by case changing).
% As such, whilst the opening token for math mode may be \enquote{hidden} % inside a command/macro, the closing one cannot be, as it is % searched for in math mode. Typically, in the types of \enquote{text} % to which the case changing functions are intended to apply, this should not be % an issue. % \end{variable} % % \begin{variable}{\l_tl_case_change_exclude_tl} % Case changing can be prevented by using any command on the list % \cs{l_tl_case_change_exclude_tl}. Each entry should be a function % to be followed by one argument: the latter will be preserved as-is % with no expansion. Thus, for example, following % \begin{verbatim} % \tl_put_right:Nn \l_tl_case_change_exclude_tl { \NoChangeCase } % \end{verbatim} % the input % \begin{verbatim} % \tl_upper_case:n % { Some~text~$y = mx + c$~with~\NoChangeCase {Protection} } % \end{verbatim} % will result in % \begin{verbatim} % SOME TEXT $y = mx + c$ WITH \NoChangeCase {Protection} % \end{verbatim} % Notice that the case changing mapping preserves the inclusion of % the escape functions: it is left to other code to provide suitable % definitions (typically equivalent to \cs{use:n}). In particular, the % result of case changing is returned protected by \cs{exp_not:n}. % % When used with \LaTeXe{} the commands |\cite|, |\ensuremath|, |\label| % and |\ref| are automatically included in the list for exclusion from % case changing. % \end{variable} % % \begin{variable}{\l_tl_case_change_accents_tl} % This list specifies accent commands which should be left unexpanded % in the output. This allows for example % \begin{verbatim} % \tl_upper_case:n { \" { a } } % \end{verbatim} % to yield % \begin{verbatim} % \" { A } % \end{verbatim} % irrespective of the expandability of |\"|. % % The standard contents of this variable are |\"|, |\'|, |\.|, |\^|, |\`|, % |\~|, |\c|, |\H|, |\k|, |\r|, |\t|, |\u| and |\v|. % \end{variable} % % \enquote{Mixed} case conversion may be regarded informally as converting the % first character of the \meta{tokens} to upper case and the rest to lower % case. However, the process is more complex than this as there are some % situations where a single lower case character maps to a special form, for % example \texttt{ij} in Dutch which becomes \texttt{IJ}. As such, % \cs[index=tl_mixed_case:n]{tl_mixed_case:n(n)} % implement a more sophisticated mapping which accounts % for this and for modifying accents on the first letter. Spaces at the start % of the \meta{tokens} are ignored when finding the first \enquote{letter} for % conversion. % \begin{verbatim} % \tl_mixed_case:n { hello~WORLD } % => "Hello world" % \tl_mixed_case:n { ~hello~WORLD } % => " Hello world" % \tl_mixed_case:n { {hello}~WORLD } % => "{Hello} world" % \end{verbatim} % When finding the first \enquote{letter} for this process, any content in % math mode or covered by \cs{l_tl_case_change_exclude_tl} is ignored. % % (Note that the Unicode Consortium describe this as \enquote{title case}, but % that in English title case applies on a word-by-word basis. The % \enquote{mixed} case implemented here is a lower level concept needed for % both \enquote{title} and \enquote{sentence} casing of text.) % % \begin{variable}{\l_tl_mixed_case_ignore_tl} % The list of characters to ignore when searching for the first % \enquote{letter} in mixed-casing is determined by % \cs{l_tl_mixed_case_ignore_tl}. This has the standard setting % \begin{verbatim} % ( [ { ` - % \end{verbatim} % where comparisons are made on a character basis.
% \end{variable} % % As is generally true for \pkg{expl3}, these functions are designed to % work with Unicode input only. As such, UTF-8 input is assumed for % \emph{all} engines. When used with \XeTeX{} or \LuaTeX{}, a full range of % Unicode transformations is enabled. Specifically, the standard mappings % here follow those defined by the \href{http://www.unicode.org}^^A % {Unicode Consortium} in \texttt{UnicodeData.txt} and % \texttt{SpecialCasing.txt}. In the case of $8$-bit engines, mappings % are provided for characters which can be represented in output typeset % using the |T1| font encoding. Thus, for example, |ä| can be case-changed % using \pdfTeX{}. For \pTeX{}, only the ASCII range is covered, as the % engine treats input outside of this range as east Asian. % % Context-sensitive mappings are enabled: language-dependent cases are % discussed below. Context detection expands input but treats any % unexpandable control sequences as \enquote{failures} to match a context. % % Language-sensitive conversions are enabled using the \meta{language} % argument, and follow Unicode Consortium guidelines. Currently, the % languages recognised for special handling are as follows. % \begin{itemize} % \item Azeri and Turkish (\texttt{az} and \texttt{tr}). % The case pairs I/i-dotless and I-dot/i are activated for these % languages. The combining dot mark is removed when lower % casing I-dot and introduced when upper casing i-dotless. % \item German (\texttt{de-alt}). % An alternative mapping for German in which the lower case % \emph{Eszett} maps to a \emph{gro\ss{}es Eszett}. % \item Lithuanian (\texttt{lt}). % The lower case letters i and j should retain a dot above when the % accents grave, acute or tilde are present. This is implemented for % lower casing of the relevant upper case letters both when input as % single Unicode codepoints and when using combining accents. The % combining dot is removed when upper casing in these cases. Note that % \emph{only} the accents used in Lithuanian are covered: the behaviour % of other accents is not modified. % \item Dutch (\texttt{nl}). % Capitalisation of \texttt{ij} at the beginning of mixed-cased % input produces \texttt{IJ} rather than \texttt{Ij}. The output % retains two separate letters, so this transformation \emph{is} % available using \pdfTeX{}. % \end{itemize} % % Creating additional context-sensitive mappings requires knowledge % of the underlying mapping implementation used here. The team are happy % to add these to the kernel where they are well-documented % (\emph{e.g.}~in Unicode Consortium or relevant government publications). % % \begin{function}[added = 2014-06-25] % { % \tl_set_from_file:Nnn, \tl_set_from_file:cnn, % \tl_gset_from_file:Nnn, \tl_gset_from_file:cnn % } % \begin{syntax} % \cs{tl_set_from_file:Nnn} \meta{tl} \Arg{setup} \Arg{filename} % \end{syntax} % Defines \meta{tl} to the contents of \meta{filename}. % Category codes may need to be set appropriately via the \meta{setup} % argument. % \end{function} % % \begin{function}[added = 2014-06-25] % { % \tl_set_from_file_x:Nnn, \tl_set_from_file_x:cnn, % \tl_gset_from_file_x:Nnn, \tl_gset_from_file_x:cnn % } % \begin{syntax} % \cs{tl_set_from_file_x:Nnn} \meta{tl} \Arg{setup} \Arg{filename} % \end{syntax} % Defines \meta{tl} to the contents of \meta{filename}, expanding % the contents of the file as it is read. Category codes and other % definitions may need to be set appropriately via the \meta{setup} % argument.
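% As a sketch only (the file name and its contents are hypothetical): if
% the file |template.txt| contains the text |Dear \name,| then
% \begin{verbatim}
% \tl_set_from_file_x:Nnn \l_tmpa_tl
%   { \cs_set:Npn \name { Alice } }
%   { template.txt }
% \end{verbatim}
% leaves |Dear Alice,| in \cs{l_tmpa_tl}, as the expandable |\name| is
% expanded while the file is read.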
% \end{function} % % \begin{function}[EXP, added = 2016-12-06] % {\tl_rand_item:N, \tl_rand_item:c, \tl_rand_item:n} % \begin{syntax} % \cs{tl_rand_item:N} \meta{tl~var} % \cs{tl_rand_item:n} \Arg{token list} % \end{syntax} % Selects a pseudo-random item of the \meta{token list}. If the % \meta{token list} is blank, the result is empty. This is only % available in \pdfTeX{} and \LuaTeX{}. % \begin{texnote} % The result is returned within the \tn{unexpanded} % primitive (\cs{exp_not:n}), which means that the \meta{item} % does not expand further when appearing in an \texttt{x}-type % argument expansion. % \end{texnote} % \end{function} % % \begin{function}[EXP, added = 2017-02-17, updated = 2017-07-15]{\tl_range:nnn} % \begin{syntax} % \cs{tl_range:Nnn} \meta{tl~var} \Arg{start index} \Arg{end index} % \cs{tl_range:nnn} \Arg{token list} \Arg{start index} \Arg{end index} % \end{syntax} % Leaves in the input stream the items from the \meta{start index} to the % \meta{end index} inclusive. Spaces and braces are preserved between % the items returned (but never at either end of the list). Positive % \meta{indices} are counted % from the start of the \meta{token list}, $1$~being the first item, and % negative \meta{indices} are counted from the end of the token list, % $-1$~being the last item. If either of \meta{start index} or % \meta{end index} is~$0$, the result is empty. For instance, % \begin{verbatim} % \iow_term:x { \tl_range:nnn { abcd~{e{}}f } { 2 } { 5 } } % \iow_term:x { \tl_range:nnn { abcd~{e{}}f } { -4 } { -1 } } % \iow_term:x { \tl_range:nnn { abcd~{e{}}f } { -2 } { -1 } } % \iow_term:x { \tl_range:nnn { abcd~{e{}}f } { 0 } { -1 } } % \end{verbatim} % prints \verb*|bcd {e{}}|, \verb*|cd {e{}}f|, \verb*|{e{}}f| and an empty % line to the terminal. The \meta{start index} must always be smaller than % or equal to the \meta{end index}: if this is not the case then no output % is generated. Thus % \begin{verbatim} % \iow_term:x { \tl_range:nnn { abcd~{e{}}f } { 5 } { 2 } } % \iow_term:x { \tl_range:nnn { abcd~{e{}}f } { -1 } { -4 } } % \end{verbatim} % both yield empty token lists. For improved performance, see % \cs{tl_range_braced:nnn} and \cs{tl_range_unbraced:nnn}. % \begin{texnote} % The result is returned within the \tn{unexpanded} % primitive (\cs{exp_not:n}), which means that the \meta{item} % does not expand further when appearing in an \texttt{x}-type % argument expansion. % \end{texnote} % \end{function} % % \begin{function}[EXP, added = 2017-07-15] % { % \tl_range_braced:Nnn, \tl_range_braced:cnn, \tl_range_braced:nnn, % \tl_range_unbraced:Nnn, \tl_range_unbraced:cnn, \tl_range_unbraced:nnn % } % \begin{syntax} % \cs{tl_range_braced:Nnn} \meta{tl~var} \Arg{start index} \Arg{end index} % \cs{tl_range_braced:nnn} \Arg{token list} \Arg{start index} \Arg{end index} % \cs{tl_range_unbraced:Nnn} \meta{tl~var} \Arg{start index} \Arg{end index} % \cs{tl_range_unbraced:nnn} \Arg{token list} \Arg{start index} \Arg{end index} % \end{syntax} % Leaves in the input stream the items from the \meta{start index} to % the \meta{end index} inclusive, using the same indexing as % \cs{tl_range:nnn}. Spaces are ignored. Regardless of whether items % appear with or without braces in the \meta{token list}, the % \cs{tl_range_braced:nnn} function wraps each item in braces, while % \cs{tl_range_unbraced:nnn} does not (overall it removes an outer set % of braces). 
For instance, % \begin{verbatim} % \iow_term:x { \tl_range_braced:nnn { abcd~{e{}}f } { 2 } { 5 } } % \iow_term:x { \tl_range_braced:nnn { abcd~{e{}}f } { -4 } { -1 } } % \iow_term:x { \tl_range_braced:nnn { abcd~{e{}}f } { -2 } { -1 } } % \iow_term:x { \tl_range_braced:nnn { abcd~{e{}}f } { 0 } { -1 } } % \end{verbatim} % prints \verb*|{b}{c}{d}{e{}}|, \verb*|{c}{d}{e{}}{f}|, \verb*|{e{}}{f}|, and an empty % line to the terminal, while % \begin{verbatim} % \iow_term:x { \tl_range_unbraced:nnn { abcd~{e{}}f } { 2 } { 5 } } % \iow_term:x { \tl_range_unbraced:nnn { abcd~{e{}}f } { -4 } { -1 } } % \iow_term:x { \tl_range_unbraced:nnn { abcd~{e{}}f } { -2 } { -1 } } % \iow_term:x { \tl_range_unbraced:nnn { abcd~{e{}}f } { 0 } { -1 } } % \end{verbatim} % prints \verb*|bcde{}|, \verb*|cde{}f|, \verb*|e{}f|, and an empty % line to the terminal. Because braces are removed, the result of % \cs{tl_range_unbraced:nnn} may have a different number of items than % for \cs{tl_range:nnn} or \cs{tl_range_braced:nnn}. In cases where % preserving spaces is important, consider the slower function % \cs{tl_range:nnn}. % \begin{texnote} % The result is returned within the \tn{unexpanded} % primitive (\cs{exp_not:n}), which means that the \meta{item} % does not expand further when appearing in an \texttt{x}-type % argument expansion. % \end{texnote} % \end{function} % % \section{Additions to \pkg{l3token}} % % \begin{variable}[added = 2017-08-07]{\c_catcode_active_space_tl} % Token list containing one character with category code $13$ % (\enquote{active}), and character code $32$ (space). % \end{variable} % % \begin{function}[TF, updated = 2012-12-20]{\peek_N_type:} % \begin{syntax} % \cs{peek_N_type:TF} \Arg{true code} \Arg{false code} % \end{syntax} % Tests if the next \meta{token} in the input stream can be safely % grabbed as an \texttt{N}-type argument. The test is \meta{false} % if the next \meta{token} is either an explicit or implicit % begin-group or end-group token (with any character code), or % an explicit or implicit space character (with character code $32$ % and category code $10$), or an outer token (never used in \LaTeX3), % and \meta{true} in all other cases. % Note that a \meta{true} result ensures that the next \meta{token} is % a valid \texttt{N}-type argument. However, if the next \meta{token} % is, for instance, \cs{c_space_token}, the test takes the % \meta{false} branch, even though the next \meta{token} is in fact % a valid \texttt{N}-type argument. The \meta{token} is left % in the input stream after the \meta{true code} or \meta{false code} % (as appropriate to the result of the test). % \end{function} % % \end{documentation} % % \begin{implementation} % % \section{\pkg{l3candidates} Implementation} % % \begin{macrocode} %<*initex|package> % \end{macrocode} % % \subsection{Additions to \pkg{l3basics}} % % \begin{macro}{\mode_leave_vertical:} % The approach here is different to that used by \LaTeXe{} or plain \TeX{}, % which unbox a void box to force horizontal mode. That inserts the % \tn{everypar} tokens \emph{before} the re-inserted unboxing tokens. The % approach here uses either the \tn{quitvmode} primitive or the equivalent % protected macro. In vertical mode, the \tn{indent} primitive is inserted: % this will switch to horizontal mode and insert \tn{everypar} tokens and % nothing else. Unlike the \LaTeXe{} version, the availability of \eTeX{} % means that a mode test can be used, for example, at the start of an % \tn{halign}.
The \tn{quitvmode} primitive essentially wraps the same % code up at the engine level. % \begin{macrocode} \cs_new_protected:Npx \mode_leave_vertical: { \cs_if_exist:NTF \pdftex_quitvmode:D { \pdftex_quitvmode:D } { \exp_not:n { \if_mode_vertical: \exp_after:wN \tex_indent:D \fi: } } } % \end{macrocode} % \end{macro} % % \subsection{Additions to \pkg{l3box}} % % \begin{macrocode} %<@@=box> % \end{macrocode} % % \subsection{Viewing part of a box} % % \begin{macro}{\box_clip:N, \box_clip:c} % A wrapper around the driver-dependent code. % \begin{macrocode} \cs_new_protected:Npn \box_clip:N #1 { \hbox_set:Nn #1 { \__driver_box_use_clip:N #1 } } \cs_generate_variant:Nn \box_clip:N { c } % \end{macrocode} % \end{macro} % % \begin{macro}{\box_trim:Nnnnn, \box_trim:cnnnn} % Trimming from the left- and right-hand edges of the box is easy: kern the % appropriate parts off each side. % \begin{macrocode} \__debug_patch_args:nNNpn { {#1} { (#2) } {#3} { (#4) } {#5} } \cs_new_protected:Npn \box_trim:Nnnnn #1#2#3#4#5 { \hbox_set:Nn \l_@@_internal_box { \tex_kern:D -\__dim_eval:w #2 \__dim_eval_end: \box_use:N #1 \tex_kern:D -\__dim_eval:w #4 \__dim_eval_end: } % \end{macrocode} % For the height and depth, there is a need to watch the baseline is % respected. Material always has to stay on the correct side, so trimming % has to check that there is enough material to trim. First, the bottom % edge. If there is enough depth, simply set the depth, or if not move % down so the result is zero depth. \cs{box_move_down:nn} is used in both % cases so the resulting box always contains a \tn{lower} primitive. % The internal box is used here as it allows safe use of \cs{box_set_dp:Nn}. % \begin{macrocode} \dim_compare:nNnTF { \box_dp:N #1 } > {#3} { \hbox_set:Nn \l_@@_internal_box { \box_move_down:nn \c_zero_dim { \box_use:N \l_@@_internal_box } } \box_set_dp:Nn \l_@@_internal_box { \box_dp:N #1 - (#3) } } { \hbox_set:Nn \l_@@_internal_box { \box_move_down:nn { (#3) - \box_dp:N #1 } { \box_use:N \l_@@_internal_box } } \box_set_dp:Nn \l_@@_internal_box \c_zero_dim } % \end{macrocode} % Same thing, this time from the top of the box. % \begin{macrocode} \dim_compare:nNnTF { \box_ht:N \l_@@_internal_box } > {#5} { \hbox_set:Nn \l_@@_internal_box { \box_move_up:nn \c_zero_dim { \box_use:N \l_@@_internal_box } } \box_set_ht:Nn \l_@@_internal_box { \box_ht:N \l_@@_internal_box - (#5) } } { \hbox_set:Nn \l_@@_internal_box { \box_move_up:nn { (#5) - \box_ht:N \l_@@_internal_box } { \box_use:N \l_@@_internal_box } } \box_set_ht:Nn \l_@@_internal_box \c_zero_dim } \box_set_eq:NN #1 \l_@@_internal_box } \cs_generate_variant:Nn \box_trim:Nnnnn { c } % \end{macrocode} % \end{macro} % % \begin{macro}{\box_viewport:Nnnnn, \box_viewport:cnnnn} % The same general logic as for the trim operation, but with absolute % dimensions. As a result, there are some things to watch out for in the % vertical direction. 
% \begin{macrocode} \__debug_patch_args:nNNpn { {#1} { (#2) } {#3} { (#4) } {#5} } \cs_new_protected:Npn \box_viewport:Nnnnn #1#2#3#4#5 { \hbox_set:Nn \l_@@_internal_box { \tex_kern:D -\__dim_eval:w #2 \__dim_eval_end: \box_use:N #1 \tex_kern:D \__dim_eval:w #4 - \box_wd:N #1 \__dim_eval_end: } \dim_compare:nNnTF {#3} < \c_zero_dim { \hbox_set:Nn \l_@@_internal_box { \box_move_down:nn \c_zero_dim { \box_use:N \l_@@_internal_box } } \box_set_dp:Nn \l_@@_internal_box { -\dim_eval:n {#3} } } { \hbox_set:Nn \l_@@_internal_box { \box_move_down:nn {#3} { \box_use:N \l_@@_internal_box } } \box_set_dp:Nn \l_@@_internal_box \c_zero_dim } \dim_compare:nNnTF {#5} > \c_zero_dim { \hbox_set:Nn \l_@@_internal_box { \box_move_up:nn \c_zero_dim { \box_use:N \l_@@_internal_box } } \box_set_ht:Nn \l_@@_internal_box { (#5) \dim_compare:nNnT {#3} > \c_zero_dim { - (#3) } } } { \hbox_set:Nn \l_@@_internal_box { \box_move_up:nn { -\dim_eval:n {#5} } { \box_use:N \l_@@_internal_box } } \box_set_ht:Nn \l_@@_internal_box \c_zero_dim } \box_set_eq:NN #1 \l_@@_internal_box } \cs_generate_variant:Nn \box_viewport:Nnnnn { c } % \end{macrocode} % \end{macro} % % \subsection{Additions to \pkg{l3clist}} % % \begin{macrocode} %<@@=clist> % \end{macrocode} % % \begin{macro}{\clist_rand_item:n, \clist_rand_item:N, \clist_rand_item:c} % \begin{macro}[aux]{\@@_rand_item:nn} % The |N|-type function is not implemented through the |n|-type % function for efficiency: for instance comma-list variables do not % require space-trimming of their items. Even testing for emptyness % of an |n|-type comma-list is slow, so we count items first and use % that both for the emptyness test and the pseudo-random integer. % Importantly, \cs{clist_item:Nn} and \cs{clist_item:nn} only evaluate % their argument once. % \begin{macrocode} \cs_new:Npn \clist_rand_item:n #1 { \exp_args:Nf \@@_rand_item:nn { \clist_count:n {#1} } {#1} } \cs_new:Npn \@@_rand_item:nn #1#2 { \int_compare:nNnF {#1} = 0 { \clist_item:nn {#2} { \int_rand:nn { 1 } {#1} } } } \cs_new:Npn \clist_rand_item:N #1 { \clist_if_empty:NF #1 { \clist_item:Nn #1 { \int_rand:nn { 1 } { \clist_count:N #1 } } } } \cs_generate_variant:Nn \clist_rand_item:N { c } % \end{macrocode} % \end{macro} % \end{macro} % % \subsection{Additions to \pkg{l3coffins}} % % \begin{macrocode} %<@@=coffin> % \end{macrocode} % % \subsection{Rotating coffins} % % \begin{variable}{\l_@@_sin_fp} % \begin{variable}{\l_@@_cos_fp} % Used for rotations to get the sine and cosine values. % \begin{macrocode} \fp_new:N \l_@@_sin_fp \fp_new:N \l_@@_cos_fp % \end{macrocode} % \end{variable} % \end{variable} % % \begin{variable}{\l_@@_bounding_prop} % A property list for the bounding box of a coffin. This is only needed % during the rotation, so there is just the one. % \begin{macrocode} \prop_new:N \l_@@_bounding_prop % \end{macrocode} % \end{variable} % % \begin{variable}{\l_@@_bounding_shift_dim} % The shift of the bounding box of a coffin from the real content. % \begin{macrocode} \dim_new:N \l_@@_bounding_shift_dim % \end{macrocode} % \end{variable} % % \begin{variable}{\l_@@_left_corner_dim} % \begin{variable}{\l_@@_right_corner_dim} % \begin{variable}{\l_@@_bottom_corner_dim} % \begin{variable}{\l_@@_top_corner_dim} % These are used to hold maxima for the various corner values: these % thus define the minimum size of the bounding box after rotation. 
% \begin{macrocode} \dim_new:N \l_@@_left_corner_dim \dim_new:N \l_@@_right_corner_dim \dim_new:N \l_@@_bottom_corner_dim \dim_new:N \l_@@_top_corner_dim % \end{macrocode} % \end{variable} % \end{variable} % \end{variable} % \end{variable} % % \begin{macro}{\coffin_rotate:Nn, \coffin_rotate:cn} % Rotating a coffin requires several steps which can be conveniently % run together. The sine and cosine of the angle in degrees are % computed. This is then used to set \cs{l_@@_sin_fp} and % \cs{l_@@_cos_fp}, which are carried through unchanged for the rest % of the procedure. % \begin{macrocode} \cs_new_protected:Npn \coffin_rotate:Nn #1#2 { \fp_set:Nn \l_@@_sin_fp { sind ( #2 ) } \fp_set:Nn \l_@@_cos_fp { cosd ( #2 ) } % \end{macrocode} % The corners and poles of the coffin can now be rotated around the % origin. This is best achieved using mapping functions. % \begin{macrocode} \prop_map_inline:cn { l_@@_corners_ \__int_value:w #1 _prop } { \@@_rotate_corner:Nnnn #1 {##1} ##2 } \prop_map_inline:cn { l_@@_poles_ \__int_value:w #1 _prop } { \@@_rotate_pole:Nnnnnn #1 {##1} ##2 } % \end{macrocode} % The bounding box of the coffin needs to be rotated, and to do this % the corners have to be found first. They are then rotated in the same % way as the corners of the coffin material itself. % \begin{macrocode} \@@_set_bounding:N #1 \prop_map_inline:Nn \l_@@_bounding_prop { \@@_rotate_bounding:nnn {##1} ##2 } % \end{macrocode} % At this stage, there needs to be a calculation to find where the % corners of the content and the box itself will end up. % \begin{macrocode} \@@_find_corner_maxima:N #1 \@@_find_bounding_shift: \box_rotate:Nn #1 {#2} % \end{macrocode} % The correction of the box position itself takes place here. The idea % is that the bounding box for a coffin is tight up to the content, and % has the reference point at the bottom-left. The $x$-direction is % handled by moving the content by the difference in the positions of % the bounding box and the content left edge. The $y$-direction is % dealt with by moving the box down by any depth it has acquired. The % internal box is used here to allow for the next step. % \begin{macrocode} \hbox_set:Nn \l_@@_internal_box { \tex_kern:D \__dim_eval:w \l_@@_bounding_shift_dim - \l_@@_left_corner_dim \__dim_eval_end: \box_move_down:nn { \l_@@_bottom_corner_dim } { \box_use:N #1 } } % \end{macrocode} % If there have been any previous rotations then the size of the % bounding box will be bigger than the contents. This can be corrected % easily by setting the size of the box to the height and width of the % content. As this operation requires setting box dimensions and these % transcend grouping, the safe way to do this is to use the internal box % and to reset the result into the target box. % \begin{macrocode} \box_set_ht:Nn \l_@@_internal_box { \l_@@_top_corner_dim - \l_@@_bottom_corner_dim } \box_set_dp:Nn \l_@@_internal_box { 0 pt } \box_set_wd:Nn \l_@@_internal_box { \l_@@_right_corner_dim - \l_@@_left_corner_dim } \hbox_set:Nn #1 { \box_use:N \l_@@_internal_box } % \end{macrocode} % The final task is to move the poles and corners such that they are % back in alignment with the box reference point. 
% \begin{macrocode} \prop_map_inline:cn { l_@@_corners_ \__int_value:w #1 _prop } { \@@_shift_corner:Nnnn #1 {##1} ##2 } \prop_map_inline:cn { l_@@_poles_ \__int_value:w #1 _prop } { \@@_shift_pole:Nnnnnn #1 {##1} ##2 } } \cs_generate_variant:Nn \coffin_rotate:Nn { c } % \end{macrocode} % \end{macro} % % \begin{macro}{\@@_set_bounding:N} % The bounding box corners for a coffin are easy enough to find: this % is the same code as for the corners of the material itself, but % using a dedicated property list. % \begin{macrocode} \cs_new_protected:Npn \@@_set_bounding:N #1 { \prop_put:Nnx \l_@@_bounding_prop { tl } { { 0 pt } { \dim_eval:n { \box_ht:N #1 } } } \prop_put:Nnx \l_@@_bounding_prop { tr } { { \dim_eval:n { \box_wd:N #1 } } { \dim_eval:n { \box_ht:N #1 } } } \dim_set:Nn \l_@@_internal_dim { -\box_dp:N #1 } \prop_put:Nnx \l_@@_bounding_prop { bl } { { 0 pt } { \dim_use:N \l_@@_internal_dim } } \prop_put:Nnx \l_@@_bounding_prop { br } { { \dim_eval:n { \box_wd:N #1 } } { \dim_use:N \l_@@_internal_dim } } } % \end{macrocode} % \end{macro} % % \begin{macro}{\@@_rotate_bounding:nnn} % \begin{macro}{\@@_rotate_corner:Nnnn} % Rotating the position of the corner of the coffin is just a case % of treating this as a vector from the reference point. The same % treatment is used for the corners of the material itself and the % bounding box. % \begin{macrocode} \cs_new_protected:Npn \@@_rotate_bounding:nnn #1#2#3 { \@@_rotate_vector:nnNN {#2} {#3} \l_@@_x_dim \l_@@_y_dim \prop_put:Nnx \l_@@_bounding_prop {#1} { { \dim_use:N \l_@@_x_dim } { \dim_use:N \l_@@_y_dim } } } \cs_new_protected:Npn \@@_rotate_corner:Nnnn #1#2#3#4 { \@@_rotate_vector:nnNN {#3} {#4} \l_@@_x_dim \l_@@_y_dim \prop_put:cnx { l_@@_corners_ \__int_value:w #1 _prop } {#2} { { \dim_use:N \l_@@_x_dim } { \dim_use:N \l_@@_y_dim } } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\@@_rotate_pole:Nnnnnn} % Rotating a single pole simply means shifting the co-ordinate of % the pole and its direction. The rotation here is about the bottom-left % corner of the coffin. % \begin{macrocode} \cs_new_protected:Npn \@@_rotate_pole:Nnnnnn #1#2#3#4#5#6 { \@@_rotate_vector:nnNN {#3} {#4} \l_@@_x_dim \l_@@_y_dim \@@_rotate_vector:nnNN {#5} {#6} \l_@@_x_prime_dim \l_@@_y_prime_dim \@@_set_pole:Nnx #1 {#2} { { \dim_use:N \l_@@_x_dim } { \dim_use:N \l_@@_y_dim } { \dim_use:N \l_@@_x_prime_dim } { \dim_use:N \l_@@_y_prime_dim } } } % \end{macrocode} % \end{macro} % % \begin{macro}{\@@_rotate_vector:nnNN} % A rotation function, which needs only an input vector (as dimensions) % and an output space. The values \cs{l_@@_cos_fp} and % \cs{l_@@_sin_fp} should previously have been set up correctly. % Working this way means that the floating point work is kept to a % minimum: for any given rotation the sin and cosine values do no % change, after all. % \begin{macrocode} \cs_new_protected:Npn \@@_rotate_vector:nnNN #1#2#3#4 { \dim_set:Nn #3 { \fp_to_dim:n { \dim_to_fp:n {#1} * \l_@@_cos_fp - \dim_to_fp:n {#2} * \l_@@_sin_fp } } \dim_set:Nn #4 { \fp_to_dim:n { \dim_to_fp:n {#1} * \l_@@_sin_fp + \dim_to_fp:n {#2} * \l_@@_cos_fp } } } % \end{macrocode} % \end{macro} % % \begin{macro}{\@@_find_corner_maxima:N} % \begin{macro}[aux]{\@@_find_corner_maxima_aux:nn} % The idea here is to find the extremities of the content of the % coffin. This is done by looking for the smallest values for the bottom % and left corners, and the largest values for the top and right % corners. 
The values start at the maximum dimensions so that the % case where all are positive or all are negative works out correctly. % \begin{macrocode} \cs_new_protected:Npn \@@_find_corner_maxima:N #1 { \dim_set:Nn \l_@@_top_corner_dim { -\c_max_dim } \dim_set:Nn \l_@@_right_corner_dim { -\c_max_dim } \dim_set:Nn \l_@@_bottom_corner_dim { \c_max_dim } \dim_set:Nn \l_@@_left_corner_dim { \c_max_dim } \prop_map_inline:cn { l_@@_corners_ \__int_value:w #1 _prop } { \@@_find_corner_maxima_aux:nn ##2 } } \cs_new_protected:Npn \@@_find_corner_maxima_aux:nn #1#2 { \dim_set:Nn \l_@@_left_corner_dim { \dim_min:nn { \l_@@_left_corner_dim } {#1} } \dim_set:Nn \l_@@_right_corner_dim { \dim_max:nn { \l_@@_right_corner_dim } {#1} } \dim_set:Nn \l_@@_bottom_corner_dim { \dim_min:nn { \l_@@_bottom_corner_dim } {#2} } \dim_set:Nn \l_@@_top_corner_dim { \dim_max:nn { \l_@@_top_corner_dim } {#2} } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\@@_find_bounding_shift:} % \begin{macro}[aux]{\@@_find_bounding_shift_aux:nn} % The approach to finding the shift for the bounding box is similar to % that for the corners. However, there is only one value needed here and % a fixed input property list, so things are a bit clearer. % \begin{macrocode} \cs_new_protected:Npn \@@_find_bounding_shift: { \dim_set:Nn \l_@@_bounding_shift_dim { \c_max_dim } \prop_map_inline:Nn \l_@@_bounding_prop { \@@_find_bounding_shift_aux:nn ##2 } } \cs_new_protected:Npn \@@_find_bounding_shift_aux:nn #1#2 { \dim_set:Nn \l_@@_bounding_shift_dim { \dim_min:nn { \l_@@_bounding_shift_dim } {#1} } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\@@_shift_corner:Nnnn} % \begin{macro}{\@@_shift_pole:Nnnnnn} % Shifting the corners and poles of a coffin means subtracting the % appropriate values from the $x$- and $y$-components. For % the poles, this means that the direction vector is unchanged. % \begin{macrocode} \cs_new_protected:Npn \@@_shift_corner:Nnnn #1#2#3#4 { \prop_put:cnx { l_@@_corners_ \__int_value:w #1 _ prop } {#2} { { \dim_eval:n { #3 - \l_@@_left_corner_dim } } { \dim_eval:n { #4 - \l_@@_bottom_corner_dim } } } } \cs_new_protected:Npn \@@_shift_pole:Nnnnnn #1#2#3#4#5#6 { \prop_put:cnx { l_@@_poles_ \__int_value:w #1 _ prop } {#2} { { \dim_eval:n { #3 - \l_@@_left_corner_dim } } { \dim_eval:n { #4 - \l_@@_bottom_corner_dim } } {#5} {#6} } } % \end{macrocode} % \end{macro} % \end{macro} % % \subsection{Resizing coffins} % % \begin{variable}{\l_@@_scale_x_fp} % \begin{variable}{\l_@@_scale_y_fp} % Storage for the scaling factors in $x$ and $y$, respectively. % \begin{macrocode} \fp_new:N \l_@@_scale_x_fp \fp_new:N \l_@@_scale_y_fp % \end{macrocode} % \end{variable} % \end{variable} % % \begin{variable}{\l_@@_scaled_total_height_dim} % \begin{variable}{\l_@@_scaled_width_dim} % When scaling, the values given have to be turned into absolute values. % \begin{macrocode} \dim_new:N \l_@@_scaled_total_height_dim \dim_new:N \l_@@_scaled_width_dim % \end{macrocode} % \end{variable} % \end{variable} % % \begin{macro}{\coffin_resize:Nnn, \coffin_resize:cnn} % Resizing a coffin begins by setting up the user-friendly names for % the dimensions of the coffin box. The new sizes are then turned into % scale factor. This is the same operation as takes place for the % underlying box, but that operation is grouped and so the same % calculation is done here. 
% \begin{macrocode} \cs_new_protected:Npn \coffin_resize:Nnn #1#2#3 { \fp_set:Nn \l_@@_scale_x_fp { \dim_to_fp:n {#2} / \dim_to_fp:n { \coffin_wd:N #1 } } \fp_set:Nn \l_@@_scale_y_fp { \dim_to_fp:n {#3} / \dim_to_fp:n { \coffin_ht:N #1 + \coffin_dp:N #1 } } \box_resize_to_wd_and_ht_plus_dp:Nnn #1 {#2} {#3} \@@_resize_common:Nnn #1 {#2} {#3} } \cs_generate_variant:Nn \coffin_resize:Nnn { c } % \end{macrocode} % \end{macro} % % \begin{macro}{\@@_resize_common:Nnn} % The poles and corners of the coffin are scaled to the appropriate % places before actually resizing the underlying box. % \begin{macrocode} \cs_new_protected:Npn \@@_resize_common:Nnn #1#2#3 { \prop_map_inline:cn { l_@@_corners_ \__int_value:w #1 _prop } { \@@_scale_corner:Nnnn #1 {##1} ##2 } \prop_map_inline:cn { l_@@_poles_ \__int_value:w #1 _prop } { \@@_scale_pole:Nnnnnn #1 {##1} ##2 } % \end{macrocode} % Negative $x$-scaling values place the poles in the wrong % location: this is corrected here. % \begin{macrocode} \fp_compare:nNnT \l_@@_scale_x_fp < \c_zero_fp { \prop_map_inline:cn { l_@@_corners_ \__int_value:w #1 _prop } { \@@_x_shift_corner:Nnnn #1 {##1} ##2 } \prop_map_inline:cn { l_@@_poles_ \__int_value:w #1 _prop } { \@@_x_shift_pole:Nnnnnn #1 {##1} ##2 } } } % \end{macrocode} % \end{macro} % % \begin{macro}{\coffin_scale:Nnn, \coffin_scale:cnn} % For scaling, the opposite calculation is done to find the new % dimensions for the coffin. Only the total height is needed, as this % is the shift required for corners and poles. The scaling of the stored % dimensions is done the \TeX{} way, with the (floating point) scale factor % placed directly in front of a dimension, so no further use of the % \texttt{fp} module is needed. % \begin{macrocode} \cs_new_protected:Npn \coffin_scale:Nnn #1#2#3 { \fp_set:Nn \l_@@_scale_x_fp {#2} \fp_set:Nn \l_@@_scale_y_fp {#3} \box_scale:Nnn #1 { \l_@@_scale_x_fp } { \l_@@_scale_y_fp } \dim_set:Nn \l_@@_internal_dim { \coffin_ht:N #1 + \coffin_dp:N #1 } \dim_set:Nn \l_@@_scaled_total_height_dim { \fp_abs:n { \l_@@_scale_y_fp } \l_@@_internal_dim } \dim_set:Nn \l_@@_scaled_width_dim { -\fp_abs:n { \l_@@_scale_x_fp } \coffin_wd:N #1 } \@@_resize_common:Nnn #1 { \l_@@_scaled_width_dim } { \l_@@_scaled_total_height_dim } } \cs_generate_variant:Nn \coffin_scale:Nnn { c } % \end{macrocode} % \end{macro} % % \begin{macro}{\@@_scale_vector:nnNN} % This function scales a vector from the origin using the pre-set scale % factors in $x$ and $y$. This is a much less complex operation % than rotation, and as a result the code is a lot clearer. % \begin{macrocode} \cs_new_protected:Npn \@@_scale_vector:nnNN #1#2#3#4 { \dim_set:Nn #3 { \fp_to_dim:n { \dim_to_fp:n {#1} * \l_@@_scale_x_fp } } \dim_set:Nn #4 { \fp_to_dim:n { \dim_to_fp:n {#2} * \l_@@_scale_y_fp } } } % \end{macrocode} % \end{macro} % % \begin{macro}{\@@_scale_corner:Nnnn} % \begin{macro}{\@@_scale_pole:Nnnnnn} % Scaling both corners and poles is a simple calculation using the % preceding vector scaling.
% \begin{macrocode} \cs_new_protected:Npn \@@_scale_corner:Nnnn #1#2#3#4 { \@@_scale_vector:nnNN {#3} {#4} \l_@@_x_dim \l_@@_y_dim \prop_put:cnx { l_@@_corners_ \__int_value:w #1 _prop } {#2} { { \dim_use:N \l_@@_x_dim } { \dim_use:N \l_@@_y_dim } } } \cs_new_protected:Npn \@@_scale_pole:Nnnnnn #1#2#3#4#5#6 { \@@_scale_vector:nnNN {#3} {#4} \l_@@_x_dim \l_@@_y_dim \@@_set_pole:Nnx #1 {#2} { { \dim_use:N \l_@@_x_dim } { \dim_use:N \l_@@_y_dim } {#5} {#6} } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\@@_x_shift_corner:Nnnn} % \begin{macro}{\@@_x_shift_pole:Nnnnnn} % These functions correct for the $x$ displacement that takes % place with a negative horizontal scaling. % \begin{macrocode} \cs_new_protected:Npn \@@_x_shift_corner:Nnnn #1#2#3#4 { \prop_put:cnx { l_@@_corners_ \__int_value:w #1 _prop } {#2} { { \dim_eval:n { #3 + \box_wd:N #1 } } {#4} } } \cs_new_protected:Npn \@@_x_shift_pole:Nnnnnn #1#2#3#4#5#6 { \prop_put:cnx { l_@@_poles_ \__int_value:w #1 _prop } {#2} { { \dim_eval:n { #3 + \box_wd:N #1 } } {#4} {#5} {#6} } } % \end{macrocode} % \end{macro} % \end{macro} % % \subsection{Additions to \pkg{l3file}} % % \begin{macrocode} %<@@=file> % \end{macrocode} % % \begin{macro} % {\file_get_mdfive_hash:nN, \file_get_size:nN, \file_get_timestamp:nN} % \begin{macro}[aux]{\@@_get_details:nnN} % These are all wrappers around the \pdfTeX{} primitives doing the same % jobs: as we want consistent file paths to be found, they are all set up % using \cs{file_get_full_name:nN} and so are non-expandable \texttt{get} % functions. Much of the code is repetitive but we need to branch for % \LuaTeX{} (emulation in Lua), for the slightly different syntax % needed for \tn{pdftex_mdfivesum:D} and for the fact that primitive % coverage varies in other engines. % \begin{macrocode} \cs_new_protected:Npn \file_get_mdfive_hash:nN #1#2 { \@@_get_details:nnN {#1} { mdfivesum } {#2} } \cs_new_protected:Npn \file_get_size:nN #1#2 { \@@_get_details:nnN {#1} { size } {#2} } \cs_new_protected:Npn \file_get_timestamp:nN #1#2 { \@@_get_details:nnN {#1} { moddate } {#2} } \cs_new_protected:Npn \@@_get_details:nnN #1#2#3 { \file_get_full_name:nN {#1} \l_@@_full_name_str \str_set:Nx #3 { \use:c { pdftex_file #2 :D } \exp_after:wN { \l_@@_full_name_str } } } \cs_if_exist:NTF \luatex_directlua:D { \cs_set_protected:Npn \@@_get_details:nnN #1#2#3 { \file_get_full_name:nN {#1} \l_@@_full_name_str \str_set:Nx #3 { \lua_now_x:n { l3kernel.file#2 ( " \lua_escape_x:n { \l_@@_full_name_str } " ) } } } } { \cs_set_protected:Npn \file_get_mdfive_hash:nN #1#2 { \file_get_full_name:nN {#1} \l_@@_full_name_str \tl_set:Nx #2 { \pdftex_mdfivesum:D file \exp_after:wN { \l_@@_full_name_str } } } \cs_if_exist:NT \xetex_XeTeXversion:D { \cs_set_protected:Npn \@@_get_details:nnN #1#2#3 { \tl_clear:N #3 \__msg_kernel_error:nnx { kernel } { xetex-primitive-not-available } { \exp_not:c { pdffile #2 } } } } } \__msg_kernel_new:nnnn { kernel } { xetex-primitive-not-available } { Primitive~\token_to_str:N #1 not~available } { XeTeX~does~not~currently~provide~functionality~equivalent~to~the~ \token_to_str:N #1 primitive. } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\file_if_exist_input:n, \file_if_exist_input:nF} % Input of a file with a test for existence. We do not define the |T| % or |TF| variants because the most useful place to place the % \meta{true code} would be inconsistent with other conditionals. 
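% A short usage sketch (the file name is illustrative only): the file is
% read if it can be found, and the false branch is taken otherwise.
% \begin{verbatim}
%   \file_if_exist_input:nF { local-settings.tex }
%     { \iow_term:n { No~local~settings~found } }
% \end{verbatim}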
% \begin{macrocode} \cs_new_protected:Npn \file_if_exist_input:n #1 { \file_get_full_name:nN {#1} \l__file_full_name_str \str_if_empty:NF \l__file_full_name_str { \@@_input:V \l__file_full_name_str } } \cs_new_protected:Npn \file_if_exist_input:nF #1#2 { \file_get_full_name:nN {#1} \l__file_full_name_str \str_if_empty:NTF \l__file_full_name_str {#2} { \@@_input:V \l__file_full_name_str } } % \end{macrocode} % \end{macro} % % \begin{macro}[deprecated = 2017-12-31]{\file_if_exist_input:nT, \file_if_exist_input:nTF} % For removal after 2017-12-31. % \begin{macrocode} \__debug_deprecation:nnNNpn { 2017-12-31 } { \file_if_exist:nTF and~ \file_input:n } \cs_new_protected:Npn \file_if_exist_input:nTF #1#2#3 { \file_get_full_name:nN {#1} \l__file_full_name_str \str_if_empty:NTF \l__file_full_name_str {#3} { #2 \@@_input:V \l__file_full_name_str } } \__debug_deprecation:nnNNpn { 2017-12-31 } { \file_if_exist:nT and~ \file_input:n } \cs_new_protected:Npn \file_if_exist_input:nT #1#2 { \file_get_full_name:nN {#1} \l__file_full_name_str \str_if_empty:NF \l__file_full_name_str { #2 \@@_input:V \l__file_full_name_str } } % \end{macrocode} % \end{macro} % % \begin{macro}{\file_input_stop:} % A simple rename. % \begin{macrocode} \cs_new_protected:Npn \file_input_stop: { \tex_endinput:D } % \end{macrocode} % \end{macro} % % \subsection{Additions to \pkg{l3int}} % % \begin{macrocode} %<@@=int> % \end{macrocode} % % \begin{macro}[EXP]{\int_rand:nn} % \begin{macro}[aux, EXP] % { % \@@_rand:ww, \@@_rand_narrow:n, % \@@_rand_narrow:nnn, \@@_rand_narrow:nnnn % } % Evaluate the argument and filter out the case where the lower % bound~|#1| is more than the upper bound~|#2|. Then determine % whether the range is narrower than \cs{c__fp_rand_size_int}; |#2-#1| % may overflow for very large positive~|#2| and negative~|#1|. If the % range is wide, use slower code from \pkg{l3fp}. If the range is % narrow, call \cs{@@_rand_narrow:nn} \Arg{choices} |{#1}| where % \meta{choices} is the number of possible outcomes. Then % \cs{@@_rand_narrow:nnnn} receives a random number reduced modulo % \meta{choices}, the random number itself, \meta{choices} and |#1|. % To avoid bias, throw away the random number if it lies in the last, % incomplete, interval of size \meta{choices} in % $[0,\cs{c__fp_rand_size_int}-1]$, and try again. 
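% As a usage illustration (not part of the module itself): both bounds are
% included in the possible outcomes, so a die roll can be written as
% \begin{verbatim}
%   \int_set:Nn \l_tmpa_int { \int_rand:nn { 1 } { 6 } }
% \end{verbatim}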
% \begin{macrocode} \cs_if_exist:NTF \pdftex_uniformdeviate:D { \__debug_patch_args:nNNpn { { (#1) } { (#2) } } \cs_new:Npn \int_rand:nn #1#2 { \exp_after:wN \@@_rand:ww \__int_value:w \__int_eval:w #1 \exp_after:wN ; \__int_value:w \__int_eval:w #2 ; } \cs_new:Npn \@@_rand:ww #1; #2; { \int_compare:nNnTF {#1} > {#2} { \__msg_kernel_expandable_error:nnnn { kernel } { backward-range } {#1} {#2} \@@_rand:ww #2; #1; } { \int_compare:nNnTF {#1} > 0 { \int_compare:nNnTF { #2 - #1 } < \c__fp_rand_size_int } { \int_compare:nNnTF {#2} < { #1 + \c__fp_rand_size_int } } { \exp_args:Nf \@@_rand_narrow:nn { \int_eval:n { #2 - #1 + 1 } } {#1} } { \fp_to_int:n { randint(#1,#2) } } } } \cs_new:Npn \@@_rand_narrow:nn { \exp_args:No \@@_rand_narrow:nnn { \pdftex_uniformdeviate:D \c__fp_rand_size_int } } \cs_new:Npn \@@_rand_narrow:nnn #1#2 { \exp_args:Nf \@@_rand_narrow:nnnn { \int_mod:nn {#1} {#2} } {#1} {#2} } \cs_new:Npn \@@_rand_narrow:nnnn #1#2#3#4 { \int_compare:nNnTF { #2 - #1 + #3 } > \c__fp_rand_size_int { \@@_rand_narrow:nn {#3} {#4} } { \int_eval:n { #4 + #1 } } } } { \cs_new:Npn \int_rand:nn #1#2 { \__msg_kernel_expandable_error:nn { kernel } { fp-no-random } \int_eval:n {#1} } } % \end{macrocode} % \end{macro} % \end{macro} % % The following must be added to \pkg{l3msg}. % \begin{macrocode} \cs_if_exist:NT \pdftex_uniformdeviate:D { \__msg_kernel_new:nnn { kernel } { backward-range } { Bounds~ordered~backwards~in~\int_rand:nn {#1}~{#2}. } } % \end{macrocode} % % \subsection{Additions to \pkg{l3msg}} % % \begin{macrocode} %<@@=msg> % \end{macrocode} % % \begin{macro}[EXP] % { % \msg_expandable_error:nnnnnn , % \msg_expandable_error:nnnnn , % \msg_expandable_error:nnnn , % \msg_expandable_error:nnn , % \msg_expandable_error:nn , % \msg_expandable_error:nnffff , % \msg_expandable_error:nnfff , % \msg_expandable_error:nnff , % \msg_expandable_error:nnf % } % \begin{macro}[aux]{\__msg_expandable_error_module:nn} % Pass to an auxiliary the message to display and the module name % \begin{macrocode} \cs_new:Npn \msg_expandable_error:nnnnnn #1#2#3#4#5#6 { \exp_args:Nf \@@_expandable_error_module:nn { \exp_args:Nf \tl_to_str:n { \use:c { \c_@@_text_prefix_tl #1 / #2 } {#3} {#4} {#5} {#6} } } {#1} } \cs_new:Npn \msg_expandable_error:nnnnn #1#2#3#4#5 { \msg_expandable_error:nnnnnn {#1} {#2} {#3} {#4} {#5} { } } \cs_new:Npn \msg_expandable_error:nnnn #1#2#3#4 { \msg_expandable_error:nnnnnn {#1} {#2} {#3} {#4} { } { } } \cs_new:Npn \msg_expandable_error:nnn #1#2#3 { \msg_expandable_error:nnnnnn {#1} {#2} {#3} { } { } { } } \cs_new:Npn \msg_expandable_error:nn #1#2 { \msg_expandable_error:nnnnnn {#1} {#2} { } { } { } { } } \cs_generate_variant:Nn \msg_expandable_error:nnnnnn { nnffff } \cs_generate_variant:Nn \msg_expandable_error:nnnnn { nnfff } \cs_generate_variant:Nn \msg_expandable_error:nnnn { nnff } \cs_generate_variant:Nn \msg_expandable_error:nnn { nnf } \cs_new:Npn \@@_expandable_error_module:nn #1#2 { \exp_after:wN \exp_after:wN \exp_after:wN \use_none_delimit_by_q_stop:w \use:n { \::error ! ~ #2 : ~ #1 } \q_stop } % \end{macrocode} % \end{macro} % \end{macro} % % \subsection{Additions to \pkg{l3prop}} % % \begin{macrocode} %<@@=prop> % \end{macrocode} % % \begin{macro}[EXP]{\prop_count:N, \prop_count:c} % \begin{macro}[aux,EXP]{\@@_count:nn} % Counting the key--value pairs in a property list is done using the % same approach as for other count functions: turn each entry into a % \texttt{+1} then use integer evaluation to actually do the % mathematics. 
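% For example (an illustrative sketch only), the result can be used
% directly inside an integer expression as the function is expandable:
% \begin{verbatim}
%   \prop_clear:N \l_tmpa_prop
%   \prop_put:Nnn \l_tmpa_prop { key-one } { a }
%   \prop_put:Nnn \l_tmpa_prop { key-two } { b }
%   \int_show:n { \prop_count:N \l_tmpa_prop } % shows 2
% \end{verbatim}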
% \begin{macrocode} \cs_new:Npn \prop_count:N #1 { \int_eval:n { 0 \prop_map_function:NN #1 \@@_count:nn } } \cs_new:Npn \@@_count:nn #1#2 { + 1 } \cs_generate_variant:Nn \prop_count:N { c } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[rEXP]{\prop_map_tokens:Nn, \prop_map_tokens:cn} % \begin{macro}[aux]{\@@_map_tokens:nwwn} % The mapping is very similar to \cs{prop_map_function:NN}. It grabs % one key--value pair at a time, and stops when reaching the marker % key \cs{q_recursion_tail}, which cannot appear in normal keys since % those are strings. The odd construction |\use:n {#1}| allows |#1| % to contain any token without interfering with \cs{prop_map_break:}. % Argument |#2| of \cs{@@_map_tokens:nwwn} is \cs{s_@@} the first % time, and is otherwise empty. % \begin{macrocode} \cs_new:Npn \prop_map_tokens:Nn #1#2 { \exp_last_unbraced:Nno \@@_map_tokens:nwwn {#2} #1 \@@_pair:wn \q_recursion_tail \s_@@ { } \__prg_break_point:Nn \prop_map_break: { } } \cs_new:Npn \@@_map_tokens:nwwn #1#2 \@@_pair:wn #3 \s_@@ #4 { \if_meaning:w \q_recursion_tail #3 \exp_after:wN \prop_map_break: \fi: \use:n {#1} {#3} {#4} \@@_map_tokens:nwwn {#1} } \cs_generate_variant:Nn \prop_map_tokens:Nn { c } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[EXP] % {\prop_rand_key_value:N, \prop_rand_key_value:c} % \begin{macro}[aux, EXP]{\@@_rand:NN, \@@_rand_item:Nw} % Unlike |clist|, |seq| and |tl|, there is no function to get % an item of a |prop| given an integer between $1$ and the number of % items, so we write the appropriate code. There is no bounds % checking because \cs{int_rand:nn} is always within bounds. At the % end, leave either the key |#3| or the value |#4| in the input % stream. % \begin{macrocode} \cs_new:Npn \prop_rand_key_value:N { \@@_rand:NN \@@_rand:nNn } \cs_new:Npn \@@_rand:nNn #1#2#3 { \exp_not:n { {#1} {#3} } } \cs_new:Npn \@@_rand:NN #1#2 { \prop_if_empty:NTF #2 { } { \exp_after:wN \@@_rand_item:Nw \exp_after:wN #1 \__int_value:w \int_rand:nn { 1 } { \prop_count:N #2 } #2 \q_stop } } \cs_new:Npn \@@_rand_item:Nw #1#2 \s_@@ \@@_pair:wn #3 \s_@@ #4 { \int_compare:nNnF {#2} > 1 { \use_i_delimit_by_q_stop:nw { #1 {#3} \exp_not:n {#4} } } \exp_after:wN \@@_rand_item:Nw \exp_after:wN #1 \__int_value:w \int_eval:n { #2 - 1 } \s_@@ } \cs_generate_variant:Nn \prop_rand_key_value:N { c } % \end{macrocode} % \end{macro} % \end{macro} % % \subsection{Additions to \pkg{l3seq}} % % \begin{macrocode} %<@@=seq> % \end{macrocode} % % \begin{macro} % { % \seq_mapthread_function:NNN, \seq_mapthread_function:NcN, % \seq_mapthread_function:cNN, \seq_mapthread_function:ccN % } % \begin{macro}[aux] % { % \@@_mapthread_function:wNN, \@@_mapthread_function:wNw, % \@@_mapthread_function:Nnnwnn % } % The idea is to first expand both sequences, adding the % usual |{ ? \__prg_break: } { }| to the end of each one. This is % most conveniently done in two steps using an auxiliary function. % The mapping then throws away the first tokens of |#2| and |#5|, % which for items in both sequences are \cs{s_@@} % \cs{@@_item:n}. The function to be mapped is then applied to % the two entries. When the code hits the end of one of the % sequences, the break material stops the entire loop and tidies up. % This avoids needing to find the count of the two sequences, or % worrying about which is longer.
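% As an illustration (the sequences and the helper \cs{my_pair:nn} are set
% up here purely for the example), each step receives one item from each
% sequence and the mapping stops with the shorter input:
% \begin{verbatim}
%   \seq_set_split:Nnn \l_tmpa_seq { , } { a , b , c }
%   \seq_set_split:Nnn \l_tmpb_seq { , } { 1 , 2 }
%   \cs_set:Npn \my_pair:nn #1#2 { (#1,~#2)~ }
%   \seq_mapthread_function:NNN \l_tmpa_seq \l_tmpb_seq \my_pair:nn
% \end{verbatim}
% which leaves |(a, 1) (b, 2)| in the input stream.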
% \begin{macrocode} \cs_new:Npn \seq_mapthread_function:NNN #1#2#3 { \exp_after:wN \@@_mapthread_function:wNN #2 \q_stop #1 #3 } \cs_new:Npn \@@_mapthread_function:wNN \s_@@ #1 \q_stop #2#3 { \exp_after:wN \@@_mapthread_function:wNw #2 \q_stop #3 #1 { ? \__prg_break: } { } \__prg_break_point: } \cs_new:Npn \@@_mapthread_function:wNw \s_@@ #1 \q_stop #2 { \@@_mapthread_function:Nnnwnn #2 #1 { ? \__prg_break: } { } \q_stop } \cs_new:Npn \@@_mapthread_function:Nnnwnn #1#2#3#4 \q_stop #5#6 { \use_none:n #2 \use_none:n #5 #1 {#3} {#6} \@@_mapthread_function:Nnnwnn #1 #4 \q_stop } \cs_generate_variant:Nn \seq_mapthread_function:NNN { Nc } \cs_generate_variant:Nn \seq_mapthread_function:NNN { c , cc } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\seq_set_filter:NNn, \seq_gset_filter:NNn} % \begin{macro}[aux]{\@@_set_filter:NNNn} % Similar to \cs{seq_map_inline:Nn}, without a % \cs{__prg_break_point:} because the user's code % is performed within the evaluation of a boolean expression, % and skipping out of that would break horribly. % The \cs{@@_wrap_item:n} function inserts the relevant % \cs{@@_item:n} without expansion in the input stream, % hence in the \texttt{x}-expanding assignment. % \begin{macrocode} \cs_new_protected:Npn \seq_set_filter:NNn { \@@_set_filter:NNNn \tl_set:Nx } \cs_new_protected:Npn \seq_gset_filter:NNn { \@@_set_filter:NNNn \tl_gset:Nx } \cs_new_protected:Npn \@@_set_filter:NNNn #1#2#3#4 { \@@_push_item_def:n { \bool_if:nT {#4} { \@@_wrap_item:n {##1} } } #1 #2 { #3 } \@@_pop_item_def: } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\seq_set_map:NNn, \seq_gset_map:NNn} % \begin{macro}[aux]{\@@_set_map:NNNn} % Very similar to \cs{seq_set_filter:NNn}. We could actually % merge the two within a single function, but it would have weird % semantics. % \begin{macrocode} \cs_new_protected:Npn \seq_set_map:NNn { \@@_set_map:NNNn \tl_set:Nx } \cs_new_protected:Npn \seq_gset_map:NNn { \@@_set_map:NNNn \tl_gset:Nx } \cs_new_protected:Npn \@@_set_map:NNNn #1#2#3#4 { \@@_push_item_def:n { \exp_not:N \@@_item:n {#4} } #1 #2 { #3 } \@@_pop_item_def: } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}{\seq_rand_item:N, \seq_rand_item:c} % Importantly, \cs{seq_item:Nn} only evaluates its argument once. % \begin{macrocode} \cs_new:Npn \seq_rand_item:N #1 { \seq_if_empty:NF #1 { \seq_item:Nn #1 { \int_rand:nn { 1 } { \seq_count:N #1 } } } } \cs_generate_variant:Nn \seq_rand_item:N { c } % \end{macrocode} % \end{macro} % % \subsection{Additions to \pkg{l3skip}} % % \begin{macrocode} %<@@=skip> % \end{macrocode} % % \begin{macro}{\skip_split_finite_else_action:nnNN} % This macro is useful when performing error checking in certain % circumstances. If the \meta{skip} register holds finite glue it sets % |#3| and |#4| to the stretch and shrink component, resp. If it holds % infinite glue set |#3| and |#4| to zero and issue the special action % |#2| which is probably an error message. % Assignments are local. % \begin{macrocode} \cs_new:Npn \skip_split_finite_else_action:nnNN #1#2#3#4 { \skip_if_finite:nTF {#1} { #3 = \etex_gluestretch:D #1 \scan_stop: #4 = \etex_glueshrink:D #1 \scan_stop: } { #3 = \c_zero_skip #4 = \c_zero_skip #2 } } % \end{macrocode} % \end{macro} % % \subsection{Additions to \pkg{l3sys}} % % \begin{macrocode} %<@@=sys> % \end{macrocode} % % \begin{macro}[EXP, pTF]{\sys_if_rand_exist:} % Currently, randomness exists under \pdfTeX{} and \LuaTeX{}. 
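% A defensive usage sketch (illustrative only): test for random number
% support before relying on it, falling back to a fixed value.
% \begin{verbatim}
%   \sys_if_rand_exist:TF
%     { \int_set:Nn \l_tmpa_int { \int_rand:nn { 0 } { 100 } } }
%     { \int_set:Nn \l_tmpa_int { 0 } }
% \end{verbatim}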
% \begin{macrocode} \cs_if_exist:NTF \pdftex_uniformdeviate:D { \prg_new_conditional:Npnn \sys_if_rand_exist: { p , T , F , TF } { \prg_return_true: } } { \prg_new_conditional:Npnn \sys_if_rand_exist: { p , T , F , TF } { \prg_return_false: } } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP]{\sys_rand_seed:} % Unpack the primitive. % \begin{macrocode} \cs_new:Npn \sys_rand_seed: { \tex_the:D \pdftex_randomseed:D } \cs_if_exist:NF \pdftex_randomseed:D { \cs_set:Npn \sys_rand_seed: { 0 } } % \end{macrocode} % \end{macro} % % \begin{macro}{\sys_gset_rand_seed:n} % The primitive always assigns the seed globally. % \begin{macrocode} \__debug_patch_args:nNNpn { { (#1) } } \cs_new_protected:Npn \sys_gset_rand_seed:n #1 { \pdftex_setrandomseed:D \__int_eval:w #1 \__int_eval_end: } % \end{macrocode} % \end{macro} % % \begin{variable}{\c_sys_shell_escape_int} % Expose the engine's shell escape status to the user. % \begin{macrocode} \int_const:Nn \c_sys_shell_escape_int { \sys_if_engine_luatex:TF { \luatex_directlua:D { tex.sprint((status.shell_escape~or~os.execute()) .. " ") } } { \pdftex_shellescape:D } } % \end{macrocode} % \end{variable} % % \begin{macro}[EXP, pTF]{\sys_if_shell:} % Performs a check for whether shell escape is enabled. This % returns true if either of restricted or unrestricted shell escape % is enabled. % \begin{macrocode} \prg_new_conditional:Nnn \sys_if_shell: { p , T , F , TF } { \if_int_compare:w \c_sys_shell_escape_int = 0 ~ \prg_return_false: \else: \prg_return_true: \fi: } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP, pTF]{\sys_if_shell_unrestricted:} % Performs a check for whether \emph{unrestricted} shell escape is % enabled. % \begin{macrocode} \prg_new_conditional:Nnn \sys_if_shell_unrestricted: { p , T , F , TF } { \if_int_compare:w \c_sys_shell_escape_int = 1 ~ \prg_return_true: \else: \prg_return_false: \fi: } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP, pTF]{\sys_if_shell_restricted:} % Performs a check for whether \emph{restricted} shell escape is % enabled. This returns false if unrestricted shell escape is % enabled. Unrestricted shell escape is not considered a superset % of restricted shell escape in this case. To find whether any % shell escape is enabled use \cs{sys_if_shell:}. % \begin{macrocode} \prg_new_conditional:Nnn \sys_if_shell_restricted: { p , T , F , TF } { \if_int_compare:w \c_sys_shell_escape_int = 2 ~ \prg_return_true: \else: \prg_return_false: \fi: } % \end{macrocode} % \end{macro} % % \begin{variable}{\c_@@_shell_stream_int} % This is not needed for \LuaTeX{}: shell escape there is not done using % a \TeX{} interface. % \begin{macrocode} \sys_if_engine_luatex:F { \int_const:Nn \c_@@_shell_stream_int { 18 } } % \end{macrocode} % \end{variable} % % \begin{macro}{\sys_shell_now:n} % Execute commands through shell escape immediately. % \begin{macrocode} \sys_if_engine_luatex:TF { \cs_new_protected:Npn \sys_shell_now:n #1 { \luatex_directlua:D { os.execute(" \luatex_luaescapestring:D { \etex_detokenize:D {#1} } ") } } } { \cs_new_protected:Npn \sys_shell_now:n #1 { \iow_now:Nn \c_@@_shell_stream_int { #1 } } } \cs_generate_variant:Nn \sys_shell_now:n { x } % \end{macrocode} % \end{macro} % % \begin{macro}{\sys_shell_shipout:n} % Execute commands through shell escape at shipout.
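% For example (purely illustrative; the command and file name assume a
% Unix-like system, and nothing happens unless the engine was started with
% some form of shell escape enabled):
% \begin{verbatim}
%   \sys_if_shell:T
%     { \sys_shell_now:n { touch ~ example-marker.txt } }
% \end{verbatim}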
% \begin{macrocode} \sys_if_engine_luatex:TF { \cs_new_protected:Npn \sys_shell_shipout:n #1 { \luatex_latelua:D { os.execute(" \luatex_luaescapestring:D { \etex_detokenize:D {#1} } ") } } } { \cs_new_protected:Npn \sys_shell_shipout:n #1 { \iow_shipout:Nn \c_@@_shell_stream_int { #1 } } } \cs_generate_variant:Nn \sys_shell_shipout:n { x } % \end{macrocode} % \end{macro} % % \subsection{Additions to \pkg{l3tl}} % % \begin{macrocode} %<@@=tl> % \end{macrocode} % % \begin{macro}[EXP,pTF]{\tl_if_single_token:n} % There are four cases: empty token list, token list starting with a % normal token, with a brace group, or with a space token. If the % token list starts with a normal token, remove it and check for % emptiness. For the next case, an empty token list is not a single % token. Finally, we have a non-empty token list starting with a % space or a brace group. Applying \texttt{f}-expansion yields an % empty result if and only if the token list is a single space. % \begin{macrocode} \prg_new_conditional:Npnn \tl_if_single_token:n #1 { p , T , F , TF } { \tl_if_head_is_N_type:nTF {#1} { \@@_if_empty_return:o { \use_none:n #1 } } { \tl_if_empty:nTF {#1} { \prg_return_false: } { \@@_if_empty_return:o { \exp:w \exp_end_continue_f:w #1 } } } } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP]{\tl_reverse_tokens:n} % \begin{macro}[EXP,aux]{\@@_reverse_group:nn} % The same as \cs{tl_reverse:n} but with recursion within brace groups. % \begin{macrocode} \cs_new:Npn \tl_reverse_tokens:n #1 { \etex_unexpanded:D \exp_after:wN { \exp:w \@@_act:NNNnn \@@_reverse_normal:nN \@@_reverse_group:nn \@@_reverse_space:n { } {#1} } } \cs_new:Npn \@@_reverse_group:nn #1 { \@@_act_group_recurse:Nnn \@@_act_reverse_output:n { \tl_reverse_tokens:n } } % \end{macrocode} % \end{macro} % \begin{macro}[EXP,aux]{\@@_act_group_recurse:Nnn} % In many applications of \cs{@@_act:NNNnn}, we need to recursively % apply some transformation within brace groups, then output. In this % code, |#1| is the output function, |#2| is the transformation, % which should expand in two steps, and |#3| is the group. % \begin{macrocode} \cs_new:Npn \@@_act_group_recurse:Nnn #1#2#3 { \exp_args:Nf #1 { \exp_after:wN \exp_after:wN \exp_after:wN { #2 {#3} } } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[EXP]{\tl_count_tokens:n} % \begin{macro}[EXP,aux]{\@@_act_count_normal:nN, % \@@_act_count_group:nn, \@@_act_count_space:n} % The token count is computed through an \cs{int_eval:n} construction. % Each \texttt{1+} is output to the \emph{left}, into the integer % expression, and the sum is ended by the \cs{exp_end:} inserted by % \cs{@@_act_end:wn} (which is technically implemented as \cs{c_zero}). % Somewhat a hack! % \begin{macrocode} \cs_new:Npn \tl_count_tokens:n #1 { \int_eval:n { \@@_act:NNNnn \@@_act_count_normal:nN \@@_act_count_group:nn \@@_act_count_space:n { } {#1} } } \cs_new:Npn \@@_act_count_normal:nN #1 #2 { 1 + } \cs_new:Npn \@@_act_count_space:n #1 { 1 + } \cs_new:Npn \@@_act_count_group:nn #1 #2 { 2 + \tl_count_tokens:n {#2} + } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro} % { % \tl_set_from_file:Nnn, \tl_set_from_file:cnn, % \tl_gset_from_file:Nnn, \tl_gset_from_file:cnn % } % \begin{macro}[aux]{\@@_set_from_file:NNnn} % \begin{macro}[aux]{\@@_from_file_do:w} % The approach here is similar to that for doing a rescan, and so the same % internals can be reused. Thus the plan is to insert a pair of tokens of % the same charcode but different catcodes after the file has been read. 
% This plus \cs{exp_not:N} allows the primitive to be used to carry out % a set operation. % \begin{macrocode} \cs_new_protected:Npn \tl_set_from_file:Nnn { \@@_set_from_file:NNnn \tl_set:Nn } \cs_new_protected:Npn \tl_gset_from_file:Nnn { \@@_set_from_file:NNnn \tl_gset:Nn } \cs_generate_variant:Nn \tl_set_from_file:Nnn { c } \cs_generate_variant:Nn \tl_gset_from_file:Nnn { c } \cs_new_protected:Npn \@@_set_from_file:NNnn #1#2#3#4 { \file_get_full_name:nN {#4} \l__file_full_name_str \str_if_empty:NTF \l__file_full_name_str { \__file_missing:n {#4} } { \group_begin: \exp_args:No \etex_everyeof:D { \c_@@_rescan_marker_tl \exp_not:N } #3 \scan_stop: \exp_after:wN \@@_from_file_do:w \exp_after:wN \prg_do_nothing: \tex_input:D \l__file_full_name_str \scan_stop: \exp_args:NNNo \group_end: #1 #2 \l_@@_internal_a_tl } } \exp_args:Nno \use:nn { \cs_new_protected:Npn \@@_from_file_do:w #1 } { \c_@@_rescan_marker_tl } { \tl_set:No \l_@@_internal_a_tl {#1} } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % % \begin{macro} % { % \tl_set_from_file_x:Nnn, \tl_set_from_file_x:cnn, % \tl_gset_from_file_x:Nnn, \tl_gset_from_file_x:cnn % } % \begin{macro}[aux]{\@@_set_from_file_x:NNnn} % When reading a file and allowing expansion of the content, the set up % only needs to prevent \TeX{} complaining about the end of the file. That % is done simply, with a group then used to trap the definition needed. % Once the business is done using some scratch space, the tokens can be % transferred to the real target. % \begin{macrocode} \cs_new_protected:Npn \tl_set_from_file_x:Nnn { \@@_set_from_file_x:NNnn \tl_set:Nn } \cs_new_protected:Npn \tl_gset_from_file_x:Nnn { \@@_set_from_file_x:NNnn \tl_gset:Nn } \cs_generate_variant:Nn \tl_set_from_file_x:Nnn { c } \cs_generate_variant:Nn \tl_gset_from_file_x:Nnn { c } \cs_new_protected:Npn \@@_set_from_file_x:NNnn #1#2#3#4 { \file_get_full_name:nN {#4} \l__file_full_name_str \str_if_empty:NTF \l__file_full_name_str { \__file_missing:n {#4} } { \group_begin: \etex_everyeof:D { \exp_not:N } #3 \scan_stop: \tl_set:Nx \l_@@_internal_a_tl { \tex_input:D \l__file_full_name_str \c_space_token } \exp_args:NNNo \group_end: #1 #2 \l_@@_internal_a_tl } } % \end{macrocode} % \end{macro} % \end{macro} % % \subsubsection{Unicode case changing} % % The mechanisms needed for case changing are somewhat involved, particularly % to allow for all of the special cases. These functions also require the % appropriate data extracted from the Unicode documentation (either manually % or automatically). % % \begin{macro}[EXP, documented-as=\tl_if_head_eq_catcode:nNTF] % {\tl_if_head_eq_catcode:oNTF} % Extra variants. % \begin{macrocode} \cs_generate_variant:Nn \tl_if_head_eq_catcode:nNTF { o } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP]{\tl_lower_case:n, \tl_upper_case:n, \tl_mixed_case:n} % \begin{macro}[EXP]{\tl_lower_case:nn, \tl_upper_case:nn, \tl_mixed_case:nn} % The user level functions here are all wrappers around the internal % functions for case changing. 
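% A few illustrative calls (the inputs are examples only):
% \begin{verbatim}
%   \tl_upper_case:n { hello~world }   % HELLO WORLD
%   \tl_lower_case:n { LaTeX }         % latex
%   \tl_mixed_case:n { hello~WORLD }   % Hello world
% \end{verbatim}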
% \begin{macrocode} \cs_new:Npn \tl_lower_case:n { \@@_change_case:nnn { lower } { } } \cs_new:Npn \tl_upper_case:n { \@@_change_case:nnn { upper } { } } \cs_new:Npn \tl_mixed_case:n { \@@_change_case:nnn { mixed } { } } \cs_new:Npn \tl_lower_case:nn { \@@_change_case:nnn { lower } } \cs_new:Npn \tl_upper_case:nn { \@@_change_case:nnn { upper } } \cs_new:Npn \tl_mixed_case:nn { \@@_change_case:nnn { mixed } } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macro}[aux, EXP]{\@@_change_case:nnn} % \begin{macro}[aux, EXP]{\@@_change_case_aux:nnn} % \begin{macro}[aux, EXP]{\@@_change_case_loop:wnn} % \begin{macro}[aux, EXP] % { % \@@_change_case_output:nwn , % \@@_change_case_output:Vwn , % \@@_change_case_output:own , % \@@_change_case_output:vwn , % \@@_change_case_output:fwn , % } % \begin{macro}[aux, EXP]{\@@_change_case_end:wn} % \begin{macro}[aux, EXP]{\@@_change_case_group:nwnn} % \begin{macro}[aux, EXP] % { % \@@_change_case_group_lower:nnnn , % \@@_change_case_group_upper:nnnn , % \@@_change_case_group_mixed:nnnn % } % \begin{macro}[aux, EXP]{\@@_change_case_space:wnn} % \begin{macro}[aux, EXP]{\@@_change_case_N_type:Nwnn} % \begin{macro}[aux, EXP]{\@@_change_case_N_type:NNNnnn} % \begin{macro}[aux, EXP]{\@@_change_case_math:NNNnnn} % \begin{macro}[aux, EXP]{\@@_change_case_math_loop:wNNnn} % \begin{macro}[aux, EXP]{\@@_change_case_math:NwNNnn} % \begin{macro}[aux, EXP]{\@@_change_case_math_group:nwNNnn} % \begin{macro}[aux, EXP]{\@@_change_case_math_space:wNNnn} % \begin{macro}[aux, EXP]{\@@_change_case_N_type:Nnnn} % \begin{macro}[aux, EXP] % { % \@@_change_case_char_lower:Nnn , % \@@_change_case_char_upper:Nnn , % \@@_change_case_char_mixed:Nnn % } % \begin{macro}[aux, EXP]{\@@_change_case_char:nN} % \begin{macro}[aux, EXP] % {\@@_change_case_char_auxi:nN, \@@_change_case_char_auxii:nN} % \begin{macro}[aux, EXP] % { % \@@_change_case_char_mixed:N, % \@@_change_case_char_lower:N, % \@@_change_case_char_upper:N % } % \begin{macro}[aux] % {\@@_lookup_mixed:N, \@@_lookup_lower:N, \@@_lookup_upper:N} % \begin{macro}[aux, EXP]{\@@_change_case_char_UTFviii:nNN} % \begin{macro}[aux, EXP]{\@@_change_case_char_UTFviii:nNNN} % \begin{macro}[aux, EXP]{\@@_change_case_char_UTFviii:nNNNN} % \begin{macro}[aux, EXP]{\@@_change_case_char_UTFviii:nn} % \begin{macro}[aux, EXP]{\@@_change_case_cs_letterlike:Nn} % \begin{macro}[aux, EXP]{\@@_change_case_cs_letterlike:NnN} % \begin{macro}[aux, EXP]{\@@_change_case_cs_accents:NN} % \begin{macro}[aux, EXP]{\@@_change_case_cs:N} % \begin{macro}[aux, EXP]{\@@_change_case_cs:NN} % \begin{macro}[aux, EXP]{\@@_change_case_cs:NNn} % \begin{macro}[aux, EXP]{\@@_change_case_protect:wNN} % \begin{macro}[aux, EXP]{\@@_change_case_if_expandable:NTF} % \begin{macro}[aux, EXP]{\@@_change_case_cs_expand:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_cs_expand:NN} % \begin{macro}[aux, EXP]{\@@_change_case_mixed_skip:N} % \begin{macro}[aux, EXP]{\@@_change_case_mixed_skip:NN} % \begin{macro}[aux, EXP]{\@@_change_case_mixed_skip_tidy:Nwn} % \begin{macro}[aux, EXP]{\@@_change_case_mixed_switch:w} % The mechanism for the core conversion of case is based on the idea that % we can use a loop to grab the entire token list plus a quark: the latter is % used as an end marker and to avoid any brace stripping. Depending on the % nature of the first item in the grabbed argument, it can either processed % as a single token, treated as a group or treated as a space. 
These % different cases all work by re-reading |#1| in the appropriate way, hence % the repetition of |#1 \q_recursion_stop|. % \begin{macrocode} \cs_new:Npn \@@_change_case:nnn #1#2#3 { \etex_unexpanded:D \exp_after:wN { \exp:w \@@_change_case_aux:nnn {#1} {#2} {#3} } } \cs_new:Npn \@@_change_case_aux:nnn #1#2#3 { \group_align_safe_begin: \@@_change_case_loop:wnn #3 \q_recursion_tail \q_recursion_stop {#1} {#2} \@@_change_case_result:n { } } \cs_new:Npn \@@_change_case_loop:wnn #1 \q_recursion_stop { \tl_if_head_is_N_type:nTF {#1} { \@@_change_case_N_type:Nwnn } { \tl_if_head_is_group:nTF {#1} { \@@_change_case_group:nwnn } { \@@_change_case_space:wnn } } #1 \q_recursion_stop } % \end{macrocode} % Earlier versions of the code were only \texttt{x}-type expandable rather % than \texttt{f}-type: this causes issues with nesting and so the slight % performance hit is taken for a better outcome in usability terms. Setting % up for \texttt{f}-type expandability has two requirements: a marker % token after the main loop (see above) and a mechanism to \enquote{load} % and finalise the result. That is handled in the code below, which includes % the necessary material to end the \cs{exp:w} expansion. % \begin{macrocode} \cs_new:Npn \@@_change_case_output:nwn #1#2 \@@_change_case_result:n #3 { #2 \@@_change_case_result:n { #3 #1 } } \cs_generate_variant:Nn \@@_change_case_output:nwn { V , o , v , f } \cs_new:Npn \@@_change_case_end:wn #1 \@@_change_case_result:n #2 { \group_align_safe_end: \exp_end: #2 } % \end{macrocode} % Handling for the cases where the current argument is a brace group or % a space is relatively easy. For the brace case, the routine works % recursively, using the expandability of the mechanism to ensure that the % result is finalised before storage. For the space case it is simply a % question of removing the space in the input and storing it in the output. % In both cases, and indeed for the \texttt{N}-type grabber, after removing % the current item from the input \cs{@@_change_case_loop:wnn} is inserted % in front of the remaining tokens. % \begin{macrocode} \cs_new:Npn \@@_change_case_group:nwnn #1#2 \q_recursion_stop #3#4 { \use:c { @@_change_case_group_ #3 : nnnn } {#1} {#2} {#3} {#4} } \cs_new:Npn \@@_change_case_group_lower:nnnn #1#2#3#4 { \@@_change_case_output:own { \exp_after:wN { \exp:w \@@_change_case_aux:nnn {#3} {#4} {#1} } } \@@_change_case_loop:wnn #2 \q_recursion_stop {#3} {#4} } \cs_new_eq:NN \@@_change_case_group_upper:nnnn \@@_change_case_group_lower:nnnn % \end{macrocode} % For the \enquote{mixed} case, a group is taken as forcing a switch to lower % casing. That means we need a separate auxiliary. (Tracking whether we have % found a first character inside a group and transferring the information out % looks pretty horrible.) % \begin{macrocode} \cs_new:Npn \@@_change_case_group_mixed:nnnn #1#2#3#4 { \@@_change_case_output:own { \exp_after:wN { \exp:w \@@_change_case_aux:nnn {#3} {#4} {#1} } } \@@_change_case_loop:wnn #2 \q_recursion_stop { lower } {#4} } \exp_last_unbraced:NNo \cs_new:Npn \@@_change_case_space:wnn \c_space_tl { \@@_change_case_output:nwn { ~ } \@@_change_case_loop:wnn } % \end{macrocode} % For \texttt{N}-type arguments there are several stages to the approach. % First, a simple check for the end-of-input marker, which if found triggers % the final clean up and output step.
Assuming that is not the case, the % first check is for math-mode escaping: this test can encompass control % sequences or other \texttt{N}-type tokens so is handled up front. % \begin{macrocode} \cs_new:Npn \@@_change_case_N_type:Nwnn #1#2 \q_recursion_stop { \quark_if_recursion_tail_stop_do:Nn #1 { \@@_change_case_end:wn } \exp_after:wN \@@_change_case_N_type:NNNnnn \exp_after:wN #1 \l_tl_case_change_math_tl \q_recursion_tail ? \q_recursion_stop {#2} } % \end{macrocode} % Looking for math mode escape first requires a loop over the possible % token pairs to see if the current input (|#1|) matches an open-math case % (|#2|). If if does then this test loop is ended and a new input-gathering % one is begun. The latter simply transfers material from the input to the % output without any expansion, testing each \texttt{N}-type token to see % if it matches the close-math case required. If that is the situation then % the \enquote{math loop} stops and resumes the main loop: as that might % be either the standard case-changing one or the mixed-case alternative, % it is not hard-coded into the math loop but is rather passed as argument % |#3| to \cs{@@_change_case_math:NNNnnn}. If no close-math token is found % then the final clean-up is forced (\emph{i.e.}~there is no assumption % of \enquote{well-behaved} input in terms of math mode). % \begin{macrocode} \cs_new:Npn \@@_change_case_N_type:NNNnnn #1#2#3 { \quark_if_recursion_tail_stop_do:Nn #2 { \@@_change_case_N_type:Nnnn #1 } \token_if_eq_meaning:NNTF #1 #2 { \use_i_delimit_by_q_recursion_stop:nw { \@@_change_case_math:NNNnnn #1 #3 \@@_change_case_loop:wnn } } { \@@_change_case_N_type:NNNnnn #1 } } \cs_new:Npn \@@_change_case_math:NNNnnn #1#2#3#4 { \@@_change_case_output:nwn {#1} \@@_change_case_math_loop:wNNnn #4 \q_recursion_stop #2 #3 } \cs_new:Npn \@@_change_case_math_loop:wNNnn #1 \q_recursion_stop { \tl_if_head_is_N_type:nTF {#1} { \@@_change_case_math:NwNNnn } { \tl_if_head_is_group:nTF {#1} { \@@_change_case_math_group:nwNNnn } { \@@_change_case_math_space:wNNnn } } #1 \q_recursion_stop } \cs_new:Npn \@@_change_case_math:NwNNnn #1#2 \q_recursion_stop #3#4 { \token_if_eq_meaning:NNTF \q_recursion_tail #1 { \@@_change_case_end:wn } { \@@_change_case_output:nwn {#1} \token_if_eq_meaning:NNTF #1 #3 { #4 #2 \q_recursion_stop } { \@@_change_case_math_loop:wNNnn #2 \q_recursion_stop #3#4 } } } \cs_new:Npn \@@_change_case_math_group:nwNNnn #1#2 \q_recursion_stop { \@@_change_case_output:nwn { {#1} } \@@_change_case_math_loop:wNNnn #2 \q_recursion_stop } \exp_last_unbraced:NNo \cs_new:Npn \@@_change_case_math_space:wNNnn \c_space_tl { \@@_change_case_output:nwn { ~ } \@@_change_case_math_loop:wNNnn } % \end{macrocode} % Once potential math-mode cases are filtered out the next stage is to % test if the token grabbed is a control sequence: they cannot be used in % the lookup table and also may require expansion. At this stage the loop % code starting \cs{@@_change_case_loop:wnn} is inserted: all subsequent % steps in the code which need a look-ahead are coded to rely on this and % thus have \texttt{w}-type arguments if they may do a look-ahead. 
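% As an illustration of the math-mode escaping described above (the token
% pairs here are given purely for the example), delimiters are stored in
% open/close pairs and the material between them is left untouched:
% \begin{verbatim}
%   \tl_set:Nn \l_tl_case_change_math_tl { $ $ \[ \] }
%   \tl_upper_case:n { value~$x + y$~here }   % VALUE $x + y$ HERE
% \end{verbatim}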
% \begin{macrocode} \cs_new:Npn \@@_change_case_N_type:Nnnn #1#2#3#4 { \token_if_cs:NTF #1 { \@@_change_case_cs_letterlike:Nn #1 {#3} } { \use:c { @@_change_case_char_ #3 :Nnn } #1 {#3} {#4} } \@@_change_case_loop:wnn #2 \q_recursion_stop {#3} {#4} } % \end{macrocode} % For character tokens there are some special cases to deal with then % the majority of changes are covered by using the \TeX{} data as a lookup % along with expandable character generation. This avoids needing a very % large number of macros or (as seen in earlier versions) a somewhat tricky % split of the characters into various blocks. Notice that the special case % code may do a look-ahead so requires a final \texttt{w}-type argument % whereas the core lookup table does not and also guarantees an output so % \texttt{f}-type expansion may be used to obtain the case-changed result. % \begin{macrocode} \cs_new:Npn \@@_change_case_char_lower:Nnn #1#2#3 { \cs_if_exist_use:cF { @@_change_case_ #2 _ #3 :Nnw } { \use_ii:nn } #1 { \use:c { @@_change_case_ #2 _ sigma:Nnw } #1 { \@@_change_case_char:nN {#2} #1 } } } \cs_new_eq:NN \@@_change_case_char_upper:Nnn \@@_change_case_char_lower:Nnn % \end{macrocode} % For mixed case, the code is somewhat different: there is a need to % look up both mixed and upper case chars and we have to cover the % situation where there is a character to skip over. % \begin{macrocode} \cs_new:Npn \@@_change_case_char_mixed:Nnn #1#2#3 { \@@_change_case_mixed_switch:w \cs_if_exist_use:cF { @@_change_case_mixed_ #3 :Nnw } { \cs_if_exist_use:cF { @@_change_case_upper_ #3 :Nnw } { \use_ii:nn } } #1 { \@@_change_case_mixed_skip:N #1 } } % \end{macrocode} % For Unicode engines we can handle all characters directly. However, for % the $8$-bit engines the aim is to deal with (a subset of) Unicode (UTF-8) % input. They deal with that by making the upper half of the range active, % so we look for that and if found work out how many UTF-8 octets there % are to deal with. Those can then be grabbed to reconstruct the full % Unicode character, which is then used in a lookup. (As will become % obvious below, there is no intention here of covering all of Unicode.) % \begin{macrocode} \cs_if_exist:NTF \utex_char:D { \cs_new:Npn \@@_change_case_char:nN #1#2 { \@@_change_case_char_auxi:nN {#1} #2 } } { \cs_new:Npn \@@_change_case_char:nN #1#2 { \int_compare:nNnTF { `#2 } > { "80 } { \int_compare:nNnTF { `#2 } < { "E0 } { \@@_change_case_char_UTFviii:nNNN {#1} #2 } { \int_compare:nNnTF { `#2 } < { "F0 } { \@@_change_case_char_UTFviii:nNNNN {#1} #2 } { \@@_change_case_char_UTFviii:nNNNNN {#1} #2 } } } { \@@_change_case_char_auxi:nN {#1} #2 } } } % \end{macrocode} % To allow for the special case of mixed case, we insert here a % action-dependent auxiliary. 
% \begin{macrocode} \cs_new:Npn \@@_change_case_char_auxi:nN #1#2 { \use:c { @@_change_case_char_ #1 :N } #2 } \cs_new:Npn \@@_change_case_char_lower:N #1 { \@@_change_case_output:fwn { \cs_if_exist_use:cF { c__unicode_lower_ \token_to_str:N #1 _tl } { \@@_change_case_char_auxii:nN { lower } #1 } } } \cs_new:Npn \@@_change_case_char_upper:N #1 { \@@_change_case_output:fwn { \cs_if_exist_use:cF { c__unicode_upper_ \token_to_str:N #1 _tl } { \@@_change_case_char_auxii:nN { upper } #1 } } } \cs_new:Npn \@@_change_case_char_mixed:N #1 { \cs_if_exist:cTF { c__unicode_mixed_ \token_to_str:N #1 _tl } { \@@_change_case_output:fwn { \tl_use:c { c__unicode_mixed_ \token_to_str:N #1 _tl } } } { \@@_change_case_char_upper:N #1 } } \cs_if_exist:NTF \utex_char:D { \cs_new:Npn \@@_change_case_char_auxii:nN #1#2 { \int_compare:nNnTF { \use:c { @@_lookup_ #1 :N } #2 } = { 0 } { \exp_stop_f: #2 } { \char_generate:nn { \use:c { @@_lookup_ #1 :N } #2 } { \char_value_catcode:n { \use:c { @@_lookup_ #1 :N } #2 } } } } \cs_new_protected:Npn \@@_lookup_lower:N #1 { \tex_lccode:D `#1 } \cs_new_protected:Npn \@@_lookup_upper:N #1 { \tex_uccode:D `#1 } \cs_new_eq:NN \@@_lookup_mixed:N \@@_lookup_upper:N } { \cs_new:Npn \@@_change_case_char_auxii:nN #1#2 { \exp_stop_f: #2 } \cs_new:Npn \@@_change_case_char_UTFviii:nNNN #1#2#3#4 { \@@_change_case_char_UTFviii:nnN {#1} {#2#4} #3 } \cs_new:Npn \@@_change_case_char_UTFviii:nNNNN #1#2#3#4#5 { \@@_change_case_char_UTFviii:nnN {#1} {#2#4#5} #3 } \cs_new:Npn \@@_change_case_char_UTFviii:nNNNNN #1#2#3#4#5#6 { \@@_change_case_char_UTFviii:nnN {#1} {#2#4#5#6} #3 } \cs_new:Npn \@@_change_case_char_UTFviii:nnN #1#2#3 { \cs_if_exist:cTF { c__unicode_ #1 _ \tl_to_str:n {#2} _tl } { \@@_change_case_output:vwn { c__unicode_ #1 _ \tl_to_str:n {#2} _tl } } { \@@_change_case_output:nwn {#2} } #3 } } % \end{macrocode} % Before dealing with general control sequences there are the special % ones to deal with. Letter-like control sequences are a simple look-up, % while for accents the loop is much as done elsewhere. Notice that % we have a no-op test to make sure there is no unexpected expansion of % letter-like input. The split into two parts here allows us to insert % the \enquote{switch} code for mixed casing. % \begin{macrocode} \cs_new:Npn \@@_change_case_cs_letterlike:Nn #1#2 { \str_if_eq:nnTF {#2} { mixed } { \@@_change_case_cs_letterlike:NnN #1 { upper } \@@_change_case_mixed_switch:w } { \@@_change_case_cs_letterlike:NnN #1 {#2} \prg_do_nothing: } } \cs_new:Npn \@@_change_case_cs_letterlike:NnN #1#2#3 { \cs_if_exist:cTF { c_@@_change_case_ #2 _ \token_to_str:N #1 _tl } { \@@_change_case_output:vwn { c_@@_change_case_ #2 _ \token_to_str:N #1 _tl } #3 } { \cs_if_exist:cTF { c_@@_change_case_ \str_if_eq:nnTF {#2} { lower } { upper } { lower } _ \token_to_str:N #1 _tl } { \@@_change_case_output:nwn {#1} #3 } { \exp_after:wN \@@_change_case_cs_accents:NN \exp_after:wN #1 \l_tl_case_change_accents_tl \q_recursion_tail \q_recursion_stop } } } \cs_new:Npn \@@_change_case_cs_accents:NN #1#2 { \quark_if_recursion_tail_stop_do:Nn #2 { \@@_change_case_cs:N #1 } \str_if_eq:nnTF {#1} {#2} { \use_i_delimit_by_q_recursion_stop:nw { \@@_change_case_output:nwn {#1} } } { \@@_change_case_cs_accents:NN #1 } } % \end{macrocode} % To deal with a control sequence there is first a need to test if it is % on the list which indicate that case changing should be skipped. That's % done using a loop as for the other special cases. 
If a hit is found then % the argument is grabbed: that comes \emph{after} the loop function which % is therefore rearranged. In a \LaTeXe{} context, \tn{protect} needs % to be treated specially, to prevent expansion of the next token but % output it without braces. % \begin{macrocode} \cs_new:Npn \@@_change_case_cs:N #1 { %<*package> \str_if_eq:nnTF {#1} { \protect } { \@@_change_case_protect:wNN } % \exp_after:wN \@@_change_case_cs:NN \exp_after:wN #1 \l_tl_case_change_exclude_tl \q_recursion_tail \q_recursion_stop } \cs_new:Npn \@@_change_case_cs:NN #1#2 { \quark_if_recursion_tail_stop_do:Nn #2 { \@@_change_case_cs_expand:Nnw #1 { \@@_change_case_output:nwn {#1} } } \str_if_eq:nnTF {#1} {#2} { \use_i_delimit_by_q_recursion_stop:nw { \@@_change_case_cs:NNn #1 } } { \@@_change_case_cs:NN #1 } } \cs_new:Npn \@@_change_case_cs:NNn #1#2#3 { \@@_change_case_output:nwn { #1 {#3} } #2 } %<*package> \cs_new:Npn \@@_change_case_protect:wNN #1 \q_recursion_stop #2 #3 { \@@_change_case_output:nwn { \protect #3 } #2 } % % \end{macrocode} % When a control sequence is not on the exclude list the other test if % to see if it is expandable. Once again, if there is a hit then the loop % function is grabbed as part of the clean-up and reinserted before the % now expanded material. The test for expandability has to check for % end-of-recursion as it is needed by the look-ahead code which might hit % the end of the input. The test is done in two parts as \cs{bool_if:nTF} % would choke if |#1| was |(|! % \begin{macrocode} \cs_new:Npn \@@_change_case_if_expandable:NTF #1 { \token_if_expandable:NTF #1 { \bool_lazy_any:nTF { { \token_if_eq_meaning_p:NN \q_recursion_tail #1 } { \token_if_protected_macro_p:N #1 } { \token_if_protected_long_macro_p:N #1 } } { \use_ii:nn } { \use_i:nn } } { \use_ii:nn } } \cs_new:Npn \@@_change_case_cs_expand:Nnw #1#2 { \@@_change_case_if_expandable:NTF #1 { \@@_change_case_cs_expand:NN #1 } { #2 } } \cs_new:Npn \@@_change_case_cs_expand:NN #1#2 { \exp_after:wN #2 #1 } % \end{macrocode} % For mixed case, there is an additional list of exceptions to deal with: % once that is sorted, we can move on back to the main loop. % \begin{macrocode} \cs_new:Npn \@@_change_case_mixed_skip:N #1 { \exp_after:wN \@@_change_case_mixed_skip:NN \exp_after:wN #1 \l_tl_mixed_case_ignore_tl \q_recursion_tail \q_recursion_stop } \cs_new:Npn \@@_change_case_mixed_skip:NN #1#2 { \quark_if_recursion_tail_stop_do:nn {#2} { \@@_change_case_char:nN { mixed } #1 } \int_compare:nNnT { `#1 } = { `#2 } { \use_i_delimit_by_q_recursion_stop:nw { \@@_change_case_output:nwn {#1} \@@_change_case_mixed_skip_tidy:Nwn } } \@@_change_case_mixed_skip:NN #1 } \cs_new:Npn \@@_change_case_mixed_skip_tidy:Nwn #1#2 \q_recursion_stop #3 { \@@_change_case_loop:wnn #2 \q_recursion_stop { mixed } } % \end{macrocode} % Needed to switch from mixed to lower casing when we have found a % first character in the former mode. 
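% As a combined illustration of the mixed-case path (the input is chosen
% for the example, and |(| is added to the ignore list explicitly so that
% the example does not depend on the default setting): the parenthesis is
% passed over, the first letter is upper cased, and the switch just
% described moves the remainder to lower casing.
% \begin{verbatim}
%   \tl_put_right:Nn \l_tl_mixed_case_ignore_tl { ( }
%   \tl_mixed_case:n { (hello)~world }   % (Hello) world
% \end{verbatim}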
% \begin{macrocode} \cs_new:Npn \@@_change_case_mixed_switch:w #1 \@@_change_case_loop:wnn #2 \q_recursion_stop #3 { #1 \@@_change_case_loop:wnn #2 \q_recursion_stop { lower } } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % % \begin{macro}[aux, EXP]{\@@_change_case_lower_sigma:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_sigma:w} % \begin{macro}[aux, EXP]{\@@_change_case_lower_sigma:Nw} % \begin{macro}[aux, EXP]{\@@_change_case_upper_sigma:Nnw} % If the current char is an upper case sigma, the a check is made on the next % item in the input. If it is \texttt{N}-type and not a control sequence % then there is a look-ahead phase. % \begin{macrocode} \cs_new:Npn \@@_change_case_lower_sigma:Nnw #1#2#3#4 \q_recursion_stop { \int_compare:nNnTF { `#1 } = { "03A3 } { \@@_change_case_output:fwn { \@@_change_case_lower_sigma:w #4 \q_recursion_stop } } {#2} #3 #4 \q_recursion_stop } \cs_new:Npn \@@_change_case_lower_sigma:w #1 \q_recursion_stop { \tl_if_head_is_N_type:nTF {#1} { \@@_change_case_lower_sigma:Nw #1 \q_recursion_stop } { \c__unicode_final_sigma_tl } } \cs_new:Npn \@@_change_case_lower_sigma:Nw #1#2 \q_recursion_stop { \@@_change_case_if_expandable:NTF #1 { \exp_after:wN \@@_change_case_lower_sigma:w #1 #2 \q_recursion_stop } { \token_if_letter:NTF #1 { \c__unicode_std_sigma_tl } { \c__unicode_final_sigma_tl } } } % \end{macrocode} % Simply skip to the final step for upper casing. % \begin{macrocode} \cs_new_eq:NN \@@_change_case_upper_sigma:Nnw \use_ii:nn % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % % \begin{macro}[aux, EXP]{\@@_change_case_lower_tr:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_tr_auxi:Nw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_tr_auxii:Nw} % \begin{macro}[aux, EXP]{\@@_change_case_upper_tr:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_az:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_upper_az:Nnw} % The Turkic languages need special treatment for dotted-i and dotless-i. % The lower casing rule can be expressed in terms of searching first for % either a dotless-I or a dotted-I. In the latter case the mapping is % easy, but in the former there is a second stage search. % \begin{macrocode} \cs_if_exist:NTF \utex_char:D { \cs_new:Npn \@@_change_case_lower_tr:Nnw #1#2 { \int_compare:nNnTF { `#1 } = { "0049 } { \@@_change_case_lower_tr_auxi:Nw } { \int_compare:nNnTF { `#1 } = { "0130 } { \@@_change_case_output:nwn { i } } {#2} } } % \end{macrocode} % After a dotless-I there may be a dot-above character. If there is then % a dotted-i should be produced, otherwise output a dotless-i. When the % combination is found both the dotless-I and the dot-above char have to % be removed from the input, which is done by the \cs{use_i:nn} % (it grabs \cs{@@_change_case_loop:wn} and the dot-above char and % discards the latter). 
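% As an illustration of the Turkic rule just described (the word is an
% example only): lower casing with the |tr| setting maps |I| to a dotless
% i, so
% \begin{verbatim}
%   \tl_lower_case:nn { tr } { ISTANBUL }
% \end{verbatim}
% gives a dotless i as the first letter rather than |i|.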
% \begin{macrocode} \cs_new:Npn \@@_change_case_lower_tr_auxi:Nw #1#2 \q_recursion_stop { \tl_if_head_is_N_type:nTF {#2} { \@@_change_case_lower_tr_auxii:Nw #2 \q_recursion_stop } { \@@_change_case_output:Vwn \c__unicode_dotless_i_tl } #1 #2 \q_recursion_stop } \cs_new:Npn \@@_change_case_lower_tr_auxii:Nw #1#2 \q_recursion_stop { \@@_change_case_if_expandable:NTF #1 { \exp_after:wN \@@_change_case_lower_tr_auxi:Nw #1 #2 \q_recursion_stop } { \bool_lazy_or:nnTF { \token_if_cs_p:N #1 } { ! \int_compare_p:nNn { `#1 } = { "0307 } } { \@@_change_case_output:Vwn \c__unicode_dotless_i_tl } { \@@_change_case_output:nwn { i } \use_i:nn } } } } % \end{macrocode} % For $8$-bit engines, dot-above is not available so there is a simple % test for an upper-case I. Then we can look for the UTF-8 representation of % an upper case dotted-I without the combining char. If it's not there, % preserve the UTF-8 sequence as-is. % \begin{macrocode} { \cs_new:Npn \@@_change_case_lower_tr:Nnw #1#2 { \int_compare:nNnTF { `#1 } = { "0049 } { \@@_change_case_output:Vwn \c__unicode_dotless_i_tl } { \int_compare:nNnTF { `#1 } = { 196 } { \@@_change_case_lower_tr_auxi:Nw #1 {#2} } {#2} } } \cs_new:Npn \@@_change_case_lower_tr_auxi:Nw #1#2#3#4 { \int_compare:nNnTF { `#4 } = { 176 } { \@@_change_case_output:nwn { i } #3 } { #2 #3 #4 } } } % \end{macrocode} % Upper casing is easier: just one exception with no context. % \begin{macrocode} \cs_new:Npn \@@_change_case_upper_tr:Nnw #1#2 { \int_compare:nNnTF { `#1 } = { "0069 } { \@@_change_case_output:Vwn \c__unicode_dotted_I_tl } {#2} } % \end{macrocode} % Straight copies. % \begin{macrocode} \cs_new_eq:NN \@@_change_case_lower_az:Nnw \@@_change_case_lower_tr:Nnw \cs_new_eq:NN \@@_change_case_upper_az:Nnw \@@_change_case_upper_tr:Nnw % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % % \begin{macro}[aux, EXP]{\@@_change_case_lower_lt:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_lt:nNnw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_lt:nnw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_lt:Nw} % \begin{macro}[aux, EXP]{\@@_change_case_lower_lt:NNw} % \begin{macro}[aux, EXP]{\@@_change_case_upper_lt:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_upper_lt:nnw} % \begin{macro}[aux, EXP]{\@@_change_case_upper_lt:Nw} % \begin{macro}[aux, EXP]{\@@_change_case_upper_lt:NNw} % For Lithuanian, the issue to be dealt with is dots over lower case % letters: these should be present if there is another accent. That means % that there is some work to do when lower casing I and J. The first step % is a simple match attempt: \cs{c_@@_accents_lt_tl} contains % accented upper case letters which should gain a dot-above char in their % lower case form. This is done using \texttt{f}-type expansion so only one % pass is needed to find if it works or not. If there was no hit, the second % stage is to check for I, J and I-ogonek, and if the current char is a % match to look for a following accent. 
% \begin{macrocode} \cs_new:Npn \@@_change_case_lower_lt:Nnw #1 { \exp_args:Nf \@@_change_case_lower_lt:nNnw { \str_case:nVF #1 \c__unicode_accents_lt_tl \exp_stop_f: } #1 } \cs_new:Npn \@@_change_case_lower_lt:nNnw #1#2 { \tl_if_blank:nTF {#1} { \exp_args:Nf \@@_change_case_lower_lt:nnw { \int_case:nnF {`#2} { { "0049 } i { "004A } j { "012E } \c__unicode_i_ogonek_tl } \exp_stop_f: } } { \@@_change_case_output:nwn {#1} \use_none:n } } \cs_new:Npn \@@_change_case_lower_lt:nnw #1#2 { \tl_if_blank:nTF {#1} {#2} { \@@_change_case_output:nwn {#1} \@@_change_case_lower_lt:Nw } } % \end{macrocode} % Grab the next char and see if it is one of the accents used in Lithuanian: % if it is, add the dot-above char into the output. % \begin{macrocode} \cs_new:Npn \@@_change_case_lower_lt:Nw #1#2 \q_recursion_stop { \tl_if_head_is_N_type:nT {#2} { \@@_change_case_lower_lt:NNw } #1 #2 \q_recursion_stop } \cs_new:Npn \@@_change_case_lower_lt:NNw #1#2#3 \q_recursion_stop { \@@_change_case_if_expandable:NTF #2 { \exp_after:wN \@@_change_case_lower_lt:Nw \exp_after:wN #1 #2 #3 \q_recursion_stop } { \bool_lazy_and:nnT { ! \token_if_cs_p:N #2 } { \bool_lazy_any_p:n { { \int_compare_p:nNn { `#2 } = { "0300 } } { \int_compare_p:nNn { `#2 } = { "0301 } } { \int_compare_p:nNn { `#2 } = { "0303 } } } } { \@@_change_case_output:Vwn \c__unicode_dot_above_tl } #1 #2#3 \q_recursion_stop } } % \end{macrocode} % For upper casing, the test required is for a dot-above char after an I, % J or I-ogonek. First a test for the appropriate letter, and if found a % look-ahead and potentially one token dropped. % \begin{macrocode} \cs_new:Npn \@@_change_case_upper_lt:Nnw #1 { \exp_args:Nf \@@_change_case_upper_lt:nnw { \int_case:nnF {`#1} { { "0069 } I { "006A } J { "012F } \c__unicode_I_ogonek_tl } \exp_stop_f: } } \cs_new:Npn \@@_change_case_upper_lt:nnw #1#2 { \tl_if_blank:nTF {#1} {#2} { \@@_change_case_output:nwn {#1} \@@_change_case_upper_lt:Nw } } \cs_new:Npn \@@_change_case_upper_lt:Nw #1#2 \q_recursion_stop { \tl_if_head_is_N_type:nT {#2} { \@@_change_case_upper_lt:NNw } #1 #2 \q_recursion_stop } \cs_new:Npn \@@_change_case_upper_lt:NNw #1#2#3 \q_recursion_stop { \@@_change_case_if_expandable:NTF #2 { \exp_after:wN \@@_change_case_upper_lt:Nw \exp_after:wN #1 #2 #3 \q_recursion_stop } { \bool_lazy_and:nnTF { ! \token_if_cs_p:N #2 } { \int_compare_p:nNn { `#2 } = { "0307 } } { #1 } { #1 #2 } #3 \q_recursion_stop } } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % % \begin{macro}{\@@_change_case_upper_de-alt:Nnw} % A simple alternative version for German. % \begin{macrocode} \cs_new:cpn { @@_change_case_upper_de-alt:Nnw } #1#2 { \int_compare:nNnTF { `#1 } = { 223 } { \@@_change_case_output:Vwn \c__unicode_upper_Eszett_tl } {#2} } % \end{macrocode} % \end{macro} % % \begin{macro}[EXP, int]{\__unicode_codepoint_to_UTFviii:n} % \begin{macro}[EXP, aux]{\__unicode_codepoint_to_UTFviii_auxi:n} % \begin{macro}[EXP, aux]{\__unicode_codepoint_to_UTFviii_auxii:Nnn} % \begin{macro}[EXP, aux]{\__unicode_codepoint_to_UTFviii_auxiii:n} % This code converts a codepoint into the correct UTF-8 representation. % As there are a variable number of octets, the result starts with the % numeral |1|--|4| to indicate the nature of the returned value. Note that % this code covers the full range even though at this stage it is not % required here. 
Also note that longer-term this is likely to need a public % interface and/or moving to \pkg{l3str} (see experimental string % conversions). In terms of the algorithm itself, see % \url{https://en.wikipedia.org/wiki/UTF-8} for the octet pattern. % \begin{macrocode} \cs_new:Npn \__unicode_codepoint_to_UTFviii:n #1 { \exp_args:Nf \__unicode_codepoint_to_UTFviii_auxi:n { \int_eval:n {#1} } } \cs_new:Npn \__unicode_codepoint_to_UTFviii_auxi:n #1 { \if_int_compare:w #1 > "80 ~ \if_int_compare:w #1 < "800 ~ 2 \__unicode_codepoint_to_UTFviii_auxii:Nnn C {#1} { 64 } \__unicode_codepoint_to_UTFviii_auxiii:n {#1} \else: \if_int_compare:w #1 < "10000 ~ 3 \__unicode_codepoint_to_UTFviii_auxii:Nnn E {#1} { 64 * 64 } \__unicode_codepoint_to_UTFviii_auxiii:n {#1} \__unicode_codepoint_to_UTFviii_auxiii:n { \int_div_truncate:nn {#1} { 64 } } \else: 4 \__unicode_codepoint_to_UTFviii_auxii:Nnn F {#1} { 64 * 64 * 64 } \__unicode_codepoint_to_UTFviii_auxiii:n { \int_div_truncate:nn {#1} { 64 * 64 } } \__unicode_codepoint_to_UTFviii_auxiii:n { \int_div_truncate:nn {#1} { 64 } } \__unicode_codepoint_to_UTFviii_auxiii:n {#1} \fi: \fi: \else: 1 {#1} \fi: } \cs_new:Npn \__unicode_codepoint_to_UTFviii_auxii:Nnn #1#2#3 { { \int_eval:n { "#10 + \int_div_truncate:nn {#2} {#3} } } } \cs_new:Npn \__unicode_codepoint_to_UTFviii_auxiii:n #1 { { \int_eval:n { \int_mod:nn {#1} { 64 } + 128 } } } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % % \begin{variable} % { % \c__unicode_std_sigma_tl , % \c__unicode_final_sigma_tl , % \c__unicode_accents_lt_tl , % \c__unicode_dot_above_tl , % \c__unicode_upper_Eszett_tl % } % The above needs various special token lists containing pre-formed characters. % These are only available in Unicode engines, with no-op definitions % for $8$-bit use. % \begin{macrocode} \cs_if_exist:NTF \utex_char:D { \tl_const:Nx \c__unicode_std_sigma_tl { \utex_char:D "03C3 ~ } \tl_const:Nx \c__unicode_final_sigma_tl { \utex_char:D "03C2 ~ } \tl_const:Nx \c__unicode_accents_lt_tl { \utex_char:D "00CC ~ { \utex_char:D "0069 ~ \utex_char:D "0307 ~ \utex_char:D "0300 ~ } \utex_char:D "00CD ~ { \utex_char:D "0069 ~ \utex_char:D "0307 ~ \utex_char:D "0301 ~ } \utex_char:D "0128 ~ { \utex_char:D "0069 ~ \utex_char:D "0307 ~ \utex_char:D "0303 ~ } } \tl_const:Nx \c__unicode_dot_above_tl { \utex_char:D "0307 ~ } \tl_const:Nx \c__unicode_upper_Eszett_tl { \utex_char:D "1E9E ~ } } { \tl_const:Nn \c__unicode_std_sigma_tl { } \tl_const:Nn \c__unicode_final_sigma_tl { } \tl_const:Nn \c__unicode_accents_lt_tl { } \tl_const:Nn \c__unicode_dot_above_tl { } \tl_const:Nn \c__unicode_upper_Eszett_tl { } } % \end{macrocode} % \end{variable} % \begin{variable} % { % \c__unicode_dotless_i_tl , % \c__unicode_dotted_I_tl , % \c__unicode_i_ogonek_tl , % \c__unicode_I_ogonek_tl , % } % For cases where there is an $8$-bit option in the |T1| font set-up, % a variant is provided for both Unicode and $8$-bit engines.
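%   As a hand-worked illustration of the code point converter defined above
%   (and used just below to build these constants for $8$-bit engines): the
%   octet values here are computed from the algorithm, not taken from a test
%   run.
%   \begin{verbatim}
%     \tl_set:Nx \l_tmpa_tl
%       { \__unicode_codepoint_to_UTFviii:n { "0131 } }
%     % \l_tmpa_tl now holds: 2 {196} {177}
%     % i.e. two octets, 0xC4 0xB1, the UTF-8 form of U+0131 (dotless i)
%   \end{verbatim}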
% \begin{macrocode} \group_begin: \cs_if_exist:NTF \utex_char:D { \cs_set_protected:Npn \@@_tmp:w #1#2 { \tl_const:Nx #1 { \utex_char:D "#2 ~ } } } { \cs_set_protected:Npn \@@_tmp:w #1#2 { \group_begin: \cs_set_protected:Npn \@@_tmp:w ##1##2##3 { \tl_const:Nx #1 { \exp_after:wN \exp_after:wN \exp_after:wN \exp_not:N \__char_generate:nn {##2} { 13 } \exp_after:wN \exp_after:wN \exp_after:wN \exp_not:N \__char_generate:nn {##3} { 13 } } } \tl_set:Nx \l_@@_internal_a_tl { \__unicode_codepoint_to_UTFviii:n {"#2} } \exp_after:wN \@@_tmp:w \l_@@_internal_a_tl \group_end: } } \@@_tmp:w \c__unicode_dotless_i_tl { 0131 } \@@_tmp:w \c__unicode_dotted_I_tl { 0130 } \@@_tmp:w \c__unicode_i_ogonek_tl { 012F } \@@_tmp:w \c__unicode_I_ogonek_tl { 012E } \group_end: % \end{macrocode} % \end{variable} % % For $8$-bit engines we now need to define the case-change data for % the multi-octet mappings. These need a list of what code points are % doable in |T1| so the list is hard coded (there's no saving in loading % the mappings dynamically). All of the straight-forward ones have two % octets, so that is taken as read. % \begin{macrocode} \group_begin: \bool_lazy_or:nnT { \sys_if_engine_pdftex_p: } { \sys_if_engine_uptex_p: } { \cs_set_protected:Npn \@@_loop:nn #1#2 { \quark_if_recursion_tail_stop:n {#1} \tl_set:Nx \l_@@_internal_a_tl { \__unicode_codepoint_to_UTFviii:n {"#1} \__unicode_codepoint_to_UTFviii:n {"#2} } \exp_after:wN \@@_tmp:w \l_@@_internal_a_tl \@@_loop:nn } \cs_set_protected:Npn \@@_tmp:w #1#2#3#4#5#6 { \tl_const:cx { c__unicode_lower_ \char_generate:nn {#2} { 12 } \char_generate:nn {#3} { 12 } _tl } { \exp_after:wN \exp_after:wN \exp_after:wN \exp_not:N \__char_generate:nn {#5} { 13 } \exp_after:wN \exp_after:wN \exp_after:wN \exp_not:N \__char_generate:nn {#6} { 13 } } \tl_const:cx { c__unicode_upper_ \char_generate:nn {#5} { 12 } \char_generate:nn {#6} { 12 } _tl } { \exp_after:wN \exp_after:wN \exp_after:wN \exp_not:N \__char_generate:nn {#2} { 13 } \exp_after:wN \exp_after:wN \exp_after:wN \exp_not:N \__char_generate:nn {#3} { 13 } } } \@@_loop:nn { 00C0 } { 00E0 } { 00C2 } { 00E2 } { 00C3 } { 00E3 } { 00C4 } { 00E4 } { 00C5 } { 00E5 } { 00C6 } { 00E6 } { 00C7 } { 00E7 } { 00C8 } { 00E8 } { 00C9 } { 00E9 } { 00CA } { 00EA } { 00CB } { 00EB } { 00CC } { 00EC } { 00CD } { 00ED } { 00CE } { 00EE } { 00CF } { 00EF } { 00D0 } { 00F0 } { 00D1 } { 00F1 } { 00D2 } { 00F2 } { 00D3 } { 00F3 } { 00D4 } { 00F4 } { 00D5 } { 00F5 } { 00D6 } { 00F6 } { 00D8 } { 00F8 } { 00D9 } { 00F9 } { 00DA } { 00FA } { 00DB } { 00FB } { 00DC } { 00FC } { 00DD } { 00FD } { 00DE } { 00FE } { 0100 } { 0101 } { 0102 } { 0103 } { 0104 } { 0105 } { 0106 } { 0107 } { 0108 } { 0109 } { 010A } { 010B } { 010C } { 010D } { 010E } { 010F } { 0110 } { 0111 } { 0112 } { 0113 } { 0114 } { 0115 } { 0116 } { 0117 } { 0118 } { 0119 } { 011A } { 011B } { 011C } { 011D } { 011E } { 011F } { 0120 } { 0121 } { 0122 } { 0123 } { 0124 } { 0125 } { 0128 } { 0129 } { 012A } { 012B } { 012C } { 012D } { 012E } { 012F } { 0132 } { 0133 } { 0134 } { 0135 } { 0136 } { 0137 } { 0139 } { 013A } { 013B } { 013C } { 013E } { 013F } { 0141 } { 0142 } { 0143 } { 0144 } { 0145 } { 0146 } { 0147 } { 0148 } { 014A } { 014B } { 014C } { 014D } { 014E } { 014F } { 0150 } { 0151 } { 0152 } { 0153 } { 0154 } { 0155 } { 0156 } { 0157 } { 0158 } { 0159 } { 015A } { 015B } { 015C } { 015D } { 015E } { 015F } { 0160 } { 0161 } { 0162 } { 0163 } { 0164 } { 0165 } { 0168 } { 0169 } { 016A } { 016B } { 016C } { 016D } { 016E } { 016F } { 0170 } { 0171 } { 0172 } { 
0173 } { 0174 } { 0175 } { 0176 } { 0177 } { 0178 } { 00FF } { 0179 } { 017A } { 017B } { 017C } { 017D } { 017E } { 01CD } { 01CE } { 01CF } { 01D0 } { 01D1 } { 01D2 } { 01D3 } { 01D4 } { 01E2 } { 01E3 } { 01E6 } { 01E7 } { 01E8 } { 01E9 } { 01EA } { 01EB } { 01F4 } { 01F5 } { 0218 } { 0219 } { 021A } { 021B } \q_recursion_tail ? \q_recursion_stop \cs_set_protected:Npn \@@_tmp:w #1#2#3 { \group_begin: \cs_set_protected:Npn \@@_tmp:w ##1##2##3 { \tl_const:cx { c__unicode_ #3 _ \char_generate:nn {##2} { 12 } \char_generate:nn {##3} { 12 } _tl } {#2} } \tl_set:Nx \l_@@_internal_a_tl { \__unicode_codepoint_to_UTFviii:n { "#1 } } \exp_after:wN \@@_tmp:w \l_@@_internal_a_tl \group_end: } \@@_tmp:w { 00DF } { SS } { upper } \@@_tmp:w { 00DF } { Ss } { mixed } \@@_tmp:w { 0131 } { I } { upper } } \group_end: % \end{macrocode} % % The (fixed) look-up mappings for letter-like control sequences. % \begin{macrocode} \group_begin: \cs_set_protected:Npn \@@_change_case_setup:NN #1#2 { \quark_if_recursion_tail_stop:N #1 \tl_const:cn { c_@@_change_case_lower_ \token_to_str:N #1 _tl } { #2 } \tl_const:cn { c_@@_change_case_upper_ \token_to_str:N #2 _tl } { #1 } \@@_change_case_setup:NN } \@@_change_case_setup:NN \AA \aa \AE \ae \DH \dh \DJ \dj \IJ \ij \L \l \NG \ng \O \o \OE \oe \SS \ss \TH \th \q_recursion_tail ? \q_recursion_stop \tl_const:cn { c_@@_change_case_upper_ \token_to_str:N \i _tl } { I } \tl_const:cn { c_@@_change_case_upper_ \token_to_str:N \j _tl } { J } \group_end: % \end{macrocode} % % \begin{variable}{\l_tl_case_change_accents_tl} % A list of accents to leave alone. % \begin{macrocode} \tl_new:N \l_tl_case_change_accents_tl \tl_set:Nn \l_tl_case_change_accents_tl { \" \' \. \^ \` \~ \c \H \k \r \t \u \v } % \end{macrocode} % \end{variable} % % \begin{macro}[aux, EXP]{\@@_change_case_mixed_nl:Nnw} % \begin{macro}[aux, EXP]{\@@_change_case_mixed_nl:Nw} % \begin{macro}[aux, EXP]{\@@_change_case_mixed_nl:NNw} % For Dutch, there is a single look-ahead test for \texttt{ij} when % title casing. If the appropriate letters are found, produce \texttt{IJ} % and gobble the \texttt{j}/\texttt{J}. % \begin{macrocode} \cs_new:Npn \@@_change_case_mixed_nl:Nnw #1 { \bool_lazy_or:nnTF { \int_compare_p:nNn { `#1 } = { `i } } { \int_compare_p:nNn { `#1 } = { `I } } { \@@_change_case_output:nwn { I } \@@_change_case_mixed_nl:Nw } } \cs_new:Npn \@@_change_case_mixed_nl:Nw #1#2 \q_recursion_stop { \tl_if_head_is_N_type:nT {#2} { \@@_change_case_mixed_nl:NNw } #1 #2 \q_recursion_stop } \cs_new:Npn \@@_change_case_mixed_nl:NNw #1#2#3 \q_recursion_stop { \@@_change_case_if_expandable:NTF #2 { \exp_after:wN \@@_change_case_mixed_nl:Nw \exp_after:wN #1 #2 #3 \q_recursion_stop } { \bool_lazy_and:nnTF { ! ( \token_if_cs_p:N #2 ) } { \bool_lazy_or_p:nn { \int_compare_p:nNn { `#2 } = { `j } } { \int_compare_p:nNn { `#2 } = { `J } } } { \@@_change_case_output:nwn { J } #1 } { #1 #2 } #3 \q_recursion_stop } } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % % \begin{variable}{\l_tl_case_change_math_tl} % The list of token pairs which are treated as math mode and so % not case changed. % \begin{macrocode} \tl_new:N \l_tl_case_change_math_tl %<*package> \tl_set:Nn \l_tl_case_change_math_tl { $ $ \( \) } % % \end{macrocode} % \end{variable} % % \begin{variable}{\l_tl_case_change_exclude_tl} % The list of commands for which an argument is not case changed. 
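%   A usage sketch (assuming the \cs{tl_upper_case:n} interface from this
%   module; the outcome shown is the expected one, not output from a test
%   run): a package could append its own commands to this list so that their
%   arguments survive case changing.
%   \begin{verbatim}
%     \tl_put_right:Nn \l_tl_case_change_exclude_tl { \eqref }
%     \tl_upper_case:n { see~\eqref{eq:main}~and~\ref{sec:intro} }
%     % expected: SEE \eqref{eq:main} AND \ref{sec:intro}
%   \end{verbatim}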
% \begin{macrocode} \tl_new:N \l_tl_case_change_exclude_tl %<*package> \tl_set:Nn \l_tl_case_change_exclude_tl { \cite \ensuremath \label \ref } % % \end{macrocode} % \end{variable} % % \begin{variable}{\l_tl_mixed_case_ignore_tl} % Characters to skip over when finding the first letter in a word to be % mixed cased. % \begin{macrocode} \tl_new:N \l_tl_mixed_case_ignore_tl \tl_set:Nx \l_tl_mixed_case_ignore_tl { ( % ) [ % ] \cs_to_str:N \{ % \} ` - } % \end{macrocode} % \end{variable} % % \subsubsection{Other additions to \pkg{l3tl}} % % \begin{macro}{\tl_rand_item:n, \tl_rand_item:N, \tl_rand_item:c} % Importantly, \cs{tl_item:nn} only evaluates its argument once. % \begin{macrocode} \cs_new:Npn \tl_rand_item:n #1 { \tl_if_blank:nF {#1} { \tl_item:nn {#1} { \int_rand:nn { 1 } { \tl_count:n {#1} } } } } \cs_new:Npn \tl_rand_item:N { \exp_args:No \tl_rand_item:n } \cs_generate_variant:Nn \tl_rand_item:N { c } % \end{macrocode} % \end{macro} % % Some preliminary code is needed for the \cs{tl_range:nnn} family of functions. % % \begin{macro}{\tl_range:Nnn, \tl_range:cnn, \tl_range:nnn} % \begin{macro}{\tl_range_braced:Nnn, \tl_range_braced:cnn, \tl_range_braced:nnn} % \begin{macro} % {\tl_range_unbraced:Nnn, \tl_range_unbraced:cnn, \tl_range_unbraced:nnn} % \begin{macro}[aux] % { % \@@_range:Nnnn, \@@_range:nnnNn, \@@_range:nnNn, \@@_range_skip:w, % \@@_range_braced:w, \@@_range_collect_braced:w, % \@@_range_unbraced:w, \@@_range_collect_unbraced:w, % \@@_range:w, \@@_range_skip_spaces:n, \@@_range_collect:nn, % \@@_range_collect:ff, \@@_range_collect_space:nw, % \@@_range_collect_N:nN, \@@_range_collect_group:nn, % } % To avoid checking for the end of the token list at every step, start % by counting the number $l$ of items and \enquote{normalizing} the % bounds, namely clamping them to the interval $[0,l]$ and dealing % with negative indices. More precisely, \cs{@@_range:nnNn} % receives the number of items to skip at the beginning of the token % list, the index of the last item to keep, a function among % \cs{@@_range:w}, \cs{@@_range_braced:w}, \cs{@@_range_unbraced:w}, % and the token list itself. If nothing should be kept, leave |{}|: % this stops the \texttt{f}-expansion of \cs{tl_head:f} and that % function produces an empty result. Otherwise, repeatedly call % \cs{@@_range_skip:w} to delete |#1| items from the input stream (the % extra brace group avoids an off-by-one shift). For the braced % version, \cs{@@_range_braced:w} sets up % \cs{@@_range_collect_braced:w} which stores items one by one in an % argument after the semicolon. The unbraced version is almost % identical. The version preserving braces and spaces starts by % deleting spaces before the argument to avoid collecting them, and % sets up \cs{@@_range_collect:nn} with a first argument of the form % |{| \Arg{collected} \meta{tokens} |}|, whose head is the collected % tokens and whose tail is what remains of the original token list. % This form makes it easier to move tokens to the \meta{collected} % tokens. Depending on the first token of the tail, either just move % it (if it is a space) or also decrement the number of items left to % find. Eventually, the result is a brace group followed by the rest % of the token list, and \cs{tl_head:f} cleans up and gives the result % in \cs{exp_not:n}.
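%   An informal illustration of the intended results (worked out from the
%   description above, not from a test run):
%   \begin{verbatim}
%     \tl_range:nnn          { a {bc} d e } {  2 } {  3 } % -> {bc}d
%     \tl_range_braced:nnn   { a {bc} d e } {  2 } {  3 } % -> {bc}{d}
%     \tl_range_unbraced:nnn { a {bc} d e } {  2 } {  3 } % -> bcd
%     \tl_range:nnn          { a {bc} d e } { -2 } { -1 } % -> de
%   \end{verbatim}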
% \begin{macrocode} \cs_new:Npn \tl_range:Nnn { \exp_args:No \tl_range:nnn } \cs_generate_variant:Nn \tl_range:Nnn { c } \cs_new:Npn \tl_range:nnn { \@@_range:Nnnn \@@_range:w } \cs_new:Npn \tl_range_braced:Nnn { \exp_args:No \tl_range_braced:nnn } \cs_generate_variant:Nn \tl_range_braced:Nnn { c } \cs_new:Npn \tl_range_braced:nnn { \@@_range:Nnnn \@@_range_braced:w } \cs_new:Npn \tl_range_unbraced:Nnn { \exp_args:No \tl_range_unbraced:nnn } \cs_generate_variant:Nn \tl_range_unbraced:Nnn { c } \cs_new:Npn \tl_range_unbraced:nnn { \@@_range:Nnnn \@@_range_unbraced:w } \cs_new:Npn \@@_range:Nnnn #1#2#3#4 { \tl_head:f { \exp_args:Nf \@@_range:nnnNn { \tl_count:n {#2} } {#3} {#4} #1 {#2} } } \cs_new:Npn \@@_range:nnnNn #1#2#3 { \exp_args:Nff \@@_range:nnNn { \exp_args:Nf \@@_range_normalize:nn { \int_eval:n { #2 - 1 } } {#1} } { \exp_args:Nf \@@_range_normalize:nn { \int_eval:n {#3} } {#1} } } \cs_new:Npn \@@_range:nnNn #1#2#3#4 { \if_int_compare:w #2 > #1 \exp_stop_f: \else: \exp_after:wN { \exp_after:wN } \fi: \exp_after:wN #3 \__int_value:w \__int_eval:w #2 - #1 \exp_after:wN ; \exp_after:wN { \exp:w \@@_range_skip:w #1 ; { } #4 } } \cs_new:Npn \@@_range_skip:w #1 ; #2 { \if_int_compare:w #1 > 0 \exp_stop_f: \exp_after:wN \@@_range_skip:w \__int_value:w \__int_eval:w #1 - 1 \exp_after:wN ; \else: \exp_after:wN \exp_end: \fi: } \cs_new:Npn \@@_range_braced:w #1 ; #2 { \@@_range_collect_braced:w #1 ; { } #2 } \cs_new:Npn \@@_range_unbraced:w #1 ; #2 { \@@_range_collect_unbraced:w #1 ; { } #2 } \cs_new:Npn \@@_range_collect_braced:w #1 ; #2#3 { \if_int_compare:w #1 > 1 \exp_stop_f: \exp_after:wN \@@_range_collect_braced:w \__int_value:w \__int_eval:w #1 - 1 \exp_after:wN ; \fi: { #2 {#3} } } \cs_new:Npn \@@_range_collect_unbraced:w #1 ; #2#3 { \if_int_compare:w #1 > 1 \exp_stop_f: \exp_after:wN \@@_range_collect_unbraced:w \__int_value:w \__int_eval:w #1 - 1 \exp_after:wN ; \fi: { #2 #3 } } \cs_new:Npn \@@_range:w #1 ; #2 { \exp_args:Nf \@@_range_collect:nn { \@@_range_skip_spaces:n {#2} } {#1} } \cs_new:Npn \@@_range_skip_spaces:n #1 { \tl_if_head_is_space:nTF {#1} { \exp_args:Nf \@@_range_skip_spaces:n {#1} } { { } #1 } } \cs_new:Npn \@@_range_collect:nn #1#2 { \int_compare:nNnTF {#2} = 0 {#1} { \exp_args:No \tl_if_head_is_space:nTF { \use_none:n #1 } { \exp_args:Nf \@@_range_collect:nn { \@@_range_collect_space:nw #1 } {#2} } { \@@_range_collect:ff { \exp_args:No \tl_if_head_is_N_type:nTF { \use_none:n #1 } { \@@_range_collect_N:nN } { \@@_range_collect_group:nn } #1 } { \int_eval:n { #2 - 1 } } } } } \cs_new:Npn \@@_range_collect_space:nw #1 ~ { { #1 ~ } } \cs_new:Npn \@@_range_collect_N:nN #1#2 { { #1 #2 } } \cs_new:Npn \@@_range_collect_group:nn #1#2 { { #1 {#2} } } \cs_generate_variant:Nn \@@_range_collect:nn { ff } % \end{macrocode} % \end{macro} % \end{macro} % \end{macro} % \end{macro} % % \begin{macro}[EXP, aux]{\@@_range_normalize:nn} % This function converts an \meta{index} argument into an explicit % position in the token list (a result of $0$ denoting \enquote{out of % bounds}). Expects two explicit integer arguments: the \meta{index} % |#1| and the string count~|#2|. If |#1| is negative, replace it by % $|#1| + |#2| + 1$, then limit to the range $[0, |#2|]$. 
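%   For example (hand-worked from these rules), with a count of $5$: an index
%   of $2$ stays $2$, $-1$ normalizes to $5$, $-7$ gives $0$ (out of bounds),
%   and $8$ is clamped to $5$.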
% \begin{macrocode} \cs_new:Npn \@@_range_normalize:nn #1#2 { \int_eval:n { \if_int_compare:w #1 < 0 \exp_stop_f: \if_int_compare:w #1 < -#2 \exp_stop_f: 0 \else: #1 + #2 + 1 \fi: \else: \if_int_compare:w #1 < #2 \exp_stop_f: #1 \else: #2 \fi: \fi: } } % \end{macrocode} % \end{macro} % % \subsection{Additions to \pkg{l3token}} % % \begin{variable}{\c_catcode_active_space_tl} % While \cs{__char_generate:nn} can produce active characters in some % engines, it cannot do so in general. It would be possible to simply change % the catcode of space, but then the code would need to avoid all % spaces, making it quite unreadable. Instead we use the primitive % \cs{tex_lowercase:D} trick. % \begin{macrocode} \group_begin: \char_set_catcode_active:N * \char_set_lccode:nn { `* } { `\ } \tex_lowercase:D { \tl_const:Nn \c_catcode_active_space_tl { * } } \group_end: % \end{macrocode} % \end{variable} % % \begin{macrocode} %<@@=peek> % \end{macrocode} % % \begin{macro}[TF]{\peek_N_type:} % \begin{macro}[aux] % {\@@_execute_branches_N_type:, \@@_N_type:w, \@@_N_type_aux:nnw} % All tokens are \texttt{N}-type tokens, except in four cases: % begin-group tokens, end-group tokens, space tokens with character % code~$32$, and outer tokens. Since \cs{l_peek_token} might be % outer, we cannot use the convenient \cs{bool_if:nTF} function, and % must resort to the old trick of using \tn{ifodd} to expand a set of % tests. The \texttt{false} branch of this test is taken if the token % is one of the first three kinds of non-\texttt{N}-type tokens % (explicit or implicit), thus we call \cs{@@_false:w}. In the % \texttt{true} branch, we must detect outer tokens, without impacting % performance too much for non-outer tokens. The first filter is to % search for \texttt{outer} in the \tn{meaning} of \cs{l_peek_token}. % If that is absent, \cs{use_none_delimit_by_q_stop:w} cleans up, and % we call \cs{@@_true:w}. Otherwise, the token can be a non-outer % macro or a primitive mark whose parameter or replacement text % contains \texttt{outer}, it can be the primitive \tn{outer}, or it % can be an outer token. Macros and marks would have \texttt{ma} in % the part before the first occurrence of \texttt{outer}; the meaning % of \tn{outer} has nothing after \texttt{outer}, in contrast to outer % macros; and that covers all cases, calling \cs{@@_true:w} or % \cs{@@_false:w} as appropriate. Here, there is no \meta{search % token}, so we feed a dummy \cs{scan_stop:} to the % \cs{@@_token_generic:NNTF} function.
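%   A brief usage sketch (assuming the \cs{peek_N_type:TF} conditional
%   documented in the interface part of this module; the messages simply
%   report what was seen):
%   \begin{verbatim}
%     \peek_N_type:TF
%       { \iow_term:x { N-type~token~ahead:~\token_to_str:N \l_peek_token } }
%       { \iow_term:n { Next~item~is~a~group,~an~explicit~space~or~outer } }
%   \end{verbatim}
%   As with the other peek functions, the token which follows is only
%   inspected and is left in the input stream for later processing.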
% \begin{macrocode} \group_begin: \cs_set_protected:Npn \@@_tmp:w #1 \q_stop { \cs_new_protected:Npn \@@_execute_branches_N_type: { \if_int_odd:w \if_catcode:w \exp_not:N \l_peek_token { 0 \exp_stop_f: \fi: \if_catcode:w \exp_not:N \l_peek_token } 0 \exp_stop_f: \fi: \if_meaning:w \l_peek_token \c_space_token 0 \exp_stop_f: \fi: 1 \exp_stop_f: \exp_after:wN \@@_N_type:w \token_to_meaning:N \l_peek_token \q_mark \@@_N_type_aux:nnw #1 \q_mark \use_none_delimit_by_q_stop:w \q_stop \exp_after:wN \@@_true:w \else: \exp_after:wN \@@_false:w \fi: } \cs_new_protected:Npn \@@_N_type:w ##1 #1 ##2 \q_mark ##3 { ##3 {##1} {##2} } } \exp_after:wN \@@_tmp:w \tl_to_str:n { outer } \q_stop \group_end: \cs_new_protected:Npn \@@_N_type_aux:nnw #1 #2 #3 \fi: { \fi: \tl_if_in:noTF {#1} { \tl_to_str:n {ma} } { \@@_true:w } { \tl_if_empty:nTF {#2} { \@@_true:w } { \@@_false:w } } } \cs_new_protected:Npn \peek_N_type:TF { \@@_token_generic:NNTF \@@_execute_branches_N_type: \scan_stop: } \cs_new_protected:Npn \peek_N_type:T { \@@_token_generic:NNT \@@_execute_branches_N_type: \scan_stop: } \cs_new_protected:Npn \peek_N_type:F { \@@_token_generic:NNF \@@_execute_branches_N_type: \scan_stop: } % \end{macrocode} % \end{macro} % \end{macro} % % \begin{macrocode} % % \end{macrocode} % % \end{implementation} % % \PrintIndex