Diffstat (limited to 'Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx')
-rw-r--r--  Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx  768
1 file changed, 768 insertions, 0 deletions
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx
new file mode 100644
index 00000000000..8d24cae8f46
--- /dev/null
+++ b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx
@@ -0,0 +1,768 @@
+% \iffalse meta-comment
+%
+%% File: l3unicode-data.dtx Copyright (C) 2014 The LaTeX3 Project
+%%
+%% It may be distributed and/or modified under the conditions of the
+%% LaTeX Project Public License (LPPL), either version 1.3c of this
+%% license or (at your option) any later version. The latest version
+%% of this license is in the file
+%%
+%% http://www.latex-project.org/lppl.txt
+%%
+%% This file is part of the "l3kernel bundle" (The Work in LPPL)
+%% and all files in that bundle must be distributed together.
+%%
+%% The released version of this bundle is available from CTAN.
+%%
+%% -----------------------------------------------------------------------
+%%
+%% The development version of the bundle can be found at
+%%
+%% http://www.latex-project.org/svnroot/experimental/trunk/
+%%
+%% for those people who are interested.
+%%
+%%%%%%%%%%%
+%% NOTE: %%
+%%%%%%%%%%%
+%%
+%% Snapshots taken from the repository represent work in progress and may
+%% not work or may contain conflicting material! We therefore ask
+%% people _not_ to put them into distributions, archives, etc. without
+%% prior consultation with the LaTeX Project Team.
+%%
+%% -----------------------------------------------------------------------
+%%
+%
+% Both the driver and the script need \pkg{expl3}: as the script runs under
+% plain \TeX{}, the package is loaded in generic mode.
+%<*driver|script>
+\input expl3-generic\relax
+\GetIdInfo$Id: l3unicode-data.dtx 5166 2014-06-30 07:48:12Z joseph $
+ {L3 Case data script}
+%</driver|script>
+%
+% The same approach is used here as in \pkg{DocStrip}: if \cs{documentclass}
+% is undefined then skip the driver, allowing the file to be used directly.
+% This works as the \cs{fi} is only seen if \LaTeX{} is not in use. The odd
+% \cs{jobname} business allows the extraction to work with \LaTeX{} provided
+% an appropriate \texttt{.ins} file is set up.
+%<*gobble>
+\ifx\jobname\relax
+ \let\documentclass\undefined
+\fi
+\begingroup\expandafter\expandafter\expandafter\endgroup
+\expandafter\ifx\csname documentclass\endcsname\relax
+\else
+ \csname fi\endcsname
+%</gobble>
+%
+%<*driver>
+ \documentclass[full]{l3doc}
+ \begin{document}
+ \DocInput{\jobname.dtx}
+ \end{document}
+%<*gobble>
+\fi
+%</gobble>
+%</driver>
+% \fi
+%
+% \title{^^A
+% The \textsf{l3unicode-data} script\\Unicode data script^^A
+% \thanks{This file describes v\ExplFileVersion,
+% last revised \ExplFileDate.}^^A
+% }
+%
+% \author{^^A
+% The \LaTeX3 Project\thanks
+% {^^A
+% E-mail:
+% \href{mailto:latex-team@latex-project.org}
+% {latex-team@latex-project.org}^^A
+% }^^A
+% }
+%
+% \date{Released \ExplFileDate}
+%
+% \maketitle
+%
+% \begin{documentation}
+%
+% The Unicode Consortium provide comprehensive data on the standard
+% mapping of characters (more formally, codepoints) when carrying
+% out various case-changing functions:
+% \begin{itemize}
+% \item Uppercasing
+% \item Lowercasing
+% \item Titlecasing (used for the first codepoint of a word:
+% may be subtly different to uppercasing)
+% \item Folding (removing case for comparison purposes: close
+% but not identical to lowercasing)
+% \end{itemize}
+% This data is available in a machine-readable format, such that many of
+% the basics of case changing can be set up on an automated basis.
+%
+% This file provides a script which will read the raw Unicode files
+% and convert the material to a form which can be used by \pkg{expl3}.
+% As the conversions here cover the entire Unicode range, this cannot
+% be carried out by pdf\TeX{}: at present, the script works only
+% with Lua\TeX{}.
+%
+% Note that this file is designed such that running \LaTeX{} will typeset
+% the documentation using any engine: the script will be run if the file
+% is processed by plain \TeX{}, specifically the |luatex| command.
+% This process requires the files |CaseFolding.txt|, |SpecialCasing.txt|
+% and |UnicodeData.txt| from the \href{http://www.unicode.org/}^^A
+% {Unicode Consortium website}.
+%
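+% For example, with the three data files in the working directory, a
+% command along the lines of
+% \begin{verbatim}
+%   luatex l3unicode-data.dtx
+% \end{verbatim}
+% will run the script and write the output file described next.
+%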
+% The file produced by this script, |l3unicode-data.def|, contains
+% definitions for all of the data structures used by \pkg{expl3}
+% for Unicode transformations. It also provides alternative
+% definitions for use with pdf\TeX{}.
+%
+% \end{documentation}
+%
+% \begin{implementation}
+%
+% \section{\pkg{l3unicode-data} Implementation}
+%
+% \begin{macrocode}
+%<*script>
+% \end{macrocode}
+%
+% The driver part has loaded \pkg{expl3}: turn on the syntax environment.
+% \begin{macrocode}
+\ExplSyntaxOn
+% \end{macrocode}
+%
+% \subsection{Setup}
+%
+% \begin{macro}{\str_case_x:nvF}
+% One handy variant, needed below when looking up the case exceptions.
+% \begin{macrocode}
+\cs_generate_variant:Nn \str_case_x:nnF { nv }
+% \end{macrocode}
+% \end{macro}
+%
+% The first step is to generate a series of temporary variables to
+% contain the data as it is extracted. A nested loop gives two sets of
+% $100$ token lists each, \cs{l__unicode_a_0_0_tl} through
+% \cs{l__unicode_b_9_9_tl}: two sets are needed for the upper/lower case
+% part of the script.
+% \begin{macrocode}
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \tl_new:c { l__unicode_a_ #1 _ ##1 _tl }
+ \tl_new:c { l__unicode_b_ #1 _ ##1 _tl }
+ }
+ }
+% \end{macrocode}
+%
+% \begin{variable}{\g__unicode_data_ior}
+% \begin{variable}{\g__unicode_result_iow}
+% Streams for reading and writing the data.
+% \begin{macrocode}
+\ior_new:N \g__unicode_data_ior
+\iow_new:N \g__unicode_result_iow
+% \end{macrocode}
+% \end{variable}
+% \end{variable}
+%
+% Open the output data file for writing.
+% \begin{macrocode}
+\iow_open:Nn \g__unicode_result_iow { l3unicode-data.def }
+% \end{macrocode}
+%
+% Write an identification line to the file: the version data here can't be
+% set automatically and so will need to be edited by hand. As such, the data
+% here is the standard SVN filler.
+% \begin{macrocode}
+\iow_now:Nx \g__unicode_result_iow
+ {
+ \exp_not:N \ProvidesExplFile
+ { l3unicode-data.def } ~ { 0000/00/00 } ~ { -1 } ~ { L3~Unicode~data }
+ }
+% \end{macrocode}
+%
+% \subsection{Verbatim copying}
+%
+% \begin{macro}[int]{\__unicode_verb:}
+% \begin{macro}[aux]{\__unicode_verb_auxi:w, \__unicode_verb_auxii:w}
+% \begin{macro}[int]{\__unicode_verb_end:}
+% There are various bits of code which need to be transferred into the data
+% file from the source. This has to take place as part of the general writing
+% process so needs to be done without using DocStrip. That is achieved by
+% having a verbatim-copy mechanism available: this is all set up here.
+% As the line containing the \cs{__unicode_verb:} function will end up with a
+% (category code $12$) space at the start, there is a dedicated function to
+% clear this part up.
+% \begin{macrocode}
+\group_begin:
+ \char_set_catcode_other:n { `\^^M }%
+ \cs_new_protected:Npn \__unicode_verb:%
+ {%
+ \group_begin:%
+ \char_set_catcode_other:n { `\^^M }%
+ \tex_endlinechar:D = `\^^M%
+ \clist_map_inline:nn%
+ { \\ , \{ , \} , \# , \^ , \% , \ }%
+ { \char_set_catcode_other:n { `##1 } }%
+ \__unicode_verb_auxi:w%
+ }%
+ \cs_new_protected:Npn \__unicode_verb_auxi:w#1^^M%
+ {%
+ \exp_after:wN \__unicode_verb_auxii:w \use_none:n #1 ^^M
+ }%
+ \cs_new_protected:Npn \__unicode_verb_auxii:w#1^^M%
+ {%
+ \str_if_eq_x:nnTF {#1} { \token_to_str:N \__unicode_verb_end: }%
+ { \group_end: }%
+ {%
+ \iow_now:Nn \g__unicode_result_iow {#1}%
+ \__unicode_verb_auxii:w%
+ }%
+ }%
+\group_end:%
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% \subsection{Shared data}
+%
+% There are some data items which can be stored as numbers rather than as
+% literal UTF-8 chars. These could go into the main source files, but as
+% they conceptually belong with everything else here, including them in the
+% data file makes more sense. They are safe for use with pdf\TeX{} so are
+% given first.
+% \begin{macrocode}
+\__unicode_verb:
+\clist_const:Nn \c__tl_after_final_sigma_clist
+ { 0021 , 0022 , 0029 , 002C , 002E , 003A , 003B , 003F , 005D , 007D }
+\clist_const:Nn \c__tl_mixed_skip_clist
+ { 0028 , 005B , 0060 , 007B }
+\__unicode_verb_end:
+% \end{macrocode}
+%
+% \subsection{pdf\TeX{} support}
+%
+% As pdf\TeX{} does not support UTF-8 input natively, most of the data
+% here will not be useful. Rather than use two separate mechanisms for
+% each function depending on the engine, the system is designed such that
+% \enquote{truncated} data structures are provided for pdf\TeX{}. These
+% are coded here for direct transfer to the |.def| file, which can then
+% abort loading when pdf\TeX{} is in use.
+%
+% The idea here is simple: map over all of the letters of the Latin
+% alphabet and create appropriate token lists, then add all of the rest
+% of the data structures. For case folding, the tokens are all stored as
+% strings. For the lower case letters, a bit of maths ensures that the
+% char code used for indexing always has three digits.
+%
+% After the mapping, the small number of fixed data structures that are
+% used for the special case conversions are created. These are mainly empty,
+% but for cases where a match is possible (as the test char is in the
+% pdf\TeX{} range), no-op data is included (as the \emph{output} would be
+% out-of-range).
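+%
+% As an illustration of the outcome, for |A| and |a| (codes $65$ and $97$)
+% the mapping creates \cs{c__tl_lower_6_5_tl} containing |Aa| and
+% \cs{c__tl_upper_9_7_tl} containing |aA| (the indices follow the
+% modulo-$100$ scheme described below), while \cs{c__str_fold_6_5_tl}
+% holds the string |Aa|.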
+% \begin{macrocode}
+\__unicode_verb:
+\pdftex_if_engine:T
+ {
+ \group_begin:
+ \cs_set_protected:Npn \__unicode_tmp:NN #1#2
+ {
+ \quark_if_recursion_tail_stop:N #1
+ \exp_after:wN \__unicode_tmp:NNNNNNN
+ \tex_number:D \__int_eval:w `#1 \exp_after:wN \__int_eval_end:
+ \tex_number:D \__int_eval:w 100 + `#2 \__int_eval_end:
+ #1 #2
+ \__unicode_tmp:NN
+ }
+ \cs_set_protected:Npn \__unicode_tmp:NNNNNNN #1#2#3#4#5#6#7
+ {
+ \tl_const:cx { c__str_fold_ #1 _ #2 _ tl }
+ { \tl_to_str:n { #6#7 } }
+ \tl_const:cn { c__tl_lower_ #1 _ #2 _ tl } { #6#7 }
+ \tl_const:cn { c__tl_upper_ #4 _ #5 _ tl } { #7#6 }
+ }
+ \__unicode_tmp:NN
+ AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz
+ \q_recursion_tail ? \q_recursion_stop
+ \group_end:
+ \int_step_inline:nnnn { 0 } { 1 } { 9 }
+ {
+ \int_step_inline:nnnn { 0 } { 1 } { 9 }
+ {
+ \tl_if_exist:cF { c__str_fold_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__str_fold_ #1 _ ##1 _ tl } { }
+ }
+ \tl_if_exist:cF { c__tl_lower_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__tl_lower_ #1 _ ##1 _ tl } { }
+ }
+ \tl_if_exist:cF { c__tl_upper_ #1 _ ##1 _ tl }
+ {
+ \tl_const:cn { c__tl_upper_ #1 _ ##1 _ tl } { }
+ }
+ }
+ }
+ \tl_const:Nn \c__tl_mixed_exceptions_tl { }
+ \tl_const:Nn \c__tl_std_sigma_tl { }
+ \tl_const:Nn \c__tl_final_sigma_tl { }
+ \tl_const:Nn \c__tl_accents_lt_tl { }
+ \tl_const:Nn \c__tl_dot_above_tl { }
+ \tl_const:Nn \c__tl_dotless_i_tl { I }
+ \tl_const:Nn \c__tl_dotted_I_tl { i }
+ \tex_endinput:D
+ }
+\__unicode_verb_end:
+% \end{macrocode}
+%
+% \subsection{Case folding}
+%
+% \begin{macro}{\__unicode_parse_line:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxi:Nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxv:wnn}
+% The format of |CaseFolding.txt| allows for both blank lines and
+% comment lines starting with |#|. Thus the first two steps of
+% the parsing routine are set up to deal with these cases.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line:w #1 \q_stop
+ {
+ \tl_if_blank:nF {#1}
+ { \__unicode_parse_line_auxi:Nw #1 \q_stop }
+ }
+\cs_new_protected:Npn \__unicode_parse_line_auxi:Nw #1#2 \q_stop
+ {
+ \str_if_eq_x:nnF { \exp_not:n {#1} } { \cs_to_str:N \# }
+ { \__unicode_parse_line_auxii:w #1#2 \q_stop }
+ }
+% \end{macrocode}
+% For lines actually containing data, there will be four entries separated by
+% |;| tokens: the hex code for the char itself, which folding regimes
+% the line applies to, the hex code(s) for the folded char and a
+% description. Of these, we need all but the last one. In the simple
+% case of common foldings, the mapping is one--one and this information
+% can be passed directly to the next stage. We also handle the full
+% mappings (dropping simple ones plus any Turkic variation): an additional
+% step is needed to parse this case.
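+% A few typical lines from the file (status |C| for common, |F| for full,
+% |S| for simple and |T| for Turkic) look like
+% \begin{verbatim}
+%   0041; C; 0061; # LATIN CAPITAL LETTER A
+%   00DF; F; 0073 0073; # LATIN SMALL LETTER SHARP S
+%   1E9E; S; 00DF; # LATIN CAPITAL LETTER SHARP S
+%   0049; T; 0131; # LATIN CAPITAL LETTER I
+% \end{verbatim}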
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxii:w #1 ;~ #2 ; #3 ; #4 \q_stop
+ {
+ \str_if_eq:nnTF {#2} { C }
+ {
+ \__unicode_parse_line_auxiv:nn
+ {#1} { \luatex_Uchar:D "#3 \c_space_tl }
+ }
+ {
+ \str_if_eq:nnT {#2} { F }
+ { \__unicode_parse_line_auxiii:nw {#1} #3 ~ \q_stop }
+ }
+ }
+% \end{macrocode}
+% Full folding produces two or three Unicode code points from a single
+% input char. To deal with this, we split the relevant part of the input
+% and check how many chars to generate. The entire folding output is
+% braced so that when read back \TeX{} will see this as a group in our
+% replacement code.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiii:nw #1 ~ #2 ~ #3 ~ #4 \q_stop
+ {
+ \__unicode_parse_line_auxiv:nn
+ {#1}
+ {
+ {
+ \luatex_Uchar:D "#2 \c_space_tl
+ \luatex_Uchar:D "#3 \c_space_tl
+ \tl_if_empty:nF {#4}
+ { \luatex_Uchar:D "#4 \c_space_tl }
+ }
+ }
+ }
+% \end{macrocode}
+% The final stage of extracting the mapping is to split the various cases
+% up such that comparison and replacement does not need to check every
+% character. That is done by taking the charcode modulo $100$: this splits
+% the list of chars into $100$ much shorter lists. With that done, the
+% input and output chars are added to the appropriate token lists.
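+% For example, |0130| is $304$ in decimal: adding $1000000$ pads this to
+% exactly seven digits, $1000304$, so the final two digits always give the
+% code modulo $100$ and the char is stored in \cs{l__unicode_a_0_4_tl}.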
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nn #1#2
+ {
+ \exp_last_unbraced:Nf \__unicode_parse_line_auxv:wnn
+ { \int_eval:n { 1000000 + "#1 } } \q_stop
+ {#1} {#2}
+ }
+% \end{macrocode}
+% As the input is read in string mode, there is a need for a rescan
+% here since \tn{Uchar} requires letters for hexadecimal digits
+% beyond~$9$.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxv:wnn
+ #1#2#3#4#5#6#7 \q_stop #8#9
+ {
+ \tl_rescan:nn
+ { }
+ {
+ \tl_put_right:cx { l__unicode_a_ #6 _ #7 _tl }
+ {
+ \luatex_Uchar:D "#8 \c_space_tl
+ #9
+ }
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% The main loop can now take place, reading the source data and saving all of
+% the information in the token list array.
+% \begin{macrocode}
+\ior_open:Nn \g__unicode_data_ior { CaseFolding.txt }
+\ior_str_map_inline:Nn \g__unicode_data_ior
+ { \__unicode_parse_line:w #1 \q_stop }
+\ior_close:N \g__unicode_data_ior
+% \end{macrocode}
+%
+% \begin{macro}[aux]{\__str_tmp:NNn}
+% \begin{macro}[aux, EXP]{\__str_tmp:Nw}
+% To ensure that the output of the case-folding function is a string, all of
+% the stored results need to be detokenized. That is done by including a loop
+% in the |.def| file which will make the necessary change. Setting that up
+% requires a slightly complicated bit of secondary work: the functions which
+% do the job are written into the |.def| file itself, using a group to trap
+% the temporary code. There is also a test in the following so that the
+% result only has braces around items which need them: this is a slight
+% performance tweak when the code actually gets used. Notice that everything
+% in the token list is detokenized except for the |{| and |}| chars needed
+% for grouping: if the search part of the list is not detokenized there are
+% issues with \XeTeX{} and chars beyond |0xFFFF| (probably a bug, but one
+% that can be worked around!).
+% \begin{macrocode}
+\__unicode_verb:
+\group_begin:
+ \cs_set_protected:Npn \__str_tmp:NNn #1#2#3
+ {
+ \tl_const:cx { c__str_fold_#1_#2_tl }
+ { \__str_tmp:Nw #3 \q_recursion_tail { } \q_recursion_stop }
+ }
+ \cs_set:Npn \__str_tmp:Nw #1#2
+ {
+ \quark_if_recursion_tail_stop:N #1
+ \tl_to_str:N #1
+ \tl_if_blank:oT { \use_none:n #2 }
+ { \use:n }
+ { \tl_to_str:n {#2} }
+ \__str_tmp:Nw
+ }
+\__unicode_verb_end:
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% The write loop is simple: map over the array and write everything to the
+% output. The saved data is also cleared to save a second loop later on when
+% dealing with case mappings. The group used for the temporary stuff in the
+% |.def| file is also closed at this point.
+% \begin{macrocode}
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \c_space_tl \c_space_tl
+ \exp_not:N \__str_tmp:NNn #1 ~ ##1 ~
+ { ~ \exp_not:v { l__unicode_a_ #1 _ ##1 _tl } ~ }
+ }
+ \tl_clear:c { l__unicode_a_ #1 _ ##1 _tl }
+ }
+ }
+\iow_now:Nn \g__unicode_result_iow { \group_end: }
+% \end{macrocode}
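+%
+% Schematically (the exact content depends on the Unicode data), each of
+% the lines written to the |.def| file looks like
+% \begin{verbatim}
+%   \__str_tmp:NNn 6 5 { Aa ... }
+% \end{verbatim}
+% which \cs{__str_tmp:NNn} turns into \cs{c__str_fold_6_5_tl} holding the
+% detokenized search/replacement pairs.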
+%
+% \subsection{Upper/lower/title casing}
+%
+% Unlike the case folding data, the case changing data is split across two
+% files, which we need to combine into a single data structure. There are
+% therefore two stages to this process: first read the exceptions, then read
+% the main data and combine it with them.
+%
+% \begin{variable}^^A
+% {
+% \l__unicode_lower_exceptions_tl,
+% \l__unicode_title_exceptions_tl,
+% \l__unicode_upper_exceptions_tl
+% }
+% There are special cases for lower, title and uppercase changes: these
+% all get read into appropriate lists. Exceptions could be saved as
+% property lists but that would make life a bit more complex with the
+% titlecase exceptions and wouldn't really gain much (this is after all
+% \enquote{disposable} data). Note that for our purposes, what Unicode call
+% title case is stored in the output as \enquote{mixed} case.
+% \begin{macrocode}
+\tl_new:N \l__unicode_lower_exceptions_tl
+\tl_new:N \l__unicode_title_exceptions_tl
+\tl_new:N \l__unicode_upper_exceptions_tl
+% \end{macrocode}
+% \end{variable}
+%
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:nnn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nwn}
+% \begin{macro}[aux]{\__unicode_brace:n}
+% The format of the special cases data is similar to that of the folding
+% data: as such only some of the parsing is altered. This file has four
+% important data fields: the char and its lower, title and uppercase
+% equivalents. As most of the titlecase exceptions are also uppercase
+% exceptions, a test is made so that we are only storing truly useful
+% exceptions for titlecase.
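+% Typical lines from |SpecialCasing.txt| (the fields are the char code and
+% its lower, title and uppercase mappings) look like
+% \begin{verbatim}
+%   00DF; 00DF; 0053 0073; 0053 0053; # LATIN SMALL LETTER SHARP S
+%   0130; 0069 0307; 0130; 0130; # LATIN CAPITAL LETTER I WITH DOT ABOVE
+% \end{verbatim}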
+% \begin{macrocode}
+\cs_set_protected:Npn \__unicode_parse_line_auxii:w
+ #1 ;~ #2 ;~ #3 ;~ #4 ; #5 \q_stop
+ {
+ \__unicode_parse_line_auxiii:nnn {#1} {#2} { lower }
+ \str_if_eq:nnF {#3} {#4}
+ { \__unicode_parse_line_auxiii:nnn {#1} {#3} { title } }
+ \__unicode_parse_line_auxiii:nnn {#1} {#4} { upper }
+ }
+% \end{macrocode}
+% Unlike the folding data, the special cases file always has a value for
+% each of the three entries. Some of these contain only one hex number.
+% After a bit of a trick to allow for ease of parsing, we check if there
+% are at least two numbers for the case-changed char. If there are, then
+% save the exception. If not, then the value will also be in the main
+% table and we can ignore it here. There is also a test to see if the
+% current value is a titlecase exception: titlecase exceptions do not
+% need the extra braces.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiii:nnn #1#2#3
+ { \use:n { \__unicode_parse_line_auxiv:nwn {#1} #2 ~ } ~ \q_stop {#3} }
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nwn #1#2 ~ #3 ~ #4 \q_stop #5
+ {
+ \tl_if_empty:nF {#3}
+ {
+ \str_if_eq:nnTF {#5} { title }
+ { \cs_set_eq:NN \__unicode_brace:n \use:n }
+ { \cs_set:Npn \__unicode_brace:n ##1 { { ##1 } } }
+ \tl_rescan:nn
+ { }
+ {
+ \tl_put_right:cx { l__unicode_ #5 _exceptions_tl }
+ {
+ \luatex_Uchar:D "#1 \c_space_tl
+ {
+ \__unicode_brace:n
+ {
+ \luatex_Uchar:D "#2 \c_space_tl
+ \luatex_Uchar:D "#3 \c_space_tl
+ \tl_if_empty:nF {#4}
+ { \luatex_Uchar:D "#4 \c_space_tl }
+ }
+ }
+ }
+ }
+ }
+ }
+\cs_new_eq:NN \__unicode_brace:n \use:n
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% Parsing set up, read the special cases file. The input contains both
+% general special cases and ones dependent on context. We only want to read
+% the former, so there is a check for the line that splits the two:
+% at that point, simply stop parsing.
+% \begin{macrocode}
+\ior_open:Nn \g__unicode_data_ior { SpecialCasing.txt }
+\ior_str_map_inline:Nn \g__unicode_data_ior
+ {
+ \str_if_eq_x:nnTF {#1} { \cs_to_str:N \# \c_space_tl Conditional~Mappings }
+ { \ior_map_break: }
+ { \__unicode_parse_line:w #1 \q_stop }
+ }
+\ior_close:N \g__unicode_data_ior
+% \end{macrocode}
+%
+% \begin{macro}{\__unicode_parse_line:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxi:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:nnNn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:wnnNn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nnNNNn}
+% Much the same as for the case folding set up: parse the lines of
+% data. Here, the lines are longer but always have one--one mappings.
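+% For reference, the |UnicodeData.txt| lines for |A| and |a| read
+% \begin{verbatim}
+%   0041;LATIN CAPITAL LETTER A;Lu;0;L;;;;;N;;;;0061;
+%   0061;LATIN SMALL LETTER A;Ll;0;L;;;;;N;;;0041;;0041
+% \end{verbatim}
+% with the simple upper, lower and titlecase mappings in the last three
+% fields.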
+% \begin{macrocode}
+\cs_set_protected:Npn \__unicode_parse_line:w
+ #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 ; #8 ; #9 ;
+ {
+ \__unicode_parse_line_auxi:w #1 ;
+ }
+% \end{macrocode}
+% With some data items removed, at this stage the hexadecimal
+% representation of the char is |#1|, the upper case char is |#5|,
+% the lower case one |#6| and the title case one |#7|. These may or
+% may not be present, and the upper and titlecase values may be
+% identical. Where there are values for upper/lowercase, they are
+% saved into the arrays. For titlecase, since the number of exceptions
+% is small, they are added to the existing list of exceptions we've
+% already started.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxi:w
+ #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 \q_stop
+ {
+ \tl_if_empty:nF {#5}
+ {
+ \__unicode_parse_line_auxii:nnNn {#1} {#5} a { upper }
+ \str_if_eq:nnF {#5} {#7}
+ {
+ \tl_put_right:Nx \l__unicode_title_exceptions_tl
+ {
+ \luatex_Uchar:D "#1 \c_space_tl
+ \luatex_Uchar:D "#7 \c_space_tl
+ }
+ }
+ }
+ \tl_if_empty:nF {#6}
+ { \__unicode_parse_line_auxii:nnNn {#1} {#6} b { lower } }
+ }
+% \end{macrocode}
+% The array structure here is the same as before, except now there
+% are two separate ones to manage.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxii:nnNn #1#2#3
+ {
+ \exp_last_unbraced:Nf \__unicode_parse_line_auxiii:wnnNn
+ { \int_eval:n { 1000000 + "#1 } } \q_stop
+ {#1} {#2} #3
+ }
+\cs_new_protected:Npn \__unicode_parse_line_auxiii:wnnNn
+ #1#2#3#4#5#6#7 \q_stop #8#9
+ { \__unicode_parse_line_auxiv:nnNNNn {#8} {#9} #6 #7 }
+% \end{macrocode}
+% The final test required here is to look for the special cases and, where
+% appropriate, use those rather than the one--one mapping value. For example,
+% |0130| appears in the lowercase exceptions with the full mapping
+% |0069 0307|, which takes precedence over the simple mapping |0069| from
+% the main data file.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nnNNNn #1#2#3#4#5#6
+ {
+ \tl_rescan:nn
+ { }
+ {
+ \tl_put_right:cx { l__unicode_ #5 _ #3 _ #4 _tl }
+ {
+ \luatex_Uchar:D "#1 \c_space_tl
+ \str_case_x:nvF
+ { \luatex_Uchar:D "#1 \c_space_tl }
+ { l__unicode_ #6 _exceptions_tl }
+ { \luatex_Uchar:D "#2 \c_space_tl }
+ }
+ }
+ }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% Everything is set up and so the read loop can take place: this time
+% there are no comment chars to worry about and so normal category
+% codes apply.
+% \begin{macrocode}
+\ior_open:Nn \g__unicode_data_ior { UnicodeData.txt }
+\ior_map_inline:Nn \g__unicode_data_ior
+ { \__unicode_parse_line:w #1 \q_stop }
+\ior_close:N \g__unicode_data_ior
+% \end{macrocode}
+%
+% Saving the data uses a single file, with the uppercase array
+% followed by the lowercase one and finally the titlecase exceptions.
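+% Schematically, each written line looks like
+% \begin{verbatim}
+%   \tl_const:cn { c__tl_upper_0_0_tl } { ... }
+% \end{verbatim}
+% where the braced content holds the input/output char pairs for that
+% bucket.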
+% \begin{macrocode}
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:cn
+ { ~ c__tl_upper_ #1 _ ##1 _tl ~ } ~
+ { ~ \exp_not:v { l__unicode_a_ #1 _ ##1 _tl } ~ }
+ }
+ }
+ }
+\tl_map_inline:nn { 0123456789 }
+ {
+ \tl_map_inline:nn { 0123456789 }
+ {
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:cn
+ { ~ c__tl_lower_ #1 _ ##1 _tl ~ } ~
+ { ~ \exp_not:v { l__unicode_b_ #1 _ ##1 _tl } ~ }
+ }
+ }
+ }
+\iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:Nn
+ \exp_not:N \c__tl_mixed_exceptions_tl \c_space_tl
+ { ~ \exp_not:V \l__unicode_title_exceptions_tl \c_space_tl }
+ }
+% \end{macrocode}
+%
+% Data for the special cases is now needed. This is mainly a series of simple
+% token lists with appropriate names and content, but there is also one place
+% where a small mapping list is required.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_special_case:nn #1#2
+ {
+ \quark_if_recursion_tail_stop:n {#1}
+ \iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:Nn \exp_not:c { c__tl_ #1 _tl } { \luatex_Uchar:D "#2 }
+ }
+ \__unicode_special_case:nn
+ }
+\__unicode_special_case:nn
+ { std_sigma } { 03C3 }
+ { final_sigma } { 03C2 }
+ { dotless_i } { 0131 }
+ { dot_above } { 0307 }
+ { dotted_I } { 0130 }
+ \q_recursion_tail { }
+ \q_recursion_stop
+\iow_now:Nx \g__unicode_result_iow
+ {
+ \tl_const:Nn \exp_not:N \c__tl_accents_lt_tl
+ {
+ \luatex_Uchar:D "00CC
+ { \luatex_Uchar:D "0069 \luatex_Uchar:D "0307 \luatex_Uchar:D "0300 }
+ \luatex_Uchar:D "00CD
+ { \luatex_Uchar:D "0069 \luatex_Uchar:D "0307 \luatex_Uchar:D "0301 }
+ \luatex_Uchar:D "0128
+ { \luatex_Uchar:D "0069 \luatex_Uchar:D "0307 \luatex_Uchar:D "0303 }
+ }
+ }
+% \end{macrocode}
+%
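+% The lines written by this function take the form (with the literal UTF-8
+% char that \tn{Uchar} expands to)
+% \begin{verbatim}
+%   \tl_const:Nn \c__tl_std_sigma_tl {σ}
+% \end{verbatim}
+%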
+% Job done, end the \TeX{} run.
+% \begin{macrocode}
+\iow_close:N \g__unicode_result_iow
+\tex_end:D
+% \end{macrocode}
+%
+% \begin{macrocode}
+%</script>
+% \end{macrocode}
+%
+% \end{implementation}
+%
+% \PrintIndex
\ No newline at end of file