Diffstat (limited to 'Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx')
-rw-r--r-- | Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx | 575 |
1 file changed, 326 insertions, 249 deletions
diff --git a/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx
index 8d24cae8f46..ff9b5ee304c 100644
--- a/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx
+++ b/Master/texmf-dist/source/latex/l3kernel/l3unicode-data.dtx
@@ -38,7 +38,7 @@
 % plain \TeX{}, set up in generic mode.
 %<*driver|script>
 \input expl3-generic\relax
-\GetIdInfo$Id: l3unicode-data.dtx 5166 2014-06-30 07:48:12Z joseph $
+\GetIdInfo$Id: l3unicode-data.dtx 5283 2014-08-17 00:52:45Z bruno $
   {L3 Case data script}
 %</driver|script>
 %
@@ -160,6 +160,15 @@
 }
 % \end{macrocode}
 %
+% \begin{variable}{\l__unicode_compat_seq}
+% A sequence to hold the list of compatibility chars currently defined by
+% Unicode. This is needed for both case mapping and case folding (it's
+% defined by information in the master file |UnicodeData.txt|).
+% \begin{macrocode}
+\seq_new:N \l__unicode_compat_seq
+% \end{macrocode}
+% \end{variable}
+%
 % \begin{variable}{\g__unicode_data_ior}
 % \begin{variable}{\g__unicode_result_iow}
 % Streams for reading and writing the data.
@@ -259,7 +268,7 @@
 % of the data structures. For case folding, the tokens are all stored as
 % strings. For the lower case letters, to ensure there are always three
 % digits a bit of maths is used.
-%
+%
 % After the mapping, the small number of fixed data structures that are
 % used for the special case conversions are created. These are mainly empty,
 % but for cases where a match is possible (as the test char is in the \pdfTeX{}
@@ -280,10 +289,10 @@
 }
 \cs_set_protected:Npn \__unicode_tmp:NNNNNNN #1#2#3#4#5#6#7
   {
-    \tl_const:cx { c__str_fold_ #1 _ #2 _ tl }
+    \tl_const:cx { c__str_fold_ #1 _X_ #2 _ tl }
       { \tl_to_str:n { #6#7 } }
-    \tl_const:cn { c__tl_lower_ #1 _ #2 _ tl } { #6#7 }
-    \tl_const:cn { c__tl_upper_ #4 _ #5 _ tl } { #7#6 }
+    \tl_const:cn { c__tl_lower_ #1 _X_ #2 _ tl } { #6#7 }
+    \tl_const:cn { c__tl_upper_ #4 _X_ #5 _ tl } { #7#6 }
   }
 \__unicode_tmp:NN
   AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz
@@ -293,17 +302,17 @@
   {
     \int_step_inline:nnnn { 0 } { 1 } { 9 }
       {
-        \tl_if_exist:cF { c__str_fold_ #1 _ ##1 _ tl }
+        \tl_if_exist:cF { c__str_fold_ #1 _X_ ##1 _ tl }
           {
-            \tl_const:cn { c__str_fold_ #1 _ ##1 _ tl } { }
+            \tl_const:cn { c__str_fold_ #1 _X_ ##1 _ tl } { }
           }
-        \tl_if_exist:cF { c__tl_lower_ #1 _ ##1 _ tl }
+        \tl_if_exist:cF { c__tl_lower_ #1 _X_ ##1 _ tl }
           {
-            \tl_const:cn { c__tl_lower_ #1 _ ##1 _ tl } { }
+            \tl_const:cn { c__tl_lower_ #1 _X_ ##1 _ tl } { }
          }
-        \tl_if_exist:cF { c__tl_upper_ #1 _ ##1 _ tl }
+        \tl_if_exist:cF { c__tl_upper_ #1 _X_ ##1 _ tl }
          {
-            \tl_const:cn { c__tl_upper_ #1 _ ##1 _ tl } { }
+            \tl_const:cn { c__tl_upper_ #1 _X_ ##1 _ tl } { }
          }
      }
  }
@@ -319,192 +328,22 @@
 \__unicode_verb_end:
 % \end{macrocode}
 %
-% \subsection{Case folding}
-%
-% \begin{macro}{\__unicode_parse_line:w}
-% \begin{macro}[aux]{\__unicode_parse_line_auxi:Nw}
-% \begin{macro}[aux]{\__unicode_parse_line_auxii:w}
-% \begin{macro}[aux]{\__unicode_parse_line_auxiii:nw}
-% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nn}
-% \begin{macro}[aux]{\__unicode_parse_line_auxv:wnn}
-% The format of |CaseFolding.txt| allows for both blank lines and
-% C-style comments starting with |#|. Thus the first two steps of
-% the parsing routine are set up to deal with these cases.
-% \begin{macrocode}
-\cs_new_protected:Npn \__unicode_parse_line:w #1 \q_stop
-  {
-    \tl_if_blank:nF {#1}
-      { \__unicode_parse_line_auxi:Nw #1 \q_stop }
-  }
-\cs_new_protected:Npn \__unicode_parse_line_auxi:Nw #1#2 \q_stop
-  {
-    \str_if_eq_x:nnF { \exp_not:n {#1} } { \cs_to_str:N \# }
-      { \__unicode_parse_line_auxii:w #1#2 \q_stop }
-  }
-% \end{macrocode}
-% For lines actually containing data, there will be four entries separated by
-% |;| tokens: the hex code for the char itself, which folding regim\'{e}s
-% the line applies to, the hex code(s) for the folded char and a
-% description. Of these, we need all but the last one. In the simple
-% case of core foldings, the mapping is one--one and this information
-% can be passed directly to the next stage. We also handle the full
-% mappings (dropping simple ones plus any Turkic variation): an additional
-% step is needed to parse this case.
-% \begin{macrocode}
-\cs_new_protected:Npn \__unicode_parse_line_auxii:w #1 ;~ #2 ; #3 ; #4 \q_stop
-  {
-    \str_if_eq:nnTF {#2} { C }
-      {
-        \__unicode_parse_line_auxiv:nn
-          {#1} { \luatex_Uchar:D "#3 \c_space_tl }
-      }
-      {
-        \str_if_eq:nnT {#2} { F }
-          { \__unicode_parse_line_auxiii:nw {#1} #3 ~ \q_stop }
-      }
-  }
-% \end{macrocode}
-% Full folding produces two or three Unicode code points from a single
-% input char. To deal with this, we split the relevant part of the input
-% and check how many chars to generate. The entire folding output is
-% braced so that when read back \TeX{} will see this as a group in our
-% replacement code.
-% \begin{macrocode}
-\cs_new_protected:Npn \__unicode_parse_line_auxiii:nw #1 ~ #2 ~ #3 ~ #4 \q_stop
-  {
-    \__unicode_parse_line_auxiv:nn
-      {#1}
-      {
-        {
-          \luatex_Uchar:D "#2 \c_space_tl
-          \luatex_Uchar:D "#3 \c_space_tl
-          \tl_if_empty:nF {#4}
-            { \luatex_Uchar:D "#4 \c_space_tl }
-        }
-      }
-  }
-% \end{macrocode}
-% The final stage of extracting the mapping is to split the various cases
-% up such that comparison and replacement does not need to check every
-% character. That is done by taking the charcode modulo $100$: this splits
-% the list of chars into $100$ much shorter lists. With that done, the
-% input and output chars are added to the appropriate token lists.
-% \begin{macrocode}
-\cs_new_protected:Npn \__unicode_parse_line_auxiv:nn #1#2
-  {
-    \exp_last_unbraced:Nf \__unicode_parse_line_auxv:wnn
-      { \int_eval:n { 1000000 + "#1 } } \q_stop
-      {#1} {#2}
-  }
-% \end{macrocode}
-% As the input is read in string mode, there is a need for a rescan
-% here since \tn{Uchar} requires letters for hexadecimal digits
-% beyond~$9$.
-% \begin{macrocode}
-\cs_new_protected:Npn \__unicode_parse_line_auxv:wnn
-  #1#2#3#4#5#6#7 \q_stop #8#9
-  {
-    \tl_rescan:nn
-      { }
-      {
-        \tl_put_right:cx { l__unicode_a_ #6 _ #7 _tl }
-          {
-            \luatex_Uchar:D "#8 \c_space_tl
-            #9
-          }
-      }
-  }
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-% \end{macro}
-%
-% The main loop can now take place, reading the source data and saving all of
-% the information in the token list array.
-% \begin{macrocode}
-\ior_open:Nn \g__unicode_data_ior { CaseFolding.txt }
-\ior_str_map_inline:Nn \g__unicode_data_ior
-  { \__unicode_parse_line:w #1 \q_stop }
-\ior_close:N \g__unicode_data_ior
-% \end{macrocode}
-%
-% \begin{macro}[aux]{\__str_tmp:NNn}
-% \begin{macro}[aux, EXP]{\__str_tmp:Nw}
-% To ensure that the output of the case-folding function is a string, all of
-% the stored results need to be detokenized. That is done by including a loop
-% in the |.def| file which will do the necessary change. To set that up, a
-% slightly complicated bit of secondary work: write the functions which do
-% the job into the |.def| file itself, using a group to trap the temporary
-% code. There is also a test in the following so that the result only has
-% braces around items which need it: this is a slight performance tweak when
-% the code actually gets used. Notice that everything in the token list is
-% detokenized except for the |{| and |}| chars needed for grouping: if the
-% search part of the list is not detokenized there are issues with \XeTeX{}
-% and chars beyond $0\mathrm{xFFFF}$ (probably a bug, but can be worked
-% around!).
-% \begin{macrocode}
-\__unicode_verb:
-\group_begin:
-  \cs_set_protected:Npn \__str_tmp:NNn #1#2#3
-    {
-      \tl_const:cx { c__str_fold_#1_#2_tl }
-        { \__str_tmp:Nw #3 \q_recursion_tail { } \q_recursion_stop }
-    }
-  \cs_set:Npn \__str_tmp:Nw #1#2
-    {
-      \quark_if_recursion_tail_stop:N #1
-      \tl_to_str:N #1
-      \tl_if_blank:oT { \use_none:n #2 }
-        { \use:n }
-        { \tl_to_str:n {#2} }
-      \__str_tmp:Nw
-    }
-\__unicode_verb_end:
-% \end{macrocode}
-% \end{macro}
-% \end{macro}
-%
-% The write loop is simple: map over the array and write everything to the
-% output. The saved data is also cleared to save a second loop later on when
-% dealing with case mappings. The group used for the temporary stuff in the
-% |.def| file is also closed at this point.
-% \begin{macrocode}
-\tl_map_inline:nn { 0123456789 }
-  {
-    \tl_map_inline:nn { 0123456789 }
-      {
-        \iow_now:Nx \g__unicode_result_iow
-          {
-            \c_space_tl \c_space_tl
-            \exp_not:N \__str_tmp:NNn #1 ~ ##1 ~
-              { ~ \exp_not:v { l__unicode_a_ #1 _ ##1 _tl } ~ }
-          }
-        \tl_clear:c { l__unicode_a_ #1 _ ##1 _tl }
-      }
-  }
-\iow_now:Nn \g__unicode_result_iow { \group_end: }
-% \end{macrocode}
-%
 % \subsection{Upper/lower/title casing}
 %
-% Unlike the case folding data, case changing data is split into two parts
-% which we need to combine into a single data structure. There are therefore
-% two parts to this process: first to read the exceptions, then to read the
-% main data and combine it.
+% Case changing data is split into two parts which we need to combine into a
+% single data structure. There are therefore two parts to this process: first
+% to read the exceptions, then to read the main data and combine it.
 %
 % \begin{macro}^^A
 % {
 % \l__unicode_lower_exceptions_tl,
 % \l__unicode_title_exceptions_tl,
 % \l__unicode_upper_exceptions_tl
-% }
-% There are special cases for lower, title and uppercase changes: these
+% }
+% There are special cases for lower, title and upper case changes: these
 % all get read in to appropriate lists. Exceptions could be saved as
 % property lists but that would make life a bit more complex with the
-% titlecase exceptions and wouldn't really gain much (this is after all
+% title case exceptions and wouldn't really gain much (this is after all
 % \enquote{disposable} data). Note that for our purposes, what Unicode call
 % title case is stored in the output as `mixed' case.
 % \begin{macrocode}
@@ -514,18 +353,31 @@
 % \end{macrocode}
 % \end{macro}
 %
+% \begin{macro}{\__unicode_parse_line:w}
 % \begin{macro}[aux]{\__unicode_parse_line_auxii:w}
 % \begin{macro}[aux]{\__unicode_parse_line_auxiii:nnn}
 % \begin{macro}[aux]{\__unicode_parse_line_auxiv:nwn}
 % \begin{macro}[aux]{\__unicode_brace:n}
-% The format of the special cases data is similar to that of the folding
-% data: as such only some of the parsing is altered. This file has four
-% important data fields: the char at it's lower, title and uppercase
-% equivalents. As most of the titlecase exceptions are also uppercase
-% exceptions, a test is made so that we are only storing truly useful
-% exceptions for titlecase.
-% \begin{macrocode}
-\cs_set_protected:Npn \__unicode_parse_line_auxii:w
+% The file |SpecialCasing.txt| uses C-style comments and may contain
+% blank lines: those two awkward situations need to be filtered out before
+% parsing the real data in the line.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line:w #1 \q_stop
+  {
+    \tl_if_blank:nF {#1}
+      {
+        \str_if_eq_x:nnF { \tl_head:n {#1} } { \cs_to_str:N \# }
+          { \__unicode_parse_line_auxii:w #1 \q_stop }
+      }
+  }
+% \end{macrocode}
+% Here, |#1| is the code point for the input, |#2| is the lower case mapping,
+% |#3| the title case mapping and |#4| the upper case mapping: all three
+% mappings are always given even if they are also in |UnicodeData.txt|. As
+% most of the title case exceptions are also upper case exceptions, a test is
+% made so that we are only storing truly useful exceptions for title case.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxii:w
   #1 ;~ #2 ;~ #3 ;~ #4 ; #5 \q_stop
   {
     \__unicode_parse_line_auxiii:nnn {#1} {#2} { lower }
@@ -534,13 +386,12 @@
     \__unicode_parse_line_auxiii:nnn {#1} {#4} { upper }
   }
 % \end{macrocode}
-% Unlike the folding data, the special cases file always has a value for
-% each of the three entries. Some of these have only one hex number in.
-% After a bit of a trick to allow for ease of parsing, we check if there
-% are at least two numbers for the case-changed char. If there are, then
-% save the exception. If not, then the value will also be in the main
+% For each mapping there may be one, two or three code points in the
+% output. After a bit of a trick to allow for ease of parsing, we check if
+% there are at least two numbers for the case-changed char. If there are,
+% then save the exception. If not, then the value will also be in the main
 % table and we can ignore it here. There is also a test to see if the
-% current value is a titlecase exception: they don't need extra braces
+% current value is a title case exception: they don't need extra braces
 % for those.
 % \begin{macrocode}
 \cs_new_protected:Npn \__unicode_parse_line_auxiii:nnn #1#2#3
@@ -549,15 +400,15 @@
   {
     \tl_if_empty:nF {#3}
       {
-        \str_if_eq:nnTF {#5} { title }
-          { \cs_set_eq:NN \__unicode_brace:n \use:n }
-          { \cs_set:Npn \__unicode_brace:n ##1 { { ##1 } } }
+        \str_if_eq:nnTF {#5} { title }
+          { \cs_set_eq:NN \__unicode_brace:n \use:n }
+          { \cs_set:Npn \__unicode_brace:n ##1 { { ##1 } } }
         \tl_rescan:nn
          { }
         {
            \tl_put_right:cx { l__unicode_ #5 _exceptions_tl }
             {
-                \luatex_Uchar:D "#1 \c_space_tl
+                { \luatex_Uchar:D "#1 \c_space_tl }
                {
                  \__unicode_brace:n
                    {
@@ -577,6 +428,7 @@
 % \end{macro}
 % \end{macro}
 % \end{macro}
+% \end{macro}
 %
 % Parsing set up, read the special cases file. The input contains both
 % general special cases and ones dependent on context. We only want to read
@@ -595,74 +447,123 @@
 %
 % \begin{macro}{\__unicode_parse_line:w}
 % \begin{macro}[aux]{\__unicode_parse_line_auxi:w}
-% \begin{macro}[aux]{\__unicode_parse_line_auxii:nnNn}
-% \begin{macro}[aux]{\__unicode_parse_line_auxiii:wnnNn}
-% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nnNNNn}
-% Much the same as for the case folding set up: parse the lines of
-% data. Here, the lines are longer but always have one--one mappings.
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nnNn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxv:wnnNn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxvi:nnNNNn}
+% For parsing |UnicodeData.txt| there is no need to worry about funny lines:
+% the file has no comments or blank lines. Each line also contains a
+% one-to-one mapping for the case mappings, so they are easy to deal with.
+% The slight complication here is that the lines are rather long, so a
+% multi-part approach is needed to grab the correct parts of the line as
+% arguments. Of the first set of arguments, the two that are needed are |#1|
+% (the code point) and |#6| (details about the code point which may include
+% the fact it's a compatibility char).
 % \begin{macrocode}
 \cs_set_protected:Npn \__unicode_parse_line:w
   #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 ; #8 ; #9 ;
   {
-    \__unicode_parse_line_auxi:w #1 ;
+    \__unicode_parse_line_auxi:w #1 ; #6 ;
   }
 % \end{macrocode}
 % With some data items removed, at this stage the hexadecimal
-% representation of the char is |#1|, the upper case char is |#5|,
-% the lower case one |#6| and the title case one |#7|. These may or
-% may not be present and the upper and titlecase values may be
-% identical. Where there are values for upper/lowercase, they are
-% saved into the arrays. For titlecase, since the number of exceptions
-% is small: they are added to the existing list of exceptions we've
-% already started.
+% representation of the char is |#1|, any compatibility char information is
+% in |#2|, the upper case char is |#6|, the lower case one |#7| and the
+% title case one |#8|. These may or may not be present and the upper and
+% title case values may be identical. The compatibility data is first
+% extracted into a sequence, then the main information is processed.
+% Where there are values for upper/lower case, they are saved into the
+% arrays. For title case, since the number of exceptions is small: they are
+% added to the existing list of exceptions we've already started. Note
+% that there is a space at the end of |#8| as we are reading the data in
+% with spaces not ignored: that has to be allowed for to get the equality
+% test right.
 % \begin{macrocode}
 \cs_new_protected:Npn \__unicode_parse_line_auxi:w
-  #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 \q_stop
+  #1 ; #2 ; #3 ; #4 ; #5 ; #6 ; #7 ; #8 \q_stop
   {
-    \tl_if_empty:nF {#5}
+    \use:x
      {
-        \__unicode_parse_line_auxii:nnNn {#1} {#5} a { upper }
-        \str_if_eq:nnF {#5} {#7}
+        \__unicode_parse_line_auxii:nw {#1} #2 \tl_to_str:n { <compat> }
+          \c_space_tl \exp_not:N \q_stop
+      }
+    \tl_if_empty:nF {#6}
+      {
+        \__unicode_parse_line_auxiv:nnNn {#1} {#6} a { upper }
+        \str_if_eq:nnF {#6} {#8}
          {
            \tl_put_right:Nx \l__unicode_title_exceptions_tl
             {
               \luatex_Uchar:D "#1 \c_space_tl
-                \luatex_Uchar:D "#7 \c_space_tl
+                \luatex_Uchar:D "#8 \c_space_tl
            }
         }
     }
-    \tl_if_empty:nF {#6}
-      { \__unicode_parse_line_auxii:nnNn {#1} {#6} b { lower } }
+    \tl_if_empty:nF {#7}
+      { \__unicode_parse_line_auxiv:nnNn {#1} {#7} b { lower } }
+  }
+% \end{macrocode}
+% Compatibility chars have information as the marker |<compat>| then a list
+% of one to three resulting code points. The one-to-one cases are not an
+% issue for dealing with the data, so it's only the more complex versions
+% that need to be recorded.
+% \begin{macrocode}
+\use:x
+  {
+    \cs_new_protected:Npn \exp_not:N \__unicode_parse_line_auxii:nw
+      ##1##2 \tl_to_str:n { <compat> } ~ ##3 \exp_not:N \q_stop
+  }
+  {
+    \tl_if_blank:nF {#3}
+      {
+        \__unicode_parse_line_auxiii:nw {#1} #3 ~ \q_stop
+      }
+  }
+\cs_new_protected:Npn \__unicode_parse_line_auxiii:nw #1#2 ~ #3 \q_stop
+  {
+    \tl_if_blank:nF {#3}
+      {
+        \seq_put_right:Nn \l__unicode_compat_seq {#1}
+      }
+  }
 % \end{macrocode}
-% The array structure here is the same as before, except now there
-% are two separate ones to manage.
+% The array structure here is in two parts, one for upper and one
+% for lower case mappings.
 % \begin{macrocode}
-\cs_new_protected:Npn \__unicode_parse_line_auxii:nnNn #1#2#3
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nnNn #1#2#3
   {
-    \exp_last_unbraced:Nf \__unicode_parse_line_auxiii:wnnNn
+    \exp_last_unbraced:Nf \__unicode_parse_line_auxv:wnnNn
       { \int_eval:n { 1000000 + "#1 } } \q_stop
       {#1} {#2} #3
   }
-\cs_new_protected:Npn \__unicode_parse_line_auxiii:wnnNn
+\cs_new_protected:Npn \__unicode_parse_line_auxv:wnnNn
   #1#2#3#4#5#6#7 \q_stop #8#9
-  { \__unicode_parse_line_auxiv:nnNNNn {#8} {#9} #6 #7 }
+  { \__unicode_parse_line_auxvi:nnNNNn {#8} {#9} #6 #7 }
 % \end{macrocode}
-% The final test required here is to look for the special cases and where
-% appropriate use that rather than the one--one mapping value.
+% There are two final tests. First, for entries in the compatibility list
+% there is a need to add braces around the chars in case there is any
+% normalisation during file reading. Second, any special cases have to be
+% allowed for: these are checked in the exception list built up earlier.
+% Entries in the latter are always braced, so there is no need to add another
+% one.
 % \begin{macrocode}
-\cs_new_protected:Npn \__unicode_parse_line_auxiv:nnNNNn #1#2#3#4#5#6
+\cs_new_protected:Npn \__unicode_parse_line_auxvi:nnNNNn #1#2#3#4#5#6
   {
+    \seq_if_in:NnTF \l__unicode_compat_seq {#1}
+      { \cs_set:Npn \__unicode_brace:n ##1 { { ##1 } } }
+      { \cs_set_eq:NN \__unicode_brace:n \use:n }
     \tl_rescan:nn
      { }
     {
       \tl_put_right:cx { l__unicode_ #5 _ #3 _ #4 _tl }
         {
-            \luatex_Uchar:D "#1 \c_space_tl
-            \str_case_x:nvF
-              { \luatex_Uchar:D "#1 \c_space_tl }
-              { l__unicode_ #6 _exceptions_tl }
-              { \luatex_Uchar:D "#2 \c_space_tl }
+            \__unicode_brace:n
+              { \luatex_Uchar:D "#1 \c_space_tl }
+            \str_case_x:nvF
+              { \luatex_Uchar:D "#1 \c_space_tl }
+              { l__unicode_ #6 _exceptions_tl }
+              { \__unicode_brace:n { \luatex_Uchar:D "#2 \c_space_tl } }
        }
    }
 }
@@ -672,19 +573,20 @@
 % \end{macro}
 % \end{macro}
 % \end{macro}
+% \end{macro}
 %
-% Everything is set up and so the read loop can take place: this time
-% there are no comment chars to worry about and so normal category
-% codes apply.
+% Everything is set up and so the read loop can take place.
 % \begin{macrocode}
 \ior_open:Nn \g__unicode_data_ior { UnicodeData.txt }
-\ior_map_inline:Nn \g__unicode_data_ior
-  { \__unicode_parse_line:w #1 \q_stop }
+\ior_str_map_inline:Nn \g__unicode_data_ior
+  { \__unicode_parse_line:w #1\q_stop }
 \ior_close:N \g__unicode_data_ior
 % \end{macrocode}
 %
-% Saving the data uses a single file, with the uppercase array
-% followed by the lowercase one and finally the titlecase exceptions.
+% Saving the data uses a single file, with the upper case array
+% followed by the lower case one and finally the title case exceptions.
+% The saved data in the \texttt{a} storage array is also cleared to save a
+% second loop later on when dealing with case folding.
 % \begin{macrocode}
 \tl_map_inline:nn { 0123456789 }
   {
@@ -693,9 +595,10 @@
         \iow_now:Nx \g__unicode_result_iow
          {
            \tl_const:cn
-              { ~ c__tl_upper_ #1 _ ##1 _tl ~ } ~
+              { ~ c__tl_upper_ #1 _X_ ##1 _tl ~ } ~
              { ~ \exp_not:v { l__unicode_a_ #1 _ ##1 _tl } ~ }
          }
+        \tl_clear:c { l__unicode_a_ #1 _ ##1 _tl }
      }
  }
 \tl_map_inline:nn { 0123456789 }
   {
@@ -705,7 +608,7 @@
        \iow_now:Nx \g__unicode_result_iow
          {
            \tl_const:cn
-              { ~ c__tl_lower_ #1 _ ##1 _tl ~ } ~
+              { ~ c__tl_lower_ #1 _X_ ##1 _tl ~ } ~
              { ~ \exp_not:v { l__unicode_b_ #1 _ ##1 _tl } ~ }
          }
      }
  }
@@ -718,7 +621,7 @@
 }
 % \end{macrocode}
 %
-% Data for the special cases is now needed. This is mainly a series of simple
+% Data for the special cases is now stored. This is mainly a series of simple
 % token lists with appropriate names and content, but there is also one place
 % where a small mapping list is required.
 % \begin{macrocode}
@@ -753,6 +656,180 @@
 }
 % \end{macrocode}
 %
+% \subsection{Case folding}
+%
+% \begin{macro}{\__unicode_parse_line:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxi:Nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxii:w}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiii:nw}
+% \begin{macro}[aux]{\__unicode_parse_line_auxiv:nn}
+% \begin{macro}[aux]{\__unicode_parse_line_auxv:wnn}
+% As for |SpecialCasing.txt|, the format of |CaseFolding.txt| allows both
+% blank lines and C-style comments starting with |#|.
+% \begin{macrocode}
+\cs_set_protected:Npn \__unicode_parse_line:w #1 \q_stop
+  {
+    \tl_if_blank:nF {#1}
+      { \__unicode_parse_line_auxi:Nw #1 \q_stop }
+  }
+\cs_set_protected:Npn \__unicode_parse_line_auxi:Nw #1#2 \q_stop
+  {
+    \str_if_eq_x:nnF { \exp_not:n {#1} } { \cs_to_str:N \# }
+      { \__unicode_parse_line_auxii:w #1#2 \q_stop }
+  }
+% \end{macrocode}
+% For lines actually containing data, there will be four entries separated by
+% |;| tokens: the hex code for the char itself, which folding regim\'{e}s
+% the line applies to, the hex code(s) for the folded char and a
+% description. Of these, we need all but the last one. In the simple
+% case of core foldings, the mapping is one--one and this information
+% can be passed directly to the next stage. We also handle the full
+% mappings (dropping simple ones plus any Turkic variation): an additional
+% step is needed to parse this case.
+% \begin{macrocode}
+\cs_set_protected:Npn \__unicode_parse_line_auxii:w #1 ;~ #2 ; #3 ; #4 \q_stop
+  {
+    \str_if_eq:nnTF {#2} { C }
+      {
+        \__unicode_parse_line_auxiv:nn
+          {#1} { \luatex_Uchar:D "#3 \c_space_tl }
+      }
+      {
+        \str_if_eq:nnT {#2} { F }
+          { \__unicode_parse_line_auxiii:nw {#1} #3 ~ \q_stop }
+      }
+  }
+% \end{macrocode}
+% Full folding produces two or three Unicode code points from a single
+% input char. To deal with this, we split the relevant part of the input
+% and check how many chars to generate. The entire folding output is
+% braced so that when read back \TeX{} will see this as a group in our
+% replacement code: the only exceptions occur when the input char is on
+% the compatibility list, as that would lead to an extra set of braces.
+% \begin{macrocode}
+\cs_set_protected:Npn \__unicode_parse_line_auxiii:nw #1 ~ #2 ~ #3 ~ #4 \q_stop
+  {
+    \seq_if_in:NnTF \l__unicode_compat_seq {#1}
+      { \cs_set_eq:NN \__unicode_brace:n \use:n }
+      { \cs_set:Npn \__unicode_brace:n ##1 { { ##1 } } }
+    \exp_args:Nno \__unicode_parse_line_auxiv:nn
+      {#1}
+      {
+        \__unicode_brace:n
+          {
+            \luatex_Uchar:D "#2 \c_space_tl
+            \luatex_Uchar:D "#3 \c_space_tl
+            \tl_if_empty:nF {#4}
+              { \luatex_Uchar:D "#4 \c_space_tl }
+          }
+      }
+  }
+% \end{macrocode}
+% The final stage of extracting the mapping is to split the various cases
+% up such that comparison and replacement does not need to check every
+% character. That is done by taking the charcode modulo $100$: this splits
+% the list of chars into $100$ much shorter lists. With that done, the
+% input and output chars are added to the appropriate token lists.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxiv:nn #1#2
+  {
+    \exp_last_unbraced:Nf \__unicode_parse_line_auxv:wnn
+      { \int_eval:n { 1000000 + "#1 } } \q_stop
+      {#1} {#2}
+  }
+% \end{macrocode}
+% As the input is read in string mode, there is a need for a rescan
+% here since \tn{Uchar} requires letters for hexadecimal digits
+% beyond~$9$.
+% \begin{macrocode}
+\cs_new_protected:Npn \__unicode_parse_line_auxv:wnn
+  #1#2#3#4#5#6#7 \q_stop #8#9
+  {
+    \seq_if_in:NnTF \l__unicode_compat_seq {#8}
+      { \cs_set:Npn \__unicode_brace:n ##1 { { ##1 } } }
+      { \cs_set_eq:NN \__unicode_brace:n \use:n }
+    \tl_rescan:nn
+      { }
+      {
+        \tl_put_right:cx { l__unicode_a_ #6 _ #7 _tl }
+          {
+            \__unicode_brace:n { \luatex_Uchar:D "#8 \c_space_tl }
+            \__unicode_brace:n { #9 }
+          }
+      }
+  }
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+% \end{macro}
+%
+% The main loop can now take place, reading the source data and saving all of
+% the information in the token list array.
+% \begin{macrocode}
+\ior_open:Nn \g__unicode_data_ior { CaseFolding.txt }
+\ior_str_map_inline:Nn \g__unicode_data_ior
+  { \__unicode_parse_line:w #1 \q_stop }
+\ior_close:N \g__unicode_data_ior
+% \end{macrocode}
+%
+% \begin{macro}[aux]{\__str_tmp:NNn}
+% \begin{macro}[aux, EXP]{\__str_tmp:Nw}
+% To ensure that the output of the case-folding function is a string, all of
+% the stored results need to be detokenized. That is done by including a loop
+% in the |.def| file which will do the necessary change. To set that up, a
+% slightly complicated bit of secondary work: write the functions which do
+% the job into the |.def| file itself, using a group to trap the temporary
+% code. There is also a test in the following so that the result only has
+% braces around items which need it: this is a slight performance tweak when
+% the code actually gets used. Notice that everything in the token list is
+% detokenized except for the |{| and |}| chars needed for grouping: if the
+% search part of the list is not detokenized there are issues with \XeTeX{}
+% and chars beyond $0\mathrm{xFFFF}$ (probably a bug, but can be worked
+% around!).
+% \begin{macrocode}
+\__unicode_verb:
+\group_begin:
+  \cs_set_protected:Npn \__str_tmp:NNn #1#2#3
+    {
+      \tl_const:cx { c__str_fold_#1 _X_ #2_tl }
+        { \__str_tmp:Nw #3 \q_recursion_tail { } \q_recursion_stop }
+    }
+  \cs_set:Npn \__str_tmp:Nw #1#2
+    {
+      \quark_if_recursion_tail_stop:N #1
+      \tl_to_str:N #1
+      \tl_if_blank:oT { \use_none:n #2 }
+        { \use:n }
+        { \tl_to_str:n {#2} }
+      \__str_tmp:Nw
+    }
+\__unicode_verb_end:
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+%
+% The write loop is simple: map over the array and write everything to the
+% output. The group used for the temporary stuff in the
+% |.def| file is also closed at this point.
+% \begin{macrocode}
+\tl_map_inline:nn { 0123456789 }
+  {
+    \tl_map_inline:nn { 0123456789 }
+      {
+        \iow_now:Nx \g__unicode_result_iow
+          {
+            \c_space_tl \c_space_tl
+            \exp_not:N \__str_tmp:NNn #1 ~ ##1 ~
+              { ~ \exp_not:v { l__unicode_a_ #1 _ ##1 _tl } ~ }
+          }
+      }
+  }
+\iow_now:Nn \g__unicode_result_iow { \group_end: }
+% \end{macrocode}
+%
 % Job done, end the \TeX{} run.
 % \begin{macrocode}
 \iow_close:N \g__unicode_result_iow
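
The script in this change reads three Unicode Character Database files whose layouts it relies on: UnicodeData.txt (field 5 carries decomposition data including the <compat> marker, fields 12-14 the simple upper/lower/title mappings), SpecialCasing.txt (code; lower; title; upper) and CaseFolding.txt (code; status; mapping, with status letters C/F/S/T). The following is a rough sketch of pulling out the same fields, written in Python rather than expl3 purely for illustration; the function names are made up here and the files are assumed to sit in the working directory.

# Sketch only: mirrors the fields the .dtx script reads, not the script itself.
def parse_unicode_data(path="UnicodeData.txt"):
    """Yield (code, is_compat, upper, lower, title) for every code point."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            fields = line.rstrip("\n").split(";")
            decomposition = fields[5]
            # Recorded as "compatibility" only when the <compat> tag maps to
            # two or more code points, matching the auxii/auxiii tests above.
            is_compat = (decomposition.startswith("<compat>")
                         and len(decomposition.split()) > 2)
            yield fields[0], is_compat, fields[12], fields[13], fields[14]

def parse_case_folding(path="CaseFolding.txt"):
    """Yield (code, status, mapping code points) for C and F rows only."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            data = line.split("#", 1)[0].strip()   # drop C-style comments
            if not data:
                continue                           # blank or comment-only line
            code, status, mapping, _ = (part.strip() for part in data.split(";"))
            if status in ("C", "F"):               # common and full foldings
                yield code, status, mapping.split()

Keeping only the C and F rows corresponds to the \str_if_eq tests on the second field in \__unicode_parse_line_auxii:w, which drop the simple (S) and Turkic (T) variants.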
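
Both parsing passes file each mapping under its code point modulo 100, so that a later lookup only scans one short list rather than the whole table; the "1000000 + hex" trick is simply a way of reading off the last two decimal digits. Below is a rough sketch of that bucketing idea, again in Python, with hypothetical add_fold/fold_char helpers standing in for the generated l__unicode_a_<tens>_<units>_tl token lists rather than reproducing them.

# Sketch only: the bucket names below stand in for the generated token lists.
from collections import defaultdict

buckets = defaultdict(list)   # (tens, units) -> [(char, folded string), ...]

def add_fold(code_hex, mapping_hexes):
    """File one folding under the last two decimal digits of its code point."""
    code = int(code_hex, 16)
    tens, units = divmod(code % 100, 10)
    folded = "".join(chr(int(part, 16)) for part in mapping_hexes)
    buckets[(tens, units)].append((chr(code), folded))

def fold_char(char):
    """Scan only the matching bucket; unmapped characters fold to themselves."""
    tens, units = divmod(ord(char) % 100, 10)
    for source, target in buckets[(tens, units)]:
        if source == char:
            return target
    return char

For example, U+0130 (decimal 304) lands in bucket (0, 4), and its full folding from CaseFolding.txt is the two code points 0069 0307, i.e. "i" followed by a combining dot above.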