From e08b2c1f89e1eb5123b62d01b4932f5dad1a4305 Mon Sep 17 00:00:00 2001 From: Norbert Preining Date: Wed, 30 Dec 2020 03:01:29 +0000 Subject: CTAN sync 202012300301 --- .../biblatex-ieee/biblatex-ieee-alphabetic.pdf | Bin 293433 -> 293031 bytes .../biblatex-ieee/biblatex-ieee-alphabetic.tex | 4 +- .../biblatex-ieee/biblatex-ieee.pdf | Bin 383838 -> 384266 bytes .../biblatex-ieee/biblatex-ieee.tex | 7 +- .../biblatex-ieee/ieee-alphabetic.bbx | 2 +- .../biblatex-ieee/ieee-alphabetic.cbx | 2 +- .../biblatex-contrib/biblatex-ieee/ieee.bbx | 6 +- .../biblatex-contrib/biblatex-ieee/ieee.cbx | 2 +- macros/latex/contrib/nomencl/README | 5 +- macros/latex/contrib/nomencl/nomencl.dtx | 24 +- macros/latex/contrib/nomencl/nomencl.pdf | Bin 341763 -> 342134 bytes macros/latex/contrib/nomencl/sample01.pdf | Bin 34414 -> 34412 bytes macros/latex/contrib/nomencl/sample02.pdf | Bin 80168 -> 80170 bytes macros/latex/contrib/nomencl/sample03.pdf | Bin 81763 -> 81770 bytes macros/latex/contrib/nomencl/sample04.pdf | Bin 48310 -> 48313 bytes macros/latex/contrib/nomencl/sample05.pdf | Bin 34042 -> 34043 bytes macros/luatex/latex/uninormalize/README.md | 64 ++++ .../latex/uninormalize/unicode-normalization.lua | 351 +++++++++++++++++++++ .../latex/uninormalize/unicode-normalize-names.lua | 57 ++++ .../latex/uninormalize/unicode-normalize.lua | 226 +++++++++++++ .../luatex/latex/uninormalize/uninormalize-doc.pdf | Bin 0 -> 62530 bytes .../luatex/latex/uninormalize/uninormalize-doc.tex | 49 +++ macros/luatex/latex/uninormalize/uninormalize.sty | 35 ++ 23 files changed, 815 insertions(+), 19 deletions(-) create mode 100644 macros/luatex/latex/uninormalize/README.md create mode 100644 macros/luatex/latex/uninormalize/unicode-normalization.lua create mode 100644 macros/luatex/latex/uninormalize/unicode-normalize-names.lua create mode 100644 macros/luatex/latex/uninormalize/unicode-normalize.lua create mode 100644 macros/luatex/latex/uninormalize/uninormalize-doc.pdf create mode 100644 macros/luatex/latex/uninormalize/uninormalize-doc.tex create mode 100644 macros/luatex/latex/uninormalize/uninormalize.sty (limited to 'macros') diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.pdf b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.pdf index 845f7226c8..dea42d5d67 100644 Binary files a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.pdf and b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.pdf differ diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.tex b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.tex index 89e0024acf..511b761e6f 100644 --- a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.tex +++ b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee-alphabetic.tex @@ -24,8 +24,8 @@ \href{mailto:joseph.wright@morningstar2.co.uk} {\texttt{joseph.wright@morningstar2.co.uk}}}} \title{The \pkg{ieee-alphabetic} bibliography style for \pkg{biblatex}% - \footnote{This file describes v1.3c, last revised 2020/08/31.}} -\date{Released 2020/08/31} + \footnote{This file describes v1.3d, last revised 2020/12/29.}} +\date{Released 2020/12/29} \providecommand*{\pkg}[1]{\textsf{#1}} diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.pdf b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.pdf index e57ebf5c87..38c1062d1e 100644 Binary files 
a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.pdf and b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.pdf differ diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.tex b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.tex index 1182cd1fe1..62a9daac31 100644 --- a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.tex +++ b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/biblatex-ieee.tex @@ -24,8 +24,8 @@ \href{mailto:joseph.wright@morningstar2.co.uk} {\texttt{joseph.wright@morningstar2.co.uk}}}} \title{The \pkg{ieee} bibliography style for \pkg{biblatex}% - \footnote{This file describes v1.3c, last revised 2020/08/31.}} -\date{Released 2020/08/31} + \footnote{This file describes v1.3d, last revised 2020/12/29.}} +\date{Released 2020/12/29} \providecommand*{\opt}[1]{\texttt{#1}} \providecommand*{\pkg}[1]{\textsf{#1}} @@ -39,7 +39,7 @@ This package provides a style for \pkg{biblatex} which follows the guidelines of the \textsc{ieee}. The citation style is numeric and unsorted. The bibliography style follows the pattern of the official \pkg{IEEEtran} -package (\url{http://www.ieee.org/documents/style_manual.pdf}). The style +package (\url{https://ieeeauthorcenter.ieee.org/wp-content/uploads/IEEE-Editorial-Style-Manual.pdf}). The style should be loaded in the usual way \begin{verbatim} \usepackage[style=ieee]{biblatex} @@ -141,6 +141,7 @@ be sent by e-mail to \changes{v1.2d}{2017/03/26}{Subtle adjustment for volume string} \changes{v1.3}{2018/08/20}{Hungarian localisation} \changes{v1.3b}{2020/02/26}{Add quotes to website titles} +\changes{v1.3d}{2020/12/17}{Adjust \texttt{online} type} \PrintChanges diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.bbx b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.bbx index b2654804e2..612bd44630 100644 --- a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.bbx +++ b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.bbx @@ -8,7 +8,7 @@ %% --------------------------------------------------------------- %% -\ProvidesFile{ieee-alphabetic.bbx}[2020/08/31 v1.3c biblatex bibliography style] +\ProvidesFile{ieee-alphabetic.bbx}[2020/12/29 v1.3d biblatex bibliography style] \RequireBibliographyStyle{ieee} diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.cbx b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.cbx index be1fc4136a..0209c18730 100644 --- a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.cbx +++ b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee-alphabetic.cbx @@ -8,7 +8,7 @@ %% --------------------------------------------------------------- %% -\ProvidesFile{ieee-alphabetic.cbx}[2020/08/31 v1.3c biblatex citation style] +\ProvidesFile{ieee-alphabetic.cbx}[2020/12/29 v1.3d biblatex citation style] \RequireCitationStyle{alphabetic} diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.bbx b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.bbx index faeae9a597..6bb139f627 100644 --- a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.bbx +++ b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.bbx @@ -8,7 +8,7 @@ %% --------------------------------------------------------------- %% -\ProvidesFile{ieee.bbx}[2020/08/31 v1.3c biblatex bibliography style] +\ProvidesFile{ieee.bbx}[2020/12/29 v1.3d biblatex bibliography style] % Load the standard style 
to avoid copy-pasting unnecessary material \RequireBibliographyStyle{numeric-comp} @@ -683,8 +683,6 @@ \usebibmacro{begentry}% \usebibmacro{author/editor+others/translator+others}% \setunit{\adddot\addspace}% - \printtext[parens]{\usebibmacro{date}}% - \setunit{\adddot\addspace}% \usebibmacro{title}% \setunit{\adddot\addspace}% \printlist{language}% @@ -702,6 +700,8 @@ \iftoggle{bbx:eprint} {\usebibmacro{eprint}} {}% + \setunit{\adddot\addspace}% + \printtext[parens]{\usebibmacro{date}}% \newunit\newblock \usebibmacro{url+urldate}% \setunit{\adddot\addspace}% diff --git a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.cbx b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.cbx index e4f422b494..94b89c5236 100644 --- a/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.cbx +++ b/macros/latex/contrib/biblatex-contrib/biblatex-ieee/ieee.cbx @@ -8,7 +8,7 @@ %% --------------------------------------------------------------- %% -\ProvidesFile{ieee.cbx}[2020/08/31 v1.3c biblatex citation style] +\ProvidesFile{ieee.cbx}[2020/12/29 v1.3d biblatex citation style] \RequireCitationStyle{numeric-comp} diff --git a/macros/latex/contrib/nomencl/README b/macros/latex/contrib/nomencl/README index 80d4c124aa..3968e40100 100644 --- a/macros/latex/contrib/nomencl/README +++ b/macros/latex/contrib/nomencl/README @@ -42,4 +42,7 @@ VERSION HISTORY Slovene option v5.4 2020/03/01 Norwegian (norwegian-bokmaal, norwegian-nynorsk) - options \ No newline at end of file + options + + v5.5 2020/12/29 Catalan option + diff --git a/macros/latex/contrib/nomencl/nomencl.dtx b/macros/latex/contrib/nomencl/nomencl.dtx index ff5f3dedcd..3b9326f74b 100644 --- a/macros/latex/contrib/nomencl/nomencl.dtx +++ b/macros/latex/contrib/nomencl/nomencl.dtx @@ -28,7 +28,7 @@ %<*package|driver> % \fi % \ProvidesFile{nomencl.dtx}% - [2020/03/01 v5.4 Nomenclature package] + [2020/12/29 v5.5 Nomenclature package] % % \iffalse % @@ -352,7 +352,7 @@ follows easily. % \item[nonomentbl] Do not print nomenclature in the |nomentbl| style, see % Section~\ref{sec:nomentbl} (default). % \item[\hspace{-\labelsep}] -% \textbf{croatian, danish, english, french, german, italian, +% \textbf{catalan, croatian, danish, english, french, german, italian, % norwegian-bokmaal, norwegian-nynorsk, polish, % portuguese, russian, slovene, spanish, ukrainian} % The reference texts and the nomenclature title will appear in the @@ -996,11 +996,12 @@ follows easily. % The authors want to thank Stefan % B\"ohm and Karl Heinz Marbaise who helped testing this package. % -% The translations were done by Branka Lon\v{c}arevi\'{c} (Croatian), -% Brian Elmegaard (Danish), Denis B.~Roegel (French), Sani Egisto -% (Italian), Artur Gorka (Polish), Pedro Areal (Portuguese), Alejandro -% Lopez-Valencia (Spanish), joder (Slovene), -% and Boris Veytsman (Russian and Ukrainian). +% The translations were done by Joan Queralt (Catalan), Branka +% Lon\v{c}arevi\'{c} (Croatian), Brian Elmegaard (Danish), Denis +% B.~Roegel (French), Sani Egisto (Italian), wishfort36 (Norwegian), +% Artur Gorka (Polish), Pedro Areal (Portuguese), joder (Slovene), +% Alejandro Lopez-Valencia (Spanish), and Boris Veytsman (Russian and +% Ukrainian). % % % \subsection{Releases and Legal Issues} @@ -1177,8 +1178,17 @@ follows easily. 
% \changes{v5.1}{2019/02/08}{Changed \# to \#\# in options.} % \changes{v5.3}{2019/11/23}{Added Slovene (joder)} % \changes{v5.4}{2020/03/01}{Added Norwegian (wishfort36)} +% \changes{v5.5}{2020/12/29}{Added Catalan (Joan Queralt)} % If you can help out with translations for some other languages, let me know. % \begin{macrocode} +\DeclareOptionX{catalan}{% +\def\eqdeclaration##1{, vegeu l'equaci\'o\nobreakspace(##1)}% +\def\pagedeclaration##1{, p\`agina\nobreakspace##1} +\def\nomname{Nomenclatura} +\def\nomAname{Lletres llatines}% +\def\nomGname{Lletres gregues} +\def\nomXname{Super{\'\i}ndexs}% +\def\nomZname{Sub{\'\i}ndexs}} \DeclareOptionX{croatian}{% \def\eqdeclaration##1{, vidi jednad\v{z}bu\nobreakspace(##1)}% \def\pagedeclaration##1{, stranica\nobreakspace##1}% diff --git a/macros/latex/contrib/nomencl/nomencl.pdf b/macros/latex/contrib/nomencl/nomencl.pdf index 18775e89c1..6159e7288e 100644 Binary files a/macros/latex/contrib/nomencl/nomencl.pdf and b/macros/latex/contrib/nomencl/nomencl.pdf differ diff --git a/macros/latex/contrib/nomencl/sample01.pdf b/macros/latex/contrib/nomencl/sample01.pdf index 6e594d9f44..2c8cc5a429 100644 Binary files a/macros/latex/contrib/nomencl/sample01.pdf and b/macros/latex/contrib/nomencl/sample01.pdf differ diff --git a/macros/latex/contrib/nomencl/sample02.pdf b/macros/latex/contrib/nomencl/sample02.pdf index 376d7b49a5..dfdfe26eb2 100644 Binary files a/macros/latex/contrib/nomencl/sample02.pdf and b/macros/latex/contrib/nomencl/sample02.pdf differ diff --git a/macros/latex/contrib/nomencl/sample03.pdf b/macros/latex/contrib/nomencl/sample03.pdf index 96a0757132..57c1219b87 100644 Binary files a/macros/latex/contrib/nomencl/sample03.pdf and b/macros/latex/contrib/nomencl/sample03.pdf differ diff --git a/macros/latex/contrib/nomencl/sample04.pdf b/macros/latex/contrib/nomencl/sample04.pdf index ebe4f6f15c..8473f6a624 100644 Binary files a/macros/latex/contrib/nomencl/sample04.pdf and b/macros/latex/contrib/nomencl/sample04.pdf differ diff --git a/macros/latex/contrib/nomencl/sample05.pdf b/macros/latex/contrib/nomencl/sample05.pdf index 463572254a..0965d8e7c3 100644 Binary files a/macros/latex/contrib/nomencl/sample05.pdf and b/macros/latex/contrib/nomencl/sample05.pdf differ diff --git a/macros/luatex/latex/uninormalize/README.md b/macros/luatex/latex/uninormalize/README.md new file mode 100644 index 0000000000..942f128dc4 --- /dev/null +++ b/macros/luatex/latex/uninormalize/README.md @@ -0,0 +1,64 @@ +# The `uninormalize` package + +The purpose of this package is to provide Unicode normalization for LuaLaTeX. It is based on Arthur Reutenauer's +[code for GSOC 2008](https://code.google.com/p/google-summer-of-code-2008-tex/downloads/list), which was adapted a little bit to work with +current `Luaotfload`. For more information, see [this question on TeX.sx](http://tex.stackexchange.com/q/229044/7712). + +## What does that mean? + +Citing [Wikipedia](https://en.wikipedia.org/wiki/Unicode_equivalence): + +> Unicode equivalence is the specification by the Unicode character encoding +> standard that some sequences of code points represent essentially the same +> character. This feature was introduced in the standard to allow compatibility +> with preexisting standard character sets, which often included similar or +> identical characters. +> +> Unicode provides two such notions, canonical equivalence and compatibility.
+> Code point sequences that are defined as canonically equivalent are assumed to +> have the same appearance and meaning when printed or displayed. For example, +> the code point `U+006E` (the Latin lowercase "n") followed by `U+0303` (the +> combining tilde) is defined by Unicode to be canonically equivalent to the +> single code point `U+00F1` (the lowercase letter "ñ" of the Spanish alphabet). + +## Basic usage + + + \documentclass{article} + \usepackage{fontspec} + \usepackage[czech]{babel} + \setmainfont{Linux Libertine O} + \usepackage{uninormalize} + \begin{document} + + Some tests: + \begin{itemize} + \item combined letter ᾳ %GREEK SMALL LETTER ALPHA (U+03B1) + % + COMBINING GREEK YPOGEGRAMMENI + % (U+0345) + \item normal letter ᾳ % GREEK SMALL LETTER ALPHA WITH + %YPOGEGRAMMENI (U+1FB3) + \end{itemize} + + Some more combined and normal letters: + óóōōöö + + Linux Libertine does support some combined chars: \parbox{4em}{příliš} + + Using the \verb|^^^^| syntax: ^^^^0061^^^^0301 ^^^^0041^^^^0301 + \end{document} + +## Package options + +This package has three options: + + +- **buffer** -- normalize the processed document at the moment its + source file is read, before processing by \TeX\ starts. This option is enabled by + default and seems to work better than the next one. +- **nodes** -- normalize LuaTeX nodes. Normalization happens after the full processing by \TeX. +- **debug** -- print debug messages to the terminal output. + +Both the **buffer** and **nodes** options are enabled by default; you can disable either of them by using: + + \usepackage[nodes=false,buffer=false]{uninormalize} diff --git a/macros/luatex/latex/uninormalize/unicode-normalization.lua b/macros/luatex/latex/uninormalize/unicode-normalization.lua new file mode 100644 index 0000000000..b8a58b63bb --- /dev/null +++ b/macros/luatex/latex/uninormalize/unicode-normalization.lua @@ -0,0 +1,351 @@ +-- char-def now contains all necessary fields, no need for a custom file +if not characters then + require "char-def" +end + +if not unicode then require('unicode') end +unicode.conformance = unicode.conformance or { } + +unicharacters = unicharacters or {} +uni = unicode.utf8 +unidata = characters.data + +function printf(s, ...) print(string.format(s, ...)) end +-- function debug(s, ...) io.write("DEBUG: ", string.format(s, ...), "\n") end +function warn(s, ...)
io.write("Warning: ", string.format(s, ...), "\n") end + +function md5sum(any) return md5.hex(md5.sum(any)) end + +-- Rehash the character data +unicharacters.combinee = { } +unicharacters.context = unicharacters.context or { } +local charu = unicode.utf8.char + +function unicharacters.context.rehash2() + for ucode, udata in pairs(unidata) -- *not* ipairs :-) + do + local sp = udata.specials + if sp then + if sp[1] == 'char' then + -- local ucode = udata.unicodeslot + local entry = { combinee = sp[2], combining = sp[3], combined = ucode } + if not unicharacters.combinee[sp[2]] + then unicharacters.combinee[sp[2]] = { } end + local n = #unicharacters.combinee[sp[2]] + unicharacters.combinee[sp[2]][n+1] = entry + end + end + -- copy context's combining field to combclass field + -- this field was in the custom copy of the char-def.lua that we no longer use + udata.combclass = udata.combining + end +end + + +unicharacters.context.rehash2() +combdata = unicharacters.combinee + +--[[ function unicode.conformance.is_hangul(ucode) + return ucode >= 0xAC00 and ucode <= 0xD7A3 +end ]] -- Make it local for the moment +local function is_hangul(char) + return char >= 0xAC00 and char <= 0xD7A3 +end + +local function is_jamo(char) + if char < 0x1100 then return false + elseif char < 0x1160 then return 'choseong' + elseif char < 0x11A8 then return 'jungseong' + elseif char < 0x11FA then return 'jongseong' + else return false + end +end + +local function decompose(ucode, compat) -- if compat then compatibility + local invbuf = { } + local sp = unidata[ucode].specials + if not sp + then return { ucode } else + if compat then compat = (sp[1] == 'compat') else compat = false end + while sp[1] == 'char' or compat do + head, tail = sp[2], sp[3] + if not tail then invbuf[#invbuf + 1] = head break end -- singleton + invbuf[#invbuf + 1] = tail + sp = unidata[head].specials + if not sp then invbuf[#invbuf + 1] = head sp = { } end + -- end -- not unidata[head] + end -- while sp[1] == 'char' or compat + end -- not sp + + local seq = { } + for i = #invbuf, 1, -1 + do seq[#seq + 1] = invbuf[i] + end + return seq +end + +local function canon(seq) -- Canonical reordering + if #seq < 3 then return seq end + local c1, c2, buf + -- I'd never thought I'd implement an actual bubble sort some day ;-) + for k = #seq - 1, 1, -1 do + for i = 2, k do -- was k - 1! Argh! + c1 = unidata[seq[i]].combclass + c2 = unidata[seq[i+1]].combclass + if c1 and c2 then + if c1 > c2 then + buf = seq[i] + seq[i] = seq[i+1] + seq[i+1] = buf + end + end + end + end + return seq +end + +if not math.div then -- from l-math.lua + function math.div(n, m) + return math.floor(n/m) + end +end + +local SBase, LBase, VBase, TBase = 0xAC00, 0x1100, 0x1161, 0x11A7 +local LCount, VCount, TCount = 19, 21, 28 +local NCount = VCount * TCount +local SCount = TCount * NCount + +local function decompose_hangul(ucode) -- assumes input is really a Hangul + local SIndex = ucode - SBase + local L = LBase + math.div(SIndex, NCount) + local V = VBase + math.div((SIndex % NCount), TCount) + local T = TBase + SIndex % TCount + if T == TBase then T = nil end + return { L, V, T } +end + +-- To NFK?D. 
+function toNF_D_or_KD(unistring, compat) + local nfd, seq = { }, { } + for uchar in uni.gmatch(unistring, '.') do + local ucode = uni.byte(uchar) + if is_hangul(ucode) then + seq = decompose_hangul(ucode) + for _, c in ipairs(seq) + do nfd[#nfd + 1] = c + end + seq = { } + elseif not unidata[ucode] + then nfd[#nfd + 1] = ucode else + local ccc = unidata[ucode].combclass + if not ccc or ccc == 0 then + seq = canon(seq) + for _, c in ipairs(seq) do nfd[#nfd + 1] = c end + seq = decompose(ucode, compat) + else seq[#seq + 1] = ucode + end -- not ccc or ccc == 0 + end -- if is_hangul(ucode) / elseif not unidata[ucode] + end -- for uchar in uni.gmatch(unistring, ".") + + if #seq > 0 then + seq = canon(seq) + for _, c in ipairs(seq) do nfd[#nfd + 1] = c end + end + + local nfdstr = "" + for _, chr in ipairs(nfd) + do nfdstr = string.format("%s%s", nfdstr, uni.char(chr)) end + return nfdstr, nfd +end + +function unicode.conformance.toNFD(unistring) + return toNF_D_or_KD(unistring, false) +end + +function unicode.conformance.toNFKD(unistring) + return toNF_D_or_KD(unistring, true) +end + +local function compose(seq) + local base = seq[1] + if not combdata[base] then return seq else + local i = 2 + while i <= #seq do -- can I play with 'i' in a for loop? + local cbng = seq[i] + local cccprev + if unidata[seq[i-1]] then cccprev = unidata[seq[i-1]].combclass end + if not cccprev then cccprev = -1 end + if unidata[cbng].combclass > cccprev then + if not combdata[base] then return seq else + for _, cbdata in ipairs(combdata[base]) do + if cbdata.combining == cbng then + seq[1] = cbdata.combined + base = seq[1] + for k = i, #seq - 1 + do seq[k] = seq[k+1] + end -- for k = i, #seq - 1 + seq[#seq] = nil + i = i - 1 + end -- if cbdata.combining == cbng + end -- for _, cbdata in ipairs(combdata[base]) + end -- if unidata[cbng.combclass > cccprev + end -- if not combdata[base] + i = i + 1 + end -- while i <= #seq + end -- if not combdata[base] + return seq +end + +-- To NFC from NFD. +-- Does not yet take all the composition exclusions in account +-- (missing types 1 and 2 as defined by UAX #15 X6) +function unicode.conformance.toNFC_fromNFD(nfd) + local nfc = { } + local seq = { } + for uchar in uni.gmatch(nfd, '.') do + local ucode = uni.byte(uchar) + if not unidata[ucode] + then nfc[#nfc + 1] = ucode else + local cb = unidata[ucode].combclass + if not cb or (cb == 0) then + -- if seq ~= { } then -- Dubious ... + if #seq > 0 then + seq = compose(seq) -- There was a check for #seq == 1 here + for i = 1, #seq do nfc[#nfc + 1] = seq[i] end + end -- #seq > 0 + seq = { ucode } + else seq[#seq + 1] = ucode --[[ Maybe check if seq is not empty ... 
]] + end -- not cb or cb == 0 + end + end + + seq = compose(seq) + for i = 1, #seq do nfc[#nfc + 1] = seq[i] end + + local nfcstr = "" + for _, chr in ipairs(nfc) + do nfcstr = string.format("%s%s", nfcstr, uni.char(chr)) end + return nfcstr, nfc +end + +local function cancompose(seq, compat) + local dec = { } -- new table to hold the decomposed sequence + + local shift + if #seq >= 2 then -- let's do it the brutal way :-) + if is_jamo(seq[1]) == 'choseong' and + is_jamo(seq[2]) == 'jungseong' then + LIndex = seq[1] - LBase + VIndex = seq[2] - VBase + if #seq == 2 or is_jamo(seq[3]) ~= 'jongseong' then + TIndex = 0 + shift = 1 + else + TIndex = seq[3] - TBase + shift = 2 + end + seq[1] = (LIndex * VCount + VIndex) * TCount + TIndex + SBase + for i = 2, #seq -- this shifts and shrinks the table at the same time + do seq[i] = seq[i + shift] + end + end + end + + dec[1] = seq[1] + for i = 2, #seq do + local u = seq[i] + local sp = unidata[u].specials + if sp then + if compat then compat = (sp[1] == 'compat') else compat = false end + if (sp[1] == 'char') or compat then + for i = 2, #sp + do dec[#dec + 1] = sp[i] + end + end + else dec[#dec + 1] = u + end + end -- we have the fully decomposed sequence; now sort it + + for i = #dec - 1, 2, -1 do -- bubble sort! + for j = 2, #dec - 1 do + local u = dec[j] + local ccc1 = unidata[u].combclass + local v = dec[j+1] + local ccc2 = unidata[v].combclass + if ccc1 > ccc2 then -- swap + dec[j+1] = u + dec[j] = v + end + end + end -- dec sorted; now recursively compose + + local base, i, n = dec[1], 2, #dec + local cbd = combdata[base] + local incr_i = true + while i <= n do + local cbg = dec[i] + if cbd then + for _, cb in ipairs(cbd) do + if cb.combining == cbg then + -- NO :-) -- if cbd[cbg] then -- base and cbg combine; compose + dec[1] = cb.combined + base = dec[1] + cbd = combdata[base] + for j = i, n-1 -- shift table elements right of i + do dec[j] = dec[j+1] end + dec[n] = nil + n = n-1 -- table has shrunk by 1, and i doesn't grow + incr_i = false + end + end + end + if incr_i then i = i + 1 + else incr_i = true end + end -- we're finally through! 
return + return dec +end + +function toNF_C_or_KC(unistring, compat) + if unistring == "" then return "" end + local nfc, seq = "", { } + local start, space = true, "" + for uchar in uni.gmatch(unistring, '.') do + local ucode = uni.byte(uchar) + if start then space = ", " start = true end + if not unidata[ucode] then -- unknown to the UCD, will not compose + nfc = string.format("%s%s", nfc, uchar) + else + local ccc = unidata[ucode].combclass + if not (ccc or is_jamo(ucode) == 'jongseong' + or is_jamo(ucode) == 'jungseong') + or ccc == 0 or (is_jamo(ucode) == 'choseong') then + -- and is actually good :-) -- Well, yes and no ;-) + if #seq == 0 then -- add ucode and go to next item of the loop + seq = { ucode } + else -- seq contains unicharacters, try and compose them + if #seq == 1 then nfc = string.format("%s%s", nfc, uni.char(seq[1])) + else dec = cancompose(seq, compat) + for _, c in ipairs(dec) -- add the whole sequence to nfc + do nfc = string.format("%s%s", nfc, uni.char(c)) end + end + seq = { ucode } -- don't forget to reinitialize seq with current char + end + else -- not ccc or ccc == 0 and is_choseong: + -- character is combining, add it to seq + seq[#seq + 1] = ucode + end + end + end + if #seq > 0 then dec = cancompose(seq, compat) end + for _, c in ipairs(dec) + do nfc = string.format("%s%s", nfc, uni.char(c)) end + return nfc +end + +function unicode.conformance.toNFC(unistring) + return toNF_C_or_KC(unistring, false) +end + +function unicode.conformance.toNFKC(unistring) + return toNF_C_or_KC(unistring, true) +end diff --git a/macros/luatex/latex/uninormalize/unicode-normalize-names.lua b/macros/luatex/latex/uninormalize/unicode-normalize-names.lua new file mode 100644 index 0000000000..a15d6b0978 --- /dev/null +++ b/macros/luatex/latex/uninormalize/unicode-normalize-names.lua @@ -0,0 +1,57 @@ +-- Unicode names + +if not characters then + require "char-def" +end + +unicode = unicode or { } +unicode.conformance = unicode.conformance or { } + +unidata = characters.data + +if not math.div then -- from l-math.lua + function math.div(n, m) + return math.floor(n/m) + end +end + +local function is_hangul(char) + return char >= 0xAC00 and char <= 0xD7A3 +end + +local function is_han_character(char) -- from font-otf.lua (check) + return + (char>=0x04E00 and char<=0x09FFF) or + (char>=0x03400 and char<=0x04DFF) or + (char>=0x20000 and char<=0x2A6DF) or + (char>=0x0F900 and char<=0x0FAFF) or + (char>=0x2F800 and char<=0x2FA1F) +end + +local SBase, LBase, VBase, TBase = 0xAC00, 0x1100, 0x1161, 0x11A7 +local LCount, VCount, TCount = 19, 21, 28 +local NCount = VCount * TCount +local SCount = LCount * NCount + +local JAMO_L_TABLE = { [0] = "G", "GG", "N", "D", "DD", "R", "M", "B", "BB", + "S", "SS", "", "J", "JJ", "C", "K", "T", "P", "H" } +local JAMO_V_TABLE = { [0] = "A", "AE", "YA", "YAE", "EO", "E", "YEO", "YE", + "O", "WA", "WAE", "OE", "YO", "U", "WEO", "WE", "WI", "YU", "EU", "YI", "I" } +local JAMO_T_TABLE = { [0] = "", "G", "GG", "GS", "N", "NJ", "NH", "D", "L", + "LG", "LM", "LB", "LS", "LT", "LP", "LH", "M", "B", "BS", "S", "SS", "NG", + "J", "C", "K", "T", "P", "H" } + +function unicode.conformance.name(char) + if is_hangul(char) then + local SIndex = char - SBase + local LIndex = math.div(SIndex, NCount) + local VIndex = math.div(SIndex % NCount, TCount) + local TIndex = SIndex % TCount + return string.format("HANGUL SYLLABLE %s%s%s", JAMO_L_TABLE[LIndex], + JAMO_V_TABLE[VIndex], JAMO_T_TABLE[TIndex]) + elseif is_han_character(char) + then return string.format("CJK UNIFIED 
IDEOGRAPH-%04X", char) + elseif unidata[char] -- if unidata[char] exists, the name exists + then return unidata[char].description + end +end diff --git a/macros/luatex/latex/uninormalize/unicode-normalize.lua b/macros/luatex/latex/uninormalize/unicode-normalize.lua new file mode 100644 index 0000000000..0bd797d4b7 --- /dev/null +++ b/macros/luatex/latex/uninormalize/unicode-normalize.lua @@ -0,0 +1,226 @@ +local M = {} +require("unicode-normalize-names") +require('unicode-normalization') +local NFC = unicode.conformance.toNFC +local char = unicode.utf8.char +local gmatch = unicode.utf8.gmatch +local name = unicode.conformance.name +local byte = unicode.utf8.byte +-- local unidata = unicharacters.data +local length = unicode.utf8.len + +local glyph_id = node.id "glyph" + +M.debug = false + +-- for some reason variable number of arguments doesn't work +local function debug_msg(a,b,c,d,e,f,g,h,i) + if M.debug then + local t = {a,b,c,d,e,f,g,h,i} + print("[uninormalize]", table.unpack(t)) + end +end + +local function make_hash (t) + local y = {} + for _,v in ipairs(t) do + y[v] = true + end + return y +end + +local letter_categories = make_hash {"lu","ll","lt","lo","lm"} + +local mark_categories = make_hash {"mn","mc","me"} + +local function printchars(s) + local t = {} + for x in gmatch(s,".") do + t[#t+1] = name(byte(x)) + end + debug_msg("characters",table.concat(t,":")) +end + +local categories = {} + + +local function get_category(charcode) + local charcode = charcode or "" + if categories[charcode] then + return categories[charcode] + else + local unidatacode = unidata[charcode] or {} + local category = unidatacode.category + categories[charcode] = category + return category + end +end + +-- get glyph char and category +local function glyph_info(n) + local char = n.char + return char, get_category(char) +end + +local function get_mark(n) + if n.id == glyph_id then + local character, cat = glyph_info(n) + if mark_categories[cat] then + return char(character) + end + end + return false +end + +local function make_glyphs(head, nextn,s, lang, font, subtype) + local g = function(a) + local new_n = node.new(glyph_id, subtype) + new_n.lang = lang + new_n.font = font + new_n.char = byte(a) + return new_n + end + if length(s) == 1 then + return node.insert_before(head, nextn,g(s)) + else + local t = {} + local first = true + for x in gmatch(s,".") do + debug_msg("multi letter",x) + head, newn = node.insert_before(head, nextn, g(x)) + end + return head + end +end + +local function normalize_marks(head, n) + local lang, font, subtype = n.lang, n.font, n.subtype + local text = {} + text[#text+1] = char(n.char) + local head, nextn = node.remove(head, n) + --local nextn = n.next + local info = get_mark(nextn) + while(info) do + text[#text+1] = info + head, nextn = node.remove(head,nextn) + info = get_mark(nextn) + end + local s = NFC(table.concat(text)) + debug_msg("We've got mark: " .. 
s) + local new_n = node.new(glyph_id, subtype) + new_n.lang = lang + new_n.font = font + new_n.char = byte(s) + --head, new_n = node.insert_before(head, nextn, new_n) + -- head, new_n = node.insert_before(head, nextn, make_glyphs(s, lang, font, subtype)) + head, new_n = make_glyphs(head, nextn, s, lang, font, subtype) + local t = {} + for x in node.traverse_id(glyph_id,head) do + t[#t+1] = char(x.char) + end + debug_msg("Variables ", table.concat(t,":"), table.concat(text,";"), char(byte(s)),length(s)) + return head, nextn +end + +local function normalize_glyphs(head, n) + --local charcode = n.char + --local category = get_category(charcode) + local charcode, category = glyph_info(n) + if letter_categories[category] then + local nextn = n.next + if nextn and nextn.id == glyph_id then + --local nextchar = nextn.char + --local nextcat = get_category(nextchar) + local nextchar, nextcat = glyph_info(nextn) + if mark_categories[nextcat] then + return normalize_marks(head,n) + end + end + end + return head, n.next +end + + +function M.nodes(head) + local t = {} + local text = false + local n = head + -- for n in node.traverse(head) do + while n do + if n.id == glyph_id then + local charcode = n.char + debug_msg("unicode name",name(charcode)) + debug_msg("character category",get_category(charcode)) + t[#t+1]= char(charcode) + text = true + head, n = normalize_glyphs(head, n) + else + if text then + local s = table.concat(t) + debug_msg("text chunk",s) + --printchars(NFC(s)) + debug_msg("----------") + end + text = false + t = {} + n = n.next + end + end + return head +end + +local unibytes = {} + +local function get_charcategory(s) + local s = s or "" + local b = unibytes[s] or byte(s) or 0 + unibytes[s] = b + return get_category(b) +end + +local function normalize_charmarks(t,i) + local c = {t[i]} + local i = i + 1 + local s = get_charcategory(t[i]) + while mark_categories[s] do + c[#c+1] = t[i] + i = i + 1 + s = get_charcategory(t[i]) + end + return NFC(table.concat(c)), i +end + +local function normalize_char(t,i) + local ch = t[i] + local c = get_charcategory(ch) + if letter_categories[c] then + local nextc = get_charcategory(t[i+1]) + if mark_categories[nextc] then + return normalize_charmarks(t,i) + end + end + return ch, i+1 +end + +function M.buffer(line) + local t = {} + local new_t = {} + -- we need to make table witl all uni chars on the line + for x in gmatch(line,".") do + t[#t+1] = x + end + local i = 1 + -- normalize next char + local c, i = normalize_char(t, i) + new_t[#new_t+1] = c + while t[i] do + c, i = normalize_char(t,i) + -- local c = t[i] + -- i = i + 1 + new_t[#new_t+1] = c + end + return table.concat(new_t) +end + + +return M diff --git a/macros/luatex/latex/uninormalize/uninormalize-doc.pdf b/macros/luatex/latex/uninormalize/uninormalize-doc.pdf new file mode 100644 index 0000000000..9f45abc1e9 Binary files /dev/null and b/macros/luatex/latex/uninormalize/uninormalize-doc.pdf differ diff --git a/macros/luatex/latex/uninormalize/uninormalize-doc.tex b/macros/luatex/latex/uninormalize/uninormalize-doc.tex new file mode 100644 index 0000000000..d0f6eaf528 --- /dev/null +++ b/macros/luatex/latex/uninormalize/uninormalize-doc.tex @@ -0,0 +1,49 @@ +\documentclass{article} +\usepackage{url} +\ifx\HCode\undefined +\usepackage{fontspec} +\setmainfont{Linux Libertine O}[Renderer = Harfbuzz] +\setmonofont{DejaVu Sans Mono}[Scale=MatchLowercase] +\fi +\usepackage{microtype,hyperref} +\usepackage[nodes]{uninormalize} +\usepackage{markdown} +\def\tightlist{} +\begin{document} 
+\title{The \texttt{uninormalize} package} +\author{Michal Hoftich\footnote{\url{michal.h21@gmail.com}} \and Arthur Reutenauer\footnote{\url{arthur.reutenauer@normalesup.org }}} +\date{Version 0.1\\28/12/2020} +\maketitle + +\markdownInput[hybrid]{README.md} + +\subsection{Example results} + +\begin{itemize} + \item combined letter ᾳ %GREEK SMALL LETTER ALPHA (U+03B1) + COMBINING GREEK YPOGEGRAMMENI (U+0345) + \item normal letter ᾳ% GREEK SMALL LETTER ALPHA WITH YPOGEGRAMMENI (U+1FB3) +\end{itemize} + +Some more combined and normal letters: +óóōōöö + +Linux Libertine does support some combined chars: \parbox{4em}{příliš} + +Using the \verb|^^^^| syntax: ^^^^0061^^^^0301 ^^^^0041^^^^0301 + +\subsection{License} + +Copyright: 2020 Michal Hoftich + +This work may be distributed and/or modified under the conditions of the +\textit{\LaTeX\ Project Public License}, either version 1.3 of this license or (at your option) +any later version. The latest version of this license is in +\url{http://www.latex-project.org/lppl.txt} and version 1.3 or later is part of all +distributions of \LaTeX\ version 2005/12/01 or later. + +This work has the LPPL maintenance status \textit{maintained}. + +The Current Maintainer of this work is Michal Hoftich. + +\end{document} + diff --git a/macros/luatex/latex/uninormalize/uninormalize.sty b/macros/luatex/latex/uninormalize/uninormalize.sty new file mode 100644 index 0000000000..4e1e4b1571 --- /dev/null +++ b/macros/luatex/latex/uninormalize/uninormalize.sty @@ -0,0 +1,35 @@ +\ProvidesPackage{uninormalize} +\RequirePackage{luatexbase} +\RequirePackage{luacode} +\RequirePackage{kvoptions} +\DeclareBoolOption[true]{nodes} +\DeclareBoolOption[true]{buffer} +\DeclareBoolOption{debug} +\ProcessKeyvalOptions* +\ifuninormalize@nodes + \luaexec{processnodes=true} +\fi +\ifuninormalize@buffer + \luaexec{processbuffer=true} +\fi +\ifuninormalize@debug + \luaexec{uninormalize_debug = true} +\fi +\begin{luacode*} +local normalize = require "unicode-normalize" +if processnodes==true then + print "[uninormalize] process nodes on" + luatexbase.add_to_callback("pre_linebreak_filter",normalize.nodes, "normalize unicode") + luatexbase.add_to_callback("hpack_filter",normalize.nodes, "normalize unicode") +end +if processbuffer== true then + print "[uninormalize] process buffer on" + luatexbase.add_to_callback("process_input_buffer", normalize.buffer," normalize unicode") +end +if uninormalize_debug then + normalize.debug = true +end +\end{luacode*} + + +\endinput -- cgit v1.2.3
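For readers who want to exercise the new normalization code by hand, here is a minimal LuaLaTeX document. It is not part of the files added by this sync; the file layout, the body text and the use of a `luacode*` block are just one convenient way to reach the Lua API. It calls `unicode.conformance.toNFC` and `toNFD`, which `unicode-normalization.lua` above defines, on the "n + combining tilde" example quoted in the README, and assumes a TeX installation where `uninormalize` and its Lua files are installed:

    \documentclass{article}
    \usepackage{fontspec}
    \usepackage{luacode}
    \usepackage{uninormalize}
    \begin{document}
    Normalization smoke test.

    \begin{luacode*}
    -- uninormalize.sty has already require'd unicode-normalize.lua, which in turn
    -- loads unicode-normalization.lua, so the conformance functions exist here.
    local utf   = unicode.utf8
    local toNFC = unicode.conformance.toNFC
    local toNFD = unicode.conformance.toNFD

    -- U+006E ("n") followed by U+0303 (combining tilde) should compose to U+00F1.
    local decomposed = "n" .. utf.char(0x0303)
    local composed   = toNFC(decomposed)
    print("uninormalize test:", utf.len(decomposed), "code points before NFC,",
          utf.len(composed), "after")
    print(string.format("uninormalize test: composed to U+%04X", utf.byte(composed)))
    -- The other direction: NFD splits the precomposed letter again.
    print("uninormalize test: NFD length of U+00F1 is", utf.len(toNFD(utf.char(0x00F1))))
    \end{luacode*}
    \end{document}

If everything is found, the terminal/log should report 2 code points before NFC and 1 after (U+00F1), and 2 again after NFD; with the default `buffer` and `nodes` options the same composition is applied to the document text itself.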