| field     | value                                            | date                      |
|-----------|--------------------------------------------------|---------------------------|
| author    | Karl Berry <karl@freefriends.org>                | 2016-01-23 22:23:23 +0000 |
| committer | Karl Berry <karl@freefriends.org>                | 2016-01-23 22:23:23 +0000 |
| commit    | 1fe8051f9151dd435a55d4f5e0932fd0b37eea74 (patch) |                           |
| tree      | 8721d7ee24c405d10907428b3336cbda4a1f686c /Master |                           |
| parent    | 9e3236ef10e2f9ee8a507bd5f34b83eb8de9d2f7 (diff)  |                           |
luaotfload (22jan16)
git-svn-id: svn://tug.org/texlive/trunk@39466 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Master')
17 files changed, 1085 insertions, 3767 deletions
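
The largest change in the bundled fontloader below re-adds a `whatcodes` lookup table next to `nodecodes` (in `fontloader-basics-nod.lua` and the `font-otn` scope), so direction (`dir`) and local-paragraph (`localpar`) handling works whether LuaTeX exposes these as first-class nodes or as whatsit subtypes. A minimal sketch of that lookup pattern, extracted from the diff and assuming it runs inside LuaTeX (the `node` library is LuaTeX-only):

```lua
-- Sketch of the lookup tables built in fontloader-basics-nod.lua.
-- node.types() and node.whatsits() are standard LuaTeX calls returning
-- id -> name tables; the loops invert them into name -> id lookups with
-- underscores stripped from the names.
local nodecodes = { }
for id, name in next, node.types() do
    nodecodes[string.gsub(name, "_", "")] = id
end

local whatcodes = { }
for id, name in next, node.whatsits() do
    whatcodes[string.gsub(name, "_", "")] = id
end

-- As in the updated font-otn scope: prefer the first-class node id when it
-- exists (newer LuaTeX), otherwise fall back to the whatsit subtype id.
local dir_code      = nodecodes.dir      or whatcodes.dir
local localpar_code = nodecodes.localpar or whatcodes.localpar
```
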
diff --git a/Master/texmf-dist/doc/luatex/luaotfload/filegraph.pdf b/Master/texmf-dist/doc/luatex/luaotfload/filegraph.pdf Binary files differindex fcf20f6a705..1163dc67cff 100644 --- a/Master/texmf-dist/doc/luatex/luaotfload/filegraph.pdf +++ b/Master/texmf-dist/doc/luatex/luaotfload/filegraph.pdf diff --git a/Master/texmf-dist/doc/luatex/luaotfload/luaotfload.pdf b/Master/texmf-dist/doc/luatex/luaotfload/luaotfload.pdf Binary files differindex cae52de888f..8406dfe431a 100644 --- a/Master/texmf-dist/doc/luatex/luaotfload/luaotfload.pdf +++ b/Master/texmf-dist/doc/luatex/luaotfload/luaotfload.pdf diff --git a/Master/texmf-dist/doc/man/man1/luaotfload-tool.man1.pdf b/Master/texmf-dist/doc/man/man1/luaotfload-tool.man1.pdf Binary files differindex 3f477eb162b..4cbb2655205 100644 --- a/Master/texmf-dist/doc/man/man1/luaotfload-tool.man1.pdf +++ b/Master/texmf-dist/doc/man/man1/luaotfload-tool.man1.pdf diff --git a/Master/texmf-dist/doc/man/man5/luaotfload.conf.man5.pdf b/Master/texmf-dist/doc/man/man5/luaotfload.conf.man5.pdf Binary files differindex 5837a83ab04..5c5af88e40b 100644 --- a/Master/texmf-dist/doc/man/man5/luaotfload.conf.man5.pdf +++ b/Master/texmf-dist/doc/man/man5/luaotfload.conf.man5.pdf diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-2015-12-23.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-2016-01-22.lua index de754361554..760ab82bcc8 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-2015-12-23.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-2016-01-22.lua @@ -1,9 +1,9 @@ --[[info----------------------------------------------------------------------- Luaotfload fontloader package - build 2015-12-23 22:45:42 by phg@phlegethon + build 2016-01-22 07:53:39 by phg@phlegethon ------------------------------------------------------------------------------- - © 2015 PRAGMA ADE / ConTeXt Development Team + © 2016 PRAGMA ADE / ConTeXt Development Team The code in this file is provided under the GPL v2.0 license. See the file COPYING in the Luaotfload repository for details. 
@@ -47,7 +47,7 @@ --info]]----------------------------------------------------------------------- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “data-con” 675f5a0af45ffb3e0d2e2ab5d6c2e47b] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “data-con” 675f5a0af45ffb3e0d2e2ab5d6c2e47b] --- if not modules then modules={} end modules ['data-con']={ version=1.100, @@ -159,10 +159,10 @@ function containers.cleanname(name) return (gsub(lower(name),"[^%w\128-\255]+","-")) end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “data-con”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “data-con”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “basics-nod” af682899d202229c0c1b859a07384b8c] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “basics-nod” 50d00dd271a4af9b00cccf6ca433827a] --- if not modules then modules={} end modules ['luatex-fonts-nod']={ version=1.001, @@ -200,20 +200,17 @@ end nodes={} nodes.pool={} nodes.handlers={} -local nodecodes={} +local nodecodes={} for k,v in next,node.types () do nodecodes[string.gsub(v,"_","")]=k end +local whatcodes={} for k,v in next,node.whatsits() do whatcodes[string.gsub(v,"_","")]=k end local glyphcodes=node.subtypes("glyph") local disccodes=node.subtypes("disc") -for k,v in next,node.types() do - v=string.gsub(v,"_","") - nodecodes[k]=v - nodecodes[v]=k -end for i=0,#glyphcodes do glyphcodes[glyphcodes[i]]=i end for i=0,#disccodes do disccodes[disccodes[i]]=i end +nodes.whatcodes=whatcodes nodes.nodecodes=nodecodes nodes.glyphcodes=glyphcodes nodes.disccodes=disccodes @@ -376,10 +373,10 @@ end nodes.setprop=nodes.setproperty nodes.getprop=nodes.getproperty -end --- [luaotfload, fontloader-2015-12-23.lua scope for “basics-nod”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “basics-nod”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-ini” 179f0a75cda26696c1b1cd6d7fe0d8ae] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-ini” 179f0a75cda26696c1b1cd6d7fe0d8ae] --- if not modules then modules={} end modules ['font-ini']={ version=1.001, @@ -403,10 +400,10 @@ fonts.definers={ methods={} } fonts.loggers={ register=function() end } fontloader.totable=fontloader.to_table -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-ini”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-ini”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-con” 99aacd19adce25fa35a9a30d43e8ac79] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-con” 99aacd19adce25fa35a9a30d43e8ac79] --- if not modules then modules={} end modules ['font-con']={ version=1.001, @@ -1558,10 +1555,10 @@ function constructors.addcoreunicodes(unicodes) return unicodes end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-con”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-con”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-enc” b224fe179312d924ffaf8334cf5ef15b] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-enc” b224fe179312d924ffaf8334cf5ef15b] --- if not modules then modules={} end modules ['luatex-font-enc']={ version=1.001, @@ -1589,10 +1586,10 @@ setmetatable(fonts.encodings.agl,{ __index=function(t,k) end end }) -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-enc”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-enc”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-cid” 
52421d1fdaa07ec4b1d936c6ff5079be] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-cid” 52421d1fdaa07ec4b1d936c6ff5079be] --- if not modules then modules={} end modules ['font-cid']={ version=1.001, @@ -1743,10 +1740,10 @@ function cid.getmap(specification) return found end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-cid”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-cid”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-map” a20a454f933095d78faf1d5f8200d025] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-map” a20a454f933095d78faf1d5f8200d025] --- if not modules then modules={} end modules ['font-map']={ version=1.001, @@ -2060,10 +2057,10 @@ function mappings.addtounicode(data,filename,checklookups) end end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-map”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-map”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-syn” 9729d0e49b770f78e88dab86739e0297] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-syn” 9729d0e49b770f78e88dab86739e0297] --- if not modules then modules={} end modules ['luatex-fonts-syn']={ version=1.001, @@ -2139,10 +2136,10 @@ function fonts.names.ignoredfile(filename) return false end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-syn”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-syn”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-tfm” bbee5eddb11211fb0a8d993db678bf3c] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-tfm” bbee5eddb11211fb0a8d993db678bf3c] --- if not modules then modules={} end modules ['font-tfm']={ version=1.001, @@ -2297,10 +2294,10 @@ function readers.tfm(specification) return check_tfm(specification,fullname) end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-tfm”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-tfm”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-afm” ece4863414d6b38c2e577110c9b55bd3] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-afm” ece4863414d6b38c2e577110c9b55bd3] --- if not modules then modules={} end modules ['font-afm']={ version=1.001, @@ -3152,10 +3149,10 @@ function readers.pfb(specification,method) return readers.afm(specification,method) end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-afm”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-afm”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-afk” b36a76ceb835f41f8c05b471000ddc14] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-afk” b36a76ceb835f41f8c05b471000ddc14] --- if not modules then modules={} end modules ['font-afk']={ version=1.001, @@ -3322,10 +3319,10 @@ fonts.handlers.afm.helpdata={ } } -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-afk”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-afk”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-tfm” 8fd3865240e4e87e99e0739abeda2322] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-tfm” 8fd3865240e4e87e99e0739abeda2322] --- if not modules then modules={} end modules ['luatex-fonts-tfm']={ version=1.001, @@ -3363,10 +3360,10 @@ function fonts.readers.tfm(specification) end end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-tfm”] --- +end --- [luaotfload, 
fontloader-2016-01-22.lua scope for “fonts-tfm”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-oti” b6d493035cec2d748f2f9ec510c860ef] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-oti” b6d493035cec2d748f2f9ec510c860ef] --- if not modules then modules={} end modules ['font-oti']={ version=1.001, @@ -3505,10 +3502,10 @@ function otffeatures.checkeddefaultlanguage(featuretype,autolanguage,languages) end end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-oti”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-oti”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otf” b14a46a7fec8b5cb4c909f4c4c299453] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otf” 184167365757f696e3457f276e7f480f] --- if not modules then modules={} end modules ['font-otf']={ version=1.001, @@ -5882,6 +5879,8 @@ otf.coverup={ multiple=justset, ligature=justset, kern=justset, + chainsubstitution=justset, + chainposition=justset, }, register=function(coverage,lookuptype,format,feature,n,descriptions,resources) local name=formatters["ctx_%s_%s_%s"](feature,lookuptype,n) @@ -5965,10 +5964,10 @@ function otf.getkern(tfmdata,left,right,kind) return 0 end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otf”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otf”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otb” 93461f2f412a9b33b35a273c09b64291] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otb” 93461f2f412a9b33b35a273c09b64291] --- if not modules then modules={} end modules ['font-otb']={ version=1.001, @@ -6553,10 +6552,10 @@ directives.register("fonts.otf.loader.basemethod",function(v) end end) -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otb”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otb”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-inj” 593642f0a6fe4a7fef5ed63034276e6f] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-inj” 593642f0a6fe4a7fef5ed63034276e6f] --- if not modules then modules={} end modules ['font-inj']={ version=1.001, @@ -7583,10 +7582,10 @@ function injections.handler(head,where) end end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-inj”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-inj”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-ota” 67a1fc4c6508526fa54041d22bcb6eab] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-ota” 67a1fc4c6508526fa54041d22bcb6eab] --- if not modules then modules={} end modules ['luatex-fonts-ota']={ version=1.001, @@ -7964,10 +7963,10 @@ directives.register("otf.analyze.useunicodemarks",function(v) analyzers.useunicodemarks=v end) -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-ota”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-ota”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otn” e8a1f33bbba52eda6aec4e6d6a9662dc] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otn” 7257a29e388f544a0f5deb612f6926dc] --- if not modules then modules={} end modules ['font-otn']={ version=1.001, @@ -8057,15 +8056,17 @@ local zwnj=0x200C local zwj=0x200D local wildcard="*" local default="dflt" +local whatcodes=nodes.whatcodes local nodecodes=nodes.nodecodes local glyphcodes=nodes.glyphcodes local disccodes=nodes.disccodes local glyph_code=nodecodes.glyph local 
glue_code=nodecodes.glue local disc_code=nodecodes.disc +local whatsit_code=nodecodes.whatsit local math_code=nodecodes.math -local dir_code=nodecodes.dir -local localpar_code=nodecodes.localpar +local dir_code=nodecodes.dir or whatcodes.dir +local localpar_code=nodecodes.localpar or whatcodes.localpar local discretionary_code=disccodes.discretionary local ligature_code=glyphcodes.ligature local privateattribute=attributes.private @@ -10727,6 +10728,36 @@ local function featuresprocessor(head,font,attr) comprun(start,c_run) start=getnext(start) end + elseif id==whatsit_code then + local subtype=getsubtype(start) + if subtype==dir_code then + local dir=getfield(start,"dir") + if dir=="+TLT" then + topstack=topstack+1 + dirstack[topstack]=dir + rlmode=1 + elseif dir=="+TRT" then + topstack=topstack+1 + dirstack[topstack]=dir + rlmode=-1 + elseif dir=="-TLT" or dir=="-TRT" then + topstack=topstack-1 + rlmode=dirstack[topstack]=="+TRT" and -1 or 1 + else + rlmode=rlparmode + end + elseif subtype==localpar_code then + local dir=getfield(start,"dir") + if dir=="TRT" then + rlparmode=-1 + elseif dir=="TLT" then + rlparmode=1 + else + rlparmode=0 + end + rlmode=rlparmode + end + start=getnext(start) elseif id==math_code then start=getnext(end_of_math(start)) elseif id==dir_code then @@ -10981,6 +11012,36 @@ local function featuresprocessor(head,font,attr) comprun(start,c_run) start=getnext(start) end + elseif id==whatsit_code then + local subtype=getsubtype(start) + if subtype==dir_code then + local dir=getfield(start,"dir") + if dir=="+TLT" then + topstack=topstack+1 + dirstack[topstack]=dir + rlmode=1 + elseif dir=="+TRT" then + topstack=topstack+1 + dirstack[topstack]=dir + rlmode=-1 + elseif dir=="-TLT" or dir=="-TRT" then + topstack=topstack-1 + rlmode=dirstack[topstack]=="+TRT" and -1 or 1 + else + rlmode=rlparmode + end + elseif subtype==localpar_code then + local dir=getfield(start,"dir") + if dir=="TRT" then + rlparmode=-1 + elseif dir=="TLT" then + rlparmode=1 + else + rlparmode=0 + end + rlmode=rlparmode + end + start=getnext(start) elseif id==math_code then start=getnext(end_of_math(start)) elseif id==dir_code then @@ -11259,10 +11320,10 @@ registerotffeature { } otf.handlers=handlers -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otn”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otn”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otp” 18b4375155925ee1809150f4f6c3973b] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otp” 18b4375155925ee1809150f4f6c3973b] --- if not modules then modules={} end modules ['font-otp']={ version=1.001, @@ -12097,10 +12158,10 @@ end otf.enhancers.unpack=unpackdata otf.enhancers.pack=packdata -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-otp”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-otp”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-lua” 50b8edb1db7009b6c661ab71ff24a466] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-lua” 50b8edb1db7009b6c661ab71ff24a466] --- if not modules then modules={} end modules ['luatex-fonts-lua']={ version=1.001, @@ -12133,10 +12194,10 @@ function fonts.readers.lua(specification) end end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-lua”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-lua”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “font-def” 3c71c27300a8cb5c29f5d278d2049fb6] --- +do --- [luaotfload, 
fontloader-2016-01-22.lua scope for “font-def” 3c71c27300a8cb5c29f5d278d2049fb6] --- if not modules then modules={} end modules ['font-def']={ version=1.001, @@ -12467,10 +12528,10 @@ function font.getfont(id) end callbacks.register('define_font',definers.read,"definition of fonts (tfmdata preparation)") -end --- [luaotfload, fontloader-2015-12-23.lua scope for “font-def”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “font-def”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-def” e758c9faca4d44382b88bbea892e8bbf] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-def” e758c9faca4d44382b88bbea892e8bbf] --- if not modules then modules={} end modules ['luatex-fonts-def']={ version=1.001, @@ -12550,10 +12611,10 @@ function fonts.definers.applypostprocessors(tfmdata) return tfmdata end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-def”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-def”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-ext” 0eee87fb26b7d135da88ac0a43a8037a] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-ext” 0eee87fb26b7d135da88ac0a43a8037a] --- if not modules then modules={} end modules ['luatex-fonts-ext']={ version=1.001, @@ -12770,10 +12831,10 @@ otffeatures.register { } } -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-ext”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-ext”] --- -do --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-cbk” 3e86c6a492ca8d792f6b06149ba0dd57] --- +do --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-cbk” 3e86c6a492ca8d792f6b06149ba0dd57] --- if not modules then modules={} end modules ['luatex-fonts-cbk']={ version=1.001, @@ -12975,7 +13036,7 @@ function nodes.simple_font_handler(head) end end -end --- [luaotfload, fontloader-2015-12-23.lua scope for “fonts-cbk”] --- +end --- [luaotfload, fontloader-2016-01-22.lua scope for “fonts-cbk”] --- --- vim:ft=lua:sw=2:ts=8:et:tw=79 diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-basics-nod.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-basics-nod.lua index 78f1b172a43..95a1744f42c 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-basics-nod.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-basics-nod.lua @@ -51,15 +51,11 @@ nodes = { } nodes.pool = { } nodes.handlers = { } -local nodecodes = { } +local nodecodes = { } for k,v in next, node.types () do nodecodes[string.gsub(v,"_","")] = k end +local whatcodes = { } for k,v in next, node.whatsits() do whatcodes[string.gsub(v,"_","")] = k end local glyphcodes = node.subtypes("glyph") local disccodes = node.subtypes("disc") -for k, v in next, node.types() do - v = string.gsub(v,"_","") - nodecodes[k] = v - nodecodes[v] = k -end for i=0,#glyphcodes do glyphcodes[glyphcodes[i]] = i end @@ -67,6 +63,7 @@ for i=0,#disccodes do disccodes[disccodes[i]] = i end +nodes.whatcodes = whatcodes nodes.nodecodes = nodecodes nodes.glyphcodes = glyphcodes nodes.disccodes = disccodes diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-char-ini.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-char-ini.lua deleted file mode 100644 index 0a79051e8f0..00000000000 --- a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-char-ini.lua +++ /dev/null @@ -1,1318 +0,0 @@ -if not modules then modules = { } end modules ['char-ini'] = { - version = 1.001, - comment = "companion to char-ini.mkiv", - author = "Hans Hagen, 
PRAGMA-ADE, Hasselt NL", - copyright = "PRAGMA ADE / ConTeXt Development Team", - license = "see context related readme files" -} - --- todo: make two files, one for format generation, one for format use --- todo: move some to char-utf - --- we can remove the tag range starting at 0xE0000 (special applications) - -local utfchar, utfbyte, utfvalues, ustring, utotable = utf.char, utf.byte, utf.values, utf.ustring, utf.totable -local concat, unpack, tohash = table.concat, table.unpack, table.tohash -local next, tonumber, type, rawget, rawset = next, tonumber, type, rawget, rawset -local format, lower, gsub = string.format, string.lower, string.gsub -local P, R, S, Cs = lpeg.P, lpeg.R, lpeg.S, lpeg.Cs - -if not characters then require("char-def") end - -local lpegpatterns = lpeg.patterns -local lpegmatch = lpeg.match -local utf8byte = lpegpatterns.utf8byte -local utf8character = lpegpatterns.utf8character - -local utfchartabletopattern = lpeg.utfchartabletopattern - -local allocate = utilities.storage.allocate -local mark = utilities.storage.mark - -local setmetatableindex = table.setmetatableindex - -local trace_defining = false trackers.register("characters.defining", function(v) characters_defining = v end) - -local report_defining = logs.reporter("characters") - ---[[ldx-- -<p>This module implements some methods and creates additional datastructured -from the big character table that we use for all kind of purposes: -<type>char-def.lua</type>.</p> - -<p>We assume that at this point <type>characters.data</type> is already -loaded!</p> ---ldx]]-- - --- todo: in 'char-def.lua' assume defaults: --- --- directtions = l --- cjkwd = a --- linebreak = al - -characters = characters or { } -local characters = characters -local data = characters.data - -if data then - mark(data) -- why does this fail -else - report_defining("fatal error: 'char-def.lua' is not loaded") - os.exit() -end - ---[[ldx-- -<p>This converts a string (if given) into a number.</p> ---ldx]]-- - -local pattern = (P("0x") + P("U+")) * ((R("09","AF")^1 * P(-1)) / function(s) return tonumber(s,16) end) - -lpegpatterns.chartonumber = pattern - -local function chartonumber(k) - if type(k) == "string" then - local u = lpegmatch(pattern,k) - if u then - return utfbyte(u) - else - return utfbyte(k) or 0 - end - else - return k or 0 - end -end - -local function charfromnumber(k) - if type(k) == "number" then - return utfchar(k) or "" - else - local u = lpegmatch(pattern,k) - if u then - return utfchar(u) - else - return k - end - end -end - ---~ print(chartonumber(97), chartonumber("a"), chartonumber("0x61"), chartonumber("U+61")) - -characters.tonumber = chartonumber -characters.fromnumber = charfromnumber - -local private = { - description = "PRIVATE SLOT", -} - -local ranges = allocate() -characters.ranges = ranges - -setmetatableindex(data, function(t,k) - local tk = type(k) - if tk == "string" then - k = lpegmatch(pattern,k) or utfbyte(k) - if k then - local v = rawget(t,k) - if v then - return v - else - tk = "number" -- fall through to range - end - else - return private - end - end - if tk == "number" and k < 0xF0000 then - for r=1,#ranges do - local rr = ranges[r] - if k >= rr.first and k <= rr.last then - local extender = rr.extender - if extender then - local v = extender(k,v) - t[k] = v - return v - end - end - end - end - return private -- handy for when we loop over characters in fonts and check for a property -end) - -local blocks = allocate { - ["aegeannumbers"] = { first = 0x10100, last = 0x1013F, description = "Aegean 
Numbers" }, - ["ahom"] = { first = 0x11700, last = 0x1173F, description = "Ahom" }, - ["alchemicalsymbols"] = { first = 0x1F700, last = 0x1F77F, description = "Alchemical Symbols" }, - ["alphabeticpresentationforms"] = { first = 0x0FB00, last = 0x0FB4F, otf="latn", description = "Alphabetic Presentation Forms" }, - ["anatolianhieroglyphs"] = { first = 0x14400, last = 0x1467F, description = "Anatolian Hieroglyphs" }, - ["ancientgreekmusicalnotation"] = { first = 0x1D200, last = 0x1D24F, otf="grek", description = "Ancient Greek Musical Notation" }, - ["ancientgreeknumbers"] = { first = 0x10140, last = 0x1018F, otf="grek", description = "Ancient Greek Numbers" }, - ["ancientsymbols"] = { first = 0x10190, last = 0x101CF, otf="grek", description = "Ancient Symbols" }, - ["arabic"] = { first = 0x00600, last = 0x006FF, otf="arab", description = "Arabic" }, - ["arabicextendeda"] = { first = 0x008A0, last = 0x008FF, description = "Arabic Extended-A" }, - ["arabicmathematicalalphabeticsymbols"] = { first = 0x1EE00, last = 0x1EEFF, description = "Arabic Mathematical Alphabetic Symbols" }, - ["arabicpresentationformsa"] = { first = 0x0FB50, last = 0x0FDFF, otf="arab", description = "Arabic Presentation Forms-A" }, - ["arabicpresentationformsb"] = { first = 0x0FE70, last = 0x0FEFF, otf="arab", description = "Arabic Presentation Forms-B" }, - ["arabicsupplement"] = { first = 0x00750, last = 0x0077F, otf="arab", description = "Arabic Supplement" }, - ["armenian"] = { first = 0x00530, last = 0x0058F, otf="armn", description = "Armenian" }, - ["arrows"] = { first = 0x02190, last = 0x021FF, description = "Arrows" }, - ["avestan"] = { first = 0x10B00, last = 0x10B3F, description = "Avestan" }, - ["balinese"] = { first = 0x01B00, last = 0x01B7F, otf="bali", description = "Balinese" }, - ["bamum"] = { first = 0x0A6A0, last = 0x0A6FF, description = "Bamum" }, - ["bamumsupplement"] = { first = 0x16800, last = 0x16A3F, description = "Bamum Supplement" }, - ["basiclatin"] = { first = 0x00000, last = 0x0007F, otf="latn", description = "Basic Latin" }, - ["bassavah"] = { first = 0x16AD0, last = 0x16AFF, description = "Bassa Vah" }, - ["batak"] = { first = 0x01BC0, last = 0x01BFF, description = "Batak" }, - ["bengali"] = { first = 0x00980, last = 0x009FF, otf="beng", description = "Bengali" }, - ["blockelements"] = { first = 0x02580, last = 0x0259F, otf="bopo", description = "Block Elements" }, - ["bopomofo"] = { first = 0x03100, last = 0x0312F, otf="bopo", description = "Bopomofo" }, - ["bopomofoextended"] = { first = 0x031A0, last = 0x031BF, otf="bopo", description = "Bopomofo Extended" }, - ["boxdrawing"] = { first = 0x02500, last = 0x0257F, description = "Box Drawing" }, - ["brahmi"] = { first = 0x11000, last = 0x1107F, description = "Brahmi" }, - ["braillepatterns"] = { first = 0x02800, last = 0x028FF, otf="brai", description = "Braille Patterns" }, - ["buginese"] = { first = 0x01A00, last = 0x01A1F, otf="bugi", description = "Buginese" }, - ["buhid"] = { first = 0x01740, last = 0x0175F, otf="buhd", description = "Buhid" }, - ["byzantinemusicalsymbols"] = { first = 0x1D000, last = 0x1D0FF, otf="byzm", description = "Byzantine Musical Symbols" }, - ["carian"] = { first = 0x102A0, last = 0x102DF, description = "Carian" }, - ["caucasianalbanian"] = { first = 0x10530, last = 0x1056F, description = "Caucasian Albanian" }, - ["chakma"] = { first = 0x11100, last = 0x1114F, description = "Chakma" }, - ["cham"] = { first = 0x0AA00, last = 0x0AA5F, description = "Cham" }, - ["cherokee"] = { first = 0x013A0, last = 
0x013FF, otf="cher", description = "Cherokee" }, - ["cherokeesupplement"] = { first = 0x0AB70, last = 0x0ABBF, description = "Cherokee Supplement" }, - ["cjkcompatibility"] = { first = 0x03300, last = 0x033FF, otf="hang", description = "CJK Compatibility" }, - ["cjkcompatibilityforms"] = { first = 0x0FE30, last = 0x0FE4F, otf="hang", description = "CJK Compatibility Forms" }, - ["cjkcompatibilityideographs"] = { first = 0x0F900, last = 0x0FAFF, otf="hang", description = "CJK Compatibility Ideographs" }, - ["cjkcompatibilityideographssupplement"] = { first = 0x2F800, last = 0x2FA1F, otf="hang", description = "CJK Compatibility Ideographs Supplement" }, - ["cjkradicalssupplement"] = { first = 0x02E80, last = 0x02EFF, otf="hang", description = "CJK Radicals Supplement" }, - ["cjkstrokes"] = { first = 0x031C0, last = 0x031EF, otf="hang", description = "CJK Strokes" }, - ["cjksymbolsandpunctuation"] = { first = 0x03000, last = 0x0303F, otf="hang", description = "CJK Symbols and Punctuation" }, - ["cjkunifiedideographs"] = { first = 0x04E00, last = 0x09FFF, otf="hang", description = "CJK Unified Ideographs", catcode = "letter" }, - ["cjkunifiedideographsextensiona"] = { first = 0x03400, last = 0x04DBF, otf="hang", description = "CJK Unified Ideographs Extension A" }, - ["cjkunifiedideographsextensionb"] = { first = 0x20000, last = 0x2A6DF, otf="hang", description = "CJK Unified Ideographs Extension B" }, - ["cjkunifiedideographsextensionc"] = { first = 0x2A700, last = 0x2B73F, description = "CJK Unified Ideographs Extension C" }, - ["cjkunifiedideographsextensiond"] = { first = 0x2B740, last = 0x2B81F, description = "CJK Unified Ideographs Extension D" }, - ["cjkunifiedideographsextensione"] = { first = 0x2B820, last = 0x2CEAF, description = "CJK Unified Ideographs Extension E" }, - ["combiningdiacriticalmarks"] = { first = 0x00300, last = 0x0036F, description = "Combining Diacritical Marks" }, - ["combiningdiacriticalmarksextended"] = { first = 0x01AB0, last = 0x01AFF, description = "Combining Diacritical Marks Extended" }, - ["combiningdiacriticalmarksforsymbols"] = { first = 0x020D0, last = 0x020FF, description = "Combining Diacritical Marks for Symbols" }, - ["combiningdiacriticalmarkssupplement"] = { first = 0x01DC0, last = 0x01DFF, description = "Combining Diacritical Marks Supplement" }, - ["combininghalfmarks"] = { first = 0x0FE20, last = 0x0FE2F, description = "Combining Half Marks" }, - ["commonindicnumberforms"] = { first = 0x0A830, last = 0x0A83F, description = "Common Indic Number Forms" }, - ["controlpictures"] = { first = 0x02400, last = 0x0243F, description = "Control Pictures" }, - ["coptic"] = { first = 0x02C80, last = 0x02CFF, otf="copt", description = "Coptic" }, - ["copticepactnumbers"] = { first = 0x102E0, last = 0x102FF, description = "Coptic Epact Numbers" }, - ["countingrodnumerals"] = { first = 0x1D360, last = 0x1D37F, description = "Counting Rod Numerals" }, - ["cuneiform"] = { first = 0x12000, last = 0x123FF, otf="xsux", description = "Cuneiform" }, - ["cuneiformnumbersandpunctuation"] = { first = 0x12400, last = 0x1247F, otf="xsux", description = "Cuneiform Numbers and Punctuation" }, - ["currencysymbols"] = { first = 0x020A0, last = 0x020CF, description = "Currency Symbols" }, - ["cypriotsyllabary"] = { first = 0x10800, last = 0x1083F, otf="cprt", description = "Cypriot Syllabary" }, - ["cyrillic"] = { first = 0x00400, last = 0x004FF, otf="cyrl", description = "Cyrillic" }, - ["cyrillicextendeda"] = { first = 0x02DE0, last = 0x02DFF, otf="cyrl", description = 
"Cyrillic Extended-A" }, - ["cyrillicextendedb"] = { first = 0x0A640, last = 0x0A69F, otf="cyrl", description = "Cyrillic Extended-B" }, - ["cyrillicsupplement"] = { first = 0x00500, last = 0x0052F, otf="cyrl", description = "Cyrillic Supplement" }, - ["deseret"] = { first = 0x10400, last = 0x1044F, otf="dsrt", description = "Deseret" }, - ["devanagari"] = { first = 0x00900, last = 0x0097F, otf="deva", description = "Devanagari" }, - ["devanagariextended"] = { first = 0x0A8E0, last = 0x0A8FF, description = "Devanagari Extended" }, - ["digitsarabicindic"] = { first = 0x00660, last = 0x00669, math = true }, - -- ["digitsbengali"] = { first = 0x009E6, last = 0x009EF, math = true }, - ["digitsbold"] = { first = 0x1D7CE, last = 0x1D7D8, math = true }, - -- ["digitsdevanagari"] = { first = 0x00966, last = 0x0096F, math = true }, - ["digitsdoublestruck"] = { first = 0x1D7D8, last = 0x1D7E2, math = true }, - -- ["digitsethiopic"] = { first = 0x01369, last = 0x01371, math = true }, - ["digitsextendedarabicindic"] = { first = 0x006F0, last = 0x006F9, math = true }, - -- ["digitsgujarati"] = { first = 0x00AE6, last = 0x00AEF, math = true }, - -- ["digitsgurmukhi"] = { first = 0x00A66, last = 0x00A6F, math = true }, - -- ["digitskannada"] = { first = 0x00CE6, last = 0x00CEF, math = true }, - -- ["digitskhmer"] = { first = 0x017E0, last = 0x017E9, math = true }, - -- ["digitslao"] = { first = 0x00ED0, last = 0x00ED9, math = true }, - ["digitslatin"] = { first = 0x00030, last = 0x00039, math = true }, - -- ["digitsmalayalam"] = { first = 0x00D66, last = 0x00D6F, math = true }, - -- ["digitsmongolian"] = { first = 0x01810, last = 0x01809, math = true }, - ["digitsmonospace"] = { first = 0x1D7F6, last = 0x1D80F, math = true }, - -- ["digitsmyanmar"] = { first = 0x01040, last = 0x01049, math = true }, - ["digitsnormal"] = { first = 0x00030, last = 0x00039, math = true }, - -- ["digitsoriya"] = { first = 0x00B66, last = 0x00B6F, math = true }, - ["digitssansserifbold"] = { first = 0x1D7EC, last = 0x1D805, math = true }, - ["digitssansserifnormal"] = { first = 0x1D7E2, last = 0x1D7EC, math = true }, - -- ["digitstamil"] = { first = 0x00030, last = 0x00039, math = true }, -- no zero - -- ["digitstelugu"] = { first = 0x00C66, last = 0x00C6F, math = true }, - -- ["digitsthai"] = { first = 0x00E50, last = 0x00E59, math = true }, - -- ["digitstibetan"] = { first = 0x00F20, last = 0x00F29, math = true }, - ["dingbats"] = { first = 0x02700, last = 0x027BF, description = "Dingbats" }, - ["dominotiles"] = { first = 0x1F030, last = 0x1F09F, description = "Domino Tiles" }, - ["duployan"] = { first = 0x1BC00, last = 0x1BC9F, description = "Duployan" }, - ["earlydynasticcuneiform"] = { first = 0x12480, last = 0x1254F, description = "Early Dynastic Cuneiform" }, - ["egyptianhieroglyphs"] = { first = 0x13000, last = 0x1342F, description = "Egyptian Hieroglyphs" }, - ["elbasan"] = { first = 0x10500, last = 0x1052F, description = "Elbasan" }, - ["emoticons"] = { first = 0x1F600, last = 0x1F64F, description = "Emoticons" }, - ["enclosedalphanumerics"] = { first = 0x02460, last = 0x024FF, description = "Enclosed Alphanumerics" }, - ["enclosedalphanumericsupplement"] = { first = 0x1F100, last = 0x1F1FF, description = "Enclosed Alphanumeric Supplement" }, - ["enclosedcjklettersandmonths"] = { first = 0x03200, last = 0x032FF, description = "Enclosed CJK Letters and Months" }, - ["enclosedideographicsupplement"] = { first = 0x1F200, last = 0x1F2FF, description = "Enclosed Ideographic Supplement" }, - ["ethiopic"] = { first = 
0x01200, last = 0x0137F, otf="ethi", description = "Ethiopic" }, - ["ethiopicextended"] = { first = 0x02D80, last = 0x02DDF, otf="ethi", description = "Ethiopic Extended" }, - ["ethiopicextendeda"] = { first = 0x0AB00, last = 0x0AB2F, description = "Ethiopic Extended-A" }, - ["ethiopicsupplement"] = { first = 0x01380, last = 0x0139F, otf="ethi", description = "Ethiopic Supplement" }, - ["generalpunctuation"] = { first = 0x02000, last = 0x0206F, description = "General Punctuation" }, - ["geometricshapes"] = { first = 0x025A0, last = 0x025FF, math = true, description = "Geometric Shapes" }, - ["geometricshapesextended"] = { first = 0x1F780, last = 0x1F7FF, description = "Geometric Shapes Extended" }, - ["georgian"] = { first = 0x010A0, last = 0x010FF, otf="geor", description = "Georgian" }, - ["georgiansupplement"] = { first = 0x02D00, last = 0x02D2F, otf="geor", description = "Georgian Supplement" }, - ["glagolitic"] = { first = 0x02C00, last = 0x02C5F, otf="glag", description = "Glagolitic" }, - ["gothic"] = { first = 0x10330, last = 0x1034F, otf="goth", description = "Gothic" }, - ["grantha"] = { first = 0x11300, last = 0x1137F, description = "Grantha" }, - ["greekandcoptic"] = { first = 0x00370, last = 0x003FF, otf="grek", description = "Greek and Coptic" }, - ["greekextended"] = { first = 0x01F00, last = 0x01FFF, otf="grek", description = "Greek Extended" }, - ["gujarati"] = { first = 0x00A80, last = 0x00AFF, otf="gujr", description = "Gujarati" }, - ["gurmukhi"] = { first = 0x00A00, last = 0x00A7F, otf="guru", description = "Gurmukhi" }, - ["halfwidthandfullwidthforms"] = { first = 0x0FF00, last = 0x0FFEF, description = "Halfwidth and Fullwidth Forms" }, - ["hangulcompatibilityjamo"] = { first = 0x03130, last = 0x0318F, otf="jamo", description = "Hangul Compatibility Jamo" }, - ["hanguljamo"] = { first = 0x01100, last = 0x011FF, otf="jamo", description = "Hangul Jamo" }, - ["hanguljamoextendeda"] = { first = 0x0A960, last = 0x0A97F, description = "Hangul Jamo Extended-A" }, - ["hanguljamoextendedb"] = { first = 0x0D7B0, last = 0x0D7FF, description = "Hangul Jamo Extended-B" }, - ["hangulsyllables"] = { first = 0x0AC00, last = 0x0D7AF, otf="hang", description = "Hangul Syllables" }, - ["hanunoo"] = { first = 0x01720, last = 0x0173F, otf="hano", description = "Hanunoo" }, - ["hatran"] = { first = 0x108E0, last = 0x108FF, description = "Hatran" }, - ["hebrew"] = { first = 0x00590, last = 0x005FF, otf="hebr", description = "Hebrew" }, - ["highprivateusesurrogates"] = { first = 0x0DB80, last = 0x0DBFF, description = "High Private Use Surrogates" }, - ["highsurrogates"] = { first = 0x0D800, last = 0x0DB7F, description = "High Surrogates" }, - ["hiragana"] = { first = 0x03040, last = 0x0309F, otf="kana", description = "Hiragana" }, - ["ideographicdescriptioncharacters"] = { first = 0x02FF0, last = 0x02FFF, description = "Ideographic Description Characters" }, - ["imperialaramaic"] = { first = 0x10840, last = 0x1085F, description = "Imperial Aramaic" }, - ["inscriptionalpahlavi"] = { first = 0x10B60, last = 0x10B7F, description = "Inscriptional Pahlavi" }, - ["inscriptionalparthian"] = { first = 0x10B40, last = 0x10B5F, description = "Inscriptional Parthian" }, - ["ipaextensions"] = { first = 0x00250, last = 0x002AF, description = "IPA Extensions" }, - ["javanese"] = { first = 0x0A980, last = 0x0A9DF, description = "Javanese" }, - ["kaithi"] = { first = 0x11080, last = 0x110CF, description = "Kaithi" }, - ["kanasupplement"] = { first = 0x1B000, last = 0x1B0FF, description = "Kana Supplement" 
}, - ["kanbun"] = { first = 0x03190, last = 0x0319F, description = "Kanbun" }, - ["kangxiradicals"] = { first = 0x02F00, last = 0x02FDF, description = "Kangxi Radicals" }, - ["kannada"] = { first = 0x00C80, last = 0x00CFF, otf="knda", description = "Kannada" }, - ["katakana"] = { first = 0x030A0, last = 0x030FF, otf="kana", description = "Katakana" }, - ["katakanaphoneticextensions"] = { first = 0x031F0, last = 0x031FF, otf="kana", description = "Katakana Phonetic Extensions" }, - ["kayahli"] = { first = 0x0A900, last = 0x0A92F, description = "Kayah Li" }, - ["kharoshthi"] = { first = 0x10A00, last = 0x10A5F, otf="khar", description = "Kharoshthi" }, - ["khmer"] = { first = 0x01780, last = 0x017FF, otf="khmr", description = "Khmer" }, - ["khmersymbols"] = { first = 0x019E0, last = 0x019FF, otf="khmr", description = "Khmer Symbols" }, - ["khojki"] = { first = 0x11200, last = 0x1124F, description = "Khojki" }, - ["khudawadi"] = { first = 0x112B0, last = 0x112FF, description = "Khudawadi" }, - ["lao"] = { first = 0x00E80, last = 0x00EFF, otf="lao", description = "Lao" }, - ["latinextendeda"] = { first = 0x00100, last = 0x0017F, otf="latn", description = "Latin Extended-A" }, - ["latinextendedadditional"] = { first = 0x01E00, last = 0x01EFF, otf="latn", description = "Latin Extended Additional" }, - ["latinextendedb"] = { first = 0x00180, last = 0x0024F, otf="latn", description = "Latin Extended-B" }, - ["latinextendedc"] = { first = 0x02C60, last = 0x02C7F, otf="latn", description = "Latin Extended-C" }, - ["latinextendedd"] = { first = 0x0A720, last = 0x0A7FF, otf="latn", description = "Latin Extended-D" }, - ["latinextendede"] = { first = 0x0AB30, last = 0x0AB6F, description = "Latin Extended-E" }, - ["latinsupplement"] = { first = 0x00080, last = 0x000FF, otf="latn", description = "Latin-1 Supplement" }, - ["lepcha"] = { first = 0x01C00, last = 0x01C4F, description = "Lepcha" }, - ["letterlikesymbols"] = { first = 0x02100, last = 0x0214F, math = true, description = "Letterlike Symbols" }, - ["limbu"] = { first = 0x01900, last = 0x0194F, otf="limb", description = "Limbu" }, - ["lineara"] = { first = 0x10600, last = 0x1077F, description = "Linear A" }, - ["linearbideograms"] = { first = 0x10080, last = 0x100FF, otf="linb", description = "Linear B Ideograms" }, - ["linearbsyllabary"] = { first = 0x10000, last = 0x1007F, otf="linb", description = "Linear B Syllabary" }, - ["lisu"] = { first = 0x0A4D0, last = 0x0A4FF, description = "Lisu" }, - ["lowercasebold"] = { first = 0x1D41A, last = 0x1D433, math = true }, - ["lowercaseboldfraktur"] = { first = 0x1D586, last = 0x1D59F, math = true }, - ["lowercasebolditalic"] = { first = 0x1D482, last = 0x1D49B, math = true }, - ["lowercaseboldscript"] = { first = 0x1D4EA, last = 0x1D503, math = true }, - ["lowercasedoublestruck"] = { first = 0x1D552, last = 0x1D56B, math = true }, - ["lowercasefraktur"] = { first = 0x1D51E, last = 0x1D537, math = true }, - ["lowercasegreekbold"] = { first = 0x1D6C2, last = 0x1D6DB, math = true }, - ["lowercasegreekbolditalic"] = { first = 0x1D736, last = 0x1D74F, math = true }, - ["lowercasegreekitalic"] = { first = 0x1D6FC, last = 0x1D715, math = true }, - ["lowercasegreeknormal"] = { first = 0x003B1, last = 0x003CA, math = true }, - ["lowercasegreeksansserifbold"] = { first = 0x1D770, last = 0x1D789, math = true }, - ["lowercasegreeksansserifbolditalic"] = { first = 0x1D7AA, last = 0x1D7C3, math = true }, - ["lowercaseitalic"] = { first = 0x1D44E, last = 0x1D467, math = true }, - ["lowercasemonospace"] = { first = 
0x1D68A, last = 0x1D6A3, math = true }, - ["lowercasenormal"] = { first = 0x00061, last = 0x0007A, math = true }, - ["lowercasesansserifbold"] = { first = 0x1D5EE, last = 0x1D607, math = true }, - ["lowercasesansserifbolditalic"] = { first = 0x1D656, last = 0x1D66F, math = true }, - ["lowercasesansserifitalic"] = { first = 0x1D622, last = 0x1D63B, math = true }, - ["lowercasesansserifnormal"] = { first = 0x1D5BA, last = 0x1D5D3, math = true }, - ["lowercasescript"] = { first = 0x1D4B6, last = 0x1D4CF, math = true }, - ["lowsurrogates"] = { first = 0x0DC00, last = 0x0DFFF, description = "Low Surrogates" }, - ["lycian"] = { first = 0x10280, last = 0x1029F, description = "Lycian" }, - ["lydian"] = { first = 0x10920, last = 0x1093F, description = "Lydian" }, - ["mahajani"] = { first = 0x11150, last = 0x1117F, description = "Mahajani" }, - ["mahjongtiles"] = { first = 0x1F000, last = 0x1F02F, description = "Mahjong Tiles" }, - ["malayalam"] = { first = 0x00D00, last = 0x00D7F, otf="mlym", description = "Malayalam" }, - ["mandaic"] = { first = 0x00840, last = 0x0085F, otf="mand", description = "Mandaic" }, - ["manichaean"] = { first = 0x10AC0, last = 0x10AFF, description = "Manichaean" }, - ["mathematicalalphanumericsymbols"] = { first = 0x1D400, last = 0x1D7FF, math = true, description = "Mathematical Alphanumeric Symbols" }, - ["mathematicaloperators"] = { first = 0x02200, last = 0x022FF, math = true, description = "Mathematical Operators" }, - ["meeteimayek"] = { first = 0x0ABC0, last = 0x0ABFF, description = "Meetei Mayek" }, - ["meeteimayekextensions"] = { first = 0x0AAE0, last = 0x0AAFF, description = "Meetei Mayek Extensions" }, - ["mendekikakui"] = { first = 0x1E800, last = 0x1E8DF, description = "Mende Kikakui" }, - ["meroiticcursive"] = { first = 0x109A0, last = 0x109FF, description = "Meroitic Cursive" }, - ["meroitichieroglyphs"] = { first = 0x10980, last = 0x1099F, description = "Meroitic Hieroglyphs" }, - ["miao"] = { first = 0x16F00, last = 0x16F9F, description = "Miao" }, - ["miscellaneousmathematicalsymbolsa"] = { first = 0x027C0, last = 0x027EF, math = true, description = "Miscellaneous Mathematical Symbols-A" }, - ["miscellaneousmathematicalsymbolsb"] = { first = 0x02980, last = 0x029FF, math = true, description = "Miscellaneous Mathematical Symbols-B" }, - ["miscellaneoussymbols"] = { first = 0x02600, last = 0x026FF, math = true, description = "Miscellaneous Symbols" }, - ["miscellaneoussymbolsandarrows"] = { first = 0x02B00, last = 0x02BFF, math = true, description = "Miscellaneous Symbols and Arrows" }, - ["miscellaneoussymbolsandpictographs"] = { first = 0x1F300, last = 0x1F5FF, description = "Miscellaneous Symbols and Pictographs" }, - ["miscellaneoustechnical"] = { first = 0x02300, last = 0x023FF, math = true, description = "Miscellaneous Technical" }, - ["modi"] = { first = 0x11600, last = 0x1165F, description = "Modi" }, - ["modifiertoneletters"] = { first = 0x0A700, last = 0x0A71F, description = "Modifier Tone Letters" }, - ["mongolian"] = { first = 0x01800, last = 0x018AF, otf="mong", description = "Mongolian" }, - ["mro"] = { first = 0x16A40, last = 0x16A6F, description = "Mro" }, - ["multani"] = { first = 0x11280, last = 0x112AF, description = "Multani" }, - ["musicalsymbols"] = { first = 0x1D100, last = 0x1D1FF, otf="musc", description = "Musical Symbols" }, - ["myanmar"] = { first = 0x01000, last = 0x0109F, otf="mymr", description = "Myanmar" }, - ["myanmarextendeda"] = { first = 0x0AA60, last = 0x0AA7F, description = "Myanmar Extended-A" }, - 
["myanmarextendedb"] = { first = 0x0A9E0, last = 0x0A9FF, description = "Myanmar Extended-B" }, - ["nabataean"] = { first = 0x10880, last = 0x108AF, description = "Nabataean" }, - ["newtailue"] = { first = 0x01980, last = 0x019DF, description = "New Tai Lue" }, - ["nko"] = { first = 0x007C0, last = 0x007FF, otf="nko", description = "NKo" }, - ["numberforms"] = { first = 0x02150, last = 0x0218F, description = "Number Forms" }, - ["ogham"] = { first = 0x01680, last = 0x0169F, otf="ogam", description = "Ogham" }, - ["olchiki"] = { first = 0x01C50, last = 0x01C7F, description = "Ol Chiki" }, - ["oldhungarian"] = { first = 0x10C80, last = 0x10CFF, description = "Old Hungarian" }, - ["olditalic"] = { first = 0x10300, last = 0x1032F, otf="ital", description = "Old Italic" }, - ["oldnortharabian"] = { first = 0x10A80, last = 0x10A9F, description = "Old North Arabian" }, - ["oldpermic"] = { first = 0x10350, last = 0x1037F, description = "Old Permic" }, - ["oldpersian"] = { first = 0x103A0, last = 0x103DF, otf="xpeo", description = "Old Persian" }, - ["oldsoutharabian"] = { first = 0x10A60, last = 0x10A7F, description = "Old South Arabian" }, - ["oldturkic"] = { first = 0x10C00, last = 0x10C4F, description = "Old Turkic" }, - ["opticalcharacterrecognition"] = { first = 0x02440, last = 0x0245F, description = "Optical Character Recognition" }, - ["oriya"] = { first = 0x00B00, last = 0x00B7F, otf="orya", description = "Oriya" }, - ["ornamentaldingbats"] = { first = 0x1F650, last = 0x1F67F, description = "Ornamental Dingbats" }, - ["osmanya"] = { first = 0x10480, last = 0x104AF, otf="osma", description = "Osmanya" }, - ["pahawhhmong"] = { first = 0x16B00, last = 0x16B8F, description = "Pahawh Hmong" }, - ["palmyrene"] = { first = 0x10860, last = 0x1087F, description = "Palmyrene" }, - ["paucinhau"] = { first = 0x11AC0, last = 0x11AFF, description = "Pau Cin Hau" }, - ["phagspa"] = { first = 0x0A840, last = 0x0A87F, otf="phag", description = "Phags-pa" }, - ["phaistosdisc"] = { first = 0x101D0, last = 0x101FF, description = "Phaistos Disc" }, - ["phoenician"] = { first = 0x10900, last = 0x1091F, otf="phnx", description = "Phoenician" }, - ["phoneticextensions"] = { first = 0x01D00, last = 0x01D7F, description = "Phonetic Extensions" }, - ["phoneticextensionssupplement"] = { first = 0x01D80, last = 0x01DBF, description = "Phonetic Extensions Supplement" }, - ["playingcards"] = { first = 0x1F0A0, last = 0x1F0FF, description = "Playing Cards" }, - ["privateusearea"] = { first = 0x0E000, last = 0x0F8FF, description = "Private Use Area" }, - ["psalterpahlavi"] = { first = 0x10B80, last = 0x10BAF, description = "Psalter Pahlavi" }, - ["rejang"] = { first = 0x0A930, last = 0x0A95F, description = "Rejang" }, - ["ruminumeralsymbols"] = { first = 0x10E60, last = 0x10E7F, description = "Rumi Numeral Symbols" }, - ["runic"] = { first = 0x016A0, last = 0x016FF, otf="runr", description = "Runic" }, - ["samaritan"] = { first = 0x00800, last = 0x0083F, description = "Samaritan" }, - ["saurashtra"] = { first = 0x0A880, last = 0x0A8DF, description = "Saurashtra" }, - ["sharada"] = { first = 0x11180, last = 0x111DF, description = "Sharada" }, - ["shavian"] = { first = 0x10450, last = 0x1047F, otf="shaw", description = "Shavian" }, - ["shorthandformatcontrols"] = { first = 0x1BCA0, last = 0x1BCAF, description = "Shorthand Format Controls" }, - ["siddham"] = { first = 0x11580, last = 0x115FF, description = "Siddham" }, - ["sinhala"] = { first = 0x00D80, last = 0x00DFF, otf="sinh", description = "Sinhala" }, - 
["sinhalaarchaicnumbers"] = { first = 0x111E0, last = 0x111FF, description = "Sinhala Archaic Numbers" }, - ["smallformvariants"] = { first = 0x0FE50, last = 0x0FE6F, description = "Small Form Variants" }, - ["sorasompeng"] = { first = 0x110D0, last = 0x110FF, description = "Sora Sompeng" }, - ["spacingmodifierletters"] = { first = 0x002B0, last = 0x002FF, description = "Spacing Modifier Letters" }, - ["specials"] = { first = 0x0FFF0, last = 0x0FFFF, description = "Specials" }, - ["sundanese"] = { first = 0x01B80, last = 0x01BBF, description = "Sundanese" }, - ["sundanesesupplement"] = { first = 0x01CC0, last = 0x01CCF, description = "Sundanese Supplement" }, - ["superscriptsandsubscripts"] = { first = 0x02070, last = 0x0209F, description = "Superscripts and Subscripts" }, - ["supplementalarrowsa"] = { first = 0x027F0, last = 0x027FF, math = true, description = "Supplemental Arrows-A" }, - ["supplementalarrowsb"] = { first = 0x02900, last = 0x0297F, math = true, description = "Supplemental Arrows-B" }, - ["supplementalarrowsc"] = { first = 0x1F800, last = 0x1F8FF, math = true, description = "Supplemental Arrows-C" }, - ["supplementalmathematicaloperators"] = { first = 0x02A00, last = 0x02AFF, math = true, description = "Supplemental Mathematical Operators" }, - ["supplementalpunctuation"] = { first = 0x02E00, last = 0x02E7F, description = "Supplemental Punctuation" }, - ["supplementalsymbolsandpictographs"] = { first = 0x1F900, last = 0x1F9FF, description = "Supplemental Symbols and Pictographs" }, - ["supplementaryprivateuseareaa"] = { first = 0xF0000, last = 0xFFFFF, description = "Supplementary Private Use Area-A" }, - ["supplementaryprivateuseareab"] = { first = 0x100000,last = 0x10FFFF, description = "Supplementary Private Use Area-B" }, - ["suttonsignwriting"] = { first = 0x1D800, last = 0x1DAAF, description = "Sutton SignWriting" }, - ["sylotinagri"] = { first = 0x0A800, last = 0x0A82F, otf="sylo", description = "Syloti Nagri" }, - ["syriac"] = { first = 0x00700, last = 0x0074F, otf="syrc", description = "Syriac" }, - ["tagalog"] = { first = 0x01700, last = 0x0171F, otf="tglg", description = "Tagalog" }, - ["tagbanwa"] = { first = 0x01760, last = 0x0177F, otf="tagb", description = "Tagbanwa" }, - ["tags"] = { first = 0xE0000, last = 0xE007F, description = "Tags" }, - ["taile"] = { first = 0x01950, last = 0x0197F, otf="tale", description = "Tai Le" }, - ["taitham"] = { first = 0x01A20, last = 0x01AAF, description = "Tai Tham" }, - ["taiviet"] = { first = 0x0AA80, last = 0x0AADF, description = "Tai Viet" }, - ["taixuanjingsymbols"] = { first = 0x1D300, last = 0x1D35F, description = "Tai Xuan Jing Symbols" }, - ["takri"] = { first = 0x11680, last = 0x116CF, description = "Takri" }, - ["tamil"] = { first = 0x00B80, last = 0x00BFF, otf="taml", description = "Tamil" }, - ["telugu"] = { first = 0x00C00, last = 0x00C7F, otf="telu", description = "Telugu" }, - ["thaana"] = { first = 0x00780, last = 0x007BF, otf="thaa", description = "Thaana" }, - ["thai"] = { first = 0x00E00, last = 0x00E7F, otf="thai", description = "Thai" }, - ["tibetan"] = { first = 0x00F00, last = 0x00FFF, otf="tibt", description = "Tibetan" }, - ["tifinagh"] = { first = 0x02D30, last = 0x02D7F, otf="tfng", description = "Tifinagh" }, - ["tirhuta"] = { first = 0x11480, last = 0x114DF, description = "Tirhuta" }, - ["transportandmapsymbols"] = { first = 0x1F680, last = 0x1F6FF, description = "Transport and Map Symbols" }, - ["ugaritic"] = { first = 0x10380, last = 0x1039F, otf="ugar", description = "Ugaritic" }, - 
["unifiedcanadianaboriginalsyllabics"] = { first = 0x01400, last = 0x0167F, otf="cans", description = "Unified Canadian Aboriginal Syllabics" }, - ["unifiedcanadianaboriginalsyllabicsextended"] = { first = 0x018B0, last = 0x018FF, description = "Unified Canadian Aboriginal Syllabics Extended" }, - ["uppercasebold"] = { first = 0x1D400, last = 0x1D419, math = true }, - ["uppercaseboldfraktur"] = { first = 0x1D56C, last = 0x1D585, math = true }, - ["uppercasebolditalic"] = { first = 0x1D468, last = 0x1D481, math = true }, - ["uppercaseboldscript"] = { first = 0x1D4D0, last = 0x1D4E9, math = true }, - ["uppercasedoublestruck"] = { first = 0x1D538, last = 0x1D551, math = true }, - ["uppercasefraktur"] = { first = 0x1D504, last = 0x1D51D, math = true }, - ["uppercasegreekbold"] = { first = 0x1D6A8, last = 0x1D6C1, math = true }, - ["uppercasegreekbolditalic"] = { first = 0x1D71C, last = 0x1D735, math = true }, - ["uppercasegreekitalic"] = { first = 0x1D6E2, last = 0x1D6FB, math = true }, - ["uppercasegreeknormal"] = { first = 0x00391, last = 0x003AA, math = true }, - ["uppercasegreeksansserifbold"] = { first = 0x1D756, last = 0x1D76F, math = true }, - ["uppercasegreeksansserifbolditalic"] = { first = 0x1D790, last = 0x1D7A9, math = true }, - ["uppercaseitalic"] = { first = 0x1D434, last = 0x1D44D, math = true }, - ["uppercasemonospace"] = { first = 0x1D670, last = 0x1D689, math = true }, - ["uppercasenormal"] = { first = 0x00041, last = 0x0005A, math = true }, - ["uppercasesansserifbold"] = { first = 0x1D5D4, last = 0x1D5ED, math = true }, - ["uppercasesansserifbolditalic"] = { first = 0x1D63C, last = 0x1D655, math = true }, - ["uppercasesansserifitalic"] = { first = 0x1D608, last = 0x1D621, math = true }, - ["uppercasesansserifnormal"] = { first = 0x1D5A0, last = 0x1D5B9, math = true }, - ["uppercasescript"] = { first = 0x1D49C, last = 0x1D4B5, math = true }, - ["vai"] = { first = 0x0A500, last = 0x0A63F, description = "Vai" }, - ["variationselectors"] = { first = 0x0FE00, last = 0x0FE0F, description = "Variation Selectors" }, - ["variationselectorssupplement"] = { first = 0xE0100, last = 0xE01EF, description = "Variation Selectors Supplement" }, - ["vedicextensions"] = { first = 0x01CD0, last = 0x01CFF, description = "Vedic Extensions" }, - ["verticalforms"] = { first = 0x0FE10, last = 0x0FE1F, description = "Vertical Forms" }, - ["warangciti"] = { first = 0x118A0, last = 0x118FF, description = "Warang Citi" }, - ["yijinghexagramsymbols"] = { first = 0x04DC0, last = 0x04DFF, otf="yi", description = "Yijing Hexagram Symbols" }, - ["yiradicals"] = { first = 0x0A490, last = 0x0A4CF, otf="yi", description = "Yi Radicals" }, - ["yisyllables"] = { first = 0x0A000, last = 0x0A48F, otf="yi", description = "Yi Syllables" }, -} - -characters.blocks = blocks - -function characters.blockrange(name) - local b = blocks[name] - if b then - return b.first, b.last - else - return 0, 0 - end -end - -setmetatableindex(blocks, function(t,k) -- we could use an intermediate table if called often - return k and rawget(t,lower(gsub(k,"[^a-zA-Z]",""))) -end) - -local otfscripts = utilities.storage.allocate() -characters.otfscripts = otfscripts - -setmetatableindex(otfscripts,function(t,unicode) - for k, v in next, blocks do - local first, last = v.first, v.last - if unicode >= first and unicode <= last then - local script = v.otf or "dflt" - for u=first,last do - t[u] = script - end - return script - end - end - -- pretty slow when we're here - t[unicode] = "dflt" - return "dflt" -end) - -local splitter = 
lpeg.splitat(S(":-")) - -function characters.getrange(name) -- used in font fallback definitions (name or range) - local range = blocks[name] - if range then - return range.first, range.last, range.description, range.gaps - end - name = gsub(name,'"',"0x") -- goodie: tex hex notation - local start, stop = lpegmatch(splitter,name) - if start and stop then - start, stop = tonumber(start,16) or tonumber(start), tonumber(stop,16) or tonumber(stop) - if start and stop then - return start, stop, nil - end - end - local slot = tonumber(name,16) or tonumber(name) - return slot, slot, nil -end - -local categorytags = allocate { - lu = "Letter Uppercase", - ll = "Letter Lowercase", - lt = "Letter Titlecase", - lm = "Letter Modifier", - lo = "Letter Other", - mn = "Mark Nonspacing", - mc = "Mark Spacing Combining", - me = "Mark Enclosing", - nd = "Number Decimal Digit", - nl = "Number Letter", - no = "Number Other", - pc = "Punctuation Connector", - pd = "Punctuation Dash", - ps = "Punctuation Open", - pe = "Punctuation Close", - pi = "Punctuation Initial Quote", - pf = "Punctuation Final Quote", - po = "Punctuation Other", - sm = "Symbol Math", - sc = "Symbol Currency", - sk = "Symbol Modifier", - so = "Symbol Other", - zs = "Separator Space", - zl = "Separator Line", - zp = "Separator Paragraph", - cc = "Other Control", - cf = "Other Format", - cs = "Other Surrogate", - co = "Other Private Use", - cn = "Other Not Assigned", -} - -local detailtags = allocate { - sl = "small letter", - bl = "big letter", - im = "iteration mark", - pm = "prolonged sound mark" -} - -characters.categorytags = categorytags -characters.detailtags = detailtags - --- sounds : voiced unvoiced semivoiced - ---~ special : cf (softhyphen) zs (emspace) ---~ characters: ll lm lo lt lu mn nl no pc pd pe pf pi po ps sc sk sm so - -local is_character = allocate ( tohash { - "lu","ll","lt","lm","lo", - "nd","nl","no", - "mn", - "nl","no", - "pc","pd","ps","pe","pi","pf","po", - "sm","sc","sk","so" -} ) - -local is_letter = allocate ( tohash { - "ll","lm","lo","lt","lu" -} ) - -local is_command = allocate ( tohash { - "cf","zs" -} ) - -local is_spacing = allocate ( tohash { - "zs", "zl","zp", -} ) - -local is_mark = allocate ( tohash { - "mn", "ms", -} ) - -local is_punctuation = allocate ( tohash { - "pc","pd","ps","pe","pi","pf","po", -} ) - --- to be redone: store checked characters - -characters.is_character = is_character -characters.is_letter = is_letter -characters.is_command = is_command -characters.is_spacing = is_spacing -characters.is_mark = is_mark -characters.is_punctuation = is_punctuation - -local mti = function(t,k) - if type(k) == "number" then - local c = data[k].category - return c and rawget(t,c) - else - -- avoid auto conversion in data.characters lookups - end -end - -setmetatableindex(characters.is_character, mti) -setmetatableindex(characters.is_letter, mti) -setmetatableindex(characters.is_command, mti) -setmetatableindex(characters.is_spacing, mti) -setmetatableindex(characters.is_punctuation,mti) - --- todo: also define callers for the above - --- linebreak: todo: hash --- --- normative : BK CR LF CM SG GL CB SP ZW NL WJ JL JV JT H2 H3 --- informative : XX OP CL CP QU NS EX SY IS PR PO NU AL ID IN HY BB BA SA AI B2 HL CJ RI --- --- comments taken from standard: - -characters.linebreaks = { - - -- non-tailorable line breaking classes - - ["bk"] = "mandatory break", -- nl, ps : cause a line break (after) - ["cr"] = "carriage return", -- cr : cause a line break (after), except between cr and lf - ["lf"] = "line 
feed", -- lf : cause a line break (after) - ["cm"] = "combining mark", -- combining marks, control codes : prohibit a line break between the character and the preceding character - ["nl"] = "next line", -- nel : cause a line break (after) - ["sg"] = "surrogate", -- surrogates :do not occur in well-formed text - ["wj"] = "word joiner", -- wj : prohibit line breaks before and after - ["zw"] = "zero width space", -- zwsp : provide a break opportunity - ["gl"] = "non-breaking (glue)", -- cgj, nbsp, zwnbsp : prohibit line breaks before and after - ["sp"] = "space", -- space : enable indirect line breaks - - -- break opportunities - - ["b2"] = "break opportunity before and after", -- em dash : provide a line break opportunity before and after the character - ["ba"] = "break after", -- spaces, hyphens : generally provide a line break opportunity after the character - ["bb"] = "break before", -- punctuation used in dictionaries : generally provide a line break opportunity before the character - ["hy"] = "hyphen", -- hyphen-minus : provide a line break opportunity after the character, except in numeric context - ["cb"] = "contingent break opportunity", -- inline objects : provide a line break opportunity contingent on additional information - - -- characters prohibiting certain breaks - - ["cl"] = "close punctuation", -- “}”, “❳”, “⟫” etc. : prohibit line breaks before - ["cp"] = "close parenthesis", -- “)”, “]” : prohibit line breaks before - ["ex"] = "exclamation/interrogation", -- “!”, “?”, etc. : prohibit line breaks before - ["in"] = "inseparable", -- leaders : allow only indirect line breaks between pairs - ["ns"] = "nonstarter", -- “‼”, “‽”, “⁇”, “⁉”, etc. : allow only indirect line breaks before - ["op"] = "open punctuation", -- “(“, “[“, “{“, etc. : prohibit line breaks after - ["qu"] = "quotation", -- quotation marks : act like they are both opening and closing - - -- numeric context - - ["is"] = "infix numeric separator", -- . , : prevent breaks after any and before numeric - ["nu"] = "numeric", -- digits : form numeric expressions for line breaking purposes - ["po"] = "postfix numeric", -- %, ¢ : do not break following a numeric expression - ["pr"] = "prefix numeric", -- $, £, ¥, etc. : do not break in front of a numeric expression - ["sy"] = "symbols allowing break after", -- / : prevent a break before, and allow a break after - - -- other characters - - ["ai"] = "ambiguous (alphabetic or ideographic)", -- characters with ambiguous east asian width : act like al when the resolved eaw is n; otherwise, act as id - ["al"] = "alphabetic", -- alphabets and regular symbols : are alphabetic characters or symbols that are used with alphabetic characters - ["cj"] = "conditional japanese starter", -- small kana : treat as ns or id for strict or normal breaking. - ["h2"] = "hangul lv syllable", -- hangul : form korean syllable blocks - ["h3"] = "hangul lvt syllable", -- hangul : form korean syllable blocks - ["hl"] = "hebrew letter", -- hebrew : do not break around a following hyphen; otherwise act as alphabetic - ["id"] = "ideographic", -- ideographs : break before or after, except in some numeric context - ["jl"] = "hangul l jamo", -- conjoining jamo : form korean syllable blocks - ["jv"] = "hangul v jamo", -- conjoining jamo : form korean syllable blocks - ["jt"] = "hangul t jamo", -- conjoining jamo : form korean syllable blocks - ["ri"] = "regional indicator", -- regional indicator symbol letter a .. 
z : keep together, break before and after from others - ["sa"] = "complex context dependent (south east asian)", -- south east asian: thai, lao, khmer : provide a line break opportunity contingent on additional, language-specific context analysis - ["xx"] = "unknown", -- most unassigned, private-use : have as yet unknown line breaking behavior or unassigned code positions - -} - --- east asian width: --- --- N A H W F Na - -characters.bidi = allocate { - l = "Left-to-Right", - lre = "Left-to-Right Embedding", - lro = "Left-to-Right Override", - r = "Right-to-Left", - al = "Right-to-Left Arabic", - rle = "Right-to-Left Embedding", - rlo = "Right-to-Left Override", - pdf = "Pop Directional Format", - en = "European Number", - es = "European Number Separator", - et = "European Number Terminator", - an = "Arabic Number", - cs = "Common Number Separator", - nsm = "Non-Spacing Mark", - bn = "Boundary Neutral", - b = "Paragraph Separator", - s = "Segment Separator", - ws = "Whitespace", - on = "Other Neutrals", -} - ---[[ldx-- -<p>At this point we assume that the big data table is loaded. From this -table we derive a few more.</p> ---ldx]]-- - -if not characters.fallbacks then - - characters.fallbacks = { - [0x0308] = 0x00A8, [0x00A8] = 0x0308, -- dieresiscmb dieresis - [0x0304] = 0x00AF, [0x00AF] = 0x0304, -- macroncmb macron - [0x0301] = 0x00B4, [0x00B4] = 0x0301, -- acutecomb acute - [0x0327] = 0x00B8, [0x00B8] = 0x0327, -- cedillacmb cedilla - [0x0302] = 0x02C6, [0x02C6] = 0x0302, -- circumflexcmb circumflex - [0x030C] = 0x02C7, [0x02C7] = 0x030C, -- caroncmb caron - [0x0306] = 0x02D8, [0x02D8] = 0x0306, -- brevecmb breve - [0x0307] = 0x02D9, [0x02D9] = 0x0307, -- dotaccentcmb dotaccent - [0x030A] = 0x02DA, [0x02DA] = 0x030A, -- ringcmb ring - [0x0328] = 0x02DB, [0x02DB] = 0x0328, -- ogonekcmb ogonek - [0x0303] = 0x02DC, [0x02DC] = 0x0303, -- tildecomb tilde - [0x030B] = 0x02DD, [0x02DD] = 0x030B, -- hungarumlautcmb hungarumlaut - [0x0305] = 0x203E, [0x203E] = 0x0305, -- overlinecmb overline - [0x0300] = 0x0060, [0x0060] = 0x0333, -- gravecomb grave - } - - -- not done (would mess up mapping): - -- - -- 0X0301/0X0384 0X0314/0X1FFE 0X0313/0X1FBD 0X0313/0X1FBF 0X0342/0X1FC0 - -- 0X3099/0X309B 0X309A/0X309C 0X0333/0X2017 0X0345/0X037A - -end - -if storage then - storage.register("characters/fallbacks", characters.fallbacks, "characters.fallbacks") -- accents and such -end - -characters.directions = { } - -setmetatableindex(characters.directions,function(t,k) - local d = data[k] - if d then - local v = d.direction - if v then - t[k] = v - return v - end - end - t[k] = false -- maybe 'l' - return v -end) - -characters.mirrors = { } - -setmetatableindex(characters.mirrors,function(t,k) - local d = data[k] - if d then - local v = d.mirror - if v then - t[k] = v - return v - end - end - t[k] = false - return v -end) - -characters.textclasses = { } - -setmetatableindex(characters.textclasses,function(t,k) - local d = data[k] - if d then - local v = d.textclass - if v then - t[k] = v - return v - end - end - t[k] = false - return v -end) - ---[[ldx-- -<p>Next comes a whole series of helper methods. 
These are (will be) part -of the official <l n='api'/>.</p> ---ldx]]-- - --- we could make them virtual: characters.contextnames[n] - -function characters.contextname(n) return data[n] and data[n].contextname or "" end -function characters.adobename (n) return data[n] and data[n].adobename or "" end -function characters.description(n) return data[n] and data[n].description or "" end --------- characters.category (n) return data[n] and data[n].category or "" end - -function characters.category(n,verbose) - local c = data[n].category - if not c then - return "" - elseif verbose then - return categorytags[c] - else - return c - end -end - --- -- some day we will make a table .. not that many calls to utfchar --- --- local utfchar = utf.char --- local utfbyte = utf.byte --- local utfbytes = { } --- local utfchars = { } --- --- table.setmetatableindex(utfbytes,function(t,k) local v = utfchar(k) t[k] = v return v end) --- table.setmetatableindex(utfchars,function(t,k) local v = utfbyte(k) t[k] = v return v end) - -local function toutfstring(s) - if type(s) == "table" then - return utfchar(unpack(s)) -- concat { utfchar( unpack(s) ) } - else - return utfchar(s) - end -end - -utf.tostring = toutfstring - -local categories = allocate() characters.categories = categories -- lazy table - -setmetatableindex(categories, function(t,u) if u then local c = data[u] c = c and c.category or u t[u] = c return c end end) - --- todo: overloads (these register directly in the tables as number and string) e.g. for greek --- todo: for string do a numeric lookup in the table itself - -local lccodes = allocate() characters.lccodes = lccodes -- lazy table -local uccodes = allocate() characters.uccodes = uccodes -- lazy table -local shcodes = allocate() characters.shcodes = shcodes -- lazy table -local fscodes = allocate() characters.fscodes = fscodes -- lazy table - -setmetatableindex(lccodes, function(t,u) if u then local c = data[u] c = c and c.lccode or (type(u) == "string" and utfbyte(u)) or u t[u] = c return c end end) -setmetatableindex(uccodes, function(t,u) if u then local c = data[u] c = c and c.uccode or (type(u) == "string" and utfbyte(u)) or u t[u] = c return c end end) -setmetatableindex(shcodes, function(t,u) if u then local c = data[u] c = c and c.shcode or (type(u) == "string" and utfbyte(u)) or u t[u] = c return c end end) -setmetatableindex(fscodes, function(t,u) if u then local c = data[u] c = c and c.fscode or (type(u) == "string" and utfbyte(u)) or u t[u] = c return c end end) - -local lcchars = allocate() characters.lcchars = lcchars -- lazy table -local ucchars = allocate() characters.ucchars = ucchars -- lazy table -local shchars = allocate() characters.shchars = shchars -- lazy table -local fschars = allocate() characters.fschars = fschars -- lazy table - -setmetatableindex(lcchars, function(t,u) if u then local c = data[u] c = c and c.lccode c = c and toutfstring(c) or (type(u) == "number" and utfchar(u)) or u t[u] = c return c end end) -setmetatableindex(ucchars, function(t,u) if u then local c = data[u] c = c and c.uccode c = c and toutfstring(c) or (type(u) == "number" and utfchar(u)) or u t[u] = c return c end end) -setmetatableindex(shchars, function(t,u) if u then local c = data[u] c = c and c.shcode c = c and toutfstring(c) or (type(u) == "number" and utfchar(u)) or u t[u] = c return c end end) -setmetatableindex(fschars, function(t,u) if u then local c = data[u] c = c and c.fscode c = c and toutfstring(c) or (type(u) == "number" and utfchar(u)) or u t[u] = c return c end end) - 
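The lccodes/uccodes/shcodes/fscodes tables above are lazy: the setmetatableindex handler computes a value on first access, stores it in the table, and later lookups are plain reads. A minimal stand-alone sketch of that pattern, using plain setmetatable and a tiny hypothetical chardata table standing in for the real characters.data (so this is an illustration, not the fontloader's own code):

-- lazy lowercase-code table: computed on first lookup, cached afterwards
local chardata = {
    [0x0041] = { lccode = 0x0061 }, -- A -> a
    [0x0042] = { lccode = 0x0062 }, -- B -> b
}

local lccodes = setmetatable({ }, {
    __index = function(t,u)
        local c = chardata[u]
        c = c and c.lccode or u -- fall back to the slot itself
        t[u] = c                -- cache, so __index is not hit again
        return c
    end
})

print(lccodes[0x0041]) -- 97, computed and cached
print(lccodes[0x0041]) -- 97, now served straight from the table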
-local decomposed = allocate() characters.decomposed = decomposed -- lazy table -local specials = allocate() characters.specials = specials -- lazy table - -setmetatableindex(decomposed, function(t,u) -- either a table or false - if u then - local c = data[u] - local s = c and c.decomposed or false -- could fall back to specials - t[u] = s - return s - end -end) - -setmetatableindex(specials, function(t,u) -- either a table or false - if u then - local c = data[u] - local s = c and c.specials or false - t[u] = s - return s - end -end) - -local specialchars = allocate() characters.specialchars = specialchars -- lazy table -local descriptions = allocate() characters.descriptions = descriptions -- lazy table - -setmetatableindex(specialchars, function(t,u) - if u then - local c = data[u] - local s = c and c.specials - if s then - local tt, ttn = { }, 0 - for i=2,#s do - local si = s[i] - local c = data[si] - if is_letter[c.category] then - ttn = ttn + 1 - tt[ttn] = utfchar(si) - end - end - c = concat(tt) - t[u] = c - return c - else - if type(u) == "number" then - u = utfchar(u) - end - t[u] = u - return u - end - end -end) - -setmetatableindex(descriptions, function(t,k) - -- 0.05 - 0.10 sec - for u, c in next, data do - local d = c.description - if d then - d = gsub(d," ","") - d = lower(d) - t[d] = u - end - end - local d = rawget(t,k) - if not d then - t[k] = k - end - return d -end) - -function characters.unicodechar(asked) - local n = tonumber(asked) - if n then - return n - elseif type(asked) == "string" then - return descriptions[asked] or descriptions[gsub(asked," ","")] - end -end - --- function characters.lower(str) --- local new, n = { }, 0 --- for u in utfvalues(str) do --- n = n + 1 --- new[n] = lcchars[u] --- end --- return concat(new) --- end --- --- function characters.upper(str) --- local new, n = { }, 0 --- for u in utfvalues(str) do --- n = n + 1 --- new[n] = ucchars[u] --- end --- return concat(new) --- end --- --- function characters.shaped(str) --- local new, n = { }, 0 --- for u in utfvalues(str) do --- n = n + 1 --- new[n] = shchars[u] --- end --- return concat(new) --- end - ------ tolower = Cs((utf8byte/lcchars)^0) ------ toupper = Cs((utf8byte/ucchars)^0) ------ toshape = Cs((utf8byte/shchars)^0) - -local tolower = Cs((utf8character/lcchars)^0) -- no need to check spacing -local toupper = Cs((utf8character/ucchars)^0) -- no need to check spacing -local toshape = Cs((utf8character/shchars)^0) -- no need to check spacing - -lpegpatterns.tolower = tolower -- old ones ... will be overloaded -lpegpatterns.toupper = toupper -- old ones ... will be overloaded -lpegpatterns.toshape = toshape -- old ones ... 
will be overloaded - --- function characters.lower (str) return lpegmatch(tolower,str) end --- function characters.upper (str) return lpegmatch(toupper,str) end --- function characters.shaped(str) return lpegmatch(toshape,str) end - --- local superscripts = allocate() characters.superscripts = superscripts --- local subscripts = allocate() characters.subscripts = subscripts - --- if storage then --- storage.register("characters/superscripts", superscripts, "characters.superscripts") --- storage.register("characters/subscripts", subscripts, "characters.subscripts") --- end - --- end - -if not characters.splits then - - local char = allocate() - local compat = allocate() - - local splits = { - char = char, - compat = compat, - } - - characters.splits = splits - - -- [0x013F] = { 0x004C, 0x00B7 } - -- [0x0140] = { 0x006C, 0x00B7 } - - for unicode, data in next, characters.data do - local specials = data.specials - if specials and #specials > 2 then - local kind = specials[1] - if kind == "compat" then - compat[unicode] = { unpack(specials,2) } - elseif kind == "char" then - char [unicode] = { unpack(specials,2) } - end - end - end - - if storage then - storage.register("characters/splits", splits, "characters.splits") - end - -end - -if not characters.lhash then - - local lhash = allocate() characters.lhash = lhash -- nil if no conversion - local uhash = allocate() characters.uhash = uhash -- nil if no conversion - local shash = allocate() characters.shash = shash -- nil if no conversion - - for k, v in next, characters.data do - -- if k < 0x11000 then - local l = v.lccode - if l then - -- we have an uppercase - if type(l) == "number" then - lhash[utfchar(k)] = utfchar(l) - elseif #l == 2 then - lhash[utfchar(k)] = utfchar(l[1]) .. utfchar(l[2]) - else - inspect(v) - end - else - local u = v.uccode - if u then - -- we have an lowercase - if type(u) == "number" then - uhash[utfchar(k)] = utfchar(u) - elseif #u == 2 then - uhash[utfchar(k)] = utfchar(u[1]) .. utfchar(u[2]) - else - inspect(v) - end - end - end - local s = v.shcode - if s then - if type(s) == "number" then - shash[utfchar(k)] = utfchar(s) - elseif #s == 2 then - shash[utfchar(k)] = utfchar(s[1]) .. 
utfchar(s[2]) - else - inspect(v) - end - end - -- end - end - - if storage then - storage.register("characters/lhash", lhash, "characters.lhash") - storage.register("characters/uhash", uhash, "characters.uhash") - storage.register("characters/shash", shash, "characters.shash") - end - -end - -local lhash = characters.lhash mark(lhash) -local uhash = characters.uhash mark(uhash) -local shash = characters.shash mark(shash) - -local utf8lowercharacter = utfchartabletopattern(lhash) / lhash -local utf8uppercharacter = utfchartabletopattern(uhash) / uhash -local utf8shapecharacter = utfchartabletopattern(shash) / shash - -local utf8lower = Cs((utf8lowercharacter + utf8character)^0) -local utf8upper = Cs((utf8uppercharacter + utf8character)^0) -local utf8shape = Cs((utf8shapecharacter + utf8character)^0) - -lpegpatterns.utf8lowercharacter = utf8lowercharacter -- one character -lpegpatterns.utf8uppercharacter = utf8uppercharacter -- one character -lpegpatterns.utf8shapecharacter = utf8shapecharacter -- one character - -lpegpatterns.utf8lower = utf8lower -- string -lpegpatterns.utf8upper = utf8upper -- string -lpegpatterns.utf8shape = utf8shape -- string - -function characters.lower (str) return lpegmatch(utf8lower,str) end -function characters.upper (str) return lpegmatch(utf8upper,str) end -function characters.shaped(str) return lpegmatch(utf8shape,str) end - --- local str = [[ --- ÀÁÂÃÄÅàáâãäå àáâãäåàáâãäå ÀÁÂÃÄÅÀÁÂÃÄÅ AAAAAAaaaaaa --- ÆÇæç æçæç ÆÇÆÇ AECaec --- ÈÉÊËèéêë èéêëèéêë ÈÉÊËÈÉÊË EEEEeeee --- ÌÍÎÏÞìíîïþ ìíîïþìíîïþ ÌÍÎÏÞÌÍÎÏÞ IIIIÞiiiiþ --- Ðð ðð ÐÐ Ðð --- Ññ ññ ÑÑ Nn --- ÒÓÔÕÖòóôõö òóôõöòóôõö ÒÓÔÕÖÒÓÔÕÖ OOOOOooooo --- Øø øø ØØ Oo --- ÙÚÛÜùúûü ùúûüùúûü ÙÚÛÜÙÚÛÜ UUUUuuuu --- Ýýÿ ýýÿ ÝÝŸ Yyy --- ß ß SS ss --- Ţţ ţţ ŢŢ Tt --- ]] --- --- local lower = characters.lower print(lower(str)) --- local upper = characters.upper print(upper(str)) --- local shaped = characters.shaped print(shaped(str)) --- --- local c, n = os.clock(), 10000 --- for i=1,n do lower(str) upper(str) shaped(str) end -- 2.08 => 0.77 --- print(os.clock()-c,n*#str*3) - --- maybe: (twice as fast when much ascii) --- --- local tolower = lpeg.patterns.tolower --- local lower = string.lower --- --- local allascii = R("\000\127")^1 * P(-1) --- --- function characters.checkedlower(str) --- return lpegmatch(allascii,str) and lower(str) or lpegmatch(tolower,str) or str --- end - -function characters.lettered(str,spacing) - local new, n = { }, 0 - if spacing then - local done = false - for u in utfvalues(str) do - local c = data[u].category - if is_letter[c] then - if done and n > 1 then - n = n + 1 - new[n] = " " - done = false - end - n = n + 1 - new[n] = utfchar(u) - elseif spacing and is_spacing[c] then - done = true - end - end - else - for u in utfvalues(str) do - if is_letter[data[u].category] then - n = n + 1 - new[n] = utfchar(u) - end - end - end - return concat(new) -end - ---[[ldx-- -<p>Requesting lower and uppercase codes:</p> ---ldx]]-- - -function characters.uccode(n) return uccodes[n] end -- obsolete -function characters.lccode(n) return lccodes[n] end -- obsolete - -function characters.shape(n) - local shcode = shcodes[n] - if not shcode then - return n, nil - elseif type(shcode) == "table" then - return shcode[1], shcode[#shcode] - else - return shcode, nil - end -end - --- -- some day we might go this route, but it does not really save that much --- -- so not now (we can generate a lot using mtx-unicode that operates on the --- -- database) --- --- -- category cjkwd direction linebreak --- --- -- adobename comment 
contextcommand contextname description fallback lccode --- -- mathclass mathfiller mathname mathspec mathstretch mathsymbol mirror --- -- range shcode specials uccode uccodes unicodeslot --- --- local data = { --- ['one']={ --- common = { --- category="cc", --- direction="bn", --- linebreak="cm", --- }, --- vector = { --- [0x0000] = { --- description="NULL", --- group='one', --- unicodeslot=0x0000, --- }, --- { --- description="START OF HEADING", --- group='one', --- unicodeslot=0x0001, --- }, --- } --- } --- } --- --- local chardata, groupdata = { }, { } --- --- for group, gdata in next, data do --- local common, vector = { __index = gdata.common }, gdata.vector --- for character, cdata in next, vector do --- chardata[character] = cdata --- setmetatable(cdata,common) --- end --- groupdata[group] = gdata --- end - --- characters.data, characters.groups = chardata, groupdata - --- [0xF0000]={ --- category="co", --- cjkwd="a", --- description="<Plane 0x000F Private Use, First>", --- direction="l", --- unicodeslot=0xF0000, --- }, --- [0xFFFFD]={ --- category="co", --- cjkwd="a", --- description="<Plane 0x000F Private Use, Last>", --- direction="l", --- unicodeslot=0xFFFFD, --- }, --- [0x100000]={ --- category="co", --- cjkwd="a", --- description="<Plane 0x0010 Private Use, First>", --- direction="l", --- unicodeslot=0x100000, --- }, --- [0x10FFFD]={ --- category="co", --- cjkwd="a", --- description="<Plane 0x0010 Private Use, Last>", --- direction="l", --- unicodeslot=0x10FFFD, --- }, - -if not characters.superscripts then - - local superscripts = allocate() characters.superscripts = superscripts - local subscripts = allocate() characters.subscripts = subscripts - local fractions = allocate() characters.fractions = fractions - - -- skipping U+02120 (service mark) U+02122 (trademark) - - for k, v in next, data do - local specials = v.specials - if specials then - local what = specials[1] - if what == "super" then - if #specials == 2 then - superscripts[k] = specials[2] - elseif trace_defining then - report_defining("ignoring %s %a, char %c, description %a","superscript",ustring(k),k,v.description) - end - elseif what == "sub" then - if #specials == 2 then - subscripts[k] = specials[2] - elseif trace_defining then - report_defining("ignoring %s %a, char %c, description %a","subscript",ustring(k),k,v.description) - end - elseif what == "fraction" then - if #specials > 1 then - fractions[k] = { unpack(specials,2) } - elseif trace_defining then - report_defining("ignoring %s %a, char %c, description %a","fraction",ustring(k),k,v.description) - end - end - end - end - - -- print(table.serialize(superscripts, "superscripts", { hexify = true })) - -- print(table.serialize(subscripts, "subscripts", { hexify = true })) - -- print(table.serialize(fractions, "fractions", { hexify = true })) - - if storage then - storage.register("characters/superscripts", superscripts, "characters.superscripts") - storage.register("characters/subscripts", subscripts, "characters.subscripts") - storage.register("characters/fractions", fractions, "characters.fractions") - end - -end - -function characters.showstring(str) - local list = utotable(str) - for i=1,#list do - report_defining("split % 3i : %C",i,list[i]) - end -end - --- code moved to char-tex.lua - -return characters diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-cff.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-cff.lua deleted file mode 100644 index 0314e4ac4a1..00000000000 --- 
a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-cff.lua +++ /dev/null @@ -1,1697 +0,0 @@ -if not modules then modules = { } end modules ['font-cff'] = { - version = 1.001, - comment = "companion to font-ini.mkiv", - author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", - copyright = "PRAGMA ADE / ConTeXt Development Team", - license = "see context related readme files" -} - --- todo: option.outlines --- todo: option.boundingbox --- per charstring (less memory) - --- This is a heavy one as it is a rather packed format. We don't need al the information --- now but we might need it later (who know what magic we can do with metapost). So at --- some point this might become a module. We just follow Adobe Technical Notes #5176 and --- #5177. In case of doubt I looked in the fontforge code that comes with LuaTeX. - --- For now we save the segments in a list of segments with the operator last in an entry --- because that reflects the original. But it might make more sense to use a single array --- per segment. For pdf a simple concat works ok, but for other purposes a operator first --- flush is nicer. - -local next, type, tonumber = next, type, tonumber -local byte = string.byte -local concat, remove = table.concat, table.remove -local floor, abs, round, ceil = math.floor, math.abs, math.round, math.ceil -local P, C, R, S, C, Cs, Ct = lpeg.P, lpeg.C, lpeg.R, lpeg.S, lpeg.C, lpeg.Cs, lpeg.Ct -local lpegmatch = lpeg.match - -local readers = fonts.handlers.otf.readers -local streamreader = readers.streamreader - -local readbytes = streamreader.readbytes -local readstring = streamreader.readstring -local readbyte = streamreader.readcardinal1 -- 8-bit unsigned integer -local readushort = streamreader.readcardinal2 -- 16-bit unsigned integer -local readuint = streamreader.readcardinal3 -- 24-bit unsigned integer -local readulong = streamreader.readcardinal4 -- 24-bit unsigned integer -local setposition = streamreader.setposition -local getposition = streamreader.getposition - -local setmetatableindex = table.setmetatableindex - -local trace_charstrings = false trackers.register("fonts.cff.charstrings",function(v) trace_charstrings = v end) -local report = logs.reporter("otf reader","cff") - -local parsedictionaries -local parsecharstring -local parsecharstrings -local resetcharstrings -local parseprivates - -local defaultstrings = { [0] = -- taken from ff - ".notdef", "space", "exclam", "quotedbl", "numbersign", "dollar", "percent", - "ampersand", "quoteright", "parenleft", "parenright", "asterisk", "plus", - "comma", "hyphen", "period", "slash", "zero", "one", "two", "three", "four", - "five", "six", "seven", "eight", "nine", "colon", "semicolon", "less", - "equal", "greater", "question", "at", "A", "B", "C", "D", "E", "F", "G", "H", - "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", - "X", "Y", "Z", "bracketleft", "backslash", "bracketright", "asciicircum", - "underscore", "quoteleft", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", - "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", - "z", "braceleft", "bar", "braceright", "asciitilde", "exclamdown", "cent", - "sterling", "fraction", "yen", "florin", "section", "currency", - "quotesingle", "quotedblleft", "guillemotleft", "guilsinglleft", - "guilsinglright", "fi", "fl", "endash", "dagger", "daggerdbl", - "periodcentered", "paragraph", "bullet", "quotesinglbase", "quotedblbase", - "quotedblright", "guillemotright", "ellipsis", "perthousand", "questiondown", - "grave", "acute", "circumflex", 
"tilde", "macron", "breve", "dotaccent", - "dieresis", "ring", "cedilla", "hungarumlaut", "ogonek", "caron", "emdash", - "AE", "ordfeminine", "Lslash", "Oslash", "OE", "ordmasculine", "ae", - "dotlessi", "lslash", "oslash", "oe", "germandbls", "onesuperior", - "logicalnot", "mu", "trademark", "Eth", "onehalf", "plusminus", "Thorn", - "onequarter", "divide", "brokenbar", "degree", "thorn", "threequarters", - "twosuperior", "registered", "minus", "eth", "multiply", "threesuperior", - "copyright", "Aacute", "Acircumflex", "Adieresis", "Agrave", "Aring", - "Atilde", "Ccedilla", "Eacute", "Ecircumflex", "Edieresis", "Egrave", - "Iacute", "Icircumflex", "Idieresis", "Igrave", "Ntilde", "Oacute", - "Ocircumflex", "Odieresis", "Ograve", "Otilde", "Scaron", "Uacute", - "Ucircumflex", "Udieresis", "Ugrave", "Yacute", "Ydieresis", "Zcaron", - "aacute", "acircumflex", "adieresis", "agrave", "aring", "atilde", - "ccedilla", "eacute", "ecircumflex", "edieresis", "egrave", "iacute", - "icircumflex", "idieresis", "igrave", "ntilde", "oacute", "ocircumflex", - "odieresis", "ograve", "otilde", "scaron", "uacute", "ucircumflex", - "udieresis", "ugrave", "yacute", "ydieresis", "zcaron", "exclamsmall", - "Hungarumlautsmall", "dollaroldstyle", "dollarsuperior", "ampersandsmall", - "Acutesmall", "parenleftsuperior", "parenrightsuperior", "twodotenleader", - "onedotenleader", "zerooldstyle", "oneoldstyle", "twooldstyle", - "threeoldstyle", "fouroldstyle", "fiveoldstyle", "sixoldstyle", - "sevenoldstyle", "eightoldstyle", "nineoldstyle", "commasuperior", - "threequartersemdash", "periodsuperior", "questionsmall", "asuperior", - "bsuperior", "centsuperior", "dsuperior", "esuperior", "isuperior", - "lsuperior", "msuperior", "nsuperior", "osuperior", "rsuperior", "ssuperior", - "tsuperior", "ff", "ffi", "ffl", "parenleftinferior", "parenrightinferior", - "Circumflexsmall", "hyphensuperior", "Gravesmall", "Asmall", "Bsmall", - "Csmall", "Dsmall", "Esmall", "Fsmall", "Gsmall", "Hsmall", "Ismall", - "Jsmall", "Ksmall", "Lsmall", "Msmall", "Nsmall", "Osmall", "Psmall", - "Qsmall", "Rsmall", "Ssmall", "Tsmall", "Usmall", "Vsmall", "Wsmall", - "Xsmall", "Ysmall", "Zsmall", "colonmonetary", "onefitted", "rupiah", - "Tildesmall", "exclamdownsmall", "centoldstyle", "Lslashsmall", - "Scaronsmall", "Zcaronsmall", "Dieresissmall", "Brevesmall", "Caronsmall", - "Dotaccentsmall", "Macronsmall", "figuredash", "hypheninferior", - "Ogoneksmall", "Ringsmall", "Cedillasmall", "questiondownsmall", "oneeighth", - "threeeighths", "fiveeighths", "seveneighths", "onethird", "twothirds", - "zerosuperior", "foursuperior", "fivesuperior", "sixsuperior", - "sevensuperior", "eightsuperior", "ninesuperior", "zeroinferior", - "oneinferior", "twoinferior", "threeinferior", "fourinferior", - "fiveinferior", "sixinferior", "seveninferior", "eightinferior", - "nineinferior", "centinferior", "dollarinferior", "periodinferior", - "commainferior", "Agravesmall", "Aacutesmall", "Acircumflexsmall", - "Atildesmall", "Adieresissmall", "Aringsmall", "AEsmall", "Ccedillasmall", - "Egravesmall", "Eacutesmall", "Ecircumflexsmall", "Edieresissmall", - "Igravesmall", "Iacutesmall", "Icircumflexsmall", "Idieresissmall", - "Ethsmall", "Ntildesmall", "Ogravesmall", "Oacutesmall", "Ocircumflexsmall", - "Otildesmall", "Odieresissmall", "OEsmall", "Oslashsmall", "Ugravesmall", - "Uacutesmall", "Ucircumflexsmall", "Udieresissmall", "Yacutesmall", - "Thornsmall", "Ydieresissmall", "001.000", "001.001", "001.002", "001.003", - "Black", "Bold", "Book", "Light", "Medium", 
"Regular", "Roman", "Semibold", -} - -local cffreaders = { - readbyte, - readushort, - readuint, - readulong, -} - --- The header contains information about its own size. - -local function readheader(f) - local offset = getposition(f) - local header = { - offset = offset, - major = readbyte(f), - minor = readbyte(f), - size = readbyte(f), -- headersize - osize = readbyte(f), -- for offsets to start - } - setposition(f,offset+header.size) - return header -end - --- The indexes all look the same, so we share a loader. We could pass a handler --- and run over the array but why bother, we only have a few uses. - -local function readlengths(f) - local count = readushort(f) - if count == 0 then - return { } - end - local osize = readbyte(f) - local read = cffreaders[osize] - if not read then - report("bad offset size: %i",osize) - return { } - end - local lengths = { } - local previous = read(f) - for i=1,count do - local offset = read(f) - lengths[i] = offset - previous - previous = offset - end - return lengths -end - --- There can be subfonts so names is an array. However, in our case it's always --- one font. The same is true for the top dictionaries. Watch how we only load --- the dictionary string as for interpretation we need to have the strings loaded --- as well. - -local function readfontnames(f) - local names = readlengths(f) - for i=1,#names do - names[i] = readstring(f,names[i]) - end - return names -end - -local function readtopdictionaries(f) - local dictionaries = readlengths(f) - for i=1,#dictionaries do - dictionaries[i] = readstring(f,dictionaries[i]) - end - return dictionaries -end - --- Strings are added to a list of standard strings so we start the font specific --- one with an offset. Strings are shared so we have one table. - -local function readstrings(f) - local lengths = readlengths(f) - local strings = setmetatableindex({ }, defaultstrings) - local index = #defaultstrings - for i=1,#lengths do - index = index + 1 - strings[index] = readstring(f,lengths[i]) - end - return strings -end - --- Parsing the dictionaries is delayed till we have the strings loaded. The parser --- is stack based so the operands come before the operator (like in postscript). - --- local function delta(t) --- local n = #t --- if n > 1 then --- local p = t[1] --- for i=2,n do --- local c = t[i] --- t[i] = c + p --- p = c --- end --- end --- end - -do - - -- We use a closure so that we don't need to pass too much around. 
- - local stack = { } - local top = 0 - local result = { } - local strings = { } - - local p_single = - P("\00") / function() - result.version = strings[stack[top]] or "unset" - top = 0 - end - + P("\01") / function() - result.notice = strings[stack[top]] or "unset" - top = 0 - end - + P("\02") / function() - result.fullname = strings[stack[top]] or "unset" - top = 0 - end - + P("\03") / function() - result.familyname = strings[stack[top]] or "unset" - top = 0 - end - + P("\04") / function() - result.weight = strings[stack[top]] or "unset" - top = 0 - end - + P("\05") / function() - result.fontbbox = { unpack(stack,1,4) } - top = 0 - end - -- + P("\06") / function() end -- bluevalues - -- + P("\07") / function() end -- otherblues - -- + P("\08") / function() end -- familyblues - -- + P("\09") / function() end -- familyotherblues - -- + P("\10") / function() end -- strhw - -- + P("\11") / function() end -- stdvw - + P("\13") / function() - result.uniqueid = stack[top] - top = 0 - end - + P("\14") / function() - result.xuid = concat(stack,"",1,top) - top = 0 - end - + P("\15") / function() - result.charset = stack[top] - top = 0 - end - + P("\16") / function() - result.encoding = stack[top] - top = 0 - end - + P("\17") / function() - result.charstrings = stack[top] - top = 0 - end - + P("\18") / function() - result.private = { - size = stack[top-1], - offset = stack[top], - } - top = 0 - end - + P("\19") / function() - result.subroutines = stack[top] - end - + P("\20") / function() - result.defaultwidthx = stack[top] - end - + P("\21") / function() - result.nominalwidthx = stack[top] - end - -- + P("\22") / function() end -- reserved - -- + P("\23") / function() end -- reserved - -- + P("\24") / function() end -- reserved - -- + P("\25") / function() end -- reserved - -- + P("\26") / function() end -- reserved - -- + P("\27") / function() end -- reserved - - local p_double = P("\12") * ( - P("\00") / function() - result.copyright = stack[top] - top = 0 - end - + P("\01") / function() - result.monospaced = stack[top] == 1 and true or false -- isfixedpitch - top = 0 - end - + P("\02") / function() - result.italicangle = stack[top] - top = 0 - end - + P("\03") / function() - result.underlineposition = stack[top] - top = 0 - end - + P("\04") / function() - result.underlinethickness = stack[top] - top = 0 - end - + P("\05") / function() - result.painttype = stack[top] - top = 0 - end - + P("\06") / function() - result.charstringtype = stack[top] - top = 0 - end - + P("\07") / function() - result.fontmatrix = { unpack(stack,1,6) } - top = 0 - end - + P("\08") / function() - result.strokewidth = stack[top] - top = 0 - end - + P("\20") / function() - result.syntheticbase = stack[top] - top = 0 - end - + P("\21") / function() - result.postscript = strings[stack[top]] or "unset" - top = 0 - end - + P("\22") / function() - result.basefontname = strings[stack[top]] or "unset" - top = 0 - end - + P("\21") / function() - result.basefontblend = stack[top] - top = 0 - end - + P("\30") / function() - result.cid.registry = strings[stack[top-2]] or "unset" - result.cid.ordering = strings[stack[top-1]] or "unset" - result.cid.supplement = stack[top] - top = 0 - end - + P("\31") / function() - result.cid.fontversion = stack[top] - top = 0 - end - + P("\32") / function() - result.cid.fontrevision= stack[top] - top = 0 - end - + P("\33") / function() - result.cid.fonttype = stack[top] - top = 0 - end - + P("\34") / function() - result.cid.count = stack[top] - top = 0 - end - + P("\35") / function() - 
result.cid.uidbase = stack[top] - top = 0 - end - + P("\36") / function() - result.cid.fdarray = stack[top] - top = 0 - end - + P("\37") / function() - result.cid.fdselect = stack[top] - top = 0 - end - + P("\38") / function() - result.cid.fontname = strings[stack[top]] or "unset" - top = 0 - end - ) - - -- Some lpeg fun ... a first variant split the byte and made a new string but - -- the second variant is much faster. Not that it matters much as we don't see - -- such numbers often. - - local p_last = P("\x0F") / "0" + P("\x1F") / "1" + P("\x2F") / "2" + P("\x3F") / "3" - + P("\x4F") / "4" + P("\x5F") / "5" + P("\x6F") / "6" + P("\x7F") / "7" - + P("\x8F") / "8" + P("\x9F") / "9" + P("\xAF") / "" + P("\xBF") / "" - + P("\xCF") / "" + P("\xDF") / "" + P("\xEF") / "" + R("\xF0\xFF") / "" - - -- local remap = { [0] = - -- "00", "01", "02", "03", "04", "05", "06", "07", "08", "09", "0.", "0E", "0E-", "0", "0-", "0", - -- "10", "11", "12", "13", "14", "15", "16", "17", "18", "19", "0.", "0E", "0E-", "0", "0-", "0", - -- "20", "21", "22", "23", "24", "25", "26", "27", "28", "29", "0.", "0E", "0E-", "0", "0-", "0", - -- "30", "31", "32", "33", "34", "35", "36", "37", "38", "39", "0.", "0E", "0E-", "0", "0-", "0", - -- "40", "41", "42", "43", "44", "45", "46", "47", "48", "49", "0.", "0E", "0E-", "0", "0-", "0", - -- "50", "51", "52", "53", "54", "55", "56", "57", "58", "59", "0.", "0E", "0E-", "0", "0-", "0", - -- "60", "61", "62", "63", "64", "65", "66", "67", "68", "69", "0.", "0E", "0E-", "0", "0-", "0", - -- "70", "71", "72", "73", "74", "75", "76", "77", "78", "79", "0.", "0E", "0E-", "0", "0-", "0", - -- "80", "81", "82", "83", "84", "85", "86", "87", "88", "89", "0.", "0E", "0E-", "0", "0-", "0", - -- "90", "91", "92", "93", "94", "95", "96", "97", "98", "99", "0.", "0E", "0E-", "0", "0-", "0", - -- ".0", ".1", ".2", ".3", ".4", ".5", ".6", ".7", ".8", ".9", "..", ".E", ".E-", ".", ".-", ".", - -- "E0", "E1", "E2", "E3", "E4", "E5", "E6", "E7", "E8", "E9", "E.", "EE", "EE-", "E", "E-", "E", - -- "E-0", "E-1", "E-2", "E-3", "E-4", "E-5", "E-6", "E-7", "E-8", "E-9", "E-.", "E-E", "E-E-", "E-", "E--", "E-", - -- "-0", "-1", "-2", "-3", "-4", "-5", "-6", "-7", "-8", "-9", "-.", "-E", "-E-", "-", "--", "-", - -- } - - -- local p_nibbles = Cs(((1-p_last)/byte/remap)^0+p_last) - - -- local p = P("\30") * p_nibbles / function(t) - -- print(tonumber(t)) - -- end - - local remap = { - ["\x00"] = "00", ["\x01"] = "01", ["\x02"] = "02", ["\x03"] = "03", ["\x04"] = "04", ["\x05"] = "05", ["\x06"] = "06", ["\x07"] = "07", ["\x08"] = "08", ["\x09"] = "09", ["\x0A"] = "0.", ["\x0B"] = "0E", ["\x0C"] = "0E-", ["\x0D"] = "0", ["\x0E"] = "0-", ["\x0F"] = "0", - ["\x10"] = "10", ["\x11"] = "11", ["\x12"] = "12", ["\x13"] = "13", ["\x14"] = "14", ["\x15"] = "15", ["\x16"] = "16", ["\x17"] = "17", ["\x18"] = "18", ["\x19"] = "19", ["\x1A"] = "0.", ["\x1B"] = "0E", ["\x1C"] = "0E-", ["\x1D"] = "0", ["\x1E"] = "0-", ["\x1F"] = "0", - ["\x20"] = "20", ["\x21"] = "21", ["\x22"] = "22", ["\x23"] = "23", ["\x24"] = "24", ["\x25"] = "25", ["\x26"] = "26", ["\x27"] = "27", ["\x28"] = "28", ["\x29"] = "29", ["\x2A"] = "0.", ["\x2B"] = "0E", ["\x2C"] = "0E-", ["\x2D"] = "0", ["\x2E"] = "0-", ["\x2F"] = "0", - ["\x30"] = "30", ["\x31"] = "31", ["\x32"] = "32", ["\x33"] = "33", ["\x34"] = "34", ["\x35"] = "35", ["\x36"] = "36", ["\x37"] = "37", ["\x38"] = "38", ["\x39"] = "39", ["\x3A"] = "0.", ["\x3B"] = "0E", ["\x3C"] = "0E-", ["\x3D"] = "0", ["\x3E"] = "0-", ["\x3F"] = "0", - ["\x40"] = "40", ["\x41"] = "41", ["\x42"] 
= "42", ["\x43"] = "43", ["\x44"] = "44", ["\x45"] = "45", ["\x46"] = "46", ["\x47"] = "47", ["\x48"] = "48", ["\x49"] = "49", ["\x4A"] = "0.", ["\x4B"] = "0E", ["\x4C"] = "0E-", ["\x4D"] = "0", ["\x4E"] = "0-", ["\x4F"] = "0", - ["\x50"] = "50", ["\x51"] = "51", ["\x52"] = "52", ["\x53"] = "53", ["\x54"] = "54", ["\x55"] = "55", ["\x56"] = "56", ["\x57"] = "57", ["\x58"] = "58", ["\x59"] = "59", ["\x5A"] = "0.", ["\x5B"] = "0E", ["\x5C"] = "0E-", ["\x5D"] = "0", ["\x5E"] = "0-", ["\x5F"] = "0", - ["\x60"] = "60", ["\x61"] = "61", ["\x62"] = "62", ["\x63"] = "63", ["\x64"] = "64", ["\x65"] = "65", ["\x66"] = "66", ["\x67"] = "67", ["\x68"] = "68", ["\x69"] = "69", ["\x6A"] = "0.", ["\x6B"] = "0E", ["\x6C"] = "0E-", ["\x6D"] = "0", ["\x6E"] = "0-", ["\x6F"] = "0", - ["\x70"] = "70", ["\x71"] = "71", ["\x72"] = "72", ["\x73"] = "73", ["\x74"] = "74", ["\x75"] = "75", ["\x76"] = "76", ["\x77"] = "77", ["\x78"] = "78", ["\x79"] = "79", ["\x7A"] = "0.", ["\x7B"] = "0E", ["\x7C"] = "0E-", ["\x7D"] = "0", ["\x7E"] = "0-", ["\x7F"] = "0", - ["\x80"] = "80", ["\x81"] = "81", ["\x82"] = "82", ["\x83"] = "83", ["\x84"] = "84", ["\x85"] = "85", ["\x86"] = "86", ["\x87"] = "87", ["\x88"] = "88", ["\x89"] = "89", ["\x8A"] = "0.", ["\x8B"] = "0E", ["\x8C"] = "0E-", ["\x8D"] = "0", ["\x8E"] = "0-", ["\x8F"] = "0", - ["\x90"] = "90", ["\x91"] = "91", ["\x92"] = "92", ["\x93"] = "93", ["\x94"] = "94", ["\x95"] = "95", ["\x96"] = "96", ["\x97"] = "97", ["\x98"] = "98", ["\x99"] = "99", ["\x9A"] = "0.", ["\x9B"] = "0E", ["\x9C"] = "0E-", ["\x9D"] = "0", ["\x9E"] = "0-", ["\x9F"] = "0", - ["\xA0"] = ".0", ["\xA1"] = ".1", ["\xA2"] = ".2", ["\xA3"] = ".3", ["\xA4"] = ".4", ["\xA5"] = ".5", ["\xA6"] = ".6", ["\xA7"] = ".7", ["\xA8"] = ".8", ["\xA9"] = ".9", ["\xAA"] = "..", ["\xAB"] = ".E", ["\xAC"] = ".E-", ["\xAD"] = ".", ["\xAE"] = ".-", ["\xAF"] = ".", - ["\xB0"] = "E0", ["\xB1"] = "E1", ["\xB2"] = "E2", ["\xB3"] = "E3", ["\xB4"] = "E4", ["\xB5"] = "E5", ["\xB6"] = "E6", ["\xB7"] = "E7", ["\xB8"] = "E8", ["\xB9"] = "E9", ["\xBA"] = "E.", ["\xBB"] = "EE", ["\xBC"] = "EE-", ["\xBD"] = "E", ["\xBE"] = "E-", ["\xBF"] = "E", - ["\xC0"] = "E-0", ["\xC1"] = "E-1", ["\xC2"] = "E-2", ["\xC3"] = "E-3", ["\xC4"] = "E-4", ["\xC5"] = "E-5", ["\xC6"] = "E-6", ["\xC7"] = "E-7", ["\xC8"] = "E-8", ["\xC9"] = "E-9", ["\xCA"] = "E-.", ["\xCB"] = "E-E", ["\xCC"] = "E-E-", ["\xCD"] = "E-", ["\xCE"] = "E--", ["\xCF"] = "E-", - ["\xD0"] = "-0", ["\xD1"] = "-1", ["\xD2"] = "-2", ["\xD3"] = "-3", ["\xD4"] = "-4", ["\xD5"] = "-5", ["\xD6"] = "-6", ["\xD7"] = "-7", ["\xD8"] = "-8", ["\xD9"] = "-9", ["\xDA"] = "-.", ["\xDB"] = "-E", ["\xDC"] = "-E-", ["\xDD"] = "-", ["\xDE"] = "--", ["\xDF"] = "-", - } - - local p_nibbles = P("\30") * Cs(((1-p_last)/remap)^0+p_last) / function(n) - -- 0-9=digit a=. b=E c=E- d=reserved e=- f=finish - top = top + 1 - stack[top] = tonumber(n) or 0 - end - - local p_byte = C(R("\32\246")) / function(b0) - -- -107 .. +107 - top = top + 1 - stack[top] = byte(b0) - 139 - end - - local p_positive = C(R("\247\250")) * C(1) / function(b0,b1) - -- +108 .. +1131 - top = top + 1 - stack[top] = (byte(b0)-247)*256 + byte(b1) + 108 - end - - local p_negative = C(R("\251\254")) * C(1) / function(b0,b1) - -- -1131 .. -108 - top = top + 1 - stack[top] = -(byte(b0)-251)*256 - byte(b1) - 108 - end - - local p_short = P("\28") * C(1) * C(1) / function(b1,b2) - -- -32768 .. 
+32767 : b1<<8 | b2 - top = top + 1 - local n = 0x100 * byte(b1) + byte(b2) - if n >= 0x8000 then - stack[top] = n - 0xFFFF - 1 - else - stack[top] = n - end - end - - local p_long = P("\29") * C(1) * C(1) * C(1) * C(1) / function(b1,b2,b3,b4) - -- -2^31 .. +2^31-1 : b1<<24 | b2<<16 | b3<<8 | b4 - top = top + 1 - local n = 0x1000000 * byte(b1) + 0x10000 * byte(b2) + 0x100 * byte(b3) + byte(b4) - if n >= 0x8000000 then - stack[top] = n - 0xFFFFFFFF - 1 - else - stack[top] = n - end - end - - local p_unsupported = P(1) / function(detail) - top = 0 - end - - local p_dictionary = ( - p_byte - + p_positive - + p_negative - + p_short - + p_long - + p_nibbles - + p_single - + p_double - + p_unsupported - )^1 - - parsedictionaries = function(data,dictionaries) - stack = { } - strings = data.strings - for i=1,#dictionaries do - top = 0 - result = { - monospaced = false, - italicangle = 0, - underlineposition = -100, - underlinethickness = 50, - painttype = 0, - charstringtype = 2, - fontmatrix = { 0.001, 0, 0, 0.001, 0, 0 }, - fontbbox = { 0, 0, 0, 0 }, - strokewidth = 0, - charset = 0, - encoding = 0, - cid = { - fontversion = 0, - fontrevision = 0, - fonttype = 0, - count = 8720, - } - } - lpegmatch(p_dictionary,dictionaries[i]) - dictionaries[i] = result - end - -- - result = { } - top = 0 - stack = { } - end - - parseprivates = function(data,dictionaries) - stack = { } - strings = data.strings - for i=1,#dictionaries do - local private = dictionaries[i].private - if private and private.data then - top = 0 - result = { - forcebold = false, - languagegroup = 0, - expansionfactor = 0.06, - initialrandomseed = 0, - subroutines = 0, - defaultwidthx = 0, - nominalwidthx = 0, - cid = { - -- actually an error - }, - } - lpegmatch(p_dictionary,private.data) - private.data = result - end - end - result = { } - top = 0 - stack = { } - end - - -- All bezier curves have 6 points with successive pairs relative to - -- the previous pair. Some can be left out and are then copied or zero - -- (optimization). - -- - -- We are not really interested in all the details of a glyph because we - -- only need to calculate the boundingbox. So, todo: a quick no result but - -- calculate only variant. - -- - -- The conversion is straightforward and the specification os clear once - -- you understand that the x and y needs to be updates each step. It's also - -- quite easy to test because in mp a shape will look bad when a few variables - -- are swapped. But still there might be bugs down here because not all - -- variants are seen in a font so far. We are less compact that the ff code - -- because there quite some variants are done in one helper with a lot of - -- testing for states. - - local x = 0 - local y = 0 - local width = false - local r = 0 - local stems = 0 - local globalbias = 0 - local localbias = 0 - local globals = false - local locals = false - local depth = 1 - local xmin = 0 - local xmax = 0 - local ymin = 0 - local ymax = 0 - local checked = false - local keepcurve = false - - local function showstate(where) - report("%w%-10s : [%s] n=%i",depth*2,where,concat(stack," ",1,top),top) - end - - local function showvalue(where,value,showstack) - if showstack then - report("%w%-10s : %s : [%s] n=%i",depth*2,where,tostring(value),concat(stack," ",1,top),top) - else - report("%w%-10s : %s",depth*2,where,tostring(value)) - end - end - - -- All these indirect calls make this run slower but it's cleaner this way - -- and we cache the result. As we moved the boundingbox code inline we gain - -- some back. 
- - local function moveto(x,y) - if keepcurve then - r = r + 1 - result[r] = { x, y, "m" } - end - if checked then - if x < xmin then xmin = x elseif x > xmax then xmax = x end - if y < ymin then ymin = y elseif y > ymax then ymax = y end - else - xmin = x - ymin = y - xmax = x - ymax = y - checked = true - end - end - - local function lineto(x,y) - if keepcurve then - r = r + 1 - result[r] = { x, y, "l" } - end - if checked then - if x < xmin then xmin = x elseif x > xmax then xmax = x end - if y < ymin then ymin = y elseif y > ymax then ymax = y end - else - xmin = x - ymin = y - xmax = x - ymax = y - checked = true - end - end - - local function curveto(x1,y1,x2,y2,x3,y3) - if keepcurve then - r = r + 1 - result[r] = { x1, y1, x2, y2, x3, y3, "c" } - end - if checked then - if x1 < xmin then xmin = x1 elseif x1 > xmax then xmax = x1 end - if y1 < ymin then ymin = y1 elseif y1 > ymax then ymax = y1 end - else - xmin = x1 - ymin = y1 - xmax = x1 - ymax = y1 - checked = true - end - if x2 < xmin then xmin = x2 elseif x2 > xmax then xmax = x2 end - if y2 < ymin then ymin = y2 elseif y2 > ymax then ymax = y2 end - if x3 < xmin then xmin = x3 elseif x3 > xmax then xmax = x3 end - if y3 < ymin then ymin = y3 elseif y3 > ymax then ymax = y3 end - end - - local function rmoveto() - if top > 2 then - if not width then - width = stack[1] - if trace_charstrings then - showvalue("width",width) - end - end - elseif not width then - width = true - end - if trace_charstrings then - showstate("rmoveto") - end - x = x + stack[top-1] -- dx1 - y = y + stack[top] -- dy1 - top = 0 - moveto(x,y) - end - - local function hmoveto() - if top > 1 then - if not width then - width = stack[1] - if trace_charstrings then - showvalue("width",width) - end - end - elseif not width then - width = true - end - if trace_charstrings then - showstate("hmoveto") - end - x = x + stack[top] -- dx1 - top = 0 - moveto(x,y) - end - - local function vmoveto() - if top > 1 then - if not width then - width = stack[1] - if trace_charstrings then - showvalue("width",width) - end - end - elseif not width then - width = true - end - if trace_charstrings then - showstate("vmoveto") - end - y = y + stack[top] -- dy1 - top = 0 - moveto(x,y) - end - - local function rlineto() - if trace_charstrings then - showstate("rlineto") - end - for i=1,top,2 do - x = x + stack[i] -- dxa - y = y + stack[i+1] -- dya - lineto(x,y) - end - top = 0 - end - - local function xlineto(swap) -- x (y,x)+ | (x,y)+ - for i=1,top do - if swap then - x = x + stack[i] - swap = false - else - y = y + stack[i] - swap = true - end - lineto(x,y) - end - top = 0 - end - - local function hlineto() -- x (y,x)+ | (x,y)+ - if trace_charstrings then - showstate("hlineto") - end - xlineto(true) - end - - local function vlineto() -- y (x,y)+ | (y,x)+ - if trace_charstrings then - showstate("vlineto") - end - xlineto(false) - end - - local function rrcurveto() - if trace_charstrings then - showstate("rrcurveto") - end - for i=1,top,6 do - local ax = x + stack[i] -- dxa - local ay = y + stack[i+1] -- dya - local bx = ax + stack[i+2] -- dxb - local by = ay + stack[i+3] -- dyb - x = bx + stack[i+4] -- dxc - y = by + stack[i+5] -- dyc - curveto(ax,ay,bx,by,x,y) - end - top = 0 - end - - local function hhcurveto() - if trace_charstrings then - showstate("hhcurveto") - end - local s = 1 - if top % 2 ~= 0 then - y = y + stack[1] -- dy1 - s = 2 - end - for i=s,top,4 do - local ax = x + stack[i] -- dxa - local ay = y - local bx = ax + stack[i+1] -- dxb - local by = ay + stack[i+2] -- dyb - 
x = bx + stack[i+3] -- dxc - y = by - curveto(ax,ay,bx,by,x,y) - end - top = 0 - end - - local function vvcurveto() - if trace_charstrings then - showstate("vvcurveto") - end - local s = 1 - local d = 0 - if top % 2 ~= 0 then - d = stack[1] -- dx1 - s = 2 - end - for i=s,top,4 do - local ax = x + d - local ay = y + stack[i] -- dya - local bx = ax + stack[i+1] -- dxb - local by = ay + stack[i+2] -- dyb - x = bx - y = by + stack[i+3] -- dyc - curveto(ax,ay,bx,by,x,y) - d = 0 - end - top = 0 - end - - local function xxcurveto(swap) - local last = top % 4 ~= 0 and stack[top] - if last then - top = top - 1 - end - local sw = swap - for i=1,top,4 do - local ax, ay, bx, by - if swap then - ax = x + stack[i] - ay = y - bx = ax + stack[i+1] - by = ay + stack[i+2] - y = by + stack[i+3] - if last and i+3 == top then - x = bx + last - else - x = bx - end - swap = false - else - ax = x - ay = y + stack[i] - bx = ax + stack[i+1] - by = ay + stack[i+2] - x = bx + stack[i+3] - if last and i+3 == top then - y = by + last - else - y = by - end - swap = true - end - curveto(ax,ay,bx,by,x,y) - end - top = 0 - end - - local function hvcurveto() - if trace_charstrings then - showstate("hvcurveto") - end - xxcurveto(true) - end - - local function vhcurveto() - if trace_charstrings then - showstate("vhcurveto") - end - xxcurveto(false) - end - - local function rcurveline() - if trace_charstrings then - showstate("rcurveline") - end - for i=1,top-2,6 do - local ax = x + stack[i] -- dxa - local ay = y + stack[i+1] -- dya - local bx = ax + stack[i+2] -- dxb - local by = ay + stack[i+3] -- dyb - x = bx + stack[i+4] -- dxc - y = by + stack[i+5] -- dyc - curveto(ax,ay,bx,by,x,y) - end - x = x + stack[top-1] -- dxc - y = y + stack[top] -- dyc - lineto(x,y) - top = 0 - end - - local function rlinecurve() - if trace_charstrings then - showstate("rlinecurve") - end - if top > 6 then - for i=1,top-6,2 do - x = x + stack[i] - y = y + stack[i+1] - lineto(x,y) - end - end - local ax = x + stack[top-5] - local ay = y + stack[top-4] - local bx = ax + stack[top-3] - local by = ay + stack[top-2] - x = bx + stack[top-1] - y = by + stack[top] - curveto(ax,ay,bx,by,x,y) - top = 0 - end - - -- flex is not yet tested! 
no loop - - local function flex() -- fd not used - if trace_charstrings then - showstate("flex") - end - local ax = x + stack[1] -- dx1 - local ay = y + stack[2] -- dy1 - local bx = ax + stack[3] -- dx2 - local by = ay + stack[4] -- dy2 - local cx = bx + stack[5] -- dx3 - local cy = by + stack[6] -- dy3 - curveto(ax,ay,bx,by,cx,cy) - local dx = cx + stack[7] -- dx4 - local dy = cy + stack[8] -- dy4 - local ex = dx + stack[9] -- dx5 - local ey = dy + stack[10] -- dy5 - x = ex + stack[11] -- dx6 - y = ey + stack[12] -- dy6 - curveto(dx,dy,ex,ey,x,y) - top = 0 - end - - local function hflex() - if trace_charstrings then - showstate("hflex") - end - local ax = x + stack[1] -- dx1 - local ay = y - local bx = ax + stack[2] -- dx2 - local by = ay + stack[3] -- dy2 - local cx = bx + stack[4] -- dx3 - local cy = by - curveto(ax,ay,bx,by,cx,cy) - local dx = cx + stack[5] -- dx4 - local dy = by - local ex = dx + stack[6] -- dx5 - local ey = y - x = ex + stack[7] -- dx6 - curveto(dx,dy,ex,ey,x,y) - top = 0 - end - - local function hflex1() - if trace_charstrings then - showstate("hflex1") - end - local ax = x + stack[1] -- dx1 - local ay = y + stack[2] -- dy1 - local bx = ax + stack[3] -- dx2 - local by = ay + stack[4] -- dy2 - local cx = bx + stack[5] -- dx3 - local cy = by - curveto(ax,ay,bx,by,cx,cy) - local dx = cx + stack[6] -- dx4 - local dy = by - local ex = dx + stack[7] -- dx5 - local ey = dy + stack[8] -- dy5 - x = ex + stack[9] -- dx6 - curveto(dx,dy,ex,ey,x,y) - top = 0 - end - - local function flex1() - if trace_charstrings then - showstate("flex1") - end - local ax = x + stack[1] --dx1 - local ay = y + stack[2] --dy1 - local bx = ax + stack[3] --dx2 - local by = ay + stack[4] --dy2 - local cx = bx + stack[5] --dx3 - local cy = by + stack[6] --dy3 - curveto(ax,ay,bx,by,cx,cy) - local dx = cx + stack[7] --dx4 - local dy = cy + stack[8] --dy4 - local ex = dx + stack[9] --dx5 - local ey = dy + stack[10] --dy5 - if abs(ex - x) > abs(ey - y) then -- spec: abs(dx) > abs(dy) - x = ex + stack[11] - else - y = ey + stack[11] - end - curveto(dx,dy,ex,ey,x,y) - top = 0 - end - - local function getstem() - if top == 0 then - -- bad - elseif top % 2 ~= 0 then - if width then - remove(stack,1) - else - width = remove(stack,1) - if trace_charstrings then - showvalue("width",width) - end - end - top = top - 1 - end - if trace_charstrings then - showstate("stem") - end - stems = stems + top/2 - top = 0 - end - - local function getmask() - if top == 0 then - -- bad - elseif top % 2 ~= 0 then - if width then - remove(stack,1) - else - width = remove(stack,1) - if trace_charstrings then - showvalue("width",width) - end - end - top = top - 1 - end - if trace_charstrings then - showstate(operator == 19 and "hintmark" or "cntrmask") - end - stems = stems + top/2 - top = 0 - if stems == 0 then - -- forget about it - elseif stems <= 8 then - return 1 - else - return floor((stems+7)/8) - end - end - - local function unsupported() - if trace_charstrings then - showstate("unsupported") - end - top = 0 - end - - -- Bah, we cannot use a fast lpeg because a hint has an unknown size and a - -- runtime capture cannot handle that well. 
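The reason given above for not using a single fast lpeg grammar is that the data following a hintmask/cntrmask operator has no fixed size: it is a bit mask whose length depends on how many stem hints have been seen so far, which is runtime state. The getmask handler accounts for this with floor((stems+7)/8). A small sketch of that size rule only; maskbytes is a hypothetical name:

-- number of mask bytes following hintmask/cntrmask for a given stem count
local floor = math.floor

local function maskbytes(stems)
    return floor((stems+7)/8) -- one bit per stem, rounded up to whole bytes
end

print(maskbytes(0)) -- 0
print(maskbytes(3)) -- 1
print(maskbytes(9)) -- 2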
- - local actions = { [0] = - unsupported, -- 0 - getstem, -- 1 -- hstem - unsupported, -- 2 - getstem, -- 3 -- vstem - vmoveto, -- 4 - rlineto, -- 5 - hlineto, -- 6 - vlineto, -- 7 - rrcurveto, -- 8 - unsupported, -- 9 -- closepath - unsupported, -- 10 -- calllocal, - unsupported, -- 11 -- callreturn, - unsupported, -- 12 -- elsewhere - unsupported, -- 13 -- hsbw - unsupported, -- 14 -- endchar, - unsupported, -- 15 - unsupported, -- 16 - unsupported, -- 17 - getstem, -- 18 -- hstemhm - getmask, -- 19 -- hintmask - getmask, -- 20 -- cntrmask - rmoveto, -- 21 - hmoveto, -- 22 - getstem, -- 23 -- vstemhm - rcurveline, -- 24 - rlinecurve, -- 25 - vvcurveto, -- 26 - hhcurveto, -- 27 - unsupported, -- 28 -- elsewhere - unsupported, -- 29 -- elsewhere - vhcurveto, -- 30 - hvcurveto, -- 31 - } - - local subactions = { - [034] = hflex, - [035] = flex, - [036] = hflex1, - [037] = flex1, - } - - local p_bytes = Ct((P(1)/byte)^0) - - local function call(scope,list,bias,process) - local index = stack[top] + bias - top = top - 1 - if trace_charstrings then - showvalue(scope,index,true) - end - local str = list[index] - if str then - if type(str) == "string" then - str = lpegmatch(p_bytes,str) - list[index] = str - end - depth = depth + 1 - process(str) - depth = depth - 1 - else - report("unknown %s %i",scope,index) - end - end - - local function process(tab) - local i = 1 - local n = #tab - while i <= n do - local t = tab[i] - if t >= 32 and t<=246 then - -- -107 .. +107 - top = top + 1 - stack[top] = t - 139 - i = i + 1 - elseif t >= 247 and t <= 250 then - -- +108 .. +1131 - top = top + 1 - stack[top] = (t-247)*256 + tab[i+1] + 108 - i = i + 2 - elseif t >= 251 and t <= 254 then - -- -1131 .. -108 - top = top + 1 - stack[top] = -(t-251)*256 - tab[i+1] - 108 - i = i + 2 - elseif t == 28 then - -- -32768 .. 
+32767 : b1<<8 | b2 - top = top + 1 - local n = 0x100 * tab[i+1] + tab[i+2] - if n >= 0x8000 then - stack[top] = n - 0xFFFF - 1 - else - stack[top] = n - end - i = i + 3 - elseif t == 255 then - local n = 0x100 * tab[i+1] + tab[i+2] - top = top + 1 - if n >= 0x8000 then - stack[top] = n - 0xFFFF - 1 + (0x100 * tab[i+3] + tab[i+4])/0xFFFF - else - stack[top] = n + (0x100 * tab[i+3] + tab[i+4])/0xFFFF - end - i = i + 5 - elseif t == 11 then - if trace_charstrings then - showstate("return") - end - return - elseif t == 10 then - call("local",locals,localbias,process) - i = i + 1 - elseif t == 14 then -- endchar - if width then - -- okay - elseif top > 0 then - width = stack[1] - if trace_charstrings then - showvalue("width",width) - end - else - width = true - end - if trace_charstrings then - showstate("endchar") - end - return - elseif t == 29 then - call("global",globals,globalbias,process) - i = i + 1 - elseif t == 12 then - i = i + 1 - local t = tab[i] - local a = subactions[t] - if a then - a() - else - if trace_charstrings then - showvalue("<subaction>",t) - end - top = 0 - end - i = i + 1 - else - local a = actions[t] - if a then - local s = a() - if s then - i = i + s - end - else - if trace_charstrings then - showvalue("<action>",t) - end - top = 0 - end - i = i + 1 - end - end - end - - -- local function calculatebounds(segments,x,y) - -- local nofsegments = #segments - -- if nofsegments == 0 then - -- return { x, y, x, y } - -- else - -- local xmin = 10000 - -- local xmax = -10000 - -- local ymin = 10000 - -- local ymax = -10000 - -- if x < xmin then xmin = x end - -- if x > xmax then xmax = x end - -- if y < ymin then ymin = y end - -- if y > ymax then ymax = y end - -- -- we now have a reasonable start so we could - -- -- simplyfy the next checks - -- for i=1,nofsegments do - -- local s = segments[i] - -- local x = s[1] - -- local y = s[2] - -- if x < xmin then xmin = x end - -- if x > xmax then xmax = x end - -- if y < ymin then ymin = y end - -- if y > ymax then ymax = y end - -- if s[#s] == "c" then -- "curveto" - -- local x = s[3] - -- local y = s[4] - -- if x < xmin then xmin = x elseif x > xmax then xmax = x end - -- if y < ymin then ymin = y elseif y > ymax then ymax = y end - -- local x = s[5] - -- local y = s[6] - -- if x < xmin then xmin = x elseif x > xmax then xmax = x end - -- if y < ymin then ymin = y elseif y > ymax then ymax = y end - -- end - -- end - -- return { round(xmin), round(ymin), round(xmax), round(ymax) } -- doesn't make ceil more sense - -- end - -- end - - parsecharstrings = function(data,glyphs,doshapes) - -- for all charstrings - local dictionary = data.dictionaries[1] - local charstrings = dictionary.charstrings - local charset = dictionary.charset - keepcurve = doshapes - stack = { } - glyphs = glyphs or { } - strings = data.strings - locals = dictionary.subroutines - globals = data.routines - globalbias = #globals - localbias = #locals - globalbias = ((globalbias < 1240 and 107) or (globalbias < 33900 and 1131) or 32768) + 1 - localbias = ((localbias < 1240 and 107) or (localbias < 33900 and 1131) or 32768) + 1 - local nominalwidth = dictionary.private.data.nominalwidthx or 0 - local defaultwidth = dictionary.private.data.defaultwidthx or 0 - - for i=1,#charstrings do - local str = charstrings[i] - local tab = lpegmatch(p_bytes,str) - local index = i - 1 - x = 0 - y = 0 - width = false - r = 0 - top = 0 - stems = 0 - result = { } - -- - xmin = 0 - xmax = 0 - ymin = 0 - ymax = 0 - checked = false - -- - if trace_charstrings then - report("glyph: 
%i",index) - report("data: % t",tab) - end - -- - process(tab) - -- - local boundingbox = { round(xmin), round(ymin), round(xmax), round(ymax) } - -- - if width == true or width == false then - width = defaultwidth - else - width = nominalwidth + width - end - -- - -- trace_charstrings = index == 3078 -- todo: make tracker - local glyph = glyphs[index] -- can be autodefined in otr - if not glyph then - glyphs[index] = { - segments = doshapes ~= false and result or nil, -- optional - boundingbox = boundingbox, - width = width, - name = charset[index], - -- sidebearing = 0, - } - else - glyph.segments = doshapes ~= false and result or nil - glyph.boundingbox = boundingbox - if not glyph.width then - glyph.width = width - end - if charset and not glyph.name then - glyph.name = charset[index] - end - -- glyph.sidebearing = 0 -- todo - end - if trace_charstrings then - report("width: %s",tostring(width)) - report("boundingbox: % t",boundingbox) - end - charstrings[i] = nil -- free memory - end - return glyphs - end - - parsecharstring = function(data,dictionary,charstring,glyphs,index,doshapes) - local private = dictionary.private - keepcurve = doshapes - strings = data.strings -- or in dict? - locals = dictionary.subroutines or { } - globals = data.routines or { } - globalbias = #globals - localbias = #locals - globalbias = ((globalbias < 1240 and 107) or (globalbias < 33900 and 1131) or 32768) + 1 - localbias = ((localbias < 1240 and 107) or (localbias < 33900 and 1131) or 32768) + 1 - local nominalwidth = private and private.data.nominalwidthx or 0 - local defaultwidth = private and private.data.defaultwidthx or 0 - -- - local tab = lpegmatch(p_bytes,charstring) - x = 0 - y = 0 - width = false - r = 0 - top = 0 - stems = 0 - result = { } - -- - xmin = 0 - xmax = 0 - ymin = 0 - ymax = 0 - checked = false - -- - if trace_charstrings then - report("glyph: %i",index) - report("data: % t",tab) - end - -- - process(tab) - -- - local boundingbox = { xmin, ymin, xmax, ymax } - -- - if width == true or width == false then - width = defaultwidth - else - width = nominalwidth + width - end - -- - local glyph = glyphs[index] -- can be autodefined in otr - if not glyph then - glyphs[index] = { - segments = doshapes ~= false and result or nil, -- optional - boundingbox = boundingbox, - width = width, - name = charset[index], - -- sidebearing = 0, - } - else - glyph.segments = doshapes ~= false and result or nil - glyph.boundingbox = boundingbox - if not glyph.width then - glyph.width = width - end - if charset and not glyph.name then - glyph.name = charset[index] - end - -- glyph.sidebearing = 0 -- todo - end - -- - if trace_charstrings then - report("width: %s",tostring(width)) - report("boundingbox: % t",boundingbox) - end - -- - return charstring - end - - resetcharstrings = function() - result = { } - top = 0 - stack = { } - end - -end - -local function readglobals(f,data) - local routines = readlengths(f) - for i=1,#routines do - routines[i] = readstring(f,routines[i]) - end - data.routines = routines -end - -local function readencodings(f,data) - data.encodings = { } -end - -local function readcharsets(f,data,dictionary) - local header = data.header - local strings = data.strings - local nofglyphs = data.nofglyphs - local charsetoffset = dictionary.charset - - if charsetoffset ~= 0 then - setposition(f,header.offset+charsetoffset) - local format = readbyte(f) - local charset = { [0] = ".notdef" } - dictionary.charset = charset - if format == 0 then - for i=1,nofglyphs do - charset[i] = 
strings[readushort(f)] - end - elseif format == 1 or format == 2 then - local readcount = format == 1 and readbyte or readushort - local i = 1 - while i <= nofglyphs do - local sid = readushort(f) - local n = readcount(f) - for s=sid,sid+n do - charset[i] = strings[s] - i = i + 1 - if i > nofglyphs then - break - end - end - end - else - report("cff parser: unsupported charset format %a",format) - end - end -end - -local function readprivates(f,data) - local header = data.header - local dictionaries = data.dictionaries - local private = dictionaries[1].private - if private then - setposition(f,header.offset+private.offset) - private.data = readstring(f,private.size) - end -end - -local function readlocals(f,data,dictionary) - local header = data.header - local private = dictionary.private - if private then - local subroutineoffset = private.data.subroutines - if subroutineoffset ~= 0 then - setposition(f,header.offset+private.offset+subroutineoffset) - local subroutines = readlengths(f) - for i=1,#subroutines do - subroutines[i] = readstring(f,subroutines[i]) - end - dictionary.subroutines = subroutines - private.data.subroutines = nil - else - dictionary.subroutines = { } - end - else - dictionary.subroutines = { } - end -end - --- These charstrings are little programs and described in: Technical Note #5177. A truetype --- font has only one dictionary. - -local function readcharstrings(f,data) - local header = data.header - local dictionaries = data.dictionaries - local dictionary = dictionaries[1] - local type = dictionary.charstringtype - local offset = dictionary.charstrings - if type == 2 then - setposition(f,header.offset+offset) - -- could be a metatable .. delayed loading - local charstrings = readlengths(f) - local nofglyphs = #charstrings - for i=1,nofglyphs do - charstrings[i] = readstring(f,charstrings[i]) - end - data.nofglyphs = nofglyphs - dictionary.charstrings = charstrings - else - report("unsupported charstr type %i",type) - data.nofglyphs = 0 - dictionary.charstrings = { } - end -end - --- cid (maybe do this stepwise so less mem) -- share with above - -local function readcidprivates(f,data) - local header = data.header - local dictionaries = data.dictionaries[1].cid.dictionaries - for i=1,#dictionaries do - local dictionary = dictionaries[i] - local private = dictionary.private - if private then - setposition(f,header.offset+private.offset) - private.data = readstring(f,private.size) - end - end - parseprivates(data,dictionaries) -end - -local function readnoselect(f,data,glyphs,doshapes) - local dictionaries = data.dictionaries - local dictionary = dictionaries[1] - readglobals(f,data) - readcharstrings(f,data) - readencodings(f,data) - readcharsets(f,data,dictionary) - readprivates(f,data) - parseprivates(data,data.dictionaries) - readlocals(f,data,dictionary) - parsecharstrings(data,glyphs,doshapes) - resetcharstrings() -end - -local function readfdselect(f,data,glyphs,doshapes) - local header = data.header - local dictionaries = data.dictionaries - local dictionary = dictionaries[1] - local cid = dictionary.cid - local cidselect = cid and cid.fdselect - readglobals(f,data) - readcharstrings(f,data) - readencodings(f,data) - local charstrings = dictionary.charstrings - local fdindex = { } - local nofglyphs = data.nofglyphs - local maxindex = -1 - setposition(f,header.offset+cidselect) - local format = readbyte(f) - if format == 1 then - for i=0,nofglyphs do -- notdef included (needs checking) - local index = readbyte(i) - fdindex[i] = index - if index > maxindex then 
- maxindex = index - end - end - elseif format == 3 then - local nofranges = readushort(f) - local first = readushort(f) - local index = readbyte(f) - while true do - local last = readushort(f) - if index > maxindex then - maxindex = index - end - for i=first,last do - fdindex[i] = index - end - if last >= nofglyphs then - break - else - first = last + 1 - index = readbyte(f) - end - end - else - -- unsupported format - end - if maxindex >= 0 then - local cidarray = cid.fdarray - setposition(f,header.offset+cidarray) - local dictionaries = readlengths(f) - for i=1,#dictionaries do - dictionaries[i] = readstring(f,dictionaries[i]) - end - parsedictionaries(data,dictionaries) - cid.dictionaries = dictionaries - readcidprivates(f,data) - for i=1,#dictionaries do - readlocals(f,data,dictionaries[i]) - end - for i=1,#charstrings do - parsecharstring(data,dictionaries[fdindex[i]+1],charstrings[i],glyphs,i,doshapes) - end - resetcharstrings() - end -end - -function readers.cff(f,fontdata,specification) --- if specification.glyphs then - if specification.details then - local datatable = fontdata.tables.cff - if datatable then - local offset = datatable.offset - local glyphs = fontdata.glyphs - if not f then - report("invalid filehandle") - return - end - if offset then - setposition(f,offset) - end - local header = readheader(f) - if header.major > 1 then - report("version mismatch") - return - end - local names = readfontnames(f) - local dictionaries = readtopdictionaries(f) - local strings = readstrings(f) - local data = { - header = header, - names = names, - dictionaries = dictionaries, - strings = strings, - nofglyphs = fontdata.nofglyphs, - } - -- - parsedictionaries(data,data.dictionaries) - -- - local d = dictionaries[1] - local c = d.cid - fontdata.cffinfo = { - familynamename = d.familyname, - fullname = d.fullname, - boundingbox = d.boundingbox, - weight = d.weight, - italicangle = d.italicangle, - underlineposition = d.underlineposition, - underlinethickness = d.underlinethickness, - monospaced = d.monospaced, - } - fontdata.cidinfo = c and { - registry = c.registry, - ordering = c.ordering, - supplement = c.supplement, - } - -- - if not specification.glyphs then - -- we only want some metadata - else - local cid = d.cid - if cid and cid.fdselect then - readfdselect(f,data,glyphs,specification.shapes or false) - else - readnoselect(f,data,glyphs,specification.shapes or false) - end - end - -- - -- cleanup (probably more can go) - -- - -- for i=1,#dictionaries do - -- local d = dictionaries[i] - -- d.subroutines = nil - -- end - -- data.strings = nil - -- if data then - -- data.charstrings = nil - -- data.routines = nil - -- end - end - end -end diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otf.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otf.lua index 0471c172b85..e90ec738f1c 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otf.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otf.lua @@ -287,7 +287,6 @@ local ordered_enhancers = { "check glyphs", "check metadata", --- "check extra features", -- after metadata "prepare tounicode", @@ -296,7 +295,6 @@ local ordered_enhancers = { "expand lookups", -- a temp hack awaiting the lua loader ---[[phg-- PATCH: Next line restores font features --phg]]-- "check extra features", -- after metadata and duplicates "cleanup tables", @@ -601,9 +599,6 @@ function otf.load(filename,sub,featurefile) -- second argument (format) is gone applyruntimefixes(filename,data) end 
enhance("add dimensions",data,filename,nil,false) ---[[phg-- This was hand-patched to restore the fontloader -enhance("check extra features",data,filename) ---phg]]-- if trace_sequences then showfeatureorder(data,filename) end @@ -2955,11 +2950,13 @@ end otf.coverup = { stepkey = "subtables", actions = { - substitution = justset, - alternate = justset, - multiple = justset, - ligature = justset, - kern = justset, + substitution = justset, + alternate = justset, + multiple = justset, + ligature = justset, + kern = justset, + chainsubstitution = justset, + chainposition = justset, }, register = function(coverage,lookuptype,format,feature,n,descriptions,resources) local name = formatters["ctx_%s_%s_%s"](feature,lookuptype,n) -- we can have a mix of types diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otn.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otn.lua index b48aea761c8..081630a7be4 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otn.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-font-otn.lua @@ -246,6 +246,7 @@ local zwj = 0x200D local wildcard = "*" local default = "dflt" +local whatcodes = nodes.whatcodes local nodecodes = nodes.nodecodes local glyphcodes = nodes.glyphcodes local disccodes = nodes.disccodes @@ -253,9 +254,10 @@ local disccodes = nodes.disccodes local glyph_code = nodecodes.glyph local glue_code = nodecodes.glue local disc_code = nodecodes.disc +local whatsit_code = nodecodes.whatsit local math_code = nodecodes.math -local dir_code = nodecodes.dir -local localpar_code = nodecodes.localpar +local dir_code = nodecodes.dir or whatcodes.dir +local localpar_code = nodecodes.localpar or whatcodes.localpar local discretionary_code = disccodes.discretionary local ligature_code = glyphcodes.ligature @@ -3358,6 +3360,37 @@ local function featuresprocessor(head,font,attr) comprun(start,c_run) start = getnext(start) end + elseif id == whatsit_code then + local subtype = getsubtype(start) + if subtype == dir_code then + local dir = getfield(start,"dir") + if dir == "+TLT" then + topstack = topstack + 1 + dirstack[topstack] = dir + rlmode = 1 + elseif dir == "+TRT" then + topstack = topstack + 1 + dirstack[topstack] = dir + rlmode = -1 + elseif dir == "-TLT" or dir == "-TRT" then + topstack = topstack - 1 + rlmode = dirstack[topstack] == "+TRT" and -1 or 1 + else + rlmode = rlparmode + end + elseif subtype == localpar_code then + local dir = getfield(start,"dir") + if dir == "TRT" then + rlparmode = -1 + elseif dir == "TLT" then + rlparmode = 1 + else + rlparmode = 0 + end + -- one might wonder if the par dir should be looked at, so we might as well drop the n + rlmode = rlparmode + end + start = getnext(start) elseif id == math_code then start = getnext(end_of_math(start)) elseif id == dir_code then @@ -3628,6 +3661,36 @@ local function featuresprocessor(head,font,attr) comprun(start,c_run) start = getnext(start) end + elseif id == whatsit_code then + local subtype = getsubtype(start) + if subtype == dir_code then + local dir = getfield(start,"dir") + if dir == "+TLT" then + topstack = topstack + 1 + dirstack[topstack] = dir + rlmode = 1 + elseif dir == "+TRT" then + topstack = topstack + 1 + dirstack[topstack] = dir + rlmode = -1 + elseif dir == "-TLT" or dir == "-TRT" then + topstack = topstack - 1 + rlmode = dirstack[topstack] == "+TRT" and -1 or 1 + else + rlmode = rlparmode + end + elseif subtype == localpar_code then + local dir = getfield(start,"dir") + if dir == "TRT" then + rlparmode = -1 + elseif dir 
== "TLT" then + rlparmode = 1 + else + rlparmode = 0 + end + rlmode = rlparmode + end + start = getnext(start) elseif id == math_code then start = getnext(end_of_math(start)) elseif id == dir_code then @@ -3894,9 +3957,6 @@ local function prepare_contextchains(tfmdata) sequence[nofsequences] = after[n] end end ---[[phg-- Hard patch: This crashes, see https://github.com/lualatex/luaotfload/issues/303 - if sequence[1] then ---phg]]-- if sequence[start] then -- Replacements only happen with reverse lookups as they are single only. We -- could pack them into current (replacement value instead of true) and then diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-l-lpeg.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-l-lpeg.lua index 5be12468bc6..959ca553eb8 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-l-lpeg.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-l-lpeg.lua @@ -110,7 +110,7 @@ local underscore = P("_") local hexdigit = digit + lowercase + uppercase local cr, lf, crlf = P("\r"), P("\n"), P("\r\n") ----- newline = crlf + S("\r\n") -- cr + lf -local newline = P("\r") * (P("\n") + P(true)) + P("\n") +local newline = P("\r") * (P("\n") + P(true)) + P("\n") -- P("\r")^-1 * P("\n")^-1 local escaped = P("\\") * anything local squote = P("'") local dquote = P('"') diff --git a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-reference.lua b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-reference.lua index 9208e5286ba..b135c44b46e 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/fontloader-reference.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/fontloader-reference.lua @@ -1,6 +1,6 @@ -- merged file : c:/data/develop/context/sources/luatex-fonts-merged.lua -- parent file : c:/data/develop/context/sources/luatex-fonts.lua --- merge date : 12/22/15 10:50:54 +-- merge date : 01/08/16 19:09:31 do -- begin closure to overcome local limits and interference @@ -144,7 +144,7 @@ local uppercase=R("AZ") local underscore=P("_") local hexdigit=digit+lowercase+uppercase local cr,lf,crlf=P("\r"),P("\n"),P("\r\n") -local newline=P("\r")*(P("\n")+P(true))+P("\n") +local newline=P("\r")*(P("\n")+P(true))+P("\n") local escaped=P("\\")*anything local squote=P("'") local dquote=P('"') @@ -9571,6 +9571,8 @@ otf.coverup={ multiple=justset, ligature=justset, kern=justset, + chainsubstitution=justset, + chainposition=justset, }, register=function(coverage,lookuptype,format,feature,n,descriptions,resources) local name=formatters["ctx_%s_%s_%s"](feature,lookuptype,n) diff --git a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-configuration.lua b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-configuration.lua index 5e2800db678..0d484797511 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-configuration.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-configuration.lua @@ -89,7 +89,7 @@ local config_paths = { } local valid_formats = tabletohash { - "otf", "ttc", "ttf", "dfont", "afm", "pfb", "pfa", + "otf", "ttc", "ttf", "dfont", "afm", "pfb", "pfa", } local feature_presets = { diff --git a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-features.lua b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-features.lua index 39228821fb8..fd43e58b443 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-features.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-features.lua @@ -69,621 +69,822 @@ local mathceil = math.ceil ---[[ begin excerpt from font-ott.lua ]] local scripts = { - ['arab'] = 'arabic', 
- ['armn'] = 'armenian', - ['bali'] = 'balinese', - ['beng'] = 'bengali', - ['bopo'] = 'bopomofo', - ['brai'] = 'braille', - ['bugi'] = 'buginese', - ['buhd'] = 'buhid', - ['byzm'] = 'byzantine music', - ['cans'] = 'canadian syllabics', - ['cher'] = 'cherokee', - ['copt'] = 'coptic', - ['cprt'] = 'cypriot syllabary', - ['cyrl'] = 'cyrillic', - ['deva'] = 'devanagari', - ['dsrt'] = 'deseret', - ['ethi'] = 'ethiopic', - ['geor'] = 'georgian', - ['glag'] = 'glagolitic', - ['goth'] = 'gothic', - ['grek'] = 'greek', - ['gujr'] = 'gujarati', - ['guru'] = 'gurmukhi', - ['hang'] = 'hangul', - ['hani'] = 'cjk ideographic', - ['hano'] = 'hanunoo', - ['hebr'] = 'hebrew', - ['ital'] = 'old italic', - ['jamo'] = 'hangul jamo', - ['java'] = 'javanese', - ['kana'] = 'hiragana and katakana', - ['khar'] = 'kharosthi', - ['khmr'] = 'khmer', - ['knda'] = 'kannada', - ['lao' ] = 'lao', - ['latn'] = 'latin', - ['limb'] = 'limbu', - ['linb'] = 'linear b', - ['math'] = 'mathematical alphanumeric symbols', - ['mlym'] = 'malayalam', - ['mong'] = 'mongolian', - ['musc'] = 'musical symbols', - ['mymr'] = 'myanmar', - ['nko' ] = "n'ko", - ['ogam'] = 'ogham', - ['orya'] = 'oriya', - ['osma'] = 'osmanya', - ['phag'] = 'phags-pa', - ['phnx'] = 'phoenician', - ['runr'] = 'runic', - ['shaw'] = 'shavian', - ['sinh'] = 'sinhala', - ['sylo'] = 'syloti nagri', - ['syrc'] = 'syriac', - ['tagb'] = 'tagbanwa', - ['tale'] = 'tai le', - ['talu'] = 'tai lu', - ['taml'] = 'tamil', - ['telu'] = 'telugu', - ['tfng'] = 'tifinagh', - ['tglg'] = 'tagalog', - ['thaa'] = 'thaana', - ['thai'] = 'thai', - ['tibt'] = 'tibetan', - ['ugar'] = 'ugaritic cuneiform', - ['xpeo'] = 'old persian cuneiform', - ['xsux'] = 'sumero-akkadian cuneiform', - ['yi' ] = 'yi', -} + ["arab"] = "arabic", + ["armn"] = "armenian", + ["bali"] = "balinese", + ["beng"] = "bengali", + ["bopo"] = "bopomofo", + ["brai"] = "braille", + ["bugi"] = "buginese", + ["buhd"] = "buhid", + ["byzm"] = "byzantine music", + ["cans"] = "canadian syllabics", + ["cher"] = "cherokee", + ["copt"] = "coptic", + ["cprt"] = "cypriot syllabary", + ["cyrl"] = "cyrillic", + ["deva"] = "devanagari", + ["dsrt"] = "deseret", + ["ethi"] = "ethiopic", + ["geor"] = "georgian", + ["glag"] = "glagolitic", + ["goth"] = "gothic", + ["grek"] = "greek", + ["gujr"] = "gujarati", + ["guru"] = "gurmukhi", + ["hang"] = "hangul", + ["hani"] = "cjk ideographic", + ["hano"] = "hanunoo", + ["hebr"] = "hebrew", + ["ital"] = "old italic", + ["jamo"] = "hangul jamo", + ["java"] = "javanese", + ["kana"] = "hiragana and katakana", + ["khar"] = "kharosthi", + ["khmr"] = "khmer", + ["knda"] = "kannada", + ["lao" ] = "lao", + ["latn"] = "latin", + ["limb"] = "limbu", + ["linb"] = "linear b", + ["math"] = "mathematical alphanumeric symbols", + ["mlym"] = "malayalam", + ["mong"] = "mongolian", + ["musc"] = "musical symbols", + ["mymr"] = "myanmar", + ["nko" ] = "n\"ko", + ["ogam"] = "ogham", + ["orya"] = "oriya", + ["osma"] = "osmanya", + ["phag"] = "phags-pa", + ["phnx"] = "phoenician", + ["runr"] = "runic", + ["shaw"] = "shavian", + ["sinh"] = "sinhala", + ["sylo"] = "syloti nagri", + ["syrc"] = "syriac", + ["tagb"] = "tagbanwa", + ["tale"] = "tai le", + ["talu"] = "tai lu", + ["taml"] = "tamil", + ["telu"] = "telugu", + ["tfng"] = "tifinagh", + ["tglg"] = "tagalog", + ["thaa"] = "thaana", + ["thai"] = "thai", + ["tibt"] = "tibetan", + ["ugar"] = "ugaritic cuneiform", + ["xpeo"] = "old persian cuneiform", + ["xsux"] = "sumero-akkadian cuneiform", + ["yi" ] = "yi", +} -- [[ [scripts] ]] local languages = { - ['aba'] = 
'abaza', - ['abk'] = 'abkhazian', - ['ady'] = 'adyghe', - ['afk'] = 'afrikaans', - ['afr'] = 'afar', - ['agw'] = 'agaw', - ['als'] = 'alsatian', - ['alt'] = 'altai', - ['amh'] = 'amharic', - ['ara'] = 'arabic', - ['ari'] = 'aari', - ['ark'] = 'arakanese', - ['asm'] = 'assamese', - ['ath'] = 'athapaskan', - ['avr'] = 'avar', - ['awa'] = 'awadhi', - ['aym'] = 'aymara', - ['aze'] = 'azeri', - ['bad'] = 'badaga', - ['bag'] = 'baghelkhandi', - ['bal'] = 'balkar', - ['bau'] = 'baule', - ['bbr'] = 'berber', - ['bch'] = 'bench', - ['bcr'] = 'bible cree', - ['bel'] = 'belarussian', - ['bem'] = 'bemba', - ['ben'] = 'bengali', - ['bgr'] = 'bulgarian', - ['bhi'] = 'bhili', - ['bho'] = 'bhojpuri', - ['bik'] = 'bikol', - ['bil'] = 'bilen', - ['bkf'] = 'blackfoot', - ['bli'] = 'balochi', - ['bln'] = 'balante', - ['blt'] = 'balti', - ['bmb'] = 'bambara', - ['bml'] = 'bamileke', - ['bos'] = 'bosnian', - ['bre'] = 'breton', - ['brh'] = 'brahui', - ['bri'] = 'braj bhasha', - ['brm'] = 'burmese', - ['bsh'] = 'bashkir', - ['bti'] = 'beti', - ['cat'] = 'catalan', - ['ceb'] = 'cebuano', - ['che'] = 'chechen', - ['chg'] = 'chaha gurage', - ['chh'] = 'chattisgarhi', - ['chi'] = 'chichewa', - ['chk'] = 'chukchi', - ['chp'] = 'chipewyan', - ['chr'] = 'cherokee', - ['chu'] = 'chuvash', - ['cmr'] = 'comorian', - ['cop'] = 'coptic', - ['cos'] = 'corsican', - ['cre'] = 'cree', - ['crr'] = 'carrier', - ['crt'] = 'crimean tatar', - ['csl'] = 'church slavonic', - ['csy'] = 'czech', - ['dan'] = 'danish', - ['dar'] = 'dargwa', - ['dcr'] = 'woods cree', - ['deu'] = 'german', - ['dgr'] = 'dogri', - ['div'] = 'divehi', - ['djr'] = 'djerma', - ['dng'] = 'dangme', - ['dnk'] = 'dinka', - ['dri'] = 'dari', - ['dun'] = 'dungan', - ['dzn'] = 'dzongkha', - ['ebi'] = 'ebira', - ['ecr'] = 'eastern cree', - ['edo'] = 'edo', - ['efi'] = 'efik', - ['ell'] = 'greek', - ['eng'] = 'english', - ['erz'] = 'erzya', - ['esp'] = 'spanish', - ['eti'] = 'estonian', - ['euq'] = 'basque', - ['evk'] = 'evenki', - ['evn'] = 'even', - ['ewe'] = 'ewe', - ['fan'] = 'french antillean', - ['far'] = 'farsi', - ['fin'] = 'finnish', - ['fji'] = 'fijian', - ['fle'] = 'flemish', - ['fne'] = 'forest nenets', - ['fon'] = 'fon', - ['fos'] = 'faroese', - ['fra'] = 'french', - ['fri'] = 'frisian', - ['frl'] = 'friulian', - ['fta'] = 'futa', - ['ful'] = 'fulani', - ['gad'] = 'ga', - ['gae'] = 'gaelic', - ['gag'] = 'gagauz', - ['gal'] = 'galician', - ['gar'] = 'garshuni', - ['gaw'] = 'garhwali', - ['gez'] = "ge'ez", - ['gil'] = 'gilyak', - ['gmz'] = 'gumuz', - ['gon'] = 'gondi', - ['grn'] = 'greenlandic', - ['gro'] = 'garo', - ['gua'] = 'guarani', - ['guj'] = 'gujarati', - ['hai'] = 'haitian', - ['hal'] = 'halam', - ['har'] = 'harauti', - ['hau'] = 'hausa', - ['haw'] = 'hawaiin', - ['hbn'] = 'hammer-banna', - ['hil'] = 'hiligaynon', - ['hin'] = 'hindi', - ['hma'] = 'high mari', - ['hnd'] = 'hindko', - ['ho'] = 'ho', - ['hri'] = 'harari', - ['hrv'] = 'croatian', - ['hun'] = 'hungarian', - ['hye'] = 'armenian', - ['ibo'] = 'igbo', - ['ijo'] = 'ijo', - ['ilo'] = 'ilokano', - ['ind'] = 'indonesian', - ['ing'] = 'ingush', - ['inu'] = 'inuktitut', - ['iri'] = 'irish', - ['irt'] = 'irish traditional', - ['isl'] = 'icelandic', - ['ism'] = 'inari sami', - ['ita'] = 'italian', - ['iwr'] = 'hebrew', - ['jan'] = 'japanese', - ['jav'] = 'javanese', - ['jii'] = 'yiddish', - ['jud'] = 'judezmo', - ['jul'] = 'jula', - ['kab'] = 'kabardian', - ['kac'] = 'kachchi', - ['kal'] = 'kalenjin', - ['kan'] = 'kannada', - ['kar'] = 'karachay', - ['kat'] = 'georgian', - ['kaz'] = 'kazakh', - 
['keb'] = 'kebena', - ['kge'] = 'khutsuri georgian', - ['kha'] = 'khakass', - ['khk'] = 'khanty-kazim', - ['khm'] = 'khmer', - ['khs'] = 'khanty-shurishkar', - ['khv'] = 'khanty-vakhi', - ['khw'] = 'khowar', - ['kik'] = 'kikuyu', - ['kir'] = 'kirghiz', - ['kis'] = 'kisii', - ['kkn'] = 'kokni', - ['klm'] = 'kalmyk', - ['kmb'] = 'kamba', - ['kmn'] = 'kumaoni', - ['kmo'] = 'komo', - ['kms'] = 'komso', - ['knr'] = 'kanuri', - ['kod'] = 'kodagu', - ['koh'] = 'korean old hangul', - ['kok'] = 'konkani', - ['kon'] = 'kikongo', - ['kop'] = 'komi-permyak', - ['kor'] = 'korean', - ['koz'] = 'komi-zyrian', - ['kpl'] = 'kpelle', - ['kri'] = 'krio', - ['krk'] = 'karakalpak', - ['krl'] = 'karelian', - ['krm'] = 'karaim', - ['krn'] = 'karen', - ['krt'] = 'koorete', - ['ksh'] = 'kashmiri', - ['ksi'] = 'khasi', - ['ksm'] = 'kildin sami', - ['kui'] = 'kui', - ['kul'] = 'kulvi', - ['kum'] = 'kumyk', - ['kur'] = 'kurdish', - ['kuu'] = 'kurukh', - ['kuy'] = 'kuy', - ['kyk'] = 'koryak', - ['lad'] = 'ladin', - ['lah'] = 'lahuli', - ['lak'] = 'lak', - ['lam'] = 'lambani', - ['lao'] = 'lao', - ['lat'] = 'latin', - ['laz'] = 'laz', - ['lcr'] = 'l-cree', - ['ldk'] = 'ladakhi', - ['lez'] = 'lezgi', - ['lin'] = 'lingala', - ['lma'] = 'low mari', - ['lmb'] = 'limbu', - ['lmw'] = 'lomwe', - ['lsb'] = 'lower sorbian', - ['lsm'] = 'lule sami', - ['lth'] = 'lithuanian', - ['ltz'] = 'luxembourgish', - ['lub'] = 'luba', - ['lug'] = 'luganda', - ['luh'] = 'luhya', - ['luo'] = 'luo', - ['lvi'] = 'latvian', - ['maj'] = 'majang', - ['mak'] = 'makua', - ['mal'] = 'malayalam traditional', - ['man'] = 'mansi', - ['map'] = 'mapudungun', - ['mar'] = 'marathi', - ['maw'] = 'marwari', - ['mbn'] = 'mbundu', - ['mch'] = 'manchu', - ['mcr'] = 'moose cree', - ['mde'] = 'mende', - ['men'] = "me'en", - ['miz'] = 'mizo', - ['mkd'] = 'macedonian', - ['mle'] = 'male', - ['mlg'] = 'malagasy', - ['mln'] = 'malinke', - ['mlr'] = 'malayalam reformed', - ['mly'] = 'malay', - ['mnd'] = 'mandinka', - ['mng'] = 'mongolian', - ['mni'] = 'manipuri', - ['mnk'] = 'maninka', - ['mnx'] = 'manx gaelic', - ['moh'] = 'mohawk', - ['mok'] = 'moksha', - ['mol'] = 'moldavian', - ['mon'] = 'mon', - ['mor'] = 'moroccan', - ['mri'] = 'maori', - ['mth'] = 'maithili', - ['mts'] = 'maltese', - ['mun'] = 'mundari', - ['nag'] = 'naga-assamese', - ['nan'] = 'nanai', - ['nas'] = 'naskapi', - ['ncr'] = 'n-cree', - ['ndb'] = 'ndebele', - ['ndg'] = 'ndonga', - ['nep'] = 'nepali', - ['new'] = 'newari', - ['ngr'] = 'nagari', - ['nhc'] = 'norway house cree', - ['nis'] = 'nisi', - ['niu'] = 'niuean', - ['nkl'] = 'nkole', - ['nko'] = "n'ko", - ['nld'] = 'dutch', - ['nog'] = 'nogai', - ['nor'] = 'norwegian', - ['nsm'] = 'northern sami', - ['nta'] = 'northern tai', - ['nto'] = 'esperanto', - ['nyn'] = 'nynorsk', - ['oci'] = 'occitan', - ['ocr'] = 'oji-cree', - ['ojb'] = 'ojibway', - ['ori'] = 'oriya', - ['oro'] = 'oromo', - ['oss'] = 'ossetian', - ['paa'] = 'palestinian aramaic', - ['pal'] = 'pali', - ['pan'] = 'punjabi', - ['pap'] = 'palpa', - ['pas'] = 'pashto', - ['pgr'] = 'polytonic greek', - ['pil'] = 'pilipino', - ['plg'] = 'palaung', - ['plk'] = 'polish', - ['pro'] = 'provencal', - ['ptg'] = 'portuguese', - ['qin'] = 'chin', - ['raj'] = 'rajasthani', - ['rbu'] = 'russian buriat', - ['rcr'] = 'r-cree', - ['ria'] = 'riang', - ['rms'] = 'rhaeto-romanic', - ['rom'] = 'romanian', - ['roy'] = 'romany', - ['rsy'] = 'rusyn', - ['rua'] = 'ruanda', - ['rus'] = 'russian', - ['sad'] = 'sadri', - ['san'] = 'sanskrit', - ['sat'] = 'santali', - ['say'] = 'sayisi', - ['sek'] = 'sekota', - 
['sel'] = 'selkup', - ['sgo'] = 'sango', - ['shn'] = 'shan', - ['sib'] = 'sibe', - ['sid'] = 'sidamo', - ['sig'] = 'silte gurage', - ['sks'] = 'skolt sami', - ['sky'] = 'slovak', - ['sla'] = 'slavey', - ['slv'] = 'slovenian', - ['sml'] = 'somali', - ['smo'] = 'samoan', - ['sna'] = 'sena', - ['snd'] = 'sindhi', - ['snh'] = 'sinhalese', - ['snk'] = 'soninke', - ['sog'] = 'sodo gurage', - ['sot'] = 'sotho', - ['sqi'] = 'albanian', - ['srb'] = 'serbian', - ['srk'] = 'saraiki', - ['srr'] = 'serer', - ['ssl'] = 'south slavey', - ['ssm'] = 'southern sami', - ['sur'] = 'suri', - ['sva'] = 'svan', - ['sve'] = 'swedish', - ['swa'] = 'swadaya aramaic', - ['swk'] = 'swahili', - ['swz'] = 'swazi', - ['sxt'] = 'sutu', - ['syr'] = 'syriac', - ['tab'] = 'tabasaran', - ['taj'] = 'tajiki', - ['tam'] = 'tamil', - ['tat'] = 'tatar', - ['tcr'] = 'th-cree', - ['tel'] = 'telugu', - ['tgn'] = 'tongan', - ['tgr'] = 'tigre', - ['tgy'] = 'tigrinya', - ['tha'] = 'thai', - ['tht'] = 'tahitian', - ['tib'] = 'tibetan', - ['tkm'] = 'turkmen', - ['tmn'] = 'temne', - ['tna'] = 'tswana', - ['tne'] = 'tundra nenets', - ['tng'] = 'tonga', - ['tod'] = 'todo', - ['trk'] = 'turkish', - ['tsg'] = 'tsonga', - ['tua'] = 'turoyo aramaic', - ['tul'] = 'tulu', - ['tuv'] = 'tuvin', - ['twi'] = 'twi', - ['udm'] = 'udmurt', - ['ukr'] = 'ukrainian', - ['urd'] = 'urdu', - ['usb'] = 'upper sorbian', - ['uyg'] = 'uyghur', - ['uzb'] = 'uzbek', - ['ven'] = 'venda', - ['vit'] = 'vietnamese', - ['wa' ] = 'wa', - ['wag'] = 'wagdi', - ['wcr'] = 'west-cree', - ['wel'] = 'welsh', - ['wlf'] = 'wolof', - ['xbd'] = 'tai lue', - ['xhs'] = 'xhosa', - ['yak'] = 'yakut', - ['yba'] = 'yoruba', - ['ycr'] = 'y-cree', - ['yic'] = 'yi classic', - ['yim'] = 'yi modern', - ['zhh'] = 'chinese hong kong', - ['zhp'] = 'chinese phonetic', - ['zhs'] = 'chinese simplified', - ['zht'] = 'chinese traditional', - ['znd'] = 'zande', - ['zul'] = 'zulu' -} + ["aba" ] = "abaza", + ["abk" ] = "abkhazian", + ["ach" ] = "acholi", + ["acr" ] = "achi", + ["ady" ] = "adyghe", + ["afk" ] = "afrikaans", + ["afr" ] = "afar", + ["agw" ] = "agaw", + ["aio" ] = "aiton", + ["aka" ] = "akan", + ["als" ] = "alsatian", + ["alt" ] = "altai", + ["amh" ] = "amharic", + ["ang" ] = "anglo-saxon", + ["apph"] = "phonetic transcription—americanist conventions", + ["ara" ] = "arabic", + ["arg" ] = "aragonese", + ["ari" ] = "aari", + ["ark" ] = "rakhine", + ["asm" ] = "assamese", + ["ast" ] = "asturian", + ["ath" ] = "athapaskan", + ["avr" ] = "avar", + ["awa" ] = "awadhi", + ["aym" ] = "aymara", + ["azb" ] = "torki", + ["aze" ] = "azerbaijani", + ["bad" ] = "badaga", + ["bad0"] = "banda", + ["bag" ] = "baghelkhandi", + ["bal" ] = "balkar", + ["ban" ] = "balinese", + ["bar" ] = "bavarian", + ["bau" ] = "baulé", + ["bbc" ] = "batak toba", + ["bbr" ] = "berber", + ["bch" ] = "bench", + ["bcr" ] = "bible cree", + ["bdy" ] = "bandjalang", + ["bel" ] = "belarussian", + ["bem" ] = "bemba", + ["ben" ] = "bengali", + ["bgc" ] = "haryanvi", + ["bgq" ] = "bagri", + ["bgr" ] = "bulgarian", + ["bhi" ] = "bhili", + ["bho" ] = "bhojpuri", + ["bik" ] = "bikol", + ["bil" ] = "bilen", + ["bis" ] = "bislama", + ["bjj" ] = "kanauji", + ["bkf" ] = "blackfoot", + ["bli" ] = "baluchi", + ["blk" ] = "pa'o karen", + ["bln" ] = "balante", + ["blt" ] = "balti", + ["bmb" ] = "bambara (bamanankan)", + ["bml" ] = "bamileke", + ["bos" ] = "bosnian", + ["bpy" ] = "bishnupriya manipuri", + ["bre" ] = "breton", + ["brh" ] = "brahui", + ["bri" ] = "braj bhasha", + ["brm" ] = "burmese", + ["brx" ] = "bodo", + ["bsh" ] = "bashkir", + 
["bti" ] = "beti", + ["bts" ] = "batak simalungun", + ["bug" ] = "bugis", + ["cak" ] = "kaqchikel", + ["cat" ] = "catalan", + ["cbk" ] = "zamboanga chavacano", + ["ceb" ] = "cebuano", + ["cgg" ] = "chiga", + ["cha" ] = "chamorro", + ["che" ] = "chechen", + ["chg" ] = "chaha gurage", + ["chh" ] = "chattisgarhi", + ["chi" ] = "chichewa (chewa, nyanja)", + ["chk" ] = "chukchi", + ["chk0"] = "chuukese", + ["cho" ] = "choctaw", + ["chp" ] = "chipewyan", + ["chr" ] = "cherokee", + ["chu" ] = "chuvash", + ["chy" ] = "cheyenne", + ["cmr" ] = "comorian", + ["cop" ] = "coptic", + ["cor" ] = "cornish", + ["cos" ] = "corsican", + ["cpp" ] = "creoles", + ["cre" ] = "cree", + ["crr" ] = "carrier", + ["crt" ] = "crimean tatar", + ["csb" ] = "kashubian", + ["csl" ] = "church slavonic", + ["csy" ] = "czech", + ["ctg" ] = "chittagonian", + ["cuk" ] = "san blas kuna", + ["dan" ] = "danish", + ["dar" ] = "dargwa", + ["dax" ] = "dayi", + ["dcr" ] = "woods cree", + ["deu" ] = "german", + ["dgo" ] = "dogri", + ["dgr" ] = "dogri", + ["dhg" ] = "dhangu", + ["dhv" ] = "divehi (dhivehi, maldivian)", + ["diq" ] = "dimli", + ["div" ] = "divehi (dhivehi, maldivian)", + ["djr" ] = "zarma", + ["djr0"] = "djambarrpuyngu", + ["dng" ] = "dangme", + ["dnj" ] = "dan", + ["dnk" ] = "dinka", + ["dri" ] = "dari", + ["duj" ] = "dhuwal", + ["dun" ] = "dungan", + ["dzn" ] = "dzongkha", + ["ebi" ] = "ebira", + ["ecr" ] = "eastern cree", + ["edo" ] = "edo", + ["efi" ] = "efik", + ["ell" ] = "greek", + ["emk" ] = "eastern maninkakan", + ["eng" ] = "english", + ["erz" ] = "erzya", + ["esp" ] = "spanish", + ["esu" ] = "central yupik", + ["eti" ] = "estonian", + ["euq" ] = "basque", + ["evk" ] = "evenki", + ["evn" ] = "even", + ["ewe" ] = "ewe", + ["fan" ] = "french antillean", + ["fan0"] = " fang", + ["far" ] = "persian", + ["fat" ] = "fanti", + ["fin" ] = "finnish", + ["fji" ] = "fijian", + ["fle" ] = "dutch (flemish)", + ["fne" ] = "forest nenets", + ["fon" ] = "fon", + ["fos" ] = "faroese", + ["fra" ] = "french", + ["frc" ] = "cajun french", + ["fri" ] = "frisian", + ["frl" ] = "friulian", + ["frp" ] = "arpitan", + ["fta" ] = "futa", + ["ful" ] = "fulah", + ["fuv" ] = "nigerian fulfulde", + ["gad" ] = "ga", + ["gae" ] = "scottish gaelic (gaelic)", + ["gag" ] = "gagauz", + ["gal" ] = "galician", + ["gar" ] = "garshuni", + ["gaw" ] = "garhwali", + ["gez" ] = "ge'ez", + ["gih" ] = "githabul", + ["gil" ] = "gilyak", + ["gil0"] = " kiribati (gilbertese)", + ["gkp" ] = "kpelle (guinea)", + ["glk" ] = "gilaki", + ["gmz" ] = "gumuz", + ["gnn" ] = "gumatj", + ["gog" ] = "gogo", + ["gon" ] = "gondi", + ["grn" ] = "greenlandic", + ["gro" ] = "garo", + ["gua" ] = "guarani", + ["guc" ] = "wayuu", + ["guf" ] = "gupapuyngu", + ["guj" ] = "gujarati", + ["guz" ] = "gusii", + ["hai" ] = "haitian (haitian creole)", + ["hal" ] = "halam", + ["har" ] = "harauti", + ["hau" ] = "hausa", + ["haw" ] = "hawaiian", + ["hay" ] = "haya", + ["haz" ] = "hazaragi", + ["hbn" ] = "hammer-banna", + ["her" ] = "herero", + ["hil" ] = "hiligaynon", + ["hin" ] = "hindi", + ["hma" ] = "high mari", + ["hmn" ] = "hmong", + ["hmo" ] = "hiri motu", + ["hnd" ] = "hindko", + ["ho" ] = "ho", + ["hri" ] = "harari", + ["hrv" ] = "croatian", + ["hun" ] = "hungarian", + ["hye" ] = "armenian", + ["hye0"] = "armenian east", + ["iba" ] = "iban", + ["ibb" ] = "ibibio", + ["ibo" ] = "igbo", + ["ido" ] = "ido", + ["ijo" ] = "ijo languages", + ["ile" ] = "interlingue", + ["ilo" ] = "ilokano", + ["ina" ] = "interlingua", + ["ind" ] = "indonesian", + ["ing" ] = "ingush", + ["inu" ] = 
"inuktitut", + ["ipk" ] = "inupiat", + ["ipph"] = "phonetic transcription—ipa conventions", + ["iri" ] = "irish", + ["irt" ] = "irish traditional", + ["isl" ] = "icelandic", + ["ism" ] = "inari sami", + ["ita" ] = "italian", + ["iwr" ] = "hebrew", + ["jam" ] = "jamaican creole", + ["jan" ] = "japanese", + ["jav" ] = "javanese", + ["jbo" ] = "lojban", + ["jii" ] = "yiddish", + ["jud" ] = "ladino", + ["jul" ] = "jula", + ["kab" ] = "kabardian", + ["kab0"] = "kabyle", + ["kac" ] = "kachchi", + ["kal" ] = "kalenjin", + ["kan" ] = "kannada", + ["kar" ] = "karachay", + ["kat" ] = "georgian", + ["kaz" ] = "kazakh", + ["kde" ] = "makonde", + ["kea" ] = "kabuverdianu (crioulo)", + ["keb" ] = "kebena", + ["kek" ] = "kekchi", + ["kge" ] = "khutsuri georgian", + ["kha" ] = "khakass", + ["khk" ] = "khanty-kazim", + ["khm" ] = "khmer", + ["khs" ] = "khanty-shurishkar", + ["kht" ] = "khamti shan", + ["khv" ] = "khanty-vakhi", + ["khw" ] = "khowar", + ["kik" ] = "kikuyu (gikuyu)", + ["kir" ] = "kirghiz (kyrgyz)", + ["kis" ] = "kisii", + ["kiu" ] = "kirmanjki", + ["kjd" ] = "southern kiwai", + ["kjp" ] = "eastern pwo karen", + ["kkn" ] = "kokni", + ["klm" ] = "kalmyk", + ["kmb" ] = "kamba", + ["kmn" ] = "kumaoni", + ["kmo" ] = "komo", + ["kms" ] = "komso", + ["knr" ] = "kanuri", + ["kod" ] = "kodagu", + ["koh" ] = "korean old hangul", + ["kok" ] = "konkani", + ["kom" ] = "komi", + ["kon" ] = "kikongo", + ["kon0"] = "kongo", + ["kop" ] = "komi-permyak", + ["kor" ] = "korean", + ["kos" ] = "kosraean", + ["koz" ] = "komi-zyrian", + ["kpl" ] = "kpelle", + ["kri" ] = "krio", + ["krk" ] = "karakalpak", + ["krl" ] = "karelian", + ["krm" ] = "karaim", + ["krn" ] = "karen", + ["krt" ] = "koorete", + ["ksh" ] = "kashmiri", + ["ksh0"] = "ripuarian", + ["ksi" ] = "khasi", + ["ksm" ] = "kildin sami", + ["ksw" ] = "s’gaw karen", + ["kua" ] = "kuanyama", + ["kui" ] = "kui", + ["kul" ] = "kulvi", + ["kum" ] = "kumyk", + ["kur" ] = "kurdish", + ["kuu" ] = "kurukh", + ["kuy" ] = "kuy", + ["kyk" ] = "koryak", + ["kyu" ] = "western kayah", + ["lad" ] = "ladin", + ["lah" ] = "lahuli", + ["lak" ] = "lak", + ["lam" ] = "lambani", + ["lao" ] = "lao", + ["lat" ] = "latin", + ["laz" ] = "laz", + ["lcr" ] = "l-cree", + ["ldk" ] = "ladakhi", + ["lez" ] = "lezgi", + ["lij" ] = "ligurian", + ["lim" ] = "limburgish", + ["lin" ] = "lingala", + ["lis" ] = "lisu", + ["ljp" ] = "lampung", + ["lki" ] = "laki", + ["lma" ] = "low mari", + ["lmb" ] = "limbu", + ["lmo" ] = "lombard", + ["lmw" ] = "lomwe", + ["lom" ] = "loma", + ["lrc" ] = "luri", + ["lsb" ] = "lower sorbian", + ["lsm" ] = "lule sami", + ["lth" ] = "lithuanian", + ["ltz" ] = "luxembourgish", + ["lua" ] = "luba-lulua", + ["lub" ] = "luba-katanga", + ["lug" ] = "ganda", + ["luh" ] = "luyia", + ["luo" ] = "luo", + ["lvi" ] = "latvian", + ["mad" ] = "madura", + ["mag" ] = "magahi", + ["mah" ] = "marshallese", + ["maj" ] = "majang", + ["mak" ] = "makhuwa", + ["mal" ] = "malayalam reformed", + ["mam" ] = "mam", + ["man" ] = "mansi", + ["map" ] = "mapudungun", + ["mar" ] = "marathi", + ["maw" ] = "marwari", + ["mbn" ] = "mbundu", + ["mch" ] = "manchu", + ["mcr" ] = "moose cree", + ["mde" ] = "mende", + ["mdr" ] = "mandar", + ["men" ] = "me'en", + ["mer" ] = "meru", + ["mfe" ] = "morisyen", + ["min" ] = "minangkabau", + ["miz" ] = "mizo", + ["mkd" ] = "macedonian", + ["mkr" ] = "makasar", + ["mkw" ] = "kituba", + ["mle" ] = "male", + ["mlg" ] = "malagasy", + ["mln" ] = "malinke", + ["mly" ] = "malay", + ["mnd" ] = "mandinka", + ["mng" ] = "mongolian", + ["mni" ] = "manipuri", + ["mnk" ] 
= "maninka", + ["mnx" ] = "manx", + ["moh" ] = "mohawk", + ["mok" ] = "moksha", + ["mol" ] = "moldavian", + ["mon" ] = "mon", + ["mor" ] = "moroccan", + ["mos" ] = "mossi", + ["mri" ] = "maori", + ["mth" ] = "maithili", + ["mts" ] = "maltese", + ["mun" ] = "mundari", + ["mus" ] = "muscogee", + ["mwl" ] = "mirandese", + ["mww" ] = "hmong daw", + ["myn" ] = "mayan", + ["mzn" ] = "mazanderani", + ["nag" ] = "naga-assamese", + ["nah" ] = "nahuatl", + ["nan" ] = "nanai", + ["nap" ] = "neapolitan", + ["nas" ] = "naskapi", + ["nau" ] = "nauruan", + ["nav" ] = "navajo", + ["ncr" ] = "n-cree", + ["ndb" ] = "ndebele", + ["ndc" ] = "ndau", + ["ndg" ] = "ndonga", + ["nds" ] = "low saxon", + ["nep" ] = "nepali", + ["new" ] = "newari", + ["nga" ] = "ngbaka", + ["ngr" ] = "nagari", + ["nhc" ] = "norway house cree", + ["nis" ] = "nisi", + ["niu" ] = "niuean", + ["nkl" ] = "nyankole", + ["nko" ] = "n'ko", + ["nld" ] = "dutch", + ["noe" ] = "nimadi", + ["nog" ] = "nogai", + ["nor" ] = "norwegian", + ["nov" ] = "novial", + ["nsm" ] = "northern sami", + ["nso" ] = "sotho, northern", + ["nta" ] = "northern tai", + ["nto" ] = "esperanto", + ["nym" ] = "nyamwezi", + ["nyn" ] = "norwegian nynorsk", + ["oci" ] = "occitan", + ["ocr" ] = "oji-cree", + ["ojb" ] = "ojibway", + ["ori" ] = "odia", + ["oro" ] = "oromo", + ["oss" ] = "ossetian", + ["paa" ] = "palestinian aramaic", + ["pag" ] = "pangasinan", + ["pal" ] = "pali", + ["pam" ] = "pampangan", + ["pan" ] = "punjabi", + ["pap" ] = "palpa", + ["pap0"] = "papiamentu", + ["pas" ] = "pashto", + ["pau" ] = "palauan", + ["pcc" ] = "bouyei", + ["pcd" ] = "picard", + ["pdc" ] = "pennsylvania german", + ["pgr" ] = "polytonic greek", + ["phk" ] = "phake", + ["pih" ] = "norfolk", + ["pil" ] = "filipino", + ["plg" ] = "palaung", + ["plk" ] = "polish", + ["pms" ] = "piemontese", + ["pnb" ] = "western panjabi", + ["poh" ] = "pocomchi", + ["pon" ] = "pohnpeian", + ["pro" ] = "provencal", + ["ptg" ] = "portuguese", + ["pwo" ] = "western pwo karen", + ["qin" ] = "chin", + ["quc" ] = "k’iche’", + ["quh" ] = "quechua (bolivia)", + ["quz" ] = "quechua", + ["qvi" ] = "quechua (ecuador)", + ["qwh" ] = "quechua (peru)", + ["raj" ] = "rajasthani", + ["rar" ] = "rarotongan", + ["rbu" ] = "russian buriat", + ["rcr" ] = "r-cree", + ["rej" ] = "rejang", + ["ria" ] = "riang", + ["rif" ] = "tarifit", + ["rit" ] = "ritarungo", + ["rkw" ] = "arakwal", + ["rms" ] = "romansh", + ["rmy" ] = "vlax romani", + ["rom" ] = "romanian", + ["roy" ] = "romany", + ["rsy" ] = "rusyn", + ["rtm" ] = "rotuman", + ["rua" ] = "kinyarwanda", + ["run" ] = "rundi", + ["rup" ] = "aromanian", + ["rus" ] = "russian", + ["sad" ] = "sadri", + ["san" ] = "sanskrit", + ["sas" ] = "sasak", + ["sat" ] = "santali", + ["say" ] = "sayisi", + ["scn" ] = "sicilian", + ["sco" ] = "scots", + ["sek" ] = "sekota", + ["sel" ] = "selkup", + ["sga" ] = "old irish", + ["sgo" ] = "sango", + ["sgs" ] = "samogitian", + ["shi" ] = "tachelhit", + ["shn" ] = "shan", + ["sib" ] = "sibe", + ["sid" ] = "sidamo", + ["sig" ] = "silte gurage", + ["sks" ] = "skolt sami", + ["sky" ] = "slovak", + ["sla" ] = "slavey", + ["slv" ] = "slovenian", + ["sml" ] = "somali", + ["smo" ] = "samoan", + ["sna" ] = "sena", + ["sna0"] = "shona", + ["snd" ] = "sindhi", + ["snh" ] = "sinhala (sinhalese)", + ["snk" ] = "soninke", + ["sog" ] = "sodo gurage", + ["sop" ] = "songe", + ["sot" ] = "sotho, southern", + ["sqi" ] = "albanian", + ["srb" ] = "serbian", + ["srd" ] = "sardinian", + ["srk" ] = "saraiki", + ["srr" ] = "serer", + ["ssl" ] = "south slavey", + ["ssm" ] = 
"southern sami", + ["stq" ] = "saterland frisian", + ["suk" ] = "sukuma", + ["sun" ] = "sundanese", + ["sur" ] = "suri", + ["sva" ] = "svan", + ["sve" ] = "swedish", + ["swa" ] = "swadaya aramaic", + ["swk" ] = "swahili", + ["swz" ] = "swati", + ["sxt" ] = "sutu", + ["sxu" ] = "upper saxon", + ["syl" ] = "sylheti", + ["syr" ] = "syriac", + ["szl" ] = "silesian", + ["tab" ] = "tabasaran", + ["taj" ] = "tajiki", + ["tam" ] = "tamil", + ["tat" ] = "tatar", + ["tcr" ] = "th-cree", + ["tdd" ] = "dehong dai", + ["tel" ] = "telugu", + ["tet" ] = "tetum", + ["tgl" ] = "tagalog", + ["tgn" ] = "tongan", + ["tgr" ] = "tigre", + ["tgy" ] = "tigrinya", + ["tha" ] = "thai", + ["tht" ] = "tahitian", + ["tib" ] = "tibetan", + ["tiv" ] = "tiv", + ["tkm" ] = "turkmen", + ["tmh" ] = "tamashek", + ["tmn" ] = "temne", + ["tna" ] = "tswana", + ["tne" ] = "tundra nenets", + ["tng" ] = "tonga", + ["tod" ] = "todo", + ["tod0"] = "toma", + ["tpi" ] = "tok pisin", + ["trk" ] = "turkish", + ["tsg" ] = "tsonga", + ["tua" ] = "turoyo aramaic", + ["tul" ] = "tulu", + ["tuv" ] = "tuvin", + ["tvl" ] = "tuvalu", + ["twi" ] = "twi", + ["tyz" ] = "tày", + ["tzm" ] = "tamazight", + ["tzo" ] = "tzotzil", + ["udm" ] = "udmurt", + ["ukr" ] = "ukrainian", + ["umb" ] = "umbundu", + ["urd" ] = "urdu", + ["usb" ] = "upper sorbian", + ["uyg" ] = "uyghur", + ["uzb" ] = "uzbek", + ["vec" ] = "venetian", + ["ven" ] = "venda", + ["vit" ] = "vietnamese", + ["vol" ] = "volapük", + ["vro" ] = "võro", + ["wa" ] = "wa", + ["wag" ] = "wagdi", + ["war" ] = "waray-waray", + ["wcr" ] = "west-cree", + ["wel" ] = "welsh", + ["wlf" ] = "wolof", + ["wln" ] = "walloon", + ["xbd" ] = "lü", + ["xhs" ] = "xhosa", + ["xjb" ] = "minjangbal", + ["xog" ] = "soga", + ["xpe" ] = "kpelle (liberia)", + ["yak" ] = "sakha", + ["yao" ] = "yao", + ["yap" ] = "yapese", + ["yba" ] = "yoruba", + ["ycr" ] = "y-cree", + ["yic" ] = "yi classic", + ["yim" ] = "yi modern", + ["zea" ] = "zealandic", + ["zgh" ] = "standard morrocan tamazigh", + ["zha" ] = "zhuang", + ["zhh" ] = "chinese, hong kong sar", + ["zhp" ] = "chinese phonetic", + ["zhs" ] = "chinese simplified", + ["zht" ] = "chinese traditional", + ["znd" ] = "zande", + ["zul" ] = "zulu", + ["zza" ] = "zazaki", +} --[[ [languages] ]] local features = { - ['aalt'] = 'access all alternates', - ['abvf'] = 'above-base forms', - ['abvm'] = 'above-base mark positioning', - ['abvs'] = 'above-base substitutions', - ['afrc'] = 'alternative fractions', - ['akhn'] = 'akhands', - ['blwf'] = 'below-base forms', - ['blwm'] = 'below-base mark positioning', - ['blws'] = 'below-base substitutions', - ['c2pc'] = 'petite capitals from capitals', - ['c2sc'] = 'small capitals from capitals', - ['calt'] = 'contextual alternates', - ['case'] = 'case-sensitive forms', - ['ccmp'] = 'glyph composition/decomposition', - ['cjct'] = 'conjunct forms', - ['clig'] = 'contextual ligatures', - ['cpsp'] = 'capital spacing', - ['cswh'] = 'contextual swash', - ['curs'] = 'cursive positioning', - ['dflt'] = 'default processing', - ['dist'] = 'distances', - ['dlig'] = 'discretionary ligatures', - ['dnom'] = 'denominators', - ['dtls'] = 'dotless forms', -- math - ['expt'] = 'expert forms', - ['falt'] = 'final glyph alternates', - ['fin2'] = 'terminal forms #2', - ['fin3'] = 'terminal forms #3', - ['fina'] = 'terminal forms', - ['flac'] = 'flattened accents over capitals', -- math - ['frac'] = 'fractions', - ['fwid'] = 'full width', - ['half'] = 'half forms', - ['haln'] = 'halant forms', - ['halt'] = 'alternate half width', - ['hist'] = 'historical forms', 
- ['hkna'] = 'horizontal kana alternates', - ['hlig'] = 'historical ligatures', - ['hngl'] = 'hangul', - ['hojo'] = 'hojo kanji forms', - ['hwid'] = 'half width', - ['init'] = 'initial forms', - ['isol'] = 'isolated forms', - ['ital'] = 'italics', - ['jalt'] = 'justification alternatives', - ['jp04'] = 'jis2004 forms', - ['jp78'] = 'jis78 forms', - ['jp83'] = 'jis83 forms', - ['jp90'] = 'jis90 forms', - ['kern'] = 'kerning', - ['lfbd'] = 'left bounds', - ['liga'] = 'standard ligatures', - ['ljmo'] = 'leading jamo forms', - ['lnum'] = 'lining figures', - ['locl'] = 'localized forms', - ['mark'] = 'mark positioning', - ['med2'] = 'medial forms #2', - ['medi'] = 'medial forms', - ['mgrk'] = 'mathematical greek', - ['mkmk'] = 'mark to mark positioning', - ['mset'] = 'mark positioning via substitution', - ['nalt'] = 'alternate annotation forms', - ['nlck'] = 'nlc kanji forms', - ['nukt'] = 'nukta forms', - ['numr'] = 'numerators', - ['onum'] = 'old style figures', - ['opbd'] = 'optical bounds', - ['ordn'] = 'ordinals', - ['ornm'] = 'ornaments', - ['palt'] = 'proportional alternate width', - ['pcap'] = 'petite capitals', - ['pnum'] = 'proportional figures', - ['pref'] = 'pre-base forms', - ['pres'] = 'pre-base substitutions', - ['pstf'] = 'post-base forms', - ['psts'] = 'post-base substitutions', - ['pwid'] = 'proportional widths', - ['qwid'] = 'quarter widths', - ['rand'] = 'randomize', - ['rkrf'] = 'rakar forms', - ['rlig'] = 'required ligatures', - ['rphf'] = 'reph form', - ['rtbd'] = 'right bounds', - ['rtla'] = 'right-to-left alternates', - ['rtlm'] = 'right to left math', -- math - ['ruby'] = 'ruby notation forms', - ['salt'] = 'stylistic alternates', - ['sinf'] = 'scientific inferiors', - ['size'] = 'optical size', - ['smcp'] = 'small capitals', - ['smpl'] = 'simplified forms', - -- ['ss01'] = 'stylistic set 1', - -- ['ss02'] = 'stylistic set 2', - -- ['ss03'] = 'stylistic set 3', - -- ['ss04'] = 'stylistic set 4', - -- ['ss05'] = 'stylistic set 5', - -- ['ss06'] = 'stylistic set 6', - -- ['ss07'] = 'stylistic set 7', - -- ['ss08'] = 'stylistic set 8', - -- ['ss09'] = 'stylistic set 9', - -- ['ss10'] = 'stylistic set 10', - -- ['ss11'] = 'stylistic set 11', - -- ['ss12'] = 'stylistic set 12', - -- ['ss13'] = 'stylistic set 13', - -- ['ss14'] = 'stylistic set 14', - -- ['ss15'] = 'stylistic set 15', - -- ['ss16'] = 'stylistic set 16', - -- ['ss17'] = 'stylistic set 17', - -- ['ss18'] = 'stylistic set 18', - -- ['ss19'] = 'stylistic set 19', - -- ['ss20'] = 'stylistic set 20', - ['ssty'] = 'script style', -- math - ['subs'] = 'subscript', - ['sups'] = 'superscript', - ['swsh'] = 'swash', - ['titl'] = 'titling', - ['tjmo'] = 'trailing jamo forms', - ['tnam'] = 'traditional name forms', - ['tnum'] = 'tabular figures', - ['trad'] = 'traditional forms', - ['twid'] = 'third widths', - ['unic'] = 'unicase', - ['valt'] = 'alternate vertical metrics', - ['vatu'] = 'vattu variants', - ['vert'] = 'vertical writing', - ['vhal'] = 'alternate vertical half metrics', - ['vjmo'] = 'vowel jamo forms', - ['vkna'] = 'vertical kana alternates', - ['vkrn'] = 'vertical kerning', - ['vpal'] = 'proportional alternate vertical metrics', - ['vrt2'] = 'vertical rotation', - ['zero'] = 'slashed zero', - - ['trep'] = 'traditional tex replacements', - ['tlig'] = 'traditional tex ligatures', - - ['ss..'] = 'stylistic set ..', - ['cv..'] = 'character variant ..', - ['js..'] = 'justification ..', + ["aalt"] = "access all alternates", + ["abvf"] = "above-base forms", + ["abvm"] = "above-base mark positioning", + ["abvs"] = 
"above-base substitutions", + ["afrc"] = "alternative fractions", + ["akhn"] = "akhands", + ["blwf"] = "below-base forms", + ["blwm"] = "below-base mark positioning", + ["blws"] = "below-base substitutions", + ["c2pc"] = "petite capitals from capitals", + ["c2sc"] = "small capitals from capitals", + ["calt"] = "contextual alternates", + ["case"] = "case-sensitive forms", + ["ccmp"] = "glyph composition/decomposition", + ["cfar"] = "conjunct form after ro", + ["cjct"] = "conjunct forms", + ["clig"] = "contextual ligatures", + ["cpct"] = "centered cjk punctuation", + ["cpsp"] = "capital spacing", + ["cswh"] = "contextual swash", + ["curs"] = "cursive positioning", + ["dflt"] = "default processing", + ["dist"] = "distances", + ["dlig"] = "discretionary ligatures", + ["dnom"] = "denominators", + ["dtls"] = "dotless forms", -- math + ["expt"] = "expert forms", + ["falt"] = "final glyph alternates", + ["fin2"] = "terminal forms #2", + ["fin3"] = "terminal forms #3", + ["fina"] = "terminal forms", + ["flac"] = "flattened accents over capitals", -- math + ["frac"] = "fractions", + ["fwid"] = "full width", + ["half"] = "half forms", + ["haln"] = "halant forms", + ["halt"] = "alternate half width", + ["hist"] = "historical forms", + ["hkna"] = "horizontal kana alternates", + ["hlig"] = "historical ligatures", + ["hngl"] = "hangul", + ["hojo"] = "hojo kanji forms", + ["hwid"] = "half width", + ["init"] = "initial forms", + ["isol"] = "isolated forms", + ["ital"] = "italics", + ["jalt"] = "justification alternatives", + ["jp04"] = "jis2004 forms", + ["jp78"] = "jis78 forms", + ["jp83"] = "jis83 forms", + ["jp90"] = "jis90 forms", + ["kern"] = "kerning", + ["lfbd"] = "left bounds", + ["liga"] = "standard ligatures", + ["ljmo"] = "leading jamo forms", + ["lnum"] = "lining figures", + ["locl"] = "localized forms", + ["ltra"] = "left-to-right alternates", + ["ltrm"] = "left-to-right mirrored forms", + ["mark"] = "mark positioning", + ["med2"] = "medial forms #2", + ["medi"] = "medial forms", + ["mgrk"] = "mathematical greek", + ["mkmk"] = "mark to mark positioning", + ["mset"] = "mark positioning via substitution", + ["nalt"] = "alternate annotation forms", + ["nlck"] = "nlc kanji forms", + ["nukt"] = "nukta forms", + ["numr"] = "numerators", + ["onum"] = "old style figures", + ["opbd"] = "optical bounds", + ["ordn"] = "ordinals", + ["ornm"] = "ornaments", + ["palt"] = "proportional alternate width", + ["pcap"] = "petite capitals", + ["pkna"] = "proportional kana", + ["pnum"] = "proportional figures", + ["pref"] = "pre-base forms", + ["pres"] = "pre-base substitutions", + ["pstf"] = "post-base forms", + ["psts"] = "post-base substitutions", + ["pwid"] = "proportional widths", + ["qwid"] = "quarter widths", + ["rand"] = "randomize", + ["rclt"] = "required contextual alternates", + ["rkrf"] = "rakar forms", + ["rlig"] = "required ligatures", + ["rphf"] = "reph form", + ["rtbd"] = "right bounds", + ["rtla"] = "right-to-left alternates", + ["rtlm"] = "right to left math", -- math + ["ruby"] = "ruby notation forms", + ["salt"] = "stylistic alternates", + ["sinf"] = "scientific inferiors", + ["size"] = "optical size", + ["smcp"] = "small capitals", + ["smpl"] = "simplified forms", + -- ["ss01"] = "stylistic set 1", + -- ["ss02"] = "stylistic set 2", + -- ["ss03"] = "stylistic set 3", + -- ["ss04"] = "stylistic set 4", + -- ["ss05"] = "stylistic set 5", + -- ["ss06"] = "stylistic set 6", + -- ["ss07"] = "stylistic set 7", + -- ["ss08"] = "stylistic set 8", + -- ["ss09"] = "stylistic set 9", + -- ["ss10"] = 
"stylistic set 10", + -- ["ss11"] = "stylistic set 11", + -- ["ss12"] = "stylistic set 12", + -- ["ss13"] = "stylistic set 13", + -- ["ss14"] = "stylistic set 14", + -- ["ss15"] = "stylistic set 15", + -- ["ss16"] = "stylistic set 16", + -- ["ss17"] = "stylistic set 17", + -- ["ss18"] = "stylistic set 18", + -- ["ss19"] = "stylistic set 19", + -- ["ss20"] = "stylistic set 20", + ["ssty"] = "script style", -- math + ["stch"] = "stretching glyph decomposition", + ["subs"] = "subscript", + ["sups"] = "superscript", + ["swsh"] = "swash", + ["titl"] = "titling", + ["tjmo"] = "trailing jamo forms", + ["tnam"] = "traditional name forms", + ["tnum"] = "tabular figures", + ["trad"] = "traditional forms", + ["twid"] = "third widths", + ["unic"] = "unicase", + ["valt"] = "alternate vertical metrics", + ["vatu"] = "vattu variants", + ["vert"] = "vertical writing", + ["vhal"] = "alternate vertical half metrics", + ["vjmo"] = "vowel jamo forms", + ["vkna"] = "vertical kana alternates", + ["vkrn"] = "vertical kerning", + ["vpal"] = "proportional alternate vertical metrics", + ["vrt2"] = "vertical rotation", + ["zero"] = "slashed zero", + + ["trep"] = "traditional tex replacements", + ["tlig"] = "traditional tex ligatures", + + ["ss.."] = "stylistic set ..", + ["cv.."] = "character variant ..", + ["js.."] = "justification ..", ["dv.."] = "devanagari ..", -} + ["ml.."] = "malayalam ..", +} --[[ [features] ]] local baselines = { - ['hang'] = 'hanging baseline', - ['icfb'] = 'ideographic character face bottom edge baseline', - ['icft'] = 'ideographic character face tope edige baseline', - ['ideo'] = 'ideographic em-box bottom edge baseline', - ['idtp'] = 'ideographic em-box top edge baseline', - ['math'] = 'mathmatical centered baseline', - ['romn'] = 'roman baseline' -} + ["hang"] = "hanging baseline", + ["icfb"] = "ideographic character face bottom edge baseline", + ["icft"] = "ideographic character face tope edige baseline", + ["ideo"] = "ideographic em-box bottom edge baseline", + ["idtp"] = "ideographic em-box top edge baseline", + ["math"] = "mathematical centered baseline", + ["romn"] = "roman baseline" +} --[[ [baselines] ]] local swapped = function (h) local r = { } @@ -946,20 +1147,26 @@ local utfchar = unicode.utf8.char local otf = handlers and handlers.otf --- filled in later during initialization local normalized = { - substitution = "substitution", - single = "substitution", - ligature = "ligature", - alternate = "alternate", - multiple = "multiple", - kern = "kern", + substitution = "substitution", + single = "substitution", + ligature = "ligature", + alternate = "alternate", + multiple = "multiple", + kern = "kern", + pair = "pair", + chainsubstitution = "chainsubstitution", + chainposition = "chainposition", } local types = { - substitution = "gsub_single", - ligature = "gsub_ligature", - alternate = "gsub_alternate", - multiple = "gsub_multiple", - kern = "gpos_pair", + substitution = "gsub_single", + ligature = "gsub_ligature", + alternate = "gsub_alternate", + multiple = "gsub_multiple", + kern = "gpos_pair", + pair = "gpos_pair", + chainsubstitution = "gsub_contextchain", + chainposition = "gpos_contextchain", } setmetatableindex(types, function(t,k) t[k] = k return k end) -- "key" @@ -967,7 +1174,7 @@ setmetatableindex(types, function(t,k) t[k] = k return k end) -- "key" --- stop locals for addfeature() local everywhere = { ["*"] = { ["*"] = true } } -- or: { ["*"] = { "*" } } -local noflags = { } +local noflags = { false, false, false, false } local tohash = table.tohash diff --git 
a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-init.lua b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-init.lua index d4711527e99..7ed9c960a6e 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-init.lua +++ b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-init.lua @@ -247,21 +247,21 @@ local context_modules = { { ctx, "font-con" }, { ltx, "luatex-fonts-enc" }, { ctx, "font-cid" }, - { ctx, "font-map" }, { ltx, "luatex-fonts-syn" }, { ltx, "luatex-fonts-tfm" }, { ctx, "font-oti" }, { ctx, "font-otf" }, { ctx, "font-otb" }, - { ltx, "font-inj" }, + { ctx, "font-inj" }, { ltx, "luatex-fonts-ota" }, - { ltx, "font-otn" }, - { ctx, "font-otp" }, --> since 2013-04-23 + { ctx, "font-otn" }, + { ctx, "font-otp" }, { ltx, "luatex-fonts-lua" }, { ctx, "font-def" }, { ltx, "luatex-fonts-def" }, { ltx, "luatex-fonts-ext" }, { ltx, "luatex-fonts-cbk" }, + { ctx, "font-map" }, } --[[context_modules]] @@ -332,7 +332,7 @@ local init_main = function () fontloader = tostring (fontloader) if fontloader == "reference" then - logreport ("log", 4, "init", "Using reference fontloader.") + logreport ("log", 0, "init", "Using reference fontloader.") load_fontloader_module (luaotfload.fontloader_package) elseif fontloader == "default" then @@ -345,7 +345,7 @@ local init_main = function () load_fontloader_module (luaotfload.fontloader_package) elseif fontloader == "unpackaged" then - logreport ("both", 4, "init", + logreport ("log", 0, "init", "Loading fontloader components individually.") --- The loading sequence is known to change, so this might have to be --- updated with future updates. Do not modify it though unless there is @@ -388,41 +388,41 @@ local init_main = function () load_fontloader_module "fonts-cbk" elseif fontloader == "context" then - logreport ("both", 2, "init", - "Attempting to load Context modules in lookup path.") + logreport ("log", 0, "init", + "Loading Context modules in lookup path.") load_context_modules () elseif lfs.isdir (fontloader) then - logreport ("both", 2, "init", - "Attempting to load Context files under prefix “%s”.", + logreport ("log", 0, "init", + "Loading Context files under prefix “%s”.", fontloader) load_context_modules (fontloader) elseif lfs.isfile (fontloader) then - logreport ("both", 2, "init", - "Attempting to load fontloader from absolute path “%s”.", + logreport ("log", 0, "init", + "Loading fontloader from absolute path “%s”.", fontloader) local _void = require (fontloader) elseif kpselookup (fontloader) then local path = kpselookup (fontloader) - logreport ("both", 2, "init", - "Attempting to load fontloader “%s” from kpse-resolved path “%s”.", + logreport ("log", 0, "init", + "Loading fontloader “%s” from kpse-resolved path “%s”.", fontloader, path) local _void = require (path) elseif fontloader then - logreport ("log", 4, "init", + logreport ("log", 0, "init", "Using predefined fontloader “%s”.", fontloader) load_fontloader_module (fontloader) else - logreport ("log", 4, "init", + logreport ("log", 0, "init", "No match for requested fontloader “%s”.", fontloader) fontloader = luaotfload.fontloader_package - logreport ("log", 4, "init", + logreport ("log", 0, "init", "Defaulting to predefined fontloader “%s”.", fontloader) load_fontloader_module (fontloader) diff --git a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-letterspace.lua b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-letterspace.lua index 0d6b8e8264e..e4776c3ed11 100644 --- a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-letterspace.lua +++ 
diff --git a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-letterspace.lua b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-letterspace.lua
index 0d6b8e8264e..e4776c3ed11 100644
--- a/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-letterspace.lua
+++ b/Master/texmf-dist/tex/luatex/luaotfload/luaotfload-letterspace.lua
@@ -40,6 +40,16 @@ local setnext = nodedirect.setnext or field_setter "next"
 local getprev = nodedirect.getprev or field_getter "prev"
 local setprev = nodedirect.setprev or field_setter "prev"
 
+--- since r5336
+local getboth = nodedirect.getboth or function (n)
+  return getprev (n), getnext (n)
+end
+
+local setlink = nodedirect.setlink or function (a, b)
+  setnext (a, b)
+  setprev (b, a)
+end
+
 local getdisc = nodedirect.getdisc or field_getter "disc"
 local setdisc = nodedirect.setdisc or field_setter "disc"
@@ -325,27 +335,26 @@ kerncharacters = function (head)
       if c then
         if keepligature and keepligature(start) then
           -- keep 'm
+          c = nil
         else
-          --- c = kerncharacters (c) --> taken care of after replacing
-          local s = start
-          local p = getprev(s)
-          local n = getnext(s)
-          local tail = find_node_tail(c)
-          if p then
-            setnext(p, c)
-            p = getprev(c)
-          else
-            head = c
-          end
-          if n then
-            tail = getprev(n)
+          while c do
+            local s = start
+            local p, n = getboth (s)
+            if p then
+              setlink (p, c)
+            else
+              head = c
+            end
+            if n then
+              local tail = find_node_tail(c)
+              setlink (tail, n)
+            end
+            start = c
+            setfield(s, "components", nil)
+            free_node(s)
+            done = true
+            c = getfield (start, "components")
           end
-          setnext(tail, n)
-          start = c
-          setfield(s, "components", nil)
-          -- we now leak nodes !
-          -- free_node(s)
-          done = true
         end
       end
       -- kern ligature
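The rewritten branch walks the chain of ligature components: getboth fetches both neighbours of the ligature glyph in one call, setlink splices the component list in, and the old node is freed instead of leaked. A condensed illustration of that splicing pattern is sketched below; the function name replace_with_components is made up for the example, the node.direct calls exist in current LuaTeX, and older binaries would need the r5336 fallbacks defined above.

-- Illustration only (not the luaotfload code): splice a glyph's component
-- list into the node list in place of the glyph node itself.
local D       = node.direct
local getboth = D.getboth or function (n) return D.getprev (n), D.getnext (n) end
local setlink = D.setlink or function (a, b) D.setnext (a, b) D.setprev (b, a) end

local function replace_with_components (head, s)
  local c = D.getfield (s, "components")      -- component list of the ligature
  if not c then return head, s end
  local p, n = getboth (s)
  if p then setlink (p, c) else head = c end  -- relink predecessor, or move the head
  if n then setlink (D.tail (c), n) end       -- relink successor after the last component
  D.setfield (s, "components", nil)           -- detach the components before freeing
  D.free (s)                                  -- the old glyph node is freed, not leaked
  return head, c
end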
"fontloader-font-otf.lua", "184167365757f696e3457f276e7f480f" }, { "fontloader-font-otb.lua", "93461f2f412a9b33b35a273c09b64291" }, { "fontloader-font-map.lua", "a20a454f933095d78faf1d5f8200d025" }, { "fontloader-font-ini.lua", "179f0a75cda26696c1b1cd6d7fe0d8ae" }, @@ -42,9 +42,9 @@ return { { "fontloader-font-afm.lua", "ece4863414d6b38c2e577110c9b55bd3" }, { "fontloader-font-afk.lua", "b36a76ceb835f41f8c05b471000ddc14" }, { "fontloader-data-con.lua", "675f5a0af45ffb3e0d2e2ab5d6c2e47b" }, - { "fontloader-basics-nod.lua", "af682899d202229c0c1b859a07384b8c" }, + { "fontloader-basics-nod.lua", "50d00dd271a4af9b00cccf6ca433827a" }, { "fontloader-tl2014.lua", "5bce3c40d6841694d6be5ad3680d5b10" }, - { "fontloader-reference.lua", "76f0ecc5232ab340103668f4c103e69a" }, + { "fontloader-reference.lua", "b33b4821125ab9613d4a32e4357b6d0b" }, { "fontloader-basics-gen.lua", "aa7229a3a0ae35d19bbe46e71d65bd2c" }, { "mktests", "918cb50be9ee8bd645ac1a27dc501e8c" }, { "mkstatus", "97dbb3537f8e65922d1e1606c4a031e5" }, @@ -59,20 +59,20 @@ return { { "luaotfload-main.lua", "fc8da63b610c808478a2a4a096c0fa9e" }, { "luaotfload-log.lua", "767adc12efe986322d16dd85f05b3e75" }, { "luaotfload-loaders.lua", "40e9e162b84e22db29cd79afb8a34761" }, - { "luaotfload-letterspace.lua", "b01dd0cd37deb2dbad97adbf738e9dd2" }, - { "luaotfload-init.lua", "e31c728f5e86c858909c491b39ba0fbd" }, - { "luaotfload-features.lua", "7c6bbba82e2d91ea63b5cf9ae41af3f5" }, + { "luaotfload-letterspace.lua", "83cad4a8a0340ebda84b2a9db6f5324d" }, + { "luaotfload-init.lua", "6ea8a870df5e5cab1e077b548d1e0d71" }, + { "luaotfload-features.lua", "727c96d713d37ca9baaeab0110dc4b9b" }, { "luaotfload-diagnostics.lua", "7b35c9f91e3e73fc5a61dbfe1f0e7ad9" }, { "luaotfload-database.lua", "ffb5c6497be2c518eb28fcefaa3175c1" }, - { "luaotfload-configuration.lua", "04eb776c853a22fc49dcf0e52da85dda" }, + { "luaotfload-configuration.lua", "9ec269fd5b32e14d7bedc90c9becb1a9" }, { "luaotfload-colors.lua", "b68a52ce0b046343cb689d981d0c5a7a" }, { "luaotfload-auxiliary.lua", "b63913f1027fdd60b89afc90c2ca9713" }, }, ["notes"]={ ["committer"]="Philipp Gesang <phg@phi-gamma.net>", - ["description"]="v2.6-fix-4", - ["loader"]="fontloader-2015-12-23.lua", - ["revision"]="9320186751b6c6a1113f41331716d3b68d73af7a", - ["timestamp"]="2015-12-23 22:41:05 +0100", + ["description"]="v2.6-fix-5", + ["loader"]="fontloader-2016-01-22.lua", + ["revision"]="55799dad5b8a2716aea1cfee28b4ce093e587f8a", + ["timestamp"]="2016-01-22 00:48:25 +0100", }, }
\ No newline at end of file
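The hashes table pairs every shipped file with its MD5 sum, presumably what the mkstatus helper records at build time and what a diagnostics run can check against later. A hedged sketch of such a self-check is given below; the require path, the kpse program name, and the choice between md5.sumhexa and md5.hex are assumptions about the environment, not part of luaotfload itself.

-- Hedged sketch: verify installed files against the hashes recorded in
-- luaotfload-status.lua.  Intended for a texlua-like environment.
kpse.set_program_name ("luatex")

local function md5_of_file (path)
  local f = io.open (path, "rb")
  if not f then return nil end
  local data = f:read ("*a")
  f:close ()
  if md5.sumhexa then return md5.sumhexa (data) end  -- Kepler md5 spelling
  return md5.hex (data)                              -- LuaTeX built-in spelling (assumed)
end

local status = require "luaotfload-status"           -- assumes the file is on the Lua path

for _, entry in ipairs (status.hashes) do
  local name, expected = entry [1], entry [2]
  local path = kpse.find_file (name) or name          -- fall back to the bare name
  local actual = md5_of_file (path)
  if actual and actual ~= expected then
    print (string.format ("%s: expected %s, got %s", name, expected, actual))
  end
end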