Diffstat (limited to 'Master/texmf-dist/tex/context/base/font-map.lua')
-rw-r--r-- | Master/texmf-dist/tex/context/base/font-map.lua | 89
1 files changed, 51 insertions, 38 deletions
diff --git a/Master/texmf-dist/tex/context/base/font-map.lua b/Master/texmf-dist/tex/context/base/font-map.lua
index b3c8da0f6c7..f74e13e8161 100644
--- a/Master/texmf-dist/tex/context/base/font-map.lua
+++ b/Master/texmf-dist/tex/context/base/font-map.lua
@@ -66,21 +66,6 @@ local function makenameparser(str)
     end
 end
 
--- local parser = makenameparser("Japan1")
--- local parser = makenameparser()
--- local function test(str)
---     local b, a = lpegmatch(parser,str)
---     print((a and table.serialize(b)) or b)
--- end
--- test("a.sc")
--- test("a")
--- test("uni1234")
--- test("uni1234.xx")
--- test("uni12349876")
--- test("u123400987600")
--- test("index1234")
--- test("Japan1.123")
-
 local function tounicode16(unicode,name)
     if unicode < 0x10000 then
         return format("%04X",unicode)
@@ -261,38 +246,51 @@ function mappings.addtounicode(data,filename)
                 end
             end
             -- a.whatever or a_b_c.whatever or a_b_c (no numbers) a.b_
+            --
+            -- It is not trivial to find a solution that suits all fonts. We tried several alternatives
+            -- and this one seems to work reasonable also with fonts that use less standardized naming
+            -- schemes. The extra private test is tested by KE and seems to work okay with non-typical
+            -- fonts as well.
+            --
+            -- The next time I look into this, I'll add an extra analysis step to the otf loader (we can
+            -- resolve some tounicodes by looking into the gsub data tables that are bound to glyphs.
+            --
             if not unicode or unicode == "" then
                 local split = lpegmatch(namesplitter,name)
                 local nsplit = split and #split or 0
-                if nsplit >= 2 then
-                    local t, n = { }, 0
-                    for l=1,nsplit do
-                        local base = split[l]
-                        local u = unicodes[base] or unicodevector[base]
-                        if not u then
+                local t, n = { }, 0
+                unicode = true
+                for l=1,nsplit do
+                    local base = split[l]
+                    local u = unicodes[base] or unicodevector[base]
+                    if not u then
+                        break
+                    elseif type(u) == "table" then
+                        if u[1] >= private then
+                            unicode = false
                             break
-                        elseif type(u) == "table" then
-                            n = n + 1
-                            t[n] = u[1]
-                        else
-                            n = n + 1
-                            t[n] = u
                         end
-                    end
-                    if n == 0 then -- done then
-                        -- nothing
-                    elseif n == 1 then
-                        originals[index] = t[1]
-                        tounicode[index] = tounicode16(t[1],name)
+                        n = n + 1
+                        t[n] = u[1]
                     else
-                        originals[index] = t
-                        tounicode[index] = tounicode16sequence(t)
+                        if u >= private then
+                            unicode = false
+                            break
+                        end
+                        n = n + 1
+                        t[n] = u
                     end
-                    nl = nl + 1
-                    unicode = true
+                end
+                if n == 0 then -- done then
+                    -- nothing
+                elseif n == 1 then
+                    originals[index] = t[1]
+                    tounicode[index] = tounicode16(t[1],name)
                 else
-                    -- skip: already checked and we don't want privates here
+                    originals[index] = t
+                    tounicode[index] = tounicode16sequence(t)
                 end
+                nl = nl + 1
             end
             -- last resort (we might need to catch private here as well)
             if not unicode or unicode == "" then
@@ -333,3 +331,18 @@ function mappings.addtounicode(data,filename)
         report_fonts("%s tounicode entries added, ligatures %s",nl+ns,ns)
     end
 end
+
+-- local parser = makenameparser("Japan1")
+-- local parser = makenameparser()
+-- local function test(str)
+--     local b, a = lpegmatch(parser,str)
+--     print((a and table.serialize(b)) or b)
+-- end
+-- test("a.sc")
+-- test("a")
+-- test("uni1234")
+-- test("uni1234.xx")
+-- test("uni12349876")
+-- test("u123400987600")
+-- test("index1234")
+-- test("Japan1.123")
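
A note on the code being patched: the reworked fallback in mappings.addtounicode splits a composite glyph name (something like "f_i.liga" or "a_b_c.sc"), resolves every part through the font's unicode tables, and now gives up as soon as a part resolves into the private area, which is what the new u >= private guards in the second hunk are for. The sketch below illustrates that idea in plain Lua, reduced to an all-or-nothing lookup; splitname, the private threshold and the toy unicodes table are simplified stand-ins rather than the lpeg namesplitter and font data that font-map.lua really uses, so read it as an illustration of the technique, not as the ConTeXt implementation.

-- standalone sketch of the fallback idea: split a composite glyph name such as
-- "f_i.liga" or "a.sc" into its base parts (the dotted suffix is dropped),
-- resolve every part, and only build a ToUnicode hex string when all parts are
-- known and none of them lands in the private area
local format, concat = string.format, table.concat

local private = 0xF0000 -- hypothetical start of the private area used for unresolved glyphs

local function tounicode16(u)
    -- BMP only in this sketch; the real helper also has to encode codepoints
    -- beyond 0xFFFF (as UTF-16 surrogate pairs in the ToUnicode CMap)
    return format("%04X",u)
end

-- hypothetical stand-in for the lpeg-based namesplitter in font-map.lua
local function splitname(name)
    local base, parts = name:match("^([^.]+)") or name, { }
    for part in base:gmatch("[^_]+") do
        parts[#parts+1] = part
    end
    return parts
end

local function resolve(name,unicodes)
    local parts, t = splitname(name), { }
    for i=1,#parts do
        local u = unicodes[parts[i]]
        if not u or u >= private then
            return nil -- unknown or private part: leave the glyph to the last-resort branch
        end
        t[i] = tounicode16(u)
    end
    if #t > 0 then
        return concat(t)
    end
end

local unicodes = { f = 0x66, i = 0x69, a = 0x61 } -- toy lookup table

print(resolve("f_i.liga",unicodes)) -- 00660069
print(resolve("a.sc",unicodes))     -- 0061
print(resolve("x_y",unicodes))      -- nil (unknown parts)

Returning nil mirrors the intent of the new u >= private guards: a codepoint in the private area is not real ToUnicode information, so it is better to produce nothing here than to emit a bogus mapping.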