Diffstat (limited to 'Master/texmf-dist/doc/latex/unicode-math-input/unicode-math-input-script.py')
-rw-r--r-- | Master/texmf-dist/doc/latex/unicode-math-input/unicode-math-input-script.py | 311 |
1 file changed, 191 insertions, 120 deletions
diff --git a/Master/texmf-dist/doc/latex/unicode-math-input/unicode-math-input-script.py b/Master/texmf-dist/doc/latex/unicode-math-input/unicode-math-input-script.py
index a3dbbb46254..34fd90ff5b9 100644
--- a/Master/texmf-dist/doc/latex/unicode-math-input/unicode-math-input-script.py
+++ b/Master/texmf-dist/doc/latex/unicode-math-input/unicode-math-input-script.py
@@ -1,6 +1,46 @@
 #!/bin/python3
-# This file is not used while TeX is running. It's for generating unicode-math-input-table.tex file only.
-# This requires pythonimmediate (not sure which version is compatible but commit 63f94476a5cb11e33db1215a9bf7c17657d9773d on Python 3.10.10 is)
+"""
+This file is not used while TeX is running. It's for generating unicode-math-input-table.tex file only.
+This requires pythonimmediate (not sure which version is compatible but
+commit 020068db8a966c138b5b0b93695c0fefdef03d0a on Python 3.11.3 is)
+
+To generate: run::
+    python3 unicode-math-input-script.py > unicode-math-input-table.tex
+
+How does it work?
+
+The mapping is determined from multiple sources:
+
+* The unicode-math package itself, which defines a "command → Unicode character" mapping.
+  This does not always work because different TeX packages may name the command differently.
+
+* Synonym table, obtained by looking at STIX's command definition
+
+* TeX's glyph → unicode mapping (used to facilitate copy-paste in PDF),
+  e.g. /usr/share/texmf-dist/tex/generic/pdftex/glyphtounicode.tex
+  This should be good, but is currently not used. Furthermore, not all TeX commands are implemented by
+  getting a single character from a font...
+
+How does the Unicode mapping work?
+
+First there's the `pdftex.map` file, then there's umsa.cmap for msam10.tfm/afm/pfm/pfb/mf (metafont source file)
+
+/usr/share/texmf-dist/fonts/source/public/amsfonts/symbols/msam10.mf
+    /usr/share/texmf-dist/fonts/source/public/amsfonts/symbols/asymbols.mf
+
+/usr/share/texmf-dist/fonts/afm/public/amsfonts/symbols/msam10.afm
+→ plaintext-looking file may work
+
+/usr/share/texmf-dist/fonts/tfm/public/amsfonts/symbols/msam10.tfm
+/usr/share/texmf-dist/fonts/type1/public/amsfonts/symbols/msam10.pfm
+/usr/share/texmf-dist/fonts/type1/public/amsfonts/symbols/msam10.pfb
+
+The glyphtounicode.tex may be a bit problematic...
+    https://tex.stackexchange.com/questions/66300/how-to-fix-missing-or-incorrect-mappings-from-glyphtounicode-tex
+
+See also: section 3.2 How to find a table of correspondences?
+    in https://tex.stackexchange.com/a/628285/250119
+
+"""
 
 from __future__ import annotations
@@ -8,7 +48,8 @@ from pythonimmediate.engine import ChildProcessEngine
 from pythonimmediate.engine import default_engine
 from pythonimmediate import*
 import pythonimmediate
-from collections import defaultdict
+from collections import defaultdict, Counter
+from itertools import groupby
 import os
 import json
 import subprocess
@@ -25,11 +66,8 @@ print(r'% This file is automatically generated from unicode-math-input-script.py
 
 # ======== start a luatex engine
 
-engine=ChildProcessEngine("luatex", env={**os.environ, "hash_extra": "0"})  # https://tex.stackexchange.com/questions/574607/tex-hashtokens-incomplete
-default_engine.set_engine(engine)
-
-Catcode.active("a").meaning_str()
+default_engine.set_engine(ChildProcessEngine("luatex", env={**os.environ, "hash_extra": "0"}))
 
 
 """
@@ -93,10 +131,108 @@ for line in lines:
     assert match
     unicode_char=chr(int(match[1], 16))
     csname=match[2]
-    #unicode_math_table_.append(Item(unicode_char=unicode_char, csname=csname))
+    #unicode_math_table_.append(Item(unicode_char=unicode_char, csname))
     unicode_math_table_[unicode_char].append(csname)
 unicode_math_table={unicode_char: tuple(csnames) for unicode_char, csnames in unicode_math_table_.items()}
 
+# ======== extract unicode-math synonyms
+
+def control_sequences()->list[str]:
+    return (lua_try_eval(r"""
+    do
+        local s={}
+        for k, v in pairs(tex.hashtokens()) do
+            if v:find("^[A-Za-z]+$") then
+                s[v]=0
+            end
+        end
+        local t={}
+        for v, _ in pairs(s) do table.insert(t, v) end
+        return table.concat(t, "\x00")
+    end
+    """) or "").split("\x00")
+
+extra_synonyms_list: list[list[str]] = [
+        ["adots", "iddots"],
+        ["unicodecdots", "cdots"],  # https://github.com/wspr/unicode-math/issues/571
+        ["unicodeellipsis", "ldots"],
+        #["llbracket", "lBrack"],
+        #["rrbracket", "rBrack"],
+        ]
+
+
+
+c=control_sequences()
+m={x: T[x].meaning_str() for x in c}
+
+pattern=re.compile(r'\\protected macro:->\\([A-Za-z]+) ?')
+
+extra_synonyms_list += [[c, match[1]] for c, m in m.items()
+        if (match:=pattern.fullmatch(m))
+        ]
+
+def same_meaning_control_sequences(meaning: dict[str, str])->list[list[str]]:
+    return [
+            l
+            for m, l0 in groupby(sorted(c, key=lambda x: meaning[x]), lambda x: meaning[x])
+            if m!="undefined"
+            for l in [[*l0]]
+            if len(l)>=2
+            ]
+
+extra_synonyms_list += same_meaning_control_sequences(m)
+
+# ======== extract amsmath&stix synonyms
+
+
+m_values=[]
+for preamble in [
+r"""
+\documentclass{article}
+\usepackage{amsmath}
+\usepackage{amssymb}
+\usepackage{amsfonts}
+\begin{document}
+""",
+r"""
+\documentclass{article}
+\usepackage{stix}
+\begin{document}
+"""
+]:
+    with ChildProcessEngine("luatex", env={**os.environ, "hash_extra": "0"}) as e, default_engine.set_engine(e):
+        execute(preamble)
+        c=control_sequences()
+        m={x: T[x].meaning_str() for x in c}
+        extra_synonyms_list += same_meaning_control_sequences(m)
+        m_values.append(m)
+[amsmath_meaning, stix_meaning]=m_values
+
+# ======== build extra_synonyms table
+
+while True:
+    tmp=Counter([x for l in extra_synonyms_list for x in l])
+    [(item, frequency)]=tmp.most_common(1)
+    if frequency==1: break
+    assert frequency>1
+    extra_synonyms_list=[
+            # the group that contain item
+            [*{x for l in extra_synonyms_list if item in l for x in l}]
+            ] + [
+            # remaining groups
+            l for l in extra_synonyms_list if item not in l]
+
+
+extra_synonyms_list=sorted([sorted(l) for l in {frozenset(
+    item for item in l
+    if item not in ("dotsc", "dotsm", "dotsb", "dots")  # some simple filtering -- we will just use \cdots and \ldots
+    ) for l in extra_synonyms_list} if len(l)>1])  # deduplicate
+
+tmp=Counter(sum(extra_synonyms_list, []))
+assert tmp.most_common()[0][1]==1, tmp
+
+extra_synonyms = {v: u for u in extra_synonyms_list for v in u}
+
 # ======== check how much of the table is valid on unicode-math/luatex
 
 def getdelcode(x: str)->tuple[int, int, int, int]:
@@ -289,11 +425,11 @@ math_alphabet_redundant_greek = {
     r"\Zeta" : "Z",
 }
 
-extra_synonyms = {v: u for u in
-    [
-        ["adots", "iddots"]
-    ]
-    for v in u}
+
+ASCII_symbol_synonym = {
+        "minus": "-",
+        "mid": "|",
+        }
 
 ##
@@ -306,18 +442,27 @@ for i in range(ord("!"), ord("~")+1):
     if fullch in remaining_chars: remaining_chars.remove(fullch)
     print(r'\__umi_define_char{' + fullch + r'}{\char'+str(i)+' }')
 
-defined_csnames = {x for l in unicode_math_table.values() for x in l}
+defined_csnames = {x for l in unicode_math_table.values() for x in l} | {*stix_meaning} | {*amsmath_meaning}
 
 pdf_engine=ChildProcessEngine("pdftex")
-execute(r"""
+with default_engine.set_engine(pdf_engine): execute(r"""
 \documentclass{article}
 \usepackage{amsmath}
 \usepackage{amssymb}
 \usepackage{amsfonts}
 \usepackage{mathrsfs}
 \begin{document}
-""", engine=pdf_engine)
+""")
+
+def remove_not(a: str)->Optional[str]:
+    global defined_csnames
+    if a in (r"\ni", r"\nu"): return None
+    if a.startswith(r"\not") and a.removeprefix(r"\not") in defined_csnames:
+        return '\\' + a.removeprefix(r"\not")
+    elif a.startswith(r"\n") and a.removeprefix(r"\n") in defined_csnames:
+        return '\\' + a.removeprefix(r"\n")
+    else: return None
 
 for unicode_char, csnames_ in unicode_math_table.items():
     csnames = [*csnames_]
@@ -374,16 +519,18 @@
     for csname in [*csnames]:
         if csname in extra_synonyms: csnames+=extra_synonyms[csname]
-    csnames=[*set(csnames)]
+    csnames=[*{csname: None for csname in csnames}]
 
     items1=[]
     for csname in csnames:
         if not is_delimiter:
-            assert "delimiter" not in T[csname].meaning_str(engine=pdf_engine), (unicode_char, csname)
+            with default_engine.set_engine(pdf_engine):
+                assert "delimiter" not in T[csname].meaning_str(), (unicode_char, csname)
            # that is the symbol is not a delimiter in pdf_engine either (check is not particularly reliable but okay)
 
        for prefix, replacement in math_alphabet_translate.items():
            if csname.startswith(prefix):
+               assert csname not in ASCII_symbol_synonym
                cs = math_alphabet_csname_translation[csname.removeprefix(prefix)]
                def wrap_in_alphabet_selector(cs: str)->str:
                    if replacement is None: return cs
@@ -398,30 +545,39 @@
                break
        else:
            items1.append("\\" + csname)
+           if csname in ASCII_symbol_synonym: items1+=ASCII_symbol_synonym[csname]
+
+
     assert items1
+    if is_delimiter and len(items1)>1:
+        print("Warning: Synonym for delimiter not supported?", unicode_char, delimiter, items1, file=sys.stderr)
+        del items1[1:]
     if len(items1)==1:
        a = items1[0]
-       if a.startswith(r"\not") and a.removeprefix(r"\not") in defined_csnames:
-           assert not is_delimiter
-           b='\\' + a.removeprefix(r"\not")
-           print(f"\\__umi_define_char{{{optional_space}{unicode_char}}}{{\__umi_alternatives_not{a}{b}}}")
-           a.removeprefix(r"\not")
-       elif a.startswith(r"\n") and a.removeprefix(r"\n") in defined_csnames:
+       b = remove_not(a)
+       if b is not None:
           assert not is_delimiter
-          b='\\' + a.removeprefix(r"\n")
          print(f"\\__umi_define_char{{{optional_space}{unicode_char}}}{{\__umi_alternatives_not{a}{b}}}")
-          a.removeprefix(r"\n")
       else:
          if is_delimiter:
            print(f"\\__umi_define_char_maybe_delimiter{{{optional_space}{unicode_char}}}{{{a}}}")
         else:
           print(f"\\__umi_define_char{{{optional_space}{unicode_char}}}{{{a}}}")
-   else:
-      assert not is_delimiter, (unicode_char, delimiter)
-      assert len(items1)==2, items1
+   elif len(items1)==2:
      assert re.fullmatch(r'\\[a-zA-Z]+', items1[0]), items1
-     assert re.fullmatch(r'\\[a-zA-Z]+', items1[1]), items1
-     print(f"\\__umi_define_char{{{optional_space}{unicode_char}}}{{\\__umi_alternatives{items1[0]}{items1[1]}}}")
+     assert re.fullmatch(r'\\[a-zA-Z]+|[^a-zA-Z]', items1[1]), items1
+     b=remove_not(items1[0])
+     if b is not None:
+         d=remove_not(items1[1])
+         assert d is not None, items1
+         print(f"\\__umi_define_char{{{optional_space}{unicode_char}}}{{\\__umi_alternatives_not_two{items1[0]}{items1[1]}{b}{d}}}")
+     else:
+         print(f"\\__umi_define_char{{{optional_space}{unicode_char}}}{{\\__umi_alternatives{items1[0]}{items1[1]}}}")
+   else:
+     assert len(items1)>=3, items1
+     assert all(remove_not(x) is None for x in items1), items1
+     assert all(re.fullmatch(r'\\[a-zA-Z]+', c) for c in items1), items1
+     print(f"\\__umi_define_char{{{optional_space}{unicode_char}}}{{\\__umi_alternatives_m{{{''.join(items1)}}}}}")
 
 ##
@@ -431,94 +587,12 @@
 sys.exit()
 
 # ======== part below are draft.
 
-T.longdivisionsign.meaning_str()
-
-
-
-T.mathexclam.meaning_str()
-
-T.symoperators.meaning_str()
-
-T.perp.meaning_str()
-
-umathcode[" ̅"[1]]
-
-BalancedTokenList(r'\the\Udelcode `̅').expand_o().int()
-
-x = BalancedTokenList(r'\the\Udelcode `!').expand_o().int()
-print(hex(x))
-
-hex(BalancedTokenList(r'\the\delcode `!').expand_o().int())
-
-
-
-
-
-if 0:
-
-    data = TokenList([r"\directlua", TokenList.fstr(
-        r"""
-        for k, v in pairs(tex.hashtokens()) do
-            tex.print(-2, v .. "\0")
-        end
-        """
-        )]).expand_x().str()
-    control_sequences = data.split("\x00")
-    assert control_sequences[-1]==""
-    del control_sequences[-1]
-
-
-Path("/tmp/control_sequences.json").write_text(json.dumps(control_sequences))  # type: ignore
-control_sequences = json.loads(Path("/tmp/control_sequences.json").read_text())  # type: ignore
-
-
-Path("/tmp/control_sequences_unicode_math.json").write_text(json.dumps(control_sequences))
-control_sequences = json.loads(Path("/tmp/control_sequences_unicode_math.json").read_text())
-assert "mitrho" in control_sequences
-
-if 0:
-    # try some other random things
-
-    control_sequences = data.split("\x00")
-    assert control_sequences[-1]==""
-    del control_sequences[-1]
-
-
-
-    BalancedTokenList(r'\the\Umathcode `′').expand_o().int() == 0x1000000
-
-
-control_sequences
-
-BalancedTokenList(r'\the\mathcode`⨁').expand_o().int()
-
-Catcode.active("⨁").meaning_str(engine=engine)
-
-Catcode.active("′").meaning_str(engine=engine)
-
-T.bigoplus_sym.meaning_str()
-
-T.bigoplusop.meaning_str()
-
-T.bigoplus.meaning_str()
-
-
-
-T.rho.meaning_str()
-
-T.mitrho.meaning_str()
-
-T.bigoplus.meaning_str()
-
-engine._stdout_lines[-100:] + [bytes(engine._stdout_buffer)]
-
-
-
-T.mscrA.meaning_str()
-
-
+default_engine.set_engine(ChildProcessEngine("luatex", env={**os.environ, "hash_extra": "0"}, autorestart=True))
+execute(r'\documentclass{article}\usepackage{unicode-math}\begin{document}')
+execute(r'\documentclass{article}\usepackage{amsmath,amssymb,amsfonts}\begin{document}')
+execute(r'\documentclass{article}\usepackage{amsmath}\usepackage{amssymb}\usepackage{amsfonts}\usepackage{tikz}')
 
 
 @functools.lru_cache(maxsize=None)
@@ -586,6 +660,3 @@ BalancedTokenList(r"\def\aa{bb}").execute(engine=test_engine)
 
 BalancedTokenList(r"\csname\noexpand\aa\endcsname").expand_o(engine=test_engine)  # give error
 BalancedTokenList(r"\csname\string\aa\endcsname").expand_o(engine=test_engine)  # \[\aa] as expected
-T.iddots.meaning_str(engine=engine)
-
-T.adots.meaning_str(engine=engine)
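Two notes on the logic added in this revision, with standalone sketches that are not part of the shipped script.

First, the "build extra_synonyms table" step: the `while True` loop keeps finding a command name that still occurs in more than one synonym group (via `Counter.most_common`) and fuses every group containing it, until all names are unique. The sketch below mirrors that idea; the function name `merge_overlapping` and the toy groups are made up for illustration::

    from collections import Counter

    def merge_overlapping(groups: list[list[str]]) -> list[list[str]]:
        """Fuse groups that share a name until every name occurs exactly once."""
        groups = [list(g) for g in groups]
        while True:
            counts = Counter(x for g in groups for x in g)
            item, frequency = counts.most_common(1)[0]
            if frequency == 1:
                return groups
            # merge all groups containing the repeated name, keep the rest as-is
            merged = sorted({x for g in groups if item in g for x in g})
            groups = [merged] + [g for g in groups if item not in g]

    print(merge_overlapping([["adots", "iddots"], ["iddots", "upadots"], ["ne", "neq"]]))
    # [['adots', 'iddots', 'upadots'], ['ne', 'neq']]

Second, the new docstring mentions glyphtounicode.tex as a possible (currently unused) source of a glyph-name → Unicode mapping. Assuming its entries look like \pdfglyphtounicode{name}{XXXX} with one or more hexadecimal code points in the second argument, a rough reader could be sketched as follows; the path comes from the docstring, while the helper name and the exact regex are illustrative assumptions::

    import re
    from pathlib import Path

    # matches \pdfglyphtounicode{<glyph name>}{<space-separated hex code points>}
    ENTRY = re.compile(r'\\pdfglyphtounicode\{([^}]+)\}\{([^}]*)\}')

    def read_glyphtounicode(path: str) -> dict[str, str]:
        """Map glyph names to the Unicode string given in glyphtounicode.tex."""
        table = {}
        for name, codes in ENTRY.findall(Path(path).read_text()):
            try:
                table[name] = "".join(chr(int(c, 16)) for c in codes.split())
            except ValueError:
                pass  # skip entries that are not plain hex code points
        return table

    # table = read_glyphtounicode("/usr/share/texmf-dist/tex/generic/pdftex/glyphtounicode.tex")
    # table.get("fi")  # a ligature entry, if present in the local file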