author	Norbert Preining <norbert@preining.info>	2023-08-23 03:01:02 +0000
committer	Norbert Preining <norbert@preining.info>	2023-08-23 03:01:02 +0000
commit	76d75859475de5b0b64017c9dfc11644a2ece537 (patch)
tree	e2f334df5d811d8fe54215e0591eaf227de58fe0 /support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX
parent	cf3e954514eb50bea0335bdc97d6b76f80f5d03e (diff)
CTAN sync 202308230300
Diffstat (limited to 'support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX')
-rw-r--r--	support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode.pm	708
-rw-r--r--	support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode/Tables.pm	1055
2 files changed, 1245 insertions, 518 deletions
diff --git a/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode.pm b/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode.pm
index 50ae29399a..173db38b02 100644
--- a/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode.pm
+++ b/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode.pm
@@ -2,52 +2,357 @@ use strict;
use warnings;
package LaTeX::ToUnicode;
BEGIN {
- $LaTeX::ToUnicode::VERSION = '0.11';
+ $LaTeX::ToUnicode::VERSION = '0.53';
}
#ABSTRACT: Convert LaTeX commands to Unicode (simplistically)
-
require Exporter;
our @ISA = qw(Exporter);
-our @EXPORT_OK = qw( convert );
+our @EXPORT_OK = qw( convert debuglevel $endcw );
use utf8;
+use Encode;
use LaTeX::ToUnicode::Tables;
+# Terminating a control word (not symbol) the way TeX does: at the
+# boundary between a letter (lookbehind) and a nonletter (lookahead),
+# and then ignore any following whitespace.
+our $endcw = qr/(?<=[a-zA-Z])(?=[^a-zA-Z]|$)\s*/;
+
+# All we need is for debugging to be on or off. And it's pretty random
+# what gets output.
+my $debug = 0;
+sub debuglevel { $debug = shift; }
+sub _debug {
+ return unless $debug;
+ my ($pkgname,$filename,$line,$subr) = caller(1);
+ warn @_, " at $filename:$line (${pkgname}::$subr)\n";
+}
+# The main conversion function.
+#
sub convert {
- my ( $string, %options ) = @_;
- $string = _convert_commands( $string );
- $string = _convert_accents( $string );
- $string = _convert_german( $string ) if $options{german};
- $string = _convert_symbols( $string );
- $string = _convert_specials( $string );
- $string = _convert_ligatures( $string );
- $string = _convert_markups( $string );
- $string =~ s/{(\w*)}/$1/g;
+ my ($string, %options) = @_;
+ #warn debug_hash_as_string("starting with: $string", %options);
+
+ # First, remove leading and trailing horizontal whitespace
+ # on each line of the possibly-multiline string we're given.
+ $string =~ s/^[ \t]*//m;
+ $string =~ s/[ \t]*$//m;
+
+ # For HTML output, must convert special characters that were in the
+ # TeX text (&<>) to their entities to avoid misparsing. We want to
+ # do this first, because conversion of the markup commands might
+ # output HTML tags like <tt>, and we don't want to convert those <>.
+ # Although &lt;tt&gt; works, better to keep the output HTML as
+ # human-readable as we can.
+ #
+ if ($options{html}) {
+ $string =~ s/([^\\]|^)&/$1&amp;/g;
+ $string =~ s/</&lt;/g;
+ $string =~ s/>/&gt;/g;
+ }
+
+ my $user_hook = $options{hook};
+ if ($user_hook) {
+ $string = &$user_hook($string, \%options);
+ _debug("after user hook: $string");
+ }
+
+ # Convert general commands that take arguments, since (1) they might
+ # insert TeX commands that need to be converted, and (2) because
+ # their arguments could well contain constructs that will map to a
+ # Perl string \x{nnnn} for Unicode character nnnn; those Perl braces
+ # for the \x will confuse further parsing of the TeX.
+ #
+ $string = _convert_commands_with_arg($string);
+ _debug("after commands with arg: $string");
+
+ # Convert markups (\texttt, etc.); they have the same brace-parsing issue.
+ $string = _convert_markups($string, \%options);
+ _debug("after markups: $string");
+
+ # And urls, a special case of commands with arguments.
+ $string = _convert_urls($string, \%options);
+ _debug("after urls: $string");
+
+ $string = _convert_control_words($string);
+ _debug("after control words: $string");
+
+ $string = _convert_control_symbols($string);
+ _debug("after control symbols: $string");
+
+ $string = _convert_accents($string);
+ $string = _convert_german($string) if $options{german};
+ $string = _convert_symbols($string);
+ $string = _convert_ligatures($string);
+
+ # Let's handle ties here, after all the other conversions, since
+ # they don't fit well with any of the tables.
+ #
+ # /~, or ~ at the beginning of a line, is probably part of a url or
+ # path, not a tie. Otherwise, consider it a space, since a no-break
+ # spot in TeX is most likely fine to break in text or HTML.
+ #
+ $string =~ s,([^/])~,$1 ,g;
+
+ # Remove kerns. Clearly needs generalizing/sharpening to recognize
+ # dimens better, and plenty of other commands could use it.
+ #_debug("before kern: $string");
+ my $dimen_re = qr/[-+]?[0-9., ]+[a-z][a-z]\s*/;
+ $string =~ s!\\kern${endcw}${dimen_re}!!g;
+
+ # What the heck, let's do \hfuzz and \vfuzz too. They come up pretty
+ # often and are practically the same thing (plus ignore an optional =).
+ $string =~ s!\\[hv]fuzz${endcw}=?\s*${dimen_re}!!g;
+
+ # After all the conversions, $string contains \x{....} constructs
+ # (Perl Unicode characters) where translations have happened. Change
+ # those to the desired output format. Thus we assume that the
+ # Unicode \x{....}'s are not themselves involved in further
+ # translations, which is, so far, true.
+ #
+ if (! $options{entities}) {
+ # Convert our \x strings from Tables.pm to the binary characters.
+ # Assume no more than four hex digits.
+ $string =~ s/\\x\{(.{1,4})\}/ pack('U*', hex($1))/eg;
+
+ } elsif ($options{entities}) {
+ # Convert the XML special characters that appeared in the input,
+ # e.g., from a TeX \&. Unless we're generating HTML output, in
+ # which case they have already been converted.
+ if (! $options{html}) {
+ $string =~ s/&/&amp;/g;
+ $string =~ s/</&lt;/g;
+ $string =~ s/>/&gt;/g;
+ }
+
+ # Our values in Tables.pm are simple ASCII strings \x{....},
+ # so we can replace them with hex entities with no trouble.
+ # Fortunately TeX does not have a standard \x control sequence.
+ $string =~ s/\\x\{(....)\}/&#x$1;/g;
+
+ # The rest of the job is about binary Unicode characters in the
+ # input. We want to transform them into entities also. As always
+ # in Perl, there's more than one way to do it, and several are
+ # described here, just for the fun of it.
+ my $ret = "";
+ #
+ # decode_utf8 is described in https://perldoc.perl.org/Encode.
+ # Without the decode_utf8, all of these methods output each byte
+ # separately; apparently $string is a byte string at this point,
+ # not a Unicode string. I don't know why that is.
+ $ret = decode_utf8($string);
+ #
+ # Transform everything that's not printable ASCII or newline into
+ # entities.
+ $ret =~ s/([^ -~\n])/ sprintf("&#x%04x;", ord($1)) /eg;
+ #
+ # This method leaves control characters as literal; doesn't matter
+ # for XML output, since control characters aren't allowed, but
+ # let's use the regexp method anyway.
+ #$ret = encode("ascii", decode_utf8($string), Encode::FB_XMLCREF);
+ #
+ # The nice_string function from perluniintro also works.
+ #
+ # This fails, just outputs numbers (that is, ord values):
+ # foreach my $c (unpack("U*", $ret)) {
+ #
+ # Without the decode_utf8, outputs each byte separately.
+ # With the decode_utf8, works, but the above seems cleaner.
+ #foreach my $c (split(//, $ret)) {
+ # if (ord($c) <= 31 || ord($c) >= 128) {
+ # $ret .= sprintf("&#x%04x;", ord($c));
+ # } else {
+ # $ret .= $c;
+ # }
+ #}
+ #
+ $string = $ret; # assigned from above.
+ }
+
+ if ($string =~ /\\x\{/) {
+ warn "LaTeX::ToUnicode::convert: untranslated \\x remains: $string\n";
+ warn "LaTeX::ToUnicode::convert: please report as bug.\n";
+ }
+
+ # Drop all braces.
+ $string =~ s/[{}]//g;
+
+ # Backslashes might remain. Don't remove them, as it makes for a
+ # useful way to find unhandled commands.
+
+ # leave newlines alone, but trim spaces and tabs.
+ $string =~ s/^[ \t]+//s; # remove leading whitespace
+ $string =~ s/[ \t]+$//s; # remove trailing whitespace
+ $string =~ s/[ \t]+/ /gs; # collapse all remaining whitespace to one space
+
$string;
}
-sub _convert_commands {
+# Convert commands that take a single braced argument. The table
+# defines text we're supposed to insert before and after the argument.
+# We let future processing handle conversion of both the inserted text
+# and the argument.
+#
+sub _convert_commands_with_arg {
my $string = shift;
- foreach my $command ( keys %LaTeX::ToUnicode::Tables::COMMANDS ) {
- $string =~ s/\{\\$command\}/$LaTeX::ToUnicode::Tables::COMMANDS{$command}/g;
- $string =~ s/\\$command(?=\s|\b)/$LaTeX::ToUnicode::Tables::COMMANDS{$command}/g;
+ foreach my $cmd ( keys %LaTeX::ToUnicode::Tables::ARGUMENT_COMMANDS ) {
+ my $repl = $LaTeX::ToUnicode::Tables::ARGUMENT_COMMANDS{$cmd};
+ my $lft = $repl->[0]; # ref to two-element list
+ my $rht = $repl->[1];
+ # \cmd{foo} -> LFT foo RHT
+ $string =~ s/\\$cmd${endcw}\{(.*?)\}/$lft$1$rht/g;
+ #warn "replaced arg $cmd, yielding $string\n";
+ }
+
+ $string;
+}
+
+# Convert url commands in STRING. This is a special case of commands
+# with arguments: \url{u} and \href{u}{desc text}. The HTML output
+# (generated if $OPTIONS{html} is set) is just too special to be handled
+# in a table; further, \href is the only two-argument command we are
+# currently handling.
+#
+sub _convert_urls {
+ my ($string,$options) = @_;
+
+ if ($options->{html}) {
+ # HTML output.
+ # \url{URL} -> <a href="URL">URL</a>
+ $string =~ s,\\url$endcw\{([^}]*)\}
+ ,<a href="$1">$1</a>,gx;
+ #
+ # \href{URL}{TEXT} -> <a href="URL">TEXT</a>
+ $string =~ s,\\href$endcw\{([^}]*)\}\s*\{([^}]*)\}
+ ,<a href="$1">$2</a>,gx;
+
+ } else {
+ # plain text output.
+ # \url{URL} -> URL
+ $string =~ s/\\url$endcw\{([^}]*)\}/$1/g;
+ #
+ # \href{URL}{TEXT} -> TEXT (URL)
+ # but, as a special case, if URL ends with TEXT, just output URL,
+ # as in:
+ # \href{https://doi.org/10/fjzzc8}{10/fjzzc8}
+ # ->
+ # https://doi.org/10/fjzzc8
+ #
+ # Yet more specialness: the TEXT might have extra braces, as in
+ # \href{https://doi.org/10/fjzzc8}{{10/fjzzc8}}
+ # left over from previous markup commands (\path) which got
+ # removed. We want to accept and ignore such extra braces,
+ # hence the \{+ ... \}+ in recognizing TEXT.
+ #
+#warn "txt url: starting with $string\n";
+ if ($string =~ m/\\href$endcw\{([^}]*)\}\s*\{+([^}]*)\}+/) {
+ my $url = $1;
+ my $text = $2;
+#warn " url: $url\n";
+#warn " text: $text\n";
+ my $repl = ($url =~ m!$text$!) ? $url : "$text ($url)";
+#warn " repl: $repl\n";
+ $string =~ s/\\href$endcw\{([^}]*)\}\s*\{+([^}]*)\}+/$repl/;
+#warn " str: $string\n";
+ }
+ }
+
+ $string;
+}
+
+# Convert control words (not symbols), that is, a backslash and an
+# alphabetic sequence of characters terminated by a non-alphabetic
+# character. Following whitespace is ignored.
+#
+sub _convert_control_words {
+ my $string = shift;
+
+ foreach my $command ( keys %LaTeX::ToUnicode::Tables::CONTROL_WORDS ) {
+ my $repl = $LaTeX::ToUnicode::Tables::CONTROL_WORDS{$command};
+ # replace {\CMD}, whitespace ignored after \CMD.
+ $string =~ s/\{\\$command$endcw\}/$repl/g;
+
+ # replace \CMD, preceded by not-consumed non-backslash.
+ $string =~ s/(?<=[^\\])\\$command$endcw/$repl/g;
+
+ # replace \CMD at beginning of whole string, which otherwise
+ # wouldn't be matched. Two separate regexps to avoid
+ # variable-length lookbehind.
+ $string =~ s/^\\$command$endcw/$repl/g;
}
$string;
}
+# Convert control symbols, other than accents. Much simpler than
+# control words, since they are self-delimiting, don't take arguments, and
+# don't consume any following text.
+#
+sub _convert_control_symbols {
+ my $string = shift;
+
+ foreach my $symbol ( keys %LaTeX::ToUnicode::Tables::CONTROL_SYMBOLS ) {
+ my $repl = $LaTeX::ToUnicode::Tables::CONTROL_SYMBOLS{$symbol};
+
+ # because these are not alphabetic, we can quotemeta them,
+ # and we need to because "\" is one of the symbols.
+ my $rx = quotemeta($symbol);
+
+ # the preceding character must not be a backslash, else "\\ "
+ # could have the "\ " seen first as a control space, leaving
+ # a spurious \ behind. Don't consume the preceding.
+ # Or it could be at the beginning of a line.
+ #
+ $string =~ s/(^|(?<=[^\\]))\\$rx/$repl/g;
+ #warn "after sym $symbol (\\$rx -> $repl), have: $string\n";
+ }
+
+ $string;
+}
+
+# Convert accents.
+#
sub _convert_accents {
my $string = shift;
- $string =~ s/(\{\\(.)\{(\\?\w{1,2})\}\})/$LaTeX::ToUnicode::Tables::ACCENTS{$2}{$3} || $1/eg; # {\"{a}}
- $string =~ s/(\{\\(.)(\\?\w{1,2})\})/$LaTeX::ToUnicode::Tables::ACCENTS{$2}{$3} || $1/eg; # {\"a}
- $string =~ s/(\\(.)(\\?\w{1,2}))/$LaTeX::ToUnicode::Tables::ACCENTS{$2}{$3} || $1/eg; # \"a
- $string =~ s/(\\(.)\{(\\?\w{1,2})\})/$LaTeX::ToUnicode::Tables::ACCENTS{$2}{$3} || $1/eg; # \"{a}
+
+ # first the non-alphabetic accent commands, like \".
+ my %tbl = %LaTeX::ToUnicode::Tables::ACCENT_SYMBOLS;
+ $string =~ s/(\{\\(.)\s*\{(\\?\w{1,2})\}\})/$tbl{$2}{$3} || $1/eg; #{\"{a}}
+ $string =~ s/(\{\\(.)\s*(\\?\w{1,2})\})/ $tbl{$2}{$3} || $1/eg; # {\"a}
+ $string =~ s/(\\(.)\s*(\\?\w{1,1}))/ $tbl{$2}{$3} || $1/eg; # \"a
+ $string =~ s/(\\(.)\s*\{(\\?\w{1,2})\})/ $tbl{$2}{$3} || $1/eg; # \"{a}
+
+ # second the alphabetic commands, like \c. They have to be handled
+ # differently because \cc is not \c{c}! The only difference in the
+ # regular expressions is using $endcw instead of just \s*.
+ #
+ %tbl = %LaTeX::ToUnicode::Tables::ACCENT_LETTERS;
+ $string =~ s/(\{\\(.)$endcw\{(\\?\w{1,2})\}\})/$tbl{$2}{$3} || $1/eg; #{\"{a}}
+ $string =~ s/(\{\\(.)$endcw(\\?\w{1,2})\})/ $tbl{$2}{$3} || $1/eg; # {\"a}
+ $string =~ s/(\\(.)$endcw(\\?\w{1,1}))/ $tbl{$2}{$3} || $1/eg; # \"a
+ $string =~ s/(\\(.)$endcw\{(\\?\w{1,2})\})/ $tbl{$2}{$3} || $1/eg; # \"{a}
+
+
+ # The argument is just one \w character for the \"a case, not two,
+ # because otherwise we might consume a following character that is
+ # not part of the accent, e.g., a backslash (\"a\'e).
+ #
+ # Others can be two because of the \t tie-after accent. Even {\t oo} is ok.
+ #
+ # Allow whitespace after the \CMD in all cases, e.g., "\c c". Even
+ # for the control symbols, it turns out spaces are ignored there
+ # (as in \" o), unlike the usual syntax.
+ #
+ # Some non-word constituents would work, but in practice we hope
+ # everyone just uses letters.
+
$string;
}
+# For the [n]german package.
sub _convert_german {
my $string = shift;
@@ -57,26 +362,28 @@ sub _convert_german {
$string;
}
+# Control words that produce printed symbols (and letters in languages
+# other than English), that is.
+#
sub _convert_symbols {
my $string = shift;
foreach my $symbol ( keys %LaTeX::ToUnicode::Tables::SYMBOLS ) {
- $string =~ s/{\\$symbol}/$LaTeX::ToUnicode::Tables::SYMBOLS{$symbol}/g;
- $string =~ s/\\$symbol\b/$LaTeX::ToUnicode::Tables::SYMBOLS{$symbol}/g;
+ my $repl = $LaTeX::ToUnicode::Tables::SYMBOLS{$symbol};
+ # preceded by a (non-consumed) non-backslash,
+ # usual termination for a control word.
+ # These commands don't take arguments.
+ $string =~ s/(?<=[^\\])\\$symbol$endcw/$repl/g;
+
+ # or the beginning of the whole string:
+ $string =~ s/^\\$symbol$endcw/$repl/g;
}
$string;
}
-# Replace \<specialchar> with <specialchar>.
-sub _convert_specials {
- my $string = shift;
- my $specials = join( '|', @LaTeX::ToUnicode::Tables::SPECIALS );
- my $pattern = qr/\\($specials)/o;
- $string =~ s/$pattern/$1/g;
- $string =~ s/\\\$/\$/g;
- $string;
-}
-
+# Special character sequences, not \commands. They aren't all
+# technically ligatures, but no matter.
+#
sub _convert_ligatures {
my $string = shift;
@@ -91,40 +398,117 @@ sub _convert_ligatures {
}
#
+# Convert LaTeX markup commands in STRING like \textbf{...} and
+# {\bfshape ...} and {\bf ...}.
+#
+# If we're aiming for plain text output, they are just cleared away (the
+# braces are not removed).
+#
+# If we're generating HTML output ("html" key is set in $OPTIONS hash
+# ref), we use the value in the hash, so that \textbf{foo} becomes
+# <b>foo</b>. Nested markup doesn't work.
+#
sub _convert_markups {
- my $string = shift;
- my $orig_string = $string;
+ my ($string, $options) = @_;
- my $markups = join( '|', @LaTeX::ToUnicode::Tables::MARKUPS );
+ # HTML is different.
+ return _convert_markups_html($string) if $options->{html};
+ # Ok, we'll "convert" to plain text by removing the markup commands.
+
+ # we can do all markup commands at once.
+ my $markups = join('|', keys %LaTeX::ToUnicode::Tables::MARKUPS);
# Remove \textMARKUP{...}, leaving just the {...}
- $string =~ s/\\text($markups)\b\s*//g;
-
- # Remove braces and \command in: {... \command ...}
- $string =~ s/(\{[^{}]+)\\(?:$markups)\s+([^{}]+\})/$1$2/g;
- #
- # Remove braces and \command in: {\command ...}
- $string =~ s/\{\\(?:$markups)\s+([^{}]*)\}/$1/g;
- #
- # Remove: {\command
+ $string =~ s/\\text($markups)$endcw//g;
+
+ # Similarly remove \MARKUPshape.
+ $string =~ s/\\($markups)shape$endcw//g;
+
+ # Remove braces and \command in: {... \MARKUP ...}
+ $string =~ s/(\{[^{}]+)\\(?:$markups)$endcw([^{}]+\})/$1$2/g;
+
+ # Remove braces and \command in: {\MARKUP ...}
+ $string =~ s/\{\\(?:$markups)$endcw([^{}]*)\}/$1/g;
+
+ # Remove: {\MARKUP
# Although this will leave unmatched } chars behind, there's no
# alternative without full parsing, since the bib entry will often
# look like: {\em {The TeX{}book}}. Also might, in principle, be
# at the end of a line.
- $string =~ s/\{\\(?:$markups)\b\s*//g;
+ $string =~ s/\{\\(?:$markups)$endcw//g;
# Ultimately we remove all braces in ltx2crossrefxml SanitizeText fns,
- # so the unmatched braces don't matter ... that code should be moved here.
+ # so the unmatched braces don't matter ... that code should be moved.
$string;
}
+# Convert \markup in STRING to html. We can't always figure out where to
+# put the end tag, but we always put it somewhere. We don't even attempt
+# to handle nested markup.
+#
+sub _convert_markups_html {
+ my ($string) = @_;
+
+ my %MARKUPS = %LaTeX::ToUnicode::Tables::MARKUPS;
+ # have to consider each markup \command separately.
+ for my $markup (keys %MARKUPS) {
+ my $hcmd = $MARKUPS{$markup}; # some TeX commands don't translate
+ my $tag = $hcmd ? "<$hcmd>" : "";
+ my $end_tag = $hcmd ? "</$hcmd>" : "";
+
+ # The easy one: \textMARKUP{...}
+ $string =~ s/\\text$markup$endcw\{(.*?)\}/$tag$1$end_tag/g;
+
+ # {x\MARKUP(shape) y} -> x<mk>y</mk> (leave out braces)
+ $string =~ s/\{([^{}]+)\\$markup(shape)?$endcw([^{}]+)\}
+ /$1$tag$3$end_tag/gx;
+
+ # {\MARKUP(shape) y} -> <mk>y</mk>. Same as previous but without
+ # the x part. Could do it in one regex but this seems clearer.
+ $string =~ s/\{\\$markup(shape)?$endcw([^{}]+)\}
+ /$tag$2$end_tag/gx;
+
+ # for {\MARKUP(shape) ... with no matching brace, we don't know
+ # where to put the end tag, so seems best to do nothing.
+ }
+
+ $string;
+}
+
+
+##############################################################
+# debug_hash_as_string($LABEL, HASH)
+#
+# Return LABEL followed by HASH elements, followed by a newline, as a
+# single string. If HASH is a reference, it is dereferenced (but not
+# recursively).
+###############################################################
+sub debug_hash_as_string {
+ my ($label) = shift;
+ my (%hash) = (ref $_[0] && $_[0] =~ /.*HASH.*/) ? %{$_[0]} : @_;
+
+ my $str = "$label: {";
+ my @items = ();
+ for my $key (sort keys %hash) {
+ my $val = $hash{$key};
+ $val = ".undef" if ! defined $val;
+ $key =~ s/\n/\\n/g;
+ $val =~ s/\n/\\n/g;
+ push (@items, "$key:$val");
+ }
+ $str .= join (",", @items);
+ $str .= "}";
+
+ return "$str\n";
+}
+
1;
__END__
=pod
-=encoding utf-8
+=encoding UTF-8
=head1 NAME
@@ -132,68 +516,242 @@ LaTeX::ToUnicode - Convert LaTeX commands to Unicode
=head1 VERSION
-version 0.11
+version 0.53
=head1 SYNOPSIS
- use LaTeX::ToUnicode qw( convert );
+ use LaTeX::ToUnicode qw( convert debuglevel $endcw );
- convert( '{\"a}' ) eq 'ä'; # true
- convert( '"a', german => 1 ) eq 'ä'; # true, `german' package syntax
- convert( '"a', ) eq '"a'; # not enabled by default
+ # simple examples:
+ convert( '{\"a}' ) eq 'ä'; # true
+ convert( '{\"a}', entities=>1 ) eq '&#00EF;'; # true
+ convert( '"a', german=>1 ) eq 'ä'; # true, `german' package syntax
+ convert( '"a', ) eq '"a'; # false, not enabled by default
# more generally:
my $latexstr;
- my $unistr = convert($latexstr);
-
-=head1 DESCRIPTION
+ my $unistr = convert($latexstr); # get literal (binary) Unicode characters
-This module provides a method to convert LaTeX-style markups for accents etc.
-into their Unicode equivalents. It translates commands for special characters
-or accents into their Unicode equivalents and removes formatting commands.
-It is not at all bulletproof or complete.
+ my $entstr = convert($latexstr, entities=>1); # get &#xUUUU;
+
+ my $htmstr = convert($latexstr, entities=>1, html=>1); # also html markup
+
+ my $unistr = convert($latexstr, hook=>\&my_hook); # user-defined hook
+
+ # if nonzero, dumps various info; perhaps other levels in the future.
+ LaTeX::ToUnicode::debuglevel($verbose);
-This module converts values from BibTeX files into plain text. If your
-use case is different, YMMV.
+ # regexp for terminating TeX control words, e.g., in hooks.
+ my $endcw = $LaTeX::ToUnicode::endcw;
+ $string =~ s/\\newline$endcw/ /g; # translate \newline to space
+
+=head1 DESCRIPTION
-In contrast to L<TeX::Encode>, this module does not create HTML of any
-kind, including for HTML/XML metacharacters such as E<lt>, E<gt>, C<&>,
-which can appear literally in the output. Entities are other handling
-for these has to happen at another level, if need be.
+This module provides a method to convert LaTeX markups for accents etc.
+into their Unicode equivalents. It translates some commands for special
+characters or accents into their Unicode (or HTML) equivalents and
+removes formatting commands. It is not at all bulletproof or complete.
+
+This module is intended to convert fragments of LaTeX source, such as
+bibliography entries and abstracts, into plain text (or, optionally,
+simplistic HTML). It is not a document conversion system. Math, tables,
+figures, sectioning, etc., are not handled in any way, and mostly left
+in their TeX form in the output. The translations assume standard LaTeX
+meanings for characters and control sequences; macros in the input are
+not considered.
+
+The aim for all the output is utter simplicity and minimalism, not
+faithful translation. For example, although Unicode has a code point for
+a thin space, the LaTeX C<\thinspace> (etc.) command is translated to
+the empty string; such spacing refinements desirable in the TeX output
+are, in our experience, generally not desired in the HTML output from
+this tool.
+
+As another example, TeX C<%> comments are not removed, even on lines by
+themselves, because they may be inside verbatim blocks, and we don't
+attempt to keep any such context. In practice, TeX comments are rare in
+the text fragments intended to be handled, so removing them in advance
+has not been a great burden.
+
+As another example, LaTeX ties, C<~> characters, are replaced with
+normal spaces (exception: when they follow a C</> character or appear at
+the beginning of a line, they're assumed to be part of a url or a
+pathname), rather than a no-break space character, because in our
+experience most ties intended for the TeX output would just cause
+trouble in plain text or HTML.
+
+Regarding normal whitespace: all leading and trailing horizontal
+whitespace (that is, SPC and TAB) is removed. All internal horizontal
+whitespace sequences are collapsed to a single space.
+
+After the conversions, all brace characters (C<{}>) are simply removed
+from the returned string. This turns out to be a significant convenience
+in practice, since many LaTeX commands which take arguments don't need
+to do anything for our purposes except output the argument.
+
+On the other hand, backslashes are not removed. This is so the caller
+can check for C<\\> and thus discover untranslated commands. Of course
+there are many other constructs that might not be translated, or
+translated wrongly. There is no escaping the need to carefully look at
+the output.
+
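+A tiny end-to-end sketch of these rules (an illustrative call, not taken
+from the module's own tests; C<\foo> stands for any unhandled command):
+
+  convert('{\it Book}~Title \foo') eq 'Book Title \foo';
+  # the \it markup is dropped, the tie becomes a space, the braces
+  # are removed, and the unknown \foo is left alone as a warning sign
+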
+Suggestions and bug reports are welcome for practical needs; we know
+full well that there are hundreds of commands not handled that could be.
+Virtually all the behavior mentioned here would be easily made
+customizable, if there is a need to do so.
=head1 FUNCTIONS
=head2 convert( $latex_string, %options )
-Convert the text in C<$string> that contains LaTeX into a plain(er)
-Unicode string. All escape sequences for accented and special characters
-(e.g., \i, \"a, ...) are converted. Basic formatting commands (e.g. {\it
-...}) are removed.
+Convert the text in C<$latex_string> into a plain(er) Unicode string.
+Escape sequences for accented and special characters (e.g., C<\i>,
+C<\"a>, ...) are converted. A few basic formatting commands (e.g.,
+C<{\it ...}>) are removed. See the L<LaTeX::ToUnicode::Tables> submodule
+for the full conversion tables.
-C<%options> allows you to enable additional translations. These keys are
-recognized:
+These keys are recognized in C<%options>:
=over
+=item C<entities>
+
+Output C<&#xUUUU;> entities (valid in XML); in this case, also convert
+the E<lt>, E<gt>, C<&> metacharacters to entities. Recognized non-ASCII
+Unicode characters in the original input are also converted to entities,
+not only the translations from TeX commands.
+
+The default is to output literal (binary) Unicode characters, and
+not change any metacharacters.
+
=item C<german>
If this option is set, the commands introduced by the package `german'
(e.g. C<"a> eq C<ä>, note the missing backslash) are also
handled.
+=item C<html>
+
+If this option is set, the output is simplistic html rather than plain
+text. This affects only a few things: S<1) the> output of urls from
+C<\url> and C<\href>; S<2) the> output of markup commands like
+C<\textbf> (but nested markup commands don't work); S<3) two> other
+random commands, C<\enquote> and C<\path>, because they are needed.
+
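+For instance (an illustrative call, not from the module's documentation),
+both the url and the markup translations are affected:
+
+  convert('\textbf{bold} \url{https://tug.org}', html=>1)
+    eq '<b>bold</b> <a href="https://tug.org">https://tug.org</a>';
+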
+=item C<hook>
+
+The value must be a function that takes two arguments and returns a
+string. The first argument is the incoming string (may be multiple
+lines), and the second argument is a hash reference of options, exactly
+what was passed to this C<convert> function. Thus the hook can detect
+whether html is needed.
+
+The hook is called (almost) right away, before any of the other
+conversions have taken place. That way the hook can make use of the
+predefined conversions instead of repeating them. The only changes made
+to the input string before the hook is called are trivial: leading and
+trailing whitespace (space and tab) on each line are removed, and, for
+HTML output, incoming ampersand, less-than, and greater-than characters
+are replaced with their entities.
+
+Any substitutions that result in Unicode code points must use
+C<\\x{nnnn}> on the right hand side: that's two backslashes and a
+four-digit hex number.
+
+As an example, here is a skeleton of the hook function for TUGboat:
+
+ sub LaTeX_ToUnicode_convert_hook {
+ my ($string,$options) = @_;
+
+ my $endcw = $LaTeX::ToUnicode::endcw;
+ die "no endcw regexp in LaTeX::ToUnicode??" if ! $endcw;
+
+ ...
+ $string =~ s/\\newline$endcw/ /g;
+
+ # TUB's \acro{} takes an argument, but we do nothing with it.
+ # The braces will be removed by convert().
+ $string =~ s/\\acro$endcw//g;
+ ...
+ $string =~ s/\\CTAN$endcw/CTAN/g;
+ $string =~ s/\\Dash$endcw/\\x{2014}/g; # em dash; replacement is string
+ ...
+
+ # ignore \begin{abstract} and \end{abstract} commands.
+ $string =~ s,\\(begin|end)$endcw\{abstract\}\s*,,g;
+
+ # Output for our url abbreviations, and other commands, depends on
+ # whether we're generating plain text or HTML.
+ if ($options->{html}) {
+ # HTML.
+ # \tbsurl{URLBASE} -> <a href="https://URLBASE">URLBASE</a>
+ $string =~ s,\\tbsurl$endcw\{([^}]*)\}
+ ,<a href="https://$1">$1</a>,gx;
+ ...
+ # varepsilon, and no line break at hyphen.
+ $string =~ s,\\eTeX$endcw,\\x{03B5}<nobr>-</nobr>TeX,g;
+
+ } else {
+ # for plain text, we can just prepend the protocol://.
+ $string =~ s,\\tbsurl$endcw,https://,g;
+ ...
+ $string =~ s,\\eTeX$endcw,\\x{03B5}-TeX,g;
+ }
+ ...
+ return $string;
+ }
+
+As shown here for C<\eTeX> (an abbreviation macro defined in the
+TUGboat style files), if markup is desired in the output, the
+substitutions must be different for HTML and plain text. Otherwise, the
+desired HTML markup is transliterated as if it were plain text. Or else
+the translations must be extended so that TeX markup can be used on the
+rhs to be replaced with the desired HTML (C<&lt;nobr&gt;> in this case).
+
+For the full definition (and plenty of additional information),
+see the file C<ltx2crossrefxml-tugboat.cfg> in the TUGboat source
+repository at
+L<https://github.com/TeXUsersGroup/tugboat/tree/trunk/capsules/crossref>.
+
+The hook function is specified in the C<convert()> call like this:
+
+ LaTeX::ToUnicode::convert(..., { hook => \&LaTeX_ToUnicode_convert_hook })
+
=back
+=head2 debuglevel( $level )
+
+Output debugging information if C<$level> is nonzero.
+
+=head2 $endcw
+
+A predefined regexp for terminating TeX control words (not control
+symbols!). Can be used in, for example, hook functions:
+
+ my $endcw = $LaTeX::ToUnicode::endcw;
+ $string =~ s/\\newline$endcw/ /g; # translate \newline to space
+
+It's defined as follows:
+
+ our $endcw = qr/(?<=[a-zA-Z])(?=[^a-zA-Z]|$)\s*/;
+
+That is, look behind for an alphabetic character, then look ahead for a
+non-alphabetic character (or end of line), then consume whitespace.
+Fingers crossed.
+
=head1 AUTHOR
-Gerhard Gossen <gerhard.gossen@googlemail.com> and
-Boris Veytsman <boris@varphi.com>
+Gerhard Gossen <gerhard.gossen@googlemail.com>,
+Boris Veytsman <boris@varphi.com>,
+Karl Berry <karl@freefriends.org>
+
L<https://github.com/borisveytsman/bibtexperllibs>
=head1 COPYRIGHT AND LICENSE
-This software is copyright (c) 2010-2020 by Gerhard Gossen and Boris Veytsman
+Copyright 2010-2023 Gerhard Gossen, Boris Veytsman, Karl Berry
This is free software; you can redistribute it and/or modify it under
-the same terms as the Perl 5 programming language system itself.
+the same terms as the Perl5 programming language system itself.
=cut
diff --git a/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode/Tables.pm b/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode/Tables.pm
index a9aef54834..fd82b3ea7c 100644
--- a/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode/Tables.pm
+++ b/support/bibtexperllibs/LaTeX-ToUnicode/lib/LaTeX/ToUnicode/Tables.pm
@@ -1,449 +1,585 @@
package LaTeX::ToUnicode::Tables;
BEGIN {
- $LaTeX::ToUnicode::Tables::VERSION = '0.11';
+ $LaTeX::ToUnicode::Tables::VERSION = '0.53';
}
use strict;
use warnings;
#ABSTRACT: Character tables for LaTeX::ToUnicode
-use utf8;
-
+use utf8; # just for the german support
# Technically not all of these are ligatures, but close enough.
# Order is important, so has to be a list, not a hash.
+#
our @LIGATURES = (
- "---" => "\x{2014}", # em dash
- "--" => "\x{2013}", # en dash
- "!`" => "\x{00A1}", # inverted exclam
- "?`" => "\x{00A1}", # inverted question
- "``" => "\x{201c}", # left double
- "''" => "\x{201d}", # right double
- "`" => "\x{2018}", # left single
- "'" => "\x{2019}", # right single
+ "---" => '\x{2014}', # em dash
+ "--" => '\x{2013}', # en dash
+ "!`" => '\x{00A1}', # inverted exclam
+ "?`" => '\x{00A1}', # inverted question
+ "``" => '\x{201c}', # left double
+ "''" => '\x{201d}', # right double
+ "`" => '\x{2018}', # left single
+ "'" => '\x{2019}', # right single
+);
+# test text: em---dash, en--dash, exc!`am, quest?`ion, ``ld, rd'', `ls, rs'.
+#
+# Some additional ligatures supported in T1 encoding, but we won't (from
+# tex-text.map):
+# U+002C U+002C <> U+201E ; ,, -> DOUBLE LOW-9 QUOTATION MARK
+# U+003C U+003C <> U+00AB ; << -> LEFT POINTING GUILLEMET
+# U+003E U+003E <> U+00BB ; >> -> RIGHT POINTING GUILLEMET
+
+# for {\MARKUP(shape) ...} and \textMARKUP{...}; although not all
+# command names are defined in LaTeX for all markups, we translate them
+# anyway. Also, LaTeX has more font axes not included here: md, ulc, sw,
+# ssc, etc. See ltfntcmd.dtx and ltfssaxes.dtx if we ever want to try
+# for completeness.
+#
+our %MARKUPS = (
+ 'bf' => 'b',
+ 'cal' => '',
+ 'em' => 'em',
+ 'it' => 'i',
+ 'rm' => '',
+ 'sc' => '', # qqq should uppercasify
+ 'sf' => '',
+ 'sl' => 'i',
+ 'small' => '',
+ 'subscript' => 'sub',
+ 'superscript' => 'sup',
+ 'tt' => 'tt',
+);
+
+# More commands taking arguments that we want to handle.
+#
+our %ARGUMENT_COMMANDS = (
+ 'emph' => ['\textem{', '}'], # \textem doesn't exist, but is processed
+ 'enquote' => ["`", "'"],
+ 'path' => ['\texttt{', '}'], # ugh, might not be a braced argument
);
-# test text: em---dash, en---dash, exc!`am, quest?`ion, ``ld, rd'', `ls, rs'.
-
-# additions supported in T1 encoding, but we won't (from tex-text.map):
-# U+002C U+002C <> U+201E ; ,, -> DOUBLE LOW-9 QUOTATION MARK
-# U+003C U+003C <> U+00AB ; << -> LEFT POINTING GUILLEMET
-# U+003E U+003E <> U+00BB ; >> -> RIGHT POINTING GUILLEMET
-
-
-our @SPECIALS = ( qw( $ % & _ { } ), '#' );
-
-our %COMMANDS = (
- 'LaTeX' => 'LaTeX',
- 'TeX' => 'TeX',
- ' ' => ' ', # control space
- '-' => '', # hyphenation
- '/' => '', # italic correction
- 'dag' => "\x{2020}",
- 'ddag' => "\x{2021}",
- 'bullet' => "\x{2022}",
- 'dots' => "\x{2026}",
- 'ldots' => "\x{2026}",
- 'epsilon' => "\x{03F5}",
- 'varepsilon' => "\x{03B5}",
- 'Omega' => "\x{03A9}",
- 'omega' => "\x{03C9}",
- 'hookrightarrow' => "\x{2194}",
- 'log' => 'log',
+
+# Non-alphabetic \COMMANDs, other than accents and special cases.
+#
+our %CONTROL_SYMBOLS = (
+ ' ' => ' ', # control space
+ "\t" => ' ', # control space
+ "\n" => '\x{0020}', # control space; use entity to avoid being trimmed
+ '!' => '', # negative thin space
+ # " umlaut
+ '#' => '#', # sharp sign
+ '$' => '$', # dollar sign
+ '%' => '%', # percent sign
+ '&' => '\x{0026}', # ampersand, entity to avoid html conflict
+ # ' acute accent
+ '(' => '', # start inline math
+ ')' => '', # end inline math
+ '*' => '', # discretionary multiplication
+ '+' => '', # tabbing: tab stop to right
+ ',' => '', # thin space
+ '-' => '', # discretionary hyphenation
+ # . overdot accent
+ '/' => '', # italic correction
+ # 0..9 undefined
+ ':' => '', # medium space
+ ';' => ' ', # thick space
+ '<' => '', # tabbing: text to left of margin
+ # = macron accent
+ '>' => '', # tabbing: next tab stop
+ # ? undefined
+ '@' => '', # end of sentence
+ # A..Z control words, not symbols
+ '[' => '', # start display math
+ '\\' => ' ', # line break
+ ']' => '', # end display math
+ # ^ circumflex accent
+ '_' => '_', # underscore
+ # ` grave accent
+ # a..z control words, not symbols
+ '{' => '\x{007b}', # lbrace
+ '|' => '\x{2225}', # parallel
+ '}' => '\x{007d}', # rbrace
+ # ~ tilde accent
+);
+
+# Alphabetic \COMMANDs that map to nothing. This is simply
+# interpolated into %CONTROL_WORDS (next), not used directly, so we
+# redundantly specify the '' on every line.
+#
+our %CONTROL_WORDS_EMPTY = (
+ 'begingroup' => '',
+ 'bgroup' => '',
+ 'checkcomma' => '',
+ #'cite' => '', # keep \cite undefined since it needs manual work
+ 'clearpage' => '',
+ 'doi' => '',
+ 'egroup' => '',
+ 'endgroup' => '',
+ 'hbox' => '',
+ 'ignorespaces' => '',
+ 'mbox' => '',
+ 'medspace' => '',
+ 'negmedspace' => '',
+ 'negthickspace' => '',
+ 'negthinspace' => '',
+ 'newblock' => '',
+ 'newpage' => '',
+ 'noindent' => '',
+ 'nolinkurl' => '',
+ 'oldstylenums' => '',
+ 'pagebreak' => '',
+ 'protect' => '',
+ 'raggedright' => '',
+ 'relax' => '',
+ 'thinspace' => '',
+ 'unskip' => '',
+ 'urlprefix' => '',
);
+# Alphabetic commands that expand to nothing (above) and to
+# something (below).
+#
+our %CONTROL_WORDS = (
+ %CONTROL_WORDS_EMPTY,
+ 'BibLaTeX' => 'BibLaTeX',
+ 'BibTeX' => 'BibTeX',
+ 'LaTeX' => 'LaTeX',
+ 'LuaLaTeX' => 'LuaLaTeX',
+ 'LuaTeX' => 'LuaTeX',
+ 'MF' => 'Metafont',
+ 'MP' => 'MetaPost',
+ 'Omega' => '\x{03A9}',
+ 'TeX' => 'TeX',
+ 'XeLaTeX' => 'XeLaTeX',
+ 'XeTeX' => 'XeTeX',
+ 'bullet' => '\x{2022}',
+ 'dag' => '\x{2020}',
+ 'ddag' => '\x{2021}',
+ 'dots' => '\x{2026}',
+ 'epsilon' => '\x{03F5}',
+ 'hookrightarrow' => '\x{21AA}',
+ 'ldots' => '\x{2026}',
+ 'log' => 'log',
+ 'omega' => '\x{03C9}',
+ 'par' => "\n\n",
+ 'qquad' => ' ', # 2em space
+ 'quad' => ' ', # em space
+ 'textbackslash' => '\x{005C}', # entities so \ in output indicates
+ # untranslated TeX source
+ 'textbraceleft' => '\x{007B}', # entities so our bare-brace removal
+ 'textbraceright' => '\x{007D}', # skips them
+ 'textgreater' => '\x{003E}',
+ 'textless' => '\x{003C}',
+ 'textquotedbl' => '"',
+ 'thickspace' => ' ',
+ 'varepsilon' => '\x{03B5}',
+);
+# Control words (not symbols) that generate various non-English
+# letters and symbols. Lots more could be added.
+#
our %SYMBOLS = ( # Table 3.2 in Lamport, plus more
- 'AA' => "\x{00C5}", # A with ring
- 'aa' => "\x{00E5}",
- 'AE' => "\x{00C6}", # AE
- 'ae' => "\x{00E6}",
- 'DH' => "\x{00D0}", # ETH
- 'dh' => "\x{00F0}",
- 'DJ' => "\x{0110}", # D with stroke
- 'dj' => "\x{0111}",
- 'i' => "\x{0131}", # small dotless i
- 'L' => "\x{0141}", # L with stroke
- 'l' => "\x{0142}",
- 'NG' => "\x{014A}", # ENG
- 'ng' => "\x{014B}",
- 'OE' => "\x{0152}", # OE
- 'oe' => "\x{0153}",
- 'O' => "\x{00D8}", # O with stroke
- 'o' => "\x{00F8}",
+ 'AA' => '\x{00C5}', # A with ring
+ 'aa' => '\x{00E5}',
+ 'AE' => '\x{00C6}', # AE
+ 'ae' => '\x{00E6}',
+ 'DH' => '\x{00D0}', # ETH
+ 'dh' => '\x{00F0}',
+ 'DJ' => '\x{0110}', # D with stroke
+ 'dj' => '\x{0111}',
+ 'i' => '\x{0131}', # small dotless i
+ 'L' => '\x{0141}', # L with stroke
+ 'l' => '\x{0142}',
+ 'NG' => '\x{014A}', # ENG
+ 'ng' => '\x{014B}',
+ 'OE' => '\x{0152}', # OE
+ 'oe' => '\x{0153}',
+ 'O' => '\x{00D8}', # O with stroke
+ 'o' => '\x{00F8}',
'SS' => 'SS', # lately also U+1E9E, but SS seems good enough
- 'ss' => "\x{00DF}",
- 'TH' => "\x{00DE}", # THORN
- 'th' => "\x{00FE}",
- 'TM' => "\x{2122}", # trade mark sign
+ 'ss' => '\x{00DF}',
+ 'TH' => '\x{00DE}', # THORN
+ 'textordfeminine' => '\x{00AA}',
+ 'textordmasculine' => '\x{00BA}',
+ 'textregistered' => '\x{00AE}',
+ 'th' => '\x{00FE}',
+ 'TM' => '\x{2122}', # trade mark sign
);
-
-our %ACCENTS = (
- "\"" => { # with diaresis
- A => "\x{00C4}",
- E => "\x{00CB}",
- H => "\x{1E26}",
- I => "\x{00CF}",
- O => "\x{00D6}",
- U => "\x{00DC}",
- W => "\x{1E84}",
- X => "\x{1E8c}",
- Y => "\x{0178}",
- "\\I" => "\x{00CF}",
- "\\i" => "\x{00EF}",
- a => "\x{00E4}",
- e => "\x{00EB}",
- h => "\x{1E27}",
- i => "\x{00EF}",
- o => "\x{00F6}",
- t => "\x{1E97}",
- u => "\x{00FC}",
- w => "\x{1E85}",
- x => "\x{1E8d}",
- y => "\x{00FF}",
+# Accent commands that are not alphabetic.
+#
+our %ACCENT_SYMBOLS = (
+ "\"" => { # with diaresis
+ A => '\x{00C4}',
+ E => '\x{00CB}',
+ H => '\x{1E26}',
+ I => '\x{00CF}',
+ O => '\x{00D6}',
+ U => '\x{00DC}',
+ W => '\x{1E84}',
+ X => '\x{1E8c}',
+ Y => '\x{0178}',
+ "\\I" => '\x{00CF}',
+ "\\i" => '\x{00EF}',
+ a => '\x{00E4}',
+ e => '\x{00EB}',
+ h => '\x{1E27}',
+ i => '\x{00EF}',
+ o => '\x{00F6}',
+ t => '\x{1E97}',
+ u => '\x{00FC}',
+ w => '\x{1E85}',
+ x => '\x{1E8d}',
+ y => '\x{00FF}',
},
- "'" => { # with acute
- A => "\x{00C1}",
- AE => "\x{01FC}",
- C => "\x{0106}",
- E => "\x{00C9}",
- G => "\x{01F4}",
- I => "\x{00CD}",
- K => "\x{1E30}",
- L => "\x{0139}",
- M => "\x{1E3E}",
- N => "\x{0143}",
- O => "\x{00D3}",
- P => "\x{1E54}",
- R => "\x{0154}",
- S => "\x{015A}",
- U => "\x{00DA}",
- W => "\x{1E82}",
- Y => "\x{00DD}",
- Z => "\x{0179}",
- "\\I" => "\x{00CD}",
- "\\i" => "\x{00ED}",
- a => "\x{00E1}",
- ae => "\x{01FD}",
- c => "\x{0107}",
- e => "\x{00E9}",
- g => "\x{01F5}",
- i => "\x{00ED}",
- k => "\x{1E31}",
- l => "\x{013A}",
- m => "\x{1E3f}",
- n => "\x{0144}",
- o => "\x{00F3}",
- p => "\x{1E55}",
- r => "\x{0155}",
- s => "\x{015B}",
- u => "\x{00FA}",
- w => "\x{1E83}",
- y => "\x{00FD}",
- z => "\x{017A}",
+ "'" => { # with acute
+ A => '\x{00C1}',
+ AE => '\x{01FC}',
+ C => '\x{0106}',
+ E => '\x{00C9}',
+ G => '\x{01F4}',
+ I => '\x{00CD}',
+ K => '\x{1E30}',
+ L => '\x{0139}',
+ M => '\x{1E3E}',
+ N => '\x{0143}',
+ O => '\x{00D3}',
+ P => '\x{1E54}',
+ R => '\x{0154}',
+ S => '\x{015A}',
+ U => '\x{00DA}',
+ W => '\x{1E82}',
+ Y => '\x{00DD}',
+ Z => '\x{0179}',
+ "\\I" => '\x{00CD}',
+ "\\i" => '\x{00ED}',
+ a => '\x{00E1}',
+ ae => '\x{01FD}',
+ c => '\x{0107}',
+ e => '\x{00E9}',
+ g => '\x{01F5}',
+ i => '\x{00ED}',
+ k => '\x{1E31}',
+ l => '\x{013A}',
+ m => '\x{1E3f}',
+ n => '\x{0144}',
+ o => '\x{00F3}',
+ p => '\x{1E55}',
+ r => '\x{0155}',
+ s => '\x{015B}',
+ u => '\x{00FA}',
+ w => '\x{1E83}',
+ y => '\x{00FD}',
+ z => '\x{017A}',
},
- "." => { # with dot above
- A => "\x{0226}",
- B => "\x{1E02}",
- C => "\x{010A}",
- D => "\x{1E0A}",
- E => "\x{0116}",
- F => "\x{1E1E}",
- G => "\x{0120}",
- H => "\x{1E22}",
- I => "\x{0130}",
- M => "\x{1E40}",
- N => "\x{1E44}",
- O => "\x{022E}",
- P => "\x{1E56}",
- R => "\x{1E58}",
- S => "\x{1E60}",
- T => "\x{1E6a}",
- W => "\x{1E86}",
- X => "\x{1E8A}",
- Y => "\x{1E8E}",
- Z => "\x{017B}",
- "\\I" => "\x{0130}",
- a => "\x{0227}",
- b => "\x{1E03}",
- c => "\x{010B}",
- d => "\x{1E0B}",
- e => "\x{0117}",
- f => "\x{1e1f}",
- g => "\x{0121}",
- h => "\x{1E23}",
- m => "\x{1E41}",
- n => "\x{1E45}",
- o => "\x{022F}",
- p => "\x{1E57}",
- r => "\x{1E59}",
- s => "\x{1E61}",
- t => "\x{1E6b}",
- w => "\x{1E87}",
- x => "\x{1E8b}",
- y => "\x{1E8f}",
- z => "\x{017C}",
+ "^" => { # with circumflex
+ A => '\x{00C2}',
+ C => '\x{0108}',
+ E => '\x{00CA}',
+ G => '\x{011C}',
+ H => '\x{0124}',
+ I => '\x{00CE}',
+ J => '\x{0134}',
+ O => '\x{00D4}',
+ R => 'R\x{0302}',
+ S => '\x{015C}',
+ U => '\x{00DB}',
+ W => '\x{0174}',
+ Y => '\x{0176}',
+ Z => '\x{1E90}',
+ "\\I" => '\x{00CE}',
+ "\\J" => '\x{0134}',
+ "\\i" => '\x{00EE}',
+ "\\j" => '\x{0135}',
+ a => '\x{00E2}',
+ c => '\x{0109}',
+ e => '\x{00EA}',
+ g => '\x{011D}',
+ h => '\x{0125}',
+ i => '\x{00EE}',
+ j => '\x{0135}',
+ o => '\x{00F4}',
+ s => '\x{015D}',
+ u => '\x{00FB}',
+ w => '\x{0175}',
+ y => '\x{0177}',
+ z => '\x{1E91}',
},
- '=' => { # with macron
- A => "\x{0100}",
- AE => "\x{01E2}",
- E => "\x{0112}",
- G => "\x{1E20}",
- I => "\x{012A}",
- O => "\x{014C}",
- U => "\x{016A}",
- Y => "\x{0232}",
- "\\I" => "\x{012A}",
- "\\i" => "\x{012B}",
- a => "\x{0101}",
- ae => "\x{01E3}",
- e => "\x{0113}",
- g => "\x{1E21}",
- i => "\x{012B}",
- o => "\x{014D}",
- u => "\x{016B}",
- y => "\x{0233}",
+ "`" => { # with grave
+ A => '\x{00C0}',
+ E => '\x{00C8}',
+ I => '\x{00CC}',
+ N => '\x{01F8}',
+ O => '\x{00D2}',
+ U => '\x{00D9}',
+ W => '\x{1E80}',
+ Y => '\x{1Ef2}',
+ "\\I" => '\x{00CC}',
+ "\\i" => '\x{00EC}',
+ a => '\x{00E0}',
+ e => '\x{00E8}',
+ i => '\x{00EC}',
+ n => '\x{01F9}',
+ o => '\x{00F2}',
+ u => '\x{00F9}',
+ w => '\x{1E81}',
+ y => '\x{1EF3}',
},
- "H" => { # with double acute
- O => "\x{0150}",
- U => "\x{0170}",
- o => "\x{0151}",
- u => "\x{0171}",
+ "." => { # with dot above
+ A => '\x{0226}',
+ B => '\x{1E02}',
+ C => '\x{010A}',
+ D => '\x{1E0A}',
+ E => '\x{0116}',
+ F => '\x{1E1E}',
+ G => '\x{0120}',
+ H => '\x{1E22}',
+ I => '\x{0130}',
+ M => '\x{1E40}',
+ N => '\x{1E44}',
+ O => '\x{022E}',
+ P => '\x{1E56}',
+ R => '\x{1E58}',
+ S => '\x{1E60}',
+ T => '\x{1E6a}',
+ W => '\x{1E86}',
+ X => '\x{1E8A}',
+ Y => '\x{1E8E}',
+ Z => '\x{017B}',
+ "\\I" => '\x{0130}',
+ a => '\x{0227}',
+ b => '\x{1E03}',
+ c => '\x{010B}',
+ d => '\x{1E0B}',
+ e => '\x{0117}',
+ f => '\x{1e1f}',
+ g => '\x{0121}',
+ h => '\x{1E23}',
+ m => '\x{1E41}',
+ n => '\x{1E45}',
+ o => '\x{022F}',
+ p => '\x{1E57}',
+ r => '\x{1E59}',
+ s => '\x{1E61}',
+ t => '\x{1E6b}',
+ w => '\x{1E87}',
+ x => '\x{1E8b}',
+ y => '\x{1E8f}',
+ z => '\x{017C}',
},
- "^" => { # with circumflex
- A => "\x{00C2}",
- C => "\x{0108}",
- E => "\x{00CA}",
- G => "\x{011C}",
- H => "\x{0124}",
- I => "\x{00CE}",
- J => "\x{0134}",
- O => "\x{00D4}",
- S => "\x{015C}",
- U => "\x{00DB}",
- W => "\x{0174}",
- Y => "\x{0176}",
- Z => "\x{1E90}",
- "\\I" => "\x{00CE}",
- "\\J" => "\x{0134}",
- "\\i" => "\x{00EE}",
- "\\j" => "\x{0135}",
- a => "\x{00E2}",
- c => "\x{0109}",
- e => "\x{00EA}",
- g => "\x{011D}",
- h => "\x{0125}",
- i => "\x{00EE}",
- j => "\x{0135}",
- o => "\x{00F4}",
- s => "\x{015D}",
- u => "\x{00FB}",
- w => "\x{0175}",
- y => "\x{0177}",
- z => "\x{1E91}",
+ '=' => { # with macron
+ A => '\x{0100}',
+ AE => '\x{01E2}',
+ E => '\x{0112}',
+ G => '\x{1E20}',
+ I => '\x{012A}',
+ O => '\x{014C}',
+ U => '\x{016A}',
+ Y => '\x{0232}',
+ "\\I" => '\x{012A}',
+ "\\i" => '\x{012B}',
+ a => '\x{0101}',
+ ae => '\x{01E3}',
+ e => '\x{0113}',
+ g => '\x{1E21}',
+ i => '\x{012B}',
+ o => '\x{014D}',
+ u => '\x{016B}',
+ y => '\x{0233}',
},
- "`" => { # with grave
- A => "\x{00C0}",
- E => "\x{00C8}",
- I => "\x{00CC}",
- N => "\x{01F8}",
- O => "\x{00D2}",
- U => "\x{00D9}",
- W => "\x{1E80}",
- Y => "\x{1Ef2}",
- "\\I" => "\x{00CC}",
- "\\i" => "\x{00EC}",
- a => "\x{00E0}",
- e => "\x{00E8}",
- i => "\x{00EC}",
- n => "\x{01F9}",
- o => "\x{00F2}",
- u => "\x{00F9}",
- w => "\x{1E81}",
- y => "\x{1EF3}",
+ "~" => { # with tilde
+ A => '\x{00C3}',
+ E => '\x{1EBC}',
+ I => '\x{0128}',
+ N => '\x{00D1}',
+ O => '\x{00D5}',
+ U => '\x{0168}',
+ V => '\x{1E7C}',
+ Y => '\x{1EF8}',
+ "\\I" => '\x{0128}',
+ "\\i" => '\x{0129}',
+ a => '\x{00E3}',
+ e => '\x{1EBD}',
+ i => '\x{0129}',
+ n => '\x{00F1}',
+ o => '\x{00F5}',
+ u => '\x{0169}',
+ v => '\x{1E7D}',
+ y => '\x{1EF9}',
},
- "c" => { # with cedilla
- C => "\x{00C7}",
- D => "\x{1E10}",
- E => "\x{0228}",
- G => "\x{0122}",
- H => "\x{1E28}",
- K => "\x{0136}",
- L => "\x{013B}",
- N => "\x{0145}",
- R => "\x{0156}",
- S => "\x{015E}",
- T => "\x{0162}",
- c => "\x{00E7}",
- d => "\x{1E11}",
- e => "\x{0229}",
- g => "\x{0123}",
- h => "\x{1E29}",
- k => "\x{0137}",
- l => "\x{013C}",
- n => "\x{0146}",
- r => "\x{0157}",
- s => "\x{015F}",
- t => "\x{0163}",
+);
+
+# Accent commands that are alphabetic.
+#
+our %ACCENT_LETTERS = (
+ "H" => { # with double acute
+ O => '\x{0150}',
+ U => '\x{0170}',
+ o => '\x{0151}',
+ u => '\x{0171}',
},
- "d" => { # with dot below
- A => "\x{1EA0}",
- B => "\x{1E04}",
- D => "\x{1E0C}",
- E => "\x{1EB8}",
- H => "\x{1E24}",
- I => "\x{1ECA}",
- K => "\x{1E32}",
- L => "\x{1E36}",
- M => "\x{1E42}",
- N => "\x{1E46}",
- O => "\x{1ECC}",
- R => "\x{1E5A}",
- S => "\x{1E62}",
- T => "\x{1E6C}",
- U => "\x{1EE4}",
- V => "\x{1E7E}",
- W => "\x{1E88}",
- Y => "\x{1Ef4}",
- Z => "\x{1E92}",
- "\\I" => "\x{1ECA}",
- "\\i" => "\x{1ECB}",
- a => "\x{1EA1}",
- b => "\x{1E05}",
- d => "\x{1E0D}",
- e => "\x{1EB9}",
- h => "\x{1E25}",
- i => "\x{1ECB}",
- k => "\x{1E33}",
- l => "\x{1E37}",
- m => "\x{1E43}",
- n => "\x{1E47}",
- o => "\x{1ECD}",
- r => "\x{1E5b}",
- s => "\x{1E63}",
- t => "\x{1E6D}",
- u => "\x{1EE5}",
- v => "\x{1E7F}",
- w => "\x{1E89}",
- y => "\x{1EF5}",
- z => "\x{1E93}",
+ "c" => { # with cedilla
+ C => '\x{00C7}',
+ D => '\x{1E10}',
+ E => '\x{0228}',
+ G => '\x{0122}',
+ H => '\x{1E28}',
+ K => '\x{0136}',
+ L => '\x{013B}',
+ N => '\x{0145}',
+ R => '\x{0156}',
+ S => '\x{015E}',
+ T => '\x{0162}',
+ c => '\x{00E7}',
+ d => '\x{1E11}',
+ e => '\x{0229}',
+ g => '\x{0123}',
+ h => '\x{1E29}',
+ k => '\x{0137}',
+ l => '\x{013C}',
+ n => '\x{0146}',
+ r => '\x{0157}',
+ s => '\x{015F}',
+ t => '\x{0163}',
},
- "h" => { # with hook above
- A => "\x{1EA2}",
- E => "\x{1EBA}",
- I => "\x{1EC8}",
- O => "\x{1ECe}",
- U => "\x{1EE6}",
- Y => "\x{1EF6}",
- "\\I" => "\x{1EC8}",
- "\\i" => "\x{1EC9}",
- a => "\x{1EA3}",
- e => "\x{1EBB}",
- i => "\x{1EC9}",
- o => "\x{1ECF}",
- u => "\x{1EE7}",
- y => "\x{1EF7}",
+ "d" => { # with dot below
+ A => '\x{1EA0}',
+ B => '\x{1E04}',
+ D => '\x{1E0C}',
+ E => '\x{1EB8}',
+ H => '\x{1E24}',
+ I => '\x{1ECA}',
+ K => '\x{1E32}',
+ L => '\x{1E36}',
+ M => '\x{1E42}',
+ N => '\x{1E46}',
+ O => '\x{1ECC}',
+ R => '\x{1E5A}',
+ S => '\x{1E62}',
+ T => '\x{1E6C}',
+ U => '\x{1EE4}',
+ V => '\x{1E7E}',
+ W => '\x{1E88}',
+ Y => '\x{1Ef4}',
+ Z => '\x{1E92}',
+ "\\I" => '\x{1ECA}',
+ "\\i" => '\x{1ECB}',
+ a => '\x{1EA1}',
+ b => '\x{1E05}',
+ d => '\x{1E0D}',
+ e => '\x{1EB9}',
+ h => '\x{1E25}',
+ i => '\x{1ECB}',
+ k => '\x{1E33}',
+ l => '\x{1E37}',
+ m => '\x{1E43}',
+ n => '\x{1E47}',
+ o => '\x{1ECD}',
+ r => '\x{1E5b}',
+ s => '\x{1E63}',
+ t => '\x{1E6D}',
+ u => '\x{1EE5}',
+ v => '\x{1E7F}',
+ w => '\x{1E89}',
+ y => '\x{1EF5}',
+ z => '\x{1E93}',
},
- "k" => { # with ogonek
- A => "\x{0104}",
- E => "\x{0118}",
- I => "\x{012E}",
- O => "\x{01EA}",
- U => "\x{0172}",
- "\\I" => "\x{012E}",
- "\\i" => "\x{012F}",
- a => "\x{0105}",
- e => "\x{0119}",
- i => "\x{012F}",
- o => "\x{01EB}",
- u => "\x{0173}",
+ "h" => { # with hook above
+ A => '\x{1EA2}',
+ E => '\x{1EBA}',
+ I => '\x{1EC8}',
+ O => '\x{1ECe}',
+ U => '\x{1EE6}',
+ Y => '\x{1EF6}',
+ "\\I" => '\x{1EC8}',
+ "\\i" => '\x{1EC9}',
+ a => '\x{1EA3}',
+ e => '\x{1EBB}',
+ i => '\x{1EC9}',
+ o => '\x{1ECF}',
+ u => '\x{1EE7}',
+ y => '\x{1EF7}',
},
- "r" => { # with ring above
- A => "\x{00C5}",
- U => "\x{016E}",
- a => "\x{00E5}",
- u => "\x{016F}",
- w => "\x{1E98}",
- y => "\x{1E99}",
+ "k" => { # with ogonek
+ A => '\x{0104}',
+ E => '\x{0118}',
+ I => '\x{012E}',
+ O => '\x{01EA}',
+ U => '\x{0172}',
+ "\\I" => '\x{012E}',
+ "\\i" => '\x{012F}',
+ a => '\x{0105}',
+ e => '\x{0119}',
+ i => '\x{012F}',
+ o => '\x{01EB}',
+ u => '\x{0173}',
},
- "u" => { # with breve
- A => "\x{0102}",
- E => "\x{0114}",
- G => "\x{011E}",
- I => "\x{012C}",
- O => "\x{014E}",
- U => "\x{016C}",
- "\\I" => "\x{012C}",
- "\\i" => "\x{012D}",
- a => "\x{0103}",
- e => "\x{0115}",
- g => "\x{011F}",
- i => "\x{012D}",
- o => "\x{014F}",
- u => "\x{016D}",
+ "r" => { # with ring above
+ A => '\x{00C5}',
+ U => '\x{016E}',
+ a => '\x{00E5}',
+ u => '\x{016F}',
+ w => '\x{1E98}',
+ y => '\x{1E99}',
},
- "v" => { # with caron
- A => "\x{01CD}",
- C => "\x{010C}",
- D => "\x{010E}",
- DZ => "\x{01C4}",
- E => "\x{011A}",
- G => "\x{01E6}",
- H => "\x{021E}",
- I => "\x{01CF}",
- K => "\x{01E8}",
- L => "\x{013D}",
- N => "\x{0147}",
- O => "\x{01D1}",
- R => "\x{0158}",
- S => "\x{0160}",
- T => "\x{0164}",
- U => "\x{01D3}",
- Z => "\x{017D}",
- "\\I" => "\x{01CF}",
- "\\i" => "\x{01D0}",
- "\\j" => "\x{01F0}",
- a => "\x{01CE}",
- c => "\x{010D}",
- d => "\x{010F}",
- dz => "\x{01C6}",
- e => "\x{011B}",
- g => "\x{01E7}",
- h => "\x{021F}",
- i => "\x{01D0}",
- j => "\x{01F0}",
- k => "\x{01E9}",
- l => "\x{013E}",
- n => "\x{0148}",
- o => "\x{01D2}",
- r => "\x{0159}",
- s => "\x{0161}",
- t => "\x{0165}",
- u => "\x{01D4}",
- z => "\x{017E}",
+ "u" => { # with breve
+ A => '\x{0102}',
+ E => '\x{0114}',
+ G => '\x{011E}',
+ I => '\x{012C}',
+ O => '\x{014E}',
+ U => '\x{016C}',
+ "\\I" => '\x{012C}',
+ "\\i" => '\x{012D}',
+ a => '\x{0103}',
+ e => '\x{0115}',
+ g => '\x{011F}',
+ i => '\x{012D}',
+ o => '\x{014F}',
+ u => '\x{016D}',
},
- "~" => { # with tilde
- A => "\x{00C3}",
- E => "\x{1EBC}",
- I => "\x{0128}",
- N => "\x{00D1}",
- O => "\x{00D5}",
- U => "\x{0168}",
- V => "\x{1E7C}",
- Y => "\x{1EF8}",
- "\\I" => "\x{0128}",
- "\\i" => "\x{0129}",
- a => "\x{00E3}",
- e => "\x{1EBD}",
- i => "\x{0129}",
- n => "\x{00F1}",
- o => "\x{00F5}",
- u => "\x{0169}",
- v => "\x{1E7D}",
- y => "\x{1EF9}",
+ "v" => { # with caron
+ A => '\x{01CD}',
+ C => '\x{010C}',
+ D => '\x{010E}',
+ DZ => '\x{01C4}',
+ E => '\x{011A}',
+ G => '\x{01E6}',
+ H => '\x{021E}',
+ I => '\x{01CF}',
+ K => '\x{01E8}',
+ L => '\x{013D}',
+ N => '\x{0147}',
+ O => '\x{01D1}',
+ R => '\x{0158}',
+ S => '\x{0160}',
+ T => '\x{0164}',
+ U => '\x{01D3}',
+ Z => '\x{017D}',
+ "\\I" => '\x{01CF}',
+ "\\i" => '\x{01D0}',
+ "\\j" => '\x{01F0}',
+ a => '\x{01CE}',
+ c => '\x{010D}',
+ d => '\x{010F}',
+ dz => '\x{01C6}',
+ e => '\x{011B}',
+ g => '\x{01E7}',
+ h => '\x{021F}',
+ i => '\x{01D0}',
+ j => '\x{01F0}',
+ k => '\x{01E9}',
+ l => '\x{013E}',
+ n => '\x{0148}',
+ o => '\x{01D2}',
+ r => '\x{0159}',
+ s => '\x{0161}',
+ t => '\x{0165}',
+ u => '\x{01D4}',
+ z => '\x{017E}',
},
);
-
+#
our %GERMAN = ( # for package `german'/`ngerman'
'"a' => 'ä',
'"A' => 'Ä',
@@ -465,9 +601,9 @@ our %GERMAN = ( # for package `german'/`ngerman'
"\"'" => '“',
'"<' => '«',
'">' => '»',
- '"-' => "\x{AD}", # soft hyphen
- '""' => "\x{200B}", # zero width space
- '"~' => "\x{2011}", # non-breaking hyphen
+ '"-' => '\x{AD}', # soft hyphen
+ '""' => '\x{200B}', # zero width space
+ '"~' => '\x{2011}', # non-breaking hyphen
'"=' => '-',
'\glq' => '‚', # left german single quote
'\grq' => '‘', # right german single quote
@@ -476,17 +612,12 @@ our %GERMAN = ( # for package `german'/`ngerman'
'\dq' => '"',
);
-
-# for {\MARKUP ...} and \textMARKUP{...}
-our @MARKUPS = ( qw( bf cal em it rm sc sl small tt ) );
-
-
1;
__END__
=pod
-=encoding utf-8
+=encoding UTF-8
=head1 NAME
@@ -494,7 +625,7 @@ LaTeX::ToUnicode::Tables - Character tables for LaTeX::ToUnicode
=head1 VERSION
-version 0.11
+version 0.53
=head1 CONSTANTS
@@ -503,29 +634,70 @@ version 0.11
Standard TeX character sequences (not \commands) which need to be
replaced: C<---> with U+2014 (em dash), etc. Includes: em dash, en
dash, inverted exclamation, inverted question, left double quote, right
-double quote, left single quote, right single quote.
+double quote, left single quote, right single quote. They are replaced
+in that order.
+
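+For example (an illustrative call, not from the module's tests):
+
+  convert("``quoted''---done") eq '“quoted”—done';  # curly quotes, em dash
+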
+=head2 %MARKUPS
+
+Hash where keys are the names of formatting commands like C<\tt>,
+without the backslash, namely: C<bf cal em it rm sc sf sl small tt>. Values
+are the obvious HTML equivalent where one exists, given as the tag name
+without the angle brackets: C<b em i tt>. Otherwise the value is the empty
+string.
+
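+A couple of entries, copied from the table source above, with the HTML
+they imply when the C<html> option is set:
+
+  $LaTeX::ToUnicode::Tables::MARKUPS{bf} eq 'b';  # \textbf{x} -> <b>x</b>
+  $LaTeX::ToUnicode::Tables::MARKUPS{sf} eq '';   # \textsf{x} -> x, no tag
+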
+=head2 %ARGUMENT_COMMANDS
+
+Hash where keys are the names of TeX commands taking arguments that we
+handle, without the backslash, such as C<enquote>. Each value is a
+reference to a list of two strings, the first being the text to insert
+before the argument, the second being the text to insert after. For
+example, for C<enquote> the value is C<["`", "'"]>. The inserted text is
+subject to further replacements.
+
+Only three such commands are currently handled: C<\emph>, C<\enquote>,
+and C<\path>.
+
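+For example (an illustrative call, not from the module's tests), the
+inserted C<`> and C<'> around an C<\enquote> argument are later picked
+up by the ligature table:
+
+  convert('\enquote{word}') eq '‘word’';
+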
+=head2 %CONTROL_SYMBOLS
-=head2 @SPECIALS
+A hash where the keys are non-alphabetic C<\command>s (without the
+backslash), other than accents and special cases. These don't take
+arguments. Although some of these have Unicode equivalents, such as the
+C<\,> thin space, it seems better to keep the output as simple as
+possible; spacing tweaks in the TeX aren't usually desirable in plain
+text or HTML.
-Most of TeX's metacharacters, i.e., those for which C<\I<char>> typesets
-I<char>: C<$ % & _ { } #>. TeX has other special characters which are not
-included here, for instance: C<\ ^ ~>.
+The values are single-quoted strings C<'\x{...}'>, not double-quoted
+literal characters C<"\x{...}">, to ease future parsing of the
+TeX/text/HTML.
-=head2 %COMMANDS
+This hash is necessary because TeX's parsing rules for control symbols
+are different from control words: no space or other token is needed to
+terminate control symbols.
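+
+For example (illustrative calls, not from the module's tests):
+
+  convert('Kernighan \& Ritchie') eq 'Kernighan & Ritchie';
+  convert('a\,b') eq 'ab';   # the \, thin space maps to nothing
+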
-Names of argument-less commands like C<\LaTeX> as keys.
-Values are the replacements.
+=head2 %CONTROL_WORDS
+
+Keys are names of argument-less commands, such as C<\LaTeX> (without the
+backslash). Values are the replacements, often the empty string.
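+
+For example (an illustrative call, not from the module's tests):
+
+  convert('\TeX{} and \LaTeX') eq 'TeX and LaTeX';
+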
=head2 %SYMBOLS
-Predefined escape commands for extended characters.
+Keys are the commands for extended characters, such as C<\AA> (without
+the backslash.)
+
+=head2 %ACCENT_SYMBOLS
+
+Two-level hash of accented characters like C<\'{a}>. The keys of this
+hash are the accent symbols (without the backslash), such as C<`> and
+C<'>. The corresponding values are hash references where the keys are
+the base letters and the values are single-quoted C<'\x{....}'> strings.
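+
+For example (value copied from the table source above):
+
+  $LaTeX::ToUnicode::Tables::ACCENT_SYMBOLS{'"'}{a} eq '\x{00E4}';  # ä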
+
+=head2 %ACCENT_LETTERS
-=head2 %ACCENTS
+Same as %ACCENT_SYMBOLS, except the keys are accents that are
+alphabetic, such as C<\c> (without the backslash as always).
-Two-level hash of accented characters like C<\'{a}>. The keys of this hash
-are the accent symbols, e.g C<`>, C<"> or C<'>. The corresponding values are
-references to hashes, where the keys are the base letters and the values are
-the decoded characters. As an example, C<< $ACCENTS{'`'}->{a} eq 'à' >>.
+As with control sequences, it's necessary to distinguish symbols and
+alphabetic commands because of the different parsing rules.
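+
+For example (illustrative calls, not from the module's tests):
+
+  convert('\c c') eq 'ç';    # U+00E7
+  convert('\cc')  eq '\cc';  # left alone: \cc is not \c{c}
+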
=head2 %GERMAN
@@ -535,22 +707,19 @@ or C<"`"> (german left quote). Note the missing backslash.
The keys of this hash are the literal character sequences.
-=head2 @MARKUPS
-
-Command names of formatting commands like C<\tt>, namely:
-C<bf cal em it rm sc sl small tt>.
-
=head1 AUTHOR
-Gerhard Gossen <gerhard.gossen@googlemail.com> and
-Boris Veytsman <boris@varphi.com>
+Gerhard Gossen <gerhard.gossen@googlemail.com>,
+Boris Veytsman <boris@varphi.com>,
+Karl Berry <karl@freefriends.org>
+
L<https://github.com/borisveytsman/bibtexperllibs>
=head1 COPYRIGHT AND LICENSE
-This software is copyright (c) 2010-2015 by Gerhard Gossen and Boris Veytsman
+Copyright 2010-2023 Gerhard Gossen, Boris Veytsman, Karl Berry
This is free software; you can redistribute it and/or modify it under
-the same terms as the Perl 5 programming language system itself.
+the same terms as the Perl5 programming language system itself.
=cut