author     Norbert Preining <norbert@preining.info>   2020-10-12 03:04:00 +0000
committer  Norbert Preining <norbert@preining.info>   2020-10-12 03:04:00 +0000
commit     0ce40abb18ec02ec6fd6bcc5f21612c88daa7578 (patch)
tree       416289fe1448873fd8ca33051f50ad85bffa8aaa /support/splint
parent     fdb18507cd80dc17f5a5256153d34668b4f4e61c (diff)
CTAN sync 202010120303
Diffstat (limited to 'support/splint')
-rw-r--r--  support/splint/INSTALL | 22
-rw-r--r--  support/splint/Makefile | 41
-rw-r--r--  support/splint/README | 14
-rw-r--r--  support/splint/TODO | 9
-rw-r--r--  support/splint/VERSION | 2
-rw-r--r--  support/splint/cweb/Makefile | 165
-rw-r--r--  support/splint/cweb/bo.w | 1735
-rw-r--r--  support/splint/cweb/bs.w | 69
-rw-r--r--  support/splint/cweb/checklists.w | 82
-rw-r--r--  support/splint/cweb/common.w | 66
-rw-r--r--  support/splint/cweb/fk.w | 63
-rw-r--r--  support/splint/cweb/fo.w | 702
-rw-r--r--  support/splint/cweb/lo.w | 242
-rw-r--r--  support/splint/cweb/mkeparser.w | 24
-rw-r--r--  support/splint/cweb/mkscanner.w | 11
-rw-r--r--  support/splint/cweb/np.w | 141
-rw-r--r--  support/splint/cweb/philosophy.w | 306
-rw-r--r--  support/splint/cweb/references.w | 31
-rw-r--r--  support/splint/cweb/so.w | 835
-rw-r--r--  support/splint/cweb/splint.w | 1839
-rw-r--r--  support/splint/cweb/ssffo.w | 25
-rw-r--r--  support/splint/doc/ldman.pdf | bin 633311 -> 718042 bytes
-rw-r--r--  support/splint/doc/splint.pdf | bin 829494 -> 1141608 bytes
-rw-r--r--  support/splint/examples/count/count.sty | 16
-rw-r--r--  support/splint/examples/expression/Makefile | 30
-rw-r--r--  support/splint/examples/expression/etoks.sty | 16
-rw-r--r--  support/splint/examples/expression/expression.sty | 36
-rw-r--r--  support/splint/examples/expression/expression.w | 34
-rw-r--r--  support/splint/examples/ld/Makefile | 72
-rw-r--r--  support/splint/examples/ld/ldexample.hw | 77
-rw-r--r--  support/splint/examples/ld/ldgram.w | 202
-rw-r--r--  support/splint/examples/ld/ldgramo.w | 4
-rw-r--r--  support/splint/examples/ld/ldint.sty | 27
-rw-r--r--  support/splint/examples/ld/ldlex.w | 73
-rw-r--r--  support/splint/examples/ld/ldlexo.w | 40
-rw-r--r--  support/splint/examples/ld/ldman.w | 345
-rw-r--r--  support/splint/examples/ld/ldnp.w | 71
-rw-r--r--  support/splint/examples/ld/ldnump.w | 4
-rw-r--r--  support/splint/examples/ld/ldtexlex.sty | 75
-rw-r--r--  support/splint/examples/ld/ldunion.sty | 355
-rw-r--r--  support/splint/examples/ld/lstokenset.sty | 17
-rw-r--r--  support/splint/examples/ld/ltokenset.sty | 113
-rw-r--r--  support/splint/examples/symbols/Makefile | 40
-rw-r--r--  support/splint/examples/symbols/slimbo.sty | 16
-rw-r--r--  support/splint/examples/symbols/symbols.w | 292
-rw-r--r--  support/splint/examples/symbols/symfm.sty | 64
-rw-r--r--  support/splint/examples/symbols/symmap.sty | 71
-rw-r--r--  support/splint/examples/symbols/symtoks.sty | 16
-rw-r--r--  support/splint/examples/symbols/symtricks.sty | 23
-rw-r--r--  support/splint/examples/types/Makefile | 30
-rw-r--r--  support/splint/examples/types/basic.sty | 4
-rw-r--r--  support/splint/examples/types/test.sty | 7
-rw-r--r--  support/splint/examples/types/tree.sty | 2
-rw-r--r--  support/splint/examples/xxpression/Makefile | 53
-rw-r--r--  support/splint/examples/xxpression/xtoks.sty | 17
-rw-r--r--  support/splint/examples/xxpression/xxpression.sty | 23
-rw-r--r--  support/splint/examples/xxpression/xxpression.w | 52
-rw-r--r--  support/splint/examples/xxpression/xymmap.sty | 10
-rw-r--r--  support/splint/extras/texmf/macros/protcode.tex | 83
-rw-r--r--  support/splint/makefile.inc | 72
-rw-r--r--  support/splint/makefile.loc | 4
-rwxr-xr-x  support/splint/scripts/bindx.pl | 216
-rwxr-xr-x  support/splint/scripts/brack.pl | 145
-rwxr-xr-x  support/splint/scripts/cslist.pl | 77
-rwxr-xr-x  support/splint/scripts/misccw.pl | 94
-rwxr-xr-x  support/splint/scripts/unline.pl | 69
-rw-r--r--  support/splint/tex/btokenset.sty | 21
-rw-r--r--  support/splint/tex/dcols.sty | 84
-rw-r--r--  support/splint/tex/flex.sty | 683
-rw-r--r--  support/splint/tex/fretokenset.sty | 21
-rw-r--r--  support/splint/tex/ftokenset.sty | 72
-rw-r--r--  support/splint/tex/gindex.sty | 522
-rw-r--r--  support/splint/tex/grabstates.sty | 67
-rw-r--r--  support/splint/tex/hext.sty | 346
-rw-r--r--  support/splint/tex/limbo.sty | 1142
-rw-r--r--  support/splint/tex/noweb.sty | 399
-rw-r--r--  support/splint/tex/stokenset.sty | 25
-rw-r--r--  support/splint/tex/trt1.sty | 4
-rw-r--r--  support/splint/tex/xarithm.sty | 2
-rw-r--r--  support/splint/tex/yxunion.sty | 2
-rw-r--r--  support/splint/tex/yy.sty | 29
-rw-r--r--  support/splint/tex/yybootstrap.sty | 69
-rw-r--r--  support/splint/tex/yyboth.sty | 181
-rw-r--r--  support/splint/tex/yycommon.sty | 522
-rw-r--r--  support/splint/tex/yydebug.sty | 148
-rw-r--r--  support/splint/tex/yyfaststack.sty | 52
-rw-r--r--  support/splint/tex/yyinit.sty | 614
-rw-r--r--  support/splint/tex/yyinput.sty | 121
-rw-r--r--  support/splint/tex/yymisc.sty | 355
-rw-r--r--  support/splint/tex/yynested.sty | 15
-rw-r--r--  support/splint/tex/yyparse.sty | 68
-rw-r--r--  support/splint/tex/yypretty.sty | 25
-rw-r--r--  support/splint/tex/yystype.sty | 112
-rw-r--r--  support/splint/tex/yytexlex.sty | 652
-rw-r--r--  support/splint/tex/yyunion.sty | 2024
-rw-r--r--  support/splint/tex/yyxunion.sty | 33
96 files changed, 13104 insertions, 4817 deletions
diff --git a/support/splint/INSTALL b/support/splint/INSTALL
index 11e85736ed..033877aba0 100644
--- a/support/splint/INSTALL
+++ b/support/splint/INSTALL
@@ -1,3 +1,19 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
There is no specific installation procedure to follow for SPLinT, although
there are a few dependencies to keep in mind. To build all the tools
in the package, you will need the usual suite of build tools (gcc,
@@ -18,8 +34,8 @@ to pay for the arrogance and shortsightedness of bison maintainers and
developers. Therefore, as of this release, this package is only
intended to be used with bison version 2.7 or lower. So far, the
latest release of flex (2.5.39) is still compatible with SPLinT. In
-case the local version of bison (and possibly, flex, in the future) is
-incompatible with splint, it is recommended that a local version is
+case the system version of bison (and possibly, flex, in the future) is
+incompatible with SPLinT, it is recommended that a local version is
compiled and used. For this purpose, make variables BISON_ROOT and
FLEX_ROOT can be set to the appropriate locations in makefile.loc. The
appropriate versions of bison and flex can be downloaded from
@@ -34,7 +50,7 @@ The arrays (yyprhs and yyrhs) only affect the error reporting and the
(following the route taken by bison itself), however, the latter one
is a much more serious issue. If one is not using symbolic names for
grammar terms, the arrays can be ignored. The approach taken by the
-curent version of bison is to use the state stack and yystos, yyr1,
+current version of bison is to use the state stack and yystos, yyr1,
and yyr2 arrays instead. Note that this is somewhat inconsistent with
the purpose of the debugging output since the error reporting routines
rely on the correct state of the state stack (yyssa) rather than on a
diff --git a/support/splint/Makefile b/support/splint/Makefile
index 05209cf6f8..b7f0269350 100644
--- a/support/splint/Makefile
+++ b/support/splint/Makefile
@@ -1,10 +1,29 @@
-SPLINT_ROOT = $(shell pwd)
-SPLINT_EXAMPLES_DIRS = expression xxpression symbols ld
-
-DO_SUBMAKE = for dir in ${SPLINT_EXAMPLES_DIRS}; do cd ${SPLINT_ROOT}/examples/$$dir && ${MAKE} $(1); done
-
-include ${SPLINT_ROOT}/makefile.inc
-include ${SPLINT_ROOT}/makefile.loc
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+SPLINT_EXAMPLE_OTHER = $(if $BISON_IS_CRIPPLED,,symbols xxpression)
+
+SPLINT_EXAMPLES_DIRS = expression ld ${SPLINT_EXAMPLE_OTHER}
+SPLINT_EXAMPLES_DIRS_ALL = expression ld symbols xxpression
+
+DO_SUBMAKE = for dir in ${SPLINT_EXAMPLES_DIRS}; do cd ${SPLINT_ROOT}examples/$$dir && ${MAKE} $(1); done
+CLEAN_SUBMAKE = for dir in ${SPLINT_EXAMPLES_DIRS_ALL}; do cd ${SPLINT_ROOT}examples/$$dir && ${MAKE} $(1); done
+
+include makefile.inc
+include makefile.loc
# output a list of all control sequences defined in the package
@@ -12,10 +31,10 @@ lists: tex/*.sty
perl scripts/cslist.pl $^ > cseqs.lst
manual:
- cd ${SPLINT_ROOT}/cweb && ${MAKE} splint.pdf
+ cd ${SPLINT_ROOT}cweb && ${MAKE} splint.pdf
docs:
- cd ${SPLINT_ROOT}/cweb && ${MAKE} splint.pdf && ${MAKE} ssffo.pdf
+ cd ${SPLINT_ROOT}cweb && ${MAKE} splint.pdf && ${MAKE} ssffo.pdf
$(call DO_SUBMAKE,docs)
# clean will erase all automatically generated files in the current directory
@@ -28,12 +47,12 @@ clean: clean_core
mostlyclean:
-cd cweb && ${MAKE} clean_temp && rm -f ctablesout b?out ltout smallp_out \
smalll_out lstabout bo.c np.c
- $(call DO_SUBMAKE,mostlyclean)
+ $(call CLEAN_SUBMAKE,mostlyclean)
# distclean will erase all automatically generated files
distclean: clean
rm -f splint.tar.bz2
cd cweb && ${MAKE} clean
- $(call DO_SUBMAKE,clean)
+ $(call CLEAN_SUBMAKE,clean)
diff --git a/support/splint/README b/support/splint/README
index 9a0111b681..9665ca2e7c 100644
--- a/support/splint/README
+++ b/support/splint/README
@@ -1,6 +1,6 @@
To make the licensing part clear, SPLinT is GPL v.~3:
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -35,19 +35,25 @@ cweb/* - executables and documentation:
common.w - common code for table generators
bo.w - parser for the bison grammar
lo.w - lexer for the bison grammar
+ fo.w - parser for the flex grammar
+ so.w - lexer for the flex grammar
np.w - scanner and parser for token names
ssffo.w - lexer for state grabbing
mkeparser.w - parser output `driver'
mkscanner.w - lexer output `driver'
splint.w - documentation
+ philosophy.w - rants
+ checklists.w - checklists to follow when modifying SPLinT
tex/* - \TeX\ macros
yy*.sty, yx*.sty, flex.sty - automata machinery
trt1.sty - `\TeX\ runtime': temporary register definitions
xarithm.sty - expandable arithmetic for parsing macros
grabstates.sty - macros for state grabbing
- ?tokenset.sty - token typesetting definitions
+ *tokenset.sty - token typesetting definitions
+ gindex.sty -generic indexing macros
dcols.sty - multiple column output
+ noweb.sty - noweb style references (local within a page)
limbo.sty - limbo section macros
frontmatter.sty - macros to typeset the cover page
@@ -56,8 +62,8 @@ examples/* - various examples:
expression - a simple expression parser built with the package.
ld - a typesetting parser for the \GNU\ linker, ld, with a very
detailed implementation manual.
- symbols - a demonstration of the features of the bison parser
- included in the package.
+ symbols - a demonstration of the features of the bison and flex
+ parsers included in the package.
types - expandable arithmetic (e.g. addition and subtraction
macros that can be used inside \edef), tree data structure;
incomplete and slow, merely a proof of concept; only
diff --git a/support/splint/TODO b/support/splint/TODO
index 42c0b2b51f..95e44e9459 100644
--- a/support/splint/TODO
+++ b/support/splint/TODO
@@ -10,9 +10,16 @@ o Change generic macro names (such as \table, \symstream, etc) to something more
have to wait)
* Rewrite yytexlex.sty for better \TeX\ pretty printing macros (* the new macros are still
not ideal but are very unlikely to change in the near future)
-o Rewrite limbo.sty to provide a more logical structure
+ o Change the name of the namespace (currently 'index') used by the \TeXx macros to avoid
+ confusion with the special '[index]' namespace used by the parser for inserted symbols
+ (like an empty right hand side and implicit terms)
+ o Develop a 'loose' parsing scheme for \TeX\ macros by requiring some macros to
+ follow stricter rules
+* Rewrite limbo.sty to provide a more logical structure (* several changes have been implemented)
* Add indexing features to \TeXx macros (* there is a mechanism to do this)
o Change CWEB macros so that \pdfoutput=0 does not cause conflicts (this is a bug in cwebmac.tex)
+o Change the support for named references so that the driver generates references while outputting
+ the 'action switch' (using code like stack_depth = $$ - $[name])
... very remote future
diff --git a/support/splint/VERSION b/support/splint/VERSION
index 993f095645..9084fa2f71 100644
--- a/support/splint/VERSION
+++ b/support/splint/VERSION
@@ -1 +1 @@
-1.05
+1.1.0
diff --git a/support/splint/cweb/Makefile b/support/splint/cweb/Makefile
index ec9973fb66..1c3b4d6bc3 100644
--- a/support/splint/cweb/Makefile
+++ b/support/splint/cweb/Makefile
@@ -1,30 +1,86 @@
-SPLINT_ROOT = $(shell pwd)/..
-
-include ${SPLINT_ROOT}/makefile.inc
-
-all: ${SPLINT_PTABLES} ${SPLINT_LTABLES}
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+include ../makefile.inc
+
+all: ${SPLINT_PTABLES} ${SPLINT_LTABLES}
b%out: mkeparser.c b%.c
${CC} ${BISON_STATE} -DPARSER_FILE=\"$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
-b%.yy: bo.x
- ${CTANGLE} $<
+b%.yy: bo.x
+ ${CTANGLE} $<
-%yytab.tex: b%out
- ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+%yytab.tex: b%out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
-ltab.tex: ltout
- ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+ltab.tex: ltout
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
-ltout: mkscanner.c lo_states.h lo.c
- ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
+ltout: mkscanner.c lo_states.h lo.c
+ ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
ssffo.ll lo.ll: \
-%.ll: %.x
- ${CTANGLE} $< && rm $(patsubst %.x, %.c, $^)
+%.ll: %.x
+ ${CTANGLE} $< && rm $(patsubst %.x, %.c, $^)
+
+fil.ll: so.x
+ ${CTANGLE} $< && rm $(patsubst %.x, %.c, $^)
+
+fip.yy rep.yy rap.yy \
+ddp.yy: fo.x
+ ${CTANGLE} $< && rm $(patsubst %.x, %.c, $^)
+
+# flex parser
+
+fil.c: fil.l
+ ${FLEX} -o $@ $<
-lo.c: lo.l
- ${FLEX} -o $@ $<
+fil_out: mkscanner.c fil_states.h fil.c
+ ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
+
+fil_states.h: so.tex lstab.tex byytab.tex
+ ${PDFTEX} $<
+
+filtab.tex: fil_out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions --optimize-tables $@
+
+fip.c rep.c rap.c \
+ddp.c:%.c: %.y
+ ${BISON} -o $@ $<
+
+fip_out rap_out \
+ddp_out rep_out:%_out: mkeparser.c %.c
+ ${CC} ${BISON_STATE} -DPARSER_FILE=\"$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
+
+fiptab.tex raptab.tex \
+ddptab.tex reptab.tex:%tab.tex: %_out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions --optimize-tables $@
+
+so.tex: so.x
+ ${CWEAVE} $<
+
+fo.tex: fo.x
+ ${CWEAVE} $<
+
+fo.tok: fo.tex ltab.tex byytab.tex
+ ${TEX} ${MODEBOOTSTRAP} \\input $<
+
+lo.c: lo.l
+ ${FLEX} -o $@ $<
mkscanner.c mkeparser.c: \
%.c: %.w
@@ -35,25 +91,30 @@ mkscanner.c mkeparser.c: \
smallp_out: mkeparser.c small_parser.c
${CC} ${BISON_STATE} -DPARSER_FILE=\"$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
-smalll_out: mkscanner.c small_lexer.c
- ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
+smalll_out: mkscanner.c small_lexer.c
+ ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
-small_tab.tex: smallp_out
- ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+small_tab.tex: smallp_out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
-small_dfa.tex: smalll_out
- ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+small_dfa.tex: smalll_out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
-small_parser.yy small_lexer.ll: np.x
+small_parser.yy \
+small_lexer.ll: np.x
@${CTANGLE} $<
-bo.tex: bo.x
- ${CWEAVE} -x $<
+bo.tex: bo.x
+ -${CWEAVE} -x $<
splint.tex \
splint.idx \
-splint.scn: splint.w bo.x lo.x np.x common.w bs.w fk.w philosophy.w references.w
- ${CWEAVE} $<
+splint.scn: splint.x bo.x lo.x fo.x so.x np.x common.w bs.w \
+ fk.w philosophy.w checklists.w references.w alphas.hx
+ -${CWEAVE} $<
+
+alphas.hx:
+ ${MISCCW} --alpha-list --alpha-length=1 $@
ssffo.tex \
ssffo.idx ssffo.scn: ssffo.x
@@ -70,46 +131,56 @@ ssffo.dvi: %.dvi: ${SPLINT_DOC_PREREQS_XREF}
splint.gdx: %.gdx: ${SPLINT_DOC_PREREQS_XREF}
@echo "Making the bison and TeX indices ..."
- ${TEX} $*.tex
+ ${PDFTEX} $*.tex
+
+%.gdy: %.gdx
+
+splint.gdy: splint.gdx
+ ${BINDX} --fine $^ $@
-splint.pdf: %.pdf: ${SPLINT_DOC_PREREQS_XREF} %.gdy
+splint.xxr: %.xxr: ${SPLINT_DOC_PREREQS_XREF}
+ @echo "Generating the cross references ..."
+ ${PDFTEX} $*.tex
+
+splint.pdf: %.pdf: ${SPLINT_DOC_PREREQS_XREF} %.gdy %.xxr
${PDFTEX} \\input $*.tex && touch $*.gdy && touch $*.pdf
splint.dvi: %.dvi: ${SPLINT_DOC_PREREQS_XREF} %.gdy
- ${TEX} $*.tex && touch $*.gdy && touch $*.dvi
+ ${TEX} $*.tex && touch $*.gdy && touch $*.dvi && rm $*.xxr && rm $*.ftn
-${SPLINT_ROOT}/tex/btokenset.sty: # stupid make weirdness
- @
+${SPLINT_ROOT}tex/btokenset.sty: # stupid make weirdness
+ @
# state parsing
-lstabout: mkscanner.c ssffo.c
- ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
+lstabout: mkscanner.c ssffo.c
+ ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
-lstab.tex: lstabout
- ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+lstab.tex: lstabout
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
-lo.tex: lo.x
- ${CWEAVE} $<
+lo.tex: lo.x
+ ${CWEAVE} $<
-lo_states.h: lo.tex lstab.tex byytab.tex
- ${PDFTEX} $<
+lo_states.h: lo.tex lstab.tex byytab.tex
+ ${PDFTEX} $<
# clean will erase all automatically generated files in the current directory
-clean: clean_core
- -rm -f ctablesout b?out ltout smallp_out \
- smalll_out lstabout
+clean: clean_core
+ -rm -f ctablesout b?out ltout smallp_out \
+ smalll_out lstabout fil_out fip_out rep_out rap_out ddp_out
-include ${SPLINT_ROOT}/makefile.loc
+include ${SPLINT_ROOT}makefile.loc
# since bg.yy is not an intermediate file in examples/symbols/Makefile, repeated 'make all'
# remakes bg.yy thereby forcing make to update byytab.tex, etc., which results in remaking
# of bo.tok, lo.tex, eventually leading to remaking of splint.pdf;
# the special target below tells make to treat bg.yy as if it were not an intermediate file
-.PRECIOUS: bg.yy bg.y
+.PRECIOUS: %.yy bo.tok fo.tok %.ll b%.y splint.gdx splint.xxr
# the files below appear as targets but are really intermediaries for other files
-.INTERMEDIATE: smallp_out smalll_out lstabout ltout splint.gdx
+.INTERMEDIATE: smallp_out smalll_out lstabout ltout fil_out \
+ fip_out rep_out rap_out ddp_out splint.gdx
diff --git a/support/splint/cweb/bo.w b/support/splint/cweb/bo.w
index 2185f2f65e..fe7bf59f52 100644
--- a/support/splint/cweb/bo.w
+++ b/support/splint/cweb/bo.w
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% Copyright 2002-2014 Free Software Foundation, Inc.
% This file is part of SPLinT
%
@@ -15,1433 +15,70 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\input limbo.sty
-\input frontmatter.sty
-\def\optimization{5}
-\input yy.sty
-% multi-column output
-\input dcols.sty
-
-\let\hostparsernamespace\mainnamespace % the namespace where tokens are looked up
- % for typesetting purposes
-\let\currentparsernamespace\parsernamespace
- \let\parsernamespace\mainnamespace
- \let\currenttokeneq\tokeneq
- %\def\tokeneq#1#2{\prettytoken{#1}}
- \let\tokeneq\prettywordpair@@
- \let\optstrextra\optstrextraesc
- \input bo.tok % re-use token equivalence table to set the typesetting of tokens
- \let\tokeneq\currenttokeneq
- \input btokenset.sty
- % index entries
- \let\parsernamespace\indexpseudonamespace
- \prettywordpair{emptyrhs}{$\circ$ {\rm(empty rhs)}}%
- \prettywordpair{inline_action}{$\diamond$ {\rm(inline action)}}%
- \prettywordpair{TOKEN}{{\tt TOKEN} {\rm(example)}}%
- \prettywordpair{token}{{\tt "token"} {\rm(example)}}%
-\let\parsernamespace\currentparsernamespace
-
-\immediate\openout\exampletable=\jobname.exl
-
-\def\nontitle#1{{\ttl #1}}
-\def\cite[#1]{%
- \def\next{#1}\setbox0=\hbox{l}%
- [\ifx\next\empty$\,$\hbox{\vrule width\wd0 height\ht0 depth\dp0}$\,$\else \locallink{#1bibref}#1\endlink\fi]%
-}
-
-\let\oldN\N
-\let\N\textN
-\let\M\textM
-
-\defreserved{Y}{\.{Y}}
-\showlastactiontrue
-
-@**Introduction.
-\setupfootnotes
-\splint\footnote{I was tempted to call the package {\tt ParLALRgram}
-which stands for Parsing {\sc LALR} Grammars or {\tt PinT} for
-`Parsing in \TeX' but both sounded too generic.} (Simple Parsing and
-Lexing in \TeX, or, following the great GNU
-tradition of creating recursive names, \splint\ Parses Languages
-in \TeX) is a system (or
-rather a m\'elange of systems) designed to
-facilitate developing parsing macros in \TeX\ and (to a lesser
-degree) documenting parsers written in other languages. As
-an application, a parser for \bison\ input file syntax has been
-developed, along with a macro collection that makes it possible to
-design and pretty print \bison\ grammars using \CWEB.
-
-Developing software in \CWEB\ involves two programs. The first of these is
-\CTANGLE\ that outputs the actual code, intended to be in
-\Cee. In reality, \CTANGLE\ cares very little about the language it
-produces. Exceptions are \Cee\ comments and |@[#line@]| directives that might
-confuse lesser software, although \bison\ is all too happy to swallow them
-(there are also some \Cee\ specific constructs that \CTANGLE\ tries to
-recognize). \CTANGLE's main function is to rearrange the text of the
-program as written by the programmer (in a way that, hopefully,
-emphasizes the internal logic of the code) into an appropriate
-sequence (e.g.~all variable declarations must textually precede their
-use). All that is required to adapt \CTANGLE\ to produce \bison\
-output is some very rudimentary post- and pre-processing.
-
-Our main concern is thus \CWEAVE\ that not only pretty prints the
-program but also creates an index, cross-references all the
-sections, etc. Getting \CWEAVE\ to pretty print a language other than
-\Cee\ requires some additional attention. A true digital warrior would
-probably try to decipher \CWEAVE's output `in the raw' but, alas, my
-WebFu is not that strong. The loophole comes in the form of a rarely
-(for a good reason) used \CWEB\ command: the verbatim (\.{@@=...@@>})
-output. The material to be output by this construct undergoes minimal
-processing and is put inside \.{\\vb\{}$\ldots$\.{\}}. All that is
-needed now is a way to process this virtually straight text inside \TeX.
-
-@*1 Using the \bison\ parser.
-The process of using \splint\ for writing parsing macros in \TeX\ is
-treated in considerable detail later in this document. A shorter
-(albeit somewhat outdated but still applicable) version of this
-process is outlined in \cite[Sh]. We begin,
-instead, by explaining how one such parser can be used to pretty print a
-\bison\ grammar. Following the convention mentioned above and putting
-all non-\Cee\ code inside \CWEAVE's verbatim blocks, consider the
-following (meaningless) code fragment. The fragment contains a mixture
-of \Cee\ and \bison\ code, the former appears outside of the verbatim blocks.
-\begindemo
-^@@= non_terminal: @@>
-^@@= term.1 term.2 {@@> a = b; @@=}@@>
-^@@= **H term.3 other_term {@@> $$ = $1; @@=}@@>
-^@@= **H still more terms {@@> f($1); @@=}@@>
-^@@= ; @@>
-\enddemo
-The fragment above will appear as (the output of \CTANGLE\ can be
-examined in \.{sill.y})
-@<A silly example@>=
-@G
-non_terminal:
- term.1 term.2 {@> a = b; @=}
-| term.3 other_term {@> $$ = $1; @=}
-| still more terms {@> f($1); @=}
-;
-@g
-
-@ $\ldots$ if the syntax is correct.
-In case it is a bit off, the parser will give up and
-you will see a different result. The code in the fragment below is easily
-recognizable, and some parts of it (all of \Cee\ code, in fact) are
-still pretty printed in \CWEAVE. Only the verbatim portion is left
-unprocessed.
-@<A silly example@>=
-@G
-whoops
- term.1 term.2 {@>@+ a = b; @+@=}
-| term.3 other_term {@>@+ $$ = $1; @+@=}
-| still more terms {@>@+ f($1); @+@=}
-;
-@g
-
-@ The \TeX\ header that makes such output possible is quite plain. In this case
-(i.e.\ this very file) it begins as
-\begindemo
-^\input limbo.sty
-^\input frontmatter.sty
-^\input yy.sty
-\nooutput
-\enddemo
-The first two lines are presented here merely for completeness: there is
-no parsing-relevant code in them. The line that
-follows loads the macros that implement the parsing and scanning
-machinery. This is enough to set up all the basic
-mechanisms used by the parsing and lexing macros. The rest of the header
-provides a few definitions to fine tune the typesetting of
-grammar productions. It starts with
-\begindemo
-^\let\currentparsernamespace\parsernamespace
-^ \let\parsernamespace\mainnamespace
-^ \let\currenttokeneq\tokeneq
-^ \def\tokeneq#1#2{\prettytoken{#1}}
-^ \input bo.tok % re-use token equivalence table to set the typesetting of tokens
-^ \let\tokeneq\currenttokeneq
-^ \input btokenset.sty
-\nooutput
-\enddemo
-We will have a chance to discuss all the \.{\\}$\ldots$\.{namespace}
-macros later, at this point it will suffice to say that the lines
-above are responsible for controlling the typesetting of term names. The
-file \.{bo.tok} consists of a number of lines like the ones below:
-\begindemo
-^\tokeneq {STRING}{{34}{115}{116}{114}{105}{110}{103}{34}}
-^\tokeneq {PERCENT_TOKEN}{{34}{37}{116}{111}{107}{101}{110}{34}}
-\nooutput
-\enddemo
-The cryptic looking sequences of integers above are strings of {\sc ASCII}
-codes of the letters that form the name \bison\ uses when it needs to
-refer to the corresponding token (thus, the second one is
-\toksa{}\numberstochars{34}{37}{116}{111}{107}{101}{110}{34}\end
-\.{\the\toksa} which might help explain why such an elaborate scheme
-has been chosen). The macro \.{\\tokeneq} is defined in
-\.{yymisc.sty}, which in turn is input by \.{yy.sty} but what about
-the token names themselves? In this case they were extracted
-automatically from the \CWEB\ source file by the parser during the
-\CWEAVE\ processing stage. All of these definitions can be
-overwritten to get the desired output (say, one might want to typeset
-\.{ID} in a roman font, as `identifier'; all that needs to be done is
-a macro that says \.{\\prettywordpair\{ID\}\{\{\\rm
-identifier\}\}}). The file \.{btokenset.sty} input above contains a
-number of such definitions.
-
-@ To round off this short overview, I must mention a caveat associated
-with using the macros in this collection: while one of the greatest
-advantages of using \CWEB\ is its ability to rearrange the code in a
-very flexible way, the parser will either give up or produce
-unintended output if this feature is abused while describing the
-grammar. For example, in the code below
-@<A silly example@>=
-@G
-next_term:
- stuff @> @<Rest of line@> @={@> a = f( x ); @=}
-@g
-@<A production@>@;
-
-@ the line titled |@<A production@>| is intended to be a rule defined
-later. Notice that while it seems that the parser was able to recognize
-the first code fragment as a valid \bison\ input, it misplaced the
-|@<Rest of line@>|, having erroneously assumed it to be a part of
-the action code for this grammar (later on we will go into the details of
-why it is necessary to collect all the non-verbatim output of \CWEAVE,
-even the one that contains no interesting \Cee\ code; hint: it has
-something to do with money (\.{\$}), also known as math and the way
-\CWEAVE\ processes the `gaps' between verbatim sections). The production
-line that follows did not fare as well: the parser gave up. There
-is simply no point in including such a small language fragment as a
-valid input for the grammar the parser uses to process the verbatim
-output.
-@<A production@>=
-@G
- more stuff in this line {@> @[b = g(y);@]@=}
-@g
-
-@ Finally, if you forget that only the verbatim part of the output is
-looked at by the parser you might get something unrecognizable, such
-as
-@<Rest of line@>=
- but not all of it
-
-@ To correct this, one can provide a more complete grammar fragment to
-allow the parser to complete its task successfully. In some cases,
-this imposes too strict a constraint on the programmer. Instead, the
-parser that pretty prints \bison\ grammars allows one to add {\it
-hidden context\/} to the code fragments above. The context is added
-inside \.{\\vb} sections using \CWEB's \.{@@t}$\ldots$\.{@@>} facility. The \CTANGLE\
-output is not affected by this while the code above can now be typeset as:
-@<A silly example@>=
-@G
-next_term:
- stuff @> @t}\vb{\formatlocal{\let\peekstash\stashtoterm}}{@> @<Rest of line@> @t}\vb{FAKE}{@> @={@> a = f( x ); @=}
-@g
-@<A production@>@;
-
-@ $\ldots$ even a single line can now be displayed properly.
-@<A production@>=
-@G
-@t}\vb{\formatlocal{\skipheader} FAKE:}{@>
- more stuff in this line {@> b = g( y ); @=}
-@g
-
-@ With enough hidden context, even a small rule fragment can be
-typeset as intended. The `action star' was inserted to reveal some of
-the context.
-@<Rest of line@>=
-@G
-@t}\vb{\formatlocal{\skipheader} FAKE:}{@>
- but not all of it
-@t}\vb{\{\stashed{$\star$}\}}{@>
-@g
-@ What makes all of this even more confusing is that \CTANGLE\ will
-have no trouble outputting this as a(n almost, due to the
-intentionally bad \.{whoops} production above) valid \bison\ file
-(as can be checked by looking into \.{sill.y}). The author
-happens to think that one should not fragment the software into pieces
-that are too small: \bison\ is not \Cee\ so it makes sense to write
-\bison\ code differently. However, if the logic behind your code
-organization demands such fine fragmentation, hidden context provides
-you with a tool to show it off. A look inside the source of this
-document shows that adding hidden context can be a bit ugly so it is
-not recommended for routine use. The short example above is output in
-the file below.
-@(sill.y@>=
- @<A silly example@>@;
-
-@*1 On debugging. This concludes a short introduction to the \bison\
-grammar pretty printing using this macro collection. It would be
-incomplete, however, without a short reference to debugging\footnote{Here
-we are talking about debugging the output produced by \CWEAVE\ when
-the included \bison\ parser is used, {\it not\/} debugging parsers
-written with the help of this software: the latter topic is covered in more
-detail later on}. There is a
-fair amount of debugging information that the macros can output,
-unfortunately, very little of it is tailored to the {\it use\/} of the
-macros in the \bison\ parser. Most of it is designed to help {\it
-build\/} a new parser. If you find that the parser gives up too often
-or even crashes (the latter is most certainly a bug in the parser
-itself), the first approach is to make sure that your code {\it
-compiles\/} i.e.\ forget about the printed output and try to see if
-the `real' \bison\ accepts the code (just the syntax, no need to
-worry about conflicts and such).
-
-If this does not shed any light on why the macros seem to fail, turn
-on the debugging output by saying \.{\\trace$\ldots$true} for various
-trace macros. This can produce {\it a lot\/} of output, even for
-small fragments, so turn it on only for a section at a time. If you
-need still {\it more\/} details of the inner workings of the parser
-and the lexer, various other debugging conditionals are available. For
-example, \.{\\yyflexdebugtrue} turns on the debugging output for the
-scanner. There are a number of such conditionals that are discussed in
-the commentary for the appropriate \TeX\ macros.
-
-Remember, what you are seeing at this point is the parsing process of
-the \bison\ input file, not the one for {\it your\/} grammar (which
-might not even be complete at this point). However, if this fails, you
-are on your own: drop me a line if you figure out how to fix any bugs
-you find.
-
-@*1 Terminology. We now list a few definitions of the concepts used
-repeatedly in this documentation. Most of this terminology is
-rather standard. Formal precision is not the goal here, and intuitive
-explanations are substituted whenever possible.
-{%
-\def\aterm#1{\item{\sqebullet}{\ttl #1}: \ignorespaces}%
-\setbox0=\hbox{\sqebullet\enspace}
-\parindent=0pt
-\advance\parindent by \wd0
-\smallskip
-\aterm{bison parser} while, strictly speaking, not a formally defined
-term, this combination will always stand for one of the parsers generated
-by this package designed to parse a subset of the `official' grammar for
-\bison\ input files. All of these parsers are described later in
-this documentation. The term {\it main parser\/} will be
-used as a substitute in example documentation for the same purpose.
-
-\aterm{driver} a generic but poorly defined concept. In this
-documentation it is used predominantly to mean both the \Cee\ code and
-the resulting executable that outputs the \TeX\ macros that contain the
-parser tables, token values, etc., for the parsers built by the user. It
-is understood that the \Cee\ code of the `driver' is unchanged and the
-information about the parser itself is obtained by {\it including\/} the \Cee\
-file produced by \bison\ in the `driver' (see the examples supplied
-with the package).
-
-\aterm{lexer} a synonym for {\it scanner}, a subroutine that performs the {\it
-lexical analysis\/} phase of the parsing process, i.e.\ groups various
-characters from the input stream into parser {\it tokens}.
-
-\aterm{namespace} this is an overused bit of terminology meaning a
-set of names grouped together according to some relatively
-well defined principle. In a language without a well developed type
-system (such as \TeX) it is usually accompanied by a specially designed
-naming scheme. {\it Parser namespaces\/} are commonly used in this
-documentation to mean a collection of all the data structures describing a
-parser and its state, including tables, stacks, etc., named by using the
-`root' name (say \.{\\yytable}) and adding the name of the parser (for
-example, \.{[main]}). To support this naming scheme, a number of
-macros work in unison to create and rename the `data macros' accordingly.
-
-\aterm{symbolic switch} a macro (or an associative array of macros)
-that let the \TeX\ parser generated by the package associate {\it
-symbolic term names\/} with the terms. Unlike the `real' parser, the
-parser created with this suite requires some extra setup as explained
-in the included examples (one can also consult the source for this
-documentation which creates but does not use a symbolic switch).
-
-\aterm{symbolic term name} a (relatively new) way to refer to stack
-values in \bison. In addition to using the `positional' names such as
-\.{\$}$n$ to refer to term values, one can utilize the new syntax:
-\.{\$}\.{[}{\it name\/}\.{]}. The `{\it name}' can be assigned by the
-user or can be the name of the nonterminal or token used in the
-productions.
-
-\aterm{term} in a narrow sense, an `element' of a grammar. Instead of
-a long winded definition, an example, such as \prodstyle{ID} should
-suffice. Terms are further classified into {\it terminals\/} (tokens)
-and {\it nonterminals\/} (which can be intuitively thought of as
-composite terms).
-
-\aterm{token} in short, an element of a set. Usually encoded as an
-integer by most parsers, an indivisible {\it term\/}
-produced for the parser by the scanner. \TeX's scanner uses a more
-sophisticated token classification, for example, $($character code,
-character category$)$ pairs, etc.
-
-}
-@** Languages, scanners, parsers, and \TeX. % Or $\ldots$
-$$\vbox{\halign to\hsize{\kern-1.5pt\it#\hfil\tabskip0pt plus1fil\cr
-Tokens and tables keep macros in check.\cr
-Make 'em with \bison, use \.{WEAVE} as a tool.\cr
-Add \TeX\ and \CTANGLE, and \Cee\ to the pool.\cr
-Reduce 'em with actions, look forward, not back.\cr
-Macros, productions, recursion and stack!\cr
-\noalign{\vskip2pt}
-\omit\hfil\eightpoint Computer generated (most likely)\cr}}
-$$
-\def\recount#1{${}^{(#1)}$}%
-In order to understand the parsing routines in this collection,
-it would help to gain some familiarity with the internals of the
-parsers produced by \bison\ for its intended target: \Cee. A person
-looking inside a parser delivered by \bison\ would
-quickly discover that the parsing procedure itself (|yyparse|)
-occupies a rather small portion of the file. If (s)he were to further
-reduce the size of the file by removing all the preprocessor
-directives intended to anticipate every conceivable combination of the
-operating system, compiler, and \Cee\ dialect, and various reporting
-and error logging functions it would become very clear that the most
-valuable product of \bison's labor is a collection of integer {\it
-tables\/} that control the actions of the parser routine. Moreover,
-the routine itself is an extremely concise and well-structured loop
-composed of |goto|'s and a number of numerical conditionals. If one
-were to think of a way of accessing arrays and processing conditionals
-in the language of one's choice, once the tables produced by \bison\
-have been converted into a form suitable for the consumption by the
-appropriate language engine, the parser implementation becomes
-straightforward. Or nearly so.
-
-The {\it scanning\/} (or {\it lexing\/}) step of this process---a way
-to convert a stream of symbols into a stream of integers---also
-deserves some attention here. There are a number of excellent tools
-written to automate this step in much the same fashion as \bison\
-automates the generation of parsers. One such tool, \flex, though
-(in the opinion of this author) slightly lacking in simplicity and
-elegance compared to \bison, was used to implement the lexer for
-this software suite. Lexing in \TeX\ will be discussed in considerable
-detail later in this manual.
-
-The language of interest in our case is, of course, \TeX, so our
-future discussion will revolve around the five elements mentioned
-above: \recount{1}data structures (mainly arrays and stacks),
-\recount{2}converting
-\bison's output into a form suitable for \TeX's consumption,
-\recount{3}processing raw streams of \TeX's tokens and converting them into
-streams of parser tokens, \recount{4}the implementation of \bison's
-|yyparse| in \TeX, and, finally, \recount{5}producing \TeX\ output via {\it
-syntax-directed translation} (which requires an appropriate
-abstraction to represent \bison's actions inside \TeX). We shall
-begin by discussing the parsing process itself.
-
-@*1 Arrays, stacks and the parser.
-Let us briefly examine the programming environment offered by \TeX.
-Designed for typesetting, \TeX's remarkable language
-provides a layer of macro processing atop of a set of commands that
-produce the output fulfilling its primary mission: delivering page
-layouts. In The \TeX book, macro {\it expansion\/} is likened to
-mastication, whereas \TeX's main product, the typographic output is the
-result of its `digestion' process. Not everything that goes through
-\TeX's digestive tract ends up leaving a trace on the final page: a
-file full of \.{\\relax}'s will produce no output, even though
-\.{\\relax} is not a macro, and thus would have to be processed by
-\TeX\ at the lowest level.
-
-It is time to describe the details of defining suitable data structures
-in \TeX. At first glance, \TeX\ provides rather standard means of
-organizing and using general memory. At the core of its generic
-programming environment is an array of \.{\\count}$\,n$ {\it
-registers\/}, which may be viewed as general purpose integer variables
-that are randomly accessible by their indices. The integer arithmetic
-machinery offered by \TeX\ is spartan but is very adequate for the sort of
-operations a parser would perform: mostly additions and
-comparisons.
-
-Is the \.{\\count} array a good way to store tables in \TeX? Probably
-not. The first factor is the {\it size\/} of this array: only 256
-\.{\\count} registers exist in a standard \TeX\ (the actual number of
-such registers on a typical machine running \TeX\ is significantly
-higher but this author is a great believer in standards, and to his
-knowledge, none of the standardization efforts in the \TeX\ world has
-resulted in anything even close to the definitive masterpiece that is
-The \TeX book). The issue of size can be mitigated to some extent by
-using a number of other similar arrays used by \TeX\ (\.{\\catcode},
-\.{\\uccode}, \.{\\dimen}, \.{\\sfcode} and others can be used for
-this purpose as long as one takes care to restore the `sane' values
-before control is handed off to \TeX's typesetting mechanisms). If a
-table has to span several such arrays, however, the complexity of
-accessing code would have to increase significantly, and the issue of
-size would still haunt the programmer.
-
-The second factor is the use of several registers by \TeX\ for special
-purposes (in addition, some of these registers can only store a
-limited range of values). Thus, the first 10 \.{\\count} registers are
-used by plain \TeX\ for (well, {\it intended\/} for, anyway) the
-purposes of page accounting: their values would have to be carefully
-saved and restored before and after each parsing call,
-respectively. Other registers (\.{\\catcode} in particular) have even
-more disrupting effects on \TeX's internal mechanisms. While all of
-this can be managed (after all, using \TeX\ as an arithmetic engine
-such as a parser suspends the need for any typographic or other
-specialized functions controlled by these arrays), the added
-complexity of using several memory banks simultaneously and the speed penalty
-caused by the need to store and restore register values make this
-approach much less attractive.
-
-What other means of storing arrays are provided by \TeX? Essentially,
-only three options remain: \.{\\token} registers, macros holding whole
-arrays, and associative arrays accessed through
-\.{\\csname}$\,\ldots\,$\.{\\endcsname}. In the first two cases if care
-is taken to store such arrays in an
-appropriate form one can use \TeX's \.{\\ifcase} primitive to access
-individual elements. The trade-off is the speed of such
-access: it is {\it linear\/} in the size of the array for most
-operations, and worse than that for others, such as removing the last
-item of an array. Using clever ways
-of organizing such arrays, one can improve the linear access time to
-$O(\log n)$ by simply modifying the access macros but at the moment, a
-straightforward \.{\\ifcase} is used after expanding a list macro or
-the contents of a \.{\\token}$\,n$ register in an {\it un\/}optimized
-parser. An {\it optimized\/} parser uses associative arrays.
-
-The array discussion above is just as applicable to {\it stacks\/}
-(indeed, an array is the most common form of stack
-implementation). Since stacks pop up and disappear frequently (what
-else are stacks to do?), list macros are usually used to store
-them. The optimized parser uses a separate \.{\\count} register to
-keep track of the top of the stack in the appropriate associative
-array.
-
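To make the trade-off above concrete, the fragment below is a minimal sketch, not \splint's actual table machinery (every \demo... name is invented for illustration): the same small integer table is stored once as an \ifcase list and once as a \csname-based associative array, and a toy stack keeps its top in a dedicated \count register.

% a sketch only: none of the \demo... macros below exist in SPLinT
\def\demolist#1{\ifcase#1 5\or 12\or 0\or 7\else 0\fi}% linear-time access

\expandafter\def\csname demo[0]\endcsname{5}% constant-time access:
\expandafter\def\csname demo[1]\endcsname{12}% one control sequence per cell
\expandafter\def\csname demo[2]\endcsname{0}
\expandafter\def\csname demo[3]\endcsname{7}
\def\demoentry#1{\csname demo[#1]\endcsname}

\newcount\demotop % the top of a stack kept in the same associative array
\def\demopush#1{\expandafter\def\csname demo:stack[\the\demotop]\endcsname{#1}%
  \advance\demotop by1 }
\def\demopop{\advance\demotop by-1 }

\message{entry 1, list form: \demolist{1}; associative form: \demoentry{1}}

Pushing is merely a definition followed by an \advance of the top counter, which is essentially how the optimized stacks mentioned above operate; the \ifcase form is what the unoptimized parser expands.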
-Let us now switch our attention
-to the code that implements the parser and scanner {\it functions\/}.
-If one has spent some time writing \TeX\ macros of any sophistication
-(or any macros, for that matter) (s)he must be familiar with the general
-feeling of frustration and the desire to `just call a function here and move
-on'. Macros produce {\it tokens\/}, however, and tokens must either
-expand to nothing or stay and be contributed to your input, or worse,
-be out of place and produce an error. One way to sustain a stream
-of execution with macros is {\it tail recursion\/} (i.e.~always expanding the
-{\it last token left standing}).
-
-As we have already discussed, \bison's
-|yyparse()| is a well laid out loop organized as a sequence of
-|goto|'s (no reason to become religious about structured programming
-here). This fact, and the following well known trick, make \Cee\ to \TeX\
-translation almost straightforward.
-
-% The macro mess below looks painful but this is the only place such layout is used
-% The approach can be easily generalized and put in limbo.sty but it seems
-% a bit redundant at this point.
-
-\newcount\piccount
-\newdimen\lasthsize
-
-\setbox5=\vtop{
-\demomargin=0pt
-\let\demoastyle\empty
-\begindemo
-^label A: ...
-\nooutput
-^ if**L**Krm(condition)**N
-^ goto C;
-\nooutput
-^label B: ...
-\nooutput
-^ goto A;
-\nooutput
-^label C: ...
-\nooutput
-\enddemo
-}
-\dp5=\z@@
-
-\setbox3=\vtop{
-\demomargin=0pt
-\let\demoastyle\empty
-\begindemo
-^\if**L**Krm(condition)**N
-^ \let\next=\labelC
-^\else
-^ \let\next=\labelAtail
-\enddemo
-}
-\dp3=\z@@
-
-\newdimen\lastdepth
-
-\def\startfitpar{%
- \bgroup
- \lasthsize=\hsize
- \advance\lasthsize-1.5in
- \vsize=\baselineskip
- \topskip=\z@@
- \setbox0\box2 % empty it
- % this sounds good at first but there is no good way to pull the insertions out after the
- % box manipulations that follow;
- % insertions will thus be contributed to whatever page was being worked on when the
- % picture insertions {\it started}; hence, if these happen to start at the very top of the page,
- % any insertion that follows will be contributed to the previous page; we correct this for footnotes
- % below
- % \holdinginserts=1
- \output{%
- \global\setbox2=\vbox{
- \ifvoid2
- \else
- \prevdepth=\dp2
- \unvbox2
- \fi
- \lastdepth=\dp255
- \unvbox255
- % this would be tempting, however, the \eject that follows should disappear
- % in addition, one really should not be playing with page breaking in the middle of
- % such tricky insertions
- % \penalty\outputpenalty
- % \kern-\lastdepth % to make sure \baselineskip is accounted for
- }%
- }\eject
- \output{%
- \setbox0=\vbox{%
- \unvbox255%
- }% \lastbox would almost work ... if not for insertions
- \global\advance\piccount1
- \global\setbox2=\vbox{%
- \prevdepth=\dp2 \unvbox2
- \hbox to\hsize{%
- \ifnum\piccount<15
- \hbox to1.5in{%
- \ifnum\piccount=1
- \ \box5
- \fi
- \hfill}%
- \fi
- \box0 \hfill
- \ifnum\piccount=1
- \box3 \ %
- \fi
- \ifvoid\footins % reinsert footnotes
- \else
- \insert\footins{\unvbox\footins}%
- \fi
- }%
- }%
- }%
- \parshape=15
- 0pt 2.7in
- 0pt 2.7in
- 0pt 2.7in
- 0pt 2.7in
- 0pt 2.7in
- 0pt 2.7in
- 0pt 2.7in
- 0pt \lasthsize
- 0pt \lasthsize
- 0pt \lasthsize
- 0pt \lasthsize
- 0pt \lasthsize
- 0pt \lasthsize
- 0pt \lasthsize
- 0pt \hsize
-}
-
-\def\endfitpar{%
- \par
- \eject
- \egroup
- % see the comment above
- % \holdinginserts=0
- \prevdepth=\dp2
- \unvbox2
-}
-
-\startfitpar
-\noindent Given the code on the left (where |goto|'s
-are the only means of branching but can appear inside conditionals),
-one way to translate it into \TeX\ is to define a set of macros (call
-them \.{\\labelA}, \.{\\labelAtail} and so forth for clarity) that end in
-\.{\\next} (a common name for this purpose). Now, \.{\\labelA} will
-implement the code that comes between \.{label A:} and \.{goto C;},
-whereas \.{\\labelAtail} is responsible for the code after \.{goto C;}
-and before \.{label B:}
-(provided no other |goto|'s intervene which can always be
-arranged). The conditional which precedes \.{goto C;} can now be written in
-\TeX\ as presented on the right, where (condition) is an appropriate
-translation of the corresponding condition
-in the code being translated (usually, one of `$=$' or `$\not=$'). Further
-details can be extracted from the \TeX\ code that implements these
-functions where the corresponding \Cee\ code is presented alongside
-the macros that mimic its functionality%
-\footnote{Running the risk of overloading the reader with details, the author
-would like to note that the actual implementation follows a {\it slightly\/} different
-route in order to avoid any \.{\\let} assignments or changing the
-meaning of \.{\\next}}.
-This concludes an overview of the general approach.
-It is time to consider the way characters get consumed
-on the lower levels of the macro hierarchy and the interaction between the different
-layers of the package.
-\endfitpar
-
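For readers who prefer to see the trick run end to end, here is a self-contained toy version (the \labelA-style names and the termination condition are invented, and it uses the plain \let\next dispatch that, as the footnote above explains, the actual implementation avoids):

\newcount\demoval
\def\labelA{%                 label A: ...
  \ifnum\demoval>2            % if (condition)
    \let\next\labelC          %     goto C;
  \else
    \let\next\labelAtail
  \fi
  \next
}
\def\labelAtail{%             the code between `goto C;' and `goto A;'
  \advance\demoval by1
  \let\next\labelA \next      % goto A;
}
\def\labelC{%                 label C: ...
  \message{arrived at label C with demoval=\the\demoval}%
  \let\next\relax \next
}
\demoval=0 \labelA % `loops' through \labelA and \labelAtail, then stops at \labelC

Since every macro ends by expanding \next, the input never accumulates unfinished expansions: this is the tail recursion that lets the translated |goto| loop run indefinitely.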
-@*1 \TeX\ into tokens.
-Thus far we have covered the ideas
-behind items \recount{1} and \recount{4} on our list. It is time to
-discuss the lowest level of processing done by these macros:
-converting \TeX's tokens into the tokens consumed by the parser,
-i.e.\ part\recount{3} of the plan. Perhaps, it would be most appropriate
-to begin by defining the term {\it token}.
-
-As commonly defined, a token is simply an element of a set. Depending on
-how much structure the said set possesses, a token can be represented by
-an integer or a more complicated data structure. In the discussion
-below, we will be dealing with two kinds of tokens: the tokens
-consumed by the parsers and the \TeX\ tokens seen by the input
-routines. The latter play the role of {\it characters\/} that combine
-to become the former. \bison's internal representation for its tokens
-is non-negative integers so this is what a scanner must
-produce.
-
-\TeX's tokens are a good deal more sophisticated: they can be
-either pairs $(c_{\rm ch}, c_{\rm cat})$, where $c_{\rm ch}$ is the
-character code and $c_{\rm cat}$ is \TeX's category code ($1$ and $2$ for
-group characters, $5$ for end of line, etc.), or {\it control
-sequences\/}, such as \.{\\relax}. Some of these tokens (control
-sequences and {\it active}, i.e.~category~13 characters) can have
-complicated internal structure (expansion). The situation is further
-complicated by \TeX's \.{\\let} facility, which can create
-`character-like' control sequences, and the lack of conditionals
-to distinguish them from the `real' characters. Finally, not all pairs
-can appear as part of the input (say, there is no $(n, 0)$ token for
-any $n$, in the terminology above).
-
-The scanner expects to see {\it characters} in its input, which are
-represented by their {\sc ASCII} codes, i.e.~integers between $0$ and
-$255$ (actually, a more general notion of the Unicode character is
-supported but we will not discuss it further). Before character codes
-appear as the input to the scanner, however, and make its integer
-table-driven mechanism `tick', a lot of work must be done to collect
-and process the stream of \TeX\ tokens produced after \CWEAVE\ is done
-with your input. This work becomes further complicated when the
-typesetting routines that interpret the parser's output must sneak
-outside of the parsed stream of text (which is structured by the
-parser) and insert the original \TeX\ code produced by \CWEAVE\ into
-the page.
-
-\splint\ comes with a customizable input routine of
-moderate complexity (\.{\\yyinput}) that classifies all \TeX\ tokens
-into seven categories: `normal' spaces (i.e.~category~10 tokens,
-skipped by \TeX's parameter scanning mechanism),
-`explicit' spaces (includes the control sequences \.{\\let} to \.{\ },
-as well as \.{\\\ }), groups ({\it avoid} using \.{\\bgroup} and \.{\\egroup} in
-your input but `real', \.{\{}$\ldots$\.{\}} groups are fine), active
-characters, normal characters (of all character categories that can
-appear in \TeX\ input, including \.{\$}, \.{\^}, \.{\#}, \.{a}--\.{Z},
-etc.), single letter control sequences, and multi-letter control
-sequences. Each of these categories can be processed separately to
-`fine-tune' the input routine to the problem at hand. The input
-routine is not very fast, instead, flexibility was the main
-goal. Therefore, if speed is desirable, a customized input routine
-is a great place to start. As an example, a minimalistic
-\.{\\yyinputtrivial} macro is included.
-
-When \.{\\yyinput} `returns' by calling \.{\\yyreturn} (which is a
-macro you design), your lexing routines have access to three
-registers: \.{\\yycp@@}, that holds the character value of the
-character just consumed by \.{\\yyinput}, \.{\\yybyte}, that most of
-the time holds the token just removed from the input,
-and \.{\\yybytepure}, that (again, with very few
-exceptions) holds a `normalized' version of the read character (i.e.~a
-character of the same character code as \.{\\yycp@@}, and category~11
-(to be even more precise (and to use nested parentheses), `normalized'
-characters have the same category code as the current category code of
-\.{@@})).
-
-Most of the time it is the character code one needs (say, in the case
-of \.{\\\{}, \.{\\\}}, \.{\\\&} and so on) but under some circumstances the
-distinction is important (outside of \.{\\vb\{}$\ldots$\.{\}}, the sequence
-\.{\\1} has nothing to do with the digit `\.{1}'). This mechanism
-makes it easy to examine the consumed token. It also forms
-the foundation of the `hidden context' passing mechanism described later.
-
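To give the shape, and only the shape, of what a user-designed \yyreturn might do with this information, the fragment below declares a stand-in \yycp@ so that it can be run outside of \splint\ (in \splint\ itself the value is maintained by \yyinput and need not be a count register, and the handler names below do not exist):

\catcode`\@=11 % allow @ in macro names, as a style file would
\newcount\yycp@ % a stand-in so the sketch runs on its own
\def\handleopenbrace{\message{an open brace (explicit or implicit)}}
\def\handleordinary{\message{ordinary character, code \the\yycp@}}
\def\yyreturn{% the macro the user designs: dispatch on the character code
  \ifnum\yycp@=`\{ \let\next\handleopenbrace
  \else \let\next\handleordinary
  \fi \next
}
\yycp@=`\{ \yyreturn % reports an open brace
\yycp@=`\a \yyreturn % reports code 97
\catcode`\@=12

A real \yyreturn would, of course, also consult \yybyte whenever the distinction between, say, \1 and the digit `1' matters, as described above.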
-The remainder of this section discusses the internals of \.{\\yyinput}
-and some of the design trade-offs one has to make while working on
-processing general \TeX\ token streams. It is typeset in `small print'
-and can be skipped if desired.
-\smallskip
-\begingroup
-\abovedisplayskip=5pt%
-\abovedisplayshortskip=2pt%
-\belowdisplayskip=5pt%
-\belowdisplayshortskip=2pt%
-\fnotesstart=1
-\fnotesspan=2
-\noofcolumns=2
-\icgap=1em%
-\eightpoint
-\linecount=73
-\setmcparams
-\def\.#1{{\chardef\\=`\\\chardef\&=`\&\tt #1}}%
-\dsskip=0pt%
-\begindoublecols
-To examine every token in its path (including spaces that are easy to
-skip), the input routine uses one of the two well-known {\sc \TeX}nologies:
-\.{\\futurelet\\next\\examinenext} or equally effective
-\hbox{\.{\\afterassignment\\next\\let={\tt\char"20}}}.
-Recursively inserting one of these sequences, \.{\\yyinput} can go
-through any list of tokens, as long as it knows where to stop
-(i.e.~return an end of file character). The
-signal to stop is provided by the \.{\\yyeof}
-primitive which should not appear in any `ordinary' text
-presented for parsing, other than for the purpose of providing such a
-stop signal. Even the dependence on \.{\\yyeof} can be eliminated if
-one is willing to invest the time in writing macros that juggle \TeX's
-\.{\\token} registers and only limit oneself to input from such
-registers (which is, aside from an obvious efficiency hit, a strain on
-\TeX's memory, as you have to store multiple (3 in the general case)
-copies of your input to be able to back up when the lexer makes a
-wrong choice). There does not seem to be a way of doing it unless the
-text has been stored in a \.{\\token} register first (or storing the
-whole input as a {\it parameter\/} for the appropriate macro: this
-scheme is remarkably powerful and leads to {\it expandable\/} versions
-of very complicated macros, although the amount of effort required to
-write such macros grows at a frightening rate). All of these are
-non-issues for the text inside \.{\\vb\{}$\ldots$\.{\}} and the care that
-\.{\\yyinput} takes in processing characters inside such lists is an
-overkill. In a more `hostile' environment (such as the one encountered
-by the now obsolete \.{\\Tex} macros), this extra attention to detail pays
-off in the form of a more robust input mechanism.
-
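Before turning to those problem tokens, here is a toy walker in the spirit just described; \demoeof plays the role of \yyeof, every \demo... name is invented, and the brace and space complications discussed below are deliberately ignored, so the walker merely counts ordinary character tokens.

\def\demoeof{\demoeof}% a private `end of input' marker: compared, never expanded
\newcount\democount
\def\demoscan{\futurelet\next\demoexamine}
\def\demoexamine{%
  \ifx\next\demoeof \let\demonext\demofinish
  \else \let\demonext\democonsume
  \fi \demonext
}
\def\democonsume#1{\advance\democount by1 \demoscan}% consume one token, go on
\def\demofinish#1{\message{saw \the\democount\space tokens}}% #1 is \demoeof
\democount=0
\demoscan abc\demoeof % reports `saw 3 tokens'

A real input routine must survive the braces and spaces that the naive argument grabbing in \democonsume would mangle or skip, which is precisely the subject of the paragraphs that follow.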
-One subtlety deserves a special mention here, as it can be important
-to the designer of `higher-level' scanning macros. Two types of tokens
-are extremely difficult to deal with whenever \TeX's own lexing
-mechanisms are used: (implicit) spaces and even more so, braces. We
-will only discuss braces here, however, almost everything that follows
-applies equally well to spaces (category 10 tokens to be precise), with
-a few simplifications (or complications, in a couple of places). To
-understand the difficulty, let's consider one of the approaches above:
-$$
-\.{\\futurelet\\next\\examinenext}.
-$$
-The macro \.{\\examinenext}
-usually looks at \.{\\next} and inserts another macro (usually also called
-\.{\\next}) at the very end of its expansion list. This macro usually
-takes one parameter, to consume the next token. This mechanism works
-flawlessly, until the lexer encounters a \.{\{}br\.{,}sp\.{\}}ace. The \.{\\next}
-sequence, seen by \.{\\examinenext} contains a lot of information
-about the brace ahead: it knows its category code (left brace, so $1$), its
-character code (in case there was, say a \.{\\catcode`\\[=1{\tt\char`\ }}
-earlier) but not whether it is a `real' brace (i.e.\ a character
-\.{\{}$_1$) or an implicit one (a \.{\\bgroup}). There is no way to find
-that out until the control sequence `launched' by \.{\\examinenext}
-sees the token as a parameter.
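-
-To see the problem in the smallest possible setting (using plain
-\TeX's \.{\\bgroup}, which is \.{\\let} to \.{\{}), compare
-$$
-\vbox{%
-\hbox{\.{\\futurelet\\next\\examinenext\{group one\}}}%
-\hbox{\.{\\futurelet\\next\\examinenext\\bgroup group two\\egroup}}%
-}
-$$
-In both cases \.{\\ifx\\next\\bgroup} is true and the character and
-category codes of \.{\\next} are those of a left brace, so
-\.{\\examinenext} cannot tell the explicit brace from the implicit one
-without consuming the token.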
-
-If the next token is a `real' brace, however,
-\.{\\examinenext}'s successor will never see the token itself: the
-braces are stripped by \TeX's scanning mechanism. Even if it finds a
-\.{\\bgroup} as the parameter, there is no guarantee that the actual
-input was not \.{\{\\bgroup\}}. One way to handle this is by using
-\.{\\string} ahead of any consumption of the next token. If prior to
-expanding \.{\\string} care has been taken to set the \.{\\escapechar}
-appropriately (remember, we know the character code in advance), as
-soon as one sees a character with \.{\\escapechar}'s character code,
-(s)he knows that an implicit brace has just been seen. One added
-complication to all this is that a very determined programmer can
-insert an {\it active\/} character (using, say, the \.{\\uccode}
-mechanism) that has the {\it same\/} character code as the {\it
-brace\/} token that it has been \.{\\let} to! Setting this possibility
-aside, the \.{\\string} mechanism (or, its cousin, \.{\\meaning}) is
-not perfect: both produce a sequence of category 12 and 10 tokens. If
-it is indeed a brace character that we just saw, we can consume the next
-token and move on, but what if this was a control sequence? After all,
-just as easily as \.{\\string} makes a sequence into characters,
-\.{\\csname}$\,\ldots\,$\.{\\endcsname} pair will make any sequence of
-characters into a control sequence. Huh~$\ldots$
-
-What we need is a backup mechanism: if one has a copy of the
-token sequence ahead, one can use \.{\\string} to see if it is a real
-brace first, and if it is, consume it and move on (the active character
-case can be handled as the implicit case below, with one extra backup
-to count how many tokens have been consumed). At this point one has to {\it
-reinsert\/} the brace in case, at some point, a future `back up'
-requires that the rest of the tokens are removed from the output (to
-avoid `\.{Too many \}'s}' complaints from \TeX). This can be done by using
-the \.{\\iftrue\{\\else\}\\fi} trick but of course, some bookkeeping is
-needed to keep track of how far inside the brace groups we
-are.
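-
-To spell the trick out: each of the sequences below is balanced as
-written, and can therefore be stored safely in a macro body or a token
-register, yet leaves a single unmatched brace behind once the
-conditional is evaluated (the skipped branch is discarded without any
-brace matching).
-$$
-\vbox{%
-\hbox{\.{\\iftrue\{\\else\}\\fi \% yields a lone \{}}%
-\hbox{\.{\\iffalse\{\\else\}\\fi \% yields a lone \}}}%
-}
-$$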
-
-If it is an implicit brace, more work is needed: read all the
-characters that \.{\\string} produced (and maybe more), then remember
-the number of characters consumed. Remove the rest of the input using
-the method described above and restart the scanning from the same point
-knowing that the next token can be scanned as a parameter.
-
-Another strategy is to design a general enough macro that counts
-tokens in a token register and to simply recount the tokens after every
-brace is consumed.
-
-Either way, it takes a lot of work. If anyone would
-like to pursue the counting strategy, simple counting macros
-are provided in \.{/examples/count/count.sty}.
-The macros in this example
-supply a very general counting mechanism that does not depend on
-\.{\\yyeof} (or {\it any\/} other token) being `special' and can count the
-tokens in any token register, as long as none of those tokens is an
-\.{\\outer} control sequence. In other words, if the macro is used
-immediately after the assignment to the token register, it should
-always produce a correct count.
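-
-For a taste of what is involved, here is a deliberately naive counting
-sketch: it skips spaces, counts a braced group as a single token and
-cannot handle parameter tokens, which are precisely the pitfalls the
-macros in \.{count.sty} are written to avoid.
-$$
-\vbox{%
-\hbox{\.{\\newcount\\tkcnt}}%
-\hbox{\.{\\def\\endmk\{\\endmk\} \\def\\cmpend\{\\endmk\}}}%
-\hbox{\.{\\def\\naivecount\#1\{\\tkcnt=0}}%
-\hbox{\quad\.{\\expandafter\\cntnext\\the\#1\\endmk\}}}%
-\hbox{\.{\\def\\cntnext\#1\{\\def\\held\{\#1\}\%}}%
-\hbox{\quad\.{\\ifx\\held\\cmpend \\let\\go\\relax}}%
-\hbox{\quad\.{\\else \\advance\\tkcnt by1 \\let\\go\\cntnext}}%
-\hbox{\quad\.{\\fi \\go\}}}%
-\hbox{\.{\\toks0=\{ab\{cd\}\}\\naivecount\{\\toks0 \}\% now \\tkcnt=3}}%
-}
-$$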
-
-Needless to say, if such a general mechanism is desired, one has to
-look elsewhere. The added complications of treating spaces (\TeX\
-tends to ignore them most of the time) make this a torturous exercise
-in \TeX's macro wizardry. The included \.{\\yyinput} has two ways of
-dealing with braces: strip them or view the whole group as a
-token. Pick one or write a different \.{\\yyinput}. Spaces, implicit
-or explicit are reported as a specially selected character code and
-consumed with a likeness of
-$$
-\hbox{\.{\\afterassignment\\moveon\\let\\next={\tt\char`\ }}}.
-$$
-
-Now that a steady stream of character codes is arriving at \.{\\yylex}
-after \.{\\yyreturn}, the job of converting it into numerical tokens
-is performed by the {\it scanner} (or {\it lexer\/}, or {\it tokenizer\/},
-or even {\it tokener}), discussed in the next section.
-\enddoublecols
-\endgroup
-
-@*1 Lexing in \TeX. In a typical system that uses a parser to process
-text, the parsing pass is usually split into several stages: the raw
-input, the lexical analysis (or simply {\it lexing}), and the parsing
-proper. The {\it lexing\/} (also called {\it scanning}, we use these
-terms interchangeably) clumps various sequences of characters into
-{\it tokens\/} to facilitate the parsing stage. The reasons for this
-particular hierarchy are largely pragmatic and are partially historic
-(there is no reason that {\it parsing\/} cannot be done in multiple
-phases, as well, although it usually isn't).
-
-If one remembers a few basic facts from the formal language theory, it
-becomes obvious that a lexer, which parses {\it regular\/} languages,
-can (theoretically) be replaced by an {\sc LALR} parser, which parses {\it
-context-free\/} ones (or some subset thereof, which is
-still a superset of all regular languages). A common justification given for
-creating specialized lexers is efficiency and speed. The
-reality is somewhat more subtle. While we do care about the efficiency of
-parsing in \TeX, having a specialized scanner is important for
-a number of different reasons.
-
-The real advantage of having a dedicated scanner is the ease with which it
-can match incomplete inputs and back up. A parser can, of course,
-{\it recognize\/} any valid input that is also acceptable to a lexer, as well
-as {\it reject\/} any input that does not form a valid token. Between
-those two extremes, however, lies a whole realm of options that a
-traditional parser will have great difficulty exploring. Thus, to
-mention just one example, it
-is relatively easy to set up a DFA\footnote{Which stands for
-Deterministic Finite Automaton, a common (and mathematically unique)
-way of implementing a scanner for regular languages. Incidentally, {\sc
-LALR} mentioned above is short for Look-Ahead {\sc LR} (Left to right,
-Rightmost derivation).}
-so that the {\it longest\/}
-matching input is accepted. The only straightforward way to do this
-with a traditional parser is to parse longer and longer inputs again
-and again. While this process can be optimized to a certain degree,
-the fact that a parser has a {\it stack\/} to maintain limits its
-ability to back up.
-
-As an aside, the mechanism by which \CWEB\ assembles its `scraps'
-into chunks of recognized code is essentially iterative lexing,
-very similar to what a human does to make sense of complicated
-texts. Instead of trying to match the longest running piece of text,
-\CWEB\ simply looks for patterns to combine inputs into larger
-chunks, which can later be further combined. Note that this is not
-quite the same as the approach taken by, say, {\sc GLR} parsers, where
-the parser must match the {\it whole\/} input or declare a
-failure. Where a \CWEB-type parser may settle for the first available
-match (or the longest available) a {\sc GLR} parser must try {\it
-all\/} possible matches or use an algorithm to reject the majority of
-the ones that are bound to fail in the end.
-
-This `\CWEB\ way' is also different from a traditional `strict' {\sc
-LR} parser/scanner approach and certainly deserves serious
-consideration when the text to be parsed possesses some rigid
-structure but the parser is only allowed to process it one small
-fragment at a time.
-
-Returning to the present macro suite, the lexer produced by \flex\
-uses integer tables similar to those employed by \bison\ so the
-usual {\sc\TeX}niques used in implementing \.{\\yyparse} are fully
-applicable to \.{\\yylex}.
-
-An additional advantage provided by having a \flex\ scanner implemented
-as part of the suite is the availability of the original \bison\ scanner written
-in \Cee\ for use by the macro package.
-
-This said, the code generated by \flex\ contains a few idiosyncrasies
-not present in the \bison\ output. These `quirks' mostly involve
-handling of end of input and error conditions. A quick glance at the
-\.{\\yylex} implementation will reveal a rather extensive collection of
-macros designed to deal with end of input actions.
-
-Another difficulty one has to face in translating \flex\ output into
-\TeX\ is a somewhat unstructured namespace delivered in the final
-output (this is partially due to the \POSIX\ standard that \flex\
-strives to follow). One consequence of this `messy' approach is that the
-writer of a \flex\ scanner targeted to \TeX\ has to declare \flex\
-`states' (more properly called {\it subautomata}) twice: first for the
-benefit of \flex\ itself, and then again, in the {\it \Cee\ preamble\/}
-portion of the code to output the states to be used by the action code
-in the lexer. The \.{Define\_State($\ldots$)} macro is provided for this
-purpose. This macro can be used explicitly by the programmer or be
-inserted by a specially designed parser.
-Using \CWEB\ helps to keep these declarations together.
-
-The `hand-off' from the scanner to the parser is implemented
-through a pair of registers: \.{\\yylval}, a token register
-containing the value of the returned token and \.{\\yychar}, a
-\.{\\count} register that contains the numerical value of the
-token to be returned.
-
-Upon matching a token, the scanner passes one crucial piece of
-information to the user: the character sequence representing the token
-just matched (\.{\\yytext}). This is not the whole story
-though. There are three more token sequences that are made available
-to the parser writer whenever a token is matched.
-
-The first of these is simply a `normalized' version of
-\.{\\yytext} (called \.{\\yytextpure}). In most cases it
-is a sequence of \TeX\ tokens with the same character codes as the one
-in \.{\\yytext} but with their category codes set to 11. In
-cases when the tokens in \.{\\yytext} are {\it not}
-$(c_{\rm ch}, c_{\rm cat})$ pairs, a few simple
-conventions are followed, some of which will be explained below. This
-sequence is provided merely for convenience and its typical use is to
-generate a key for an associative array.
-
-The other two sequences are special `stream pointers' that provide
-access to the extended scanner mechanism in order to implement passing
-of `formatting hints' to the parser without introducing any changes to
-the original grammar. As the mechanism itself and the motivation
-behind it are somewhat subtle, let me spend a few moments discussing
-the range of formatting options desirable in a generic pretty-printer.
-
-Unlike strict parsers employed by most compilers, a parser designed
-for pretty printing cannot afford to be too picky about the structure
-of its input (\cite[Go] calls such parsers `loose'). To provide
-a simple illustration, an isolated identifier, such as `\.{lg\_integer}',
-can be a type name, a variable name, or a structure tag (in a language like
-\Cee\ for example). If one expects the pretty printer to typeset this
-identifier in a correct style, some context must be supplied, as
-well. There are several strategies a pretty printer can employ to get
-a hold of the necessary context. Perhaps the simplest way to handle
-this, and to reduce the complexity of the pretty printing algorithm is
-to insist on the user providing enough context for the parser to do
-its job. For short examples like the one above, this is an acceptable
-strategy. Unfortunately, it is easy to come up with longer snippets of
-grammatically deficient text that a pretty printer should be expected
-to handle. Some pretty printers, such as the one employed by \CWEB\
-and its ilk (the original \.{WEB}, \.{FWEB}), use a very flexible
-bottom-up technique that tries to make sense of as large a portion of
-the text as it can before outputting the result (see also \cite[Wo],
-which implements a similar algorithm in \LaTeX).
-
-The expectation is that this algorithm will handle the majority (about
-90\%? it would be interesting to carry out a study in the spirit of
-the ones discussed in \cite[Jo] to find out) of the
-cases with the remaining few left for the author to correct. The
-question is, how can such a correction be applied?
-
-\CWEB\ itself provides two rather different mechanisms for handling
-these exceptions. The first uses direct typesetting commands (for
-example, \.{@@/} and \.{@@\#} for canceling and
-introducing a line break, resp.) to change the typographic output.
-
-The second (preferred) way is to supply {\it hidden context\/} to the
-pretty-printer. Two commands, \.{@@;} and
-\.{@@[}$\ldots$\.{@@]} are used for this purpose. The
-former introduces a `virtual semicolon' that acts in every way like a
-real one except it is not typeset (it is not output in the source file
-generated by \CTANGLE, either, but this has nothing to do with pretty
-printing, so I will not mention \CTANGLE\ anymore). For
-instance, from the parser's point of view, if the preceding text was
-parsed as a `scrap' of type {\it exp}, the addition of \.{@@;}
-will make it into a `scrap' of type {\it stmt\/} in \CWEB's
-parlance. The second construct (\.{@@[}$\ldots$\.{@@]}),
-is used to create an {\it exp\/} scrap out of whatever happens to be
-inside the brackets.
-
-This is a powerful tool at the author's disposal. Stylistically,
-this is the right way to handle exceptions as it forces the writer to
-emphasize the {\it logical\/} structure of the formal
-text. If the pretty printing style is changed
-extensively later, the texts with such hidden contexts should be able to
-survive intact in the final document (as an example, using a break
-after every statement in \Cee\ may no longer be considered
-appropriate, so any forced break introduced to support this convention
-would now have to be removed, whereas \.{@@;}'s would simply
-quietly disappear into the background).
-
-The same hidden context idea has another important advantage: with
-careful grammar fragmenting (facilitated by \CWEB's or any other
-literate programming tool's `hypertext' structure) and a more diverse
-hidden context (or even arbitrary hidden text) mechanism, it is
-possible to use a strict parser to parse incomplete language
-fragments. For example, the productions that are needed to parse
-\Cee's expressions form a complete subset of the grammar. If the
-grammar's `start' symbol is changed to {\it expression\/} (instead of
-the {\it translation-unit\/} as it is in the full \Cee\ grammar), a
-variety of incomplete \Cee\ fragments can now be parsed and
-pretty-printed. Whenever such granularity is still too `coarse',
-carefully supplied hidden context will give the pretty printer enough
-information to adequately process each fragment. A number of such {\it
-sub}-parsers can be tried on each fragment (this may sound
-computationally expensive; in practice, however, a carefully chosen
-hierarchy of parsers will finish the job rather quickly) until a
-correct parser produces the desired output (this approach is similar
-to, although not quite the same as, the one employed by {\it General LR
-parsers}).
-
-This somewhat lengthy discussion brings us to the question directly
-related to the tools described in this article: how does one provide
-typographical hints or hidden context to the parser?
-
-One obvious solution is to build such hints directly into the
-grammar. The parser designer can, for instance, add new tokens
-(say, \.{BREAK\_LINE}) to the grammar and extend the
-production set to incorporate the new additions. The risk of
-introducing new conflicts into the grammar is low (although not
-entirely non-existent, due to the lookahead limitations of LR(1)
-grammars) and the changes required are easy, although very tedious, to
-incorporate.
-
-In addition to being labor intensive, this solution has two other
-significant shortcomings: it alters the original grammar and hides its
-logical structure; it also `bakes in' the pretty-printing conventions
-into the language structure (making `hidden' context much less
-`stealthy'). It does avoid the `synchronicity problem' mentioned
-below.
-
-A marginally better technique is to introduce a new regular expression
-recognizable by the scanner which will then do all the necessary
-bookkeeping upon matching the sequence. All the difficulties with
-altering the grammar mentioned above apply in this case, as well, only
-at the `lexical analysis level'. At a minimum, the set of tokens
-matched by the scanner would have to be changed.
-
-A much better approach involves inserting the hints at the input stage and
-passing this information to the scanner and parser as part of the token `values'. The
-hints themselves can masquerade as characters ignored by the scanner
-(white space, for example) and preprocessed by a specially designed
-input routine. The scanner then simply passes on the values to the
-parser. This makes hints, in effect, invisible.
-
-The difficulty lies in synchronizing the token production with the
-parser. This subtle complication is very familiar to anyone who has
-designed \TeX's output routines: the parser and the lexer are not
-synchronous, in the sense that the scanner might be reading several
-(in the case of the general LR$(n)$ parsers) tokens ahead of the
-parser before deciding on how to proceed (the same way \TeX\ can
-consume a whole paragraph's worth of text before exercising its page
-builder).
-
-If we simple-mindedly let the scanner return every hint it has encountered
-so far, we may end up feeding the parser the hints meant for the token
-that appears {\it after\/} the fragment the parser is currently working
-on. In other words, when the scanner `backs up' it must correctly back
-up the hints as well.
-
-This is exactly what the scanner produced by the tools in this package
-does: along with the main stream of tokens meant for the parser, it
-produces two hidden streams (called the \.{\\format} stream and
-the \.{\\stash} stream) and provides the parser with two
-strings (currently only strings of digits are used although arbitrary
-sequences of \TeX\ tokens can be used as pointers) with the promise
-that {\it all the `hints' between the beginning of the corresponding
-stream and the point labeled by the current stream pointer appeared
-among the characters up to and, possibly, including the ones matched
-as the current token}. The macros to extract the relevant parts of the
-streams (\.{\\yyreadfifo} and its cousins) are provided for the
-convenience of the parser designer. The interested reader can consult
-the input routine macros for the details of the internal
-representation of the streams.
-
-In the interest of full disclosure, let me point out that this simple
-technique introduces a significant strain on \TeX's
-computational resources: the lowest level macros, the ones that handle
-character input and are thus executed (sometimes multiple times) for
-{\it every\/} character in the input stream, are rather complicated and,
-therefore, slow. Whenever the use of such streams is not desired, a simpler
-input routine can be written to speed up the process (see
-\.{\\yyinputtrivial} for a working example of such a macro).
-
-Finally, while probably not directly related to the present
-discussion, this approach has one more interesting feature: after the
-parser is finished, the parser output and the streams exist
-`statically', fully available for any last minute preprocessing or for
-debugging purposes, if necessary. Under most circumstances, the parser
-output is `executed' and the macros in the output are the ones reading
-the various streams using the pointers supplied at the parsing stage
-(at least, this is the case for all the parsers supplied with the
-package).
-
-@*1 Inside semantic actions: switch statements and `functions' in \TeX.
-Now you have a lexer for your input, and a grammar ready to be put into
-action (we will talk about actions a bit later). It is time to discuss
-how the tables produced by \bison\ get converted into \TeX\ {\it macros\/}
-that drive the parser in {\it \TeX}.
-
-The tables that drive the \bison\ input parsers
-are collected in various \.{\{b,d,f,g,n\}yytab.tex} and \.{small\_tab.tex}. Each
-one of these files contains the tables that implement a specific parser
-used during different stages of processing.
-Their exact function is well explained
-in the source file produced by \bison\ ({\it how} this is done is
-explained elsewhere, see \cite[Ah] for a good reference). It would
-suffice to mention here that there are three types of tables in this
-file: \recount{1}numerical tables such as \.{\\yytable} and
-\.{\\yycheck} (both are either \TeX's token registers in an
-unoptimized parser or associative arrays in an optimized version of the
-parser, as discussed below),
-\recount{2}a string array \.{\\yytname}, and \recount{3}an action
-switch. The action switch is what gets called when the parser does a
-{\it reduction}. It is easy to notice that the numerical tables come
-`premade' whereas the string array consisting of token names
-is difficult to recognize. This is intentional: this form of initialization
-is designed to allow the widest range of
-characters to appear inside names. The macros that do this reside in
-\.{yymisc.sty}. The generated table files also contain
-constant and token declarations used by the parser.
-
-The description of the process used to output \bison\ tables in an
-appropriate form continues in the section about
-\locallink{bsfile}outputting \TeX\ tables\endlink; we pick it up here
-with the description of the syntax-directed translation and the
-actions. The line
-$$
-\.{\\switchon\\next\\in\\currentswitch}
-$$
-is responsible for calling an appropriate action in the current
-switch, as is easy to infer. A {\it switch\/} is also a macro that
-consists of strings of \TeX\ tokens intermixed with \TeX\ macros
-inside braces. Each group of macros
-gets executed whenever the character or the group of characters in
-\.{\\next} matches a substring preceding the braced group. If there
-are two different substrings
-that match, only the earliest group of macros gets expanded.
-Before a state is
-used, a special control sequence,
-\.{\\setspecialcharsfrom\\switchname}, can be used to put the \TeX\
-tokens in a form suitable for consumption by the \.{\\switchon} macros. The
-most important step it performs is to {\it turn every token in the
-list into a character with the same character code and category
-12\/}. Thus \.{\\\{} becomes \.{\{}$_{12}$. There are other ways of
-inserting tokens into a state: enclosing a token or a string of tokens in
-\.{\\raw...\\raw} adds it to the state macro unchanged. If you have
-a sequence of category 12 characters you want to add to the state, put
-it after \.{\\classexpand} (such sequences are usually prepared by the
-\.{\\setspecialchars} macro that uses the token tables generated by
-\bison\ from your grammar).
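-
-Schematically, and with the reservation that the precise user-level
-syntax should be checked against the macros themselves (here \.{\\doadd},
-\.{\\dosub} and \.{\\dostop} stand for user supplied actions), a small
-switch might be set up and consulted as follows:
-$$
-\vbox{%
-\hbox{\.{\\def\\arithswitch\{+\{\\doadd\}-\{\\dosub\}\\raw stop\\raw\{\\dostop\}\}}}%
-\hbox{\.{\\setspecialcharsfrom\\arithswitch}}%
-\hbox{\.{\\switchon\\next\\in\\arithswitch}}%
-}
-$$
-Here \.{stop} is a readable label of the kind mentioned above, added
-verbatim by the \.{\\raw}$\ldots$\.{\\raw} pair.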
-
-You can give a case a readable label (say, \.{brackets}) and enclose
-this label in \.{\\raw}$\ldots$\.{\\raw}. A word of caution: an `a'
-inside of \.{\\raw}$\ldots$\.{\\raw} (which is most likely an
-\.{a}$_{11}$ unless you played with category codes before loading the
-\.{\\switchon} macros) and the one outside it are two different
-characters, as one is no longer a letter (category 11) in the eyes of
-\TeX\ whereas the other one still is. For this reason one should not
-use characters other than letters in h\.{\{}is\.{,}er\.{\}} state
-names: the way a state picks an action does not distinguish between,
-say, a `\.{(}' in `\.{(letter)}' and a stand alone `\.{(}' and may
-pick an action that you did not intend. This applies even if `\.{(}'
-is not among the characters explicitly inserted in the state macro: if
-an action for a given character is not found in the state macro, the
-\.{\\switchon} macro will insert a current \.{\\default} action
-instead, which most often you would want to be \.{\\yylex} or
-\.{\\yyinput} (i.e.\ skip this token). If `\.{(}' or `\.{)}' matches
-the braced group that follows `\.{(letter)}' chaos may ensue (most
-likely \TeX\ will keep reading past the \.{\\end} or \.{\\yyeof} that
-should have terminated the input). Make the names of character
-categories as unique as possible: the \.{\\switchon} is simply a
-string matching mechanism, with the added distinction between
-characters of different categories.
-
-Finally, the construct \.{\\statecomment}{\it
-anything\/}\.{\\statecomment} allows you to insert comments in the
-state sequence (note that the state {\it name\/} is put at the
-beginning of the state macro (by \.{\\setspecialcharsfrom})
-in the form of a special control sequence
-that expands to nothing: this elaborate scheme is needed because
-another control sequence can be \.{\\let} to the state macro which
-makes the debugging information difficult to decipher). The debugging
-mode for the lexer implemented with these macros is activated by
-\.{\\tracedfatrue}.
-
-The functionality of the \.{\\switchon} macros (for `historical'
-reasons, one can also use \.{\\action} as a synonym) has been
-implemented in a number of other macro packages (see \cite[Fi] that
-discusses the well-known and widely used \.{\\CASE} and \.{\\FIND}
-macros). The macros in this collection have the additional property
-that the only assignments that persist after the \.{\\switchon}
-completes are the ones performed by the user code inside the selected
-case.
-
-This last property of the switch macros is implemented using another
-mechanism that is part of this macro suite: the `subroutine-like'
-macros, \.{\\begingroup}$\ldots$\.{\\tokreturn}. For examples, an
-interested reader can take a look at the macros included with the
-package. A typical use is
-\.{\\begingroup}$\ldots$\.{\\tokreturn\{\}\{\\toks0 \}\{\}} which will
-preserve all the changes to \.{\\toks0} and have no other side effects
-(if, for example, in typical \TeX\ vernacular, \.{\\next} is used
-to implement tail recursion inside the group, after the
-\.{\\tokreturn}, \.{\\next} will still have the same value it
-had before the group was entered). This functionality comes at the
-expense of some computational efficiency.
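-
-For instance (a sketch only: the middle line is a placeholder for
-whatever local computations are required), a `function' that
-accumulates its result in \.{\\toks0} and promises to change nothing
-else would follow the pattern
-$$
-\vbox{%
-\hbox{\.{\\def\\buildresult\{\\begingroup}}%
-\hbox{\quad\.{\% local assignments accumulating the result in \\toks0}}%
-\hbox{\quad\.{\\tokreturn\{\}\{\\toks0 \}\{\}\}}}%
-}
-$$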
-
-This covers most of the routine computations inside semantic actions;
-all that is left is a way to `tap' into the stack automaton
-built by \bison\ using an interface similar to the special
-\.{\$$n$} variables utilized by the `genuine' \bison\ parsers
-(i.e.\ written in \Cee\ or any other target language supported by
-\bison).
-
-This role is played by the several varieties of \.{\\yy$\,p$} command
-sequences (for the sake of completeness, $p$ stands for one of \.{($n$)},
-\.{[{\rm name}]}, \.{]{\rm name}[} or $n$, here $n$ is a
-string of digits, and a `name' is any name acceptable as a symbolic
-name for a term in \bison). Instead
-of going into the minutia of various flavors of \.{\\yy}-macros, let me
-just mention that one can get by with only two `idioms' and still
-be able to write parsers of arbitrary sophistication:
-\.{\\yy($n$)} can be treated as a token register containing the
-value of the $n$-th term of the rule's right hand side, $n>0$. The left
-hand side of a production is accessed through \.{\\yyval}. A
-convenient shortcut is \.{\\yy0\{{\rm \TeX\space material}\}} which
-will expand the `\TeX\ material' inside the braces. Thus, a simple way
-to concatenate the values of the first two production terms is
-\.{\\yy0\{\\the\\yy(1)\\the\\yy(2)\}}. The included \bison\
-parser can also be used to provide support for `symbolic names',
-analogous to \bison's \.{{\$}[{\rm name}]} but a
-bit more effort is required on the user's part to initialize such support.
-Using symbolic names can make the parser more readable and maintainable,
-however.
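-
-In the \CWEB\ sources below (where the `\.{/}' escape convention,
-explained in a later section, is used inside the \.{TeX\_} macros) the
-same concatenation action would be typed as
-$$
-\.{TeX\_( "/yy0\{/the/yy(1)/the/yy(2)\}" );}
-$$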
-
-There is also a \.{\\bb$\,n$} macro, that provides access to the term
-values in the `natural order' (e.g.~\.{\\bb1} is the last term read). Its
-intended use is with the `inline' rules (see the main parser for
-such examples). As of version \.{3.0} \bison\ no longer outputs
-|yyrhs| and |yyprhs|, which makes it impossible to produce the
-|yyrthree| array necessary for processing such rules in the `left to right'
-order. One might also note that the new notation is better suited for
-the inline rules since the value that is pushed on the stack is that
-of \.{\\bb0}, i.e.~the term implicitly inserted by \bison. Be aware
-that there are no \.{\\bb[$\cdot$]} or \.{\\bb($\cdot$)} versions of
-these macros, for obvious reasons. A less obvious feature of these
-macros is their `nonexpandable' nature. This means they cannot be used
-inside \.{\\edef}. Thus, the most common use pattern is
-\.{\\bb$\,n$\{\\toks$\,m$\}} with a subsequent expansion of
-\.{\\toks$\,m$}. Making these macros expandable is certainly possible
-but does not seem crucial for the intended limited use pattern.
-
-Naturally, a parser writer may need a number of other data
-abstractions to complete the task. Since these are highly dependent on
-the nature of the processing the parser is supposed to provide, we
-refer the interested reader to the parsers included in the package as
-a source of examples of such specialized data structures.
-
-One last remark about the parser operation is worth making here:
-the parser automaton itself does not make any \.{\\global}
-assignments. This (along with some careful semantic action writing)
-can be used to `localize' the effects of the parser operation and,
-most importantly, to create `reentrant' parsers that can, e.g.\ call
-{\it themselves\/} recursively.
-
-@*1 `Optimization'.
-By default, the generated parser and scanner keep all of their tables
-in separate token registers. Each stack is kept in a single macro (this
-description is further complicated by the support for parser {\it
-namespaces\/} that exists even for unoptimized parsers but this
-subtlety will not be mentioned again---see the macros in the package
-for further details). Thus, every time a table
-is accessed, it has to be expanded, making the table access latency
-linear in {\it the size of the table}. The same holds for stacks and
-the action `switches', of
-course. While keeping the parser tables (which are immutable) in token
-registers does not have any better rationale than saving the control
-sequence memory (the most abundant memory in \TeX), this way of
-storing {\it stacks} does have an advantage when multiple parsers get
-to play simultaneously. All one has to do to switch from one parser to
-another is to save the state by renaming the stack control sequences
-accordingly.
-
-When the parser and scanner are `optimized', all these control
-sequences are `spread over' appropriate associative arrays. One caveat
-to be aware of: the action switches for both the parser and the scanner
-have to be output differently (a command line option is used to
-control this) for optimized and unoptimized parsers. While it is
-certainly possible to optimize only some of the parsers (if your
-document uses multiple parsers) or even only some {\it parts\/} of a given
-parser (or scanner), the details of how to do this are rather
-technical and are left for the reader to discover by reading the
-examples supplied with the package. At least at the beginning it is
-easier to simply set the highest optimization level and use it
-consistently throughout the document.
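-
-Schematically (the names below are invented for the illustration and
-do not reflect the naming conventions actually used by the package),
-the difference is between keeping a whole table in one register and
-keeping one control sequence per table entry:
-$$
-\vbox{%
-\hbox{\.{\\newtoks\\mytable}}%
-\hbox{\.{\\mytable=\{0 4 7 1 3 0 2\}\% the whole table in one register}}%
-\hbox{\.{\\expandafter\\chardef}}%
-\hbox{\quad\.{\\csname mytable[5]\\endcsname=0 \% one entry, accessed directly}}%
-}
-$$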
-
-@*1 {\it \TeX\/} with a different {\sl slant} or do you C an escape?.
-%\def\texnspace{other}
-Some \TeX\ productions below probably look like alien script.
-The authors of \cite[Er] cite a number of reasons why pretty printing of
-\TeX\ in general is a nearly impossible task. The macros included with
-the package follow a very straightforward strategy and do not try to
-be very comprehensive. Instead, the burden of presenting \TeX\ code in
-a readable form is placed on the programmer. Appropriate hints can be
-supplied by means of indenting the code, using assignments ($=$) where
-appropriate, etc. If you would rather look at straight \TeX\
-instead, the line \.{\\def\\texnspace\{other\}} at the beginning of
-this section can be uncommented and
-|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );| becomes
-\def\texnspace{other}%
-|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );|.
-\def\texnspace{texline}%
-There is, however, more to this story. A look at the actual file will
-reveal that the line above was typed as
-$$
-\.{TeX\_( "/noexpand/inmath\{/yy0\{/yy1\{\}\}\}" );}
-$$
-The `escape character' is leaning the other way!
-The lore of \TeX\ is uncompromising: `\.{\\}' is {\it the\/} escape
-character. What is the reason to avoid it in this case?
-
-The mystery is not very deep: `\.{/}' was chosen as an escape character
-by the parser macros (a quick glance at \.{?yytab.tex} will reveal as
-much). There is, of course, nothing sacred (other than tradition,
-which this author is trying his hardest to follow) about what character code
-the escape character has. The reason to look for an alternative is straightforward: `\.{\\}' is
-a special character in \Cee, as well (also an `escape' in fact). The line
-\.{TeX\_( "..." );} is a {\it macro-call\/} but $\ldots$ in \Cee. This
-function simply prints out (almost `as-is') the line in
-parentheses. An attempt at \.{TeX\_( "\\noexpand" );} would result in
-\numberlinestrue
-\begindemo
-^
-^oexpand
-\enddemo
-\numberlinesfalse
-Other escape combinations\footnote{Here is a full list of {\it
-defined\/} escaped characters in \Cee: \.{\\a}, \.{\\b}, \.{\\f}, \.{\\n},
-\.{\\r}, \.{\\t}, \.{\\v}, \.{\\}{$[$\it octal digit$]$}, \.{\\'},
-\.{\\"}, \.{\\?}, \.{\\\\}, \.{\\x}, \.{\\u}, \.{\\U}. Note that the
-last three combinations must be followed by a specific string of
-characters to appear in the input without generating errors.} are
-even worse: most are simply undefined. If anyone feels trapped without
-an escape, however, the same line can be typed as
-$$
-\.{TeX\_( "\\\\noexpand\\\\inmath\{\\\\yy0\{\\\\yy1\{\}\}\}" );}
-$$
-Twice the escape!
-
-If one were to look closer at the code, another oddity stands
-out: there are no \.{\$}'s anywhere in sight.
-The big money, \.{\$} is a beloved character in
-\bison. It is used in action code to reference the values of the
-appropriate terms in a production. If mathematics pays your bills, use
-\.{\\inmath} instead.
-
-@*1 The \bison\ parser(s). Let's take a short break for a broad overview of the input file.
+\ifbootstrapmode % this is a bootstrap run to extract the states
+ \message{bootstrapping \jobname.tex ...}%
+ \input limbo.sty
+ \def\optimization{5}
+ \input yy.sty
+ \modebootstrap
+\fi
+
+@**The \eatone{bison}\bison\ parser stack.
+The input language for \bison\ loosely follows the {\sc BNF} notation, with
+a few enhancements, such as the syntax for {\em actions}, to implement
+the syntax-directed translation@^syntax-directed translation@>, as
+well as various declarations for tokens, nonterminals, etc.
+
+On the one hand, the language is relatively easy to handle and is
+nearly whitespace agnostic; on the other, a primitive parser is
+required for some basic setup even at a very early stage, so the
+design must be carefully thought out. This {\em
+bootstrapping\/}@^bootstrapping@> step is discussed in more detail
+later on.
+
+The path chosen here is by no means optimal. What it lacks in
+efficiency, though, it may amply make up for in practicality, as we reuse the
+original grammar used by \bison\ to produce the parser(s) for both
+pretty printing and bootstrapping. Some minor subtleties arising from
+this approach are explained in later sections.
+
+As was described in the
+\ifbootstrapmode\else\locallink{parser.stacks} discussion of parser
+stacks \endlink\fi@^parser stack@> above, to pretty print a variety
+of grammar fragments, one may employ
+a {\em parser stack\/} derived from the original grammar. The most
+natural and common unit of a \bison\ grammar is a set of
+productions. It is thus natural to begin our discussion of the parsers
+in the \bison\ stack with the parser responsible for processing
+individual rules.
+
+One should note that the productions below are not concerned with the
+typesetting of the grammar. Instead, this task is delegated to the
+macros in \.{yyunion.sty} and its companions. The first pass of the
+parser merely constructs an `executable abstract syntax tree' (or
+\EAST\footnote{One may argue that \EAST\ is still merely a syntactic
+construct requiring a proper macro framework for its execution and
+should be called a `weak executable syntax tree' or \WEST. This
+acronym extravaganza is heading south, so we shall stop here.}) which
+can serve very diverse purposes: from collecting token declarations in
+the bootstrapping pass to typesetting the grammar rules.
+
+It would be impossible to completely avoid the question of the visual
+presentation of the \bison\ input, however. It has already been
+pointed out that the syntax adopted by \bison\ is nearly insensitive
+to whitespace. This makes {\em writing\/} \bison\ grammars easier. On
+the other hand, {\em presenting\/} a grammar is best done using a
+variety of typographic devices that take advantage of the meaningful
+positioning of text on the page: skips, indents, etc. Therefore, the
+macros for \bison\ pretty printing trade a number of \bison\ syntax
+elements (such as \.{\yl}, \.{;}, action braces, etc.) for the careful
+placement of each fragment of the input on the page.
+
+Let's take a short break for a broad overview of the input file.
The basic structure is that of an ordinary \bison\ file that produces
-plain \Cee\ output. The \Cee\ actions, however, are programmed to output \TeX.
-
-@s TeX_ TeX
-@s TeXa TeX
-@s TeXb TeX
-@s TeXf TeX
-@s TeXfo TeX
-@s TeXao TeX
+plain \Cee\ output. The \Cee\ actions, however, are programmed to output \TeX.
+The \bison\ sections (separated by \.{\%\%} (shown (pretty printed)
+as \prodstyle{\%\%} below)) appear between the successive dotted lines.
@(bg.yy@>=
@G Switch to generic mode.
@@ -1457,7 +94,8 @@ plain \Cee\ output. The \Cee\ actions, however, are programmed to output \TeX.
%%
@g
-@ Bootstrap mode is next. The reason for a separate bootstrap parser is to
+@ Bootstrap\namedspot{bootstrapping}@^bootstrapping@> mode is next.
+The reason for a separate bootstrap parser is to
collect the minimal amount of information to `spool up' the `production'
parsers. To understand the mechanics and the reasons behind it, consider what happens
following a declaration such as \.{\%token TOKEN "token"}
@@ -1486,8 +124,8 @@ function of the bootstrap parser. Since the lexer is reused, some
token values need to be known in advance (and the rest either ignored
or replaced by some `made up' values). These tokens are `hard coded'
into the parser file generated by \bison\ and output using a special
-function. The switch `|@[#define@]@; BISON_BOOTSTRAP_MODE|' tells the `driver'
-program to output the hard coded token values.
+function. The switch `|@[#define@]@; BISON_BOOTSTRAP_MODE|' tells the
+`driver' program to output the hard coded token values.
@q Bizarre looking way of typing #define is due to the awkward way@>
@q \CWEB\ treats switching in and out of $-mode in inline \Cee@>
@@ -1504,10 +142,10 @@ for an appropriate subset of the \bison\ syntax is not yet available
would have to use the same scanner (unless you want to write a custom
scanner for it), which would need to know how to output tokens, for
which it would need a parser for a subset of \bison\ syntax $\ldots$
-it is a `chicken and egg'). Hence the name `bootstrap'. Once a
-functional parser for a large enough subset of the \bison\ input
-grammar is operational, {\it it\/} can be used to pair up the token
-names.
+it is a genuine `chicken and egg' problem). Hence the need for
+`bootstrap'. Once a functional parser for a large enough subset of the
+\bison\ input grammar is operational, {\it it\/} can be used to pair
+up the token names.
The second function of the bootstrap parser is to collect information
about the scanner's states. The mechanism is slightly different for
@@ -1519,14 +157,13 @@ the `driver' program in any data structure and are instead defined as
ordinary macros. The header file is the information the `driver' file
needs to output the state values.
-An additional subtlety in the case of state value output is that the
+An additional subtlety in the case of the state value output is that the
main lexer for the \bison\ grammar utilizes states extensively and thus
cannot be easily used with the bootstrap parser before the state
values are known. The solution is to substitute a very simple scanner barely
capable of lexing state declarations. Such a scanner is implemented
in \.{ssffo.w} (the somewhat cryptic name stands for `{\bf s}imple {\bf s}canner
{\bf f}or {\bf f}lex {\bf o}ptions').
-\saveparseoutputtrue
@(bb.yy@>=
@G Switch to generic mode.
%{
@@ -1548,12 +185,6 @@ in \.{ssffo.w} (the somewhat cryptic name stands for `{\bf s}imple {\bf s}canner
@ The prologue parser is responsible for parsing various grammar
declarations as well as parser options.
-\saveparseoutputfalse
-%\traceparserstatestrue
-%\tracestackstrue
-%\tracerulestrue
-%\traceactionstrue
-\saveparseoutputtrue
@(bd.yy@>=
@G Switch to generic mode.
%{@> @<Grammar parser \Cee\ preamble@> @=%}
@@ -1562,21 +193,15 @@ declarations as well as parser options.
%{@> @<Grammar parser \Cee\ postamble@> @=%}
@> @<Tokens and types ...@> @=
%%
- @> @<Fake start symbol for prologue grammar@>@;
+ @> @<Fake start symbol for prologue grammar@> @=
@> @<Parser common productions@> @=
@> @<Parser prologue productions@> @=
%%
@g
-@ Full \bison\ input parser is used when a complete \bison\ file is
+@ The full \bison\ input parser is used when a complete \bison\ file is
expected. It is also capable of parsing a `skeleton' of such a file,
similar to the one that follows this paragraph.
-\traceparserstatesfalse
-\tracestacksfalse
-\tracerulesfalse
-\traceactionsfalse
-\checktablefalse
-\saveparseoutputfalse
@(bf.yy@>=
@G Switch to generic mode.
%{@> @<Grammar parser \Cee\ preamble@> @=%}
@@ -1592,12 +217,27 @@ similar to the one that follows this paragraph.
%%
@g
-@ The first two options are essential for the parser operation. The
+@ \namedspot{bison.options}The first two options below are essential
+for the parser operation
+as each of them makes \bison\ produce additional tables (arrays) used
+in the operation (or bootstrapping) of \bison\ parsers. The
start symbol can be set implicitly by listing the appropriate
-production first.
-@q %define lr.type canonical-lr @>
-@q Make not on this and lexing too much lookahead and the \stashed trick@>
-@q Explain other options @>
+production first. Modern \bison\ also allows specifying the kind of
+parsing algorithm to be used (provided the supplied grammar is in the
+appropriate class): {\sc LALR}($n$), {\sc LR}($n$), {\sc GLR}, etc.
+The default is to use the {\sc LALR}($1$) algorithm (with the
+corresponding assumption about the grammar); a different flavor, such
+as canonical {\sc LR}, can be selected explicitly by putting
+\medskip
+\beginprod
+\%define lr.type canonical-lr
+\endprod
+\medskip
+\noindent
+in with the rest of the options.
+Using other types of grammars will wreak havoc
+on the parsing algorithm hardcoded into \splint\ (see \.{yyparse.sty})
+as well as on the production of \.{\\stashed} and \.{\\format} streams.
@<Grammar parser \bison\ options@>=
@G
%token-table
@@ -1605,9 +245,9 @@ production first.
%start input
@g
-@*2 Grammar rules. Most of the original comments present in
+@*1 Token declarations. Most of the original comments present in
the grammar file used by \bison\ itself have been preserved and appear in
-{\it italics\/} at the beginning of each appropriate section.
+{\it italics\/} at the beginning of the appropriate section.
To facilitate the {\it bootstrapping\/} of the parser (see above), some
declarations have been separated into their own sections. Also, a
@@ -1616,6 +256,11 @@ number of new rules have been introduced to create a hierarchy of
most of the tokens used by the grammar. Only the string versions are
kept in the |yytname| array, which, in part is the reason for a
special bootstrapping parser as explained earlier.
+\iffalse
+ \checktrailingstashtrue % see what is left at the end
+ \checktabletrue % display the table
+ \let\stashnext\stashnextwithnothing % collect the stash expanded
+\fi
@<Tokens and types for the grammar parser@>=
@G
%token GRAM_EOF 0 "end of file"
@@ -1641,6 +286,11 @@ special bootstrapping parser as explained earlier.
@ We continue with the list of tokens below, following the layout of
the original parser.
+\iffalse
+ \checktrailingstashfalse
+ \checktablefalse
+ \let\stashnext\stashnextwithnothingnx
+\fi
@<Global Declarations@>=
@G
%token
@@ -1689,15 +339,19 @@ the original parser.
%token <param> PERCENT_PARAM "%param";
@g
-@ Extra tokens for typesetting \flex\ state
+@ \namedspot{flex.options}Extra tokens for typesetting \flex\ state
declarations and options are declared in addition to the ones that a
-standard \bison\ parser recognizes.
+standard \bison\ parser recognizes. This extension of the original
+grammar has become unnecessary with the addition of the \flex\ input
+parser(s) but is left as part of the extended grammar for convenience and
+`historical' reasons.
@<Tokens and...@>=
@G
%token FLEX_OPTION FLEX_STATE_X FLEX_STATE_S
@g
-@ We are ready to describe the top levels of the parse tree. The first
+@*1 Grammar productions.
+We are ready to describe the top levels of the parse tree. The first
`sub parser' we consider is a `full' parser, that is the parser that
expects a full grammar file, complete with the prologue, declarations,
etc. This parser can be used to extract information from the grammar
@@ -1706,23 +360,23 @@ that is otherwise absent from the executable code generated by
\.{\$}\.{[}{\rm name}\.{]}.
This parser is therefore used to generate the `symbolic
switch' to provide support for symbolic term names similar to
-`genuine' \bison's \.{\$}\.{[}$\ldots$\.{]} syntax.
+the `genuine' \bison's \.{\$}\.{[}$\ldots$\.{]} syntax.
+
+The action of the parser in this case is simply to separate the
+accumulated `parse tree' from the auxiliary information carried by the
+parser on the stack.
+\saveparseoutputfalse
+\checktablefalse
+\tracenamesfalse
@<Parser full productions@>=
@G
@t}\vb{\inline}{@>
input:
prologue_declarations
- "%%" grammar epilogue.opt {@> @<Finish the input setup@> @=}
+ "%%" grammar epilogue.opt {@> TeX_( "/getsecond{/yy(3)}/to/table" ); @=}
;
@g
-@ The action of the parser in this case is simply to separate the
-accumulated `parse tree' from the auxiliary information carried by the
-parser on the stack.
-@<Finish the input setup@>=
- @[TeX_( "/getsecond{/yy(3)}/to/toksa" );@]@; /* extract grammar contents */
- @[TeX_( "/yy0{/the/toksa}/table=/yy(0)" );@]@;
-
@ Another subgrammar deals with the syntax of isolated \bison\ rules. This is
the most commonly used `subparser' since a rules cluster is the most
natural `unit' to include in a \CWEB\ file.
@@ -1772,9 +426,12 @@ is undocumented so we supply an explicit action in each case.
@<Carry on@>=
@[TeX_( "/yy0{/the/yy(1)}" );@]@;
-@ Next, a subgrammar for processing prologue declarations. Finer
+@ Next comes a subgrammar for processing prologue declarations. Finer
differentiation is possible but the `subparsers' described here work
-pretty well and impose a mild style on the grammar writer.
+pretty well and impose a mild style on the grammar writer. Note that
+these rules are not part of the official \bison\ input grammar and are
+added to make the typesetting of `file outlines' (e.g.~|@(bb.yy@>|
+above) possible.
@<Fake start symbol for prologue grammar@>=
@G
@t}\vb{\inline}{@>
@@ -1819,8 +476,8 @@ prologue_declaration:
| "%defines" {@> TeX_( "/yy0{/nx/optionflag{defines}{}/the/yy(1)}" ); @=}
| "%defines" STRING {@> @[TeX_( "/toksa{defines}" );@]@+@<Prepare one parametric option@> @=}
| "%error-verbose" {@> TeX_( "/yy0{/nx/optionflag{error verbose}{}/the/yy(1)}" ); @=}
-| "%expect" INT {@> @[TeX_( "/toksa{expect}" );@]@+@<Prepare one parametric option@> @=}
-| "%expect-rr" INT {@> @[TeX_( "/toksa{expect-rr}" );@]@+@<Prepare one parametric option@> @=}
+| "%expect" INT {@> @[TeX_( "/toksa{expect}" );@]@+@<Prepare a generic one parametric option@> @=}
+| "%expect-rr" INT {@> @[TeX_( "/toksa{expect-rr}" );@]@+@<Prepare a generic one parametric option@> @=}
| "%file-prefix" STRING {@> @[TeX_( "/toksa{file prefix}" );@]@+@<Prepare one parametric option@> @=}
| "%glr-parser" {@> TeX_( "/yy0{/nx/optionflag{glr parser}{}/the/yy(1)}" ); @=}
| "%initial-action" "{...}" {@> TeX_( "/yy0{/nx/initaction/the/yy(2)}" ); @=}
@@ -1851,11 +508,20 @@ params:
construct just parsed and attach some auxiliary info, in this case the
stream pointers.
@<Prepare one parametric option@>=
+ @[TeX_( "/yy0{/nx/oneparametricoption{/the/toksa}{/nx/stringify/the/yy(2)}/the/yy(1)}" );@]@;
+
+@ @<Prepare a generic one parametric option@>=
@[TeX_( "/yy0{/nx/oneparametricoption{/the/toksa}{/the/yy(2)}/the/yy(1)}" );@]@;
-@ Some extra declarations to typeset \flex\ options and
+@ These rules handle extra declarations to typeset \flex\ options and
declarations. These are not part of the \bison\ syntax but their
-structure is similar enough that they can be included in the grammar.
+structure is similar enough that they can be included in the
+grammar. As was pointed out \locallink{flex.options}earlier \endlink
+the addition of the \flex\ input parser to \splint\ made this
+extension of the original \bison\ grammar obsolete but it was kept as
+part of the extended grammar for convenience and `historical'
+reasons. The convenience results from simplifying the bootstrap
+procedure by using a single parser.
@<Parser prologue productions@>=
@G
prologue_declaration:
@@ -1884,8 +550,8 @@ flex_option_list:
;
flex_option:
- ID {@> TeX_( "/yy0{/nx/flexoptionpair{/the/yy(1)}{}}" ); @=}
-| ID "=" symbol {@> TeX_( "/yy0{/nx/flexoptionpair{/the/yy(1)}{/the/yy(3)}}" ); @=}
+ ID {@> TeX_( "/yy0{/nx/flexoptionpair{/nx/idit/the/yy(1)}{}}" ); @=}
+| ID "=" symbol {@> TeX_( "/yy0{/nx/flexoptionpair{/nx/idit/the/yy(1)}{/the/yy(3)}}" ); @=}
;
@g
@@ -1905,19 +571,19 @@ flex_option:
@[TeX_( "/yy0{/the/yy(1)/nx/hspace{/the/toksb}{/the/toksc}/the/yy(2)}" );@]@;
@ {\it Grammar declarations}. These declarations can appear in both
-prologue and the rules sections. Their treatment is very similar to
-prologue-only options.
+the prologue and the rules sections. Their treatment is very similar to
+the prologue-only options.
@<Parser common productions@>=
@G
grammar_declaration:
precedence_declaration {@> @<Carry on@> @=}
| symbol_declaration {@> @<Carry on@> @=}
-| "%start" symbol {@> @[TeX_( "/toksa{start}" );@]@+@<Prepare one parametric option@> @=}
+| "%start" symbol {@> @[TeX_( "/toksa{start}" );@]@+@<Prepare a generic one parametric option@> @=}
| code_props_type "{...}" generic_symlist {@> @<Assign a code fragment to symbols@> @=}
| "%default-prec" {@> TeX_( "/yy0{/nx/optionflag{default prec.}{}/the/yy(1)}" ); @=}
| "%no-default-prec" {@> TeX_( "/yy0{/nx/optionflag{no default prec.}{}/the/yy(1)}" ); @=}
| "%code" "{...}" {@> TeX_( "/yy0{/nx/codeassoc{code}{}/the/yy(2)/the/yy(1)}" ); @=}
-| "%code" ID "{...}" {@> TeX_( "/yy0{/nx/codeassoc{code}{/the/yy(2)}/the/yy(3)/the/yy(1)}" ); @=}
+| "%code" ID "{...}" {@> TeX_( "/yy0{/nx/codeassoc{code}{/nx/idit/the/yy(2)}/the/yy(3)/the/yy(1)}" ); @=}
;
code_props_type:
@@ -1945,7 +611,7 @@ code_props_type:
@t}\vb{\inline\flatten}{@>
union_name:
{@> TeX_( "/yy0{}" ); @=}
-| ID {@> @<Carry on@> @=}
+| ID {@> @<Turn an identifier into a term@> @=}
;
grammar_declaration:
@@ -1969,7 +635,7 @@ precedence_declarator:
@t}\vb{\inline}{@>
tag.opt:
{@> TeX_( "/yy0{}" ); @=}
-| TAG {@> @<Carry on@> @=}
+| TAG {@> @<Prepare a \prodstyle{TAG}@> @=}
;
@g
@@ -1977,7 +643,7 @@ tag.opt:
@[TeX_( "/yy0{/nx/codeassoc{union}{/the/yy(2)}/the/yy(3)/the/yy(1)}" );@]@;
@ @<Define symbol types@>=
- @[TeX_( "/yy0{/nx/typedecls{/the/yy(2)}{/the/yy(3)}/the/yy(1)}" );@]@;
+ @[TeX_( "/yy0{/nx/typedecls{/nx/tagit/the/yy(2)}{/the/yy(3)}/the/yy(1)}" );@]@;
@ @<Define symbol precedences@>=
@[TeX_( "/getthird{/yy(1)}/to/toksa" );@]@; /* format pointer */
@@ -1989,7 +655,16 @@ tag.opt:
@<Parser common productions@>=
@<Parser bootstrap productions@>@;
-@ These are the two most important rules for the bootstrap parser.
+@ @<Prepare a \prodstyle{TAG}@>=
+ @[TeX_( "/yy0{/nx/tagit/the/yy(1)}" );@]@;
+
+@ These are the two most important rules for the bootstrap parser. The reasons for
+the~\prodstyle{\%token} declarations to be collected during the bootstrap pass are
+outlined in the \locallink{bootstrapping}section on bootstrapping\endlink.
+The~\prodstyle{\%nterm} declarations are not strictly necessary for
+boostrapping the parsers included in \splint\ but they are added for
+the cases when the bootstrap mode is used for purposes other than
+bootstrapping \splint.
@<Parser bootstrap productions@>=
@G
@t}\vb{\flatten}{@>
@@ -2002,8 +677,13 @@ symbol_declaration:
@ {\it Just like \prodstyle{symbols.1} but accept \prodstyle{INT} for
the sake of \POSIX}. Perhaps the only point worth mentioning here is
-the inserted separator (\.{\\hspace}). Like any other separator, it takes
-two parameters, stream pointers. In this case, however, both pointers are null
+the inserted separator (\.{\\hspace\{}$p_0$\.{\}\{}$p_1$\.{\}},
+typeset as
+|TeXa("/hspace"); TeXao(@t\TeXlit"\{\hbox{$p_0$}\}\{\hbox{$p_1$}\}\hbox{$\!$}"@>);|).
+@q A string "..." is a syntactic unit in \CWEB\ so it is impossible@>
+@q to insert \TeX\ material in the middle of the string directly@>
+Like any other separator, it takes
+two parameters, the stream pointers $p_0$ and~$p_1$. In this case, however, both pointers are null
since there seems to be no other meaningful assignment. If any
formatting or stash information is needed, it can be extracted by the
symbols themselves.
@@ -2020,7 +700,9 @@ symbol.prec:
;
@g
-@ {\it One or more symbols to be \prodstyle{\%type}'d}.
+@ {\it One or more symbols to be \prodstyle{\%type}'d}. The |@<List of
+symbols@>| rules below are reused in the bootstrap parser and are put
+in a separate section for this reason.
@<Parser common productions@>=
@<List of symbols@>@;
@@ -2045,7 +727,7 @@ generic_symlist_item:
;
tag:
- TAG {@> @<Carry on@> @=}
+ TAG {@> @<Prepare a \prodstyle{TAG}@> @=}
| "<*>" {@> @<Carry on@> @=}
| "<>" {@> @<Carry on@> @=}
;
@@ -2055,7 +737,7 @@ tag:
@<Parser bootstrap productions@>=
@G
symbol_def:
- TAG {@> @<Carry on@> @=}
+ TAG {@> @<Prepare a \prodstyle{TAG}@> @=}
@t}\vb{\flatten}{@>
| id {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{}{}}" ); @=}
| id INT {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{/the/yy(2)}{}}" ); @=}
@@ -2158,7 +840,7 @@ indentation. The `format' of the \.{\\rhs} `structure' includes the
stash pointers and a `boolean' to indicate whether the right hand side ends
with an action. Since the action can be implicit, this decision has to
be postponed until, say, a semicolon is seen.
-No formatting or stash pointers are added for such implicit action.
+No formatting or stash pointers are added for implicit actions.
@<Start the right hand side@>=
@[TeX_( "/rhsbool{/yy(1)}/to/toksa /the/toksa" );@]@;
@[TeX_( "/getthird{/yy(1)}/to/toksb" );@]@; /* the format pointer */
@@ -2188,8 +870,7 @@ is not known a different way of accessing the stack is necessary.
@[TeX_( "/rhscont{/toksa}/to{/yy(0)}" );@]@;
@[TeX_( "/yy0{/the/yy(0)/nx/midf/the/toksb}" );@]@;
-@ No pointers are provided for an {\it implicit\/} action.
-@<Add a right hand side to a production@>=
+@ @<Old `Add a right hand side to a production'@>=
@[TeX_( "/rhsbool{/yy(4)}/to/toksa /the/toksa" );@]@;
@[TeX_( "/ifrhsfull" );@]@;
@[TeX_( " /yy0{/nx/rules{/the/yy(3)/nx/rrhssep/the/yy(2)/the/yy(4)}/the/yy(2)}" );@]@;
@@ -2204,6 +885,31 @@ is not known a different way of accessing the stack is necessary.
@[TeXfo( " /nx/actbraces{}{}{0}{0}/nx/bdend}{}{/nx/rhsfulltrue}}/the/yy(2)}" );@]@;
@[TeX_( "/fi" );@]@;
+@ No pointers are provided for an {\it implicit\/} action. Processing a set of rules involves a large number of
+reexpansions. This seems to be a good place to use an array to store {\sc AST} nodes ({\let\writetexidxentry\writetextxtidxentry
+\def\texnspace{texline}\def\texispace{index}\inlineTeXx{/astarray}}). While
+providing a noticeable speedup, this technique significantly
+complicates the debugging of the grammar. In particular, inspecting a
+parsed table supplies very little information if the {\sc AST} nodes are not
+expanded. The macros in \.{yyunion.sty} provide a special debugging
+namespace where the expansion of the parser-produced control sequences
+may be modified to safely expand the generated table.
+@<Add a right hand side to a production@>=
+ @[TeX_( "/rhsbool{/yy(4)}/to/toksa /the/toksa" );@]@;
+ @[TeX_( "/ifrhsfull" );@]@;
+ @[TeX_( " /yypushx{/the/yy(3)/nx/rrhssep/the/yy(2)/the/yy(4)}/on/astarray" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /rhscont{/yy(4)}/to/toksa" );@]@;
+ @[TeX_( " /edef/next{/the/toksa}" );@]@;
+ @[TeX_( " /ifx/next/empty" );@]@;
+ @[TeX_( " /toksa{/emptyterm}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeXb( " /yypushx{/the/yy(3)/nx/rrhssep/the/yy(2)" );@]@;
+ @[TeXf( " /nx/rhs{/the/toksa/nx/rarhssep{0}{0}" );@]@; /* streams have already been grabbed */
+ @[TeXfo( " /nx/actbraces{}{}{0}{0}/nx/bdend}{}{/nx/rhsfulltrue}}/on/astarray" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yy0{/nx/rules{/astarraylastcs}/the/yy(2)}" );@]@;
+
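The same bookkeeping, stripped of the \TeX\ specifics, amounts to appending each
assembled node to a growing store and passing around a short handle instead of
recopying (and thus reexpanding) the text of the node itself. The \Cee\ sketch
below merely illustrates this idea; every name in it is invented for the
illustration and none of it is part of \splint.

    #include <stdio.h>
    #include <string.h>

    #define MAX_NODES 1024
    #define MAX_TEXT   256

    static char ast_store[MAX_NODES][MAX_TEXT]; /* a stand-in for \astarray */
    static int  ast_used = 0;

    static int ast_push( const char *text )     /* store a node, return its handle */
    {
        strncpy( ast_store[ast_used], text, MAX_TEXT - 1 );
        return ast_used++;
    }

    int main( void )
    {
        int rule = ast_push( "expr: expr '+' term" );               /* store once ... */
        printf( "rules -> node %d: %s\n", rule, ast_store[rule] );  /* ... refer to it by handle */
        return 0;
    }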
@ @<Add an optional semicolon@>=
@<Carry on@>@;
@@ -2216,8 +922,9 @@ is not known a different way of accessing the stack is necessary.
of a production. Various `precedence hints' must be attached to an
appropriate portion of the rule, just before an action (which can
be inline, implicit or both in this case).
+\saveparseoutputtrue
@<Parser grammar productions@>=
-@G
+@G(b)
rhs:
{@> @<Make an empty right hand side@> @=}
| rhs symbol named_ref.opt {@> @<Add a term to the right hand side@> @=}
@@ -2235,7 +942,8 @@ named_ref.opt:
;
@g
-@ @<Make an empty right hand side@>=
+@ \saveparseoutputfalse
+@<Make an empty right hand side@>=
@[TeX_( "/yy0{/nx/rhs{}{}{/nx/rhsfullfalse}}" );@]@;
@ @<Add a term to the right hand side@>=
@@ -2329,24 +1037,24 @@ named_ref.opt:
@[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@;
@[TeX_( "/rhsbool{/yy(1)}/to/toksc /the/toksc" );@]@;
@[TeX_( "/ifrhsfull" );@]@;
- @[TeX_( " /yy0{/nx/mergeop{/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */
+ @[TeX_( " /yy0{/nx/mergeop{/nx/tagit/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */
@[TeX_( " /supplybdirective/toksa/yyval" );@]@; /* the directive is `absorbed' by the action */
@[TeX_( " /yy0{/nx/rhs{/the/toksa}{/the/toksb}{/nx/rhsfulltrue}}" );@]@;
@[TeX_( "/else" );@]@;
@[TeXb( " /yy0{/nx/rhs{/the/toksa" );@]@;
- @[TeXao( "/nx/mergeop{/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@;
+ @[TeXao( "/nx/mergeop{/nx/tagit/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@;
@[TeX_( "/fi" );@]@;
@ @<Create an empty named reference@>=
@[TeX_( "/yy0{}" );@]@;
@ @<Create a named reference@>=
- @<Carry on@>@;
+ @<Turn an identifier into a term@>@;
@ Identifiers.
{\it Identifiers are returned as |uniqstr| values by the scanner.
Depending on their use, we may need to make them genuine symbols}. We,
-on the other hand simply copy the values returned by the scanner.
+on the other hand, simply copy the values returned by the scanner.
@<Parser bootstrap productions@>=
@G
id:
@@ -2388,10 +1096,10 @@ placeholders for the appropriate actions in case the parser gains some
sophistication in processing low level types (or starts expecting
different types from the scanner).
@<Turn an identifier into a term@>=
- @<Carry on@>@;
+ @[TeX_( "/yy0{/nx/idit/the/yy(1)}" );@]@;
@ @<Turn a character into a term@>=
- @<Carry on@>@;
+ @[TeX_( "/yy0{/nx/charit/the/yy(1)}" );@]@;
@ @<Turn an identifier into a symbol@>=
@<Carry on@>@;
@@ -2400,10 +1108,10 @@ different types from the scanner).
@<Carry on@>@;
@ @<Prepare the left hand side@>=
- @<Carry on@>@;
+ @<Turn an identifier into a term@>@;
@ @<Prepare a string for use@>=
- @<Carry on@>@;
+ @[TeX_( "/yy0{/nx/stringify/the/yy(1)}" );@]@;
@ {\it Variable and value.
The \prodstyle{STRING} form of variable is deprecated and is not \.{M4}-friendly.
@@ -2412,14 +1120,14 @@ For example, \.{M4} fails for \.{\%define "[" "value"}.}
@G
@t}\vb{\flatten\inline}{@>
variable:
- ID {@> @<Carry on@> @=}
-| STRING {@> @<Carry on@> @=}
+ ID {@> @<Turn an identifier into a term@> @=}
+| STRING {@> @<Prepare a string for use@> @=}
;
value:
{@> TeX_( "/yy0{}" ); @=}
-| ID {@> @<Carry on@> @=}
-| STRING {@> @<Carry on@> @=}
+| ID {@> @<Turn an identifier into a term@> @=}
+| STRING {@> @<Prepare a string for use@> @=}
| "{...}" {@> TeX_( "/yy0{/nx/bracedvalue/the/yy(1)}" ); @=}
;
@g
@@ -2443,7 +1151,7 @@ as they have to be inserted in a place that is aware of the internal definitions
definitions are used.
@<Grammar parser \Cee\ postamble@>=
-#define YYPRINT(file, type, value) yyprint (file, type, value)
+#define YYPRINT(file, type, value) @[yyprint (file, type, value)@]
static void yyprint (FILE *file, int type, YYSTYPE value){}
@ @<Bootstrap parser \Cee\ postamble@>=
@@ -2452,10 +1160,10 @@ definitions are used.
@ @<Bootstrap token output@>=
void bootstrap_tokens( char *bootstrap_token_format ) {
-
-#define _register_token_d(name) fprintf( tables_out, bootstrap_token_format, #name, name, #name );
+
+#define _register_token_d(name) @[fprintf( tables_out, bootstrap_token_format, #name, name, #name );@;
@<Bootstrap token list@>@;
-#undef _register_token_d
+#undef _register_token_d@;
}
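The definition above is a conventional \Cee\ `X-macro': the list section expands to a
sequence of |_register_token_d(...)| invocations, and the surrounding definition of
|_register_token_d| decides what each invocation does. Below is a stand-alone sketch of
the same mechanism; the token names, their values, and the output format are all made
up for the illustration and are not the ones used by \splint.

    #include <stdio.h>

    #define ID     258                  /* hypothetical token values */
    #define STRING 259
    #define INT    260

    static void sketch_bootstrap_tokens( FILE *out, const char *format )
    {
    #define _register_token_d(name) fprintf( out, format, #name, name, #name );
        _register_token_d(ID)       /* fprintf( out, format, "ID", 258, "ID" ); */
        _register_token_d(STRING)
        _register_token_d(INT)
    #undef _register_token_d
    }

    int main( void )
    {
        sketch_bootstrap_tokens( stdout, "\\tokenset{%s}{%d}%% %s\n" );
        return 0;
    }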
@@ -2475,5 +1183,8 @@ token information from the grammar.
@q _register_token_d(SEMICOLON) /* can be omitted in prologue */ @>
@q _register_token_d(TAG) /* only encountered in the definition of PERCENT_PARAM */ @>
-@ Union of types.
+@ Union of types. This section of the \bison\ input lists the types
+that may appear on the value stack. Since \TeX\ does not provide any
+mechanism for type checking (nor is it clear how to translate a \Cee\
+|union| into any data structure usable in \TeX), this section is left empty.
@<Union of grammar parser types@>=
diff --git a/support/splint/cweb/bs.w b/support/splint/cweb/bs.w
index a2003d81fa..adefd23f8b 100644
--- a/support/splint/cweb/bs.w
+++ b/support/splint/cweb/bs.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2014, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -14,7 +14,7 @@
@q You should have received a copy of the GNU General Public License@>
@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
-@*1\bison\ specific routines.
+@*1\eatone{Bison}\bison\ specific routines.
The placeholder code left blank in the common routines is filed in
with the code relevant to the output of parser tables in the following sections.
@@ -173,7 +173,9 @@ change affects the generated parser.
}
-@*2Constants.
+@*2Constants. A generic list of constants to be used later in different contexts is defined below.
+As before, the appropriate macro will be defined generically to do what is required with these
+names (for example, we can turn each name into a string for reporting purposes).
@<Parser constants@>=
_register_const_d(YYEMPTY)@;
_register_const_d(YYPACT_NINF)@;
@@ -196,17 +198,11 @@ format tokens are output in.
char *token_format_suffix = NULL;
char *bootstrap_token_format = NULL;
-@ @<Parser specific option list@>=
- _register_option("token-format-char", required_argument, 0, TOKEN_FORMAT_CHAR, "")@;
- _register_option("token-format-affix", required_argument, 0, TOKEN_FORMAT_AFFIX, "")@;
- _register_option("token-format-suffix", required_argument, 0, TOKEN_FORMAT_SUFFIX, "")@;
- _register_option("bootstrap-token-format", required_argument, 0, BOOTSTRAP_TOKEN_FORMAT, "")@;
-
-@ @<Higher index parser specific options@>=
- TOKEN_FORMAT_CHAR,@[@]
- TOKEN_FORMAT_AFFIX,@[@]
- TOKEN_FORMAT_SUFFIX,@[@]
- BOOTSTRAP_TOKEN_FORMAT,@[@]
+@ @<Parser specific options without shortcuts@>=
+ register_option_("token-format-char", required_argument, 0, TOKEN_FORMAT_CHAR, "")@;
+ register_option_("token-format-affix", required_argument, 0, TOKEN_FORMAT_AFFIX, "")@;
+ register_option_("token-format-suffix", required_argument, 0, TOKEN_FORMAT_SUFFIX, "")@;
+ register_option_("bootstrap-token-format", required_argument, 0, BOOTSTRAP_TOKEN_FORMAT, "")@;
@ @<Handle parser output options@>=
case TOKEN_FORMAT_CHAR:@;
@@ -323,11 +319,8 @@ names and values in the format of your choosing.
@<Prepare token only output environment@>@;
break;
-@ @<Parser specific option list@>=
- _register_option("token-only-mode", no_argument, 0, TOKEN_ONLY_MODE, "")@;
-
-@ @<Higher index parser specific options@>=
- TOKEN_ONLY_MODE,@[@]
+@ @<Parser specific options without shortcuts@>=
+ register_option_("token-only-mode", no_argument, 0, TOKEN_ONLY_MODE, "")@;
@ @<Configure parser output modes@>=
case TOKEN_ONLY_MODE:@;
@@ -560,11 +553,11 @@ rules are not output if a crippled \bison\ is used.
@<Helper functions for parser output@>=
void print_rule( int n ) {
- int i;
-
fprintf( tables_out, "%s%s: ", (n < 10 && !optimize_actions ? " " : ""), yytname[yyr1[n]] );
#ifndef BISON_IS_CRIPPLED
+ int i;
+
i = yyprhs[n];
if ( yyrhs[i] < 0 ) {
@@ -634,14 +627,11 @@ YYPACT_NINF_desc.name = "YYPACTNINF";
@*2 Command line options.
We start with the most obvious option, the one begging for help.
-@<Higher index parser specific options@>=
- LONG_HELP,@[@]
-
-@ @<Parser specific option list@>=
- _register_option("help", no_argument, 0, LONG_HELP, "")@;
+@ @<Parser specific options without shortcuts@>=
+ register_option_("help", no_argument, 0, LONG_HELP, "")@;
@ @<Shortcuts for command line options affecting parser output@>=
- "h"
+ @[@[@], 'h'@]
@ @<Handle parser output options@>=
case 'h': /* short help */@;
@@ -655,24 +645,21 @@ We start with the most obvious option, the one begging for help.
exit(0);
break; /* should not be needed */
-@ @<Parser specific option list@>=
- _register_option("debug", optional_argument, 0, 'b', "")@;
- _register_option("mode", required_argument, 0, 'm', "")@;
- _register_option("table-separator", required_argument, 0, 'z', "")@;
+@ @<Parser specific options with shortcuts@>=
+ register_option_("debug", optional_argument, 0, 'b', "")@;
+ register_option_("mode", required_argument, 0, 'm', "")@;
+ register_option_("table-separator", required_argument, 0, 'z', "")@;
- _register_option("format", required_argument, 0, 'f', "")@; /* name? */
- _register_option("table", required_argument, 0, 't', "")@; /* specific table */
- _register_option("constant", required_argument, 0, 'c', "")@; /* specific constant */
- _register_option("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */
- _register_option("token", required_argument, 0, 'n', "")@; /* specific token */
- _register_option("run-parse", required_argument, 0, 'p', "")@; /* run the parser */
- _register_option("parse-file", required_argument, 0, 'i', "")@; /* input for the parser */
+ register_option_("format", required_argument, 0, 'f', "")@; /* name? */
+ register_option_("table", required_argument, 0, 't', "")@; /* specific table */
+ register_option_("constant", required_argument, 0, 'c', "")@; /* specific constant */
+ register_option_("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */
+ register_option_("token", required_argument, 0, 'n', "")@; /* specific token */
+ register_option_("run-parse", required_argument, 0, 'p', "")@; /* run the parser */
+ register_option_("parse-file", required_argument, 0, 'i', "")@; /* input for the parser */
@ The string below is a list of short options.
-@<Shortcuts for command line options affecting parser output@>=
- "z:m:f:t:"
-
@ A few options can be discussed immediately.
@<Variables and types local to the parser@>=
diff --git a/support/splint/cweb/checklists.w b/support/splint/cweb/checklists.w
new file mode 100644
index 0000000000..754855d747
--- /dev/null
+++ b/support/splint/cweb/checklists.w
@@ -0,0 +1,82 @@
+% Copyright 2012-2020, Alexander Shibakov
+% Copyright 2002-2014 Free Software Foundation, Inc.
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+@** Checklists. This (experimental) section serves to aid in the
+testing and extension of \splint\ by formalizing a number of procedures
+in the form of a checklist. After having witnessed firsthand the effectiveness
+of checklists in aviation, the author feels that a similar approach
+will be beneficial in programming, as well. Most of these tests can and
+should be automated, but the applicable situations are rather rare, so
+the automation has not been implemented yet.
+
+{%
+\def\aterm#1{\item{\sqebullet\ }{\ttl #1}\ignorespaces}%
+\def\aaterm#1{\itemitem{\sqebullet\ }{\ttl #1}\ignorespaces}%
+\def\aaaterm#1{\itemitemitem{\sqebullet\ }{\ttl #1}\ignorespaces}%
+\setbox0=\hbox{\sqebullet\ \enspace}
+\parindent=0pt
+\advance\parindent by \wd0
+\bigskip
+\noindent{\ttl General checklist.}
+\smallskip
+\aterm{} Have the checklists in this section been followed?
+
+\aterm{} Have {\it all\/} the examples been built and tested?
+
+\aaterm{} \.{make}: this would build the \.{ld} parser, as well as other
+parts, like \.{ssfo.pdf}, etc.
+
+\aaterm{} \.{symbols}
+
+\aaterm{} \.{xxpression} (both \.{make} and \.{make test})
+
+\aaterm{} \.{expression} (both \.{make} and \.{make test})
+
+\aaterm{} Once in a while it is useful to run a tool like \.{diffpdf} to check
+that the generated output does not change unexpectedly
+
+\aaterm{} \.{parsec} (not part of \splint)
+
+\aterm{} Have the changes been documented?
+
+\aaterm{} If any limitations have been removed, has this been reflected
+in the documentation and examples, such as \.{symbols.sty}?
+
+\aaterm{} If any new conditionals have been added, does \.{yydebug.sty} provide
+a way to check their status, if appropriate?
+
+\aaterm{} If any new script option has been added, has the script documentation been
+updated?
+
+\aterm{} If a new process has been introduced, has it been reflected in any of the checklists in
+this section?
+
+\bigskip
+\noindent{\ttl Rewriting checklist.}
+\smallskip
+\aterm{} Is the output of the new system identical?
+
+\aaterm{} Once in a while it is useful to run a tool like \.{diffpdf} to check
+that the generated output does not change unexpectedly
+
+\aaterm{} Has \.{diff} been used to check that the \.{.gdx} and \.{.gdy} files produced
+are (nearly) identical?
+
+\aaterm{} Has \.{diff} been used to check that the \.{.sns} files produced by the \.{symbols}
+and \.{xxpression} examples are (nearly) identical?
+
+}
diff --git a/support/splint/cweb/common.w b/support/splint/cweb/common.w
index 3ab5af93b4..0f50fdcabd 100644
--- a/support/splint/cweb/common.w
+++ b/support/splint/cweb/common.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2014, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -14,7 +14,7 @@
@q You should have received a copy of the GNU General Public License@>
@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
-@** Forcing \bison\ and \flex\ to output \TeX.
+@** Forcing \eatone{bison}\bison\ and \eatone{flex}\flex\ to output \TeX.
Instead of implementing a \bison\ (or \flex) `plugin' for outputting
\TeX\ parser, the code that follows produces a separate executable
that outputs all the required tables after the inclusion of an
@@ -25,6 +25,11 @@ assumed to be merely |printf()| statements that output the `real'
output an `action switch' appropriate for use with \TeX. In every
other respect, the included parser or scanner can use any features
allowed in `real' parsers and scanners.
+\def\action#1{\hbox{$\hbox{\\{action}}_{#1}$}}
+\def\actionn{\action{n}}
+
+@s action1 TeX
+@s actionn TeX
@*1 Common routines.
The `top' level of the scanner and parser `drivers' is very similar,
@@ -43,9 +48,6 @@ software. All the essential routines are presented in the sections
below, though.
@<\Cee\ postamble@>=
-@<Outer definitions@>;
-@<Global variables and types@>@;
-@<Auxiliary function declarations@>@;
@<Auxiliary function definitions@>@;
int main( int argc, char **argv ) {
@@ -64,9 +66,13 @@ int main( int argc, char **argv ) {
}
+ fprintf( stderr, "Outputting tables and actions\n" );
+
if ( tables_out ) {
+ fprintf( stderr, " tables ... " );
@<Perform output@>@;
+ fprintf( stderr, "actions ... " );
@<Output action switch, if any@>@;
} else {
@@ -76,6 +82,8 @@ int main( int argc, char **argv ) {
}
+ fprintf( stderr, "done, cleaning up\n" );
+
@<Clean up@>@;
return 0;
@@ -122,7 +130,7 @@ example, the |yyrthree| array, which is necessary for processing
mentioned in the previous sentence. There does not seem to be any
other way to access this information. A number of tools (GNU and
otherwise) have taken the path of narrowing the field of application
-to a few use cases invisioned by the maintainers. This includes
+to a few use cases envisioned by the maintainers. This includes
compilers, as well.
There is a strange
@@ -228,8 +236,8 @@ is to provide a general pattern.
static int optimize_tables = 0;
@ It is set using the command line option below.
-@<Raw option list@>=
- _register_option("optimize-tables", no_argument, &optimize_tables, 1, "")@;
+@<Options without arguments@>=
+ register_option_("optimize-tables", no_argument, &optimize_tables, 1, "")@;
@ The reason to implement the table output routine as a macro is to avoid
writing separate functions for tables of different types of data
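As a rough illustration of the convenience of a macro here, the sketch below lets the
same body print a table of any element type by taking the output format as a
parameter. The names and the output format are invented for this example and do not
reproduce the actual macros used by \splint.

    #include <stdio.h>

    #define print_table(out, name, array, len, fmt)          \
        do {                                                  \
            int i_;                                           \
            fprintf( out, "\\outtable{%s}{", name );          \
            for ( i_ = 0; i_ < (len); i_++ ) {                \
                fprintf( out, fmt, (array)[i_] );             \
                if ( i_ + 1 < (len) ) fprintf( out, ", " );   \
            }                                                 \
            fprintf( out, "}%%\n" );                          \
        } while (0)

    int main( void )
    {
        short       small_table[] = { 0, 49, 50, 50 };
        const char *name_table[]  = { "$end", "error", "$undefined" };

        print_table( stdout, "smalltable", small_table, 4, "%d"   );
        print_table( stdout, "nametable",  name_table,  3, "{%s}" );
        return 0;
    }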
@@ -408,9 +416,9 @@ the processing of the action code.
The last argument of the `flexible' macro below is supposed to be an
extended description of each option which can be later utilized by a
|usage()| function.
-@<Raw option list@>=
- _register_option("bare-actions", no_argument, &bare_actions, 1, "")@;
- _register_option("optimize-actions", no_argument, &optimize_actions, 1, "")@;
+@<Options without arguments@>=
+ register_option_("bare-actions", no_argument, &bare_actions, 1, "")@;
+ register_option_("optimize-actions", no_argument, &optimize_actions, 1, "")@;
@ The rest of the action output code mimics that for table output, starting with
the descriptor. To make the output format more flexible, this
@@ -470,11 +478,21 @@ macros which produces undesirable typesetting artefacts.}.
@d TeXf( string ) TeX_( string )
@d TeXfo( string ) TeX_( string )
@d TeXao( string ) TeX_( string )
+@d YY_FATAL_ERROR( message ) fprintf( tables_out, " /yylexcomplain{%s}/yylexerrterminate%%\n", message )
@q \CWEB\ is not aware of variadic macros, so this has to be done the old way@>
@<\Cee\ preamble@>=
#define TeX__( string, ... ) @[fprintf( tables_out, " " string "%s\n", __VA_ARGS__, "%" )@]
+@ If a full parser is not needed, the lexing mechanism is not required. To satisfy the compiler
+and the linker, the lexer and other functions still have to be declared and defined, since these functions
+are referred to in the body of the parser. The details of these declarations can be found in the driver
+code.
+@<\Cee\ preamble@>=
+ @<Outer definitions@>;
+ @<Global variables and types@>@;
+ @<Auxiliary function declarations@>@;
+
@ We begin with a few macros to facilitate the output
of tables in the format that \TeX\ can understand. As there is no
perfect way to represent an array in \TeX\ a rather weak compromise
@@ -710,7 +728,7 @@ specific cases are added to in the course of adding new features.
FOREVER {
- c = getopt_long (argc, argv, ":" @<Short option list@>, long_options, &option_index);
+ c = getopt_long (argc, argv, ( char [] ){':'@t, @>@<Short option list@>}, long_options, &option_index);
if (c == -1) break;
@@ -763,9 +781,11 @@ specific cases are added to in the course of adding new features.
}
@ @<Long options array@>=
-#define _register_option(name, arg_flag, loc, val, exp) @[{name, arg_flag, loc, val},@[@]@]
- @<Raw option list@>@;
-#undef _register_option
+#define register_option_(name, arg_flag, loc, val, exp) @[{name, arg_flag, loc, val},@[@]@]
+ @<Options without shortcuts@>@;
+ @<Options with shortcuts@>@;
+ @<Options without arguments@>@;
+#undef register_option_
@ In addition to spelling out the full command line option name (such
as \.{--help}) |getopt_long| gives the user a choice of using a
@@ -775,12 +795,28 @@ this section (and a number of others) empty to be filled in with the
driver specific code to pacify \CWEAVE.
@<Short option list@>=
+#define dd_optional_argument @[@[@], ':', ':'@]
+#define dd_required_argument @[@[@], ':'@]
+#define dd_no_argument
+#define register_option_(name, arg_flag, loc, val, ...) @[@[@], val dd_##arg_flag@]
+ @<Options with shortcuts@>@;
+#undef register_option_
+#undef dd_optional_argument
+#undef dd_required_argument
+#undef dd_no_argument
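To see what the token pasting above accomplishes, here is a stand-alone sketch (with a
made-up set of options) of the same trick: the short option string handed to
|getopt_long()| is assembled as a brace-enclosed character array, one fragment per
option, by pasting the argument kind onto the \.{dd\_} prefix.

    #include <stdio.h>

    #define dd_optional_argument , ':', ':'
    #define dd_required_argument , ':'
    #define dd_no_argument
    #define register_option_(name, arg_flag, loc, val, ...) , val dd_##arg_flag

    int main( void )
    {
        /* arg_flag is only used as an operand of ##, so it is never macro-expanded,
           even when <getopt.h> defines optional_argument and friends as numbers */
        const char shortopts[] = { ':'
            register_option_("debug", optional_argument, 0, 'b', "")
            register_option_("mode",  required_argument, 0, 'm', "")
            register_option_("quiet", no_argument,       0, 'q', "")
            , '\0' };  /* terminated so it can be used as an ordinary string */

        /* the initializer expands to { ':', 'b', ':', ':', 'm', ':', 'q', '\0' },
           i.e. the string ":b::m:q" that getopt_long() expects */
        printf( "%s\n", shortopts );
        return 0;
    }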
@ Some options have one-letter `shortcuts', whereas others only exist
in `fully spelled-out' form. To easily keep track of the latter, a
special enumerated list is declared. To add to this list, simply add
to the \CWEB\ section below.
@<Higher index options@>=
+#define register_option_(name, arg_flag, loc, val, ...) @[val,@[@]@]
+ @<Options without shortcuts@>@;
+#undef register_option_
+
+@ @<Options with shortcuts@>=
+
+@ @<Options without shortcuts@>=
@ @<Cases affecting the whole program@>=
diff --git a/support/splint/cweb/fk.w b/support/splint/cweb/fk.w
index 04685ff9aa..fc0c5d2e14 100644
--- a/support/splint/cweb/fk.w
+++ b/support/splint/cweb/fk.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2014, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -14,7 +14,7 @@
@q You should have received a copy of the GNU General Public License@>
@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
-@*1 \flex\ specific routines. The output of the scanner automaton
+@*1\eatone{Flex}\flex\ specific routines. The output of the scanner automaton
follows steps similar to the ones taken during the parser output.
The major difference is in the output of actions and constants.
@*2 Tables.
@@ -90,7 +90,7 @@ way.
@ @<Output scanner actions@>=
if ( output_desc.output_actions ) {
-
+
int i, j;
yyscan_t fake_scanner;
@@ -106,15 +106,24 @@ way.
yy_ec[0] = 0;
yy_base[1] = max_yybase_entry;
+
+ yy_base[2] = 0;
+ yy_chk[0] = 2;
+
yy_chk[max_yybase_entry] = 1;
yy_nxt[max_yybase_entry] = 1;
+ yy_nxt[0] = 1;
+
+ fprintf( stderr, "max entry: %d\n", max_yybase_entry );
+
}
+
for ( i = 1; i <= max_yyaccept_entry; i++ ) {
fprintf( tables_out, action_desc.act_setup, i );
-
+
if ( i == YY_END_OF_BUFFER ) {
fprintf( tables_out, " %% YY_END_OF_BUFFER\n%s\n", " \\yylexeofaction" );
@@ -127,15 +136,17 @@ way.
(( struct yyguts_t *)fake_scanner)->yy_hold_char = 0;
yy_accept[1] = i;
+ if ( i%10 == 0 ) {
+ fprintf( stderr, "." );
+ }
yylex( NULL, fake_scanner );
-
}
}
fprintf( tables_out, action_desc.act_suffix, i );
}
-
+
fprintf( tables_out, " %% end of file states:\n%s\n",
" %#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)"
);
@@ -376,8 +387,7 @@ changes.
" \\csname doflexaction\\number #1\\parsernamespace\\endcsname\n"@/
" \\yylextail\n"@;
"}\\stashswitch{yydoactionswitch}%\n";
- action_desc.act_setup = "\n\\expandafter\\def\\csname doflexaction%d\\parsernamespace\\endcsname{%%\n"
- " \\YYRULESETUP";
+ action_desc.act_setup = "\n\\expandafter\\def\\csname doflexaction%d\\parsernamespace\\endcsname{%%";
action_desc.act_suffix = "}%% end of rule %d\n";
action_desc.action1 = NULL;
action_desc.actionn = NULL;
@@ -435,14 +445,11 @@ are handled separately, because they contain underscores.
@*2 Command line options.
We start with the most obvious option, the one begging for help.
-@<Higher index scanner specific options@>=
- LONG_HELP,@[@]
-
-@ @<Scanner specific option list@>=
- _register_option("help", no_argument, 0, LONG_HELP, "")@;
+@ @<Scanner specific options without shortcuts@>=
+ register_option_("help", no_argument, 0, LONG_HELP, "")@;
@ @<Shortcuts for command line options affecting scanner output@>=
- "h"
+ @[@[@], 'h'@]
@ @<Handle scanner output options@>=
case 'h': /* short help */@;
@@ -456,22 +463,18 @@ We start with the most obvious option, the one begging for help.
exit(0);
break; /* should not be needed */
-@ @<Scanner specific option list@>=
- _register_option("debug", optional_argument, 0, 'b', "")@;
- _register_option("mode", required_argument, 0, 'm', "")@;
- _register_option("table-separator", required_argument, 0, 'z', "")@;
-
- _register_option("format", required_argument, 0, 'f', "")@; /* name? */
- _register_option("table", required_argument, 0, 't', "")@; /* specific table */
- _register_option("constant", required_argument, 0, 'c', "")@; /* specific constant */
- _register_option("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */
- _register_option("token", required_argument, 0, 'n', "")@; /* specific token */
- _register_option("run-scan", required_argument, 0, 'p', "")@; /* run the scanner */
- _register_option("scan-file", required_argument, 0, 'i', "")@; /* input for the scanner */
-
-@ The string below is a list of short options.
-@<Shortcuts for command line options affecting scanner output@>=
- "b::z:m:f:t:"
+@ @<Scanner specific options with shortcuts@>=
+ register_option_("debug", optional_argument, 0, 'b', "")@;
+ register_option_("mode", required_argument, 0, 'm', "")@;
+ register_option_("table-separator", required_argument, 0, 'z', "")@;
+
+ register_option_("format", required_argument, 0, 'f', "")@; /* name? */
+ register_option_("table", required_argument, 0, 't', "")@; /* specific table */
+ register_option_("constant", required_argument, 0, 'c', "")@; /* specific constant */
+ register_option_("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */
+ register_option_("token", required_argument, 0, 'n', "")@; /* specific token */
+ register_option_("run-scan", required_argument, 0, 'p', "")@; /* run the scanner */
+ register_option_("scan-file", required_argument, 0, 'i', "")@; /* input for the scanner */
@ A few options can be immediately discussed.
@<Variables and types local to the scanner driver@>=
diff --git a/support/splint/cweb/fo.w b/support/splint/cweb/fo.w
new file mode 100644
index 0000000000..81694024c3
--- /dev/null
+++ b/support/splint/cweb/fo.w
@@ -0,0 +1,702 @@
+@q Copyright (c) 1990 The Regents of the University of California. @>
+@q All rights reserved. @>
+
+@q This code is derived from software contributed to Berkeley by @>
+@q Vern Paxson. @>
+
+@q The United States Government has rights in this work pursuant @>
+@q to contract no. DE-AC03-76SF00098 between the United States @>
+@q Department of Energy and the University of California. @>
+
+@q This file is part of SPLinT. @>
+
+@q Redistribution and use in source and binary forms, with or without @>
+@q modification, are permitted provided that the following conditions @>
+@q are met: @>
+
+@q 1. Redistributions of source code must retain the above copyright @>
+@q notice, this list of conditions and the following disclaimer. @>
+@q 2. Redistributions in binary form must reproduce the above copyright @>
+@q notice, this list of conditions and the following disclaimer in the @>
+@q documentation and/or other materials provided with the distribution. @>
+
+@q Neither the name of the University nor the names of its contributors @>
+@q may be used to endorse or promote products derived from this software @>
+@q without specific prior written permission. @>
+
+@q THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR @>
+@q IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED @>
+@q WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR @>
+@q PURPOSE. @>
+
+@**The \ifbootstrapmode flex \else\flex\ \fi parser stack.
+\ifbootstrapmode % this is a bootstrap run to generate the tables
+ \input limbo.sty
+ \def\optimization{5}
+ \input yy.sty
+ \modebootstrap
+\else % otherwise set up the pretty printing of tokens
+ \let\hostparsernamespace\flexnamespace
+\fi
+The scanner generator, \flex, uses \bison\ to produce a parser for its
+input language. Its lexer is output by \flex\ itself, so both are
+reused to generate the parser and the scanner for pretty printing
+\flex\ input.
+
+This task is made somewhat complicated by the dependence of the \flex\
+input scanner on correctly placed whitespace\footnote{For example,
+each regular expression definition in section~1 must start at the
+beginning of the line.}, as well as by the reliance of the same scanner
+on rather involved state switching. Therefore, making subparsers for
+different fragments of \flex\ input involves not only choosing an
+appropriate subset of grammar rules to correctly process the
+grammatical constructs but also setting up the correct lexer states.
+
+The first subparser is designed to process a complete \flex\
+file. This parser is not currently part of any parser stack and is
+only used for testing. This is the only parser that does not rely on
+any custom adjustments to the lexer state to operate correctly.
+@(fip.yy@>=
+@G
+%{@> @<Preamble for the \flex\ parser@> @=%}
+ @> @<Options for \flex\ parser@> @=
+%union {@> @=}
+%{@> @<Postamble for \flex\ parser@> @=%}
+ @> @<Token definitions for \flex\ input parser@>@=
+%%
+ @> @<Productions for \flex\ parser@> @=
+%%
+@g
+
+@ The selection of options for \bison\ parsers suitable for \splint\
+have been discussed
+\ifbootstrapmode\else\locallink{bison.options}earlier \endlink\fi so we
+list them here without further comments.
+@<Options for \flex\ parser@>=
+@G
+%token-table
+%debug
+%start goal
+@g
+
+@ A parser for section~1 (definitions and declarations). This parser requires a custom
+lexer, as discussed above, to properly set up the state. Without such
+a setup, the lexer may produce the wrong kind of tokens or even generate
+an error.
+@(ddp.yy@>=
+@G
+%{@> @<Preamble for the \flex\ parser@> @=%}
+ @> @<Options for \flex\ parser@> @=
+%union {@> @=}
+%{@> @<Postamble for \flex\ parser@> @=%}
+ @> @<Token definitions for \flex\ input parser@>@=
+%%
+ @> @<Exclusive productions for \flex\ section~1 parser@> @=
+ @> @<Productions for \flex\ section~1 parser@> @=
+%%
+
+@ A parser for section~2 (rules and actions). This subparser must also
+use a custom setup for its lexer, as discussed above.
+@(rap.yy@>=
+@G
+%{@> @<Preamble for the \flex\ parser@> @=%}
+ @> @<Options for \flex\ parser@> @=
+%union {@> @=}
+%{@> @<Postamble for \flex\ parser@> @=%}
+ @> @<Token definitions for \flex\ input parser@>@=
+%%
+ @> @<Special \flex\ section~2 parser productions@> @=
+ @> @<Productions for \flex\ section~2 parser@> @=
+%%
+@g
+
+@ A parser for just the regular expression syntax. A custom
+lexer initialization must precede the use of this parser, as well.
+@(rep.yy@>=
+@G
+%{@> @<Preamble for the \flex\ parser@> @=%}
+ @> @<Options for \flex\ parser@> @=
+%union {@> @=}
+%{@> @<Postamble for \flex\ parser@> @=%}
+ @> @<Token definitions for \flex\ input parser@>@=
+%%
+ @> @<Special productions for regular expressions@> @=
+ @> @<Rules for \flex\ regular expressions@> @=
+%%
+
+@*1 Token and state declarations for the \eatone{flex}\flex\ input scanner.
+Needless to say, the original grammar used by \flex\ was not designed
+with pretty printing in mind (and why would it be?). Instead, efficiency
+was the goal which resulted in a number of lexical constructs being
+processed `on the fly', as the lexer encounters them. Such syntax
+fragments never reach the parser, and would not have a chance to be
+displayed by our routines, unless some grammar extensions and
+alterations were introduced.
+
+To make the pretty printing possible, a number of new tokens have been
+introduced below that are later used in a few altered or entirely new
+grammar productions.
+@<Token definitions for \flex\ input parser@>=
+@G
+%token CHAR NUMBER SECTEND SCDECL XSCDECL NAME PREVCCL EOF_OP
+%token OPTION_OP OPT_OUTFILE OPT_PREFIX OPT_YYCLASS OPT_HEADER OPT_EXTRA_TYPE
+%token OPT_TABLES
+
+%token CCE_ALNUM CCE_ALPHA CCE_BLANK CCE_CNTRL CCE_DIGIT CCE_GRAPH
+%token CCE_LOWER CCE_PRINT CCE_PUNCT CCE_SPACE CCE_UPPER CCE_XDIGIT
+
+%token CCE_NEG_ALNUM CCE_NEG_ALPHA CCE_NEG_BLANK CCE_NEG_CNTRL CCE_NEG_DIGIT CCE_NEG_GRAPH
+%token CCE_NEG_LOWER CCE_NEG_PRINT CCE_NEG_PUNCT CCE_NEG_SPACE CCE_NEG_UPPER CCE_NEG_XDIGIT
+
+%left CCL_OP_DIFF CCL_OP_UNION
+
+@ We introduce an additional option type to capture all the non-parametric options used
+by the \flex\ lexer. The original lexer processes these options at the
+point of recognition, while the typesetting parser needs to be aware of them.
+@<Token definitions for \flex\ input parser@>=
+@G
+%token TOP_OP POINTER_OP ARRAY_OP DEF_OP RE_DEF OPT_OTHER OPT_DEPRECATED
+
+@ {\em \POSIX\ and \.{AT\&T} \lex\ place the
+precedence of the repeat operator, \.{\{\}}, below that of concatenation.
+Thus, \.{ab\{3\}} is\/ \.{ababab}. Most other \POSIX\ utilities use an {\rm Extended
+Regular Expression (ERE)} precedence that has the repeat operator
+higher than concatenation. This causes \.{ab\{3\}} to yield\/ \.{abbb}.
+
+In order to support the \POSIX\ and \.{AT\&T} precedence and the \flex\
+precedence we define two token sets for the begin and end tokens of
+the repeat operator, \prodstyle{BEGIN_REPEAT_POSIX} and\/ \prodstyle{END_REPEAT_POSIX}. The lexical scanner chooses
+which tokens to return based on whether {\let\it\itbold\prodstyle{posix\_compat} or \prodstyle{lex\_compat}
+are specified. Specifying either \prodstyle{posix\_compat} or \prodstyle{lex\_compat}} will
+cause \flex\ to parse scanner files as per the \.{AT\&T} and \POSIX-mandated behavior.}
+@<Token definitions for \flex\ input parser@>=
+@G
+%token BEGIN_REPEAT_POSIX END_REPEAT_POSIX BEGIN_REPEAT_FLEX END_REPEAT_FLEX
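The precedence difference described in the quoted passage is easy to verify against
any \POSIX\ regular expression engine. The short test below (not part of \splint, and
written with the standard \.{<regex.h>} interface) confirms that under the {\rm ERE}
rules \.{ab\{3\}} matches \.{abbb} and not \.{ababab}.

    #include <regex.h>
    #include <stdio.h>

    static void try_match( const regex_t *re, const char *s )
    {
        printf( "%-8s %s\n", s,
                regexec( re, s, 0, NULL, 0 ) == 0 ? "matches" : "does not match" );
    }

    int main( void )
    {
        regex_t re;

        /* ERE precedence: the repeat operator binds tighter than concatenation */
        if ( regcomp( &re, "^ab{3}$", REG_EXTENDED ) != 0 ) return 1;

        try_match( &re, "abbb" );    /* matches */
        try_match( &re, "ababab" );  /* does not match */

        regfree( &re );
        return 0;
    }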
+
+@*1 The grammar for \eatone{flex}\flex\ input.
+The original grammar has been carefully split into sections to
+facilitate the assembly of various subparsers in the \flex's
+stack. Neither the \flex\ parser nor its scanner are part of the
+bootstrap procedure which simplifies both the input file organization,
+as well as the macro design. Some amount of preprocessing is still
+necessary, however, to extract the state names from the lexer file
+(see \ifbootstrapmode\else\locallink{state.grabbing}above \endlink\fi for the
+explanation). We can nevertheless get away with an empty \Cee\ preamble.
+@<Preamble for the \flex\ parser@>=
+
+@ @<Productions for \flex\ parser@>=
+@G
+@t}\vb{\inline\flatten}{@>
+goal:
+ initlex sect1 sect1end
+ sect2 initforrule {@> @<Assemble a \flex\ input file@> @=}
+ ;
+
+sect1end:
+ SECTEND {@> @<Copy the value@> @=}
+ ;
+
+initlex:
+ {@> @=}
+ ;
+@g
+
+@ @<Assemble a \flex\ input file@>=
+ @[TeX_( "/yy0{/the/yy(2)/the/yy(4)}" );@]@;
+
+@ @<Productions for \flex\ parser@>=
+@<Productions for \flex\ section~1 parser@>@;
+@<Productions for \flex\ section~2 parser@>@;
+
+@ @<Exclusive productions for \flex\ section~1 parser@>=
+@G
+@t}\vb{\inline\flatten}{@>
+goal:
+ sect1 {@> @<Assemble a \flex\ section~1 file@> @=}
+ ;
+
+@ @<Assemble a \flex\ section~1 file@>=
+ @[TeX_( "/table/expandafter{/the/yy(1)}" );@]@;
+
+@ @<Productions for \flex\ section~1 parser@>=
+@G
+sect1:
+ sect1 startconddecl namelist1 {@> @<Add start condition declarations@> @=}
+ | sect1 options {@> @<Add options to section~1@> @=}
+ | {@> @<Create an empty section~1@> @=}
+ | error {@> @<Report an error in section~1 and quit@> @=}
+ ;
+
+startconddecl:
+ SCDECL {@> @<Prepare a state declaration@> @=}
+ | XSCDECL {@> @<Prepare an exclusive state declaration@> @=}
+ ;
+
+namelist1:
+ namelist1 NAME {@> @<Add a name to a list@> @=}
+ | NAME {@> @<Start a \prodstylens{namelist1}{\flexnamespace} with a name@> @=}
+ | error {@> @<Report an error in \prodstylens{namelist1}{\flexnamespace} and quit@> @=}
+ ;
+@g
+
+@ @<Add start condition declarations@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flscondecl/the/yy(2){/the/yy(3)}}" );@]@;
+
+@ @<Add options to section~1@>=
+ @[TeX_( "/yy0{/the/yy(1)/the/yy(2)}" );@]@;
+
+@ @<Create an empty section~1@>=
+ @[TeX_( "/yy0{}" );@]@;
+
+@ @<Report an error in section~1 and quit@>=
+ @[TeX_( "/yyerror" );@]@;
+
+@ @<Prepare a state declaration@>=
+ @[TeX_( "/yy0{{s}/the/yy(1)}" );@]@;
+
+@ @<Prepare an exclusive state declaration@>=
+ @[TeX_( "/yy0{{x}/the/yy(1)}" );@]@;
+
+@ @<Add a name to a list@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flnamesep{}{}/nx/flname/the/yy(2)}" );@]@;
+
+@ @<Start a \prodstylens{namelist1}{\flexnamespace} with a name@>=
+ @[TeX_( "/yy0{/nx/flname/the/yy(1)}" );@]@;
+
+@ @<Report an error in \prodstylens{namelist1}{\flexnamespace} and quit@>=
+ @[TeX_( "/yyerror" );@]@;
+
+@ @<Productions for \flex\ section~1 parser@>=
+@G
+options:
+ OPTION_OP optionlist {@> @<Start an options list@> @=}
+ | POINTER_OP {@> @<Add a pointer option@> @=}
+ | ARRAY_OP {@> @<Add an array option@> @=}
+ | TOP_OP '\n' {@> @<Add a \prodstylens{\%top}{\flexnamespace} directive@> @=}
+ | DEF_OP RE_DEF {@> @<Add a regular expression definition@> @=}
+ | OPT_DEPRECATED {@> @<Output a deprecated option@> @=}
+ ;
+@t}\vb{\inline\flatten}{@>
+optionlist:
+ optionlist option {@> @<Add an option to a list@> @=}
+ | {@> @<Make an empty option list@> @=}
+ ;
+@t}\vb{\resetf}{@>
+option:
+ OPT_OUTFILE '=' NAME {@> @<Record the name of the output file@> @=}
+ | OPT_EXTRA_TYPE '=' NAME {@> @<Declare an extra type@> @=}
+ | OPT_PREFIX '=' NAME {@> @<Declare a prefix@> @=}
+ | OPT_YYCLASS '=' NAME {@> @<Declare a class@> @=}
+ | OPT_HEADER '=' NAME {@> @<Declare the name of a header@> @=}
+ | OPT_TABLES '=' NAME {@> @<Declare the name for the tables@> @=}
+ | OPT_OTHER {@> @<Output a non-parametric option@> @=}
+ ;
+@g
+
+@ @<Start an options list@>=
+ @[TeX_( "/yy0{/nx/floptions{/the/yy(2)}}" );@]@;
+
+@ @<Add a pointer option@>=
+ @[TeX_( "/yy0{/nx/flptropt/the/yy(1)}" );@]@;
+
+@ @<Add an array option@>=
+ @[TeX_( "/yy0{/nx/flarrayopt/the/yy(1)}" );@]@;
+
+@ @<Add a \prodstylens{\%top}{\flexnamespace} directive@>=
+ @[TeX_( "/yy0{/nx/fltopopt/the/yy(1)/the/yy(2)}" );@]@;
+
+@ @<Add a regular expression definition@>=
+ @[TeX_( "/yy0{/nx/flredef/the/yy(1)/the/yy(2)}" );@]@;
+
+@ @<Add an option to a list@>=
+ @[TeX_( "/yy0{/the/yy(1)/the/yy(2)}" );@]@;
+
+@ @<Make an empty option list@>=
+ @[TeX_( "/yy0{}" );@]@;
+
+@ @<Record the name of the output file@>=
+ @[TeX_( "/yy0{/nx/flopt{file}/the/yy(3)}" );@]@;
+
+@ @<Declare an extra type@>=
+ @[TeX_( "/yy0{/nx/flopt{xtype}/the/yy(3)}" );@]@;
+
+@ @<Declare a prefix@>=
+ @[TeX_( "/yy0{/nx/flopt{prefix}/the/yy(3)}" );@]@;
+
+@ @<Declare a class@>=
+ @[TeX_( "/yy0{/nx/flopt{yyclass}/the/yy(3)}" );@]@;
+
+@ @<Declare the name of a header@>=
+ @[TeX_( "/yy0{/nx/flopt{header}/the/yy(3)}" );@]@;
+
+@ @<Declare the name for the tables@>=
+ @[TeX_( "/yy0{/nx/flopt{tables}/the/yy(3)}" );@]@;
+
+@ @<Output a non-parametric option@>=
+ @[TeX_( "/yy0{/nx/flopt{other}/the/yy(1)}" );@]@;
+
+@ @<Output a deprecated option@>=
+ @[TeX_( "/yy0{/nx/flopt{deprecated}/the/yy(1)}" );@]@;
+
+@ @<Special \flex\ section~2 parser productions@>=
+@G
+goal:
+ sect2 {@> @<Output section~2@> @=}
+ ;
+@g
+
+@ @<Output section~2@>=
+ @[TeX_( "/table/yy(1)" );@]@;
+
+@ This portion of the grammar was changed to make it possible to read the
+action code.
+@<Productions for \flex\ section~2 parser@>=
+@G
+sect2:
+ sect2 scon initforrule flexrule '\n' '\n' {@> @<Add a rule to section~2@> @=}
+ | sect2 scon '{' sect2 '}' {@> @<Add a group of rules to section~2@> @=}
+ | {@> @<Start an empty section~2@> @=}
+ | sect2 '\n' {@> @<Add a bare action@> @=}
+ ;
+@t}\vb{\inline\flatten}{@>
+initforrule:
+ {@> @[TeX_( "/flin@@ruletrue/yylexnext" );@] @=}
+ ;
+@g
+
+@ @<Add a rule to section~2@>=
+ @[TeX_( "/ifflcontinued@@action" );@]@;
+ @[TeX_( " /toksb{/flactionc}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /toksb{/flaction}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/toksa/expandafter{/astformat@@flaction}" );@]@; /* capture the formatting action */
+ @[TeX_( "/yypushx{/the/yy(1)/the/toksb{/the/yy(2)}{/the/yy(4)}/the/yy(5)/the/yy(6){/the/toksa}}/on/astarray" );@]@;
+ @[TeX_( "/yy0{/astarraylastcs}" );@]@;
+ @[TeX_( "/let/astformat@@flaction/empty" );@]@; /* reset the format */
+
+@ @<Add a group of rules to section~2@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flactiongroup{/the/yy(2)}/the/yy(3){/the/yy(4)}/the/yy(5)}" );@]@;
+
+@ @<Start an empty section~2@>=
+ @[TeX_( "/yy0{}" );@]@;
+
+@ @<Add a bare action@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flbareaction/the/yy(2)}" );@]@;
+
+@ @<Productions for \flex\ section~2 parser@>=
+@G
+@t}\vb{\inline\flatten}{@>
+scon_stk_ptr:
+ {@> @=}
+ ;
+@t}\vb{\resetf}{@>
+scon:
+ '<' scon_stk_ptr namelist2 '>' {@> @<Create a list of start conditions@> @=}
+ | '<' '*' '>' {@> @<Create a universal start condition@> @=}
+ | {@> @<Create an empty start condition@> @=}
+ ;
+
+namelist2:
+ namelist2 ',' sconname {@> @<Add a start condition to a list@> @=}
+ | sconname {@> @<Start a list with a start condition name@> @=}
+ | error {@> @<Report an error compiling a start condition list@> @=}
+ ;
+@t}\vb{\inline\flatten}{@>
+sconname:
+ NAME {@> @<Make a \prodstylens{NAME}{\flexnamespace} into a start condition@> @=}
+ ;
+
+@ @<Create a list of start conditions@>=
+ @[TeX_( "/yy0{/nx/flsconlist{/the/yy(1)}{/the/yy(3)}{/the/yy(4)}}" );@]@;
+
+@ @<Create a universal start condition@>=
+ @[TeX_( "/yy0{/nx/flsconuniv/the/yy(3)}" );@]@;
+
+@ @<Create an empty start condition@>=
+ @[TeX_( "/yy0{}" );@]@;
+
+@ @<Add a start condition to a list@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flnamesep/the/yy(2)/the/yy(3)}" );@]@;
+
+@ @<Start a list with a start condition name@>=
+ @<Copy the value@>@;
+
+@ @<Report an error compiling a start condition list@>=
+ @[TeX_( "/yyerror" );@]@;
+
+@ @<Make a \prodstylens{NAME}{\flexnamespace} into a start condition@>=
+ @[TeX_( "/yy0{/nx/flname/the/yy(1)}" );@]@;
+
+@ @<Productions for \flex\ section~2 parser@>=
+ @<Rules for \flex\ regular expressions@>@;
+
+@ @<Special productions for regular expressions@>=
+@G
+goal:
+ flexrule {@> @<Output a regular expression@> @=}
+ ;
+@g
+
+@ The parsed regular expression is output in the \.{\\table}
+register. It is important to ensure that whenever this parser is used
+inside another parser that uses \.{\\table} for output, the changes to
+this register stay local. The \.{\\frexproc} macro in \.{yyunion.sty}
+ensures that all the changes are local to the parsing macro.
+@<Output a regular expression@>=
+ @[TeX_( "/table/yy(1)" );@]@;
+
+@ @<Rules for \flex\ regular expressions@>=
+@G
+flexrule:
+ '^' rule {@> @<Match a rule at the beginning of the line@> @=}
+ | rule {@> @<Match an ordinary rule@> @=}
+ | EOF_OP {@> @<Match an end of file@> @=}
+ | error {@> @<Report an error and quit@> @=}
+ ;
+
+@ @<Match a rule at the beginning of the line@>=
+ @[TeX_( "/toksa/expandafter{/astformat@@flrule}" );@]@;
+ @[TeX_( "/let/astformat@@flrule/empty" );@]@;
+ @[TeX_( "/yy0{/nx/flbolrule{/the/yy(2)}{/the/toksa}}" );@]@;
+
+@ @<Match an ordinary rule@>=
+ @[TeX_( "/toksa/expandafter{/astformat@@flrule}" );@]@;
+ @[TeX_( "/let/astformat@@flrule/empty" );@]@;
+ @[TeX_( "/yy0{/nx/flrule{/the/yy(1)}{/the/toksa}}" );@]@;
+
+@ @<Match an end of file@>=
+ @[TeX_( "/yy0{/nx/fleof/the/yy(1)}" );@]@;
+
+@ @<Report an error and quit@>=
+ @[TeX_( "/yyerror" );@]@;
+
+@ @<Rules for \flex\ regular expressions@>=
+@G
+rule:
+ re2 re {@> @<Match a regular expression with a trailing context@> @=}
+ | re2 re '$' {@> @<Disallow a repeated trailing context@> @=}
+ | re '$' {@> @<Match a regular expression at the end of the line@> @=}
+ | re {@> @<Match an ordinary regular expression@> @=}
+ ;
+
+re:
+ re '|' series {@> @<Match a sequence of alternatives@> @=}
+ | series {@> @<Match a sequence of singletons@> @=}
+ ;
+@t}\vb{\inline\flatten}{@>
+re2:
+ re '/' {@> @<Prepare to match a trailing context@> @=}
+ ;
+@g
+
+@ @<Match a regular expression with a trailing context@>=
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/flretrail{/the/toksa}{/the/toksb}{/the/yy(2)}}" );@]@;
+
+@ @<Disallow a repeated trailing context@>=
+ @[TeX_( "/yyerror" );@]@;
+
+@ @<Match a regular expression at the end of the line@>=
+ @[TeX_( "/yy0{/nx/flreateol{/the/yy(1)}/the/yy(2)}" );@]@;
+
+@ @<Match an ordinary regular expression@>=
+ @<Copy the value@>@;
+
+@ @<Match a sequence of alternatives@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flor/the/yy(2)/the/yy(3)}" );@]@;
+
+@ @<Match a sequence of singletons@>=
+ @<Copy the value@>@;
+
+@ @<Prepare to match a trailing context@>=
+ @[TeX_( "/yy0{/nx/fltrail{/the/yy(1)}{/the/yy(2)}}" );@]@;
+
+@ @<Rules for \flex\ regular expressions@>=
+@G
+series:
+ series singleton {@> @<Extend a series by a singleton@> @=}
+ | singleton {@> @<Match a singleton@> @=}
+ | series BEGIN_REPEAT_POSIX
+ NUMBER ',' NUMBER END_REPEAT_POSIX {@> @<Match a series of specific length@> @=}
+ | series BEGIN_REPEAT_POSIX
+ NUMBER ',' END_REPEAT_POSIX {@> @<Match a series of minimal length@> @=}
+ | series BEGIN_REPEAT_POSIX
+ NUMBER END_REPEAT_POSIX {@> @<Match a series of exact length@> @=}
+ ;
+@g
+
+@ @<Extend a series by a singleton@>=
+ @[TeX_( "/yy0{/the/yy(1)/the/yy(2)}" );@]@;
+
+@ @<Match a singleton@>=
+ @<Copy the value@>@;
+
+@ @<Match a series of specific length@>=
+ @<Create a series of specific length@>@;
+
+@ @<Match a series of minimal length@>=
+ @<Create a series of minimal length@>@;
+
+@ @<Match a series of exact length@>=
+ @<Create a series of exact length@>@;
+
+@ @<Rules for \flex\ regular expressions@>=
+@G
+singleton:
+ singleton '*' {@> @<Create a lazy series match@> @=}
+ | singleton '+' {@> @<Create a nonempty series match@> @=}
+ | singleton '?' {@> @<Create a possible single match@> @=}
+ | singleton BEGIN_REPEAT_FLEX
+ NUMBER ',' NUMBER END_REPEAT_FLEX {@> @<Create a series of specific length@> @=}
+ | singleton BEGIN_REPEAT_FLEX
+ NUMBER ',' END_REPEAT_FLEX {@> @<Create a series of minimal length@> @=}
+ | singleton BEGIN_REPEAT_FLEX
+ NUMBER END_REPEAT_FLEX {@> @<Create a series of exact length@> @=}
+ | '.' {@> @<Match (almost) any character@> @=}
+ | fullccl {@> @<Match a character class@> @=}
+ | PREVCCL {@> @<Match a \prodstylens{PREVCCL}{\flexnamespace}@> @=}
+ | '"' string '"' {@> @<Match a string@> @=}
+ | '(' re ')' {@> @<Match an atom@> @=}
+ | CHAR {@> @<Match a specific character@> @=}
+ ;
+@g
+
+@ @<Create a lazy series match@>=
+ @[TeX_( "/yy0{/nx/flrepeat{/the/yy(1)}}" );@]@;
+
+@ @<Create a nonempty series match@>=
+ @[TeX_( "/yy0{/nx/flrepeatstrict{/the/yy(1)}}" );@]@;
+
+@ @<Create a possible single match@>=
+ @[TeX_( "/yy0{/nx/flrepeatonce{/the/yy(1)}}" );@]@;
+
+@ @<Create a series of specific length@>=
+ @[TeX_( "/yy0{/nx/flrepeatnm{/the/yy(1)}{/the/yy(3)}{/the/yy(5)}}" );@]@;
+
+@ @<Create a series of minimal length@>=
+ @[TeX_( "/yy0{/nx/flrepeatgen{/the/yy(1)}{/the/yy(3)}}" );@]@;
+
+@ @<Create a series of exact length@>=
+ @[TeX_( "/yy0{/nx/flrepeatn{/the/yy(1)}{/the/yy(3)}}" );@]@;
+
+@ @<Match (almost) any character@>=
+ @[TeX_( "/yy0{/nx/fldot/the/yy(1)}" );@]@;
+
+@ @<Match a character class@>=
+ @<Copy the value@>@;
+
+@ @<Match a \prodstylens{PREVCCL}{\flexnamespace}@>=
+ @<Copy the value@>@;
+
+@ @<Match a string@>=
+ @[TeX_( "/yy0{/nx/flstring{/the/yy(1)}{/the/yy(2)}{/the/yy(3)}}" );@]@;
+
+@ @<Match an atom@>=
+ @[TeX_( "/toksa/expandafter{/astformat@@flparens}" );@]@;
+ @[TeX_( "/let/astformat@@flparens/empty" );@]@;
+ @[TeX_( "/yy0{/nx/flparens{/the/yy(1)}{/the/yy(2)}{/the/yy(3)}{/the/toksa}}" );@]@;
+
+@ @<Match a specific character@>=
+ @[TeX_( "/yy0{/nx/flchar/the/yy(1)}" );@]@;
+
+@ @<Rules for \flex\ regular expressions@>=
+@G
+fullccl:
+ fullccl CCL_OP_DIFF braceccl {@> @<Subtract a character class@> @=}
+ | fullccl CCL_OP_UNION braceccl {@> @<Create a union of character classes@> @=}
+ | braceccl {@> @<Turn a basic character class into a character class@> @=}
+ ;
+
+braceccl:
+ '[' ccl ']' {@> @<Create a character class@> @=}
+ | '[' '^' ccl ']' {@> @<Create a complementary character class@> @=}
+ ;
+
+ccl:
+ ccl CHAR '-' CHAR {@> @<Add a range to a character class@> @=}
+ | ccl CHAR {@> @<Add a character to a character class@> @=}
+ | ccl ccl_expr {@> @<Add an expression to a character class@> @=}
+ | {@> @<Create an empty character class@> @=}
+ ;
+@g
+
+@ @<Subtract a character class@>=
+ @[TeX_( "/yy0{/nx/flccldiff{/the/yy(1)}{/the/yy(3)}}" );@]@;
+
+@ @<Create a union of character classes@>=
+ @[TeX_( "/yy0{/nx/flcclunion{/the/yy(1)}{/the/yy(3)}}" );@]@;
+
+@ @<Turn a basic character class into a character class@>=
+ @<Copy the value@>@;
+
+@ @<Create a character class@>=
+ @[TeX_( "/yy0{/nx/flbraceccl{/the/yy(1)}{/the/yy(2)}{/the/yy(3)}}" );@]@;
+
+@ @<Create a complementary character class@>=
+ @[TeX_( "/yy0{/nx/flbracecclneg{/the/yy(1)}{/the/yy(3)}{/the/yy(4)}}" );@]@;
+
+@ @<Add a range to a character class@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flcclrnge{/nx/flchar/the/yy(2)}{/nx/flchar/the/yy(4)}}" );@]@;
+
+@ @<Add a character to a character class@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flchar/the/yy(2)}" );@]@;
+
+@ @<Add an expression to a character class@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flcclexpr/the/yy(2)}" );@]@;
+
+@ @<Create an empty character class@>=
+ @[TeX_( "/yy0{}" );@]@;
+
+@ @<Rules for \flex\ regular expressions@>=
+@G
+ccl_expr:
+@t}\vb{\flatten}{@>
+ CCE_ALNUM {@> @<Copy the value@> @=}
+ | CCE_ALPHA {@> @<Copy the value@> @=}
+ | CCE_BLANK {@> @<Copy the value@> @=}
+ | CCE_CNTRL {@> @<Copy the value@> @=}
+ | CCE_DIGIT {@> @<Copy the value@> @=}
+ | CCE_GRAPH {@> @<Copy the value@> @=}
+ @t}\vb{\fold\flatten}{@>
+ | CCE_LOWER {@> @<Copy the value@> @=}
+ | CCE_PRINT {@> @<Copy the value@> @=}
+ | CCE_PUNCT {@> @<Copy the value@> @=}
+ | CCE_SPACE {@> @<Copy the value@> @=}
+ | CCE_XDIGIT {@> @<Copy the value@> @=}
+ | CCE_UPPER {@> @<Copy the value@> @=}
+@t}\vb{\fold\flatten}{@>
+ | CCE_NEG_ALNUM {@> @<Copy the value@> @=}
+ | CCE_NEG_ALPHA {@> @<Copy the value@> @=}
+ | CCE_NEG_BLANK {@> @<Copy the value@> @=}
+ | CCE_NEG_CNTRL {@> @<Copy the value@> @=}
+ | CCE_NEG_DIGIT {@> @<Copy the value@> @=}
+ | CCE_NEG_GRAPH {@> @<Copy the value@> @=}
+ @t}\vb{\fold\flatten}{@>
+ | CCE_NEG_PRINT {@> @<Copy the value@> @=}
+ | CCE_NEG_PUNCT {@> @<Copy the value@> @=}
+ | CCE_NEG_SPACE {@> @<Copy the value@> @=}
+ | CCE_NEG_XDIGIT {@> @<Copy the value@> @=}
+ | CCE_NEG_LOWER {@> @<Copy the value@> @=}
+ | CCE_NEG_UPPER {@> @<Copy the value@> @=}
+ ;
+@t}\vb{\inline}{@>
+string:
+ string CHAR {@> @<Extend a \flex\ string by a character@> @=}
+ | {@> @<Make an empty regular expression string@> @=}
+ ;
+@g
+
+@ @<Copy the value@>=
+ @[TeX_( "/yy0{/the/yy(1)}" );@]@;
+
+@ @<Extend a \flex\ string by a character@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/flchar/the/yy(2)}" );@]@;
+
+@ @<Make an empty regular expression string@>=
+ @[TeX_( "/yy0{}" );@]@;
+
+@ This is needed to get the |yytoknum| array. A trivial declaration suffices.
+@<Postamble for \flex\ parser@>=
+#define YYPRINT(file, type, value) @[yyprint (file, type, value)@]
+ static void yyprint (FILE *file, int type, YYSTYPE value){}
diff --git a/support/splint/cweb/lo.w b/support/splint/cweb/lo.w
index b28711423c..68ef1b6595 100644
--- a/support/splint/cweb/lo.w
+++ b/support/splint/cweb/lo.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2014 Alexander Shibakov@>
+@q Copyright 2012-2020 Alexander Shibakov@>
@q Copyright 2002-2014 Free Software Foundation, Inc.@>
@q This file is part of SPLinT@>
@@ -15,20 +15,21 @@
@q You should have received a copy of the GNU General Public License@>
@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
-@*1 The scanner for grammar syntax.
-\ifx\parsernamespace\UNDEFINED
+@**The scanner for \ifx\bison\UNDEFINED\.{bison}\else\bison\fi\ syntax.
+\ifx\bison\UNDEFINED
\input limbo.sty
\input grabstates.sty
\immediate\openout\stlist=lo_states.h
\fi
The fact that \bison\ has a relatively straightforward grammar is
-due to the sophistication of its scanner. The primary reason for this
+partly due to the sophistication of its scanner. The primary reason for this
increased complexity is \bison's awareness
of syntax variations in its input files. In addition to the grammar
syntax, the parser has to be able to deal with extended \Cee\ syntax
inside \bison's actions.
-Since the names of the scanner {\it states\/} reside in the common
+Since the names\namedspot{state.grabbing} of the scanner
+{\em states@^scanner states@>\/} reside in the common
namespace with other variables, in order to make the \TeX\ version of
the scanner aware of the numerical values of the states, a special
procedure is required. It is executed as part of \flex's user
@@ -52,10 +53,11 @@ void define_all_states( void ) {
@o
@g
-@ It is convenient to abbreviate some commonly used subexpressions.
+@*1 Definitions and state declarations.
+It is convenient to abbreviate some commonly used subexpressions.
@<Grammar lexer definitions@>=
@<Grammar lexer states@>@;
-@G
+@G(fs1)
letter [.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_]
notletter [^.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_]{-}[%\{]
id {letter}({letter}|[-0-9])*
@@ -65,14 +67,14 @@ int [0-9]+
@ {\it Zero or more instances of backslash-newline. Following \gcc, allow
white space between the backslash and the newline}.
@<Grammar lexer definitions@>=
-@G
+@G(fs1)
splice (\\[ \f\t\v]*\n)*
@g
@ {\it An equal sign, with optional leading whitespaces. This is used in some
deprecated constructs}.
@<Grammar lexer definitions@>=
-@G
+@G(fs1)
eqopt ([[:space:]]*=)?
@g
@@ -81,7 +83,7 @@ routine mentioned above. The state information is collected by a
special small scanner that is coupled with the bootstrap parser. This
way, all the necessary token information comes `hardwired' in the
bootstrap parser, and the small scanner itself does not use any state
-manipulation and thus can get away without any state setup. It can,
+manipulation and thus can get away with using no state setup. It can,
however, scan just enough of the \flex\ syntax to extract the state
information from it (only the state {\it names\/} are needed) and
output it in the form of a header file for the `real' lexer output
@@ -98,6 +100,19 @@ output it in the form of a header file for the `real' lexer output
@g
@ {\it Strings and characters in directives/rules}.
+%\yyflexdebugtrue
+%\traceparserstatestrue
+%\tracestackstrue
+%\tracerulestrue
+%\traceactionstrue
+%\tracebadcharstrue
+%\prodstyle{\%\%}%
+\traceparserstatesfalse
+\tracestacksfalse
+\tracerulesfalse
+\traceactionsfalse
+\tracebadcharsfalse
+\yyflexdebugfalse
@<Grammar lexer states@>=
@G
%x SC_ESCAPED_STRING SC_ESCAPED_CHARACTER
@@ -139,7 +154,7 @@ Four types of user code:
%x SC_PROLOGUE SC_BRACED_CODE SC_EPILOGUE SC_PREDICATE
@g
-@ {\it \Cee\ and \Cee++ comments in code}.
+@ {\it \Cee\ and \Ceepp\ comments in code}.
@<Grammar lexer states@>=
@G
%x SC_COMMENT SC_LINE_COMMENT
@@ -175,8 +190,8 @@ adopted in this package to work.
%option outfile="lo.c"
@g
-@*2 Tokenizing with regular expressions.
-Here is a full collection of regular expressions employed by the scanner.
+@*1 Tokenizing with regular expressions.
+Here is a full list of regular expressions recognized by the \bison\ scanner.
@<Grammar token regular expressions@>=
@<Scan grammar white space@>@;
@<Scan \flex\ directives and options@>@;
@@ -184,7 +199,7 @@ Here is a full collection of regular expressions employed by the scanner.
@<Do not support zero characters@>@;
@<Scan after an identifier, check whether a colon is next@>@;
@<Scan bracketed identifiers@>@;
- @<Scan a Yacc comment@>@;
+ @<Scan a \yacc\ comment@>@;
@<Scan a \Cee\ comment@>@;
@<Scan a line comment@>@;
@<Scan a \bison\ string@>@;
@@ -199,15 +214,17 @@ Here is a full collection of regular expressions employed by the scanner.
@<Add the scanned symbol to the current string@>@;
@ @<Scan grammar white space@>=
-@G
+@G(fs2)
<INITIAL,SC_AFTER_IDENTIFIER,SC_BRACKETED_ID,SC_RETURN_BRACKETED_ID>
{
- /* {\it Comments and white space.} */
- "," {@> @[TeX_( "/yycomplain{stray `,' treated as white space}/yylexnext" );@]@=}
- [ \f\n\t\v] |
+@t}\vb{\insertraw{\inscomment{\it comments and white space}}}{@>
+ "," {@> @[TeX_( "/yywarn{stray `,' treated as white space}" );@]@=}
+ [ \f\n\t\v] |
"//".* {@> @[TeX_( "/yylexnext" );@]@=}
@= "/*" {@> @[TeX_( "/contextstate/YYSTART /yyBEGIN{SC_YACC_COMMENT}/yylexnext" );@]@=}@>@/
- /* {\it |@[#line@]| directives are not documented, and may be withdrawn or modified in future versions of \bison.} */
+
+@t}\vb{\insertraw{\inscomment{\it \.{\#line} directives are not documented, and may be withdrawn or modified in future versions of \bison}}}{@>
+
^"#line "{int}(" \"".*"\"")?"\n" {@> @[TeX_( "/yylexnext" );@]@=}
}
@g
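
As a hedged aside (not part of the scanner itself), the last rule above
silently skips \.{\#line} markers of the shape shown below; the same syntax is
understood by \Cee\ preprocessors, so the sketch compiles as ordinary \Cee.
The file name \.{grammar.y} is made up for the example.

    /* Illustration only: a line marker of the kind skipped by the rule above,
       as it might appear in machine-generated grammar input.  In C it merely
       resets __LINE__ and __FILE__ for subsequent diagnostics. */
    #include <stdio.h>

    #line 42 "grammar.y"
    int main(void)
    {
        printf("%s:%d\n", __FILE__, __LINE__);  /* prints grammar.y:44 here */
        return 0;
    }
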
@@ -219,7 +236,7 @@ to \.{../build-aux/cross-options.pl}}. For most options the scanner
returns a pair of pointers as the value.
@<Scan \bison\ directives@>=
-@G
+@G(fs2)
<INITIAL>
{
"%binary" {@> @[TeX_( "/yylexreturnptr{PERCENT_NONASSOC}" );@]@=}
@@ -267,8 +284,7 @@ returns a pair of pointers as the value.
"%union" {@> @[TeX_( "/yylexreturnptr{PERCENT_UNION}" );@]@=}
"%verbose" {@> @[TeX_( "/yylexreturnptr{PERCENT_VERBOSE}" );@]@=}
"%yacc" {@> @[TeX_( "/yylexreturnptr{PERCENT_YACC}" );@]@=}
-
- /* {\it deprecated} */
+@t}\vb{\insertraw{\inscomment{\it deprecated}}}{@>
"%default"[-_]"prec" {@> @[TeX_( "/yypdeprecated{\\%default-prec}" );@]@=}
"%error"[-_]"verbose" {@> @[TeX_( "/yypdeprecated{\\%define parse.error verbose}" );@]@=}
"%expect"[-_]"rr" {@> @[TeX_( "/yypdeprecated{\\%expect-rr}" );@]@=}
@@ -281,50 +297,44 @@ returns a pair of pointers as the value.
"%pure"[-_]"parser" {@> @[TeX_( "/yypdeprecated{\\%pure-parser}" );@]@=}
"%token"[-_]"table" {@> @[TeX_( "/yypdeprecated{\\%token-table}" );@]@=}
- /* {\it Semantic predicate.} */
+@t}\vb{\insertraw{\inscomment{\it semantic predicate}}}{@>
+
"%?"[ \f\n\t\v]*"{" {@> @[TeX_( "/yyBEGIN{SC_PREDICATE}/yylexnext" );@]@=}
- "%"{id}|"%"{notletter}([[:graph:]])+ {@> @[@<Possbly complain about a bad directive@>@]@=}
+ "%"{id}|"%"{notletter}([[:graph:]])+ {@> @[@<Possibly complain about a bad directive@>@]@=}
"=" {@> @[TeX_( "/yylexreturnptr{EQUAL}" );@]@=}
"|" {@> @[TeX_( "/yylexreturnptr{PIPE}" );@]@=}
";" {@> @[TeX_( "/yylexreturnptr{SEMICOLON}" );@]@=}
{id} {@> @[@<Prepare an identifier@>@]@=}
- {int} {@> @[TeX_( "/edef/next{/yylval{/nx/anint{/the/yytext}" );@]@;
- @> @[TeX_( "{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ {int} {@> @[TeX_( "/edef/next{/yylval{/nx/anint{/the/yytext}" );@]@=
+ @> @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@=
@> @[TeX_( "/yylexreturn{INT}" );@]@=}
- 0[xX][0-9abcdefABCDEF]+ {@> @[TeX_( "/edef/next{/yylval{/nx/hexint{/the/yytext}" );@]@;
- @> @[TeX_( "{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ 0[xX][0-9abcdefABCDEF]+ {@> @[TeX_( "/edef/next{/yylval{/nx/hexint{/the/yytext}" );@]@=
+ @> @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@=
@> @[TeX_( "/yylexreturn{INT}" );@]@=}
-
- /* {\it Identifiers may not start with a digit. Yet, don't silently accept \.{1FOO} as \.{1 FOO}.} */
- {int}{id} {@> @[TeX_( "/yycomplain{invalid identifier: /the/yytext}" );@]
- @> @[TeX_( "/yyerrterminate" );@]@=}
-
- /* {\it Characters.} */
+@t}\vb{\insertraw{\inscomment{\it identifiers may not start with a digit; yet, don't silently accept \.{1foo} as \.{1 foo}}}}{@>
+ {int}{id} {@> @[TeX_( "/yyfatal{invalid identifier: /the/yytext}" );@]@=}
+@t}\vb{\insertraw{\inscomment{\it characters}}}{@>
"'" {@> @[TeX_( "/yyBEGIN{SC_ESCAPED_CHARACTER}/yylexnext" );@]@=}
-
- /* {\it Strings.} */
+@t}\vb{\insertraw{\inscomment{\it strings}}}{@>
"\"" {@> @[TeX_( "/yyBEGIN{SC_ESCAPED_STRING}/yylexnext" );@]@=}
-
- /* {\it Prologue.} */
+@t}\vb{\insertraw{\inscomment{\it prologue}}}{@>
"%{" {@> @[@<Start assembling prologue code@>@]@=}
-
- /* {\it Code in between braces.} Originally preceded by \.{\\STRINGGROW} but it is omitted here. */
+@t}\vb{\insertraw{\inscomment{{\it code in between braces}; originally preceded by \.{\\STRINGGROW} but it is omitted here}}}{@>
"{" {@> @[TeX_( "/lonesting/z@@/yyBEGIN{SC_BRACED_CODE}/yylexnext" );@]@=}
-
- /* {\it A type.} */
+@t}\vb{\insertraw{\inscomment{\it a type}}}{@>
"<*>" {@> @[TeX_( "/yylexreturnptr{TAG_ANY}" );@]@=}
"<>" {@> @[TeX_( "/yylexreturnptr{TAG_NONE}" );@]@=}
"<" {@> @[TeX_( "/lonesting=/z@@/yyBEGIN{SC_TAG}/yylexnext" );@]@=}
"%%" {@> @[@<Switch sections@>@]@=}
- "[" {@> @[TeX_( "/let/bracketedidstr=/empty" );@]@;
+ "[" {@> @[TeX_( "/let/bracketedidstr=/empty" );@]
@> @[TeX_( "/bracketedidcontextstate/YYSTART" );@]
@> @[TeX_( "/yyBEGIN{SC_BRACKETED_ID}/yylexnext" );@]@=}
- <<EOF>> {@> @[TeX_( "/yyterminate% EOF in INITIAL" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyterminate" );@]/* \flexrenstyle{EOF} in \flexsnstyle{INITIAL} */@=}
[^\[%A-Za-z0-9_<>{}\"\'*;|=/, \f\n\t\v]+|. {@> @[@<Process a bad character@>@]@=}
}
@@ -333,7 +343,7 @@ returns a pair of pointers as the value.
@ Some additional constructs needed to typeset simple \flex\
declarations. This is not part of the original \bison\ scanner.
@<Scan \flex\ directives and options@>=
-@G
+@G(fs2)
<INITIAL>
{
"%option" {@> @[TeX_( "/yylexreturnptr{FLEX_OPTION}" );@]@=}
@@ -348,9 +358,8 @@ of the character matching by the rest of the lexer.
@[TeX_( "/edef/next{/nx/csname lexspecial[/the/yytextpure]/nx/endcsname}" );@]@;
@[TeX_( "/expandafter/expandafter/expandafter/ifx/next/relax" );@]@;
@[TeX_( " /iftracebadchars" );@]@;
- @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@;
+ @[TeX_( " /yyfatal{invalid character(s): /the/yytext}" );@]@;
@[TeX_( " /fi" );@]@;
- @[TeX_( " /yylexreturn{$undefined}" );@]@;
@[TeX_( "/else" );@]@;
@[TeX_( " /expandafter/lexspecialchar/expandafter{/next}{/the/yyfmark}{/the/yysmark}/yylexnext" );@]@;
@[TeX_( "/fi" );@]@;
@@ -379,14 +388,15 @@ of the character matching by the rest of the lexer.
@[TeX_( "/edef/next{/yylval{{api.pure}{pure-parser}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
@[TeX_( "/yylexreturn{PERCENT_FLAG}" );@]@;
-@ @<Possbly complain about a bad directive@>=
+@ @<Possibly complain about a bad directive@>=
@[TeX_( "/iftracebadchars" );@]@;
- @[TeX_( " /yycomplain{invalid directive: /the/yytext}" );@]@;
+ @[TeX_( " /yywarn{invalid directive: /the/yytext}" );@]@;
@[TeX_( "/fi" );@]@;
- @[TeX_( "/yylexnext" );@]@;
-@ @<Prepare an identifier@>=
- @[TeX_( "/edef/next{/yylval{/nx/idit{/the/yytextpure}{/the/yytext}" );@]@;
+@ At this point we save the spelling and the location of the identifier. The token is returned
+later, after the context is known.
+@<Prepare an identifier@>=
+ @[TeX_( "/edef/next{/yylval{{/the/yytextpure}{/the/yytext}" );@]@;
@[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
@[TeX_( "/let/bracketedidstr=/empty" );@]@;
@[TeX_( "/yyBEGIN{SC_AFTER_IDENTIFIER}/yylexnext" );@]@;
@@ -402,18 +412,18 @@ of the character matching by the rest of the lexer.
@[TeX_( "/edef/next{/postoks{{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
@[TeX_( "/yyBEGIN{SC_PROLOGUE}/yylexnext" );@]@;
-@ {\it Supporting \.{\\0} complexifies our implementation for no expected added value}.
+@ {\it Supporting \flexrestyle{\\0} complexifies our implementation for no expected added value}.
@<Do not support zero characters@>=
-@G
+@G(fs2)
<SC_ESCAPED_CHARACTER,SC_ESCAPED_STRING,SC_TAG>
{
- \0 {@> @[TeX_( "/yycomplain{invalid null character}/yylexnext" );@]@=}
+ \0 {@> @[TeX_( "/yywarn{invalid null character}" );@]@=}
}
@g
@ @<Scan after an identifier, check whether a colon is next@>=
-@G
+@G(fs2)
<SC_AFTER_IDENTIFIER>
{
"[" {@> @[@<Process the bracketed part of an identifier@>@]@=}
@@ -461,7 +471,7 @@ of the character matching by the rest of the lexer.
@[TeX_( "/yylexreturn{ID}" );@]@;
@ @<Scan bracketed identifiers@>=
-@G
+@G(fs2)
<SC_BRACKETED_ID>
{
<<EOF>> {@> @[@<Complain about unexpected end of file inside brackets@>@]@=}
@@ -473,19 +483,19 @@ of the character matching by the rest of the lexer.
@ @<Process bracketed identifier@>=
@[TeX_( "/ifx/bracketedidstr/empty" );@]@;
- @[TeX_( " /edef/bracketedidstr{/nx/idit{/the/yytextpure}" );@]@;
- @[TeX_( " {/the/yytext}{/the/yyfmark}{/the/yysmark}}" );@]@;
+ @[TeX_( " /edef/bracketedidstr{{/the/yytextpure}{/the/yytext}" );@]@;
+ @[TeX_( " {/the/yyfmark}{/the/yysmark}}" );@]@;
@[TeX_( " /let/next=/yylexnext" );@]@;
@[TeX_( "/else" );@]@;
- @[TeX_( " /def/next{/yycomplain{unexpected " );@]@;
- @[TeX_( " identifier in bracketed name: /the/yytext}/yylexnext}" );@]@;
+ @[TeX_( " /def/next{/yywarn{unexpected identifier " );@]@;
+ @[TeX_( " in bracketed name: /the/yytext}}" );@]@;
@[TeX_( "/fi" );@]@;
@[TeX_( "/next" );@]@;
@ @<Finish processing bracketed identifier@>=
@[TeX_( "/yyBEGINr/bracketedidcontextstate" );@]@;
@[TeX_( "/ifx/bracketedidstr/empty" );@]@;
- @[TeX_( " /def/next{/yycomplain{an identifier expected}/yylexnext}" );@]@;
+ @[TeX_( " /def/next{/yywarn{an identifier expected}}" );@]@;
@[TeX_( "/else" );@]@;
@[TeX_( " /ifnum/bracketedidcontextstate=/yylexstate{INITIAL}/relax" );@]@;
@[TeX_( " /expandafter/yylval/expandafter{/bracketedidstr}" );@]@;
@@ -498,14 +508,14 @@ of the character matching by the rest of the lexer.
@[TeX_( "/next" );@]@;
@ @<Complain about improper identifier characters@>=
- @[TeX_( "/yycomplain{invalid character(s) in bracketed name: /the/yytext}/yyerrterminate" );@]@;
+ @[TeX_( "/yyfatal{invalid character(s) in bracketed name: /the/yytext}" );@]@;
@ @<Complain about unexpected end of file inside brackets@>=
@[TeX_( "/yyBEGINr/bracketedidcontextstate" );@]@;
- @[TeX_( "/yycomplain{unexpected end of file inside brackets}/yyerrterminate" );@]@;
+ @[TeX_( "/yyfatal{unexpected end of file inside brackets}" );@]@;
@ @<Scan bracketed identifiers@>=
-@G
+@G(fs2)
<SC_RETURN_BRACKETED_ID>
{
. {@> @[@<Return a bracketed identifier@>@]@=}
@@ -519,32 +529,30 @@ of the character matching by the rest of the lexer.
@[TeX_( "/yyBEGIN{INITIAL}" );@]@;
@[TeX_( "/yylexreturn{BRACKETED_ID}" );@]@;
-@ {\it Scanning a Yacc comment. The initial \.{/*} is already eaten}.
-@<Scan a Yacc comment@>=
-@G
+@ {\it Scanning a \yacc\ comment. The initial \.{/*} is already eaten}.
+@<Scan a \yacc\ comment@>=
+@G(fs2)
<SC_YACC_COMMENT>
{
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
- @> @[TeX_( " a comment}/yyerrterminate" );@]@=}
- "*/" {@> @[TeX_( "/yyBEGINr{/contextstate}/yylexnext" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file in a comment}" );@]@=}
+ "*/" {@> @[TeX_( "/yyBEGINr/contextstate /yylexnext" );@]@=}
.|\n {@> @[TeX_( "/yylexnext" );@]@=}
}
@g
@ {\it Scanning a \Cee\ comment. The initial \.{/*} is already eaten}.
@<Scan a \Cee\ comment@>=
-@G
+@G(fs2)
<SC_COMMENT>
{
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
- @> @[TeX_( " a comment}/yyerrterminate" );@]@=}
- "*"{splice}"/" {@> @[TeX_( "/STRINGGROW/yyBEGINr/contextstate/yylexnext" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file in a comment}" );@]@=}
+ "*"{splice}"/" {@> @[TeX_( "/STRINGGROW/yyBEGINr/contextstate /yylexnext" );@]@=}
}
@g
@ {\it Scanning a line comment. The initial \.{//} is already eaten}.
@<Scan a line comment@>=
-@G
+@G(fs2)
<SC_LINE_COMMENT>
{
<<EOF>> {@> @[TeX_( "/yyBEGINr/contextstate /ROLLBACKCURRENTTOKEN" );@]
@@ -557,41 +565,37 @@ of the character matching by the rest of the lexer.
@ {\it Scanning a \bison\ string, including its escapes.
The initial quote is already eaten}.
@<Scan a \bison\ string@>=
-@G
+@G(fs2)
<SC_ESCAPED_STRING>
{
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
- @> @[TeX_( " a string}/yyerrterminate" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file in a string}" );@]@=}
"\"" {@> @[@<Finish a \bison\ string@>@]@=}
- "\n" {@> @[TeX_( "/yycomplain{unexpected end of line in " );@]
- @> @[TeX_( " a string}/yyerrterminate" );@]@=}
+ "\n" {@> @[TeX_( "/yyfatal{unexpected end of line in a string}" );@]@=}
}
@g
@ @<Finish a \bison\ string@>=
@[TeX_( "/STRINGFINISH" );@]@;
- @[TeX_( "/edef/next{/yylval{/nx/stringify{/the/laststring}" );@]@;
- @[TeX_( "{/the/laststringraw}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/edef/next{/yylval{{/the/laststring}{/the/laststringraw}" );@]@;
+ @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
@[TeX_( "/yyBEGIN{INITIAL}" );@]@;
@[TeX_( "/yylexreturn{STRING}" );@]@;
@ {\it Scanning a \bison\ character literal, decoding its escapes.
The initial quote is already eaten}.
@<Scan a character literal@>=
-@G
+@G(fs2)
<SC_ESCAPED_CHARACTER>
{
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
- @> @[TeX_( " a literal}/yyerrterminate" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file in a literal}" );@]@=}
"'" {@> @[@<Return an escaped character@>@]@=}
- "\n" {@> @[TeX_( "/yycomplain{unexpected end of line in " );@]
- @> @[TeX_( " a literal}/yyerrterminate" );@]@=}
+ "\n" {@> @[TeX_( "/yyfatal{unexpected end of line in a literal}" );@]@=}
}
@g
@ @<Return an escaped character@>=
@[TeX_( "/STRINGFINISH" );@]@;
- @[TeX_( "/edef/next{/yylval{/nx/charit{/the/laststring}{/the/laststringraw}" );@]@;
+ @[TeX_( "/edef/next{/yylval{{/the/laststring}{/the/laststringraw}" );@]@;
@[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
@[TeX_( "/STRINGFREE" );@]@;
@[TeX_( "/yyBEGIN{INITIAL}" );@]@;
@@ -599,14 +603,13 @@ The initial quote is already eaten}.
@ {\it Scanning a tag. The initial angle bracket is already eaten}.
@<Scan a tag@>=
-@G
+@G(fs2)
<SC_TAG>
{
">" {@> @[@<Finish a tag@>@]@=}
([^<>]|->)+ {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
"<" {@> @[@<Raise nesting level@>@]@=}
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
- @> @[TeX_( " a literal}/yyerrterminate" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file in a literal}" );@]@=}
}
@g
@@ -614,7 +617,7 @@ The initial quote is already eaten}.
@[TeX_( "/advance/lonesting/m@@ne" );@]@;
@[TeX_( "/ifnum/lonesting</z@@" );@]@;
@[TeX_( " /STRINGFINISH" );@]@;
- @[TeX_( " /edef/next{/yylval{/nx/tagit{/the/laststring}{/the/laststringraw}" );@]@;
+ @[TeX_( " /edef/next{/yylval{{/the/laststring}{/the/laststringraw}" );@]@;
@[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
@[TeX_( " /STRINGFREE" );@]@;
@[TeX_( " /yyBEGIN{INITIAL}" );@]@;
@@ -632,7 +635,7 @@ so it makes sense to raise the nesting level one by one.
@[TeX_( "/yylexnext" );@]@;
@ @<Decode escaped characters@>=
-@G
+@G(fs2)
<SC_ESCAPED_STRING,SC_ESCAPED_CHARACTER>
{
\\[0-7]{1,3} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
@@ -645,17 +648,16 @@ so it makes sense to raise the nesting level one by one.
\\t {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
\\v {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
- /* {\it \.{\\\\[\\"\\'?\\\\]} would be shorter, but it confuses |xgettext|.} */
+ /* {\it \flexrestyle{\\\\[\\"\\'?\\\\]} is shorter but confuses |xgettext|} */
\\("\""|"'"|"?"|"\\") {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
\\(u|U[0-9abcdefABCDEF]{4})[0-9abcdefABCDEF]{4} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
- \\(.|\n) {@> @[TeX_( "/yycomplain{invalid character after " );@]
- @> @[TeX_( " /\\-escape: /the/yytext}/yylexnext" );@]@=}
+ \\(.|\n) {@> @[TeX_( "/yyfatal{invalid character after /\\: /the/yytext}" );@]@=}
}
@g
@ @<Scan user-code characters and strings@>=
-@G
+@G(fs2)
<SC_CHARACTER,SC_STRING>
{
{splice}|\\{splice}[^\n\[\]] {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
@@ -663,42 +665,34 @@ so it makes sense to raise the nesting level one by one.
<SC_CHARACTER>
{
- "'" {@> @[TeX_( "/STRINGGROW /yyBEGINr{/contextstate}/yylexnext" );@]@=}
- \n {@> @[TeX_( "/yycomplain{unexpected end of line instead of " );@]
- @> @[TeX_( " a character}/yyerrterminate" );@]@=}
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file instead of " );@]
- @> @[TeX_( " a character}/yyerrterminate" );@]@=}
+ "'" {@> @[TeX_( "/STRINGGROW /yyBEGINr/contextstate /yylexnext" );@]@=}
+ \n {@> @[TeX_( "/yyfatal{unexpected end of line instead of a character}" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file instead of a character}" );@]@=}
}
<SC_STRING>
{
- "\"" {@> @[TeX_( "/STRINGGROW /yyBEGINr{/contextstate}/yylexnext" );@]@=}
- \n {@> @[TeX_( "/yycomplain{unexpected end of line instead of " );@]
- @> @[TeX_( " a character}/yyerrterminate" );@]@=}
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file instead of " );@]
- @> @[TeX_( " a character}/yyerrterminate" );@]@=}
+ "\"" {@> @[TeX_( "/STRINGGROW /yyBEGINr/contextstate /yylexnext" );@]@=}
+ \n {@> @[TeX_( "/yyfatal{unexpected end of line instead of a character}" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file instead of a character}" );@]@=}
}
@g
@ @<Strings, comments etc.\ found in user code@>=
-@G
+@G(fs2)
<SC_BRACED_CODE,SC_PROLOGUE,SC_EPILOGUE,SC_PREDICATE>
{
- "'" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
- @> @[TeX_( " /yyBEGIN{SC_CHARACTER}/yylexnext" );@]@=}
- "\"" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
- @> @[TeX_( " /yyBEGIN{SC_STRING}/yylexnext" );@]@=}
- "/"{splice}"*" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
- @> @[TeX_( " /yyBEGIN{SC_COMMENT}/yylexnext" );@]@=}
- "/"{splice}"/" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
- @> @[TeX_( " /yyBEGIN{SC_LINE_COMMENT}/yylexnext" );@]@=}
+ "'" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART /yyBEGIN{SC_CHARACTER}/yylexnext" );@]@=}
+ "\"" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART /yyBEGIN{SC_STRING}/yylexnext" );@]@=}
+ "/"{splice}"*" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART /yyBEGIN{SC_COMMENT}/yylexnext" );@]@=}
+ "/"{splice}"/" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART /yyBEGIN{SC_LINE_COMMENT}/yylexnext" );@]@=}
}
@g
@ {\it Scanning some code in braces (actions, predicates). The
initial \.{\{} is already eaten}.
@<Scan code in braces@>=
-@G
+@G(fs2)
<SC_BRACED_CODE,SC_PREDICATE>
{
"{"|"<"{splice}"%" {@> @[TeX_( "/STRINGGROW /advance/lonesting/@@ne /yylexnext" );@]@=}
@@ -706,8 +700,7 @@ initial \.{\{} is already eaten}.
/* {\it Tokenize \.{<<\%} correctly (as \.{<<} \.{\%}) rather than incorrectly (as \.{<} \.{<\%}).} */
"<"{splice}"<" {@> @[TeX_( "/STRINGGROW /yylexnext" );@]@=}
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of line " );@]
- @> @[TeX_( " inside braced code}/yyerrterminate" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of line inside braced code}" );@]@=}
}
<SC_BRACED_CODE>
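
The nesting counter \.{\\lonesting} used above implements the usual
brace-matching technique: raise the count on every opening brace, lower it on
every closing one, and stop once it would drop below zero (the opening brace
having already been consumed). Purely as an illustration, and with no claim of
matching \splint's actual code, the same idea looks as follows in stand-alone
\Cee; the function name \.{scan\_braced} is made up.

    /* Sketch only, not SPLinT code: the opening brace is already consumed,
       so the chunk ends when a closing brace would take the counter below
       zero. */
    #include <stdio.h>

    static const char *scan_braced(const char *p)
    {
        int nesting = 0;                      /* plays the role of \lonesting */
        for (; *p; p++) {
            if (*p == '{') nesting++;
            else if (*p == '}' && nesting-- == 0)
                return p + 1;                 /* just past the matching brace */
        }
        return NULL;                          /* unexpected end of input      */
    }

    int main(void)
    {
        /* everything up to the last `}' belongs to the braced chunk */
        printf("rest:%s\n", scan_braced("int x; { int y; } return x;} trailer"));
        return 0;
    }
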
@@ -752,12 +745,11 @@ braced code.
@ {\it Scanning some prologue: from \.{\%\{} (already scanned) to \.{\%\}}}.
@<Scan prologue@>=
-@G
+@G(fs2)
<SC_PROLOGUE>
{
"%}" {@> @[@<Finish braced code@>@]@=}
- <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file " );@]
- @> @[TeX_( " inside prologue}/yyerrterminate" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{unexpected end of file inside prologue}" );@]@=}
}
@g
@@ -770,7 +762,7 @@ braced code.
@ {\it Scanning the epilogue (everything after the second \prodstyle{\%\%}, which
has already been eaten)}.
@<Scan the epilogue@>=
-@G
+@G(fs2)
<SC_EPILOGUE>
{
<<EOF>> {@> @[@<Handle end of file in the epilogue@>@]@=}
@@ -789,7 +781,7 @@ has already been eaten)}.
\immediate\closeout\stlist
\fi
@<Add the scanned symbol to the current string@>=
-@G
+@G(fs2)
<SC_COMMENT,SC_LINE_COMMENT,SC_BRACED_CODE,SC_PREDICATE,SC_PROLOGUE,SC_EPILOGUE,
SC_STRING,SC_CHARACTER,SC_ESCAPED_STRING,SC_ESCAPED_CHARACTER>. |
<SC_COMMENT,SC_LINE_COMMENT,SC_BRACED_CODE,SC_PREDICATE,
diff --git a/support/splint/cweb/mkeparser.w b/support/splint/cweb/mkeparser.w
index 937a9e5498..f60ccf5d64 100644
--- a/support/splint/cweb/mkeparser.w
+++ b/support/splint/cweb/mkeparser.w
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -48,11 +48,11 @@ interpret the acronym as something easier to remember.
@ @<Short option list@>=
@<Shortcuts for command line options affecting parser output@>@;
-@ @<Raw option list@>=
- @<Parser specific option list@>@;
+@ @<Options with shortcuts@>=
+ @<Parser specific options with shortcuts@>@;
-@ @<Higher index options@>=
- @<Higher index parser specific options@>@;
+@ @<Options without shortcuts@>=
+ @<Parser specific options without shortcuts@>@;
@ @<Cases affecting the whole program@>=
@<Handle parser output options@>@;
@@ -89,21 +89,25 @@ interpret the acronym as something easier to remember.
@*1 Parser dependent settings.
This is it for the core table output functions.
-To make all this into a working code in this
-case, lexing and error function declarations are supplied.
+To make all of this into working code in this
+case, lexing and error functions are supplied.
+
+If an existing parser is reused as |PARSER_FILE|, these functions may
+have been redefined by the programmer, in which case a mechanism to
+supply the appropriate definitions is provided.
@<Auxiliary function declarations@>=
#ifndef HAS_SCANNER
int yylex(void);
- int yyerror(void);
+ int yyerror(char *);
#endif
@ @<Auxiliary function definitions@>=
#ifndef HAS_SCANNER
int yylex(void){}
- int yyerror(void){}
+ int yyerror(char * text){}
#endif
-@ \let\B\oldB % \Cee\ mode mixes all up
+@ %\let\B\oldB % \Cee\ mode mixes all up
@c
@<\Cee\ preamble@>@;
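
A minimal stand-alone sketch of the guard above (not part of \splint; the
`real' branch is only a pretend scanner): compiled as is, the do-nothing stubs
are used; compiled with \.{-DHAS\_SCANNER}, they are suppressed so that the
functions supplied by a reused parser take their place.

    /* Illustration of the HAS_SCANNER guard; the `real' versions here are
       placeholders standing in for whatever the reused parser provides. */
    #include <stdio.h>

    #ifndef HAS_SCANNER
    int yylex(void)         { return 0; }                /* dummy: no tokens */
    int yyerror(char *text) { (void)text; return 0; }    /* dummy: silent    */
    #else
    int yylex(void)         { return 1; }                /* pretend scanner  */
    int yyerror(char *text) { return fprintf(stderr, "%s\n", text); }
    #endif

    int main(void)
    {
        printf("yylex() returned %d\n", yylex());
        return yyerror("just a demo, not an error") < 0;
    }
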
diff --git a/support/splint/cweb/mkscanner.w b/support/splint/cweb/mkscanner.w
index 22c39e9342..e847428199 100644
--- a/support/splint/cweb/mkscanner.w
+++ b/support/splint/cweb/mkscanner.w
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -51,11 +51,11 @@ interpret the acronym as something easier to remember.
@ @<Cases affecting the whole program@>=
@<Handle scanner output options@>@;
-@ @<Raw option list@>=
- @<Scanner specific option list@>@;
+@ @<Options with shortcuts@>=
+ @<Scanner specific options with shortcuts@>@;
-@ @<Higher index options@>=
- @<Higher index scanner specific options@>@;
+@ @<Options without shortcuts@>=
+ @<Scanner specific options without shortcuts@>@;
@ @<Short option list@>=
@<Shortcuts for command line options affecting scanner output@>@;
@@ -87,6 +87,7 @@ lexer written for a different purpose, the situation may be different.
@<\Cee\ preamble@>@;
typedef int YYSTYPE;
#define YY_BREAK return 0;
+#define YY_USER_ACTION fprintf( tables_out, "%s\n", " \\YYRULESETUP" );
#include LEXER_FILE
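
Both redefinitions above rely on documented \flex\ hooks: \.{YY\_USER\_ACTION}
is executed before every matched rule's action, and \.{YY\_BREAK} terminates
each action (it is \.{break;} by default), so the driver emits a
\.{\\YYRULESETUP} line per match and leaves \.{yylex()} after a single rule.
The stand-alone sketch below imitates one generated rule body to show how the
two hooks interact; \.{toy\_yylex} is a made-up stand-in for the real
generated scanner.

    /* Sketch of the hook pattern above with a hand-written stand-in for a
       flex-generated rule body; not part of SPLinT itself. */
    #include <stdio.h>

    static FILE *tables_out;                 /* stands in for the real stream */

    #define YY_USER_ACTION fprintf( tables_out, "%s\n", " \\YYRULESETUP" );
    #define YY_BREAK       return 0;

    static int toy_yylex(void)
    {
        /* roughly what one expanded rule of the generated scanner becomes */
        YY_USER_ACTION                                 /* before the action  */
        fprintf( tables_out, "%s\n", " \\yylexnext" ); /* the action itself  */
        YY_BREAK                                       /* instead of break;  */
    }

    int main(void)
    {
        tables_out = stdout;
        return toy_yylex();
    }
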
diff --git a/support/splint/cweb/np.w b/support/splint/cweb/np.w
index aa20e551c7..58379649a5 100644
--- a/support/splint/cweb/np.w
+++ b/support/splint/cweb/np.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2014, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -14,8 +14,8 @@
@q You should have received a copy of the GNU General Public License@>
@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
-@*1 The name parser. What follows is an example parser for the name
-processing. This approach (i.e. using a `full blown' parser/scanner
+@** The name parser. What follows is an example parser for term
+name processing. This approach (i.e. using a `full blown' parser/scanner
combination) is probably not the best way to implement such machinery
but its main purpose is to demonstrate a way to create a separate
parser for local purposes.
@@ -51,17 +51,18 @@ parser for local purposes.
@G
%token PERCENT_IDENTIFIER
%token IDENTIFIER
-%token OPTIONAL
-%token NO_ATTR
+%token OPTIONAL NO_ATTR EXTENDED LT RT
%token INTEGER
-%token EXTENDED
-%token WILDCARD
+%token WILDCARD C_ESCCHAR
+%token META_IDENTIFIER
@g
@ @<Parser productions@>=
@G
full_name:
identifier_string suffixes.opt {@> @<Compose the full name@> @=}
+| META_IDENTIFIER {@> @<Turn a \prodstylens{META\_IDENTIFIER}{\smallnamespace} into a full name@> @=}
+| quoted_name suffixes.opt {@> @<Compose the full name@> @=}
;
identifier_string:
@@ -69,17 +70,24 @@ identifier_string:
| IDENTIFIER {@> @<Start with an identifier@> @=}
| '<' IDENTIFIER '>' {@> @<Start with a tag@> @=}
| '\'' WILDCARD '\'' {@> @<Start with a quoted string@> @=}
+| '\'' C_ESCCHAR '\'' {@> @<Start with an escaped character@> @=}
| '\'' '>' '\'' {@> @<Start with a \prodstyle{'>'} string@> @=}
| '\'' '<' '\'' {@> @<Start with a \prodstyle{'<'} string@> @=}
| '\'' '.' '\'' {@> @<Start with a \prodstyle{'.'} string@> @=}
| '\'' '_' '\'' {@> @<Start with an \prodstyle{'\_'} string@> @=}
| '\'' '-' '\'' {@> @<Start with a \prodstyle{'-'} string@> @=}
+| '\'' '$' '\'' {@> @<Start with a \prodstyle{'\$'} string@> @=}
+| '$' {@> @<Prepare a \bison\ stack name@> @=}
| qualifier {@> @<Turn a qualifier into an identifier@> @=}
| identifier_string IDENTIFIER {@> @<Attach an identifier@> @=}
| identifier_string qualifier {@> @<Attach qualifier to a name@> @=}
| identifier_string INTEGER {@> @<Attach an integer@> @=}
;
+quoted_name:
+ '\"' PERCENT_IDENTIFIER '\"' {@> @<Process quoted option@> @=}
+| '\"' IDENTIFIER '\"' {@> @<Process quoted name@> @=}
+;
suffixes.opt:
{@> TeX_( "/yy0{}" ); @=}
| '.' {@> TeX_( "/yy0{/nx/dotsp/nx/sfxnone}" ); @=}
@@ -102,16 +110,24 @@ qualified_suffixes:
| qualifier {@> @<Start suffixes with a qualifier@> @=}
;
+@t}\vb{\inline\flatten}{@>
qualifier:
OPTIONAL {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
| NO_ATTR {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
| EXTENDED {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+| LT {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+| RT {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
;
@g
@ @<Compose the full name@>=
@[TeX_( "/yy0{/the/yy(1)/the/yy(2)}/namechars/yyval" );@]@;
+@ @<Turn a \prodstylens{META\_IDENTIFIER}{\smallnamespace} into a full name@>=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/idstr{/the/toksa}{/the/toksb}}/namechars/yyval" );@]@;
+
@ @<Attach option name@>=
@[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
@[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@;
@@ -130,22 +146,33 @@ qualifier:
@ @<Start with a quoted string@>=
@[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@;
@[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@;
- @[TeX_( "/yy0{/nx/chstr{/the/toksa}{/the/toksb}}" );@]@;
+ @[TeX_( "/sansfirst/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{/the/toksb}{/the/toksb}/nx/visflag{/nx/termvstring}{}}" );@]@;
+
+@ @<Start with an escaped character@>=
+ @[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{/the/toksb}{/the/toksb}/nx/visflag{/nx/termvstring}{}}" );@]@;
@ @<Start with a \prodstyle{'<'} string@>=
- @[TeX_( "/yy0{/nx/chstr{<}{<}}" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{<}{<}/nx/visflag{/nx/termvstring}{}}" );@]@;
@ @<Start with a \prodstyle{'>'} string@>=
- @[TeX_( "/yy0{/nx/chstr{/greaterthan}{/greaterthan}}" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{/greaterthan}{/greaterthan}/nx/visflag{/nx/termvstring}{}}" );@]@;
@ @<Start with an \prodstyle{'\_'} string@>=
- @[TeX_( "/yy0{/nx/chstr{/uscoreletter}{/uscoreletter}}" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{/uscoreletter}{/uscoreletter}/nx/visflag{/nx/termvstring}{}}" );@]@;
@ @<Start with a \prodstyle{'-'} string@>=
- @[TeX_( "/yy0{/nx/chstr{-}{-}}" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{-}{-}/nx/visflag{/nx/termvstring}{}}" );@]@;
+
+@ @<Start with a \prodstyle{'\$'} string@>=
+ @[TeX_( "/yy0{/nx/chstr{/safemath}{/safemath}/nx/visflag{/nx/termvstring}{}}" );@]@;
@ @<Start with a \prodstyle{'.'} string@>=
- @[TeX_( "/yy0{/nx/chstr{.}{.}}" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{.}{.}/nx/visflag{/nx/termvstring}{}}" );@]@;
+
+@ @<Prepare a \bison\ stack name@>=
+ @[TeX_( "/yy0{/nx/bidstr{/nx/$}{/safemath}}" );@]@;
@ @<Turn a qualifier into an identifier@>=
@<Start with an identifier@>@;
@@ -164,9 +191,29 @@ qualifier:
@ @<Attach qualifier to a name@>=
@<Attach an identifier@>
-@ @<Attach an integer@>=
- @<Attach an identifier@>@;
+@ An integer at the end of an identifier (such as |id1|) is
+interpreted as a suffix (similar to the way \MF\ treats identifiers,
+and \mft\ typesets them\footnote{This allows, for example, names like
+|$[term0]| while leaving |$[char2int]| in its `natural' form.}) to
+mitigate a well-intentioned but surprisingly inconvenient feature of
+\CTANGLE, namely outputting something like \.{id.1} as \.{id\ .1} in an
+attempt to make sure that integers do not interfere with structure
+dereferences. For this to produce meaningful results, a stricter
+interpretation of \prodstyle{IDENTIFIER} syntax is required,
+represented by the \.{id\_strict} syntax below.
+@<Attach an integer@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/dotsp/nx/sfxi/the/yy(2)}" );@]@;
+@ @<Process quoted name@>=
+ @[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/idstr{/the/toksa}{/the/toksb}/nx/visflag{/nx/termvstring}{}}" );@]@;
+
+@ @<Process quoted option@>=
+ @[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/optstr{/the/toksa}{/the/toksb}/nx/visflag{/nx/termvstring}{}}" );@]@;
+
@ @<Attach suffixes@>=
@[TeX_( "/yy0{/nx/dotsp/the/yy(2)}" );@]@;
@@ -211,8 +258,14 @@ definitions are used.
@<Union of parser types@>=
-@*1 The name scanner.
-%\checktabletrue
+@** The name scanner.
+The scanner for lexing term names is admittedly {\em ad hoc\/} and
+rather redundant. A minor reason for this is to provide some
+flexibility for name typesetting. Another reason is to let the
+existing code serve as a template for similar procedures in other
+projects. At the same time, it must be pointed out that this scanner
+is executed multiple times for every \bison\ section, so its
+efficiency directly affects the speed at which the parser operates.
@(small_lexer.ll@>=
@G
@> @<Lexer definitions@> @=
@@ -230,10 +283,13 @@ void define_all_states( void ) {
@ @<Lexer definitions@>=
@<Lexer states@>@;
-@G
+@G(fs1)
letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ]
-wc ([^\\\'\"]{-}[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0-9]|\\.)
+c-escchar \\[fnrtv]
+wc ([^\\\'\"$.<>]{-}[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0-9]|\\.)
id {letter}({letter}|[-0-9])*
+id_strict {letter}(({letter}|[-0-9])*{letter})?
+meta_id "*"{id_strict}"*"?
int [0-9]+
@g
@@ -268,27 +324,8 @@ int [0-9]+
@<Scan identifiers@>@;
@ White space skipping.
-\traceparserstatestrue
-\tracestackstrue
-\tracerulestrue
-\traceactionstrue
-\tracelookaheadtrue
-\traceparseresultstrue
-\tracebadcharstrue
-\yyflexdebugtrue
-%
-\traceparserstatesfalse
-\tracestacksfalse
-\tracerulesfalse
-\traceactionsfalse
-\tracelookaheadfalse
-\traceparseresultsfalse
-\tracebadcharsfalse
-\yyflexdebugfalse
-%
-\yyskipparsetrue
@<Scan white space@>=
-@G
+@G(fs2)
[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=}
@g
@@ -297,9 +334,8 @@ its present state, it certainly is. However, if later on the
typesetting style for some of the keywords would need to be adjusted,
such changes would be easy to implement, since the template is already
here.
-\yyskipparsefalse % this is not necessary
@<Scan identifiers@>=
-@G
+@G(fs2)
"%binary" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
"%code" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
"%debug" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
@@ -355,26 +391,31 @@ here.
"%pure"[-_]"parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
"%token"[-_]"table" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
"%"({letter}|[0-9]|[-_]|"%"|[<>])+ {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
-
+@t}\vb{\insertraw{\inscomment{\it suffixes}}}{@>
"opt" {@> @[TeX_( "/yylexreturnval{OPTIONAL}" );@]@=}
"na" {@> @[TeX_( "/yylexreturnval{NO_ATTR}" );@]@=}
"ext" {@> @[TeX_( "/yylexreturnval{EXTENDED}" );@]@=}
-
-[<>._\'] {@> @[TeX_( "/yylexreturnchar" );@]@=}
+"l" {@> @[TeX_( "/yylexreturnval{LT}" );@]@=}
+"r" {@> @[TeX_( "/yylexreturnval{RT}" );@]@=}
+@t}\vb{\insertraw{\inscomment{\it delimiters}}}{@>
+[<>$._\'\"] {@> @[TeX_( "/yylexreturnchar" );@]@=}
+{c-escchar} {@> @[TeX_( "/yylexreturnval{C_ESCCHAR}" );@]@=}
{wc} {@> @[TeX_( "/yylexreturnval{WILDCARD}" );@]@=}
-
-{id} {@> @[@<Prepare to process an identifier@>@]@=}
+@t}\vb{\insertraw{\inscomment{\it identifiers and other names}}}{@>
+{id_strict} {@> @[@<Prepare to process an identifier@>@]@=}
+{meta_id} {@> @[@<Prepare to process a meta-identifier@>@]@=}
{int} {@> @[TeX_( "/yylexreturnval{INTEGER}" );@]@=}
-
-"\"" {@> @[TeX_( "/yylexnext" );@]@=}
+@t}\vb{\insertraw{\inscomment{\it everything else}}}{@>
. {@> @[@<React to a bad character@>@]@=}
@g
@ @<Prepare to process an identifier@>=
@[TeX_( "/yylexreturnval{IDENTIFIER}" );@]@;
+@ @<Prepare to process a meta-identifier@>=
+ @[TeX_( "/yylexreturnval{META_IDENTIFIER}" );@]@;
+
@ @<React to a bad character@>=
@[TeX_( "/iftracebadchars" );@]@;
- @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@;
+ @[TeX_( " /yyfatal{invalid character(s): /the/yytext}" );@]@;
@[TeX_( "/fi" );@]@;
- @[TeX_( "/yylexreturn{$undefined}" );@]@;
diff --git a/support/splint/cweb/philosophy.w b/support/splint/cweb/philosophy.w
index 4d30f2d765..7b173f5d2f 100644
--- a/support/splint/cweb/philosophy.w
+++ b/support/splint/cweb/philosophy.w
@@ -1,3 +1,19 @@
+@q Copyright 2012-2020, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
@**Philosophy.
This section should, perhaps, be more appropriately called {\it rant\/} but
{\it philosophy\/} sounds more academic. The design of any software involves
@@ -5,58 +21,61 @@ numerous choices, and \splint\ is no exception. Some of these choices
are explained in the appropriate places in the package files. This
section collects a few `big picture' viewpoints that did not fit elsewhere.
-@*1 On typographic style.
+@*1 On typographic convention.
It must seem quite perplexing to some readers that a
-document with a focus on {\it pretty-printing\/} displays such a
-wanton disregard for good typographic taste. Haphazard choice of
-styles to present programming constructs, random overabundance of
-fonts on almost every single page are just a few among the many typographic sins
-and design guffaws so amply manifested on these pages. The author has
-to take full responsibility for the lack of taste in this
-opus and has only one argument in his defense: this is not
+manual focussing on {\it pretty-printing\/} shows such a
+wanton disregard for good typographic style. Haphazard choice of
+layouts to present programming constructs, random overabundance of
+fonts on almost every page are just a few of the many typographic sins
+and design gaffes so amply manifested in this opus. The author must
+take full responsibility for the lack of taste in this
+document and has only one argument in his defense: this is not
merely a book for a good night read but a piece of technical
documentation.
-In many ways, the goal of this document is somewhat contrary to that
+In many ways, the goal of this document is somewhat different from that
of a well-written manual: to display the main features
prominently and in logical order. After all, this is a package that is
-intended to help {\it write\/} such manuals so it inevitably must
-display some use cases that demonstrate a variety of typographic styles
-possible to achieve with \splint. Needless to say, {\it variety\/} and
-{\it consistency\/} seldom go hand in hand and it is consistency that
-makes for a pretty page. One of the objectives has been to demonstrate a
+intended to help {\it write\/} such manuals so it must inevitably
+present some use cases that exhibit a variety of typographic styles
+achievable with \splint. Needless to say, {\it variety\/} and
+{\it consistency\/} seldom go hand in hand and it is the consistency that
+makes for a pretty page. One of the objectives has been to reveal a
number of quite technical programming constructs so one should keep in
mind that it is assumed that the reader will want to look up the input
files to see how some (however ugly and esoteric) typographic effects
-had been achieved.
+have been achieved.
-On the other hand, to use a clich\'e, beauty is in the eyes of the
+On the other hand, to quote a clich\'e, beauty is in the eyes of the
beholder so what makes a book readable (or even beautiful) may well
-depend on the background of the reader. As an example, letterspacing
+depend on the reader's background. As an example, letterspacing
as a typographic device is almost universally reviled in Western
typography (aside from a few niche uses such as setting titles). In
Russian, however (at least until recently), letterspacing has been
routinely used for emphasis (or, as a Russian would put it,
e$\,$m$\,$p$\,$h$\,$a$\,$s$\,$i$\,$s) in lieu of, say, {\it italics}. Before
I hear any objections from typography purists, let me just say that
-this technique fits perfectly with the way emphasis works in the Russian speech: a
-speaker slowly enunciates the sounds of each word (incidentally,
-emphasizing {\it emphasis\/} is a perfect example of why this method
-would fail in most English texts). Letterspaced
-sentences are easy to find on a page and set a special reading rhythm,
-which is an added bonus in many cases, although it does violate the
-`universally gray pages are a must' dogma.
+this technique fits in perfectly with the way emphasis works in
+Russian speech: the speaker slowly enunciates the sounds of each word
+(incidentally, emphasizing {\it emphasis\/} this way is a perfect example of the
+inevitable failure of any attempted letterspaced highlighting in most
+English texts). Letterspaced sentences are easy to find on a page, and
+they set a special reading rhythm, which is an added bonus in many
+cases, although their presence openly violates
+the `universally gray pages are a must' dogma.
@*1 Why GPL.
-The choice of license for this project goes beyond merely showing the
-source. \TeX, by its very nature is an open source language, so it
-is not a matter of hiding anything from the user or a potential
-developer. The \Cee\ code is a different matter but the source is not
+Selecting the license for this project involves more than the
+availability of the source code. \TeX, by its very nature, is an
+interpreted\footnote{There are some exceptions to this, in the form of
+preloaded {\em formats}.} language, so it
+is not a matter of hiding anything from the reader or a potential
+programmer. The \Cee\ code is a different matter but the source is not
that complicated. Reducing the licensing issue to the ability of
someone else to see the source code is a great
-oversimplification. Without getting into too many details of so-called
+oversimplification. Short of getting into too many details of the so-called
`open source licenses' (other than GPL) and arguing with their advocates, let me simply
-express my lack of understanding at the arguments that purport that
+express my lack of understanding of the arguments purporting that
BSD-style licenses introduce more freedom by allowing a software
vendor to incorporate the BSD-licensed software into their
products. What benefit does one derive from such `extension' of software
@@ -69,22 +88,23 @@ misplaced such hopes are.
I am not going to argue for the benefits of free software at length, either
(such benefits seem self-evident to me, although the readers should
-feel free to disagree). Let me just point out that software companies
+feel free to disagree). Let me just point out that the software companies
enjoy quite a few freedoms that we, as software consumers elect to
afford them. Among such freedoms are the ability to renege on any
-promises made to potential users and withdraw any guarantees that such
-users might enjoy. Free software, of course, does not provide any
-guarantees, either but `you get what you paid for'. As a result of
-such `release of any responsibility', the claims of increased
+promises made to us and withdraw any guarantees that we might enjoy.
+As a result of such `release of any responsibility', the claims of increased
reliability or better support for the commercial software sound a
-bit hollow. Another well spread tactic is user brainwashing and
+bit hollow. Free software, of course, does not provide any
+guarantees either, but `you get what you paid for'.
+
+Another widespread industry tactic is user brainwashing and
changing the culture (usually for the worse) in order to promote new
`user-friendly' features of commercial software. Instead of taking
advantage of computers as cognitive machines we have come to view
them as advanced media players that we interact with through
artificial, unnatural interfaces. Meaningless terminology (`UX' for
`user experience'? What in the world is `user experience'?)
-proliferates, and programmers are happy to deceive themselves with
+proliferates, and programmers are all too happy to deceive themselves with
their newly discovered business prowess.
One would hope that the somewhat higher standards of the `real'
@@ -105,13 +125,14 @@ software is quite possible, though, just look at programmable
thermostats, simple cellphones and other `invisible' gadgets we
enjoy. The `software ideology' with its `IP' lingo is spreading like a
virus even through the world of real things. We now expect products to
-break and are too quick to forgive sloppy engineering that goes into
-everyday things. We are also getting used to the idea that it is the
-manufacturers that get to dictate the terms of use for `their' products
-and that we are merely borrowing `their' stuff.
+break and are too quick to forgive sloppy (or worse, malicious)
+engineering that goes into everyday things. We are also getting
+used to the idea that it is the manufacturers that get to dictate
+the terms of use for `their' products and that we are merely
+borrowing `their' stuff.
-The GPL was conceived as an antidote to this scourge. This document is a
-remarkable piece of `legal engineering': a self-propagating license
+The GPL was conceived as an antidote to this scourge. This license is a
+remarkable piece of `legal engineering': a self-propagating contract
with a clearly outlined set of goals. While by itself it does not
guarantee reliability or quality, it does inhibit the spread of the `IP'
(which is sometimes sarcastically, though quite perceptively,
@@ -121,29 +142,32 @@ software.
The industry has adapted, of course. So called (non GPL) `open source
licenses', that are supposed to be an improvement on GPL,
are a sort of `immune reaction' to the free software
-movement. Convince and confuse enough apathetic users and the
+movement. Describing GPL as `viral', creating dismissive acronyms such as FLOSS to
+refer to free software, and spreading outright misinformation about GPL
+are just a few of the tactics employed by the software companies.
+Convince and confuse enough apathetic users and the
protections granted by GPL are no longer visible.
-@*1 Why not \Cee$++$ or OOP in general.
+@*1 Why not C{\tt ++} or OOP in general.
The choice of the language was mainly driven by \ae sthetic motives:
-\Cee$++$ has a bloated and confusing standard, partially supported by
+\Ceepp\ has a bloated and confusing standard, partially supported by
various compilers. It seems that there is no agreement on what
-\Cee$++$ really is or how to use some of its constructs. This is all
+\Ceepp\ really is or how to use some of its constructs. This is all
in contrast to \Cee\ with its well defined and concise body of
specifications and rather well established stylistics. The existence
-of `obfuscated \Cee' is not good evidence of deficiency and \Cee$++$
+of `obfuscated \Cee' is not good evidence of deficiency and \Ceepp\
is definitely not immune to this malady.
Object oriented design has certainly taken on an aura of a religious
dictate, universally adhered to and forcefully promoted by its
followers. Unfortunately, the definition of what constitutes an
-`object-oriented' approach is rather vague. A few abstract concepts are
+`object-oriented' approach is rather vague. A few informal concepts are
commonly tossed about to give the illusion of a well developed
abstraction (such as `polymorphism', `encapsulation', and so on) but
-definitions vary in both length and contents, depending on the source.
+definitions vary in both length and content, depending on the source.
-On a syntactic level, some features of object-oriented languages are
-undoubtedly very practical (such as a |this| pointer in \Cee$++$),
+On the syntactic level, some features of object-oriented languages are
+undoubtedly very practical (such as a |this| pointer in \Ceepp),
however, many of those features can be effectively emulated with some
clever uses of an appropriate preprocessor (there are a few
exceptions, of course, |this| being one of them). The rest of the
@@ -188,17 +212,18 @@ software with inadequate tools merely `to encourage development'.
The feeling of a \.{WEB} source being {\it over-documented\/} is most
certainly subjective, and, I am sure, not shared by all `current
programmers'. The advantage of using \.{WEB}-like tools, however, is
-that it gives the programmer the ability to place the vital
+that it gives the programmer the ability to place vital
information where it does not distract the reader (`developer',
`maintainer', call it whatever you like) from the logical flow of the
code.
-Some of the complaints in \cite[Ho] are definitely justified,
+Some of the complaints in \cite[Ho] are definitely justified (see
+below for a few similar criticisms of \CWEB),
although it seems that a better approach would be to write an improved
tool similar to \.{WEB}, rather than give up all the flexibility such
a tool provides.
-@*1 Why \CWEB.
+@*1 Why \eatone{CWEB}\CWEB.
\CWEB\ is not as polished as \TeX\ but it works and has a
number of impressive features. It is, regrettably, a `niche' tool and
a few existing extensions of \CWEB\ and software based on similar ideas
@@ -207,16 +232,179 @@ largely neglected even though it seems to have a more logical
foundation than OOP. Under these circumstances, \CWEB\ seemed to be
the best available option.
-@*1 Why not GitHub, Bitbucket, etc.
-Git is an incredible tool and is used extensively in the development
+@*2 Some \eatone{CWEB}\CWEB\ idiosyncrasies.
+\CWEB\ was among the first tools for literate programming intended
+for public use\footnote{The original \WEB\ was designed to support
+DEK's \TeX\ and \MF\ projects and was intended for \Pascal\ family
+languages.}. By almost every measure it is a very successful design:
+the program mostly does what is intended, was used in a number of
+projects, and made a significant contribution to the practice of {\it
+literate programming}@^literate programming@>. It also gave rise to a
+multitude of similar software packages (see, for example,
+\noweb@^noweb@>\ by N.~Ramsey, \cite[Ra]), which proves the vitality
+of the approach taken by the authors of \CWEB.
+
+While the value of \CWEB\ is not in dispute, it would be healthy to
+outline a few deficiencies\footnote{Quirks would be a better term.}
+that became apparent after intensive (ab)use of this software. Before
+we proceed to list our criticisms, however, the author must make a
+disclaimer that not only do most of the complaints below stem from trying
+to use \CWEB\ outside of its intended field of application but such
+use has also been hampered by the author's likely lack of familiarity
+with some of \CWEB's features.
+
+The first (non)complaint that must be mentioned here is \CWEB's narrow
+focus on \Cee-styled languages. The `grammar' used to process the
+input is hard coded in \CWEAVE, so any changes to it inevitably
+involve rewriting portions of the code and rebuilding \CWEAVE. As
+\Cee11 came to prominence, a few of its constructs have been left
+behind by \CWEAVE. Among the most obvious of these are variadic macros
+and compound literals. The former is only a problem in \CWEB's \.{@@d}
+style definitions (which are of questionable utility to begin with)
+while the lack of support for the latter may be somewhat amended by
+the use of \.{@@[}$\ldots$\.{@@]} and \.{@@;} constructs to
+manipulate \CWEAVE's perception of a given {\em chunk\/} as either an
+{\em exp\/} or a {\em stmt}. This last mechanism of syntactic markup
+is spartan but remarkably effective, although the code thus annotated
+tends to be hard to read in the editor (while resulting in just as
+beautifully typeset pages, nonetheless).
+
+Granted, \CWEB's stated goal was to bring the technique of literate
+programming to \Cee, \Ceepp, and related languages so the criticism
+above must be viewed in this context. Since \CWEAVE\ outputs \TeX, one
+avenue for customizing its use to one's needs is modifying the macros
+in \.{cwebmac.tex}. \splint\ took this route by rewriting a number of
+macros, ranging from simple operator displays (replacing, say, `$=$' with
+`|=|') to extensively customizing the indexing mechanism.
+
+Unfortunately, this strategy could only take one so far. The \TeX\
+output produced by \CWEAVE\ does not always lend itself readily to this
+approach. To begin with, while combining its `chunks' into
+larger ones, \CWEAVE\ dives in and out of the math mode unpredictably,
+so any macros trying to read their `environment' must be ready to operate both
+inside and outside of the math mode and leave the proper mode behind when
+they are done. The situation is not helped by the fact that both the
+beginning and the end of the math mode in \TeX\ are marked by the same
+character (\.{\$}, and it costs you, indeed) so `expandable' macros
+are difficult to design.
+
+Adding to these difficulties is \CWEAVE's facility to insert raw \TeX\
+material in the middle of its input (the \.{@@t}$\ldots$\.{@@>}
+construct). While rather flexible, by default it puts all such user
+supplied \TeX\ fragments inside an \.{\\hbox}, which brings with it all
+the advantages and, unfortunately, the disadvantages of grouping:
+the inability to introduce line breaks within the fragment, etc. There is,
+of course, an easy fix to most of these woes, outlined in \CWEB's
+manual: one can simply type \.{@@t\}}$\,$\TeX\ stuff$\,$\.{\{@@>} which
+inserts \.{\\hbox\{\}}$\,$\TeX\ stuff$\,$\.{\{\}} into \CWEAVE's output. The
+cost of this hack (aside from looking and feeling rather ugly on the
+editor screen, not to mention disrupting the editor's brace
+accounting) is a superfluous \.{\\hbox\{\}} left behind {\em before\/}
+the `\TeX\ stuff'. The programmer-supplied \TeX\ code is unable to
+remove this box (at the macro level, i.e.~in \TeX's `mouth' using
+D.~Knuth's terminology, one may still succeed with the \.{\\lastbox}
+approach unless the \.{\\hbox} was inserted in the main vertical mode)
+and it may result in an unwanted blank line, slow down
+the typesetting, etc. Most of these side-effects are easily treatable
+but it would still be nice if a true `\.{asm} style' insertion of raw
+\TeX\ were possible\footnote{It must be said that in the majority of
+cases such side-effects are indeed desirable, and save the programmer some
+typing but it seems that the \.{@@t} facility was not well thought
+out in its entirety.}.
+
+In general, the lack of structure in \CWEAVE's generated \TeX\ seems
+to hinder even seemingly legitimate uses of \.{cwebmac.tex}
+macros. Even such a natural desire as to use a different type size for
+the \Cee\ portions of the \CWEB\ input is unexpectedly tricky to
+implement. Modifying the \.{\\B} macro results in rather wasteful
+multiple reading of the tokens in the \Cee\ portion, not to mention
+the absence of any guarantee that \.{\\B} can find the end of its
+argument (the macros used by \splint\ look for the \.{\\par} inserted
+by \CWEAVE\ whenever \.{\\B} is output but an unsuspecting programmer
+may disrupt this mechanism by inserting h\.{\{}is, her\.{\}} own
+\.{\\par} using the \.{@@t} facility with the aim to put a picture in
+the middle of the code, for example).
+
+The authors of \CWEB\ understood the importance of the
+cross-referencing facilities provided by their program. There are
+several control sequences dedicated to indexing alone (which itself
+has been the subject of criticism aimed at \CWEB). The indexing
+mechanism addresses a number of important needs, although it does not
+seem to be as flexible as required in some instances. For example, most
+book indices are split into sections according to the first letter of
+the indexed word to make it easier to find the desired term in the
+index (or to establish that it is not indexed). Doing so in \CWEB\
+requires some macro acrobatics, to say the least.
+
+Also absent is a facility to explicitly inhibit the indexing of a
+specific word (in \CWEAVE's own source, the references for |pp| fill
+up several lines in the index) or limit it to definitions only (as
+\CWEAVE\ automatically does for single-letter identifiers). This, too, can be
+fixed by writing new indexing macros.
+
+Finally, the index is created at the point of \CWEAVE\ invocation,
+before any pagination information becomes available. It is therefore
+difficult to implement any page oriented referencing scheme. Instead,
+the index and all the other cross referencing facilities are tied to
+section numbers. In the vast majority of cases, this is a superior
+scheme: sections tend to be short and the index creation is
+fast. Sometimes, however, it is useful to provide the page information
+to the index macros. Unfortunately, after the index creation is
+completed, any connection between the words in the original document
+and those in the index is lost.
+
+The indexing macros in \splint\ that deal with \bison\ and \flex\ code
+have the advantage of being able to use the page numbers so a better
+indexing scheme is possible. The section numbering approach taken by
+\splint\ approximately follows that of \noweb: the section reference
+consists of two parts, where the first is the page number the section
+starts on, and the second is the index of the section within the
+page. Within the page, sections are indexed by (sequences of) letters
+of the alphabet (\.a$\ldots$\.z and, in the rarest of cases,
+\.{aa}$\ldots$\.{zz} and so on). Numbering the sections themselves is
+not terribly complicated. Where it gets interesting is during the production
+of the index entries based on this system. When the sections are short, just
+referencing the section where the term appears works well. Sometimes,
+however, a section is split between two or more pages, in which case
+the indexing macros provide a compromise: whenever the term appears on
+a page different from the one on which the corresponding section
+starts, the index entry for that term uses the page number instead of
+the section reference. The difference between the two is easy to see,
+since the page number does not have any alphabetic characters in it.
+
+This is not {\em exactly\/} how the references work in \noweb, since
+\noweb\ ignores the \TeX\ portion of the section and only references
+the code {\em chunks\/} but it is similar in spirit. Other
+conveniences, also borrowed from \noweb, are the references in the
+margins that allow the reader to jump from one chunk to the next
+whenever the code chunk is composed of several sections. All of these
+changes are implemented with macros only, so, for example, the finer
+section number${}/{}$page number scheme is not available for the index
+entries produced by \CWEAVE\ itself. In the case of \CWEB\ generated
+entries, only the section numbers are used (which in most cases do provide
+the correct page number as part of the reference, however).
+
+To conclude this Festivus@^Festivus@>\footnote{Yes, I am old enough to know what
+this means.} style airing of grievances, let me state once again that
+\CWEB\ is a remarkable tool, and incredibly useful as it is, although
+it does test one's ability to write sophisticated \TeX\ if subtle
+effects are desired. Finally, when all else fails, one is free to
+modify \CWEB\ itself or even write one's own literate programming tool.
+
+@*1 Why not GitHub$^{\hbox{\sevenpoint\copyright}}$, Bitbucket$^{\hbox{\sevenpoint\copyright}}$, etc.
+Git is fantastic software that is used extensively in the development
of \splint. The distribution archive is a Git repository. The use of
-centralized services such as GitHub, however, seems redundant. The
+centralized services such as GitHub$^{\hbox{\sixpoint\copyright}}$\footnote{A recent acquisition of
+GitHub$^{\hbox{\sixpoint\copyright}}$
+by a company that not so long ago used expletives to refer to the free software
+movement only strengthens my suspicions, although everyone is welcome to draw
+their own conclusions.}, however, seems redundant. The
standard cycle, `clone-modify-create pull request' works the same even
when `clone' is replaced by `download'. Thus, no functionality is
lost. This might change if the popularity of the package unexpectedly
increases.
-On the other hand, GitHub and its cousins are commercial entities,
+On the other hand, GitHub$^{\hbox{\sixpoint\copyright}}$ and its cousins are commercial entities,
whose availability in the future is not guaranteed (nothing is
certain, of course, no matter what distribution method is
chosen). Keeping \splint\ as an archive of a Git repository seems like
diff --git a/support/splint/cweb/references.w b/support/splint/cweb/references.w
index 76dc718d3b..83397c93bb 100644
--- a/support/splint/cweb/references.w
+++ b/support/splint/cweb/references.w
@@ -1,3 +1,19 @@
+@q Copyright 2012-2020, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
@** Bibliography. This list of references is not meant to be
exhaustive or complete. These are merely the papers and the books
mentioned in the body of the program above. Naturally, this project
@@ -6,7 +22,6 @@ to list them all due to time and (human) memory limitations.
{%
\def\BASIX{{B\kern-.7ptA\kern-.7ptS\kern-.3pt\lower1.3pt\hbox{I}\kern-.3pt X}}
-\def\MF{{\tt METAFONT}}
\def\bterm#1{\item{[#1]\namedspot{#1bibref}\quad}\ignorespaces}%
\setbox0=\hbox{[ISO/C11]\quad}
\parindent=0pt
@@ -16,6 +31,9 @@ to list them all due to time and (human) memory limitations.
\centerline{\dinkus}%
\smallskip
+\bterm{ACM}Ronald M.~Baecker, Aaron~Marcus, {\it Human Factors and Typography for More Readable Programs},
+Reading, Massachusetts: Addison-Wesley, 1990, xx+344~pp.
+
\bterm{Ah}Alfred V.~Aho et al., {\it Compilers: Principles,
Techniques, and Tools}, Pearson Education, 2006.
@@ -23,10 +41,16 @@ Techniques, and Tools}, Pearson Education, 2006.
Yacc-compatible Parser Generator}, The Free Software Foundation, 2013.
\url{http://www.gnu.org/software/bison/}
+\bterm{CWEB}Donald E.~Knuth and Silvio Levy, {\it The \CWEB\ System of Structured Documentation},
+Reading, Massachusetts: Addison-Wesley, 1993, iv+227~pp.
+
\bterm{DEK1}Donald E.~Knuth, {\it The \TeX book}, Addison-Wesley Reading, Massachusetts, 1984.
\bterm{DEK2}Donald E.~Knuth {\it The future of \TeX\ and \MF}, TUGboat {\bf 11} (4), p.~489, 1990.
+\bterm{DHB}R.~Kent Dybvig, Robert Hieb, and Carl Bruggeman, {\em Syntactic Abstraction in Scheme},
+Lisp Symb.\ Comput.\ {\bf 5} (4), Dec.~1992, pp.~295--326.
+
\bterm{Do}Jean-luc Doumont, {\it Pascal pretty-printing: an example of ``preprocessing with \TeX''},
TUGboat {\bf 15} (3), 1994---Proceedings of the 1994 TUG Annual Meeting
@@ -58,11 +82,16 @@ pp.~136--140, 2009---Euro\TeX\ 2009 Proceedings.
\bterm{Jo}Derek M.~Jones, {\it The New C Standard: An Economic and
Cultural Commentary}, available at \url{http://www.knosof.co.uk/cbook/cbook.html}.
+\bterm{KR}B.~Kernighan, D.~Ritchie, {\it The \Cee\ Programming Language}, Englewood Cliffs, NJ: Prentice Hall, 1978.
+
\bterm{La}{\it The \.{l3regex} package: regular expressions in \TeX}, The \LaTeX3\ Project.
\bterm{Pa}Vern Paxson et al., {\it Lexical Analysis With Flex, for
Flex~2.5.37}, July~2012. \url{http://flex.sourceforge.net/manual/}.
+\bterm{Ra}Norman Ramsey, {\it Literate programming simplified}, IEEE Software, {\bf 11} (5),
+pp.~97--105, 1994.
+
\bterm{Sh}Alexander Shibakov, {\it Parsers in \TeX\ and using \CWEB\ for general pretty-printing},
TUGboat {\bf 35} (1), 2014, available as part of the documentation supplied with \splint.
diff --git a/support/splint/cweb/so.w b/support/splint/cweb/so.w
new file mode 100644
index 0000000000..d7a7696b8c
--- /dev/null
+++ b/support/splint/cweb/so.w
@@ -0,0 +1,835 @@
+@q Copyright (c) 1990 The Regents of the University of California. @>
+@q All rights reserved. @>
+
+@q This code is derived from software contributed to Berkeley by @>
+@q Vern Paxson. @>
+
+@q The United States Government has rights in this work pursuant @>
+@q to contract no. DE-AC03-76SF00098 between the United States @>
+@q Department of Energy and the University of California. @>
+
+@q This file is part of SPLinT. @>
+
+@q Redistribution and use in source and binary forms, with or without @>
+@q modification, are permitted provided that the following conditions @>
+@q are met: @>
+
+@q 1. Redistributions of source code must retain the above copyright @>
+@q notice, this list of conditions and the following disclaimer. @>
+@q 2. Redistributions in binary form must reproduce the above copyright @>
+@q notice, this list of conditions and the following disclaimer in the @>
+@q documentation and/or other materials provided with the distribution. @>
+
+@q Neither the name of the University nor the names of its contributors @>
+@q may be used to endorse or promote products derived from this software @>
+@q without specific prior written permission. @>
+
+@q THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR @>
+@q IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED @>
+@q WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR @>
+@q PURPOSE. @>
+
+@**The lexer for \ifx\flex\UNDEFINED\.{flex}\else\flex\ \fi syntax.
+\ifx\flex\UNDEFINED
+ \input limbo.sty
+ \input grabstates.sty
+ \immediate\openout\stlist=fil_states.h
+\fi
+The original lexer for the \flex\ input grammar relies on a few rules that use `trailing context'.
+The lexing mechanism implemented by \splint\ cannot process such rules properly in general.
+Fortunately, the rules in question match fixed-length trailing context only, which makes it possible
+to replace them with ordinary patterns and use |yyless()| in the actions.
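+For instance, a rule with fixed-length trailing context such as the
+(purely hypothetical) one below,
+$$
+\hbox to 3in{\.{ab/cd}\hfil\.{\{ foo(); \}}}
+$$
+can be replaced by
+$$
+\hbox to 3in{\.{abcd}\hfil\.{\{ yyless(2); foo(); \}}}
+$$
+where |yyless(2)| keeps the first two characters of the match as the token
+and returns the trailing \.{cd} to the input, just as the original rule would.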
+@(fil.ll@>=
+@G
+%{@> @<Preamble for \flex\ lexer@> @=%}
+ @> @<Options for \flex\ input lexer@> @=
+ @> @<Additional options for \flex\ input lexer@> @=
+ @> @<State definitions for \flex\ input lexer@> @=
+ @> @<Definitions for \flex\ input lexer@> @=
+%%
+ @> @<Postamble for \flex\ input lexer@> @=
+ @> @<Patterns for \flex\ lexer@> @=
+%%
+ @> @<Auxilary code for \flex\ lexer@> @=
+@g
+
+@ @<Preamble for \flex\ lexer@>=
+
+@ There are a few options that are necessary to ensure that the lexer
+functions properly. Some of them (like \.{caseless}) directly
+affect the behavior of the scanner, while others (e.g.~\.{noyy\_top\_state})
+prevent the generation of unnecessary code.
+@<Options for \flex\ input lexer@>=
+@G
+%option caseless nodefault stack noyy_top_state
+%option nostdinit
+@g
+
+@ @<Additional options for \flex\ input lexer@>=
+@G
+%option bison-bridge
+%option noyywrap nounput noinput reentrant
+%option debug
+%option stack
+%option outfile="fil.c"
+@g
+
+@*1 Regular expression and state definitions.
+The lexer uses a large number of states to control its operation. Both
+section~1 and section~2 rules rely on the scanner being in the
+appropriate state. Otherwise (see the \.{symbols.sty} example) the lexer
+may parse the same fragment in the wrong context.
+@<State definitions for \flex\ input lexer@>=
+@G
+%x SECT2 SECT2PROLOG SECT3 CODEBLOCK PICKUPDEF SC CARETISBOL NUM QUOTE
+%x FIRSTCCL CCL ACTION RECOVER COMMENT ACTION_STRING PERCENT_BRACE_ACTION
+%x OPTION LINEDIR CODEBLOCK_MATCH_BRACE
+%x GROUP_WITH_PARAMS
+%x GROUP_MINUS_PARAMS
+%x EXTENDED_COMMENT
+%x COMMENT_DISCARD
+
+@ Somewhat counterintuitively, \flex\ definitions do not {\it always\/} have to be
+fully formed regular expressions. For example, after
+$$
+\hbox to 3in{\flexrenstyle{BOGUS}\hfil\.{\^[a-}}
+$$
+one can form the following action:
+$$
+\hbox to 3in{\flexrenstyle{BOGUS}\.{t]}\hfil\.{;}}
+$$
+although without the `\.{\^}' in the definition of
+`\flexrenstyle{BOGUS}' \flex\ would have put a `\.{)}' inside the
+character class. We will assume such (rather counterproductive) tricks
+are not used. If the definition is not a well-formed regular
+expression, the pretty printing will be suspended.
+@<Definitions for \flex\ input lexer@>=
+@G(fs1)
+WS [[:blank:]]+
+OPTWS [[:blank:]]*
+NOT_WS [^[:blank:]\r\n]
+
+NL \r?\n
+
+NAME ([[:alpha:]_][[:alnum:]_-]*)
+NOT_NAME [^[:alpha:]_*\n]+
+
+SCNAME {NAME}
+
+ESCSEQ (\\([^\n]|[0-7]{1,3}|x[[:xdigit:]]{1,2}))
+
+FIRST_CCL_CHAR ([^\\\n]|{ESCSEQ})
+CCL_CHAR ([^\\\n\]]|{ESCSEQ})
+CCL_EXPR ("[:"^?[[:alpha:]]+":]")
+
+LEXOPT [porkacne]
+
+M4QSTART "[["
+M4QEND "]]"
+
+@ @<Postamble for \flex\ input lexer@>=
+
+@*1 Regular expressions for \flex\ input scanner.
+The code below treats \prodstyle{\%pointer} and \prodstyle{\%array} the same way it treats
+\prodstyle{\%option} for typesetting purposes.
+@<Patterns for \flex\ lexer@>=
+@G(fs2)
+<INITIAL>{
+ ^{WS} {@> @[TeX_( "/flindented@@codetrue/yyBEGIN{CODEBLOCK}/yylexnext" );@]@=}
+ ^"/*" {@> @[TeX_( "/yypushstate{COMMENT}/yylexnext" );@]@=}
+ ^#{OPTWS}line{WS} {@> @[TeX_( "/yypushstate{LINEDIR}/yylexnext" );@]@=}
+ ^"%s"{NAME}? {@> @[TeX_( "/yylexreturnptr{SCDECL}" );@]@=}
+ ^"%x"{NAME}? {@> @[TeX_( "/yylexreturnptr{XSCDECL}" );@]@=}
+ ^"%{".*{NL} {@> @<Start a \Cee\ code section@> @=}
+
+ ^"%top"[[:blank:]]*"{"[[:blank:]]*{NL} {@> @<Begin the \prodstyle{\%top} directive@> @=}
+ ^"%top".* {@> @[TeX_( "/yyfatal{malformed '/harmlesscomment top' directive}" );@] @=}
+
+ {WS} {@> @[;@]/* discard */ @=}
+
+ ^"%%".* {@> @<Start section 2@> @=}
+
+ ^"%pointer".*{NL} {@> @[TeX_( "/flinc@@linenum/yylexreturn{POINTER_OP}" );@]@=}
+ ^"%array".*{NL} {@> @[TeX_( "/flinc@@linenum/yylexreturn{ARRAY_OP}" );@]@=}
+
+ ^"%option" {@> @[TeX_( "/yyBEGIN{OPTION}/yylexreturn{OPTION_OP}" );@]@=}
+
+ ^"%"{LEXOPT}{OPTWS}[[:digit:]]*{OPTWS}{NL} {@> @[TeX_( "/flinc@@linenum/yyflexoptreturn{OPT_DEPRECATED}" );@]@=}
+ ^"%"{LEXOPT}{WS}.*{NL} {@> @[TeX_( "/flinc@@linenum/yyflexoptreturn{OPT_DEPRECATED}" );@]@=}
+
+ ^"%"[^porksexcan{}].* {@> @[TeX_( "/yyfatal{unrecognized '/harmlesscomment' directive: /the/yytext}" );@] @=}
+
+ ^{NAME} {@> @<Copy the name and start a definition@> @=}
+ {SCNAME} @> @[TeX_( "/RETURNNAME" );@] @=
+ ^{OPTWS}{NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]/* allows blank lines in section 1 */@=}
+ {OPTWS}{NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]/* maybe end of comment line */@=}
+}
+
+@ @<Start a \Cee\ code section@>=
+ @[TeX_( "/flinc@@linenum" );@]@;
+ @[TeX_( "/flindented@@codefalse/yyBEGIN{CODEBLOCK}" );@]@;
+ @[TeX_( "/yylexnext" );@]@;
+
+@ Ignore setting |brace_start_line| as it is only used internally to report errors.
+@<Begin the \prodstyle{\%top} directive@>=
+ @[TeX_( "/flinc@@linenum" );@]@;
+ @[TeX_( "/def/flbrace@@depth{1}" );@]@;
+ @[TeX_( "/yypushstate{CODEBLOCK_MATCH_BRACE}/yylexnext" );@]@;
+
+@ @<Start section 2@>=
+ @[TeX_( "/def/flsectnum{2}/def/flbracelevel{0}" );@]@;
+ @[TeX_( "/yyBEGIN{SECT2PROLOG}/yylexreturnptr{SECTEND}" );@]@;
+
+@ @<Copy the name and start a definition@>=
+ @[TeX_( "/fldidadeffalse/yyBEGIN{PICKUPDEF}" );@]@;
+ @[TeX_( "/yylexreturnsym{DEF_OP}" );@]@;
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+<COMMENT>{
+ "*/" {@> @[TeX_( "/yypopstate/yylexnext" );@]@=}
+ "*" {@> @[TeX_( "/yylexnext" );@]@=}
+ {M4QSTART} {@> @[TeX_( "/yylexnext" );@]@=}
+ {M4QEND} {@> @[TeX_( "/yylexnext" );@]@=}
+ [^*\n] {@> @[TeX_( "/yylexnext" );@]@=}
+ {NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]@=}
+}
+
+/* This is the same as \flexsnstyle{COMMENT}, but is discarded rather than output. */
+<COMMENT_DISCARD>{
+ "*/" {@> @[TeX_( "/yypopstate/yylexnext" );@]@=}
+ "*" {@> @[TeX_( "/yylexnext" );@]@=}
+ [^*\n] {@> @[TeX_( "/yylexnext" );@]@=}
+ {NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]@=}
+}
+
+<EXTENDED_COMMENT>{
+ ")" {@> @[TeX_( "/yypopstate/yylexnext" );@]@=}
+ [^\n\)]+ {@> @[TeX_( "/yylexnext" );@]@=}
+ {NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]@=}
+}
+
+<LINEDIR>{
+ \n {@> @[TeX_( "/yypopstate/yylexnext" );@]@=}
+ [[:digit:]]+ {@> @[TeX_( "/fllinenum=/number/yytext/yylexnext" );@]@=}
+ \"[^"\n]*\" {@> @[TeX_( "/yylexnext" );@] /* ignore the file name in the line directives */ @=}
+ . {@> @[TeX_( "/yylexnext" );@] /* ignore spurious characters */ @=}
+}
+
+<CODEBLOCK>{
+ ^"%}".*{NL} {@> @[TeX_( "/flinc@@linenum/yyBEGIN{INITIAL}/yylexnext" );@]@=}
+ {M4QSTART} {@> @[TeX_( "/yylexnext" );@]@=}
+ {M4QEND} {@> @[TeX_( "/yylexnext" );@]@=}
+ . {@> @[TeX_( "/yylexnext" );@]@=}
+ {NL} {@> @[TeX_( "/flinc@@linenum/ifflindented@@code/yyBEGIN{INITIAL}/fi/yylexnext" );@]@=}
+}
+
+<CODEBLOCK_MATCH_BRACE>{
+ "}" {@> @<Pop state if code braces match@> @=}
+ "{" {@> @[TeX_( "/flinc/flbrace@@depth/yylexnext" );@] @=}
+ {NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]@=}
+ {M4QSTART} {@> @[TeX_( "/yylexnext" );@]@=}
+ {M4QEND} {@> @[TeX_( "/yylexnext" );@]@=}
+ [^{}\r\n] {@> @[TeX_( "/yylexnext" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yyfatal{Unmatched '/lbchar'}" );@] @=}
+}
+
+@ @<Pop state if code braces match@>=
+ @[TeX_( "/fldec/flbrace@@depth" );@]@;
+ @[TeX_( "/ifnum/flbrace@@depth=/z@@/relax" );@]@;
+ @[TeX_( " /yypopstate/yylexreturnxchar/n" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /yylexnext" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+<PICKUPDEF>{
+ {WS} {@> @[TeX_( "/yylexnext" );@]@=}
+ {NOT_WS}[^\r\n]* {@> @<Skip trailing whitespace, save the definition@> @=}
+ {NL} {@> @<Complain if not inside a definition, continue otherwise@> @=}
+}
+
+@ @<Skip trailing whitespace, save the definition@>=
+ @[TeX_( "/edef/flnmdef{{/the/yytext}{/the/yytextpure}{/the/yyfmark}{/the/yysmark}}" );@]@;
+ @[TeX_( "/fldidadeftrue/yylexnext" );@]@;
+
+@ @<Complain if not inside a definition, continue otherwise@>=
+ @[TeX_( "/iffldidadef" );@]@;
+ @[TeX_( " /yylval/expandafter{/flnmdef}" );@]@;
+ @[TeX_( " /def/next{/flinc@@linenum/yyBEGIN{INITIAL}/yylexreturn{RE_DEF}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /def/next{/yyfatal{incomplete name definition}}" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+<OPTION>{
+ {NL} {@> @[TeX_( "/flinc@@linenum/yyBEGIN{INITIAL}/yylexnext" );@] @=}
+ {WS} {@> @[TeX_( "/floption@@sensetrue/yylexnext" );@] @=}
+
+ "=" {@> @[TeX_( "/yylexreturnchar" );@]@=}
+
+ no {@> @<Toggle |option_sense|@> @=};
+
+ 7bit {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ 8bit {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ align {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ always-interactive {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ array {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ ansi-definitions {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ ansi-prototypes {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ backup {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ batch {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ bison-bridge {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ bison-locations {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ "c++" {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ caseful|case-sensitive {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ caseless|case-insensitive {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ debug {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ default {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ ecs {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ fast {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ full {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ input {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ interactive {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ lex-compat {@> @<Set |lex_compat|@> @=}
+ posix-compat {@> @<Set |posix_compat|@> @=}
+ main {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ meta-ecs {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ never-interactive {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ perf-report {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ pointer {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ read {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ reentrant {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ reject {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ stack {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ stdinit {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ stdout {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ unistd {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ unput {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ verbose {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ warn {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yylineno {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yymore {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yywrap {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ yy_push_state {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yy_pop_state {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yy_top_state {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ yy_scan_buffer {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yy_scan_bytes {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yy_scan_string {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ yyalloc {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyrealloc {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyfree {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ yyget_debug {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyset_debug {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_extra {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyset_extra {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_leng {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_text {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_lineno {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyset_lineno {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_in {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyset_in {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_out {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyset_out {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_lval {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyset_lval {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyget_lloc {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+ yyset_lloc {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ extra-type {@> @[TeX_( "/yylexreturn{OPT_EXTRA_TYPE}" );@]@=}
+ outfile {@> @[TeX_( "/yylexreturn{OPT_OUTFILE}" );@]@=}
+ prefix {@> @[TeX_( "/yylexreturn{OPT_PREFIX}" );@]@=}
+ yyclass {@> @[TeX_( "/yylexreturn{OPT_YYCLASS}" );@]@=}
+ header(-file)? {@> @[TeX_( "/yylexreturn{OPT_HEADER}" );@]@=}
+ tables-file {@> @[TeX_( "/yylexreturn{OPT_TABLES}" );@]@=}
+ tables-verify {@> @[TeX_( "/yyflexoptreturn{OPT_OTHER}" );@]@=}
+
+ \"[^"\n]*\" {@> @[TeX_( "/edef/flnmstr{{/the/yytext}{/the/yytextpure}}/yylexreturnsym{NAME}" );@]@=}
+
+ (([a-mo-z]|n[a-np-z])[[:alpha:]\-+]*)|. {@> @[TeX_( "/yyfatal{unrecognized /%option: /the/yytext}" );@]@=}
+}
+
+@ @<Toggle |option_sense|@>=
+ @[TeX_( "/iffloption@@sense" );@]@;
+ @[TeX_( " /floption@@sensefalse" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /floption@@sensetrue" );@]@;
+ @[TeX_( "/fi/yylexnext" );@]@;
+
+@ @<Set |lex_compat|@>=
+ @[TeX_( "/iffloption@@sense" );@]@;
+ @[TeX_( " /fllex@@compattrue" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /fllex@@compatfalse" );@]@;
+ @[TeX_( "/fi/yyflexoptreturn{OPT_OTHER}" );@]@;
+
+@ @<Set |posix_compat|@>=
+ @[TeX_( "/iffloption@@sense" );@]@;
+ @[TeX_( " /flposix@@compattrue" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /flposix@@compatfalse" );@]@;
+ @[TeX_( "/fi/yyflexoptreturn{OPT_OTHER}" );@]@;
+
+@ The \flexsnstyle{RECOVER} state is never used for typesetting and is only added for completeness.
+@<Patterns for \flex\ lexer@>=
+@G(fs2)
+<RECOVER>.*{NL} {@> @[TeX_( "/flinc@@linenum/yyBEGIN{INITIAL}/yylexnext" );@] @=}
+
+@ Like \bison, \flex\ allows insertion of \Cee\ code in the middle of the input file.
+\saveparseoutputtrue
+@<Patterns for \flex\ lexer@>=
+@G(fs2)
+<SECT2PROLOG>{
+ ^"%{".* {@> @<Consume the brace and increment the brace level@> @=}
+ ^"%}".* {@> @<Consume the brace and decrement the brace level@> @=}
+
+ ^{WS}.* {@> @[TeX_( "/yylexnext" );@]@=}
+
+ ^{NOT_WS}.* {@> @<Begin section 2, prepare to reread, or ignore braced code@> @=}
+ . {@> @[TeX_( "/yylexnext" );@] @=}
+ {NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@] @=}
+
+ <<EOF>> {@> @[TeX_( "/def/flsectnum{0}/yyterminate" );@] @=}
+}
+
+@ All the code inside is ignored.
+\saveparseoutputfalse
+@<Consume the brace and increment the brace level@>=
+ @[TeX_( "/flinc/flbracelevel/yyless{2}/yylexnext" );@]@;
+
+@ @<Consume the brace and decrement the brace level@>=
+ @[TeX_( "/fldec/flbracelevel/yyless{2}/yylexnext" );@]@;
+
+@ @<Begin section 2, prepare to reread, or ignore braced code@>=
+ @[TeX_( "/ifnum/flbracelevel>/z@@" );@]@;
+ @[TeX_( " /let/next/yylexnext" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /def/next{/yysetbol{/@@ne}/yyBEGIN{SECT2}/yyless{0}/yylexnext}" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ A pattern below (for the character class processing) had to be broken into two lines.
+A symbol ($\odot$@^$\odot$ separator, \flex@>) was inserted to indicate that a break had occurred. The macros for \flex\
+typesetting use a different mechanism from that of \bison\ macros and allow typographic
+corrections to be applied to sections of the \flex\ code represented by various nonterminals.
+These corrections can also be delayed. For the details, an interested reader may consult
+\.{yyunion.sty}.
+@<Patterns for \flex\ lexer@>=
+@G(fs2)
+<SECT2>{
+ ^{OPTWS}{NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@] /* allow blank lines in section 2 */ @=}
+ ^{OPTWS}"%{" {@> @<Start braced code in section 2@> @=}
+ ^{OPTWS}"<" {@> @[TeX_( "/ifflsf@@skip@@ws/else/yyBEGIN{SC}/fi/yylexreturnraw<" );@] @=}
+ ^{OPTWS}"^" {@> @[TeX_( "/yylexreturnraw^" );@] @=}
+ \" {@> @[TeX_( "/yyBEGIN{QUOTE}/yylexreturnxchar/flquotechar" );@] @=}
+ "{"[[:digit:]] {@> @<Process a repeat pattern@> @=}
+ "$"([[:blank:]]|{NL}) {@> @[TeX_( "/yyless{1}/yylexreturnraw/$" );@] @=}
+
+ {WS}"%{" {@> @<Process braced code in the middle of section 2@> @=}
+ {WS}"|".*{NL} {@> @<Process a deferred action@> @=}
+ ^{WS}"/*" {@> @<Process a comment inside a pattern@> @=}
+ ^{WS} @> ; /* allow indented rules */@=
+ {WS} {@> @<Decide whether to start an action or skip whitespace inside a rule@> @=}
+
+ {OPTWS}{NL} {@> @<Finish the line and/or action@> @=}
+
+ ^{OPTWS}"<<EOF>>" |
+ "<<EOF>>" {@> @[TeX_( "/yylexreturnptr{EOF_OP}" );@] @=}
+
+ ^"%%".* {@> @<Start section 3@> @=}
+@t}\vb{\insertraw{\insrulealign{\rulealigntemplate}{\cr\egroup\egroup}}}{@>
+ "["@>@t}\vb{\insertraw{\insparensalign{&}{}}}{@>@=({FIRST_CCL_CHAR}|{CCL_EXPR})@>@t}\vb{\insertraw{\insparensalign{\rlap{$\odot$}\cr&}{}}}{@>@=({CCL_CHAR}|{CCL_EXPR})* {@> @<Start processing a character class@> @=}
+
+ "{-}" {@> @[TeX_( "/yylexreturn{CCL_OP_DIFF}" );@] @=}
+ "{+}" {@> @[TeX_( "/yylexreturn{CCL_OP_UNION}" );@] @=}
+
+ "{"{NAME}"}"[[:space:]]? {@> @<Process a named expression after checking for whitespace at the end@> @=}
+
+ "/*" {@> @<Decide if this is a comment@> @=}
+ "(?#" {@> @<Determine if this is extended syntax or return a parenthesis@> @=}
+ "(?" {@> @<Determine if this is a parametric group or return a parenthesis@> @=}
+ "(" {@> @[TeX_( "/flsf@@push/yylexreturnraw/(" );@] @=}
+ ")" {@> @[TeX_( "/flsf@@pop/yylexreturnraw/)" );@] @=}
+
+ [/|*+?.(){}] {@> @[TeX_( "/yylexreturnchar" );@] @=}
+ . {@> @[TeX_( "/RETURNCHAR" );@] @=}
+}
+
+@ @<Start braced code in section 2@>=
+ @[TeX_( "/def/flbracelevel{1}" );@]@;
+  @[TeX_( "/flindented@@codefalse/fldoing@@codeblocktrue" );@]@;
+ @[TeX_( "/yyBEGIN{PERCENT_BRACE_ACTION}" );@]@;
+ @[TeX_( "/yylexnext" );@]@;
+
+@ @<Process a repeat pattern@>=
+ @[TeX_( "/yyless{1}/yyBEGIN{NUM}" );@]@;
+ @[TeX_( "/iffllex@@compat" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{BEGIN_REPEAT_POSIX}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /ifflposix@@compat" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{BEGIN_REPEAT_POSIX}}" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{BEGIN_REPEAT_FLEX}}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ @<Process braced code in the middle of section 2@>=
+ @[TeX_( "/def/flbracelevel{1}" );@]@;
+ @[TeX_( "/yyBEGIN{PERCENT_BRACE_ACTION}" );@]@;
+ @[TeX_( "/ifflin@@rule" );@]@;
+ @[TeX_( " /fldoing@@rule@@actiontrue" );@]@;
+ @[TeX_( " /flin@@rulefalse" );@]@;
+ @[TeX_( " /def/next{/yylexreturnxchar/n}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /let/next/yylexnext" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ This action has been changed to accommodate the new grammar. The separator (\.{\yl})
+is treated as an ordinary (empty) action.
+@<Process a deferred action@>=
+ @[TeX_( "/ifflsf@@skip@@ws" );@]@;/* whitespace ignored, still inside a pattern */
+ @[TeX_( " /yylessafter{|}" );@]@;
+ @[TeX_( " /let/next/yylexnext" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /flinc@@linenum" );@]@;
+ @[TeX_( " /fldoing@@rule@@actiontrue" );@]@;
+ @[TeX_( " /flin@@rulefalse" );@]@;
+ @[TeX_( " /flcontinued@@actiontrue" );@]@;
+ @[TeX_( " /unput{/n}" );@]@;
+ @[TeX_( " /yyBEGIN{ACTION}" );@]@;
+ @[TeX_( " /edef/next{/nx/yylexreturnxchar/n}" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ @<Process a comment inside a pattern@>=
+ @[TeX_( "/ifflsf@@skip@@ws" );@]@;
+ @[TeX_( " /yypushstate{COMMENT_DISCARD}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /unput{//*}" );@]@;
+ @[TeX_( " /def/flbracelevel{0}" );@]@;
+ @[TeX_( " /flcontinued@@actionfalse" );@]@;
+ @[TeX_( " /yyBEGIN{ACTION}" );@]@;
+ @[TeX_( "/fi/yylexnext" );@]@;
+
+@ @<Decide whether to start an action or skip whitespace inside a rule@>=
+ @[TeX_( "/ifflsf@@skip@@ws" );@]@;
+ @[TeX_( " /let/next/yylexnext" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /def/flbracelevel{0}" );@]@;
+ @[TeX_( " /flcontinued@@actionfalse" );@]@;
+ @[TeX_( " /yyBEGIN{ACTION}" );@]@;
+ @[TeX_( " /ifflin@@rule" );@]@;
+ @[TeX_( " /fldoing@@rule@@actiontrue" );@]@;
+ @[TeX_( " /flin@@rulefalse" );@]@;
+ @[TeX_( " /def/next{/yylexreturnxchar/n}" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /let/next/yylexnext" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ @<Finish the line and/or action@>=
+ @[TeX_( "/ifflsf@@skip@@ws" );@]@;
+ @[TeX_( " /flinc@@linenum" );@]@;
+ @[TeX_( " /let/next/yylexnext" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /def/flbracelevel{0}" );@]@;
+ @[TeX_( " /flcontinued@@actionfalse" );@]@;
+ @[TeX_( " /yyBEGIN{ACTION}" );@]@;
+ @[TeX_( " /unput{/n}" );@]@;
+ @[TeX_( " /ifflin@@rule" );@]@;
+ @[TeX_( " /fldoing@@rule@@actiontrue" );@]@;
+ @[TeX_( " /flin@@rulefalse" );@]@;
+ @[TeX_( " /def/next{/yylexreturnxchar/n}" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /let/next/yylexnext" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ @<Start section 3@>=
+ @[TeX_( "/def/flsectnum{3}" );@]@;
+ @[TeX_( "/yyBEGIN{SECT3}" );@]@;
+ @[TeX_( "/yyterminate" );@]@;
+
+@ @<Start processing a character class@>=
+ @[TeX_( "/edef/flnmstr{/the/yytext}" );@]@;
+ @[TeX_( "/yyless{1}" );@]@;
+ @[TeX_( "/yyBEGIN{FIRSTCCL}" );@]@;
+ @[TeX_( "/yylexreturnraw[" );@]@;
+
+@ Return a special \prodstyle{CHAR} and put the whitespace back into the input.
+The braces and the possible trailing whitespace will be dealt with by the typesetting code.
+@<Process a named expression after checking for whitespace at the end@>=
+ @[TeX_( "/edef/flend@@ch{/the/yytextlastchar}" );@]@;
+ @[TeX_( "/ifnum/flend@@ch=`/}/relax" );@]@;
+ @[TeX_( " /flend@@is@@wsfalse" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /flend@@is@@wstrue" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/toksa/expandafter{/astformat@@flnametok}" );@]@;
+ @[TeX_( "/let/astformat@@flnametok/empty" );@]@;
+ @[TeX_( "/edef/next{/yylval{{/nx/flnametok{/the/yytext}{/the/toksa}}{}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/ifflend@@is@@ws" );@]@;
+ @[TeX_( " /unput{ }" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yylexreturn{CHAR}" );@]@;
+
+@ @<Decide if this is a comment@>=
+ @[TeX_( "/ifflsf@@skip@@ws" );@]@;
+ @[TeX_( " /yypushstate{COMMENT_DISCARD}" );@]@;
+ @[TeX_( " /yylexnext" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /yyless{1}" );@]@;
+ @[TeX_( " /yylexreturnraw//" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Determine if this is extended syntax or return a parenthesis@>=
+ @[TeX_( "/iffllex@@compat" );@]@;
+ @[TeX_( " /def/next{/yyless{1}/flsf@@push/yylexreturnraw(}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /ifflposix@@compat" );@]@;
+ @[TeX_( " /def/next{/yyless{1}/flsf@@push/yylexreturnraw(}" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /def/next{/yypushstate{EXTENDED_COMMENT}}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+@ @<Determine if this is a parametric group or return a parenthesis@>=
+ @[TeX_( "/flsf@@push" );@]@;
+ @[TeX_( "/iffllex@@compat" );@]@;
+ @[TeX_( " /def/next{/yyless{1}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /ifflposix@@compat" );@]@;
+ @[TeX_( " /def/next{/yyless{1}}" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /def/next{/yyBEGIN{GROUP_WITH_PARAMS}}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+ @[TeX_( "/yylexreturnraw(" );@]@;
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+<SC>{
+ {OPTWS}{NL}{OPTWS} {@> @[TeX_( "/flinc@@linenum" );@] /* allow blank lines and continuations */@+@=}@>
+ [,*] {@> @[TeX_( "/yylexreturnchar" );@] @=}@>
+ ">" {@> @[TeX_( "/yyBEGIN{SECT2}/yylexreturnchar" );@] @=}
+ ">"^ {@> @[TeX_( "/yyBEGIN{CARETISBOL}/yyless{1}/yylexreturnraw>" );@] @=}
+ {SCNAME} {@> @[TeX_( "/RETURNNAME" );@] @=}
+ . {@> @[TeX_( "/yyfatal{bad <start condition>: /the/yytext}" );@] @=}
+}
+
+<CARETISBOL>"^" {@> @[TeX_( "/yyBEGIN{SECT2}/yylexreturnchar" );@] @=}
+
+<QUOTE>{
+ [^"\n] {@> @[TeX_( "/RETURNCHAR" );@] @=}
+ \" {@> @[TeX_( "/yyBEGIN{SECT2}/yylexreturnxchar/flquotechar" );@] @=}
+
+ {NL} {@> @[TeX_( "/yyfatal{missing quote}" );@] @=}
+}
+
+<GROUP_WITH_PARAMS>{
+ ":" {@> @[TeX_( "/yyBEGIN{SECT2}/yylexnext" );@] @=}
+ "-" {@> @[TeX_( "/yyBEGIN{GROUP_MINUS_PARAMS}/yylexnext" );@] @=}
+ i {@> @[TeX_( "/flsf@@case@@instrue/yylexnext" );@] @=}
+ s {@> @[TeX_( "/flsf@@dot@@alltrue/yylexnext" );@] @=}
+ x {@> @[TeX_( "/flsf@@skip@@wstrue/yylexnext" );@] @=}
+}
+
+<GROUP_MINUS_PARAMS>{
+ ":" {@> @[TeX_( "/yyBEGIN{SECT2}/yylexnext" );@] @=}
+ i {@> @[TeX_( "/flsf@@case@@insfalse/yylexnext" );@] @=}
+ s {@> @[TeX_( "/flsf@@dot@@allfalse/yylexnext" );@] @=}
+ x {@> @[TeX_( "/flsf@@skip@@wsfalse/yylexnext" );@] @=}
+}
+
+<FIRSTCCL>{
+ "^"[^-\]\n] {@> @[TeX_( "/yyBEGIN{CCL}/yyless{1}/yylexreturnraw^" );@] @=}
+ "^"("-"|"]") {@> @[TeX_( "/yyless{1}/yylexreturnraw^" );@] @=}
+ . {@> @[TeX_( "/yyBEGIN{CCL}/RETURNCHAR" );@] @=}
+}
+
+<CCL>{
+ -[^\]\n] {@> @[TeX_( "/yyless{1}/yylexreturnraw-" );@] @=}
+ [^\]\n] {@> @[TeX_( "/RETURNCHAR" );@] @=}
+ "]" {@> @[TeX_( "/yyBEGIN{SECT2}/yylexreturnchar" );@] @=}
+ .|{NL} {@> @[TeX_( "/yyfatal{bad character class}" );@] @=}
+}
+
+<FIRSTCCL,CCL>{
+ "[:alnum:]" {@> @[TeX_( "/xcclreturn{CCE_ALNUM}" );@] @=}
+ "[:alpha:]" {@> @[TeX_( "/xcclreturn{CCE_ALPHA}" );@] @=}
+ "[:blank:]" {@> @[TeX_( "/xcclreturn{CCE_BLANK}" );@] @=}
+ "[:cntrl:]" {@> @[TeX_( "/xcclreturn{CCE_CNTRL}" );@] @=}
+ "[:digit:]" {@> @[TeX_( "/xcclreturn{CCE_DIGIT}" );@] @=}
+ "[:graph:]" {@> @[TeX_( "/xcclreturn{CCE_GRAPH}" );@] @=}
+ "[:lower:]" {@> @[TeX_( "/xcclreturn{CCE_LOWER}" );@] @=}
+ "[:print:]" {@> @[TeX_( "/xcclreturn{CCE_PRINT}" );@] @=}
+ "[:punct:]" {@> @[TeX_( "/xcclreturn{CCE_PUNCT}" );@] @=}
+ "[:space:]" {@> @[TeX_( "/xcclreturn{CCE_SPACE}" );@] @=}
+ "[:upper:]" {@> @[TeX_( "/xcclreturn{CCE_UPPER}" );@] @=}
+ "[:xdigit:]" {@> @[TeX_( "/xcclreturn{CCE_XDIGIT}" );@] @=}
+
+ "[:^alnum:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_ALNUM}" );@] @=}
+ "[:^alpha:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_ALPHA}" );@] @=}
+ "[:^blank:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_BLANK}" );@] @=}
+ "[:^cntrl:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_CNTRL}" );@] @=}
+ "[:^digit:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_DIGIT}" );@] @=}
+ "[:^graph:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_GRAPH}" );@] @=}
+ "[:^lower:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_LOWER}" );@] @=}
+ "[:^print:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_PRINT}" );@] @=}
+ "[:^punct:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_PUNCT}" );@] @=}
+ "[:^space:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_SPACE}" );@] @=}
+ "[:^upper:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_UPPER}" );@] @=}
+ "[:^xdigit:]" {@> @[TeX_( "/xcclreturn{CCE_NEG_XDIGIT}" );@] @=}
+ {CCL_EXPR} {@> @[TeX_( "/yyfatal{bad character class expression: /the/yytext}" );@] @=}
+}
+
+<NUM>{
+ [[:digit:]]+ {@> @[TeX_( "/yylexreturnval{NUMBER}" );@] @=}
+
+ "," {@> @[TeX_( "/yylexreturnchar" );@] @=}
+ "}" {@> @<Finish the repeat pattern@> @=}
+ . {@> @[TeX_( "/yyfatal{bad character inside {}'s}" );@] @=}
+
+ {NL} {@> @[TeX_( "/yyfatal{missing /nx/}}" );@] @=}
+}
+
+@ @<Finish the repeat pattern@>=
+ @[TeX_( "/yyBEGIN{SECT2}" );@]@;
+ @[TeX_( "/iffllex@@compat" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{END_REPEAT_POSIX}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /ifflposix@@compat" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{END_REPEAT_POSIX}}" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{END_REPEAT_FLEX}}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( "/fi/next" );@]@;
+
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+<PERCENT_BRACE_ACTION>{
+ {OPTWS}"%}".* {@> @[TeX_( "/def/flbracelevel{0}/yylexnext" );@] @=}
+
+ <ACTION>"/*" {@> @[TeX_( "/yypushstate{COMMENT}/yylexnext" );@] @=}
+
+ <CODEBLOCK,ACTION>{
+ "reject" {@> @[TeX_( "/yylexnext" );@]@=}
+ "yymore" {@> @[TeX_( "/yylexnext" );@]@=}
+ }
+
+ {M4QSTART} {@> @[TeX_( "/yylexnext" );@]@=}
+ {M4QEND} {@> @[TeX_( "/yylexnext" );@]@=}
+ . {@> @[TeX_( "/yylexnext" );@]@=}
+ {NL} {@> @<Process a newline inside a braced group@> @=}
+}
+
+@ This action has been modified to output \prodstyle{'\\n'}.
+@<Process a newline inside a braced group@>=
+ @[TeX_( "/flinc@@linenum" );@]@;
+ @[TeX_( "/ifnum/flbracelevel=/z@@" );@]@;
+ @[TeX_( " /iffldoing@@rule@@action" );@]@;
+ @[TeX_( " /yylexreturnxchar/n" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /yylexnext" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( " /fldoing@@rule@@actionfalse" );@]@;
+ @[TeX_( " /fldoing@@codeblockfalse" );@]@;
+ @[TeX_( " /yyBEGIN{SECT2}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /iffldoing@@codeblock" );@]@;
+ @[TeX_( " /ifflindented@@code" );@]@;
+ @[TeX_( " /fldoing@@rule@@actionfalse" );@]@;
+ @[TeX_( " /fldoing@@codeblockfalse" );@]@;
+ @[TeX_( " /yyBEGIN{SECT2}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( " /yylexnext" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+/* |reject| and |yymore()| are checked for above, in \flexsnstyle{PERCENT\_BRACE\_ACTION} */
+<ACTION>{
+ "{" {@> @[TeX_( "/flinc/flbracelevel/yylexnext" );@] @=}
+ "}" {@> @[TeX_( "/fldec/flbracelevel/yylexnext" );@] @=}
+ {M4QSTART} {@> @[TeX_( "/yylexnext" );@]@=}
+ {M4QEND} {@> @[TeX_( "/yylexnext" );@]@=}
+ [^[:alpha:]_{}"'/\n\[\]]+ {@> @[TeX_( "/yylexnext" );@]@=}
+ [\[\]] {@> @[TeX_( "/yylexnext" );@]@=}
+ {NAME} {@> @[TeX_( "/yylexnext" );@]@=}
+ "'"([^'\\\n]|\\.)*"'" {@> @[TeX_( "/yylexnext" );@]@=}
+ \" {@> @[TeX_( "/yyBEGIN{ACTION_STRING}/yylexnext" );@]@=}
+ {NL} {@> @<Process a newline inside an action@> @=}
+ . {@> @[TeX_( "/yylexnext" );@]@=}
+}
+
+@ This action has been modified to output \prodstyle{'\\n'}.
+@<Process a newline inside an action@>=
+ @[TeX_( "/flinc@@linenum" );@]@;
+ @[TeX_( "/ifnum/flbracelevel=/z@@" );@]@;
+ @[TeX_( " /iffldoing@@rule@@action" );@]@;
+ @[TeX_( " /yylexreturnxchar/n" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /yylexnext" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( " /fldoing@@rule@@actionfalse" );@]@;
+ @[TeX_( " /yyBEGIN{SECT2}" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+<ACTION_STRING>{
+ [^"\\\n]+ {@> @[TeX_( "/yylexnext" );@] @=}
+ \\. {@> @[TeX_( "/yylexnext" );@] @=}
+ {NL} {@> @[TeX_( "/flinc@@linenum/yyBEGIN{ACTION}/yylexnext" );@] @=}
+ \" {@> @[TeX_( "/yyBEGIN{ACTION}/yylexnext" );@] @=}
+ . {@> @[TeX_( "/yylexnext" );@] @=}
+}
+
+<COMMENT,COMMENT_DISCARD,ACTION,ACTION_STRING><<EOF>> {@> @[TeX_( "/yyfatal{EOF encountered inside an action}" );@] @=}
+
+<EXTENDED_COMMENT,GROUP_WITH_PARAMS,GROUP_MINUS_PARAMS><<EOF>> {@> @[TeX_( "/yyfatal{EOF encountered inside pattern}" );@] @=}
+
+<SECT2,QUOTE,FIRSTCCL,CCL>{ESCSEQ} {@> @<Process an escaped sequence@> @=}
+
+@ @<Process an escaped sequence@>=
+ @[TeX_( "/ifnum/YYSTART=/number/csname flexstate/parsernamespace FIRSTCCL/endcsname/relax" );@]@;
+ @[TeX_( " /yyBEGIN{CCL}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/RETURNCHAR" );@]@;
+
+@ @<Patterns for \flex\ lexer@>=
+@G(fs2)
+<SECT3>{
+ {M4QSTART} {@> @[TeX_( "/yylexnext" );@] @=}
+ {M4QEND} {@> @[TeX_( "/yylexnext" );@] @=}
+ [^\[\]\n]*(\n?) {@> @[TeX_( "/yylexnext" );@] @=}
+ (.|\n) {@> @[TeX_( "/yylexnext" );@] @=}
+ <<EOF>> {@> @[TeX_( "/def/flsectnum{0}/yyterminate" );@] @=}
+}
+
+<*>.|\n {@> @[TeX_( "/yyfatal{bad character: /the/yytext}" );@] @=}
+
+@ @<Auxilary code for \flex\ lexer@>=
+void define_all_states( void ) {
+ @<Collect state definitions for the \flex\ lexer@>@;
+}
+
+@ @<Collect state definitions for the \flex\ lexer@>=
+#define _register_name( name ) @[Define_State( #name, name )@]
+#include "fil_states.h"
+#undef _register_name
+
diff --git a/support/splint/cweb/splint.w b/support/splint/cweb/splint.w
index 05fe5c42fc..e50e090c32 100644
--- a/support/splint/cweb/splint.w
+++ b/support/splint/cweb/splint.w
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -13,25 +13,1806 @@
%
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\newwrite\gindex
+%
+\newwrite\gindex % index entries generated by the various parsers, \prodstyle{...} commands, etc.
+%
+\input limbo.sty
+\input frontmatter.sty
+\def\optimization{5}
+\input yy.sty
+\modenormal
+\input noweb.sty
+ \xreflocaltrue
+ \readlxrefs % read the reference file, if it exists, before any sections appear
+ \newwrite\xrefstream % references for noweb like style of sectioning.
+ \immediate\openout\xrefstream=\jobname.xxr
+% multi-column output
+\input dcols.sty
+\topskip=9pt % this is a purely aesthetic choice, also negating the \raggedbottom
+ % option in cwebmac.tex
+% set the typesetting of various token groups
+\let\currentparsernamespace\parsernamespace
+\let\currenttokeneq\tokeneq
+ \let\parsernamespace\mainnamespace
+ \let\hostparsernamespace\mainnamespace % for \nameproc in tokeneqpretty
+ \let\tokeneq\tokeneqpretty
+ \let\optstrextra\optstrextraesc
+ %\traceprettytokenstrue
+ \input bo.tok % re-use token equivalence table to set the typesetting of tokens
+ \input btokenset.sty % adjust the typesetting of some tokens
+ \let\parsernamespace\flexnamespace
+ \let\hostparsernamespace\flexnamespace
+ \input fo.tok
+ \input ftokenset.sty % ... for the flex input lexer
+ \let\parsernamespace\flexpseudorenamespace
+ \let\hostparsernamespace\flexpseudorenamespace
+ \input fretokenset.sty % regular expression names
+ % index entries
+ \let\parsernamespace\indexpseudonamespace
+ \input yypretty.sty
+ \prettywordpair{TOKEN}{{\tt TOKEN} {\rm(example)}}%
+ \prettywordpair{token}{{\tt "token"} {\rm(example)}}%
+\let\tokeneq\currenttokeneq
+\let\parsernamespace\currentparsernamespace
+\let\hostparsernamespace\mainnamespace % the namespace where tokens are looked up
+ % by \nameproc and friends for typesetting purposes
+%
+\immediate\openout\exampletable=\jobname.exl % file for parser output examples
+
+\def\nontitle#1{{\ttl #1}}
+\def\cite[#1]{%
+ \def\next{#1}\setbox0=\hbox{l}%
+ [\ifx\next\empty$\,$\hbox{\vrule width\wd0 height\ht0 depth\dp0}$\,$\else \locallink{#1bibref}#1\endlink\fi]%
+}
+
+\let\N\textN
+\let\N\chapterN
+\let\M\textM
+
+\defreserved{Y}{\.{Y}}
+\showlastactiontrue
+
+@**Introduction.
+\setupfootnotes
+\splint\footnote{I was tempted to call the package {\tt ParLALRgram}
+which stands for Parsing {\sc LALR} Grammars or {\tt PinT} for
+`Parsing in \TeX' but both sounded too generic.} (Simple Parsing and
+Lexing in \TeX, or, following the great GNU
+tradition of creating recursive names, \splint\ Parses Languages
+in \TeX) is a system (or
+rather a m\'elange of systems) designed to
+facilitate the development of parsing macros in \TeX\ and (to a lesser
+degree) to assist one in documenting parsers written in other languages. As
+an application, parsers for \bison\ and \flex\ input file syntax have been
+developed, along with a macro collection that makes it possible to
+design and pretty print\footnote{The term {\it pretty printing\/} is used here in
+its technical sense as one might find that there is nothing pretty about
+the output of the parsing routines presented in this document.}
+\bison\ grammars and \flex\ automata using \CWEB. The \.{examples}
+directory contains a few other parsers designed to pretty print
+various languages (among them is \ld, the language of the \GNU\
+linker).
+
+@s TeX_ TeX
+@s TeXa TeX
+@s TeXb TeX
+@s TeXf TeX
+@s TeXfo TeX
+@s TeXao TeX
+
+@*1 \eatone{CWEB}\CWEB\ and literate programming.
+Writing software in \CWEB\ involves two programs. The first of these is
+\CTANGLE, which outputs the actual code, intended to be in
+\Cee. In reality, \CTANGLE\ cares very little about the language it
+produces. Among the exceptions are \Cee\ comments and |@[#line@]| directives that might
+confuse lesser software, but \bison\ is all too happy to swallow them
+(there are also some \Cee\ specific constructs that \CTANGLE\ tries to
+recognize). \CTANGLE's main function is to rearrange the text of the
+program as written by the programmer (in a way that, hopefully,
+emphasizes the internal logic of the code) into an appropriate
+sequence (e.g.~all variable declarations must textually precede their
+use). All that is required to adapt \CTANGLE\ to produce \bison\
+output is some very rudimentary pre- and post-processing.
+
+Our main concern is thus \CWEAVE, which not only pretty prints the
+program but also creates an index, cross-references all the
+sections, etc. Getting \CWEAVE\ to pretty print a language other than
+\Cee\ requires some additional attention. A true digital warrior would
+probably try to decipher \CWEAVE's output `in the raw' but, alas, my
+WebFu is not that strong. The loophole comes in the form of a rarely
+(for a good reason) used \CWEB\ command: the verbatim (\.{@@=...@@>})
+output. The material to be output by this construct undergoes minimal
+processing and is put inside \.{\\vb\{}$\ldots$\.{\}}. All that is
+needed now is a way to process this virtually straight text inside \TeX.
+
+This manual, as well as nearly every other document that accompanies
+\splint, is itself a source for a computer program (or, as is the case
+with this document, several programs) that is extracted using
+\CTANGLE. We refer an interested reader to \cite[CWEB] for a detailed
+description of the syntax and use patterns of \CWEB. The following is
+merely a brief overview of the approach.
+
+Every \CWEB\ document is split into {\em sections}, each divided into
+three parts (any one of which can be empty): the \TeX\ part, the middle part, and
+the \Cee\ part (which should more appropriately be called the {\em code
+part\/}). The code part of each\footnote{With the exception of the nameless \.{@@c}
+sections.} section carries a name for cross referencing
+purposes. The sections themselves are automatically numbered by \CWEAVE\ and
+their code parts may be referenced from other sections, as well
+as included in other sections' code parts using \CWEB's cross referencing
+syntax (such as |@<A production@>|). Using the same name for the \Cee\
+portion in several sections has the effect of merging the
+corresponding code fragments. When the section with such a name is
+used (included) later, all of the concatenated fragments are included
+as well, even the ones that appear after the point in the \CWEB\
+document where such inclusion takes place.
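+As a small (made up) illustration, the two sections sketched below contribute
+to the same named code part, so a reference to \.{@@<Collect the totals@@>}
+anywhere in the program stands for both statements, concatenated in the
+order of their appearance in the source:
+\begindemo
+^@@ The first piece.
+^@@<Collect the totals@@>=
+^  total = 0;
+^@@ The second piece, possibly many pages later.
+^@@<Collect the totals@@>=
+^  total += subtotal;
+\enddemo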
+
+The original \CWEB\ macros (from \.{cwebmac.tex}) used the numbers
+generated by \CWEAVE\ to refer to specific sections. This was true
+for the table of contents, as well as the index entries. The macros
+used by \splint\ adopt a different convention, proposed by N.~Ramsey
+for his literate programming software, \noweb. In the new system
+(which will be referred to as the \noweb\ style of cross referencing),
+each section is labelled by the page number where it starts and an
+alphabetic character that indicates the order of appearance of the
+section on the page. Also following \noweb, the new macros display
+links between the fragments of the same section in the margins. This
+allows for quicker navigation between sections of the code and lets
+the reader get a quick overview of what gets `collected' in a given
+section.
+
+The top level (\.{@@**}) sections, introducing
+major portions of the code, have also been given a more prominent
+appearance and carry a chapter number in addition to the \noweb\
+style section number (the latter is used for cross references, as the
+chapter number gives no indication as to where the said chapter is
+located).
+
+\CWEB\ also generates an {\em index\/} of all the identifiers (with
+some exceptions, such as single letter names) appearing in the
+\Cee\ portion of each section, {\em except\/} those that appear inside
+the {\em verbatim@^verbatim block@>\/} portions of the code
+(i.e.~between \.{@@=} and \.{@@>}). Since \splint\ uses the verbatim blocks
+extensively, additional indexing facilities have been implemented to
+provide indexing for the non-\Cee\ languages handled by various
+\splint\ parsers.
+
+@*1 Pretty (and not so pretty) printing.
+Pretty-printing can be narrowly defined as a way to organize the
+presentation of the program's text. The range of visual devices used
+for this purpose is usually limited to indentation and discrete line
+skips, to mimic the capabilities of an old computer terminal. Some
+authors (see~\cite[ACM]) have replaced the term pretty printing with
+{\em program visualization\/} to refer to a much broader range of
+graphic tools for translating the code (and its meaning) into a richer
+medium. This manual uses the terms {\em pretty printing\/} and {\em
+program visualization\/} interchangeably.
+
+Pretty printing in the broader sense above has been the subject of
+research for some time. The monograph~\cite[ACM] develops a methodical
+(if not formalized) approach to the design of visualization frameworks
+for programming languages (although the main focus is on procedural
+\Cee-like languages).
+
+A number of papers about pretty printing have appeared since,
+extending the research to new languages, and suggesting new
+visualization rules.
+Unfortunately, most of this research is driven by rules of
+thumb and anecdotes (the approach fully embraced by this manual),
+although there have been a few rigorous studies investigating
+isolated visualization techniques (see, for example, the discussion
+of variable declaration placement in~\cite[Jo]).
+
+Perhaps the only firm conclusion one can draw from this discussion is
+that {\em writing\/} the code and {\em reading\/} it are very different
+activities, so facilitating the former may in turn make the latter more
+difficult and vice versa. Some well known languages try to arrive at a
+compromise where the syntax forces a certain style of
+presentation on the programmer. An example of a successful language in
+this group is Python with its meaningful white space. The author does
+not share the enthusiasm some programmers express for this approach.
+
+On the other hand, a language like \Cee\ does not enforce any
+presentation format\footnote{The `feature' so masterfully exploited by
+the International Obfuscated \Cee\ Code Contest ({\sc IOCCC})
+participants.}. The authors of
+\Cee\ even remarked that semicolons and braces were merely a nod to
+the compiler (or, one might add, static analysis software,
+see~\cite[KR]). It may thus seem reasonable to replace such redundant
+syntax elements with different typographic devices (such
+as judiciously chosen skips and indentation, or the choice of fonts)
+when (pretty) printing the code.
+
+Even the critics of pretty printing
+usually concede that well indented code is easier to read. The practice
+of using different typefaces to distinguish between various
+syntactic elements (such as reserved words and general identifiers) is
+a subject of some controversy, although not as pronounced as some of
+the more drastic approaches (such as completely replacing the brace
+pairs with indentation as practiced by \splint\ for \bison\ input or
+by the authors of~\cite[ACM] for the control statements in \Cee).
+
+The goal of \splint\ was not to force any particular `pretty printing
+philosophy' on the programmer (although, if one uses the macros `as
+is', some form of quiet approval is assumed $\ldots$) but rather to
+provide one with the tools necessary to implement one's own vision of
+making the code readable.
+
+One tacit assumption made by the author is that an integral part of
+any pretty printing strategy is extracting (some) meaning from the raw
+text. This is done by {\em parsing\/} the program, the subject we
+discuss next. It should be said that it is the parser design in \TeX\
+that \splint\ aims to facilitate, with pretty printing being merely an
+important application.
+
+@*1 Parsing and parsers.
+At an abstract level, a {\em parser@^parser@>\/} is just a routine
+that transforms text. Naturally, not every possible transformation is
+beneficial, so, informally, the value of a parser lies in its ability
+to expose some {\em meaning\/} in the text. If valid texts are reduced
+to a small finite set (while each text can be arbitrarily long) one
+can conceivably write a primitive string matching algorithm that
+recognizes whether any given input is an element of such set, and if
+it is, which one. Such `parsers' would be rather limited and are only
+mentioned to illustrate the point that, in general, the texts being
+parsed are not required to follow any particular specification.
+
+In practice, however, real world parsers rely on the
+presence of some structure in the input to do their work. The latter
+can be introduced by supplying a formal (computable) description of
+every valid input. The `rigidity' of this specification directly
+affects the sophistication of the parsing algorithm required to
+process a valid input (or reject an invalid one).
+
+Parsing algorithms normally follow a model where the text is processed
+a few symbols at a time and the information about the symbols
+already seen is carried in some easily accessible form. `A few symbols at a time'
+often translates to `at most one symbol', while `easily accessible'
+reduces to using a stack-like data structure for bookkeeping.
+
+A popular way of specifying {\em structure\/} is by using a {\em
+formal grammar@^grammar@>}\footnote{While popular, formal grammars are
+not the only way of describing a language. For example, `powers of $2$
+presented in radix $3$' is a specification that cannot be defined by a
+context-free grammar, although it is possible to write a (very complex)
+grammar for it.} that essentially expresses how some (preferably
+meaningful) parts of the text relate to other parts. Keeping with the
+principle of making the information about the seen portions of the
+input easily accessible, practical grammars are normally required to
+express the meaning of a fragment in a manner that does not depend
+on the input that surrounds the fragment (i.e.~to be {\em
+context-free@^context-free@>}). Real-world languages rarely satisfy
+this requirement\footnote{Processing |typedef|'s in \Cee\ is a well
+known case of such a language defect.} thus presenting a challenge to
+parser generating software that assumes the language is context-free.
+
+Even the task of parsing all context-free languages is too ambitious
+in most practical scenarios, so further limitations on the grammar are
+normally imposed. One may require that the next action of the parsing
+algorithm must depend exclusively on the next symbol seen and one of
+the finitely many {\em states\/} the parser may be in. The action here
+simply refers to the choice of the next state, as well as the possible
+decision to consume more input or output a portion of the {\em
+abstract syntax tree@^abstract syntax tree@>\/} which is discussed
+below.
+
+The same language may have more than one grammar and the choice of the
+latter normally has a profound effect on the selection of
+the parsing algorithm. Without getting too deep into the parsing
+theory, consider the following simple sketch.
+\medskip
+\beginprod
+\format{\inline\flatten}
+pexp:
+ '(' pexp ')' \
+ ` astring \
+;
+%
+astring:
+ \
+ ` '*' astring \
+;
+\endprod
+\medskip
+\noindent Informally, the language consists of `strings of $n$ \.{*}'s
+nested $m$ parentheses deep'. After parsing such a string, one might
+be interested in the values of $m$ and $n$ (for the input \.{((**))},
+say, $m=2$ and $n=2$).
+
+The three
+states the parser may be in are `start', `parsing \prodstyle{pexp}' and
+`parsing \prodstyle{astring}'. A quick glance at the grammar above
+shows that switching between the states is straightforward (we omit
+the discussion of the `start' state for brevity):
+if the next symbol is \.{(}, parse the next~\prodstyle{pexp},
+otherwise, if the next symbol is \.{*}, parse~\prodstyle{astring}.
+Finally, if the next symbol is \.{)} and we
+are parsing~\prodstyle{pexp}, finish parsing it and look for the next
+input, otherwise, we are parsing~\prodstyle{astring}, finish parsing
+it, make it a~\prodstyle{pexp}, finish parsing a~\prodstyle{pexp}
+started by a parenthesis, and look for more input. This unnecessarily
+long (as well as incomplete and imprecise) description serves
+to present the simple fact that the
+parsing states are most naturally represented by individual {\em
+functions\/} resulting in what is known as a {\em recursive descent
+parser@^recursive descent parser@>\/} in which the call stack is the
+`data structure' responsible for keeping track of the parser's
+state. One disadvantage of the algorithm above is that the maximal
+depth of the call stack reaches $m+n$ which may present a problem for
+longer strings.
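+A minimal sketch of such a parser, written in \Cee, might look as follows
+(this fragment is not part of \splint: the names |next| and |parens| are
+made up for this illustration, |parens()| handles the parenthesized
+alternative of \prodstyle{pexp}, and all error handling is omitted):
+\begindemo
+^int m, n;                /* nesting depth and star count */
+^const char *next;        /* points at the current input character */
+^void pexp(void);         /* forward declaration */
+^/* astring : '*' astring or empty */
+^void astring(void) { if (*next == '*') { next++; astring(); n++; } }
+^/* the '(' pexp ')' alternative of pexp */
+^void parens(void) { next++; pexp(); if (*next == ')') next++; m++; }
+^/* pexp : '(' pexp ')' or astring */
+^void pexp(void) { if (*next == '(') parens(); else astring(); }
+\enddemo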
+
+Computing $m$ and $n$ above now reduces to incrementing an appropriate
+variable upon exiting the corresponding function. More important,
+however, is the observation that this parsing algorithm can be
+extracted from the grammar in a very straightforward fashion. To
+better illustrate the r\^ole of the grammar in the choice of the
+parsing algorithm, consider the following syntax for the same
+language:
+\medskip
+\beginprod
+\format{\inline\flatten}
+pexp:
+ '(' pexp ')' \
+ ` astring \
+;
+%
+astring:
+ \
+ ` astring '*' \
+;
+\endprod
+\medskip
+\noindent While the language is unchanged, so the algorithm
+above still works, the lookahead tokens are not {\em immediately\/}
+apparent upon looking at the productions. Some preprocessing must take
+place before one can decide on the choice of parser states and the
+appropriate lookahead tokens. Such algorithms indeed exist and result
+in what is known as an {\sc LR} parser for the fragment above
+(actually, a simpler {\sc LALR} parser may be built for this
+grammar\footnote{Both of these algorithms will use the parser stack
+more efficiently, effectively resolving the `call stack depth' issue
+mentioned earlier.}). One can see that some grammar types may make
+the selection of the parsing algorithm more involved. Since \splint\ relies
+on \bison\ for the generation of the parsing algorithm, one must
+ensure that the grammar is {\sc LALR}$(1)$\footnote{The newest versions of
+\bison\ are capable of processing a {\em much\/} wider set of
+grammars, although \splint\ can only handle the \bison\ output for
+{\sc LALR}$(1)$ parsers.}.
+
+@*1 Using the \eatone{bison}\bison\ parser.
+The process of using \splint\ for writing parsing macros in \TeX\ is
+treated in considerable detail later in this document. A shorter
+(somewhat outdated but still applicable) version of this
+process is outlined in \cite[Sh], included as part of \splint's documentation.
+We begin, instead, by explaining how one such parser can be used to pretty print a
+\bison\ grammar. Following the convention mentioned above and putting
+all non-\Cee\ code inside \CWEAVE's verbatim blocks, consider the
+following (meaningless) code fragment\footnote{The software included in the package
+contains a number of preprocessing scripts that reduce the necessity of using
+the verbatim blocks for every line of the \bison\ code so the snippet above can
+instead be presented without the distraction of \.{@@=...@@>}, looking more
+like the `native' \bison\ input}. The fragment contains a mixture
+of \Cee\ and \bison\ code; the former appears outside of the verbatim blocks.
+\begindemo
+^@@= non_terminal: @@>
+^@@= term.1 term.2 {@@> a = b; @@=}@@>
+^@@= **H term.3 other_term {@@> $$ = $1; @@=}@@>
+^@@= **H still more terms {@@> f($1); @@=}@@>
+^@@= ; @@>
+\enddemo
+The fragment above will appear as (the output of \CTANGLE\ can be
+examined in \.{sill.y})
+@<A silly example@>=
+@G
+non_terminal:
+ term.1 term.2 {@> a = b; @=}
+| term.3 other_term {@> $$ = $1; @=}
+| still more terms {@> f($1); @=}
+;
+@g
+
+@ $\ldots$ if the syntax is correct.
+In case it is a bit off (note the missing colon after \.{whoops}), the parser will give up and
+you will see a different result. The code in the fragment below is easily
+recognizable, and some parts of it (all of the \Cee\ code, in fact) are
+still pretty printed by \CWEAVE. Only the verbatim portion is left
+unprocessed.
+@<A silly example@>=
+@G
+whoops
+ term.1 term.2 {@>@+ a = b; @+@=}
+| term.3 other_term {@>@+ $$ = $1; @+@=}
+| still more terms {@>@+ f($1); @+@=}
+;
+@g
+
+@ The \TeX\ header that makes such output possible is quite plain. In the case
+of this document it begins as
+\begindemo
+^\input limbo.sty
+^\input frontmatter.sty
+^\def\optimization{5}
+^\input yy.sty
+\nooutput
+\enddemo
+The first two lines are presented here merely for completeness: there is
+no parsing-relevant code in them. The third line
+(\.{\\def\\optimization\{5\}}) may be ignored for now (we discuss some
+ways the parser code may be sped up
+\locallink{optimization}later\endlink). The line that
+follows loads the macros that implement the parsing and scanning
+machinery.
+
+This is enough to set up all the basic
+mechanisms used by the parsing and lexing macros. The rest of the header
+provides a few definitions to fine tune the typesetting of
+grammar productions. It starts with
+\begindemo
+^\let\currentparsernamespace\parsernamespace
+^ \let\parsernamespace\mainnamespace
+^ \let\currenttokeneq\tokeneq
+^ \def\tokeneq#1#2{\prettytoken{#1}}
+^ \input bo.tok % re-use token equivalence table to set the typesetting of tokens
+^ \let\tokeneq\currenttokeneq
+^ \input btokenset.sty
+\nooutput
+\enddemo
+We will have a chance to discuss all the \.{\\}$\ldots$\.{namespace}
+macros later; at this point it will suffice to say that the lines
+above are responsible for controlling the typesetting of term names. The
+file \.{bo.tok} consists of a number of lines like the ones below:
+\begindemo
+^\tokeneq {STRING}{{34}{115}{116}{114}{105}{110}{103}{34}}
+^\tokeneq {PERCENT_TOKEN}{{34}{37}{116}{111}{107}{101}{110}{34}}
+\nooutput
+\enddemo
+The cryptic looking sequences of integers above are strings of {\sc ASCII}
+codes of the letters that form the name that \bison\ uses when it needs to
+refer to the corresponding token (thus, the second one is
+\toksa{}\numberstochars{34}{37}{116}{111}{107}{101}{110}{34}\end
+\.{\the\toksa} which might help explain why such an indirect scheme
+has been chosen). The macro \.{\\tokeneq} is defined in
+\.{yymisc.sty}, which in turn is input by \.{yy.sty}, but what about
+the token names themselves? In this case they were extracted
+automatically from the \CWEB\ source file by the
+\locallink{bootstrapping}{\em bootstrapping parser\/} \endlink during the
+\CWEAVE\ processing stage. All of these definitions can be
+overwritten to get the desired output (say, one might want to typeset
+\.{ID} in a roman font, as `identifier'; all that needs to be done to
+make this possible is a macro that says \.{\\prettywordpair\{ID\}\{\{\\rm
+identifier\}\}} in an appropriate namespace (usually
+\.{\\hostparsernamespace})). The file \.{btokenset.sty} input above
+contains a number of such definitions.
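+For instance, the \.{ID} example just mentioned amounts to adding a line
+like the one below next to the \.{\\input btokenset.sty} line in the
+header above (a sketch only; the appropriate namespace for such a
+definition is discussed later):
+\begindemo
+^ \prettywordpair{ID}{{\rm identifier}}% typeset the term ID as `identifier'
+\nooutput
+\enddemo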
+
+@ To round off this short overview, I must mention a caveat associated
+with using the macros in this collection: while one of the greatest
+advantages of using \CWEB\ is its ability to rearrange the code in a
+very flexible way, the parser will either give up or produce
+unintended output if this feature is abused while describing the
+grammar. For example, in the code below
+@<A silly example@>=
+@G
+next_term:
+ stuff @> @<Rest of line@> @={@> a = f( x ); @=}
+@g
+@<A production@>@;
+
+@ the line titled |@<A production@>| is intended to be a rule defined
+later. Notice that while it seems that the parser was able to recognize
+the first code fragment as a valid \bison\ input, it misplaced the
+|@<Rest of line@>|, having erroneously assumed it to be a part of
+the action code for this grammar (later on we will go into the details of
+why it is necessary to collect all the non-verbatim output of \CWEAVE,
+even that which contains no interesting \Cee\ code; hint: it has
+something to do with money (\.{\$}), also known as math and the way
+\CWEAVE\ processes the `gaps' between verbatim sections). The production
+line that follows did not fare as well: the parser gave up. There
+is simply no point in including such a small language fragment as a
+valid input for the grammar the parser uses to process the verbatim
+output.
+@<A production@>=
+@G
+ more stuff in this line {@> @[b = g(y);@]@=}
+@g
+
+@ Finally, if you forget that only the verbatim part of the output is
+looked at by the parser, you might get something unrecognizable, such
+as
+@<Rest of line@>=
+ but not all of it
+
+@ To correct this, one can provide a more complete grammar fragment to
+allow the parser to complete its task successfully. In some cases,
+this imposes too strict a constraint on the programmer. Instead, the
+parser that pretty prints \bison\ grammars allows one to add {\it
+hidden context\/} to the code fragments above. The context is added
+inside \.{\\vb} sections using \CWEB's \.{@@t}$\ldots$\.{@@>} facility. The \CTANGLE\
+output is not affected by this, while the code above can now be typeset as:
+@<A silly example@>=
+@G
+next_term:
+ stuff @> @t}\vb{\formatlocal{\let\peekstash\stashtoterm}}{@> @<Rest of line@> @t}\vb{FAKE}{@> @={@> a = f( x ); @=}
+@g
+@<A production@>@;
+
+@ $\ldots$ even a single line can now be displayed properly.
+@<A production@>=
+@G
+@t}\vb{\formatlocal{\skipheader} FAKE:}{@>
+ more stuff in this line {@> b = g( y ); @=}
+@g
+
+@ With enough hidden context, even a small rule fragment can be
+typeset as intended. The `action star' was inserted to reveal some of
+the context.
+@<Rest of line@>=
+@G
+@t}\vb{\formatlocal{\skipheader} FAKE:}{@>
+ but not all of it
+@t}\vb{\{\stashed{$\star$}\}}{@>
+@g
+@ What makes all of this even more confusing is that \CTANGLE\ will
+have no trouble outputting this as a(n almost, due to the
+intentionally bad \.{whoops} production above) valid \bison\ file
+(as can be checked by looking into \.{sill.y}). The author
+happens to think that one should not fragment the software into pieces
+that are too small: \bison\ is not \Cee, so it makes sense to write
+\bison\ code differently. However, if the logic behind your code
+organization demands such fine fragmentation, hidden context provides
+you with a tool to show it off. A look inside the source of this
+document shows that adding hidden context can be a bit ugly, so it is
+not recommended for routine use. The short example above is output in
+the file below.
+@(sill.y@>=
+ @<A silly example@>@;
+
+@*1 On debugging. This concludes a short introduction to \bison\
+grammar pretty printing using this macro collection. It would be
+incomplete, however, without a short reference to debugging\footnote{Here
+we are talking about debugging the output produced by \CWEAVE\ when
+the included \bison\ parser is used, {\it not\/} debugging parsers
+written with the help of this software: the latter topic is covered in more
+detail later on}. There is a
+fair amount of debugging information that the macros can output;
+unfortunately, very little of it is tailored to the {\em use\/} of the
+macros in the \bison\ parser. Most of it is designed to help build a
+{\em new\/} parser. If you find that the \bison\ parser gives up too often
+or even crashes (the latter is most certainly a bug in the \splint\
+version of the \bison\ parser itself), the first approach is to make
+sure that your code {\em compiles}, i.e.\ forget about the printed
+output and try to see if the `real' \bison\ accepts the code (just the
+syntax, no need to worry about conflicts and such).
+
+If this does not shed any light on why the macros seem to fail, turn
+on the debugging output by saying \.{\\trace$\ldots$true} to activate the
+appropriate trace macros. This may produce {\it a lot\/} of output, even for
+small fragments, so turn it on for only a section at a time. If you
+need still {\it more\/} details of the inner workings of the parser
+and the lexer, various other debugging conditionals are available. For
+example, \.{\\yyflexdebugtrue} turns on the debugging output for the
+scanner. There are a number of such conditionals that are discussed in
+the commentary for the appropriate \TeX\ macros. Most of these
+conditionals are documented in \.{yydebug.sty}, which provides a
+number of handy shortcuts for a few commonly encountered
+situations, as well.
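+For instance, a debugging session that traces the scanner for a single
+troublesome fragment might bracket the fragment as shown below (a sketch;
+\.{\\yyflexdebugfalse} is the companion of the conditional mentioned
+above, and the complete list of such switches is collected in
+\.{yydebug.sty}):
+\begindemo
+^\yyflexdebugtrue  % start tracing the scanner just before the suspect fragment
+\nooutput
+^\yyflexdebugfalse % ... and switch the trace back off right after it
+\nooutput
+\enddemo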
+
+Remember, what you are seeing at this point is the parsing process of
+the \bison\ input file, not the one for {\it your\/} grammar (which
+might not even be complete at this point). However, if all of the
+above fails, you are on your own: drop me a line if you figure out how
+to fix any bugs you find.
+
+@** Terminology. \namedspot{terminology}This short chapter is an informal
+listing of a few loose definitions of
+the concepts used repeatedly in this documentation. Most of this terminology is
+rather standard. Formal precision is not the goal here; instead, intuitive
+explanations are substituted whenever possible.
+{%
+\def\aterm#1{\item{\sqebullet}{\ttl #1}: \ignorespaces}%
+\setbox0=\hbox{\sqebullet\enspace}
+\parindent=0pt
+\advance\parindent by \wd0
+\smallskip
+\aterm{bison {\rm(as well as} flex{\rm)} parser{\rm(}s{\rm)}}
+while, strictly speaking, not a formally defined
+term, this combination will always stand for one of the parsers generated
+by this package designed to parse a subset of the `official' grammar for
+\bison\ or \flex\ input files. All of these parsers are described later in
+this documentation. The term {\it main parser\/} will be
+used as a substitute in example documentation for the same purpose.
+
+\aterm{driver} a generic but poorly defined concept. In this
+documentation it is used predominantly to mean both the \Cee\ code and
+the resulting executable that outputs the \TeX\ macros that contain the
+parser tables, token values, etc., for the parsers built by the user. It
+is understood that the \Cee\ code of the `driver' is unchanged and the
+information about the parser itself is obtained by {\it including\/} the \Cee\
+file produced by \bison\ in the `driver' (see the examples supplied
+with the package).
+
+\aterm{lexer} a synonym for {\it scanner}, a subroutine that performs the {\it
+lexical analysis\/} phase of the parsing process, i.e.\ groups various
+characters from the input stream into parser {\it tokens}.
+
+\aterm{namespace} this is an overused bit of terminology meaning a
+set of names grouped together according to some relatively
+well defined principle. In a language without a well developed type
+system (such as \TeX), it is usually accompanied by a specially designed
+naming scheme. {\it Parser namespaces\/} are commonly used in this
+documentation to mean a collection of all the data structures describing a
+parser and its state, including tables, stacks, etc., named by using the
+`root' name (say \.{\\yytable}) and adding the name of the parser (for
+example, \.{[main]}). To support this naming scheme, a number of
+macros work in unison to create and rename the `data macros'
+accordingly\footnote{To be precise, the {\em namespaces\/} in this
+manual would more appropriately be referred to as {\em named
+scopes}. The {\em tag namespace\/} in \Cee\ is an example of a
+(built-in) language namespace where the {\em grammatical r\^ole\/} of the
+identifier determines its association with the appropriate set.}.
+
+\aterm{parser stack}
+a collection of parsers, usually derived from a common set of
+productions, and sharing a common lexer. As the name suggests, the
+parsers in the collection are tried in order until the input is parsed
+successfully or every parser has been tried. This terminology may be the
+source of some confusion, since each parsing algorithm used by \bison\
+maintains several stacks. We will always refer to them by naming a specific
+task the stack is used for (such as the {\em value stack\/} or the
+{\em state stack}, etc.).
+
+\aterm{pretty printing {\rm or} program visualization}
+The terms above are used interchangeably in this manual to mean
+typesetting the program code in a way that emphasizes its meaning as
+seen by the author of the program\footnote{Or the person
+typesetting the code.}. It is usually assumed that such
+meaning is extracted by the software (a specially designed {\em
+parser\/}) and translated into a suitable visual representation.
+
+\aterm{symbolic switch} a macro (or an associative array of macros)
+that lets the \TeX\ parser generated by the package associate {\it
+symbolic term names\/} (called {\it named references\/} in the official
+\bison\ documentation) with the terms. Unlike the `real' parser, the
+parser created with this suite requires some extra setup as explained
+in the included examples (one can also consult the source for this
+documentation which creates but does not use a symbolic switch).
+
+\aterm{symbolic term name} (also referred to as a {\it named reference\/}
+in the \bison\ manual): a (relatively new) way to refer to stack
+values in \bison. In addition to using the `positional' names such as
+\.{\$}$n$ to refer to term values, one can utilize the new syntax:
+\.{\$}\.{[}\\{name}\.{]} (or even \.{\$}\\{name} when the \\{name}
+has a tame enough syntax). The `\\{name}' can be assigned by the
+user or can be the name of the nonterminal or token used in the
+productions.
+
+\aterm{term} in a narrow sense, an `element' of a grammar. Instead of
+a long winded definition, an example, such as \prodstyle{ID} should
+suffice. Terms are further classified into {\it terminals\/} (tokens)
+and {\it nonterminals\/} (which can be intuitively thought of as
+composite terms).
+
+\aterm{token} in short, an element of a set. Usually encoded as an
+integer by most parsers, a {\em token\/} is an indivisible {\em term\/}
+produced for the parser by the scanner. \TeX's scanner uses a more
+sophisticated token classification, for example, $($character code,
+character category$)$ pairs, etc.
+
+}
+@** Languages, scanners, parsers, and \TeX. % Or $\ldots$
+\vtop{\halign to\hsize{\kern-1.5pt\it#\hfil\tabskip0pt plus1fil\cr
+Tokens and tables keep macros in check.\cr
+Make 'em with \bison, use \.{WEAVE} as a tool.\cr
+Add \TeX\ and \CTANGLE, and \Cee\ to the pool.\cr
+Reduce 'em with actions, look forward, not back.\cr
+Macros, productions, recursion and stack!\cr
+\noalign{\vskip2pt}
+\omit\hfil\eightpoint Computer generated (most likely)\cr}}
+\bigskip
+\def\recount#1{${}^{(#1)}$}%
+\noindent In order to understand the parsing routines in this collection,
+it would help to gain some familiarity with the internals of the
+parsers produced by \bison\ for its intended target: \Cee. A person
+looking inside a parser delivered by \bison\ would
+quickly discover that the parsing procedure itself (|yyparse|)
+occupies a rather small portion of the file. If (s)he were to further
+reduce the size of the file by removing all the preprocessor
+directives intended to anticipate every conceivable combination of the
+operating system, compiler, and \Cee\ dialect, and various reporting
+and error logging functions, it would become very clear that the most
+valuable product of \bison's labor is a collection of integer {\it
+tables\/} that control the actions of the parser routine. Moreover,
+the routine itself is an extremely concise and well-structured loop
+composed of |goto|'s and a number of numerical conditionals. If one
+could think of a way of accessing arrays and processing conditionals
+in the language of one's choice, once the tables produced by \bison\
+have been converted into a form suitable for the consumption by the
+appropriate language engine, the parser implementation becomes
+straightforward. Or nearly so.
+
+The {\it scanning\/} (or {\it lexing\/}) step of this process---a way
+to convert a stream of symbols into a stream of integers---deserves
+some attention, as well. There are a number of excellent programs
+written to automate this step in much the same fashion as \bison\
+automates the generation of parsers. One such tool, \flex, though
+(in the opinion of this author) slightly lacking in simplicity and
+elegance compared to \bison, was used to implement the lexer for
+this software suite. Lexing in \TeX\ will be discussed in considerable
+detail later in this manual.
+
+The language of interest in our case is, of course, \TeX, so our
+future discussion will revolve around the five elements mentioned
+above: \recount{1}data structures (mainly arrays and stacks),
+\recount{2}converting
+\bison's output into a form suitable for \TeX's consumption,
+\recount{3}processing raw streams of \TeX's tokens and converting them into
+streams of parser tokens, \recount{4}the implementation of \bison's
+|yyparse| in \TeX, and, finally, \recount{5}producing \TeX\ output via {\it
+syntax-directed translation} (which requires an appropriate
+abstraction to represent \bison's actions inside \TeX). We shall
+begin by discussing the parsing process itself.
+
+@*1 Arrays, stacks and the parser.
+Let us briefly examine the programming environment offered by \TeX.
+Designed for typesetting, \TeX's remarkable language
+provides a layer of macro processing atop a set of commands that
+produce the output fulfilling its primary mission: delivering page
+layouts. In The \TeX book, the macro {\it expansion\/} is likened to
+mastication, whereas \TeX's main product, the typographic output, is the
+result of its `digestion' process. Not everything that goes through
+\TeX's digestive tract ends up leaving a trace on the final page: a
+file full of \.{\\relax}'s will produce no output, even though
+\.{\\relax} is not a macro, and thus would have to be processed by
+\TeX\ at the lowest level.
+
+It is time to describe the details of defining suitable data structures
+in \TeX. At first glance, \TeX\ provides rather standard means of
+organizing and using the memory. At the core of its generic
+programming environment is an array of \.{\\count}$\,n$ {\it
+registers\/}, which may be viewed as general purpose integer variables
+that are randomly accessible by their indices. The integer arithmetic
+machinery offered by \TeX\ is spartan but is very adequate for the sort of
+operations a parser would perform: mostly additions and
+comparisons.
+
+Is the \.{\\count} array a good way to store tables in \TeX? Probably
+not. The first factor is the {\it size\/} of this array: only 256
+\.{\\count} registers exist in a standard \TeX\ (the actual number of
+such registers on a typical machine running \TeX\ is significantly
+higher but this author is a great believer in standards, and to his
+knowledge, none of the standardization efforts in the \TeX\ world has
+resulted in anything even close to the definitive masterpiece that is
+The \TeX book). The issue of size can be mitigated to some extent by
+using a number of other similar arrays used by \TeX\ (\.{\\catcode},
+\.{\\uccode}, \.{\\dimen}, \.{\\sfcode} and others can be used for
+this purpose as long as one takes care to restore the `sane' values
+before the control is handed off to \TeX's typesetting mechanisms). If a
+table has to span several such arrays, however, the complexity of the
+access code would have to increase significantly, and the issue of
+size would still haunt the programmer.
+
+The second factor is the utilization of several registers by \TeX\ for special
+purposes (in addition, some of these registers can only store a
+limited range of values). Thus, the first 10 \.{\\count} registers are
+used by plain \TeX\ for (well, {\it intended\/} for, anyway) the
+purposes of page accounting: their values would have to be carefully
+saved and restored before and after each parsing call,
+respectively. Other registers (\.{\\catcode} in particular) have even
+more disruptive effects on \TeX's internal mechanisms. While all of
+this can be managed (after all, while \TeX\ is used as an arithmetic
+engine, say to drive a parser, the need for any typographic or other
+specialized functions controlled by these arrays is suspended), the added
+complexity of using several memory banks simultaneously and the speed penalty
+caused by the need to save and restore register values make this
+approach much less attractive.
+
+What other means of storing arrays are provided by \TeX? Essentially,
+only three options remain: \.{\\toks} registers, macros holding whole
+arrays, and associative arrays accessed through
+\.{\\csname}$\,\ldots\,$\.{\\endcsname}. In the first two cases, if care
+is taken to store such arrays in an
+appropriate form, one can use \TeX's \.{\\ifcase} primitive to access
+individual elements. The trade-off is the speed of such
+access: it is {\it linear\/} in the size of the array for most
+operations, and worse than that for others, such as removing the last
+item of an array. Using clever ways
+of organizing such arrays, one can improve the linear access time to
+$O(\log n)$ by simply modifying the access macros, but at the moment a
+straightforward \.{\\ifcase} is used after expanding a list macro or
+the contents of a \.{\\toks}$\,n$ register in an {\it un\/}optimized
+parser. An {\it optimized\/} parser uses associative arrays.
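+To make the contrast concrete, here is a minimal sketch of the two access
+methods (the names \.{\\demotable}, \.{\\tabledef}, and \.{\\tableval} are
+invented for this illustration and are not part of the package):
+\begindemo
+^\def\demotable#1{\ifcase#1 5\or 12\or 7\else 0\fi}  % a macro holding the whole array: linear access
+^\def\tabledef#1#2{\expandafter\def\csname demo[#1]\endcsname{#2}}
+^\def\tableval#1{\csname demo[#1]\endcsname}         % associative access: a single \csname lookup
+\nooutput
+\enddemo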
+
+The array discussion above is just as applicable to {\it stacks\/}
+(indeed, an array is the most common form of stack
+implementation). Since stacks pop up and disappear frequently (what
+else are stacks to do?), list macros are usually used to store
+them. The optimized parser uses a separate \.{\\count} register to
+keep track of the top of the stack in the corresponding associative
+array.
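+In the same spirit, a stack kept in such an associative array with a
+\.{\\count} register tracking its top might be sketched as follows
+(again, \.{\\stackptr}, \.{\\stackpush}, and the rest are names made up
+for this illustration and rely on the \.{\\tabledef} and \.{\\tableval}
+macros above):
+\begindemo
+^\newcount\stackptr                                  % the index of the current top of the stack
+^\def\stackpush#1{\advance\stackptr1 \tabledef{\the\stackptr}{#1}}
+^\def\stackpop{\advance\stackptr-1 }                 % the old top simply becomes inaccessible
+^\def\stacktopval{\tableval{\the\stackptr}}          % expands to the value currently on top
+\nooutput
+\enddemo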
+
+Let us now switch our attention
+to the code that implements the parser and scanner {\it functions\/}.
+If one has spent some time writing \TeX\ macros of any sophistication
+(or any macros, for that matter), (s)he must be familiar with the general
+feeling of frustration and the desire to `just call a function here and move
+on'. Macros\footnote{Formally defined as `$\ldots$ special
+compile-time functions that consume and produce {\em syntax objects}'
+in~\cite[DHB].} produce {\it tokens\/}, however, and tokens must either
+expand to nothing or stay and be contributed to your input, or worse,
+be out of place and produce an error. One way to sustain a stream
+of execution with macros is {\it tail recursion\/} (i.e.~always expanding the
+{\it last token left standing}).
+
+As we have already discussed, \bison's
+|yyparse()| is a well laid out loop organized as a sequence of
+|goto|'s (no reason to become religious about structured programming
+here). This fact, and the following well known trick, make \Cee\ to \TeX\
+translation nearly straightforward. The macro \TeX niques employed by the
+sample code below are further discussed elsewhere in this manual.
+
+% The macro mess below looks painful but this is the only place such layout is used
+% The approach can be easily generalized and put in limbo.sty but it seems
+% a bit redundant at this point.
+
+\newcount\piccount
+\newdimen\lasthsize
+\setbox5=\vtop{
+\demomargin=0pt
+\let\demoastyle\empty
+\begindemo
+^label A: ...
+\nooutput
+^ if**L**Krm(condition)**N
+^ goto C;
+\nooutput
+^label B: ...
+\nooutput
+^ goto A;
+\nooutput
+^label C: ...
+\nooutput
+\enddemo
+}
+\dp5=\z@@
+
+\setbox3=\vtop{
+\demomargin=0pt
+\let\demoastyle\empty
+\begindemo
+^\if**L**Krm(condition)**N
+^ \let\next=\labelC
+^\else
+^ \let\next=\labelAtail
+\enddemo
+}
+\dp3=\z@@
+
+\newdimen\lastdepth
+
+\def\startfitpar{%
+ \bgroup
+ \lasthsize=\hsize
+ \advance\lasthsize-1.5in
+ \vsize=\baselineskip
+ \topskip=\z@@
+ \setbox0\box2 % empty it
+ % this sounds good at first but there is no good way to pull the insertions out after the
+ % box manipulations that follow;
+ % insertions will thus be contributed to whatever page was being worked on when the
+ % picture insertions {\it started}; hence, if these happen to start at the very top of the page,
+ % any insertion that follows will be contributed to the previous page; we correct this for footnotes
+ % below
+ % \holdinginserts=1
+ \output{%
+ \global\setbox2=\vbox{
+ \ifvoid2
+ \else
+ \prevdepth=\dp2
+ \unvbox2
+ \fi
+ \lastdepth=\dp255
+ \unvbox255
+ % this would be tempting, however, the \eject that follows should disappear
+ % in addition, one really should not be playing with page breaking in the middle of
+ % such tricky insertions
+ % \penalty\outputpenalty
+ % \kern-\lastdepth % to make sure \baselineskip is accounted for
+ }%
+ }\eject
+ \output{%
+ \setbox0=\vbox{%
+ \unvbox255%
+ }% \lastbox would almost work ... if not for insertions
+ \global\advance\piccount1
+ \global\setbox2=\vbox{%
+ \prevdepth=\dp2 \unvbox2
+ \hbox to\hsize{%
+ \ifnum\piccount<15
+ \hbox to1.5in{%
+ \ifnum\piccount=1
+ \ \box5
+ \fi
+ \hfill}%
+ \fi
+ \box0 \hfill
+ \ifnum\piccount=1
+ \box3 \ %
+ \fi
+ \ifvoid\footins % reinsert footnotes
+ \else
+ \insert\footins{\unvbox\footins}%
+ \fi
+ }%
+ }%
+ }%
+ \parshape=15
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \hsize
+}
+
+\def\endfitpar{%
+ \par
+ \eject
+ \egroup
+ % see the comment above
+ % \holdinginserts=0
+ \prevdepth=\dp2
+ \unvbox2
+}
+
+\startfitpar
+\noindent Given the code on the left (where |goto|'s
+are the only means of branching but can appear inside conditionals),
+one way to translate it into \TeX\ is to define a set of macros (call
+them \.{\\labelA}, \.{\\labelAtail} and so forth for clarity) that end in
+\.{\\next} (a common name for this purpose). Now, \.{\\labelA} will
+implement the code that comes between \.{label A:} and \.{goto C;},
+whereas \.{\\labelAtail} is responsible for the code after \.{goto C;}
+and before \.{label B:}
+(provided no other |goto|'s intervene, which can always be
+arranged). The conditional which precedes \.{goto C;} can now be written in
+\TeX\ as presented on the right, where (condition) is an appropriate
+translation of the corresponding condition
+in the code being translated (usually, one of `$=$' or `$\not=$'). Further
+details can be extracted from the \TeX\ code that implements these
+functions where the corresponding \Cee\ code is presented alongside
+the macros that mimic its functionality%
+\footnote{Running the risk of overloading the reader with details, the author
+would like to note that the actual implementation follows a {\it slightly\/} different
+route in order to avoid any \.{\\let} assignments or changing the
+meaning of \.{\\next}}.
+This concludes the overview of the general approach.
+It is time to consider the way characters get consumed
+on the lower levels of the macro hierarchy and the interaction between the different
+layers of the package.
+\endfitpar
+
+@*1 \TeX\ into tokens.
+Thus far we have covered the ideas
+behind items \recount{1} and \recount{4} on our list. It is time to
+discuss the lowest level of processing performed by these macros:
+converting \TeX's tokens into the tokens consumed by the parser,
+i.e.\ part \recount{3} of the plan. Perhaps it would be most appropriate
+to begin by reviewing the concept of a {\it token}.
+
+As commonly defined, a token is simply an element of a set (see the section
+on \locallink{terminology}terminology\endlink\ earlier in this manual).
+Depending on
+how much structure the said set possesses, a token can be represented by
+an integer or a more complicated data structure. In the discussion
+below, we will be dealing with two kinds of tokens: the tokens
+consumed by the parsers and the \TeX\ tokens seen by the input
+routines. The latter play the r\^ole of {\it characters\/} that combine
+to become the former. Since \bison's internal representation for its tokens
+is non-negative integers, this is what the scanner must produce.
+
+\TeX's tokens are a good deal more sophisticated: they can be
+either pairs $(c_{\rm ch}, c_{\rm cat})$, where $c_{\rm ch}$ is the
+character code and $c_{\rm cat}$ is \TeX's category code ($1$ and $2$ for
+group characters, $5$ for end of line, etc.), or {\it control
+sequences\/}, such as \.{\\relax}. Some of these tokens (control
+sequences and {\it active}, i.e.~category~13 characters) can have
+complicated internal structure (expansion). The situation is further
+complicated by \TeX's \.{\\let} facility, which can create
+`character-like' control sequences, and the lack of conditionals
+to distinguish them from the `real' characters. Finally, not all pairs
+can appear as part of the input (say, there is no $(n, 0)$ token for
+any $n$, in the terminology above).
+
+The scanner expects to see {\it characters} in its input, which are
+represented by their {\sc ASCII} codes, i.e.~integers between $0$ and
+$255$ (actually, a more general notion of a Unicode character is
+supported, but we will not discuss it further). Before character codes
+appear as the input to the scanner, however, and make its integer
+table-driven mechanism `tick', a lot of work must be done to collect
+and process the stream of \TeX\ tokens produced after \CWEAVE\ is done
+with your input. This work becomes even more complicated when the
+typesetting routines that interpret the parser's output must sneak
+outside of the parsed stream of text (which is structured by the
+parser) and insert the original \TeX\ code produced by \CWEAVE\ into
+the page.
+
+\splint\ comes with a customizable input routine of
+moderate complexity (\.{\\yyinput}) that classifies all \TeX\ tokens
+into seven categories: `normal' spaces (i.e.~category~10 tokens,
+skipped by \TeX's parameter scanning mechanism),
+`explicit' spaces (this includes the control sequences \.{\\let} to \.{\ },
+as well as \.{\\\ }), groups ({\it avoid} using \.{\\bgroup} and \.{\\egroup} in
+your input, but `real' \.{\{}$\ldots$\.{\}} groups are fine), active
+characters, normal characters (of all character categories that can
+appear in \TeX\ input, including \.{\$}, \.{\^}, \.{\#}, \.{a}--\.{Z},
+etc.), single letter control sequences, and multi-letter control
+sequences. Each of these categories can be processed separately to
+`fine-tune' the input routine to the problem at hand. The input
+routine is not very fast; flexibility, rather than speed, was the main
+goal. Therefore, if speed is desirable, a customized input routine
+is a great place to start. As an example, a minimalistic
+\.{\\yyinputtrivial} macro is included.
+
+When \.{\\yyinput} `returns' by calling \.{\\yyreturn} (which is a
+macro you design), your lexing routines have access to three
+registers: \.{\\yycp@@}, which holds the character value of the
+character just consumed by \.{\\yyinput}, \.{\\yybyte}, which most of
+the time holds the token just removed from the input,
+and \namedspot{yybytepure-discussion}\.{\\yybytepure}, which (again, with very few
+exceptions) holds a `normalized' version of the read character (i.e.~a
+character of the same character code as \.{\\yycp@@}, and category~12
+(to be even more precise (and to use nested parentheses), `normalized'
+characters have the same category code as that of `\.{.}' at the point
+where \.{yyinput.sty} is read)).
+
+Most of the time it is the character code one needs (say, in the case
+of \.{\\\{}, \.{\\\}}, \.{\\\&} and so on) but under some circumstances the
+distinction is important (outside of \.{\\vb\{}$\ldots$\.{\}}, the sequence
+\.{\\1} has nothing to do with the digit `\.{1}'). This mechanism
+makes it easy to examine the consumed token. It also forms
+the foundation of the `hidden context' passing mechanism described later.
+
+The remainder of this section discusses the internals of \.{\\yyinput}
+and some of the design trade-offs one has to make while working on
+processing general \TeX\ token streams. It is typeset in `small print'
+and can be skipped if desired.
+\smallskip
+\begingroup
+\abovedisplayskip=5pt%
+\abovedisplayshortskip=2pt%
+\belowdisplayskip=5pt%
+\belowdisplayshortskip=2pt%
+\fnotesstart=1
+\fnotesspan=2
+\noofcolumns=2
+\icgap=1em%
+\eightpoint
+\linecount=73
+\setmcparams
+\def\.#1{{\chardef\\=`\\\chardef\&=`\&\tt #1}}%
+\dsskip=0pt%
+\begindoublecols
+To examine every token in its path (including spaces that are easy to
+skip), the input routine uses one of the two well-known {\sc \TeX}nologies:
+\.{\\futurelet\\next\\examinenext} or its equivalent
+\.{\\afterassignment\\examinenext\\let\\next=}\hbox{\tt\char"20}.
+Recursively inserting one of these sequences, \.{\\yyinput} can go
+through any list of tokens, as long as it knows where to stop
+(i.e.~return an end of file character). The
+signal to stop is provided by the \.{\\yyeof}
+sequence, which should not appear in any `ordinary' text
+presented for parsing, other than for the purpose of providing such a
+stop signal. Even the dependence on \.{\\yyeof} can be eliminated if
+one is willing to invest the time in writing macros that juggle \TeX's
+\.{\\toks} registers and only limit oneself to input from such
+registers (which is, aside from an obvious efficiency hit, a strain on
+\TeX's memory, as you have to store multiple (3 in the general case)
+copies of your input to be able to back up when the lexer makes a
+wrong choice). Another approach to avoid the use of stop tokens is
+to store the whole input as a {\it parameter\/} for the appropriate macro.
+This scheme is remarkably powerful and can produce {\it expandable\/} versions
+of very complicated routines, although the amount of effort required to
+write such macros grows at a frightening rate. As the text inside
+\.{\\vb\{}$\ldots$\.{\}} is nearly always well structured, the care that
+\.{\\yyinput} takes in processing such character lists is an
+overkill. In a more `hostile' environment (such as the one encountered
+by the now obsolete \.{\\Tex} macros), however, this extra attention to detail pays
+off in the form of a more robust input mechanism.
+
+One subtlety deserves a special mention here, as it can be important
+to the designer of `higher-level' scanning macros. Two types of tokens
+are extremely difficult to deal with whenever \TeX's own lexing
+mechanisms are used: (implicit) spaces and, even more so, braces. We
+will only discuss braces here; however, almost everything that follows
+applies equally well to spaces (category 10 tokens to be precise), with
+a few simplifications (or complications, in a couple of places). To
+understand the difficulty, let's consider one of the approaches above:
+$$
+\.{\\futurelet\\next\\examinenext}.
+$$
+The macro \.{\\examinenext}
+usually looks at \.{\\next} and inserts another macro (usually also called
+\.{\\next}) at the very end of its expansion list. This macro usually
+takes one parameter, to consume the next token. This mechanism works
+flawlessly, until the lexer encounters a \.{\{}br\.{,}sp\.{\}}ace. The \.{\\next}
+sequence, seen by \.{\\examinenext}, contains a lot of information
+about the brace ahead: it knows its category code (left brace, so $1$), its
+character code (in case there was, say, a \.{\\catcode`\\[=1{\tt\char`\ }}
+earlier), but not whether it is a `real' brace (i.e.\ a character
+\.{\{}$_1$) or an implicit one (a \.{\\bgroup}). There is no way to find
+that out until the control sequence `launched' by \.{\\examinenext}
+sees the token as a parameter.
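+
+As a drastically simplified sketch of the mechanism just described (this
+is {\it not\/} the \.{\\yyinput} shipped with the package, and
+\.{\\inputone}, \.{\\collectone}, and \.{\\stopinput} are names made up
+for this illustration), consider
+\begindemo
+^\def\inputone{\futurelet\next\examinenext}
+^\def\examinenext{%
+^  \ifx\next\yyeof         % is the next token the end marker?
+^    \let\next\stopinput   % then stop the scan
+^  \else                   % otherwise
+^    \let\next\collectone  % grab the upcoming token as a parameter
+^  \fi\next}
+^\def\collectone#1{%
+^  \toks0=\expandafter{\the\toks0 #1}% append the consumed token to a register
+^  \inputone}                        % tail call: examine the next token
+^\def\stopinput\yyeof{}              % consume the end marker and stop
+\nooutput
+\enddemo
+Note that this simplified version already stumbles on `real' braces and
+implicit spaces, for the reasons explained next.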
+
+If the next token is a `real' brace, however,
+\.{\\examinenext}'s successor will never see the token itself: the
+braces are stripped by \TeX's scanning mechanism. Even if it finds a
+\.{\\bgroup} as the parameter, there is no guarantee that the actual
+input was not \.{\{\\bgroup\}}. One way to handle this is by applying
+\.{\\string} before consuming the next token. If, prior to
+expanding \.{\\string}, care has been taken to set the \.{\\escapechar}
+appropriately (remember, we know the character code of the next
+token in advance), as soon as one sees a character with
+\.{\\escapechar}'s character code,
+(s)he knows that an implicit brace has just been seen. One added
+complication to all this is that a very determined programmer can
+insert an {\it active\/} character (using, say, the \.{\\uccode}
+mechanism) that has the {\it same\/} character code as the {\it
+brace\/} token that it has been \.{\\let} to! Even setting this
+disturbing possibility aside, the \.{\\string} mechanism
+(or, its cousin, \.{\\meaning}) is
+far from perfect: both produce a sequence of category 12 and 10 tokens
+that are mixed into the original input. If
+it is indeed a brace character that we just saw, we can consume the next
+token and move on, but what if this was a control sequence? After all,
+just as easily as \.{\\string} makes a sequence into characters,
+a \.{\\csname}$\,\ldots\,$\.{\\endcsname} pair will make any sequence of
+characters into a control sequence, so determining the end of the character
+sequence produced by \.{\\string} may prove impossible. Huh~$\ldots$
+
+What we need is a backup mechanism: keeping a copy of the
+token sequence ahead, one can use \.{\\string} to see whether
+the next token is a real
+brace first, and if it is, consume it and move on (the active character
+case can be handled as the implicit case below, with one extra backup
+to count how many tokens have been consumed). At this point the brace has to be {\it
+reinserted\/} in case, at some point, a future `back up'
+requires that the rest of the tokens are removed from the output (to
+avoid `\.{Too many \}'s}' complaints from \TeX). This can be done by using
+the \.{\\iftrue\{\\else\}\\fi} trick (and a generous sprinkling of \.{\\expandafter}s).
+Of course, some bookkeeping is needed to keep track of how deep inside the
+braced groups we are.
+For an implicit brace, more work is needed: read all the
+characters that \.{\\string} produced (and maybe more), then remember
+the number of characters consumed. Remove the rest of the input using
+the method described above and restart the scanning from the same point,
+knowing that the next token can be scanned as a parameter.
+
+Another strategy is to design a general enough macro that counts
+tokens in a token register and to simply recount the tokens after every
+brace is consumed.
+
+Either way, it takes a lot of work. If anyone would
+like to pursue the counting strategy, simple counting macros
+are provided in \.{/examples/count/count.sty}.
+The macros in this example
+supply a very general counting mechanism that does not depend on
+\.{\\yyeof} (or {\it any\/} other token) being `special' and can count the
+tokens in any token register, as long as none of those tokens is an
+\.{\\outer} control sequence. In other words, if the macro is used
+immediately after the assignment to the token register, it should
+always produce a correct count.
+
+Needless to say, if such a general mechanism is desired, one has to
+look elsewhere. The added complications of treating spaces (\TeX\
+tends to ignore them most of the time) make this a torturous exercise
+in \TeX's macro wizardry.
+
+The included \.{\\yyinput} has two ways of
+dealing with braces: strip them or view the whole group as a
+token. Pick one or write a different \.{\\yyinput}. Spaces, implicit
+or explicit, are reported as a specially selected character code and
+consumed with a likeness of
+\.{\\afterassignment\\moveon\\let\\next={\tt\char`\ }}. This behavior
+can be adjusted if needed.
+
+Now that a steady stream of character codes is arriving at \.{\\yylex}
+after \.{\\yyreturn}, the job of converting it into numerical tokens
+is performed by the {\it scanner} (or {\it lexer\/}, or {\it tokenizer\/},
+or even {\it tokener}), discussed in the next section.
+\enddoublecols
+\endgroup
+
+@*1 Lexing in \TeX. In a typical system that uses a parser to process
+text, the parsing pass is usually split into several stages: the raw
+input, the lexical analysis (or simply {\it lexing}), and the parsing
+proper. The {\it lexing\/} (also called {\it scanning}; we use these
+terms interchangeably) clumps various sequences of characters into
+{\it tokens\/} to facilitate the parsing stage. The reasons for this
+particular hierarchy are largely pragmatic and are partially historic
+(there is no reason that {\it parsing\/} cannot be done in multiple
+phases, as well, although it usually isn't).
+
+If one recalls a few basic facts from formal language theory, it
+becomes obvious that a lexer, which parses {\it regular\/} languages,
+can (theoretically) be replaced by an {\sc LALR} parser, which parses {\it
+context-free\/} ones (or some subset thereof, which is
+still a superset of all regular languages). A common justification given for
+creating specialized lexers is efficiency and speed. The
+reality is somewhat more subtle. While we do care about the efficiency of
+parsing in \TeX, having a specialized scanner is important for
+a number of different reasons.
+
+The real advantage of having a dedicated scanner is the ease with which it
+can match incomplete inputs and back up. A parser can, of course,
+{\it recognize\/} any valid input that is also acceptable to a lexer, as well
+as {\it reject\/} any input that does not form a valid token. Between
+those two extremes, however, lies a whole realm of options that a
+traditional parser will have great difficulty exploring. Thus, to
+mention just one example, it
+is relatively easy to set up a {\sc DFA}\footnote{Which stands for
+Deterministic Finite Automaton, a common (and mathematically unique)
+way of implementing a scanner for regular languages. Incidentally, {\sc
+LALR} mentioned above is short for Look-Ahead {\sc LR} (Left-to-right,
+Rightmost derivation).}
+so that the {\it longest\/}
+matching input is accepted. The only straightforward way to do this
+with a traditional parser is to parse longer and longer inputs again
+and again. While this process can be optimized to a certain degree,
+the fact that a parser has a {\it stack\/} to maintain limits its
+ability to back up.
+
+As an aside, the mechanism by which \CWEB\ assembles its `scraps'
+into chunks of recognized code is essentially iterative lexing,
+very similar to what a human does to make sense of complicated
+texts. Instead of trying to match the longest running piece of text,
+\CWEB\ simply looks for patterns to combine inputs into larger
+chunks, which can later be further combined. Note that this is not
+quite the same as the approach taken by, say, {\sc GLR} parsers, where
+the parser must match the {\it whole\/} input or declare a
+failure. Where a \CWEB-type parser may settle for the first available
+match (or the longest available), a {\sc GLR} parser must try {\it
+all\/} possible matches or use an algorithm to reject the majority of
+the ones that are bound to fail in the end.
+
+This `\CWEB\ way' is also different from a traditional `strict' {\sc
+LR} parser/scanner approach and certainly deserves serious
+consideration when the text to be parsed possesses some rigid
+structure but the parser is only allowed to process it one small
+fragment at a time.
+
+Returning to the present macro suite, the lexer produced by \flex\
+uses integer tables similar to those employed by \bison, so the
+usual {\sc\TeX}niques used in implementing \.{\\yyparse} are fully
+applicable to \.{\\yylex}.
+
+An additional advantage provided by having a \flex\ scanner implemented
+as part of the suite is the availability of the original \bison\ scanner written
+in \Cee\ for use by the macro package.
+
+This said, the code generated by \flex\ contains a few idiosyncrasies
+not present in the \bison\ output. These `quirks' mostly involve
+handling of end of input and error conditions. A quick glance at the
+\.{\\yylex} implementation will reveal a rather extensive collection of
+macros designed to deal with end of input actions.
+
+Another difficulty one has to face in translating \flex\ output into
+\TeX\ is a somewhat unstructured namespace delivered in the final
+output (this is partially due to the \POSIX\ standard that \flex\
+strives to follow). One consequence of this `messy' approach is that the
+writer of a \flex\ scanner targeted to \TeX\ has to declare \flex\
+`states' (more properly called {\it subautomata}) twice: first for the
+benefit of \flex\ itself, and then again, in the {\it \Cee\ preamble\/}
+portion of the code to output the states to be used by the action code
+in the lexer. The \.{Define\_State($\ldots$)} macro is provided for this
+purpose. This macro can be used explicitly by the programmer or be
+inserted by a specially designed parser.
+Using \CWEB\ helps to keep these declarations together.
+
+The `hand-off' from the scanner to the parser is implemented
+through a pair of registers: \.{\\yylval}, a token register
+containing the value of the returned token and \.{\\yychar}, a
+\.{\\count} register that contains the numerical value of the
+token to be returned.
+
+Upon matching a token, the scanner passes one crucial piece of
+information to the programmer: the character sequence representing the token
+just matched (\.{\\yytext}). This is not the whole story
+though, as there are three more token sequences that are made available
+to the parser writer whenever a token is matched.
+
+The first of these is simply a `normalized' version of
+\.{\\yytext} (called \.{\\yytextpure}). In most cases it
+is a sequence of \TeX\ tokens with the same character codes as the one
+in \.{\\yytext} but with their category codes set to 12
+(see the discussion of \.{\\yybytepure}
+\locallink{yybytepure-discussion}above\endlink). In
+cases when the tokens in \.{\\yytext} are {\it not}
+$(c_{\rm ch}, c_{\rm cat})$ pairs, a few simple
+conventions are followed, some of which will be explained below. This
+sequence is provided merely for convenience and its typical use is to
+generate a key for an associative array.
+
+The other two sequences are special `stream pointers' that provide
+access to the extended scanner mechanism in order to implement the passing
+of the `formatting hints' to the parser, as well as incorporate
+\CWEAVE\ formatted code into the input, without introducing any changes to
+the original grammar. As the mechanism itself and the motivation
+behind it are somewhat subtle, let us spend a few moments discussing
+the range of formatting options desirable in a generic pretty-printer.
+
+Unlike strict parsers employed by most compilers, a parser designed
+for pretty printing cannot afford to be too picky about the structure
+of its input (\cite[Go] calls such parsers `loose'). To provide
+a simple illustration, an isolated identifier, such as `\.{lg\_integer}'
+can be a type name, a variable name, or a structure tag (in a language like
+\Cee\ for example). If one expects the pretty printer to typeset this
+identifier in a correct style, some context must be supplied, as
+well. There are several strategies a pretty printer can employ to get
+a hold of the necessary context. Perhaps the simplest way to handle
+this, and to reduce the complexity of the pretty printing algorithm, is
+to insist on the programmer providing enough context for the parser to do
+its job. For short examples like the one above, this may be an acceptable
+strategy. Unfortunately, it is easy to come up with longer snippets of
+grammatically deficient text that a pretty printer should be expected
+to handle. Some pretty printers, such as the one employed by \CWEB\
+and its ilk (the original \.{WEB}, \.{FWEB}), use a very flexible
+bottom-up technique that tries to make sense of as large a portion of
+the text as it can before outputting the result (see also \cite[Wo],
+which implements a similar algorithm in \LaTeX).
+
+The expectation is that this algorithm will handle the majority (about
+90\%? it would be interesting to carry out a study in the spirit of
+the ones discussed in \cite[Jo] to find out) of the
+cases, with the remaining few left for the author to correct. The
+question is, how can such a correction be applied?
+
+\CWEB\ itself provides two rather different mechanisms for handling
+these exceptions. The first uses direct typesetting commands (for
+example, \.{@@/} and \.{@@\#} for canceling and
+introducing a line break, resp.) to change the typographic output.
+
+The second (preferred) way is to supply {\it hidden context\/} to the
+pretty-printer. Two commands, \.{@@;} and
+\.{@@[}$\ldots$\.{@@]} are used for this purpose. The
+former introduces a `virtual semicolon' that acts in every way like a
+real one except it is not typeset (it is not output in the source file
+generated by \CTANGLE\ either but this has nothing to do with pretty
+printing, so I will not mention \CTANGLE\ anymore). For
+instance, from the parser's point of view, if the preceding text was
+parsed as a `scrap' of type {\it exp}, the addition of \.{@@;}
+will make it into a `scrap' of type {\it stmt\/} in \CWEB's
+parlance. The second construct (\.{@@[}$\ldots$\.{@@]}),
+is used to create an {\it exp\/} scrap out of whatever happens to be
+inside the brackets.
+
+This is a powerful tool at the author's disposal. Stylistically,
+such context hints are the right way to handle exceptions,
+since using them forces the writer to emphasize the {\it logical\/}
+structure of the formal text. If the pretty printing style is changed
+later on, the texts with such hidden contexts should be able to
+survive intact in the final document (as an example, using a break
+after every statement in \Cee\ may no longer be considered
+appropriate, so any forced break introduced to support this convention
+would now have to be removed, whereas \.{@@;}'s would simply
+quietly disappear into the background).
+
+The same hidden context idea has another important advantage: with
+careful grammar fragmenting (facilitated by \CWEB's or any other
+literate programming tool's `hypertext' structure) and a more diverse
+hidden context (or even arbitrary hidden text) mechanism, it is
+possible to use a strict parser to parse incomplete language
+fragments. For example, the productions that are needed to parse
+\Cee's expressions form a complete subset of the grammar. If the
+grammar's `start' symbol is changed to \prodstyle{expression} (instead of
+the \prodstyle{translation-unit} as it is in the full \Cee\ grammar), a
+variety of incomplete \Cee\ fragments can now be parsed and
+pretty-printed. Whenever such granularity is still too `coarse',
+carefully supplied hidden context will give the pretty printer enough
+information to adequately process each fragment. A number of such {\it
+sub}-parsers\namedspot{parser.stacks} can be tried on each fragment (this may sound
+computationally expensive; in practice, however, a carefully chosen
+hierarchy of parsers will finish the job rather quickly) until a
+correct parser produces the desired output (this approach is similar
+to, although not quite the same as, the one employed by the {\it General LR
+parsers}).
+
+This somewhat lengthy discussion brings us to the question directly
+related to the tools described in this manual: how does one provide
+typographical hints or hidden context to the parser?
+
+One obvious solution is to build such hints directly into the
+grammar. The parser designer can, for instance, add new tokens
+(say, \.{BREAK\_LINE}) to the grammar and extend the
+production set to incorporate the new additions. The risk of
+introducing new conflicts into the grammar is low (although not
+entirely non-existent, due to the lookahead limitations of {\sc LR}($1$)
+grammars) and the changes required are easy, although very tedious, to
+incorporate.
+
+In addition to being labor intensive, this solution has two other
+significant shortcomings: it alters the original grammar and hides its
+logical structure; it also `bakes' the pretty-printing conventions
+into the language structure (making the `hidden' context much less
+`stealthy'). It does avoid the `synchronicity problem' mentioned
+below.
+
+A marginally better technique is to introduce a new regular expression
+recognizable by the scanner, which will then do all the necessary
+bookkeeping upon matching the sequence. All the difficulties with
+altering the grammar mentioned above apply in this case, as well, only
+at the `lexical analysis level'. At a minimum, the set of tokens
+matched by the scanner would have to be altered.
+
+A much more satisfying approach involves inserting the hints at the input stage and
+passing this information to the scanner and the parser as part of the token
+`values'. The hints themselves can masquerade as characters ignored by the scanner
+(white space\footnote{Or even the `intercharacter space', to make the
+hints truly invisible to the scanner.}, for example) and preprocessed by a specially designed
+input routine. The scanner then simply passes on the values to the
+parser. This makes hints, in effect, invisible.
+
+The difficulty now lies in synchronizing the token production with the
+parser. This subtle complication is very familiar to anyone who has
+designed \TeX's output routines: the parser and the lexer are not
+synchronous, in the sense that the scanner might be reading several
+(in the case of the general {\sc LR}$(n)$ parsers) tokens\footnote{Even if
+one were to somehow mitigate the effects of the lookahead {\it in the
+parser\/}, the scanner would still have to read the characters of the
+current token up to (and, in some cases, beyond) the (token's)
+boundary which, in most cases, is
+the whitespace, possibly hiding the next hint.} ahead of the
+parser before deciding on how to proceed (the same way \TeX\ can
+consume a whole paragraph's worth of text before exercising its page
+builder).
+
+If we simple-mindedly let the scanner return every hint it has encountered
+so far, we may end up feeding the parser the hints meant for the token
+that appears {\it after\/} the fragment the parser is currently working
+on. In other words, when the scanner `backs up' it must correctly back
+up the hints as well.
+
+This is exactly what the scanner produced by the tools in this package
+does: along with the main stream of tokens meant for the parser, it
+produces two\footnote{There would be no difficulty in splitting either
+of these streams into multiple `substreams' by modifying the stream
+extraction macros accordingly.} hidden streams (called the \.{\\yyformat} stream and
+the \.{\\yystash} stream) and provides the parser with two
+strings (currently only strings of digits are used although arbitrary
+sequences of \TeX\ tokens can be used as pointers) with the promise
+that {\it all the `hints' between the beginning of the corresponding
+stream and the point labeled by the current stream pointer appeared
+among the characters up to and, possibly, including the ones matched
+as the current token}. The macros to extract the relevant parts of the
+streams (\.{\\yyreadfifo} and its cousins) are provided for the
+convenience of the parser designer.
+
+The \.{\\yystash} stream collects all the typesetting commands inserted by
+\CWEB\ to be possibly used in displaying the action code in \bison\
+productions, for example. Because of this, it may appear in somewhat
+unexpected places, introducing spaces where the programmer did not
+necessarily intend them (at the beginning of a line, for example). To
+mitigate this problem, the \.{\\yystash} stream macros are implemented
+to be entirely invisible to the lexer. Making them produce spaces is
+also possible, and some examples are provided in \.{symbols.sty}.
+The interested reader can consult the input routine macros in
+\.{yyinput.sty} for the
+details of the internal representation of the streams.
+
+In the interest of full disclosure, let me point out that this simple
+technique introduces a significant strain on \TeX's
+computational resources: the lowest level macros, the ones that handle
+character input and are thus executed (sometimes multiple times) for
+{\it every\/} character in the input stream, are rather complicated and,
+therefore, slow. Whenever the use of such streams is not desired, a simpler
+input routine can be written to speed up the process (see
+\.{\\yyinputtrivial} for a working example of such a macro).
+
+Finally, while probably not directly related to the present
+discussion, this approach has one more interesting feature: after the
+parser is finished, the parser output and the streams exist
+`statically', fully available for any last minute preprocessing or for
+debugging purposes, if necessary\footnote{One may think of the parser output
+as an {\it executable abstract syntax tree (AST)}.}. Under most circumstances, the parser
+output is `executed' and the macros in the output are the ones reading
+the various streams using the pointers supplied at the parsing stage
+(at least, this is the case for all the parsers supplied with the
+package).
+
+@*1 Inside semantic actions: switch statements and `functions' in \TeX.
+So far we have looked at the lexer for your input and a grammar ready to be put into
+action (we will talk about actions in a moment). It is time to discuss
+how the tables produced by \bison\ get converted into \TeX\ {\it macros\/}
+that drive the parser in {\it \TeX}.
+
+The tables that drive the \bison\ input parsers
+are collected in \.{\{b,d,f,g,n\}yytab.tex} and \.{small\_tab.tex}. Each
+one of these files contains the tables that implement a specific parser
+used during different stages of processing.
+Their exact function is well explained
+in the source file produced by \bison\ ({\it how} this is done is
+detailed elsewhere, see \cite[Ah] for a good reference). It would
+suffice to mention here that there are three types of tables in this
+file: \recount{1}numerical tables such as \.{\\yytable} and
+\.{\\yycheck} (both are either \TeX's token registers in an
+unoptimized parser or associative arrays in an optimized version of such
+a parser, as discussed below),
+\recount{2}a string array \.{\\yytname}, and \recount{3}an action
+switch. The action switch is what gets called when the parser does a
+{\it reduction}. It is easy to notice that the numerical tables come
+`premade' whereas the string array consisting of token names
+is more difficult to recognize as such. This is intentional: this form of initialization
+is designed to allow the widest range of
+characters to appear inside names. The macros that do this reside in
+\.{yymisc.sty}. The generated table files also contain
+constant and token declarations used by the parser.
+
+The description of the process used to output \bison\ tables in an
+appropriate form continues in the section about
+\locallink{bsfile}outputting \TeX\ tables\endlink; here we pick it up
+with the description of the syntax-directed translation and the
+actions. The line
+$$
+\.{\\switchon\\next\\in\\currentswitch}
+$$
+is responsible for calling an appropriate action in the current
+switch, as is easy to infer. A {\it switch\/} is itself a macro that
+consists of strings of \TeX\ tokens intermixed with \TeX\ macros
+inside braces. Each group of macros
+gets executed whenever the character or the group of characters in
+\.{\\next} matches a substring preceding the braced group. If there
+are two different substrings
+that match, only the earliest group of macros gets expanded.
+Before a state is
+used, a special control sequence,
+\.{\\setspecialcharsfrom\\switchname}, can be used to put the \TeX\
+tokens in a form suitable for consumption by the \.{\\switchon} macros. The
+most important step it performs is {\it turning every token in the
+list into a character with the same character code and category
+12\/}. Thus \.{\\\{} becomes \.{\{}$_{12}$. There are other ways of
+inserting tokens into a state: enclosing a token or a string of tokens in
+\.{\\raw...\\raw} adds it to the state macro unchanged. If you have
+a sequence of category 12 characters you want to add to the state, put
+it after \.{\\classexpand} (such sequences are usually prepared by the
+\.{\\setspecialchars} macro that uses the token tables generated by
+\bison\ from your grammar).
+
+You can give a case a readable label (say, \.{brackets}) and enclose
+this label in \.{\\raw}$\ldots$\.{\\raw}. A word of caution: an `\.{a}'
+inside of \.{\\raw}$\ldots$\.{\\raw} (which is most likely an
+\.{a}$_{11}$ unless you played with the category codes before loading the
+\.{\\switchon} macros) and the one outside it are two different
+characters, as one is no longer a letter (category 11) in the eyes of
+\TeX\ whereas the other one still is. For this reason one should not
+use characters other than letters in h\.{\{}is\.{,}er\.{\}} state
+{\em names}: the way a state picks an action does not distinguish between,
+say, a `\.{(}' in `\.{(letter)}' and a standalone `\.{(}', and may
+pick an action that you did not intend\footnote{One way to mitigate
+this is by putting such named states at the end of the switch, {\em
+after\/} the actions labelled by the standalone characters.}.
+This applies even if `\.{(}'
+is not among the characters explicitly inserted in the state macro: if
+an action for a given character is not found in the state macro, the
+\.{\\switchon} macro will insert a current \.{\\default} action
+instead, which most often you would want to be \.{\\yylex} or
+\.{\\yyinput} (i.e.\ skip this token). If a single `\.{(}' or `\.{)}' matches
+the braced group that follows `\.{(letter)}' chaos may ensue (most
+likely \TeX\ will keep reading past the \.{\\end} or \.{\\yyeof} that
+should have terminated the input). Make the names of character
+categories as unique as possible: the \.{\\switchon} is simply a
+string matching mechanism, with the added differentiation between
+characters of different categories.
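+
+To make the mechanics above more concrete, here is a purely illustrative
+sketch (the state name, the label, and the actions below are invented for
+this example and do not come from any parser in the package):
+$$
+\.{\\def\\digitstate\{0123456789\{\\yylexreturnval\{INTEGER\}\}\\raw brackets\\raw\{\\yylexreturnchar\}\}}
+$$
+After \.{\\setspecialcharsfrom\\digitstate} turns the digits above into
+category 12 characters, the line \.{\\switchon\\next\\in\\digitstate} will
+execute \.{\\yylexreturnval\{INTEGER\}} whenever \.{\\next} holds a digit,
+\.{\\yylexreturnchar} whenever it holds the label \.{brackets}, and the
+current \.{\\default} action otherwise.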
+
+Finally, the construct \.{\\statecomment}{\it
+anything\/}\.{\\statecomment} allows you to insert comments in the
+state sequence (note that the state {\it name\/} is put at the
+beginning of the state macro (by \.{\\setspecialcharsfrom})
+in the form of a special control sequence
+that expands to nothing: this elaborate scheme is needed because
+another control sequence can be \.{\\let} to the state macro which
+makes the debugging information difficult to decipher). The debugging
+mode for the lexer implemented with these macros is activated by
+\.{\\tracedfatrue}.
+
+The functionality of the \.{\\switchon} macros (as well as that of
+\.{\\switchonwithtype}, which is capable of some rudimentary type checking;
+for `historical' reasons, \.{\\action} can be used as a synonym for the
+latter) has been implemented in a number of other macro packages (see
+\cite[Fi], which discusses the well-known and widely used \.{\\CASE} and
+\.{\\FIND} macros). The macros in this collection have the additional property
+that the only assignments that persist after the \.{\\switchon}
+completes are the ones performed by the user code inside the selected
+case.
+
+This last property of the switch macros is implemented using another
+mechanism that is part of this macro suite: the `subroutine-like'
+macros, \.{\\begingroup}$\ldots$\.{\\tokreturn}. For examples, an
+interested reader can take a look at the macros included with the
+package. A typical use is
+\.{\\begingroup}$\ldots$\.{\\tokreturn\{\}\{\\toks0 \}\{\}} which will
+preserve all the changes to \.{\\toks0} and have no other side effects
+(if, for example, in typical \TeX\ vernacular, \.{\\next} is used
+to implement tail recursion inside the group, after the
+\.{\\tokreturn}, \.{\\next} will still have the same value it
+had before the group was entered). This functionality comes at the
+expense of some computational efficiency.
+
+This covers most of the routine computations inside semantic actions,
+all that is left is a way to `tap' into the stack automaton
+built by \bison\ using an interface similar to the special
+\.{\$$n$} variables utilized by the `genuine' \bison\ parsers
+(i.e.\ written in \Cee\ or any other target language supported by
+\bison).
+
+This r\^ole is played by the several varieties of \.{\\yy$\,p$} command
+sequences (for the sake of completeness, $p$ stands for one of \.{($n$)},
+\.{[{\rm name}]}, \.{]{\rm name}[} or $n$, where $n$ is a
+string of digits, and a `name' is any name acceptable as a symbolic
+name for a term in \bison). Instead
+of going into the minutia of various flavors of \.{\\yy}-macros, let me
+just mention that one can get by with only two `idioms' and still
+be able to write parsers of arbitrary sophistication:
+\.{\\yy($n$)} can be treated as a token register containing the
+value of the $n$-th term of the rule's right hand side, $n>0$. The left
+hand side of a production is accessed through \.{\\yyval}. A
+convenient shortcut is \.{\\yy0\{{\rm \TeX\space material}\}} which
+will expand (as in \.{\\edef}) the `\TeX\ material' inside the braces. Thus, a simple way
+to concatenate the values of the first two production terms is
+\.{\\yy0\{\\the\\yy(1)\\the\\yy(2)\}}. The included \bison\
+parser can also be used to provide support for `symbolic names',
+analogous to \bison's \.{{\$}[{\rm name}]} but a
+bit more effort is required on the user's part to initialize such support.
+Using symbolic names can make the parser more readable and maintainable,
+however.
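+
+As an illustration, inside the \Cee\ part of a \CWEB\ section the
+concatenation idiom above would appear as (part of) a rule's action, typed as
+$$
+\.{TeX\_( "/yy0\{/the/yy(1)/the/yy(2)\}" );}
+$$
+using the `\.{/}' escape convention discussed below.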
+
+There is also a \.{\\bb$\,n$} macro, which has no analogue in the
+`real' \bison\ parsers, and provides access to the term
+values in the `natural order' (e.g.~\.{\\bb1} is the last term read). Its
+intended use is with the `inline' rules (see the main parser for
+such examples). As of version \.{3.0} \bison\ no longer outputs
+|yyrhs| and |yyprhs|, which makes it impossible to produce the
+|yyrthree| array necessary for processing such rules in the `left to right'
+order. One might also note that the new notation is better suited for
+the inline rules since the value that is pushed on the stack is that
+of \.{\\bb0}, i.e.~the term implicitly inserted by \bison. Be aware
+that there are no \.{\\bb[$\cdot$]} or \.{\\bb($\cdot$)} versions of
+these macros, for obvious reasons. A less obvious feature of these
+macros is their `nonexpandable' nature. This means they cannot be used
+inside \.{\\edef}. Thus, the most common use pattern is
+\.{\\bb$\,n$\{\\toks$\,m$\}} with a subsequent expansion of
+\.{\\toks$\,m$}. Making these macros expandable is certainly possible
+but does not seem crucial for the intended limited use pattern.
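+
+A minimal sketch of the pattern just described (the register number and the
+term index are, of course, arbitrary):
+$$
+\.{\\bb1\{\\toks0 \}\\yy0\{\\the\\toks0 \}}
+$$
+copies the value of the last term read into \.{\\toks0} and then uses its
+contents to form the value of the left hand side.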
+
+Naturally, a parser writer may need a number of other data
+abstractions to complete the task. Since these are highly dependent on
+the nature of the processing the parser is supposed to provide, we
+refer the interested reader to the parsers included in the package as
+a source of examples of such specialized data structures.
+
+One last remark about the parser operation is worth making here:
+the parser automaton itself does not make any \.{\\global}
+assignments. This (along with some careful semantic action writing)
+can be used to `localize' the effects of the parser operation and,
+most importantly, to create `reentrant' parsers that can, e.g.\ call
+{\it themselves\/} recursively.
+
+@*1 `Optimization'.
+\namedspot{optimization}By default, the generated parser and scanner keep all of their tables
+in separate token registers. Each stack is kept in a single macro (this
+description is further complicated by the support for parser {\it
+namespaces\/} that exists even for unoptimized parsers, but this
+subtlety will not be mentioned again---see the macros in the package
+for further details). Thus, every time a table
+is accessed, it has to be expanded, making the table access latency
+linear in {\it the size of the table}. The same holds for stacks and
+the action `switches', of
+course. While keeping the parser tables (which are immutable) in token
+registers does not have any better rationale than saving the control
+sequence memory (the most abundant memory in \TeX), this way of
+storing {\it stacks} does have an advantage when multiple parsers get
+to play simultaneously. All one has to do to switch from one parser to
+another is to save the state by renaming the stack control sequences.
+
+When the parser and scanner are `optimized', all these control
+sequences are `spread over' appropriate associative arrays. One caveat
+to be aware of: the action switches for both the parser and the scanner
+have to be output differently (a command line option is used to
+control this) for optimized and unoptimized parsers. While it is
+certainly possible to optimize only some of the parsers (if your
+document uses more than one) or even only some {\it parts\/} of a given
+parser (or scanner), the details of how to do this are rather
+technical and are left for the reader to discover by reading the
+examples supplied with the package. At least at the beginning it is
+easier to simply set the highest optimization level and use it
+consistently throughout the document.
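+
+For the record, the examples included with the package select the regime by
+setting the optimization level before the main macro file is loaded, i.e.\ by
+placing a line such as
+$$
+\.{\\def\\optimization\{5\}}
+$$
+near the top of the file, before \.{yy.sty} is input; the examples that keep
+the stacks unoptimized (such as \.{expression.sty}) simply comment out the
+optimization related lines.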
+
+@*1 {\it \TeX\/} with a different {\sl slant} or do you C an escape?.
+Some \TeX\ productions below probably look like alien script.
+The authors of \cite[Er] cite a number of reasons to view pretty printing of
+\TeX\ in general as a nearly impossible task. The macros included with
+the package follow a very straightforward strategy and do not try to
+be very comprehensive. Instead, the burden of presenting \TeX\ code in
+a readable form is placed on the programmer. Appropriate hints can be
+supplied by means of indenting the code, using assignments ($=$) where
+appropriate, etc. If you would rather look at straight \TeX\
+instead, the line \.{\\def\\texnspace\{other\}} at the beginning of
+this section can be uncommented and
+{\let\writetexidxentry\writetextxtidxentry
+|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );|} becomes {%
+\def\texnspace{other}%
+\def\texispace{other}% for the index
+\let\writetexidxentry\writetextxtidxentry % for the index appearance
+|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );|}.
+There is, however, more to this story. A look at the actual file will
+reveal that the line above was typed as
+$$
+\.{TeX\_( "/noexpand/inmath\{/yy0\{/yy1\{\}\}\}" );}
+$$
+The `escape character' is leaning the other way!
+The lore of \TeX\ is uncompromising: `\.{\\}' is {\it the\/} escape
+character. What is the reason to avoid it in this case?
+
+The mystery is not very deep: `\.{/}' was chosen as an escape character
+by the parser macros (a quick glance at \.{?yytab.tex} will reveal as
+much). There is, of course, nothing sacred (other than tradition,
+which this author is trying his hardest to follow) about what character code
+the escape character has. The reason to look for an alternative is straightforward: `\.{\\}' is
+a special character in \Cee, as well (also an `escape', in fact). The line
+\.{TeX\_( "..." );} is a {\it macro-call\/} but $\ldots$ in \Cee. This
+function simply prints out (almost `as-is') the line in
+parenthesis. An attempt at \.{TeX\_( "\\noexpand" );} would result in
+\numberlinestrue
+\begindemo
+^
+^oexpand
+\enddemo
+\numberlinesfalse
+Other escape combinations\footnote{Here is a full list of {\it
+defined\/} escaped characters in \Cee: \.{\\a}, \.{\\b}, \.{\\f}, \.{\\n},
+\.{\\r}, \.{\\t}, \.{\\v}, \.{\\}{$[$\it octal digit\/$]$}, \.{\\'},
+\.{\\"}, \.{\\?}, \.{\\\\}, \.{\\x}, \.{\\u}, \.{\\U}. Note that the
+last three combinations must be followed by a specific string of
+characters to appear in the input without generating errors.} are
+even worse: most are simply undefined. If anyone feels trapped without
+an escape, however, the same line can be typed as
+$$
+\.{TeX\_( "\\\\noexpand\\\\inmath\{\\\\yy0\{\\\\yy1\{\}\}\}" );}
+$$
+Twice the escape!
+
+If one were to look even closer at the code, another oddity stands
+out: there are no \.{\$}'s anywhere in sight.
+The big money, \.{\$}, is a beloved character in
+\bison. It is used in action code to reference the values of the
+appropriate terms in a production. If mathematics pays your bills, use
+\.{\\inmath} instead.
@i bo.x
@i lo.x
+@i fo.x
+@i so.x
@i np.x
@i common.w
@i bs.w
@i fk.w
@i philosophy.w
+@i checklists.w
@i references.w
-\let\N\oldN
\let\hostparsernamespace\mainnamespace % to typeset examples in the text
% properly
@**Index. This section is, perhaps, the most valuable product of
\CWEB's labors. It lists references to definitions (set in {\it
italic}) as well as uses for each \Cee\ identifier used in the
-source. Special facilities have been added to extend indexing to
-\bison\ grammar terms and \TeX\ control sequences encountered in
+source. Special facilities have been added to extend the indexing to
+\bison\ grammar terms, \flex\ regular expression names and state names, as well as
+\flex\ options,
+and \TeX\ control sequences encountered in
\bison\ actions. Definitions of tokens (via \prodstyle{\%token},
\prodstyle{\%nterm} and \prodstyle{\%type} directives) are
%$\underline{\hbox{underlined}}$
@@ -64,7 +1845,7 @@ appeared on the left hand side of a production. A production:
}%
\beginmprod
left_hand_side:
- term.1 term.2 term.3 \{\stashed{|TeX_("/dosomething/yy(1)");|}\}
+ term.1 term.2 term.3 \{\stashed{|TeX_("/do/something/yy(1)");|}\}
\endmprod
inside the \TeX\ part of a \CWEB\ section will generate several
index entries, as well, including the entries for any
@@ -82,21 +1863,51 @@ program). The same applies to the way the index is constructed (it
would be easy to only use the `string' name of the token if
available, thus avoiding referencing the same token twice).
-\TeX\ control sequences are listed following the index of all \bison\
-entries. The two indices are separated by a {\it dinkus}
+\TeX\ control sequences are listed following the index of all \bison\ and \flex\
+entries. The different sections of the index are separated by a {\it dinkus\/}
(\dinkus). Since it is nearly impossible to determine at what point a
\TeX\ macro is defined (and most of them are defined outside of the
\CWEB\ sources), only their uses are listed (to be more precise, {\it
every\/} appearance of a macro is assumed to be its use). In a few cases, a
-`graphic' representation for a control sequence is also listed (for
-example, {\termindexfalse\def\texnspace{texline}\inlineTeXx{/getfirst}} represents
-{\termindexfalse\def\texnspace{other}\inlineTeXx{/getfirst}}). The index entries are ordered alphabetically. The
+`graphic' representation for a control sequence appears in the index (for
+example, {\let\writetexidxentry\writetextxtidxentry
+\def\texnspace{texline}\def\texispace{index}\inlineTeXx{/getfirst}} represents
+{\let\writetexidxentry\writetextxtidxentry
+\def\texnspace{other}\def\texispace{other}\inlineTeXx{/getfirst}$\!$}).
+The index entries are ordered alphabetically. The
latter may not be entirely obvious in the cases when the `graphical
representation' of the corresponding token manifests a significant
-departure from its string version (such as |TeX_("/yy(1)");|
-instead of {\def\texnspace{other}|TeX_("/yy(1)");|}).
+departure from its string version (such as
+{\let\writetexidxentry\writetextxtidxentry|TeX_("/yy(1)");|}
+instead of {\def\texnspace{other}\def\texispace{other}%
+\let\writetexidxentry\writetextxtidxentry
+|TeX_("/yy(1)");|$\!$}). Incidentally, for the examples on this page
+(as well an example in the section about \TeX\ pretty-printing) both
+the `graphic' as well as `text' versions of the control sequence are
+indexed. It is instructive to verify that their location in the index
+corresponds to the `spelling' of their visual representation (thus,
+{\let\writetexidxentry\writetextxtidxentry
+\def\texnspace{texline}\def\texispace{index}\inlineTeXx{/getfirst}}
+appears under `p'). One should also be aware that the indexing of some terms has
+been suppressed, since they appear too often.
+\unsetfootnotes % this way the footnote stream is not affected by the extra index pages
+% TODO: explain the visibility system. Note the anomalous order of \prodstyle{term.1}
+% vs.~\prodstyle{term0} due to the dot in \.{term.1}, which is otherwise invisible. Underscore the
+% importance of following a consistent naming scheme, including the `stringized' versions
+% of token names.
+@q Include the list of index section markers; this is a hack to get around @>
+@q the lack of control over the generation of \CWEB's index; the correct order @>
+@q of index entries depends on the placement of this inclusion @>
+@i alphas.hx
+
\closeout\gindex
+\termindexfalse % do not attach indexing entries to the terms in the index, or the section list
+\def\next{\expandafter\eatone\string}
+\edef\unindexable{{\next\the}{\next\nx}{\next\yy}{\next\yylexnext}{\next\else}{\next\fi}{\next\yyBEGIN}{\next\next}}
+\input gindex.sty
+%\indexverbosetrue
\let\inx\inxmod
\let\fin\finmod
-\displaytokenrawtrue
+\lxrefseparator % start generating section lists
+%\displaytokenrawtrue % so that the `nonstringified' version of the token is displayed as well
\def\topofcontents{\null\vskip-3\baselineskip\centerline{C{\sc ONTENTS} (\sc\uppercase\expandafter{\title})}\medskip}
diff --git a/support/splint/cweb/ssffo.w b/support/splint/cweb/ssffo.w
index 56e710b8c6..315bffc798 100644
--- a/support/splint/cweb/ssffo.w
+++ b/support/splint/cweb/ssffo.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2015, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -16,18 +16,21 @@
\input limbo.sty
\def\optimization{5}
\input yy.sty
-
+\modenormal
\let\oldN\N
\let\N\textN
\let\M\textM
-\def\hostparsernamespace{[unreacheable]}
+% uncomment the next line to leave the typesetting of tokens in its raw state.
+%\def\hostparsernamespace{[unreacheable]}
+\let\hostparsernamespace\mainnamespace
+\input btokenset.sty
@** A simple scanner for \flex\ options.
This is a `bare-bones' scanner for a subset of the `extended' \bison\
-grammar that parses, well, some of the `extensions', namely, the
+grammar that parses some of the `extensions', namely, the
\flex\ state declarations. It does not use the state mechanism
itself, and is supposed to be used with the bootstrapping parser, even
-though it is not strictly necessary. It parses state declarations as
+though this is not strictly necessary. It parses state declarations as
long as they are separated into their own \CWEB\ sections and extracts
the {\it names\/} of the states. The \flex\ scanner output `driver'
does the rest after including the produced header file.
@@ -57,7 +60,7 @@ something that should not have been. Making the syntax and the grammar
more restrictive helps to acheive this, as well as makes the overall
design simpler.
@<Lexer definitions@>=
-@G
+@G(fs1)
letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ]
id {letter}({letter}|[-0-9])*
@g
@@ -89,13 +92,15 @@ of tokens recognized by this routine is not very wide.
@ White space skipping.
@<Scan white space@>=
-@G
+@G(fs2)
[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=}
@g
-@ The rest of it are either identifiers or \.{\%}-options.
+@ The rest of the input consists of either identifiers or \.{\%}-options. The typesetting of
+the appropriate \bison\ tokens below was arranged by inputting \.{btokenset.sty} and
+relying on the pretty-printing macros for {\it \TeX} to do their job.
@<Scan identifiers@>=
-@G
+@G(fs2)
{id} {@> @<Return an identifier@> @=}
"%x" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_X}" );@] @=}
"%s" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_S}" );@] @=}
@@ -110,7 +115,7 @@ of tokens recognized by this routine is not very wide.
@ The lexer returns standard \.{\\yyunion} types.
@<Return an identifier@>=
- @[TeX_( "/edef/next{/yylval{/nx/idit{/the/yytextpure}{/the/yytext}" );@]@;
+ @[TeX_( "/edef/next{/yylval{{/the/yytextpure}{/the/yytext}" );@]@;
@[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
@[TeX_( "/yylexreturn{ID}" );@]@;
diff --git a/support/splint/doc/ldman.pdf b/support/splint/doc/ldman.pdf
index 22a07ae754..d1d798816a 100644
--- a/support/splint/doc/ldman.pdf
+++ b/support/splint/doc/ldman.pdf
Binary files differ
diff --git a/support/splint/doc/splint.pdf b/support/splint/doc/splint.pdf
index 101984caa1..76600ea682 100644
--- a/support/splint/doc/splint.pdf
+++ b/support/splint/doc/splint.pdf
Binary files differ
diff --git a/support/splint/examples/count/count.sty b/support/splint/examples/count/count.sty
index bb8998e64f..d9fed05f94 100644
--- a/support/splint/examples/count/count.sty
+++ b/support/splint/examples/count/count.sty
@@ -1,3 +1,19 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
% the sequences in this file have the eventual goal of implementing macros that count and compare token sequences
% (as either parameters or contents of token registers) in expandable manner.
% currently, a new sequence is prepared first, consisting of `markers'
diff --git a/support/splint/examples/expression/Makefile b/support/splint/examples/expression/Makefile
index 0e2f33c2ba..6f306939b6 100644
--- a/support/splint/examples/expression/Makefile
+++ b/support/splint/examples/expression/Makefile
@@ -1,11 +1,25 @@
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
# this Makefile uses a flat directory structure for demonstration
# purposes; the main directory (../..) contains a slightly more
# modular organization.
-SPLINT_ROOT = $(shell pwd)/../..
-
-include ${SPLINT_ROOT}/makefile.inc
-include ${SPLINT_ROOT}/makefile.loc
+include ../../makefile.inc
+include ${SPLINT_ROOT}makefile.loc
# rules specific to this example
@@ -18,13 +32,13 @@ step1: expp.y expl.l
step2: ptabout ltabout
step3: ltab.tex ptab.tex
-ptabout: ${SPLINT_ROOT}/cweb/mkeparser.c ${PARSER}.c
- ${CC} -DPARSER_FILE=\"../examples/expression/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
+ptabout: ${SPLINT_ROOT}cweb/mkeparser.c ${PARSER}.c
+ ${CC} ${BISON_STATE} -DPARSER_FILE=\"../examples/expression/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
ptab.tex: ptabout
$< --optimize-actions $@
-ltabout: ${SPLINT_ROOT}/cweb/mkscanner.c ${LEXER}.c
+ltabout: ${SPLINT_ROOT}cweb/mkscanner.c ${LEXER}.c
${CC} -DLEXER_FILE=\"../examples/expression/$(lastword $^)\" -o $@ $<
ltab.tex: ltabout
@@ -40,7 +54,7 @@ test.tex ${PARSER}.yy ${LEXER}.ll: expression.x
expression.tex: expression.x
@${CWEAVE} $<
-expression.pdf: expression.tex ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}/cweb/bo.tok etoks.sty
+expression.pdf: expression.tex ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}cweb/bo.tok ${SPLINT_ROOT}cweb/fo.tok etoks.sty
@${PDFTEX} $<
test: test.tex ptab.tex ltab.tex
diff --git a/support/splint/examples/expression/etoks.sty b/support/splint/examples/expression/etoks.sty
index 7326245338..706ad35818 100644
--- a/support/splint/examples/expression/etoks.sty
+++ b/support/splint/examples/expression/etoks.sty
@@ -1,3 +1,19 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
\prettywordpair{IDENTIFIER}{{$\langle$\rm identifier$\rangle$}}
\prettywordpair{INTEGER}{{\bf int}}
\prettywordpair{BOGUS}{{\tt Oh, @\%\$\&*!}}
diff --git a/support/splint/examples/expression/expression.sty b/support/splint/examples/expression/expression.sty
index 9f9195aafb..ac0d8ba7e5 100644
--- a/support/splint/examples/expression/expression.sty
+++ b/support/splint/examples/expression/expression.sty
@@ -1,25 +1,47 @@
-\def\optimization{5}
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+% this example uses the unoptimized version of the stack for testing purposes;
+% it does not affect the performance much; if the optimized stack is desired,
+% the optimization related code below can be uncommented
+
+%\def\optimization{5}
+
+\input trt1.sty % \TeX\ `runtime': temporary register definitions
\input yycommon.sty % general routines for stack and array access
\input yymisc.sty % helper macros (stack manipulation, table processing, value stack pointers)
+ % parser initialization, optimization
\input yyinput.sty % input functions
\input yyparse.sty % parser machinery
\input flex.sty % lexer functions
-\input yyfaststack.sty % sped up stack access functions
-\input yyboth.sty % parser initializatio, optimization
+%\input yyfaststack.sty % sped up stack access functions
\let\yylexreturn\yylexreturnregular
\let\setflexstates\relax
\let\parsernamespace\empty
\genericparser
- name: main,
+ name: emain,
ptables: ptab.tex,
ltables: ltab.tex,
tokens: {},
asetup: {},
dsetup: {},
rsetup: {},
- optimization: \optimizeall;%
-
-\tomainparser
+ optimization: %\optimizeall
+ ;
+
+\toemainparser
diff --git a/support/splint/examples/expression/expression.w b/support/splint/examples/expression/expression.w
index c775741184..9e96ea44d0 100644
--- a/support/splint/examples/expression/expression.w
+++ b/support/splint/examples/expression/expression.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2014, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -16,8 +16,9 @@
\input limbo.sty
\def\optimization{5}
\input yy.sty
+\modenormal
-@** Parser file. This an example parser for expressions. It takes
+@** Parser file. This is an example parser for expressions. It takes
advantage of some of the features of \splint\ generated parsers,
although anything that takes more than a straightforward setup is
omitted.
@@ -25,11 +26,11 @@ omitted.
The top-level structure of the input file presents no surprises and is
presented below.
\let\currentparsernamespace\parsernamespace
- \let\parsernamespace\smallnamespace
- \let\hostparsernamespace\smallnamespace
+ \def\parsernamespace{[edisplay]}
+ \def\hostparsernamespace{[edisplay]}
\input etoks.sty
\let\parsernamespace\currentparsernamespace
-\def\texnspace{[other]}
+\def\texnspace{[other]}% no pretty printing of \TeX
@s TeX_ TeX
@(expp.yy@>=
@@ -160,7 +161,7 @@ appropriate values.
%%
@ @<Lexer definitions@>=
-@G
+@G(fs1)
letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ]
id {letter}({letter}|[-0-9])*
int [0-9]+
@@ -188,30 +189,13 @@ int [0-9]+
@<Scan identifiers@>@;
@ White space skipping.
-\traceparserstatestrue
-\tracestackstrue
-\tracerulestrue
-\traceactionstrue
-\tracelookaheadtrue
-\traceparseresultstrue
-\tracebadcharstrue
-\yyflexdebugtrue
-%
-\traceparserstatesfalse
-\tracestacksfalse
-\tracerulesfalse
-\traceactionsfalse
-\tracelookaheadfalse
-\traceparseresultsfalse
-\tracebadcharsfalse
-\yyflexdebugfalse
@<Scan white space@>=
-@G
+@G(fs2)
[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=}
@g
@ @<Scan identifiers@>=
-@G
+@G(fs2)
{id} {@> @[TeX_( "/yylexreturnval{IDENTIFIER}" );@]@=}
{int} {@> @[TeX_( "/yylexreturnval{INTEGER}" );@]@=}
[+*()] {@> @[TeX_( "/yylexreturnchar" );@]@=}
diff --git a/support/splint/examples/ld/Makefile b/support/splint/examples/ld/Makefile
index a23f191b9c..24bf4a7a9f 100644
--- a/support/splint/examples/ld/Makefile
+++ b/support/splint/examples/ld/Makefile
@@ -1,30 +1,44 @@
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
# this Makefile uses a flat directory structure for demonstration
# purposes; the main directory (../..) contains a slightly more
# modular organization.
-SPLINT_ROOT = $(shell pwd)/../..
-
PARSER = ldp
LEXER = ldl
MANUAL = ldman
-LD_CORE_PREREQS = ${SPLINT_XTEXSTYLES} ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}/cweb/bo.tok
-LD_DOC_PREREQS = %.tex %.sty ${PARSER}.tok ldunion.sty ldint.sty ldfrontmatter.sty ${LD_CORE_PREREQS} ptab.tex ltab.tex ld_small_tab.tex
-LD_DOC_PREREQS += ld_small_dfa.tex ltokenset.sty lstokenset.sty
+LD_CORE_PREREQS = ${SPLINT_XTEXSTYLES} ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}cweb/bo.tok ${SPLINT_ROOT}cweb/fo.tok
+LD_DOC_PREREQS = %.tex %.sty ${PARSER}.tok ldunion.sty ldtexlex.sty ldint.sty ldfrontmatter.sty ${LD_CORE_PREREQS}
+LD_DOC_PREREQS += ptab.tex ltab.tex ld_small_tab.tex ld_small_dfa.tex ltokenset.sty lstokenset.sty
LD_DOC_PREREQS_XREF = ${LD_DOC_PREREQS} %.scn %.idx
-include ${SPLINT_ROOT}/makefile.inc
-include ${SPLINT_ROOT}/makefile.loc
+include ../../makefile.inc
+include ${SPLINT_ROOT}makefile.loc
# rules specific to this example
-ptabout: ${SPLINT_ROOT}/cweb/mkeparser.c ${PARSER}.c
- ${CC} -DPARSER_FILE=\"../examples/ld/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
+ptabout: ${SPLINT_ROOT}cweb/mkeparser.c ${PARSER}.c
+ ${CC} ${BISON_STATE} -DPARSER_FILE=\"../examples/ld/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
ptab.tex: ptabout
$< --optimize-actions --optimize-tables $@
-ltabout: ${SPLINT_ROOT}/cweb/mkscanner.c ldl_states.h ${LEXER}.c
+ltabout: ${SPLINT_ROOT}cweb/mkscanner.c ldl_states.h ${LEXER}.c
${CC} -DLEXER_FILE=\"../examples/ld/$(lastword $^)\" -o $@ $<
ltab.tex: ltabout
@@ -44,10 +58,10 @@ ${MANUAL}.stx: ${MANUAL}.x ldgram.x ldlex.x ldlexo.x ldgramo.x ldnp.x
# term name parser for ld grammar
-ldsmallp_out: ${SPLINT_ROOT}/cweb/mkeparser.c ld_small_parser.c
+ldsmallp_out: ${SPLINT_ROOT}cweb/mkeparser.c ld_small_parser.c
${CC} ${BISON_STATE} -DPARSER_FILE=\"../examples/ld/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
-ldsmalll_out: ${SPLINT_ROOT}/cweb/mkscanner.c ld_small_lexer.c
+ldsmalll_out: ${SPLINT_ROOT}cweb/mkscanner.c ld_small_lexer.c
${CC} -DLEXER_FILE=\"../examples/ld/$(lastword $^)\" -o $@ $<
ld_small_tab.tex: ldsmallp_out
@@ -62,10 +76,10 @@ ld_small_lexer.ll: ldnp.x
# numeric parser for ld grammar
-ldnump_out: ${SPLINT_ROOT}/cweb/mkeparser.c ld_num_parser.c
+ldnump_out: ${SPLINT_ROOT}cweb/mkeparser.c ld_num_parser.c
${CC} ${BISON_STATE} -DPARSER_FILE=\"../examples/ld/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
-ldnuml_out: ${SPLINT_ROOT}/cweb/mkscanner.c ld_num_lexer.c
+ldnuml_out: ${SPLINT_ROOT}cweb/mkscanner.c ld_num_lexer.c
${CC} -DLEXER_FILE=\"../examples/ld/$(lastword $^)\" -o $@ $<
ld_num_tab.tex: ldnump_out
@@ -78,8 +92,17 @@ ld_num_parser.yy \
ld_num_lexer.ll: ldnump.x
@${CTANGLE} $<
-${MANUAL}.tex: ${MANUAL}.x ldgram.x ldlex.x ldlexo.x ldgramo.x ldnp.x
- ${CWEAVE} $<
+ldexample_%.hx: ldexample.hw
+ echo $(patsubst ldexample_%.hx, "@G(%)", $@) >> $(patsubst %.hx, %.hy, $@)
+ cat $< >> $(patsubst %.hx, %.hy, $@)
+ echo "@g" >> $(patsubst %.hx, %.hy, $@)
+ ${BRACK} $(patsubst %.hx, %.hy, $@) $@
+
+alphas.hx:
+ ${MISCCW} --alpha-list --alpha-length=1 $@
+
+${MANUAL}.tex: ${MANUAL}.x ldgram.x ldlex.x ldlexo.x ldgramo.x ldnp.x ldexample_l.hx ldexample_b.hx alphas.hx
+ -${CWEAVE} $<
${MANUAL}.sty: ${MANUAL}.stx
${UNLINE} $< $@
@@ -88,12 +111,16 @@ ${MANUAL}.gdx: %.gdx: ${LD_DOC_PREREQS_XREF} ${PARSER}.tok
@echo "Making the bison and TeX indices ..."
${TEX} $*.tex
-${MANUAL}.pdf: %.pdf: ${LD_DOC_PREREQS_XREF} %.gdy
- ${PDFTEX} $< && touch $*.gdy && touch $*.pdf
+%.gdy: %.gdx
+
+${MANUAL}.gdy: %.gdy: %.gdx
+ ${BINDX} --fine $^ $@
-#${PARSER}.tok \
-#ldl_states.h: ${MANUAL}.tex ${MANUAL}.sty ${LD_CORE_PREREQS}
-# ${TEX} ${MODEBOOTSTRAP} \\input $<
+${MANUAL}.xxr: %.xxr: %.tex
+ ${PDFTEX} $*.tex
+
+${MANUAL}.pdf: %.pdf: ${LD_DOC_PREREQS_XREF} %.gdy %.xxr
+ ${PDFTEX} $< && touch $*.gdy && touch $*.pdf
${PARSER}.tok: ldgram.tex ${MANUAL}.sty ${LD_CORE_PREREQS}
${TEX} ${MODEBOOTSTRAP} \\input $<
@@ -103,7 +130,7 @@ ldl_states.h: ldlex.tex ${MANUAL}.sty ${LD_CORE_PREREQS}
ldgram.tex ldlex.tex: \
%.tex: %.x
- ${CWEAVE} -x $<
+ -${CWEAVE} -x $<
docs: ${MANUAL}.pdf
@@ -115,4 +142,3 @@ clean: clean_core
distclean: clean
cd ${SPLINT_ROOT} && ${MAKE} clean
-
diff --git a/support/splint/examples/ld/ldexample.hw b/support/splint/examples/ld/ldexample.hw
new file mode 100644
index 0000000000..a5cab97d2a
--- /dev/null
+++ b/support/splint/examples/ld/ldexample.hw
@@ -0,0 +1,77 @@
+INCLUDE file.ld
+
+MEMORY
+{
+ @> @<Some random portion of \ld\ code@> @=
+ RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 20K
+ FLASH (rx) : ORIGIN = 0x8000000, LENGTH = 128K
+ ASH (rx) : ORIGIN = 8001000, LENGTH = 128K
+ @> @<Some random portion of \ld\ code@> @=
+ CLASH (rx) : ORIGIN = 700000, LENGTH = 128K
+ ASH (rx) : ORIGIN = $8000000, LENGTH = 128K
+ CLASH (rx) : ORIGIN = 700000B, LENGTH = 128K
+ INCLUDE file.mem
+ @> @<Some random portion of \ld\ code@> @=
+
+}
+
+_estack = 0x20005000;
+_bstack = a > 0 ? NEXT(11) : 0x19;
+ @> @<Some random portion of \ld\ code@> @=
+PROVIDE( var1 = . );
+PROVIDE_HIDDEN( var2 = . );
+ @> @<Some random portion of \ld\ code@> @=
+HIDDEN( var3 = . );
+ENTRY(_entry);
+
+SECTIONS
+{
+ @> @<Some random portion of \ld\ code@> @=
+ .isr_vector ALIGN(8) (NOLOAD): AT(.) ALIGN(.) ALIGN_WITH_INPUT SUBALIGN(8) SPECIAL
+ {
+ . = ALIGN(4);
+ KEEP(*(.isr_vector))
+ . = ALIGN(4);
+ } > FLASH AT > RAM : FLASH : RAM : OTHER = . + 8
+ @> @<Some random portion of \ld\ code@> @=
+ .text :
+ {
+ /* skip this comment */;
+ . = ALIGN(4);
+ *(.text)
+ *(.text.*)
+ *(.rodata)
+ *(.rodata*)
+ *(.glue_7)
+ *(.glue_7t)
+ . = ALIGN(4);
+ _etext = . + 8;
+ _sidata = _etext;
+ PROVIDE( var1 = . );
+ PROVIDE_HIDDEN( var2 = . );
+ HIDDEN( var3 = . );
+ } >FLASH AT > RAM
+
+ @> @<Some random portion of \ld\ code@> @=
+ .data : AT ( _sidata )
+ {
+ . = ALIGN(4);
+ _sdata = . ;
+ *(.data)
+ *(.data.*)
+ . = ALIGN(4);
+ _edata = . ;
+ } >RAM
+
+ .bss :
+ {
+ . = ALIGN(4);
+ _sbss = .;
+ *(.bss)
+ *(COMMON)
+ . = ALIGN(4);
+ _ebss = . ;
+ } >RAM
+ @> @<Some random portion of \ld\ code@> @=
+
+}
diff --git a/support/splint/examples/ld/ldgram.w b/support/splint/examples/ld/ldgram.w
index 4fb9a9a876..5e7ff73a03 100644
--- a/support/splint/examples/ld/ldgram.w
+++ b/support/splint/examples/ld/ldgram.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2015 Alexander Shibakov@>
+@q Copyright 2012-2020 Alexander Shibakov@>
@q Copyright 2002-2014 Free Software Foundation, Inc.@>
@q This file is part of SPLinT@>
@@ -16,10 +16,10 @@
@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
@** The parser.
-\ifx\parsernamespace\UNDEFINED
+\ifbootstrapmode
\def\tokendeffile{ldp.tok}%
\input ldman.sty
- \input limbo.sty
+ \modebootstrap
\input dcols.sty
\setupfootnotes
\def\MRI{}
@@ -334,20 +334,23 @@ script commands.
@G
script_file:
{@>@[TeX_( "/ldlex@@both" );@]@=}
- ifile_list {@>@[TeX_( "/getsecond{/yy(2)}/to/ldcmds/ldlex@@popstate" );@]@=}
+ ifile_list {@>@[TeX_( "/getfifth{/yy(2)}/to/ldcmds/ldlex@@popstate" );@]@=}
;
ifile_list:
ifile_list ifile_p1 {@>@<Add the next command@>@=}
- | {@>@[TeX_( "/yy0{{}{}}" );@]@=}
+ | {@>@[TeX_( "/yy0{/nx/ldinsertcweb{}{}{}{}}" );@]@=}
;
@g
@ @<Add the next command@>=
- @[TeX_( "/getfirst{/yy(1)}/to/toksa/getsecond{/yy(1)}/to/toksb" );@]@;
- @[TeX_( "/getfirst{/yy(2)}/to/toksc/getsecond{/yy(2)}/to/toksd" );@]@;
- @[TeXb( "/yytoksempty{/toksb}{/yy0{/the/yy(2)}}" );@]@;
- @[TeXao( "{/yy0{{/the/toksc}{/the/toksb/noexpand/ldcommandseparator{/the/toksa}{/the/toksc}/the/toksd}}}" );@]@;
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/getfourth{/yy(1)}/to/toksc/getfifth{/yy(1)}/to/toksd" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/tokse/getthird{/yy(2)}/to/toksf" );@]@;
+ @[TeX_( "/getfourth{/yy(2)}/to/toksg/getfifth{/yy(2)}/to/toksh" );@]@;
+ @[TeXb( "/yytoksempty{/toksh}{/yy0{/the/yy(1)}}{/yytoksempty{/toksd}{/yy0{/the/yy(2)}}" );@]@;
+ @[TeXf( " {/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{/the/toksg}{/the/toksd" );@]@;
+ @[TeXfo( " /nx/ldcommandseparator{/the/tokse}{/the/toksf}{/the/toksc}{/the/toksg}/the/toksh}}}}" );@]@;
@*1 Script internals.
There are a number of different commands. For typesetting purposes,
@@ -377,16 +380,16 @@ the rule that adds the fragment to the script file.
@<\GNU\ \ld\ script rules@>=
@G
ifile_p1:
- memory {@>@[TeX_( "/yy0{{mem}{/the/yy(1)}}" );@]@=}
- | sections {@>@[TeX_( "/yy0{{sect}{/the/yy(1)}}" );@]@=}
+ memory {@>@<Carry on@>@=}
+ | sections {@>@<Carry on@>@=}
| phdrs
| startup
| high_level_library
| low_level_library
| floating_point_support
- | statement_anywhere {@>@[TeX_( "/yy0{{stmt}{/noexpand/ldstatement{/the/yy(1)}}}" );@]@=}
+ | statement_anywhere {@>@<Carry on@>@=}
| version
- | ';' {@>@[TeX_( "/yy0{{none}{}}" );@]@=}
+ | ';' {@>@[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){none}{}}" );@]@=}
| TARGET_K '(' NAME ')' {}
| SEARCH_DIR '(' filename ')' {}
| OUTPUT '(' filename ')' {}
@@ -401,7 +404,7 @@ ifile_p1:
'(' input_list ')' {}
| MAP '(' filename ')' {}
| INCLUDE filename {@>@<Peek at a file@>@=}
- ifile_list END {@>@<Add an \prodstyle{INCLUDE} statement@>@=}
+ ifile_list END {@>@<Close the file@>@=}
| NOCROSSREFS '('
nocrossref_list ')' {}
| EXTERN '(' extern_name_list ')'
@@ -427,30 +430,39 @@ input_list:
;
sections:
- SECTIONS '{' sec_or_group_p1 '}' {@>@[TeX_( "/yy0{/noexpand/ldsections{/the/yy(3)}}" );@]@=}
+ SECTIONS '{' sec_or_group_p1 '}' {@>@<Form the \prodstyle{SECTIONS} group@>@=}
;
sec_or_group_p1:
- sec_or_group_p1 section {@>@<Add the next section@>@=}
- | sec_or_group_p1 statement_anywhere {@>@<Add the next statement@>@=}
+ sec_or_group_p1 section {@>@<Add the next section chunk@>@=}
+ | sec_or_group_p1 statement_anywhere {@>@<Add the next section chunk@>@=}
| {@>@[TeX_( "/yy0{}" );@]@=}
;
statement_anywhere:
- ENTRY '(' NAME ')' {}
- | assignment end {@>@<Carry on@>@=}
+ ENTRY '(' NAME ')' {@>@<Form an \prodstyle{ENTRY} statement@>@=}
+ | assignment end {@>@<Form a statement@>@=}
| ASSERT_K {@>@[TeX_( "/ldlex@@expression" );@]@=}
'(' exp ',' NAME ')' {@>@[TeX_( "/ldlex@@popstate" );@]@=}
;
@g
-@ @<Add the next section@>=
- @[TeXb( "/yytoksempty{/yy(1)}{/yy0{/the/yy(2)}}" );@]@;
- @[TeXao( "{/yy0{/the/yy(1)/noexpand/ldsectionseparator/the/yy(2)}}" );@]@;
+@ @<Form the \prodstyle{SECTIONS} group@>=
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){sect}{/nx/ldsections{/the/yy(3)/nx/ldsectionstash/the/yy(4)}}}" );@]
-@ @<Add the next statement@>=
- @[TeXb( "/yytoksempty{/yy(1)}{/yy0{/the/yy(2)}}" );@]@;
- @[TeXao( "{/yy0{/the/yy(1)/noexpand/ldsectionseparator/ldstatement{/the/yy(2)}}}" );@]@;
+@ @<Add the next section chunk@>=
+ @[TeX_( "/getsecond{/yy(2)}/to/tokse/getthird{/yy(2)}/to/toksf" );@]@;
+ @[TeX_( "/getfourth{/yy(2)}/to/toksg/getfifth{/yy(2)}/to/toksh" );@]@;
+ @[TeXb( "/yytoksempty{/yy(1)}{/yy0{/nx/ldsectionstash{/the/tokse}{/the/toksf}/the/toksh}}" );@]@;
+ @[TeXfo( " {/yy0{/the/yy(1)/nx/ldsectionseparator{/the/tokse}{/the/toksf}/the/toksh}}" );@]@;
+
+@ @<Form an \prodstyle{ENTRY} statement@>=
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){stmt}{/nx/ldstatement{/nx/ldentry{/nx/ldregexp{/the/yy(3)}}}}}" );@]
+
+@ @<Form a statement@>=
+ @[TeXb( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" ); @]@;
+ @[TeXf( "/getfourth{/yy(1)}/to/toksc/getfifth{/yy(1)}/to/toksd" );@]@;
+ @[TeXfo( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{stmt}{/nx/ldstatement{/the/toksd}}}" );@]
@ This is the default action performed by the parser when the parser
writer does not supply one. For a minor gain in efficiency, this
@@ -463,28 +475,25 @@ definition can be made empty.
@[TeX_( "/ldfile@@open@@command@@file{/yy(2)}" );@]@;
@ @<Close the file@>=
- @[TeX_( "/yy0{/noexpand/ldinclude{/the/yy(2)}}/ldlex@@popstate" );@]@;
-
-@ @<Add an \prodstyle{INCLUDE} statement@>=
- @[TeX_( "/yy0{{inc}{/noexpand/ldinclude{/the/yy(2)}}}/ldlex@@popstate" );@]@;
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){inc}{/nx/ldinclude{/the/yy(2)}}}/ldlex@@popstate" );@]@;
@ \tracebadcharstrue
-{\it The \prodstyle{'*'} and \prodstyle{'?'} cases are there because the lexer returns them as
+{\it \setrulecontext{wildcard_name}The \prodstyle{'*'} and \prodstyle{'?'} cases are there because the lexer returns them as
separate tokens rather than as \prodstyle{NAME}.}
\tracebadcharsfalse
@<Grammar rules@>=
@G
wildcard_name:
- NAME {@>@<Carry on@>@=}
- | '*' {@>@[TeX_( "/yy0{{*}{*}}" );@]@=}
- | '?' {@>@[TeX_( "/yy0{{?}{?}}" );@]@=}
+ NAME {@>@<Create a wildcard name@>@=}
+ | '*' {@>@[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){wld}{/nx/ldregop{{*}{*}/the/yy(1)}}}" );@]@=}
+ | '?' {@>@[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){wld}{/nx/ldregop{{?}{?}/the/yy(1)}}}" );@]@=}
;
@g
@ @<Grammar rules@>=
@G
wildcard_spec:
- wildcard_name {}
+ wildcard_name {@>@<Carry on@>@=}
| EXCLUDE_FILE '(' exclude_name_list ')' wildcard_name
{}
| SORT_BY_NAME '(' wildcard_name ')' {}
@@ -525,8 +534,8 @@ exclude_name_list:
;
file_NAME_list:
- file_NAME_list opt_comma wildcard_spec {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/ldspace/noexpand/ldregexp{/the/yy(2)}}" );@]@=}
- | wildcard_spec {@>@[TeX_( "/yy0{/noexpand/ldregexp{/the/yy(1)}}" );@]@=}
+ file_NAME_list opt_comma wildcard_spec {@>@<Add a wildcard spec to a list of files@>@=}
+ | wildcard_spec {@>@<Start a file list with a wildcard spec@>@=}
;
input_section_spec_no_keep:
@@ -534,51 +543,83 @@ input_section_spec_no_keep:
| sect_flags NAME {}
| '[' file_NAME_list ']' {}
| sect_flags '[' file_NAME_list ']' {}
- | wildcard_spec '(' file_NAME_list ')' {@>@[TeX_( "/yy0{/noexpand/ldregexp{/the/yy(1)}(/the/yy(3))}" );@]@=}
+ | wildcard_spec '(' file_NAME_list ')' {@>@<Add a plain section spec@>@=}
| sect_flags wildcard_spec '(' file_NAME_list ')'
{}
;
@g
+@ @<Create a wildcard name@>=
+ @[TeXf( "/getthird{/yy(1)}/to/toksa/getfourth{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{wld}{/nx/ldregexp{/the/yy(1)}}}" );@]
+
+@ @<Add a wildcard spec to a list of files@>=
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/getfourth{/yy(1)}/to/toksc/getfifth{/yy(1)}/to/toksd" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/tokse/getthird{/yy(2)}/to/toksf" );@]@;
+ @[TeX_( "/getfourth{/yy(2)}/to/toksg/getfifth{/yy(2)}/to/toksh" );@]@;
+ @[TeX_( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{flst}{/the/toksd/nx/ldspace/the/toksh}}" );@]
+
+@ @<Start a file list with a wildcard spec@>=
+ @<Carry on@>@;
+
+@ @<Add a plain section spec@>=
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/getfourth{/yy(1)}/to/toksc/getfifth{/yy(1)}/to/toksd" );@]@;
+ @[TeX_( "/getfifth{/yy(3)}/to/toksh" );@]@;
+ @[TeX_( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{sspec}{/the/toksd(/the/toksh)}}" );@]
+
@ @<Grammar rules@>=
@G
input_section_spec:
input_section_spec_no_keep {@>@<Carry on@>@=}
| KEEP '(' {}
- input_section_spec_no_keep ')' {@>@[TeX_( "/yy0{/mathop{/hbox{/noexpand/ttl keep}(/the/yy(4))}}" );@]@=}
+ input_section_spec_no_keep ')' {@>@<Add a \prodstyle{KEEP} statement@>@=}
;
statement:
- assignment end
+ assignment end {@>@<Form a statement@>@=}
| CREATE_OBJECT_SYMBOLS {}
- | ';' {@>@[TeX_( "/yy0{}" );@]@=}
+ | ';' {@>@[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){stmt}{}}" );@]@=}
| CONSTRUCTORS {}
| SORT_BY_NAME '(' CONSTRUCTORS ')' {}
- | input_section_spec
+ | input_section_spec {@>@<Form an input section spec@>@=}
| length '(' mustbe_exp ')' {}
| FILL '(' fill_exp ')' {}
| ASSERT_K {@>@[TeX_( "/ldlex@@expression" );@]@=}
'(' exp ',' NAME ')' end {@>@[TeX_( "/ldlex@@popstate" );@]@=}
| INCLUDE filename {@>@<Peek at a file@>@=}
- statement_list_opt END {@>@<Close the file@>@=}
+ statement_list_opt END {@>@<Close the file@>@>@=}
;
statement_list:
statement_list statement {@>@<Attach a statement to a statement list@>@=}
- | statement {@>@<Start a statement list with a statement@>@=}
+ | statement {@>@<Carry on@>@=}
;
statement_list_opt:
- {@>@[TeX_( "/yy0{}" );@]@=}
+ {@>@[TeX_( "/yy0{/nx/insertcweb{}{}{stmt}{}}" );@]@=}
| statement_list {@>@<Carry on@>@=}
;
@g
-@ @<Attach a statement to a statement list@>=
- @[TeX_( "/yy0{/the/yy(1)/yytoksempty{/yy(2)}{}{/yytoksempty{/yy(1)}{}{/noexpand/ldor}{/the/yy(2)}}}" );@]
+@ @<Form an input section spec@>=
+ @[TeXb( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" ); @]@;
+ @[TeXf( "/getfourth{/yy(1)}/to/toksc/getfifth{/yy(1)}/to/toksd" );@]@;
+ @[TeXfo( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{stmt}{/nx/ldsecspec{/the/toksd}}}" );@]
-@ @<Start a statement list with a statement@>=
- @[TeX_( "/yy0{/yytoksempty{/yy(1)}{}{{/the/yy(1)}}}" );@]
+@ @<Add a \prodstyle{KEEP} statement@>=
+ @[TeX_( "/getfifth{/yy(4)}/to/toksa" );@]@;
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){stmt}{/nx/ldkeep{/the/toksa}}}" );@]
+
+@ @<Attach a statement to a statement list@>=
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/getfourth{/yy(1)}/to/toksc/getfifth{/yy(1)}/to/toksd" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/tokse/getthird{/yy(2)}/to/toksf" );@]@;
+ @[TeX_( "/getfourth{/yy(2)}/to/toksg/getfifth{/yy(2)}/to/toksh" );@]@;
+ @[TeXb( "/yytoksempty{/toksd}{/yy0{/the/yy(2)}}" );@]@;
+ @[TeXf( " {/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{/the/toksg}{/the/toksd" );@]@;
+ @[TeXfo( " /yytoksempty{/toksh}{}{/yytoksempty{/toksd}{}{/nx/ldor}/the/toksh}}}}" );@]@;
@ @<Grammar rules@>=
@G
@@ -627,22 +668,33 @@ opt_comma: ',' |
assignment:
NAME '=' mustbe_exp {@>@<Process simple assignment@>@=}
| NAME assign_op mustbe_exp {@>@<Process compound assignment@>@=}
- | HIDDEN '(' NAME '=' mustbe_exp ')' {}
- | PROVIDE '(' NAME '=' mustbe_exp ')' {}
- | PROVIDE_HIDDEN '(' NAME '=' mustbe_exp ')' {}
+ | HIDDEN '(' NAME '=' mustbe_exp ')' {@>@<Process a \prodstyle{HIDDEN} assignment@>@=}
+ | PROVIDE '(' NAME '=' mustbe_exp ')' {@>@<Process a \prodstyle{PROVIDE} assignment@>@=}
+ | PROVIDE_HIDDEN '(' NAME '=' mustbe_exp ')' {@>@<Process a \prodstyle{PROVIDE\_HIDDEN} assignment@>@=}
;
@g
@ @<Process simple assignment@>=
- @[TeX_( "/yy0{/noexpand/ldassignment{/noexpand/ldregexp{/the/yy(1)}}{/K}{/the/yy(3)}}" );@]
+ @[TeXb( "/getthird{/yy(1)}/to/toksa/getfourth{/yy(1)}/to/toksb" );@]@;
+ @[TeXfo( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{asgnm}{/nx/ldassignment{/nx/ldregexp{/the/yy(1)}}{/K}{/the/yy(3)}}}" );@]
@ @<Process compound assignment@>=
- @[TeX_( "/yy0{/noexpand/ldassignment{/noexpand/ldregexp{/the/yy(1)}}{/the/yy(2)}{/the/yy(3)}}" );@]
+ @[TeXb( "/getthird{/yy(1)}/to/toksa/getfourth{/yy(1)}/to/toksb" );@]@;
+ @[TeXfo( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{asgnm}{/nx/ldassignment{/nx/ldregexp{/the/yy(1)}}{/the/yy(2)}{/the/yy(3)}}}" );@]
+
+@ @<Process a \prodstyle{HIDDEN} assignment@>=
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){hiddn}{/nx/ldhidden{/nx/ldregexp{/the/yy(3)}}{/the/yy(5)}}}" );@]
+
+@ @<Process a \prodstyle{PROVIDE} assignment@>=
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){prvde}{/nx/ldprovide{/nx/ldregexp{/the/yy(3)}}{/the/yy(5)}}}" );@]
+
+@ @<Process a \prodstyle{PROVIDE\_HIDDEN} assignment@>=
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){prhid}{/nx/ldprovidehid{/nx/ldregexp{/the/yy(3)}}{/the/yy(5)}}}" );@]
@ @<Grammar rules@>=
@G
memory:
- MEMORY '{' memory_spec_list_opt '}' {@>@[TeX_( "/yy0{/noexpand/ldmemory{/the/yy(3)}}" );@]@=}
+ MEMORY '{' memory_spec_list_opt '}' {@>@<Form the \prodstyle{MEMORY} group@>@=}
;
memory_spec_list_opt:
@@ -651,8 +703,8 @@ memory_spec_list_opt:
;
memory_spec_list:
- memory_spec_list opt_comma memory_spec {@>@[TeX_( "/yy0{/the/yy(1)/the/yy(3)}" );@]@=}
- | memory_spec {@>@[TeX_( "/yy0{/the/yy(1)}" );@]@=}
+ memory_spec_list opt_comma memory_spec {@>@<Add a memory spec@>@=}
+ | memory_spec {@>@<Start a list of memory specs@>@=}
;
@@ -661,12 +713,30 @@ memory_spec:
attributes_opt ':'
origin_spec
@t}\vb{\breakline}{@>
- opt_comma length_spec {@>@[TeX_( "/yy0{/noexpand/ldmemoryspec{/the/yy(1)}{/the/yy(3)}{/the/yy(5)}{/the/yy(7)}}" );@]@=}
+ opt_comma length_spec {@>@<Declare a named memory region@>@=}
| INCLUDE filename {@>@<Peek at a file@>@=}
memory_spec_list_opt END {@>@<Close the file@>@=}
;
@g
+@ @<Form the \prodstyle{MEMORY} group@>=
+ @[TeX_( "/yy0{/nx/ldinsertcweb/the/yy(1){mem}{/nx/ldmemory{/the/yy(3)/nx/ldmemspecstash/the/yy(4)}}}" );@]
+
+@ @<Start a list of memory specs@>=
+ @[TeXb( "/getsecond{/yy(1)}/to/toksa/getthird{/yy(1)}/to/toksb" ); @]@;
+ @[TeXf( "/getfifth{/yy(1)}/to/toksc" );@]@;
+ @[TeXfo( "/yy0{/nx/ldmemspecstash{/the/toksa}{/the/toksb}/the/toksc}" );@]@;
+
+@ @<Add a memory spec@>=
+ @[TeXb( "/getsecond{/yy(3)}/to/toksa/getthird{/yy(3)}/to/toksb" ); @]@;
+ @[TeXf( "/getfifth{/yy(3)}/to/toksc" );@]@;
+ @[TeXfo( "/yy0{/the/yy(1)/nx/ldmemspecseparator{/the/toksa}{/the/toksb}/the/toksc}" );@]@;
+
+@ @<Declare a named memory region@>=
+ @[TeXb( "/getthird{/yy(1)}/to/toksa/getfourth{/yy(1)}/to/toksb" );@]@;
+ @[TeXf( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{mreg}" );@]@;
+ @[TeXfo( " {/nx/ldmemoryspec{/the/yy(1)}{/the/yy(3)}{/the/yy(5)}{/the/yy(7)}}}" );@]
+
@ @<Grammar rules@>=
@G
origin_spec:
@@ -735,7 +805,7 @@ mustbe_exp:
;
@g
-@*1 {\ifheader\ninepoint\fi\prodstyle{SECTIONS}} and expressions.
+@*1 {\ifheader\ninepoint\fi\prodstylens{SECTIONS}{\ldnamespace}} and expressions.
The linker supports an extensive range of expressions. The precedence
mechanism provided by \bison\ is used to present the composition of
expressions out of simpler chunks and basic building blocks tied
@@ -790,7 +860,7 @@ exp :
| DATA_SEGMENT_END '(' exp ')' {}
| SEGMENT_START '(' NAME ',' exp ')' {}
| BLOCK '(' exp ')' {}
- | NAME {@>@[TeX_( "/yy0{/noexpand/ldregexp{/the/yy(1)}}" );@]@=}
+ | NAME {@>@[TeX_( "/yy0{/nx/ldregexp{/the/yy(1)}}" );@]@=}
| MAX_K '(' exp ',' exp ')' {}
| MIN_K '(' exp ',' exp ')' {}
| ASSERT_K '(' exp ',' NAME ')' {}
@@ -802,8 +872,8 @@ exp :
@ @<Process a primitive conditional@>=
@q TeX_( "/yy0{/hbox{/nx/ttl let }/nx/xi(0)=/the/yy(5): /nx/xi(/nx/CM0)=/the/yy(3)/hbox{ /nx/ttl do }/xi(/the/yy(1))}" );@>
-@[TeX_( "/yy0{/hbox{/nx/ttl do }/xi(/the/yy(1))/hbox{ /nx/ttl where }" );@>
-@[TeX_( " {/let/nx/{/nx/bigbracedel/nx/xi(x)=/nx/cases{/the/yy(5)& if /inmath{x=0}/cr/the/yy(3)& if /inmath{x/nx/not=0}}}}" );@]
+@[TeXb( "/yy0{/hbox{/nx/ttl do }/xi(/the/yy(1))/hbox{ /nx/ttl where }" );@]@;
+@[TeXfo( " {/let/nx/{/nx/bigbracedel/nx/xi(x)=/nx/cases{/the/yy(5)& if /inmath{x=0}/cr/the/yy(3)& if /inmath{x/nx/not=0}}}}" );@]@;
@ @<Grammar rules@>=
@G
@@ -880,7 +950,7 @@ section:
opt_comma {@>@<Record an overlay section@>@=}
| GROUP {@>@[TeX_( "/ldlex@@expression" );@]@=}
opt_exp_with_type {@>@[TeX_( "/ldlex@@popstate" );@]@=}
- '{' sec_or_group_p1 '}'
+ '{' sec_or_group_p1 '}' {}
| INCLUDE filename {@>@<Peek at a file@>@=}
sec_or_group_p1 END
{@>@<Close the file@>@=}
@@ -888,10 +958,12 @@ section:
@g
@ @<Record a named section@>=
- @[TeXb( "/yy0{/nx/ldnamedsection{/the/yy(1)}{/the/yy(3)}{/the/yy(4)}" );@]@;
+ @[TeXb( "/getthird{/yy(1)}/to/toksa/getfourth{/yy(1)}/to/toksb" );@]@;
+ @[TeXf( "/getfifth{/yy(12)}/to/toksc" );@]@;/* \prodstylens{statement\_list\_opt}{\ldnamespace} contents */
+ @[TeXf( "/yy0{/nx/ldinsertcweb{/the/toksa}{/the/toksb}{osect}{/nx/ldnamedsection{/the/yy(1)}{/the/yy(3)}{/the/yy(4)}" );@]@;
@[TeXf( " {{/the/yy(5)}{/the/yy(6)}{/the/yy(7)}}" );@]@;/* alignment */
- @[TeXf( " {/the/yy(9)}{/the/yy(12)}" );@]@;
- @[TeXfo( " {{/the/yy(15)}{/the/yy(16)}{/the/yy(17)}{/the/yy(18)}}}" );@]@; /*memory specifiers */
+ @[TeXf( " {/the/yy(9)}{/the/toksc}" );@]@;
+ @[TeXfo( " {{/the/yy(15)}{/the/yy(16)}{/the/yy(17)}{/the/yy(18)}}}}" );@]@; /*memory specifiers */
@ @<Record an overlay section@>=
diff --git a/support/splint/examples/ld/ldgramo.w b/support/splint/examples/ld/ldgramo.w
index 3069bb71eb..46d46f9a1f 100644
--- a/support/splint/examples/ld/ldgramo.w
+++ b/support/splint/examples/ld/ldgramo.w
@@ -1,4 +1,4 @@
-@q Copyright 2015 Alexander Shibakov@>
+@q Copyright 2020 Alexander Shibakov@>
@q Copyright 2002-2015 Free Software Foundation, Inc.@>
@q This file is part of SPLinT@>
@@ -504,7 +504,7 @@ statement_anywhere:
@g
@ \tracebadcharstrue
-The \prodstyle{'*'} and \prodstyle{'?'} cases are there because the lexer returns them as
+The {\setrulecontext{wildcardname}\prodstyle{'*'}} and \prodstyle{'?'} cases are there because the lexer returns them as
separate tokens rather than as \prodstyle{NAME}.
\tracebadcharsfalse
@<Original \ld\ grammar rules@>=
diff --git a/support/splint/examples/ld/ldint.sty b/support/splint/examples/ld/ldint.sty
index 478bdef17d..83f3068dc5 100644
--- a/support/splint/examples/ld/ldint.sty
+++ b/support/splint/examples/ld/ldint.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -175,7 +175,7 @@
}
\def\ldsciinteger#1{%
- \ifcase\intprefix{#1}
+ \ifcase\intprefix{#1}%
% decimal number (no prefix)
\lddecsplitws{#1}{}%
\or % hex number (0X)
@@ -203,11 +203,23 @@
}
\def\displayinteger#1#2#3#4#5#6#7#8{%
- {\def\ldintsep{$\,$}\hbox{\tt#7#8${}_{#1}\yystringempty{#2}{}{\,\hbox{\tt#2}}$}}%
+ {\def\ldintsep{$\,$}\hbox{\rm#7#8\rlap{${}_{#1}$}$\yystringempty{#2}{}{\lddisplayintsuffix{#2}}$}}%
}
+\def\lddisplayintsuffix#1{%
+ \expandafter\ifx\csname ldspecialsuffixdisplay#1\endcsname\relax
+ \,\hbox{\tt#1}%
+ \else
+ \csname ldspecialsuffixdisplay#1\endcsname
+ \fi
+}
+
+\def\ldspecialsuffixdisplayK{{}\cdot2^{10}}
+
+\def\ldspecialsuffixdisplayK{\,\hbox{\rm Kb}}
+
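The \.{\\csname ldspecialsuffixdisplay...\\endcsname} dispatch above makes further
suffixes easy to add. As a usage sketch only (the \.{M} variant below is hypothetical
and not part of this change), a definition mirroring the power-of-two form of the
first \.{K} definition would be picked up automatically by \.{\\lddisplayintsuffix}:

    % illustrative only: display an `M' (mega) suffix as a factor of 2^{20};
    % \lddisplayintsuffix finds it via \csname ldspecialsuffixdisplayM\endcsname
    \def\ldspecialsuffixdisplayM{{}\cdot2^{20}}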
\def\displayintegerws#1#2#3#4#5#6#7{%
- {\def\ldintsep{$\,$}\hbox{\tt#6#7${}_{\yystringempty{#1}{}{\csname ldradix#1\endcsname}}$}}%
+ {\def\ldintsep{$\,$}\hbox{\rm#6#7\rlap{${}_{\yystringempty{#1}{}{\csname ldradix#1\endcsname}}$}}}%
}
% typeseting examples in text
@@ -234,6 +246,7 @@
\let\acharswitch\texcharadjust
\let\onecharswitch\texcsadjust
\let\yyinputgroup\yyinputldgroup
+ \expandafter\hidecs\expandafter{\ldunion} % inhibit expansion so that fewer \noexpand are necessary
\toldparser
\ldparserinit
\yyparse#1\yyeof\yyeof\endparseinput\endparse
@@ -242,9 +255,13 @@
\else % Stage three, process the parsed table
\yybreak{%
{%
+ \restorecslist{ld-parser:restash}\ldunion % extract the stash and mark lhs of assignments
+ \setprodtable % use \bison's parser typesetting definitions
+ \the\ldcmds
\restorecslist{ld-display}\ldunion
+ \setprodtable % use \bison's parser typesetting definitions
+ \restorecs{ld-display}{\anint\bint\hexint} % ... except for integer typesetting
\the\ldcmds
-% \par
\the\lddisplay
}%
}%
diff --git a/support/splint/examples/ld/ldlex.w b/support/splint/examples/ld/ldlex.w
index 9d1c256e62..2312fd1027 100644
--- a/support/splint/examples/ld/ldlex.w
+++ b/support/splint/examples/ld/ldlex.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2015 Alexander Shibakov@>
+@q Copyright 2012-2020 Alexander Shibakov@>
@q Copyright 2002-2014 Free Software Foundation, Inc.@>
@q This file is part of SPLinT@>
@@ -16,9 +16,9 @@
@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
@** The lexer.
-\ifx\parsernamespace\UNDEFINED
+\ifbootstrapmode
\input ldman.sty
- \input limbo.sty
+ \modebootstrap
\input dcols.sty
\setupfootnotes
\def\MRI{}
@@ -65,13 +65,13 @@ void define_all_states( void ) {
#include "ldl_states.h"
#undef _register_name
-@ \yyskipparsetrue The character classes used by the scanner as well as
+@ The character classes used by the scanner as well as
lexer state declarations have been put in the definitions section of
the input file. No attempt has been made to clean up the definitions
of the character classes.
@<\ld\ lexer definitions@>=
@<\ld\ lexer states@>@;
-@G
+@G(fs1)
CMDFILENAMECHAR [_a-zA-Z0-9\/\.\\_\+\$\:\[\]\\\,\=\&\!\<\>\-\~]
CMDFILENAMECHAR1 [_a-zA-Z0-9\/\.\\_\+\$\:\[\]\\\,\=\&\!\<\>\~]
FILENAMECHAR1 [_a-zA-Z\/\.\\\$\_\~]
@@ -93,7 +93,6 @@ very rarely used explicitly. Keeping all the state declarations in the
same section simplifies the job of the
\locallink{bootstrapstates}bootstrap parser\endlink, as well.
\ifbootstrapmode\immediate\openout\stlist=ldl_states.h\fi
-\yyskipparsefalse
@<\ld\ lexer states@>=
@G
%s SCRIPT
@@ -109,8 +108,8 @@ same section simplifies the job of the
@*1 Macros for lexer functions.
The \locallink{pingpong}state switching\endlink\ `ping-pong' between the lexer and the parser aside,
the \ld\ lexer is very traditional. One implementation choice
-deserving some attenion is the treatment of comments by the lexer. The
-difficulty of implementing \Cee\ style comment lexing using regular
+deserving some attention is the treatment of comments. The
+difficulty of implementing \Cee\ style comment scanning using regular
expressions is well-known so an often used alternative is a
special function that simply skips to the end of the comment. This is
exactly what the \ld\ lexer does with an aptly named |comment()|
@@ -119,7 +118,7 @@ the same purpose. For the curious, here is a \flex\ style regular
expression defining \Cee\ comments\footnote{Taken from W.~McKeeman's site
at
\url{http://www.cs.dartmouth.edu/~mckeeman/cs118/assignments/comment.html}
-and adopted to \flex\ syntax.}:
+and adapted to \flex\ syntax.}:
$$
\hbox{\.{"/*" ("/"\yl[\^*/]\yl"*"+[\^*/])* "*"+ "/"}}
$$
@@ -136,7 +135,7 @@ setup of the lexer and enters a `|while| loop' in the input
routine. This macro is a reasonable approximation of the functionality
provided by |comment()|.
@<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
\def\ldcomment{%
\let\oldyyreturn\yyreturn
\let\oldyylextail\yylextail
@@ -147,7 +146,7 @@ provided by |comment()|.
@ The rest of the |while| loop merely waits for the \.{*/} combination.
@<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
\def\ldcommentskipchars{%
\ifnum\yycp@@=`*
\yybreak{\let\yyreturn\ldcommentseekslash\yyinput}%
@@ -174,7 +173,7 @@ provided by |comment()|.
@ Once the end of the comment has been found, resume lexing the input
stream.
@<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
\def\ldcommentfinish{%
\let\yyreturn\oldyyreturn
\let\yylextail\oldyylextail
@@ -195,7 +194,7 @@ an `optimized' lexer the restriction is even weaker, namely,
\.{\\yylextail} merely has to be absent in the portion of the action
following \.{\\ldcomment}).
@<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
\def\ldcomment#1\yylextail{%
\let\oldyyreturn\yyreturn
\def\yylexcontinuation{#1\yylextail}%
@@ -224,7 +223,7 @@ appropriate end of file marker for the lexer (a double
has to be cleaned up from the input stream (the lexer is designed to
leave it intact). The macros below are designed to handle this assignment.
@<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
\def\ldcleanyyeof#1\yylextail{%
\let\oldyyinput\yyinput
\def\yyinput\yyeof\yyeof{\let\yyinput\oldyyinput#1\yylextail}%
@@ -240,11 +239,11 @@ sections that follow.
Variable names and algebraic operations come first.
@<\ld\ token regular expressions@>=
-@G
+@G(fs2)
<BOTH,SCRIPT,EXPRESSION,VERS_START,VERS_NODE,VERS_SCRIPT>"/*" {@> @[TeX_( "/ldcomment/yylexnext" );@]@=}
<DEFSYMEXP>"-" {@> @[TeX_( "/yylexreturnchar" );@]@=}
<DEFSYMEXP>"+" {@> @[TeX_( "/yylexreturnchar" );@]@=}
-<DEFSYMEXP>{FILENAMECHAR1}{SYMBOLCHARN}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=}
+<DEFSYMEXP>{FILENAMECHAR1}{SYMBOLCHARN}* {@> @[TeX_( "/yylexreturnsym{NAME}" );@]@=}
<DEFSYMEXP>"=" {@> @[TeX_( "/yylexreturnchar" );@]@=}
<MRI,EXPRESSION>"$"([0-9A-Fa-f])+ {@> @<Return an absolute hex constant@> @=}
<MRI,EXPRESSION>([0-9A-Fa-f])+(H|h|X|x|B|b|O|o|D|d) {@> @<Return a constant in a specific radix@>@=}
@@ -294,7 +293,7 @@ Variable names and algebraic operations come first.
inside script files. File name syntax is listed as well, along with
miscellanea such as whitespace and version symbols.
@<\ld\ token regular expressions@>=
-@G
+@G(fs2)
<BOTH,SCRIPT>"MEMORY" {@> @[TeX_( "/yylexreturnptr{MEMORY}" );@]@=}
<BOTH,SCRIPT>"REGION_ALIAS" {@> @[TeX_( "/yylexreturnptr{REGION_ALIAS}" );@]@=}
<BOTH,SCRIPT>"LD_FEATURE" {@> @[TeX_( "/yylexreturnptr{LD_FEATURE}" );@]@=}
@@ -420,11 +419,11 @@ miscellanea such as whitespace and version symbols.
<MRI>"list".* {@> @[TeX_( "/yylexreturnptr{LIST}" );@]@=}
<MRI>"sect" {@> @[TeX_( "/yylexreturnptr{SECT}" );@]@=}
<EXPRESSION,BOTH,SCRIPT,MRI>"absolute" {@> @[TeX_( "/yylexreturnptr{ABSOLUTE}" );@]@=}
-<MRI>{FILENAMECHAR1}{NOCFILENAMECHAR}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=}
-<BOTH>{FILENAMECHAR1}{FILENAMECHAR}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=}
-<BOTH>"-l"{FILENAMECHAR}+ {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=}
-<EXPRESSION>{FILENAMECHAR1}{NOCFILENAMECHAR}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=}
-<EXPRESSION>"-l"{NOCFILENAMECHAR}+ {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=}
+<MRI>{FILENAMECHAR1}{NOCFILENAMECHAR}* {@> @[TeX_( "/yylexreturnsym{NAME}" );@]@=}
+<BOTH>{FILENAMECHAR1}{FILENAMECHAR}* {@> @[TeX_( "/yylexreturnsym{NAME}" );@]@=}
+<BOTH>"-l"{FILENAMECHAR}+ {@> @[TeX_( "/yylexreturnsym{NAME}" );@]@=}
+<EXPRESSION>{FILENAMECHAR1}{NOCFILENAMECHAR}* {@> @[TeX_( "/yylexreturnsym{NAME}" );@]@=}
+<EXPRESSION>"-l"{NOCFILENAMECHAR}+ {@> @[TeX_( "/yylexreturnsym{NAME}" );@]@=}
<SCRIPT>{WILDCHAR}* {@> @[@<Skip a possible comment and return a \prodstyle{NAME}@>@]@=}
<EXPRESSION,BOTH,SCRIPT,VERS_NODE>"\""[^\"]*"\"" {@> @[@<Return the \prodstyle{NAME} inside quotes@>@]@=}
<BOTH,SCRIPT,EXPRESSION>"\n" {@> @[TeX_( "/yylexnext" );@]@=}
@@ -457,7 +456,7 @@ macros are looking for a \.{\$} suffix while the contents of
@[TeX_( "/yylexreturn{INT}" );@]@;
@ @<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
@=\def\matchcomment@@#1/*#2\yyeof#3#4{%@>@;
\yystringempty{#1}{#3}{#4}%
}
@@ -480,28 +479,28 @@ try again.}
@<Skip a possible comment and return a \prodstyle{NAME}@>=
@[TeX_( "/matchcomment/yytextpure" );@]@;
@[TeX_( " {/yyless/tw@@/ldcomment}" );@]/*matched the beginning of a comment*/@;
- @[TeX_( " {/yylexreturnval{NAME}}" );@]@;
+ @[TeX_( " {/yylexreturnsym{NAME}}" );@]@;
@ {\it No matter the state, quotes give what's inside.}
@<Return the \prodstyle{NAME} inside quotes@>=
- @[TeX_( "/ldstripquotes/yylexreturnval{NAME}" );@]@;
+ @[TeX_( "/ldstripquotes/yylexreturnsym{NAME}" );@]@;
@ @<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
\newcount\versnodenesting
\newcount\includestackptr
@g
@ Some syntax specific to version scripts.
@<\ld\ token regular expressions@>=
-@G
+@G(fs2)
<VERS_SCRIPT>"{" {@> @[TeX_( "/yyBEGIN{VERS_NODE}/versnodenesting=/z@@/yylexreturnchar" );@]@=}
<VERS_SCRIPT>"}" {@> @[TeX_( "/yylexreturnchar" );@]@=}
<VERS_NODE>"{" {@> @[TeX_( "/advance/versnodenesting/@@ne /yylexreturnchar" );@]@=}
-<VERS_NODE>"}" {@> @[TeX_( "/advance/versnodenesting/m@@ne" );@]@;
- @> @[TeX_( "/ifnum/versnodenesting</z@@" );@]@;
- @> @[TeX_( " /yyBEGIN{VERS_SCRIPT}" );@]@;
- @> @[TeX_( "/fi" );@]@;
+<VERS_NODE>"}" {@> @[TeX_( "/advance/versnodenesting/m@@ne" );@]@=
+ @> @[TeX_( "/ifnum/versnodenesting</z@@" );@]@=
+ @> @[TeX_( " /yyBEGIN{VERS_SCRIPT}" );@]@=
+ @> @[TeX_( "/fi" );@]@=
@> @[TeX_( "/yylexreturnchar" );@]@=}
<VERS_START,VERS_NODE,VERS_SCRIPT>[\n] {@> @[TeX_( "/yylexnext" );@]@=}
<VERS_START,VERS_NODE,VERS_SCRIPT>#.* {@> @[TeX_( "/yylexnext" );@]@=}
@@ -509,12 +508,8 @@ try again.}
<<EOF>> {@> @[@<Process the end of (possibly included) file@>@]@=}
-<SCRIPT,MRI,VERS_START,VERS_SCRIPT,VERS_NODE>. {@> @[TeX_( "/yycomplain{bad character `/the/yytext'" );@]
- @> @[TeX_( " in script}" );@]
- @> @[TeX_( "/yyerrterminate" );@]@=}
-<EXPRESSION,DEFSYMEXP,BOTH>. {@> @[TeX_( "/yycomplain{bad character `/the/yytext'" );@]
- @> @[TeX_( " in expression}" );@]@=
- @> @[TeX_( "/yyerrterminate" );@]@=}
+<SCRIPT,MRI,VERS_START,VERS_SCRIPT,VERS_NODE>. {@> @[TeX_( "/yyfatal{bad character `/the/yytext' in script}" );@]@=}
+<EXPRESSION,DEFSYMEXP,BOTH>. {@> @[TeX_( "/yyfatal{bad character `/the/yytext' in expression}" );@]@=}
@g
@ @<Process the end of (possibly included) file@>=
@@ -529,7 +524,7 @@ try again.}
\namedspot{stateswitchers}Here are the long promised auxiliary
macros for switching lexer states and handling file input.
@<Additional macros for the \ld\ lexer/parser@>=
-@G
+@G(t)
\def\ldlex@@script{\yypushstate{SCRIPT}}
\def\ldlex@@mri@@script{\yypushstate{MRI}}
\def\ldlex@@version@@script{\yypushstate{VERS_START}}
@@ -541,7 +536,7 @@ macros for switching lexer states and handling file input.
\def\ldfile@@open@@command@@file#1{%
\advance\includestackptr\@@ne
- \appendl\yytextseen{\noexpand\yyeof\noexpand\yyeof}%
+ \appendlnx\yytext@@seen{\yyeof\yyeof}%
\yytextbackuptrue
}
diff --git a/support/splint/examples/ld/ldlexo.w b/support/splint/examples/ld/ldlexo.w
index 6b62d2fd77..a1c5d62bb0 100644
--- a/support/splint/examples/ld/ldlexo.w
+++ b/support/splint/examples/ld/ldlexo.w
@@ -1,4 +1,4 @@
-@q Copyright 2015 Alexander Shibakov@>
+@q Copyright 2015-2020 Alexander Shibakov@>
@q Copyright 2002-2015 Free Software Foundation, Inc.@>
@q This file is part of SPLinT@>
@@ -89,14 +89,14 @@ int yywrap (void) { return 1; }/* Some versions of \flex\ want this. */
#endif
@ @<Ignored options@>=
-@G
+@G(fs1)
%a 4000
%o 5000
@g
@ {\it Some convenient abbreviations for regular expressions.}%
-\ifbootstrapmode\else\yyskipparsetrue\fi@<Original \ld\ macros@>=
-@G
+@<Original \ld\ macros@>=
+@G(fs1)
CMDFILENAMECHAR [_a-zA-Z0-9\/\.\\_\+\$\:\[\]\\\,\=\&\!\<\>\-\~]
CMDFILENAMECHAR1 [_a-zA-Z0-9\/\.\\_\+\$\:\[\]\\\,\=\&\!\<\>\~]
FILENAMECHAR1 [_a-zA-Z\/\.\\\$\_\~]
@@ -163,8 +163,8 @@ States:
}
@ @<Original \ld\ regular expressions@>=
+@G(fs2)
@=<BOTH,SCRIPT,EXPRESSION,VERS_START,VERS_NODE,VERS_SCRIPT>"/*" {@> comment ();@+@=}@>@;
-@G
<DEFSYMEXP>"-" {@>@[RTOKEN('-');@]@=}
<DEFSYMEXP>"+" {@>@[RTOKEN('+');@]@=}
<DEFSYMEXP>{FILENAMECHAR1}{SYMBOLCHARN}* {@>@[yylval.name = xstrdup (yytext);@+return NAME;@]@=}
@@ -177,8 +177,8 @@ States:
return INT;
@o
}
-
-<MRI,EXPRESSION>([0-9A-Fa-f])+(H|h|X|x|B|b|O|o|D|d) {
+@t}\vb{\insertraw{\insrulealign{\rulealigntemplate}{\cr\egroup\egroup}}}{@>
+<MRI,EXPRESSION>@>@t}\vb{\insertraw{\insparensalign{&}{}}}{@>@=([0-9A-Fa-f])+@>@t}\vb{\insertraw{\insparensalign{\rlap{$\odot$}\cr&}{}}}{@>@=(H|h|X|x|B|b|O|o|D|d) {
@O
int ibase ;
switch (yytext[yyleng - 1]) {
@@ -205,8 +205,8 @@ States:
return INT;
@o
}
-
-<SCRIPT,DEFSYMEXP,MRI,BOTH,EXPRESSION>((("$"|0[xX])([0-9A-Fa-f])+)|(([0-9])+))(M|K|m|k)? {
+@t}\vb{\insertraw{\insrulealign{\rulealigntemplate}{\cr\egroup\egroup}}}{@>
+<SCRIPT,DEFSYMEXP,MRI,BOTH,EXPRESSION>((("$"|0[xX])([0-9A-Fa-f])+)|(([0-9])+@>@t}\vb{\insertraw{\insparensalign{\rlap{$\odot$}\cr&}{}}}{@>@=)@>@t}\vb{\insertraw{\insparensalign{&}{}}}{@>@=)(M|K|m|k)? {
@O
char *s = yytext;
int ibase = 0;
@@ -364,8 +364,8 @@ States:
<EXPRESSION,BOTH,SCRIPT>"CONSTANT" {@>@[RTOKEN(CONSTANT);@]@=}
<MRI>"#".*\n? {@>@[++ lineno; @]@=}
<MRI>"\n" {@> @[++lineno;@+RTOKEN(NEWLINE);@]@=}
-<MRI>"*".* {}/* \MRI\ comment line */
-<MRI>";".* {}/* \MRI\ comment line */
+<MRI>"*".* {@> /* \MRI\ comment line */ @=}
+<MRI>";".* {@> /* \MRI\ comment line */ @=}
<MRI>"END" {@>@[RTOKEN(ENDWORD); @]@=}
<MRI>"ALIGNMOD" {@>@[RTOKEN(ALIGNMOD);@]@=}
<MRI>"ALIGN" {@>@[RTOKEN(ALIGN_K);@]@=}
@@ -380,7 +380,7 @@ States:
<MRI>"FORMAT" {@>@[RTOKEN(FORMAT); @]@=}
<MRI>"CASE" {@>@[RTOKEN(CASE); @]@=}
<MRI>"START" {@>@[RTOKEN(START); @]@=}
-<MRI>"LIST".* {@>@[RTOKEN(LIST); @]@=}/* \prodstyle{LIST} and ignore to end of line */
+<MRI>"LIST".* {@>@[RTOKEN(LIST);@]/* \prodstyle{LIST} and ignore to end of line */@=}
<MRI>"SECT" {@>@[RTOKEN(SECT); @]@=}
<EXPRESSION,BOTH,SCRIPT,MRI>"ABSOLUTE" {@>@[RTOKEN(ABSOLUTE); @]@=}
<MRI>"end" {@>@[RTOKEN(ENDWORD); @]@=}
@@ -398,7 +398,7 @@ States:
<MRI>"case" {@>@[RTOKEN(CASE); @]@=}
<MRI>"extern" {@>@[RTOKEN(EXTERN); @]@=}
<MRI>"start" {@>@[RTOKEN(START); @]@=}
-<MRI>"list".* {@>@[RTOKEN(LIST); @]@=}/* \prodstyle{LIST} and ignore to end of line */
+<MRI>"list".* {@>@[RTOKEN(LIST);@]/* \prodstyle{LIST} and ignore to end of line */@=}
<MRI>"sect" {@>@[RTOKEN(SECT); @]@=}
<EXPRESSION,BOTH,SCRIPT,MRI>"absolute" {@>@[RTOKEN(ABSOLUTE); @]@=}
@@ -435,10 +435,10 @@ States:
}
<SCRIPT>{WILDCHAR}* {
@O
- /* Annoyingly, this pattern can match comments, and we have
- longest match issues to consider. So if the first two
- characters are a comment opening, put the input back and
- try again. */
+ @t}\C{Annoyingly, this pattern can match comments,}\6{@>
+ @t}\C{and we have longest match issues to consider.}\6{@>
+ @t}\C{So if the first two characters are a comment}\6{@>
+ @t}\C{opening, put the input back and try again.}\6{@>
if (yytext[0] == '/' && yytext[1] == '*')
{
yyless (2);
@@ -506,10 +506,10 @@ States:
}
<VERS_START,VERS_NODE,VERS_SCRIPT>[\n] {@>@[lineno++;@]@=}
-<VERS_START,VERS_NODE,VERS_SCRIPT>#.* {}/* Eat up comments */
-<VERS_START,VERS_NODE,VERS_SCRIPT>[ \t\r]+ {}/* Eat up whitespace */
+<VERS_START,VERS_NODE,VERS_SCRIPT>#.* {@>@[;@]/* Eat up comments */@=}
+<VERS_START,VERS_NODE,VERS_SCRIPT>[ \t\r]+ {@>@[;@]/* Eat up whitespace */@=}
-<<EOF>> {
+<<EOF>> {@>@=
@O
include_stack_ptr--;
if (include_stack_ptr == 0)
diff --git a/support/splint/examples/ld/ldman.w b/support/splint/examples/ld/ldman.w
index c9551d1b3e..b6eb733dbd 100644
--- a/support/splint/examples/ld/ldman.w
+++ b/support/splint/examples/ld/ldman.w
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% Copyright 2002-2014 Free Software Foundation, Inc.
% This file is part of SPLinT
%
@@ -15,18 +15,22 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
@s TeX_ TeX
-\input limbo.sty
-\input ldfrontmatter.sty
\def\MRI{{\sc MRI}}
\def\optimization{5}
\input ldman.sty
+\modenormal
+\input ldfrontmatter.sty
+\input noweb.sty
+ \xreflocaltrue
+ \readlxrefs % read the reference file, if it exists, before any sections appear
+ \newwrite\xrefstream % references for a noweb-like style of sectioning.

+ \immediate\openout\xrefstream=\jobname.xxr
% multi-column output
\input dcols.sty
\let\oldN\N
-\let\N\textN
+\let\N\chapterN
\let\M\textM
-\def\ld{\.{ld}}
\showlastactiontrue
\immediate\openout\gindex=\jobname.gdx
@@ -48,28 +52,34 @@ supporting \TeX\ macros that make both the parser and this
documentation possible. The \TeX\ macros presented here are collected
in \.{ldman.sty} which is later included in the \TeX\ file produced by \CWEAVE.
@<Set up the generic parser machinery@>=
-@G
+@G(t)
\ifx\optimization\UNDEFINED %/* this trick is based on the premise that \.{\\UNDEFINED} */
\def\optimization{0} %/* is never defined nor created with \.{\\csname$\ldots$\\endcsname} */
\fi
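The premise mentioned in the comments above matters because \.{\\csname...\\endcsname}
silently turns an unknown name into \.{\\relax}. A minimal illustration of the
difference, with a hypothetical \.{\\foo} that appears nowhere in SPLinT:

    % after this test \foo is still undefined, so it can be tested again later
    \ifx\foo\UNDEFINED \message{foo is undefined}\fi
    % \csname defines \foo to be \relax as a side effect, so a subsequent
    % \ifx\foo\UNDEFINED comparison would no longer succeed
    \expandafter\ifx\csname foo\endcsname\relax \message{foo was undefined}\fi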
\let\nx\noexpand %/* convenient */
+\input limbo.sty %/* general setup macros */
\input yycommon.sty %/* general routines for stack and array access */
-\input yymisc.sty %/* helper macros (stack manipulation, table processing, value stack pointers) */
+\input yymisc.sty %/* helper macros (stack manipulation, table processing, value stack pointers */
+ %/* parser initialization, optimization) */
\input yyinput.sty %/* input functions */
\input yyparse.sty %/* parser machinery */
\input flex.sty %/* lexer functions */
-\input yyboth.sty %/* parser initialization, optimization */
\ifnum\optimization>\tw@
\input yyfaststack.sty
+\else
+ \let\stashnext\stashnextwithnothing
\fi
\input yystype.sty %/* scanner auxiliary types and functions */
\input yyunion.sty %/* parser data structures */
\input yxunion.sty %/* extended parser data structures */
-@=\input ldunion.sty %@>/* \ld\ parser data structures */@+
+\expandafter\def %/* adjust the \.{\\yyinput} to recognize \.{\\yyendgame} */
+ \expandafter\multicharswitch\expandafter
+ {\multicharswitch\yyendgame{\yyinput\yyeof\yyeof\endparseinput\removefinalvb}}
+\input ldunion.sty %@>/* \ld\ parser data structures */@+
@g
@*1 Bootstrapping.
@@ -141,7 +151,7 @@ a robust token output function which simply ignores the token values
the lexer is not aware of (it should not be necessary in our case
since we are using full featured lexer and parser).
@<Define the bootstrapping mode@>=
-@G
+@G(t)
\newwrite\tokendefs %/* token list */
\newwrite\stlist %/* \flex\ state list */
\newwrite\gindex %/* index entries */
@@ -155,12 +165,13 @@ since we are using full featured lexer and parser).
%\let\yylexreturn\yylexreturnregular /* should also work */
}%
\input yybootstrap.sty%
+ \input yytexlex.sty%
}
@g
@*1 Namespaces and modes.
Every parser/lexer pair (as well as some other macros) operates
-within a dedicated {\it namespace\/}. This simply means that the macros
+within a set of dedicated {\it namespaces\/}. This simply means that the macros
that output token values, switch lexer states and access various
tables `tack on' the string of characters representing the current
namespace to the `low level' control sequence name that performs the
@@ -171,23 +182,66 @@ generic name in the case of an `unoptimized' parser or lexer. The
optimized parser or lexer handles the namespace referencing internally.
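As a quick illustration of the `tacking on' convention just described, here is a
minimal plain \TeX\ sketch; the macros \.{\\settokendef} and \.{\\tokendef} are
hypothetical and appear nowhere in SPLinT, only \.{\\parsernamespace} is borrowed
from the sources:

    % build and look up control sequences whose names carry the current
    % namespace string, e.g. \tokenNAME[ld]
    \def\parsernamespace{[ld]}
    \def\settokendef#1#2{% define `token#1' inside \parsernamespace
      \expandafter\def\csname token#1\parsernamespace\endcsname{#2}%
    }
    \def\tokendef#1{\csname token#1\parsernamespace\endcsname}
    \settokendef{NAME}{286}
    \message{NAME resolves to \tokendef{NAME} in namespace \parsernamespace}

Redefining \.{\\parsernamespace} (to, say, \.{[small]}) before the lookup selects a
different set of definitions, which is essentially all the namespace machinery does.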
The mode setup macros for this manual define several separate
-namespaces. The \.{[main]} namespace is established for the parser
-that does the typesetting of the grammar. Every time a term name is
+namespaces:
+{%
+\def\aterm#1{\item{\sqebullet}{\ttl #1}: \ignorespaces}%
+\setbox0=\hbox{\sqebullet\enspace}
+\parindent=0pt
+\advance\parindent by \wd0
+\smallskip
+\aterm{main}the \.{[main]} namespace is established for the parser
+that does the typesetting of the grammar.
+
+\aterm{ld}every time a term name is
processed, the token names are looked up in the \.{[ld]}
namespace. The same namespace is used by the parser that typesets \ld\
script examples in the manual (i.e.~the parser described here). This
is done to provide visual consistency between the description of the
-parser and its output. The \.{[small]} namespace is used by the term
+parser and its output.
+
+\aterm{small{ \rm and} ldsmall}the \.{[small]} namespace is used by the term
name parser itself. Since we use a customized version of the name parser, we
dedicate a separate namespace for this purpose, \.{[ldsmall]}.
-The parser based on a subset of the full \bison\
+
+\aterm{prologue}the parser based on a subset of the full \bison\
grammar describing prologue declarations uses the \.{[prologue]}
-namespace. The \.{[index]} namespace is used for typesetting the
+namespace.
+
+\aterm{index}the \.{[index]} namespace is used for typesetting the
index entries and is not necessarily associated with any parser or
-lexer.
+lexer. Somewhat confusingly, the macros that typeset \TeX\ entries use
+\.{index} (without the brackets) as a pseudonamespace to display \TeX\
+terms in the index (due to the design of these typesetting macros, many
+of them take parameters, which can lead to chaos in the index). These
+two namespaces are not related but due to `hystorical' reasons (and the
+poorly thought out \TeX\ typesetting macro design) the \.{index} name has
+been retained. In addition, \.{index:visual} is used to adjust the sort order
+of \TeX\ terms (similar to the way the \.{\\prettywordpairwvis} macro does).
+
+
+\aterm{flexre{\rm, }flexone{, \rm and} flextwo}the parsers for
+\flex\ input use the \.{[flexre]}, \.{[flexone]}, and~\.{[flextwo]} namespaces for
+their operation. Another convention is to use the \.{\\flexpseudonamespace}
+to typeset \flex\ state names, and the \.{\\flexpseudorenamespace} for typesetting
+the names of \flex\ regular
+expressions. Currently, \.{\\flexpseudo...} namespaces are set equal to
+their non-\.{pseudo} versions by default. This setting may be changed
+whenever several parsers are used in the same document and tokens with
+the same names must be typeset in different styles.
+All \flex\ namespaces, as well as~\.{[main]}, \.{[small]},
+and~\.{[ldsmall]} are defined by the \.{\\genericparser}
+macros.
+
+\aterm{cwebclink}finally, the \.{[cwebclink]} namespace is used for
+typesetting the variables {\it inside\/} \ld\ scripts. This way, the
+symbols exported by the linker may be typeset in a style similar to
+\Cee\ variables, if desired (as they play very similar roles).
+
+}
@<Begin namespace setup@>=
-@G
+@G(t)
\def\indexpseudonamespace{[index]}
+\def\cwebclinknamespace{[cwebclink]}
\let\parsernamespace\empty
@g
@@ -201,15 +255,18 @@ initializing the \bison\ parser (accomplished by inputting
\.{yyinit.sty}), followed by handling the token typesetting for the
\ld\ grammar.
@<Define the normal mode@>=
-@G
+@G(t)
\newtoks\ldcmds
\def\modenormal{%
- \def\drvname{bo}%
\def\appendr##1##2{\edef\appnext{##1{\the##1##2}}\appnext}%
\def\appendl##1##2{\edef\appnext{##1{##2\the##1}}\appnext}%
\input yyinit.sty%
+ \input yytexlex.sty% /* \TeX\ typesetting macros */
+ \input ldtexlex.sty% /* \TeX\ typesetting specific to \ld */
\let\hostparsernamespace\ldnamespace /* the namespace where tokens are looked up for typesetting purposes */
+ @>@[@<Initialize \ld\ parsers@>@]
+ @>@[@<Modified name parser for \ld\ grammar@>@]
}
@g
@@ -220,7 +277,7 @@ the ability of the parser to switch lexer states. Thus, the parser can
switch the lexer state before the lexer is invoked for the first time
wreaking havoc on the lexer state stack.
@<Define the normal mode@>=
-@G
+@G(t)
\def\ldparserinit{%
\basicparserinit
\includestackptr=\@@ne
@@ -233,38 +290,45 @@ wreaking havoc on the lexer state stack.
@ This is the \ld\ parser invocation routine. It is coded according to
a straightforward sequence initialize-invoke-execute-or-fall back.
@<Define the normal mode@>=
-@G
-\expandafter\def\csname parserstack[l]\endcsname#1#2{%
- \toldparser\ldparserinit\yyparse#1\yyeof\yyeof\endparseinput\endparse
- \ifyyparsefail % /* revert to generic macros if parsing failed */
- \yybreak{\message{parsing failed ...}#2}%
- \else % /* stage three, process the parsed table */
- \yybreak{%
- \message{commands: \the\ldcmds}%
- {%
- \restorecslist{ld-display}\ldunion
- \the\ldcmds
- \par
- \vskip-\baselineskip
- \the\lddisplay
- }%
- }%
- \yycontinue
+@G(t)
+\def\preparseld{%
+ \let\postparse\postparseld
+ \expandafter\hidecs\expandafter{\ldunion}% /* inhibit expansion so that fewer \.{\\noexpand}s are necessary */
+ \toldparser
+ \ldparserinit
+ \yyparse
}
-@g
-
-@ @<Initialize the active mode@>=
-@G
-\ifx\modeactive\UNDEFINED
- \def\modeactive{\modenormal}
-\fi
-\modeactive
+\def\postparseld{%
+ \ifsaveparseoutput
+ {\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
+ parsed table: \the\ldcmds^^J^^J\harmlesscomment
+ stashed stream:^^J\the\yystash^^J^^J\harmlesscomment
+ format stream: ^^J\the\yyformat}%
+ }%
+ \fi
+ \ifchecktable
+ \errmessage{parsed table: \the\ldcmds^^J^^J%
+ stashed stream: \the\yystash^^J^^J%
+ format stream: \the\yyformat}%
+ \fi
+ \restorecslist{ld-parser:restash}\ldunion % /* mark variables, preprocess stash */
+ \setprodtable
+ \the\ldcmds
+ \restorecslist{ld-display}\ldunion
+ \setprodtable /* use \bison's parser typesetting definitions */
+ \restorecs{ld-display}{\anint\bint\hexint} % /* $\ldots$ except for integer typesetting */
+ \the\ldcmds
+ \par
+ \vskip-\baselineskip
+ \the\lddisplay
+}
-\ifbootstrapmode\else
- @>@[@<Initialize \ld\ parsers@>@]@;
- @>@[@<Modified name parser for \ld\ grammar@>@]@;
-\fi
+\fillpstack{l}{%
+ \preparseld
+ {\preparsefallback{++}}% /* skip this section if parsing failed, put \.{++} on the screen */
+ \relax % /* this \.{\\relax} serves as a `guard' for the braces */
+}
@g
@ Unless they are being bootstrapped, the \ld\ parser and its
@@ -280,7 +344,7 @@ In the original \bison-\flex\ interface, token names are
defined as straightforward macros (a poor choice as will be seen
shortly) which can sometimes clash with the standard \Cee\ macros.
This is why \ld\ lexer returns \prodstyle{ASSERT} as
-\prodstyle{ASSERT_K}. The name parser treats \.{K} as a suffix to
+\prodstyle{ASSERT\_K}. The name parser treats \.{K} as a suffix to
supply a visual reminder of this flaw. Note that the `suffixless' part
of these tokens (such as \prodstyle{ASSERT}) is never declared and
thus has to be entered in \.{ltokenset.sty} by hand.
@@ -291,7 +355,7 @@ fixed appearance (for example, \prodstyle{NAME}) are typeset in a
style that indicates their origin. The details can be found by
examining \.{ltokenset.sty}.
@<Initialize \ld\ parsers@>=
-@G
+@G(t)
\genericparser
name: ld,
ptables: ptab.tex,
@@ -300,7 +364,7 @@ examining \.{ltokenset.sty}.
asetup: {},
dsetup: {},
rsetup: {},
- optimization: {};%
+ optimization: {};% /* the parser and lexer are optimized when output */
\genericprettytokens
namespace: ld,
tokens: ldp.tok,
@@ -316,7 +380,6 @@ of this documentation.
@<Define the bootstrapping mode@>@;
@<Define the normal mode@>@;
@<Additional macros for the \ld\ lexer/parser@>@;
-@<Initialize the active mode@>@;
@i ldgram.x
@i ldlex.x
@@ -342,156 +405,14 @@ need for a `parser stack' as in the case of the \bison\
parser. If one must be able to display still smaller segments of \ld\
code, using `hidden context' tricks (discussed elsewhere) seems to be
a better approach.
-\iffalse
-\traceparserstatestrue
-\tracestackstrue
-\tracerulestrue
-\traceactionstrue
-\tracelookaheadtrue
-\traceparseresultstrue
-\tracebadcharstrue
-\yyflexdebugtrue
-\tracestatestrue
-\fi
%
@<Example \ld\ script@>=
-@G(l)
-INCLUDE file.ld
-
-MEMORY
-{
- RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 20K
- FLASH (rx) : ORIGIN = 0x8000000, LENGTH = 128K
- ASH (rx) : ORIGIN = 8000000, LENGTH = 128K
- CLASH (rx) : ORIGIN = 700000, LENGTH = 128K
- ASH (rx) : ORIGIN = $8000000, LENGTH = 128K
- CLASH (rx) : ORIGIN = 700000B, LENGTH = 128K
- INCLUDE file.mem
-}
-
-_estack = 0x20005000;
-_bstack = a > 0 ? NEXT(11) : 0x19;
-
-SECTIONS
-{
- .isr_vector ALIGN(8) (NOLOAD): AT(.) ALIGN(.) ALIGN_WITH_INPUT SUBALIGN(8) SPECIAL
- {
- . = ALIGN(4);
- KEEP(*(.isr_vector))
- . = ALIGN(4);
- } > FLASH AT > RAM : FLASH : RAM : OTHER = . + 8
-
- .text :
- {
- /* skip this comment */;
- . = ALIGN(4);
- *(.text)
- *(.text.*)
- *(.rodata)
- *(.rodata*)
- *(.glue_7)
- *(.glue_7t)
- . = ALIGN(4);
- _etext = . + 8;
- _sidata = _etext;
- } >FLASH AT > RAM
-
- .data : AT ( _sidata )
- {
- . = ALIGN(4);
- _sdata = . ;
- *(.data)
- *(.data.*)
- . = ALIGN(4);
- _edata = . ;
- } >RAM
-
- .bss :
- {
- . = ALIGN(4);
- _sbss = .;
- *(.bss)
- *(COMMON)
- . = ALIGN(4);
- _ebss = . ;
- } >RAM
-}
-@g
+@i ldexample_l.hx
@ @<The same example of an \ld\ script@>=
-@G(b)
-INCLUDE file.ld
-
-MEMORY
-{
- RAM (xrw) : ORIGIN = 0x20000000, LENGTH = 20K
- FLASH (rx) : ORIGIN = 0x8000000, LENGTH = 128K
- ASH (rx) : ORIGIN = 8000000, LENGTH = 128K
- CLASH (rx) : ORIGIN = 700000, LENGTH = 128K
- ASH (rx) : ORIGIN = $8000000, LENGTH = 128K
- CLASH (rx) : ORIGIN = 700000B, LENGTH = 128K
- INCLUDE file.mem
-}
+@i ldexample_b.hx
-_estack = 0x20005000;
-_bstack = a > 0 ? NEXT(11) : 0x19;
-
-SECTIONS
-{
- .isr_vector ALIGN(8) (NOLOAD): AT(.) ALIGN(.) ALIGN_WITH_INPUT SUBALIGN(8) SPECIAL
- {
- . = ALIGN(4);
- KEEP(*(.isr_vector))
- . = ALIGN(4);
- } > FLASH AT > RAM : FLASH : RAM : OTHER = . + 8
-
- .text :
- {
- /* skip this comment */;
- . = ALIGN(4);
- *(.text)
- *(.text.*)
- *(.rodata)
- *(.rodata*)
- *(.glue_7)
- *(.glue_7t)
- . = ALIGN(4);
- _etext = . + 8;
- _sidata = _etext;
- } >FLASH AT > RAM
-
- .data : AT ( _sidata )
- {
- . = ALIGN(4);
- _sdata = . ;
- *(.data)
- *(.data.*)
- . = ALIGN(4);
- _edata = . ;
- } >RAM
-
- .bss :
- {
- . = ALIGN(4);
- _sbss = .;
- *(.bss)
- *(COMMON)
- . = ALIGN(4);
- _ebss = . ;
- } >RAM
-}
-@g
-
-@
-\traceparserstatesfalse
-\tracestacksfalse
-\tracerulesfalse
-\traceactionsfalse
-\tracelookaheadfalse
-\traceparseresultsfalse
-\tracebadcharsfalse
-\yyflexdebugfalse
-\tracestatesfalse
+@ @<Some random portion of \ld\ code@>=
@i ldnp.x
@@ -516,12 +437,17 @@ to take advantage of \TeX\ formatting and introduce some visual
cues. The convention of using {\it italics\/} for the original
comments has been reversed: the italicized comments are the ones
introduced by the author, {\it not\/} the original
-creators of \ld.
+creators of \ld.%\checktabletrue\saveparseoutputtrue
@i ldgramo.x
@i ldlexo.x
+@q Include the list of index section markers; this is a hack to get around @>
+@q the lack of control over the generation of \CWEB's index; the correct order @>
+@q of index entries depends on the placement of this inclusion @>
+@i alphas.hx
-@** Index. This section lists the variable names and (in some cases)
+@** Index. \checktablefalse\saveparseoutputtrue
+This section lists the variable names and (in some cases)
the keywords used inside the `language sections' of the \CWEB\
source. It takes advantage of the built-in facility of \CWEB\ to supply
references for both definitions (set in {\it
@@ -551,19 +477,12 @@ $$
index entries, as well, mimicking \CWEB's behavior for the
{\it inline \Cee\/} (\.{\yl}$\ldots$\.{\yl}). Such entries are labeled
with $^\circ$, to provide a reminder of their origin.
-\def\otherlangindexseparator{%
- \par
- \vskip.5\baselineskip
- \centerline{B{\sc ISON}, LD, {\sc AND} \TeX\ {\sc INDICES}}%
- \vskip.5\baselineskip
- \par
-}
-\let\currentparsernamespace\parsernamespace
- \let\parsernamespace\indexpseudonamespace
- \prettywordpair{emptyrhs}{$\circ$ {\rm(empty rhs)}}%
- \prettywordpair{inline_action}{$\diamond$ {\rm(inline action)}}%
-\let\parsernamespace\currentparsernamespace
+\unsetfootnotes
+\def\next{\expandafter\eatone\string}
+\edef\unindexable{{\next\the}{\next\nx}{\next\yy}{\next\yylexnext}{\next\else}{\next\fi}{\next\yyBEGIN}{\next\next}}
+\input gindex.sty
\closeout\gindex
+\termindexfalse
\let\inx\inxmod
\let\fin\finmod
\let\oldMRL\MRL
diff --git a/support/splint/examples/ld/ldnp.w b/support/splint/examples/ld/ldnp.w
index f92d0dfc8c..0a99d864fd 100644
--- a/support/splint/examples/ld/ldnp.w
+++ b/support/splint/examples/ld/ldnp.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2015, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -40,7 +40,7 @@ initialization is done by the macros below. After the initialization
has been completed, the switch command is replaced by the one that
activates the new name parser.
@<Modified name parser for \ld\ grammar@>=
-@G
+@G(t)
\genericparser
name: ldsmall,
ptables: ld_small_tab.tex,
@@ -48,10 +48,12 @@ activates the new name parser.
tokens: {},
asetup: {},
dsetup: {},
- rsetup: {\noexpand\savefullstateextra},
+ rsetup: \let\returnexplicitspace\ignoreexplicitspace, % ignore spaces in names
optimization: {};%
\let\otosmallparser\tosmallparser % /* save the old name parser */
\let\tosmallparser\toldsmallparser
+\expandafter\let\csname to\stripbrackets\cwebclinknamespace parser\endcsname\tosmallparser %
+/* make the name parser handle the typesetting of \Cee\ variables */
@g
@ @<Bison options@>=
@@ -70,6 +72,7 @@ activates the new name parser.
%token INTEGER
%token EXTENDED
%token WILDCARD
+%token META_IDENTIFIER
@g
@*1 The name parser productions. These macros do a bit more than we
@@ -80,26 +83,39 @@ order to be able to refer to, say, \flex\ options in text. The inline
action in one of the rules for \prodstyle{identifier\_string} was
added to adjust the number and the position of the terms so that the
appropriate action can be reused later for
-\prodstyle{qualified\_identifier\_string}.
+\prodstyle{qualified\_identifier\_string}.
+%\tracebadnamestrue
+%\tracenamestrue
+%\traceparserstatestrue
+%\tracestackstrue
+%\tracerulestrue
+%\traceactionstrue
+%\tracelookaheadtrue
+%\traceparseresultstrue
+%\tracebadcharstrue
+%\yyflexdebugtrue
@<Parser productions@>=
@G
full_name:
identifier_string suffixes.opt {@> @<Compose the full name@> @=}
| qualifier '_' identifier_string suffixes.opt {@> @<Compose a qualified name@> @=}
+| META_IDENTIFIER {@> @<Turn a \prodstylens{META\_IDENTIFIER}{\ldsmallnamespace} into a full name@> @=}
+| '\'' {@> @<Make \prodstyle{'} into a name@> @=}
;
identifier_string:
PERCENT_IDENTIFIER {@> @<Attach option name@> @=}
| IDENTIFIER {@> @<Start with an identifier@> @=}
| '\'' WILDCARD '\'' {@> @<Start with a quoted string@> @=}
+| '\'' '_' '\'' {@> @<Start with a \prodstyle{'\_'} string@> @=}
| '\'' '.' '\'' {@> @<Start with a \prodstyle{'.'} string@> @=}
-| '\'' '_' '\'' {@> @<Start with an \prodstyle{'\_'} string@> @=}
@t}\vb{\flatten}{@>
| incomplete_identifier_string {} IDENTIFIER {@> @<Attach an identifier@> @=}
;
@t}\vb{\resetf}{@>
incomplete_identifier_string:
- identifier_string '_' {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+ '_' {@> TeX_( "/yy0{/nx/idstr{}{}}" );@=}
+| identifier_string '_' {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
| qualified_identifier_string '_' {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
;
@@ -144,7 +160,16 @@ qualifier:
@[TeX_( "/yy0{/the/yy(1)/the/yy(2)}/namechars/yyval" );@]@;
@ @<Compose a qualified name@>=
- @[TeX_( "/yy0{/the/yy(3)/the/yy(4)/nx/dotsp/nx/qual/the/yy(1)}/namechars/yyval" );@]@;
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa/getsecond{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/the/yy(3)/the/yy(4)/nx/dotsp/nx/qual{/the/toksa/nx/_}{/the/toksb/uscoreletter}}/namechars/yyval" );@]@;
+
+@ @<Turn a \prodstylens{META\_IDENTIFIER}{\ldsmallnamespace} into a full name@>=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/idstr{/the/toksa}{/the/toksb}}/namechars/yyval" );@]@;
+
+@ @<Make \prodstyle{'} into a name@>=
+ @[TeX_( "/yy0{/nx/chstr{'}{'}}/namechars/yyval" );@]@;
@ @<Attach option name@>=
@[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
@@ -164,13 +189,13 @@ qualifier:
@ @<Start with a quoted string@>=
@[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@;
@[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@;
- @[TeX_( "/yy0{/nx/chstr{/the/toksa}{/the/toksb}}" );@]@;
+ @[TeX_( "/yy0{/nx/visflag{/nx/termvstring}{}/nx/chstr{/the/toksa}{/the/toksb}}" );@]@;
-@ @<Start with a \prodstyle{'.'} string@>=
- @[TeX_( "/yy0{/nx/chstr{.}{.}}" );@]@;
+@ @<Start with a \prodstyle{'\_'} string@>=
+ @[TeX_( "/yy0{/nx/visflag{/nx/termvstring}{}/nx/chstr{/nx/_}{_}}" );@]@;
-@ @<Start with an \prodstyle{'\_'} string@>=
- @[TeX_( "/yy0{/nx/chstr{/uscoreletter}{/uscoreletter}}" );@]@;
+@ @<Start with a \prodstyle{'.'} string@>=
+ @[TeX_( "/yy0{/nx/visflag{/nx/termvstring}{}/nx/chstr{.}{.}}" );@]@;
@ @<Turn a qualifier into an identifier@>=
@<Start with an identifier@>@;
@@ -193,10 +218,12 @@ qualifier:
@[TeX_( "/yy0{/nx/dotsp/nx/sfxi/the/yy(1)}" );@]@;
@ @<Attach a subscripted integer@>=
- @[TeX_( "/yy0{/nx/dotsp/nx/sfxi/the/yy(2)}" );@]@;
+ @[TeX_( "/getfirst{/yy(2)}/to/toksa/getsecond{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/dotsp/nx/sfxi{/nx/_/the/toksa}{/uscoreletter/the/toksb}}" );@]@;
@ @<Attach a subscripted qualifier@>=
- @[TeX_( "/yy0{/nx/dotsp/nx/qual/the/yy(2)}" );@]@;
+ @[TeX_( "/getfirst{/yy(2)}/to/toksa/getsecond{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/dotsp/nx/qual{/nx/_/the/toksa}{/uscoreletter/the/toksb}}" );@]@;
@ @<Attach suffixes@>=
@[TeX_( "/yy0{/nx/dotsp/the/yy(2)}" );@]@;
@@ -262,10 +289,13 @@ void define_all_states( void ) {
@ @<Lexer definitions@>=
@<Lexer states@>@;
-@G
+@G(fs1)
aletter [a-zA-Z]
-wc ([^\\\']{-}[a-zA-Z0-9]|\\.)
+letter (_|{aletter})
+wc ([^\\\'\"]{-}[_a-zA-Z0-9]|\\.)
id ({aletter}|{aletter}({aletter}|[0-9])*{aletter})
+id_strict {letter}(({letter}|[-0-9])*{letter})?
+meta_id "*"{id_strict}"*"?
int [0-9]+
@g
@@ -321,9 +351,8 @@ int [0-9]+
\tracebadcharsfalse
\yyflexdebugfalse
%
-\yyskipparsetrue
@<Scan white space@>=
-@G
+@G(fs2)
[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=}
@g
@@ -333,7 +362,7 @@ typesetting style for some of the keywords would need to be adjusted,
such changes would be easy to implement, since the template is already
here.
@<Scan identifiers@>=
-@G
+@G(fs2)
"%"({aletter}|[0-9]|[-_]|"%"|[<>])+ {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
"opt" {@> @[TeX_( "/yylexreturnval{OPTIONAL}" );@]@=}
@@ -344,6 +373,7 @@ here.
{wc} {@> @[TeX_( "/yylexreturnval{WILDCARD}" );@]@=}
{id} {@> @[@<Prepare to process an identifier@>@]@=}
+{meta_id} {@> @[@<Prepare to process a meta-identifier@>@]@=}
{int} {@> @[TeX_( "/yylexreturnval{INTEGER}" );@]@=}
"\"" {@> @[TeX_( "/yylexnext" );@]@=}
@@ -353,6 +383,9 @@ here.
@ @<Prepare to process an identifier@>=
@[TeX_( "/yylexreturnval{IDENTIFIER}" );@]@;
+@ @<Prepare to process a meta-identifier@>=
+ @[TeX_( "/yylexreturnval{META_IDENTIFIER}" );@]@;
+
@ \let\hostparsernamespace\ldnamespace\yyskipparsefalse A simple routine to detect
trivial scanning problems.
@<React to a bad character@>=
diff --git a/support/splint/examples/ld/ldnump.w b/support/splint/examples/ld/ldnump.w
index 5d4d45f724..813628c68c 100644
--- a/support/splint/examples/ld/ldnump.w
+++ b/support/splint/examples/ld/ldnump.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2015, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -54,7 +54,7 @@ activates the new name parser.
tokens: {},
asetup: {},
dsetup: {},
- rsetup: {\noexpand\savefullstateextra},
+ rsetup: {},
optimization: {};%
@g
diff --git a/support/splint/examples/ld/ldtexlex.sty b/support/splint/examples/ld/ldtexlex.sty
new file mode 100644
index 0000000000..01b8873540
--- /dev/null
+++ b/support/splint/examples/ld/ldtexlex.sty
@@ -0,0 +1,75 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+% additional definitions for typesetting \TeX\ control sequences
+
+\def\defz#1#{%
+ \d@fz{#1}%
+}
+
+\def\d@fz#1#2{%
+ \defx#1{#2}{texline}%
+ \d@@z#1{#2}{index}%
+}
+
+\def\d@@z#1#2#{%
+ \expandafter\def
+ \expandafter
+ \alltexsymbols
+ \expandafter{\alltexsymbols#1}%
+ \defy#1%
+}
+
+\def\defzop#1{%
+ \defz#1{$#1$}%
+}
+
+\defzop\CM
+\defzop\AND
+\defzop\OR
+\defzop\XOR
+\defzop\V
+\defzop\W
+\defzop\leq
+\defzop\geq
+\defzop\ll
+\defzop\gg
+\defzop\times
+\defzop\div
+\defz\/{${}/{}$}
+\defzop\R
+\defzop\xi
+\defzop\Xorxeq
+\defzop\K
+
+
+\defx\CM{=not}{index:visual}
+\defx\AND{&&}{index:visual}
+\defx\OR{||}{index:visual}
+\defx\XOR{|||}{index:visual}
+%\defx\V{V}{index:visual}
+%\defx\W{W}{index:visual}
+\defx\leq{<=}{index:visual}
+\defx\geq{>=}{index:visual}
+\defx\ll{<<}{index:visual}
+\defx\gg{>>}{index:visual}
+\defx\times{=times}{index:visual}
+\defx\div{\%}{index:visual}
+\defx\/{/}{index:visual}
+\defx\R{not^}{index:visual}
+%\defx\xi{xi}{index:visual}
+\defx\Xorxeq{=|||}{index:visual}
+\defx\K{=}{index:visual}
diff --git a/support/splint/examples/ld/ldunion.sty b/support/splint/examples/ld/ldunion.sty
index b2c3837e57..710d4e1725 100644
--- a/support/splint/examples/ld/ldunion.sty
+++ b/support/splint/examples/ld/ldunion.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -21,6 +21,7 @@
\def\yyuniontag{\ldunion}
\def\parserstrictnamespace{ld-parser-strict}
\def\parserprototypesnamespace{ld-parser-strict:headers}
+\def\parserdebugnamespace{ld-parser-debug}
\def\ldunion{\currentyyunionnamespace}
\def\currentyyunionnamespace{ld-generic}
@@ -33,28 +34,40 @@
% ld commands
-\defp\ldregexp#1{} % name pattern :: \ldregexp{name}
+\defp\ldregexp#1{} % name pattern :: \ldregexp{{name}{name11}{fptr}{sptr}}
+\defp\ldregop#1{} % name pattern :: \ldregop{name}
\defp\ldspace{} % space separator :: \ldspace
\defp\ldattributes#1{} % attributes :: \ldattributes{name}
\defp\ldattributesneg#1{} % complemented attributes :: \ldattributesneg{name}
\defp\ldfilename#1{} % file name :: \ldfilename{name}
-\defp\ldcommandseparator#1#2{} % command separator
+\defp\ldcommandseparator#1#2#3#4{} % command separator :: \ldcommandseparator{fptr}{sptr}{prev command}{next command}
\defp\ldassignment#1#2#3{} % assignment :: \ldassignment{lhs}{op}{rhs}
+\defp\ldhidden#1#2{} % hidden :: \ldhidden{lhs}{rhs}
+\defp\ldprovide#1#2{} % provide :: \ldprovide{lhs}{rhs}
+\defp\ldprovidehid#1#2{} % provide hidden :: \ldprovidehid{lhs}{rhs}
+\defp\ldkeep#1{} % keep :: \ldkeep{list}
+\defp\ldentry#1{} % entry :: \ldentry{name}
\defp\ldinclude#1{} % file inclusion :: \ldinclude{file name}
\defp\ldmemory#1{} % memory specification :: \ldmemory{memory spcification}
\defp\ldfill#1{} % fill expression :: \ldfill{expression}
\defp\ldmemoryspec#1#2#3#4{} % memory item :: \ldmemoryspec{name}{attributes}{origin}{length}
+\defp\ldmemspecstash#1#2{} % memory spec stash :: \ldmemspecstash{fptr}{sptr}
+\defp\ldmemspecseparator#1#2{} % memory spec separator :: \ldmemspecseparator{fptr}{sptr}
\defp\ldoriginspec#1{} % origin :: \ldoriginspec{expression}
\defp\ldlengthspec#1{} % length :: \ldlengthspec{expression}
\defp\ldsections#1{} % ld sections :: \ldsections{sections}
-\defp\ldsectionseparator{} % section separator
+\defp\ldsectionseparator#1#2{} % section separator :: \ldsectionseparator{fptr}{sptr}
\defp\ldtype#1{} % section type :: \ldtype{type}
-\defp\ldstatement#1{} % stray statement :: \ldstatement{statement}
+\defp\ldstatement#1{} % statement :: \ldstatement{statement}
+\defp\ldsecspec#1{} % section spec :: \ldsecspec{section spec}
+\defp\ldinsertcweb#1#2#3#4{} % insert accumulated \CWEB\ material :: \ldinsertcweb
+ % {fptr}{sptr}{command}{parsed segment} : never defined
\defp\ldnamedsection#1#2#3#4#5#6#7{} % named section :: \ldnamedsection{name}
% {{expression}{type}}{at}
% {{}{}{}:alignment}
% {constraint}{statements}
% {{}{}{}{}:placement}
+\defp\ldsectionstash#1#2{} % sections spec stash :: \ldsectionstash{fptr}{sptr}
\savecslist{ld-parser-prototypes}\ldunion
@@ -64,6 +77,8 @@
\newtoks\lddisplay
+\restorecslist{ld-parser-strict}\ldunion
+
\defc\ldinclude{%
\toksa={&##\cr\ttl include\ &}#1%
\concat\toksa\toksc
@@ -77,8 +92,8 @@
\restorecslist{ld-parser:memory-spec}\ldunion
\toksa{}#1%
\toksc{%
- \hfil##\qquad&##\hfil&\qquad##\hfil\quad&\hfil##&\quad\hfil##\cr
- \ttl memory&\hfil&\ttl attributes&\ttl origin&\ttl length\cr
+ \hfil##\qquad&##\hfil&\qquad##\hfil\quad&\hfil##&\qquad\hfil##\cr
+ \ttl memory&\hfil&\ttl attributes&\ttl starts at&\ttl length\cr
\noalign{\smallskip}%
}%
\edef\next{\lddisplay{\the\lddisplay\halign{\the\toksc\the\toksa}}}\next
@@ -97,23 +112,72 @@
\restorecslist{local-namespace}{\ldunion}%
}
-\def\ldextractname#1#2{%
- \edef\next{\toksc{\gidxentry{\termttstring}{#2}{\ntt #2}}}\next
+\def\ldextractname#1#2#3#4{%
+ \edef\next{\toksc{\gidxentry{\termttstring}{#2}{}{\ntt #2}}}\next
+}
+
+\def\ldextractmemname#1#2#3#4{%
+ {%
+ \expandafter\let\expandafter\tosmallparser\csname to\stripbrackets\cwebclinknamespace parser\endcsname
+ \let\optstrextra\optstrextraesc
+ \def\hostparsernamespace{[none]}%
+ \nameproc{#2}\with\parsebin
+ \edef\next{\toksc{\gidxentry{\termvstring}{#2}{}{\let\idxfont\nx\empty\ntt\the\toksa}}}%
+ \expandafter
+ }\next%
+
}
% the grammar of ld scripts is very uniform so the separator form
-% below is more than adequate; if a more sophisticated spacing
+% below should be more than adequate; if a more sophisticated spacing
% strategy is required, one may consult the design of
% \separatorswitcheq and \separatorswitchneq in yyunion.sty
-\defc\ldcommandseparator{\yyifsamestring{#1}{#2}{}{\appendr\lddisplay{\noexpand\medskip}}}
+\defc\ldcommandseparator{%
+ \restorecs{table-render}{\strm}%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \yyifsamestring{#3}{#4}{}{\appendrnx\lddisplay{\medskip}}%
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \edef\next{\toksc{\toksa{}\the\yystashlocal
+ \noexpand\ldmakestashbox{}}}\next
+ \appendrnx\lddisplay{\smallskip\noindent}%
+ \concat\lddisplay\toksc
+ \appendrnx\lddisplay{\smallskip}%
+ \fi
+}
+
+\defc\ldcommandseparator{% new version; TODO: remove the duplicates after the macros have been tested
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \yyifsamestring{#3}{#4}{}{\appendrnx\lddisplay{\medskip}}%
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \appendrnx\lddisplay{\smallskip\noindent}%
+ \appendr \lddisplay{\toksa{\the\yystashlocal}}%
+ \appendrnx\lddisplay{\ldmakestashbox{}\smallskip}%
+ \fi
+}
\defc\ldstatement{\toksc{\hbox{$#1$}}\concat\lddisplay\toksc}
+\let\ldsecspec\ldstatement
+
\defc\ldassignment{%
#1#2#3%
}
+\defc\ldhidden{%
+ \mathop{\hbox{\ssf hidden}}\hbox{$\langle\,$}#1\K#2\hbox{$\,\rangle$}%
+}
+
+\defc\ldprovide{%
+ \mathop{\hbox{\ssf provide}}\hbox{$\langle\,$}#1\K#2\hbox{$\,\rangle$}%
+}
+
+\defc\ldprovidehid{%
+ \mathop{\hbox{\ssf provide$_{h}$}}\hbox{$\langle\,$}#1\K#2\hbox{$\,\rangle$}%
+}
+
\defc\anint{%
\uppercase{\ldsciinteger{#1}}%
}
@@ -130,14 +194,61 @@
\ldreg@xp#1%
}
-\def\ldreg@xp#1#2{%
- \hbox{\let\termindex\writeidxentry\edef\next{\toksc{\gidxentry{\termttstring}{#2}}}\next\the\toksc\ntt@#2}%
+\def\ldreg@xp#1#2#3#4{%
+ \expandafter\ifx\csname ldvarname[#2]\endcsname\relax
+ {%
+ \let\termindex\writeidxentry
+ \let\hostparsernamespace\cwebclinknamespace% process the variable names as in \CWEB
+ \edef\next{\toksc{\gidxentry{\termttstring}{#1}{}}}\next\the\toksc
+ \hbox{\ntt@#1}%
+ \expandafter
+ }\the\toksc
+ \else
+ \yyifsamestring{#2}{.}{% special . name
+ {%
+ \let\termindex\writeidxentry
+ \let\hostparsernamespace\cwebclinknamespace% for consistency
+ \edef\next{\toksc{\gidxentry{\termexception}{.origin&}{.}}}\next
+ \hbox{\csname\prettynamecs\hostparsernamespace{.origin&}\endcsname{}}%
+ \expandafter
+ }\the\toksc
+ }{%
+ {%
+ \let\termindex\writeidxentry
+ \let\hostparsernamespace\cwebclinknamespace% process the variable names as in \CWEB
+ \edef\next{\toksc{\gidxentry{\termhostidstring}{#1}{}}}\next
+ \hbox{%
+ \expandafter\let\expandafter\tosmallparser\csname to\stripbrackets\cwebclinknamespace parser\endcsname
+ \let\optstrextra\optstrextraesc
+ \nameproc{#2}\with\parsebin
+ \it\the\toksa
+ }%
+ \expandafter
+ }\the\toksc
+ }%
+ \fi
+}
+
+\defc\ldregop{%
+ \ldreg@p#1%
+}
+
+\def\ldreg@p#1#2#3#4{%
+ \hbox{\ntt@#1}%
}
\defc\ldfill{%
#1%
}
+\defc\ldentry{% this command survives till the table time
+ \hbox{\ttl entry{\rm: }} #1%
+}
+
+\defc\ldkeep{%
+ \mathop{\hbox{\ssf keep}}(#1)%
+}
+
\defc\ldfilename{\ldextractname#1}
\savecslist{ld-display}\ldunion
@@ -149,9 +260,9 @@
\defc\ldmemoryspec{%
\toksb{\hfil&}%
\let\termindex\writeidxhentry
- \ldextractname#1\concat\toksb\toksc
+ \ldextractmemname#1\concat\toksb\toksc
\let\termindex\eatone
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\toksc{}#2\concat\toksb\toksc
\toksc{}#3\concat\toksb\toksc
\toksc{}#4\concat\toksb\toksc
@@ -160,7 +271,7 @@
}
\defc\ldattributes{%
- \ldextractname#1\appendr\toksc{&}%
+ \ldextractname#1\appendrnx\toksc{&}%
}
\defc\ldlengthspec{%
@@ -179,7 +290,43 @@
\concat\toksa\toksc
}
-\restorecs{ld-display}{\anint\ldfilename}
+\defc\ldmemspecseparator{% TODO: remove after testing is complete
+ \restorecs{table-render}{\strm}%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \edef\next{\toksc{&\noexpand\multispan4\toksa{}\the\yystashlocal
+ \noexpand\ldmakestashbox{\cdotfill}\noexpand\quad\cr}}\next\concat\toksa\toksc
+ \fi
+}
+
+\defc\ldmemspecseparator{%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \appendr\toksa{&\nx\multispan4\toksa{\the\yystashlocal}\nx\ldmakestashbox{\nx\cdotfill}\nx\quad\cr}%
+ \fi
+}
+
+\defc\ldmemspecstash{%
+ \restorecs{table-render}{\strm}%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \edef\next{\toksc{&\noexpand\multispan4\toksa{}\the\yystashlocal
+ \noexpand\ldmakestashbox{\cdotfill}\noexpand\quad\cr}}\next\concat\toksa\toksc
+ \fi
+}
+
+\defc\ldmemspecstash{%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \appendr\toksa{&\nx\multispan4\toksa{\the\yystashlocal}\nx\ldmakestashbox{\nx\cdotfill}\nx\quad\cr}%
+ \fi
+}
+
+\restorecs{ld-display}{\ldfilename\ldentry}
\toyyunion{ld-parser:memory-spec}
% sections commands
@@ -206,19 +353,19 @@
\toksb\expandafter{\sections@header&}% section header
\ldextractname#1% section name
\concat\toksb\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\ldexpwithtype#2% location and type
\concat\toksb\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\yystringempty{#3}{\ldpushalignment#4}{\toksc{{\ttl at }$#3$}}% alignment
\concat\toksb\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\yystringempty{#5}{\ldpushplacement#7}{\toksc{{\ttl #5}}}% constraint
\concat\toksb\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\ldstartpheaders#7% possible pheaders
\concat\toksb\toksc
- \appendr\toksb{\cr}%
+ \appendrnx\toksb{\cr}%
\def\sections@header{}%
\else
\toksb\expandafter{&}% section header
@@ -238,19 +385,19 @@
\toksc\expandafter{\expandafter\qquad\expandafter$\the\toksc{}$}%
}%
\concat\toksb\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
% \ldexpwithtype#2% location and type
% \concat\toksa\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\ldpushalignment#4% alignment
\concat\toksb\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\ldpushplacement#7% placement
\concat\toksb\toksc
- \appendr\toksb{&}%
+ \appendrnx\toksb{&}%
\ldpushpheaders#7% possible pheaders
\concat\toksb\toksc
- \appendr\toksb{\cr}%
+ \appendrnx\toksb{\cr}%
\def\sections@header{}%
\fi
\ifsectioncomplete
@@ -296,11 +443,11 @@
}{%
\ldextractname#1%
\toksd{{\ttl in }}%
- \appendl\toksc{\the\toksd}%
+ \concatl\toksd\toksc
}%
\yystringempty{#2}{% any AT > ?
}{%
- \yytoksempty\toksc{\toksc{{\ttl as }}}{\appendr\toksc{ {\noexpand\ttl as }}}%
+ \yytoksempty\toksc{\toksc{{\ttl as }}}{\appendrnx\toksc{ {\ttl as }}}%
\toksd=\toksc
\ldextractname#2%
\concat\toksd\toksc
@@ -336,7 +483,7 @@
\def\ldexpwithtype#1#2#3#4#5{% TODO
\yystringempty{#2}{\toksc{$}}{\toksc{$#2}}%$
- \yystringempty{#5}{\toksd{{}$}}{#5\appendr\toksd{]$}\appendl\toksd{[}}%
+ \yystringempty{#5}{\toksd{{}$}}{#5\appendlnx\toksd{[}\appendrnx\toksd{]$}}%
\concat\toksc\toksd
}
@@ -344,8 +491,152 @@
\toksd{\hbox{\ttl #1}}%
}
-\defc\ldstatement{}
+\defc\ldstatement{{#1}} % the braces form the group for a \toks assignment
+
+\let\ldsecspec\ldstatement
+
\defc\ldsectionseparator{\toksc{&\multispan5\cdotfill\quad\cr}\concat\toksa\toksc}
-\restorecs{ld-display}{\anint\ldregexp\ldassignment\ldfill}
+\def\ldboxstash#1{%
+ \ifchecktrim\errmessage{stash contents: \the\toksa}\fi
+ {\setbox0 \vbox{\the\toksa}\ifdim\ht0=\z@\aftergroup\toksa\else\aftergroup\eatone\fi}{}%
+ \yytoksempty\toksa{#1}{%
+ $\vtop{\activateinlinec\tabskip\z@\halign{\strut\ignorespaces##\hfil\cr\the\toksa\crcr}}$\hfill}}
+
+\def\ldmakestashbox#1{\cleanstash\stripstash\ldboxstash{#1}}
+
+\defc\ldsectionseparator{%
+ \restorecs{table-render}{\strm}%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \edef\next{\toksc{&\noexpand\multispan5\toksa{}\the\yystashlocal
+ \noexpand\ldmakestashbox{\cdotfill}\noexpand\quad\cr}}\next\concat\toksa\toksc
+ \fi
+}
+
+\defc\ldsectionseparator{%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \appendr\toksa{&\nx\multispan5\toksa{\the\yystashlocal}\nx\ldmakestashbox{\nx\cdotfill}\nx\quad\cr}%
+ \fi
+}
+
+\defc\ldsectionstash{%
+ \restorecs{table-render}{\strm}%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \edef\next{\toksc{\sections@header&\noexpand\multispan5\toksa{}\the\yystashlocal
+ \noexpand\ldmakestashbox{\cdotfill}\noexpand\quad\cr}}\next\concat\toksa\toksc
+ \def\sections@header{}%
+ \fi
+}
+
+\defc\ldsectionstash{%
+ \expandafter\ifx\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\expandafter\expandafter\yystashlocal\expandafter\expandafter\expandafter{\csname ldstashentry[#2]\endcsname}%
+ \appendr\toksa{\sections@header&\nx\multispan5\toksa{\the\yystashlocal}\nx\ldmakestashbox{\nx\cdotfill}\nx\quad\cr}%
+ \let\sections@header\empty
+ \fi
+}
+
+\restorecs{ld-display}{\ldregexp\ldassignment\ldfill\ldinsertcweb\ldentry}
\toyyunion{ld-parser:sections}
+
+% preprocessing macros: collecting stash and marking variables
+
+\restorecslist{ld-parser-prototypes}\ldunion
+\restorecs{ld-parser-strict}{\insertcweb}
+
+\defc\ldmemory{#1} % memory specification :: \ldmemory{memory specification}
+
+\defc\ldmemspecstash{%
+ \readstash{#2}%
+ \setbox0 \vbox{\toksa\expandafter{\the\yystashlocal}\cleanstash\stripstash\the\toksa}%
+ \ifdim\ht0=\z@
+ \expandafter\let\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\edef\csname ldstashentry[#2]\endcsname{\the\yystashlocal}%
+ \fi
+} % memory spec stash :: \ldmemspecstash{fptr}{sptr}
+
+\defc\ldmemspecseparator{%
+ \readstash{#2}%
+ \setbox0 \vbox{\toksa\expandafter{\the\yystashlocal}\cleanstash\stripstash\the\toksa}%
+ \ifdim\ht0=\z@
+ \expandafter\let\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\edef\csname ldstashentry[#2]\endcsname{\the\yystashlocal}%
+ \fi
+} % memory spec separator :: \ldmemspecseparator{fptr}{sptr}
+
+\defc\ldsections{#1} % ld sections :: \ldsections{sections}
+
+\defc\ldsectionseparator{%
+ \readstash{#2}%
+ \setbox0 \vbox{\toksa\expandafter{\the\yystashlocal}\cleanstash\stripstash\the\toksa}%
+ \ifdim\ht0=\z@
+ \expandafter\let\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\edef\csname ldstashentry[#2]\endcsname{\the\yystashlocal}%
+ \fi
+} % section separator :: \ldsectionseparator{fptr}{sptr}
+
+\defc\ldsectionstash{%
+ \readstash{#2}%
+ \setbox0 \vbox{\toksa\expandafter{\the\yystashlocal}\cleanstash\stripstash\the\toksa}%
+ \ifdim\ht0=\z@
+ \expandafter\let\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\edef\csname ldstashentry[#2]\endcsname{\the\yystashlocal}%
+ \fi
+} % sections spec stash :: \ldsectionstash{fptr}{sptr}
+
+\defc\ldcommandseparator{%
+ \readstash{#2}%
+ \setbox0 \vbox{\toksa\expandafter{\the\yystashlocal}\cleanstash\stripstash\the\toksa}%
+ \ifdim\ht0=\z@
+ \expandafter\let\csname ldstashentry[#2]\endcsname\relax
+ \else
+ \expandafter\edef\csname ldstashentry[#2]\endcsname{\the\yystashlocal}%
+ \fi
+} % command separator :: \ldcommandseparator{fptr}{sptr}{prev command}{next command}
+
+\defc\ldstatement{#1}
+
+\defc\ldnamedsection{\let\ldor\empty#6}
+
+\defc\ldassignment{%
+ {%
+ \let\ldregexp\markldname
+ #1%
+ }{\errmessage{Not an \\ldregexp!}}%
+}
+\defc\ldprovide{%
+ {%
+ \let\ldregexp\markldname
+ #1%
+ }{\errmessage{Not an \\ldregexp!}}%
+}
+
+\defc\ldentry{%
+ {%
+ \let\ldregexp\markldname
+ #1%
+ }{\errmessage{Not an \\ldregexp!}}%
+}
+
+\def\markldname#1{%
+ \markldn@me#1%
+}
+\def\markldn@me#1#2#3#4{%
+ \aftergroup\def\expandafter\aftergroup\csname ldvarname[#2]\endcsname
+}
+
+\let\ldhidden\ldprovide
+\let\ldprovidehid\ldprovide
+
+\toyyunion{ld-parser:restash}
diff --git a/support/splint/examples/ld/lstokenset.sty b/support/splint/examples/ld/lstokenset.sty
index 3ab3ae2d16..441bad2543 100644
--- a/support/splint/examples/ld/lstokenset.sty
+++ b/support/splint/examples/ld/lstokenset.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,10 +14,11 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\prettywordpair{OPTIONAL}{{\tt opt}}%
-\prettywordpair{SUFFIX}{{\tt suffix}}%
-\prettywordpair{EXTENDED}{{\tt ext}}%
-\prettywordpair{INTEGER}{{$[\,0\ldots9\,]\ast$}}%
-\prettywordpair{IDENTIFIER}{{$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}%
-\prettywordpair{WILDCARD}{{{\tt *} {\rm or} {\tt ?}}}%
-\prettywordpair{PERCENT_IDENTIFIER}{{\tt\%$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}%
+\prettywordpairwvis{OPTIONAL}{{\tt opt}}{opt}%
+\prettywordpairwvis{SUFFIX}{{\tt suffix}}{suffix}%
+\prettywordpairwvis{EXTENDED}{{\tt ext}}{ext}%
+\prettywordpairwvis{INTEGER}{{$[\,0\ldots9\,]\ast$}}{[0...9]*}%
+\prettywordpairwvis{IDENTIFIER}{{$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}{[a...Z0...9]*}%
+\prettywordpairwvis{META_IDENTIFIER}{{\cyr\lqq{\rm meta identifier}\rqq}}{meta identifier}%
+\prettywordpairwvis{WILDCARD}{{{\tt *} {\rm or} {\tt ?}}}{* or ?}%
+\prettywordpairwvis{PERCENT_IDENTIFIER}{{\tt\%$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}{\%[a...Z0...9]*}%
diff --git a/support/splint/examples/ld/ltokenset.sty b/support/splint/examples/ld/ltokenset.sty
index d5d0d9b1ee..846dead6c8 100644
--- a/support/splint/examples/ld/ltokenset.sty
+++ b/support/splint/examples/ld/ltokenset.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,13 +14,21 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\prettywordpair{comma}{{\tt,}}
-\prettywordpair{semicolon}{{\tt;}}
-\prettywordpair@@{file\_NAME\_list}{\ifinheader\itbold\else\it\fi file\_{\rm name}\_list}
-\prettywordpair@@{high\_level\_library\_NAME\_list}{\ifinheader\itbold\else\it\fi high\_level\_library\_{\rm name}\_list}
-\prettywordpair@@{low\_level\_library\_NAME\_list}{\ifinheader\itbold\else\it\fi low\_level\_library\_{\rm name}\_list}
+\let\currentparsernamespace\parsernamespace
+\let\parsernamespace\indexpseudonamespace % use the correct namespace
+\input yypretty.sty % input standard prettified tokens first
+ % so that we can change their appearance below
+\let\parsernamespace\cwebclinknamespace
+\prettywordpair{.origin&}{{$\cdot\cdot$}}% special . name in \ld\ scripts
+\let\parsernamespace\currentparsernamespace
+
+\prettywordpairwvis{comma}{{\tt,}}{,}
+\prettywordpairwvis{semicolon}{{\tt;}}{;}
+\prettywordpair@@{file\_NAME\_list}{{\ifinheader\itbold\else\it\fi file\_{\rm name}\_list}}
+\prettywordpair@@{high\_level\_library\_NAME\_list}{{\ifinheader\itbold\else\it\fi high\_level\_library\_{\rm name}\_list}}
+\prettywordpair@@{low\_level\_library\_NAME\_list}{{\ifinheader\itbold\else\it\fi low\_level\_library\_{\rm name}\_list}}
\prettywordpair@@{NAME}{{\rm name}}
-\prettywordpair@@{LNAME}{{\rm name$_{\rm L}$}}
+\prettywordpair@@wvis{LNAME}{{\rm name$_{\rm L}$}}{name_L}
\prettywordpair@@{END}{{\rm end}}
\prettywordpair@@{UNARY}{{\rm unary}}
\prettywordpair@@{VERSIONK}{{\tt VERSION}$_{\rm K}$}
@@ -30,23 +38,23 @@
\prettytoken{TARGET}
\prettytoken{ASSERT}
\prettytoken{INSERT}
-\prettywordpair@@{PLUSEQ}{{}$\MRL{+{\K}}${}}
-\prettywordpair@@{MINUSEQ}{{}$\MRL{-{\K}}${}}
-\prettywordpair@@{MULTEQ}{{}$\MRL{\times{\K}}${}}
-\prettywordpair@@{DIVEQ}{{}$\MRL{{\div}{\K}}${}}
-\prettywordpair@@{ANDEQ}{{}$\Xandxeq${}}
-\prettywordpair@@{OREQ}{{}$\Xorxeq${}}
-\prettywordpair@@{LSHIFTEQ}{{}$\MRL{\ll{\K}}${}}
-\prettywordpair@@{RSHIFTEQ}{{}$\MRL{\gg{\K}}${}}
-\prettywordpair@@{NE}{{}$\not=${}}
-\prettywordpair@@{LE}{{}$\leq${}}
-\prettywordpair@@{GE}{{}$\geq${}}
-\prettywordpair@@{EQ}{{}$=${}}
-\prettywordpair@@{OROR}{{}$\V${}}
-\prettywordpair@@{ANDAND}{{}$\W${}}
-\prettywordpair@@{LSHIFT}{{}$\ll${}}
-\prettywordpair@@{RSHIFT}{{}$\gg${}}
-\prettywordpair@@{=}{{}$\K${}}
+\prettywordpair@@wvis{PLUSEQ}{{}$\MRL{+{\K}}${}}{=+}
+\prettywordpair@@wvis{MINUSEQ}{{}$\MRL{-{\K}}${}}{=-}
+\prettywordpair@@wvis{MULTEQ}{{}$\MRL{\times{\K}}${}}{=*}
+\prettywordpair@@wvis{DIVEQ}{{}$\MRL{{\div}{\K}}${}}{=/}
+\prettywordpair@@wvis{ANDEQ}{{}$\Xandxeq${}}{=W}
+\prettywordpair@@wvis{OREQ}{{}$\Xorxeq${}}{=V}
+\prettywordpair@@wvis{LSHIFTEQ}{{}$\MRL{\ll{\K}}${}}{=<<}
+\prettywordpair@@wvis{RSHIFTEQ}{{}$\MRL{\gg{\K}}${}}{=>>}
+\prettywordpair@@wvis{NE}{{}$\not=${}}{= }
+\prettywordpair@@wvis{LE}{{}$\leq${}}{<=}
+\prettywordpair@@wvis{GE}{{}$\geq${}}{>=}
+\prettywordpair@@wvis{EQ}{{}$=${}}{=}
+\prettywordpair@@wvis{OROR}{{}$\V${}}{||}
+\prettywordpair@@wvis{ANDAND}{{}$\W${}}{&&}
+\prettywordpair@@wvis{LSHIFT}{{}$\ll${}}{<<}
+\prettywordpair@@wvis{RSHIFT}{{}$\gg${}}{>>}
+\prettywordpair@@wvis{=}{{}$\K${}}{=:}
\prettywordpair@@{+}{{}$+${}}
\prettywordpair@@{-}{{}$-${}}
\prettywordpair@@{!}{{}$\CM${}}
@@ -54,9 +62,16 @@
\prettywordpair@@{^}{{}$\XOR${}}
\prettywordpair@@{|}{{}$\OR${}}
\prettywordpair@@{&}{{}$\AND${}}
+\prettywordpair@@{\{}{{\ntt@\{}}% the purpose is to adjust the indexing
+\prettywordpair@@{\}}{{\ntt@\}}}% the purpose is to adjust the indexing
+
+% the switches below are sloppy, since the macros in the index
+% read a-z as letters (category 11) whereas the parser records them as
+% non-letters; it would be better to use category 12 throughout (by,
+% for example, passing the context as a stream of character codes)
\def\gtsymswitch{%
- \raw exp *left *right\raw {%
+ exp \raw exp *left *right\raw {%
{}$>${}%
}
}
@@ -64,7 +79,7 @@
\def\gtsymswitchdefault{{\tt >}}
\def\ltsymswitch{%
- \raw exp *left *right\raw {%
+ exp \raw exp *left *right\raw {%
{}$<${}%
}
}
@@ -74,17 +89,53 @@
\setspecialcharsfrom\gtsymswitch
\setspecialcharsfrom\ltsymswitch
-\prettywordpair@@{>}{{%
+% the visual key switches below slightly abuse the properties of
+% \yyifsamestring, namely that it ignores the category code of the characters
+
+\prettywordpair@@wvis{>}{{%
\let\default\gtsymswitchdefault
\switchon{#1}\in\gtsymswitch
-}}
+}}{%
+ \yyifsamestring{exp}{#1}{> exp}{%
+ \yyifsamestring{*left}{#1}{> exp}{%
+ \yyifsamestring{*right}{#1}{> exp}{>}%
+ }%
+ }%
+}
-\prettywordpair@@{<}{{%
+\prettywordpair@@wvis{<}{{%
\let\default\ltsymswitchdefault
\switchon{#1}\in\ltsymswitch
-}}
+}}{%
+ \yyifsamestring{exp}{#1}{< exp}{%
+ \yyifsamestring{*left}{#1}{< exp}{%
+ \yyifsamestring{*right}{#1}{< exp}{<}%
+ }%
+ }%
+}
-\prettywordpair@@{*}{\yyifsamestring{exp}{#1}{{}$\times${}}{{\tt *}}}
+\def\astsymswitch{%
+ exp \raw exp *left *right\raw {%
+ {}$\times${}%
+ }
+}
+
+\def\astsymswitchdefault{{\tt *}}
+
+\setspecialcharsfrom\astsymswitch
+
+\prettywordpair@@wvis{*}{%
+ {%
+ \let\default\astsymswitchdefault
+ \switchon{#1}\in\astsymswitch
+ }%
+}{%
+ \yyifsamestring{exp}{#1}{* exp}{%
+ \yyifsamestring{*left}{#1}{* exp}{%
+ \yyifsamestring{*right}{#1}{* exp}{*}%
+ }%
+ }%
+}
\prettywordpair@@{/}{{}$/${}}
diff --git a/support/splint/examples/symbols/Makefile b/support/splint/examples/symbols/Makefile
index d22110baaf..964927bc9d 100644
--- a/support/splint/examples/symbols/Makefile
+++ b/support/splint/examples/symbols/Makefile
@@ -1,20 +1,38 @@
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
# this Makefile uses a flat directory structure for demonstration
# purposes; the main directory (../..) follows a slightly more
# modular organization scheme.
-SPLINT_ROOT = $(shell pwd)/../..
-SYMBOLS_PREREQS = %.tex %.idx ${SPLINT_XTEXSTYLES} ${SPLINT_ROOT}/cweb/bo.tok symtoks.sty
-SYMBOLS_PREREQS += ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} symmap.sty ${SPLINT_ROOT}/cweb/bg.y
+SYMBOLS_PREREQS = %.tex %.idx ${SPLINT_XTEXSTYLES} ${SPLINT_ROOT}cweb/bo.tok ${SPLINT_ROOT}cweb/fo.tok symtoks.sty
+SYMBOLS_PREREQS += ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} symmap.sty symtricks.sty symfm.sty slimbo.sty
+SYMBOLS_PREREQS += ${SPLINT_ROOT}cweb/fil.l ${SPLINT_ROOT}cweb/bg.y ${SPLINT_ROOT}cweb/lo.l
-include ${SPLINT_ROOT}/makefile.inc
+include ../../makefile.inc
# rules specific to this example
+%.tex: %.x
+
%.tex: %.w
-%.tex %.idx: %.w
- ${CWEAVE} $<
+%.tex %.idx: %.x alphas.hx
+ -${CWEAVE} $<
%.gdx: ${SYMBOLS_PREREQS}
${TEX} $*.tex
@@ -22,6 +40,9 @@ include ${SPLINT_ROOT}/makefile.inc
%.pdf %.sns: ${SYMBOLS_PREREQS} %.gdy
${PDFTEX} $*.tex && touch $*.gdy && touch $*.pdf
+alphas.hx:
+ ${MISCCW} --alpha-list --alpha-length=1 $@
+
docs: symbols.pdf
clean: clean_core
@@ -32,6 +53,9 @@ mostlyclean: clean_temp
distclean: clean
cd ${SPLINT_ROOT} && ${MAKE} clean
-.PRECIOUS: symbols.gdy
+.PRECIOUS: ${SPLINT_ROOT}cweb/fo.tok ${SPLINT_ROOT}cweb/bo.tok ${SPLINT_ROOT}cweb/lo.l \
+ ${SPLINT_ROOT}cweb/fil.l ${SPLINT_ROOT}cweb/dyytab.tex ${SPLINT_ROOT}cweb/gyytab.tex \
+ ${SPLINT_ROOT}cweb/fyytab.tex symbols.gdy
-.INTERMEDIATE: symbols.gdx
+.INTERMEDIATE:
+#.INTERMEDIATE: symbols.gdx
diff --git a/support/splint/examples/symbols/slimbo.sty b/support/splint/examples/symbols/slimbo.sty
new file mode 100644
index 0000000000..48da360f55
--- /dev/null
+++ b/support/splint/examples/symbols/slimbo.sty
@@ -0,0 +1,16 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
diff --git a/support/splint/examples/symbols/symbols.w b/support/splint/examples/symbols/symbols.w
index 47c210e4f8..dc58adc0bd 100644
--- a/support/splint/examples/symbols/symbols.w
+++ b/support/splint/examples/symbols/symbols.w
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -15,17 +15,40 @@
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
\newwrite\gindex
+% the namespace choices below are a bit random as this is a demo only
+
\input limbo.sty
\def\optimization{5}
\input yy.sty
+\modenormal
+\input noweb.sty
+ \xreflocaltrue
+ \readlxrefs
+ \newwrite\xrefstream
+ \immediate\openout\xrefstream=\jobname.xxr
\input dcols.sty
\input symmap.sty
+\let\parsernamespace\flexnamespace
+\let\hostparsernamespace\flexnamespace
+ \let\tokeneq\tokeneqpretty
+\let\optstrextra\optstrextraesc
+ %\input fo.tok
+ \input ftokenset.sty %
+\let\parsernamespace\flexpseudorenamespace
+\let\hostparsernamespace\flexpseudorenamespace
+\input fretokenset.sty % regular expression names
\def\symnamespace{[symbols]}
\let\currentnamespace\parsernamespace
\let\parsernamespace\symnamespace
- \input symtoks.sty %
+ \input symtoks.sty %
+ \let\tokeneq\tokeneqpretty
+ \let\optstrextra\optstrextraesc
+ \input fo.tok
+ \input ftokenset.sty
+\let\parsernamespace\indexpseudonamespace
+ \input yypretty.sty
\let\parsernamespace\currentnamespace
\let\hostparsernamespace\symnamespace % the namespace where tokens are looked up
% for typesetting purposes
@@ -38,17 +61,60 @@
}
\let\oldN\N
-\let\N\textN
\let\M\textM
+\let\N\chapterN
+
+\font\ttit=cmitt10
\defreserved{Y}{\.{Y}}
-@** Introduction. \setupfootnotes
+\input symfm.sty
+\input slimbo.sty
+\setupfootnotes
+
+@** Introduction.
+The manual supplied with \splint\ presents an outline of the main
+features of the package. Its main focus, however, is on the
+general parser design using the package. The two parsers that come
+with \splint, for pretty printing \bison\ and \flex\ are used as an
+illustration only. A full featured parser design of a parser for
+pretty printing linker scripts does not treat the \bison\ and \flex\
+parsers in any detail, either. Partially filling this gap is the main
+reason for this example\footnote{A secondary reason is to provide a testbed
+for typesetting experiments.}.
+
The same parser and lexer (with a slightly different input routine)
-are used to typeset examples of productions in text.
-Some of the features of this parser
-are collected below for future reference. One might keep this section
-handy when typesetting his own examples (for example, it is probably
+may be used to typeset \bison\ and \flex\ examples in text, as
+well. There are some subtle differences between doing it inside a
+straightforward \TeX\ file and a \CWEB\ section. The most obvious of
+these is the requirement to use \.{@@@@} whenever a single \.{@@} is
+called for in the \TeX\ input. See below for further examples.
+
+Typesetting grammar examples also calls for a wider range of
+typographic devices than the ones used for pretty printing \bison\
+(and \flex) code. Formatting and modifying the typesetting of \bison\
+productions and \flex\ scanner rules is given some consideration as
+well.
+
+While this example is rather short, it has enough variety to present
+the indexing features of the macros supplied with \splint\ in some
+detail.
+
+Finally, a few macros, hastily thrown together, show how \CWEB\ may be
+used to create the documentation with a `book feel', including the
+custom typesetting of chapter headers, sectioned index, etc. Not all
+of the features mentioned above may be desired for any one project, but
+in case some of them are, these examples provide a convenient place to
+consult about the details of the implementation for one's own use.
+
+%\let\N\textN
+
+@s TeX_ TeX
+
+@* Examples of \bison\ parser output.
+Some of the features specific to the use of the \bison\ parser for this purpose
+are explained below. One might find it useful to keep this section
+as a quick reference while typesetting his own examples (for example, it is probably
unintuitive that `\.{`}' produces `\.{\yl}' but there is simply no
way to use `\.{\yl}' as a character inside the \TeX\ section of \CWEB).
The first, rather eclectic and lengthy example demonstrates various
@@ -89,7 +155,7 @@ and_another:
\%token bogey1 bogey2 ;
\%type <some> TOKEN ANOTHER ;
\%start inputer;
-\stashed{\rm Example 1 of flushed code}\sflush{F}{flush this}
+\stashed{\rm Example 1 of flushed code (delayed till next \.{\\stashed} is encountered).}\sflush{F}{flush this}
\%default-prec;
\%no-default-prec;
\stashed{\rm Example 2 of flushed code}
@@ -112,9 +178,9 @@ and_another:
\beginprod
\%expect 0x137;
\%expect-rr 17;
-\%lex-param \{\stashed{|int number;|}\};
-\%define var.1 \{ \stashed{ |func3(8, "string"){n = m++;};| } \}
-\%union var.2 \{ \stashed{\rm |int a, b, c;|\6 |char a_char;|} \}
+\%lex-param \{\stashed{\rm|int number;|}\};
+\%define var.1 \{ \stashed{\rm |func3(8, "string"){n = m++;}| } \}
+\%union var.2 \{ \stashed{\rm |int a, b, c;|\6\rm |char a_char;|\C{font switching must be applied to every line (see the source)}} \}
\%\{ \stashed{\rm |int a, b, c;|\6 |char a_char;|} \%\}
\endprod
%
@@ -128,7 +194,8 @@ variations enabled by such context.
\nx\restorecolor
}%
\def\dset#1{%
- $\nx\underbrace{\hbox{#1}}_{{\nx\rm identifier:\ \the\toksc}}$%
+ $\nx\underbrace{\hbox{#1}}_{{\nx\rm id:\ \hbox{\sixrm\the\toksa}}}$% TODO: find a better way to switch
+ % the font family explicitly
}%
\def\esets#1{%
\nx\beginub#1%
@@ -139,6 +206,8 @@ variations enabled by such context.
\def\beginub#1\endub{%
$\underbrace{\hbox{#1}}_{\rm a\ group}$%
}
+\checktabletrue
+%\yyflexdebugtrue
\smallskip
\beginprod
\skipheader
@@ -150,11 +219,38 @@ ghost:
\formatlocal{\let\termmetastyle\esete}this\formatlocal{\restorecs{table-render}\termmetastyle}one\{\stashed{\rm|assign(x, y, z);|}\}
;
\endprod
+\checktablefalse
+\yyflexdebugfalse
+\medskip
+\noindent Next, an incomplete listing of the characters that can be
+typeset, as well as the way to typeset\footnote{Please note that we are discussing the issues of typesetting
+{\let\tt\tti\it examples of \bison\ input in text\/} at the moment; the parser reading the code from
+{\let\tt\tti\it the \Cee\ portion of the \CWEB\ input\/} typesets these symbols automatically.} them (only the `tricky' cases
+are listed). The use of `\.{`}' to typeset `\.{\yl}' deserves a special note---\CWEB's rules make it
+nearly impossible to use `\.{\yl}' in the \TeX\ portion of the program. One way to avoid using this
+relatively unnatural notation is to put the production example in a separate \TeX\ file as demonstrated
+by the \prodstyle{symbol\_tricks2} example below, included from \.{symtricks.sty}. The same example uses
+a few parser facilities to override the typesetting defaults of the standard production demo setup (such as using
+\.{\\insertraw} to reset the last action display).
+
+The uniform alignment across several productions below was accomplished with \.{\\setglobalalignrules} by using
+the value of \.{\\gaglue} set by a copy of one of the productions.
+\begingroup
+\setbox0\hbox{\ninepoint look: $\rightarrow\,$\X{$\infty$}:See this example to deduce $\ldots$\X}%
+\setbox0=\vbox{
\medskip
-\noindent Finally, an incomplete listing of the characters that can be
-typeset, as well as the way to typeset them. The use of `\.{`}' to
-typeset `\.{\yl}' deserves a special note---\CWEB's rules make it
-nearly impossible to use `\.{\yl}' in the \TeX\ portion of the program.
+\beginprod
+\insertraw{\let\stashnext\stashnextwithspace}%
+line_breaking_and_symbols:
+ GEN\stashed{|stash!=0|}ERIC '(' expression',' \ ss another es')' \
+ \insertraw{\let\stashnext\stashnextwithnothingnx}%
+ ` inline_\stashed{look: $\rightarrow\,$}c \{ \stashed{\X{$\infty$}:See this example to deduce $\ldots$\X\6}\stashed{|b == a - c|} \}
+ ` more_inline_c \{ \stashed{|func(int a, char b);|} \}
+%
+\endprod
+\expandafter
+}%
+\expandafter\setglobalalignrules\expandafter{\the\gaglue}%
\medskip
\tomainparser
\prettywordpair{GENERIC}{\_Generic}
@@ -185,9 +281,169 @@ symbol_tricks:
` ' ' \
;
\endprod
+\medskip
+\input symtricks.sty
+\noindent The stash chunks, inserted by \.{\\stashed\{}{\it random input\/}\.{\}}, are invisible to the parser.
+As an example, the stash producing the action in the first rule below (|stash!=0|) was
+inserted in the middle of the first term (\prodstyle{GENERIC}). The space (\.{ }) is a special case.
+\medskip
+\beginprod
+line_breaking_and_symbols:
+ GEN\stashed{|stash!=0|}ERIC '(' expression',' \ ss another es')' \
+ ` inline_\stashed{look: $\rightarrow\,$}c \{ \stashed{\X{$\infty$}:See this example to deduce $\ldots$\X\6}\stashed{|b == a - c|} \}
+ ` more_inline_c \{ \stashed{|func(int a, char b);|} \}
+%
+\endprod
+\medskip
+\noindent The behavior of the input routine mentioned above is adjustable by redefining \.{\\stashnext}. These adjustments may
+even be made locally, for small portions of the input only, using \.{\\insertraw}.
+Here is the same set of productions with the stash producing a space in the middle of \prodstyle{GENERIC}, reverting to the usual,
+`invisible' behavior by the time \.{\\yyinput} reaches \prodstyle{inline\_c} (that has
+`\.{\\stashed\{}$\,$look: $\rightarrow\,$\.{\}}' inserted before~\prodstyle{\_c}):
+\medskip
+\beginprod
+\insertraw{\let\stashnext\stashnextwithspace}%
+line_breaking_and_symbols:
+ GEN\stashed{|stash!=0|}ERIC '(' expression',' \ ss another es')' \
+ \insertraw{\let\stashnext\stashnextwithnothingnx}%
+ ` inline_\stashed{look: $\rightarrow\,$}c \{ \stashed{\X{$\infty$}:See this example to deduce $\ldots$\X\6}\stashed{|b == a - c|} \}
+ ` more_inline_c \{ \stashed{|func(int a, char b);|} \}
+%
+\endprod
+\endgroup
+
+@* Examples of \flex\ parser output. Standalone regular expressions can be displayed using \.{\\flexrestyle}:
+{\it \flexrestyle{\^\\\\[\\"\\'?\\\\]}}. Portions of \flex\ files may be typeset with the help of
+\.{\\beginflex}$\ldots$\.{\\endflex} macros. Just as in the case of \bison\ productions, care must be taken to
+escape some symbols that have special meaning to \TeX. The ones that {\it must be\/} escaped when used inside
+regular expressions are `\.{\{}', `\.{\}}', `\.{\\}', and~`\.{\%}'. Others, such as `\.{\^}', `\.{\_}', `\.{\$}', `\.{\#}',
+and~`\.{\&}' do not require any special treatment (although they continue to perform their special functions
+inside \.{\\stashed} blocks). As a note of caution, `$\ldots$\.{\\\\]}' results
+in `$\ldots$\flexrestyle{\\]}' and not
+{%
+\let\flbraceccl\flbraceccldemo\savecs{flexparser-re}\flbraceccl
+`\flexrestyle{[\\\\]}'%
+} as might have been intended (i.e.~the bracket, \.{]} is treated as an ordinary character, and not as part of
+the syntax for a character class). This is because the escape character (\.{\\}) serves a special r\^ole in \flex\ so
+to get the desired effect one must type \.{\\\\\\\\]}. The use of `\.{\yl}' deserves a special mention. As was pointed out above,
+this character is nearly inaccessible in the \TeX\ mode of \CTANGLE, which resulted in the following workaround. To use
+`\.{\yl}' in the examples typeset inside the \TeX\ portion of the \CWEB\ input, one should type `\.{`}'. To use `\.{`}', type
+`\.{\\`}' instead. If the example is not part of a \CWEB\ input (for example it is included from its own \TeX\ file similar to
+\.{symtricks.sty} above) then
+one can use the `\.{\yl}' character as intended. However, even inside a `pure \TeX\ file' to get `\.{`}', one must still type `\.{\\`}'.
+Many of the points made above may become more transparent after examining the source of the example following this sentence.
+\medskip
+\cdebugtrue
+\beginflex
+\stashed{\C{ Comments are possible with some effort }}
+<FAKE\stashed{\X{$x_m\ldots x_n$}:Names can have their own sections\X}>\{
+ ^\{WS\}([\ a-x#\\\\]`[\`0-9\\`])\\n\\r \{\stashed{|x=@t}2^y{@>|}\}
+ ^"/*"$ \{\stashed{|start_comment(@tWatch out for `\.{\yl}'!@>)|}\}
+\}
+\endflex
+\cdebugfalse
+\medskip
+\noindent While, technically speaking, \flex\ has a `parser stack' in the sense that in the event of an unsuccessful parsing pass
+with a `section 2' parser, a `section 1' parser may be attempted, this strategy often fails. As a short excerpt immediately
+following this section shows, `section 1' input may also pass for syntactically correct `section 2' \flex\ code (although with
+entirely wrong semantics). Thus a better `lazy' approach is to mark all \flex\ code as `section 1' instead.
+@<Definitions for \flex\ input lexer@>=
+@G(fs1)
+WS [[:blank:]]+
+OPTWS [[:blank:]]*
+NOT_WS [^[:blank:]\r\n]
+
+NL \r?\n
+
+NAME ([[:alpha:]_][[:alnum:]_-]*)
+NOT_NAME [^[:alpha:]_*\n]+
+
+SCNAME {NAME}
+
+ESCSEQ (\\([^\n]|[0-7]{1,3}|x[[:xdigit:]]{1,2}))
+
+FIRST_CCL_CHAR ([^\\\n]|{ESCSEQ})
+CCL_CHAR ([^\\\n\]]|{ESCSEQ})
+CCL_EXPR ("[:"^?[[:alpha:]]+":]")
+
+LEXOPT [porkacne]
+
+M4QSTART "[["
+M4QEND "]]"
+
+@ The first three lines of the previous section successfully parse as section~2 input.
+\parseverbosetrue
+@<As section 2@>=
+@G(fs2)
+WS [[:blank:]]+
+OPTWS [[:blank:]]*
+NOT_WS [^[:blank:]\r\n]
+@ @<Patterns for \flex\ lexer@>=
+@G(fs1)
+/* Comment before the section is put after the states list */
+<INITIAL@>@<Definitions for \flex\ input lexer@>@=>{
+ ^{WS} {@> @[TeX_( "/flindented@@codetrue/yyBEGIN{CODEBLOCK}/yylexnext" );@]@=}
+ ^"/*" {@> @[TeX_( "/yypushstate{COMMENT}/yylexnext" );@]@=}
+ ^#{OPTWS}line{WS} {@> @[TeX_( "/yypushstate{LINEDIR}/yylexnext" );@]@=}
+ ^"%s"{NAME}? {@> @[TeX_( "/yylexreturnptr{SCDECL}" );@]@=}
+ ^"%x"{NAME}? {@> @[TeX_( "/yylexreturnptr{XSCDECL}" );@]@=}
+ ^"%{".*{NL} {@> @<Start a \Cee\ code section@> @=}
+
+ ^"%top"[[:blank:]]*"{"[[:blank:]]*{NL} {@> @<Begin the \prodstyle{\%top} directive@> @=}
+ ^"%top".* {@> @[TeX_( "/yyfatal{malformed '/harmlesscomment top' directive}" );@] @=}
+
+ {WS} {@> @[;@]/* discard */ @=}
+
+ ^"%%".* {@> @<Start section 2@> @=}
+
+ ^"%pointer".*{NL} {@> @[TeX_( "/flinc@@linenum/yylexreturn{POINTER_OP}" );@]@=}
+ ^"%array".*{NL} {@> @[TeX_( "/flinc@@linenum/yylexreturn{ARRAY_OP}" );@]@=}
+
+ ^"%option" {@> @[TeX_( "/yyBEGIN{OPTION}/yylexreturn{OPTION_OP}" );@]@=}
+
+ ^"%"{LEXOPT}{OPTWS}[[:digit:]]*{OPTWS}{NL} {@> @[TeX_( "/flinc@@linenum/yyflexoptreturn{OPT_DEPRECATED}" );@]@=}
+ ^"%"{LEXOPT}{WS}.*{NL} {@> @[TeX_( "/flinc@@linenum/yyflexoptreturn{OPT_DEPRECATED}" );@]@=}
+
+ ^"%"[^porksexcan{}].* {@> @[TeX_( "/yyfatal{unrecognized '/harmlesscomment' directive: /the/yytext}" );@] @=}
+
+ ^{NAME} {@> @<Copy the name and start a definition@> @=}
+ {SCNAME} @> @[TeX_( "/RETURNNAME" );@] @=
+ ^{OPTWS}{NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]/* allows blank lines in section 1 */@=}
+ {OPTWS}{NL} {@> @[TeX_( "/flinc@@linenum/yylexnext" );@]/* maybe end of comment line */@=}
+}
+
+@ @<Start a \Cee\ code section@>=
+
+@ @<Copy the name and start a definition@>=
+
+@ @<Begin the \prodstyle{\%top} directive@>=
+
+@ @<Start section 2@>=
+@q Include the list of index section markers; this is a hack to get around @>
+@q the lack of control over the generation of \CWEB's index; the correct order @>
+@q of index entries depends on the placement of this inclusion @>
+@i alphas.hx
+
+@** Index. Various identifiers in \bison\ productions and \flex\ sections are put in the index, along with
+the identifiers from the \Cee\ portions of the \CWEB\ input. The
+mechanism used to typeset these identifiers is different from the one
+employed by \CWEB's indexing macros. While the \.{\\I} macros in
+\.{cwebmac.tex} pass the actual typesetting commands to \TeX, \splint\
+only outputs the context in which the identifier was encountered. By
+redefining the macros that interpret this context to typeset the
+index, several useful effects can be achieved\footnote{One pretty common use is to redefine
+macros that take parameters to take none.}.
+\unsetfootnotes
+\input gindex.sty
+\def\otherlangindexseparator{% the index is too short
+ \toksg{}%
+ \vskip.5\baselineskip
+ \centerline{B{\sc ISON}, F{\sc LEX, AND} \TeX\ {\sc INDICES}}%
+ \vskip.5\baselineskip
+}
\closeout\gindex
-@** Index. Totally superfluous in this case.
+\termindexfalse % do not attach indexing entries to the terms in the index, or the section list
\let\inx\inxmod
\let\fin\finmod
\def\topofcontents{\null\vskip-3\baselineskip\centerline{C{\sc ONTENTS} (\sc\uppercase\expandafter{\title})}\medskip}
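
The redefinition trick mentioned in the footnote above can be pictured as follows; this is only a
sketch, and \.{\\idxwrap} is a hypothetical name standing in for one of the context-interpreting
macros, not an actual \splint\ control sequence:

    \def\idxwrap#1{{\it#1\/}}% one-parameter form: capture the recorded context and restyle it
    \def\idxwrap{}           % redefined to take no parameters: the braced context that follows
                             % is then typeset as ordinary material
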
diff --git a/support/splint/examples/symbols/symfm.sty b/support/splint/examples/symbols/symfm.sty
new file mode 100644
index 0000000000..9cfeacbeaa
--- /dev/null
+++ b/support/splint/examples/symbols/symfm.sty
@@ -0,0 +1,64 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+\begingroup
+\footline{\hfill}\headline{\hfill}\vfill
+\newread\versionstream
+\immediate\openin\versionstream=VERSION
+\read\versionstream to \version
+\message{<\version>}%
+\def\undot#1.#2.#3{#1\kern-.3pc.\kern-.3pc#2\kern-.3pc.\kern-.3pc#3\ignorespaces}
+\chardef\amp=`\&
+\titletrue
+
+% a few more fonts: everybody has these.
+
+\font\euror=cmr10
+\font\eurow=cmr10
+\font\eurob=cmbr10
+\font\eurobtwelve=cmbr10 at 16pt
+{\font\cminch=cmr10 at 1in
+\font\cmhuge=cmr10 at 1in
+\def\strut{\vrule height 17pt depth 7pt width 0pt}%
+\noindent
+\hbox to \hsize{%
+ \valign{#\vfil\cr
+ \halign{%
+ \hfil\eurobtwelve\strut#\cr
+ \noalign{\hrule height 5pt\vskip0pt plus1pt}%
+ S\kern-.1pc\hfill P\hfill L\hfill I\hfill N\hfill T\cr
+ \noalign{\hrule height .9pt\vskip0pt plus1pt}%
+ reference\cr
+ \noalign{\hrule height 5pt\vskip0pt plus1pt}%
+ }\cr
+ \noalign{\hfill}%
+ \vbox{\hbox{\eurobtwelve v e r s i o n\ \cminch \expandafter\undot\version}}\cr
+ }%
+}%
+%
+\vfill
+\newdimen\tempdimen
+\def\hrfill#1{\leaders\hrule#1\hfill}
+\hbox to \hsize{\cmhuge S\hfil\kern-3.9pc Y\hfil\kern-3.3pc M\hfil\kern-3.3pc B\hfil\kern-3.5pc O\hfil\kern-3.7pc L\hfil\kern-3.5pc S}
+\vfill
+\hrule height .3pc
+\kern .2pc
+\hbox to \hsize{\leaders\hrule height 15pt\hfill\raise5pt\llap{\hbox to
+\hsize{\euror\colorwhite{$\;$Alex Shibakov\hfill\ifcase\month \or
+January \or February\or March\or April\or May\or June\or July\or August\or September\or
+October\or November \or December\fi\ \number\day, \number\year$\,$}}}}}\eject
+\endgroup
+\titletrue
diff --git a/support/splint/examples/symbols/symmap.sty b/support/splint/examples/symbols/symmap.sty
index 12261be656..450dd9a649 100644
--- a/support/splint/examples/symbols/symmap.sty
+++ b/support/splint/examples/symbols/symmap.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -54,17 +54,15 @@
\input cweb/gyytab.tex % this should be the parser that will be used later
% in this case it is just an example
\edef\tointermediateparser{%
- \noexpand\savefullstate % unnecessary
- \noexpand\savefullstateextra % unnecessary
\let\noexpand\parsernamespace\noexpand\inamespace % switch to the new namespace
\the\pinittoks % restore all the tables, tokens and constants, and stacks
\let\noexpand\getcurrentparser\noexpand\tointermediateparser
}%
- \settokens % this simply assign values to tokens where the name of each token is taken out of yytname ...
- \input \drvname.tok % this will set up token equivalences in the namespace above ...
- % those are the values gleaned during the bootstrapping stage.
- % in the general case, one needs to run a bootsrapping (or similar)
- % parser to extract the token information.
+ \settokens % this simply assigns values to tokens where the name of each token is taken out of yytname ...
+ \input bo.tok % this will set up token equivalences in the namespace above ...
+ % those are the values gleaned during the bootstrapping stage.
+                % in the general case, one needs to run a bootstrapping (or similar)
+ % parser to extract the token information.
\optimizeall % this is necessary for correct rule listing in the output stage:
% otherwise \fgetelemof will use the current value of the \yy... token registers which
% will hold the values of the full parser that is loaded next
@@ -72,7 +70,7 @@
\listrules % ... to be used while listing the rules
% note that we do not bother to set up a lexer for this parser (even though we already have one and the
% \optimizeall macro above will create a set of associative tables for it---this is merely an unwanted
- % sideffect); after the rules have been listed, the intemediate parser in no longer needed.
+  % side effect); after the rules have been listed, the intermediate parser is no longer needed.
%\showthe\newsymswitch
\def\fullnamespace{[full]}% this is the parser that parses the bison grammar from a raw
% bison file; it can play a role of the bootstrap parser for
@@ -96,7 +94,7 @@
% part of the file, this can simply be disabled
\settokens
\setflexstates % the main lexer can be reused in this case; the states still need to be set up
- \input \drvname.tok % set up the tokens for the bison grammar parser
+ \input bo.tok % set up the tokens for the bison grammar parser
\newparserstate
\newlexerstate
\newlexerstateextra
@@ -104,8 +102,6 @@
\setnulstack{yyirulestack}%
%
\edef\tofullparser{%
- \noexpand\savefullstate
- \noexpand\savefullstateextra
\let\noexpand\parsernamespace\noexpand\fullnamespace % switch to the new namespace
\the\pinittoks % restore all the tables, tokens and constants, and stacks
\let\noexpand\getcurrentparser\noexpand\tofullparser
@@ -140,7 +136,7 @@
\fi
}%
% build the command to create the symbolic name switch
- \toksc{\tofullparser\parserinit
+ \toksc{\tofullparser\basicparserinit\bisonparserinit\bisonparserdatainit
\let\yyinputold\yyinput
\let\yyinput\yyinputtrivial % a demo of a stripped down, slightly faster input routine
\doascii{11}\expandafter\yyparse}%
@@ -161,4 +157,53 @@
{\newlinechar=`\^^J \immediate\write\symbolicswitch{\the\setsncommands\the\unsetsncommands}}%
\immediate\closeout\symbolicswitch
\tomainparser % go back to the main parser
+%
+% \flex\ parser test
+%
+ {%
+ \toflexreparser
+ \basicparserinit
+ \flexreparserinit
+ \flexreparserdatainit
+ \yyBEGIN{SECT2}%
+ \flin@ruletrue
+ % special status of `\yl' in \CWEB\ makes the following workaround necessary
+   % if the code is used inside a \CWEB\ file
+ %|@t}\expandafter\yyparse\space [a-b]*(c|d|e)?\yyeof\yyeof\endparseinput\endparse{@>|
+ \expandafter\yyparse\space [a-b]*(c|d|e)?\yyeof\yyeof\endparseinput\endparse
+ \ifyyparsefail\else
+ \ferrmessage{done processing flex}%
+ \fi
+ }%
+%
+ {%
+ \def\flnamespace{[flex]}
+ \let\parsernamespace\flnamespace
+ \toflexparser
+ \basicparserinit
+ %\flexparserinit
+ \flexparserdatainit
+ \yylessusedtrue % TODO: put it in the \genericparser command
+ \let\f@nishparse\finishparse
+ \def\finishparse{\endinput\message{end of input}\relax\let\cleanupcs\f@nishparse}%
+ \def\next{%
+ \catcode`\\=12
+ \catcode`\^^J=12
+ \catcode`\%=12
+ \catcode`\^^M=12
+ \catcode`\{=12
+ \catcode`\}=12
+ \catcode`\#=12
+ \catcode`\_=12
+ %\yydebugmost
+ \expandafter\yyparse\input cweb/lo.l \cleanupcs\yyeof\yyeof\endparseinput\endparse % note that the space after the file name is necessary
+ \ifyyparsefail
+ \errmessage{stopped}%
+ \fi
+ \yydebugnone
+ \basicparserinit
+ \expandafter\yyparse\input cweb/fil.l \cleanupcs\yyeof\yyeof\endparseinput\endparse
+ % note that the space after the file name is necessary
+ }\expandafter\next % to lock the \catcode of the brace
+ }%
\fi
diff --git a/support/splint/examples/symbols/symtoks.sty b/support/splint/examples/symbols/symtoks.sty
index 8d6cdc5596..3ab9b142c7 100644
--- a/support/splint/examples/symbols/symtoks.sty
+++ b/support/splint/examples/symbols/symtoks.sty
@@ -1,3 +1,19 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
\prettywordpair{ANOTHER}{{\rm(}another\rm{)}}
\prettywordpair{TOKEN}{$\langle${\it token}$\rangle$}
\prettywordpair{GENERIC}{{\tt \_Generic}}
diff --git a/support/splint/examples/symbols/symtricks.sty b/support/splint/examples/symbols/symtricks.sty
new file mode 100644
index 0000000000..04bf5c5ac1
--- /dev/null
+++ b/support/splint/examples/symbols/symtricks.sty
@@ -0,0 +1,23 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+\beginprod
+\insertraw{\showlastactiontrue}\format{\inline\flatten}
+symbol_tricks2:
+ '|' | other \{\stashed{$\displaystyle e^{-1}=\lim\limits_{n\to\infty}\left({n\over n+1}\right)^n$}\}
+ ;
+\endprod
+\medskip
diff --git a/support/splint/examples/types/Makefile b/support/splint/examples/types/Makefile
new file mode 100644
index 0000000000..3dd52abeed
--- /dev/null
+++ b/support/splint/examples/types/Makefile
@@ -0,0 +1,30 @@
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+# this Makefile uses a flat directory structure for demonstration
+# purposes; the main directory (../..) follows a slightly more
+# modular organization scheme.
+
+# a rudimentary test for the tree evaluation macros and 'bignum'
+# arithmetic; mainly a debugging tool for simple parsers
+
+include ../../makefile.inc
+
+test:
+ ${TEX} test.sty
+
+
+clean: clean_core
diff --git a/support/splint/examples/types/basic.sty b/support/splint/examples/types/basic.sty
index 856faf2374..ba0decc237 100644
--- a/support/splint/examples/types/basic.sty
+++ b/support/splint/examples/types/basic.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -208,7 +208,7 @@
% \sdigit a b c which expands to the two digits of a + b + c
% where c \in \{0, 1\}.
% altogether there are 1200 sequences in use. this number can be
-% reduced to under 500 at the expence of more sophisticated
+% reduced to under 500 at the expense of more sophisticated
% conditionals:
% o in the case of \xdigit, a == 1 or b == 1 reduces \xdigit a b c to
% \sdigit a c 0 (for b == 1), whereas a == 0 or b == 0 makes
diff --git a/support/splint/examples/types/test.sty b/support/splint/examples/types/test.sty
index 8a7859d3d6..018b299c29 100644
--- a/support/splint/examples/types/test.sty
+++ b/support/splint/examples/types/test.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -16,8 +16,9 @@
% some examples of the `tree evaluator' and the `expandable arithmetic' macros
-\input ../../yycommon.sty
-\input ../../yymisc.sty
+\input ../../tex/yycommon.sty
+\input ../../tex/trt1.sty
+\input ../../tex/yymisc.sty
\input basic.sty
\input tree.sty
diff --git a/support/splint/examples/types/tree.sty b/support/splint/examples/types/tree.sty
index 46a4c94a97..68171e6a2d 100644
--- a/support/splint/examples/types/tree.sty
+++ b/support/splint/examples/types/tree.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
diff --git a/support/splint/examples/xxpression/Makefile b/support/splint/examples/xxpression/Makefile
index 4450e3126d..8d8b6877c6 100644
--- a/support/splint/examples/xxpression/Makefile
+++ b/support/splint/examples/xxpression/Makefile
@@ -1,12 +1,28 @@
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
# this Makefile uses a flat directory structure for demonstration
# purposes; the main directory (../..) contains a slightly more
# modular organization.
-SPLINT_ROOT = $(shell pwd)/../..
-XXPRESSION_PREREQS = %.tex %.scn %.idx %.tok ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}/cweb/bo.tok xymmap.sty xtoks.sty
+XXPRESSION_PREREQS = %.tex %.scn %.idx %.tok ${SPLINT_XPTABLES} ${SPLINT_XLTABLES}
+XXPRESSION_PREREQS += ${SPLINT_ROOT}cweb/bo.tok ${SPLINT_ROOT}cweb/fo.tok xymmap.sty xtoks.sty
-include ${SPLINT_ROOT}/makefile.inc
-include ${SPLINT_ROOT}/makefile.loc
+include ../../makefile.inc
+include ${SPLINT_ROOT}makefile.loc
# rules specific to this example
@@ -20,13 +36,13 @@ step2: ptabout ltabout
step3: ltab.tex ptab.tex
step4: xymbols.sns
-ptabout: ${SPLINT_ROOT}/cweb/mkeparser.c ${PARSER}.c
+ptabout: ${SPLINT_ROOT}cweb/mkeparser.c ${PARSER}.c
${CC} -DPARSER_FILE=\"../examples/xxpression/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
ptab.tex: ptabout
$< --optimize-actions $@
-ltabout: ${SPLINT_ROOT}/cweb/mkscanner.c ${LEXER}.c
+ltabout: ${SPLINT_ROOT}cweb/mkscanner.c ${LEXER}.c
${CC} -DLEXER_FILE=\"../examples/xxpression/$(lastword $^)\" -o $@ $<
ltab.tex: ltabout
@@ -36,28 +52,35 @@ ${LEXER}.c: ${LEXER}.l
${FLEX} -o $@ $<
test.tex xymbols.tex \
-${PARSER}.yy ${LEXER}.ll: xxpression.x
+${PARSER}.yy ${LEXER}.ll: xxpression.x alphas.hx
@${CTANGLE} $<
${UNLINE} test.txx test.tex
${UNLINE} xymbols.txx xymbols.tex
-xymbols.sns: xymbols.tex xxpression.tok ${SPLINT_ROOT}/cweb/bo.tok ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ptab.tex
+xymbols.sns: xymbols.tex xxpression.tok ${SPLINT_ROOT}cweb/bo.tok ${SPLINT_ROOT}cweb/fo.tok \
+ ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ptab.tex
${PDFTEX} $<
-xxpression.tex: xxpression.x
- ${CWEAVE} $<
+alphas.hx:
+ ${MISCCW} --alpha-list --alpha-length=1 $@
+
+xxpression.tex: xxpression.x alphas.hx
+ ${CWEAVE} $<
-xxpression.gdx: %.gdx: ${XXPRESSION_PREREQS}
+xxpression.gdx:%.gdx: ${XXPRESSION_PREREQS}
${TEX} $*.tex
-xxpression.pdf: %.pdf: ${XXPRESSION_PREREQS} %.gdy
+xxpression.pdf:%.pdf: ${XXPRESSION_PREREQS} %.gdy
${PDFTEX} $* && touch $*.gdy && touch $*.pdf
# if [ -f $*.gdx ] ; then ${BINDX} $*.gdx $*.gdy; fi
-xxpression.tok: xxpression.tex ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}/cweb/bo.tok
- ${PDFTEX} $< && touch xxpression.tok
+xxpression.tok: xxpression.tex ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}cweb/bo.tok
+ ${PDFTEX} $< && touch xxpression.tok
+
+${SPLINT_ROOT}cweb/%:
+ cd ${SPLINT_ROOT}cweb/ && ${MAKE} $(notdir $@)
-test: test.tex xymbols.sns xxpression.tok ptab.tex ltab.tex
+test: test.tex xymbols.sns xxpression.tok ptab.tex ltab.tex ${SPLINT_ROOT}cweb/fo.tok
tex test.tex
docs: xxpression.pdf
diff --git a/support/splint/examples/xxpression/xtoks.sty b/support/splint/examples/xxpression/xtoks.sty
index 2dc5abafb4..60e7dfa7a6 100644
--- a/support/splint/examples/xxpression/xtoks.sty
+++ b/support/splint/examples/xxpression/xtoks.sty
@@ -1,2 +1,19 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
\prettywordpair{IDENTIFIER}{{$\langle$\rm identifier$\rangle$}}
\prettywordpair{INTEGER}{{\bf int}}
+\prettywordpair{$\ undefined}{{\tt\$undefined}}
diff --git a/support/splint/examples/xxpression/xxpression.sty b/support/splint/examples/xxpression/xxpression.sty
index 422901f6da..1b56c4dce3 100644
--- a/support/splint/examples/xxpression/xxpression.sty
+++ b/support/splint/examples/xxpression/xxpression.sty
@@ -1,12 +1,29 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
\def\optimization{5}
+\input trt1.sty % \TeX\ `runtime': temporary register definitions
\input yycommon.sty % general routines for stack and array access
\input yymisc.sty % helper macros (stack manipulation, table processing, value stack pointers)
+                       % parser initialization, optimization
\input yyinput.sty % input functions
\input yyparse.sty % parser machinery
\input flex.sty % lexer functions
\input yyfaststack.sty % sped up stack access functions
-\input yyboth.sty % parser initializatio, optimization
\def\yycomplain#1{\immediate\write16{#1}} % lexer errors
@@ -15,7 +32,7 @@
\let\parsernamespace\empty
\genericparser
- name: main,
+ name: xmain,
ptables: ptab.tex,
ltables: ltab.tex,
tokens: xxpression.tok,
@@ -24,4 +41,4 @@
rsetup: {},
optimization: \optimizeall;%
-\tomainparser
+\toxmainparser
diff --git a/support/splint/examples/xxpression/xxpression.w b/support/splint/examples/xxpression/xxpression.w
index 00834b4436..bdf81b004d 100644
--- a/support/splint/examples/xxpression/xxpression.w
+++ b/support/splint/examples/xxpression/xxpression.w
@@ -1,4 +1,4 @@
-@q Copyright 2012-2014, Alexander Shibakov@>
+@q Copyright 2012-2020, Alexander Shibakov@>
@q This file is part of SPLinT@>
@q SPLinT is free software: you can redistribute it and/or modify@>
@@ -39,25 +39,25 @@
\newread\testeof
\immediate\openin\testeof=\jobname.tok
\ifeof\testeof % make the local token equivalence table
- \def\drvname{bo}
- \csname newwrite\endcsname\tokendefs
\let\nx\noexpand
+ \csname newwrite\endcsname\tokendefs
\edef\tokendeffile{\jobname.tok} % where to put the token equivalence table
\edef\bstrapparser{dyytab.tex}
\def\bootstraplexersetup{%
\let\yylexreturn\yylexreturnregular
\bootstrapmodetrue
- \input \drvname.tok % use token equivalence table to set the values of non-string tokens
- % this has to be added if a non-bootstrap parser is used to
- % extract token information (see the comments above)
+ \input bo.tok % use token equivalence table to set the values of non-string tokens
+ % this has to be added if a non-bootstrap parser is used to
+ % extract token information (see the comments above)
}
\toks0{%
+ \input trt1.sty %/* \TeX\ `runtime': temporary register definitions */
\input yycommon.sty % general routines for stack and array access
\input yymisc.sty % helper macros (stack manipulation, table processing, value stack pointers)
+ % parser initialization, optimization
\input yyinput.sty % input functions
\input yyparse.sty % parser machinery
\input flex.sty % lexer functions
- \input yyboth.sty % parser initialization, optimization
\input yyfaststack.sty
\input yystype.sty % scanner auxiliary types and functions
\input yyunion.sty % parser data structures
@@ -65,22 +65,29 @@
\let\parsernamespace\empty
% create token equivalence table (making, say, \tokenID the same as \csname token"identifier"\endcsname)
\input yybootstrap.sty
+ \input yytexlex.sty
+ \expandafter\def %/* adjust the \.{\\yyinput} to recognize \.{\\yyendgame} */
+ \expandafter\multicharswitch\expandafter
+ {\multicharswitch\yyendgame{\yyinput\yyeof\yyeof\endparseinput\removefinalvb}}%
}
\else
\toks0{%
\input yy.sty
+ \modenormal
\let\currentparsernamespace\parsernamespace
- \let\parsernamespace\smallnamespace
- \let\hostparsernamespace\smallnamespace
+  \def\parsernamespace{[xxdisplay]}% for \pretty... commands to work
+ \def\hostparsernamespace{[xxdisplay]}% for the \nameproc macro
\input xtoks.sty
- \let\parsernamespace\currentparsernamespace
+ \let\parsernamespace\currentparsernamespace % does not really matter
+ % the \hostparsernamespace stays `[xxdisplay]' which should cause the
+ % \nameproc macro to correct the typesetting of terminals accordingly
}
\fi
\immediate\closein\testeof
\the\toks0
@**Parser file. \setupfootnotes
-This an enhanced parser for expressions. It takes
+This is an enhanced parser for expressions. It takes
advantage of the `symbolic term name' mechanism and extends the basic
expression syntax.
@@ -130,7 +137,9 @@ parser impossible.
@ To continue the token name discussion, this parser uses internal
names only but the |yytname| array contains a string equivalent of
-\prodstyle{IDENTIFIER}. Thus, bootstrapping is necessary. The beginning
+\prodstyle{IDENTIFIER}. Thus, bootstrapping is necessary\footnote{This
+was done as a demonstration; changing the definition of
+\prodstyle{IDENTIFIER} would easily remove this requirement.}. The beginning
of this file contains a simple scheme for producing a token
equivalence table.
The typesetting of the tokens can be adjusted using \.{\\prettywordpair}
@@ -238,7 +247,7 @@ variable names that expand to appropriate values.
@g
@ @<Lexer definitions@>=
-@G
+@G(fs1)
letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ]
id {letter}({letter}|[-0-9])*
int [0-9]+
@@ -284,12 +293,12 @@ int [0-9]+
\tracebadcharsfalse
\yyflexdebugfalse
@<Scan white space@>=
-@G
+@G(fs2)
[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=}
@g
@ @<Scan identifiers@>=
-@G
+@G(fs2)
{id} {@> @[TeX_( "/yylexreturnval{IDENTIFIER}" );@]@=}
{int} {@> @[TeX_( "/yylexreturnval{INTEGER}" );@]@=}
[-+*/()] {@> @[TeX_( "/yylexreturnchar" );@]@=}
@@ -309,7 +318,10 @@ any adjustments are needed.
@(xymbols.txx@>=
@G
\def\optimization{5} % this can be omitted
+\input cwebmac.tex
+\input limbo.sty
\input yy.sty
+\modenormal
\input xymmap.sty
\end
@g
@@ -332,6 +344,7 @@ scanner routines.
\tracebadcharstrue
\yyflexdebugtrue
\yyinputdebugtrue
+ \traceactioncodetrue
\fi
\newread\ssw
@@ -346,7 +359,6 @@ scanner routines.
\def\varone{10}
\def\expression{1 + 3 * ( 5 + 7 ) + varone - 10}
-
\basicparserinit\expandafter\yyparse \expression \yyeof\yyeof\endparseinput\endparse
{
@@ -356,8 +368,16 @@ scanner routines.
\bye
@g
+@q Include the list of index section markers; this is a hack to get around @>
+@q the lack of control over the generation of \CWEB's index; the correct order @>
+@q of index entries depends on the placement of this inclusion @>
+@i alphas.hx
@**Index.
+\unsetfootnotes
+\input gindex.sty
+\closeout\gindex
+\termindexfalse % do not attach indexing entries to the terms in the index, or the section list
\let\inx\inxmod
\let\fin\finmod
\immediate\closeout\exampletable
diff --git a/support/splint/examples/xxpression/xymmap.sty b/support/splint/examples/xxpression/xymmap.sty
index 3d36bae920..d38d23ca9b 100644
--- a/support/splint/examples/xxpression/xymmap.sty
+++ b/support/splint/examples/xxpression/xymmap.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -36,8 +36,6 @@
\pinittoks{}%
\input ptab.tex % this is the enhanced expression parser
\edef\tointermediateparser{%
- \noexpand\savefullstate % unnecessary
- \noexpand\savefullstateextra % unnecessary
\let\noexpand\parsernamespace\noexpand\inamespace % switch to the new namespace
\the\pinittoks % restore all the tables, tokens and constants, and stacks
\let\noexpand\getcurrentparser\noexpand\tointermediateparser
@@ -87,7 +85,7 @@
% part of the file, this can simply be disabled
\settokens
\setflexstates % the main lexer can be reused in this case; the states still need to be set up
-\input \drvname.tok % set up the tokens for the bison grammar parser
+\input bo.tok % set up the tokens for the bison grammar parser
\newparserstate
\newlexerstate
\newlexerstateextra
@@ -95,8 +93,6 @@
\setnulstack{yyirulestack}%
%
\edef\tofullparser{%
- \noexpand\savefullstate
- \noexpand\savefullstateextra
\let\noexpand\parsernamespace\noexpand\fullnamespace % switch to the new namespace
\the\pinittoks % restore all the tables, tokens and constants, and stacks
\let\noexpand\getcurrentparser\noexpand\tofullparser
@@ -131,7 +127,7 @@
\fi
}%
% build the command to create the symbolic name switch
-\toksc{\tofullparser\parserinit
+\toksc{\tofullparser\basicparserinit\bisonparserinit\bisonparserdatainit
\let\yyinputold\yyinput
\let\yyinput\yyinputtrivial % a demo of a stripped down, slightly faster input routine
\doascii{11}\expandafter\yyparse}%
diff --git a/support/splint/extras/texmf/macros/protcode.tex b/support/splint/extras/texmf/macros/protcode.tex
new file mode 100644
index 0000000000..57f735a973
--- /dev/null
+++ b/support/splint/extras/texmf/macros/protcode.tex
@@ -0,0 +1,83 @@
+% this code works with OT1 encoding only!
+
+\def\setprotcode#1{%
+ \rpcode#1`\!=200
+ \rpcode#1`\,=700
+ \rpcode#1`\-=700
+ \rpcode#1`\.=700
+ \rpcode#1`\;=500
+ \rpcode#1`\:=500
+ \rpcode#1`\?=200
+ \lpcode#1`\`=700
+ \rpcode#1`\'=700
+ \lpcode#1 92=500 % ``
+ \rpcode#1 34=500 % ''
+ \rpcode#1 123=300 % --
+ \rpcode#1 124=200 % ---
+ \rpcode#1`\)=50
+ \rpcode#1`\A=50
+ \rpcode#1`\F=50
+ \rpcode#1`\K=50
+ \rpcode#1`\L=50
+ \rpcode#1`\T=50
+ \rpcode#1`\V=50
+ \rpcode#1`\W=50
+ \rpcode#1`\X=50
+ \rpcode#1`\Y=50
+ \rpcode#1`\k=50
+ \rpcode#1`\r=50
+ \rpcode#1`\t=50
+ \rpcode#1`\v=50
+ \rpcode#1`\w=50
+ \rpcode#1`\x=50
+ \rpcode#1`\y=50
+ \lpcode#1`\(=50
+ \lpcode#1`\A=50
+ \lpcode#1`\J=50
+ \lpcode#1`\T=50
+ \lpcode#1`\V=50
+ \lpcode#1`\W=50
+ \lpcode#1`\X=50
+ \lpcode#1`\Y=50
+ \lpcode#1`\v=50
+ \lpcode#1`\w=50
+ \lpcode#1`\x=50
+ \lpcode#1`\y=0
+ \adjustprotcode#1\relax
+}
+
+\newif\ifneedadjustprotcode
+\newbox\boxA
+\newcount\countA
+\newcount\countB
+\def\adjustprotcode#1{%
+ \needadjustprotcodefalse
+ \ifnum\pdftexversion = 14
+ \ifnum \expandafter`\pdftexrevision > `g
+ \needadjustprotcodetrue
+ \fi
+ \else\ifnum\pdftexversion > 14
+ \needadjustprotcodetrue
+ \fi \fi
+ \ifneedadjustprotcode
+ \countA=0
+ \loop
+ \ifcase\lpcode#1\countA\else
+ \adjustcp\lpcode#1\countA
+ \fi
+ \ifcase\rpcode#1\countA\else
+ \adjustcp\rpcode#1\countA
+ \fi
+ \advance\countA 1
+ \ifnum\countA < 256 \repeat
+ \fi
+}
+\def\adjustcp#1#2#3{%
+ \setbox\boxA=\hbox{%
+ \ifx#2\font\else#2\fi
+ \char#3}%
+ \countB=\wd\boxA
+ \multiply\countB #1#2#3\relax
+ \divide\countB \fontdimen6 #2\relax
+ #1#2#3=\countB\relax
+}
diff --git a/support/splint/makefile.inc b/support/splint/makefile.inc
index d48478bb1a..787617da1e 100644
--- a/support/splint/makefile.inc
+++ b/support/splint/makefile.inc
@@ -1,19 +1,43 @@
-TEXINPUTS := .:${SPLINT_ROOT}/cweb/:${SPLINT_ROOT}/tex/:${SPLINT_ROOT}/:${TEXINPUTS}
-
-SPLINT_DRIVER_DIR = ${SPLINT_ROOT}/cweb
-
-SPLINT_TEXSTYLES = flex.sty yyfaststack.sty yyparse.sty yyunion.sty yyboth.sty yyinput.sty yystype.sty yy.sty
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+SPLINT_ROOT := $(dir $(CURDIR)/$(word $(words $(MAKEFILE_LIST)),$(MAKEFILE_LIST)))
+# taken from a stackoverflow answer by Xavier Holt
+
+TEXINPUTS := .:${SPLINT_ROOT}cweb/:${SPLINT_ROOT}tex/:${SPLINT_ROOT}:${TEXINPUTS}
+
+SPLINT_DRIVER_DIR = ${SPLINT_ROOT}cweb
+
+SPLINT_TEXSTYLES = flex.sty yyfaststack.sty yyparse.sty yyunion.sty yyinput.sty yystype.sty yy.sty
SPLINT_TEXSTYLES += yyinit.sty yybootstrap.sty dcols.sty limbo.sty yycommon.sty yymisc.sty yytexlex.sty
+SPLINT_TEXSTYLES += yydebug.sty gindex.sty noweb.sty
SPLINT_BOOTSTRAP_STYLES = grabstates.sty
-SPLINT_PTABLES = byytab.tex dyytab.tex fyytab.tex gyytab.tex small_tab.tex
-SPLINT_LTABLES = ltab.tex small_dfa.tex
-
-SPLINT_XTEXSTYLES = $(patsubst %, ${SPLINT_ROOT}/tex/%, ${SPLINT_TEXSTYLES})
-SPLINT_XBOOTSTRAP_STYLES = $(patsubst %, ${SPLINT_ROOT}/tex/%, ${SPLINT_BOOTSTRAP_STYLES})
-SPLINT_XPTABLES = $(patsubst %, ${SPLINT_ROOT}/cweb/%, ${SPLINT_PTABLES})
-SPLINT_XLTABLES = $(patsubst %, ${SPLINT_ROOT}/cweb/%, ${SPLINT_LTABLES})
-
-SPLINT_DOC_PREREQS = %.tex ${SPLINT_PTABLES} ${SPLINT_LTABLES} ${SPLINT_XTEXSTYLES} ${SPLINT_ROOT}/tex/btokenset.sty bo.tok
+SPLINT_PTABLES = byytab.tex dyytab.tex fyytab.tex gyytab.tex fiptab.tex reptab.tex small_tab.tex
+SPLINT_PTABLES += raptab.tex ddptab.tex
+SPLINT_LTABLES = ltab.tex small_dfa.tex filtab.tex
+
+SPLINT_XTEXSTYLES = $(patsubst %, ${SPLINT_ROOT}tex/%, ${SPLINT_TEXSTYLES})
+SPLINT_XBOOTSTRAP_STYLES = $(patsubst %, ${SPLINT_ROOT}tex/%, ${SPLINT_BOOTSTRAP_STYLES})
+SPLINT_XPTABLES = $(patsubst %, ${SPLINT_ROOT}cweb/%, ${SPLINT_PTABLES})
+SPLINT_XLTABLES = $(patsubst %, ${SPLINT_ROOT}cweb/%, ${SPLINT_LTABLES})
+
+SPLINT_DOC_PREREQS = %.tex ${SPLINT_PTABLES} ${SPLINT_LTABLES} ${SPLINT_XTEXSTYLES}
+SPLINT_DOC_PREREQS += ${SPLINT_ROOT}tex/btokenset.sty ${SPLINT_ROOT}tex/ftokenset.sty
+SPLINT_DOC_PREREQS += ${SPLINT_ROOT}tex/stokenset.sty ${SPLINT_ROOT}tex/fretokenset.sty
+SPLINT_DOC_PREREQS += bo.tok fo.tok
SPLINT_DOC_PREREQS_XREF = ${SPLINT_DOC_PREREQS} %.scn %.idx
PDFTEX = export TEXINPUTS=${TEXINPUTS} && pdftex
@@ -25,11 +49,12 @@ CTANGLE = ctangle -bhp
BISON = ${BISON_ROOT}bison -v
FLEX = ${FLEX_ROOT}flex
-MODEBOOTSTRAP = \\def\\modeactive\{\\modebootstrap\}
+MODEBOOTSTRAP = \\let\\ifbootstrapmode\\iftrue
-BRACK = ${SPLINT_ROOT}/scripts/brack.pl
-UNLINE = ${SPLINT_ROOT}/scripts/unline.pl
-BINDX = ${SPLINT_ROOT}/scripts/bindx.pl
+BRACK = ${SPLINT_ROOT}scripts/brack.pl
+UNLINE = ${SPLINT_ROOT}scripts/unline.pl
+BINDX = ${SPLINT_ROOT}scripts/bindx.pl
+MISCCW = ${SPLINT_ROOT}scripts/misccw.pl
%.c: %.w
%.c: %.y
@@ -61,15 +86,16 @@ BINDX = ${SPLINT_ROOT}/scripts/bindx.pl
all: docs
-${SPLINT_ROOT}/cweb/%:
- cd ${SPLINT_ROOT}/cweb && ${MAKE} $(notdir $@)
+${SPLINT_ROOT}cweb/%:
+ cd ${SPLINT_ROOT}cweb && ${MAKE} $(notdir $@)
clean_temp:
- -rm -f *.o *.tab.* *.dvi *.pdf *.ps *~ *.log \
- *.rli *.rls *.lpg *.output *.lst *.exl *.ftn *.*pk *.gdx
+ -rm -f *.o *.tab.* *.dvi *.pdf *.ps *~ *.log *.rli *.rls *.lpg \
+ *.output *.lst *.exl *.ftn *.*pk *.gdx
clean_core: clean_temp
- -rm -f *.c *.h *.tex *.idx *.scn *.toc *.tok *.sns *.gdy *.x *.yy *.ll *.y *.l
+ -rm -f *.c *.h *.tex *.idx *.scn *.toc *.tok *.sns *.gdy *.xxr *.x \
+ *.hx *.hy *.yy *.ll *.y *.l
# to ensure that interrupted and erroneous builds are cleaned up
diff --git a/support/splint/makefile.loc b/support/splint/makefile.loc
index 58d7b0abaf..e80b509044 100644
--- a/support/splint/makefile.loc
+++ b/support/splint/makefile.loc
@@ -1,5 +1,5 @@
# uncomment the lines below to use your local version of bison or flex
# BISON_ROOT ?= ${HOME}/bin/
# FLEX_ROOT ?= ${HOME}/bin/
-# uncomment the line below if you have bison version 3.0 or newer
-# BISON_STATE := -DBISON_IS_CRIPPLED=yes
+# comment the line below if you have an older (version <3.0 or sane) bison
+BISON_STATE := -DBISON_IS_CRIPPLED=yes
diff --git a/support/splint/scripts/bindx.pl b/support/splint/scripts/bindx.pl
index ace62429ba..beb9719bf8 100755
--- a/support/splint/scripts/bindx.pl
+++ b/support/splint/scripts/bindx.pl
@@ -1,8 +1,60 @@
#!/usr/bin/perl
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+use Getopt::Long;
+use Pod::Usage;
+use File::Basename;
+use English;
+
+( $my_name, $my_path, $my_suffix ) = fileparse( $PROGRAM_NAME );
+$invocation_line = "\% ".$my_name." ".(join ' ', @ARGV)."\n";
+
+my $man = 0;
+my $help = 0;
+my $fine_index = 0;
+my $fine_headercs = "\\FI"; # index command sequence for the fine format
+my $crude_headercs = "\\GI"; # index command sequence for the standard format
+my $headercs = ""; # the default is the standard format
+
+#Getopt::Long::Configure ("bundling"); # to allow -abc to set a, b, and c
+
+GetOptions ("help|?" => \$help,
+ man => \$man,
+ "fine" => \$fine_index,
+ "cs=s" => \$headercs
+ ) or pod2usage(2);
+
+pod2usage(-exitval => 0, -verbose => 1) if $help;
+pod2usage(-exitval => 0, -verbose => 2) if $man;
+
open FILE, "$ARGV[0]" or die "Cannot open input file $ARGV[0]\n";
open FILE_OUT, ">$ARGV[1]" or die "Cannot open input file $ARGV[1]\n";
+if ( $headercs eq "" ) {
+ if ( $fine_index ) {
+ $headercs = $fine_headercs;
+ } else {
+ $headercs = $crude_headercs;
+ }
+}
+
+print FILE_OUT $invocation_line;
+
sub lex_order (\@\@) { # lexicographic ordering
my (@string1) = @{shift @_};
@@ -43,39 +95,82 @@ sub lexicographically { # lexicographic ordering for numeric sequences separated
return lex_order @chars1, @chars2;
}
-$alphabet = "\#\$\%^&*<>[]{}()+-=_|\\,:;~`.?!\'\"\@0123456789AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz";
+$alphabet = " /\#\$\%^&*<>[]{}()+-=_|\\,:;~`.?!\'\"\@0123456789AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz";
@main_set = split //, $alphabet;
map { $main_order{$_} = index $alphabet, $_ } @main_set; # inefficient ...
-$ldelim[1] = "\\["; $rdelim[1] = "]";
$ldelim[0] = "\\("; $rdelim[0] = ")";
-$ldelim[5] = "\\e"; $rdelim[5] = "e";
-$ldelim[4] = "\\f"; $rdelim[4] = "f";
+$ldelim[1] = "\\["; $rdelim[1] = "]";
+$ldelim[2] = ""; $rdelim[2] = "";
$ldelim[3] = "\\g"; $rdelim[3] = "g";
+$ldelim[4] = "\\f"; $rdelim[4] = "f";
+$ldelim[5] = "\\e"; $rdelim[5] = "e";
+
+sub alpha_jump { # have we switched to the next letter?
+
+ my $a = substr shift @_, 0, 1;
+ my $b = substr shift @_, 0, 1;
+
+ $a =~ tr/a-z/A-Z/; $a =~ tr/a-zA-Z/0/cs;
+
+ return (ord $a) <=> (ord $b);
+}
while (<FILE>) {
$input = $_;
- if ( $input =~ /\\(.)TI\s*([0-9]+)\s*\{(\\[a-z]+)\s*\{(.+)\}\}\{(.+)\}\{(.+)\}\{(.+)\}\n/ ) {
-
- ($domain, $rank, $type, $key, $nspace, $pageno, $ref) = ($1, $2, $3, $4, $5, $6, $7);
+ if ( $input =~ /\\i\@\@\@e\s* \{([0-9]+)\} # section number
+ \{([0-9]+)\} # page number
+ \{((\{[^\{\}]*\})+)\} # host namespace, context, etc.
+ \{([^\{\}]+)\} # domain
+ \{([0-9]+)\} # rank
+ \{([^\{\}]*)\} # type1
+ \{([^\{\}\s]+)\s*\} # type2
+ \{((\{[^\{\}]+\})+)\} # key
+ \{((\{[^\{\}]+\})*)\} # visual key
+ (\%.*)*\n/x ) {
+ # ordinary index entry
+
+ ($section, $pageno, $nspace, $junk0, $domain, $rank, $type1, $type2, $key, $junk1, $vkey) =
+ ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11 );
+
$key =~ s/\{([0-9]+)\}/pack "c1", $1/eg;
-
- push @{$index{$domain}{$key}{refs}}, "$ref $rank";
- $index{$domain}{$key}{nspace} = $nspace;
- $index{$domain}{$key}{type} = $type;
+ $term = $key;
+ $vkey =~ s/\{([0-9]+)\}/pack "c1", $1/eg;
+
+ if ( $vkey ne "" ) {
+
+ if (exists $index{$domain}{$vkey}{type} && $index{$domain}{$vkey}{type} ne $type2) {
+ #warn "Differing output types for term <$term> with key <$vkey>.\n", "$index{$domain}{$vkey}{type} vs. $type2\n";
+ $key = "$vkey $key";
+
+ if (exists $index{$domain}{$key}{type} && $index{$domain}{$key}{type} ne $type2) {
+ warn "Differing output types for term <$term> with key <$vkey>.\n", "$index{$domain}{$vkey}{type} vs. $type2\n";
+ }
+ } else {
+ $key = $vkey;
+ }
+ }
+
+ # print "KEY: ", $key, " ", $vkey, "\n";
+
+ push @{$index{$domain}{$key}{refs}}, "$section $rank $pageno";
+ $index{$domain}{$key}{nspace} = $nspace;
+ $index{$domain}{$key}{type} = $type2;
+ $index{$domain}{$key}{term} = $term;
}
-
}
$i = 0;
+$last_alpha = "0"; # the last index section
foreach $domain (sort keys %index ) {
if ( $i > 0) {
+ $last_alpha = "0";
print FILE_OUT "\\indexseparator{$domain}{$i}\n";
}
@@ -84,18 +179,95 @@ foreach $domain (sort keys %index ) {
foreach $key ( sort alphabetically keys %{$index{$domain}} ) {
- %ref_list = ();
- map { exists $ref_list{$_} ? ($ref_list{$_}++) : ($ref_list{$_} = 0) } @{$index{$domain}{$key}{refs}};
+ if (exists $index{$domain}{$key}{refs}) {
+
+ %ref_hash = ();
+ map { @r = split / /, $_; exists $ref_hash{$r[0]}{$r[1]}{$r[2]} ?
+ $ref_hash{$r[0]}{$r[1]}{$r[2]}++ : ($ref_hash{$r[0]}{$r[1]}{$r[2]} = 0) } @{$index{$domain}{$key}{refs}};
+
+ my @fine_ref_list = ();
+ my @crude_ref_list = ();
+
+ foreach $key ( reverse sort numerically keys %ref_hash ) {
+
+ foreach $rkey ( reverse sort numerically keys %{$ref_hash{$key}} ) {
+
+ $ref_string = "$ldelim[$rkey]$key$rdelim[$rkey]";
+ push @crude_ref_list, $ref_string;
+ $ref_string = $ref_string."\{".(join ', ', (reverse sort numerically keys %{$ref_hash{$key}{$rkey}}))."\}";
+ push @fine_ref_list, $ref_string;
- @ref_list = sort lexicographically keys %ref_list;
- @ref_list = map { @r = split / /, $_; "$ldelim[$r[1]]$r[0]$rdelim[$r[1]]" } @ref_list;
+ }
- $ukey = $key;
- $ukey =~ s/(.)/"\{".(unpack "c1", $1)."\}"/eg;
-
- print FILE_OUT "\\GI{$index{$domain}{$key}{nspace}}{$index{$domain}{$key}{type}}{$ukey}, ",
- (join ', ', @ref_list), ". \% $key, sec nos. ", (join ', ', @ref_list ), "\n";
-
+ }
+
+ $ref_string = join ', ', @fine_ref_list;
+ $cref_string = join ', ', @crude_ref_list;
+
+ $term = $index{$domain}{$key}{term};
+ $term_printable = $term;
+ $term =~ s/(.)/"\{".(unpack "c1", $1)."\}"/eg;
+
+ if ( alpha_jump( $key, $last_alpha ) > 0 ) {
+ $last_alpha = substr $key, 0, 1;
+ $last_alpha =~ tr/a-z/A-Z/;
+ print FILE_OUT "\\indexsection{$last_alpha}\n",
+ }
+
+ print FILE_OUT $headercs."{$index{$domain}{$key}{nspace}}{$index{$domain}{$key}{type}}{$term}, ".
+ ($fine_index ? $ref_string : $cref_string).".\% $term_printable, ($key)\n",
+ "\% sec nos. ".$ref_string."\n";
+
+ }
}
}
+__END__
+
+=head1 BINDX
+
+bindx.pl - Postprocess an index (.gdx) in <input_file> to produce a set of index entries in
+ the <output_file> (.gdy)
+
+=head1 SYNOPSIS
+
+bindx.pl [options] input_file output_file
+
+
+ Options:
+ --help|-h|-? brief help message
+ --man|-m full documentation
+ --fine|-f add page references to each index entry
+ --cs=<string> specify the index control sequence name
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--help>
+
+Print a brief help message and exit.
+
+=item B<--man>
+
+Print the manual page and exit.
+
+=item B<--fine>
+
+Create index entries in the form B<l>I<nnn>B<r>{n1, n2, ...} where B<l> and B<r>
+are the left and right delimiters, I<nnn> is the section number, and the list of
+page numbers appears inside the braced group (see the example following this list).
+
+=item B<--cs>
+
+The name of the index control sequence. The default is B<\GI> for the standard
+index format and B<\FI> for the 'fine' format.
+
+=back
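For example (a hypothetical entry; the delimiter pair below is the one the script
associates with reference rank 1), a term referenced in section 12 on pages 34
and 35 would be listed as \[12] in the standard format and as \[12]{34, 35} when
B<--fine> is in effect.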
+
+=head1 DESCRIPTION
+
+B<bindx.pl> will read the given <input_file>, and output an index
+in the <output_file>.
+
+=cut
diff --git a/support/splint/scripts/brack.pl b/support/splint/scripts/brack.pl
index 6e110e5cf3..887c6324f0 100755
--- a/support/splint/scripts/brack.pl
+++ b/support/splint/scripts/brack.pl
@@ -1,15 +1,51 @@
#!/usr/bin/perl
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
# a simple script to replace all @G ... @`other' regions with
# @= ... @>@; lines
# comments are allowed at the end of the lines.
# only one style of comments is accepted: /* ... */. note that these are not
# output
+use Getopt::Long;
+use Pod::Usage;
+
+my $man = 0;
+my $help = 0;
+my $elang_start = "\@t}\\lsectionbegin{\%s}\\vb{\@>\n";
+my $elang_finish = "\@t}\\vb{\\yyendgame}\\vb{}\\endparse\\postparse{\@>\n";
+
+#Getopt::Long::Configure ("bundling"); # to allow -abc to set a, b, and c
+
+GetOptions ("help|?" => \$help,
+ man => \$man,
+ "startol=s" => \$elang_start, # the string that starts an `other language' region
+ "finishol=s" => \$elang_finish # the string that ends an `other language' region
+ ) or pod2usage(2);
+
+pod2usage(-exitval => 0, -verbose => 1) if $help;
+pod2usage(-exitval => 0, -verbose => 2) if $man;
+
open FILE, "$ARGV[0]" or die "Cannot open input file $ARGV[0]\n";
open FILEOUT, ">$ARGV[1]" or die "Cannot open output file $ARGV[1]\n";
$state = 0;
+$paused_state = 0;
while (<FILE>) {
$inline = $_;
@@ -17,20 +53,22 @@ while (<FILE>) {
if ( $inline =~ m/^\@G(.*)$/ ) { # @G detected, this line is part of the `other language' region
- $inline = $1; $state = 1;
- if ( $inline =~ m/^\(([^)]*)\).*/ ) {
- printf FILEOUT "\@q Start generic language section\@>\n\@t}\\begingsec{%s}{\@>\n", "$1"; # a parser switcher
- } else {
- $inline = " Start \@\@G (generic) language section";
- printf FILEOUT "\@q%s\@>\n\@t}\\begingsec{b}{\@>\n", "$inline"; # a parser switcher
- }
+ $inline = $1; $state = 1;
+
+ if ( $inline =~ m/^\(([^)]*)\).*/ ) { # language specifier present
+ $inline = $1;
+ } else {
+ $inline = "b";
+ }
+
+ printf FILEOUT "\@q Start generic language section\@>\n" . $elang_start, "$inline"; # a parser switcher
- } elsif ( $inline =~ m/^\@[\scp].*$/ ) { # @`other' detected, so `other language' region is over
+ } elsif ( $inline =~ m/^\@[\scp\*0-9].*$/ ) { # @`other' detected, so `other language' region is over
if ($state == 1) {
- printf FILEOUT "\@q%s\@>\n\@t}\\endgsec{\@>\n",
- "End of generic language section"; # a parser switcher
- }
+ printf FILEOUT "\@q%s\@>\n" . $elang_finish, "End of generic language section"; # a parser switcher
+ }
+
$state = 0;
printf FILEOUT "%s", "$inline";
@@ -38,35 +76,33 @@ while (<FILE>) {
printf FILEOUT "%s", "$inline";
- } elsif ( $inline =~ m/^\@g(.*)$/ ) { # @`other' detected, so `other language' region is over
+    } elsif ( $inline =~ m/^\@g(.*)$/ ) { # explicit end of other language region detected
- if ($state == 1) {
- printf FILEOUT "\@q%s\@>\n\@t}\\endgsec{\@>\n",
- "End of generic language section"; # a parser switcher
- }
+ $inline = $1;
- $inline = $1; $state = 0;
+ if ($state == 1) {
+ printf FILEOUT "\@q%s\@>\n" . $elang_finish, "End of generic language section"; # a parser switcher
+ }
- if ( not $inline ) { $inline = "End generic language section"; }
- printf FILEOUT "\@q%s\@>\n", "$inline";
+ $state = 0;
} elsif ( $inline =~ m/^\@O(.*)$/ ) { # @O detected, so `other language' region is paused
- $inline = $1; $state = 0;
+ $inline = $1; $paused_state = $state; $state = 0;
if ( not $inline ) { $inline = "End generic language section"; }
printf FILEOUT "\@q%s\@>\n", "$inline";
} elsif ( $inline =~ m/^\@o(.*)$/ ) { # @o detected, so `other language' region is resumed
- $inline = $1; $state = 1;
+ $inline = $1; $state = $paused_state; $paused_state = 0;
if ( not $inline ) { $inline = "End generic language section"; }
printf FILEOUT "\@q%s\@>\n", "$inline";
- } elsif ( $state == 1 ) {
+ } elsif ( $state != 0 ) {
- if ( $inline =~ m/\/\*.*\*\/\s*$/ ) {
+ if ( $inline =~ m/\/\*.*\*\/\s*$/ ) { # the line contains a comment at the end
$inline =~ m/^(.*\S|)\s*(\/\*.*\*\/)\s*$/;
$string = $1; $comment = $2;
@@ -80,11 +116,11 @@ while (<FILE>) {
if ( $string ) {
- printf FILEOUT "\@=%s\@>\@;", "$string";
+ printf FILEOUT "\@=%s\@>\@t}\\vb{\\n}{\@>\@;", "$string";
} else {
- printf FILEOUT "\@=%s\@>\@;", " ";
+ printf FILEOUT "\@=%s\@>\@t}\\vb{\\n}{\@>\@;", " "; # to keep \CWEB\ happy
}
@@ -103,3 +139,62 @@ while (<FILE>) {
}
}
+
+__END__
+
+=head1 BRACK
+
+brack.pl - Preprocess a CWEB file to allow language extensions
+
+=head1 SYNOPSIS
+
+brack.pl [options] input_file output_file
+
+
+ Options:
+ --help|-h|-? brief help message
+ --man|-m full documentation
+ --startol|-s string to begin a language region
+ --finishol|-f string to end a language region
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--help>
+
+Print a brief help message and exit.
+
+=item B<--man>
+
+Print the manual page and exit.
+
+=item B<--startol>=I<CWEB string>
+
+The string to print at the beginning of an other language region
+
+=item B<--finishol>=I<CWEB string>
+
+The string to print at the end of an other language region
+
+=back
+
+=head1 DESCRIPTION
+
+B<brack.pl> will read the given <input_file>, format the @G(...) sections
+so that they can be read by CWEAVE, and output the result in the <output_file>.
+
+The processing mechanism is very primitive and makes a number of assumptions
+about the appearance of the B<CWEB> file. Unlike the 'standard' B<CWEB> input, the
+new 'generic language' section markers (the @G(...) construct) are
+I<case sensitive> and I<must> appear at the beginning of the line. The
+'other language' markers (the @O... sections) follow the same restrictions as
+the @G sections above, and I<do not nest>.
+
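For instance (a rough sketch assuming the default B<--startol> and B<--finishol>
strings; the exact markup may differ in minor details), an input fragment such as

    @G(b)
    foo : bar ;
    @g

would come out approximately as

    @q Start generic language section@>
    @t}\lsectionbegin{b}\vb{@>
    @=foo : bar ;@>@t}\vb{\n}{@>@;
    @q End of generic language section@>
    @t}\vb{\yyendgame}\vb{}\endparse\postparse{@>
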
+A comment at the very end of the line is moved to the B<C> portion of the input.
+To put the comment inside the verbatim blocks, one may surround it by [@>@=] and
+[@>@= ] (the square brackets are not part of the input and are here to draw attention
+to the spacing, see below). Note the space at the end of the closing construct: this
+is necessary to pacify B<CWEAVE>.
+
+=cut
diff --git a/support/splint/scripts/cslist.pl b/support/splint/scripts/cslist.pl
index d1df62bc07..9c187517ac 100755
--- a/support/splint/scripts/cslist.pl
+++ b/support/splint/scripts/cslist.pl
@@ -1,3 +1,36 @@
+#!/usr/bin/perl
+
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+use Getopt::Long;
+use Pod::Usage;
+
+my $man = 0;
+my $help = 0;
+
+#Getopt::Long::Configure ("bundling"); # to allow -abc to set a, b, and c
+
+GetOptions ("help|?" => \$help,
+ man => \$man,
+ ) or pod2usage(2);
+
+pod2usage(-exitval => 0, -verbose => 1) if $help;
+pod2usage(-exitval => 0, -verbose => 2) if $man;
+
for ($i = 0; $i <= $#ARGV; $i++) {
open FILE, "<$ARGV[$i]" or die "Cannot open file $ARGV[$i]!\n";
@@ -7,7 +40,7 @@ for ($i = 0; $i <= $#ARGV; $i++) {
$text = $_;
- while( $text =~ /(\\newcount|\\newtoks|\\newdimen|\\newif|\\let|\\([exg]|char|toks|count)*def)\w*(\\[a-zA-Z@]+)/g ) {
+ while( $text =~ /(\\newcount|\\newtoks|\\newdimen|\\newif|\\let|\\([exg]|char|toks|count)?def)\w*(\\[a-zA-Z@]+)/g ) {
$cs = $3;
push @all, $cs;
@@ -24,8 +57,8 @@ for ($i = 0; $i <= $#ARGV; $i++) {
foreach $ocs (@alphsecs) {
- print "$ocs %";
- @fnames = sort keys $secse{$ocs};
+ print "$ocs \% ". ref $secse{$osc};
+ @fnames = sort keys %{$secse{$ocs}};
foreach $fname (@fnames) {
print " $fname ($secse{$ocs}->{$fname} occurence";
if ( $secse{$ocs}->{$fname} > 1 ) {print "s";}
@@ -38,3 +71,41 @@ print "\n";
$i = $#alphsecs;
$i++;
print "total sequences: $i\n";
+
+__END__
+
+=head1 CSLIST
+
+cslist.pl - output a list of all control sequences in the input files.
+
+=head1 SYNOPSIS
+
+cslist.pl [options] input_files
+
+
+ Options:
+ --help|-h|-? brief help message
+ --man|-m full documentation
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--help>
+
+Print a brief help message and exit.
+
+=item B<--man>
+
+Print the manual page and exit.
+
+=back
+
+=head1 DESCRIPTION
+
+B<cslist.pl> will read the given <input_files>, and produce a list of all the
+B<TeX> control sequences defined in those files (a control sequence is considered
+I<defined> in a file B<foo.tex> if one of B<\def>, B<\let>, B<\new...>, or similar
+precedes it in B<foo.tex>).
+
+=cut
diff --git a/support/splint/scripts/misccw.pl b/support/splint/scripts/misccw.pl
new file mode 100755
index 0000000000..b536904df4
--- /dev/null
+++ b/support/splint/scripts/misccw.pl
@@ -0,0 +1,94 @@
+#!/usr/bin/perl
+
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+use Getopt::Long;
+use Pod::Usage;
+
+my $man = 0;
+my $help = 0;
+my $alpha_list = 0;
+my $alpha_length = 1;
+
+#Getopt::Long::Configure ("bundling"); # to allow -abc to set a, b, and c
+
+GetOptions ("help|?" => \$help,
+ man => \$man,
+ "alpha-list" => \$alpha_list,
+ "alpha-length=i" => \$alpha_length
+ ) or pod2usage(2);
+
+pod2usage(-exitval => 0, -verbose => 1) if $help;
+pod2usage(-exitval => 0, -verbose => 2) if $man;
+
+if ( $alpha_list ) {
+ open FILE_OUT, ">$ARGV[0]" or die "Cannot open input file $ARGV[0]\n";
+
+    $alphabet = "abcdefghijklmnopqrstuvwxyz";
+
+ @alpha_chars = split //, $alphabet;
+
+ foreach $letter ( @alpha_chars ) {
+ $letter_array[0] = $letter;
+ for ( $i = 1; $i < $alpha_length; $i++ ) {
+ $letter_array[$i] = '!';
+ }
+ $string = join '', @letter_array;
+ print FILE_OUT "\@!\@:".$string."\@>\n";
+ }
+}
+
+__END__
+
+=head1 MISCCW
+
+misccw.pl - Miscellaneous functions
+
+=head1 SYNOPSIS
+
+misccw.pl [options] [input_file] [output_file]
+
+
+ Options:
+ --help|-h|-? brief help message
+ --man|-m full documentation
+ --alpha-list generate a list of alphabetic markers
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--help>
+
+Print a brief help message and exit.
+
+=item B<--man>
+
+Print the manual page and exit.
+
+=item B<--alpha-list>
+
+Output a list of markers of the form @:?????@> in <output_file>
+
+=back
+
+=head1 DESCRIPTION
+
+B<misccw.pl> will possibly read the given <input_file>, and likely output
+something in the <output_file> depending on the options given.
+
+=cut
diff --git a/support/splint/scripts/unline.pl b/support/splint/scripts/unline.pl
index 5349156cb4..36a1b524f5 100755
--- a/support/splint/scripts/unline.pl
+++ b/support/splint/scripts/unline.pl
@@ -1,10 +1,41 @@
#!/usr/bin/perl
+# Copyright 2012-2020, Alexander Shibakov
+# This file is part of SPLinT
+#
+# SPLinT is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# SPLinT is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
# a simple script to remove comments and #line directives left by CTANGLE
# this allows one to build Makefiles and linker scripts from inside CWEB
# by making a few simple changes to the macros (to facilitate typesetting)
# and using CWEB's @= ... @> facility.
+use Getopt::Long;
+use Pod::Usage;
+
+my $man = 0;
+my $help = 0;
+
+#Getopt::Long::Configure ("bundling"); # to allow -abc to set a, b, and c
+
+GetOptions ("help|?" => \$help,
+ man => \$man,
+ ) or pod2usage(2);
+
+pod2usage(-exitval => 0, -verbose => 1) if $help;
+pod2usage(-exitval => 0, -verbose => 2) if $man;
+
open FILE, "$ARGV[0]" or die "Cannot open input file $ARGV[0]\n";
open FILEOUT, ">$ARGV[1]" or die "Cannot open output file $ARGV[1]\n";
@@ -16,7 +47,7 @@ while (<FILE>) {
if ( $ARGV[2] != 3 ) {
- s/\/\*.*\*\///g;
+ s/\/\*([^\/]|\/[^\*])*\*\///g;
}
@@ -29,3 +60,39 @@ while (<FILE>) {
# }
}
+
+__END__
+
+=head1 UNLINE
+
+unline.pl - Remove B<C> comments from a file
+
+=head1 SYNOPSIS
+
+unline.pl [options] input_file output_file
+
+
+ Options:
+ --help|-h|-? brief help message
+ --man|-m full documentation
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--help>
+
+Print a brief help message and exit.
+
+=item B<--man>
+
+Print the manual page and exit.
+
+=back
+
+=head1 DESCRIPTION
+
+B<unline.pl> will read the given <input_file>, remove the B<C> comments and the
+#line directives left by CTANGLE, and output the result in <output_file>.
+
+=cut
diff --git a/support/splint/tex/btokenset.sty b/support/splint/tex/btokenset.sty
index 07affb717e..72d3b3394f 100644
--- a/support/splint/tex/btokenset.sty
+++ b/support/splint/tex/btokenset.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,18 +14,21 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\prettywordpair{\%<flag>}{{$\langle\star\rangle$}}
-\prettywordpair{\%nondeterministic-parser}{{$\langle$non...ic-parser$\rangle$}}
-\prettywordpair{PERCENT_NONDETERMINISTIC_PARSER}{PER...NON...IC\_PARSER}
-\prettywordpair{semi}{{\tt;}}
+% definitions for typesetting tokens in \bison's grammar
+
+\prettywordpairwvis{\%<flag>}{{$\langle\star\rangle$}}{<*>}
+\prettywordpair{\%nondeterministic-parser}{{$\langle$nondet. parser$\rangle$}}
+\prettywordpair{PERCENT_NONDETERMINISTIC_PARSER}{{$\langle$nondet. parser$\rangle$}}
+\prettywordpairwvis{semi}{{\tt;}}{;}
\prettywordpair{TAG}{{{\tt<}{\it tag\/}{\tt>}}}
+\prettywordpair{$\ undefined}{{\tt\$undefined}}
\prettywordpair{<tag>}{{{\tt<}{\it tag\/}{\tt>}}}
-\prettywordpair{FLEX_OPTION}{{$\langle$\bf option$\rangle_{\rm f}$}}
-\prettywordpair{FLEX_STATE_X}{{$\langle$\bf state-x$\rangle_{\rm f}$}}
-\prettywordpair{FLEX_STATE_S}{{$\langle$\bf state-s$\rangle_{\rm f}$}}
+\prettywordpairwvis{FLEX_OPTION}{{$\langle$\bf option$\rangle_{\rm f}$}}{option_f}
+\prettywordpairwvis{FLEX_STATE_X}{{$\langle$\bf state-x$\rangle_{\rm f}$}}{state-x}
+\prettywordpairwvis{FLEX_STATE_S}{{$\langle$\bf state-s$\rangle_{\rm f}$}}{state-s}
\prettywordpair{ID}{{\cyr\lqq{\rm identifier}\rqq}}
\prettywordpair{ID_COLON}{{\cyr\lqq{\rm identifier: }\rqq}}
-\prettywordpair{NUL}{{$\Lambda$}}
+\prettywordpairwvis{NUL}{{$\Lambda$}}{Lambda}
\prettywordpair{INT}{{\bf int}}
\prettywordpair{CHAR}{{\bf char}}
\prettywordpair{STRING}{{\cyr\lqq{\tt string}\rqq}}
diff --git a/support/splint/tex/dcols.sty b/support/splint/tex/dcols.sty
index 2560495d45..71176efd14 100644
--- a/support/splint/tex/dcols.sty
+++ b/support/splint/tex/dcols.sty
@@ -1,4 +1,4 @@
-% Copyright 2004-2014, Alexander Shibakov
+% Copyright 2004-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -74,8 +74,9 @@
\footglue=1em plus.2em minus.1em
\adjskip=0pt plus 6pt
-\topskip=9pt
-
+%\topskip=9pt
+%\normalbottom % one of these must accompany the use of the macros below
+ % so that the columns are aligned
%
\def\setmcparams{%
@@ -231,7 +232,7 @@
\dsskip\smallskipamount
-\def\begindoublecols{\begingroup\r@ggedbottomfalse\parindent=0pt
+\def\begindoublecols{\begingroup\normalbottom\parindent=0pt
\count\footins=\@m \multiply\count\footins by\fnotesspan
\skip\footins=\fnotesspan\realfootins
\remaindergaps=\noofcolumns
@@ -718,7 +719,19 @@
% footnote counting macros
\let\footthenotes\empty
-\footline{\footthenotes}% so the footnotes know where to reset
+
+\footline{%
+ \footthenotes % so the footnotes know where to reset
+ \ifchapterhead
+ \vbox to 0pt{
+ \hbox to\pagewidth{\hfil
+ \mainfont\vrule width 0pt height 2pc\oldstyle\the\pageno
+ \hfil}
+ \vss
+ }%
+ \global\chapterheadfalse
+ \fi
+}
\def\fnmark#1{{%
\ifnum#1<\@M
@@ -741,20 +754,25 @@
% auxiliary file (we have to read it in whole because it will be
% rewritten as the document is being typeset); this opening has to be
% done carefully for the file might not be there yet; check for it first.
+% as a side note, one should not use the same stream number (at least the
+% one allocated by a \new... command) for both the input and the output
+% streams, since one might accidentally close a different stream that
+% happened to have the same index but was allocated by a \new... command as a
+% stream of a different type.
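% a minimal sketch of the pitfall described above (\somelog is a made-up name and
% the stream numbers are for illustration only):
%   \newwrite\fnlabels  % suppose this allocates *output* stream 1 ...
%   \newread\somelog    % ... while this allocates *input* stream 1
%   \closein\fnlabels   % closes *input* stream 1, i.e. \somelog, not the write stream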
-\newread\fnlabels
+\newread\tryfnlabels % to check if the file exists
\newwrite\fnlabels
\newcount\localfn % footnote count
\newcount\fniteration
\def\setupfootnotes{% this macro should be used at the beginning of the file, after a box (preferably)
- \def\footthenotes{\write\fnlabels{\noexpand\addpgendlabel}}%
- \openin\fnlabels=\jobname.ftn\relax
- \ifeof\fnlabels
- \closein\fnlabels\relax
+ \let\footthenotes\markbottomofpage
+ \openin\tryfnlabels=\jobname.ftn\relax
+ \ifeof\tryfnlabels
+ \closein\tryfnlabels\relax
\fniteration\z@
\else
- \closein\fnlabels\relax
+ \closein\tryfnlabels\relax
\input \jobname.ftn\relax
\fi
\edef\savedfnstream{\the\fnstream}%
@@ -764,30 +782,38 @@
\write\fnlabels{\noexpand\fniteration\the\fniteration\space}%
}
-\def\unsetfootnotes{% a clean(er) way to terminate footnote accounting
-% \let\footthenotes\footthenoteslast
- \def\footthenotes{\closeout\fnlabels\let\footthenotes\empty}%
+\def\markbottomofpage{%
+ \write\fnlabels{\noexpand\addpgendlabel}%
}
-% the control sequence below is not used, although the idea is clear;
-% unfortunately, this does not work, since \closeout\fnlabels is delayed
-% so \input \jobname.ftn is likely to import an empty file;
-% a more elaborate strategy is possible whereby all the macros inside
-% \jobname.ftn are made expandable and the test below (along with \input \jobname.ftn)
-% is put inside \write\m@ne{ ... } (so that the expansion of \input \jobname.ftn is
-% delayed) using string comparison macros defined elsewhere
-% but at the moment it seems excessive
-
-\def\footthenoteslast{% unused, see above
- \closeout\fnlabels
+\def\unsetfootnotes{%
+ \def\footthenotes{%
+ \write\fnlabels{\noexpand\endinput}%
+ \closeout\fnlabels
+ \global\let\footthenotes\checkfootnotestream%
+ % it is tempting to say \input \jobname.ftn
+ % at this point to use in the test below but this is not
+ % going to work: the \closeout will get invoked during the
+ % \shipout, which happens after the page is set and these
+ % macros are expanded.
+ % note that using another `stream' sequence for this purpose
+ % would not work either, since a footnote at the bottom of the
+ % page might `fire' but would be put on the next page by
+        % the page builder.
+ }%
+}
+
+\def\checkfootnotestream{% this needs at least one extra page to work correctly ...
\fnstream{}%
- \input \jobname.ftn
+ \input\jobname.ftn
\edef\next{\the\fnstream}%
\ifx\next\savedfnstream
- \else
+ \message{Footnotes are stable ...}%
+ \else % this test is too strict, a better way would be to check that
+ % \next is a prefix in \savedfnstream and the remaining characters are all `|'
\message{Footnotes may be set incorrectly ...}%
\fi
- \let\footthenotes\relax
+ \global\let\footthenotes\empty
}
\def\setfnmark{{%
@@ -807,7 +833,7 @@
\def\splitfnstream{\edef\tempdefone{\the\fnstream}%
\ifx\tempdefone\empty
- \def\nexttok{?}\message{You need to recount the footnotes ...}%
+ \def\nexttok{?}\message{You may have to reposition the footnotes ...}%
\else
\expandafter\splitfnstre@m\tempdefone\end
\fi
diff --git a/support/splint/tex/flex.sty b/support/splint/tex/flex.sty
index da3aa7b0b7..59e23785b3 100644
--- a/support/splint/tex/flex.sty
+++ b/support/splint/tex/flex.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,10 +14,20 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\newtoks\tayybyte % throwaway token register
-\newcount\tayymarker % throwaway count register
-
-\chardef\EOL=`\^^J%
+% the implementation of (some part of) the \flex automata mechanisms;
+% note that a few important features have been omitted and some that
+% have been implemented may follow a (sometimes subtly) different
+% semantics (see \unput and \yyless below);
+% among the features that are missing are REJECT, ECHO and YYMORE;
+% another important missing feature is the lack of trailing
+% context matching; fixed length trailing context or fixed length
+% match with variable length context can be easily emulated with
+% regular rules and \yyless (or \unput); when both the match and
+% the context are variable length, different automaton code is
+% required; the same code is required to implement REJECT; in addition,
+% the action code for the actions matching trailing context rules
+% must also be modified; while all of this may be implemented, it
+% is better not to (ab)use trailing context rules.
\def\yylex{%
\ifnum\yyg@yyinit=\z@ % if ( !yyg->yy_init ) {
@@ -29,6 +39,13 @@
\yylwhile
}
+\newif\ifyylessused
+\newtoks\yylesslastformat
+\newtoks\yylesslaststash
+\newcount\yylesslastfmark
+\newcount\yylesslastsmark
+\newcount\yylesslastchar
+
\def\yylwhile{%
% yy_cp = yyg->yy_c_buf_p;
% *yy_cp = yyg->yy_hold_char;
@@ -36,6 +53,14 @@
%
\yycurrentstate=\yyg@yystart % yy_current_state = yyg->yy_start;
\advance\yycurrentstate\YYATBOL % yy_current_state += YY_AT_BOL();
+ % save the format and stash streams for \yyless
+ \ifyylessused
+ \yylesslastformat\yyformat
+ \yylesslaststash\yystash
+ \yylesslastfmark\yyfmark
+ \yylesslastsmark\yysmark
+ \yylesslastchar\yytextlastchar
+ \fi
\yymatch
}
@@ -50,39 +75,46 @@
\fi
}
-\def\yyreinput{\expandafter\yyresetstreams\expandafter\yyinput\the\yytextseen}
+\def\yyreinput{\expandafter\yyresetstreams\expandafter\yyinput\the\yytext@seen}
\def\yyresetstreams{%
- \iftraceflexbuffers\ferrmessage{will rescan: \the\yytextseen}\fi
- \yytextseen{}%
- \yytextseenpure\yytextseen
- \yyformatseen\yytextseen
- \yystashseen\yytextseen
+ \iftraceflexbuffers\ferrmessage{will rescan: <\the\yytext@seen> (stash: \the\yystashseen)}\fi
+ \yytextseen{}\yytext@seen{}\yytextseenpure{}%
+ \yyformatseen{}\yystashseen{}%
\yytextbackupfalse
}
+% \yym@tch is the return point from the low-level input routine
+
\def\yym@tch{% do {
\yyc=\fgetelemof{yyec}\at\yycp@\relax % yy_c = yy_ec[YY_SC_TO_UI(*yy_cp)];
\yyact=\fgetelemof{yyaccept}\at\yycurrentstate\relax
% ... ... reuse \yyact ( yy_act = yy_accept[yy_current_state] )
+ \ifyyflexdebug\ferrmessage{match action: \the\yyact\space(yytext: \the\yytext@:^^Jyytextseen: \the\yytext@seen>\the\yybyte)}\fi
\ifnum\yyact=\z@ % if ( yy_accept[yy_current_state] ) {
\else %
+ \ifyyflexdebug\ferrmessage{yym@tch: stash seen: \the\yystashseen^^Jsmark: \the\yysmarklast}\fi
\yyg@yylastacceptingstate=\yycurrentstate % yyg->yy_last_accepting_state = yy_current_state;
\concat\yytext\yytextseen % yyg->yy_last_accepting_cpos = yy_cp;
- \concat\yytextpure\yytextseenpure % ... ...
+ \concat\yytext@\yytext@seen % ... ...
+ \concat\yytextpure\yytextseenpure % ... ...
\concat\yyformat\yyformatseen % ... ...
\concat\yystash\yystashseen % ... ...
\yytextlastchar=\yytextseenlastchar %
\yyfmark=\yyfmarklast % ... ...
\yysmark=\yysmarklast % ... ...
- \yytextseen={}%
- \yytextseenpure\yytextseen
- \yyformatseen\yytextseen
- \yystashseen\yytextseen
+ \yyfmark@accept=\yyfmark
+ \yysmark@accept=\yysmark
+ \yytextseen={}\yytext@seen={}\yytextseenpure{}%
+ \yyformatseen{}\yystashseen{}%
\fi % }
\yyllwhile
}
+\let\yyreturn\yym@tch
+
+% \yyllwhile searches for an accepting or rejecting state
+
\def\yyllwhile{% ... yyllwhile:
\yyact=\fgetelemof{yybase}\at\yycurrentstate\relax % ... ... reusing \yyact ( yy_act = yy_base[yy_current_state] )
\advance\yyact\yyc % ... ... ( yy_act = yy_act + yy_c )
@@ -103,21 +135,27 @@
\def\yymatchtail{%
\yycurrentstate=\fgetelemof{yybase}\at\yycurrentstate\relax
\advance\yycurrentstate\yyc %
- \yycurrentstate=\fgetelemof{yynxt}\at\yycurrentstate\relax
+ \yycurrentstate=\fgetelemof{yynxt}\at\yycurrentstate\relax
+ \ifyyflexdebug\ferrmessage{yysubtext: (\the\yysubtext),^^Jyysbyte: (\the\yysbyte),^^Jyybyte: (\the\yybyte)}\fi
% yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
\concat\yytextseen\yybyte % ++yy_cp;
+ \concat\yytext@seen\yysubtext \yysubtext{}% % ...
+ \concat\yytext@seen\yybyte % ...
\concat\yytextseenpure\yybytepure % ...
- \concat\yyformatseen\yyfbyte\yyfbyte{}% % ...
- \concat\yystashseen\yysbyte\yysbyte{}% % ...
+ \concat\yyformatseen\yyfbyte \yyfbyte{}% % ...
+ \concat\yystashseen\yysbyte \yysbyte{}% % ...
\yytextseenlastchar=\yycp@ %
\yyfmarklast=\formatmarker % ...
\yysmarklast=\stashmarker % ...
\yyact=\fgetelemof{yybase}\at\yycurrentstate\relax % ... reusing \yyact ( yy_act = yy_base[yy_current_state] )
+ \ifyyflexdebug\ferrmessage{matching the rest: <\the\yytext@seen>^^J(stash: \the\yystash:^^J\the\yystashseen)}\fi
\ifnum\yyact=\YYBASEMAXENTRY\relax % } while ( yy_base[yy_current_state] != max( yy_base ) );
- \xskiptofi\yyfindaction % ... if ( yy_base[yy_current_state] == max( yy_base ) ) goto yy_find_action;
+ \ifyyflexdebug\ferrmessage{looking for the action}\fi
+ \yybreak\yyfindaction % ... if ( yy_base[yy_current_state] == max( yy_base ) ) goto yy_find_action;
\else % ... else goto yy_match;
- \xskiptofi\yymatch
- \fi
+ \ifyyflexdebug\ferrmessage{looking for more input}\fi
+ \yybreak\yymatch
+ \yycontinue
}
\newif\ifyyflexdebug
@@ -129,190 +167,38 @@
\yytextbackuptrue % yy_cp = yyg->yy_last_accepting_cpos;
\yycurrentstate=\yyg@yylastacceptingstate % yy_current_state = yyg->yy_last_accepting_state;
\yyact=\fgetelemof{yyaccept}\at\yycurrentstate\relax% yy_act = yy_accept[yy_current_state];
- \ifyyflexdebug\ferrmessage{backup: \the\yytext:\the\yytextseen}\fi
+ \formatmarker=\yyfmark@accept % ...
+ \stashmarker=\yysmark@accept % ...
+ \ifyyflexdebug\ferrmessage{backup: \the\yytext@:\the\yytext@seen^^J^^J%
+ from state: \the\yyg@yylastacceptingstate\space
+ stashmarker: \the\stashmarker}\fi
\else
- %
+ % note that at this point \yytextbackupfalse is already set
\concat\yytext\yytextseen
+ \concat\yytext@\yytext@seen
\concat\yytextpure\yytextseenpure
\concat\yyformat\yyformatseen
\concat\yystash\yystashseen
\yytextlastchar=\yytextseenlastchar
- \yyfmark=\yyfmarklast
- \yysmark=\yysmarklast
- \yytextseen{}%
- \yytextseenpure{}%
- \yyformatseen{}%
- \yystashseen{}%
+ \yyfmark=\yyfmarklast % == \formatmarker
+ \yysmark=\yysmarklast % == \stashmarker
+ \yytextseen{}\yytext@seen{}\yytextseenpure{}%
+ \yyformatseen{}\yystashseen{}%
%
\fi % }
-% \YYDOBEFOREACTION % YY_DO_BEFORE_ACTION;
+ \YYDOBEFOREACTION % YY_DO_BEFORE_ACTION;
\ifyyflexdebug
- \ferrmessage{action: \the\yyact, state: \the\yycurrentstate\space(\the\yytext)}%
+ \ferrmessage{action: \the\yyact,^^J%
+ state: \the\yycurrentstate\space(\the\yytext@:\the\yytext@seen)^^J%
+ stash: \the\yystash:\the\yystashseen^^J%
+ yysbyte: \the\yysbyte}%
\fi
\doaction
}
-\def\YYRULESETUP{% %#define YY_RULE_SETUP \
- \yytoksempty{\yytext}{}{% % if ( yyleng > 0 ) \
- \ifnum\EOL=\yytextlastchar % YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (yytext[yyleng - 1] == '\n'); \
- \YYATBOL=\@ne % YY_USER_ACTION
- \else %#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
- \YYATBOL=\z@
- \fi
- }%
-}
-
-%#define BEGIN yyg->yy_start = 1 + 2 *
-% should be:
-%#define BEGIN(state) yyg->yy_start = 1 + 2 * ( state )
-
-\newif\iftracestates
-
-\def\yylexstate#1{\csname flexstate\parsernamespace #1\endcsname}
-
-\def\yyBEGIN#1{%
- \yyg@yystart
- \expandafter\ifx\csname flexstate\parsernamespace #1\endcsname\relax
- #1 \iftracestates\ferrmessage{now in state: #1}\fi
- \else
- \csname flexstate\parsernamespace #1\endcsname\relax
- \iftracestates\ferrmessage{now in state: #1 (\number\csname flexstate\parsernamespace #1\endcsname)}\fi
- \fi
- \multiply\yyg@yystart\tw@
- \advance\yyg@yystart\@ne
-}
-
-\def\yyBEGINr#1{%
- \yyg@yystart#1%
- \iftracestates\ferrmessage{new state: \the\yyg@yystart}\fi
- \multiply\yyg@yystart\tw@
- \advance\yyg@yystart\@ne
-}
-
-%#define YY_START ((yyg->yy_start - 1) / 2)
-
-\def\YYSTART{%
- \expandafter\xdivbytwo\expandafter{\number
- \expandafter\xdecrement\expandafter{\number\yyg@yystart}}
-}
-
-\def\yypushstate#1{%
- \expandafter\yypush\expandafter{\number\YYSTART}\on\yystatestack
- \yyBEGIN{#1}%
-}
-
-\def\yypopstate{%
- \yypop\yystatestack\into{\expandafter\yyBEGIN\romannumeral0}%
- \iftracestates\message{new state (* 2 + 1 scrambled): \the\yyg@yystart}\fi
-}
-
-\def\yytopstate#1{%
- \yyreadstack\yystatestack\at\z@\to#1%
-}
-
-\let\yylexcontinue\yylwhile % complete the while loop (requires \yylexnext)
-\def\yylexcontinue{\yytext{}\yytextpure\yytext\yylwhile} % complete the while loop
-
-% \yylextail is the return point from the lexer
-
-\let\yyreturn\yym@tch % the return point from the low-level input
-
-\def\yylexreturnbootstrap#1{%
- \yytext{}\yytextpure\yytext
- \expandafter\ifx\csname token\parsernamespace #1\endcsname\relax
- % this token value is undefined
- \let\yylextail\yylex % so lex the next token
- \else
- \yychar\csname token\parsernamespace #1\endcsname\relax
- \let\yylextail\yyparsetail
- \fi
-}
-
-\def\yylexreturnregular#1{\yychar\csname token\parsernamespace #1\endcsname\relax\yytext{}\yytextpure{}\let\yylextail\yyparsetail}
-
-\def\yylexreturnval#1{% return value (yytext) only
- \yychar\csname token\parsernamespace #1\endcsname\relax
- \yylval\expandafter{\expandafter{\the\yytext}}%
- \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yytextpure}}%
- % the above is an equivalent of
- % \edef\next{\yylval{{\the\yytext}{\the\yytextpure}}}\next
- % but does not `pollute' the definition of \next
- \yytext{}\yytextpure\yytext\let\yylextail\yyparsetail
-}
-
-\def\yylexreturnptr#1{% return stream pointers only
- \yylval\expandafter{\expandafter{\the\yyfmark}}%
- \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yysmark}}%
- % the above is an equivalent of
- % \edef\next{\yylval{{\the\yyfmark}{\the\yysmark}}}\next
- % but does not `pollute' the definition of \next
- \yylexreturn{#1}%
-}
+\newif\iftraceactioncode
-\def\yylexreturntext{\yylexreturnptr{\the\yytextpure}}
-
-\def\yylexreturnraw#1{% return the character value
- \yychar`#1\relax
- \yytext{}\yytextpure{}\let\yylextail\yyparsetail
-}
-
-\def\yylexreturnchar{%
- \expandafter\yylexreturnraw\the\yytextpure
-}
-
-\def\yylexnext{\yytext{}\yytextpure{}} % use this with a trivial \yylexcontinue
-\let\yylexnext\empty
-
-\def\yyterminate{\yychar\YYEOF\yylval{}\yytext{}\yytextpure{}\let\yylextail\yyparsetail}
-
-\def\yyerrterminate{\yylexreturn{$undefined}} %$
-
-\def\ferrmessage#1{\immediate\write16{#1}}
-
-% \yyless is slow and the macros below have not been tested in their current
-% form: avoid them
-
-\def\yyless#1{%
- \ifnum#1=\z@
- \ROLLBACKCURRENTTOKEN
- \else
- \let\oldyyreturn\yyreturn
- \edef\yyreturn{\noexpand\yyskipnchars{\number#1}}%
- \expandafter\yyinput\the\yytext\yyeof
- \fi
-}
-
-\def\yyskipnchars#1{%
- \ifnum\yycp@=\YYENDOFBUFFERCHAR % read \yyeof
- \yycomplain{yyless buffer overflow: #1 characters too many}%
- \else
- \ifnum#1=\@ne % skipped the required number of tokens
- \yybreak@\yyl@ss
- \else
- \edef\yyreturn{\noexpand\yyskipnchars{\xdecrement{#1}}}%
- \yybreak@\yyinput
- \fi
- \yycontinue
-}
-
-\def\yyl@ss#1\yyeof{%
- \yytext{#1}%
- \concatl\yytext\yytextseen
- \yytext{}\yytextpure{}%
- \yytextbackuptrue
- \let\yyreturn\oldyyreturn
-}
-
-%#define ROLLBACK_CURRENT_TOKEN \
-% do { \
-% scanner_cursor.column -= mbsnwidth (yytext, yyleng, 0); \
-% yyless (0); \
-% } while (0)
-
-\def\ROLLBACKCURRENTTOKEN{%
- \concatl\yytext\yytextseen
- \yytext{}\yytextpure{}%
- \yytextbackuptrue
+\def\YYDOBEFOREACTION{%
}
% \yytext---current yytext
@@ -332,6 +218,13 @@
% yyg->yy_c_buf_p = yy_cp;
\def\doaction{%
+ \iftraceactioncode
+ {%
+ \expandafter\toksa
+ \expandafter\expandafter\expandafter{\csname doflexaction\number\yyact\parsernamespace\endcsname}%
+ \ferrmessage{action code (\the\yyact):^^J\the\toksa}%
+ }%
+ \fi
\yydoactionswitch\yyact
}
@@ -344,10 +237,14 @@
% another;
\def\yylexeofaction{%
+ \ifyyflexdebug
+ \ferrmessage{eof action: .\the\yytext@>>\the\yytext@seen(\the\yytext)}%
+ \fi
\yygetnextbuffer
\ifYYEOBLASTMATCH
\yygetpreviousstate
- \ifyyflexdebug\ferrmessage{found previous state: \the\yycurrentstatelocal\space matched text: \the\yytext>>\the\yytextseen(\the\yybyte))}\fi
+ \ifyyflexdebug\ferrmessage{found previous state: \the\yycurrentstatelocal\space
+ matched text: \the\yytext@>>\the\yytext@seen(\the\yybyte)}\fi
\yycurrentstate\yycurrentstatelocal
\let\yylextail\yyfind@ction
\YYEOBLASTMATCHfalse
@@ -356,55 +253,60 @@
\advance\yyact\YYSTART
\advance\yyact\@ne % yy_act = YY_STATE_EOF(YY_START);
\let\yylextail\doaction % goto do_action;
- \appendl\yytextseen{\the\yytext}\yytextbackuptrue % in case the scanner gets called again
- \ifyyflexdebug
+ \concatl\yytext\yytext@seen \yytextbackuptrue % in case the scanner gets called again
\yytoksempty{\yytext}%
{\errmessage{internal error: matched nothing in eob state}}%
- {\yyisthiscsr\yytext\yyeof{}{\errmessage{internal error: matched more that a single eob}}}%
- \ferrmessage{eob in user state: \the\yyact\space matched text: \the\yytext(\the\yytextpure)\space
- to read: \the\yytextseen}%
+ {\yyisthiscsr\yytext\yyeof{}{\errmessage{internal error: matched more than a single eob: (\the\yytext)}}}%
+ \ifyyflexdebug
+ \ferrmessage{eob in user state: \the\yyact\space matched text: \the\yytext@(\the\yytext)\space
+ to read: \the\yytext@seen}%
\fi
- \yytext{}%
+ \yytext{}\yytext@{}%
\fi
}
\def\yygetnextbuffer{%
\yytoksempty{\yytext}{\errmessage{internal error: empty matched text in eob state}}%
- {%
- \expandafter\expandafter\expandafter
- \yystringempty % we have matched exactly one token
- \expandafter\expandafter\expandafter{\expandafter\eatone\the\yytext}%
- {\YYEOBLASTMATCHfalse}{\YYEOBLASTMATCHtrue}%
+ {% this somewhat complicated definition is needed in case the last character is an ordinary
+ % space character
+ \expandafter\yystartsinspace\expandafter{\the\yytext}{%
+ \expandafter\expandafter\expandafter
+ \yystringempty % we have matched exactly one token (space)
+ \expandafter\expandafter\expandafter{\expandafter\eattospace\the\yytext}%
+ {\YYEOBLASTMATCHfalse}{\YYEOBLASTMATCHtrue}%
+ }{%
+ \expandafter\expandafter\expandafter
+ \yystringempty % we have matched exactly one token
+ \expandafter\expandafter\expandafter{\expandafter\eatone\the\yytext}%
+ {\YYEOBLASTMATCHfalse}{\YYEOBLASTMATCHtrue}%
+ }%
}%
}
-\def\yyeofcontainer{\yyeof}
-
\newif\ifYYEOBLASTMATCH
\def\yygetpreviousstate{%
\yycurrentstatelocal=\yyg@yystart
\advance\yycurrentstatelocal\YYATBOL
+ \ifyyflexdebug\ferrmessage{state setup.: start(\the\yyg@yystart), bol(\number\YYATBOL)}\fi
%
\yytoksempty{\yytext}{\errmessage{internal error: empty matched text in eob state}}{}%
- \expandafter\yysplitoff\expandafter.\the\yytext\yyeof\end
- \appendl\yytextseen{\noexpand\yyeof}\yytextbackuptrue
- \ifyyflexdebug\ferrmessage{searching for last match in text: \the\yytext}\fi % TODO: delete
+ % move the scanned \yyeof to the buffer of seen characters
+ \expandafter\yysplitoff\expandafter.\the\yytext\yyeof\end\yytext
+ \expandafter\yysplitoff\expandafter.\the\yytext@\yyeof\end\yytext@
+ \appendlnx\yytextseen\yyeof
+ \appendlnx\yytext@seen\yyeof \yytextbackuptrue
+ \ifyyflexdebug\ferrmessage{searching for last match in text: \the\yytext@}\fi
\yytoksempty{\yytext}{}{% yyg->yytext_ptr >= yyg->yy_c_buf_p /* do not enter the for loop */
- \yytextpure{}%
- \yyfutureyytext\yytextseen % to be reinserted in yyfind@action
- \yytextseen{}\yytextseenpure{}%
- \expandafter\yytext\expandafter
- {\expandafter}\expandafter
- \yygetpreviousstatefor\the\yytext\splitoffend % at least one iteration of the for loop
- }%
+ \expandafter\yygetpreviousstatefor\the\yytext@\splitoffend % at least one iteration of the for loop
+ }%
}
-\def\yysplitoff#1\yyeof#2\end{%
+\def\yysplitoff#1\yyeof#2\end#3{%
\yystringempty{#2}{%
- \errmessage{internal error: something other than \noexpand\yyeof\space triggered eob action}%
+ \errmessage{internal error: something other than \noexpand\yyeof\space triggered eob action: <\the\yytext@>}%
}{%
- \yytext\expandafter{\eatone#1}% to remove the `.' at the beginning
+ #3\expandafter{\eatone#1}% to remove the `.' at the beginning
}%
}
@@ -413,46 +315,41 @@
% to be restored on exit from the `for' loop
% i.e.~when the \splitoff token is read
\let\currentyyreturn\yyreturn
-%
- \let\currentyyfbyte\yyfbyte
- \let\currentyysbyte\yysbyte
- \let\currentstashmarker\stashmarker
- \let\currentformatmarker\formatmarker
-%
- \let\yyfbyte\tayybyte
- \let\yysbyte\tayybyte
- \let\stashmarker\tayymarker
- \let\formatmarker\tayymarker
-%
+ \yyfutureyytext\yytext@seen % to be reinserted in yyfind@action
+ \yytext{}\yytext@{}\yytextpure{}%
+ \yytextseen{}\yytext@seen{}\yytextseenpure{}%
+ \formatmarker=\yyfmark@accept
+ \stashmarker=\yysmark@accept
+ \yyfmarklast=\formatmarker
+ \yysmarklast=\stashmarker
+ \yyformatseen{}\yystashseen{}%
\let\yyreturn\yygetpreviousstatef@r
\yyinput
}
\def\yygetpreviousstatef@r{%
\let\default\yygetpreviousstatedefault
- \ifyyflexdebug\ferrmessage{just read: \the\yybyte}\fi % TODO: delete
+ \ifyyflexdebug\ferrmessage{just read: \the\yybyte (so far: \the\yytext@><\the\yytext@seen)}\fi % TODO: delete
\switchon{\expandafter\getfirsttoken\expandafter{\the\yybyte}}\in\yygetpreviousstateswitch
}
\def\yygetpreviousstateswitch{%
- \yyeof {% scanned a NUL
+ \yyeof {% scanned a NUL: currently NUL transitions are not implemented
+ % (so one cannot use NUL's in the input) so this state is never used
\yyclocal\YYECMAGIC\relax % the constant extracted from yy_get_next_state
+ \edef\yycurrentstatelocal@prejam{\the\yycurrentstatelocal}%
\yygetpreviousstat@f@r
- }
+ }%
\splitoffend {% yyg->yytext_ptr >= yyg->yy_c_buf_p /* exit the for loop */
% this is not an actual character but a marker for the end of the `for' loop
- \let\yyfbyte\currentyyfbyte
- \let\yysbyte\currentyysbyte
- \let\stashmarker\currentstashmarker
- \let\formatmarker\currentformatmarker
\yycp@=\YYENDOFBUFFERCHAR\relax % this is not necessary
\yybyte{}\yybytepure{}%
%
\let\yyreturn\currentyyreturn
- }
+ }%
\endparse \endparseinput {%
\errmessage{internal error.: reading past the end of the input buffer}%
- }
+ }%
}
\def\yygetpreviousstatedefault{%
@@ -465,20 +362,41 @@
\ifnum\fgetelemof{yyaccept}\at\yycurrentstatelocal=\z@
% if ( yy_accept[yy_current_state] ) {
\concat\yytextseen\yybyte
+ \concat\yytext@seen\yysubtext \yysubtext{}%
+ \concat\yytext@seen\yybyte
\concat\yytextseenpure\yybytepure
- \yybyte{}\yybytepure\yybyte
+ \yybyte{}\yybytepure{}%
+ \yyfmarklast=\formatmarker
+ \yysmarklast=\stashmarker
+ \concat\yyformatseen\yyfbyte \yyfbyte{}%
+ \concat\yystashseen\yysbyte \yysbyte{}%
\else %
- \ifyyflexdebug\ferrmessage{possible accepting state: \the\yycurrentstatelocal:%
- \fgetelemof{yyaccept}\at\yycurrentstatelocal}\fi % TODO: delete
+ \ifyyflexdebug
+ \ferrmessage{possible accepting state.: \the\yycurrentstatelocal:%
+ \fgetelemof{yyaccept}\at\yycurrentstatelocal}%
+ \fi
\yyg@yylastacceptingstate=\yycurrentstatelocal
% yyg->yy_last_accepting_state = yy_current_state;
\concat\yytext\yytextseen % yyg->yy_last_accepting_cpos = yy_cp; TODO:?????????
- \concat\yytextpure\yytextseenpure % ... ...
- \yytextseen=\yybyte
- \yytextseenpure=\yybytepure
+ \concat\yytext@\yytext@seen % ...
+ \concat\yytextpure\yytextseenpure % ...
+ \yytextseen=\yybyte % ...
+ \yytext@seen=\yysubtext \yysubtext{}% % ...
+ \concat\yytext@seen\yybyte \yybyte{}% % ...
+ \yytextseenpure=\yybytepure\yybytepure{}% %
\yytextbackuptrue
- \yybyte{}\yybytepure\yybyte
\yytextlastchar=\yytextseenlastchar %
+ %
+ \yyfmark=\yyfmarklast % ... ...
+ \yysmark=\yysmarklast % ... ...
+ \yyfmark@accept=\yyfmark
+ \yysmark@accept=\yysmark
+ \yyfmarklast=\formatmarker
+ \yysmarklast=\stashmarker
+ \concat\yyformat\yyformatseen
+ \concat\yystash\yystashseen
+ \yyformatseen\yyfbyte\yyfbyte{}%
+ \yystashseen\yysbyte\yysbyte{}%
\fi
\yygpswhile
}
@@ -501,11 +419,26 @@
\yycontinue % }
}
-\def\yygpsfornext{%
+\newif\ifyyflexisjammed
+
+\def\yygpsfornext{% this combines yy_get_previous_state() and yy_get_NUL_trans()
+ \ifyyflexdebug\ferrmessage{starting in state.: \the\yycurrentstatelocal}\fi % TODO: delete
\yycurrentstatelocal=\fgetelemof{yybase}\at\yycurrentstatelocal\relax
\advance\yycurrentstatelocal\yyclocal
\yycurrentstatelocal=\fgetelemof{yynxt}\at\yycurrentstatelocal\relax
- \ifyyflexdebug\ferrmessage{switching to state: \the\yycurrentstatelocal}\fi % TODO: delete
+ % yy_current_state = yy_nxt[yy_base[yy_current_state] + (unsigned int) yy_c];
+ % currently, we do not allow NULs in the input so the code below is not used
+ % when NUL transitions are implemented, this code will come into play
+ \ifnum\yycurrentstatelocal=\YYMAXREALCHAR\relax
+ \yycurrentstatelocal=\yycurrentstatelocal@prejam\relax
+ \ifyyflexdebug\ferrmessage{jammed, switching to \yycurrentstatelocal@prejam}\fi
+ \yyflexisjammedtrue
+ \else
+ \yyflexisjammedfalse
+ \fi
+ %yy_is_jam = (yy_current_state == ...);
+ %return yy_is_jam ? 0 : yy_current_state;
+ \ifyyflexdebug\ferrmessage{switching to state.: \the\yycurrentstatelocal\space on char \the\yyclocal}\fi % TODO: delete
\yyinput
}
@@ -518,30 +451,266 @@
% yy_cp = yyg->yy_last_accepting_cpos;
\yycurrentstate=\yyg@yylastacceptingstate % yy_current_state = yyg->yy_last_accepting_state;
\yyact=\fgetelemof{yyaccept}\at\yycurrentstate\relax% yy_act = yy_accept[yy_current_state];
+ \formatmarker=\yyfmark@accept % ...
+ \stashmarker=\yysmark@accept % ...
\ifyyflexdebug\ferrmessage{backup.: \the\yytext:\the\yytextseen}\fi
\else
%
\concat\yytext\yytextseen
- \concat\yytext\yybyte
- \yybyte{}%
- \yytextseen{}%
+ \concat\yytext@\yytext@seen
\concat\yytextpure\yytextseenpure
- \concat\yyformat\yyformatseen
- \concat\yystash\yystashseen
+ \concat\yytext\yybyte
+ \concat\yytext@\yysubtext \yysubtext{}%
+ \concat\yytext@\yybyte
+ \concat\yytextpure\yybytepure
+ \yybyte{}\yybytepure{}%
+ \yytextseen{}\yytext@seen{}\yytextseenpure{}%
+ \concat\yyformat\yyformatseen\yyformatseen{}%
+ \concat\yystash\yystashseen\yystashseen{}%
\yytextlastchar=\yytextseenlastchar
\yyfmark=\yyfmarklast
\yysmark=\yysmarklast
- \yyformatseen{}%
- \yystashseen{}%
%
\fi % }
- \concat\yytextseen\yyfutureyytext % ... reinsert the tokens seen by the lexer before
+ \concat\yytext@seen\yyfutureyytext % ... reinsert the tokens seen by the lexer before
% ... \yygetpreviousaction got rolling
\yyfutureyytext{}%
\yytextbackuptrue
-% \YYDOBEFOREACTION % YY_DO_BEFORE_ACTION;
+ \YYDOBEFOREACTION % YY_DO_BEFORE_ACTION;
\ifyyflexdebug
\ferrmessage{action.: \the\yyact, state: \the\yycurrentstate\space(\the\yytext) \parsernamespace}%
\fi
\doaction
}
+
+\def\YYRULESETUP{% %#define YY_RULE_SETUP \
+ \yytoksempty{\yytext}{}{% % if ( yyleng > 0 ) \
+ \ifnum\n=\yytextlastchar % YY_CURRENT_BUFFER_LVALUE->yy_at_bol = (yytext[yyleng - 1] == '\n'); \
+ \YYATBOL=\@ne % YY_USER_ACTION
+ \else %#define YY_AT_BOL() (YY_CURRENT_BUFFER_LVALUE->yy_at_bol)
+ \YYATBOL=\z@
+ \ifyyflexdebug\ferrmessage{YYATBOL: 0}\fi
+ \fi
+ }%
+}
+
+%#define yy_set_bol(at_bol) \
+% { \
+% if ( ! YY_CURRENT_BUFFER ){\
+% yyensure_buffer_stack (); \
+% YY_CURRENT_BUFFER_LVALUE = \
+% yy_create_buffer(yyin,YY_BUF_SIZE ); \
+% } \
+% YY_CURRENT_BUFFER_LVALUE->yy_at_bol = at_bol; \
+% }
+
+\def\yysetbol#1{\YYATBOL=#1\relax} % we do not switch or create buffers
+
+%#define BEGIN yyg->yy_start = 1 + 2 *
+% should be:
+%#define BEGIN(state) yyg->yy_start = 1 + 2 * ( state )
+
+\newif\iftracestates
+
+\def\yylexstate#1{\csname flexstate\parsernamespace #1\endcsname}
+
+\def\yyBEGIN#1{%
+ \yyg@yystart
+ \expandafter\ifx\csname flexstate\parsernamespace #1\endcsname\relax
+ #1 \iftracestates\ferrmessage{now in state: #1}\fi
+ \else
+ \csname flexstate\parsernamespace #1\endcsname\relax
+ \iftracestates\ferrmessage{now in state: #1 (\number\csname flexstate\parsernamespace #1\endcsname)}\fi
+ \fi
+ \multiply\yyg@yystart\tw@
+ \advance\yyg@yystart\@ne
+}
+
+\def\yyBEGINr#1{%
+ \yyg@yystart#1%
+ \iftracestates\ferrmessage{new state: \the\yyg@yystart}\fi
+ \multiply\yyg@yystart\tw@
+ \advance\yyg@yystart\@ne
+}
+
+%#define YY_START ((yyg->yy_start - 1) / 2)
+
+\def\YYSTART{%
+ \expandafter\xdivbytwo\expandafter{\number
+ \expandafter\xdecrement\expandafter{\number\yyg@yystart}}
+}
+
+\def\yypushstate#1{%
+ \expandafter\yypush\expandafter{\number\YYSTART}\on\yystatestack
+ \yyBEGIN{#1}%
+}
+
+\def\yypopstate{%
+ \yypop\yystatestack\into{\expandafter\yyBEGIN\romannumeral0}%
+ \iftracestates\ferrmessage{new state (* 2 + 1 scrambled): \the\yyg@yystart}\fi
+}
+
+\def\yytopstate#1{%
+ \yyreadstack\yystatestack\at\z@\to#1%
+}
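+
+% a hypothetical usage sketch (the state names COMMENT and INITIAL below are
+% placeholders, not states defined in this file): a lexer action may switch
+% start conditions with the macros above, mirroring \flex's BEGIN and the
+% push/pop state functions
+%
+% \yypushstate{COMMENT}% save the current start condition and enter COMMENT
+% ...
+% \yypopstate % return to the saved start condition
+% \yyBEGIN{INITIAL}% an absolute switch that bypasses the state stack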
+
+\let\yylexcontinue\yylwhile % complete the while loop (requires \yylexnext)
+\def\yylexcontinue{\yytext{}\yytextpure{}\yytext@{}\yylwhile} % complete the while loop
+
+% \yylextail is the return point from the lexer
+
+\def\yylexreturnbootstrap#1{%
+ \yytext{}\yytext@{}\yytextpure{}%
+ \expandafter\ifx\csname token\parsernamespace #1\endcsname\relax
+ % this token value is undefined
+ \let\yylextail\yylex % so lex the next token
+ \else
+ \yychar\csname token\parsernamespace #1\endcsname\relax
+ \let\yylextail\yyparsetail
+ \fi
+}
+
+\def\yylexreturnregular#1{%
+ \yychar\csname token\parsernamespace #1\endcsname\relax
+ \yytext{}\yytext@{}\yytextpure{}\let\yylextail\yyparsetail
+}
+
+\def\yylexreturnval#1{% return value (yytext) only
+ \yychar\csname token\parsernamespace #1\endcsname\relax
+ \yylval\expandafter{\expandafter{\the\yytext}}%
+ \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yytextpure}}%
+ % the above is an equivalent of
+ % \edef\next{\yylval{{\the\yytext}{\the\yytextpure}}}\next
+ % but does not `pollute' the definition of \next
+ \yytext{}\yytext@{}\yytextpure{}\let\yylextail\yyparsetail
+}
+
+\def\yylexreturnptr#1{% return stream pointers only
+ \yylval\expandafter{\expandafter{\the\yyfmark}}%
+ \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yysmark}}%
+ % the above is an equivalent of
+ % \edef\next{\yylval{{\the\yyfmark}{\the\yysmark}}}\next
+ % but does not `pollute' the definition of \next
+ \yylexreturn{#1}%
+}
+
+\def\yylexreturnsym#1{% return the value (yytext) followed by the pointers
+ \yychar\csname token\parsernamespace #1\endcsname\relax
+ \yylval\expandafter{\expandafter{\the\yytext}}%
+ \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yytextpure}}%
+ \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yyfmark}}%
+ \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yysmark}}%
+ % the above is an equivalent of
+ % \edef\next{\yylval{{\the\yytext}{\the\yytextpure}{\the\yyfmark}{\the\yysmark}}}\next
+ % but does not `pollute' the definition of \next
+ \yytext{}\yytext@{}\yytextpure{}\let\yylextail\yyparsetail
+}
+
+\def\yylexreturntext{\yylexreturnptr{\the\yytextpure}}
+
+\def\yylexreturnraw#1{% return the character, pointers as the value
+ \yylval\expandafter{\expandafter{\the\yyfmark}}%
+ \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yysmark}}%
+ \yychar`#1\relax
+ \yytext{}\yytext@{}\yytextpure{}\let\yylextail\yyparsetail
+}
+
+\def\yylexreturnchar{%
+ \expandafter\yylexreturnraw\the\yytextpure
+}
+
+\def\yylexreturnxchar#1{% return the numeric value, pointers as the value
+ \yylval\expandafter{\expandafter{\the\yyfmark}}%
+ \yylval\expandafter\expandafter\expandafter{\expandafter\the\expandafter\yylval\expandafter{\the\yysmark}}%
+ \yychar#1\relax
+ \yytext{}\yytext@{}\yytextpure{}\let\yylextail\yyparsetail
+}
+
+\def\yylexnext{\yytext{}\yytext@{}\yytextpure{}} % use this with a trivial \yylexcontinue
+\let\yylexnext\empty
+
+\def\yyterminate{\yychar\YYEOF\yylval{}\yytext{}\yytext@{}\yytextpure{}\let\yylextail\yyparsetail}
+
+\def\yyerrterminate{\yylexreturn{$undefined}} %$
+
+\def\yyfatal#1{\yycomplain{#1}\yyerrterminate}
+
+\def\yywarn#1{\yycomplain{#1}\yylexnext}
+
+% \yyless is slow and the macros below have not been tested in their current
+% form: avoid them
+
+\def\yyless#1{%
+ \yyless@backup
+ \ifnum#1=\z@
+ \yyfmark=\formatmarker
+ \yysmark=\stashmarker
+ \ROLLBACKCURRENTTOKEN
+ \else
+ \let\oldyyreturn\yyreturn
+ \edef\yyreturn{\noexpand\yyskipnchars{\number#1}}%
+ \expandafter\yyinput\the\yytext@\yyeof
+ \fi
+}
+
+\def\yyless@backup{%
+ \yyformat\yylesslastformat
+ \yystash\yylesslaststash
+ \yysbyte{}\yyfbyte{}%
+ \formatmarker\yylesslastfmark
+ \stashmarker\yylesslastsmark
+}
+
+\def\yyless@updatestreams{%
+ \concat\yyformat\yyfbyte\yyfbyte{}%
+ \concat\yystash\yysbyte\yysbyte{}%
+}
+
+\def\yyskipnchars#1{%
+ \ifnum\yycp@=\YYENDOFBUFFERCHAR % read \yyeof
+ \yycomplain{yyless buffer overflow: #1 characters too many}%
+ \else
+ \yyless@updatestreams
+ \ifnum#1=\@ne % skipped the required number of tokens
+ \yybreak@\yyl@ss
+ \else
+ \edef\yyreturn{\noexpand\yyskipnchars{\xdecrement{#1}}}%
+ \yybreak@\yyinput
+ \fi
+ \yycontinue
+}
+
+\def\yyl@ss#1\yyeof{%
+ \yyfmark=\formatmarker
+ \yysmark=\stashmarker
+ \yylesslaststash\yystash
+ \yylesslastformat\yyformat
+ \unput{#1}%
+ \let\yyreturn\oldyyreturn
+}
+
+% \unput has slightly different semantics from \flex's unput() since
+% it puts back an arbitrary string rather than a single character; it also clobbers
+% the values of \yytext, \yytext@, and \yytextpure, so these must be saved before
+% \unput is used
+
+\def\unput#1{%
+ \yytext@{#1}%
+ \concatl\yytext@\yytext@seen
+ \yytext{}\yytext@{}\yytextpure{}%
+% \yystringempty{#1}{}{\yytextbackuptrue}% TODO: just set \yytextbackuptrue
+ \yytextbackuptrue
+}
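+
+% a minimal sketch of the save-before-\unput convention described above
+% (\savedyytext is a hypothetical token register, not part of this file):
+%
+% \savedyytext=\yytext % preserve the current match
+% \unput{abc}% push the string `abc' back onto the input
+% \yytext=\savedyytext % restore the match if it is still needed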
+
+%#define ROLLBACK_CURRENT_TOKEN \
+% do { \
+% scanner_cursor.column -= mbsnwidth (yytext, yyleng, 0); \
+% yyless (0); \
+% } while (0)
+
+\def\ROLLBACKCURRENTTOKEN{% this does not reset the streams
+ \concatl\yytext\yytextseen
+ \concatl\yytext\yytext@seen % do not rollback collected streams
+ \yytext{}\yytext@{}\yytextpure{}%
+ \yytextbackuptrue
+}
diff --git a/support/splint/tex/fretokenset.sty b/support/splint/tex/fretokenset.sty
new file mode 100644
index 0000000000..6dfba7ea58
--- /dev/null
+++ b/support/splint/tex/fretokenset.sty
@@ -0,0 +1,21 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+% typesetting regular expression names in \flex's scanner
+
+\prettywordpairwvis{WS}{\.{\ }${}_+$}{ +}
+\prettywordpairwvis{NL}{\hbox{$\hookleftarrow$}}{\n}
+\prettywordpairwvis{OPTWS}{\.{\ }${}_*$}{ *}
diff --git a/support/splint/tex/ftokenset.sty b/support/splint/tex/ftokenset.sty
new file mode 100644
index 0000000000..9eb5fbd70d
--- /dev/null
+++ b/support/splint/tex/ftokenset.sty
@@ -0,0 +1,72 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+% typesetting tokens in \flex's grammar
+
+\prettywordpairwvis{CHAR}{{\bf char}}{char}
+\prettywordpairwvis{NUMBER}{{\bf num}}{num}
+\prettywordpairwvis{NAME}{{\cyr\lqq{\rm name}\rqq}}{name}
+\prettywordpairwvis{OPTION_OP}{\hbox{$\langle\hbox{\.{option}}\rangle$}}{option}
+\prettywordpairwvis{EOF_OP}{\hbox{$\langle\hbox{\.{EOF}}\rangle$}}{EOF}
+
+\def\prettyccepairwvis#1#2{%
+ \prettywordpairwvis{#1}{\hbox{$\csname xccl@#1\endcsname$}}{#2}%
+ \prettyccep@irwvis#1.{#2}%
+}
+
+\def\prettyccep@irwvis CCE_#1.#2{%
+ \prettywordpairwvis{CCE_NEG_#1}{\hbox{$\csname xccl@CCE_NEG_#1\endcsname$}}{neg #2}%
+}
+
+\prettyccepairwvis{CCE_ALNUM}{alphan}
+\prettyccepairwvis{CCE_ALPHA}{alpha}
+\prettyccepairwvis{CCE_BLANK}{ .}
+\prettyccepairwvis{CCE_GRAPH}{beta}
+\prettyccepairwvis{CCE_DIGIT}{0}
+\prettyccepairwvis{CCE_XDIGIT}{0..Z}
+\prettyccepairwvis{CCE_SPACE}{ }
+\prettyccepairwvis{CCE_LOWER}{a..z}
+\prettyccepairwvis{CCE_UPPER}{A..Z}
+\prettyccepairwvis{CCE_PRINT}{zzz}
+\prettyccepairwvis{CCE_PUNCT}{.}
+\prettyccepairwvis{CCE_CNTRL}{->}
+%
+\prettywordpairwvis{BEGIN_REPEAT_POSIX}{{\.{\{}$_{\rm p}$}}{\lbchar p}
+\prettywordpairwvis{END_REPEAT_POSIX}{{\.{\}}$_{\rm p}$}}{\rbchar p}
+\prettywordpairwvis{BEGIN_REPEAT_FLEX}{{\.{\{}$_{\rm f}$}}{\lbchar f}
+\prettywordpairwvis{END_REPEAT_FLEX}{{\.{\}}$_{\rm f}$}}{\rbchar f}
+\prettywordpairwvis{CCL_OP_DIFF}{\hbox{$\setminus$}}{\benignescape}
+\prettywordpairwvis{CCL_OP_UNION}{\hbox{$\cup$}}{U}% see lstokenset.sty for setting up context-dependent indexing/display
+\prettywordpairwvis{OPT_OUTFILE}{\hbox{$\langle\hbox{\.{outfile}}\rangle$}}{outfile}
+\prettywordpairwvis{OPT_EXTRA_TYPE}{\hbox{$\langle\hbox{\.{extra type}}\rangle$}}{extra type}
+\prettywordpairwvis{OPT_PREFIX}{\hbox{$\langle\hbox{\.{prefix}}\rangle$}}{prefix}
+\prettywordpairwvis{OPT_YYCLASS}{\hbox{$\langle\hbox{\.{yyclass}}\rangle$}}{yyclass}
+\prettywordpairwvis{OPT_HEADER}{\hbox{$\langle\hbox{\.{header}}\rangle$}}{header}
+\prettywordpairwvis{OPT_TABLES}{\hbox{$\langle\hbox{\.{tables}}\rangle$}}{tables}
+
+\prettywordpairwvis{TOP_OP}{\hbox{$\langle\hbox{\.{top}}\rangle$}}{top_op}
+\prettywordpairwvis{POINTER_OP}{\hbox{$\langle\hbox{\.{pointer*}}\rangle$}}{pointer_op}
+\prettywordpairwvis{ARRAY_OP}{\hbox{$\langle\hbox{\.{array}}\rangle$}}{array_op}
+\prettywordpairwvis{DEF_OP}{\hbox{$\langle\hbox{\.{def}}\rangle$}}{def_op}
+\prettywordpairwvis{OPT_OTHER}{\hbox{$\langle\hbox{\.{other}}\rangle$}}{other_op}
+\prettywordpairwvis{OPT_DEPRECATED}{\hbox{$\langle\hbox{\.{deprecated}}\rangle$}}{deprecated_op}
+\prettywordpairwvis{RE_DEF}{\hbox{$\langle\hbox{\.{def}$_{\rm re}$}\rangle$}}{def_re}
+
+\prettywordpairwvis{SCDECL}{\hbox{$\langle\hbox{\bf state}\rangle$}}{state}
+\prettywordpairwvis{XSCDECL}{\hbox{$\langle\hbox{\bf xtate}\rangle$}}{xtate}
+\prettywordpairwvis{\\n}{\hbox{{\sixpoint\.{\\}}\.{n}}}{\benignescape n}
+\prettywordpairwvis{-}{\hbox{\rm--}}{--}
+
diff --git a/support/splint/tex/gindex.sty b/support/splint/tex/gindex.sty
new file mode 100644
index 0000000000..9d8ed2d334
--- /dev/null
+++ b/support/splint/tex/gindex.sty
@@ -0,0 +1,522 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+% indexing macros
+
+% set up the list of unindexable terms
+
+\ifx\unindexable\UNDEFINED
+ \def\unindexable{{$\TeXx$}{$\TeXa$}{$\TeXb$}{$\TeXf$}{$\TeXao$}{$\TeXfo$}}%
+\else
+ \expandafter\def\expandafter\unindexable\expandafter
+ {\unindexable{$\TeXx$}{$\TeXa$}{$\TeXb$}{$\TeXf$}{$\TeXao$}{$\TeXfo$}}%
+\fi
+
+\def\TeXx{\hbox{\let\_\UL\tt\TeX\_}}
+\def\TeXa{\hbox{\tt\TeX\rm(a)}}
+\def\TeXb{\hbox{\tt\TeX\rm(b)}}
+\def\TeXf{\hbox{\tt\TeX\rm(f)}}
+\def\TeXao{\hbox{\tt\TeX\rm(ao)}}
+\def\TeXfo{\hbox{\tt\TeX\rm(fo)}}
+
+% macros to determine if the current term is indexable
+
+\newif\ifindexverbose
+
+\def\yyifindexable#1{%
+ {%
+ \def\n@xt{#1}%
+ \expandafter\s@tindexable\unindexable\relax\end % the \relax is to preserve the braces
+ % in the last list element
+ }%
+}
+
+\def\s@tindexable#1#2\end{%
+ {%
+ \def\next{#1}%
+ \ifx\next\n@xt % match found, stop
+ \yybreak{\aftergroup\s@t@ndexable}%
+ \else
+ \yybreak{%
+ \yystringempty{#2}{% unindexable sequence list has been exhausted, stop
+ \aftergroup\s@t@nd@xable
+ }{% no match yet, continue
+ \aftergroup\s@tindexable
+ }%
+ }%
+ \yycontinue
+ }%
+ #2\end
+}
+
+\def\s@t@ndexable#1\end{\aftergroup\yysecondoftwo}
+
+\def\s@t@nd@xable#1\end{\aftergroup\yyfirstoftwo}
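+
+% the macros above are used in the pattern below (cf.\ \makeoneindexentry):
+% the first branch is taken when the term is indexable, the second when it
+% appears on the \unindexable list
+%
+% \yyifindexable{<term>}{<typeset a full entry>}{<typeset a filtered entry>}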
+
+% \Cee\ index entry (produced by \CWEAVE\ and typeset directly from the file)
+
+\def\I#1, #2.{%
+ \makeoneindexentry{#1}{#1}{#2}{\setxreflistplain}{\filterxrefsplain}%
+}
+
+% other language index entries (produced by \GI, \FI, and \HI macros)
+
+% the standard reference format (no page number references)
+
+\def\Ji#1#2, #3.{% #1 is the `visible key', #2 and #3 are similar to #1 and #2 in \I above
+ \makeoneindexentry{#1}{#2}{#3}{\setxreflistplain}{\filterxrefsplain}%
+}%
+
+% the `fine' reference format
+
+\def\Fi#1#2, #3.{% #1 is the `visible key', #2 and #3 are similar to #1 and #2 in \I above
+ \makeoneindexentry{#1}{#2}{#3}{\setxreflist}{\filterxrefs}%
+}%
+
+% special index entries: redirects, etc. (generated by bindx.pl)
+
+\def\Jk#1#2, #3.{%
+ \makeoneindexentry{#1}{#2}{#3}{\yyid}{\eatone}%
+}%
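+
+% a hypothetical entry in the \Ji format above (the key, its typeset form,
+% and the section numbers are made up for illustration only):
+%
+% \Ji{yyparse}{\.{yyparse}}, 5, 12.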
+
+\def\makeoneindexentry#1#2#3#4#5{% #1 is the `visible key'
+ % #2 is the term (including typesetting)
+ % #3 is the list of references
+ % #4 is the command to process #3
+ % #5 is the command to filter and process #3
+ \yyifindexable{#1}{\oneindexentry{#2}{#3}{#4}}{%
+ \ifindexverbose{\toksa{#1}\message{filtering term: \the\toksa}}\fi
+ \oneindexentryfiltered{#2}{#3}{#5}%
+ }%
+}
+
+% insert the original control sequence name after the `graphic' version (\thisnamex
+% is set by yytexlex.sty macros)
+
+\def\defypostambleshowcs{{ \rm(}\hbox{\sixpoint\tt\char`\\}\.{\thisnamex}{\rm)}}%
+
+% let the author set the format for a cross reference list.
+
+\def\setxreflistplain#1{%
+ \ifacro\pdfnote#1.\else#1.\fi
+}
+
+\def\oneindexentry#1#2#3{%
+ \checkforninecs#1\9\end{%
+ \the\toksg\toksg{}% set the index section header
+ \let\defypreamble\empty
+ \let\defypostamble\defypostambleshowcs
+ \hangindent\inxhangindent\noindent\strut#1:%
+ \hskip0pt plus2em\penalty1000\hskip0pt plus-2em\relax\kern\inxaiskip
+ #3{#2}%
+ \par
+ }{%
+ \toksg{#1}%
+ }%
+}
+
+\def\filterxrefsplain#1{\setxrefsplain{\f@lterdefsplain}{#1}}
+
+\def\filterxrefs#1{\setxrefs{\f@lterdefs}{#1}}
+
+\def\oneindexentryfiltered#1#2#3{%
+ \the\toksg\toksg{}% set the index section header
+ \let\defypreamble\empty
+ \let\defypostamble\defypostambleshowcs
+ \hangindent\inxhangindent\noindent\strut#1:%
+ \hskip0pt plus2em\penalty1000\hskip0pt plus-2em\relax\kern\inxaiskip
+ #3{#2}%
+ \par
+}
+
+\def\checkforninecs#1\9#2\end{%
+ \yystringempty{#2}{\yyfirstoftwo}{\yysecondoftwo}%
+}
+
+\def\filterdefs#1#2{%
+ #1{}{}, #2, \[]{}% the last empty braces are not necessary but this way
+ % both \f@lterdefs and \f@lterdefsplain can be defined
+}
+
+\def\f@lterdefsplain#1#2#3, \[#4]{%
+ \yystringempty{#4}{ {#1}{#2}}{%
+ \yystringempty{#1}{%
+ \f@lterdefsplain{\[#4]}{#2#3}%
+ }{%
+ \f@lterdefsplain{#1, \[#4]}{#2#3}%
+ }%
+ }%
+}
+
+\def\f@lterdefs#1#2#3, \[#4]#5{%
+ \yystringempty{#4}{ {#1}{#2}}{%
+ \yystringempty{#1}{%
+ \f@lterdefs{\[#4]{#5}}{#2#3}%
+ }{%
+ \f@lterdefs{#1, \[#4]{#5}}{#2#3}%
+ }%
+ }%
+}
+
+\def\setxrefs#1#2{% typeset filtered references (with page references)
+ \expandafter\s@txrefs\romannumeral0\filterdefs{#1}{#2}%
+}
+
+\def\s@txrefs#1#2{%
+ \yystringempty{#1}{%
+ several refs.%
+ }{%
+ \setxreflist{#1}%
+ \yystringempty{#2}{}{, other refs.}%
+ }%
+}
+
+\def\setxrefsplain#1#2{% typeset filtered references (without page references)
+ \expandafter\s@txrefsplain\romannumeral0\filterdefs{#1}{#2}%
+}
+
+\def\s@txrefsplain#1#2{%
+ \yystringempty{#1}{%
+ several refs.%
+ }{%
+ \ifacro\pdfnote#1.\else#1.\fi
+ \yystringempty{#2}{}{, other refs.}%
+ }%
+}
+
+\def\setxreflist#1{%
+ \yystringempty{#1}{}{%
+ \grabfinexrefs{}, #1, {}{}%
+ }%
+}
+
+\def\grabfinexrefs#1, #2#3#{% collect (and process) fine references (i.e. references like lnnnr{p1, p2, ... })
+ \yystringempty{#2#3}{% this is the last reference, clean up (the last empty group is left unchanged)
+ \expandafter\relax\eatacomma#1%
+ }{%
+ \ifcat\noexpand#20% this is a bare number
+ \yybreak{\consumeonexref{{}{}{#1}}{#2#3}}%
+ \else % this is a qualified number
+ \yybreak{\stripxrefdelims{{#2}{#1}}{}#3}%
+ \yycontinue
+ }%
+}
+
+\def\eatacomma, {}
+
+% get the right delimiter; we assume that at least one token is present
+
+\def\stripxrefdelims#1#2#3{%
+ \ifnum`#3<`0
+ \yybreak{\consumeonexref{{#3}#1}{#2}}% this is a delimiter
+ \else
+ \ifnum`#3>`9
+ \yybreak@{\consumeonexref{{#3}#1}{#2}}% this is a delimiter
+ \else % `0<=`#3<=`9
+ \yybreak@{\stripxrefdelims{#1}{#2#3}}% keep looking for the next digit
+ \fi
+ \yycontinue
+}
+
+\ifx\consumeonexref\UNDEFINED
+ \def\consumeonexref#1#2#3{% #1 is the accumulated references
+ % #2 is the section number
+ % #3 is the list of pages
+ \addnewpair#1{#2}%
+ }
+\fi
+
+% ignore paging information
+
+\def\addnewpair#1#2#3#4{% #1 is the right delimiter
+ % #2 is the left delimiter
+ % #3 is the list of references
+ % #4 is the section number
+ \grabfinexrefs{#3, #2\compoundlink{#4}{#4}#1}%
+}
+
+
+\def\compoundlink#1#2{%
+ \ifacro
+ \pdflink{#1}{#2}%
+ \else
+ #2%
+ \fi
+}
+
+\def\pagelink#1{%
+ \ifacro
+ \pdfpagelink{#1}% use the section number
+ \else
+ #1%
+ \fi
+}
+
+% indexing macros for grammar terms
+
+\def\termidstring#1{% processed name in italics
+ \numberstocharsandspaces#1\end
+ \let\optstrextra\optstrextraesc
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \def\idxentry{{\it\the\toksa}}%
+}%
+
+\def\termvstring#1{% processed name in typewriter style
+ \numberstochars#1\end
+ \let\optstrextra\optstrextraesc
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \def\idxentry{{\tt\def\_{\char`\_}\the\toksa}}%
+}%
+
+\def\termttstring#1{% straightforward typewriter text
+ \numberstocharsandspaces#1\end
+ \def\idxentry{{\tt\the\toksa}}%
+}%
+
+\def\termhostidstring#1{% processed name in italics (using the host name parser)
+ \numberstocharsandspaces#1\end
+ \let\optstrextra\optstrextraesc
+ {%
+ \expandafter\let\expandafter\tosmallparser
+ \csname to\stripbrackets\hostparsernamespace parser\endcsname
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \aftergroup\toksa
+ \expandafter
+ }\expandafter{\the\toksa}%
+ \def\idxentry{{\it\the\toksa}}%
+}%
+
+\def\termhostvstring#1{% processed name in typewriter style (using the host name parser)
+ \numberstocharsandspaces#1\end
+ \let\optstrextra\optstrextraesc
+ {%
+ \expandafter\let\expandafter\tosmallparser
+ \csname to\stripbrackets\hostparsernamespace parser\endcsname
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \aftergroup\toksa
+ \expandafter
+ }\expandafter{\the\toksa}%
+ \def\idxentry{{\tt\def\_{\char`\_}\the\toksa}}%
+}%
+
+\def\termostring#1{% options (e.g. \flex\ and \bison\)
+ \numberstocharsandspaces#1\end
+ \def\idxentry{{$\langle$\bf\the\toksa$\rangle$}}%
+}%
+
+\def\termfsrestring#1{% flex regular expression definition names
+ \numberstocharsandspaces#1\end
+ \let\optstrextra\optstrextraesc
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \def\idxentry{{\def\_{\char`\_}\flexrendisplay{\the\toksa}}}%
+}%
+
+\def\termfsopstring#1{% flex option names
+ \numberstocharsandspaces#1\end
+ \let\optstrextra\optstrextraesc
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \def\idxentry{{\def\_{\char`\_}\hbox{\tt$\langle$\the\toksa$\rangle_{\rm f}$}}}%
+}%
+
+\def\termstring#1{%
+ \numberstocharsandspaces#1\end
+ \let\optstrextra\optstrextraesc
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \def\idxentry{{\tt"\the\toksa"}}%
+}%
+
+\def\termexception#1{% special names
+ \numberstocharsandspaces#1\end
+ \toksc\toksa
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \ifyyparsefail
+ \expandafter\ifx\csname\prettynamecs\hostparsernamespace{\the\toksa}\endcsname\relax
+ \errmessage{The name \the\toksc\space is exceptional but is not defined.}%
+ \else
+ \def\idxentry{{\it\csname\prettynamecs\hostparsernamespace{\the\toksa}\endcsname{#1}}}%
+ \fi
+ \else
+ \errmessage{The name \the\toksc\space is not exceptional.}%
+ \fi
+}%
+
+% \TeX\ control sequence output
+\def\texcsstring#1{%
+ \numberstocharsandspaces#1\end
+ \def\idxentry{%
+ \let\texnspace\hostparsernamespace
+ \def\getcescape{% an \ seen is really an \, and will not go through C string processing
+ \def\next{escape}%
+ \switchon\next\in\currentstate
+ }%
+ \termindexfalse\expandafter\inlineTeXx\expandafter{\expandafter/\the\toksa}%
+ }%
+}%
+
+\expandafter\def\csname acharswitch:index\endcsname{% correct reserved \TeX\ characters a la CWEB verbatim
+ %$\%\\ % unaffected
+ %\#\ % these never appear
+ _{%
+ \yybyte\expandafter{\csname \the\yybyte\endcsname}%
+ \expandafter\yycp@\expandafter`\the\yybyte\relax
+ \mkpurebyte
+ \yyreturn
+ }
+}%
+
+\expandafter\setspecialcharsfrom\csname acharswitch:index\endcsname
+
+\def\texlexerindex{% now that all character codes are 12
+ \let\default\yygetchar
+ \let\next\yycp@
+ \ifnum\yycp@>"3F %
+ \ifnum\yycp@<"5B % an uppercase letter or @
+ \def\next{letter}%
+ \fi
+ \fi
+ \ifnum\yycp@>"60 %
+ \ifnum\yycp@<"7B %
+ \def\next{letter}%
+ \fi
+ \fi
+ \switchonwithtype\next\in\currentstate
+}%
+
+\def\indexseparator#1#2{% generic separator
+ \vskip.5\baselineskip
+ \centerline{\dinkus}%
+ \vskip.5\baselineskip
+}
+
+\def\indexseparator#1#2{%
+ \vskip.3\baselineskip
+ \centerline{\csname index domain translation [#1]\endcsname}%
+ \vskip.3\baselineskip
+}
+
+\expandafter\def\csname index domain translation [F]\endcsname{{\sc FLEX INDEX}}
+\expandafter\def\csname index domain translation [T]\endcsname{{\sc\TeX\ INDEX}}
+
+\def\indexsection#1{%
+ \vskip.3\baselineskip
+ \penalty-1000
+ \hbox to \hsize{\strut\ssfbn #1\ \cdotfill}%
+ \penalty10000
+ \vskip.2\baselineskip
+}
+
+\def\9#1{%
+ \indexs@ction#1\end
+}
+
+\def\indexs@ction#1#2\end{
+ \indexsection{\uppercase{#1}}%
+}
+
+\def\otherlangindexseparator{%
+ \enddoublecols
+ \vskip.8\baselineskip
+ \centerline{B{\sc ISON}, F{\sc LEX, AND} \TeX\ {\sc INDICES}}%
+ \vskip.5\baselineskip
+ \begindoublecols
+}
+
+% general index entries (generated by bindx.pl)
+
+\def\GI#1#2#3#4.{% raw index entries (standard format, with no page references)
+ {%
+ \edef\hostparsernamespace{\yysecondoftwo#1}%
+ \edef\currentrulecontext{\yyfirstoftwo#1}%
+ \toksa{}\numberstocharsandspaces#3\end
+ \edef\indexkeyseq{\the\toksa}%
+ \toksa{}#2{#3}%
+ \expandafter\Ji\expandafter{\indexkeyseq}{\idxentry}#4.%
+ }%
+}%
+
+\def\FI#1#2#3#4.{% raw index entries (fine format)
+ {%
+ \edef\hostparsernamespace{\yysecondoftwo#1}%
+ \edef\currentrulecontext{\yyfirstoftwo#1}%
+ \toksa{}\numberstocharsandspaces#3\end
+ \edef\indexkeyseq{\the\toksa}%
+ \toksa{}#2{#3}%
+ \expandafter\Fi\expandafter{\indexkeyseq}{\idxentry}#4.%
+ }%
+}%
+
+\def\HI#1#2#3#4{% special raw index entries
+ {%
+ \def\hostparsernamespace{#2}%
+ \let\defypreamble\empty
+ \let\defypostamble\empty
+ \toksa{}\numberstocharsandspaces#4\end
+ \edef\indexkeyseq{\the\toksa}%
+ \toksa{}#3{#4}%
+ \expandafter\Jk\expandafter{\indexkeyseq}{\idxentry}, see
+ {\tt\hbox{\sixpoint\tt\char`\\}\indexkeyseq}.%
+ }%
+}%
+
+% reference styles (ordinary terms are set in roman face)
+
+\def\[#1]{{\it#1}} % term definitions (such as lhs in productions)
+\def\(#1){$\underline{#1}$} % declarations (such as token declarations), underlined index item
+\def\(#1){{\bf #1}} % declarations, an alternative to the above
+\def\e#1e{#1{\sevenpoint$^\circ\!$}} % terms in examples
+\def\f#1f{{\it#1\/\kern.2ex}${}^\circ\!$} % lhs in examples (italic correction does not seem to be enough)
+\def\g#1g{$\underline{#1}^\circ\!$} % declarations in examples
+\def\g#1g{{\bf #1}$^\circ\!$} % declarations in examples, an alternative to the above
+
+\def\inxhangindent{1em}
+\def\inxaiskip{.5em}
+\def\inxicgap{5pt}
+
+\def\inxmod{% new indexing macro
+ \write\cont{} % ensure that the contents file isn't empty
+ \write\cont{\catcode `\noexpand\@=12\relax} % \makeatother
+ \closeout\cont % the contents information has been fully gathered
+ \message{Index:}
+ \medskip
+ \eightpoint\raggedright
+ \fnotesstart=2
+ \fnotesspan=1
+ \noofcolumns=3
+ \icgap=\inxicgap%
+ \linecount=3
+ \setmcparams
+ \dsskip=0pt%
+ \adjskip=0pt plus 9pt%
+ % \TeX\ control sequence output
+ \expandafter\let\expandafter\acharswitch\csname acharswitch:index\endcsname
+ \let\texlexer\texlexerindex
+ \let\*=\lapstar
+ \begindoublecols
+ \readindex
+ \otherlangindexseparator
+ \readgindex
+}
+
+\newread\trygindex
+
+\def\readgindex{%
+ \openin\trygindex=\jobname.gdy
+ \ifeof\trygindex
+ \else
+ \closein\trygindex
+ \input \jobname.gdy
+ \fi
+}
diff --git a/support/splint/tex/grabstates.sty b/support/splint/tex/grabstates.sty
index 6fa805168a..1024d99432 100644
--- a/support/splint/tex/grabstates.sty
+++ b/support/splint/tex/grabstates.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -18,15 +18,27 @@
\input yycommon.sty % general routines for stack and array access
\input yymisc.sty % helper macros (stack manipulation, table processing, value stack pointers)
+ % parser initialization, optimization
\input yyinput.sty % input functions
\input yyparse.sty % parser machinery
\input flex.sty % lexer functions
\input yyfaststack.sty % sped up stack access functions
-\input yyboth.sty % parser initialization, optimization
\input yyunion.sty
+\def\yycomplain#1{\immediate\write16{#1}} % lexer errors
+
+% modify the input routine to recognize \yyendgame; the somewhat verbose end of
+% the section is necessary to gracefully handle parser failures: the \endparse
+% control sequence must appear outside of any \vb block for \cleanupparse
+% to do its job; the \yyinput reads \vb blocks in pairs so the closing \vb{} is
+% necessary and is removed by \removefinalvb (see limbo.sty and brack.pl for
+% details).
+
+\expandafter\def\expandafter\multicharswitch\expandafter
+{\multicharswitch\yyendgame{\yyinput\yyeof\yyeof\endparseinput\removefinalvb}}
+
\genericparser
- name: main,
+ name: grabstates,
ptables: cweb/byytab.tex,
ltables: cweb/lstab.tex,
tokens: {},
@@ -35,35 +47,54 @@
rsetup: {},
optimization: \optimizeall;%
-\let\parsernamespace\mainnamespace
+\let\parsernamespace\grabstatesnamespace
\let\yylexreturn\yylexreturnregular
-% main section macro
+% stage two parsing macros
\let\nx\noexpand
-\expandafter\def\csname parserstack[b]\endcsname#1#2{%
- \basicparserinit\yyparse#1\yyeof\yyeof\endparseinput\endparse
- \ifyyparsefail % do nothing if parsing failed
- \yybreak{}%
- \else % Stage three, process the parsed table
- \yybreak{%
- \restorecslist{bootstrap}\yyunion
- \the\table\relax
- }%
- \yycontinue
+\def\preparsegrabstates{%
+ \let\postparse\postparsegrabstates
+ \basicparserinit
+ \yyparse
}
-\expandafter\def\csname parserstack[]\endcsname#1#2{%
- \basicparserinit\yyparse#1\yyeof\yyeof\endparseinput\endparse
+\def\postparsegrabstates{%
\ifyyparsefail % do nothing if parsing failed
\yybreak{}%
\else % Stage three, process the parsed table
\yybreak{%
- \restorecslist{bootstrap}\yyunion
+ \restorecslist{bootstrap}\yyunion % TODO: this will not work in fs1 sections
\the\table\relax
}%
\yycontinue
}
+\fillpstack{b}{%
+ \preparsegrabstates
+ \relax
+}
+
+\fillpstack{fs1}{%
+ \preparsegrabstates
+ \relax
+}
+
+% ignore in-text production examples (there is no way or need to parse them anyway)
+
+\long\def\beginprod#1\endprod{}
+
+\let\begincprod\beginprod
+
+\def\nameproc#1\with#2{%
+ #2{}{}{}{}{}% pretend the name is empty
+}
+
+\def\frexproc#1\with#2{%
+ #2{}{}{}% pretend the regex is empty
+}
+
+\def\prodstyle#1{}
+
\newwrite\stlist
diff --git a/support/splint/tex/hext.sty b/support/splint/tex/hext.sty
new file mode 100644
index 0000000000..433d53ac5c
--- /dev/null
+++ b/support/splint/tex/hext.sty
@@ -0,0 +1,346 @@
+% macros to typeset hex values in customized form (useful when typesetting embedded code)
+
+% the line translator: take a sequence of tokens and translate it into another sequence
+% #1 is the sequence of tokens (will be expanded) to insert at the beginning
+% #2 is the sequence of tokens (will be expanded) to insert before every token in the sequence
+% #3 is the first sequence of tokens (will be expanded) to insert between tokens in the sequence
+% #4 is the second sequence of tokens (will be expanded) to insert between tokens in the sequence
+% #5 is the sequence of tokens (will be expanded) to insert at the end
+% #6 is the command to execute after each token
+% #7 is original sequence
+% #8 is the resulting sequence
+
+\def\linetranslator#1#2#3#4#5#6:#7>#8{%
+ \def\ins@rt{#2}\def\insert@{#3}\def\@insert{#4}%
+ \def\@nd{#5}\def\c@mmand{#6}\let\t@kens#8%
+ \edef\next{\t@kens{\the\t@kens#1}}\next
+ \expandafter\linetr@nslator\the#7\end}
+
+\def\linetr@nslator#1#2{%
+ \ifx#1\end
+ \let\next\relax % no more tokens left
+ \else
+ \ifx#2\end % #1 is the last token
+ \edef\next{\t@kens{\the\t@kens\ins@rt#1\@nd}}\next
+ \else
+ \edef\next{\t@kens{\the\t@kens\ins@rt#1\insert@\@insert}}\next
+ \c@mmand
+ \fi
+ \let\next\linetr@nslator
+ \fi\next#2
+}
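+
+% a usage sketch mirroring \regdisplay below: translate the tokens collected
+% in \bytes into table cells appended to \digittable (both registers are
+% defined further down in this file)
+%
+% \linetranslator{}{\hrulefill\noexpand\eatone}{&$\,\,$}{&}{\stcr}{\incrementcc}:\bytes>\digittable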
+
+% the mask translator: take a sequence of pairs {length:number}{contents} and translate it into another sequence
+% #1 is the sequence of tokens (will be expanded) to insert at the beginning
+% #2 is the sequence of tokens (will be expanded) to insert before every pair in the sequence
+% #3 is the first sequence of tokens (will be expanded) to insert between pairs in the sequence
+% #4 is the second sequence of tokens (will be expanded) to insert between pairs in the sequence
+% #5 is the sequence of tokens (will be expanded) to insert at the end
+% #6 is the command to execute after each pair
+% #7 is original sequence
+% #8 is the resulting sequence
+
+\def\masktranslator#1#2#3#4#5#6:#7>#8{%
+ \def\ins@rt{#2}\def\insert@{#3}\def\@insert{#4}%
+ \def\@nd{#5}\def\c@mmand{#6}\let\t@kens#8%
+ \edef\next{\t@kens{\the\t@kens#1}}\next
+ \m@sktranslator#7{"40000000}\relax\relax}
+
+\def\m@sktranslator#1#2#3{%
+ \ifnum#1<"40000000 % only happens when there are no pairs
+ \ifnum#3<"40000000 % not the last pair
+ \edef\next{\t@kens{\the\t@kens\ins@rt{#1}{#2}\insert@\@insert}}\next
+ \c@mmand
+ \let\next\m@sktranslator
+ \else
+ \edef\next{\t@kens{\the\t@kens\ins@rt{#1}{#2}\@nd}}\next
+ \let\next\m@sktr@nslator
+ \fi
+ \else
+ \let\next\m@sktr@nslator
+ \fi
+ \next{#3}%
+}
+
+\def\m@sktr@nslator#1{}
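+
+% a usage sketch mirroring \regdisplay below: the pairs come from
+% \registerlegend (e.g.\ {8}{B} for a single eight-column legend, a made-up
+% value), so a call with that legend would read
+%
+% \masktranslator{}{\noexpand\m@sklegend}{&\hfil}{&}{\stcr}{\relax}:{{8}{B}}>\digittable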
+
+% binary converter
+
+\def\binary#1{%
+ \ifx#1\end
+ \let\next\relax
+ \else
+ \bin@ry#1\let\next\binary
+ \fi\next}
+
+\let\bits\toksa % storage for binary conversion bits
+\newtoks\bytes % storage for the binary representation
+\let\quotient\tempca
+\let\oldquotient\tempcb
+
+\def\bin@ry#1{\bits{}\tempcc=4 \quotient="#1 \oldquotient=\quotient
+\loop \divide\quotient by2 \multiply\quotient by2 \advance\oldquotient by-\quotient
+ \advance\tempcc by\m@ne
+ \edef\next{\bits{\the\oldquotient\the\bits}}\next
+ \divide\quotient by2 \oldquotient=\quotient
+\ifnum\tempcc > \z@
+\repeat
+\edef\next{\bytes{\the\bytes\the\bits}}\next}
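+
+% for example, after `\bytes{}\binary A3\end' the register \bytes holds the
+% digits 10100011: each hexadecimal digit of the argument is converted to
+% four bits and appended to \bytes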
+
+% display macros:
+%
+% o registers
+
+\newtoks\digittable % the table representing the register
+\newcount\columncount % counter for the current column
+
+% o fonts
+
+\font\fivessb=phvb at4pt
+
+% o generic control sequences
+
+\def\incrementcc{\advance\columncount by\@ne}
+\def\decrementcc{\advance\columncount by\m@ne}
+
+% o mask highlighting
+
+% the strange looking definition of \m@skspan originates from \multispan in plain.tex:
+% \newcount\mscount
+% \def\multispan#1{\omit \mscount#1\relax
+% \loop\ifnum\mscount>\@ne \sp@n\repeat}
+% \def\sp@n{\span\omit\advance\mscount\m@ne}
+%
+% the number of `physical' columns to span is (2 * columns - 1) due to the fact that we
+% have to span the gaps between columns as well.
+
+\def\m@skspan#1#2{%
+ \multispan{#1\advance\mscount by\mscount\advance\mscount by-1}{\vrule\highltcl{\leaders\hrule height 4pt\hfil}\vrule}%
+}
+
+% o mask underlining
+
+\def\m@sksp@n#1#2{%
+ \multispan{#1\advance\mscount by\mscount\advance\mscount by-1}{\hrulefill}%
+}
+
+% o mask legend typesetting
+
+\def\m@sklegend#1#2{%
+ \multispan{#1\advance\mscount by\mscount\advance\mscount by-1}{\hfil\lgndcl{\fivessb#2}\hfil}%
+}
+
+% o displaying each digit
+
+\def\dgtdispl@y#1#2{%
+ \ifx#20%
+ \let\colorbg\lightcl\let\colord\darkcl
+ \else
+ \let\colorbg\darkcl\let\colord\lightcl
+ \fi
+ \vrule\colorbg{\vrule height4pt width 12.5pt}%
+ \llap{\raise.6pt\hbox{\colord{\fivessb#1}$\,$}}\vrule
+}
+
+% o typesetting space between digits
+
+\def\c@lumnspacer#1{\tempca#1\tempcb\tempca\divide\tempca by8\multiply\tempca by8
+ \ifnum\tempca=\tempcb
+ \middlediamond
+ \else
+ \tempca#1\tempcb\tempca\divide\tempca by4\multiply\tempca by4
+ \ifnum\tempca=\tempcb
+ \middledot
+ \else
+ \hfil
+ \fi
+ \fi}
+
+% oo diamond to separate bytes
+
+\def\middlediamond{\setbox0=\hbox{$\scriptscriptstyle\diamond$}\tempda=4pt \advance\tempda by-\ht0
+ \divide\tempda by2 \hss\raise\tempda\box0\hss}%
+
+% oo dot to separate nybbles
+
+\def\middledot{\setbox0=\hbox{.}\tempda=4pt \advance\tempda by-\ht0
+ \divide\tempda by2 \hss\raise\tempda\box0\hss}%
+
+% o put it all together
+
+\let\stcr\cr % so that this can be used inside \tabalign which redefines \cr
+
+\def\regdisplay#1#2{{%
+ \binary#1\end
+ \linetranslator{}{\hrulefill\noexpand\eatone}{&$\,\,$}{&}{\stcr}{\incrementcc}:\bytes>\digittable
+ \linetranslator{}{\noexpand\dgtdispl@y{\the\columncount}}{&\noexpand\c@lumnspacer{\the\columncount}}{&}%
+ {\stcr}{\decrementcc}:\bytes>\digittable
+ \masktranslator{}{\noexpand\m@sksp@n}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
+ \masktranslator{}{\noexpand\m@skspan}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
+ \masktranslator{}{\noexpand\m@sksp@n}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
+ \expandafter\digittable\expandafter{\the\digittable\noalign{\vskip-3.8pt}}%
+ \masktranslator{}{\noexpand\m@sklegend}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
+ \vbox{\offinterlineskip
+ \halign{&##\stcr
+ \the\digittable
+ }
+ }%
+}}
+
+% colors
+
+\newif\iffullcolorpalette
+\fullcolorpalettetrue
+
+\iffullcolorpalette
+ \let\lightcl=\colorpeach
+ \let\shadedcl=\colorcorn
+ \let\highltcl=\colorgray
+ \let\lgndcl=\colorwhite
+\else
+ \let\lightcl=\colorwhite
+ \let\shadedcl=\colorgray
+ \let\highltcl=\colorwhite
+ \let\lgndcl=\colorblack
+% \let\colorlinkstart=\begingroup
+% \let\colorlinkend=\endgroup
+\fi
+ \let\darkcl=\colorblack
+
+% an alternate display
+
+\def\altmiddledot{\setbox0=\hbox{.}%
+ \tempda=\ht\thezero \advance\tempda by-\ht0
+ \divide\tempda by2 \advance\tempda by -\dp\thezero
+ $\,$\raise\tempda\box0$\,$%
+}
+
+\def\altmiddlediamond{\setbox0=\hbox{$\scriptscriptstyle\diamond$}%
+ \tempda=\ht\thezero \advance\tempda by-\ht0
+ \divide\tempda by2 \advance\tempda by -\dp\thezero
+ \hss\raise\tempda\box0\hss
+}
+
+\def\altdgtdispl@y#1#2{%
+ \ifx#20%
+ \let\colorbg\lightcl\let\colord\darkcl
+ \else
+ \let\colorbg\darkcl\let\colord\lightcl
+ \fi
+ \setbox0=\hbox{$\,$0$\,$}\tempda\ht0
+ \advance\tempda by 2pt \ht0\tempda
+ \vrule\colorbg{\lower1pt\hbox{\vrule height\ht0 depth\dp0 width \wd0}}%
+ \llap{\colord{\raise.5pt\hbox{\fivessb#1$\,$}}}\vrule
+}
+
+\def\altc@lumnspacer#1{\tempca#1\tempcb\tempca
+ \divide\tempca by4\multiply\tempca by4
+ \ifnum\tempca=\tempcb
+ \ %
+ \else
+ \kern1pt%
+ \fi
+}
+
+\def\bytedisplay#1{{%
+ \binary#1\end
+ \linetranslator{}{\hrulefill\noexpand\eatone}{&\hfil}{&}{\stcr}{\incrementcc}:\bytes>\digittable
+ \linetranslator{}{\noexpand\altdgtdispl@y{\the\columncount}}{&\noexpand\altc@lumnspacer{\the\columncount}}{&}%
+ {\stcr}{\decrementcc}:\bytes>\digittable
+ \linetranslator{}{\hrulefill\noexpand\eatone}{&\hfil}{&}{\stcr}{\incrementcc}:\bytes>\digittable
+ \lower\dp\thezero\vbox{\offinterlineskip
+ \halign{&##\stcr
+ \the\digittable
+ }
+ }%
+}}
+
+% macros to typeset the hex numbers in `graphic' format
+
+\newtoks\registerlegend
+
+\def\registerdisplay#1\end{\edef\next{\the\registerlegend}%
+ \ifx\next\empty
+ \edef\next{\noexpand\bytedisplay{#1}}\next
+ \else
+ \edef\next{\noexpand\regdisplay{#1}{\the\registerlegend}}\next
+ \fi
+}
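+
+% a hypothetical use (the legend text is made up): after
+% \registerlegend{{4}{MODE}{4}{FLAGS}} the call `\registerdisplay 5A\end'
+% draws the byte as a labelled register diagram via \regdisplay; with an
+% empty \registerlegend it falls back to the plain \bytedisplay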
+
+\newbox\thezero
+
+\setbox\thezero=\hbox{\lower1pt\hbox{\vbox{\offinterlineskip
+\halign{#&$\,$#$\,$&#\cr
+\noalign{\hrule}
+\omit\vrule height1pt&&\omit\vrule height1pt\cr
+\vrule&\phantom0&\vrule\cr
+\omit\vrule height1pt&&\omit\vrule height1pt\cr
+\noalign{\hrule}
+}}}}
+
+\def\texbinary{\global\let\oldT\T
+ \global\def\T##1{{\let\end\relax\let\^\registerdisplay##1\end{\tt :}\let\^\hexify##1\end}}}
+
+\def\endtexbinary{\global\let\T\oldT}
+
+\def\hexify#1\end{\hbox{{\tt #1}$_{\scriptscriptstyle1\kern-.7pt6}$}}
+
+% delay-function prettyprinting and other miscellanea;
+
+\def\delayhalfsec{\hbox{wait \kern-.5pt\raise1pt\hbox{$\scriptstyle1$}%
+\kern-1pt/\kern-1pt\lower1pt\hbox{$\scriptstyle2$} sec, }}
+\def\delayquartersec{\hbox{wait \kern-.5pt\raise1pt\hbox{$\scriptstyle1$}%
+\kern-1pt/\kern-1pt\lower1pt\hbox{$\scriptstyle4$} sec, }}
+
+\def\delay#1sec{%
+\hbox{\def\secmult{}wait
+\getsecmult#1\end
+\if\secmult u\def\secmodifier{ $\mu$\kern.05pt }\else
+\if\secmult m\def\secmodifier{ m}\else
+\if\secmult n\def\secmodifier{ n}\else
+ \secmult\def\secmodifier{ }%
+\fi\fi\fi
+\secmodifier sec$\rm\scriptstyle s$, }
+}
+
+\def\getsecmult#1{%
+\ifx#1\end\let\next\relax\else
+ \secmult\def\secmult{#1}\let\next\getsecmult
+\fi\next
+}
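+
+% for example, `\delay 500usec' typesets (roughly) as `wait 500 $\mu$secs,':
+% the last character before `sec' selects the unit modifier, and everything
+% before it is typeset as the count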
+
+% attribute prettyprinting;
+
+\long\def\parseattrib#1#2\end{\csname attrib#1\endcsname}
+
+\def\xxattributexx#1{\ifx(#1\let\next\xxattributex\else\let\next\xxattribute\fi\next#1}
+\def\xxattributex((\\#1{%
+ \expandafter\let\expandafter\currentattrib\csname attrib#1\endcsname
+ \ifx\currentattrib\attribpacked
+ \strut\colorblack{%
+ \lower2pt\hbox{\vrule width \wd\thezero height
+ \ht\thezero depth
+ \dp\thezero}}\llap{\colorwhite{\hbox{$\scriptscriptstyle\bf 01\kern.5pt$}}}%
+ \let\next\eatparentheses
+ \fi
+ \ifx\currentattrib\attribnoinline
+ {\ \bf noinline\rm_a}%
+ \let\next\eatparentheses
+ \fi
+ \ifx\currentattrib\attribformat
+ {\ \bf format\rm_a}%
+ \let\next\eatarguments
+ \fi
+ \next
+}
+
+\def\eatparentheses#1)){}
+\def\eatarguments(\\#1,#2,#3))){(\hbox{\rm as }\.{#1})}
+
+\def\xxattribute#1{\.{\_\_attribute\_\_} (\sc GNU) #1}
+
+\let\attribnoinline=1
+\let\attribpacked=2
+\let\attribformat=3
+
+%\registerlegend{{8}{\noexpand\phantom{B}}}
diff --git a/support/splint/tex/limbo.sty b/support/splint/tex/limbo.sty
index 1ab76629a4..c810cd55da 100644
--- a/support/splint/tex/limbo.sty
+++ b/support/splint/tex/limbo.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,7 +14,7 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-% limbo.sty version 2.3
+% limbo.sty version 3.0
\input epsf.tex
\input amssym.def
@@ -52,9 +52,29 @@
\font\seventitle=cmssbx7
\font\titlefrak=eufb10 at 12pt
+%\font\eightss=cmssq8
+%\font\eightssi=cmssqi8
+
\font\tenss=cmss10
\font\niness=cmss9
\font\eightss=cmss8
+\font\sevenss=cmss7
+\font\sixss=cmss6
+\font\fivess=cmss5
+
+\font\tenssn=cmssdc10
+\font\ninessn=cmssdc9
+\font\eightssn=cmssdc8
+\font\sevenssn=cmssdc7
+\font\sixssn=cmssdc6
+\font\fivessn=cmssdc5
+
+\font\tenssb=cmssbx10
+\font\ninessb=cmssbx9
+\font\eightssb=cmssbx8
+\font\sevenssb=cmssbx7
+\font\sixssb=cmssbx6
+\font\fivessb=cmssbx5
\font\twelvetitleit=cmssbxo10 at 12pt
\font\ninetitleit=cmssbxo10 at 9pt
@@ -75,10 +95,11 @@
\font\ninerm=cmr9
\font\eightrm=cmr8
\font\sevenrm=cmr7
+\font\sixrm=cmr6
+
\font\ninecyr=lhr9
\font\eightcyr=lhr8
\font\sevencyr=lhr7
-\font\sixrm=cmr6
\font\ninei=cmmi9
\font\eighti=cmmi8
@@ -90,10 +111,6 @@
\font\sixsy=cmsy6
\skewchar\ninesy='60 \skewchar\eightsy='60 \skewchar\sixsy='60
-\font\eightss=cmssq8
-
-\font\eightssi=cmssqi8
-
\font\twelvebf=cmbx12
\font\ninebf=cmbx9
\font\eightbf=cmbx8
@@ -102,6 +119,13 @@
\font\ninett=cmtt9
\font\eighttt=cmtt8
\font\seventt=cmtt7
+\font\sixtt=cmtt6
+
+\font\tentti=cmitt10 % typewriter italic
+\font\ninetti=cmitt9
+\font\eighttti=cmitt8
+\font\seventti=cmitt7
+\font\sixtti=cmitt6
\hyphenchar\tentt=-1 % inhibit hyphenation in typewriter type
\hyphenchar\ninett=-1
@@ -110,6 +134,7 @@
\font\ninesl=cmsl9
\font\eightsl=cmsl8
\font\sevensl=cmsl7
+\font\sixsl=cmsl6
\font\nineit=cmti9
\font\eightit=cmti8
@@ -137,11 +162,19 @@
\font\ninefrak=eufm9
\font\eightfrak=eufm8
\font\sevenfrak=eufm7
+\font\sixfrak=eufm6
\font\hv=phvr
+\font\dings=pzdr
+\font\dingssmall=pzdr at 8pt
+\chardef\pen='062
+\chardef\leaf='247
+\chardef\fancystar='122
+
+\newfam\ssfam
+\newfam\ssbnfam
\newfam\itbfam
-\newfam\frakfam
\newfam\msbmfam
\newfam\msamfam
@@ -183,26 +216,35 @@
\let\{=\LB % left brace in a string
\let\}=\RB % right brace in a string
}%
+ \def\tti{\fam\ttfam\tentti % switch to typewriter italic
+ \let\{=\LB % left brace in a string
+ \let\}=\RB % right brace in a string
+ }%
\def\ttex{\tentex
\let\{=\LB % left brace in a string
\let\}=\RB % right brace in a string
}%
\def\ttl{\let\it\tentitlei\let\sl\tentitlei\tentitle}%
- \def\ssf{\tenss}%
+ \def\ssf{\fam\ssfam\tenss}%
+ \textfont\ssfam=\tenss\scriptfont\ssfam=\sevenss
+ \def\ssfb{\fam\ssfam\tenssb}%
+ \def\ssfbn{\fam\ssbnfam\tenssb}%
+ \textfont\ssbnfam=\tenssn\scriptfont\ssfam=\sevenssn
\textfont\ttfam=\tentt
- \tt \ttglue=.5em plus.25em minus.15em
+ \fam\ttfam\tentt \ttglue=.5em plus.25em minus.15em
\def\Bbb{\fam\msbmfam\tenmsbm}%
\textfont\msbmfam=\tenmsbm\scriptfont\msbmfam=\sevenmsbm \scriptscriptfont\msbmfam=\fivemsbm
\textfont\msamfam=\tenmsam\scriptfont\msamfam=\sevenmsam
\scriptscriptfont\msamfam=\fivemsam
- \textfont\frakfam=\tenfrak
- \def\frak##1{{\fam\frakfam\tenfrak ##1}}%
+ \def\frak##1{{\tenfrak ##1}}%
\normalbaselineskip=12pt
\let\sc=\eightrm
+ \let\sci=\eightit
\let\mc=\ninerm
\let\cyr=\tencyr
\let\big=\tenbig
\setbox\strutbox=\hbox{\vrule height8.5pt depth3.5pt width0pt}%
+ \let\sscmd\sevenpoint
\normalbaselines\rm}
\def\ninepoint{\def\rm{\fam0\ninerm}%
@@ -223,26 +265,36 @@
\let\{=\LB % left brace in a string
\let\}=\RB % right brace in a string
}%
+ \def\tti{\fam\ttfam\ninetti % switch to typewriter italic
+ \let\{=\LB % left brace in a string
+ \let\}=\RB % right brace in a string
+ }%
\def\ttex{\ninetex
\let\{=\LB % left brace in a string
\let\}=\RB % right brace in a string
}%
\def\ttl{\let\it\ninetitleit\let\sl\ninetitleit\ninetitle}%
- \def\ssf{\niness}%
+ \def\ssf{\fam\ssfam\niness}%
+ \textfont\ssfam=\niness\scriptfont\ssfam=\sixss
+ \def\ssfb{\fam\ssfam\ninessb}%
+ %no room for another font family
+ \def\ssfbn{\fam\ssbnfam\ninessn}%
+ \textfont\ssbnfam=\ninessn\scriptfont\ssbnfam=\sixssn
\textfont\ttfam=\ninett
- \tt \ttglue=.5em plus.25em minus.15em
+ \fam\ttfam\ninett \ttglue=.5em plus.25em minus.15em
\def\Bbb{\fam\msbmfam\ninemsbm}%
\textfont\msbmfam=\ninemsbm\scriptfont\msbmfam=\sixmsbm \scriptscriptfont\msbmfam=\fivemsbm
\textfont\msamfam=\ninemsam\scriptfont\msamfam=\sixmsam
\scriptscriptfont\msamfam=\fivemsam
- \textfont\frakfam=\ninefrak
- \def\frak##1{{\fam\frakfam\ninefrak ##1}}%
+ \def\frak##1{{\ninefrak ##1}}%
\normalbaselineskip=11pt
\let\sc=\sevenrm
+ \let\sci\sevenit
\let\mc=\eightrm
\let\cyr=\ninecyr
\let\big=\ninebig
\setbox\strutbox=\hbox{\vrule height8pt depth3pt width0pt}%
+ \let\sscmd\sixpoint
\normalbaselines\rm}
\def\eightpoint{\def\rm{\fam0\eightrm}%
@@ -262,24 +314,34 @@
\let\{=\LB % left brace in a string
\let\}=\RB % right brace in a string
}%
+ \def\tti{\fam\ttfam\eighttti % switch to typewriter italic
+ \let\{=\LB % left brace in a string
+ \let\}=\RB % right brace in a string
+ }%
\def\ttex{\eighttex
\let\{=\LB % left brace in a string
\let\}=\RB % right brace in a string
}%
\textfont\ttfam=\eighttt
- \tt \ttglue=.5em plus.25em minus.15em
+ \fam\ttfam\eighttt \ttglue=.5em plus.25em minus.15em
+ \def\ssf{\fam\ssfam\eightss}%
+ \textfont\ssfam=\eightss\scriptfont\ssfam=\sixss
+ \def\ssfb{\fam\ssfam\eightssb}%
+ \def\ssfbn{\fam\ssbnfam\eightssb}%
+ \textfont\ssbnfam=\eightssn\scriptfont\ssbnfam=\sixssn
\def\Bbb{\fam\msbmfam\eightmsbm}%
\textfont\msbmfam=\eightmsbm\scriptfont\msbmfam=\sixmsbm \scriptscriptfont\msbmfam=\fivemsbm
\textfont\msamfam=\eightmsam\scriptfont\msamfam=\sixmsam
\scriptscriptfont\msamfam=\fivemsam
- \textfont\frakfam=\eightfrak
- \def\frak##1{{\fam\frakfam\eightfrak ##1}}%
+ \def\frak##1{{\eightfrak ##1}}%
\normalbaselineskip=9pt
\let\sc=\sixrm
+ \let\sci=\sixit
\let\mc=\sevenrm
\let\cyr=\eightcyr
\let\big=\eightbig
\setbox\strutbox=\hbox{\vrule height7pt depth2pt width0pt}%
+ \let\sscmd\sixpoint
\normalbaselines\rm}
\def\sevenpoint{\def\rm{\fam0\sevenrm}%
@@ -298,21 +360,92 @@
\let\{=\LB % left brace in a string
\let\}=\RB % right brace in a string
}%
+ \def\tti{\fam\ttfam\seventti % switch to typewriter italic
+ \let\{=\LB % left brace in a string
+ \let\}=\RB % right brace in a string
+ }%
\textfont\ttfam=\seventt
- \tt \ttglue=.5em plus.25em minus.15em
+ \fam\ttfam\seventt \ttglue=.5em plus.25em minus.15em
+ \def\ssf{\fam\ssfam\sevenss}%
+ \textfont\ssfam=\sevenss\scriptfont\ssfam=\sixss
+ \def\ssfb{\fam\ssfam\sevenssb}%
+ \def\ssfbn{\fam\ssbnfam\sevenssb}%
+ \textfont\ssbnfam=\sevenssn\scriptfont\ssbnfam=\sixssn
\def\Bbb{\fam\msbmfam\sevenmsbm}%
\textfont\msbmfam=\sevenmsbm\scriptfont\msbmfam=\sixmsbm \scriptscriptfont\msbmfam=\fivemsbm
\textfont\msamfam=\sevenmsam\scriptfont\msamfam=\sixmsam
\scriptscriptfont\msamfam=\fivemsam
- \textfont\frakfam=\sevenfrak
- \def\frak##1{{\fam\frakfam\sevenfrak ##1}}%
+ \def\frak##1{{\sevenfrak ##1}}%
\normalbaselineskip=8pt
\let\sc=\sixrm
+ \let\sci=\sixit
\let\mc=\sixrm
\let\big=\sevenbig
\setbox\strutbox=\hbox{\vrule height6pt depth2pt width0pt}%
\normalbaselines\rm}
+\def\sixpoint{\def\rm{\fam0\sixrm}%
+ \textfont0=\sixrm \scriptfont0=\fiverm \scriptscriptfont0=\fiverm
+ \textfont1=\sixi \scriptfont1=\fivei \scriptscriptfont1=\fivei
+ \textfont2=\sixsy \scriptfont2=\fivesy \scriptscriptfont2=\fivesy
+ \textfont3=\tenex \scriptfont3=\tenex \scriptscriptfont3=\tenex
+ \def\it{\fam\itfam\sixit}%
+ \def\itbold{\fam\itbfam\sixboldi}%
+ \textfont\itfam=\sixit
+ \def\sl{\fam\slfam\sixsl}%
+ \textfont\slfam=\sixsl
+ \def\bf{\fam\bffam\sixbf}%
+ \textfont\bffam=\sixbf \scriptfont\bffam=\fivebf\scriptscriptfont\bffam=\fivebf
+ \def\tt{\fam\ttfam\sixtt
+ \let\{=\LB % left brace in a string
+ \let\}=\RB % right brace in a string
+ }%
+ \def\tti{\fam\ttfam\sixtti % switch to typewriter italic
+ \let\{=\LB % left brace in a string
+ \let\}=\RB % right brace in a string
+ }%
+ \textfont\ttfam=\sixtt
+ \fam\ttfam\sixtt \ttglue=.5em plus.25em minus.15em
+ \def\ssf{\fam\ssfam\sixss}%
+ \textfont\ssfam=\sixss\scriptfont\ssfam=\fivess
+ \def\ssfb{\fam\ssfam\sixssb}%
+ \def\ssfbn{\fam\ssbnfam\sixssb}%
+ \textfont\ssbnfam=\sixssn\scriptfont\ssbnfam=\fivessn
+ \def\Bbb{\fam\msbmfam\sixmsbm}%
+ \textfont\msbmfam=\sixmsbm\scriptfont\msbmfam=\fivemsbm \scriptscriptfont\msbmfam=\fivemsbm
+ \textfont\msamfam=\sixmsam\scriptfont\msamfam=\fivemsam
+ \scriptscriptfont\msamfam=\fivemsam
+ \def\frak##1{{\sixfrak ##1}}%
+ \normalbaselineskip=8pt
+ \let\sc=\sixrm
+ \let\sci=\sixit
+ \let\mc=\sixrm
+ \let\big=\sixbig
+ \setbox\strutbox=\hbox{\vrule height5.5pt depth1.5pt width0pt}%
+ \normalbaselines\rm}
+
+\def\em{\let\tt\tti\let\sc\sci\it}
+
+% fonts for the chapter titles
+
+\font\cmhuge=cmssbx10 at .7in
+\font\tthuge=cmtt10 at .7 in
+
+\font\cmmiddle=cmss10 at .3in
+\font\ttmiddle=cmtt10 at .3in
+
+\def\hugetitle{%
+ \let\ssf\cmhuge
+ \let\tt\tthuge
+ \ssf
+}
+
+\def\midtitle{%
+ \let\ssf\cmmiddle
+ \let\tt\ttmiddle
+ \ssf
+}
+
% temporary registers
\input trt1.sty
@@ -416,244 +549,10 @@
\raise\ht\strutbox\vbox to 0pt{\vss\hbox to 0pt{\pdfdest name {#1} xyz}}%
\fi}
-% macros to typeset hex values in customized form (useful when typesetting embedded code)
-
-% the line translator: take a sequence of tokens and translate it into another sequence
-% #1 is the sequence of tokens (will be expanded) to insert at the beginning
-% #2 is the sequence of tokens (will be expanded) to insert before every token in the sequence
-% #3 is the first sequence of tokens (will be expanded) to insert between tokens in the sequence
-% #4 is the second sequence of tokens (will be expanded) to insert between tokens in the sequence
-% #5 is the sequence of tokens (will be expanded) to insert at the end
-% #6 is the command to execute after each token
-% #7 is original sequence
-% #8 is the resulting sequence
-
-\def\linetranslator#1#2#3#4#5#6:#7>#8{%
- \def\ins@rt{#2}\def\insert@{#3}\def\@insert{#4}%
- \def\@nd{#5}\def\c@mmand{#6}\let\t@kens#8%
- \edef\next{\t@kens{\the\t@kens#1}}\next
- \expandafter\linetr@nslator\the#7\end}
-
-\def\linetr@nslator#1#2{%
- \ifx#1\end
- \let\next\relax % no more tokens left
- \else
- \ifx#2\end % #1 is the last token
- \edef\next{\t@kens{\the\t@kens\ins@rt#1\@nd}}\next
- \else
- \edef\next{\t@kens{\the\t@kens\ins@rt#1\insert@\@insert}}\next
- \c@mmand
- \fi
- \let\next\linetr@nslator
- \fi\next#2
-}
-
-% the mask translator: take a sequence of pairs {length:number}{contents} and translate it into another sequence
-% #1 is the sequence of tokens (will be expanded) to insert at the beginning
-% #2 is the sequence of tokens (will be expanded) to insert before every pair in the sequence
-% #3 is the first sequence of tokens (will be expanded) to insert between pairs in the sequence
-% #4 is the second sequence of tokens (will be expanded) to insert between pairs in the sequence
-% #5 is the sequence of tokens (will be expanded) to insert at the end
-% #6 is the command to execute after each pair
-% #7 is original sequence
-% #8 is the resulting sequence
-
-\def\masktranslator#1#2#3#4#5#6:#7>#8{%
- \def\ins@rt{#2}\def\insert@{#3}\def\@insert{#4}%
- \def\@nd{#5}\def\c@mmand{#6}\let\t@kens#8%
- \edef\next{\t@kens{\the\t@kens#1}}\next
- \m@sktranslator#7{"40000000}\relax\relax}
-
-\def\m@sktranslator#1#2#3{%
- \ifnum#1<"40000000 % only happens when there are no pairs
- \ifnum#3<"40000000 % not the last pair
- \edef\next{\t@kens{\the\t@kens\ins@rt{#1}{#2}\insert@\@insert}}\next
- \c@mmand
- \let\next\m@sktranslator
- \else
- \edef\next{\t@kens{\the\t@kens\ins@rt{#1}{#2}\@nd}}\next
- \let\next\m@sktr@nslator
- \fi
- \else
- \let\next\m@sktr@nslator
- \fi
- \next{#3}%
+\def\pdfpagelink#1{% reference the page number
+ \pdfstartlink attr {/Border [0 0 0]} goto page #1 {/XYZ null null null}\BlueGreen#1\Black\pdfendlink
}
-\def\m@sktr@nslator#1{}
-
-% binary converter
-
-\def\binary#1{%
- \ifx#1\end
- \let\next\relax
- \else
- \bin@ry#1\let\next\binary
- \fi\next}
-
-\let\bits\toksa % storage for binary conversion bits
-\newtoks\bytes % storage for the binary representation
-\let\quotient\tempca
-\let\oldquotient\tempcb
-
-\def\bin@ry#1{\tempcc=4 \quotient="#1 \oldquotient=\quotient
-\loop \divide\quotient by2 \multiply\quotient by2 \advance\oldquotient by-\quotient
- \advance\tempcc by\m@ne
- \edef\next{\bits{\the\oldquotient\the\bits}}\next
- \divide\quotient by2 \oldquotient=\quotient
-\ifnum\tempcc > \z@
-\repeat
-\edef\next{\bytes{\the\bytes\the\bits}}\next
-\bits{}}
-
-% display macros:
-%
-% o registers
-
-\newtoks\digittable % the table representing the register
-\newcount\columncount % counter for the current column
-
-% o fonts
-
-\font\fivessb=phvb at4pt
-
-% o generic control sequences
-
-\def\incrementcc{\advance\columncount by\@ne}
-\def\decrementcc{\advance\columncount by\m@ne}
-
-% o mask highlighting
-
-% the strange looking definition of \m@skspan originates from \multispan in plain.tex:
-% \newcount\mscount
-% \def\multispan#1{\omit \mscount#1\relax
-% \loop\ifnum\mscount>\@ne \sp@n\repeat}
-% \def\sp@n{\span\omit\advance\mscount\m@ne}
-%
-% the number of `physical' columns to span is (2 * columns - 1) due to the fact that we
-% have to span the gaps between columns as well.
-
-\def\m@skspan#1#2{%
- \multispan{#1\advance\mscount by\mscount\advance\mscount by-1}{\vrule\highltcl{\leaders\hrule height 4pt\hfil}\vrule}%
-}
-
-% o mask underlining
-
-\def\m@sksp@n#1#2{%
- \multispan{#1\advance\mscount by\mscount\advance\mscount by-1}{\hrulefill}%
-}
-
-% o mask legend typesetting
-
-\def\m@sklegend#1#2{%
- \multispan{#1\advance\mscount by\mscount\advance\mscount by-1}{\hfil\lgndcl{\fivessb#2}\hfil}%
-}
-
-% o displaying each digit
-
-\def\dgtdispl@y#1#2{%
- \ifx#20%
- \let\colorbg\lightcl\let\colord\darkcl
- \else
- \let\colorbg\darkcl\let\colord\lightcl
- \fi
- \vrule\colorbg{\vrule height4pt width 12.5pt}%
- \llap{\raise.6pt\hbox{\colord{\fivessb#1}$\,$}}\vrule
-}
-
-% o typesetting space between digits
-
-\def\c@lumnspacer#1{\tempca#1\tempcb\tempca\divide\tempca by8\multiply\tempca by8
- \ifnum\tempca=\tempcb
- \middlediamond
- \else
- \tempca#1\tempcb\tempca\divide\tempca by4\multiply\tempca by4
- \ifnum\tempca=\tempcb
- \middledot
- \else
- \hfil
- \fi
- \fi}
-
-% oo dimond to separate bytes
-
-\def\middlediamond{\setbox0=\hbox{$\scriptscriptstyle\diamond$}\tempda=4pt \advance\tempda by-\ht0
- \divide\tempda by2 \hss\raise\tempda\box0\hss}%
-
-% oo dot to separate nybbles
-
-\def\middledot{\setbox0=\hbox{.}\tempda=4pt \advance\tempda by-\ht0
- \divide\tempda by2 \hss\raise\tempda\box0\hss}%
-
-% o put it all together
-
-\let\stcr\cr % so that this can be used inside \tabalign which redef's \cr
-
-\def\regdisplay#1#2{{%
- \binary#1\end
- \linetranslator{}{\hrulefill\noexpand\eatone}{&$\,\,$}{&}{\stcr}{\incrementcc}:\bytes>\digittable
- \linetranslator{}{\noexpand\dgtdispl@y{\the\columncount}}{&\noexpand\c@lumnspacer{\the\columncount}}{&}%
- {\stcr}{\decrementcc}:\bytes>\digittable
- \masktranslator{}{\noexpand\m@sksp@n}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
- \masktranslator{}{\noexpand\m@skspan}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
- \masktranslator{}{\noexpand\m@sksp@n}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
- \expandafter\digittable\expandafter{\the\digittable\noalign{\vskip-3.8pt}}%
- \masktranslator{}{\noexpand\m@sklegend}{&\hfil}{&}{\stcr}{\relax}:{#2}>\digittable
- \vbox{\offinterlineskip
- \halign{&##\stcr
- \the\digittable
- }
- }%
-}}
-
-% an alternate display
-
-\def\altmiddledot{\setbox0=\hbox{.}%
- \tempda=\ht\thezero \advance\tempda by-\ht0
- \divide\tempda by2 \advance\tempda by -\dp\thezero
- $\,$\raise\tempda\box0$\,$%
-}
-
-\def\altmiddlediamond{\setbox0=\hbox{$\scriptscriptstyle\diamond$}%
- \tempda=\ht\thezero \advance\tempda by-\ht0
- \divide\tempda by2 \advance\tempda by -\dp\thezero
- \hss\raise\tempda\box0\hss
-}
-
-\def\altdgtdispl@y#1#2{%
- \ifx#20%
- \let\colorbg\lightcl\let\colord\darkcl
- \else
- \let\colorbg\darkcl\let\colord\lightcl
- \fi
- \setbox0=\hbox{$\,$0$\,$}\tempda\ht0
- \advance\tempda by 2pt \ht0\tempda
- \vrule\colorbg{\lower1pt\hbox{\vrule height\ht0 depth\dp0 width \wd0}}%
- \llap{\colord{\raise.5pt\hbox{\fivessb#1$\,$}}}\vrule
-}
-
-\def\altc@lumnspacer#1{\tempca#1\tempcb\tempca
- \divide\tempca by4\multiply\tempca by4
- \ifnum\tempca=\tempcb
- \ %
- \else
- \kern1pt%
- \fi
-}
-
-\def\bytedisplay#1{{%
- \binary#1\end
- \linetranslator{}{\hrulefill\noexpand\eatone}{&\hfil}{&}{\stcr}{\incrementcc}:\bytes>\digittable
- \linetranslator{}{\noexpand\altdgtdispl@y{\the\columncount}}{&\noexpand\altc@lumnspacer{\the\columncount}}{&}%
- {\stcr}{\decrementcc}:\bytes>\digittable
- \linetranslator{}{\hrulefill\noexpand\eatone}{&\hfil}{&}{\stcr}{\incrementcc}:\bytes>\digittable
- \lower\dp\thezero\vbox{\offinterlineskip
- \halign{&##\stcr
- \the\digittable
- }
- }%
-}}
-
\quickcolordef{peach}{0 0.06 0.14 0}
\quickcolordef{corn}{0 0.06 0.63 0.02}
\quickcolordef{sandybrown}{0 0.33 0.61 0.04}
@@ -664,54 +563,6 @@
\def\colorwhite#1{\grayset{1}#1\restorecolor}
\def\colorgray#1{\grayset{.5}#1\restorecolor}
-\newif\iffullcolorpalette
-\fullcolorpalettetrue
-
-\iffullcolorpalette
- \let\lightcl=\colorpeach
- \let\shadedcl=\colorcorn
- \let\highltcl=\colorgray
- \let\lgndcl=\colorwhite
-\else
- \let\lightcl=\colorwhite
- \let\shadedcl=\colorgray
- \let\highltcl=\colorwhite
- \let\lgndcl=\colorblack
-% \let\colorlinkstart=\begingroup
-% \let\colorlinkend=\endgroup
-\fi
- \let\darkcl=\colorblack
-
-% macros to typeset the hex numbers in `graphic' format
-
-\newtoks\registerlegend
-
-\def\registerdisplay#1\end{\edef\next{\the\registerlegend}%
- \ifx\next\empty
- \edef\next{\noexpand\bytedisplay{#1}}\next
- \else
- \edef\next{\noexpand\regdisplay{#1}{\the\registerlegend}}\next
- \fi
-}
-
-\newbox\thezero
-
-\setbox\thezero=\hbox{\lower1pt\hbox{\vbox{\offinterlineskip
-\halign{#&$\,$#$\,$&#\cr
-\noalign{\hrule}
-\omit\vrule height1pt&&\omit\vrule height1pt\cr
-\vrule&\phantom0&\vrule\cr
-\omit\vrule height1pt&&\omit\vrule height1pt\cr
-\noalign{\hrule}
-}}}}
-
-\def\texbinary{\global\let\oldT\T
- \global\def\T##1{{\let\end\relax\let\^\registerdisplay##1\end{\tt :}\let\^\hexify##1\end}}}
-
-\def\endtexbinary{\global\let\T\oldT}
-
-\def\hexify#1\end{\hbox{{\tt #1}$_{\scriptscriptstyle1\kern-.7pt6}$}}
-
% verbatim listing macros borrowed (with changes) from The TeXbook
\newcount\democodelc
@@ -801,31 +652,55 @@
% macros that provide a way to customize printing of reserved words
-\newtoks\tempseq
-\tempseq={RVD}
\let\oldamp\&
\let\oldoneletterid\|
-\def\replaceunderscore#1{\let\next\replaceunderscore
- \ifx#1\_\tempseq=\expandafter{\the\tempseq x}\else
- \ifx#1\#\tempseq=\expandafter{\the\tempseq H}\else
- \ifx#1\end\let\next\relax\else\tempseq=\expandafter{\the\tempseq #1}\fi
- \fi
- \fi\next}
-\def\reservedid#1{{\replaceunderscore#1\end
- \expandafter\let\expandafter\newseq\csname\the\tempseq\endcsname
- \ifx\relax\newseq\oldamp{#1}\else\newseq\fi}}
-
-\def\reservedoneletterid#1{{%
- \expandafter\let\expandafter\newseq\csname\the\tempseq\string#1\endcsname
- \ifx\relax\newseq\oldoneletterid{#1}\else\newseq\fi}}
+\let\oldslashslash\\
+
+\def\replaceunderscore#1{%
+ \ifx#1\_%
+ \yybreak{x\replaceunderscore}%
+ \else
+ \ifx#1\#%
+ \yybreak@{H\replaceunderscore}%
+ \else
+ \ifx#1\end
+ \yybreak@@{}%
+ \else
+ \yybreak@@{\string#1\replaceunderscore}%
+ \fi
+ \fi
+ \yycontinue
+}
+
+\def\reservedid#1{\expandafter\r@servedid\expandafter{\csname RVD\replaceunderscore#1\end\endcsname}{#1}}
+
+\def\r@servedid#1#2{\ifx#1\relax\yybreak{\oldamp{#2}}\else\yybreak{#1}\yycontinue}
+
+\def\reservedname#1{\expandafter\r@servedname\expandafter{\csname RVD\replaceunderscore#1\end\endcsname}{#1}}
+
+\def\r@servedname#1#2{\ifx#1\relax\yybreak{\oldslashslash{#2}}\else\yybreak{#1}\yycontinue}
+
+\def\reservedoneletterid#1{%
+ \expandafter\ifx\csname RVD\string#1\endcsname\relax
+ \yybreak{\oldoneletterid{#1}}%
+ \else
+ \yybreak{\csname RVD\string#1\endcsname}%
+ \yycontinue}
\def\defreserved#1{%
- \expandafter\def\csname\the\tempseq #1\endcsname
+ \expandafter\def\csname RVD\replaceunderscore#1\end\endcsname
}
\let\&\reservedid
\let\|\reservedoneletterid
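+
+% a commented usage sketch of the customization above (the reserved word TRUE
+% and its bold replacement are hypothetical, not part of the package):
+%
+%   \defreserved{TRUE}{{\bf true}}% after this, \&{TRUE} typesets as {\bf true}
+%
+% a word with no matching \defreserved falls back to \oldamp; underscores and
+% hash marks in the name are encoded as `x' and `H' in the RVD... control sequence name.
+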
+% better `hash'
+
+\let\oldhash\#
+\def\#{\lower.5pt\hbox{\tt\oldhash}}
+
+\let\oldmathS\$
+
 % cweb macros adapted to make typesetting languages other than C possible: Makefile and linker scripts.
\tempca=\catcode`\^^I% remember the old catcode
@@ -843,6 +718,8 @@
\let\&=\AM % ampersand in a string
\let\^=\CF % circumflex in a string
\let\$=\oldmathS % dollar sign
+ \let\#=\oldhash % hash symbol: the new definition above would break alignment
+ \let\n\empty % the `end of line' inserted by the preprocessing script
\def^^I{$\llcorner$&$\lrcorner$}% a tab is a tab
\tabalign{}#1\cr}}}\endgroup} % verbatim string
\catcode`\^^I=\tempca
@@ -854,17 +731,12 @@
\def\R{\hbox{$^{\rm not}$}}
\let\CM\lnot
\def\MRL#1{\KK#1}\def\KK#1#2{\buildrel\;{\let\OR\lor\scriptscriptstyle#1}\over#2}
-\def\C#1{\5\5\quad$\triangleright\,${\rm #1}$\,\triangleleft$}
+\def\C#1{\5\5\quad\hbox{$\triangleright\,$}{\rm #1}\hbox{$\,\triangleleft$}}
\let\Z=\leq \let\G=\geq
\let\E==
\def\tabC#1{\quad$\triangleright\,${\cmntfont#1}$\,\triangleleft$}
-% better `hash'
-
-\let\oldhash\#
-\def\#{\lower.5pt\hbox{\.{\oldhash}}}
-
\def\.#1{\leavevmode\hbox{\tt % typewriter type for strings
\let\\=\BS % backslash in a string
\let\{=\LB % left brace in a string
@@ -880,263 +752,180 @@
% macros to change the appearance of section headers
\outer\def\N#1#2#3.{% beginning of starred section
- \ifacro{\toksF={}\makeoutlinetoks#3\outlinedone\outlinedone}\fi
- \gdepth=#1\gtitle={#3}\MN{#2}%
- \ifon\ifnum#1<\secpagedepth \vfil\eject % force page break if depth is small
- \else\vfil\penalty-100\vfilneg\vskip\intersecskip\fi\fi
- \message{*\secno} % progress report
- \def\stripprefix##1>{}\def\gtitletoks{#3}%
- \edef\gtitletoks{\expandafter\stripprefix\meaning\gtitletoks}%
- \edef\next{\write\cont{%\noexpand\noexpand\noexpand\eatone{\noexpand\meaning\noexpand\ZZ}%
- \noexpand\noexpand\noexpand % AS this is a (sort of) bug in cwebmac.tex
- % as long as the index is output (i.e. ...\eject)
- % before all the sections this is unnecessary
- % uncomment the line after the brace to see what \ZZ
- % expands to in case of weird errors
- \ZZ{\gtitletoks}{#1}{\secno}% write to contents file
- {\noexpand\the\pageno}{\the\toksE}}}\next % \ZZ{title}{depth}{sec}{page}{ss}
- \ifpdftex\expandafter\xdef\csname curr#1\endcsname{\secno}%
- \ifnum#1>0\countB=#1 \advance\countB by-1
- \advancenumber{chunk\the\countB.\expnumber{curr\the\countB}}\fi\fi
- \ifpdf\special{pdf: outline #1 << /Title (\the\toksE) /Dest
- [ @thispage /FitH @ypos ] >>}\fi
- \ifon\startsection{\ttl#3.\ }\ignorespaces} % changed the font; AS
+ \ifacro{\toksF={}\makeoutlinetoks#3\outlinedone\outlinedone}\fi
+ \gdepth=#1\gtitle={#3}\MN{#2}%
+ \ifon\ifnum#1<\secpagedepth \vfil\eject % force page break if depth is small
+ \else\vfil\penalty-100\vfilneg\vskip\intersecskip\fi\fi
+ \message{*\secno} % progress report
+ \def\stripprefix##1>{}\def\gtitletoks{#3}%
+ \edef\gtitletoks{\expandafter\stripprefix\meaning\gtitletoks}%
+ \edef\next{\write\cont{%\noexpand\noexpand\noexpand\eatone{\noexpand\meaning\noexpand\ZZ}%
+ \noexpand\noexpand\noexpand % AS this is a (sort of) bug in cwebmac.tex
+ % as long as the index is output (i.e. ...\eject)
+ % before all the sections this is unnecessary
+ % uncomment the line after the brace to see what \ZZ
+ % expands to in case of weird errors
+ \ZZ{\gtitletoks}{#1}{\secno}% write to contents file
+ {\noexpand\the\pageno}{\the\toksE}}}\next % \ZZ{title}{depth}{sec}{page}{ss}
+ \ifpdftex\expandafter\xdef\csname curr#1\endcsname{\secno}%
+ \ifnum#1>0\countB=#1 \advance\countB by-1
+ \advancenumber{chunk\the\countB.\expnumber{curr\the\countB}}\fi\fi
+ \ifpdf\special{pdf: outline #1 << /Title (\the\toksE) /Dest
+ [ @thispage /FitH @ypos ] >>}\fi
+ \ifon\startsection{\ttl#3.\ }\ignorespaces} % changed the font; AS
\let\oldN\N
\outer\def\textN#1#2#3.{% beginning of starred section in `text' mode
- \ifacro{\toksF={}\makeoutlinetoks#3\outlinedone\outlinedone}\fi
- \gdepth=#1\gtitle={#3}\MN{#2}%
- \ifon
- \ifnum#1<2
- \vfil\eject % force page break for chapters
- \else
- \vskip0pt plus 3.5\baselineskip\penalty-100\vskip0pt plus -3.5\baselineskip\vskip\intersecskip % no page break
- \fi
- \fi
- \message{*\secno} % progress report
- \def\stripprefix##1>{}\def\gtitletoks{#3}%
- \edef\gtitletoks{\expandafter\stripprefix\meaning\gtitletoks}%
- \edef\next{\write\cont{%\noexpand\noexpand\noexpand\eatone{\noexpand\meaning\noexpand\ZZ}%
- \noexpand\noexpand\noexpand % AS this is a (sort of) bug in cwebmac.tex
- % as long as the index is output (i.e. ...\eject)
- % before all the sections this is unnecessary
- % uncomment the line after the brace to see what \ZZ
- % expands to in case of weird errors
- \ZZ{\gtitletoks}{#1}{\secno}% write to contents file
- {\noexpand\the\pageno}{\the\toksE}}}\next % \ZZ{title}{depth}{sec}{page}{ss}
- \ifpdftex\expandafter\xdef\csname curr#1\endcsname{\secno}%
- \ifnum#1>0\countB=#1 \advance\countB by-1
- \advancenumber{chunk\the\countB.\expnumber{curr\the\countB}}\fi\fi
- \ifpdf\special{pdf: outline #1 << /Title (\the\toksE) /Dest
- [ @thispage /FitH @ypos ] >>}\fi
- \ifon\startsection{\ttl#3}\smallskip\noindent\ignorespaces}
+ \ifacro{\toksF={}\makeoutlinetoks#3\outlinedone\outlinedone}\fi
+ \gdepth=#1\gtitle={#3}\MN{#2}%
+ \ifon
+ \ifnum#1<2
+ \vfil\eject % force page break for chapters
+ \else
+ \vskip0pt plus 3.5\baselineskip\penalty-100\vskip0pt plus -3.5\baselineskip\vskip\intersecskip % no page break
+ \fi
+ \fi
+ \message{*\secno} % progress report
+ \def\stripprefix##1>{}\def\gtitletoks{#3}%
+ \edef\gtitletoks{\expandafter\stripprefix\meaning\gtitletoks}%
+ \edef\next{\write\cont{%\noexpand\noexpand\noexpand\eatone{\noexpand\meaning\noexpand\ZZ}%
+ \noexpand\noexpand\noexpand % AS this is a (sort of) bug in cwebmac.tex
+ % as long as the index is output (i.e. ...\eject)
+ % before all the sections this is unnecessary
+ % uncomment the line after the brace to see what \ZZ
+ % expands to in case of weird errors
+ \ZZ{\gtitletoks}{#1}{\secno}% write to contents file
+ {\noexpand\the\pageno}{\the\toksE}}}\next % \ZZ{title}{depth}{sec}{page}{ss}
+ \ifpdftex\expandafter\xdef\csname curr#1\endcsname{\secno}%
+ \ifnum#1>0\countB=#1 \advance\countB by-1
+ \advancenumber{chunk\the\countB.\expnumber{curr\the\countB}}\fi\fi
+ \ifpdf\special{pdf: outline #1 << /Title (\the\toksE) /Dest
+ [ @thispage /FitH @ypos ] >>}\fi
+ \ifon\startsection{\ttl#3}\smallskip\noindent\ignorespaces}
+
+\outer\def\chapterN#1#2#3.{% beginning of starred section in `book' mode
+ \ifacro{\toksF={}\makeoutlinetoks#3\outlinedone\outlinedone}\fi
+ \gdepth=#1\gtitle={#3}\MN{#2}%
+ \ifon
+ \ifnum#1<\tw@
+ \vfil\eject % force page break for chapters and major subsections
+ \ifodd\pageno
+ \else
+ \null\vfill\eject
+ \fi
+ \else
+ \vskip0pt plus 3.5\baselineskip
+ \penalty-100
+ \vskip0pt plus -3.5\baselineskip
+ \vskip\intersecskip % no forced page break
+ \fi
+ \fi
+ \message{*\secno} % progress report
+ \def\stripprefix##1>{}\def\gtitletoks{#3}%
+ \edef\gtitletoks{\expandafter\stripprefix\meaning\gtitletoks}%
+ \edef\next{\write\cont{%\noexpand\noexpand\noexpand\eatone{\noexpand\meaning\noexpand\ZZ}%
+ \noexpand\noexpand\noexpand % AS this is a (sort of) bug in cwebmac.tex
+ % as long as the index is output (i.e. ...\eject)
+ % before all the sections this is unnecessary
+ % uncomment the line after the brace to see what \ZZ
+ % expands to in case of weird errors
+ \ZZ{\gtitletoks}{#1}{\secno}% write to contents file
+ {\noexpand\the\pageno}{\the\toksE}}}\next % \ZZ{title}{depth}{sec}{page}{ss}
+ \ifpdftex\expandafter\xdef\csname curr#1\endcsname{\secno}%
+ \ifnum#1>0\countB=#1 \advance\countB by-1
+ \advancenumber{chunk\the\countB.\expnumber{curr\the\countB}}\fi\fi
+ \ifpdf\special{pdf: outline #1 << /Title (\the\toksE) /Dest
+ [ @thispage /FitH @ypos ] >>}\fi
+ \ifon\stsecchap{#1}{#3}\ignorespaces
+}
+
+\def\stsecchap#1#2{\rightskip=0pt % get out of C mode (cf. \B)
+ \sfcode`;=1500 \pretolerance 200 \hyphenpenalty 50 \exhyphenpenalty 50
+%
+ \ifnum#1>0
+ \noindent{\let\*=\lapstar\llap{\tentitle\secstar\quad}}{\ttl #2}\smallskip\noindent% push it to the margins
+ \else
+ \null % sloppy but this works for the narrow case of this example
+ \global\chapterheadtrue
+ \vskip1.5in
+ \vbox{\tabskip=0pt plus 1 fil
+ \halign to\hsize{%
+ \hfil##\tabskip=0pt\cr
+ \cmhuge\secstar\cr
+ \noalign{\vskip 1pc}%
+ \cmmiddle#2\cr
+ }%
+ }%
+ \bigskip
+ \noindent
+ \fi
+%
+ \ifpdftex\smash{\raise\baselineskip\hbox to0pt{%
+ \let\*=\empty\ifmakepdf\pdfdest num \secstar fith\fi}}% this space is a bug in the original cwebmac.tex; AS
+ \else\ifpdf\smash{\raise\baselineskip\hbox to0pt{%
+ \let\*=\empty\special{%
+ pdf: dest (\romannumeral\secstar) [ @thispage /FitH @ypos ]}}}\fi\fi}
\outer\def\textM#1{\MN{#1}\ifon\vskip0pt plus 3\baselineskip\penalty-100\vskip0pt plus -3\baselineskip % no page break
- \vskip\intersecskip\startsection\ignorespaces}
+ \vskip\intersecskip\startsection\ignorespaces}
\def\MN#1{\par % common code for \M, \N
- {\xdef\secstar{#1}\let\*=\empty\xdef\secno{#1}}% remove \* from section name
- \ifx\secno\secstar \onmaybe \else\ontrue \fi
- \mark{{\secno}{\the\gdepth}{\the\gtitle}}} % remove the annoying
- % `paragraph' sign; AS
+ {\xdef\secstar{#1}\let\*=\empty\xdef\secno{#1}}% remove \* from section name
+ \ifx\secno\secstar \onmaybe \else\ontrue \fi
+ \mark{{\secno}{\the\gdepth}{\the\gtitle}}% remove the annoying `paragraph' sign; AS
+}
-% a new start section macro: fix the extra space bug and push the setion number to the margins
+% a new start section macro: fix the extra space bug and push the section number to the margins
\def\stseclap{\rightskip=0pt % get out of C mode (cf. \B)
- \sfcode`;=1500 \pretolerance 200 \hyphenpenalty 50 \exhyphenpenalty 50
- \noindent{\let\*=\lapstar\llap{\tentitle\secstar\quad}}% push it to the margins
- \ifpdftex\smash{\raise\baselineskip\hbox to0pt{%
- \let\*=\empty\ifmakepdf\pdfdest num \secstar fith\fi}}% this space is a bug in the original cwebmac.tex; AS
- \else\ifpdf\smash{\raise\baselineskip\hbox to0pt{%
- \let\*=\empty\special{%
- pdf: dest (\romannumeral\secstar) [ @thispage /FitH @ypos ]}}}\fi\fi}
+ \sfcode`;=1500 \pretolerance 200 \hyphenpenalty 50 \exhyphenpenalty 50
+ \noindent{\let\*=\lapstar\llap{\tentitle\secstar\quad}}% push it to the margins
+ \ifpdftex\smash{\raise\baselineskip\hbox to0pt{%
+ \let\*=\empty\ifmakepdf\pdfdest num \secstar fith\fi}}% this space is a bug in the original cwebmac.tex; AS
+ \else\ifpdf\smash{\raise\baselineskip\hbox to0pt{%
+ \let\*=\empty\special{%
+ pdf: dest (\romannumeral\secstar) [ @thispage /FitH @ypos ]}}}\fi\fi}
\let\startsection\stseclap
+\newif\ifchapterhead
+
 % make page and section number hang in the margins; \quad is chosen to
% be consistent with section appearance; the page numbers are printed
% in old-style numerals;
-\def\lheader{\headertrue\llap{\mainfont\oldstyle\the\pageno\tentitle\quad}\eightpoint\rm\grouptitle
- \hfill\title\rlap{\tentitle\quad\tenpoint$^{\hbox{\sevenrm\topsecno}}_{\hbox{\sevenrm\botsecno}}$}} % top line on left-hand pages
-\def\rheader{\headertrue
-\llap{\tenpoint$^{\hbox{\sevenrm\topsecno}}_{\hbox{\sevenrm\botsecno}}$\tentitle\quad}%
-\eightpoint\rm\title\hfill
- \grouptitle\rlap{\tentitle\quad\mainfont\oldstyle\the\pageno}} % top line on right-hand pages
+\def\lheader{%
+ \ifchapterhead
+ \hfil
+ \else
+ \termindexfalse\headertrue\llap{\mainfont\oldstyle\the\pageno\tentitle\quad}%
+ \eightpoint\rm\grouptitle\hfill\title
+ \rlap{\tentitle\quad\tenpoint$^{\hbox{\sevenrm\topsecno}}_{\hbox{\sevenrm\botsecno}}$}%
+ \fi
+} % top line on left-hand pages
+
+\def\rheader{%
+ \ifchapterhead
+ \hfil
+ \else
+ \termindexfalse\headertrue\llap{\tenpoint$^{\hbox{\sevenrm\topsecno}}_{\hbox{\sevenrm\botsecno}}$\tentitle\quad}%
+ \eightpoint\rm\title\hfill\grouptitle
+ \rlap{\tentitle\quad\mainfont\oldstyle\the\pageno}%
+ \fi
+} % top line on right-hand pages
\def\botsecno{\expandafter\takeone\botmark} % the first section on the
% next page
-% from time to time, entries must be omitted from the index;
-% the macros below implement a mechanism to do that
-
-\newif\ifisindexable
-
-\def\setindexable#1{%
- \isindexabletrue
- \def\next{#1}%
- \expandafter\s@t@ndexable\unindexable.\end
-}
+\newwrite\exampletable
-\def\s@t@ndexable#1#2\end{%
- {%
- \def\ifisindexable{#1}%
- \ifx\ifisindexable\next
- \yybreak{\aftergroup\isindexablefalse\aftergroup\eattoend}%
- \else
- \def\ifisindexable{#2}%
- \ifx\ifisindexable\dotcontainer
- \yybreak@{\aftergroup\eattoend}%
- \else
- \yybreak@{\aftergroup\s@t@ndexable}%
- \fi
- \yycontinue
- }%
- #2\end
-}
-
-\def\inxmod{%
- \write\cont{} % ensure that the contents file isn't empty
- \write\cont{\catcode `\noexpand\@=12\relax} % \makeatother
- \closeout\cont % the contents information has been fully gathered
- \message{Index:}
- \medskip
- \eightpoint\raggedright
- \fnotesstart=2
- \fnotesspan=1
- \noofcolumns=3
- \icgap=5pt%
- \linecount=3
- \setmcparams
- \dsskip=0pt%
- \adjskip=0pt plus 9pt%
- \ifx\unindexable\UNDEFINED
- \def\unindexable{{$\TeXx$}{$\TeXa$}{$\TeXb$}{$\TeXf$}{$\TeXao$}{$\TeXfo$}}%
- \else
- \expandafter\def\expandafter\unindexable\expandafter{\unindexable
- {$\TeXx$}{$\TeXa$}{$\TeXb$}{$\TeXf$}{$\TeXao$}{$\TeXfo$}%
- }%
- \fi
- \def\I##1, ##2.{%
- {%
- \setindexable{##1}%
- \ifisindexable
- \else
- \aftergroup\eatone
- \fi
- }%
- {%
- \hangindent1em\noindent##1:\kern.5em
- \ifacro\pdfnote##2.\else##2\fi.%
- \par
- }%
- }%
- % indexing macros for grammar terms
- \def\termidstring##1{% processed name in italics
- \numberstocharsandspaces##1\end
- \let\optstrextra\optstrextraesc
- \expandafter\nameproc\expandafter{\the\toksa}%
- \def\idxentry{{\it\the\toksa}}%
- }%
- \def\termvstring##1{% processed name in typewriter style
- \numberstocharsandspaces##1\end
- \let\optstrextra\optstrextraesc
- \expandafter\nameproc\expandafter{\the\toksa}%
- \def\idxentry{{\tt\def\_{\char`\_}\the\toksa}}%
- }%
- \def\termttstring##1{% straightforward typewriter text
- \numberstocharsandspaces##1\end
- \def\idxentry{{\tt\the\toksa}}%
- }%
- \def\termostring##1{% options (e.g. \flex\ and \bison\)
- \numberstocharsandspaces##1\end
- \def\idxentry{{$\langle$\bf\the\toksa$\rangle$}}%
- }%
- \let\termhdrstring\termidstring
- \def\termstring##1{%
- \numberstocharsandspaces##1\end
- \let\optstrextra\optstrextraesc
- \expandafter\nameproc\expandafter{\the\toksa}%
- \def\idxentry{{\tt"\the\toksa"}}%
- }%
- \def\texcsstring##1{%
- \numberstocharsandspaces##1\end
- \def\idxentry{%
- \def\texnspace{index}%
- \def\getcescape{% an \ seen is really an \, and will not go through C string processing
- \def\next{escape}%
- \action\next\in\currentstate
- }%
- \termindexfalse\expandafter\inlineTeXx\expandafter{\expandafter/\the\toksa}%
- }%
- }%
- \expandafter\def\csname acharswitch:index\endcsname{% correct reserved \TeX\ characters a la CWEB verbatim
- %$\%\\ % unaffected
- %\#\ % these never appear
- _{%
- \yybyte\expandafter{\csname \the\yybyte\endcsname}%
- \expandafter\yycp@\expandafter`\the\yybyte\relax
- \mkpurebyte
- \yyreturn
- }
- }%
- \expandafter\setspecialcharsfrom\csname acharswitch:index\endcsname
- \expandafter\let\expandafter\acharswitch\csname acharswitch:index\endcsname
- \def\texlexer{% now that all character codes are 12
- \let\default\yygetchar
- \let\next\yycp@
- \ifnum\yycp@>"3F %
- \ifnum\yycp@<"5B % an uppercase letter or @
- \def\next{letter}%
- \fi
- \fi
- \ifnum\yycp@>"60 %
- \ifnum\yycp@<"7B %
- \def\next{letter}%
- \fi
- \fi
- \action\next\in\currentstate
- }%
- \def\indexseparator##1##2{%
- \vskip.5\baselineskip
- \centerline{\dinkus}%
- \vskip.5\baselineskip
- }
- \def\GI##1##2##3##4.{%
- {%
- \def\hostparsernamespace{##1}%
- \toksa{}##2{##3}%
- \I{\idxentry}##4.%
- }%
- }%
- % end indexing macros for grammar terms
- \let\*=\lapstar
- \def\[##1]{{\it##1}} % definitions
- \def\(##1){$\underline{##1}$} % underlined index item
- \def\(##1){{\bf ##1}} % an alternative to the above
- \def\e##1e{##1{\sevenpoint$^\circ$}} % terms in examples
- \def\f##1f{{\it##1\/\kern.2ex}${}^\circ$} % lhs in examples (italic correction does not seem to be enough)
- \def\g##1g{$\underline{##1}^\circ$} % definitions in examples
- \def\g##1g{{\bf ##1}$^\circ$} % an alternative to the above
- \begindoublecols
- \readindex
- \otherlangindexseparator
- \readgindex
-}
-
-\def\otherlangindexseparator{%
- \par
- \vskip.5\baselineskip
- \centerline{B{\sc ISON AND} \TeX\ {\sc INDEX}}%
- \vskip.5\baselineskip
- \par
-}
+\newif\ifsaveparseoutput
+\newif\ifchecktable
\def\finmod{%
\enddoublecols
\closeout\exampletable
- \unsetfootnotes
\parfillskip 0pt plus 1fil
\def\grouptitle{NAMES OF THE SECTIONS}
\let\topsecno=\nullsec
@@ -1178,68 +967,122 @@
\centerline{\sc A LIST OF ALL SECTIONS}
\penalty300
\medskip
+ \emergencystretch=10pt
\readsections
}
-\newread\trygindex
+% \Cee\ section macros
-\def\readgindex{%
- \openin\trygindex=\jobname.gdy
- \ifeof\trygindex
- \else
- \closein\trygindex
- \input \jobname.gdy
- \fi
-}
+% stage one macros for `\B' sections: collecting tokens
-% main section macro
+% \B is always paired with \par (see CWEAVE section 213); unfortunately, this is
+% where certainty ends; the output of \CWEAVE\ is woefully unstructured, to say the
+% least; the ugly hack below appears to be the only way to typeset the \Cee\ sections
+% in their own style (using the 9pt face as is the case below).
\let\oldB\B
-% stage one macros for `\B' sections: collecting tokens
-
-% \B is always paired with \par (see CWEAVE section 213)
-
\long\def\Bvbpp#1\par{%
- \Bvbp@#1\X\X${}\E{}$\par
+ \Bvbp@#1\X\X$\E$\par
+}
+
+\long\def\Bvbpp#1\par{% the \vb{\yyendgame}\vb... is inserted by brack.pl
+ \Bvbp@#1\cleanBtail\X\X$\E$\par
}
-\long\def\Bvbp@#1\X#2\X$#3\E#4$#5\par{%
+\def\cleanBtail\X\X$\E${}
+
+\long\def\Bvbp@#1\X#2\X$#3\E#4$#5\par{% new version of the above, experimental
\yystringempty{#5}%
- {%
- \Bvb@@#1\X#2\X$#3\E#4$\par
- }{%
- \Bvb@@#1\X#2\X${#3}\E{#4}$\begingroup
+ {% this is a \Cee\ section (@c or @p); it is tempting to assume that the suffix
+ % is \X\X$\E$\par and forgo the invocation of \Bvb@@; however, this is not always
+ % the case, as the first \X may come from a different section in the middle.
+ \oldB#1\X#2\X$#3\E#4$\par
+ }{% the logic here is not perfect but if this is still a \Cee\ section,
+ % inserting @t@> between @> and whatever follows that produces $...\E...$
+ % will correct the misidentification (the result of which may be, at best,
+ % part of the section being typeset in the wrong face, or, worse, \begingroup
+ % starting a group in a wrong place (such as in the middle of the math mode))
+ \oldB#1\X#2\X${#3}\E{#4}$\begingroup % the braces around #3 and #4 are necessary
+ % to ensure proper spacing around \E in case
+ % #3 and #4 are empty
\ninepoint
+ \Binputtoks{#5}\Btoksmathfalse
#5\par\endgroup
}%
}
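+
+% a commented sketch of the dissection above (the module name and the trailing
+% material are hypothetical): for an unnamed (@c or @p) section the appended
+% sentinel makes #5 empty, so \oldB typesets the whole body as \Cee\ code; for
+% a body of the form
+%
+%   ...\X2:A hypothetical module\X${}\E{}$<trailing \TeX\ material>\par
+%
+% the trailing material ends up in #5 and is typeset in \ninepoint inside its
+% own group, with a copy saved in \Binputtoks for the `upper half' macros.
+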
-\long\def\Bvb@@#1\X\X$#2\E#3$\par{%
- \oldB#1\par
+\newtoks\Binputtoks
+
+\newif\ifBtoksmath % does the section start in math mode?
+
+\let\yyendgame\empty
+
+\let\postparse\empty % the `upper half' \preparse... macros will use the contents of \Binputtoks
+
+\newif\ifyyskipparse
+
+\let\B\Bvbpp
+
+% parser stack setup
+
+\def\fillpstack#1#2{%
+ \expandafter\def\csname currentparser[#1]\endcsname{#2}%
}
-\def\begingsec#1#2\endgsec{%
- \ifmmode
- \yybreak{{}${}\vbpp{}$#2\vb\endgsec{#1}{$#2}}%$
+\def\poppstack#1{%
+ \expandafter\ifx\csname currentparser[#1]\endcsname\relax
+ \yybreak{% unknown language, skip the section
+ \preparsetrivial
+ }
\else
- \yybreak{\vbpp{}#2\vb\endgsec{#1}{#2}}%
+ \yybreak{%
+ \expandafter\expandafter\expandafter
+ \p@ppstack\csname currentparser[#1]\endcsname\end{#1}%
+ }%
\yycontinue
}
-\long\def\vbpp#1#2\vb#3#{% verbatim section preprocessor
- \yystringempty{#3}{%
- \vbp@{#1\stashed{#2}}%
+\def\p@ppstack#1#2\end#3{%
+ \yystringempty{#2}{%
+ \fillpstack{#3}\relax#1%
}{%
- #3{#1\stashed{#2}}%
+ \fillpstack{#3}{#2}#1%
}%
}
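+
+% a commented usage sketch (the tag `m' and the macro \preparsemain are
+% hypothetical placeholders):
+%
+%   \fillpstack{m}{\preparsemain\relax}% register the setup code for tag `m'
+%
+% \poppstack{m} then executes the first token of the registered list and
+% re-registers the remainder; a tag with no registered code falls back to
+% \preparsetrivial, which skips the section.
+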
-\long\def\vbp@#1#2{%
- \vbpp{#1#2}%
+% section macros
+
+\def\lsectionbegin#1{%
+ \ifmmode
+ \yybreak{%
+ {}$% exit the group inside the math mode
+ \Btoksmathtrue
+ \poppstack{#1}\vb{}$%
+ }% finish the math mode and reinsert it as stash
+ \else
+ \yybreak{%
+ \poppstack{#1}%
+ }%
+ \yycontinue
}
-\let\B\Bvbpp
+\def\skiptolsection#1\lsectionbegin#2{%
+ \ifBtoksmath
+ \yybreak{%
+ \lsectionbegin{#2}\vb{}${}%
+ }%
+ \else
+ \yybreak{%
+ \lsectionbegin{#2}%
+ }%
+ \yycontinue
+}
+
+\def\removefinalvb#1\endparse{}
+
+\def\preparsetrivial#1\postparse{} % in case the section contains some dangerous
+ % terms like \insertraw{...} that must be skipped
% URL typesetting
@@ -1260,15 +1103,29 @@
% commonly used names
-\def\CWEB{\.{CWEB}}\def\CTANGLE{\.{CTANGLE}}\def\CWEAVE{\.{CWEAVE}}
+\def\CWEB{\.{CWEB}}
+\def\CTANGLE{\.{CTANGLE}}
+\def\CWEAVE{\.{CWEAVE}}
+\def\WEB{\.{WEB}}
+\def\Pascal{P{\sc ASCAL}}
+\def\noweb{\.{noweb}}
\def\splint{\.{SPLinT}}
\def\POSIX{{\sc POSIX}}
+\def\EAST{{\sc EAST}}
+\def\WEST{{\sc WEST}}
\def\ISO{{\mc ISO}}
-\def\bison{\.{bison}}
-\def\flex{\.{flex}}
+\def\bison{\ifheader B{\sc ISON}\else\.{bison}\fi}
+\def\flex{\ifheader F{\sc LEX}\else\.{flex}\fi}
\def\lex{\.{lex}}
+\def\yacc{\.{yacc}}
\def\gcc{\.{gcc}}
+\def\ld{\ifheader LD\else\.{ld}\fi}
\def\GNU{{\sc GNU}}
+\def\CEEPP/{{\mc C{\tt ++}\spacefactor1000}}
+\def\Ceepp{\CEEPP/}
+\def\mft{\.{mft}}
+\def\MF{{\tt META}\-{\tt FONT}\spacefactor1000 }
+\def\MP{{\tt META}\-{\tt POST}\spacefactor1000 }
% from tugboat.cmn
\newbox\TestBox
\def\La{\tempca=\the\fam \leavevmode L%
@@ -1289,84 +1146,31 @@
}%
}
-% debugging
-
-\def\shownethe#1{%
- \edef\next{\the#1}%
- \ifx\next\empty
- \else
- \showthe#1%
- \fi
-}
-
-\def\showem#1#2#3{\toksa{#1}\toksb{#2}\toksc{#3}{\newlinechar=`^^J%
- \errmessage{%
- arg. 1: \the\toksa^^J%
- arg. 2: \the\toksb^^J%
- arg. 3: \the\toksc%
-}}}
-
% sugar
\def\FOREVER{{\bf forever}}
-% delay functions prettyprinting and other miscellanea;
-
-\def\delayhalfsec{\hbox{wait \kern-.5pt\raise1pt\hbox{$\scriptstyle1$}%
-\kern-1pt/\kern-1pt\lower1pt\hbox{$\scriptstyle2$} sec, }}
-\def\delayquartersec{\hbox{wait \kern-.5pt\raise1pt\hbox{$\scriptstyle1$}%
-\kern-1pt/\kern-1pt\lower1pt\hbox{$\scriptstyle4$} sec, }}
-
-\def\delay#1sec{%
-\hbox{\def\secmult{}wait
-\getsecmult#1\end
-\if\secmult u\def\secmodifier{ $\mu$\kern.05pt }\else
-\if\secmult m\def\secmodifier{ m}\else
-\if\secmult n\def\secmodifier{ n}\else
- \secmult\def\secmodifier{ }%
-\fi\fi\fi
-\secmodifier sec$\rm\scriptstyle s$, }
-}
-
-\def\getsecmult#1{%
-\ifx#1\end\let\next\relax\else
- \secmult\def\secmult{#1}\let\next\getsecmult
-\fi\next
-}
-
-% attribute prettyprinting;
-
-\long\def\parseattrib#1#2\end{\csname attrib#1\endcsname}
-
-\def\xxattributexx#1{\ifx(#1\let\next\xxattributex\else\let\next\xxattribute\fi\next#1}
-\def\xxattributex((\\#1{%
- \expandafter\let\expandafter\currentattrib\csname attrib#1\endcsname
- \ifx\currentattrib\attribpacked
- \strut\colorblack{%
- \lower2pt\hbox{\vrule width \wd\thezero height
- \ht\thezero depth
- \dp\thezero}}\llap{\colorwhite{\hbox{$\scriptscriptstyle\bf 01\kern.5pt$}}}%
- \let\next\eatparentheses
- \fi
- \ifx\currentattrib\attribnoinline
- {\ \bf noinline\rm_a}%
- \let\next\eatparentheses
- \fi
- \ifx\currentattrib\attribformat
- {\ \bf format\rm_a}%
- \let\next\eatarguments
- \fi
- \next
-}
-
-\def\eatparentheses#1)){}
-\def\eatarguments(\\#1,#2,#3))){(\hbox{\rm as }\.{#1})}
-
-\def\xxattribute#1{\.{\_\_attribute\_\_} (\sc GNU) #1}
-
-\let\attribnoinline=1
-\let\attribpacked=2
-\let\attribformat=3
+\ifacro
+ \ifpdftex
+ \input extras/texmf/macros/protcode.tex
+ \pdfprotrudechars=2
+ \setprotcode\tenrm
+ \setprotcode\ninerm
+ \setprotcode\eightrm
+ \setprotcode\sevenrm
+ \setprotcode\sixrm
+ \setprotcode\tenit
+ \setprotcode\nineit
+ \setprotcode\eightit
+ \setprotcode\sevenit
+ \setprotcode\sixit
+ % do not stretch characters
+ %\pdfadjustspacing=2
+ %\pdffontexpand\sevenrm 30 20 10 autoexpand
+ % TODO: use \pdfsavepos, \pdflastxpos, and \pdflastypos to
+ % save the position of index terms on the page
+ \fi
+\fi
\tenpoint
-%\registerlegend{{8}{\noexpand\phantom{B}}}
+
diff --git a/support/splint/tex/noweb.sty b/support/splint/tex/noweb.sty
new file mode 100644
index 0000000000..a0f2c7cf43
--- /dev/null
+++ b/support/splint/tex/noweb.sty
@@ -0,0 +1,399 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+\def\stseclap{\rightskip=0pt % get out of C mode (cf. \B)
+ \sfcode`;=1500 \pretolerance 200 \hyphenpenalty 50 \exhyphenpenalty 50
+ \noindent{\let\*=\empty\llap{\tentitle\setsafesecno{\secno}\quad}}% push it to the margins
+ \putxref{\MNM}{}{}.% generate a location reference for noweb cross referencing style
+ \ifpdftex\smash{\raise\baselineskip\hbox to0pt{%
+ \let\*=\empty\ifmakepdf\pdfdest num \secstar fith\fi}}% this space is a bug in the original cwebmac.tex; AS
+ \else
+ \ifpdf
+ \smash{\raise\baselineskip
+ \hbox to0pt{%
+ \let\*=\empty\special{%
+ pdf: dest (\romannumeral\secstar) [ @thispage /FitH @ypos ]%
+ }%
+ }%
+ }%
+ \fi
+ \fi
+}
+
+\def\stsecchap#1#2{\rightskip=0pt % get out of C mode (cf. \B)
+ \sfcode`;=1500 \pretolerance 200 \hyphenpenalty 50 \exhyphenpenalty 50
+%
+ \ifnum#1>\z@ % the `level' of this section is below that of a chapter
+ \noindent{\let\*=\empty\llap{\tentitle
+ \setsafesecno{\secno}\quad}}{\ttl #2}% push it to the margins
+ \putxref{\MNM}{}{}.% generate a location reference for noweb cross referencing style
+ \smallskip\noindent
+ \else
+ \setchaptertitle{\secno}{#2}%
+ \putxref{\MNM}{0}{}.% generate a chapter location reference for noweb cross referencing style
+ \fi
+%
+ \ifpdftex\smash{\raise\baselineskip\hbox to0pt{%
+ \let\*=\empty\ifmakepdf\pdfdest num \secstar fith\fi}}% this space is a bug in the original cwebmac.tex; AS
+ \else\ifpdf\smash{\raise\baselineskip\hbox to0pt{%
+ \let\*=\empty\special{%
+ pdf: dest (\romannumeral\secstar) [ @thispage /FitH @ypos ]}}}\fi\fi}
+
+\def\setchaptertitle#1#2{%
+ \global\chapterheadtrue
+ \null
+ {\ninepoint\tempda=\baselineskip
+ \multiply\tempda by 17
+ \vskip\tempda plus\baselineskip minus 2pt
+ }%
+ \vbox to 0pt{%
+ \vss
+ \tabskip=0pt plus 1 fil
+ \halign to\hsize{%
+ \hfil##\tabskip=0pt\cr
+ \hugetitle\setsafechapno{#1}\cr
+ \noalign{\vskip 1pc}%
+ \midtitle#2\cr
+ }%
+ }%
+ \prevdepth=0pt
+ \bigskip
+ \noindent{\let\*=\empty\llap{\tentitle\setsafesecno{\secno}\quad}}%
+}
+
+\let\startsection\stseclap
+
+\expandafter\def\expandafter\oldB\expandafter{\oldB\global\csectionstarttrue\putxref{\BBB}{}{}.}
+
+\def\putxref#1#2.{% it is important that #2 contains at least two braced groups (as in {}{})
+ % otherwise the parameter scanning mechanism will strip the outside braces
+ \ifxreflocal
+ {\edef\next{\write\xrefstream{\nx\nx\nx#1{\nx\the\nx\pageno}{\secno}#2\harmlesscomment}}\next}%
+ \fi
+}
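+
+% a commented sketch of the record written above (the page and section numbers
+% are hypothetical): with \xreflocaltrue, a call such as \putxref{\MNM}{}{}. made
+% on page 12 inside section 34 appends a line of the form
+%
+%   \MNM{12}{34}{}{}%
+%
+% to the cross reference file that \readlxrefs reads back on the next run.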
+
+\def\X#1:#2\X{\ifmmode\gdef\XX{\null$\null}\else\gdef\XX{}\fi %$% section name
+ \XX$\langle\,${\let\I=\ne#2\eightrm\kern.5em
+ \ifacro{\pdfnote#1.}\else#1\fi}$\,\rangle$\putpathindex{\secno}{#1}\XX\putxrefx{#1}{#2}}
+
+\def\putxrefx#1#2{%
+ \ifxreflocal
+ {\def\sname{#2}%
+ \def\stripprefix##1>{ }%
+ \let\*\empty
+ \edef\next{\write\xrefstream{\nx\nx\nx\XXX{\nx\the\nx\pageno}{\secno}{#1}\harmlesscomment
+ \expandafter\stripprefix\meaning\sname}}\next}%
+ \fi
+}
+
+\newif\ifcsectionstart % does this section name appear at the beginning of a \Cee\ section?
+
+\def\putpathindex#1#2{%
+ \ifcsectionstart
+ \global\csectionstartfalse\strut
+ \vadjust{%
+ \setbox0=\hbox{\strut}%
+ \kern-\dp0
+ \vbox to 0pt{
+ \vss
+ \hbox to\pagewidth{%
+ \hfil\rlap{%
+ \let\*\empty
+ \kern1em \setpathlinks{#1}{#2}%
+ }%
+ }%
+ }%
+ \setbox0=\hbox{\strut}%
+ \kern\dp0
+ }%
+ \fi
+}
+
+\def\setpathlinks#1#2{%
+ \expandafter\ifx\csname xchain[#1][#2]down\endcsname\relax
+ \expandafter\ifx\csname xchain[#1][#2]up\endcsname\relax
+ \else
+ \setuplink{#1}{#2}{\hfil##\hfil}%
+ \fi
+ \else
+ \expandafter\ifx\csname xchain[#1][#2]up\endcsname\relax
+ \setdownlink{#1}{#2}{\hfil##\hfil}%
+ \else
+ \setuplink{#1}{#2}{##\hfil}
+ \setdownlink{#1}{#2}{\hfil##}%
+ \fi
+ \fi
+}
+
+\def\setdownlink#1#2#3{%
+ {\setbox0\vtop{
+ \sevenpoint\halign{#3\cr
+ \xref@nowebsection{\csname xchain[#1][#2]down\endcsname}\cr
+ \relax\ifacro
+ \pdflink{\csname xchain[#1][#2]down\endcsname}{\raise3.5pt\hbox{$\scriptscriptstyle\bigtriangledown$}}%
+ \else
+ \raise3.5pt\hbox{$\scriptscriptstyle\bigtriangledown$}%
+ \fi
+ \cr
+ }
+ }%
+ \dp0=0pt \box0}%
+}
+
+\def\setuplink#1#2#3{%
+ {\setbox0\vbox{
+ \sevenpoint\halign{#3\cr
+ \relax\ifacro
+ \pdflink{\csname xchain[#1][#2]up\endcsname}{\lower1.3pt\hbox{$\scriptscriptstyle\bigtriangleup$}}%
+ \else
+ \lower1.3pt\hbox{$\scriptscriptstyle\bigtriangleup$}%
+ \fi
+ \cr
+ \xref@nowebsection{\csname xchain[#1][#2]up\endcsname}\cr
+ }
+ }%
+ \box0}%
+}
+
+\def\makenote{%
+ \addtokens\toksB{\noexpand\pdflink{\the\toksC}{\setsafesecno{\the\toksC}}}%
+ \toksC={}\global\countC=0
+}
+
+% redefine \pdflink; N.B.: the semantics of this command sequence have changed, since now
+% #2 is not ignored (as it would have been if using pdftex); instead it becomes a typesetting
+% template for the link
+
+\ifpdftex
+ \ifx\pdfannotlink\undefined\let\pdfannotlink\pdfstartlink\fi% for pdfTeX 0.14
+ \def\pdflink#1#2{\hbox{\pdfannotlink height\ht\strutbox depth\dp\strutbox
+ attr{/Border [0 0 0]} goto num #1 \BlueGreen #2\Black\pdfendlink}}
+\else\def\pdflink#1#2{\setbox0=\hbox{\special{pdf: bc [ \pdflinkcolor ]}{#2}%
+ \special{pdf: ec}}\special{pdf: ann width \thewidth height \theheight
+ depth \thedepth << /Type /Annot /Subtype /Link
+ /Border [0 0 0] /A << /S /GoTo /D (\romannumeral#1) >> >>}\box0\relax}\fi
+
+% while typesetting the table of contents, show the actual section numbers
+% TODO: introduce section accounting for book style typesetting.
+
+\def\contentsline#1#2#3#4#5{\ifnum#2=0 \smallbreak\fi
+ \line{\consetup{#2}#1
+ \rm\leaders\hbox to .5em{.\hfil}\hfil
+ \ \ifacro\pdflink{#3}{#3}\else#3\fi\hbox to3em{\hss#4}}}
+
+\newif\ifxreflocal % are we generating references in noweb style (as page no., position pairs)?
+
+\newread\trystream % generic stream to test for an existence of a file
+
+\def\readlxrefs{% see if the local reference file exists
+ \openin\trystream=\jobname.xxr
+ \ifeof\trystream
+ \else
+ \closein\trystream
+ \input \jobname.xxr
+ \fi
+}
+
+\newif\ifcsecactive
+
+\def\BBB#1#2#3#4{% #1 is the page number
+ % #2 is the section number
+ \csecactivetrue
+}
+
+\def\lxrcurrentpage{-1}
+
+\newcount\lxrcurrentindex
+\newcount\chaptercount
+
+\lxrcurrentindex=\z@
+
+\def\MNM#1#2#3#4{% #1 is the page number
+ % #2 is the section number
+ % #3 is the chapter flag or empty
+ % #4 is reserved
+ \ifnum#1=\lxrcurrentpage
+ \else
+ \lxrcurrentindex=\z@
+ \def\lxrcurrentpage{#1}%
+ \fi
+ {%
+ \tomodalpha\lxrcurrentindex\next
+ \def\nnext{\expandafter\noexpand\csname lxref[#2]\endcsname}%
+ \yystringempty{#3}{%
+ \let\nnnext\empty
+ }{%
+ \global\advance\chaptercount\@ne
+ \def\nnnext{\the\chaptercount}%
+ }%
+ \edef\next{\def\nnext{{#1}{\next}{\nnnext}}}%
+ \expandafter
+ }\next
+ \advance\lxrcurrentindex\@ne
+}
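+
+% a commented sketch of the bookkeeping above (the numbers are hypothetical):
+% if section 34 is the third section that begins on page 12, the record read
+% back from the cross reference file defines \csname lxref[34]\endcsname to
+% expand to {12}{c}{}; for a chapter level section the third group carries the
+% running \chaptercount instead of being empty.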
+
+\def\xref@nowebsection#1{% translate the \CWEB\ section number into a \noweb\ reference
+ \expandafter\xr@f@nowebsection\romannumeral-1\csname lxref[#1]\endcsname.%
+}
+
+\def\xr@f@nowebsection#1#2#3.{%
+ {#1}{#2}%
+}
+
+\def\xref@chapter#1{% translate the \CWEB\ section number into a chapter number
+ \expandafter\xr@f@chapter\romannumeral-1\csname lxref[#1]\endcsname.%
+}
+
+\def\xr@f@chapter#1#2#3#4.{%
+ \yystringempty{#3}{{000}}{%
+ {#3}%
+ }%
+}
+
+\def\setsafesecno#1{%
+ \expandafter\ifx\csname lxref[#1]\endcsname\relax
+ #1%
+ \else
+ \xref@nowebsection{#1}%
+ \fi
+}
+
+\def\setsafechapno#1{%
+ \expandafter\ifx\csname lxref[#1]\endcsname\relax
+ #1%
+ \else
+ \xref@chapter{#1}%
+ \fi
+}
+
+\def\tomodalpha#1#2{%
+ \tempca=#1
+ \let#2\empty
+ \bloop
+ \tempcb=\tempca
+ \divide\tempca by 26
+ \tempcc=\tempca
+ \multiply\tempcc by 26
+ \advance\tempcb by -\tempcc
+ \advance\tempcb by `\a
+ \uccode`\.=\tempcb
+ \uppercase{\edef#2{.#2}}%
+ \advance\tempca\m@ne
+ \ifnum\tempca<\z@
+ \else
+ \repeat
+}
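+
+% a commented sketch of the conversion above: \tomodalpha{0}\next leaves `a' in
+% \next, \tomodalpha{1}\next leaves `b', and \tomodalpha{25}\next leaves `z';
+% larger arguments grow extra letters on the left (26 gives `aa', 27 gives `ab'),
+% which is how the per-page section labels a, b, ..., z, aa, ab, ... are produced.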
+
+\def\XXX#1#2#3{%
+ \ifcsecactive % previous command was \BBB (produced by the \B macro)
+% \expandafter\def\csname lxref[][]\endcsname{}%
+ \csecactivefalse
+ \fi
+}
+
+\def\SSS{\let\XXX\xxxchain} % reference separator
+
+\def\lxrefseparator{%
+ \write\xrefstream{\nx\SSS}%
+}
+
+\def\xxxchain#1#2#3{%
+ \xxxch@in{}{}#3, .%
+}
+
+\newif\iftracenowebchains
+
+\def\xxxch@in#1#2#3, #4.{% #1 is the chain head
+ % #2 is the chain previous
+ % #3 is the chain next
+ % #4 is the remaining chain
+ \yystringempty{#1}{% start the chain
+ \yystringempty{#3}{%
+ \errmessage{The reference chain (#3) is malformed.}%
+ }{%
+ \xxxch@in{#3}{}#4, .%
+ }%
+ }{%
+ \yystringempty{#2}{% potentially the second link
+ \yystringempty{#3}{% this is a one link chain
+ }{% this is a second link
+ \expandafter\def\csname xchain[#1][#1]down\endcsname{#3}%
+ \expandafter\def\csname xchain[#3][#1]up\endcsname{#1}%
+ \iftracenowebchains\message{s(#1.->#3 #3->#1.)}\fi
+ \xxxch@in{#1}{#3}#4, .%
+ }%
+ }{% this is the middle or the end of the chain
+ \yystringempty{#3}{% this is the end of the chain
+ }{%
+ \expandafter\def\csname xchain[#3][#1]up\endcsname{#2}%
+ \expandafter\def\csname xchain[#2][#1]down\endcsname{#3}%
+ \iftracenowebchains\message{m(#1)(#2->#3 #3->#2)}\fi
+ \xxxch@in{#1}{#3}#4, .%
+ }%
+ }%
+ }%
+}
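+
+% a commented sketch of the chains built above (the numbers and the comma
+% separated list are hypothetical): \xxxchain{}{}{12, 34, 56} records
+%
+%   xchain[12][12]down = 34,  xchain[34][12]up = 12,
+%   xchain[34][12]down = 56,  xchain[56][12]up = 34,
+%
+% i.e.\ every section in the chain learns its neighbours relative to the
+% chain head (section 12); \setpathlinks consults these control sequences to
+% typeset the up and down navigation marks.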
+
+% replace the default reference setting macros in gindex.sty;
+% typeset references so that, for example, \(77){8, 9} becomes
+% \(\pdflink{77}{8c}), \(\pdfpagelink{9}) provided section 77
+% is section 8c by noweb's reckoning.
+
+\def\consumeonexref#1#2{% #1 is the accumulated references
+ % #2 is the section number
+ \expandafter\ifx\csname lxref[#2]\endcsname\relax
+ \errmessage{Section #2 does not have a local index.}%
+ \else
+ \yybreak{\expandafter\c@nsumeonexref\romannumeral-1\xref@nowebsection{#2}{#2}{#1}}%
+ \yycontinue
+}
+
+\def\c@nsumeonexref#1#2#3#4#5{% #1 is the page number of the beginning of the section
+ % #2 is the (\noweb) section number (within the page) where the term appears
+ % #3 is the (\CWEB) section number where the term appears
+ % #4 is the accumulated references
+ % #5 is a list of pages where this term appears
+ \c@nsume@nexref{#1}{#2}{#3}{#4}{}#5, \end
+}
+
+\def\c@nsume@nexref#1#2#3#4#5#6, {% #1 is the page number of the beginning of the section
+ % #2 is the (\noweb) section number (within the page) where the term appears
+ % #3 is the (\CWEB) section number where the term appears
+ % #4 is a total list of processed references
+ % #5 is a local list of processed references
+ % #6 is a page number from the list
+ \ifnum#1=#6 % the term appears on the same page where the section begins
+ \yybreak{\c@nsume@nexr@f{#1}{#2}{#3}#4{#5}{\compoundlink{#3}{#1#2}}}%
+ \else
+ \yybreak{\c@nsume@nexr@f{#1}{#2}{#3}#4{#5}{\pagelink{#6}}}%
+ \yycontinue
+}
+
+\def\c@nsume@nexr@f#1#2#3#4#5#6#7#8{%
+ \c@nsume@n@xr@f{#1}{#2}{#3}{{#4}{#5}{#6}}{#7, #5#8#4}%
+}
+
+\def\c@nsume@n@xr@f#1#2#3#4#5#6\end{% no more page numbers
+ \yystringempty{#6}{%
+ \attachlocallist{#1}{#2}{#3}#4{#5}%
+ }{%
+ \c@nsume@nexref{#1}{#2}{#3}{#4}{#5}#6\end
+ }%
+}
+
+\def\attachlocallist#1#2#3#4#5#6#7{%
+ \grabfinexrefs{#6#7}%
+}
diff --git a/support/splint/tex/stokenset.sty b/support/splint/tex/stokenset.sty
index 7146fcf0c1..9382415f6c 100644
--- a/support/splint/tex/stokenset.sty
+++ b/support/splint/tex/stokenset.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,10 +14,19 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\prettywordpair{OPTIONAL}{{\tt opt}}%
-\prettywordpair{NO_ATTR}{{\tt na}}%
-\prettywordpair{EXTENDED}{{\tt ext}}%
-\prettywordpair{INTEGER}{{$[\,0\ldots9\,]\ast$}}%
-\prettywordpair{IDENTIFIER}{{$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}%
-\prettywordpair{WILDCARD}{{{\tt *} {\rm or} {\tt ?}}}%
-\prettywordpair{PERCENT_IDENTIFIER}{{\tt\%$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}%
+% token typesetting for the name parser; we forgo setting up
+% automatic token typesetting (see the xxpression example for how
+% this can be done) since the token set is rather small
+
+\prettywordpairwvis{OPTIONAL}{{\tt opt}}{opt}%
+\prettywordpairwvis{NO_ATTR}{{\tt na}}{na}%
+\prettywordpairwvis{EXTENDED}{{\tt ext}}{ext}%
+\prettywordpair{LT}{{\tt l}}%
+\prettywordpair{RT}{{\tt r}}%
+\prettywordpairwvis{$\ undefined}{{\tt\$undefined}}{$undefined}
+\prettywordpairwvis{INTEGER}{{$[\,0\ldots9\,]\ast$}}{[0...9]}%
+\prettywordpairwvis{IDENTIFIER}{{$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}{[a...Z0...9]*}%
+\prettywordpair{META_IDENTIFIER}{{\cyr\lqq{\rm meta identifier}\rqq}}%
+\prettywordpairwvis{WILDCARD}{{{\tt *} {\rm or} {\tt ?}}}{* or ?}%
+\prettywordpairwvis{C_ESCCHAR}{\hbox{{\sixpoint\.{\\}}$c$}}{\benignescape c}%
+\prettywordpairwvis{PERCENT_IDENTIFIER}{{\tt\%$[\,\hbox{\tt a}\ldots\hbox{\tt Z}\,0\ldots9\,]\ast$}}{\%[a...Z0...9]*}%
diff --git a/support/splint/tex/trt1.sty b/support/splint/tex/trt1.sty
index e3e722c91f..7783e15767 100644
--- a/support/splint/tex/trt1.sty
+++ b/support/splint/tex/trt1.sty
@@ -1,4 +1,4 @@
-% Copyright 2014, Alexander Shibakov
+% Copyright 2014-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -33,5 +33,7 @@
\newtoks\toksd
\newtoks\tokse
\newtoks\toksf
+\newtoks\toksg
+\newtoks\toksh
\expandafter\def\csname loadtexruntime1\endcsname{\endinput}
diff --git a/support/splint/tex/xarithm.sty b/support/splint/tex/xarithm.sty
index 4d424a863c..d971092043 100644
--- a/support/splint/tex/xarithm.sty
+++ b/support/splint/tex/xarithm.sty
@@ -1,4 +1,4 @@
-% Copyright 2014, Alexander Shibakov
+% Copyright 2014-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
diff --git a/support/splint/tex/yxunion.sty b/support/splint/tex/yxunion.sty
index fb0d2460b3..4d03d83c78 100644
--- a/support/splint/tex/yxunion.sty
+++ b/support/splint/tex/yxunion.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
diff --git a/support/splint/tex/yy.sty b/support/splint/tex/yy.sty
index 22a7948e9d..96769a0fc5 100644
--- a/support/splint/tex/yy.sty
+++ b/support/splint/tex/yy.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -27,14 +27,12 @@
\newwrite\tokendefs
\let\nx\noexpand
-\def\drvname{bo}
-
\input yycommon.sty % general routines for stack and array access
\input yymisc.sty % helper macros (stack manipulation, table processing, value stack pointers)
+ % parser initialization, optimization
\input yyinput.sty % input functions
\input yyparse.sty % parser machinery
\input flex.sty % lexer functions
-\input yyboth.sty % parser intitialization, optimization
\ifnum\optimization>\tw@
\input yyfaststack.sty
@@ -43,17 +41,31 @@
 \input yystype.sty % scanner auxiliary types and functions
\input yyunion.sty % parser data structures
+% modify the input routine to recognize \yyendgame; the somewhat verbose end of
+% the section is necessary to gracefully handle parser failures: the \endparse
+% control sequence must appear outside of any \vb block for \cleanupparse
+% to do its job; the \yyinput reads \vb blocks in pairs so the closing \vb{} is
+% necessary and is removed by \removefinalvb (see limbo.sty and brack.pl for
+% details).
+
+\expandafter\def\expandafter\multicharswitch\expandafter
+{\multicharswitch\yyendgame{\yyinput\yyeof\yyeof\endparseinput\removefinalvb}}
+
\def\indexpseudonamespace{[index]}
% the main parser
\let\parsernamespace\empty
+% the bootstrapping parser needs the name of the file where token equivalences will appear
+
\def\modebootstrap{%
- \edef\tokendeffile{\drvname.tok}%
+ \edef\tokendeffile{\jobname.tok}% so that the name of the token file can track the name of the parser
\edef\bstrapparser{byytab.tex}% sets \bootstrapmodetrue
\def\bootstraplexersetup{\let\yylexreturn\yylexreturnbootstrap}% only return tokens whose value is known at bootstrap
\input yybootstrap.sty%
+ \def\PB##1{}
+ \def\inlineTeXx##1{}
}
\def\modenormal{%
@@ -67,10 +79,7 @@
% \csname ##1\parsernamespace\the##2\endcsname
%}%
\input yyinit.sty%
+ \input yytexlex.sty%
}
-\ifx\modeactive\UNDEFINED
- \def\modeactive{\modenormal}
-\fi
-
-\modeactive
+\input yydebug.sty
diff --git a/support/splint/tex/yybootstrap.sty b/support/splint/tex/yybootstrap.sty
index 185ad3b0a6..97ddb1a666 100644
--- a/support/splint/tex/yybootstrap.sty
+++ b/support/splint/tex/yybootstrap.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -21,23 +21,36 @@
\immediate\openout\tokendefs=\tokendeffile
\fi
\genericparser
- name: main,
+ name: bootstrap,
ptables: \bstrapparser,
ltables: cweb/ltab.tex,
tokens: {},
asetup: \bootstraplexersetup,
- dsetup: \newlexerstateextra,
- rsetup: \noexpand\savefullstateextra,
+ dsetup: \newlexerstateextra\newparserstateextra,
+ rsetup: {},
optimization: \optimizeall;%
-\tomainparser % this saves the current state
+\tobootstrapparser % this saves the current state
-\input yytexlex.sty
+% stage two macros: parsing
+% note that the bootstrap parser has no way of telling if the \%token declarations
+% belong to the current parser or not (for example, the \flex\ parser has its own
+% declarations); it is thus important to make sure that the \%token declarations in
+% the file visible to the parser belong to a single parser (so the file read by the
+% bootstrap parser does not have declaration sections for a parser other than
+% \bison's).
-\let\unparse\eatone
+\def\preparsebootstrap{%
+ \let\postparse\postparsebootstrap
+ \tobootstrapparser % this is not currently required since there are no namespace switching macros
+ % in the \TeX\ portion of the text; it is left here in case such macros are used in the future
+ \basicparserinit
+ \bisonparserinit
+ \bisonparserdatainit
+ \yyparse
+}
-\expandafter\def\csname parserstack[b]\endcsname#1#2{%
- \parsevb{#1}% Stage two, start the parsing
+\def\postparsebootstrap{%
\ifyyparsefail % do nothing if parsing failed
\yybreak{}%
\else % Stage three, process the parsed table
@@ -45,26 +58,32 @@
\yycontinue
}
-\expandafter\def\csname parserstack[]\endcsname#1#2{%
- \parsevb{#1}% Stage two, start the parsing
- \ifyyparsefail % do nothing if parsing failed
- \yybreak{}%
- \else % Stage three, process the parsed table
- \yybreak{\initbootstrap\the\table}%
- \yycontinue
+\fillpstack{b}{%
+ \preparsebootstrap
+ \relax
}
-% stage two macros: parsing
+% sections with no parser tag are assumed to be bison sections
-\def\parsevb#1{%
- %\ifchecktable {\toks0{#1}\errmessage{table before parsing: \the\toks0}}\fi
- \tomainparser % this is not currently required since there are no namespace switching macros
- % in the \TeX\ portion of the text; it is left here in case such macros are used in the future
- \doparse{#1}%
+\fillpstack{}{%
+ \preparsebootstrap
+ \relax
}
-\long\def\beginprod#1\endprod{%
- \endgroup
-}
+% ignore in-text production examples (there is no way to parse them anyway)
+
+\long\def\beginprod#1\endprod{}
\let\begincprod\beginprod
+
+\def\nameproc#1\with#2{%
+ #2{}{}{}{}{}% pretend the name is empty
+}
+
+\def\frexproc#1\with#2{%
+ #2{}{}{}% pretend the regex is empty
+}
+
+\def\prodstyle#1{}
+
+\input yydebug.sty
diff --git a/support/splint/tex/yyboth.sty b/support/splint/tex/yyboth.sty
deleted file mode 100644
index f15fe006c2..0000000000
--- a/support/splint/tex/yyboth.sty
+++ /dev/null
@@ -1,181 +0,0 @@
-% Copyright 2012-2014, Alexander Shibakov
-% This file is part of SPLinT
-%
-% SPLinT is free software: you can redistribute it and/or modify
-% it under the terms of the GNU General Public License as published by
-% the Free Software Foundation, either version 3 of the License, or
-% (at your option) any later version.
-%
-% SPLinT is distributed in the hope that it will be useful,
-% but WITHOUT ANY WARRANTY; without even the implied warranty of
-% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-% GNU General Public License for more details.
-%
-% You should have received a copy of the GNU General Public License
-% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-
-% parser environment
-
-\newif\ifyyskipparse
-\newif\ifsaveparseoutput
-\newif\ifchecktable
-\newif\ifchecktrim
-
-\newwrite\exampletable
-
-\def\basicparserinit{%
- \yytext{}%
- \yytextpure{}%
- \yytextseenpure{}%
- \yytextseen{}%
- \yybyte{}%
- \yyfbyte{}\yysbyte{}%
- \yystash{}%
- \yystashseen{}%
- \yyformat{}%
- \yyformatseen{}%
- \yyfutureyytext{}%
- \yyinitstack\yystatestack
- \yyfmark=\z@
- \yysmark=\z@
- \yyfmarklast=\z@
- \formatmarker=\z@
- \yysmarklast=\z@
- \stashmarker=\z@
- \yytextbackupfalse
- \yyg@yyinit=\z@
- \yyg@yystart=\z@
- \YYATBOL=\@ne
- \yyparsefailfalse
- \YYEOBLASTMATCHtrue
-}
-
-\def\parserinit{%
- \basicparserinit
- \yyinitstack\obstackforstring
- \yyinitstack\obstackforstringraw
- \percentpercentcount=\z@
- \lonesting=\z@
- \laststring{}\laststringraw{}%
- \currentlaststring{}\currentlaststringraw{}%
- \parserdatainit
-}
-
-\def\parserdatainit{%
- % make control sequences inserted by the parser non expandable to facilitate
- % token list manipulation; currently nothing is done
-}
-
-% optimization options
-
-\def\optimizeall{%
- % lexer
- \ifnum\optimization>\z@
- \optimize{yynxt}%
- \optimize{yyaccept}%
- \optimize{yydef}%
- \optimize{yychk}%
- \optimize{yybase}%
- \optimize{yyec}%
- \optimize{yymeta}%
- \tracingstats=\@ne
- \fi
- % parser
- \ifnum\optimization>\@ne
- \optimize{yytranslate}%
- \optimize{yyrone}%
- \optimize{yyrtwo}%
- \optimize{yyrthree}%
- \optimize{yydefact}%
- \optimize{yydefgoto}%
- \optimize{yypact}%
- \optimize{yypgoto}%
- \optimize{yytable}%
- \optimize{yycheck}%
- \optimize{yyprhs}%
- \optimize{yyrhs}%
- \optimize{yytoknum}%
- \optimize{yystos}%
- \optimizetext{yytname}%
- \fi
-}
-
-\long\def\endgsec#1#2#3{%
- \ifyyskipparse
- \yybreak{\unparse{#3}}%
- \else
- \yybreak{\csname parserstack[#2]\endcsname{#1}{#3}}%
- \yycontinue
-}
-
-\let\endcprod\endgroup
-\let\endmprod\endgroup
-\let\endprod\endgroup
-
-\def\genericparser name: #1, ptables: #2, ltables: #3, tokens: #4, asetup: #5, dsetup: #6, rsetup: #7, optimization: #8;{%
- % parser initialization
- %
- \expandafter\def\csname #1namespace\endcsname{[#1]}%
- \savecs{local-namespace}\parsernamespace
- \expandafter\let\expandafter\parsernamespace\csname #1namespace\endcsname
- \pinittoks{}%
- \input #2 % load main parser table
- \settokens % set the values of all tokens
- \yystringempty{#4}{}{%
- \input #4 % use token equivalence table to set the values of non-string tokens
- }%
- #5% additional setups
- %
- \input #3 % load lexer tables
- %
- % at this point the macros inside the table files (\newtable, \constset,
- % \yybigswitch, \stashswitch, \addname, \yydoactionswitch, \setflexstates,
- % \stateset, \tokeneq) have set up the corresponding stuctures in
- % the `parser namespace' (e.g. if the parser namespace is `main',
- % \newtable{yyaccept} created a token register \yyaccept[main]),
- % assigned the `generic' names to them (to continue
- % the example above, \newtable does \let\yyaccept\yyaccept[main]) and
- % recorded the corresponding commands in \pinittoks for future use.
- %
- % lexer state macros are namespace specific (just like token names)
- % so they have to be set in each namespace.
- %
- \setflexstates
- #8% possible optimization
- %
- % finally, we add the definitions for the variables used in running
- % the lexer and the parser.
- \newparserstate
- \newlexerstate
- #6% additional data setup (say, \newlexerstateextra)
- %
- % we record all the commands necessary to switch to the desired namespace
- % in a convenient macro
- \expandafter\edef\csname to#1parser\endcsname{%
- \noexpand\savefullstate % save the state of the current parser
- #7% any data that needs to be saved
- % switch to the new namespace
- \let\noexpand\parsernamespace\expandafter\noexpand\csname #1namespace\endcsname
- \the\pinittoks % restore all the tables, tokens and constants, and stacks
- \let\noexpand\getcurrentparser\expandafter\noexpand\csname to#1parser\endcsname
- }%
- \restorecs{local-namespace}\parsernamespace
-}
-
-\def\genericprettytokens namespace: #1, tokens: #2, correction: #3, host: #4;{%
- \savecs{local-namespace}{\parsernamespace\tokeneq}%
- \yystringempty{#2}{}{%
- \expandafter\let\expandafter\parsernamespace\csname #1namespace\endcsname
- \def\tokeneq##1##2{\prettytoken{##1}}%
- \let\tokenpp\prettytoken
- \input #2 % /* re-use token equivalence table to set the typesetting of tokens */
- }%
- \yystringempty{#3}{}{%
- \expandafter\let\expandafter\parsernamespace\csname #1namespace\endcsname
- \input #3 % input customized typesetting rules for tokens
- }%
- \yystringempty{#4}{}{%
- \expandafter\let\expandafter\hostparsernamespace\csname #4namespace\endcsname
- }%
- \restorecs{local-namespace}{\parsernamespace\tokeneq}%
-}
diff --git a/support/splint/tex/yycommon.sty b/support/splint/tex/yycommon.sty
index 5058fafe8a..a8144efaa4 100644
--- a/support/splint/tex/yycommon.sty
+++ b/support/splint/tex/yycommon.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -38,9 +38,9 @@
% the following macro is a mild example of expansion tricks
\def\yypopstack#1\by#2{%
- \ifnum#2>\z@
- \yyp@pst@ck{#1}{#2}%
- \fi
+ \ifnum#2>\z@
+ \yyp@pst@ck{#1}{#2}%
+ \fi
}
\def\yyp@pstack#1{%
@@ -53,8 +53,8 @@
\let.\expandafter
\def\yyp@pst@ck#1#2{%
- \let\sts\or
- \iffalse{\fi...\def...#1...{...\sts.\ifcase\number\number.\xincrement.{\number#2} \yyp@pstack#1}\else}\fi
+ \let\sts\or
+ \iffalse{\fi...\def...#1...{...\sts.\ifcase\number\number.\xincrement.{\number#2} \yyp@pstack#1}\else}\fi
}
\catcode`\.=12 % other character
@@ -64,17 +64,21 @@
\def\yypop#1\into#2{\def\sts{\consumeone{#2}}#1\stackend#1}
\long\def\consumeone#1#2{%
- #1{#2}\let\sts\scoopupstack
+ #1{#2}\let\sts\scoopupstack
}
% pushing stuff on a stack: \yypush{t o k e n s}\on\yyvs or \expandafter\yypush\number\yystate\on\yyss
-\long\def\yypush#1\on#2{\toksa={#1}\expandafter\toksb\expandafter{#2}\edef#2{\noexpand\sts{\the\toksa}\the\toksb}}
+\long\def\yypush#1\on#2{\expandafter\def\expandafter#2\expandafter{\romannumeral\yyp@sh{#2}{#1}}}
+
+\long\def\yyp@sh#1#2{\expandafter\yyp@@h\expandafter{#1}{#2}}
+
+\long\def\yyp@@h#1#2{0 \sts{#2}#1}
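+
+% an illustrative sketch (the stack name below is made up): assuming a stack macro
+% that starts out empty,
+%   \def\yyvsa{}\yypush{a}\on\yyvsa \yypush{b}\on\yyvsa
+% leaves \yyvsa defined as `\sts{b}\sts{a}'; the leading `0 ' produced by \yyp@@h
+% is consumed by the \romannumeral in \yypush and leaves no trace in the result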
% push register contents on a stack: #1 is a register, #2 is a stack (a control
% sequence that expands to a `\sts{v a l u e}\sts...' list)
-\def\yypushr#1\on#2{\expandafter\toksa\expandafter{#2}\edef#2{\noexpand\sts{\the#1}\the\toksa}}
+\def\yypushr#1\on#2{\expandafter\yypush\expandafter{\the#1}\on#2}
% the first parameter is the stack, the second is the location from the top (a nonnegative number), the third is
% the control sequence that will hold the value;
@@ -82,12 +86,12 @@
\def\yyreadstack#1\at#2\to#3{\edef\sts{\noexpand\skipandcount{\number#2}{\noexpand#3}}#1\stackfinish#1}
\def\skipandcount#1#2#3{%
- \ifnum#1=\z@ %we have got to the element we need
- \def#2{#3}%
- \yybreak\ignorestack
- \else
- \yybreak{\edef\sts{\noexpand\skipandcount{\xdecrement{#1}}{\noexpand#2}}}%
- \yycontinue
+ \ifnum#1=\z@ %we have got to the element we need
+ \def#2{#3}%
+ \yybreak\ignorestack
+ \else
+ \yybreak{\edef\sts{\noexpand\skipandcount{\xdecrement{#1}}{\noexpand#2}}}%
+ \yycontinue
}
% same as above except read the value into a register
@@ -95,12 +99,12 @@
\def\yyreadstackr#1\at#2\to#3{\edef\sts{\noexpand\skipandcountr{\number#2}{#3}}#1\stackfinish#1}
\def\skipandcountr#1#2#3{%
- \ifnum#1=\z@ %we have got to the element we need
- #2#3%
- \yybreak\ignorestack
- \else
- \yybreak{\edef\sts{\noexpand\skipandcountr{\xdecrement{#1}}{\noexpand#2}}}%
- \yycontinue
+ \ifnum#1=\z@ %we have got to the element we need
+ #2#3%
+ \yybreak\ignorestack
+ \else
+ \yybreak{\edef\sts{\noexpand\skipandcountr{\xdecrement{#1}}{\noexpand#2}}}%
+ \yycontinue
}
\long\def\ignorestack#1\stackfinish#2{}
@@ -108,53 +112,53 @@
\def\stackfinish#1{\def#1{0\message{:stack empty:}}}
\def\yyreadvstack#1\upto#2{% assume that #2 > 0
- \edef\sts{\noexpand\splitstack{\number#2}{\expandafter\xincrement\expandafter{\number\toptoks}}}#1\stackend#1%
+ \edef\sts{\noexpand\splitstack{\number#2}{\expandafter\xincrement\expandafter{\number\toptoks}}}#1\stackend#1%
}
\long\def\splitstack#1#2#3{%
- \expandafter\def\csname$'#1\endcsname{#3}% $
- \ifnum#2<\@cclvi % we have not reached the maximum allocated number of token registers
- \expandafter\toksdef\csname$$'#1\endcsname=#2
- \toks#2{#3}%
- \fi
- \ifnum#1=\@ne %we have read the values
- \let\sts\scoopupstack
- \else
- \edef\sts{\noexpand\splitstack{\xdecrement{#1}}{\xincrement{#2}}}%
- \fi
+ \expandafter\def\csname$'#1\endcsname{#3}% $
+ \ifnum#2<\@cclvi % we have not reached the maximum allocated number of token registers
+ \expandafter\toksdef\csname$$'#1\endcsname=#2
+ \toks#2{#3}%
+ \fi
+ \ifnum#1=\@ne %we have read the values
+ \let\sts\scoopupstack
+ \else
+ \edef\sts{\noexpand\splitstack{\xdecrement{#1}}{\xincrement{#2}}}%
+ \fi
}
\def\yypeekvstack#1\upto#2{% assume #2 > 0
- \edef\sts{\noexpand\peelstack{\number#2}{\expandafter\xincrement\expandafter{\number\toptoks}}}#1\relax%
+ \edef\sts{\noexpand\peelstack{\number#2}{\expandafter\xincrement\expandafter{\number\toptoks}}}#1\relax%
}
\long\def\peelstack#1#2#3{%
- \expandafter\def\csname$'#1\endcsname{#3}% $
- \ifnum#2<\@cclvi % we have not reached the maximum allocated number of token registers
- \expandafter\toksdef\csname$$'#1\endcsname=#2
- \toks#2{#3}%
- \fi
- \ifnum#1=\@ne %we have read the values
- \let\sts\eatone
- \else
- \edef\sts{\noexpand\peelstack{\xdecrement{#1}}{\xincrement{#2}}}%
- \fi
+ \expandafter\def\csname$'#1\endcsname{#3}% $
+ \ifnum#2<\@cclvi % we have not reached the maximum allocated number of token registers
+ \expandafter\toksdef\csname$$'#1\endcsname=#2
+ \toks#2{#3}%
+ \fi
+ \ifnum#1=\@ne %we have read the values
+ \let\sts\eatone
+ \else
+ \edef\sts{\noexpand\peelstack{\xdecrement{#1}}{\xincrement{#2}}}%
+ \fi
}
% macros to support new printing routines
\def\yypeeksstack#1\upto#2\withprefix#3{% assume #2 > 0
- \edef\sts{\noexpand\peelsstack{\number#2}}%
- \expandafter\def\expandafter\sts\expandafter{\sts{#3}{}}#1\relax%
+ \edef\sts{\noexpand\peelsstack{\number#2}}%
+ \expandafter\def\expandafter\sts\expandafter{\sts{#3}{}}#1\relax%
}
\long\def\peelsstack#1#2#3#4{%
- \ifnum#1=\@ne %we have read the values
- #3\let\sts\eatone
- \else
- \edef\sts{\noexpand\peelsstack{\xdecrement{#1}}}%
- \expandafter\def\expandafter\sts\expandafter{\sts{#2}{#2{#4}#3}}%
- \fi
+ \ifnum#1=\@ne %we have read the values
+ #3\let\sts\eatone
+ \else
+ \edef\sts{\noexpand\peelsstack{\xdecrement{#1}}}%
+ \expandafter\def\expandafter\sts\expandafter{\sts{#2}{#2{#4}#3}}%
+ \fi
}
% token register access
@@ -164,7 +168,11 @@
}
\def\concatl#1#2{% store the concatenation result in the second sequence
- #2\expandafter\expandafter\expandafter{\expandafter\the\expandafter#1\the#2}%
+ \expandafter\conc@tl\expandafter{\the#2}{#1}{#2}%
+}
+
+\def\conc@tl#1#2#3{%
+ #3\expandafter{\the#2#1}%
}
\def\appendr#1#2{%
@@ -179,12 +187,35 @@
\tokreturn{}{}{#1{\the#1}}%
}
+% appending to token registers without expansion
+
+\long\def\appendlnx#1#2{%
+ \expandafter\app@ndlnx\expandafter{\the#1}{#2}{#1}%
+}
+
+\long\def\app@ndlnx#1#2#3{%
+ #3{#2#1}%
+}
+
+% one can use #1\expandafter{\the#1#2} instead of \appendrnx below;
+% this form (as well as the \appendlnx above) has the advantage of
+% being useable with \romannumeral0 in case one merely wants to record
+% the concatenation for future use
+
+\long\def\appendrnx#1#2{%
+ \expandafter\app@ndrnx\expandafter{\the#1}{#2}{#1}%
+}
+
+\long\def\app@ndrnx#1#2#3{%
+ #3{#1#2}%
+}
+
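+% an illustrative sketch: if the token register \toksa holds `bc', then
+%   \appendlnx\toksa{a}   leaves \toksa holding `abc', and
+%   \appendrnx\toksa{d}   afterwards leaves \toksa holding `abcd';
+% in both cases the appended material is attached without being expanded
+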
% the following macros are an expandable way to determine if a token register is empty;
% while a number of different conditionals can be used, including plain \iffalse,
% this choice seems to result in a shortest macro and the fewest number of \expandafter's;
% an idea from
% http://tex.stackexchange.com/questions/2936/test-whether-token-list-is-empty
-% where it is attributed to Ulrich Diez can be generalized to make multiple tests inside braces
+% where it is attributed to Ulrich Diez can be generalized to apply multiple tests inside braces
% in a row; the macros from that discussion are quoted below; note, however, that these macros
% lead to unbalanced braces inside alignments (see The \TeX book, Appendix~D, p.~385 for the
% discussion of the `master counter' and the `balance counter' and their behavior when
@@ -205,17 +236,17 @@
% non-brace while the parameter scanning mechanism of \TeX\ will try
% to collect the smallest possible balanced input; the `excessive'
% braces will disappear in the expansion of the `\if...' construct;
-% the reason \if or other macros that expand their argumens are so well suited for this
+% the reason \if or other macros that expand their arguments are so well suited for this
% `chain expansion' mechanism is in the fact that the expansions for \string and \if... are launched
% from the same point.
\long\def\yytoksempty#1{%
- \iffalse{{\fi
- \if{\expandafter\yytoks@mpty\the#1}}}%
- \yybreak\yyfirstoftwo
- \else
- \yybreak\yysecondoftwo
- \yycontinue
+ \iffalse{{\fi
+ \if{\expandafter\yytoks@mpty\the#1}}}%
+ \yybreak\yyfirstoftwo
+ \else
+ \yybreak\yysecondoftwo
+ \yycontinue
}
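
% an illustrative usage sketch: \yytoksempty{\toksa}{<empty case>}{<nonempty case>}
% expands to the first branch when the token register \toksa is empty and to the
% second branch otherwise; being expandable, the test can be used where assignments
% are not permitted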
% when the token list is empty, \TeX\ will try to expand \yybreak prematurely;
@@ -225,12 +256,12 @@
% gleaned from `\TeX\ the program')
\long\def\yystringempty#1{%
- \iffalse{{\fi
- \if{\yytoks@mpty#1}}}%
- \yybreak\yyfirstoftwo
- \else
- \yybreak\yysecondoftwo
- \yycontinue
+ \iffalse{{\fi
+ \if{\yytoks@mpty#1}}}%
+ \yybreak\yyfirstoftwo
+ \else
+ \yybreak\yysecondoftwo
+ \yycontinue
}
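
% an illustrative usage sketch: \yystringempty{#1}{<empty case>}{<nonempty case>} is
% the analogue of \yytoksempty for macro arguments and is used throughout these files
% to test macro parameters for emptiness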
\catcode`\>=2
@@ -253,7 +284,7 @@
}
% the macros below are a derivation of David Kastrup's magnificent string comparison
-% macros below:
+% macros:
% \def\strequal#1{\number\strequalstart{}{}#1\relax}
% \def\strequalstart#1#2#3{\if#3\relax\strequalstop\fi
% \strequalstart{\if#3#1}{#2\fi}}
@@ -261,7 +292,7 @@
%
% use: \if\strequal{string}{string}...
%
-% they were adjusted to handle spaces in the strings and conform to a different
+% they were adjusted to handle spaces inside the strings and to conform to a different
% syntax, namely \yyifsamestring{string1}{string2}{true}{false}
% the original macros use the fact that, say \if1\fi will expand to nothing and
% that \number'13 expands to 11 whereas \number13 expands to 13; the elegance of
@@ -269,7 +300,7 @@
\edef\yyifsamestring#1{\noexpand\yyifsamestr@ng{}{}#1 \noexpand\yyifsam@str@ng\space}
\def\yyifsamestr@ng#1#2#3 {\ifx\yyifsam@str@ng#3\yyifsam@str@ng\fi
- \yyifs@m@str@ng{#1}{#2}#3\space}
+ \yyifs@m@str@ng{#1}{#2}#3\space}
\def\yyifs@m@str@ng#1#2#3{%
\if#3\space
@@ -299,12 +330,12 @@
% but then \yytoks@mpty could not be reused
\long\def\yystartsinbrace#1{%
- \iffalse{\fi
- \if{\yytoks@mpty#1}}%
- \yybreak\yysecondoftwo
- \else
- \yybreak\yyfirstoftwo
- \yycontinue
+ \iffalse{\fi
+ \if{\yytoks@mpty#1}}%
+ \yybreak\yysecondoftwo
+ \else
+ \yybreak\yyfirstoftwo
+ \yycontinue
}
% a test to determine whether the argument is a given control sequence
@@ -333,89 +364,86 @@
\long\def\yyfirstoftwo#1#2{#1}
\long\def\yysecondoftwo#1#2{#2}
+\long\def\yyfirstofthree#1#2#3{#1}
\long\def\yysecondofthree#1#2#3{#2}
\long\def\yythirdofthree#1#2#3{#3}
-% arrays of integers are going to be represented by a string of tokens `element0 \or element1 \or ...'
+\long\def\yypioneofthree#1#2#3{{#1}}
+\long\def\yypitwoofthree#1#2#3{{#2}}
+\long\def\yypithreeofthree#1#2#3{{#3}}
+
+% (unoptimized) arrays of integers are represented by a string of tokens `element0 \or element1 \or ...'
% #2 is a register (otherwise the case and the integer from the array `coalesce');
% the following macro was designed to make something like
-% \vara=\getelemof\yytable\at\yyn\relax possible so it has to expand to a number;
+% \tempca=\getelemof\yytable\at\yyn\relax possible so it has to expand to a number;
% incidentally, some care should be taken using the above assignment to make sure that
% it is not followed by an expandable token (such as \if) as in this case the token might be
-% expanded prematurely, as the assignment is looking for the first non-expandable token which
+% expanded prematurely while the assignment is looking for the first non-expandable token which
% is not part of the number; this is the reason for the \relax
-\def\getelemof#1\at#2{% the original meaning of this macro
- \ifcase\expandafter#2\the#1\else\fi
+\def\getelemof#1\at#2{% #1 is the name of the token register containing the array, #2 is the index
+ \expandafter\get@l@mof\expandafter{\csname#1\endcsname}{#2}%
}
-\def\getelemof#1\at#2{% no longer limited to registers for #2
- \expandafter\get@lemof\expandafter{\the#1}{#2}%
+\def\get@l@mof#1#2{%
+ \expandafter\get@lemof\expandafter{\the#1}{#2}%
}
\def\get@lemof#1#2{%
- \ifcase#2 #1\else\fi
+ \ifcase#2 #1\else\fi
}
-\def\fastgetelemof#1\at#2{%
- \csname #1\parsernamespace\number#2\endcsname
-}
-
-\def\fgetelemof#1\at#2{%
- \expandafter\ifx\csname optopt[#1]\parsernamespace\endcsname\relax
- \expandafter\getelemof\csname #1\endcsname\at{#2}%
- \else
- \csname #1\parsernamespace\number#2\endcsname
- \fi
-}
+\let\fgetelemof\getelemof
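+
+% an illustrative sketch (the array name is made up): if the token register accessible
+% as \csname yysmallarray\endcsname holds `3 \or 5 \or 8', then
+%   \tempca=\getelemof{yysmallarray}\at\@ne\relax
+% sets \tempca to 5; note that the first argument is now the *name* of the register,
+% not the register itself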
% a nestable loop
-\def\bloop#1\repeat{#1\bloop{#1}\repeat\fi}
+\def\bloop#1\repeat{#1\bl@op{#1}\repeat\fi}
+
+\def\bl@op#1\repeat\fi{\fi#1\bl@op{#1}\repeat\fi}
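+
+% an illustrative sketch: \bloop is used like plain \TeX's \loop ... \if... \repeat,
+% for instance
+%   \tempca\z@
+%   \bloop
+%     \advance\tempca\@ne
+%   \ifnum\tempca<\sixt@@n
+%   \repeat
+% advances \tempca sixteen times; unlike \loop, such loops can be nested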
% optimization macros: currently, the level of optimization has to be consistent throughout the
% document, i.e. \optimize macros have to be called on the same arrays after loading.
% the reason is the yyfaststack.sty file that modifies the \newtable macro once for all the tables
\def\optimize#1{%
- \setoptopt{#1}%
- \tempca\z@
- \bloop
- \tempcb=\expandafter\ifcase\expandafter\tempca\the\csname#1\endcsname\else\@MM\fi\relax
- \ifnum\tempcb<\@MM %
- \expandafter\edef\csname #1\parsernamespace\the\tempca\endcsname{\the\tempcb}%
- \advance\tempca\@ne
- \repeat
+ \setoptopt{#1}%
+ \tempca\z@
+ \bloop
+ \tempcb=\expandafter\ifcase\expandafter\tempca\the\csname#1\endcsname\else\@MM\fi\relax
+ \ifnum\tempcb<\@MM %
+ \expandafter\edef\csname #1\parsernamespace\the\tempca\endcsname{\the\tempcb}%
+ \advance\tempca\@ne
+ \repeat
}
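
% an illustrative note: after, say, \optimize{yycheck} in the `[main]' namespace,
% every element of the array is cached in a control sequence of its own
% (\csname yycheck[main]0\endcsname, \csname yycheck[main]1\endcsname, and so on),
% which lets the optimized lookup macros in yyfaststack.sty bypass the \ifcase scan
% of \getelemof
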
\def\optimizetext#1{% optimizing text arrays
- \setoptopt{#1}%
- \tempca\z@
- \@ptimizetext{#1}
+ \setoptopt{#1}%
+ \tempca\z@
+ \@ptimizetext{#1}
}
\def\@ptimizetext#1{%
- \edef\next{\expandafter\ifcase\expandafter\tempca\the\csname#1\endcsname\else\end\fi}%
- \ifx\next\endcontainer
- \let\next\eatone
- \else
- \expandafter\edef\csname #1\parsernamespace\the\tempca\endcsname{\next}%
- \advance\tempca\@ne
- \let\next\@ptimizetext
- \fi
- \next{#1}%
+ \edef\next{\expandafter\ifcase\expandafter\tempca\the\csname#1\endcsname\else\end\fi}%
+ \ifx\next\endcontainer
+ \let\next\eatone
+ \else
+ \expandafter\edef\csname #1\parsernamespace\the\tempca\endcsname{\next}%
+ \advance\tempca\@ne
+ \let\next\@ptimizetext
+ \fi
+ \next{#1}%
}
\def\uoptimize#1{% same as the macro above but produces nonnegative constants as \mathchardef's
- \setoptopt{#1}%
- \tempca\z@
- \bloop
- \tempcb=\expandafter\ifcase\expandafter\tempca\the\csname#1\endcsname\else\@MM\fi\relax
- \ifnum\tempcb<\@MM %
- \toksa\expandafter{\csname #1\parsernamespace\the\tempca\endcsname}%
- \edef\next{\mathchardef\the\toksa=\the\tempcb\relax}\next
- \advance\tempca\@ne
- \repeat
+ \setoptopt{#1}%
+ \tempca\z@
+ \bloop
+ \tempcb=\expandafter\ifcase\expandafter\tempca\the\csname#1\endcsname\else\@MM\fi\relax
+ \ifnum\tempcb<\@MM %
+ \toksa\expandafter{\csname #1\parsernamespace\the\tempca\endcsname}%
+ \edef\next{\mathchardef\the\toksa=\the\tempcb\relax}\next
+ \advance\tempca\@ne
+ \repeat
}
\def\setoptopt#1{%
@@ -466,29 +494,29 @@
\newif\iftracedfa
\def\taction#1\in#2{%
- \begingroup
- \edef\acstring{#1}% in case #1 is, say, \the\toksa, so we no longer have to keep track of it
- \iftracedfa\derrmessage{acting on <\meaning\acstring>\space in (\string#2) \getstatename#2 }\fi
- \toksa\expandafter{#2}\toksb\expandafter{\acstring}%
- \edef\next{\toksa{\the\toksa\the\toksb{%
- \iftracedfa\noexpand\derrmessage{default action: \noexpand\meaning\noexpand\default}\fi
- \noexpand\default}}%
- \def\noexpand\next####1\the\toksb####2####{\noexpand\grabaction}}\next
- \expandafter\next\the\toksa\grabaction
- \tokreturn{}{}{\the\toksa}%
+ \begingroup
+ \edef\acstring{#1}% in case #1 is, say, \the\toksa, so we no longer have to keep track of it
+ \iftracedfa\ferrmessage{acting on <\meaning\acstring>\space in (\string#2) \getstatename#2 }\fi
+ \toksa\expandafter{#2}\toksb\expandafter{\acstring}%
+ \edef\next{\toksa{\the\toksa\the\toksb{%
+ \iftracedfa\noexpand\ferrmessage{default action: \noexpand\meaning\noexpand\default}\fi
+ \noexpand\default}}%
+ \def\noexpand\next####1\the\toksb####2####{\noexpand\grabaction}}\next
+ \expandafter\next\the\toksa\grabaction
+ \tokreturn{}{}{\the\toksa}%
}
\def\tactionx#1\in#2{% exclusive version of the macro above (i.e. match the last action before the brace)
- \begingroup
- \edef\acstring{#1}% in case #1 is, say, \the\toksa, so we no longer have to keep track of it
- \iftracedfa\errmessage{acting on <\meaning\acstring>\space in (\string#2) \getstatename#2 }\fi
- \toksa\expandafter{#2}\toksb\expandafter{\acstring}%
- \edef\next{\toksa{\the\toksa\the\toksb{%
- \iftracedfa\noexpand\derrmessage{default action: \noexpand\meaning\noexpand\default}\fi
- \noexpand\default}}%
- \def\noexpand\next####1\the\toksb####{\noexpand\grabaction}}\next
- \expandafter\next\the\toksa\grabaction
- \tokreturn{}{}{\the\toksa}%
+ \begingroup
+ \edef\acstring{#1}% in case #1 is, say, \the\toksa, so we no longer have to keep track of it
+ \iftracedfa\errmessage{acting on <\meaning\acstring>\space in (\string#2) \getstatename#2 }\fi
+ \toksa\expandafter{#2}\toksb\expandafter{\acstring}%
+ \edef\next{\toksa{\the\toksa\the\toksb{%
+ \iftracedfa\noexpand\ferrmessage{default action: \noexpand\meaning\noexpand\default}\fi
+ \noexpand\default}}%
+ \def\noexpand\next####1\the\toksb####{\noexpand\grabaction}}\next
+ \expandafter\next\the\toksa\grabaction
+ \tokreturn{}{}{\the\toksa}%
}
\def\getstatename#1{\expandafter\g@tstatename#1.\raw}
@@ -496,11 +524,11 @@
\def\g@tstatename#1#2\raw{\expandafter\eatone\string#1}
\def\caction#1\in#2{%
- \begingroup
- \uccode`.=#1\relax
- \uppercase{\toksa{\taction{.}\in}}%
- \toksb{#2}\concat\toksa\toksb
- \tokreturn{}{}{\the\toksa}%
+ \begingroup
+ \uccode`.=#1\relax
+ \uppercase{\toksa{\taction{.}\in}}%
+ \toksb{#2}\concat\toksa\toksb
+ \tokreturn{}{}{\the\toksa}%
}
\def\checkforcount#1{% a rough implementation of `type checking' for a parameter
@@ -513,17 +541,182 @@
}
\def\action#1\in#2{%
- \begingroup
- \checkforcount#1%
- \toksb{{#1}\in{#2}}\concat\toksa\toksb
- \tokreturn{}{}{\the\toksa}%
+ \begingroup
+ \checkforcount#1%
+ \toksb{{#1}\in{#2}}\concat\toksa\toksb
+ \tokreturn{}{}{\the\toksa}%
}%
\let\switchon\taction
+\let\switchonwithtype\action % phase out \action, since it is a rather common name
\let\default\relax
\def\grabaction#1#2\grabaction{\toksa{#1}}
+% switch manipulation macros: adding and replacing labels and actions
+% the macros assume that the switch to be manipulated is well formed; beyond that,
+% no assumptions are made;
+% if the label is not present in the switch, or the new label already exists, an
+% error is reported
+% the macros are not expandable but can be made such with some
+% (rather significant) effort
+
+% #1 is the switch
+% #2 is the label at which to change
+% #3 is the new label
+% #4 is the name of the new switch
+% #5 is the operation to perform if #2 is present and #3 is not
+\def\matchswitch#1#2#3#4#5{%
+ \expandafter\matchswitch@a\expandafter{#1}{#2}{#3}{#4}{#1}{\matchswitch@e}{#5}%
+}
+
+% #1 is the expanded version of the switch
+% #2 is the label at which to change
+% #3 is the new label
+% #4 is the name of the new switch
+% #5 is the switch
+% #6 is the test sequence to apply if #2 is present
+% #7 is the operation to perform if #2 is present and #3 is not
+\def\matchswitch@a#1#2#3#4#5#6#7{\matchswitch@b{#1}{#1}{#2}{#3}{#4}{#5}{#6}{#7}}
+
+% #1 is the expanded version of the switch
+% #2 is the expanded version of the switch
+% #3 is the label at which to change
+% #4 is the new label
+% #5 is the name of the new switch
+% #6 is the switch
+% #7 is the test sequence to apply if #3 is present
+% #8 is the operation to perform if #3 is present and #4 is not
+\def\matchswitch@b#1#2#3#4#5#6#7#8{%
+ \def#5##1#3{\matchswitch@c}%
+ \expandafter\expandafter\expandafter#5\expandafter\eatone\string{#1#3}{#2}{#3}{#4}{#5}{#6}{#7}{#8}%
+}
+
+\def\matchswitch@c{%
+ \expandafter\expandafter\expandafter\matchswitch@d
+ \expandafter\expandafter\expandafter{\expandafter\eatone\string}%
+}
+
+% #1 is the match result
+% #2 is the expanded version of the switch
+% #3 is the label at which to change
+% #4 is the new label
+% #5 is the name of the new switch
+% #6 is the switch
+% #7 is the test sequence to apply if #3 is present
+% #8 is the operation to perform if #3 is present and #4 is not
+\def\matchswitch@d#1#2#3#4#5#6#7#8{%
+ #7{#1}{#2}{#3}{#4}{#5}{#6}{#8}%
+}
+
+% #1 is the match result
+% #2 is the expanded version of the switch
+% #3 is the label at which to change
+% #4 is the new label
+% #5 is the name of the new switch
+% #6 is the switch
+% #7 is the operation to perform if #3 is present and #4 is not
+\def\matchswitch@e#1#2#3#4#5#6#7{%
+ \yystringempty{#1}{% label not present
+ \errhelp{Switch #6 contents: #2}%
+ \errmessage{label \nx#3 was not found in switch \nx#6}%
+ }{%
+ \yystringempty{#4}{% no new label, skip the next test
+ #7{#2}{#3}{#4}{#5}%
+ }{%
+ \matchswitch@a{#2}{#4}{#3}{#5}{#6}{\matchswitch@f}{#7}%
+ }%
+ }%
+}
+
+\def\matchswitch@f#1#2#3#4#5#6#7{%
+ \yystringempty{#1}{% label not present
+ #7{#2}{#4}{#3}{#5}%
+ }{%
+ \errhelp{Switch #6 contents: #2}%
+ \errmessage{label \nx#3 already exists in switch \nx#6}
+ }%
+}
+
+% add a label to an existing action
+
+\def\extendswitch#1\at#2\by#3\to#4{%
+ \matchswitch{#1}{#2}{#3}{#4}{\@xtendswitch}%
+}
+
+\def\@xtendswitch#1#2#3#4{%
+ \def#4##1#2##2#3{\def#4{##1#2#3##2}}%
+ #4#1#3%
+}
+
+% replace an existing label inside a switch
+
+\def\replaceswitch#1\at#2\by#3\to#4{%
+ \matchswitch{#1}{#2}{#3}{#4}{\r@placeswitch}%
+}
+
+\def\r@placeswitch#1#2#3#4{%
+ \def#4##1#2##2#3{\def#4{##1#3##2}}%
+ #4#1#3%
+}
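+
+% an illustrative sketch (the switch and its labels are made up): starting from
+%   \def\myswitch{A{do-a}B{do-b}D{do-d}}
+% the call \extendswitch\myswitch\at{B}\by{C}\to\myswitch redefines \myswitch as
+%   A{do-a}BC{do-b}D{do-d}   (B and C now share an action),
+% while, starting from the same original \myswitch,
+% \replaceswitch\myswitch\at{B}\by{C}\to\myswitch yields
+%   A{do-a}C{do-b}D{do-d}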
+
+% replace an existing action inside a switch
+
+\def\replaceaction#1\at#2\by#3\to#4{%
+ \matchswitch{#1}{#2}{}{#4}{\r@placeaction{#3}}%
+}
+
+% #1 holds the new action
+% #2 is the expanded switch
+% #3 is the label at which to make the replacement
+% #4 is empty
+% #5 is the name of the new switch
+\def\r@placeaction#1#2#3#4#5{%
+ \r@placeaction@a{#2}{#3}{#5}{#1}%
+}
+
+% #1 is the expanded switch
+% #2 is the label at which to make the replacement
+% #3 is the name of the new switch
+% #4 is the new action
+\def\r@placeaction@a#1#2#3#4{%
+ \def#3##1#2##2##{\expandafter\expandafter\expandafter
+ \r@placeaction@b\expandafter\expandafter\expandafter{\expandafter\eattwo\string}}%
+ \expandafter\expandafter\expandafter#3\expandafter\eatone\string{#1}{#2}{#3}{#1}{#4}%
+}
+
+% #1 is the part of the switch after the action
+% #2 is the label at which to make the replacement
+% #3 is the name of the new switch
+% #4 is the expanded switch
+% #5 is the new action
+\def\r@placeaction@b#1#2#3#4#5{%
+ \def#3##1#2##2##{\expandafter\expandafter\expandafter
+ \r@placeaction@c\expandafter\expandafter\expandafter{\expandafter\eatone\string}{##1}{##2}}%
+ \expandafter\expandafter\expandafter#3\expandafter\eattwo\string{#4.}{#2}{#3}{#1}{#5}%
+}
+
+% #1 = {{before the label}{between the label and the action} after the action . }
+\def\r@placeaction@c#1{%
+ \expandafter\yystringempty\expandafter{\r@placeaction@f#1}{% the remainder of the switch is gone
+ \expandafter\r@placeaction@d\r@placeaction@e#1%
+ }{%
+ \expandafter\r@placeaction@c\expandafter{\r@placeaction@e#1}%
+ }
+}
+
+% #1 before the label
+% #2 between the label and the action
+% #3 is the label
+% #4 is the name of the new switch
+% #5 part of the switch after the action
+% #6 is the new action
+\def\r@placeaction@d#1#2#3#4#5#6{\def#4{#1#3#2#6#5}}
+
+\def\r@placeaction@e#1#2#3.{{#1}{#2}}
+
+\def\r@placeaction@f#1#2#3.{}
+
% grab the first token unless it is a space or a brace
\def\getfirsttoken#1{%
@@ -554,7 +747,8 @@
\long\def\yybreak@@@#1#2\yycontinue{\fi\fi\fi\fi#1}
\long\def\yybreak@@@@#1#2\yycontinue{\fi\fi\fi\fi\fi#1}
-% we intentionally leave \yycontinue undefined since it should not be expanded normally
+% we intentionally leave \yycontinue defined as an \errmessage
+% since it should not be expanded normally;
% every conditional that uses \yybreak?{...} ... \yycontinue construct
% must have an \else clause, i.e.\ a conditional such as
% \if ab
@@ -562,27 +756,21 @@
% \yycontinue
% is a bad idea as it will result in an incomplete \iffalse
%\let\yycontinue\fi
+\def\yycontinue{\errmessage{\noexpand\yycontinue should never be expanded!}}
+% this also makes \if...\yycontinue constructs unskippable; this can be remedied by
+% adding a \fi before \yycontinue, which will not affect a properly constructed
+% conditional
% macros for taking care of extra tokens
-\long\def\yyid#1{#1}
-\long\def\yypione#1#2{#1}
-\long\def\yypitwo#1#2{#2}
+\long\def\yyid#1{#1} % this is misnamed since it changes #1 by stripping braces and spaces
\long\def\yyswap#1#2{#2#1}
\long\def\eatone#1{}
\long\def\eattwo#1#2{}
+\long\def\eatthree#1#2#3{}
\long\def\eattoend#1\end{}
+\long\def\eattospace#1 {}
\input xarithm.sty
-% temporaries
-
-\input trt1.sty
-
-% \tempcd used by \printrule and implicit rule name macros in yymisc.sty
-% \tempce used by implicit rule name macros in yymisc.sty
-
-% \tokse and \toksf so far only used in the bison action for
-% \codepropstype
-
\newif\ifbootstrapmode
diff --git a/support/splint/tex/yydebug.sty b/support/splint/tex/yydebug.sty
new file mode 100644
index 0000000000..0d763aecfa
--- /dev/null
+++ b/support/splint/tex/yydebug.sty
@@ -0,0 +1,148 @@
+% Copyright 2012-2020, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+% handy debugging shortcuts
+
+\def\yydebugsetup{% initialize debugging (note that arbitrary commands
+                   % can also be inserted by \insertraw{...})
+ \expandafter\def\expandafter\multicharswitch % allow execution of any code in the middle of input
+ \expandafter{\multicharswitch\yydebugbegin\yydebugend{\the\yybyte}}%
+}
+
+\def\yydebuggenericstart{% generic command executed in the middle of input
+ \yydebugmost % turn on tracing
+ \yyinput % ... and continue
+}
+
+\def\yydebuggenericfinish{% stop debugging
+ \yydebugnone % turn off tracing
+ \yyinput % ... and continue
+}
+
+\let\yydebugbegin\yydebuggenericstart
+\let\yydebugend\yydebuggenericfinish
+
+\newif\ifcdebug % a temporary conditional
+
+\def\yydebugall{% turn all debugging options on
+ \yydebugmost
+ \checktabletrue
+ \saveparseoutputtrue
+}
+
+\def\yydebugparse{%
+ \tracelookaheadtrue
+ \traceparseresultstrue
+ \traceparserstatestrue
+ \tracerulestrue
+ \tracestackstrue
+ \tracediscardedinputtrue
+}
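+
+% an illustrative usage sketch: after \yydebugsetup has spliced \yydebugbegin and
+% \yydebugend into \multicharswitch, a \yydebugbegin occurring in the scanned input
+% switches the tracing options on and a subsequent \yydebugend switches them off
+% again (see \yydebuggenericstart and \yydebuggenericfinish above)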
+
+\def\reporttokendisplay{% to make this skippable in bootstrap mode
+ \ifdisplaytokenraw\ferrmessage{tokens are typeset as is^^J}\fi
+}
+
+\def\yydebugmost{% do not save or display the tables
+ \ifbootstrapmode
+ \ferrmessage{*** bootstrap mode is on ***^^J^^J}%
+ \else
+ \reporttokendisplay
+ \checktrimtrue
+ \parseverbosetrue
+ \checktrailingstashtrue
+ \fi
+ \ifbracketedvisible\ferrmessage{Symbolic term names are visible^^J}\fi
+ \iftermindex\else\ferrmessage{Indexing is turned off^^J}\fi
+ \ifshowlastaction\else\ferrmessage{Last action will not be shown}\fi
+ \ifxreflocal\else\ferrmessage{Cross references are generated in noweb style}\fi
+ \ifx\optimization\UNDEFINED
+ \else
+ \ferrmessage{Optimization level set at \number\optimization^^J}%
+ \fi
+
+ \tracingonline=3
+ \showboxbreadth=10000
+ \showboxdepth=10000
+%
+ \yydebugparse
+ \traceactioncodetrue
+ \traceactionstrue
+ \tracebadcharstrue
+ \tracebadnamestrue
+ \tracedfatrue
+ \traceflexbufferstrue
+ \tracenamestrue
+ \tracestatestrue
+ \traceswitchlabelstrue
+ \tracetexpptrue
+ \tracetokennamestrue
+ \yyflexdebugtrue
+ \yyinputdebugtrue
+ \yytracereplacementstrue
+ \traceprettytokenstrue
+}
+
+\def\yydebugnone{% turn all debugging options off
+ \checktablefalse
+ \ifbootstrapmode
+ \else
+ \checktrimfalse
+ \parseverbosefalse
+ \fi
+ \saveparseoutputfalse
+ \traceactioncodefalse
+ \traceactionsfalse
+ \tracebadcharsfalse
+ \tracebadnamesfalse
+ \tracedfafalse
+ \tracediscardedinputfalse
+ \traceflexbuffersfalse
+ \tracelookaheadfalse
+ \tracenamesfalse
+ \traceparseresultsfalse
+ \traceparserstatesfalse
+ \tracerulesfalse
+ \tracestacksfalse
+ \tracestatesfalse
+ \traceswitchlabelsfalse
+ \tracetexppfalse
+ \tracetokennamesfalse
+ \yyflexdebugfalse
+ \yyinputdebugfalse
+ \yytracereplacementsfalse
+ \cdebugfalse
+}
+
+% miscellaneous shortcuts
+
+\def\shownethe#1{%
+ {%
+ \edef\next{\the#1}%
+ \ifx\next\empty
+ \else
+ \showthe#1%
+ \fi
+ }%
+}
+
+\def\showem#1#2#3{\toksa{#1}\toksb{#2}\toksc{#3}{\newlinechar=`^^J%
+ \errmessage{%
+ arg. 1: \the\toksa^^J%
+ arg. 2: \the\toksb^^J%
+ arg. 3: \the\toksc%
+}}}
+
diff --git a/support/splint/tex/yyfaststack.sty b/support/splint/tex/yyfaststack.sty
index cc43c33115..9bf46b3d1e 100644
--- a/support/splint/tex/yyfaststack.sty
+++ b/support/splint/tex/yyfaststack.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,23 +14,26 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-% fast versions of stack access functions
+% fast(er) versions of stack access functions
\catcode`\@=11
-\def\yyinitstack#1{% #1 is the counter
+\def\yyinitstack#1{% #1 is the name of the stack
\csname \expandafter\eatone\string#1<count>\endcsname\m@ne
}
% stacks will be defined as pairs ( \s t a c k n a m e [n u m b e r], \s t a c k n a m e <count> )
-% where \s t a c k n a m e [n u m b e r] is a control sequence and \s t a c k n a m e <count>
-% is a \count register
+% where \s t a c k n a m e [n u m b e r] is an associative array of control sequences
+% and \s t a c k n a m e <count> is a \count register
%
% note that in the implementation below the stack grows `the wrong
% way'; this is done for convenience, since there is no `memory read
% with increment' operation, the `usual' (say, hardware) stack
% implementations take advantage of, and our stack can grow
-% unrestricted this way
+% unrestricted this way; also note that while the array is namespace specific,
+% the stack pointer is not, so special effort is required to preserve it
+
+\let\yyinitarray\yyinitstack % using the stack as an array only makes sense for the optimized version
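+
+% an illustrative note (the stack name is made up): for a stack \yyvsa in the
+% `[main]' namespace whose stack pointer is currently 3, \gettopofstackcs\yyvsa
+% below expands to \csname yyvsa[3][main]\endcsname, the control sequence that
+% holds the top element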
\def\getstackpointer#1{% expands to the value of the current top of the stack position
\expandafter\the\csname \expandafter\eatone\string#1<count>\endcsname
@@ -38,7 +41,15 @@
\def\gettopofstackcs#1{% expands to the control sequence (a member of the associative array)
% that holds the value of the current element at the top of the stack
- \csname \expandafter\eatone\string#1[\getstackpointer#1]\parsernamespace\endcsname
+ \csname \expandafter\eatone\string#1[\getstackpointer#1]\parsernamespace \endcsname
+}
+
+\def\gettopofstackcsx#1{% a version of the above to be used with \romannumeral
+ 0\expandafter\noexpand\csname \expandafter\eatone\string#1[\getstackpointer#1]\parsernamespace \endcsname
+}
+
+\def\gettopofstackcsxx#1{% to be used with \romannumeral, expands to the contents of the top stack element
+ 0\expandafter\expandafter\expandafter\space\csname \expandafter\eatone\string#1[\getstackpointer#1]\parsernamespace \endcsname
}
\def\getmidstackcs#1#2{% expands to the control sequence (a member of the associative array)
@@ -46,6 +57,10 @@
\csname \expandafter\eatone\string#1[#2]\parsernamespace\endcsname
}
+\def\getmidstackcsx#1#2{% a version of the above to be used with \romannumeral, safe to use with \edef
+ 0\expandafter\noexpand\csname \expandafter\eatone\string#1[#2]\parsernamespace\endcsname
+}
+
\def\movestackpointer#1\by#2{%
\expandafter\advance\csname \expandafter\eatone\string#1<count>\endcsname#2%
}
@@ -56,8 +71,8 @@
%
-\def\yypop#1\into#2{%
- #2\expandafter\expandafter\expandafter\expandafter\expandafter\expandafter\expandafter{\gettopofstackcs#1}%
+\def\yypop#1\into#2{% pops the stack #1, stores the top element in token register #2
+ #2\expandafter{\romannumeral\gettopofstackcsxx#1}%
\movestackpointer#1\by\m@ne
}
@@ -68,6 +83,11 @@
\expandafter\expandafter\expandafter\def\gettopofstackcs#2{#1}%
}
+\long\def\yypushx#1\on#2{% push with expand
+ \movestackpointer#2\by\@ne
+ \expandafter\expandafter\expandafter\edef\gettopofstackcs#2{#1}%
+}
+
% push register contents on a stack: #1 is a register, #2 is a stack
\def\yypushr#1\on#2{%
@@ -199,6 +219,20 @@
\movestackpointer#1\by#2\relax
}
+% faster array access macros
+
+\def\fastgetelemof#1\at#2{%
+ \csname #1\parsernamespace\number#2\endcsname
+}
+
+\def\fgetelemof#1\at#2{% this definition allows mixing optimized and unoptimized tables
+ \expandafter\ifx\csname optopt[#1]\parsernamespace\endcsname\relax
+ \getelemof{#1}\at{#2}%
+ \else
+ \csname #1\parsernamespace\number#2\endcsname
+ \fi
+}
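+
+% an illustrative note: \fgetelemof{yycheck}\at\yyn expands to the cached constant
+% \csname yycheck\parsernamespace<n>\endcsname once \optimize{yycheck} has been run
+% in the current namespace, and falls back to the \ifcase scan of \getelemof otherwise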
+
% new stack access macros that read the stack directly (to reduce namespace pollution)
% these are here more as an example
diff --git a/support/splint/tex/yyinit.sty b/support/splint/tex/yyinit.sty
index 8d9b7d33c3..5091c05007 100644
--- a/support/splint/tex/yyinit.sty
+++ b/support/splint/tex/yyinit.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -22,10 +22,10 @@
name: main,
ptables: cweb/gyytab.tex,
ltables: cweb/ltab.tex,
- tokens: \drvname.tok,
+ tokens: bo.tok,
asetup: {},
- dsetup: \newlexerstateextra,
- rsetup: \noexpand\savefullstateextra,
+ dsetup: \newlexerstateextra\newparserstateextra,
+ rsetup: {},
optimization: \optimizeall;%
% prologue parser
@@ -34,12 +34,63 @@
name: prologue,
ptables: cweb/dyytab.tex,
ltables: cweb/ltab.tex,
- tokens: \drvname.tok,
+ tokens: bo.tok,
asetup: {},
- dsetup: \newlexerstateextra,
- rsetup: \noexpand\savefullstateextra,
+ dsetup: \newlexerstateextra\newparserstateextra,
+ rsetup: {},
optimization: \optimizeall;%
+% flex parser
+
+\genericparser
+ name: flex,
+ ptables: cweb/fiptab.tex,
+ ltables: cweb/filtab.tex,
+ tokens: fo.tok,
+ asetup: {},
+ dsetup: {},
+ rsetup: {},
+ optimization: {};% optimized by the driver (--optimize-tables, --optimize-actions)
+
+% flex regex parser
+
+\genericparser
+ name: flexre,
+ ptables: cweb/reptab.tex,
+ ltables: cweb/filtab.tex,
+ tokens: fo.tok,
+ asetup: {},
+ dsetup: {},
+ rsetup: {},
+ optimization: {};% optimized by the driver (--optimize-tables, --optimize-actions)
+
+% flex section 2 parser
+
+\genericparser
+ name: flextwo,
+ ptables: cweb/raptab.tex,
+ ltables: cweb/filtab.tex,
+ tokens: fo.tok,
+ asetup: {},
+ dsetup: \newparserstateextra,
+ rsetup: {},
+ optimization: {};% optimized by the driver (--optimize-tables, --optimize-actions)
+
+% flex section 1 parser
+
+\genericparser
+ name: flexone,
+ ptables: cweb/ddptab.tex,
+ ltables: cweb/filtab.tex,
+ tokens: fo.tok,
+ asetup: {},
+ dsetup: {},
+ rsetup: {},
+ optimization: {};% optimized by the driver (--optimize-tables, --optimize-actions)
+
+\let\flexpseudonamespace\flexnamespace
+\let\flexpseudorenamespace\flexrenamespace
+
% parser for term names: this is not really a great idea in itself but rather an
% illustration of what is possible
@@ -50,84 +101,299 @@
tokens: {},
asetup: {},
dsetup: {},
- rsetup: \noexpand\savefullstateextra,
+ rsetup: \let\returnexplicitspace\ignoreexplicitspace, % ignore spaces in names
optimization: \optimizeall;%
\tomainparser
-\input yytexlex.sty
+% stage two macros: parsing
-\let\unparse\yyid
+% \flex parser stack
-\expandafter\def\csname parserstack[b]\endcsname#1#2{%
- \parsevb{#1}% Stage two, start the parsing
- \ifyyparsefail % revert to generic macros if parsing failed
- \yybreak{\message{parsing failed ...}#2}%
- \else % Stage three, process the parsed table
- \yybreak{\typesetalltables}%
- \yycontinue
+% section 1 parser
+
+\def\flexoneparserinit{%
+ \yylessusedtrue
+ \floption@sensetrue
}
-\expandafter\def\csname parserstack[]\endcsname#1#2{%
- \parsevb{#1}% Stage two, start the parsing
- \ifyyparsefail % revert to generic macros if parsing failed
- \yybreak{\message{parsing failed ...}#2}%
- \else % Stage three, process the parsed table
- \yybreak{\typesetalltables}%
- \yycontinue
+\def\flexoneparserdatainit{%
+ \table{}%
}
-% stage two macros: parsing
+% regular expression parser
-\newtoks\symstream
+\def\flexreparserinit{%
+ \yyBEGIN{SECT2}%
+ \flin@ruletrue
+ \yylessusedtrue
+}
-\def\parsevb#1{%
- \ifchecktable
- \ifsaveparseoutput
- {\toks0{#1}\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
- table before parsing:^^J\the\toks0}}%
- \else
- {\toks0{#1}\errmessage{table before parsing: \the\toks0}}%
- \fi
- \fi
+\def\flexreparserdatainit{%
+ \table{}%
+}
+
+% section 2 parser
+
+\def\flexparserinit{%
+ \yyBEGIN{SECT2}%
+ \def\flbracelevel{0}%
+ \yylessusedtrue
+}
+
+\def\flexparserdatainit{%
+ \table{}%
+}
+
+% output parsed tables
+
+\long\def\displayoutputcode#1#2#3{% #1 is the output code (will be expanded by \write)
+ % #2 is the output stream
+ % #3 is the preamble (will be expanded by \write)
+ \immediate\write#2{#3#1}%
+}
+
+\def\displayflextable#1{{%
+ \hidecslist\cwebstreamchars
+ \restorecslist{flexparser-debug}\yyflunion
+ \newlinechar=`^^J%
+ \expandafter\displayoutputcode\expandafter{\the\table}\exampletable
+ {^^J\harmlesscomment \parsernamespace::parsed table #1:^^J}%
+}}
+
+\def\displaybisontable#1{{%
+ \hidecslist\cwebstreamchars
+ \restorecslist{parser-debug}\yyunion
+ \newlinechar=`^^J%
+ \expandafter\displayoutputcode\expandafter{\the\table}\exampletable
+ {^^J\harmlesscomment \parsernamespace::parsed table #1:^^J}%
+}}
+
+% stage two parsing macros
+
+\def\preparsebisongrammar{%
+ \let\postparse\postparsebisongrammar
\tomainparser
- \doparse{#1}%
+ \displayrawtable % do this after the parse namespaces are setup
+ \basicparserinit
+ \bisonparserinit
+ \bisonparserdatainit
+ \yyparse
+}
+
+\def\preparsebisonprologue{%
+ \let\postparse\postparsebisonprologue
+ \toprologueparser
+ \displayrawtable % do this after the namespaces are setup
+ \basicparserinit
+ \bisonparserinit
+ \bisonparserdatainit
+ \yyparse
+}
+
+\def\preparseflexone{%
+ \let\postparse\postparseflexone
+ \toflexoneparser
+ \displayrawtable % do this after the namespaces are setup
+ \basicparserinit
+ \flexoneparserinit
+ \flexoneparserdatainit
+ \yyparse
+}
+
+\def\preparseflextwo{%
+ \let\postparse\postparseflextwo
+ \toflextwoparser
+ \displayrawtable % do this after the namespaces are setup
+ \basicparserinit
+ \flexparserinit
+ \flexparserdatainit
+ \yyparse
+}
+
+\def\preparsefallback#1{%
+ \let\postparse\relax
+ \message{#1}%
+}
+
+% stage three, postprocessing and typesetting
+
+\newif\ifparseverbose
+
+\def\postparsegeneric#1{%
\ifyyparsefail
- \toprologueparser
- \doparse{#1}%
- \ifyyparsefail
- \else
- \ifsaveparseoutput{\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment parsed table (prologue):
- ^^J\the\table}}%
- \fi
- \fi
+ \yybreak{%
+ \ifparseverbose\ferrmessage{#1 parsing failed.}\fi
+ \parserreset
+ }%
\else
- \ifsaveparseoutput{\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment parsed table (grammar):
- ^^J\the\table}}%
- \fi
+ \yybreak{%
+ \ifparseverbose\ferrmessage{#1 parsing successful.}\fi
+ \ifsaveparseoutput\displayparsedtable{#1}\fi
+ \typesetparsedtables
+ }%
+ \yycontinue
+}
+
+\def\parserreset#1\par{%
+ \yyparsefailfalse % in case the next pass is a \relax
+ \let\postparse\empty % ...
+ \expandafter\skiptolsection\the\Binputtoks\par% start the next parsing pass, skip \6\hbox{} trash
+}
+
+\def\postparsebisongrammar{%
+ \let\displayparsedtable\displaybisontable
+ \let\typesetparsedtables\typesetalltables
+ \postparsegeneric{(grammar)}%
+}
+
+\def\postparsebisonprologue{%
+ \let\displayparsedtable\displaybisontable
+ \let\typesetparsedtables\typesetalltables
+ \postparsegeneric{(prologue)}%
+}
+
+\def\postparseflexone{%
+ \let\displayparsedtable\displayflextable
+ \let\typesetparsedtables\typesetfsonetables
+ \postparsegeneric{(section 1)}%
+}
+
+\def\postparseflextwo{%
+ \let\displayparsedtable\displayflextable
+ \let\typesetparsedtables\typesetfstwotables
+ \postparsegeneric{(section 2)}%
+}
+
+\def\displayrawtable{%
+ \ifsaveparseoutput
+ {\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
+ table before parsing:^^J\the\Binputtoks}}%
+ \fi
+ \ifchecktable
+ \ferrmessage{table before parsing: \the\Binputtoks}%
\fi
}
-% stage three macros: typesetting
+\fillpstack{}{%
+ \preparsebisongrammar
+ \preparsebisonprologue
+ {\preparsefallback{**}}%
+ \relax % this \relax is necessary so that the braces above
+ % are not stripped by \poppstack
+}
+
+\fillpstack{b}{%
+ \preparsebisongrammar
+ \preparsebisonprologue
+ {\preparsefallback{**}}%
+ \relax % this \relax is necessary so that the braces above
+ % are not stripped by \poppstack
+}
+
+\fillpstack{fs1}{%
+ \preparseflexone
+ \preparseflextwo
+ {\preparsefallback{==}}%
+ \relax % this \relax is necessary so that the braces above
+ % are not stripped by \poppstack
+}
+
+\fillpstack{fs2}{%
+ \preparseflextwo
+ {\preparsefallback{--}}%
+ \relax % this \relax is necessary so that the braces above
+ % are not stripped by \poppstack
+}
+
+\fillpstack{t}{%
+ \relax
+}
+
+% stage 3.5 macros: typesetting
+
+\newtoks\symstream
\def\tlskip{\z@}
\def\tfskip{\parindent}
+\newif\ifchecktrailingstash
+
\def\typesetalltables{%
\begingroup
+ \ifsaveparseoutput
+ {\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
+ stashed stream:^^J\the\yystash^^J^^J\harmlesscomment
+ format stream: ^^J\the\yyformat}}%
+ \fi
+ \ifchecktable
+ \ferrmessage{parsed table: \the\table^^J^^J%
+ stashed stream: \the\yystash^^J^^J%
+ format stream: \the\yyformat}%
+ \fi
+ \extractprodtableinfo
+ \symstream\table
+ \table{}%
+ \setprodtable
+ \the\symstream\relax
+ \postoks{}\pushothertables
+ \ifchecktable
+ \ferrmessage{table after processing: \the\table}%
+ \fi
+ \ifsaveparseoutput
+ {\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
+ processed table:^^J\the\table}}%
+ \fi
+ \parindent1em
+ \checkforpropertable\table
+ \tabskip\tfskip
+ \ruletableset
+ \ifchecktrailingstash
+ \ferrmessage{remaining stash: \the\yystash}%
+ \fi
+ \unwrapstash\yystash
+ \toksa\expandafter{\the\yystash}%
+ \cleanstash\stripstash\checkforccode
+ \ifchecktrailingstash
+ \ferrmessage{stash after cleaning: \the\toksa}%
+ \fi
+ \ifnum\wd0>\z@
+ %\ifchecktable
+ % \showboxdepth=1000
+ % \showboxbreadth=1000
+ % \showbox0
+ %\fi
+ % currently testing for nontrivial leftover stash involves packaging the stash material
+ % into a \vbox; as a result, the stash containing ${}{}$\hbox{} will have a nonzero length
+ % which is why the test below is necessary
+ \ifnum\ht0>\z@
+ \indent\boxstash
+ \fi
+ \fi
+ \expandafter % export the value of the alignment
+ \endgroup
+ \expandafter\gaglue\the\gaglue\relax
+}
+
+\def\typesetfstwotables{%
+ \begingroup
\ifchecktable
\ifsaveparseoutput
{\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
- stashed stream:^^J\the\yystash}}%
+ stashed stream:^^J\the\yystash^^J^^J\harmlesscomment
+ format stream: ^^J\the\yyformat}}%
\else
- \errmessage{parsed table: \the\table^^J^^Jstashed stream: \the\yystash}%
+ \errmessage{parsed table: \the\table^^J^^J%
+ stashed stream: \the\yystash^^J^^J%
+ format stream: \the\yyformat}%
\fi
\fi
+ \extractregextableinfo
\symstream\table
\table{}%
- \setprodtable
+ \setregextable
\the\symstream\relax
- \postoks{}\pushothertables
+ \regextableset
\ifchecktable
\ifsaveparseoutput
\else
@@ -138,17 +404,76 @@
{\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
processed table:^^J\the\table}}%
\fi
- \parindent1em
- \checkforpropertable\table
- \tabskip\tfskip
- \ruletableset
\ifchecktable
- \message{remaining stash: \the\yystash}%
+ \ifchecktrim
+ \ferrmessage{remaining stash: \the\yystash}%
+ \fi
+ \fi
+ \unwrapstash\yystash
+ \toksa\expandafter{\the\yystash}%
+ \cleanstash\stripstash\checkforccode
+ \ifchecktable
+ \ifchecktrim
+ \ferrmessage{stash after cleaning: \the\toksa}%
+ \fi
+ \fi
+ \ifnum\wd0>\z@
+ %\ifchecktable
+ % \showboxdepth=1000
+ % \showboxbreadth=1000
+ % \showbox0
+ %\fi
+ % currently testing for nontrivial leftover stash involves packaging the stash material
+ % into a \vbox; as a result, the stash containing ${}{}$\hbox{} will have a nonzero length
+ % which is why the test below is necessary
+ \ifnum\ht0>\z@
+ \indent\boxstash
+ \fi
+ \fi
+ \endgroup
+}
+
+\def\typesetfsonetables{%
+ \begingroup
+ \ifchecktable
+ \ifsaveparseoutput
+ {\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
+ stashed stream:^^J\the\yystash^^J^^J\harmlesscomment
+ format stream: ^^J\the\yyformat}}%
+ \else
+ \errmessage{parsed table: \the\table^^J^^J%
+ stashed stream: \the\yystash^^J^^J%
+ format stream: \the\yyformat}%
+ \fi
+ \fi
+% \extractregextableinfo
+ \symstream\table
+ \table{}%
+ \setregexdeftable
+ \the\symstream\relax
+ \regexdeftableset
+ \ifchecktable
+ \ifsaveparseoutput
+ \else
+ \errmessage{table after processing: \the\table}%
+ \fi
+ \fi
+ \ifsaveparseoutput
+ {\newlinechar=`^^J\immediate\write\exampletable{^^J\harmlesscomment
+ processed table:^^J\the\table}}%
+ \fi
+ \ifchecktable
+ \ifchecktrim
+ \ferrmessage{remaining stash: \the\yystash}%
+ \fi
\fi
- \toksa{}\the\yystash
+ \unwrapstash\yystash
+ \toksa\expandafter{\the\yystash}%
\cleanstash\stripstash\checkforccode
\ifchecktable
- \message{stash after cleaning: \the\toksa}%
+ \ifchecktrim
+ \ferrmessage{stash after cleaning: \the\toksa}%
+ \fi
\fi
\ifnum\wd0>\z@
%\ifchecktable
@@ -166,10 +491,16 @@
\endgroup
}
+\let\extractprodtableinfo\empty % we do not preprocess the productions table
+
+\let\extractregextableinfo\empty % we do not preprocess the regex table
+
% setting the rule table: cross-section alignment and other effects are applied here;
% in order to produce the proper line skips before and after \unvbox, the rules followed by
% \TeX\ while adding an \halign to a vertical list have to be reproduced explicitly
+\newdimen\gaglue % the width of the action box of the last alignment
+
\def\ruletableset{%
\par
\vskip-\baselineskip
@@ -186,12 +517,48 @@
}
\expandafter
}\expandafter
- \unvbox\expandafter0\expandafter
+ \unvcopy\expandafter0\expandafter
\prevdepth\the\prevdepth\relax
+ \setbox\z@=\vbox{\unvbox\z@ \setbox\z@=\lastbox % set the alignment dimension
+ \hbox{\unhbox\z@ \unskip\setbox\z@=\lastbox\expandafter}\expandafter}\expandafter\gaglue\the\wd\z@
}
\def\setallterms#1{\setbox\z@=\hbox{\it#1}\ifsquashterms\hbox to0pt{\unhbox\z@\hss}\else\unhbox\z@\fi\hfil}
+% typesetting the scanner automaton rules
+
+\def\regextableset{%
+ \par
+ \vskip-\baselineskip
+ \setbox0 \vbox\expandafter{\expandafter
+ \null\expandafter\prevdepth\the\prevdepth
+ \halign to\hsize
+ {##\hfil\tabskip0 pt plus1fil\ &\relax\tabskip\tlskip\toksa{}##\makestashbox\hfil\cr
+ \the\table
+ }%
+ \expandafter
+ }\expandafter
+ \unvbox\expandafter0\expandafter
+ \prevdepth\the\prevdepth\relax
+}
+
+% typesetting named regular expression definitions
+
+\def\regexdeftableset{%
+ \par
+ \vskip-\baselineskip
+ \setbox0\vbox\expandafter{\expandafter
+ \null\expandafter\prevdepth\the\prevdepth
+ \halign to\hsize
+ {\hskip\parindent##\hfil\tabskip0 pt plus1fil\ &\relax\tabskip\tlskip\tt##\hfil\cr
+ \the\table
+ }%
+ \expandafter
+ }\expandafter
+ \unvbox\expandafter0\expandafter
+ \prevdepth\the\prevdepth\relax
+}
+
% quick and dirty global alignment: the size of the last box (and those in between)
% can be chosen automatically after one pass and read in for the final pass;
% in the future this will be the default implementation; for now, the inelegant
@@ -264,7 +631,8 @@
\tokdectoks\toksa
\edef\next{\table{\the\table\noalign{%
\tabskip\parindent
- \nx\displaytokenrawtrue
+ %\nx\displaytokenrawtrue % this controls how tokens are displayed
+ % in the declarations: if true, the macro names will be shown
\halign to\hsize{\the\tokdectoks}%
}%
}}\next
@@ -384,10 +752,12 @@
% macros for processing \Cee\ mode material
+\newif\ifchecktrim
+
%\long\def\buildstash#1{\toksa\expandafter{\the\toksa#1}} % = stashed
\def\cleanstash{%
- \ifchecktrim\errmessage{collected stash: \the\toksa}\fi
+ \ifchecktrim\ferrmessage{collected stash: \the\toksa}\fi
\expandafter\cleanst@sh\the\toksa\packagebox}
\def\cleanst@sh{\let\6\testsbox\setbox0=\vbox\bgroup}
@@ -417,7 +787,7 @@
\def\packagebox{\egroup\ifnum\wd0>\z@\else\toksa{}\fi}
\def\stripstash{%
- \ifchecktrim\errmessage{before trimming: \the\toksa}\fi
+ \ifchecktrim\ferrmessage{before trimming: \the\toksa}\fi
\def\6{}\expandafter\stripst@sh\expandafter\ignorespaces\the\toksa\6\str@pst@sh}
\def\stripst@sh{\toksa{}\stripst@shi}
@@ -451,7 +821,7 @@
\newif\iftrailingreturn
\def\striptrim{%
- \ifchecktable\errmessage{trimming: \the\toksb}\fi
+ \ifchecktrim\ferrmessage{trimming: \the\toksb}\fi
\edef\next{\the\toksb}%
\expandafter\striptr@m\the\toksb\relax\end
}
@@ -463,10 +833,10 @@
#3% \relax
#4% ?
\end{%
-% \def\next{#4}%
-% \ifx\next\empty
% \toksc{#3#4}\showthe\toksc
- \setbox\z@\vbox{#3#4}%
+%
+ \setbox\z@\vbox{
+ #3#4}%
\ifnum\wd\z@=\z@
\expandafter\trimreturn\the\toksa\end
\toksb{}%
@@ -485,8 +855,8 @@
}
\def\boxstash{%
- \ifchecktrim\errmessage{stash contents: \the\toksa}\fi
- $\vtop{\activateinlinec\tabskip\z@\halign{\strut\ignorespaces##\hfil\cr\the\toksa\crcr}}$}
+ \ifchecktrim\ferrmessage{stash contents: \the\toksa}\fi
+ $\vtop{\activateinlinec\tabskip\z@\halign{\strut\ignorespaces##\hfil\cr\relax\the\toksa\crcr}}$}
\def\makestashbox{\cleanstash\stripstash\boxstash}
@@ -533,12 +903,16 @@
\ifcat\noexpand\next0%
\let\next\pr@dterm
\else
- \let\next\oldmathS
+ \if\noexpand\next[%
+ \let\next\pr@dterm
+ \else
+ \let\next\oldmathS
+ \fi
\fi
\fi
\next
}
-
+
\def\pr@dterm#1{%
\ifx#1\$%
\def\next{\hbox{$\Upsilon$}}%
@@ -550,44 +924,60 @@
\ifnum`#1>`0\relax
\def\next{\seekno#1}%
\else
- \def\next{\hbox{$\Upsilon$}#1}%
+ \def\next{\hbox{$\Upsilon$}#1}% TODO: look for an identifier
\fi
\else
- \def\next{\hbox{$\Upsilon$}#1}%
+ \def\next{\hbox{$\Upsilon$}#1}% TODO: look for an identifier
\fi
\fi
\fi
\next
}%
-\let\oldmathS\$
\let\$\prodterm
+\defreserved\${\prodterm}
\def\seekno{\afterassignment\printterm\tempca}%
\def\seeksym#1]{%
- \hbox{$\Upsilon\kern-1pt{}_{\rm#1}$}}
+ \hbox{$\Upsilon\kern-1pt{}_{\def\\##1{\hbox{\sscmd\prodstyle{##1}}}\rm#1}$}}
+
+\def\seeksym#1]{% a better version of the above
+ \hbox{$\ulcorner\def\\##1{##1}\let\.\\\let\|\\\let\ous\_\let\_\relax
+ \edef\next{#1}\let\_\ous
+ \hbox{\expandafter\prodstyle\expandafter{\next}}\urcorner$}}
\def\printterm{\hbox{$\Upsilon\kern-1pt{}_{\number\tempca}$}}%
+% typesetting examples of \bison\ productions and \flex\ input in text
+
\long\def\setproduction#1{%
\def\termidxrank{5}%
\def\headeridxrank{4}%
\def\defidxrank{3}%
\def\texcsidxrank{5}%
+ \textproductionsetup
+ \hbox{\strut}%
+ \Binputtoks{\lsectionbegin{b}#1\yyeof\yyeof\endparseinput\endparse\postparse}%
+ \the\Binputtoks\par% Stage two, start the parsing
+}
+
+\def\textproductionsetup{%
\ninepoint
\let\returnexplicitspace\splitexplicitspace
\let\acharswitch\texcharadjust
\let\onecharswitch\texcsadjust
+ \let\extractprodtableinfo\empty % we do not preprocess the table
\showlastactionfalse
\let\actionfiller\empty
- \parsevb{#1} % Stage two, start the parsing
- \ifyyparsefail
- {\toks0{#1}\errmessage{failed to parse: \the\toks0}}%
- \else % Stage three, process the parsed table
- \par\hbox{\strut}%
- \typesetalltables
- \fi
+ \let\postparsetext\postparsebproduction
+ \fillpstack{b}{%
+ \preparsebisongrammar
+ \preparsebisonprologue
+ {\preparsefallback{**}}%
+ \relax % this \relax is necessary so that the braces above
+ % are not stripped by \poppstack
+ }%
}
\def\splitexplicitspace{%
@@ -605,7 +995,7 @@
\yybyte{\_}%
\expandafter\yycp@\expandafter`\the\yybyte\relax
\mkpurebyte
- \yyreturn
+ \yyreturn
}
}
@@ -636,8 +1026,9 @@
\long\def\b@ginprod#1\endprod{%
\setproduction{#1}%
+ \expandafter
\endgroup
- \par
+ \expandafter\gaglue\the\gaglue\relax % export the alignment width
}
\def\beginmprod{%
@@ -657,8 +1048,55 @@
\def\begincprod#1\endcprod{{\def\tlskip{0 pt plus1fill}\let\tfskip\tlskip\beginprod#1\endprod}}
-% the next macro assumes that \gindex being defined implies that all the bookkeeping required
-% for maintaining the index of grammar terms has been taken care of
+% flex examples typesetting
+
+\long\def\setflex#1{%
+ \def\fstatedefidxrank{3}%
+ \def\fstateidxrank{4}%
+ \def\fregexidxrank{5}%
+ \textflexsetup
+ \hbox{\strut}%
+ \Binputtoks{\lsectionbegin{fs1}#1\yyeof\yyeof\endparseinput\endparse\postparse}%
+ \the\Binputtoks\par% Stage two, start the parsing, the \par is expected by the \parserreset
+}
+
+\def\textflexsetup{%
+ \ninepoint
+ %\let\returnexplicitspace\splitexplicitspace
+ \let\acharswitch\texcharadjust
+ %\let\onecharswitch\texcsadjust
+ \let\extractprodtableinfo\empty % we do not preprocess the table
+ \let\postparsetext\postparsefsection
+ \fillpstack{fs1}{%
+ \preparseflexone % TODO
+ \preparseflextwo
+ {\preparsefallback{**}}%
+ \relax % this \relax is necessary so that the braces above
+ % are not stripped by \poppstack
+ }%
+}
+
+\def\beginflex{%
+ \par
+ \begingroup
+ \catcode`\^^M=12 %
+ \catcode`\#=12 %
+ \b@ginflex%
+}
+
+\long\def\b@ginflex#1\endflex{%
+ \setflex{#1}%
+ \expandafter
+ \endgroup
+ \expandafter\gaglue\the\gaglue\relax % export the alignment width, TODO: set \gaglue in \flex
+}
+
+
+
+% the following macros assume that defining \gindex or \xrefstream implies that all
+% the bookkeeping required for maintaining the custom index and local cross referencing
+% has been taken care of; this way, in bootstrap mode, no index entries or cross
+% references are generated.
\ifx\gindex\UNDEFINED
\else
@@ -666,6 +1104,10 @@
\immediate\openout\gindex=\jobname.gdx
\fi
+\let\endcprod\endgroup
+\let\endmprod\endgroup
+\let\endprod\endgroup
+
% stringing all the manuals together (disabled for now)
%\newwrite\lastpageinfo
diff --git a/support/splint/tex/yyinput.sty b/support/splint/tex/yyinput.sty
index d424d299d8..0898410c51 100644
--- a/support/splint/tex/yyinput.sty
+++ b/support/splint/tex/yyinput.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,7 +14,13 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-\def\mkpurebyte{\uccode`\@=\yycp@\uppercase{\yybytepure{@}}\uccode`\@=`\@}
+%\def\mkpurebyte{\uccode`\@=\yycp@\uppercase{\yybytepure{@}}\uccode`\@=`\@}
+
+% make all symbol characters category 12; most macros are indifferent to this
+% however, if delimited macros are used to process the matched text, the option
+% makes it easier to write such macros;
+
+\def\mkpurebyte{\uccode`\.=\yycp@\uppercase{\yybytepure{.}}\uccode`\.=`\.}
\def\yyinput{\futurelet\next\yyinp@t} % get the code of the next character ...
@@ -57,7 +63,13 @@
\yycontinue
}
+% some \Cee\ escape characters; the rest are either silly (like \a and \b) or
+% are already defined to have other important functions in \TeX\ (such as \v and \t)
+
\chardef\n=`\^^J
+\chardef\r=`\^^M
+\chardef\f=`\^^L
+\chardef\HT=`\^^I % ASCII horizontal tab
\chardef\charseq1
\chardef\charac2
\chardef\chargroup3
@@ -78,7 +90,7 @@
% automatically
\yybyte{#1}%
\ifyyinputdebug
- \immediate\write16{read: \the\yybyte}%
+ \immediate\write16{read: \the\yybyte\space after: \the\yytext@seen}%
\fi
\ifx#1\ % a space token
\yybreak\returnexplicitspace
@@ -135,12 +147,24 @@
}%
\def\multicharswitch{
+ \raw\vb\raw {%
+ \vbunwrap
+ }
+ \raw\insertraw\raw {%
+ \insertrawnext
+ }
\raw\stashed\raw {%
\stashnext
}
\raw\format \formatlocal\raw {%
\formatnext
}
+ \raw\formatbegin\raw {%
+ \fmtbegin
+ }
+ \raw\formatp\raw {%
+ \fmtparam
+ }
\raw\sflush\raw {%
\sflushnext
}
@@ -158,6 +182,9 @@
\raw\fold \breakline\raw {%
\expandafter\yyinput\expandafter\formatlocal\expandafter{\the\yybyte}%
}
+ \raw\breakahead\raw {%
+ \expandafter\yyinput\expandafter\formatp\the\yybyte
+ }
\raw\break\raw {% for testing purposes
\yycp@=`\ %
\yybytepure={ }%
@@ -195,22 +222,64 @@
\setspecialcharsfrom\multicharswitch
\setspecialcharsfrom\acharswitch
+\def\insertrawnext#1{% insert a command
+ #1\yyinput
+}
+
+\def\vbunwrap#1#2\vb{%
+ \yyinput#1\stashed{#2}\vb
+}
+
\chardef\stashchar=`\ %
\chardef\formatchar=`\ %
\newcount\stashmarker
\newcount\formatmarker
-\def\stashnext#1{%
+\def\stashnextwithspace#1{%
\yybytepure{ }\yycp@\stashchar
- \toksa{#1}%
+ \yybyte\expandafter{\the\yybyte{#1}}%
\advance\stashmarker\@ne
- \edef\next{\yysbyte{\noexpand\strm[\the\stashmarker]{\the\yybyte{\the\toksa}}.[\the\stashmarker]}}\next
+ \edef\next{\yysbyte{\noexpand\strm[\the\stashmarker]{\the\yybyte}.[\the\stashmarker]}}\next
% the extra `.' is to avoid the stripping of braces by the parameter scanning mechanism of TeX
- \yybyte\expandafter{\the\yybyte{#1}}%
\yyreturn
}
+% the stash processing mechanism that makes the stash invisible to the lexer
+
+\def\stashnextwithnothing#1{%
+ \advance\stashmarker\@ne
+ \yybyte\expandafter{\the\yybyte{#1}}\concat\yysubtext\yybyte
+ \appendr\yysbyte{\noexpand\strm[\the\stashmarker]{\the\yybyte}.[\the\stashmarker]}%
+ \ifyyinputdebug
+ \immediate\write16{stash byte: \the\yysbyte mid text: \the\yysubtext}%
+ \fi
+ % the extra `.' is to avoid the stripping of braces by the parameter scanning mechanism of TeX
+ \yyinput
+}
+
+% while collecting the stash, the macro below packages the stash contents into a control sequence
+% to be added to the \yystash stream; the stash is, however, presented to the lexer unpackaged; this
+% introduces a minor inefficiency while ensuring that exactly one level of packaging is present
+% (otherwise the lexer might back up and repackage already packaged stash); the inefficiency only
+% persists while the lexer scans the current token (and possibly backs up).
+
+\def\stashnextwithnothingnx#1{%
+ \advance\stashmarker\@ne
+ \yypush{#1}\on\astarray
+ \appendr\yysbyte{\noexpand\strm[\the\stashmarker]{\the\yybyte{%
+ \expandafter\noexpand\romannumeral\gettopofstackcsx\astarray}}.[\the\stashmarker]}%
+ \yybyte\expandafter{\the\yybyte{#1}}% do not package the input
+ \concat\yysubtext\yybyte
+ \ifyyinputdebug
+ \immediate\write16{stash byte: \the\yysbyte mid text: \the\yysubtext}%
+ \fi
+ % the extra `.' is to avoid the stripping of braces by the parameter scanning mechanism of TeX
+ \yyinput
+}
+
+\let\stashnext\stashnextwithnothingnx
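+
+% a rough picture of the stream built above: after two stashed fragments have been collected,
+% \yysbyte holds entries of the form
+%
+%   \strm[1]{...}.[1]\strm[2]{...}.[2]
+%
+% (the contents of the braced groups are omitted here); the fifo macros in this file can later
+% split such a stream at any marker number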
+
\def\formatnext#1{%
\yybytepure{ }\yycp@\formatchar
\toksa{#1}%
@@ -221,6 +290,9 @@
\yyreturn
}
+\def\fmtbegin#1\fmtend{\formatnext{#1}} % multiparameter format sequences
+\def\fmtparam#1#2{\formatnext{#1{#2}}} % single parameter format sequences
+
\chardef\boundarychar=`\ %
% the following is a minimal setup of a parsing boundary
@@ -237,22 +309,45 @@
\edef\next{\toksc{\the\yystash\the\yystashseen}}\next
\yyfifolastidx\toksc\in\toksc
\toksa{#1}\toksb{#2}%
- \edef\next{\yysbyte{\noexpand\strm[\the\toksa]{\the\yybyte{\the\toksb}{\the\toksc}}.[\the\toksa]}}\next
+ \appendr\yysbyte{\noexpand\strm[\the\toksa]{\the\yybyte{{\nx\it flushing}: ``\the\toksb'',
+ {\nx\it last index}: $[\the\toksc]$. }}.[\the\toksa]}%
% the extra `.' is to avoid the stripping of braces by the parameter scanning mechanism of TeX
\yybyte\expandafter{\the\yybyte{#1}{#2}}%
+ \concat\yysubtext\yybyte
+ \ifyyinputdebug
+ \immediate\write16{stash byte: \the\yysbyte mid text: \the\yysubtext}%
+ \fi
\yyreturn
}
+% the following implementation is marginally cleaner, as it does not redefine \yyr@@dfifo;
+% it also makes it almost transparent that \yyreadfifo does not modify any token registers
+% except for its two token register parameters
+% TODO: replace \yyr@@dfifo with \yyr@adfifo
+
\def\yyreadfifo#1\to#2\in#3{%
+%{% to reduce the side effects to the redefinition of the input and output
+ % token registers
\def\yyr@adfifo##1\strm[#2]##2[#2]##3\end{%
- \def\yyr@@dfifo{##3}%
- \ifx\yyr@@dfifo\empty
- #3{}% there is no such marker in the fifo
- \else
+ \yystringempty{##3}{#3{}}% there is no such marker in the fifo
+ {%
#3{##1\strm[#2]##2[#2]}%
\def\yyr@@dfifo####1\strm[#2]####2[#2]\end{#1{####1}}% strip off the inserted string
\yyr@@dfifo##3\end
- \fi
+% TODO: \def\yyr@dfifo####1\strm[#2].[#2]\end{#1{####1}}% strip off the inserted string
+% \yyr@dfifo##3\end
+ }%
+ }%
+ \expandafter\yyr@adfifo\the#1\strm[#2].[#2]\end
+%\edef\next{#1{\the#1}#3{\the#3}}\expandafter}\next
+}
+
+\def\yytrimfifo#1\to#2\in#3{% just save the initial segment of #1 in #3
+ \def\yyr@adfifo##1\strm[#2]##2[#2]##3\end{%
+ \yystringempty{##3}{#3{}}% there is no such marker in the fifo
+ {%
+ #3{##1\strm[#2]##2[#2]}%
+ }%
}%
\expandafter\yyr@adfifo\the#1\strm[#2].[#2]\end
}
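+
+% a usage sketch (the marker number is hypothetical):
+%
+%   \yyreadfifo\yystash\to3\in\toksa
+%
+% puts the segment of \yystash up to and including marker [3] into \toksa and leaves the
+% remainder in \yystash; \yytrimfifo extracts the same initial segment but leaves \yystash intact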
diff --git a/support/splint/tex/yymisc.sty b/support/splint/tex/yymisc.sty
index fcc38da96d..b961135b02 100644
--- a/support/splint/tex/yymisc.sty
+++ b/support/splint/tex/yymisc.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -14,7 +14,7 @@
% You should have received a copy of the GNU General Public License
% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-% `data structure access' macros': picking the n-th undelimited parameter
+% `data structure access' macros: picking the n-th undelimited parameter
% in a parameter list inside a token register
% it is assumed that none of the arguments is \end, and that there are enough
% parameters to pick the desired one
@@ -88,6 +88,28 @@
\def\g@ttenth#1#2#3#4#5#6#7#8#9\end{\g@tsecond#9\end}
+% removing the first element of a token register if it has more than one;
+
+\def\sansfirst#1{%
+ \expandafter\s@nsfirst\expandafter{\the#1}{#1}%
+}
+
+\def\s@nsfirst#1#2{%
+ \expandafter\s@nsf@rst\expandafter{\eatone#1}{#2}%
+}
+
+\def\s@nsf@rst#1#2{%
+ \yystringempty{#1}{}{%
+ #2{#1}%
+ }%
+}
+
+% a version of the macro above that expands to the original argument with the first element removed
+
+\def\sansfirstx#1{%
+ \expandafter\yystringempty\expandafter{\eatone#1}{#1}{\eatone#1}%
+}
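+
+% e.g.\ if \toksa contains `abc', then \sansfirst\toksa leaves `bc' in \toksa, while a register
+% holding a single element is left unchanged; similarly, \sansfirstx{abc} expands to `bc' and
+% \sansfirstx{a} expands to `a'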
+
% string replacement: all arguments are registers, nothing is expanded, no \next is defined
% note that this is not a greedy replacement: this could be arranged with a more sophisticated macro
% also note that the string being replaced cannot have any braces in it
@@ -270,6 +292,18 @@
% in the interest of efficiency; the old version (that used a control sequence
% as a `stop marker') was more prone to this bug.
+% the next sequence is not merely a convenient abbreviation; it is useful if one
+% wants to look at an `alternative' value stored for the sequence without restoring
+% it to the current namespace; it also sets up the naming convention for namespaces
+
+\def\restorecsname#1#2{% get the name of the sequence in storage
+ % note that #2 can be a string beginning with an arbitrary
+ % character as a placeholder for the escape character
+ '#1'[\expandafter\eatone\string#2]%
+}
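+
+% e.g.\ \restorecsname{local-namespace}{\yytext} expands to the character string
+%
+%   'local-namespace'[yytext]
+%
+% which is the name under which \savecs below stores the saved meaning of \yytext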
+
+% save macros listed in #2 in namespace #1
+
\def\savecs#1#2{\s@vecs{#1}#2.}
\def\s@vecs#1#2{%
@@ -277,12 +311,22 @@
\yybreak{}%
\else
\yybreak{%
- \expandafter\let\csname '#1'[\expandafter\eatone\string#2]\endcsname#2%
+ \expandafter\let\csname\restorecsname{#1}{#2}\endcsname#2%
\s@vecs{#1}%
}%
\yycontinue
}
+% similar to the macro above but the list is a control sequence
+
+\def\savecslist#1#2{%
+ \expandafter\s@vecslist\expandafter{#2}{#1}%
+}
+
+\def\s@vecslist#1#2{%
+ \savecs{#2}{#1}%
+}
+
\def\restorecs#1#2{\r@storecs{#1}#2.}
\def\r@storecs#1#2{%
@@ -290,12 +334,20 @@
\yybreak{}%
\else
\yybreak{%
- \expandafter\let\expandafter#2\csname '#1'[\expandafter\eatone\string#2]\endcsname
+ \expandafter\let\expandafter#2\csname\restorecsname{#1}{#2}\endcsname
\r@storecs{#1}%
}%
\yycontinue
}
+\def\restorecslist#1#2{%
+ \expandafter\r@storecslist\expandafter{#2}{#1}%
+}
+
+\def\r@storecslist#1#2{%
+ \restorecs{#2}{#1}%
+}
+
\def\hidecs#1{\h@decs#1.}
\def\h@decs#1{%
@@ -309,51 +361,53 @@
\yycontinue
}
-\def\savehcs#1#2{\savecs{#1}{#2}\hidecs{#2}}
-
-\def\savecslist#1#2{%
- \expandafter\s@vecslist\expandafter{#2}{#1}%
-}
+\def\hidecslist#1{\expandafter\hidecs\expandafter{#1}}
-\def\s@vecslist#1#2{%
- \savecs{#2}{#1}%
-}
+\def\savehcs#1#2{\savecs{#1}{#2}\hidecs{#2}}
-\def\restorecslist#1#2{%
- \expandafter\r@storecslist\expandafter{#2}{#1}%
+\def\savehcslist#1#2{%
+ \expandafter\s@vehcslist\expandafter{#2}{#1}%
}
-\def\r@storecslist#1#2{%
- \restorecs{#2}{#1}%
+\def\s@vehcslist#1#2{%
+ \savehcs{#2}{#1}%
}
% a twist on the macros above: save control sequences with a postfix
-\def\savecsx#1#2{\s@vecsx{#1}#2.}
+\def\savecsx#1#2{\s@vecsx{#1}#2.} % there is no \savecsxlist macro
-\def\s@vecsx#1#2{%
+\def\s@vecsx#1#2{% #1 is the namespace, #2 is a control sequence without the postfix or prefix
\ifx#2.%
\yybreak{}%
\else
\yybreak{%
- \expandafter\s@vecsxlet\expandafter#2\csname\expandafter\defprefix\expandafter\eatone\string#2\defpostfix\endcsname{#1}%
+ \expandafter\s@vecsxlet\csname\expandafter\defprefix\expandafter\eatone\string#2\defpostfix\endcsname#2{#1}%
\s@vecsx{#1}%
}%
\yycontinue
}
-\def\s@vecsxlet#1#2{%
- \expandafter\let\csname '#2'[\expandafter\defprefix\expandafter\eatone\string#1\defpostfix]\endcsname
+\def\s@vecsxlet#1#2#3{% #1 is the control sequence augmented by prefix and postfix,
+ % #2 is the control sequence without prefix or postfix
+ % #3 is the namespace
+ \expandafter\let\expandafter#1\csname\restorecsxname{#3}{#2}\endcsname
}
\def\restorecsx#1#2{\r@storecsx{#1}#2.}
+% see remarks about \restorecsname above
+
+\def\restorecsxname#1#2{%
+ '#1'[\expandafter\defprefix\expandafter\eatone\string#2\defpostfix]%
+}
+
\def\r@storecsx#1#2{%
\ifx#2.%
\yybreak{}%
\else
\yybreak{%
- \expandafter\r@storecsxlet\expandafter#2\csname '#1'[\expandafter\defprefix\expandafter\eatone\string#2\defpostfix]\endcsname
+ \expandafter\r@storecsxlet\expandafter#2\csname\restorecsxname{#1}{#2}\endcsname
\r@storecsx{#1}%
}%
\yycontinue
@@ -401,7 +455,7 @@
% in the appropriate namespace, this macro adds a
% preamble, a postamble and a `this' type macro; this
% will mostly be used with indexing \TeX\ control sequences
- \tokse{\def\thisname{#1}\edef\thisnamex{\expandafter\eatone\string#1}}%
+ \toksf{\def\thisname{#1}\edef\thisnamex{\expandafter\eatone\string#1}}%
\toksa\expandafter{%
\csname\expandafter\defprefix\expandafter\eatone\string#1\defpostfix\endcsname}%
\toksc\expandafter{\expandafter\def\the\toksa#2}%
@@ -410,11 +464,11 @@
}
\def\d@f@{%
- \appendl\toksd{\the\tokse}%
- \tokse\expandafter{\defypreamble}%
- \appendl\toksd{\the\tokse}%
- \tokse\expandafter{\defypostamble}%
- \appendr\toksd{\the\tokse}
+ \tokse{\defypreamble}%
+ \concatl\tokse\toksd
+ \concatl\toksf\toksd
+ \tokse{\defypostamble}%
+ \concat\toksd\tokse
\toksd\expandafter{\expandafter{\the\toksd}}%
\concat\toksc\toksd
\afterassignment\d@fy
@@ -429,12 +483,14 @@
\def\defp#1#2#{% flexible dynamic type checking
\toksa\expandafter\expandafter\expandafter{\yyuniontag#1}%
- \expandafter\edef\yyuniontag{\the\toksa}%
+ \expandafter\edef\yyuniontag{\the\toksa}% add the sequence to the current union
\def#1#2{\errmessage{unexpected type: \string#1 in namespace <\currentyyunionnamespace>}}%
\savecs\parserstrictnamespace#1%
\toksa{#2}%
- \edef#1{\the\toksa}%
+ \edef#1{\the\toksa}% save the prototype
\savecs\parserprototypesnamespace#1%
+ \let#1\relax
+ \savecs\parserdebugnamespace#1% save the debug sequence for outputting the AST
\def#1#2%
}
@@ -478,6 +534,17 @@
{\catcode`\^^M=12 \aftergroup\def\aftergroup\eolletter\aftergroup{\aftergroup^^M\aftergroup}}% end of line, ... not really
{\catcode`\|=0\catcode`\\=12 |aftergroup|def|aftergroup|benignescape|aftergroup{|aftergroup\|aftergroup}}% not an escape
{\catcode`\#=12 \aftergroup\def\aftergroup\hashletter\aftergroup{\aftergroup#\aftergroup}}% not a parameter token
+{\catcode`\~=12 \aftergroup\def\aftergroup\safetilde\aftergroup{\aftergroup~\aftergroup}}% inactive tie
+{\catcode`\$=12 \aftergroup\def\aftergroup\safemath\aftergroup{\aftergroup$\aftergroup}}% not really a start math character
+
+\let\uscore\_ % the canonical underscore for the fonts that do not have the character
+
+% remove the brackets from the namespace string;
+% the parameter is the control sequence that expands to the namespace string
+
+\def\stripbrackets#1{\expandafter\stripbr@ckets#1[]\end}
+
+\def\stripbr@ckets#1[#2]#3\end{\yystringempty{#3}{#1}{#1#2}}
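+
+% e.g.\ if \parsernamespace expands to `[main]', then \stripbrackets\parsernamespace
+% produces `main'; a namespace string without brackets is passed through unchanged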
% token name input
@@ -631,10 +698,11 @@
% tables
\def\newtable@full#1{%
- \toksa{\csname newtoks\endcsname}%
- \expandafter\the\expandafter\toksa\csname #1\parsernamespace\endcsname
- \edef\next{\let\csname #1\endcsname\csname #1\parsernamespace\endcsname}\next
- \toksa\expandafter{\next}\concat\pinittoks\toksa
+ \expandafter\expandafter\csname newtoks\endcsname\csname #1\parsernamespace\endcsname
+ \expandafter\expandafter\expandafter\let\expandafter
+ \expandafter\csname #1\endcsname\csname #1\parsernamespace\endcsname
+ \appendr\pinittoks{\let\expandafter\noexpand\csname #1\endcsname
+ \expandafter\noexpand\csname #1\parsernamespace\endcsname}%
\csname #1\parsernamespace\endcsname=%
}
@@ -645,72 +713,61 @@
\def\constset#1#2{%
% a \mathchardef would be nicer but it cannot handle negative numbers
\expandafter\def\csname #1\parsernamespace\endcsname{#2}%
- \toksa\expandafter{\csname #1\endcsname}%
- \toksb\expandafter{\csname #1\parsernamespace\endcsname}%
- \edef\next{\let\the\toksa\the\toksb}\next
- \toksa\expandafter{\next}\concat\pinittoks\toksa
+ \expandafter\expandafter\expandafter\let\expandafter
+ \expandafter\csname #1\endcsname\csname #1\parsernamespace\endcsname
+ \appendr\pinittoks{\let\expandafter\noexpand\csname #1\endcsname
+ \expandafter\noexpand\csname #1\parsernamespace\endcsname}%
}
\def\uconstset#1#2{%
% a \mathchardef for positive constants
\expandafter\mathchardef\csname #1\parsernamespace\endcsname=#2 %
- \toksa\expandafter{\csname #1\endcsname}%
- \toksb\expandafter{\csname #1\parsernamespace\endcsname}%
- \edef\next{\let\the\toksa\the\toksb}\next
- \toksa\expandafter{\next}\concat\pinittoks\toksa
+ \expandafter\expandafter\expandafter\let\expandafter
+ \expandafter\csname #1\endcsname\csname #1\parsernamespace\endcsname
+ \appendr\pinittoks{\let\expandafter\noexpand\csname #1\endcsname
+ \expandafter\noexpand\csname #1\parsernamespace\endcsname}%
}
\def\charset#1#2{%
\expandafter\chardef\csname #1\parsernamespace\endcsname=#2\relax%
- \toksa\expandafter{\csname #1\endcsname}%
- \toksb\expandafter{\csname #1\parsernamespace\endcsname}%
- \edef\next{\let\the\toksa\the\toksb}\next
- \toksa\expandafter{\next}\concat\pinittoks\toksa
-}
-
-% switch macro
-
-\def\stashswitch#1{%
- \toksa\expandafter{\csname #1\endcsname}%
- \toksb\expandafter{\csname #1\parsernamespace\endcsname}%
- \edef\next{\let\the\toksb\the\toksa}\next
- \edef\next{\let\the\toksa\the\toksb}%
- \toksa\expandafter{\next}\concat\pinittoks\toksa
+ \expandafter\expandafter\expandafter\let\expandafter
+ \expandafter\csname #1\endcsname\csname #1\parsernamespace\endcsname
+ \appendr\pinittoks{\let\expandafter\noexpand\csname #1\endcsname
+ \expandafter\noexpand\csname #1\parsernamespace\endcsname}%
}
% parser and lexer state control
-\def\settokreg#1{%
- \toksa{\csname newtoks\endcsname}%
- \expandafter\the\expandafter\toksa\csname #1\parsernamespace\endcsname
- \edef\next{\let\csname #1\endcsname\csname #1\parsernamespace\endcsname}\next
- \toksa\expandafter{\next}\concat\pinittoks\toksa
+\def\settokreg#1{% do not create individual token registers for each parser namespace
+ \expandafter\ifx\csname #1\endcsname\relax
+ \expandafter\expandafter\csname newtoks\endcsname\csname #1\endcsname
+ \fi
}
-\def\setcntreg#1{%
- \toksa{\csname newcount\endcsname}%
- \expandafter\the\expandafter\toksa\csname #1\parsernamespace\endcsname
- \edef\next{\let\csname #1\endcsname\csname #1\parsernamespace\endcsname}\next
- \toksa\expandafter{\next}\concat\pinittoks\toksa
+\def\setcntreg#1{% do not create individual count registers for each parser namespace
+ \expandafter\ifx\csname #1\endcsname\relax
+ \expandafter\expandafter\csname newcount\endcsname\csname #1\endcsname
+ \fi
}
\def\setnulstack#1{%
- \toksa\expandafter{\csname #1\endcsname}%
- \toksb\expandafter{\csname #1\parsernamespace\endcsname}%
- \edef\next{\let\the\toksb\noexpand\empty}\next
- \edef\next{\let\the\toksa\the\toksb}\next
- \toksa\expandafter{\next}\concat\pinittoks\toksa
- \setcntreg{#1<count>}% this is only needed for the accelerated stack
+ \expandafter\let\csname #1\parsernamespace\endcsname\empty % for the unoptimized stack
+ \appendr\pinittoks{\let\expandafter\noexpand\csname #1\endcsname
+ \expandafter\noexpand\csname #1\parsernamespace\endcsname}%
+  \setcntreg{#1<count>}% this is needed only for the accelerated stack (and is the only action it needs)
}
\def\setcurrentcs#1{%
- \toksa\expandafter{\csname #1\endcsname}%
- \toksb\expandafter{\csname #1\parsernamespace\endcsname}%
- \edef\next{\let\the\toksb\the\toksa}\next
- \edef\next{\toksa{\let\the\toksa\the\toksb}}\next
- \concat\pinittoks\toksa
+ \expandafter\expandafter\expandafter\let\expandafter
+ \expandafter\csname #1\parsernamespace\endcsname\csname #1\endcsname
+ \appendr\pinittoks{\let\expandafter\noexpand\csname #1\endcsname
+ \expandafter\noexpand\csname #1\parsernamespace\endcsname}%
}
+% switch macro
+
+\let\stashswitch\setcurrentcs
+
\def\newparserstate{%
\setcntreg{yytoken}%
\setcntreg{yystate}%
@@ -721,14 +778,18 @@
% \yyval and \yylval will be token registers
\settokreg{yyval}%
\settokreg{yylval}%
+ \setnulstack{astarray}% an array of AST nodes
\setnulstack{yyssa}%
\setnulstack{yyvsa}%
}
\def\newlexerstate{%
+ \settokreg{yytext@}% `dirty' buffer (contains stash and formatting)
+ \settokreg{yytext@seen}% read `dirty' buffer
\settokreg{yytext}%
\settokreg{yytextseen}%
\settokreg{yybyte}%
+ \settokreg{yysubtext}% stash between characters
\settokreg{yyfutureyytext}% token register used to save read text in eob macros
%
\settokreg{yytextpure}% % the registers that serve the same role
@@ -760,32 +821,133 @@
%
\setcntreg{yyfmark}% % the last marker in the current token
\setcntreg{yyfmarklast}%
+ \setcntreg{yyfmark@accept}%
\setcntreg{yysmark}%
\setcntreg{yysmarklast}%
+ \setcntreg{yysmark@accept}%
\setnulstack{yystatestack}%
\setcurrentcs{setflexstates}%
\setcurrentcs{ifyytextbackup}% this is purely to record the recovery command in \pinittoks
}
-\def\savestatelist#1{% this elaborate definition is needed to ensure that `throwaway token registers' like \toksa
- % survive the namespace switch
- \edef\next{\expandafter\expandafter\expandafter
- \let\expandafter\expandafter\expandafter\noexpand\expandafter\expandafter\csname #1\parsernamespace\endcsname
- \expandafter\noexpand\csname #1\endcsname}\next
-}
-
-\def\savefullstate{%
- \savestatelist{yyssa}%
- \savestatelist{yyvsa}%
- \savestatelist{yystatestack}%
- \savestatelist{ifyytextbackup}% buffer
-}
-
% use \pinittoks to compose a `parser restore' macro along with
% \let\yyvsa\yyvsa[parser namespace] and \let\yyssa\yyssa[parser namespace];
% `parser save' macro only has to set up \yy?sa[parser namespace]'s;
-% if a fully reentrant parser is required, use the macros above to save the contents of
-% all the variables by redefining \settokreg and \setcntreg and saving \yy?sa stacks
+%
+% this mechanism creates parsers that (barely) survive namespace switches (such as when a local
+% parser is used to extract information about the tokens) and does not result in a reentrant parser;
+% if a fully reentrant parser is required, either use the macros above to save the contents of
+% all the variables by redefining \settokreg and \setcntreg and saving \yy?sa stacks or, better still,
+% take advantage of the grouping mechanism provided by \TeX; note that the grouping approach seems
+% to be the only viable solution if the stack mechanism is `optimized'
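+
+% a minimal sketch of the grouping approach (the parser switching macro and the input are
+% hypothetical); as long as no assignments are \global, everything reverts at \endgroup:
+%
+%   \begingroup
+%     \tosmallparser                % switch to the nested parser's namespace
+%     \basicparserinit              % reset the lexer and parser state
+%     \yyparse <input> \yyeof\yyeof\endparseinput\endparse
+%   \endgroup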
+
+% optimization options
+
+\def\optimizeall{%
+ % lexer
+ \ifnum\optimization>\z@
+ \optimize{yynxt}%
+ \optimize{yyaccept}%
+ \optimize{yydef}%
+ \optimize{yychk}%
+ \optimize{yybase}%
+ \optimize{yyec}%
+ \optimize{yymeta}%
+ \tracingstats=\@ne
+ \fi
+ % parser
+ \ifnum\optimization>\@ne
+ \optimize{yytranslate}%
+ \optimize{yyrone}%
+ \optimize{yyrtwo}%
+ \optimize{yyrthree}%
+ \optimize{yydefact}%
+ \optimize{yydefgoto}%
+ \optimize{yypact}%
+ \optimize{yypgoto}%
+ \optimize{yytable}%
+ \optimize{yycheck}%
+ \optimize{yyprhs}%
+ \optimize{yyrhs}%
+ \optimize{yytoknum}%
+ \optimize{yystos}%
+ \optimizetext{yytname}%
+ \fi
+}
+
+% parser and lexer initialization, token prettifying
+
+\def\genericparser name: #1, ptables: #2, ltables: #3, tokens: #4, asetup: #5, dsetup: #6, rsetup: #7, optimization: #8;{%
+ % parser initialization
+ %
+ \expandafter\def\csname #1namespace\endcsname{[#1]}%
+ \savecs{local-namespace}\parsernamespace
+ \expandafter\let\expandafter\parsernamespace\csname #1namespace\endcsname
+ \pinittoks{}%
+ \input #2 % load main parser table
+ \settokens % set the values of all tokens
+ \yystringempty{#4}{}{%
+ \input #4 % use token equivalence table to set the values of non-string tokens
+ }%
+ #5% (a)dditional setups
+ %
+ \input #3 % load lexer tables
+ %
+ % at this point the macros inside the table files (\newtable, \constset,
+ % \yybigswitch, \stashswitch, \addname, \yydoactionswitch, \setflexstates,
+  % \stateset, \tokeneq) have set up the corresponding structures in
+ % the `parser namespace' (e.g. if the parser namespace is `main',
+ % \newtable{yyaccept} created a token register \yyaccept[main]),
+ % assigned the `generic' names to them (to continue
+ % the example above, \newtable does \let\yyaccept\yyaccept[main]) and
+ % recorded the corresponding commands in \pinittoks for future use.
+ %
+ % lexer state macros are namespace specific (just like token names)
+ % so they have to be set in each namespace.
+ %
+ \setflexstates
+ #8% possible optimization
+ %
+ % finally, we add the definitions for the variables used in running
+ % the lexer and the parser.
+ \newparserstate
+ \newlexerstate
+ #6% additional (d)ata setup (say, \newlexerstateextra)
+ %
+ % we record all the commands necessary to switch to the desired namespace
+ % in a convenient macro
+ {%
+ \toks0{#7}% additional setup before switching namespaces
+ \edef\next{%
+ \the\toks0 % additional namespace setups
+ \let\noexpand\parsernamespace\expandafter\noexpand\csname #1namespace\endcsname
+ \the\pinittoks % restore all the tables, tokens and constants, and stacks
+ \let\noexpand\getcurrentparser\expandafter\noexpand\csname to#1parser\endcsname
+ }%
+ \toks0\expandafter{\next}%
+ \edef\next{\toks0{\def\expandafter\noexpand\csname to#1parser\endcsname{\the\toks0}}}\next
+ \expandafter
+ }\the\toks0
+ \restorecs{local-namespace}\parsernamespace
+}
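+
+% a usage sketch (the table and token file names are hypothetical):
+%
+%   \genericparser name: main,
+%     ptables: byytab.tex, ltables: lstab.tex, tokens: bo.tok,
+%     asetup: {}, dsetup: {}, rsetup: {}, optimization: \optimizeall;
+%
+% after this, \tomainparser switches to the newly set up parser's namespace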
+
+\def\genericprettytokens namespace: #1, tokens: #2, correction: #3, host: #4;{%
+ \savecs{local-namespace}{\parsernamespace\tokeneq}%
+ \yystringempty{#2}{}{%
+ \expandafter\let\expandafter\parsernamespace\csname #1namespace\endcsname
+ \def\tokeneq##1##2{\prettytoken{##1}}%
+ \let\tokenpp\prettytoken
+ \input #2 % /* re-use token equivalence table to set the typesetting of tokens */
+ }%
+ \yystringempty{#3}{}{%
+ \expandafter\let\expandafter\parsernamespace\csname #1namespace\endcsname
+ \input #3 % input customized typesetting rules for tokens
+ }%
+ \yystringempty{#4}{}{%
+ \expandafter\let\expandafter\hostparsernamespace\csname #4namespace\endcsname
+ }%
+ \restorecs{local-namespace}{\parsernamespace\tokeneq}%
+}
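+
+% a usage sketch (the file names are hypothetical):
+%
+%   \genericprettytokens namespace: main, tokens: bo.tok, correction: btokenset.sty, host: main;
+%
+% empty arguments simply skip the corresponding step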
% switch and dfa macros
@@ -1186,7 +1348,7 @@
\def\setuprefs#1#2{%
\expandafter\let\csname$[#1]\endcsname.% $o the \ifx ... \relax test makes sense ...
\appendr\nameflagtoks{\noexpand\unsetsymname{#1}}% clean it up later
- \toksa{}\expandafter\charstonumbers#1\end
+ \toksa{}\expandafter\charstonumbers#1\end
\edef\next{\toksb{\space\space\noexpand\setsym{\the\toksa}{#2}\hc^^J\uu\uu\hc\hc\hc\uu#1 --> #2^^J}}\next
\edef\next{\toksc{\space\space\noexpand\unsetsym{\the\toksa}\hc^^J\uu\uu\hc\hc\hc\uu#1 --> \relax^^J}}\next
\concat\setsncommands\toksb
@@ -1420,3 +1582,8 @@
\setsymcs{#1}{#2}%
\setsymtr{#1}{#2}%
}
+
+% message output
+
+\def\ferrmessage#1{{\newlinechar`\^^J\immediate\write16{\parsernamespace::#1^^J}}}
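+
+% e.g.\ with \parsernamespace set to `[main]', \ferrmessage{done} writes `[main]::done'
+% (followed by a newline) to the terminal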
+
diff --git a/support/splint/tex/yynested.sty b/support/splint/tex/yynested.sty
index db725abfc5..2a47c79593 100644
--- a/support/splint/tex/yynested.sty
+++ b/support/splint/tex/yynested.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -24,21 +24,12 @@
\boundaryupper-\@M
\boundarylower-\@M
-\expandafter\def\expandafter\parserdatainit\expandafter{%
- \parserdatainit
+\expandafter\def\expandafter\bisonparserdatainit\expandafter{%
+ \bisonparserdatainit
\boundaryupper-\@M
\boundarylower-\@M
}
-% the next sequence is just a reminder of what would have to be done if
-% reentrancy is required; the current state saving mechanism is intended for
-% bare unoptimized parsers exclusively and does not handle any constant saving
-
-\expandafter\def\expandafter\savefullstateextra\expandafter{%
- \savefullstateextra
- % save \boundaryupper and \boundarylower
-}
-
\def\inputboundarynext#1{%
\yybytepure{ }\yycp@\boundarychar
#1\getstackpointer\yyvsa\relax
diff --git a/support/splint/tex/yyparse.sty b/support/splint/tex/yyparse.sty
index ce5ab8b5e3..f6c6dadff8 100644
--- a/support/splint/tex/yyparse.sty
+++ b/support/splint/tex/yyparse.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -37,14 +37,14 @@
\yypushr\yystate\on\yyssa
{%
\iftraceparserstates
- \derrmessage{^^Jstate \the\yystate\iftracestacks, \fi}%
+ \ferrmessage{^^Jstate \the\yystate\iftracestacks, \fi}%
\fi
\iftracestacks
\iftraceparserstates
\else
- \derrmessage{^^J}%
+ \ferrmessage{^^J}%
\fi
- \showstack\yyssa\derrmessage{state stack: \stackcs^^J}%
+ \showstack\yyssa\ferrmessage{state stack: \stackcs^^J}%
\fi
}%
\yybreak\yybackup
@@ -117,7 +117,7 @@
\yybreak@@@\yynewstate % new state will be shifted
\else % ...if (yyn < 0)
\iffalse % if (yytable_value_is_error(yyn))
- % \ifnum\yyn=\YYTABLEERROR\relax
+ % /* #define yytable_value_is_error(Yytable_value) YYID (0) */
\yybreak@@@@\yyerr
\else
\yyn=-\yyn
@@ -137,7 +137,7 @@
\yyn=\fgetelemof{yydefact}\at\yystate\relax
% yyn = yydefact[yystate]
\iftraceactions
- \derrmessage{default action^^J}%
+ \ferrmessage{default action^^J}%
\fi
\ifnum\yyn=\z@
\xskiptofi\yyerr
@@ -149,16 +149,16 @@
\def\yyreduce{%
\yylen=\fgetelemof{yyrtwo}\at\yyn\relax
\iftracerules
- \derrmessage{^^Jreducing (rule \the\yyn)}\printrule{\yyn}%
+ \ferrmessage{^^Jreducing (rule \the\yyn)}\printrule{\yyn}%
\fi
{%
\iftracestacks
\iftracerules
\else
- \derrmessage{^^J}%
+ \ferrmessage{^^J}%
\fi
\showstack\yyssa
- \derrmessage{stack: \stackcs^^Jpopping \the\yylen^^J}%
+ \ferrmessage{stack: \stackcs^^Jpopping \the\yylen^^J}%
\fi
}%
\yyilen=\fgetelemof{yyrthree}\at\yyn\relax
@@ -218,14 +218,14 @@
\def\yypaccept{%
\iftraceparseresults
- \derrmessage{accepted^^J}%
+ \ferrmessage{accepted^^J}%
\fi
\finishparse
}
\def\yyerr{%
\iftraceparseresults
- \derrmessage{(parse) error^^J}%
+ \ferrmessage{(parse) error^^J}%
\fi
\cleanupparse
}
@@ -235,10 +235,12 @@
\let\yysymswitch\eatone
\let\yysymcleanup\eatone
+% parser-specific output message: defines the current token name (\tokname) for use in the output
+
\def\derrmessage#1{{%
\newlinechar=`\^^J%
\edef\tokname{\fgetelemof{yytname}\at\yytoken}%
- \message{#1}%
+ \ferrmessage{#1}%
}}
% these macros reuse dedicated counters ... locally
@@ -256,7 +258,7 @@
\else
\edef\ruleline{\ruleline\space<empty>}%
\fi
- \message{ -->\ruleline}%
+ \ferrmessage{ -->\ruleline}%
}}
% print the rule without using yyprhs and yyrhs (necessary if using
@@ -269,7 +271,7 @@
\else
\edef\ruleline{\ruleline\space<empty>}%
\fi
- \message{ -->\ruleline}%
+ \ferrmessage{ -->\ruleline}%
}}
\def\appendtoruleline#1{%
@@ -293,7 +295,7 @@
\long\def\cleanupparse#1\endparse{%
\iftracediscardedinput
- \immediate\write16{discarding the rest of the input}%
+ {\toksa{#1}\immediate\write16{discarding the rest of the input: \the\toksa}}%
\fi
\yyerror
}
@@ -317,3 +319,39 @@
%
\yysymcleanup{\yyn}% removing symbol names from the namespace
}
+
+% common parser initializations; note that some initializations (such as resetting the data
+% and state stacks) are done at parser startup (see the definition of \yyparse)
+
+\def\basicparserinit{%
+ \yytext{}%
+ \yysubtext{}%
+ \yytext@{}%
+ \yytextpure{}%
+ \yytextseenpure{}%
+ \yytextseen{}%
+ \yytext@seen{}%
+ \yybyte{}%
+ \yyfbyte{}\yysbyte{}%
+ \yystash{}%
+ \yystashseen{}%
+ \yyformat{}%
+ \yyformatseen{}%
+ \yyfutureyytext{}%
+ \yyinitstack\astarray % flattened AST to (possibly) speed up the parser
+ \yyinitstack\yystatestack
+ \yyfmark=\z@
+ \yysmark=\z@
+ \yyfmarklast=\z@
+ \yyfmark@accept=\z@
+ \formatmarker=\z@
+ \yysmarklast=\z@
+ \yysmark@accept=\z@
+ \stashmarker=\z@
+ \yytextbackupfalse
+ \yyg@yyinit=\z@
+ \yyg@yystart=\z@
+ \YYATBOL=\@ne
+ \yyparsefailfalse
+ \YYEOBLASTMATCHtrue
+}
diff --git a/support/splint/tex/yypretty.sty b/support/splint/tex/yypretty.sty
new file mode 100644
index 0000000000..aeb1179b32
--- /dev/null
+++ b/support/splint/tex/yypretty.sty
@@ -0,0 +1,25 @@
+% Copyright 2012-2020, Alexander Shibakov
+% Copyright 2002-2014 Free Software Foundation, Inc.
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+% prettified token output for the common parts of \bison grammars;
+% for these to be properly displayed, the name parser must produce
+% the tokens in an appropriate namespace; they should also not be
+% \bison\ names (otherwise \TeX\ will complain about `... name ...
+% not exceptional ...')
+
+\prettywordpair{emptyrhs&}{$\circ$ {\rm(empty rhs)}}%
+\prettywordpair{inline_action&}{$\diamond$ {\rm(inline action)}}%
diff --git a/support/splint/tex/yystype.sty b/support/splint/tex/yystype.sty
index e37b298590..8e4660fb6e 100644
--- a/support/splint/tex/yystype.sty
+++ b/support/splint/tex/yystype.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -23,15 +23,10 @@
\setnulstack{obstackforstringraw}%
}
-\def\savefullstateextra{%
- \savestatelist{obstackforstring}%
- \savestatelist{obstackforstringraw}%
-}
-
% lexer environment
-\def\yycomplain#1{\immediate\write16{#1}} % lexer errors
-\def\yypdeprecated#1{\errmessage{option: #1 is ignored}} % outdated options
+\def\yycomplain#1{\ferrmessage{flex: #1}} % lexer errors
+\def\yypdeprecated#1{\yywarn{option: #1 is ignored}} % outdated options
\def\STRINGGROW{%
\concat\currentlaststring\yytextpure
@@ -63,3 +58,104 @@
\newtoks\laststringraw
\newtoks\currentlaststring
\newtoks\currentlaststringraw
+
+% common \bison\ parser initialization; the first macro initializes the lexer structures only,
+% while the second one deals exclusively with the data produced by the parser; logically, in a `real'
+% \bison\ parser, the second set of initializations is applied to the |..._extra| local
+% data for the parser (for a reentrant yyparse()); we put it here for convenience.
+
+\def\bisonparserinit{%
+ \yyinitstack\obstackforstring
+ \yyinitstack\obstackforstringraw
+ \percentpercentcount=\z@
+ \lonesting=\z@
+ \laststring{}\laststringraw{}%
+ \currentlaststring{}\currentlaststringraw{}%
+}
+
+\def\bisonparserdatainit{%
+ \table{}\typestable{}\prectable{}\opttable{}%
+}
+
+% macros for \flex\ lexer
+
+\newif\iffldidadef
+\newif\ifflindented@code
+\newif\iffloption@sense
+\newif\iffllex@compat
+\newif\ifflposix@compat
+\newif\ifflin@rule
+\newif\iffldoing@rule@action
+\newif\ifflcontinued@action
+\newif\ifflend@is@ws
+\newif\ifflsf@skip@ws
+\newif\ifflsf@case@ins
+\newif\ifflsf@dot@all
+
+\newcount\fllinenum
+\def\flinc@linenum{\advance\fllinenum\@ne}
+\def\flinc#1{%
+ \expandafter\fl@nc\expandafter{\number#1}{#1}%
+}
+\def\fl@nc#1#2{%
+ \edef#2{\xincrement{#1}}%
+}
+
+\def\fldec#1{%
+ \expandafter\fld@c\expandafter{\number#1}{#1}%
+}
+\def\fld@c#1#2{%
+ \edef#2{\xdecrement{#1}}%
+}
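+
+% e.g.\ after \def\flbrace@depth{0}, saying \flinc\flbrace@depth redefines \flbrace@depth to
+% expand to 1 (assuming \xincrement expandably increments its decimal argument)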
+
+\def\yylessafter#1{}% put back the string starting at (after) #1
+\def\flsf@push{}% push the next parenthesis level
+\def\flsf@pop{}% pop the nesting level
+
+\def\RETURNNAME{\yylexreturnsym{NAME}}
+\def\RETURNCHAR{\yylexreturnsym{CHAR}}
+\def\yyflexoptreturn#1{%
+ \edef\next{\yylval{{\iffloption@sense\else no\fi}{\the\yytextpure}{\the\yyfmark}{\the\yysmark}}}\next
+ \yylexreturnregular{#1}%
+}
+
+% macros to process special character classes
+
+\def\makecclpair#1#2{%
+ \expandafter\def\csname xccl@#1\endcsname{\langle#2\rangle}%
+ \mak@cclpair#1.{#2}%
+}
+
+\def\mak@cclpair CCE_#1.#2{%
+ \expandafter\def\csname xccl@CCE_NEG_#1\endcsname{\langle^\neg#2\rangle}%
+}
+
+\makecclpair{CCE_ALPHA}{\alpha\beta}
+\makecclpair{CCE_ALNUM}{\alpha n}
+\makecclpair{CCE_BLANK}{\hbox{ }}
+\makecclpair{CCE_GRAPH}{\hbox{\dingssmall\leaf}}
+\makecclpair{CCE_DIGIT}{\hbox{\.{0..9}}}
+\makecclpair{CCE_XDIGIT}{\hbox{\.{0..Z}}}
+\makecclpair{CCE_SPACE}{\hbox{\.{\ }}}
+\makecclpair{CCE_LOWER}{\hbox{\.{a..z}}}
+\makecclpair{CCE_UPPER}{\hbox{\.{A..Z}}}
+\makecclpair{CCE_PRINT}{\hbox{\dingssmall\pen}}
+\makecclpair{CCE_PUNCT}{\hbox{\.{.}}}
+\makecclpair{CCE_CNTRL}{\mapsto}
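+
+% each call above defines a pair of display macros: e.g.\ \csname xccl@CCE_ALPHA\endcsname
+% typesets as $\langle\alpha\beta\rangle$ while \csname xccl@CCE_NEG_ALPHA\endcsname typesets
+% the negated class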
+
+\def\xcclreturn#1{%
+ \yyBEGIN{CCL}%
+ \yytextpure{#1}% substitute the token name for the actual text
+ % this is slightly dangerous, because of the `_' in the name
+ % for this to work, ftokenset.sty must be used
+ \yylexreturnsym{#1}%
+}
+
+\chardef\flquotechar`\"
+
+\def\fllexsetup{%
+ \def\flbrace@depth{0}%
+ \def\flsectnum{0}%
+ \def\flnmstr{{}{}}% recorded name
+ \def\flnmdef{{}{}}% recorded definition
+}
diff --git a/support/splint/tex/yytexlex.sty b/support/splint/tex/yytexlex.sty
index a7e7dc3250..7c149108d3 100644
--- a/support/splint/tex/yytexlex.sty
+++ b/support/splint/tex/yytexlex.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -40,7 +40,7 @@
\appendyybyte}
=
{%
- \appendr\texlinetoks{{}${}={}${}}%
+ \appendrnx\texlinetoks{{}${}={}${}}%
\yygetchar
}
\{
@@ -62,12 +62,12 @@
\else
\def\next{^^A}%
\fi
- \action\next\in\texdefaultstate
+ \switchon\next\in\texdefaultstate
}
^^A % \yycp@==1
{%
\edef\next{\noexpand\noexpand\the\yybyte}%
- \action\next\in\texdefaultstate}
+ \switchon\next\in\texdefaultstate}
\raw\hbox\raw
{%
\appendnext}
@@ -151,14 +151,14 @@
\yycp@=`\\%
\putother\yycp@\in\yybyte
\def\next{escape}%
- \action\next\in\currentstate}
+ \switchon\next\in\currentstate}
}
\def\texescdefault{%
\let\currentstate\esccurrentstate
\yycp@=`\ %
\yybyte{\ }%
- \action\yycp@\in\currentstate
+ \caction\yycp@\in\currentstate
}
\def\getcescape{%
@@ -189,37 +189,42 @@
\fi
\fi
\fi
- \action\next\in\currentstate
+ \switchonwithtype\next\in\currentstate
}
\def\outputtexcs{%
- \expandafter\ifx\csname\the\yytext\defTpostfix\endcsname\relax
- \putother{`\\}\in\toksa
+ \expandafter\ifx\csname\defprefix\the\yytext\defpostfix\endcsname\relax
+ %\putother{`\\}\in\toksa
+ \toksa{\hbox{\sixpoint\tt\char`\\}}%
\concat\texlinetoks\toksa
\concat\texlinetoks\yytext
- \appendr\texlinetoks{\noexpand\hbox{$\noexpand\,$}}%
+ \appendrnx\texlinetoks{\hbox{$\,$}}%
+ \toksb{}% no visual key
\else
- \appendr\texlinetoks{\expandafter\expandafter\expandafter\noexpand\expandafter\csname\the\yytext xTeXmode\endcsname}%
+ \appendr\texlinetoks{\expandafter\noexpand\csname\defprefix\the\yytext\defpostfix\endcsname}%
+ \expandafter\ifx\csname\restorecsxname{index:visual}{.\the\yytext}\endcsname\relax
+ \toksb{}% no visual key
+ \else
+ \toksb\expandafter\expandafter\expandafter{\csname\restorecsxname{index:visual}{.\the\yytext}\endcsname}%
+ \fi
\fi
- \appendr\texidxtoks{%
- \termindex{{\nx\nx\nx\texcsstring{\expandafter\charstonumberse\the\yytext\end}}{\texnspace}{\nx\the\pageno}{\secno}}%
- }%
+ \appendr\texidxtoks{\gidxentryxb{\texcsstring}{\the\yytext}{\the\toksb}}%
}
-% the `calling conventions' for indexing \TeX\ control sequences are too different to use canned indexing macros
-% (the second parameter is a register so it has to be expanded before the \charstonumberse processes it)
-% we therefore invoke the macros by hand
+% indexing of \TeX\ control sequences
\def\texidxdomain{T}
\def\texcsidxrank{2}
+\def\texcstxtidxrank{5}
-\def\writetexidxentry#1{\iftermindex\write\gindex{\nx\nx\expandafter\nx\csname\texidxdomain TI\endcsname\texcsidxrank#1}\fi}
+\def\writetexidxentry#1{\indxe\gindex{{\secno}{{}{\texispace}}{\texidxdomain}{\texcsidxrank}#1}}
+\def\writetextxtidxentry#1{\indxe\gindex{{\secno}{{}{\texispace}}{\texidxdomain}{\texcstxtidxrank}#1}}
\def\appendyybyte{\concat\texlinetoks\yybyte\yygetchar}
\def\appendnext#1{\toksa{#1}\concat\texlinetoks\toksa\yygetchar}
-\def\defTpostfix{xTeXmode}
+\def\defTpostfix{[xTeXmode]}
\def\stripyybyte{%
\expandafter\stripyyb@te\the\yybyte
@@ -239,57 +244,69 @@
\newif\iftracetexpp
\newtoks\textoks
+% the most common way for a \TeX\ macro to appear in the input is to be inserted by
+% \CWEB\ itself; in this case \CWEB\ makes sure that the macro is expanded in math mode;
+% this assumption is relied upon in the design of these macros, and violating it may
+% result in some rather puzzling error messages, caused by the insertion of
+% \ignorespaces outside of the current group; the warning macro below makes this
+% dependence explicit.
+
+\def\TeXxwarn{%
+ \relax
+ \ifmmode
+ \else
+ \errhelp{Check stash collecting macros.}%
+ \errmessage{\nx\\TeXx macro is used outside of math mode.}%
+ \fi
+}
+
\def\TeXx(#1)#2;{% TODO
- \let\oldttdot\.\relaxcweb
- \textoks{}\let\.\dotcollect
+ \TeXxwarn
+ {}$\let\oldttdot\.\relaxcweb
+ \let\.\dotcollect
+ \textoks{}%
#1%
\restorecweb\let\.\oldttdot
- \expandafter\T@Xx\the\textoks}
+ \expandafter\T@Xx\the\textoks
+ {}${}\aftergroup\ignorespaces}
\def\mypar{\par}
\def\TeXb(#1)#2;{% TeX material begin
- {}$%
+ \TeXxwarn
+ {}$\let\oldttdot\.\relaxcweb
+ \toksa{}\let\.\dotcollectb
+ \textoks{}%
\let\oldsix\6%
\let\6\ignorespaces
\let\oldC\C
\let\C\saveCcomments
- \let\oldttdot\.\relaxcweb
- \textoks{}\toksa{}\let\.\dotcollectb
#1%
\restorecweb\let\.\oldttdot
\dotcollectstripquotes
- ${}}
+ ${}\aftergroup\ignorespaces}
\def\TeXa(#1)#2;{% TeX material add
- {}$%
- \let\oldttdot\.\relaxcweb
+ \TeXxwarn
+ {}$\let\oldttdot\.\relaxcweb
\toksa{}\let\.\dotcollectb
#1%
\restorecweb\let\.\oldttdot
\dotcollectstripquotes
- ${}}
+ ${}\aftergroup\ignorespaces}
\def\TeXf(#1)#2;{% TeX material add
- {}$%
- \let\oldttdot\.\relaxcweb
+ \TeXxwarn
+ {}$\let\oldttdot\.\relaxcweb
\toksa{}\let\.\dotcollectb
\textoks\expandafter{\the\textoks\hbox{\6}}%
#1%
\restorecweb\let\.\oldttdot
\dotcollectstripquotes
- ${}}
-
-\def\TeXo(#1)#2;{% TeX material output
- \let\oldttdot\.\relaxcweb
- \let\.\dotcollect
- \let\6\oldsix
- \let\C\oldC
- #1%
- \restorecweb\let\.\oldttdot
- $\expandafter\T@Xx\expandafter"\the\textoks"$}
+ ${}\aftergroup\ignorespaces}
\def\TeXao(#1)#2;{% TeX material output
+ \TeXxwarn
{}$\let\oldttdot\.\relaxcweb
\toksa{}\let\.\dotcollectb
\let\6\oldsix
@@ -297,9 +314,11 @@
#1%
\restorecweb\let\.\oldttdot
\dotcollectstripquotes
- $\expandafter\T@Xx\expandafter"\the\textoks"${}${}}
+ \expandafter\T@Xx\expandafter
+ "\the\textoks"{}${}\aftergroup\ignorespaces}
\def\TeXfo(#1)#2;{% TeX material output
+ \TeXxwarn
{}$\let\oldttdot\.\relaxcweb
\toksa{}\let\.\dotcollectb
\textoks\expandafter{\the\textoks\hbox{\6}}%
@@ -308,11 +327,14 @@
#1%
\restorecweb\let\.\oldttdot
\dotcollectstripquotes
- $\expandafter\T@Xx\expandafter"\the\textoks"${}${}}
+ \expandafter\T@Xx\expandafter
+ "\the\textoks"{}${}\aftergroup\ignorespaces}
\let\TeXxi\TeXx
-\def\inlineTeXx#1{$\TeXxi(\.{"#1"});$} % for indexing macros
-
+\def\inlineTeXx#1{$\let\writetexidxentry\writetextxtidxentry\TeXxi(\.{"#1"});$} % for indexing macros
+\def\TeXlit{\iffalse{\fi}{\setbox0\lastbox}\removewhitespace
+ \expandafter\.\expandafter{\iffalse}\fi} % to help with \CWEB's @t...@> cleanup
+ % e.g.\ |TeXao(@t\TeXlit"\hbox{\TeX\ stuff}"@>);|
\def\dotcollect#1{\toksa{#1}\concat\textoks\toksa}
\def\dotcollectb#1{\toksb{#1}\concat\toksa\toksb}
\def\dotcollectstripquotes{\expandafter\d@tcollectstripquotes\the\toksa}
@@ -322,7 +344,6 @@
\def\restorecweb{\restorecs{local-namespace}{\)}}
\def\T@Xx"#1"{%
- {}${}%$
\iffalse{\fi % alignment!
\begingroup
% tune up the standard input routines
@@ -336,17 +357,17 @@
\texlinetoks{}\texidxtoks{}\bbalance\z@
\let\bbal\bbalempty
\yytextbackupfalse
+ \let\defpostfix\defTpostfix\let\defprefix\empty
\restorecsxlist\texnspace\alltexsymbols
\let\termindex\writetexidxentry
\yygetchar#1\end
- \toksc{#1}%
\iftracetexpp{\newlinechar=`^^J%
- \errmessage{TeX_ input: \the\toksc^^JTeX_ first pass: \the\texlinetoks}}\fi
+ \toksc{#1}\ferrmessage{TeX_ input: \the\toksc^^JTeX_ first pass: \the\texlinetoks}}\fi
\ifnum\bbalance=\z@
\else
- \bbbalance
+ \bbbalance{#1}%
\fi
- \iftracetexpp\errmessage{TeX_ final pass: \the\texlinetoks}\fi
+ \iftracetexpp\ferrmessage{TeX_ final pass: \the\texlinetoks}\fi
\concat\texlinetoks\texidxtoks
\expandafter
\endgroup
@@ -356,24 +377,35 @@
% otherwise there is a risk that an output routine is called before
% the group is complete and \yyreturn definition is wrong so \yyparse
% will not be able to function
- \expandafter\texlinetoks\expandafter{\the\texlinetoks}%
- \let\bbal\bbalempty\bbalance\z@
- \restorecsxlist\texnspace\alltexsymbols
- \let\termindex\writetexidxentry\tt\chardef\_=`\_\the\texlinetoks
+ \expandafter\T@Xpretypeset\the\texlinetoks
\endgroup
\iffalse}\fi
- {}${}%$
+}
+
+\def\T@Xpretypeset{%
+ \let\bbal\bbalempty\bbalance\z@
+ \let\defpostfix\defTpostfix\let\defprefix\empty
+ \restorecsxlist\texnspace\alltexsymbols
+ \tt\chardef\_=`\_%
}
\def\alltexsymbols{%
- \space\toksa\toksb\toksc\tokse\toksf\the\ifx\ifnum\fi\else
- \def\edef\let\empty\next\switchon\in\concat\appendr\default
- \noexpand\emptyterm\print\relax\yy\inmath\omit\hfil\getfirst
- \getsecond\getthird\getfourth\getfifth\nx\to\hspace\toksd\rhscont
- \rhscnct\rhsbool\table\ifrhsfull\rhsfulltrue\rhsfullfalse
- \yyval\tempca\tempcb\z@\@ne\tw@\m@ne\advance\ifcat\iftracebadchars
- \bb\yylexreturnptr\yylexreturn\yylexreturnval\yylexreturnchar
- \yylexreturntext\yylexnext
+ \space\toksa\toksb\toksc\toksd\tokse\toksf\toksg\toksh\the\ifx
+ \ifnum\fi\else\def\edef\let\empty\next\switchon\in\concat\appendr
+ \default\noexpand\emptyterm\print\relax\yy\inmath\omit\hfil\getfirst
+ \getsecond\getthird\getfourth\getfifth\nx\to\hspace\rhscont\rhscnct
+ \rhsbool\table\ifrhsfull\rhsfulltrue\rhsfullfalse\yyval\tempca
+ \tempcb\z@\@ne\tw@\m@ne\advance\ifcat\iftracebadchars\bb
+ \yylexreturnptr\yylexreturn\yylexreturnval\yylexreturnsym
+ \yylexreturnchar\yylexreturnxchar\yylexreturntext\yylexnext
+ \%\harmlesscomment\\\yyfatal\yywarn\yyBEGIN\yypushstate\yypopstate
+ \yyBEGINr\yylexstate\yypdeprecated
+}
+
+% TODO: make this the mechanism for updating \alltexsymbols
+
+\def\extendcs#1\with#2{%
+ \expandafter\def\expandafter#1\expandafter{#1#2}%
}
\def\collectspaces#1{%
@@ -394,23 +426,34 @@
\def\spacecontainer{\ }
\def\texnspace{texline}
+\def\texvspace{texvisline}
+\def\texispace{index}
\let\defpostfix\defTpostfix
\let\defprefix\empty
-\let\defypreamble\empty % for indexing macros
-\def\defypostamble{ {\rm(\.{\\\thisnamex})}}
-
\let\settgroup\relax
\defx\space{\hbox{$\,$\char`\ $\,$}}{texline}
+\defy\space{\hbox{$\,$\char`\ $\,$}}{index}
-\defx\hspace{\hbox{$\,$\char`\ $\,$}}{texline}
+\defx\hspace{% somewhat precarious definition
+ \hbox{\char`\ }%
+ \def\setegroup{{}$\,${}}%
+ \def\setpgr@up{^{\hbox{\sscmd\the\toksa}}\,$}%
+ \def\setegr@up{\,$}%
+ \def\setpgroup{%
+ ${}_{\hbox{\sscmd\the\toksa}}%
+ \let\setpgroup\setpgr@up
+ \let\setegroup\setegr@up
+ \grabbalanced
+ }\grabbalanced
+}{texline}
\defy\hspace{\hbox{$\,$\char`\ $\,$}}{index}
\defx\advance{%
- $\mathop{\hbox{\bf add}}{}$%
+ $\mathop{\hbox{\bf add}}$%
}{texline}
\defy\advance{%
@@ -425,6 +468,8 @@
$0_{\rm R}$%
}{index}
+\defx\z@{0_R}{index:visual}
+
\defx\@ne{%
$\,1_{\rm R}\,$%
}{texline}
@@ -433,6 +478,8 @@
$1_{\rm R}$%
}{index}
+\defx\@ne{1_R}{index:visual}
+
\defx\tw@{%
$\,2_{\rm R}\,$%
}{texline}
@@ -441,6 +488,8 @@
$2_{\rm R}$%
}{index}
+\defx\tw@{2_R}{index:visual}
+
\defx\m@ne{%
$\,-1_{\rm R}\,$%
}{texline}
@@ -449,151 +498,208 @@
$-1_{\rm R}$%
}{index}
+\defx\m@ne{-1_R}{index:visual}
+
\defx\tempca{%
- ${}t_a{}$%
+ $t_a$%
}{texline}
\defy\tempca{%
$t_a$%
}{index}
+\defx\tempca{t_a}{index:visual}
+
\defx\tempcb{%
- ${}t_b{}$%
+ $t_b$%
}{texline}
\defy\tempcb{%
$t_b$%
}{index}
+\defx\tempcb{t_b}{index:visual}
+
\defx\toksa{%
- {}${}v_a{}${}%
+ {}$v_a${}%
\def\setegroup{}%
\def\setpgroup{%
$\leftarrow\langle\,\hbox{\the\toksa}%
\if\next]%
\else
- \rangle%
+ \,\rangle%
\fi
$}\grabbalanced
}{texline}
+\defx\toksa{va}{index:visual}
+
\defy\toksa{%
- ${}v_a{}$%
+ $v_a$%
}{index}
\defx\toksb{%
- {}${}v_b{}${}%
+ {}$v_b${}%
\def\setegroup{}%
\def\setpgroup{%
$\leftarrow\langle\,\hbox{\the\toksa}%
\if\next]%
\else
- \rangle%
+ \,\rangle%
\fi
$}\grabbalanced
}{texline}
+\defx\toksb{vb}{index:visual}
+
\defy\toksb{%
- ${}v_b{}$%
+ $v_b$%
}{index}
\defx\toksc{%
- {}${}v_c{}${}%
+ {}$v_c${}%
\def\setegroup{}%
\def\setpgroup{%
$\leftarrow\langle\,\hbox{\the\toksa}%
\if\next]%
\else
- \rangle%
+ \,\rangle%
\fi
$}\grabbalanced
}{texline}
+\defx\toksc{vc}{index:visual}
+
\defy\toksc{%
- ${}v_c{}$%
+ $v_c$%
}{index}
\defx\toksd{%
- {}${}v_d{}${}%
+ {}$v_d${}%
\def\setegroup{}%
\def\setpgroup{%
$\leftarrow\langle\,\hbox{\the\toksa}%
\if\next]%
\else
- \rangle%
+ \,\rangle%
\fi
$}\grabbalanced
}{texline}
+\defx\toksd{vd}{index:visual}
+
\defy\toksd{%
- ${}v_d{}$%
+ $v_d$%
}{index}
\defx\tokse{%
- {}${}v_e{}${}%
+ {}$v_e${}%
\def\setegroup{}%
\def\setpgroup{%
$\leftarrow\langle\,\hbox{\the\toksa}%
\if\next]%
\else
- \rangle%
+ \,\rangle%
\fi
$}\grabbalanced
}{texline}
+\defx\tokse{ve}{index:visual}
+
\defy\tokse{%
- ${}v_e{}$%
+ $v_e$%
}{index}
\defx\toksf{%
- {}${}v_f{}${}%
+ {}$v_f${}%
\def\setegroup{}%
\def\setpgroup{%
$\leftarrow\langle\,\hbox{\the\toksa}%
\if\next]%
\else
- \rangle%
+ \,\rangle%
\fi
$}\grabbalanced
}{texline}
+\defx\toksf{vf}{index:visual}
+
\defy\toksf{$v_f$}{index}
+\defx\toksg{%
+ {}$v_g${}%
+ \def\setegroup{}%
+ \def\setpgroup{%
+ $\leftarrow\langle\,\hbox{\the\toksa}%
+ \if\next]%
+ \else
+ \,\rangle%
+ \fi
+ $}\grabbalanced
+}{texline}
+
+\defx\toksg{vg}{index:visual}
+
+\defy\toksg{$v_g$}{index}
+
+\defx\toksh{%
+ {}$v_h${}%
+ \def\setegroup{}%
+ \def\setpgroup{%
+ $\leftarrow\langle\,\hbox{\the\toksa}%
+ \if\next]%
+ \else
+ \,\rangle%
+ \fi
+ $}\grabbalanced
+}{texline}
+
+\defx\toksh{vh}{index:visual}
+
+\defy\toksh{$v_h$}{index}
+
\defx\yyval{%
- {}${}\Upsilon{}${}%
+ {}$\Upsilon${}%
\def\setegroup{}%
\def\setpgroup{%
$\leftarrow\langle\,\the\toksa
\if\next]%
\else
- \rangle%
+ \,\rangle%
\fi
$}\grabbalanced
}{texline}
\defy\yyval{%
- ${}\Upsilon{}$%
+ $\Upsilon$%
}{index}
-\def\setflexreturn#1{%
- \def\setegroup{{\bf return}#1}%
+\defx\yyval{Y}{index:visual}
+
+\def\setcfreturn#1{% more flexible return statement
+ \def\setegroup{{}$\mathop{\hbox{#1}}${}}%
+ \let\settgroup\setegroup
\def\setpgroup{%
{%
- \let\texidxdomain\bisonidxdomain
- \tokse\toksa
- \expandafter\nameproc\expandafter{\the\toksa}%
+ \let\termindex\writeidxentry % the indexed term is a \bison\ token
+ \toksc\toksa
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
\ifyyparsefail
\edef\next{\toksa{\termmetastyle{%
- \expandafter\gidxentry\expandafter{\expandafter\termvstring\expandafter}\expandafter{\the\toksa}%
+ \gidxentryxb{\termvstring}{\the\toksa}{}%
\let\nx\idxfont\nx\empty\nx\tt\the\toksa\nx\/%
}}}\next
\else
- \expandafter\settermstyle\expandafter{\the\tokse}%
+ \edef\next{\noexpand\settermstyle{\the\toksb}{\the\toksc}}\next
\fi
- {}$\mathop{\hbox{{\bf return}#1}}\hbox{\the\toksa}${}}%
+ {}$\mathop{\hbox{#1}}\hbox{\the\toksa}${}}%
}\grabbalanced
}
+\def\setflexreturn#1{%
+ \setcfreturn{{\bf return}#1}%
+}
+
\defx\yylexreturnptr{%
\setflexreturn{$_p${}}%
}{texline}
@@ -602,14 +708,28 @@
{\bf return$_p$}%
}{index}
+\defx\yylexreturnptr{return_p}{index:visual}
+
+\defx\yylexreturnxchar{%
+ \setflexreturn{$_x${}}%
+}{texline}
+
+\defy\yylexreturnxchar{%
+ \hbox{\bf return$_x$}%
+}{index}
+
+\defx\yylexreturnxchar{return_x}{index:visual}
+
\defx\yylexreturnchar{%
- {\bf return$_c$}%
+ \hbox{\bf return$_c$}%
}{texline}
\defy\yylexreturnchar{%
- {\bf return$_c$}%
+ \hbox{\bf return$_c$}%
}{index}
+\defx\yylexreturnchar{return_c}{index:visual}
+
\defx\yylexnext{%
{\bf continue}%
}{texline}
@@ -618,6 +738,8 @@
{\bf continue}%
}{index}
+\defx\yylexnext{continue}{index:visual}
+
\defx\yylexreturn{%
\setflexreturn{$_l${}}%
}{texline}
@@ -626,6 +748,8 @@
{\bf return$_l$}%
}{index}
+\defx\yylexreturn{return_l}{index:visual}
+
\defx\yylexreturnval{%
\setflexreturn{$_v${}}%
}{texline}
@@ -634,6 +758,18 @@
{\bf return$_v$}%
}{index}
+\defx\yylexreturnval{return_v}{index:visual}
+
+\defx\yylexreturnsym{%
+ \setflexreturn{$_{vp}${}}%
+}{texline}
+
+\defy\yylexreturnsym{%
+ {\bf return$_{vp}$}%
+}{index}
+
+\defx\yylexreturnsym{return_vp}{index:visual}
+
\defx\yylexreturntext{%
{\bf return$_t$}%
}{texline}
@@ -642,10 +778,90 @@
{\bf return$_t$}%
}{index}
-\defx\table{{}${}\Omega{}${}}{texline}
+\defx\yylexreturntext{return_t}{index:visual}
+
+\defx\xcclreturn{%
+ \setcfreturn{\bf set $\Upsilon$ {\rm and} return$^{\rm ccl}$}%
+}{texline}
+
+\defy\xcclreturn{%
+ {\bf set $\Upsilon$ {\rm and} return$^{\rm ccl}$}%
+}{index}
+
+\defx\xcclreturn{set U return^ccl}{index:visual}
+
+\defx\yyflexoptreturn{%
+ \setflexreturn{$^{\rm opt}$}%
+}{texline}
+
+\defy\yyflexoptreturn{%
+ {\bf return$^{\rm opt}$}%
+}{index}
+
+\defx\yyflexoptreturn{return^opt}{index:visual}
+
+\extendcs\alltexsymbols\with{\xcclreturn\yyflexoptreturn}
+
+\defx\yyfatal{%
+ {}$\mathop{\bf fatal}${}%
+ \def\setegroup{}%
+ \def\setpgroup{%
+ $\langle\,\hbox{\the\toksa}%
+ \if\next]%
+ \else
+ \,\rangle%
+ \fi$%
+ }\grabbalanced
+}{texline}
+
+\defx\yyfatal{fatal}{index:visual}
+
+\defy\yyfatal{%
+ {\bf fatal}%
+}{index}
+
+\defx\yywarn{%
+ {}$\mathop{\bf warn}${}%
+ \def\setegroup{}%
+ \def\setpgroup{%
+ $\langle\,\hbox{\the\toksa}%
+ \if\next]%
+ \else
+ \,\rangle%
+ \fi$%
+ }\grabbalanced
+}{texline}
+
+\defx\yywarn{warn}{index:visual}
+
+\defy\yywarn{%
+ {\bf warn}%
+}{index}
+
+\defx\yypdeprecated{%
+ {}$\mathop{\bf deprecated}${}%
+ \def\setegroup{}%
+ \def\setpgroup{%
+ $\langle\,\hbox{\the\toksa}%
+ \if\next]%
+ \else
+ \,\rangle%
+ \fi$%
+ }\grabbalanced
+}{texline}
+
+\defx\yypdeprecated{deprecated}{index:visual}
+
+\defy\yypdeprecated{%
+ {\bf deprecated}%
+}{index}
+
+\defx\table{{}$\Omega${}}{texline}
\defy\table{$\Omega$}{index}
+\defx\table{Omega}{index:visual}
+
\defx\relax{\hbox{$\circ$}}{texline}
\defy\relax{\hbox{$\circ$}}{index}
@@ -662,6 +878,8 @@
\lcenclose{$\cdot$}%
}{index}
+\defx\the{val}{index:visual}
+
\def\thecomaction{%
\raw \toksaxTeXmode\toksbxTeXmode\tokscxTeXmode\toksdxTeXmode\toksexTeXmode\toksfxTeXmode \raw
{%
@@ -728,10 +946,18 @@
{\bf else$\;$}%
}{texline}
+\defy\else{%
+ {\bf else}%
+}{index}
+
\defx\fi{%
{\bf fi$\;$}%
}{texline}
+\defy\fi{%
+ {\bf fi}%
+}{index}
+
\defx\def{%
{\bf def$\;$}%
}{texline}
@@ -748,6 +974,8 @@
{\bf def$_{\rm x}$}%
}{index}
+\defx\edef{def_x}{index:visual}
+
\defx\let{%
{\bf let$\;$}%
}{texline}
@@ -808,6 +1036,10 @@
${}\mapsto{}$%
}{texline}
+\defy\to{%
+ $\mapsto$%
+}{index}
+
\defx\emptyterm{%
\hbox{$\ulcorner\ldots\urcorner$}%
}{texline}
@@ -840,6 +1072,8 @@
$\Upsilon\kern-1pt{}_{\rm?}$%
}{index}
+\defx\yy{Y_?}{index:visual}
+
\def\seeknots#1\bbal{%
\let\setegroup\relax
\ifnum#1>0\relax
@@ -904,6 +1138,8 @@
${}_{\rm?}\kern-2pt\Upsilon$%
}{index}
+\defx\bb{Y_??}{index:visual}
+
\defx\switchon{%
\hbox{\bf switch$\;$}%
\def\setpgroup{{$($}\the\toksa{$)$}$\,$}%
@@ -948,7 +1184,7 @@
\defx\getfirst{%
- {}${}\pi_1${}%
+ {}$\pi_1${}%
\def\setegroup{\relax}%
\def\setpgroup{%
${}(\hbox{\the\toksa}%
@@ -960,14 +1196,16 @@
}{texline}
\defy\getfirst{%
- ${}\pi_1$%
+ $\pi_1$%
}{index}
+\defx\getfirst{pi_1}{index:visual}
+
\defx\getsecond{%
- {}${}\pi_2${}%
+ {}$\pi_2${}%
\def\setegroup{\relax}%
\def\setpgroup{%
- ${}(\hbox{\the\toksa}%
+ $(\hbox{\the\toksa}%
\if\next]%
\else
)%
@@ -976,11 +1214,13 @@
}{texline}
\defy\getsecond{%
- ${}\pi_2$%
+ $\pi_2$%
}{index}
+\defx\getsecond{pi_2}{index:visual}
+
\defx\getthird{%
- {}${}\pi_3${}%
+ {}$\pi_3${}%
\def\setegroup{\relax}%
\def\setpgroup{%
${}(\hbox{\the\toksa}%
@@ -992,14 +1232,16 @@
}{texline}
\defy\getthird{%
- ${}\pi_3$%
+ $\pi_3$%
}{index}
+\defx\getthird{pi_3}{index:visual}
+
\defx\getfourth{%
- {}${}\pi_4${}%
+ {}$\pi_4${}%
\def\setegroup{\relax}%
\def\setpgroup{%
- ${}(\hbox{\the\toksa}%
+ $(\hbox{\the\toksa}%
\if\next]%
\else
)%
@@ -1008,14 +1250,16 @@
}{texline}
\defy\getfourth{%
- ${}\pi_4$%
+ $\pi_4$%
}{index}
+\defx\getfourth{pi_4}{index:visual}
+
\defx\getfifth{%
- {}${}\pi_5${}%
+ {}$\pi_5${}%
\def\setegroup{\relax}%
\def\setpgroup{%
- ${}(\hbox{\the\toksa}%
+ $(\hbox{\the\toksa}%
\if\next]%
\else
)%
@@ -1024,11 +1268,13 @@
}{texline}
\defy\getfifth{%
- ${}\pi_5$%
+ $\pi_5$%
}{index}
+\defx\getfifth{pi_5}{index:visual}
+
\defx\rhscont{%
- {}${}\pi_{\{\}}${}%
+ {}$\pi_{\{\}}${}%
\def\setegroup{\relax}%
\def\setpgroup{%
${}(\hbox{\the\toksa}%
@@ -1040,11 +1286,13 @@
}{texline}
\defy\rhscont{%
- ${}\pi_{\{\}}$%
+ $\pi_{\{\}}$%
}{index}
+\defx\rhscont{pi_brace}{index:visual}
+
\defx\rhscnct{%
- {}${}\pi_{\leftrightarrow}${}%
+ {}$\pi_{\leftrightarrow}${}%
\def\setegroup{\relax}%
\def\setpgroup{%
${}(\hbox{\the\toksa}%
@@ -1056,11 +1304,13 @@
}{texline}
\defy\rhscnct{%
- ${}\pi_{\leftrightarrow}$%
+ $\pi_{\leftrightarrow}$%
}{index}
+\defx\rhscnct{pi_arrow}{index:visual}
+
\defx\rhsbool{%
- {}${}\pi_{\vdash}${}%
+ {}$\pi_{\vdash}${}%
\def\setegroup{\relax}%
\def\setpgroup{%
${}(\hbox{\the\toksa}%
@@ -1072,9 +1322,53 @@
}{texline}
\defy\rhsbool{%
- ${}\pi_{\vdash}$%
+ $\pi_{\vdash}$%
+}{index}
+
+\defx\rhsbool{pi_implies}{index:visual}
+
+\defx\%{%
+ {\.{\harmlesscomment}}%
+}{texline}
+
+\defy\%{%
+ {\.{\harmlesscomment}}\let\defypostamble\relax%
}{index}
+\defx\%{\%}{index:visual}
+
+\defx\harmlesscomment{%
+ {\.{\harmlesscomment}}%
+}{texline}
+
+\defy\harmlesscomment{%
+ {\.{\harmlesscomment}}\let\defypostamble\relax%
+}{index}
+
+\defx\harmlesscomment{\%}{index:visual}
+
+\defx\\{%
+ {\.{\\}}%
+}{texline}
+
+\defy\\{%
+ {\.{\\}}\let\defypostamble\relax%
+}{index}
+
+\defx\\{\\}{index:visual}
+
+\defx\lbchar{%
+ \.{\{}%
+}{texline}
+
+\defy\lbchar{%
+ \.{\{}%
+}{index}
+
+\expandafter\defx\expandafter\lbchar\expandafter{\lbchar lbchar}{index:visual}
+
+\extendcs\alltexsymbols\with\lbchar
+
\def\bbalempty[#1]#2{%
\ifx#2]\relax
\else
@@ -1089,7 +1383,7 @@
\fi
}%
-\def\bbbalance{%
+\def\bbbalance#1{%
\ifnum\bbalance>0
\loop
\advance\bbalance-1\relax
@@ -1107,14 +1401,14 @@
\tempca-\bbalance
\bbalance\tempca
\texlinetoks{}\texidxtoks{}%
- \expandafter\yygetchar\the\toksc\end
+ \yygetchar#1\end
% \errmessage{\the\texlinetoks...\the\toksd...\the\bbalance}%
- \appendl\texlinetoks{\the\toksd}%
+ \concatl\toksd\texlinetoks
\fi
}
\def\grabbalanced{%
- \futurelet\next\gr@bbalanced
+ \futurelet\next\gr@bbalanced
}
\def\gr@bbalanced{%
@@ -1143,3 +1437,113 @@
\fi
\next
}
+
+\defx\yyBEGIN{%
+ \def\setegroup{{\bf enter}}%
+ \def\setpgroup{%
+ {%
+ \let\parsernamespace\flexpseudonamespace
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \let\termindex\writeidxfsentry
+ \edef\next{{}$\nx\mathop{\hbox{\nx\bf enter}}(\hbox{\the\toksd})${}%
+ \gidxentryxb{\termvstring}{\the\toksa}{\the\tokse}}\next
+ \let\termindex\eatone
+ }\grabbalanced
+}{texline}
+
+\defy\yyBEGIN{%
+ {\bf enter}%
+}{index}
+
+\defx\yyBEGIN{enter}{index:visual}
+
+\defx\yyBEGINr{%
+ {\bf enter$_x\,$}%
+}{texline}
+
+\defy\yyBEGINr{%
+ {\bf enter$_x$}%
+}{index}
+
+\defx\yyBEGINr{enter_x}{index:visual}
+
+\defx\yypushstate{%
+ \def\setegroup{{\bf push state}}%
+ \def\setpgroup{%
+ {%
+ \let\parsernamespace\flexpseudonamespace
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \let\termindex\writeidxfsentry
+ \edef\next{{}$\nx\mathop{\hbox{\nx\bf push state}}(\hbox{\the\toksd})\,${}
+ \gidxentryxb{\termvstring}{\the\toksa}{\the\tokse}}\next
+ \let\termindex\eatone
+ }\grabbalanced
+}{texline}
+
+\defy\yypushstate{%
+ {\bf push state}%
+}{index}
+
+\defx\yypushstate{push\_state}{index:visual}
+
+\defx\yypopstate{%
+ \def\setegroup{{\bf pop state}}%
+ \def\setpgroup{%
+ {%
+ \let\parsernamespace\flexpseudonamespace
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \let\termindex\writeidxfsentry
+ \edef\next{{}$\nx\mathop{\hbox{\nx\bf pop state}}(\hbox{\the\toksd})\,${}
+ \gidxentryxb{\termvstring}{\the\toksa}{\the\tokse}}\next
+ \let\termindex\eatone
+ }\grabbalanced
+}{texline}
+
+\defy\yypopstate{%
+ {\bf pop state}%
+}{index}
+
+\defx\yypopstate{pop state}{index:visual}
+
+\defx\yylexstate{%
+ \def\setegroup{{\bf state}}%
+ \def\setpgroup{%
+ {%
+ \let\parsernamespace\flexpseudonamespace
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \let\termindex\writeidxfsentry
+ \edef\next{{}$\nx\mathop{\hbox{\nx\bf state}}(\hbox{\the\toksd})\,${}%
+ \gidxentryxb{\termvstring}{\the\toksa}{\the\tokse}}\next
+ \let\termindex\eatone
+ }\grabbalanced
+}{texline}
+
+\defy\yylexstate{%
+ {\bf state}%
+}{index}
+
+\defx\yylexstate{state}{index:visual}
diff --git a/support/splint/tex/yyunion.sty b/support/splint/tex/yyunion.sty
index d9749a86b9..7f4937a4e3 100644
--- a/support/splint/tex/yyunion.sty
+++ b/support/splint/tex/yyunion.sty
@@ -1,4 +1,4 @@
-% Copyright 2012-2015, Alexander Shibakov
+% Copyright 2012-2020, Alexander Shibakov
% This file is part of SPLinT
%
% SPLinT is free software: you can redistribute it and/or modify
@@ -21,6 +21,7 @@
\def\yyuniontag{\yyunion}
\def\parserstrictnamespace{parser-strict}
\def\parserprototypesnamespace{parser-strict:headers}
+\def\parserdebugnamespace{parser-debug}
\def\yyunion{\currentyyunionnamespace}
\def\currentyyunionnamespace{generic}
@@ -29,6 +30,11 @@
% therefore they are not included in the yyunion list and only defined once below and redefined later
% permanently
+\def\cwebstreamchars{% characters seen by the input routine
+ \|\`\\\{\}\~\ \_\&\^\$\#\n% the last one is inserted by the preprocessing scripts
+}
+
+
% insidemost
\long\def\stashed#1{} % stashed material (usually \Cee\ code) :: \stashed{tex_string}
@@ -40,23 +46,24 @@
\long\def\strm[#1]#2.[#3]{} % a stream element :: \strm[digits]{\stashed|\format|\formatlocal}.[digits]
-% types returned by the lexer (* marks the types that get removed by the parser in some cases)
+% types returned by the lexer; the integer type (hex vs. decimal) is determined at the lexer level so
+% these are tagged by the lexer as well
-\def\midf#1#2{} % rule separator :: \midf{fptr}{sptr}
-\defp\stringify#1#2#3#4{} % STRING :: \stringify{12string}{tex_string}{fptr}{sptr}
-\defp\idit#1#2#3#4{} % ID :: \idit{12string}{tex_string}{fptr}{sptr}
-\defp\tagit#1#2#3#4{} % TAG :: \tagit{12string}{tex_string}{fptr}{sptr}
-\defp\charit#1#2#3#4{} % CHAR :: \charit{12string}{tex_string}{fptr}{sptr}
\defp\anint#1#2#3{} % INT :: \anint{digits}{fptr}{sptr}
\defp\hexint#1#2#3{} % INT :: \hexint{0[xX][0-9A-F]+}{fptr}{sptr}
+
+% types inserted by the parser (* marks the types that are essentially returned by the lexer
+% and `tagged' by the parser)
+
+\defp\idit#1#2#3#4{} % *ID :: \idit{12string}{tex_string}{fptr}{sptr}
+\defp\charit#1#2#3#4{} % *CHAR :: \charit{12string}{tex_string}{fptr}{sptr}
+\defp\stringify#1#2#3#4{} % *STRING :: \stringify{12string}{tex_string}{fptr}{sptr}
+\defp\tagit#1#2#3#4{} % *TAG :: \tagit{12string}{tex_string}{fptr}{sptr}
\defp\braceit#1#2#3{} % *BRACED_CODE :: \braceit{tex_string}{fptr}{sptr}
+\defp\midf#1#2{} % *rule separator :: \midf{fptr}{sptr}
\defp\preckind#1#2#3{} % *precedence operator :: \preckind{12string}{fptr}{sptr}
+%
\defp\flexoptionpair#1#2{} % a flex option :: \flexoptoinpair{\idit}{\idit|\stringify}
-
-% types inserted by the parser
-
-% insidemost
-
\defp\termname#1#2{} % a production term :: \termname{\idit|\stringify|\charit}{\idit}
\defp\mergeop#1#2#3{} % merge directive :: \mergeop{\tagit}{fptr}{sptr}
\defp\dprecop#1#2#3{} % dprec directive :: \dprecop{\anint|\hexint}{fptr}{sptr}
@@ -89,6 +96,7 @@
\defp\prodprodsep{} % production separator
\defp\prodprodseplarge{} % production separator
\defp\optoptsepsmall{} % separator between options
+\defp\optoptsepnone{} % (trivial) separator between options
\defp\posmark#1#2{} % the position marker :: \posmark{fptr}{sptr}
\defp\tokendecls#1#2#3{} % token declarations :: \tokendecls{[\onesymbol]+}{fptr}{sptr}
\defp\ntermdecls#1#2#3{} % nterm declarations :: \ntermdecls{[\onesymbol]+}{fptr}{sptr}
@@ -96,7 +104,7 @@
\defp\precdecls#1#2#3#4#5{} % precedence declarations :: \precdecls{left|right|nonassoc|precedence}{\tagit}{[\idit\stringify\charit]+}{fptr}{sptr}
\defp\flexsstatedecls#1#2#3{} % flex state declarations (nonexlusive) :: \flexsstatedecls{[\idit\stringify]+}{fptr}{sptr}
\defp\flexxstatedecls#1#2#3{} % flex state declarations (exclusive) :: \flexxstatedecls{[\idit\stringify]+}{fptr}{sptr}
-\defp\flexoptiondecls#1#2#3{} % flex state declarations :: \flexoptiondecls{[\flexoptionpair]+}{fptr}{sptr}
+\defp\flexoptiondecls#1#2#3{} % flex options :: \flexoptiondecls{[\flexoptionpair]+}{fptr}{sptr}
\defp\oneparametricoption#1#2#3#4{} % option :: \oneparametricoption{option-name}{\stringify|\idit}{fptr}{sptr}
\defp\optionflag#1#2#3#4{} % option flag :: \optionflag{flag name}{abbreviated name}{fptr}{sptr}
\defp\paramdef#1#2#3#4{} % parameters :: \paramdef{\braceit}{parameter domain}{fptr}{sptr}
@@ -133,17 +141,276 @@
\newtoks\yystashlocal
\newtoks\yyformatlocal
+% index implementation
+% note that the rank and domain parameters are handed down by the
+% `outer' macro while the `qualifier' (\term...string) is decided
+% at the point of invocation; some auxiliary sequences first
+
+% a macro that always produces the current value of the register
+
+\def\thewrap#1#2{%
+ \noexpand\thewrap{\noexpand#1}{\the#1}%
+}
+
+% a macro that always produces the current page number.
+
+\def\selfpageno#1{%
+ \noexpand\selfpageno{\the\pageno}%
+}
+
+% a macro that keeps the (single) argument intact
+
+\def\stickycs#1{\noexpand\stickycs\noexpand#1}
+
+% for finer control over indexing, there are two levels on which
+% the programmer can disable the production of a whatsit node (\write):
+% (1) by setting the \iftermindex conditional to \iffalse, and (2) by defining
+% the \termindex macro to ignore its argument; both of these are expanded
+% at the point where the index entry is made; the intention is that
+% the conditional controls indexing at large (say, the index entries
+% inside the index should not be indexed again), while redefining the macro
+% provides a finer control over the indexing style
+
\newif\iftermindex
-\def\doparse#1{%
- \table{}\typestable{}\prectable{}\opttable{}%
- \parserinit\yyparse#1\yyeof\yyeof\endparseinput\endparse
+\let\termindex\eatone
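+
+% a minimal usage sketch (the term name example_term is hypothetical):
+%   {\termindexfalse \prodstyle{example_term}}% no index \write whatsit is produced;
+% the macros in this file instead switch \termindex between \eatone and the various
+% \writeidx...entry sequences below to choose the style of the entry that is written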
+
+% the index is split into different domains: say, the \bison\ domain contains
+% the entries for the terms of a grammar;
+
+\def\bisonidxdomain{B} % marker for the domain (section of the index)
+
+\def\defidxrank{0} % index rank of definitions is lowest so they are listed first
+\def\headeridxrank{1} % index rank of lhs
+\def\termidxrank{2} % ordinary term rank
+
+% the expansion of the various components of an index entry must be carefully
+% controlled: it is not possible to predict how many times the entry will be expanded;
+% there are four types of expandable entries in general:
+% o the entries that must be expanded at the point of invocation (say, \secno)
+% o the entries that must be expanded only when the \write is inside a \box that is
+% being shipped out (say, the \pageno)
+% o the `lazy' entries that can be expanded any time (although at least one expansion
+% must take place)
+% o finally, the entries that must be expanded at a specific point between the point
+% of invocation and the \shipout
+% the indexing macro below treats the first three cases for the entries that the macros
+% below produce; the last case can be implemented by redefining the \stickycs macro
+% at the appropriate point
+
+\def\indxe#1#2{% index entry:
+ % #1 is the output stream
+ % #2 is the entry (see below for the proper form)
+ \expandafter\ind@@\expandafter#2\end{#1}%
+}
+
+\def\ind@@#1\end#2{%
+ \ind@e{#2}{#1}%
+}
+
+\def\ind@e#1#2{% #1 is the output stream
+              % #2 is an 8-parameter sequence, see below
+ \write#1{\in@@e#2\in@@ecomment#2}%
+}
+
+\def\in@@e#1#2#3#4#5#6#7#8{% #1: early expandable (like \secno)
+                 % #2: auxiliary info (expandable, invariant)
+ % #3: domain
+ % #4: rank
+ % #5: type1
+ % #6: type2
+ % #7: key1
+ % #8: key2
+ \i@@@e{#1}{\errmessage{\noexpand\\in@@e error: expanded page number placeholder}}%
+ {#2}{#3}{#4}{#5}{#6}{\charstonumberse#7\end}{\charstonumberse#8\end}%
+}
+
+\def\in@@ecomment#1#2#3#4#5#6#7#8{% #1: early expandable (like \secno)
+                        % #2: auxiliary info (expandable, invariant)
+ % #3: domain
+ % #4: rank
+ % #5: type1
+ % #6: type2
+ % #7: key1
+ % #8: key2
+ \harmlesscomment\space\charstocharsx#7\end :: \charstocharsx#8\end%
+}
+
+\def\i@@@e#1#2#3#4#5#6#7#8#9{% #1: early expandable (like \secno)
+ % #2: page number placeholder
+                % #3: auxiliary info
+ % #4: domain
+ % #5: rank
+ % #6: type1
+ % #7: type2
+ % #8: key1
+ % #9: key2
+ \noexpand\i@@@e{#1}{\the\pageno}{#3}{#4}{#5}{\noexpand#6}{\noexpand#7}{#8}{#9}%
+}
+
+\def\appendfitotoksa{\toksa\expandafter{\the\toksa\else<>\fi}} % hide \fi inside the loop
+
+% defining expandable macros for string transformations
+
+\tempca\@ne
+\toksa{\ifcase#1<NUL>}
+\tempcb=\uccode`.
+
+\bloop
+\uccode`.=\tempca
+\uppercase{\toksb\expandafter{.}}%
+\toksa\expandafter\expandafter\expandafter{\expandafter\the\expandafter\toksa\expandafter\or\the\toksb}%
+\advance\tempca by\@ne
+\ifnum\tempca>"FF
+ \appendfitotoksa
+\else
+\repeat
+
+\def\charstocharsx#1\end{% expandable version of a `safety' macro, ascii characters only, no tricky space characters
+ \yystartsinspace{#1.}{%
+ \space\ch@rstocharsx#1\end
+ }{%
+ \yystringempty{#1}{}{%
+ \ch@rst@charsx#1\end
+ }%
+ }%
+}
+
+\def\ch@rstocharsx#1 #2\end{\charstocharsx#2\end}
+
+\def\ch@rst@charsx#1#2\end{\asciiprint{`#1}\charstocharsx#2\end}
+
+\edef\next{\toksa{\def\noexpand\asciiprint##1{\the\toksa}}}\next
+\the\toksa
+
+\def\writeidxhentry#1{%
+ \indxe\gindex{{\secno}{{\currentrulecontext}{\hostparsernamespace}}{\bisonidxdomain}{\headeridxrank}#1}%
+}
+
+\def\writeidxentry#1{%
+ \indxe\gindex{{\secno}{{\currentrulecontext}{\hostparsernamespace}}{\bisonidxdomain}{\termidxrank}#1}%
+}
+
+\def\writetokenidxentry#1{%
+ \indxe\gindex{{\secno}{{\currentrulecontext}{\hostparsernamespace}}{\bisonidxdomain}{\defidxrank}#1}%
+}
+
+\def\gidxentry#1#2#3{% a generic index entry:
+ % #1 is the type processor,
+ % #2 is the key
+ % #3 is the visual key
+ \iftermindex
+ \termindex{{}{#1}{#2}{#3}}%
+ \fi
+}
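+
+% thus, with \termindex set to one of the \writeidx...entry macros above, the complete
+% entry handed to \indxe\gindex consists of eight groups; purely for illustration
+% (example_key is a hypothetical key with an empty visual key), such an entry reads
+%   {\secno}{{\currentrulecontext}{\hostparsernamespace}}{\bisonidxdomain}{\termidxrank}%
+%   {}{\termidstring}{example_key}{}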
+
+% a convenient shortcut, as many visual keys come in a token register
+
+\def\gidxentryxv#1#2#3{% a generic index entry, e(x)pand (v)isual key:
+ % #1 is the type processor,
+ % #2 is the key
+ % #3 is the visual key
+ \expandafter\gidx@ntryxv\expandafter{#3}{#1}{#2}%
+}
+
+\def\gidxentryxb#1#2#3{% a generic index entry, e(x)pand (b)oth keys:
+ % #1 is the type processor,
+ % #2 is the key
+ % #3 is the visual key
+ \expandafter\gidx@ntryxb\expandafter{#2}{#3}{#1}%
+}
+
+\def\gidx@ntryxb#1#2#3{%
+ \expandafter\gidx@ntryxv\expandafter{#2}{#3}{#1}%
+}
+
+\def\gidx@ntryxv#1#2#3{%
+ \gidxentry{#2}{#3}{#1}%
+}
+
+\def\tidxentry#1#2{% a text index entry, TODO: expand \currentrulecontext
+ \iftermindex
+ \indxe\gindex{{\secno}{{\currentrulecontext}{\indexpseudonamespace}}{\bisonidxdomain}{\termidxrank}{}{#1}{#2}{}}%
+ \fi
+}
+
+\def\idxinline#1{% index the term in text
+ \edef\next{\tidxentry{\termidstring}{#1}}\next
+}
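+
+% usage: \idxinline{example_term} (a hypothetical term name) records an index entry
+% for the term at the current point of the running text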
+
+\def\gatherstash#1#2\strm[#3]#4.[#5]{% #1 contains the stash gathered
+ \yystringempty{#3}{ #1}%
+ {%
+ \expandafter\g@therstash\expandafter{\romannumeral0\unstash#4}{#1}%
+ }%
+}
+
+% stash processing wrapper macros
+
+\def\g@therstash#1#2{\gatherstash{#2#1}}
+
+\def\unstash#1#2{\expandafter\space#2}% #1 is \stashed or \sflush
+
+\def\yyreadstash#1\to#2\in#3\with#4{%
+ \yyreadfifo#1\to#2\in#3%
+ #4{#3}%
+ \setbox\z@=\vbox{
+ \hsize\maxdimen\emergencystretch\maxdimen
+ \rightskip\z@ plus 1fill
+ \insertstash{#3}%
+ \ifmmode
+ {}$\aftergroup\yyr@adstash % $o that the box can be finished
+ \else
+ \aftergroup\eatthree
+ \fi
+ }{#1}{#2}{#3}%
+}
+
+\def\yyr@adstash#1#2#3{%
+ #3\expandafter{\the#3{}$}% $
+ \appendlnx#1{\strm[NaN]{\stashed{{}${}}}.[NaN]}% {}, in case \yystash ends with $
+}
+
+% the sequence below may be redefined so that the #2 parameter to \yyreadstash may be a list of
+% token registers
+
+\def\insertstash#1{\the#1\relax}
+
+\def\readstash#1{\yyreadstash\yystash\to#1\in\yystashlocal\with\unpackstash}
+
+\def\readstashwredirect#1#2#3{% helper macro to read the stash when the stash and the format pointers appear together
+ % #1(the format pointer) is ignored
+ % #2 is the stash marker
+ % #3 is the local stash register
+ \yyreadstash\yystash\to#2\in#3\with\unpackstash
+}
+
+\def\unpackstash#1{#1\expandafter{\romannumeral0\expandafter\unp@ckstash\the#1\strm[].[]}}
+
+\def\unp@ckstash{\gatherstash{}}
+
+\def\unwrapstash#1{% to use where only the removal of \strm and \stashed is needed
+ #1\expandafter{\romannumeral0\expandafter\unwr@pstash\the#1\strm[].[]}%
}
-\long\def\strm[#1]#2.[#3]{#2}
+\def\unwr@pstash{\gatherstash{}}
+
+% parser invocation
+
+\def\newparserstateextra{}
+
+\def\d@parse#1{\yyparse#1\yyeof\yyeof\endparseinput\endparse}
+
+\def\astarraylastcs{\expandafter\noexpand\romannumeral\gettopofstackcsx\astarray}
+
+\long\def\strm[#1]#2.[#3]{#2} % this is used with both the format and the stash streams; TODO: deprecate
+
\long\def\inmath#1{$#1$}
-\long\def\stashed#1{\toksa\expandafter{\the\toksa#1}}
+%\long\def\stashed#1{\toksa\expandafter{\the\toksa#1}} % to be used with unpackaged stash
+%\long\def\stashed#1{\toksa\expandafter\expandafter\expandafter{\expandafter\the\expandafter\toksa#1}} % to use with AST array
+\long\def\stashed#1{\errmessage{making \noexpand\stashed\space expandable is deprecated}}
+
\long\def\sflush#1#2{}
\def\ntt{\noexpand\ntt@} % a convenient shortcut
@@ -157,15 +424,6 @@
\let\termindex\eatone
}
-\def\headeridxrank{1} % index rank of lhs
-
-\def\writeidxhentry#1{\write\gindex{%
- \nx\nx
- \expandafter
- \nx
- \csname\bisonidxdomain TI\endcsname\headeridxrank#1}%
-}
-
\def\prodhead@r#1#2{%
\let\idit\prodhdridit
#1\relax\concat\table\toksa\hrhss@p
@@ -229,45 +487,32 @@
\let\stringify\termnamestringify
\let\optstrextra\optstrextraesc
\let\termindex\writeidxentry
- #1\relax\concat\table\toksa
+ \termnam@{#1}{#2}%
\let\termindex\eatone
}
-\def\bisonidxdomain{B} % marker fo the domain (section of the index)
-\def\termidxrank{2} % rank (affects the order in which the entries are
- % listed in the index and the listing style)
-
-\def\writeidxentry#1{\write\gindex{%
- \nx\nx
- \expandafter
- \nx
- \csname\bisonidxdomain TI\endcsname\termidxrank#1}%
-}
+\def\termnam@#1#2{\toksa{}\toksb{}\toksc{}#1\expandafter\termn@m@\expandafter{\the\toksa}{#2}}
-\let\termindex\eatone
+\def\termn@m@#1#2{\toksa{}\toksb{}\toksc{}#2\expandafter\t@rmn@m@\expandafter{\the\toksa}{#1}}
-% index implementation
-% note that the rank and domain parameters are handed down by the
-% `outer' macro while the `qualifier' (\term...string) is decided
-% at the point of invocation
+% several versions of term typesetting
-\def\gidxentry#1#2{% a generic index entry
- \iftermindex
- \termindex{%
- {\nx\nx\nx#1{\charstonumberse#2\end}}%
- {\hostparsernamespace}{\nx\the\pageno}{\secno}%
- }%
- \fi
+\def\t@rmn@m@#1#2{% the termname is #2, the bracketed part is in #1
+ \yystringempty{#1}{\toksa{#2}}{\toksa{#2\hbox{$\langle${}#1{}$\rangle$}}}%
+ \concat\table\toksa
}
-\def\tidxentry#1#2{% a text index entry
- \iftermindex
- \write\gindex{%
- \nx\nx\expandafter\nx\csname\bisonidxdomain TI\endcsname\termidxrank
- {\nx\nx\nx#1{\charstonumberse#2\end}}%
- {\indexpseudonamespace}{\nx\the\pageno}{\secno}%
- }%
- \fi
+\newif\ifbracketedvisible % decide whether to show the bracketed name in productions
+
+\def\t@rmn@m@#1#2{% the termname is #2, the bracketed part is in #1
+ \yystringempty{#1}{\toksa{#2}}{%
+ \ifbracketedvisible
+ \toksa{#2\hbox{$\,\cdot$#1}}%
+ \else
+ \toksa{#2}%
+ \fi
+ }%
+ \concat\table\toksa
}
%
@@ -278,77 +523,46 @@
\yyreadfifo\yyformat\to#3\in\yyformatlocal
\restorecs{table-render}{\strm}%
#5% accumulate all the directives
- \edef\next{\the\bdirects}%
- \ifx\next\empty
- \else
+ \yytoksempty\bdirects{}{%
\edef\next{\bdirects{%
$\vtop{\halign{\nx\quad####\nx\hfil&####\nx\hfil\cr\the\bdirects}}$}}\next
- \fi
+ }%
\the\yyformatlocal
\actbrac@s{#1}{#2}{#4}%
}
\def\actbrac@s#1#2#3{%
- \yyreadfifo\yystash\to#3\in\yystashlocal
- \unmathstash{#3}%
- \edef\next{\toksa{\the\bdirects&\the\yystashlocal\cr}}\next
+ \readstash{#3}%
+ \appendr\table{\the\bdirects&\toksa{\the\yystashlocal}\cr}%
\bdirects{}%
- \concat\table\toksa
-}
-
-\def\unmathstash#1{%
- \setbox\z@=\vbox{
- \hsize\maxdimen\emergencystretch\maxdimen
- \rightskip\z@ plus 1fill
- \let\TeXx\relax\let\stashed\copystashed
- \the\yystashlocal\relax
- \ifmmode
- {}$\aftergroup\unm@thstash % $o that the box can be finished
- \else
- \aftergroup\eatone
- \fi}{#1}%
-}
-
-\def\copystashed#1{#1}
-
-\def\unm@thstash#1{%
- \toksa{\strm[NaN]{\stashed{{}$}}.[NaN]}% $o it cannot be matched if \yystashlocal is read again
- \concat\yystashlocal\toksa
- \toksa{\strm[#1]{\stashed{${}}}.[#1]}%$
- \appendl\yystash{\the\toksa}%
}
\def\bpredicate#1#2#3#4#5\bdend{%
\yyreadfifo\yyformat\to#3\in\yyformatlocal
\restorecs{table-render}{\strm}%
#5% accumulate all the directives
- \edef\next{\the\bdirects}%
- \ifx\next\empty
+ \yytoksempty\bdirects{%
\bdirects{\quad$\dashv\ \nx\vdash$}%
- \else
+ }{%
\edef\next{\bdirects{%
$\vtop{\halign{\nx\quad####\nx\hfil&####\nx\hfil\cr$\dashv\ $&\omit\nx\hfil$\nx\vdash$\cr
\the\bdirects}}$}}\next
- \fi
+ }%
\the\yyformatlocal
\bpredicat@{#1}{#2}{#4}%
}
-\def\bpredicat@#1#2#3{%
- \yyreadfifo\yystash\to#3\in\yystashlocal
- \unmathstash{#3}%
- \edef\next{\toksa{\the\bdirects&\the\yystashlocal\cr}}\next
+\def\bpredicat@#1#2#3{%
+ \readstash{#3}%
+ \appendr\table{\the\bdirects&\toksa{\the\yystashlocal}\cr}%
\bdirects{}%
- \concat\table\toksa
}
\def\bdirective#1#2#3#4{%
\savecslist{local-namespace}\yyunion
\restorecslist{table-render:directives}\yyunion
- \toksa{}%
- #2% make a directive list
- \edef\next{\toksa{$\nx\langle${\ntt #1} &\the\toksa$\nx\rangle$\cr}}\next
- \concat\bdirects\toksa
+ \toksa{}#2% make a directive list
+ \appendr\bdirects{$\nx\langle${\ntt #1} &\the\toksa$\nx\rangle$\nx\cr}%
\restorecslist{local-namespace}\yyunion
}
@@ -379,34 +593,43 @@
\let\currentrulecontext\empty
-\def\emptyterm{%
- \edef\next{\toksa{$\nx\circ$\tidxentry{\termidstring}{emptyrhs}}}\next
- \concat\table\toksa
-}
+% an empty left hand side is indexed in the index pseudonamespace and not in the main
+% parser namespace, since the name parser never encounters this term (as the \emptyterm
+% control sequence is inserted by the main parser) while typesetting productions;
+% because of this it would be unnatural to define a \prettywordpair sequence for it in the main
+% parser namespace; using a special non-\bison\ name should avoid any confusion with
+% the regular grammar terms
+\def\emptyterm{\appendr\table{$\nx\circ$\tidxentry{\termexception}{emptyrhs&}}}
\def\termnameidit#1#2#3#4{%
\yyreadfifot\yyformat\to#3\in\yyformatlocal\with\formatsort\relax
\the\yyformatlocal
- \nameproc{#2}%
- \peekstash{#4}%
- \settermstyle{#1}%
+ \nameproc{#2}\with\parsebin
+ \peekstash{#4}% does not clobber \toksa and \toksb
+ \expandafter\settermstyle\expandafter{\the\toksb}{#1}%
}
-\def\settermstyle#1{%
- \edef\next{\toksa{\termmetastyle{%
- \gidxentry{\termidstring}{#1}%
- \let\nx\idxfont\nx\empty\nx\it\the\toksa\nx\/%
- }}}\next
-}
-
-\def\idxinline#1{% index the term in text
- \edef\next{\tidxentry{\termidstring}{#1}}\next
+\def\settermstyle#1#2{% put the \whatsit node (\write) at the end so that the
+                       % term macros can remove the whitespace preceding it
+ \edef\next{%
+ \toksa{%
+ \termmetastyle{%
+ \let\nx\idxfont\nx\empty\nx\it\the\toksa
+ \gidxentry{\termidstring}{#2}{#1}\nx\/%
+ }%
+ }%
+ }\next
}
\def\termnamestringify#1#2#3#4{%
- \nameproc{#2}%
+ \nameproc{#2}\with\parsebin
+ \ifyyparsefail
+ \toksb{"#1"}\toksc{\termstring}%
+ \else
+ \toksc{\termidstring}%
+ \fi
\edef\next{\toksa{{%
- \gidxentry{\termstring}{#1}%
+ \expandafter\gidxentryxv\expandafter{\the\toksc}{#1}{\the\toksb}%
\ntt\the\toksa
}}}\next
}
@@ -418,31 +641,39 @@
\newif\ifinheader
\def\prodhdridit#1#2#3#4{%
- \setrulecontext{}%
- \nameproc{#2}%
- \expandafter\setrulecontext\expandafter{\the\toksc}%
+ \setrulecontext{}% TODO: ?
+ \nameproc{#2}\with\parsebin
+ \setrulecontext{#1}%
\edef\next{\toksa{{%
- \gidxentry{\termhdrstring}{#1}%
+ \gidxentryxv{\termidstring}{#1}{\the\toksb}%
\nx\inheadertrue\let\nx\idxfont\nx\bf\nx\itbold\the\toksa\nx\/%
}}}\next
}
\def\charit#1#2#3#4{%
- \expandafter\ifx\csname'parser'\hostparsernamespace#1\endcsname\relax
- \edef\next{\toksa{{\ntt #1}\gidxentry{\termvstring}{'#1'}}}%
+ \expandafter\ifx\csname\prettynamecs\hostparsernamespace{#1}\endcsname\relax
+ \toksa{#1}\sansfirst\toksa
+ \edef\next{\toksa{{\ntt\the\toksa}\gidxentryxb{\termvstring}{\the\toksa}{}}}% remove the possible \\
\else
+ \expandafter\ifx\csname\viskeyref\hostparsernamespace{#1}\endcsname\relax
+ \tokse{#1}% we are stripping the quotes so the visible key is adjusted
+ \else
+ \edef\next{\tokse{\expandafter\expandafter\csname\viskeyref
+ \hostparsernamespace{#1}\endcsname\expandafter{\currentrulecontext}}}\next
+ \fi
\edef\next{\expandafter\toksa\expandafter{\expandafter\noexpand
- \csname'parser'\hostparsernamespace#1\endcsname{\currentrulecontext}\gidxentry{\termvstring}{'#1'}}}%
+ %\csname\prettynamecs\hostparsernamespace{#1}\endcsname{\currentrulecontext}\gidxentryxv{\termvstring}{'#1'}{\the\tokse}}}%
+ \csname\prettynamecs\hostparsernamespace{#1}\endcsname{\currentrulecontext}\gidxentryxv{\termidstring}{'#1'}{\the\tokse}}}%
\fi
\next
}
\def\anint#1#2#3{%
- \toksa{#1}\concat\table\toksa
+ \appendrnx\table{#1}%
}
\def\hexint#1#2#3{%
- \toksa\expandafter{\eattwo#1$_{16}$}\concat\table\toksa
+ \expandafter\appendrnx\expandafter\table\expandafter{\eattwo#1$_{16}$}%
}
\def\optoptsepnone{%
@@ -451,22 +682,22 @@
\def\optoptsepsmall{% keep the alignment going but insert a small space
\concat\opttable\postoks
- \toksa{\noalign{\smallskip}}\concat\opttable\toksa
+ \appendrnx\opttable{\noalign{\smallskip}}%
}
\def\prodprodsepsmall{%
\pushothertables
- \toksa{\noalign{\penalty-150 \smallskip}}\concat\table\toksa
+ \appendrnx\table{\noalign{\penalty-150 \smallskip}}%
}
\def\prodprodsep{%
\pushothertables
- \toksa{\noalign{\medskip}}\concat\table\toksa
+ \appendrnx\table{\noalign{\medskip}}%
}
\def\prodprodseplarge{%
\pushothertables
- \toksa{\noalign{\bigskip}}\concat\table\toksa
+ \appendrnx\table{\noalign{\bigskip}}%
}
\def\separatorswitcheq{%
@@ -532,28 +763,22 @@
\def\posmark#1#2{%
% \yyreadfifo\yyformat\to#1\in\yyformatlocal
-% no formattig for locations currently
- \restorecs{table-render}{\strm}%
+% no formatting for locations currently
% \the\yyformatlocal
\posm@rk{#2}%
}
\def\posm@rk#1{%
- \yyreadfifo\yystash\to#1\in\yystashlocal
- \edef\next{\the\yystashlocal}%
- \ifx\next\empty
- \postoks{}%
- \else
- \unmathstash{#1}%
- % TODO: move this out
- \toksa{}\the\yystashlocal\cleanstash\stripstash
+ \readstash{#1}%
+ \yytoksempty\yystashlocal{\postoks{}}{%
+ \toksa\expandafter{\the\yystashlocal}\cleanstash\stripstash
\setbox\z@=\vbox{\setlazyc\the\toksa}%
\ifnum\ht\z@=\z@
\postoks{}%
\else
- \edef\next{\postoks{\noalign{\indent\toksa{}\the\yystashlocal\nx\makestashbox}}}\next
+ \edef\next{\postoks{\noalign{\indent\toksa{\the\yystashlocal}\nx\makestashbox}}}\next
\fi
- \fi
+ }%
}
\def\codeoptionlist{%
@@ -608,29 +833,13 @@
\def\pushothertables{%
%token's
- \edef\next{\the\tokdectoks}%
- \ifx\next\empty
- \else
- \attachtokentable
- \fi
+ \yytoksempty\tokdectoks{}{\attachtokentable}%
%type's
- \edef\next{\the\typestable}%
- \ifx\next\empty
- \else
- \attachtypestable
- \fi
+ \yytoksempty\typestable{}{\attachtypestable}%
%precedence's
- \edef\next{\the\prectable}%
- \ifx\next\empty
- \else
- \attachprectable
- \fi
+ \yytoksempty\prectable{}{\attachprectable}%
% options
- \edef\next{\the\opttable}%
- \ifx\next\empty
- \else
- \attachoptionstable
- \fi
+ \yytoksempty\opttable{}{\attachoptionstable}%
\concat\table\postoks
} %
@@ -643,8 +852,7 @@
\savecslist{local-namespace}{\yyunion\termindex}%
\restorecslist{table-render:token-declarations}\yyunion
\let\termindex\writetokenidxentry
- \displaytokenrawtrue
- \let\currenttokentype\empty#1%
+ \let\currenttokentype\empty#1%
\restorecslist{local-namespace}{\yyunion\termindex}%
}
@@ -652,31 +860,29 @@
\savecslist{local-namespace}{\yyunion\termindex}%
\restorecslist{table-render:type-declarations}\yyunion
\let\termindex\writetokenidxentry
- \typesset{}%
- #2\relax#1%
- \edef\next{\typestable{\the\typestable\nx\onetype{\the\toksa}{\the\typesset}}}\next
+ \typesset{}#2\relax#1%
+ \appendr\typestable{\nx\onetype{\the\toksa}{\the\typesset}}%
\restorecslist{local-namespace}{\yyunion\termindex}%
}
% \onetype is a typeseting macro defined elsewhere
+% TODO: move these to the \flex\ section
\def\flexsstatedecls#1#2#3{%
\savecslist{local-namespace}{\yyunion\termindex}%
\restorecslist{table-render:flex-state-declarations}\yyunion
- \let\termindex\writetokenidxentry
- \typesset{}%
- #1\relax
- \edef\next{\typestable{\the\typestable\nx\flexsstatelist{\the\typesset}}}\next
+ \let\termindex\writeidxfsdentry%\writetokenidxentry
+ \typesset{}#1\relax
+ \appendr\typestable{\nx\flexsstatelist{\the\typesset}}%
\restorecslist{local-namespace}{\yyunion\termindex}%
}
\def\flexxstatedecls#1#2#3{%
\savecslist{local-namespace}{\yyunion\termindex}%
\restorecslist{table-render:flex-state-declarations}\yyunion
- \let\termindex\writetokenidxentry
- \typesset{}%
- #1\relax
- \edef\next{\typestable{\the\typestable\nx\flexxstatelist{\the\typesset}}}\next
+ \let\termindex\writeidxfsdentry%\writetokenidxentry
+ \typesset{}#1\relax
+ \appendr\typestable{\nx\flexxstatelist{\the\typesset}}%
\restorecslist{local-namespace}{\yyunion\termindex}%
}
@@ -685,9 +891,8 @@
\def\flexoptiondecls#1#2#3{%
\savecslist{local-namespace}{\yyunion\termindex}%
\restorecslist{table-render:flex-option-definitions}\yyunion
- \let\termindex\writetokenidxentry
- \toksa{}%
- #1\relax
+ \let\termindex\writeidxfsrnentry%\writetokenidxentry
+ \toksa{}#1\relax
\restorecslist{local-namespace}{\yyunion\termindex}%
}
@@ -700,8 +905,8 @@
\restorecslist{table-render:prec-declarations}\yyunion
\setrulecontext{*#1}%
\let\termindex\writetokenidxentry
- \typesset{}#3\toksa{}#2%
- \edef\next{\prectable{\the\prectable\noexpand\oneprec{#1}{\the\toksa}{\the\typesset}}}\next
+ \typesset{}#3\toksa{}#2%
+ \appendr\prectable{\nx\oneprec{#1}{\the\toksa}{\the\typesset}}%
\restorecslist{local-namespace}{\yyunion\termindex}%
\setrulecontext{}%
}
@@ -712,52 +917,45 @@
\def\oneparametricoption#1#2#3#4{%
\restorecs{table-render:token-declarations}{\braceit\stringify\idit\anint\hexint}%
- \toksa{}\toksb{}\toksc{}%
- #2%
- \edef\next{\noexpand\oneparametricopti@n{#1}{\the\toksa}{\the\toksb}{\the\toksc}}\next
+ \toksa{}\toksb{}\toksc{}#2%
+ \edef\next{\nx\oneparametricopti@n{#1}{\the\toksa}{\the\toksb}{\the\toksc}}\next
}
\def\oneparametricopti@n#1#2#3#4{%
- \edef\next{#2}%
- \ifx\next\empty % there is no identifier
- \def\next{#4}%
- \ifx\next\empty % there is no digit
+ \yystringempty{#2}{% there is no identifier
+ \yystringempty{#4}{% there is no digit
\edef\next{\toksa{{\ntt"\the\toksb"}}}\next
- \else
+ }{%
\edef\next{\toksa{{\nx\rm\the\toksc}}}\next
- \fi
- \else
- \nameproc{#2}%
- \fi % now \toksa contains the value of the parameter
+ }%
+ }{%
+ \nameproc{#2}\with\parsebin
+ }% now \toksa contains the value of the parameter
\toksb{#1}%
\let\termindex\writetokenidxentry
- \edef\next{\toksa{\gidxentry{\termostring}{#1}$\nx\langle${\nx\bf\the\toksb}$\nx\rangle$&\the\toksa\nx\/\nx\cr}}\next
+ \appendr\opttable{\gidxentry{\termostring}{#1}{}$\nx\langle${\nx\bf\the\toksb}$\nx\rangle$&\the\toksa\nx\/\nx\cr}%
\let\termindex\eatone
- \concat\opttable\toksa
}
\def\paramdef#1#2#3#4{%
\restorecs{table-render:token-declarations}\braceit
\let\termindex\writetokenidxentry
- \edef\next{\toksa{\gidxentry{\termostring}{#2}}}\next
- \toksb\expandafter{\the\toksa$\langle${\bf #2}$\rangle$}\toksa{}%
- #1%
+ \appendr\opttable{\gidxentry{\termostring}{#2}{}}%
\let\termindex\eatone
- \concat\toksb\toksa
- \concat\opttable\toksb
+ \appendrnx\opttable{$\langle${\bf #2}$\rangle$}%
+ \toksa{}#1%
+ \concat\opttable\toksa
}
\def\optionflag#1#2#3#4{%
- \def\next{#2}%
- \ifx\next\empty
- \toksb{}%
- \else
- \toksb{ {\rm(set as $\langle${\bf#2}$\rangle$)}}%
- \fi
\let\termindex\writetokenidxentry
- \edef\next{\toksa{\gidxentry{\termostring}{#1}$\nx\langle${\nx\bf#1}$\nx\rangle$\nx\rlap{$\nx\,\star$}&\the\toksb\cr}}\next
+ \appendr\opttable{\gidxentry{\termostring}{#1}{}}%
\let\termindex\eatone
- \concat\opttable\toksa
+ \yystringempty{#2}{%
+ \appendrnx\opttable{$\langle${\bf#1}$\rangle$\rlap{$\,\star$}&\cr}%
+ }{%
+ \appendrnx\opttable{$\langle${\bf#1}$\rangle$\rlap{$\,\star$}& {\rm(set as $\langle${\bf#2}$\rangle$)}\cr}%
+ }%
}
\def\codepropstype#1#2#3#4#5#6#7{%
@@ -768,21 +966,18 @@
}
\def\codepropstyp@#1#2#3#4{%
- \yyreadfifo\yystash\to#4\in\yystashlocal
- \unmathstash{#4}%
+ \readstash{#4}%
\restorecs{table-render:type-declarations}{\idit\stringify\hspace}%
\let\termindex\writetokenidxentry
- \typesset{}%
- #2% collect all the symbols in \typesset
- \edef\next{\toksa{\gidxentry{\termostring}{#3}$\nx\langle${\nx\bf #3}$\nx\rangle$&%
- \toksa{}\the\yystashlocal\nx\makestashbox\nx\hfil\cr
- \noalign{\nx\smallskip}%
- \noalign{\indent$\nx\rlap{\hbox to2em{\nx\hfil$\nx\star$\nx\hfil}}\vcenter{
- \advance\hsize by-\parindent
- \emergencystretch10pt\nx\raggedright\noindent\hangafter\z@\hangindent2em\nx\strut\the\typesset\nx\strut}$}%
- }}\next
+ \typesset{}#2% collect all the symbols in \typesset
+ \appendr\opttable{\gidxentry{\termostring}{#3}{}$\nx\langle${\nx\bf #3}$\nx\rangle$&%
+ \toksa{\the\yystashlocal}\nx\makestashbox\nx\hfil\cr
+ \noalign{\nx\smallskip}%
+ \noalign{\indent$\nx\rlap{\hbox to2em{\nx\hfil$\nx\star$\nx\hfil}}\vcenter{
+ \advance\hsize by-\parindent
+ \emergencystretch10pt\nx\raggedright\noindent\hangafter\z@\hangindent2em\nx\strut\the\typesset\nx\strut}$}%
+ }%
\let\termindex\eatone
- \concat\opttable\toksa
}
\def\codeassoc#1#2#3#4#5#6#7{%
@@ -793,20 +988,15 @@
}
\def\codeass@c#1#2#3#4{%
- \yyreadfifo\yystash\to#4\in\yystashlocal
- \unmathstash{#4}%
+ \readstash{#4}%
\let\idit\termnameidit
\let\termindex\writetokenidxentry
- \toksa{}% in case #1 is empty
- #2% put the typeset form of the identifie in \toksa
- \toksb\toksa
- \edef\next{\toksa{\yystringempty{#1}{}{\gidxentry{\termostring}{#1}}%
- $\nx\langle${\nx\bf #1}$\nx\rangle$\nx\quad{\the\toksb}&%
- \toksa{}\the\yystashlocal
- \nx\makestashbox
- \nx\hfil\cr
- }}\next
- \concat\opttable\toksa
+ \toksa{}#2% put the typeset form of the identifier in \toksa
+ \appendr\opttable{%
+ \yystringempty{#1}{}{\gidxentry{\termostring}{#1}{}}%
+ $\nx\langle${\nx\bf #1}$\nx\rangle$\nx\quad{\the\toksa}&%
+ \toksa{\the\yystashlocal}\nx\makestashbox\nx\hfil\cr
+ }%
\let\termindex\eatone
}
@@ -821,35 +1011,29 @@
\let\idit\termnameidit
\let\stringify\termnamestringify
\let\termindex\writetokenidxentry
- #1% put the typeset form of the identifier or string in \toksa
- \toksb\toksa
- \savecslist{local-namespace}\yyunion
- \restorecslist{table-render:variable-definitions}\yyunion
- \toksa{}% in case #2 is empty
- #2% put the value in toksa
- \toksc\toksa
- \restorecslist{local-namespace}\yyunion
+ #1% put the typeset form of the identifier or string in \toksa
+ \toksb\toksa
+ \savecslist{local-namespace}\yyunion
+ \restorecslist{table-render:variable-definitions}\yyunion
+ \toksa{}#2% put the value in toksa
+ \toksc\toksa
+ \restorecslist{local-namespace}\yyunion
\let\termindex\eatone
- \edef\next{\toksa{$\nx\langle${\nx\bf define}$\nx\rangle$\nx\quad{\the\toksb}&%
- \the\toksc\cr
- }}\next
- \concat\opttable\toksa
+ \appendr\opttable{$\nx\langle${\nx\bf define}$\nx\rangle$\nx\quad{\the\toksb}&\the\toksc\cr}%
}
\def\prologuecode#1#2#3#4#5{% similar to \actbrac@s
- \yyreadfifo\yystash\to#5\in\yystashlocal
- \unmathstash{#5}%
- \restorecs{table-render}{\strm\stashed}%
- \toksa{}\the\yystashlocal
- \edef\next{\toksa{\omit\span\omit\nx\cdotfill\cr
- \omit\span\omit\toksa{\the\toksa}\nx\makestashbox\nx\cr
- \omit\span\omit\nx\cdotfill\cr}}\next
- \concat\opttable\toksa
+ \readstash{#5}%
+ \appendr\opttable{%
+ \omit\span\omit\nx\cdotfill\cr
+ \omit\span\omit\toksa{\the\yystashlocal}\nx\makestashbox\nx\cr
+ \omit\span\omit\nx\cdotfill\cr
+ }%
}
\def\yyunion@tablerender{%
- \strm\stashed\rarhss@p\rrhss@p\prodhead@r\hrhss@p\arhss@p\actbrac@s\hspac@\codepropstyp@\vard@f\stopproduction
- \termmetastyle\peekstash
+ \strm\stashed\rarhss@p\rrhss@p\prodhead@r\hrhss@p\arhss@p\termnam@\termn@m@\t@rmn@m@
+ \actbrac@s\hspac@\codepropstyp@\vard@f\termmetastyle\peekstash\stopproduction
}
\savecslist{table-render}\yyunion
@@ -868,10 +1052,16 @@
#1%
}
\let\formatlocal\format
+\let\formatbegin\format
+\let\formatp\format
\newtoks\yystashlastcontent % contents of the last action
\newif\ifshowlastaction % if true, show the last action of a flattened set of rules
+% flatten: omit the actions, just list all the rules in a single line;
+% if \showlastactiontrue, show just the last action, otherwise display \actionfiller
+% instead
+
\def\flatten{%
\let\stopproduction\stopproduction@flatten
\let\actbrac@s\actbrac@s@flatten
@@ -881,45 +1071,44 @@
}
\def\actbrac@s@flatten#1#2#3{% do everything but add the stash contents to the table
- \yyreadfifo\yystash\to#3\in\yystashlocal
- \unmathstash{#3}%
- \yystashlastcontent\yystashlocal
+ \readstash{#3}%
+ \yystashlastcontent\yystashlocal % keep the contents of the braces
+ % in case we want to show the last action
\futurelet\actsep\actbrac@s@fl@tten
}
+% see the remarks before \emptyterm above for the reasons for choosing the index
+% pseudonamespace for this term
\def\actbrac@s@fl@tten{%
\ifx\actsep\arhssep
- \edef\next{%
- \toksa{$\nx\ \nx\diamond\nx\ $\tidxentry{\termidstring}{inline_action}}}\next
- \concat\table\toksa
+ \appendr\table{$\nx\ \nx\diamond\nx\ $\tidxentry{\termexception}{inline_action&}}%
\fi
}
\def\rrhss@p@flatten{%
- \toksa{$\ \vert\ $}\concat\table\toksa
+ \appendrnx\table{$\ \vert\ $}%
}%
\def\stopproduction@flatten{%
\ifshowlastaction
- \toksa\yystashlastcontent
+ \yystashlastcontent\expandafter{\expandafter\toksa\expandafter{\the\yystashlastcontent}}%
\else
- \toksa{\actionfiller}%
+ \yystashlastcontent{\actionfiller}%
\fi
- \toksa\expandafter{\expandafter&\expandafter&\the\toksa\cr}\concat\table\toksa
+ \appendr\table{&&\the\yystashlastcontent\nx\cr}%
}
+% fold: resume normal display
+
\def\fold{%
\restorecs{table-render}{\actbrac@s\rrhss@p\arhss@p\rarhss@p\stopproduction}%
- \ifshowlastaction
- \toksa\yystashlastcontent
- \else
- \toksa{\actionfiller}%
- \fi
- \toksa\expandafter{\expandafter&\expandafter&\the\toksa\cr}\concat\table\toksa
+ \stopproduction@flatten
}
\def\actionfiller{\omit\quad$\ldots$\hfil}
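+
+% an invocation sketch, for illustration only: these switches are meant to be toggled
+% (directly or via a \format/\formatlocal stream element) around a group of rules, e.g.
+%   \flatten \showlastactiontrue   % rules listed on one line, last action displayed
+%   ... the rules themselves ...
+%   \fold                          % resume one action per rule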
+% inline: put the left hand side of the production on the same line as the rule
+
\def\inline{%
\let\prodhead@r\prodhead@r@inline
\let\hrhss@p\hrhss@p@inline
@@ -928,10 +1117,10 @@
\def\prodhead@r@inline#1#2{%
\let\idit\prodhdridit
#1\relax
+ \concat\table\toksa
\setbox\z@=\hbox{\strut\the\toksa\/$\,$\rm:\quad}%
\tempda=\wd\z@\relax
\advance\tempda-2em
- \edef\next{\table{\the\table{\the\toksa}}}\next
\hrhss@p
}
@@ -940,18 +1129,45 @@
\def\termspostformat{\ifsquashterms\aftergroup\noexpand\squashtermstrue\fi}
\def\hrhss@p@inline{%
- \edef\next{\toksa{&\termspostformat\hbox to \the\tempda{\hss}}}\next\concat\table\toksa
+ \appendr\table{&\termspostformat\hbox to \the\tempda{\hss}}%
}
+% breakline: break the production line
+
\def\breakline{%
\let\hspac@\hspac@@breakline
}
\def\hspac@@breakline{%
- \toksa{{$\,\hookleftarrow$}&\omit\hfil&\omit\hfil\cr\omit\hfil&\hbox to 2em{\hfil}}\concat\table\toksa
+ \appendrnx\table{{$\,\hookleftarrow$}&\omit\hfil&\omit\hfil\cr\omit\hfil&\hbox to 2em{\hfil}}%
\restorecs{table-render}\hspac@
}
+% breakahead: break the production line after #1 terms
+% use: \breakahead{[0-9]+}
+
+\newcount\breakaheadcount
+
+\def\breakahead#1{%
+ \let\bahspac@\hspac@
+ \let\hspac@\hspac@@breakahead
+ \breakaheadcount#1\relax
+}
+
+\def\hspac@@breakahead{%
+ \ifnum\breakaheadcount=\@ne
+ \yybreak{%
+ \appendrnx\table{{$\,\hookleftarrow$}&\omit\hfil&\omit\hfil\cr\omit\hfil&\hbox to 2em{\hfil}}%
+ \let\hspac@\bahspac@
+ }%
+ \else
+ \advance\breakaheadcount by \m@ne
+ \yybreak{\bahspac@}%
+ \yycontinue
+}
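+
+% for example, \breakahead{3} lets the first two inter-term spaces pass unchanged and
+% inserts the line break in place of the third one, after which normal spacing resumes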
+
+% skipheader: do not show the left hand side
+
\def\skipheader{%
\let\prodhead@r\prodhead@r@skipheader
\let\hrhss@p\hrhss@p@skipheader
@@ -961,34 +1177,85 @@
\hrhss@p
}
-\def\hrhss@p@skipheader{\toksa{\omit\hbox to 2em{\hfil}&\termspostformat}\concat\table\toksa\restorecs{table-render}{\hrhss@p}}
+\def\hrhss@p@skipheader{\appendrnx\table{\omit\hbox to 2em{\hfil}&\termspostformat}\restorecs{table-render}{\hrhss@p}}
\def\stashtoterm#1{% in case one needs to insert previous stash as a term
% usage: \let\peekstash\stashtoterm
- \yyreadfifo\yystash\to#1\in\yystashlocal
- \unmathstash{#1}%
- \edef\next{\toksb{{%
- \nx\restorecs{table-render}{\nx\stashed\nx\strm}%
- \toksa{}\the\yystashlocal\nx\makestashbox}}}\next
- \def\termmetastyle##1{\the\toksb}%
+ \readstash{#1}%
+ {\edef\next{\toksc{{\toksa{\the\yystashlocal}\nx\makestashbox}}}\expandafter}\next
+ \let\termmetastyle\xtoksc
\restorecs{table-render}\peekstash
}
+\def\xtoksc#1{\the\toksc}%
+
+% resetf: resume normal formatting
+
\def\resetf{%
\restorecs{table-render}{\prodheader\prodhead@r\hrhss@p\rrhss@p\arhss@p\rhs\termname\actbraces\actbrac@s\rarhss@p\hspac@\rules
\oneproduction\emptyterm\stringify\idit\charit\prodprodsep\onesymbol\strm\stopproduction}%
\squashtermsfalse
}
-\def\prodstyle#1{% to typeset token names in text
+\def\prodstyle#1{% to typeset token names in text;
+ % CAUTION: when many parsers are at play at the same time,
+ % one should use the version of this macro below that takes a
+ % namespace as a parameter, to make sure the term is typeset properly
+ % when another namespace might be in effect (such as when section names are output)
{%
\let\optstrextra\optstrextraesc
- \nameproc{#1}%
- \settermstyle{#1}%
+ \def\termidxrank{5}%
+ \let\termindex\writeidxentry
+ \nameproc{#1}\with\parsebin
+ \edef\next{%
+ \toksa{%
+ \termmetastyle{%
+ \let\nx\idxfont\nx\empty\nx\it\the\toksa
+ \expandafter\gidxentryxv\expandafter{\romannumeral
+ \ifyyparsefail
+ \yybreak{0 \termidstring}%
+ \else
+ \yybreak{\expandafter\tssextract\the\toksa\end}%
+ \yycontinue}{#1}{\the\toksb}\nx\/%
+ }%
+ }%
+ }\next
+ \let\_\uscore
\the\toksa\nobreak
}%
}
+\def\tssextract#1#2#3\end{%
+ \yystringempty{#2}{0 \termidstring}{0 #2}%
+}
+
+\def\prodtstyle#1{% to typeset terminal string names in text;
+ {%
+ \let\optstrextra\optstrextraesc
+ \def\termidxrank{5}%
+ \let\termindex\writeidxentry
+ \nameproc{#1}\with\parsebin
+ \yytoksempty\toksb{\toksb{"#1"}}{}%
+ \edef\next{%
+ \toksa{%
+ \termmetastyle{%
+ \let\nx\idxfont\nx\empty\ntt\the\toksa
+ \gidxentryxv{\termstring}{#1}{\the\toksb}%
+ }%
+ }%
+ }\next
+ \let\_\uscore
+ \the\toksa\nobreak
+ }%
+}
+
+\def\prodstylens#1#2{% to typeset token names in text, in a predefined namespace
+ {%
+ \def\hostparsernamespace{#2}%
+ \prodstyle{#1}%
+ }%
+}
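+
+% usage: \prodstylens{example_term}{example-namespace} (both names are hypothetical)
+% typesets example_term as if \hostparsernamespace were example-namespace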
+
% macros used in typesetting the result of the prologue parsing
%type declarations
@@ -1007,7 +1274,7 @@
}
\def\hspace#1#2{%
- \appendr\typesset{ }%
+ \appendrnx\typesset{ }%
}
\def\charit#1#2#3#4{%
@@ -1044,11 +1311,8 @@
}
\def\braceit#1#2#3{% similar to \actbrac@s
- \yyreadfifo\yystash\to#3\in\yystashlocal
- \unmathstash{#3}%
- \restorecs{table-render}{\strm\stashed}%
- \toksa{}\the\yystashlocal
- \edef\next{\toksa{&\toksa{\the\toksa}\nx\makestashbox\nx\cr}}\next
+ \readstash{#3}%
+ \edef\next{\toksa{&\toksa{\the\yystashlocal}\nx\makestashbox\nx\cr}}\next
}
\def\onesymbol#1#2#3{% #1 is the macro name used internally by bison (\idit{}{}{}{})
@@ -1056,42 +1320,42 @@
% #3 is the string value (\stringify{}{}{}{})
\toksa{}\toksb{}\toksc{}%
#1#2#3\relax
- \edef\next{\noexpand\onesymb@l{\the\toksa}{\the\toksc}{\the\toksb}}\next
+ \edef\next{\nx\onesymb@l{\the\toksa}{\the\toksc}{\the\toksb}}\next
}
\def\onesymb@l#1#2#3{% #1 is the macro name used internally by bison
% #2 is the integer value
% #3 is the string value
\let\optstrextra\optstrextraesc
- \yystringempty{#3}{}{\nameproc{#3}\tokstoks\toksa}%
- \yystringempty{#1}{}{\nameproc{#1}}%
- \iftracetokennames\derrmessage{token: \the\toksa, string: \the\tokstoks}\fi
+ \tokse{}\toksf{}\toksg{}% these registers will contain the appropriate visual keys
+ % and index entry processing control sequences
+ \yystringempty{#3}{}{%
+ \nameproc{#3}\with\parsebin
+ \ifyyparsefail % if the parsing failed, create a visible key, process the
+ % index entry as a \.{\\tt} string
+ \tokse{"#3"}\toksf{\termstring}%
+ \else % otherwise use the value of the string as the visual key,
+ % process the string as a \bison\ identifier
+ \tokse\toksb\toksf{\termidstring}%
+ \fi
+ \tokstoks\toksa
+ }%
+ \yystringempty{#1}{}{\nameproc{#1}\with\parsebin\toksg\toksb}%
+ \iftracetokennames\ferrmessage{token: \the\toksa, string: \the\tokstoks}\fi
\toksb\expandafter{\currenttokentype}%
- \edef\next{%
- \tokdectoks{\the\tokdectoks
- \noexpand\toksdefline
- {\the\toksa}%
- {%
- \yystringempty{#1}{}{\gidxentry{\termidstring}{#1}}%
- \yystringempty{#3}{}{\gidxentry{\termstring}{#3}}%
- }%
- {\yytoksempty\toksb{auto}{\the\toksb}}%
- {#2}%
- {\the\tokstoks}%
- }%
- }\next
+ \appendr\tokdectoks{\nx\toksdefline
+ {\the\toksa}%
+ {%
+ \yystringempty{#1}{}{\gidxentryxv{\termidstring}{#1}{\the\toksg}}%
+ \yystringempty{#3}{}{\expandafter\gidxentryxv\expandafter{\the\toksf}{#3}{\the\tokse}}%
+ }%
+ {\yytoksempty\toksb{auto}{\the\toksb}}%
+ {#2}%
+ {\the\tokstoks}%
+ }%
\restorecs{nameparser}{\optstrextra}%
}
-\def\defidxrank{0} % index rank of definitions
-
-\def\writetokenidxentry#1{\write\gindex{%
- \nx\nx
- \expandafter
- \nx
- \csname\bisonidxdomain TI\endcsname\defidxrank#1}%
-}
-
\toyyunion{table-render:token-declarations}
%precedence declarations
@@ -1109,29 +1373,35 @@
}
\def\charit#1#2#3#4{%
- \expandafter\ifx\csname'parser'\hostparsernamespace#1\endcsname\relax
- \edef\next{\toksa{{\ntt #1}\gidxentry{\termvstring}{'#1'}}}%
+ \expandafter\ifx\csname\prettynamecs\hostparsernamespace{#1}\endcsname\relax
+ \toksa{#1}\sansfirst\toksa
+ \edef\next{\toksa{{\ntt\the\toksa}\gidxentryxb{\termvstring}{\the\toksa}{}}}% remove the possible \\
\else
+ \expandafter\ifx\csname\viskeyref\hostparsernamespace{#1}\endcsname\relax
+ \tokse{#1}%
+ \else
+ \edef\next{\tokse{\expandafter\expandafter\csname\viskeyref
+ \hostparsernamespace{#1}\endcsname\expandafter{\currentrulecontext}}}\next
+ \fi
\edef\next{\expandafter\toksa\expandafter{\expandafter\noexpand
- \csname'parser'\hostparsernamespace#1\endcsname{\currentrulecontext}\gidxentry{\termvstring}{'#1'}}}%
+ %\csname\prettynamecs\hostparsernamespace{#1}\endcsname{\currentrulecontext}\gidxentryxv{\termvstring}{'#1'}{\the\tokse}}}%
+ \csname\prettynamecs\hostparsernamespace{#1}\endcsname{\currentrulecontext}\gidxentryxv{\termidstring}{'#1'}{\the\tokse}}}%
\fi
\next
\concat\typesset\toksa
}
\def\anint#1#2#3{%
- \edef\next{\toksa{${}=\hbox{\ntt#1}$}}\next
- \concat\typesset\toksa
+ \appendrnx\typesset{${}=\hbox{\ntt#1}$}%
}
\def\hexint#1#2#3{%
\toksa\expandafter{\eattwo#1}%
- \edef\next{\toksa{${}=\hbox{\ntt\the\toksa}_{16}$}}\next
- \concat\typesset\toksa
+ \appendr\typesset{${}=\hbox{\ntt\the\toksa}_{16}$}%
}
\def\hspace#1#2{%
- \appendr\typesset{ }%
+ \appendrnx\typesset{ }%
}
\def\tagit#1#2#3#4{%
@@ -1153,11 +1423,8 @@
\def\stringify#1#2#3#4{\toksa{#2}}
\def\bracedvalue#1#2#3{% similar to \actbrac@s
- \yyreadfifo\yystash\to#3\in\yystashlocal
- \unmathstash{#3}%
- \restorecs{table-render}{\strm\stashed}%
- \toksa{}\the\yystashlocal
- \edef\next{\toksa{\toksa{\the\toksa}\nx\makestashbox}}\next
+ \readstash{#3}%
+ \edef\next{\toksa{\toksa{\the\yystashlocal}\nx\makestashbox}}\next
}
\toyyunion{table-render:variable-definitions}
@@ -1193,12 +1460,11 @@
\restorecslist{parser-strict}\yyunion
\def\idit#1#2#3#4{%
- \nameproc{#2}%
- \edef\next{\toksa{{%
- \gidxentry{\termttstring}{#1}%
+ \nameproc{#2}\with\parsebin
+ \appendr\typesset{{%
+ \gidxentryxv{\termttstring}{#1}{\the\toksb}%
\ntt\the\toksa
- }}}\next
- \concat\typesset\toksa
+ }}%
}
\def\hspace#1#2{%
@@ -1213,17 +1479,21 @@
\def\flexoptionpair#1#2{%
#1%
- \edef\next{#2}%
- \ifx\next\empty % there is no rihgt hand side
- \edef\next{\toksa{$\nx\langle${\ntt\the\toksa}$\nx\rangle_{\rm f}\,\star$&\cr}}\next
- \else
+ \yystringempty{#2}{% there is no right hand side
+ \appendr\opttable{$\nx\langle${\ntt\the\toksa}$\nx\rangle_{\rm f}\,\star$&\cr}%
+ }{%
\toksb\toksa\toksa{}#2%
- \edef\next{\toksa{$\nx\langle${\ntt\the\toksb}$\nx\rangle_{\rm f}$&\nx\it\the\toksa\nx\/\nx\cr}}\next
- \fi
- \concat\opttable\toksa
+ \appendr\opttable{$\nx\langle${\ntt\the\toksb}$\nx\rangle_{\rm f}$&\nx\it\the\toksa\nx\/\nx\cr}%
+ }%
}
-\def\idit#1#2#3#4{\toksa{#2}}
+\def\idit#1#2#3#4{%
+ \toksa{#2}%
+ \edef\next{\toksa{{%
+ \gidxentry{\termfsopstring}{#2}{}%
+ \ntt\the\toksa
+ }}}\next
+}
\def\stringify#1#2#3#4{\toksa{{\ntt@"#2"}}}
@@ -1244,10 +1514,13 @@
\edef\next{\toksc{\numname:}\toksd{\noexpand\lhs{\the\lastlastname}{\the\lastname}}}\next
}
-\def\rrhssep#1#2{}
-\def\arhssep#1#2{}
-\def\rarhssep#1#2{}
-\def\hspace#1#2{}
+% the macros below are defined in `parser-prototypes'
+%\def\rrhssep#1#2{}
+%\def\arhssep#1#2{}
+%\def\rarhssep#1#2{}
+%\def\hspace#1#2{}
+%\def\prodprodsep{}
+%\def\onesymbol#1#2#3{}
\def\rhs#1#2#3{%
\concat\toksa\toksc
@@ -1317,10 +1590,6 @@
\lastname{'#1'}%
}
-\def\prodprodsep{}
-
-\def\onesymbol#1#2#3{}
-
\def\anint#1#2#3{%
\lastname{#1}%
}
@@ -1342,119 +1611,72 @@
\savecslist{symbols}\yyunion
-% name parser macros
+%% token typesetting macros
-\def\idstr#1#2{%
- \toksb{#1}%
- \concat\toksa\toksb
- \toksb{#2}%
- \concat\toksc\toksb
-}
+\def\prettynamecs#1#2{'parser'#1#2} % naming convention for token typesetting alternatives
-\def\chstr#1#2{%
- \toksb{{\ntt@#1}}%
- \concat\toksa\toksb
- \toksb{#2}%
- \concat\toksc\toksb
+\def\setsafe#1{\def\saferword{#1}\setspecialcharsfrom\saferword}
+\def\prettyword#1{\setsafe{#1}\expandafter\edef\csname\prettynamecs\parsernamespace\saferword\endcsname##1{{\ntt\lowercase{\saferword}}}}
+\def\prettytoken#1{\setsafe{#1}\expandafter\edef\csname\prettynamecs\parsernamespace\saferword\endcsname##1{{\ntt\saferword}}}
+\def\prettytoken@#1{\toksa{}\numberstocharsandspaces#1\end\expandafter\edef
+ \csname\prettynamecs\parsernamespace{\the\toksa}\endcsname##1{{\ntt\the\toksa}}%
}
-\def\sfxi#1#2{\toksb{\/${}_{\idxfont#1}$}\concat\toksa\toksb}
-
-\def\sfxn#1#2{\toksb{#1}\concat\toksa\toksb}
+\def\prettywordpair@@#1#2{\setsafe{#1}\expandafter\def\csname\prettynamecs\parsernamespace\saferword\endcsname##1{#2}}
-\def\dotsp{\futurelet\next\d@tsp}
+\def\prettywordpair#1#2{\prettywordpair@@{#1}{{\ntt@#2}}}
-\def\d@tsp{%
- \ifx\next\sfxi
- \else
- \ifx\next\qual
- \else
- \toksb{.}\concat\toksa\toksb
- \fi
- \fi
+\def\prettywordpair@#1#2{% a `weak symbol' version of the above macro, so that the new typesetting can be overridden
+ \setsafe{#1}\toksa{}\numberstocharsandspaces#2\end\expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \expandafter\edef\csname\prettynamecs\parsernamespace\saferword\endcsname##1{%
+ \noexpand\ifdisplaytokenraw
+ {\ntt\saferword}% \ntt expands to \noexpand\ntt@
+ \noexpand\else
+ {\ntt\def\noexpand\_{\char`\noexpand\_}\the\toksa}%
+ \noexpand\fi
+ }%
}
-\def\optstr#1#2{%
- \toksb\expandafter{\eatone#1}%
- \optstrextra
- \concat\toksa\toksb
- \toksb{#2}%
- \concat\toksc\toksb
+\def\prettywordpairwvis#1#2#3{%
+ \prettywordpair{#1}{#2}%
+ \expandafter\def\csname\viskeyref\parsernamespace{\saferword}\endcsname##1{#3}%
}
-\def\optstrextra{%
- \edef\next{\toksb{{$\nx\ulcorner$\ntt\the\toksb$\nx\urcorner$}}}\next
+\def\prettywordpair@wvis#1#2#3{%
+ \prettywordpair@{#1}{#2}%
+ \expandafter\def\csname\viskeyref\parsernamespace{\saferword}\endcsname##1{#3}%
}
-\def\optstrextraesc{%
- \edef\next{\toksb{{$\nx\langle$\ntt\the\toksb$\nx\rangle$}}}\next
+\def\prettywordpair@@wvis#1#2#3{%
+ \prettywordpair@@{#1}{#2}%
+ \expandafter\def\csname\viskeyref\parsernamespace{\saferword}\endcsname##1{#3}%
}
-\def\qual#1#2{\toksb{\/${}_{\rm#2}$}\concat\toksd\toksb}
-
-\let\idxfont\relax
-
-\savecs{nameparser}{\idstr\chstr\sfxi\sfxn\qual\optstr\dotsp\optstringextra\idxfont}
+% a control sequence to reuse the token definition file for typesetting
-\newtoks\namechars
+\newif\iftraceprettytokens % display prettifying control sequences
-\newif\iftracebadnames
-\newif\iftracenames
-
-\def\nameproc#1{%
- \ifbootstrapmode
- \toksa{}%
+\def\tokeneqpretty#1#2{%
+ \setsafe{#1}\toksa{}\numberstocharsandspaces#2\end
+ \expandafter\nameproc\expandafter{\the\toksa}\with\parsebin
+ \ifyyparsefail
+ \toksc{{}$_{\rm m}${}}\toksd\toksa
\else
- \savecs{local-namespace}\getcurrentparser
- \tosmallparser
- \basicparserinit
- \yyparse#1\yyeof\yyeof\endparseinput\endparse
- \ifyyparsefail
- \iftracebadnames
- \ferrmessage{bad name: #1}%
- \fi
- \toksa{#1}\toksc{}%
- \else
- \toksa{}\toksb{}\toksc{}\toksd{}%
- \iftracenames
- \ferrmessage{parsed name: \the\namechars}%
- \fi
- \the\namechars\relax
- \iftracenames
- \ferrmessage{processed name: \the\toksa<identifier>, \the\toksb<suffices>, \the\toksc<hash name>, \the\toksd<options>}%
- \fi
- \expandafter\ifx\csname'parser'\hostparsernamespace\the\toksc \endcsname\relax
- \else
- \toksa\expandafter{\csname'parser'\hostparsernamespace\the\toksc \endcsname}%
- \appendr\toksa{{\currentrulecontext}}%
- \fi
- \concat\toksa\toksd
- \fi
- \restorecs{local-namespace}\getcurrentparser
- \getcurrentparser
+ \toksc{}\toksd\toksb % quotes should trigger the creation of a visible key
\fi
-}
-
-%% token typesetting macros
-
-\def\setsafe#1{\def\saferword{#1}\setspecialcharsfrom\saferword}
-\def\prettyword#1{\setsafe{#1}\expandafter\edef\csname'parser'\parsernamespace\saferword\endcsname##1{{\ntt\lowercase{\saferword}}}}
-\def\prettytoken#1{\setsafe{#1}\expandafter\edef\csname'parser'\parsernamespace\saferword\endcsname##1{{\ntt\saferword}}}
-\def\prettytoken@#1{\toksa{}\numberstocharsandspaces#1\end\expandafter\edef
- \csname'parser'\parsernamespace\the\toksa\endcsname##1{{\ntt\the\toksa}}%
-}
-\def\prettywordpair#1#2{\setsafe{#1}\expandafter\def\csname'parser'\parsernamespace\saferword\endcsname##1{{\ntt@#2}}}
-\def\prettywordpair@#1#2{%
- \setsafe{#1}\toksa{}\numberstocharsandspaces#2\end\expandafter\nameproc\expandafter{\the\toksa}%
- \expandafter\edef\csname'parser'\parsernamespace\saferword\endcsname##1{%
+ \expandafter\edef\csname\prettynamecs\parsernamespace\saferword\endcsname##1{%
\noexpand\ifdisplaytokenraw
{\ntt\saferword}%
\noexpand\else
- {\ntt\def\noexpand\_{\char`\noexpand\_}\the\toksa}%
+ {\ntt\def\noexpand\_{\char`\noexpand\_}\the\toksa\the\toksc}%
\noexpand\fi
}%
+ \expandafter\edef\csname\viskeyref\parsernamespace{\saferword}\endcsname##1{\the\toksd}%
+ \iftraceprettytokens
+ \message{token: \expandafter\meaning\csname\prettynamecs\parsernamespace\saferword\endcsname}%
+ \message{visual key: \expandafter\meaning\csname\viskeyref\parsernamespace{\saferword}\endcsname}%
+ \fi
}
-\def\prettywordpair@@#1#2{\setsafe{#1}\expandafter\def\csname'parser'\parsernamespace\saferword\endcsname##1{{#2}}}
% the bootstrap macros
@@ -1471,7 +1693,7 @@
\def\idit#1#2#3#4{\toksb{#1}}
%\def\anint#1#2#3{\toksc{#1}}% this definition is not needed as all
- %information is extracted from yytname
+ % the information is extracted from yytname
% note that the definitions below do not handle cases such as
%token '{' "left brace" since the lexer knows how to return '{'
@@ -1509,15 +1731,923 @@
\savecslist{bootstrap:flex-state-declarations}\yyunion
-\def\charstonumberse#1\end{%
- \yystringempty{#1}{}%
- {\yystartsinspace{#1}{{\number`\ }\charstonumber@@#1\end}{\charstonumbers@#1\end}}%
+% correct a few macros for debugging
+
+\restorecslist{parser-debug}\yyunion
+
+\defc\actbraces{% action braces :: \actbraces{tex_string}{\idit|<nothing>}{fptr}{sptr}directive*\bdend
+ \noexpand\actbraces{#1}{#2}{#3}{#4}#5\noexpand\bdend
+}
+
+\defc\bpredicate{% predicate :: \bpredicate{tex_string}{<nothing>}{fptr}{sptr}directive*\bdend
+ \noexpand\bpredicate{#1}{}{#3}{#4}#5\noexpand\bdend
+}
+
+\defc\rhs{% a `right-hand side' :: \rhs{[\termname\hspace\rarhssep\arhssep\actbraces]+}{\arhssep|<nothing>}{\ifrhsfull}
+ \noexpand\rhs{#1}{#2}{}% omit setting the conditional
+}
+
+\toyyunion{parser-debug}
+
+% \romannumeral-expandable macros for character translation
+
+\def\charstonumbersre#1\end{\charstonumbersr@0\end#1\end}
+
+\def\charstonumbersr@#1\end#2\end{%
+ \yystringempty{#2}{#1}%
+ {\yystartsinspace{#2}{\expandafter\charstonumbers@@swap\expandafter{\number`\ }{#1}#2\end}{\charstonumb@rsr@#1\end#2\end}}%
}
-\def\charstonumbers@#1{%
- {\number`#1}\charstonumberse
+\def\charstonumb@rsr@#1\end#2{%
+ \expandafter\charstonumbersr@swap\expandafter{\number`#2}{#1}%
}
-\def\charstonumber@@#1 {\charstonumberse}
+\def\charstonumbersr@swap#1#2{\charstonumbersr@#2{#1}\end}
+
+\def\charstonumbers@@swap#1#2 {\charstonumbersr@#2{#1}\end}
+
+% variations: set \toksa to the expansion, \edef-expandable version
+
+\def\charstonumbers#1\end{\toksa\expandafter{\romannumeral\charstonumbersre#1\end}}
+\def\charstonumberse#1\end{\romannumeral\charstonumbersre#1\end}
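+
+% for example, \charstonumberse ab\end should expand (under \romannumeral) to {97}{98},
+% while \charstonumbers ab\end stores the same list in \toksa; a space becomes {32}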
+
+% name parser macros: currently only one suffix is handled, the very last one
+
+\def\yyuniontag{\yynpunion}
+\def\parserstrictnamespace{nameparser-strict}
+\def\parserprototypesnamespace{nameparser-strict:headers}
+\def\parserdebugnamespace{nameparser-debug}
+
+\def\yynpunion{\currentyyunionnamespace}
+
+\defp\idstr#1#2{} % identifier ::
+\defp\bidstr#1#2{} % bison variable ::
+\defp\chstr#1#2{} % character ::
+\defp\sfxi#1#2{} % literal suffix ::
+\defp\sfxn#1#2{} % integer suffix ::
+\defp\dotsp{} % suffix separator ::
+\defp\optstr#1#2{} % option ::
+\defp\qual#1#2{} % qualifier ::
+\defp\visflag#1#2{} % visible key change flag ::
+
+\toyyunion{nameparser-prototypes}
+
+\defc\idstr{%
+ \appendrnx\toksa{#1}%
+ \appendrnx\toksc{#2}%
+ \toksb{}%
+}
+
+\defc\bidstr{%
+ \appendrnx\toksa{{}$\Upsilon${}}%
+ \appendrnx\toksc{#2}%
+ \toksb{}%
+}
+
+\defc\chstr{%
+ \appendrnx\toksa{{\ntt@#1}}%
+ \appendrnx\toksc{#2}%
+ \toksb{}%
+}
+
+% the \toksb, set in the next two macros, is used by \optstr later
+
+\defc\sfxi{\toksb{\/${}_{\idxfont#1}$}\appendrnx\toksa{\/${}_{\idxfont#1}$}}
+
+\defc\sfxn{\toksb{#1}\appendrnx\toksa{#1}}
+
+\defc\dotsp{\futurelet\next\d@tsp}
+
+\def\d@tsp{%
+ \ifx\next\sfxi
+ \else
+ \ifx\next\qual
+ \else
+ \appendrnx\toksa{.}%
+ \fi
+ \fi
+}
+
+\defc\optstr{%
+ \toksb\expandafter{\eatone#1}%
+ \optstrextra
+ \concat\toksa\toksb
+ \appendrnx\toksc{#2}%
+ \toksb{}%
+}
+
+\def\optstrextra{%
+ \edef\next{\toksb{{$\nx\ulcorner$\ntt\the\toksb$\nx\urcorner$}}}\next
+}
+
+\def\optstrextraesc{%
+ \edef\next{\toksb{{$\nx\langle$\ntt\the\toksb$\nx\rangle$}}}\next
+}
+
+\defc\qual{\appendrnx\toksd{\/${}_{\rm#2}$}\toksb{}}
+
+\let\idxfont\relax
+
+\defc\visflag{\toksf{#1}\appendrnx\tokse{#1}}% flag visible key change
+ % \toksf contains the typesetting hint sequence
+
+\toyyunion{nameparser}
+
+\newtoks\namechars
+
+\newif\iftracebadnames
+\newif\iftracenames
+
+\def\viskeyref#1#2{% getting the possible visual key from the index key
+ % #1 is the namespace of the original key
+ % #2 is the key
+ \restorecsname{[nameparser:visual]}{.\prettynamecs{#1}{#2}}%
+}
+
+% ignore explicit (\ ) spaces while parsing names
+% the macro will be called whenever \tosmallparser is executed (see yyinit.sty);
+% this is necessary due to a shortcut taken by the macros that display
+% \bison\ productions in text (see \beginprod macros in yyinit.sty).
+
+\def\ignoreexplicitspace{\yyinput}
+
+% the main name processing routine
+% does not have any side effects;
+% the parsed term is returned as #1 of the sequence passed to #2; #2 of that sequence will hold a visual key if
+% one exists, otherwise it will be empty; #3 is \yyparsefailtrue or empty;
+% #4 is the prettified version of the term if one exists; #5 is the typesetting hint from \visflag sequences;
+% the version below prettifies tokens only if the parsing was successful; if
+% needed, prettification may be applied after the parsing is complete
+
+\def\nameproc#1\with#2{%
+ {% keeping all changes local
+ \iftracenames
+ \toksa{#1}\ferrmessage{original name: \the\toksa}%
+ \fi
+ \tosmallparser\basicparserinit\d@parse{#1}%
+ \ifyyparsefail
+ \iftracebadnames
+ \toksa{#1}\ferrmessage{bad name: \the\toksa}%
+ \fi
+ \toksb{#2{#1}{}{\yyparsefailtrue}{}{}}%
+ \else
+ \npbuildnames{#2}%
+ \fi
+ \expandafter
+ }\the\toksb
+}
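+
+% a usage sketch (`some_name' is a placeholder; cf. the \flex\ macros below):
+%   \nameproc{some_name}\with\parsebin
+% after which \toksa holds the typeset name (the prettified version if one is
+% registered) and \toksb holds the visual key, possibly empty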
+
+\def\npbuildnames#1{%
+ \iftracenames
+ \ferrmessage{parsed name (namespace: \parsernamespace): \the\namechars}%
+ \fi
+ \toksa{}\toksb{}\toksc{}\toksd{}\tokse{}\toksf{}%
+ \restorecslist{nameparser}\yynpunion
+ \the\namechars\relax
+ \iftracenames
+ \ferrmessage{processed name: \the\toksa<identifier>, \the\toksb<suffices>, %
+ \the\toksc<hash name>, \the\toksd<options>, \the\tokse<visible key>}%
+ \fi
+ \expandafter\npsetkeys\expandafter{\the\toksc}%
+ \iftracenames
+        \ferrmessage{final name: \the\toksa\yytoksempty\toksc{}{, prettified name: \the\toksc}%
+ \yytoksempty\toksf{}{, hint: \the\toksf}%
+ \yytoksempty\tokse{}{, visual key: \the\tokse}, %
+ parsing \ifyyparsefail failed\else successful\fi
+ }%
+ \fi
+ \toksb{#1}%
+ \edef\next{%
+ \toksb{%
+ \the\toksb
+ {\the\toksa}% the parsed name
+ {\the\tokse}% the visual key
+ {}% parseflag
+ {\the\toksc}% the prettified name
+ {\the\toksf}% the typesetting hint
+ }%
+ }\next
+}
+
+\def\npsetkeys#1{%
+ \expandafter\ifx\csname\prettynamecs\hostparsernamespace{#1}\endcsname\relax
+ \toksc{}%
+ \else
+ \toksc\expandafter{\csname\prettynamecs\hostparsernamespace{#1}\endcsname}%
+ \appendr\toksc{{\currentrulecontext}\the\toksb\the\toksd}%
+ \toksf{\termidstring}%
+ \fi
+ \concat\toksa\toksd % changes \toksa and \toksc, no need for \toksb or \toksd after this
+ \expandafter\ifx\csname\viskeyref\hostparsernamespace{#1}\endcsname\relax
+ % no special visual key for this token
+ \yybreak{%
+ \yytoksempty\tokse{}{\makeviskeydirect}% if the visual key changed, output the key,
+                                             % attempt to compute it from the parsed output
+ }%
+ \else
+ \yybreak{\makeviskeyfrompretty{#1}}% modify the production of the visible key to use the
+ % prettified version
+ \yycontinue % changes \toksb, \toksd, and \tokse
+}
+
+% standard postprocessing function
+
+\def\parsebin#1#2#3#4#5{%
+ \yystringempty{#4}{%
+ \yystringempty{#3}{\toksa{\eatone{#5}#1}}{\toksa{#1}}%
+ }{%
+ \yystringempty{#3}{\toksa{\eatone{#5}#4}}{\toksa{#4}}%
+ }%
+ \toksb{#2}%
+ \yystringempty{#3}{\yyparsefailfalse}{\yyparsefailtrue}%
+}
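+
+% above, #1 is the parsed name, #2 the visual key, #3 the parse flag, #4 the
+% prettified name, and #5 the typesetting hint; the prettified name is preferred
+% when present, and the hint is wrapped in a group that \eatone discards upon
+% expansion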
+
+% creating a visual key from the stored visual key and the suffixes
+
+\restorecslist{nameparser-prototypes}\yynpunion % make most sequences do nothing
+
+\defc\qual{\toksb{#2}\concat\toksd\toksb}
+
+\toyyunion{nameparser:vispretty}
+
+\def\makeviskeyfrompretty#1{%
+ \edef\next{\tokse{\expandafter\expandafter\csname\viskeyref
+ \hostparsernamespace{#1}\endcsname\expandafter{\currentrulecontext}}}\next
+ \restorecslist{nameparser:vispretty}\yynpunion%
+ \toksd{}%
+ \the\namechars
+ \yytoksempty\toksd{}{\appendr\tokse.\concat\tokse\toksd}%
+}
+
+% creating a visual key directly from parsed output
+
+\restorecslist{nameparser-strict}\yynpunion
+
+\defc\idstr{\toksb{#2}\concat\tokse\toksb}
+
+\defc\bidstr{\toksb{#2}\concat\tokse\toksb}
+
+\defc\chstr{\toksb{#2}\concat\tokse\toksb}
+
+\defc\sfxi{\toksb{#2}\concat\tokse\toksb}
+
+\defc\sfxn{\toksb{#2}\concat\tokse\toksb}
+
+\defc\dotsp{\appendr\tokse.}
+
+\defc\optstr{\toksb{#2}\concat\tokse\toksb}
+
+\defc\qual{\toksb{#2}\concat\tokse\toksb}
+
+\defc\visflag{}
+
+\toyyunion{nameparser:visdirect}
+
+\def\makeviskeydirect{%
+ \tokse{}%
+ \restorecslist{nameparser:visdirect}\yynpunion
+ \the\namechars
+}
+
+\restorecslist{nameparser-strict}\yynpunion
+
+% \flex\ regex typesetting routine and \flex\ parser value stack types
+
+\def\frexproc#1\with#2{%
+ {% keeping all changes local
+ \iftracenames
+ \toksa{#1}\ferrmessage{original regex: \the\toksa}%
+ \fi
+ \toflexreparser
+ \basicparserinit
+ \flexreparserinit
+ \flexreparserdatainit
+ \d@parse{\ #1}% the parsed regex is put in the \table register
+ \ifyyparsefail
+ \iftracebadnames
+ \toksa{#1}\ferrmessage{bad regex: \the\toksa}%
+ \fi
+ \toksb{#2{#1}{}{\yyparsefailtrue}}%
+ \else
+ \toksd{#1}%
+ \frebuildx{#2}%
+ \fi
+ \expandafter
+ }\the\toksb
+}
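+
+% a usage sketch (cf. \flexrestyle below): `\frexproc{...}\with\ppregex'
+% leaves the typeset form of the regular expression in \toksc; if the parse
+% fails, \toksc ends up empty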
+
+\def\frebuildx#1{%
+ \toksb{#1}\toksc{}%
+ \restorecslist{flexparser-re}\yyflunion
+ \the\table % typeset the regex (appears in \toksc)
+ \edef\next{%
+ \toksb{%
+ \the\toksb % the name of the processing routine
+ {\the\toksd}% the original regex
+ {\the\toksc}% processed regex (or empty)
+ {}% parse flag
+ }%
+ }\next
+}
+
+\def\ppregex#1#2#3{%
+ \toksc{#2}%
+}
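+
+% in \ppregex, #1 is the original regex, #2 its typeset form, and #3 the parse
+% flag; only the typeset form is kept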
+
+\def\setregextable{%
+ \restorecslist{flexparser-sect2}\yyflunion
+}
+
+\def\setregexdeftable{%
+ \restorecslist{flexparser-sect1}\yyflunion
+}
+
+% \flex\ parser indexing macros
+
+\def\flexidxdomain{F}
+\def\fstatedefidxrank{0}
+\def\fstateidxrank{1}
+\def\fregexidxrank{2}
+\def\ftextrefrank{5}
+
+\def\writeidxfsentry#1{% \flex\ state reference entry
+ \indxe\gindex{{\secno}{{}{\flexpseudonamespace}}{\flexidxdomain}{\fstateidxrank}#1}%
+}
+
+\def\writeidxfstextentry#1{% \flex\ state reference in text
+ \indxe\gindex{{\secno}{{}{\flexpseudonamespace}}{\flexidxdomain}{\ftextrefrank}#1}%
+}
+
+\def\writeidxfsdentry#1{% \flex\ state definition entry
+ \indxe\gindex{{\secno}{{}{\flexpseudonamespace}}{\flexidxdomain}{\fstatedefidxrank}#1}%
+}
+
+\def\writeidxfsrdentry#1{% \flex\ regex name definition entry
+ \indxe\gindex{{\secno}{{}{\flexpseudorenamespace}}{\flexidxdomain}{\fstatedefidxrank}#1}%
+}
+
+\def\writeidxfsrnentry#1{% \flex\ regex name entry
+ \indxe\gindex{{\secno}{{}{\flexpseudorenamespace}}{\flexidxdomain}{\fregexidxrank}#1}%
+}
+
+\def\writeidxfscclentry#1{% \flex\ character class name entry
+ \indxe\gindex{{\secno}{{}{\flexpseudonamespace}}{\flexidxdomain}{\fregexidxrank}#1}%
+}
+
+% \flex\ parser typesetting macros and stack types
+
+\def\yyuniontag{\yyflunion}
+\def\parserstrictnamespace{flexparser-strict}
+\def\parserprototypesnamespace{flexparser-strict:headers}
+\def\parserdebugnamespace{flexparser-debug}
+
+\def\yyflunion{\currentyyunionnamespace}
+
+\defp\flnametok#1#2{} % named definition :: \flnametok{matched text}{formatting command}, the text is \{text\}\ *
+%
+\defp\flcclexpr#1#2#3#4{} % character class expression :: \flcclexpr{matched text}{ccl token}{fptr}{sptr}
+\defp\flchar#1#2#3#4{} % character :: \flchar{char}{char12}{fptr}{sptr}
+\defp\flstring#1#2#3{} % string :: \flstring{{fptr}{sptr}}{\flchar{...}*}{{fptr}{sptr}}
+\defp\flcclrnge#1#2{} % character range :: \flcclrnge{\flchar{...}}{\flchar{...}}
+\defp\flbraceccl#1#2#3{} % character class :: \flbraceccl{{fptr}{sptr}}{ccl}{{fptr}{sptr}}
+\defp\flbracecclneg#1#2#3{} % negative character class :: \flbracecclneg{{fptr}{sptr}}{ccl}{{fptr}{sptr}}
+\defp\flcclunion#1#2{} % union of character classes :: \flcclunion{ccl}{ccl}
+\defp\flccldiff#1#2{} % difference of character classes :: \flccldiff{ccl}{ccl}
+\defp\flrepeat#1{} % asterisk operation :: \flrepeat{re}
+\defp\flrepeatstrict#1{} % plus operation :: \flrepeatstrict{re}
+\defp\flrepeatonce#1{} % ? operation :: \flrepeatonce{re}
+\defp\flrepeatnm#1#2#3{} % {n, m} operation :: \flrepeatnm{re}{num}{num}
+\defp\flrepeatgen#1#2{} % {n,} operation :: \flrepeatgen{re}{num}
+\defp\flrepeatn#1#2{} % {n} operation :: \flrepeatn{re}{num}
+\defp\flparens#1#2#3#4{} % parenthesized regular expression :: \flparens{{fptr}{sptr}}{re}{{fptr}{sptr}}{formatting command}
+\defp\fldot#1#2{} % dot expression :: \fldot{fptr}{sptr}
+\defp\flor#1#2{} % `|' expression :: \flor{fptr}{sptr}
+\defp\fltrail#1#2{} % regular expression sans trailing suffix :: \fltrail{re}{{fptr}{sptr}}
+\defp\flretrail#1#2#3{} % regular expression with a trailing context :: \flretrail{re}{{fptr}{sptr}}{re}
+\defp\flreateol#1#2#3{} % regular expression with the end of line as a trailing context :: \flreateol{re}{fptr}{sptr}
+\defp\flrule#1#2{} % full regular expression :: \flrule{re}{formatting command}
+\defp\flbolrule#1#2{} % full regular expression at the beginning of the line :: \flbolrule{re}{formatting command}
+\defp\fleof#1#2{} % end of file rule :: \fleof{fptr}{sptr}
+\defp\flsconlist#1#2#3{} % start condition list :: \flsconlist{{fptr}{sptr}}{\flname...}{{fptr}{sptr}}
+\defp\flsconuniv#1#2{} % universal start condition :: \flsconuniv{fptr}{sptr}
+\defp\flnamesep#1#2{} % separator between names :: \flnamesep{fptr}{sptr}
+\defp\flname#1#2#3#4{} % name :: \flname{name}{name12}{fptr}{sptr}
+\defp\flopt#1#2#3#4#5{} % \flex option :: \flopt{option}{text}{text12}{fptr}{sptr}
+\defp\floptions#1{} % \flex options :: \floptions{\flopt ...}
+\defp\flscondecl#1#2#3#4{} % start condition declarations :: \flscondecl{x|s}{fptr}{sptr}{\flname ...}
+\defp\flaction#1#2#3#4#5#6#7{} % \flex action :: \flaction{\flsconlist...}{\fl..rule ...}{fptr}{sptr}{fptr}{sptr}{formatting command}
+\defp\flactionc#1#2#3#4#5#6#7{} % \flex continued action :: \flactionc{\flsconlist...}{\fl..rule ...}{fptr}{sptr}{fptr}{sptr}{formatting command}
+\defp\flactiongroup#1#2#3#4#5#6{} % a group of \flex actions :: \flactiongroup{\flsconlist...}{fptr}{sptr}{\flaction...}{fptr}{sptr}
+\defp\flbareaction#1#2#3#4#5#6{} % bare action :: \flbareaction{fptr}{sptr}
+\defp\flptropt#1#2{} % %pointer directive :: \flptropt{fptr}{sptr}
+\defp\flarrayopt#1#2{} % %array directive :: \flarrayopt{fptr}{sptr}
+\defp\fltopopt#1#2#3#4{} % %top directive :: \fltopopt{fptr}{sptr}{fptr}{sptr}
+\defp\flredef#1#2#3#4#5#6#7#8{} % regular expression definition :: \flredef{name}{name12}{fptr}{sptr}{text}{text12}{fptr}{sptr}
+
+\toyyunion{flexparser-prototypes}
+
+\restorecslist{flexparser-strict}\yyflunion
+
+\defc\flrule{%#1#2% full regular expression :: \flrule{re}{formatting command}
+ {\toksc{}#1#2\expandafter}\expandafter\toksd\expandafter{\the\toksc}%
+ \concat\toksc\toksd
+}
+
+\newif\iftextre % check if this is a free-standing regular expression or appears in the definitions table
+
+\defc\flbolrule{%#1#2% full regular expression at the beginning of the line :: \flbolrule{re}{formatting command}
+ {\toksc{}#1#2\appendlnx\toksc{\raise0.5pt%
+ \iftextre\yybreak{\hbox}\else\yybreak{\llap}\yycontinue{\sscmd$\dashv$\ }}\expandafter}\expandafter%
+ \toksd\expandafter{\the\toksc}\concat\toksc\toksd
+}
+
+\let\astformat@flrule\empty
+
+\def\inscomment#1{% insert a comment before the rule
+ \def\astformat@flrule{%
+ \appendlnx\toksc{\noalign{#1}}%
+ }%
+}
+
+\def\insrulealign#1#2{%
+ \def\astformat@flrule{%
+ \tokse{#1}\toksf{#2}%
+ \concatl\tokse\toksc
+ \concat\toksc\toksf
+ }%
+}
+
+\def\rulealigntemplate{% to hide the hash symbols from \edef operators
+ \vbox\bgroup\halign\bgroup##\hfil&##\hfil\cr
+}
+
+% regular expression symbols
+
+\defc\flstring{%#1#2#3% string :: \flstring{{fptr}{sptr}}{\flchar{...}*}{{fptr}{sptr}}
+% \toksc\expandafter{\the\toksc\hbox{#2}}%
+ #2%
+}
+
+\defc\flnametok{%#1#2% named definition :: \flnametok{matched text}{formatting command}, the text is \{text\}\ *
+ \let\termindex\writeidxfsrnentry
+ \fln@metok#1\end{#2}%
+ \let\termindex\eatone
+}
+
+\def\fln@metok\{#1\}#2\end#3{%
+ {%
+ \let\hostparsernamespace\flexpseudorenamespace
+ \nameproc{#1}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \toksf{}\toksg{}%
+ #3%
+ \edef\next{%
+ \toksc{\the\toksc\the\toksf{}\nx\flexrendisplay{\the\toksd}%
+ \gidxentryxv{\termfsrestring}{#1}{\the\tokse}\the\toksg
+ }%
+ }\next
+}
+
+\def\insnamealign#1#2{%
+ \def\astformat@flnametok{%
+ \toksf{#1}%
+ \toksg{#2}%
+ }%
+}
+
+\let\astformat@flnametok\empty
+
+\defc\flchar{%#1#2#3#4% character :: \flchar{char}{char12}{fptr}{sptr}
+ \yystringempty{#2}{#1}{\flch@r#2\end}%
+}
+
+\def\flch@r#1#2\end{%
+ \ifnum`#1=`\\
+ \fl@h@r#2\end
+ \else
+ \toksc\expandafter{\the\toksc{\ntt#1#2}}%
+ \fi
+}
+
+\def\fl@h@r#1#2\end{% process the sequence after an escape
+ {%
+ \let\default\flescdefault
+ \tokse{#1}\toksd{#2}%
+ \switchon{\the\tokse}\in\flesccharswitch
+ \edef\next{\toksc{\toksc{\the\toksc\the\toksd}}}\next
+ \expandafter
+ }\the\toksc
+}
+
+\def\flesccharswitch{% recognizing different types of escape sequences
+01234567
+ {% octal constant
+ \edef\next{\toksd{\nx\.{\the\tokse\the\toksd}$_8${}}}\next
+ }
+nfrtabv
+ {% \Cee\ escape sequence
+ \edef\next{\toksd{{\nx\sscmd\nx\sscmd$\nx\langle$\nx\.{\the\tokse\the\toksd}$\nx\rangle$}}}\next
+ }
+x
+ {% hexadecimal constant
+ \edef\next{\toksd{\nx\.{\the\toksd}$_{16}${}}}\next
+ }
+}
+
+\def\flescdefault{%
+ \edef\next{\toksd{\nx\.{\the\tokse\the\toksd}}}\next
+}
+
+\setspecialcharsfrom\flesccharswitch
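+
+% for example (a sketch of the branches above): `\101' is set as `101' with an
+% 8 subscript, `\n' is set between angle brackets, and `\x41' drops the `x' and
+% is set as `41' with a 16 subscript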
+
+\defc\fldot{%#1#2% dot expression :: \fldot{fptr}{sptr}
+ \toksc\expandafter{\the\toksc.}%
+}
+
+\defc\flparens{%#1#2#3#4% parenthesized regular expression :: \flparens{{fptr}{sptr}}{re}{{fptr}{sptr}}{formatting command}
+ {\toksc{}#2\expandafter}\expandafter\toksd\expandafter
+ {\the\toksc{\rm)}}%
+ \toksf{}\toksg{}#4%
+ \tokse\expandafter{\the\toksf{\rm(}}%
+ \concatl\tokse\toksd
+ \concat\toksc\toksd
+ \concat\toksc\toksg
+}
+
+\def\insparensalign#1#2{%
+ \def\astformat@flparens{%
+ \toksf{#1}%
+ \toksg{#2}%
+ }%
+}
+
+\let\astformat@flparens\empty
+
+\defc\flrepeat{%#1% asterisk operation :: \flrepeat{re}
+ {\toksc{}#1\expandafter}\expandafter\toksd\expandafter
+ {\the\toksc{{}${}_{*}$}}%
+ \flselectiveseparator
+ \concat\toksc\toksd%
+}
+
+\defc\flrepeatn{%#1#2% {n} operation :: \flrepeatn{re}{num}
+ {\toksc{}#1\expandafter}\expandafter\toksd\expandafter
+ {\the\toksc{{}${}_{\{\yyfirstoftwo#2\}}$}}%
+ \flselectiveseparator
+ \concat\toksc\toksd%
+}
+
+\defc\flrepeatnm{%#1#2#3% {n, m} operation :: \flrepeatnm{re}{num}{num}
+ {\toksc{}#1\expandafter}\expandafter\toksd\expandafter
+ {\the\toksc{{}${}_{\{\yyfirstoftwo#2,\yyfirstoftwo#3\}}$}}%
+ \flselectiveseparator
+ \concat\toksc\toksd%
+}
+
+\defc\flrepeatonce{%#1% ? operation :: \flrepeatonce{re}
+ {\toksc{}#1\expandafter}\expandafter\toksd\expandafter
+ {\the\toksc{{}${}_{?}$}}%
+ \flselectiveseparator
+ \concat\toksc\toksd%
+}
+
+\defc\flrepeatstrict{%#1% plus operation :: \flrepeatstrict{re}
+ {\toksc{}#1\expandafter}\expandafter\toksd\expandafter{\the\toksc{{}${}_{+}$}}%
+ \flselectiveseparator
+ \concat\toksc\toksd%
+}
+
+\def\flselectiveseparator{%
+ \yytoksempty\toksc{}{%
+ \tokse{{{}$\,$}}%
+ \concatl\tokse\toksd
+ }%
+}
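+
+% \flselectiveseparator inserts a thin space before the repetition subscript
+% unless the regular expression typeset so far is empty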
+
+\defc\flcclrnge{%#1#2% character range :: \flcclrnge{\flchar{...}}{\flchar{...}}
+% \toksd{#1}\getfirst\toksd\to\toksd
+% \tokse{#2}\getfirst\tokse\to\tokse
+ #1\appendr\toksc{{\nx\rm--}}#2%
+% \edef\next{\toksc{\the\toksc\the\toksd{\nx\rm--}\the\tokse}}\next
+}
+
+\defc\flcclexpr{%#1#2#3#4% character class expression :: \flcclexpr{matched text}{ccl token}{fptr}{sptr}
+ {%
+ \let\hostparsernamespace\flexpseudonamespace
+ \nameproc{#2}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \let\termindex\writeidxfscclentry
+ \edef\next{\toksc{\the\toksc{\the\toksd}%
+ \gidxentryxv{\termidstring}{#2}{\the\tokse}%
+ }}\next
+ \let\termindex\eatone
+}
+
+\defc\flbraceccl{%#1#2#3% character class :: \flbraceccl{{fptr}{sptr}}{ccl}{{fptr}{sptr}}
+ \appendrnx\toksc{{}$[${}}%
+ #2%
+ \appendrnx\toksc{{}$]${}}%
+}
+
+\def\flbraceccldemo#1#2#3{% character class :: \flbraceccl{{fptr}{sptr}}{ccl}{{fptr}{sptr}}
+ % a demo version without the beginning bracket
+ \appendrnx\toksc{{}$\ldots${}}%
+ #2%
+ \appendrnx\toksc{{}$]${}}%
+}
+
+\defc\flccldiff{%#1#2% difference of character classes :: \flccldiff{ccl}{ccl}
+ #1%
+ \appendr\toksc{${}\nx\setminus{}$}%
+ #2%
+}
+
+\defc\flbracecclneg{%#1#2#3% negative character class :: \flbracecclneg{{fptr}{sptr}}{ccl}{{fptr}{sptr}}
+ \appendr\toksc{{}$[${}}%
+ #2%
+ \appendr\toksc{{}$]^c${}}%
+}
+
+\defc\fleof{%#1#2% end of file rule :: \fleof{fptr}{sptr}
+ \let\termindex\writeidxfsrnentry
+ \edef\next{\toksc{\the\toksc{}\nx\flexrendisplay{EOF}%
+ \gidxentry{\termfsrestring}{EOF}{EOF}}}\next
+ \let\termindex\eatone
+}
+
+\defc\flor{%#1#2% `|' expression :: \flor{fptr}{sptr}
+ \appendr\toksc{{}${}\nx\mid{}${}}%
+}
+
+\defc\flsconuniv{%#1#2% universal start condition :: \flsconuniv{fptr}{sptr}
+ \let\termindex\writeidxfsentry
+ \edef\next{\toksb{\the\toksb{}$\langle*\rangle${}%
+ \gidxentryxv{\termvstring}{*}{*}}}\next % TODO: create a separate name space for state names
+ \let\termindex\eatone
+}
+
+\defc\fltrail{%#1#2% regular expression sans trailing suffix :: \fltrail{re}{{fptr}{sptr}}
+ \errmessage{\nx\fltrail\space macro has not been implemented (see yyunion.sty).}
+}
+
+\defc\flretrail{%#1#2#3% regular expression with a trailing context :: \flretrail{re}{{fptr}{sptr}}{re}
+ \errmessage{\nx\flretrail\space macro has not been implemented (see yyunion.sty).}
+}
+
+\defc\flreateol{%#1#2#3% regular expression with the end of line as a trailing context :: \flreateol{re}{fptr}{sptr}
+ {\toksc{}#1\expandafter}\expandafter\toksd\expandafter{\the\toksc{\raise0.5pt\rlap{\sscmd\ $\vdash$}}}%
+ \concat\toksc\toksd%
+}
+
+\toyyunion{flexparser-re}
+
+\def\flexsndisplay#1{% state name typesetting style
+ \hbox{\tt#1}%
+}
+
+\def\flexrendisplay#1{% regular expression name typesetting style
+ \penalty-100 \hbox{$\langle$\tt#1$\rangle$}%
+}
+
+\def\flexsnstyle#1{% typesetting \flex\ state names in text
+ {%
+ \let\parsernamespace\flexpseudonamespace
+ \nameproc{#1}\with\parsebin
+ \edef\next{%
+ \toksd{\the\toksa}\tokse{\the\toksb}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \let\termindex\writeidxfstextentry
+ \edef\next{\toksd{\nx\flexsndisplay{\the\toksd}%
+ \gidxentryxv{\termvstring}{#1}{\the\tokse}}}\next
+ \expandafter
+ }\the\toksd
+}
+
+\def\flexrenstyle#1{% typesetting \flex\ regular expression names in text
+ {%
+ \let\parsernamespace\flexpseudorenamespace
+ \nameproc{#1}\with\parsebin
+ \edef\next{%
+ \toksd{\the\toksa}\tokse{\the\toksb}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \let\termindex\writeidxfstextentry
+ \edef\next{\toksd{\nx\flexrendisplay{\the\toksd}%
+ \gidxentryxv{\termfsrestring}{#1}{\the\tokse}}}\next
+ \expandafter
+ }\the\toksd
+}
+
+\def\flexrestyle#1{% typesetting \flex\ regular expressions in text
+ {%
+ \frexproc{#1}\with\ppregex
+ \edef\next{\toksc{\toksc{\the\toksc}}}\next
+ \expandafter
+ }\the\toksc
+ \hbox{\textretrue\tt\the\toksc}%
+}
+
+\defc\flactiongroup{% #1#2#3#4#5#6% a group of \flex actions :: \flactiongroup{\flsconlist...}{fptr}{sptr}{\flaction...}{fptr}{sptr}
+ \toksb{}#1% collect all the state name information in \toksb
+    % gather the preceding stash
+ \readstash{#3}% read stash late, after \flsconslist (ignore it)
+ \edef\next{%
+ \toksb{\noalign{\hbox to\nx\hsize{\hskip\the\rgindent
+ \nx\flexsndisplay{\the\toksb}:\toksa{\relax\the\toksg}\nx\makestashbox\nx\hfil}}}%
+ }\next
+ \yytoksempty\table{}{\appendrnx\table{\noalign{\medskip}}}%
+ \concat\table\toksb
+ \advance\rgindent by\parindent
+ #4%
+ \advance\rgindent by-\parindent
+}
+
+\defc\flsconlist{% #1#2#3% start condition list :: \flsconlist{{fptr}{sptr}}{\flname...}{{fptr}{sptr}}
+  \readstashwredirect#1\toksg % save the stash preceding the conditions list to reinsert later
+ #2% collect the state names (and the stash in between)
+ \readstashwredirect#3\toksf
+ \appendr\toksb{\nx\rm\toksa{\the\toksf}\nx\makestashbox}%
+}
+
+\defc\flname{% #1#2#3#4% name :: \flname{name}{name12}{fptr}{sptr}
+ {%
+ \let\parsernamespace\flexpseudonamespace
+ \nameproc{#2}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \let\termindex\writeidxfsentry
+ \readstashwredirect{}{#4}\toksf % do not touch \yystashlocal
+ \appendr\toksb{{\nx\rm\toksa{\the\toksf}\nx\makestashbox}%
+ \the\toksd\gidxentryxv{\termvstring}{#1}{\the\tokse}}%
+ \let\termindex\eatone
+}
+
+\defc\flnamesep{%#1#2% separator between names :: \flnamesep{fptr}{sptr}
+ \toksb\expandafter{\the\toksb\ }%
+}
+
+\newskip\rgindent
+
+\rgindent=\parindent
+
+\defc\flaction{%#1#2#3#4#5#6#7% \flex action :: \flaction{\flsconlist...}{\fl..rule ...}{fptr}{sptr}{fptr}{sptr}{formatting command}
+ \toksb{}#1%
+ \toksc{}#2%
+ #7% apply formatting
+ \readstash{#6}% read stash late, after \flsconlist and \fl...rule
+ \yytoksempty\toksb{%
+ \appendr\table{\hskip\the\rgindent\hbox{\nx\tt\nx\strut\the\toksc}}%
+ }{%
+ \appendr\table{%
+ \hskip\the\rgindent
+ \vbox{%
+ {\nx\sscmd\nx\expandafter}\nx\expandafter\baselineskip\nx\the\baselineskip
+ \halign{&####\hfil\cr
+ \hbox to\rgindent{\nx\sscmd\nx\tt\nx\strut\the\toksb\hss}\cr
+ \nx\tt\nx\strut\the\toksc\cr
+ }%
+ }%
+ }%
+ }%
+ \appendr\table{&\toksa{\relax\the\yystashlocal}\nx\cr}%
+}
+
+\defc\flactionc{%#1#2#3#4#5#6#7% \flex continued action :: \flactionc{\flsconlist...}{\fl..rule ...}{fptr}{sptr}{fptr}{sptr}{formatting command}
+ \toksb{}#1%
+ \toksc{}#2%
+ \readstash{#6}% read stash late, after \flsconlist and \fl...rule
+ #7% apply formatting
+ \yytoksempty\toksb{%
+ \appendr\table{\hskip\the\rgindent\hbox{\nx\tt\nx\strut\the\toksc}}%
+ }{%
+ \appendr\table{%
+ \hskip\the\rgindent
+ \vbox{%
+ {\nx\sscmd\nx\expandafter}\nx\expandafter\baselineskip\nx\the\baselineskip
+ \halign{&####\hfil\cr
+ \hbox to\rgindent{\nx\sscmd\nx\tt\nx\strut\the\toksb\hss}\cr
+ \nx\tt\nx\strut\the\toksc\cr
+ }%
+ }%
+ }%
+ }%
+ \appendr\table{&\toksa{{}$\nx\hookleftarrow$\relax\the\yystashlocal}\nx\cr}% in the event \cr has been redefined
+}
+
+\def\inscomment#1{%
+ \def\astformat@flaction{%
+ \toksd{#1}%
+ \appendr\table{\noalign{\nx\smallskip\noindent\hskip\the\rgindent
+ $\nx\triangleright\;$\the\toksd\nx\smallskip}}%
+ }%
+}
+
+\let\astformat@flaction\empty
+
+\toyyunion{flexparser-sect2}
+
+% \flex\ section~1 typesetting
+% currently, all \flex\ state declarations are handled by an extended \bison\ parser
+% to collect the state data; we thus only implement regular expression definitions
+
+\restorecslist{flexparser-strict}\yyflunion
+
+\defc\flredef{%#1#2#3#4#5#6#7#8% regular expression definition :: \flredef{name}{name12}{fptr}{sptr}{text}{text12}{fptr}{sptr}
+ \let\termindex\writeidxfsrdentry
+ {%
+ \let\hostparsernamespace\flexpseudorenamespace
+ \nameproc{#1}\with\parsebin
+ \edef\next{%
+ \toksd{\toksd{\the\toksa}\tokse{\the\toksb}}%
+ }\next % a trick to `reshuffle' the output of \nameproc:
+ % the parsed name goes to \toksd and the visual key is put in \tokse
+ \expandafter
+ }\the\toksd
+ \frexproc{#5}\with\ppregex
+ \appendr\table{{}$\nx\langle\hbox{\ntt\the\toksd}\nx\rangle${}%
+ \gidxentryxv{\termfsrestring}{#1}{\the\tokse}&\the\toksc\cr% TODO: make a special type processor for regex names
+ }%
+ \let\termindex\eatone
+}
+
+\defc\flopt{%#1#2#3#4#5% \flex option :: \flopt{option}{text}{text12}{fptr}{sptr}
+ \csname flex@ption_#1\endcsname{#2}{#3}%
+}
+
+\expandafter\def\csname flex@ption_deprecated\endcsname#1#2{%
+ \flex@opt@deprecated#2%
+}
+
+\def\flex@opt@deprecated#1#2#3^^J{%
+ \let\termindex\writeidxfsrnentry
+ \appendr\table{$\langle${\nx\tt #2}$\rangle_{\rm f}$\gidxentryxv{\termfsopstring}{#2}{#2}%
+ \the\toksg&\number\flexgetnumber#3.\nx\cr}%
+ \let\termindex\empty
+}
+
+\def\flexgetnumber#1{%
+ \ifnum`#1<"3A
+ \yybreak{%
+ \ifnum`#1>"2F
+ \yybreak{#1\flexgetnumber}%
+ \else
+ \yybreak{%
+ \ifnum`#1="20
+ \yybreak{\flexgetnumber}%
+ \else
+ \yybreak{}%
+ \yycontinue
+ }
+ \yycontinue
+ }%
+ \else
+ \yybreak{}%
+ \yycontinue
+}
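+
+% \flexgetnumber is an expandable scanner: it skips blanks, collects the leading
+% run of decimal digits, and stops at the first other character; it is used
+% above as `\number\flexgetnumber#3.' to extract a number from the option text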
+
+\toyyunion{flexparser-sect1}
+
+% correct a few sequences for debugging purposes
+
+\restorecslist{flexparser-debug}\yyflunion
+
+\defc\flnametok{%#1#2% named definition :: \flnametok{matched text}{formatting command}, the text is \{text\}\ *
+ \noexpand\flnametok{#1}{}%
+}
+
+\defc\flparens{%#1#2#3#4% parenthesized regular expression :: \flparens{{fptr}{sptr}}{re}{{fptr}{sptr}}{formatting command}
+ \noexpand\flparens{#1}{#2}{#3}{}%
+}
+
+\defc\flrule{%#1#2% full regular expression :: \flrule{re}{formatting command}
+ \noexpand\flrule{#1}{}%
+}
+
+\defc\flbolrule{%#1#2% full regular expression at the beginning of the line :: \flbolrule{re}{formatting command}
+ \noexpand\flbolrule{#1}{}%
+}
+
+\defc\flaction{%#1#2#3#4#5#6#7% \flex action :: \flaction{\flsconlist...}{\fl..rule ...}{fptr}{sptr}{fptr}{sptr}{formatting command}
+ \noexpand\flaction{#1}{#2}{#3}{#4}{#5}{#6}{}%
+}
+
+\defc\flactionc{%#1#2#3#4#5#6#7% \flex continued action :: \flactionc{\flsconlist...}{\fl..rule ...}{fptr}{sptr}{fptr}{sptr}{formatting command}
+ \noexpand\flactionc{#1}{#2}{#3}{#4}{#5}{#6}{}%
+}
-\def\charstonumbers#1\end{\edef\next{\toksa{\charstonumberse#1\end}}\next}
+\toyyunion{flexparser-debug}
diff --git a/support/splint/tex/yyxunion.sty b/support/splint/tex/yyxunion.sty
deleted file mode 100644
index fb0d2460b3..0000000000
--- a/support/splint/tex/yyxunion.sty
+++ /dev/null
@@ -1,33 +0,0 @@
-% Copyright 2012-2015, Alexander Shibakov
-% This file is part of SPLinT
-%
-% SPLinT is free software: you can redistribute it and/or modify
-% it under the terms of the GNU General Public License as published by
-% the Free Software Foundation, either version 3 of the License, or
-% (at your option) any later version.
-%
-% SPLinT is distributed in the hope that it will be useful,
-% but WITHOUT ANY WARRANTY; without even the implied warranty of
-% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-% GNU General Public License for more details.
-%
-% You should have received a copy of the GNU General Public License
-% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
-
-% the original, minimal bootstrapping macros were designed to process
-% \prodstyle{\%token} declarations only and are enough to establish the
-% interface between the \bison\ parser and the \bison\ lexer; to serve
-% the secondary task of providing typesetting information to
-% the \bison\ parser, all forms of token declarations must be processed.
-
-\restorecslist{bootstrap}\yyunion % get the original bootstrap macros
-
-\def\precdecls#1#2#3#4#5{#3}
-
-\def\symbolprec#1#2{%
- \toksa{}\toksb{}%
- #1%
- \yytoksempty\toksb{}{\immediate\write\tokendefs{\noexpand\tokenpp{\the\toksb}}}%
-}
-
-\savecslist{bootstrap}\yyunion