authorNorbert Preining <norbert@preining.info>2019-09-02 13:46:59 +0900
committerNorbert Preining <norbert@preining.info>2019-09-02 13:46:59 +0900
commite0c6872cf40896c7be36b11dcc744620f10adf1d (patch)
tree60335e10d2f4354b0674ec22d7b53f0f8abee672 /support/splint/cweb
Initial commit
Diffstat (limited to 'support/splint/cweb')
-rw-r--r--  support/splint/cweb/Makefile       115
-rw-r--r--  support/splint/cweb/bo.w          2479
-rw-r--r--  support/splint/cweb/bs.w           706
-rw-r--r--  support/splint/cweb/common.w       788
-rw-r--r--  support/splint/cweb/fk.w           510
-rw-r--r--  support/splint/cweb/lo.w           797
-rw-r--r--  support/splint/cweb/mkeparser.w    123
-rw-r--r--  support/splint/cweb/mkscanner.w    102
-rw-r--r--  support/splint/cweb/np.w           380
-rw-r--r--  support/splint/cweb/philosophy.w   223
-rw-r--r--  support/splint/cweb/references.w    73
-rw-r--r--  support/splint/cweb/splint.w       102
-rw-r--r--  support/splint/cweb/ssffo.w        118
13 files changed, 6516 insertions, 0 deletions
diff --git a/support/splint/cweb/Makefile b/support/splint/cweb/Makefile
new file mode 100644
index 0000000000..ec9973fb66
--- /dev/null
+++ b/support/splint/cweb/Makefile
@@ -0,0 +1,115 @@
+SPLINT_ROOT = $(shell pwd)/..
+
+include ${SPLINT_ROOT}/makefile.inc
+
+all: ${SPLINT_PTABLES} ${SPLINT_LTABLES}
+
+b%out: mkeparser.c b%.c
+ ${CC} ${BISON_STATE} -DPARSER_FILE=\"$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
+
+b%.yy: bo.x
+ ${CTANGLE} $<
+
+%yytab.tex: b%out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+
+ltab.tex: ltout
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+
+ltout: mkscanner.c lo_states.h lo.c
+ ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
+
+ssffo.ll lo.ll: \
+%.ll: %.x
+ ${CTANGLE} $< && rm $(patsubst %.x, %.c, $^)
+
+lo.c: lo.l
+ ${FLEX} -o $@ $<
+
+mkscanner.c mkeparser.c: \
+%.c: %.w
+ ${CTANGLE} $<
+
+# name parser
+
+smallp_out: mkeparser.c small_parser.c
+ ${CC} ${BISON_STATE} -DPARSER_FILE=\"$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
+
+smalll_out: mkscanner.c small_lexer.c
+ ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
+
+small_tab.tex: smallp_out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+
+small_dfa.tex: smalll_out
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+
+small_parser.yy small_lexer.ll: np.x
+ @${CTANGLE} $<
+
+bo.tex: bo.x
+ ${CWEAVE} -x $<
+
+splint.tex \
+splint.idx \
+splint.scn: splint.w bo.x lo.x np.x common.w bs.w fk.w philosophy.w references.w
+ ${CWEAVE} $<
+
+ssffo.tex \
+ssffo.idx ssffo.scn: ssffo.x
+ ${CWEAVE} $<
+
+bo.tok: bo.tex ltab.tex byytab.tex
+ ${TEX} ${MODEBOOTSTRAP} \\input $<
+
+ssffo.pdf: %.pdf: ${SPLINT_DOC_PREREQS_XREF}
+ ${PDFTEX} $*.tex
+
+ssffo.dvi: %.dvi: ${SPLINT_DOC_PREREQS_XREF}
+ ${TEX} $*.tex
+
+splint.gdx: %.gdx: ${SPLINT_DOC_PREREQS_XREF}
+ @echo "Making the bison and TeX indices ..."
+ ${TEX} $*.tex
+
+splint.pdf: %.pdf: ${SPLINT_DOC_PREREQS_XREF} %.gdy
+ ${PDFTEX} \\input $*.tex && touch $*.gdy && touch $*.pdf
+
+splint.dvi: %.dvi: ${SPLINT_DOC_PREREQS_XREF} %.gdy
+ ${TEX} $*.tex && touch $*.gdy && touch $*.dvi
+
+${SPLINT_ROOT}/tex/btokenset.sty: # stupid make weirdness
+ @
+
+# state parsing
+
+lstabout: mkscanner.c ssffo.c
+ ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $<
+
+lstab.tex: lstabout
+ ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@
+
+lo.tex: lo.x
+ ${CWEAVE} $<
+
+lo_states.h: lo.tex lstab.tex byytab.tex
+ ${PDFTEX} $<
+
+# clean will erase all automatically generated files in the current directory
+
+clean: clean_core
+ -rm -f ctablesout b?out ltout smallp_out \
+ smalll_out lstabout
+
+include ${SPLINT_ROOT}/makefile.loc
+
+# since bg.yy is not an intermediate file in examples/symbols/Makefile, repeated 'make all'
+# remakes bg.yy thereby forcing make to update byytab.tex, etc., which results in remaking
+# of bo.tok, lo.tex, eventually leading to remaking of splint.pdf;
+# the special target below tells make to treat bg.yy as if it were not an intermediate file
+
+.PRECIOUS: bg.yy bg.y
+
+# the files below appear as targets but are really intermediaries for other files
+
+.INTERMEDIATE: smallp_out smalll_out lstabout ltout splint.gdx
diff --git a/support/splint/cweb/bo.w b/support/splint/cweb/bo.w
new file mode 100644
index 0000000000..2185f2f65e
--- /dev/null
+++ b/support/splint/cweb/bo.w
@@ -0,0 +1,2479 @@
+% Copyright 2012-2014, Alexander Shibakov
+% Copyright 2002-2014 Free Software Foundation, Inc.
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+\input limbo.sty
+\input frontmatter.sty
+\def\optimization{5}
+\input yy.sty
+% multi-column output
+\input dcols.sty
+
+\let\hostparsernamespace\mainnamespace % the namespace where tokens are looked up
+ % for typesetting purposes
+\let\currentparsernamespace\parsernamespace
+ \let\parsernamespace\mainnamespace
+ \let\currenttokeneq\tokeneq
+ %\def\tokeneq#1#2{\prettytoken{#1}}
+ \let\tokeneq\prettywordpair@@
+ \let\optstrextra\optstrextraesc
+ \input bo.tok % re-use token equivalence table to set the typesetting of tokens
+ \let\tokeneq\currenttokeneq
+ \input btokenset.sty
+ % index entries
+ \let\parsernamespace\indexpseudonamespace
+ \prettywordpair{emptyrhs}{$\circ$ {\rm(empty rhs)}}%
+ \prettywordpair{inline_action}{$\diamond$ {\rm(inline action)}}%
+ \prettywordpair{TOKEN}{{\tt TOKEN} {\rm(example)}}%
+ \prettywordpair{token}{{\tt "token"} {\rm(example)}}%
+\let\parsernamespace\currentparsernamespace
+
+\immediate\openout\exampletable=\jobname.exl
+
+\def\nontitle#1{{\ttl #1}}
+\def\cite[#1]{%
+ \def\next{#1}\setbox0=\hbox{l}%
+ [\ifx\next\empty$\,$\hbox{\vrule width\wd0 height\ht0 depth\dp0}$\,$\else \locallink{#1bibref}#1\endlink\fi]%
+}
+
+\let\oldN\N
+\let\N\textN
+\let\M\textM
+
+\defreserved{Y}{\.{Y}}
+\showlastactiontrue
+
+@**Introduction.
+\setupfootnotes
+\splint\footnote{I was tempted to call the package {\tt ParLALRgram}
+which stands for Parsing {\sc LALR} Grammars or {\tt PinT} for
+`Parsing in \TeX' but both sounded too generic.} (Simple Parsing and
+Lexing in \TeX, or, following the great GNU
+tradition of creating recursive names, \splint\ Parses Languages
+in \TeX) is a system (or
+rather a m\'elange of systems) designed to
+facilitate developing parsing macros in \TeX\ and (to a lesser
+degree) documenting parsers written in other languages. As
+an application, a parser for \bison\ input file syntax has been
+developed, along with a macro collection that makes it possible to
+design and pretty print \bison\ grammars using \CWEB.
+
+Developing software in \CWEB\ involves two programs. The first of these is
+\CTANGLE\ that outputs the actual code, intended to be in
+\Cee. In reality, \CTANGLE\ cares very little about the language it
+produces. Exceptions are \Cee\ comments and |@[#line@]| directives that might
+confuse lesser software, although \bison\ is all too happy to swallow them
+(there are also some \Cee\ specific constructs that \CTANGLE\ tries to
+recognize). \CTANGLE's main function is to rearrange the text of the
+program as written by the programmer (in a way that, hopefully,
+emphasizes the internal logic of the code) into an appropriate
+sequence (e.g.~all variable declarations must textually precede their
+use). All that is required to adapt \CTANGLE\ to produce \bison\
+output is some very rudimentary post- and pre-processing.
+
+Our main concern is thus \CWEAVE, which not only pretty prints the
+program but also creates an index, cross-references all the
+sections, etc. Getting \CWEAVE\ to pretty print a language other than
+\Cee\ requires some additional attention. A true digital warrior would
+probably try to decipher \CWEAVE's output `in the raw' but, alas, my
+WebFu is not that strong. The loophole comes in the form of a rarely
+(for a good reason) used \CWEB\ command: the verbatim (\.{@@=...@@>})
+output. The material to be output by this construct undergoes minimal
+processing and is put inside \.{\\vb\{}$\ldots$\.{\}}. All that is
+needed now is a way to process this virtually straight text inside \TeX.
+
+@*1 Using the \bison\ parser.
+The process of using \splint\ for writing parsing macros in \TeX\ is
+treated in considerable detail later in this document. A shorter
+(albeit somewhat outdated but still applicable) version of this
+process is outlined in \cite[Sh]. We begin,
+instead, by explaining how one such parser can be used to pretty print a
+\bison\ grammar. Following the convention mentioned above and putting
+all non-\Cee\ code inside \CWEAVE's verbatim blocks, consider the
+following (meaningless) code fragment. The fragment contains a mixture
+of \Cee\ and \bison\ code, the former appears outside of the verbatim blocks.
+\begindemo
+^@@= non_terminal: @@>
+^@@= term.1 term.2 {@@> a = b; @@=}@@>
+^@@= **H term.3 other_term {@@> $$ = $1; @@=}@@>
+^@@= **H still more terms {@@> f($1); @@=}@@>
+^@@= ; @@>
+\enddemo
+The fragment above will appear as (the output of \CTANGLE\ can be
+examined in \.{sill.y})
+@<A silly example@>=
+@G
+non_terminal:
+ term.1 term.2 {@> a = b; @=}
+| term.3 other_term {@> $$ = $1; @=}
+| still more terms {@> f($1); @=}
+;
+@g
+
+@ $\ldots$ if the syntax is correct.
+In case it is a bit off, the parser will give up and
+you will see a different result. The code in the fragment below is easily
+recognizable, and some parts of it (all of \Cee\ code, in fact) are
+still pretty printed in \CWEAVE. Only the verbatim portion is left
+unprocessed.
+@<A silly example@>=
+@G
+whoops
+ term.1 term.2 {@>@+ a = b; @+@=}
+| term.3 other_term {@>@+ $$ = $1; @+@=}
+| still more terms {@>@+ f($1); @+@=}
+;
+@g
+
+@ The \TeX\ header that makes such output possible is quite plain. In this case
+(i.e.\ this very file) it begins as
+\begindemo
+^\input limbo.sty
+^\input frontmatter.sty
+^\input yy.sty
+\nooutput
+\enddemo
+The first two lines are presented here merely for completeness: there is
+no parsing-relevant code in them. The line that
+follows loads the macros that implement the parsing and scanning
+machinery. This is enough to set up all the basic
+mechanisms used by the parsing and lexing macros. The rest of the header
+provides a few definitions to fine tune the typesetting of
+grammar productions. It starts with
+\begindemo
+^\let\currentparsernamespace\parsernamespace
+^ \let\parsernamespace\mainnamespace
+^ \let\currenttokeneq\tokeneq
+^ \def\tokeneq#1#2{\prettytoken{#1}}
+^ \input bo.tok % re-use token equivalence table to set the typesetting of tokens
+^ \let\tokeneq\currenttokeneq
+^ \input btokenset.sty
+\nooutput
+\enddemo
+We will have a chance to discuss all the \.{\\}$\ldots$\.{namespace}
+macros later; at this point it will suffice to say that the lines
+above are responsible for controlling the typesetting of term names. The
+file \.{bo.tok} consists of a number of lines like the ones below:
+\begindemo
+^\tokeneq {STRING}{{34}{115}{116}{114}{105}{110}{103}{34}}
+^\tokeneq {PERCENT_TOKEN}{{34}{37}{116}{111}{107}{101}{110}{34}}
+\nooutput
+\enddemo
+The cryptic looking sequences of integers above are strings of {\sc ASCII}
+codes of the letters that form the name \bison\ uses when it needs to
+refer to the corresponding token (thus, the second one is
+\toksa{}\numberstochars{34}{37}{116}{111}{107}{101}{110}{34}\end
+\.{\the\toksa} which might help explain why such an elaborate scheme
+has been chosen). The macro \.{\\tokeneq} is defined in
+\.{yymisc.sty}, which in turn is input by \.{yy.sty} but what about
+the token names themselves? In this case they were extracted
+automatically from the \CWEB\ source file by the parser during the
+\CWEAVE\ processing stage. All of these definitions can be
+overwritten to get the desired output (say, one might want to typeset
+\.{ID} in a roman font, as `identifier'; all that needs to be done is
+a macro that says \.{\\prettywordpair\{ID\}\{\{\\rm
+identifier\}\}}). The file \.{btokenset.sty} input above contains a
+number of such definitions.
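+
+For instance, a (hypothetical) pair of lines in the same spirit might
+read
+\begindemo
+^\prettywordpair{ID}{{\rm identifier}}
+^\prettywordpair{INT}{{\rm integer}}
+\nooutput
+\enddemo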
+
+@ To round off this short overview, I must mention a caveat associated
+with using the macros in this collection: while one of the greatest
+advantages of using \CWEB\ is its ability to rearrange the code in a
+very flexible way, the parser will either give up or produce
+unintended output if this feature is abused while describing the
+grammar. For example, in the code below
+@<A silly example@>=
+@G
+next_term:
+ stuff @> @<Rest of line@> @={@> a = f( x ); @=}
+@g
+@<A production@>@;
+
+@ the line titled |@<A production@>| is intended to be a rule defined
+later. Notice that while it seems that the parser was able to recognize
+the first code fragment as a valid \bison\ input, it misplaced the
+|@<Rest of line@>|, having erroneously assumed it to be a part of
+the action code for this grammar (later on we will go into the details of
+why it is necessary to collect all the non-verbatim output of \CWEAVE,
+even the one that contains no interesting \Cee\ code; hint: it has
+something to do with money (\.{\$}), also known as math and the way
+\CWEAVE\ processes the `gaps' between verbatim sections). The production
+line that follows did not fare as well: the parser gave up. There
+is simply no point in including such a small language fragment as a
+valid input for the grammar the parser uses to process the verbatim
+output.
+@<A production@>=
+@G
+ more stuff in this line {@> @[b = g(y);@]@=}
+@g
+
+@ Finally, if you forget that only the verbatim part of the output is
+looked at by the parser you might get something unrecognizable, such
+as
+@<Rest of line@>=
+ but not all of it
+
+@ To correct this, one can provide a more complete grammar fragment to
+allow the parser to complete its task successfully. In some cases,
+this imposes too strict a constraint on the programmer. Instead, the
+parser that pretty prints \bison\ grammars allows one to add {\it
+hidden context\/} to the code fragments above. The context is added
+inside \.{\\vb} sections using \CWEB's \.{@@t}$\ldots$\.{@@>} facility. The \CTANGLE\
+output is not affected by this while the code above can now be typeset as:
+@<A silly example@>=
+@G
+next_term:
+ stuff @> @t}\vb{\formatlocal{\let\peekstash\stashtoterm}}{@> @<Rest of line@> @t}\vb{FAKE}{@> @={@> a = f( x ); @=}
+@g
+@<A production@>@;
+
+@ $\ldots$ even a single line can now be displayed properly.
+@<A production@>=
+@G
+@t}\vb{\formatlocal{\skipheader} FAKE:}{@>
+ more stuff in this line {@> b = g( y ); @=}
+@g
+
+@ With enough hidden context, even a small rule fragment can be
+typeset as intended. The `action star' was inserted to reveal some of
+the context.
+@<Rest of line@>=
+@G
+@t}\vb{\formatlocal{\skipheader} FAKE:}{@>
+ but not all of it
+@t}\vb{\{\stashed{$\star$}\}}{@>
+@g
+@ What makes all of this even more confusing is that \CTANGLE\ will
+have no trouble outputting this as a(n almost, due to the
+intentionally bad \.{whoops} production above) valid \bison\ file
+(as can be checked by looking into \.{sill.y}). The author
+happens to think that one should not fragment the software into pieces
+that are too small: \bison\ is not \Cee\ so it makes sense to write
+\bison\ code differently. However, if the logic behind your code
+organization demands such fine fragmentation, hidden context provides
+you with a tool to show it off. A look inside the source of this
+document shows that adding hidden context can be a bit ugly so it is
+not recommended for routine use. The short example above is output in
+the file below.
+@(sill.y@>=
+ @<A silly example@>@;
+
+@*1 On debugging. This concludes a short introduction to the \bison\
+grammar pretty printing using this macro collection. It would be
+incomplete, however, without a short reference to debugging\footnote{Here
+we are talking about debugging the output produced by \CWEAVE\ when
+the included \bison\ parser is used, {\it not\/} debugging parsers
+written with the help of this software: the latter topic is covered in more
+detail later on}. There is a
+fair amount of debugging information that the macros can output;
+unfortunately, very little of it is tailored to the {\it use\/} of the
+macros in the \bison\ parser. Most of it is designed to help {\it
+build\/} a new parser. If you find that the parser gives up too often
+or even crashes (the latter is most certainly a bug in the parser
+itself), the first approach is to make sure that your code {\it
+compiles\/}, i.e.\ forget about the printed output and try to see if
+the `real' \bison\ accepts the code (just the syntax, no need to
+worry about conflicts and such).
+
+If this does not shed any light on why the macros seem to fail, turn
+on the debugging output by saying \.{\\trace$\ldots$true} for various
+trace macros. This can produce {\it a lot\/} of output, even for
+small fragments, so turn it on only for a section at a time. If you
+need still {\it more\/} details of the inner workings of the parser
+and the lexer, various other debugging conditionals are available. For
+example, \.{\\yyflexdebugtrue} turns on the debugging output for the
+scanner. There are a number of such conditionals that are discussed in
+the commentary for the appropriate \TeX\ macros.
+
+Remember, what you are seeing at this point is the parsing process of
+the \bison\ input file, not the one for {\it your\/} grammar (which
+might not even be complete at this point). However, if this fails, you
+are on your own: drop me a line if you figure out how to fix any bugs
+you find.
+
+@*1 Terminology. We now list a few definitions of the concepts used
+repeatedly in this documentation. Most of this terminology is
+rather standard. Formal precision is not the goal here, and intuitive
+explanations are substituted whenever possible.
+{%
+\def\aterm#1{\item{\sqebullet}{\ttl #1}: \ignorespaces}%
+\setbox0=\hbox{\sqebullet\enspace}
+\parindent=0pt
+\advance\parindent by \wd0
+\smallskip
+\aterm{bison parser} while, strictly speaking, not a formally defined
+term, this combination will always stand for one of the parsers generated
+by this package designed to parse a subset of the `official' grammar for
+\bison\ input files. All of these parsers are described later in
+this documentation. The term {\it main parser\/} will be
+used as a substitute in example documentation for the same purpose.
+
+\aterm{driver} a generic but poorly defined concept. In this
+documentation it is used predominantly to mean both the \Cee\ code and
+the resulting executable that outputs the \TeX\ macros that contain the
+parser tables, token values, etc., for the parsers built by the user. It
+is understood that the \Cee\ code of the `driver' is unchanged and the
+information about the parser itself is obtained by {\it including\/} the \Cee\
+file produced by \bison\ in the `driver' (see the examples supplied
+with the package).
+
+\aterm{lexer} a synonym for {\it scanner}, a subroutine that performs the {\it
+lexical analysis\/} phase of the parsing process, i.e.\ groups various
+characters from the input stream into parser {\it tokens}.
+
+\aterm{namespace} this is an overused bit of terminology meaning a
+set of names grouped together according to some relatively
+well defined principle. In a language without a well developed type
+system (such as \TeX) it is usually accompanied by a specially designed
+naming scheme. {\it Parser namespaces\/} are commonly used in this
+documentation to mean a collection of all the data structures describing a
+parser and its state, including tables, stacks, etc., named by using the
+`root' name (say \.{\\yytable}) and adding the name of the parser (for
+example, \.{[main]}). To support this naming scheme, a number of
+macros work in unison to create and rename the `data macros' accordingly.
+
+\aterm{symbolic switch} a macro (or an associative array of macros)
+that lets the \TeX\ parser generated by the package associate {\it
+symbolic term names\/} with the terms. Unlike the `real' parser, the
+parser created with this suite requires some extra setup as explained
+in the included examples (one can also consult the source for this
+documentation which creates but does not use a symbolic switch).
+
+\aterm{symbolic term name} a (relatively new) way to refer to stack
+values in \bison. In addition to using the `positional' names such as
+\.{\$}$n$ to refer to term values, one can utilize the new syntax:
+\.{\$}\.{[}{\it name\/}\.{]}. The `{\it name}' can be assigned by the
+user or can be the name of the nonterminal or token used in the
+productions.
+
+\aterm{term} in a narrow sense, an `element' of a grammar. Instead of
+a long winded definition, an example, such as \prodstyle{ID} should
+suffice. Terms are further classified into {\it terminals\/} (tokens)
+and {\it nonterminals\/} (which can be intuitively thought of as
+composite terms).
+
+\aterm{token} in short, an element of a set: an indivisible {\it term\/}
+produced for the parser by the scanner and usually encoded as an
+integer. \TeX's scanner uses a more
+sophisticated token classification, for example, $($character code,
+character category$)$ pairs, etc.
+
+}
+@** Languages, scanners, parsers, and \TeX. % Or $\ldots$
+$$\vbox{\halign to\hsize{\kern-1.5pt\it#\hfil\tabskip0pt plus1fil\cr
+Tokens and tables keep macros in check.\cr
+Make 'em with \bison, use \.{WEAVE} as a tool.\cr
+Add \TeX\ and \CTANGLE, and \Cee\ to the pool.\cr
+Reduce 'em with actions, look forward, not back.\cr
+Macros, productions, recursion and stack!\cr
+\noalign{\vskip2pt}
+\omit\hfil\eightpoint Computer generated (most likely)\cr}}
+$$
+\def\recount#1{${}^{(#1)}$}%
+In order to understand the parsing routines in this collection,
+it would help to gain some familiarity with the internals of the
+parsers produced by \bison\ for its intended target: \Cee. A person
+looking inside a parser delivered by \bison\ would
+quickly discover that the parsing procedure itself (|yyparse|)
+occupies a rather small portion of the file. If (s)he were to further
+reduce the size of the file by removing all the preprocessor
+directives intended to anticipate every conceivable combination of the
+operating system, compiler, and \Cee\ dialect, and various reporting
+and error logging functions it would become very clear that the most
+valuable product of \bison's labor is a collection of integer {\it
+tables\/} that control the actions of the parser routine. Moreover,
+the routine itself is an extremely concise and well-structured loop
+composed of |goto|'s and a number of numerical conditionals. If one
+were to think of a way of accessing arrays and processing conditionals
+in the language of one's choice, once the tables produced by \bison\
+have been converted into a form suitable for consumption by the
+appropriate language engine, the parser implementation becomes
+straightforward. Or nearly so.
+
+The {\it scanning\/} (or {\it lexing\/}) step of this process---a way
+to convert a stream of symbols into a stream of integers---also
+deserves some attention here. There are a number of excellent tools
+written to automate this step in much the same fashion as \bison\
+automates the generation of parsers. One such tool, \flex, though
+(in the opinion of this author) slightly lacking in simplicity and
+elegance compared to \bison, was used to implement the lexer for
+this software suite. Lexing in \TeX\ will be discussed in considerable
+detail later in this manual.
+
+The language of interest in our case is, of course, \TeX, so our
+future discussion will revolve around the five elements mentioned
+above: \recount{1}data structures (mainly arrays and stacks),
+\recount{2}converting
+\bison's output into a form suitable for \TeX's consumption,
+\recount{3}processing raw streams of \TeX's tokens and converting them into
+streams of parser tokens, \recount{4}the implementation of \bison's
+|yyparse| in \TeX, and, finally, \recount{5}producing \TeX\ output via {\it
+syntax-directed translation} (which requires an appropriate
+abstraction to represent \bison's actions inside \TeX). We shall
+begin by discussing the parsing process itself.
+
+@*1 Arrays, stacks and the parser.
+Let us briefly examine the programming environment offered by \TeX.
+Designed for typesetting, \TeX's remarkable language
+provides a layer of macro processing atop a set of commands that
+produce the output fulfilling its primary mission: delivering page
+layouts. In The \TeX book, macro {\it expansion\/} is likened to
+mastication, whereas \TeX's main product, the typographic output is the
+result of its `digestion' process. Not everything that goes through
+\TeX's digestive tract ends up leaving a trace on the final page: a
+file full of \.{\\relax}'s will produce no output, even though
+\.{\\relax} is not a macro, and thus would have to be processed by
+\TeX\ at the lowest level.
+
+It is time to describe the details of defining suitable data structures
+in \TeX. At first glance, \TeX\ provides rather standard means of
+organizing and using general memory. At the core of its generic
+programming environment is an array of \.{\\count}$\,n$ {\it
+registers\/}, which may be viewed as general purpose integer variables
+that are randomly accessible by their indices. The integer arithmetic
+machinery offered by \TeX\ is spartan but entirely adequate for the sort of
+operations a parser would perform: mostly additions and
+comparisons.
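+
+To give the flavor of the arithmetic involved, a single table-driven
+step might reduce to a couple of lines like the ones below (the
+\.{\\yy}$\ldots$ control sequences are illustrative, not the ones used
+by the package):
+\begindemo
+^\count0=\yystate \advance\count0 by \yytoken
+^\ifnum\count0>\yytablesize \yyerror \fi
+\nooutput
+\enddemo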
+
+Is the \.{\\count} array a good way to store tables in \TeX? Probably
+not. The first factor is the {\it size\/} of this array: only 256
+\.{\\count} registers exist in a standard \TeX\ (the actual number of
+such registers on a typical machine running \TeX\ is significantly
+higher but this author is a great believer in standards, and to his
+knowledge, none of the standardization efforts in the \TeX\ world has
+resulted in anything even close to the definitive masterpiece that is
+The \TeX book). The issue of size can be mitigated to some extent by
+using a number of other similar arrays used by \TeX\ (\.{\\catcode},
+\.{\\uccode}, \.{\\dimen}, \.{\\sfcode} and others can be used for
+this purpose as long as one takes care to restore the `sane' values
+before control is handed off to \TeX's typesetting mechanisms). If a
+table has to span several such arrays, however, the complexity of
+accessing code would have to increase significantly, and the issue of
+size would still haunt the programmer.
+
+The second factor is the use of several registers by \TeX\ for special
+purposes (in addition, some of these registers can only store a
+limited range of values). Thus, the first 10 \.{\\count} registers are
+used by plain \TeX\ for (well, {\it intended\/} for, anyway) the
+purposes of page accounting: their values would have to be carefully
+saved and restored before and after each parsing call,
+respectively. Other registers (\.{\\catcode} in particular) have even
+more disrupting effects on \TeX's internal mechanisms. While all of
+this can be managed (after all, running \TeX\ as the arithmetic engine
+of a parser suspends the need for any typographic or other
+specialized functions controlled by these arrays), the added
+complexity of using several memory banks simultaneously and the speed penalty
+caused by the need to store and restore register values make this
+approach much less attractive.
+
+What other means of storing arrays are provided by \TeX? Essentially,
+only three options remain: \.{\\token} registers, macros holding whole
+arrays, and associative arrays accessed through
+\.{\\csname}$\,\ldots\,$\.{\\endcsname}. In the first two cases, if care
+is taken to store such arrays in an
+appropriate form, one can use \TeX's \.{\\ifcase} primitive to access
+individual elements. The trade-off is the speed of such
+access: it is {\it linear\/} in the size of the array for most
+operations, and worse than that for others, such as removing the last
+item of an array. Using clever ways
+of organizing such arrays, one can improve the linear access time to
+$O(\log n)$ by simply modifying the access macros but at the moment, a
+straightforward \.{\\ifcase} is used after expanding a list macro or
+the contents of a \.{\\token}$\,n$ register in an {\it un\/}optimized
+parser. An {\it optimized\/} parser uses associative arrays.
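+
+A minimal sketch of such a list macro array (the name and the values
+are made up):
+\begindemo
+^\def\yysmalltable#1{\ifcase#1 17\or 42\or 33\else 0\fi}
+^\count0=\yysmalltable{2} % a linear scan: \count0 is now 33
+\nooutput
+\enddemo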
+
+The array discussion above is just as applicable to {\it stacks\/}
+(indeed, an array is the most common form of stack
+implementation). Since stacks pop up and disappear frequently (what
+else are stacks to do?), list macros are usually used to store
+them. The optimized parser uses a separate \.{\\count} register to
+keep track of the top of the stack in the appropriate associative
+array.
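+
+A `push' onto such a stack might thus be sketched as follows (all the
+names are illustrative):
+\begindemo
+^\expandafter\edef\csname yys\number\yystacktop\endcsname{\the\yylval}%
+^\advance\yystacktop by 1 % the dedicated \count register
+\nooutput
+\enddemo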
+
+Let us now switch our attention
+to the code that implements the parser and scanner {\it functions\/}.
+If one has spent some time writing \TeX\ macros of any sophistication
+(or any macros, for that matter), (s)he must be familiar with the general
+feeling of frustration and the desire to `just call a function here and move
+on'. Macros produce {\it tokens\/}, however, and tokens must either
+expand to nothing or stay and be contributed to your input, or worse,
+be out of place and produce an error. One way to sustain a stream
+of execution with macros is {\it tail recursion\/} (i.e.~always expanding the
+{\it last token left standing}).
+
+As we have already discussed, \bison's
+|yyparse()| is a well laid out loop organized as a sequence of
+|goto|'s (no reason to become religious about structured programming
+here). This fact, and the following well known trick, make \Cee\ to \TeX\
+translation almost straightforward.
+
+% The macro mess below looks painful but this is the only place such layout is used
+% The approach can be easily generalized and put in limbo.sty but it seems
+% a bit redundant at this point.
+
+\newcount\piccount
+\newdimen\lasthsize
+
+\setbox5=\vtop{
+\demomargin=0pt
+\let\demoastyle\empty
+\begindemo
+^label A: ...
+\nooutput
+^ if**L**Krm(condition)**N
+^ goto C;
+\nooutput
+^label B: ...
+\nooutput
+^ goto A;
+\nooutput
+^label C: ...
+\nooutput
+\enddemo
+}
+\dp5=\z@@
+
+\setbox3=\vtop{
+\demomargin=0pt
+\let\demoastyle\empty
+\begindemo
+^\if**L**Krm(condition)**N
+^ \let\next=\labelC
+^\else
+^ \let\next=\labelAtail
+\enddemo
+}
+\dp3=\z@@
+
+\newdimen\lastdepth
+
+\def\startfitpar{%
+ \bgroup
+ \lasthsize=\hsize
+ \advance\lasthsize-1.5in
+ \vsize=\baselineskip
+ \topskip=\z@@
+ \setbox0\box2 % empty it
+ % this sounds good at first but there is no good way to pull the insertions out after the
+ % box manipulations that follow;
+ % insertions will thus be contributed to whatever page was being worked on when the
+ % picture insertions {\it started}; hence, if these happen to start at the very top of the page,
+ % any insertion that follows will be contributed to the previous page; we correct this for footnotes
+ % below
+ % \holdinginserts=1
+ \output{%
+ \global\setbox2=\vbox{
+ \ifvoid2
+ \else
+ \prevdepth=\dp2
+ \unvbox2
+ \fi
+ \lastdepth=\dp255
+ \unvbox255
+ % this would be tempting, however, the \eject that follows should disappear
+ % in addition, one really should not be playing with page breaking in the middle of
+ % such tricky insertions
+ % \penalty\outputpenalty
+ % \kern-\lastdepth % to make sure \baselineskip is accounted for
+ }%
+ }\eject
+ \output{%
+ \setbox0=\vbox{%
+ \unvbox255%
+ }% \lastbox would almost work ... if not for insertions
+ \global\advance\piccount1
+ \global\setbox2=\vbox{%
+ \prevdepth=\dp2 \unvbox2
+ \hbox to\hsize{%
+ \ifnum\piccount<15
+ \hbox to1.5in{%
+ \ifnum\piccount=1
+ \ \box5
+ \fi
+ \hfill}%
+ \fi
+ \box0 \hfill
+ \ifnum\piccount=1
+ \box3 \ %
+ \fi
+ \ifvoid\footins % reinsert footnotes
+ \else
+ \insert\footins{\unvbox\footins}%
+ \fi
+ }%
+ }%
+ }%
+ \parshape=15
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt 2.7in
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \lasthsize
+ 0pt \hsize
+}
+
+\def\endfitpar{%
+ \par
+ \eject
+ \egroup
+ % see the comment above
+ % \holdinginserts=0
+ \prevdepth=\dp2
+ \unvbox2
+}
+
+\startfitpar
+\noindent Given the code on the left (where |goto|'s
+are the only means of branching but can appear inside conditionals),
+one way to translate it into \TeX\ is to define a set of macros (call
+them \.{\\labelA}, \.{\\labelAtail} and so forth for clarity) that end in
+\.{\\next} (a common name for this purpose). Now, \.{\\labelA} will
+implement the code that comes between \.{label A:} and \.{goto C;},
+whereas \.{\\labelAtail} is responsible for the code after \.{goto C;}
+and before \.{label B:}
+(provided no other |goto|'s intervene which can always be
+arranged). The conditional which precedes \.{goto C;} can now be written in
+\TeX\ as presented on the right, where (condition) is an appropriate
+translation of the corresponding condition
+in the code being translated (usually, one of `$=$' or `$\not=$'). Further
+details can be extracted from the \TeX\ code that implements these
+functions where the corresponding \Cee\ code is presented alongside
+the macros that mimic its functionality%
+\footnote{Running the risk of overloading the reader with details, the author
+would like to note that the actual implementation follows a {\it slightly\/} different
+route in order to avoid any \.{\\let} assignments or changing the
+meaning of \.{\\next}}.
+This concludes an overview of the general approach.
+It is time to consider the way characters get consumed
+on the lower levels of the macro hierarchy and the interaction between the different
+layers of the package.
+\endfitpar
+
+@*1 \TeX\ into tokens.
+Thus far we have covered the ideas
+behind items \recount{1} and \recount{4} on our list. It is time to
+discuss the lowest level of processing done by these macros:
+converting \TeX's tokens into the tokens consumed by the parser,
+i.e.\ part\recount{3} of the plan. Perhaps it would be most appropriate
+to begin by defining the term {\it token}.
+
+As commonly defined, a token is simply an element of a set. Depending on
+how much structure the said set possesses, a token can be represented by
+an integer or a more complicated data structure. In the discussion
+below, we will be dealing with two kinds of tokens: the tokens
+consumed by the parsers and the \TeX\ tokens seen by the input
+routines. The latter play the role of {\it characters\/} that combine
+to become the former. \bison's internal representation for its tokens
+is non-negative integers, so this is what a scanner must
+produce.
+
+\TeX's tokens are a good deal more sophisticated: they can be
+either pairs $(c_{\rm ch}, c_{\rm cat})$, where $c_{\rm ch}$ is the
+character code and $c_{\rm cat}$ is \TeX's category code ($1$ and $2$ for
+group characters, $5$ for end of line, etc.), or {\it control
+sequences\/}, such as \.{\\relax}. Some of these tokens (control
+sequences and {\it active}, i.e.~category~13 characters) can have
+complicated internal structure (expansion). The situation is further
+complicated by \TeX's \.{\\let} facility, which can create
+`character-like' control sequences, and the lack of conditionals
+to distinguish them from the `real' characters. Finally, not all pairs
+can appear as part of the input (say, there is no $(n, 0)$ token for
+any $n$, in the terminology above).
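+
+The \.{\\let} facility just mentioned is easy to demonstrate: the
+first line below is how plain \TeX\ defines \.{\\bgroup} and
+\.{\\egroup}, and the \.{\\ifcat} test that follows cannot tell the
+resulting `character-like' control sequence from a `real' brace:
+\begindemo
+^\let\bgroup={ \let\egroup=}
+^\ifcat\bgroup{ $\ldots$\fi % true: both tokens are of category 1
+\nooutput
+\enddemo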
+
+The scanner expects to see {\it characters} in its input, which are
+represented by their {\sc ASCII} codes, i.e.~integers between $0$ and
+$255$ (actually, a more general notion of the Unicode character is
+supported but we will not discuss it further). Before character codes
+appear as the input to the scanner, however, and make its integer
+table-driven mechanism `tick', a lot of work must be done to collect
+and process the stream of \TeX\ tokens produced after \CWEAVE\ is done
+with your input. This work becomes further complicated when the
+typesetting routines that interpret the parser's output must sneak
+outside of the parsed stream of text (which is structured by the
+parser) and insert the original \TeX\ code produced by \CWEAVE\ into
+the page.
+
+\splint\ comes with a customizable input routine of
+moderate complexity (\.{\\yyinput}) that classifies all \TeX\ tokens
+into seven categories: `normal' spaces (i.e.~category~10 tokens,
+skipped by \TeX's parameter scanning mechanism),
+`explicit' spaces (includes the control sequences \.{\\let} to \.{\ },
+as well as \.{\\\ }), groups ({\it avoid} using \.{\\bgroup} and \.{\\egroup} in
+your input but `real', \.{\{}$\ldots$\.{\}} groups are fine), active
+characters, normal characters (of all character categories that can
+appear in \TeX\ input, including \.{\$}, \.{\^}, \.{\#}, \.{a}--\.{Z},
+etc.), single letter control sequences, and multi-letter control
+sequences. Each of these categories can be processed separately to
+`fine-tune' the input routine to the problem at hand. The input
+routine is not very fast, instead, flexibility was the main
+goal. Therefore, if speed is desirable, a customized input routine
+is a great place to start. As an example, a minimalistic
+\.{\\yyinputtrivial} macro is included.
+
+When \.{\\yyinput} `returns' by calling \.{\\yyreturn} (which is a
+macro you design), your lexing routines have access to three
+registers: \.{\\yycp@@}, that holds the character value of the
+character just consumed by \.{\\yyinput}, \.{\\yybyte}, that most of
+the time holds the token just removed from the input,
+and \.{\\yybytepure}, that (again, with very few
+exceptions) holds a `normalized' version of the read character (i.e.~a
+character of the same character code as \.{\\yycp@@}, and category~11
+(to be even more precise (and to use nested parentheses), `normalized'
+characters have the same category code as the current category code of
+\.{@@})).
+
+Most of the time it is the character code one needs (say, in the case
+of \.{\\\{}, \.{\\\}}, \.{\\\&} and so on) but under some circumstances the
+distinction is important (outside of \.{\\vb\{}$\ldots$\.{\}}, the sequence
+\.{\\1} has nothing to do with the digit `\.{1}'). This mechanism
+makes it easy to examine the consumed token. It also forms
+the foundation of the `hidden context' passing mechanism described later.
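+
+In its simplest form, the \.{\\yyreturn} one designs might be a
+dispatcher like the sketch below (only the registers described above
+are real; the branch macros are illustrative):
+\begindemo
+^\def\yyreturn{%
+^    \ifnum\yycp@@>0 % an ordinary character was consumed
+^        \expandafter\yycollectchar
+^    \else % the end of input was reached
+^        \expandafter\yyfinishinput
+^    \fi
+^}
+\nooutput
+\enddemo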
+
+The remainder of this section discusses the internals of \.{\\yyinput}
+and some of the design trade-offs one has to make while working on
+processing general \TeX\ token streams. It is typeset in `small print'
+and can be skipped if desired.
+\smallskip
+\begingroup
+\abovedisplayskip=5pt%
+\abovedisplayshortskip=2pt%
+\belowdisplayskip=5pt%
+\belowdisplayshortskip=2pt%
+\fnotesstart=1
+\fnotesspan=2
+\noofcolumns=2
+\icgap=1em%
+\eightpoint
+\linecount=73
+\setmcparams
+\def\.#1{{\chardef\\=`\\\chardef\&=`\&\tt #1}}%
+\dsskip=0pt%
+\begindoublecols
+To examine every token in its path (including spaces that are easy to
+skip), the input routine uses one of the two well-known {\sc \TeX}nologies:
+\.{\\futurelet\\next\\examinenext} or equally effective
+\hbox{\.{\\afterassignment\\next\\let={\tt\char"20}}}.
+Recursively inserting one of these sequences, \.{\\yyinput} can go
+through any list of tokens, as long as it knows where to stop
+(i.e.~return an end of file character). The
+signal to stop is provided by the \.{\\yyeof}
+primitive which should not appear in any `ordinary' text
+presented for parsing, other than for the purpose of providing such a
+stop signal. Even the dependence on \.{\\yyeof} can be eliminated if
+one is willing to invest the time in writing macros that juggle \TeX's
+\.{\\token} registers and only limit oneself to input from such
+registers (which is, aside from an obvious efficiency hit, a strain on
+\TeX's memory, as you have to store multiple (3 in the general case)
+copies of your input to be able to back up when the lexer makes a
+wrong choice). There does not seem to be a way of doing it unless the
+text has been stored in a \.{\\token} register first (or storing the
+whole input as a {\it parameter\/} for the appropriate macro: this
+scheme is remarkably powerful and leads to {\it expandable\/} versions
+of very complicated macros, although the amount of effort required to
+write such macros grows at a frightening rate). All of these are
+non-issues for the text inside \.{\\vb\{}$\ldots$\.{\}} and the care that
+\.{\\yyinput} takes in processing characters inside such lists is an
+overkill. In a more `hostile' environment (such as the one encountered
+by the now obsolete \.{\\Tex} macros), this extra attention to detail pays
+off in the form of a more robust input mechanism.
+
+One subtlety deserves a special mention here, as it can be important
+to the designer of `higher-level' scanning macros. Two types of tokens
+are extremely difficult to deal with whenever \TeX's own lexing
+mechanisms are used: (implicit) spaces and even more so, braces. We
+will only discuss braces here, however, almost everything that follows
+applies equally well to spaces (category 10 tokens to be precise), with
+a few simplifications (or complications, in a couple of places). To
+understand the difficulty, let's consider one of the approaches above:
+$$
+\.{\\futurelet\\next\\examinenext}.
+$$
+The macro \.{\\examinenext}
+usually looks at \.{\\next} and inserts another macro (usually also called
+\.{\\next}) at the very end of its expansion list. This macro usually
+takes one parameter, to consume the next token. This mechanism works
+flawlessly, until the lexer encounters a \.{\{}br\.{,}sp\.{\}}ace. The \.{\\next}
+sequence, seen by \.{\\examinenext} contains a lot of information
+about the brace ahead: it knows its category code (left brace, so $1$), its
+character code (in case there was, say a \.{\\catcode`\\[=1{\tt\char`\ }}
+earlier) but not whether it is a `real' brace (i.e.\ a character
+\.{\{}$_1$) or an implicit one (a \.{\\bgroup}). There is no way to find
+that out until the control sequence `launched' by \.{\\examinenext}
+sees the token as a parameter.
+
+If the next token is a `real' brace, however,
+\.{\\examinenext}'s successor will never see the token itself: the
+braces are stripped by \TeX's scanning mechanism. Even if it finds a
+\.{\\bgroup} as the parameter, there is no guarantee that the actual
+input was not \.{\{\\bgroup\}}. One way to handle this is by using
+\.{\\string} ahead of any consumption of the next token. If prior to
+expanding \.{\\string} care has been taken to set the \.{\\escapechar}
+appropriately (remember, we know the character code in advance), as
+soon as one sees a character with \.{\\escapechar}'s character code,
+(s)he knows that an implicit brace has just been seen. One added
+complication to all this is that a very determined programmer can
+insert an {\it active\/} character (using, say, the \.{\\uccode}
+mechanism) that has the {\it same\/} character code as the {\it
+brace\/} token that it has been \.{\\let} to! Setting this possibility
+aside, the \.{\\string} mechanism (or, its cousin, \.{\\meaning}) is
+not perfect: both produce a sequence of category 12 and 10 tokens. If
+it is indeed a brace character that we just saw, we can consume the next
+token and move on but what if this was a control sequence? After all,
+just as easily as \.{\\string} makes a sequence into characters,
+\.{\\csname}$\,\ldots\,$\.{\\endcsname} pair will make any sequence of
+characters into a control sequence. Huh~$\ldots$
+
+What we need is a backup mechanism: if one has a copy of the
+token sequence ahead, one can use \.{\\string} to see if it is a real
+brace first, and if it is, consume it and move on (the active character
+case can be handled as the implicit case below, with one extra backup
+to count how many tokens have been consumed). At this point one has to {\it
+reinsert\/} the brace in case, at some point, a future `back up'
+requires that the rest of the tokens are removed from the output (to
+avoid `\.{Too many \}'s}' complaints from \TeX). This can be done by using
+the \.{\\iftrue\{\\else\}\\fi} trick but of course, some bookkeeping is
+needed to keep track of how far inside the brace groups we
+are.
+
+If it is an implicit brace, more work is needed: read all the
+characters that \.{\\string} produced (and maybe more), then remember
+the number of characters consumed. Remove the rest of the input using
+the method described above and restart the scanning from the same point
+knowing that the next token can be scanned as a parameter.
+
+Another strategy is to design a general enough macro that counts
+tokens in a token register and simply recount the tokens after every
+brace is consumed.
+
+Either way, it takes a lot of work. If anyone would
+like to pursue the counting strategy, simple counting macros
+are provided in \.{/examples/count/count.sty}.
+The macros in this example
+supply a very general counting mechanism that does not depend on
+\.{\\yyeof} (or {\it any\/} other token) being `special' and can count the
+tokens in any token register, as long as none of those tokens is an
+\.{\\outer} control sequence. In other words, if the macro is used
+immediately after the assignment to the token register, it should
+always produce a correct count.
+
+Needless to say, if such a general mechanism is desired, one has to
+look elsewhere. The added complications of treating spaces (\TeX\
+tends to ignore them most of the time) make this a torturous exercise
+in \TeX's macro wizardry. The included \.{\\yyinput} has two ways of
+dealing with braces: strip them or view the whole group as a
+token. Pick one or write a different \.{\\yyinput}. Spaces, implicit
+or explicit are reported as a specially selected character code and
+consumed with a likeness of
+$$
+\hbox{\.{\\afterassignment\\moveon\\let\\next={\tt\char`\ }}}.
+$$
+
+Now that a steady stream of character codes is arriving at \.{\\yylex}
+after \.{\\yyreturn}, the job of converting it into numerical tokens
+is performed by the {\it scanner} (or {\it lexer\/}, or {\it tokenizer\/},
+or even {\it tokener}), discussed in the next section.
+\enddoublecols
+\endgroup
+
+@*1 Lexing in \TeX. In a typical system that uses a parser to process
+text, the parsing pass is usually split into several stages: the raw
+input, the lexical analysis (or simply {\it lexing}), and the parsing
+proper. The {\it lexing\/} (also called {\it scanning}, we use these
+terms interchangeably) clumps various sequences of characters into
+{\it tokens\/} to facilitate the parsing stage. The reasons for this
+particular hierarchy are largely pragmatic and are partially historic
+(there is no reason that {\it parsing\/} cannot be done in multiple
+phases, as well, although it usually isn't).
+
+If one remembers a few basic facts from the formal language theory, it
+becomes obvious that a lexer, which parses {\it regular\/} languages,
+can (theoretically) be replaced by an {\sc LALR} parser, which parses {\it
+context-free\/} ones (or some subset thereof that is
+still a superset of all regular languages). A common justification given for
+creating specialized lexers is efficiency and speed. The
+reality is somewhat more subtle. While we do care about the efficiency of
+parsing in \TeX, having a specialized scanner is important for
+a number of different reasons.
+
+The real advantage of having a dedicated scanner is the ease with which it
+can match incomplete inputs and back up. A parser can, of course,
+{\it recognize\/} any valid input that is also acceptable to a lexer, as well
+as {\it reject\/} any input that does not form a valid token. Between
+those two extremes, however, lies a whole realm of options that a
+traditional parser will have great difficulty exploring. Thus, to
+mention just one example, it
+is relatively easy to set up a DFA\footnote{Which stands for
+Deterministic Finite Automaton, a common (and mathematically unique)
+way of implementing a scanner for regular languages. Incidentally {\sc
+LALR} mentioned above is short for Look Ahead Left to Right.}
+so that the {\it longest\/}
+matching input is accepted. The only straightforward way to do this
+with a traditional parser is to parse longer and longer inputs again
+and again. While this process can be optimized to a certain degree,
+the fact that a parser has a {\it stack\/} to maintain limits its
+ability to back up.
+
+As an aside, the mechanism by which \CWEB\ assembles its `scraps'
+into chunks of recognized code is essentially iterative lexing,
+very similar to what a human does to make sense of complicated
+texts. Instead of trying to match the longest running piece of text,
+\CWEB\ simply looks for patterns to combine inputs into larger
+chunks, which can later be further combined. Note that this is not
+quite the same as the approach taken by, say, {\sc GLR} parsers, where
+the parser must match the {\it whole\/} input or declare a
+failure. Where a \CWEB-type parser may settle for the first available
+match (or the longest available) a {\sc GLR} parser must try {\it
+all\/} possible matches or use an algorithm to reject the majority of
+the ones that are bound to fail in the end.
+
+This `\CWEB\ way' is also different from a traditional `strict' {\sc
+LR} parser/scanner approach and certainly deserves serious
+consideration when the text to be parsed possesses some rigid
+structure but the parser is only allowed to process it one small
+fragment at a time.
+
+Returning to the present macro suite, the lexer produced by \flex\
+uses integer tables similar to those employed by \bison\ so the
+usual {\sc\TeX}niques used in implementing \.{\\yyparse} are fully
+applicable to \.{\\yylex}.
+
+An additional advantage provided by having a \flex\ scanner implemented
+as part of the suite is the availability of the original \bison\ scanner written
+in \Cee\ for use by the macro package.
+
+This said, the code generated by \flex\ contains a few idiosyncrasies
+not present in the \bison\ output. These `quirks' mostly involve
+handling of end of input and error conditions. A quick glance at the
+\.{\\yylex} implementation will reveal a rather extensive collection of
+macros designed to deal with end of input actions.
+
+Another difficulty one has to face in translating \flex\ output into
+\TeX\ is a somewhat unstructured namespace delivered in the final
+output (this is partially due to the \POSIX\ standard that \flex\
+strives to follow). One consequence of this `messy' approach is that the
+writer of a \flex\ scanner targeted to \TeX\ has to declare \flex\
+`states' (more properly called {\it subautomata}) twice: first for the
+benefit of \flex\ itself, and then again, in the {\it \Cee\ preamble\/}
+portion of the code to output the states to be used by the action code
+in the lexer. The \.{Define\_State($\ldots$)} macro is provided for this
+purpose. This macro can be used explicitly by the programmer or be
+inserted by a specially designed parser.
+Using \CWEB\ helps to keep these declarations together.
+
+The `hand-off' from the scanner to the parser is implemented
+through a pair of registers: \.{\\yylval}, a token register
+containing the value of the returned token and \.{\\yychar}, a
+\.{\\count} register that contains the numerical value of the
+token to be returned.
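+
+Schematically, the code executed upon matching a token thus amounts to
+something like the following (the specific number and value are made
+up):
+\begindemo
+^\yylval={an identifier}%
+^\yychar=258 % the numeric code \bison\ chose for this token
+\nooutput
+\enddemo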
+
+Upon matching a token, the scanner passes one crucial piece of
+information to the user: the character sequence representing the token
+just matched (\.{\\yytext}). This is not the whole story
+though. There are three more token sequences that are made available
+to the parser writer whenever a token is matched.
+
+The first of these is simply a `normalized' version of
+\.{\\yytext} (called \.{\\yytextpure}). In most cases it
+is a sequence of \TeX\ tokens with the same character codes as the one
+in \.{\\yytext} but with their category codes set to 11. In
+cases when the tokens in \.{\\yytext} are {\it not}
+$(c_{\rm ch}, c_{\rm cat})$ pairs, a few simple
+conventions are followed, some of which will be explained below. This
+sequence is provided merely for convenience and its typical use is to
+generate a key for an associative array.
+
+The other two sequences are special `stream pointers' that provide
+access to the extended scanner mechanism in order to implement passing
+of `formatting hints' to the parser without introducing any changes to
+the original grammar. As the mechanism itself and the motivation
+behind it are somewhat subtle, let me spend a few moments discussing
+the range of formatting options desirable in a generic pretty-printer.
+
+Unlike strict parsers employed by most compilers, a parser designed
+for pretty printing cannot afford being too picky about the structure
+of its input (\cite[Go] calls such parsers `loose'). To provide
+a simple illustration, an isolated identifier, such as `\.{lg\_integer}'
+can be a type name, a variable name, or a structure tag (in a language like
+\Cee\ for example). If one expects the pretty printer to typeset this
+identifier in a correct style, some context must be supplied, as
+well. There are several strategies a pretty printer can employ to get
+a hold of the necessary context. Perhaps the simplest way to handle
+this, and to reduce the complexity of the pretty printing algorithm, is
+to insist on the user providing enough context for the parser to do
+its job. For short examples like the one above, this is an acceptable
+strategy. Unfortunately, it is easy to come up with longer snippets of
+grammatically deficient text that a pretty printer should be expected
+to handle. Some pretty printers, such as the one employed by \CWEB\
+and its ilk (the original \.{WEB}, \.{FWEB}), use a very flexible
+bottom-up technique that tries to make sense of as large a portion of
+the text as it can before outputting the result (see also \cite[Wo],
+which implements a similar algorithm in \LaTeX).
+
+The expectation is that this algorithm will handle the majority (about
+90\%? it would be interesting to carry out a study in the spirit of
+the ones discussed in \cite[Jo] to find out) of the
+cases with the remaining few left for the author to correct. The
+question is, how can such a correction be applied?
+
+\CWEB\ itself provides two rather different mechanisms for handling
+these exceptions. The first uses direct typesetting commands (for
+example, \.{@@/} and \.{@@\#} for canceling and
+introducing a line break, resp.) to change the typographic output.
+
+The second (preferred) way is to supply {\it hidden context\/} to the
+pretty-printer. Two commands, \.{@@;} and
+\.{@@[}$\ldots$\.{@@]} are used for this purpose. The
+former introduces a `virtual semicolon' that acts in every way like a
+real one except it is not typeset (it is not output in the source file
+generated by \CTANGLE, either, but this has nothing to do with pretty
+printing, so I will not mention \CTANGLE\ anymore). For
+instance, from the parser's point of view, if the preceding text was
+parsed as a `scrap' of type {\it exp}, the addition of \.{@@;}
+will make it into a `scrap' of type {\it stmt\/} in \CWEB's
+parlance. The second construct (\.{@@[}$\ldots$\.{@@]}),
+is used to create an {\it exp\/} scrap out of whatever happens to be
+inside the brackets.
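+
+For instance (a made-up fragment), in
+\begindemo
+^if (x > 0) f(x)@@;
+^@@[y = x++@@]
+\enddemo
+the first line now parses as a complete statement while the second
+becomes an {\it exp\/} scrap regardless of its contents.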
+
+This is a powerful tool at the author's disposal. Stylistically,
+this is the right way to handle exceptions as it forces the writer to
+emphasize the {\it logical\/} structure of the formal
+text. If the pretty printing style is changed
+extensively later, the texts with such hidden contexts should be able to
+survive intact in the final document (as an example, using a break
+after every statement in \Cee\ may no longer be considered
+appropriate, so any forced break introduced to support this convention
+would now have to be removed, whereas \.{@@;}'s would simply
+quietly disappear into the background).
+
+The same hidden context idea has another important advantage: with
+careful grammar fragmenting (facilitated by \CWEB's or any other
+literate programming tool's `hypertext' structure) and a more diverse
+hidden context (or even arbitrary hidden text) mechanism, it is
+possible to use a strict parser to parse incomplete language
+fragments. For example, the productions that are needed to parse
+\Cee's expressions form a complete subset of the grammar. If the
+grammar's `start' symbol is changed to {\it expression\/} (instead of
+the {\it translation-unit\/} as it is in the full \Cee\ grammar), a
+variety of incomplete \Cee\ fragments can now be parsed and
+pretty-printed. Whenever such granularity is still too `coarse',
+carefully supplied hidden context will give the pretty printer enough
+information to adequately process each fragment. A number of such {\it
+sub}-parsers can be tried on each fragment (this may sound
+computationally expensive; in practice, however, a carefully chosen
+hierarchy of parsers will finish the job rather quickly) until a
+correct parser produces the desired output (this approach is similar
+to, although not quite the same as, the one employed by the {\it
+General LR parsers}).
+
+This somewhat lengthy discussion brings us to the question directly
+related to the tools described in this article: how does one provide
+typographical hints or hidden context to the parser?
+
+One obvious solution is to build such hints directly into the
+grammar. The parser designer can, for instance, add new tokens
+(say, \.{BREAK\_LINE}) to the grammar and extend the
+production set to incorporate the new additions. The risk of
+introducing new conflicts into the grammar is low (although not
+entirely non-existent, due to the lookahead limitations of LR(1)
+grammars) and the changes required are easy, although very tedious, to
+incorporate.
+
+In addition to being labor intensive, this solution has two other
+significant shortcomings: it alters the original grammar and hides its
+logical structure; it also `bakes' the pretty-printing conventions
+into the language structure (making `hidden' context much less
+`stealthy'). It does avoid the `synchronicity problem' mentioned
+below.
+
+A marginally better technique is to introduce a new regular expression
+recognizable by the scanner, which will then do all the necessary
+bookkeeping upon matching the sequence. All the difficulties with
+altering the grammar mentioned above apply in this case, as well, only
+at the `lexical analysis level'. At a minimum, the set of tokens
+matched by the scanner would have to be changed.
+
+A much better approach involves inserting the hints at the input stage and
+passing this information to the scanner and parser as part of the token `values'. The
+hints themselves can masquerade as characters ignored by the scanner
+(white space, for example) and be preprocessed by a specially designed
+input routine. The scanner then simply passes on the values to the
+parser. This makes hints, in effect, invisible.
+
+The difficulty lies in synchronizing the token production with the
+parser. This subtle complication is very familiar to anyone who has
+designed \TeX\ output routines: the parser and the lexer are not
+synchronous, in the sense that the scanner might be reading several
+(in the case of the general LR$(n)$ parsers) tokens ahead of the
+parser before deciding on how to proceed (the same way \TeX\ can
+consume a whole paragraph's worth of text before exercising its page
+builder).
+
+If we simple-mindedly let the scanner return every hint it has encountered
+so far, we may end up feeding the parser the hints meant for the token
+that appears {\it after\/} the fragment the parser is currently working
+on. In other words, when the scanner `backs up' it must correctly back
+up the hints as well.
+
+This is exactly what the scanner produced by the tools in this package
+does: along with the main stream of tokens meant for the parser, it
+produces two hidden streams (called the \.{\\format} stream and
+the \.{\\stash} stream) and provides the parser with two
+strings (currently only strings of digits are used, although arbitrary
+sequences of \TeX\ tokens can be used as pointers) with the promise
+that {\it all the `hints' between the beginning of the corresponding
+stream and the point labeled by the current stream pointer appeared
+among the characters up to and, possibly, including the ones matched
+as the current token}. The macros to extract the relevant parts of the
+streams (\.{\\yyreadfifo} and its cousins) are provided for the
+convenience of the parser designer. The interested reader can consult
+the input routine macros for the details of the internal
+representation of the streams.
+
+In the interest of full disclosure, let me point out that this simple
+technique introduces a significant strain on \TeX's
+computational resources: the lowest level macros, the ones that handle
+character input and are thus executed (sometimes multiple times) for
+{\it every\/} character in the input stream, are rather complicated
+and, therefore, slow. Whenever the use of such streams is not desired,
+a simpler input routine can be written to speed up the process (see
+\.{\\yyinputtrivial} for a working example of such a macro).
+
+Finally, while probably not directly related to the present
+discussion, this approach has one more interesting feature: after the
+parser is finished, the parser output and the streams exist
+`statically', fully available for any last minute preprocessing or for
+debugging purposes, if necessary. Under most circumstances, the parser
+output is `executed' and the macros in the output are the ones reading
+the various streams using the pointers supplied at the parsing stage
+(at least, this is the case for all the parsers supplied with the
+package).
+
+@*1 Inside semantic actions: switch statements and `functions' in \TeX.
+Now you have a lexer for your input, and a grammar ready to be put into
+action (we will talk about actions a bit later). It is time to discuss
+how the tables produced by \bison\ get converted into \TeX\ {\it macros\/}
+that drive the parser in {\it \TeX}.
+
+The tables that drive the \bison\ input parsers
+are collected in the various \.{\{b,d,f,g,n\}yytab.tex} files and in
+\.{small\_tab.tex}. Each of these files contains the tables that
+implement a specific parser used during a different stage of processing.
+Their exact function is well explained
+in the source file produced by \bison\ ({\it how} this is done is
+explained elsewhere, see \cite[Ah] for a good reference). It will
+suffice to mention here that there are three types of tables in this
+file: \recount{1}numerical tables such as \.{\\yytable} and
+\.{\\yycheck} (both are either \TeX's token registers in an
+unoptimized parser or associative arrays in an optimized version of
+the parser, as discussed below),
+\recount{2}a string array \.{\\yytname}, and \recount{3}an action
+switch. The action switch is what gets called when the parser does a
+{\it reduction}. It is easy to notice that the numerical tables come
+`premade' whereas the string array consisting of token names
+is initialized in a form that is difficult to recognize. This is
+intentional: this form of initialization
+is designed to allow the widest range of
+characters to appear inside names. The macros that do this reside in
+\.{yymisc.sty}. The generated table files also contain
+constant and token declarations used by the parser.
+
+The description of the process used to output \bison\ tables in an
+appropriate form continues in the section about
+\locallink{bsfile}outputting \TeX\ tables\endlink; we pick it up here
+with the description of the syntax-directed translation and the
+actions. The line
+$$
+\.{\\switchon\\next\\in\\currentswitch}
+$$
+is responsible for calling an appropriate action in the current
+switch, as is easy to infer. A {\it switch\/} is also a macro that
+consists of strings of \TeX\ tokens intermixed with \TeX\ macros
+inside braces. Each group of macros
+gets executed whenever the character or the group of characters in
+\.{\\next} matches a substring preceding the braced group. If there
+are two different substrings
+that match, only the earliest group of macros gets expanded.
+Before a state is
+used, a special control sequence,
+\.{\\setspecialcharsfrom\\switchname}, can be used to put the \TeX\
+tokens in a form suitable for consumption by \.{\\switchon}'s. The
+most important step it performs is that it {\it turns every token in the
+list into a character with the same character code and category
+12\/}. Thus \.{\\\{} becomes \.{\{}$_{12}$. There are other ways of
+inserting tokens into a state: enclosing a token or a string of tokens in
+\.{\\raw...\\raw} adds it to the state macro unchanged. If you have
+a sequence of category 12 characters you want to add to the state, put
+it after \.{\\classexpand} (such sequences are usually prepared by the
+\.{\\setspecialchars} macro that uses the token tables generated by
+\bison\ from your grammar).
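+
+As a purely illustrative sketch (the names \.{\\mystate},
+\.{\\doletter} and \.{\\dodigit} are made up for this example), a
+state might be defined as
+$$
+\.{\\def\\mystate\{a\{\\doletter\}1\{\\dodigit\}\\raw stop\\raw\{\\relax\}\}}
+$$
+after which \.{\\setspecialcharsfrom\\mystate} converts the ordinary
+tokens to their category 12 form and
+\.{\\switchon\\next\\in\\mystate} dispatches on the contents of
+\.{\\next}.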
+
+You can give a case a readable label (say, \.{brackets}) and enclose
+this label in \.{\\raw}$\ldots$\.{\\raw}. A word of caution: an `a'
+inside of \.{\\raw}$\ldots$\.{\\raw} (which is most likely an
+\.{a}$_{11}$ unless you played with category codes before loading the
+\.{\\switchon} macros) and the one outside it are two different
+characters, as one is no longer a letter (category 11) in the eyes of
+\TeX\ whereas the other one still is. For this reason one should not
+use characters other than letters in h\.{\{}is\.{,}er\.{\}} state
+names: the way a state picks an action does not distinguish between,
+say, a `\.{(}' in `\.{(letter)}' and a stand-alone `\.{(}' and may
+pick an action that you did not intend. This applies even if `\.{(}'
+is not among the characters explicitly inserted in the state macro: if
+an action for a given character is not found in the state macro, the
+\.{\\switchon} macro will insert the current \.{\\default} action
+instead, which most often you would want to be \.{\\yylex} or
+\.{\\yyinput} (i.e.\ skip this token). If `\.{(}' or `\.{)}' matches
+the braced group that follows `\.{(letter)}' chaos may ensue (most
+likely \TeX\ will keep reading past the \.{\\end} or \.{\\yyeof} that
+should have terminated the input). Make the names of character
+categories as unique as possible: the \.{\\switchon} macro is simply a
+string matching mechanism, with the added distinction between
+characters of different categories.
+
+Finally, the construct \.{\\statecomment}{\it
+anything\/}\.{\\statecomment} allows you to insert comments in the
+state sequence (note that the state {\it name\/} is put at the
+beginning of the state macro (by \.{\\setspecialcharsfrom})
+in the form of a special control sequence
+that expands to nothing: this elaborate scheme is needed because
+another control sequence can be \.{\\let} to the state macro, which
+makes the debugging information difficult to decipher). The debugging
+mode for the lexer implemented with these macros is activated by
+\.{\\tracedfatrue}.
+
+The functionality of the \.{\\switchon} macros (for `historical'
+reasons, one can also use \.{\\action} as a synonym) has been
+implemented in a number of other macro packages (see \cite[Fi], which
+discusses the well-known and widely used \.{\\CASE} and \.{\\FIND}
+macros). The macros in this collection have the additional property
+that the only assignments that persist after the \.{\\switchon}
+completes are the ones performed by the user code inside the selected
+case.
+
+This last property of the switch macros is implemented using another
+mechanism that is part of this macro suite: the `subroutine-like'
+macros, \.{\\begingroup}$\ldots$\.{\\tokreturn}. For examples of their
+use, the interested reader can take a look at the macros included with
+the package. A typical use is
+\.{\\begingroup}$\ldots$\.{\\tokreturn\{\}\{\\toks0 \}\{\}} which will
+preserve all the changes to \.{\\toks0} and have no other side effects
+(if, for example, in typical \TeX\ vernacular, \.{\\next} is used
+to implement tail recursion inside the group, after the
+\.{\\tokreturn}, \.{\\next} will still have the same value it
+had before the group was entered). This functionality comes at the
+expense of some computational efficiency.
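+
+As a minimal sketch of such a `subroutine' (the macro \.{\\appenditem}
+and the token \.{item} are made up for this example), one might write
+$$
+\.{\\def\\appenditem\{\\begingroup\\toks0\\expandafter\{\\the\\toks0 item\}\\tokreturn\{\}\{\\toks0 \}\{\}\}}
+$$
+where every assignment made inside the group is discarded on exit
+except for the new contents of \.{\\toks0}.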
+
+This covers most of the routine computations inside semantic actions;
+all that is left is a way to `tap' into the stack automaton
+built by \bison\ using an interface similar to the special
+\.{\$$n$} variables utilized by the `genuine' \bison\ parsers
+(i.e.\ written in \Cee\ or any other target language supported by
+\bison).
+
+This role is played by the several varieties of \.{\\yy$\,p$} command
+sequences (for the sake of completeness, $p$ stands for one of \.{($n$)},
+\.{[{\rm name}]}, \.{]{\rm name}[} or $n$, where $n$ is a
+string of digits, and a `name' is any name acceptable as a symbolic
+name for a term in \bison). Instead
+of going into the minutiae of various flavors of \.{\\yy}-macros, let me
+just mention that one can get by with only two `idioms' and still
+be able to write parsers of arbitrary sophistication:
+\.{\\yy($n$)} can be treated as a token register containing the
+value of the $n$-th term of the rule's right hand side, $n>0$. The left
+hand side of a production is accessed through \.{\\yyval}. A
+convenient shortcut is \.{\\yy0\{{\rm \TeX\space material}\}} which
+will expand the `\TeX\ material' inside the braces. Thus, a simple way
+to concatenate the values of the first two production terms is
+\.{\\yy0\{\\the\\yy(1)\\the\\yy(2)\}}. The included \bison\
+parser can also be used to provide support for `symbolic names',
+analogous to \bison's \.{{\$}[{\rm name}]} but a
+bit more effort is required on the user's part to initialize such support.
+Using symbolic names can make the parser more readable and maintainable,
+however.
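+
+As an illustration (mirroring the actions in the productions below), a
+typical semantic action builds a new `structure' by wrapping the term
+values in a control sequence that is protected from expansion:
+$$
+\.{\\yy0\{\\nx\\termname\{\\the\\yy(1)\}\{\\the\\yy(2)\}\}}
+$$
+where \.{\\nx} keeps \.{\\termname} from expanding while the register
+contents are unpacked into \.{\\yyval}.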
+
+There is also a \.{\\bb$\,n$} macro that provides access to the term
+values in the `natural order' (e.g.~\.{\\bb1} is the last term read). Its
+intended use is with the `inline' rules (see the main parser for
+such examples). As of version \.{3.0}, \bison\ no longer outputs
+|yyrhs| and |yyprhs|, which makes it impossible to produce the
+|yyrthree| array necessary for processing such rules in the `left to right'
+order. One might also note that the new notation is better suited for
+the inline rules since the value that is pushed on the stack is that
+of \.{\\bb0}, i.e.~the term implicitly inserted by \bison. Be aware
+that there are no \.{\\bb[$\cdot$]} or \.{\\bb($\cdot$)} versions of
+these macros, for obvious reasons. A less obvious feature of these
+macros is their `nonexpandable' nature. This means they cannot be used
+inside \.{\\edef}. Thus, the most common use pattern is
+\.{\\bb$\,n$\{\\toks$\,m$\}} with a subsequent expansion of
+\.{\\toks$\,m$}. Making these macros expandable is certainly possible
+but does not seem crucial for the intended limited use pattern.
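+
+For instance (this mirrors the inline rule actions in the grammar
+below), the values of the two most recently read terms can be
+retrieved and concatenated as
+$$
+\.{\\bb2\{\\toksa\}\\bb1\{\\toksb\}\\yy0\{\\the\\toksa\\the\\toksb\}}
+$$
+with the expansion of the token registers deferred until \.{\\yy0}
+unpacks them.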
+
+Naturally, a parser writer may need a number of other data
+abstractions to complete the task. Since these are highly dependent on
+the nature of the processing the parser is supposed to provide, we
+refer the interested reader to the parsers included in the package as
+a source of examples of such specialized data structures.
+
+One last remark about the parser operation is worth making here:
+the parser automaton itself does not make any \.{\\global}
+assignments. This (along with some careful semantic action writing)
+can be used to `localize' the effects of the parser operation and,
+most importantly, to create `reentrant' parsers that can, e.g.\ call
+{\it themselves\/} recursively.
+
+@*1 `Optimization'.
+By default, the generated parser and scanner keep all of their tables
+in separate token registers. Each stack is kept in a single macro (this
+description is further complicated by the support for parser {\it
+namespaces\/} that exists even for unoptimized parsers, but this
+subtlety will not be mentioned again---see the macros in the package
+for further details). Thus, every time a table
+is accessed, it has to be expanded, making the table access latency
+linear in {\it the size of the table}. The same holds for stacks and
+the action `switches', of
+course. While keeping the parser tables (which are immutable) in token
+registers does not have any better rationale than saving the control
+sequence memory (the most abundant memory in \TeX), this way of
+storing {\it stacks} does have an advantage when multiple parsers get
+to play simultaneously. All one has to do to switch from one parser to
+another is to save the state by renaming the stack control sequences
+accordingly.
+
+When the parser and scanner are `optimized', all these control
+sequences are `spread over' appropriate associative arrays. One caveat
+to be aware of: the action switches for both the parser and the scanner
+have to be output differently (a command line option is used to
+control this) for optimized and unoptimized parsers. While it is
+certainly possible to optimize only some of the parsers (if your
+document uses multiple parsers) or even only some {\it parts\/} of a given
+parser (or scanner), the details of how to do this are rather
+technical and are left for the reader to discover by reading the
+examples supplied with the package. At least at the beginning it is
+easier to simply set the highest optimization level and use it
+consistently throughout the document.
+
+@*1 {\it \TeX\/} with a different {\sl slant} or do you C an escape?.
+%\def\texnspace{other}
+Some \TeX\ productions below probably look like alien script.
+The authors of \cite[Er] cite a number of reasons why pretty printing of
+\TeX\ in general is a nearly impossible task. The macros included with
+the package follow a very straightforward strategy and do not try to
+be very comprehensive. Instead, the burden of presenting \TeX\ code in
+a readable form is placed on the programmer. Appropriate hints can be
+supplied by means of indenting the code, using assignments ($=$) where
+appropriate, etc. If you would rather look at straight \TeX\
+instead, the line \.{\\def\\texnspace\{other\}} at the beginning of
+this section can be uncommented and
+|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );| becomes
+\def\texnspace{other}%
+|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );|.
+\def\texnspace{texline}%
+There is, however, more to this story. A look at the actual file will
+reveal that the line above was typed as
+$$
+\.{TeX\_( "/noexpand/inmath\{/yy0\{/yy1\{\}\}\}" );}
+$$
+The `escape character' is leaning the other way!
+The lore of \TeX\ is uncompromising: `\.{\\}' is {\it the\/} escape
+character. What is the reason to avoid it in this case?
+
+The mystery is not very deep: `\.{/}' was chosen as an escape character
+by the parser macros (a quick glance at \.{?yytab.tex} will reveal as
+much). There is, of course, nothing sacred (other than tradition,
+which this author is trying his hardest to follow) about what character code
+the escape character has. The reason to look for an alternative is straightforward: `\.{\\}' is
+a special character in \Cee, as well (also an `escape' in fact). The line
+\.{TeX\_( "..." );} is a {\it macro call\/}, but one in \Cee, not in \TeX. This
+macro simply prints out (almost `as-is') the line in
+parentheses. An attempt at \.{TeX\_( "\\noexpand" );} would result in
+\numberlinestrue
+\begindemo
+^
+^oexpand
+\enddemo
+\numberlinesfalse
+Other escape combinations\footnote{Here is a full list of {\it
+defined\/} escape sequences in \Cee: \.{\\a}, \.{\\b}, \.{\\f}, \.{\\n},
+\.{\\r}, \.{\\t}, \.{\\v}, \.{\\}{$[$\it octal digit$]$}, \.{\\'},
+\.{\\"}, \.{\\?}, \.{\\\\}, \.{\\x}, \.{\\u}, \.{\\U}. Note that the
+last three combinations must be followed by a specific string of
+characters to appear in the input without generating errors.} are
+even worse: most are simply undefined. If anyone feels trapped without
+an escape, however, the same line can be typed as
+$$
+\.{TeX\_( "\\\\noexpand\\\\inmath\{\\\\yy0\{\\\\yy1\{\}\}\}" );}
+$$
+Twice the escape!
+
+If one were to look closer at the code, another oddity stands
+out: there are no \.{\$}'s anywhere in sight.
+The big money, \.{\$}, is a beloved character in
+\bison. It is used in action code to reference the values of the
+appropriate terms in a production. If mathematics pays your bills, use
+\.{\\inmath} instead.
+
+@*1 The \bison\ parser(s). Let's take a short break for a broad overview of the input file.
+The basic structure is that of an ordinary \bison\ file that produces
+plain \Cee\ output. The \Cee\ actions, however, are programmed to output \TeX.
+
+@s TeX_ TeX
+@s TeXa TeX
+@s TeXb TeX
+@s TeXf TeX
+@s TeXfo TeX
+@s TeXao TeX
+
+@(bg.yy@>=
+@G Switch to generic mode.
+%{@> @<Grammar parser \Cee\ preamble@> @=%}
+ @> @<Grammar parser \bison\ options@> @=
+%union {@> @<Union of grammar parser types@> @=}
+%{@> @<Grammar parser \Cee\ postamble@> @=%}
+ @> @<Tokens and types ...@> @=
+%%
+ @> @<Fake start symbol for rules only grammar@> @=
+ @> @<Parser common productions@> @=
+ @> @<Parser grammar productions@> @=
+%%
+@g
+
+@ Bootstrap mode is next. The reason for a separate bootstrap parser is to
+collect the minimal amount of information to `spool up' the `production'
+parsers. To understand the mechanics and the reasons behind it, consider what happens
+following a declaration such as \.{\%token TOKEN "token"}
+(or, as it would be typeset by the macros in this package,
+`\prodstyle{\%token} \.{TOKEN} \.{token}'; see the index entries for
+more details)%
+\idxinline{TOKEN}\idxinline{token}.
+The two names for the same token are treated very differently. \.{TOKEN} becomes
+an |enum| constant in the \Cee\ parser generated by \bison. Even when
+that parser becomes part of the `driver' program that outputs the \TeX\
+version of the parser tables, there is no easy way to output the {\it
+names\/} of the appropriate |enum| constants. The other name
+(\.{"token"}) becomes an entry in the |yytname| array. These names
+can be output by either the `driver' or \TeX\ itself after the
+\.{\\yytname} table has been input. The scanner, on the other hand,
+will use the first version (\.{TOKEN}). Therefore, it is important to
+establish an equivalence between the two versions of the name. In the
+`real' parser, the token values are output in a special header
+file. Hence, one has to either parse the header file to establish the
+equivalences or find some other means to find out the numerical values
+of the tokens.
+
+One approach is to parse the file containing the {\it declarations\/}
+and extract the equivalences between the names from it. This is the
+function of the bootstrap parser. Since the lexer is reused, some
+token values need to be known in advance (and the rest either ignored
+or replaced by some `made up' values). These tokens are `hard coded'
+into the parser file generated by \bison\ and output using a special
+function. The switch `|@[#define@]@; BISON_BOOTSTRAP_MODE|' tells the `driver'
+program to output the hard coded token values.
+@q Bizarre looking way of typing #define is due to the awkward way@>
+@q \CWEB\ treats switching in and out of $-mode in inline \Cee@>
+
+Note that the equivalence of the two versions of token names would
+have to be established every time a `string version' of a token is
+declared in the \bison\ file and the `macro name version' of the token
+is used by the corresponding scanner. To establish this equivalence,
+however, the bootstrapping parser below is not always necessary (see
+the \.{xxpression} example, specifically, the file \.{xxpression.w} in
+the \.{examples} directory for an example of using a different parser
+for this purpose). The reason it is necessary here is that a parser
+for an appropriate subset of the \bison\ syntax is not yet available
+(indeed, {\it any\/} functional parser for a \bison\ syntax subset
+would have to use the same scanner (unless you want to write a custom
+scanner for it), which would need to know how to output tokens, for
+which it would need a parser for a subset of \bison\ syntax $\ldots$
+it is a `chicken and egg' problem). Hence the name `bootstrap'. Once a
+functional parser for a large enough subset of the \bison\ input
+grammar is operational, {\it it\/} can be used to pair up the token
+names.
+
+The second function of the bootstrap parser is to collect information
+about the scanner's states. The mechanism is slightly different for
+states. While the token equivalences are collected purely in
+`\TeX\ mode', the bootstrap parser collects all the state names into a
+special \Cee\ header file. The reason is simple: unlike the token
+values, the numerical values of the scanner states are not passed to
+the `driver' program in any data structure and are instead defined as
+ordinary macros. The header file is the information the `driver' file
+needs to output the state values.
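+
+(For illustration: \flex\ defines start conditions as ordinary macros
+in the generated scanner, along the lines of \.{\#define INITIAL 0},
+so the header file simply collects definitions of this form; the
+specific names depend on the grammar at hand.)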
+
+An additional subtlety in the case of state value output is that the
+main lexer for the \bison\ grammar utilizes states extensively and thus
+cannot be easily used with the bootstrap parser before the state
+values are known. The solution is to substitute a very simple scanner barely
+capable of lexing state declarations. Such a scanner is implemented
+in \.{ssffo.w} (the somewhat cryptic name stands for `{\bf s}imple {\bf s}canner
+{\bf f}or {\bf f}lex {\bf o}ptions').
+\saveparseoutputtrue
+@(bb.yy@>=
+@G Switch to generic mode.
+%{
+ @> @<Grammar parser \Cee\ preamble@> @=
+ @> @/#define BISON_BOOTSTRAP_MODE @=
+%}
+ @> @<Grammar parser \bison\ options@> @=
+%union {@> @<Union of grammar parser types@> @=}
+%{@> @<Bootstrap parser \Cee\ postamble@> @=%}
+ @> @<Tokens and types ...@> @=
+%%
+ @> @<Fake start symbol for bootstrap grammar@> @=
+ @> @<Parser bootstrap productions@> @=
+ @> @<\flex\ options parser productions@> @=
+ @> @<List of symbols@> @=
+ @> @<Definition of \prodstyle{symbol}@> @=
+%%
+@g
+
+@ The prologue parser is responsible for parsing various grammar
+declarations as well as parser options.
+\saveparseoutputfalse
+%\traceparserstatestrue
+%\tracestackstrue
+%\tracerulestrue
+%\traceactionstrue
+\saveparseoutputtrue
+@(bd.yy@>=
+@G Switch to generic mode.
+%{@> @<Grammar parser \Cee\ preamble@> @=%}
+ @> @<Grammar parser \bison\ options@> @=
+%union {@> @<Union of grammar parser types@> @=}
+%{@> @<Grammar parser \Cee\ postamble@> @=%}
+ @> @<Tokens and types ...@> @=
+%%
+ @> @<Fake start symbol for prologue grammar@>@;
+ @> @<Parser common productions@> @=
+ @> @<Parser prologue productions@> @=
+%%
+@g
+
+@ The full \bison\ input parser is used when a complete \bison\ file is
+expected. It is also capable of parsing a `skeleton' of such a file,
+similar to the one that follows this paragraph.
+\traceparserstatesfalse
+\tracestacksfalse
+\tracerulesfalse
+\traceactionsfalse
+\checktablefalse
+\saveparseoutputfalse
+@(bf.yy@>=
+@G Switch to generic mode.
+%{@> @<Grammar parser \Cee\ preamble@> @=%}
+ @> @<Grammar parser \bison\ options@> @=
+%union {@> @<Union of grammar parser types@> @=}
+%{@> @<Grammar parser \Cee\ postamble@> @=%}
+ @> @<Tokens and types ...@> @=
+%%
+ @> @<Parser common productions@> @=
+ @> @<Parser prologue productions@> @=
+ @> @<Parser grammar productions@> @=
+ @> @<Parser full productions@> @=
+%%
+@g
+
+@ The first two options are essential for the parser operation. The
+start symbol can be set implicitly by listing the appropriate
+production first.
+@q %define lr.type canonical-lr @>
+@q Make not on this and lexing too much lookahead and the \stashed trick@>
+@q Explain other options @>
+@<Grammar parser \bison\ options@>=
+@G
+%token-table
+%debug
+%start input
+@g
+
+@*2 Grammar rules. Most of the original comments present in
+the grammar file used by \bison\ itself have been preserved and appear in
+{\it italics\/} at the beginning of each appropriate section.
+
+To facilitate the {\it bootstrapping\/} of the parser (see above), some
+declarations have been separated into their own sections. Also, a
+number of new rules have been introduced to create a hierarchy of
+`subparsers' that parse subsets of the grammar. We begin by listing
+most of the tokens used by the grammar. Only the string versions are
+kept in the |yytname| array, which, in part, is the reason for a
+special bootstrapping parser as explained earlier.
+@<Tokens and types for the grammar parser@>=
+@G
+%token GRAM_EOF 0 "end of file"
+%token STRING "string"
+
+%token PERCENT_TOKEN "%token"
+%token PERCENT_NTERM "%nterm"
+
+%token PERCENT_TYPE "%type"
+%token PERCENT_DESTRUCTOR "%destructor"
+%token PERCENT_PRINTER "%printer"
+
+%token PERCENT_LEFT "%left"
+%token PERCENT_RIGHT "%right"
+%token PERCENT_NONASSOC "%nonassoc"
+%token PERCENT_PRECEDENCE "%precedence"
+
+%token PERCENT_PREC "%prec"
+%token PERCENT_DPREC "%dprec"
+%token PERCENT_MERGE "%merge"
+@g
+@<Global Declarations@>@;
+
+@ We continue with the list of tokens below, following the layout of
+the original parser.
+@<Global Declarations@>=
+@G
+%token
+ PERCENT_CODE "%code"
+ PERCENT_DEFAULT_PREC "%default-prec"
+ PERCENT_DEFINE "%define"
+ PERCENT_DEFINES "%defines"
+ PERCENT_ERROR_VERBOSE "%error-verbose"
+ PERCENT_EXPECT "%expect"
+ PERCENT_EXPECT_RR "%expect-rr"
+ PERCENT_FLAG "%<flag>"
+ PERCENT_FILE_PREFIX "%file-prefix"
+ PERCENT_GLR_PARSER "%glr-parser"
+ PERCENT_INITIAL_ACTION "%initial-action"
+ PERCENT_LANGUAGE "%language"
+ PERCENT_NAME_PREFIX "%name-prefix"
+ PERCENT_NO_DEFAULT_PREC "%no-default-prec"
+ PERCENT_NO_LINES "%no-lines"
+ PERCENT_NONDETERMINISTIC_PARSER
+ "%nondeterministic-parser"
+ PERCENT_OUTPUT "%output"
+ PERCENT_REQUIRE "%require"
+ PERCENT_SKELETON "%skeleton"
+ PERCENT_START "%start"
+ PERCENT_TOKEN_TABLE "%token-table"
+ PERCENT_VERBOSE "%verbose"
+ PERCENT_YACC "%yacc"
+;
+
+%token BRACED_CODE "{...}"
+%token BRACED_PREDICATE "%?{...}"
+%token BRACKETED_ID "[identifier]"
+%token CHAR "char"
+%token EPILOGUE "epilogue"
+%token EQUAL "="
+%token ID "identifier"
+%token ID_COLON "identifier:"
+%token PERCENT_PERCENT "%%"
+%token PIPE "|"
+%token PROLOGUE "%{...%}"
+%token SEMICOLON ";"
+%token TAG "<tag>"
+%token TAG_ANY "<*>"
+%token TAG_NONE "<>"
+%token INT "integer"
+%token <param> PERCENT_PARAM "%param";
+@g
+
+@ Extra tokens for typesetting \flex\ state
+declarations and options are declared in addition to the ones that a
+standard \bison\ parser recognizes.
+@<Tokens and...@>=
+@G
+%token FLEX_OPTION FLEX_STATE_X FLEX_STATE_S
+@g
+
+@ We are ready to describe the top levels of the parse tree. The first
+`sub parser' we consider is a `full' parser, that is, the parser that
+expects a full grammar file, complete with the prologue, declarations,
+etc. This parser can be used to extract information from the grammar
+that is otherwise absent from the executable code generated by
+\bison. This includes, for example, the `name' part of
+\.{\$}\.{[}{\rm name}\.{]}.
+This parser is therefore used to generate the `symbolic
+switch' to provide support for symbolic term names similar to
+`genuine' \bison's \.{\$}\.{[}$\ldots$\.{]} syntax.
+@<Parser full productions@>=
+@G
+@t}\vb{\inline}{@>
+input:
+ prologue_declarations
+ "%%" grammar epilogue.opt {@> @<Finish the input setup@> @=}
+;
+@g
+
+@ The action of the parser in this case is simply to separate the
+accumulated `parse tree' from the auxiliary information carried by the
+parser on the stack.
+@<Finish the input setup@>=
+ @[TeX_( "/getsecond{/yy(3)}/to/toksa" );@]@; /* extract grammar contents */
+ @[TeX_( "/yy0{/the/toksa}/table=/yy(0)" );@]@;
+
+@ Another subgrammar deals with the syntax of isolated \bison\ rules. This is
+the most commonly used `subparser' since a rules cluster is the most
+natural `unit' to include in a \CWEB\ file.
+@<Fake start symbol for rules only grammar@>=
+@G
+@t}\vb{\inline}{@>
+input:
+ grammar epilogue.opt {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=}
+;
+@g
+
+@ The bootstrap parser has a very narrow set of goals: it is concerned
+with \prodstyle{\%token} declarations only in
+order to supply the token information to the lexer (since, as noted
+above, such information is not kept in the |yytname| array).
+The parser can also parse \prodstyle{\%nterm} declarations, but the
+bootstrap lexer ignores the \prodstyle{\%nterm} token, since the
+\bison\ grammar does not use one.
+It also extends the syntax of a \prodstyle{grammar\_declaration} by allowing a
+declaration with or without a semicolon at the end (the latter is only
+allowed in the prologue). This works since the token declarations have
+been carefully separated from the rest of the grammar in different
+\CWEB\ sections. The range of tokens output by the bootstrap
+lexer is limited, hence most of the other rules are ignored.
+@<Fake start symbol for bootstrap grammar@>=
+@G
+@t}\vb{\inline}{@>
+input:
+ grammar_declarations {@> TeX_( "/table=/yy(1)" ); @=}
+;
+@t}\vb{\resetf}{@>
+grammar_declarations:
+ symbol_declaration semi.opt {@> @<Carry on@> @=}
+| flex_declaration semi.opt {@> @<Carry on@> @=}
+| grammar_declarations
+ symbol_declaration semi.opt {@> TeX_( "/yy0{/the/yy(1)/the/yy(2)}" ); @=}
+| grammar_declarations
+ flex_declaration semi.opt {@> TeX_( "/yy0{/the/yy(1)/the/yy(2)}" ); @=}
+;
+@t}\vb{\inline\flatten}{@>
+semi.opt: {} | ";" {};
+@g
+
+@ The following is perhaps the most common action performed by the
+parser. It is done automatically by the parser code, but this feature
+is undocumented, so we supply an explicit action in each case.
+@<Carry on@>=
+ @[TeX_( "/yy0{/the/yy(1)}" );@]@;
+
+@ Next, a subgrammar for processing prologue declarations. Finer
+differentiation is possible, but the `subparsers' described here work
+pretty well and impose a mild style on the grammar writer.
+@<Fake start symbol for prologue grammar@>=
+@G
+@t}\vb{\inline}{@>
+input:
+ prologue_declarations epilogue.opt {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=}
+| prologue_declarations
+ "%%" "%%" EPILOGUE {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=}
+| prologue_declarations
+ "%%" "%%" {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=}
+;
+@g
+
+@ {\it Declarations: before the first \prodstyle{\%\%}}. We are now
+ready to deal with the specifics of the declarations themselves. The
+\.{\\grammar} macro is a `structure', whose first `field' is the
+grammar itself, whereas the second carries the type of the last
+declaration added to the grammar.
+@<Parser prologue productions@>=
+@G
+prologue_declarations:
+ {@> TeX_( "/yy0{/nx/grammar{}{/nx/empty}}" ); @=}
+| prologue_declarations
+ prologue_declaration {@> @<Attach a prologue declaration@> @=}
+;
+@g
+
+@ @<Attach a prologue declaration@>=
+ @<Attach a productions cluster@>@;
+
+@ Here is a list of most of the kinds of declarations that can appear
+in the prologue. The scanner returns the `stream pointers' for all the
+keywords, so the declaration `structures' pass on those pointers to the
+grammar list. The original syntax has been left intact even though for
+the purposes of this parser some of the inline rules are unnecessary.
+@<Parser prologue productions@>=
+@G
+prologue_declaration:
+ grammar_declaration {@> @<Carry on@> @=}
+| "%{...%}" {@> TeX_( "/yy0{/nx/prologuecode/the/yy(1)}" ); @=}
+| "%<flag>" {@> TeX_( "/yy0{/nx/optionflag/the/yy(1)}" ); @=}
+| "%define" variable value {@> TeX_( "/yy0{/nx/vardef{/the/yy(2)}{/the/yy(3)}/the/yy(1)}" ); @=}
+| "%defines" {@> TeX_( "/yy0{/nx/optionflag{defines}{}/the/yy(1)}" ); @=}
+| "%defines" STRING {@> @[TeX_( "/toksa{defines}" );@]@+@<Prepare one parametric option@> @=}
+| "%error-verbose" {@> TeX_( "/yy0{/nx/optionflag{error verbose}{}/the/yy(1)}" ); @=}
+| "%expect" INT {@> @[TeX_( "/toksa{expect}" );@]@+@<Prepare one parametric option@> @=}
+| "%expect-rr" INT {@> @[TeX_( "/toksa{expect-rr}" );@]@+@<Prepare one parametric option@> @=}
+| "%file-prefix" STRING {@> @[TeX_( "/toksa{file prefix}" );@]@+@<Prepare one parametric option@> @=}
+| "%glr-parser" {@> TeX_( "/yy0{/nx/optionflag{glr parser}{}/the/yy(1)}" ); @=}
+| "%initial-action" "{...}" {@> TeX_( "/yy0{/nx/initaction/the/yy(2)}" ); @=}
+| "%language" STRING {@> @[TeX_( "/toksa{language}" );@]@+@<Prepare one parametric option@> @=}
+| "%name-prefix" STRING {@> @[TeX_( "/toksa{name prefix}" );@]@+@<Prepare one parametric option@> @=}
+| "%no-lines" {@> TeX_( "/yy0{/nx/optionflag{no lines}{}/the/yy(1)}" ); @=}
+| "%nondeterministic-parser" {@> TeX_( "/yy0{/nx/optionflag{nondet. parser}{}/the/yy(1)}" ); @=}
+| "%output" STRING {@> @[TeX_( "/toksa{output}" );@]@+@<Prepare one parametric option@> @=}
+@t}\vb{\flatten}{@>
+| "%param" {}
+ params {@> TeX_( "/yy0{/nx/paramdef{/the/yy(3)}/the/yy(1)}" ); @=}
+@t}\vb{\fold}{@>
+| "%require" STRING {@> @[TeX_( "/toksa{require}" );@]@+@<Prepare one parametric option@> @=}
+| "%skeleton" STRING {@> @[TeX_( "/toksa{skeleton}" );@]@+@<Prepare one parametric option@> @=}
+| "%token-table" {@> TeX_( "/yy0{/nx/optionflag{token table}{}/the/yy(1)}" ); @=}
+| "%verbose" {@> TeX_( "/yy0{/nx/optionflag{verbose}{}/the/yy(1)}" ); @=}
+| "%yacc" {@> TeX_( "/yy0{/nx/optionflag{yacc}{}/the/yy(1)}" ); @=}
+| ";" {@> TeX_( "/yy0{/nx/empty}" ); @=}
+;
+
+params:
+ params "{...}" {@> TeX_( "/yy0{/the/yy(1)/nx/braceit/the/yy(2)}" ); @=}
+| "{...}" {@> TeX_( "/yy0{/nx/braceit/the/yy(1)}" ); @=}
+;
+@g
+
+@ This is a typical parser action: encapsulate the `type' of the
+construct just parsed and attach some auxiliary info, in this case the
+stream pointers.
+@<Prepare one parametric option@>=
+ @[TeX_( "/yy0{/nx/oneparametricoption{/the/toksa}{/the/yy(2)}/the/yy(1)}" );@]@;
+
+@ Some extra declarations to typeset \flex\ options and
+declarations. These are not part of the \bison\ syntax but their
+structure is similar enough that they can be included in the grammar.
+@<Parser prologue productions@>=
+@G
+prologue_declaration:
+ flex_declaration {@> @<Carry on@> @=}
+;
+@g
+@<\flex\ options parser productions@>@;
+
+@ The syntax of \flex\ options was extracted from the \flex\ documentation,
+so it is not guaranteed to be correct.
+@<\flex\ options parser productions@>=
+@G
+flex_declaration:
+ FLEX_OPTION flex_option_list {@> @<Define \flex\ option list@> @=}
+| flex_state symbols.1 {@> @<Define \flex\ states@> @=}
+;
+
+flex_state:
+ FLEX_STATE_X {@> TeX_( "/yy0{/nx/flexxstatedecls/the/yy(1)}" ); @=}
+| FLEX_STATE_S {@> TeX_( "/yy0{/nx/flexsstatedecls/the/yy(1)}" ); @=}
+;
+
+flex_option_list:
+ flex_option {@> @<Carry on@> @=}
+| flex_option_list flex_option {@> @<Add a \flex\ option@> @=}
+;
+
+flex_option:
+ ID {@> TeX_( "/yy0{/nx/flexoptionpair{/the/yy(1)}{}}" ); @=}
+| ID "=" symbol {@> TeX_( "/yy0{/nx/flexoptionpair{/the/yy(1)}{/the/yy(3)}}" ); @=}
+;
+@g
+
+@ @<Define \flex\ option list@>=
+ @[TeX_( "/yy0{/nx/flexoptiondecls{/the/yy(2)}/the/yy(1)}" );@]@;
+
+@ @<Define \flex\ states@>=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/getthird{/yy(1)}/to/toksc" );@]@;
+ @[TeX_( "/yy0{/the/toksa{/the/yy(2)}{/the/toksb}{/the/toksc}}" );@]@;
+
+@ @<Add a \flex\ option@>=
+ @[TeX_( "/getsecond{/yy(2)}/to/toksa" );@]@; /* the identifier */
+ @[TeX_( "/getfourth{/toksa}/to/toksb" );@]@; /* the format pointer */
+ @[TeX_( "/getfifth{/toksa}/to/toksc" );@]@; /* the stash pointer */
+ @[TeX_( "/yy0{/the/yy(1)/nx/hspace{/the/toksb}{/the/toksc}/the/yy(2)}" );@]@;
+
+@ {\it Grammar declarations}. These declarations can appear in both
+the prologue and the rules sections. Their treatment is very similar to
+prologue-only options.
+@<Parser common productions@>=
+@G
+grammar_declaration:
+ precedence_declaration {@> @<Carry on@> @=}
+| symbol_declaration {@> @<Carry on@> @=}
+| "%start" symbol {@> @[TeX_( "/toksa{start}" );@]@+@<Prepare one parametric option@> @=}
+| code_props_type "{...}" generic_symlist {@> @<Assign a code fragment to symbols@> @=}
+| "%default-prec" {@> TeX_( "/yy0{/nx/optionflag{default prec.}{}/the/yy(1)}" ); @=}
+| "%no-default-prec" {@> TeX_( "/yy0{/nx/optionflag{no default prec.}{}/the/yy(1)}" ); @=}
+| "%code" "{...}" {@> TeX_( "/yy0{/nx/codeassoc{code}{}/the/yy(2)/the/yy(1)}" ); @=}
+| "%code" ID "{...}" {@> TeX_( "/yy0{/nx/codeassoc{code}{/the/yy(2)}/the/yy(3)/the/yy(1)}" ); @=}
+;
+
+code_props_type:
+ "%destructor" {@> TeX_( "/yy0{{destructor}/the/yy(1)}" ); @=}
+| "%printer" {@> TeX_( "/yy0{{printer}/the/yy(1)}" ); @=}
+;
+@g
+
+@ @<Assign a code fragment to symbols@>=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@; /* name of the property */
+ @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* contents of the braced code */
+ @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@; /* braced code format pointer */
+ @[TeX_( "/getthird{/yy(2)}/to/toksd" );@]@; /* braced code stash pointer */
+ @[TeX_( "/getsecond{/yy(1)}/to/tokse" );@]@; /* code format pointer */
+ @[TeX_( "/getthird{/yy(1)}/to/toksf" );@]@; /* code stash pointer */
+ @[TeX_( "/yy0{/nx/codepropstype{/the/toksa}{/the/toksb}{/the/yy(3)}{/the/toksc}{/the/toksd}{/the/tokse}{/the/toksf}}" );@]@;
+
+@ @<Tokens and types...@>=
+@G
+%token PERCENT_UNION "%union";
+@g
+
+@ @<Parser common productions@>=
+@G
+@t}\vb{\inline\flatten}{@>
+union_name:
+ {@> TeX_( "/yy0{}" ); @=}
+| ID {@> @<Carry on@> @=}
+;
+
+grammar_declaration:
+ "%union" union_name "{...}" {@> @<Prepare union definition@> @=}
+;
+
+symbol_declaration:
+ "%type" TAG symbols.1 {@> @<Define symbol types@> @=}
+;
+@t}\vb{\resetf\flatten}{@>
+precedence_declaration:
+ precedence_declarator tag.opt symbols.prec {@> @<Define symbol precedences@> @=}
+;
+
+precedence_declarator:
+ "%left" {@> TeX_( "/yy0{/nx/preckind{left}/the/yy(1)}" ); @=}
+| "%right" {@> TeX_( "/yy0{/nx/preckind{right}/the/yy(1)}" ); @=}
+| "%nonassoc" {@> TeX_( "/yy0{/nx/preckind{nonassoc}/the/yy(1)}" ); @=}
+| "%precedence" {@> TeX_( "/yy0{/nx/preckind{precedence}/the/yy(1)}" ); @=}
+;
+@t}\vb{\inline}{@>
+tag.opt:
+ {@> TeX_( "/yy0{}" ); @=}
+| TAG {@> @<Carry on@> @=}
+;
+@g
+
+@ @<Prepare union definition@>=
+ @[TeX_( "/yy0{/nx/codeassoc{union}{/the/yy(2)}/the/yy(3)/the/yy(1)}" );@]@;
+
+@ @<Define symbol types@>=
+ @[TeX_( "/yy0{/nx/typedecls{/the/yy(2)}{/the/yy(3)}/the/yy(1)}" );@]@;
+
+@ @<Define symbol precedences@>=
+ @[TeX_( "/getthird{/yy(1)}/to/toksa" );@]@; /* format pointer */
+ @[TeX_( "/getfourth{/yy(1)}/to/toksb" );@]@; /* stash pointer */
+ @[TeX_( "/getsecond{/yy(1)}/to/toksc" );@]@; /* kind of precedence */
+ @[TeX_( "/yy0{/nx/precdecls{/the/toksc}{/the/yy(2)}{/the/yy(3)}{/the/toksa}{/the/toksb}}" );@]@;
+
+@ The bootstrap grammar forms the smallest subset of the full grammar.
+@<Parser common productions@>=
+ @<Parser bootstrap productions@>@;
+
+@ These are the two most important rules for the bootstrap parser.
+@<Parser bootstrap productions@>=
+@G
+@t}\vb{\flatten}{@>
+symbol_declaration:
+ "%nterm" {} symbol_defs.1 {@> TeX_( "/yy0{/nx/ntermdecls{/the/yy(3)}/the/yy(1)}" ); @=}
+@t}\vb{\fold\flatten}{@>
+| "%token" {} symbol_defs.1 {@> TeX_( "/yy0{/nx/tokendecls{/the/yy(3)}/the/yy(1)}" ); @=}
+;
+@g
+
+@ {\it Just like \prodstyle{symbols.1} but accept \prodstyle{INT} for
+the sake of \POSIX}. Perhaps the only point worth mentioning here is
+the inserted separator (\.{\\hspace}). Like any other separator, it takes
+two parameters, the stream pointers. In this case, however, both pointers are null
+since there seems to be no other meaningful assignment. If any
+formatting or stash information is needed, it can be extracted by the
+symbols themselves.
+@<Parser common productions@>=
+@G
+symbols.prec:
+ symbol.prec {@> @<Carry on@> @=}
+| symbols.prec symbol.prec {@> TeX_( "/yy0{/the/yy(1)/nx/hspace{0}{0}/the/yy(2)}" ); @=}
+;
+
+symbol.prec:
+ symbol {@> TeX_( "/yy0{/nx/symbolprec{/the/yy(1)}{}}" ); @=}
+| symbol INT {@> TeX_( "/yy0{/nx/symbolprec{/the/yy(1)}{/the/yy(2)}}" ); @=}
+;
+@g
+
+@ {\it One or more symbols to be \prodstyle{\%type}'d}.
+@<Parser common productions@>=
+ @<List of symbols@>@;
+
+@ @<List of symbols@>=
+@G
+symbols.1:
+ symbol {@> @<Carry on@> @=}
+| symbols.1 symbol {@> TeX_( "/yy0{/the/yy(1)/nx/hspace{0}{0}/the/yy(2)}" ); @=}
+;
+@g
+
+@ @<Parser common productions@>=
+@G
+generic_symlist:
+ generic_symlist_item {@> @<Carry on @> @=}
+| generic_symlist generic_symlist_item {@> TeX_( "/yy0{/the/yy(1)/nx/hspace{0}{0}/the/yy(2)}" ); @=}
+;
+@t}\vb{\flatten\inline}{@>
+generic_symlist_item:
+ symbol {@> @<Carry on@> @=}
+| tag {@> @<Carry on@> @=}
+;
+
+tag:
+ TAG {@> @<Carry on@> @=}
+| "<*>" {@> @<Carry on@> @=}
+| "<>" {@> @<Carry on@> @=}
+;
+@g
+
+@ {\it One token definition}.
+@<Parser bootstrap productions@>=
+@G
+symbol_def:
+ TAG {@> @<Carry on@> @=}
+@t}\vb{\flatten}{@>
+| id {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{}{}}" ); @=}
+| id INT {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{/the/yy(2)}{}}" ); @=}
+| id string_as_id {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{}{/the/yy(2)}}" ); @=}
+| id INT string_as_id {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{/the/yy(2)}{/the/yy(3)}}" ); @=}
+;
+@g
+
+@ {\it One or more symbol definitions}.
+@<Parser bootstrap productions@>=
+@G
+symbol_defs.1:
+ symbol_def {@> @<Carry on@> @=}
+| symbol_defs.1 symbol_def {@> @<Add a symbol definition@> @=}
+;
+@g
+
+@ @<Add a symbol definition@>=
+ @[TeX_( "/getsecond{/yy(2)}/to/toksa" );@]@; /* the identifier */
+ @[TeX_( "/getfourth{/toksa}/to/toksb" );@]@; /* the format pointer */
+ @[TeX_( "/getfifth{/toksa}/to/toksc" );@]@; /* the stash pointer */
+ @[TeX_( "/yy0{/the/yy(1)/nx/hspace{/the/toksb}{/the/toksc}/the/yy(2)}" );@]@;
+
+@ {\it The grammar section: between the two
+\prodstyle{\%\%}'s}. Finally, the following few short sections define
+the syntax of \bison's rules.
+@<Parser grammar productions@>=
+@G
+grammar:
+ rules_or_grammar_declaration {@> @<Start with a production cluster@> @=}
+| grammar rules_or_grammar_declaration {@> @<Attach a productions cluster@> @=}
+;
+@g
+
+@ {\it As a \bison\ extension, one can use the grammar declarations in the
+body of the grammar}. What follows is the syntax of the right hand
+side of a grammar rule.
+@<Parser grammar productions@>=
+@G
+rules_or_grammar_declaration:
+ rules {@> @<Add a productions cluster@> @=}
+| grammar_declaration ";" {@> @<Carry on@> @=}
+| error ";" {@> TeX_( "/errmessage{parsing error!}" ); @=}
+;
+@t}\vb{\flatten\inline}{@>
+rules:
+ id_colon named_ref.opt {@> TeX_( "/relax" ); @=}
+ rhses.1 {@> @<Complete a production@> @=}
+;
+@t}\vb{\resetf}{@>
+rhses.1[o]:
+ rhs {@> @<Start the right hand side@> @=}
+| rhses.1[a] "|"[b] {@> @<Insert local formatting@> @=}[c]
+ rhs[d] {@> @<Add a right hand side to a production@> @=}
+| rhses.1 ";" {@> @<Add an optional semicolon@> @=}
+;
+@g
+
+@ The next few actions describe what happens when a left hand side is
+attached to a rule.
+@<Start with a production cluster@>=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/yy0{/nx/grammar{/the/yy(1)}{/the/toksa}}" );@]@;
+
+@ @<Attach a productions cluster@>=
+ @[TeX_( "/getthird{/yy(1)}/to/toksa" );@]@; /* type of the last rule */
+ @[TeX_( "/getsecond{/yy(1)}/to/toksc" );@]@; /* accumulated rules */
+ @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* type of the new rule */
+ @[TeX_( "/let/default/positionswitchdefault" );@]@;
+ @[TeX_( "/switchon{/the/toksb}/in/positionswitch" );@]@; /* determine the position of the first token in the group */
+ @[TeX_( "/edef/next{/the/toksa}" );@]@;
+ @[TeX_( "/edef/default{/the/toksb}" );@]@; /* reuse \.{\\default} */
+ @[TeX_( "/ifx/next/default" );@]@;
+ @[TeX_( " /let/default/separatorswitchdefaulteq" );@]@;
+ @[TeX_( " /switchon{/the/toksa}/in/separatorswitcheq" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /concat/toksa/toksb" );@]@;
+ @[TeX_( " /let/default/separatorswitchdefaultneq" );@]@;
+ @[TeX_( " /switchon{/the/toksa}/in/separatorswitchneq" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yy0{/nx/grammar{/the/toksc/the/postoks/the/toksd/the/yy(2)}{/the/toksb}}" );@]@;
+
+@ @<Add a productions cluster@>=
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa" );@]@; /* \.{\\prodheader} */
+ @[TeX_( "/getsecond{/toksa}/to/toksb" );@]@; /* \.{\\idit} */
+ @[TeX_( "/getfourth{/toksb}/to/toksc" );@]@; /* format stream pointer */
+ @[TeX_( "/getfifth{/toksb}/to/toksd" );@]@; /* stash stream pointer */
+ @[TeX_( "/getthird{/yy(1)}/to/toksb" );@]@; /* \.{\\rules} */
+ @[TeX_( "/yy0{/nx/oneproduction{/the/toksa/the/toksb}{/the/toksc}{/the/toksd}}" );@]@;
+
+@ @<Complete a production@>=
+ @[TeX_( "/getfourth{/yy(1)}/to/toksa" );@]@; /* format stream pointer */
+ @[TeX_( "/getfifth{/yy(1)}/to/toksb" );@]@; /* stash stream pointer */
+ @[TeXb( "/yy0{/nx/pcluster{/nx/prodheader{/the/yy(1)}{/the/yy(2)}" );@]@;
+ @[TeXao( "{/the/toksa}{/the/toksb}}{/the/yy(4)}}" );@]@;
+
+@ It is important to format the right hand side properly, since we
+would like to indicate an inlined action by
+indentation. The `format' of the \.{\\rhs} `structure' includes the
+stash pointers and a `boolean' to indicate whether the right hand side ends
+with an action. Since the action can be implicit, this decision has to
+be postponed until, say, a semicolon is seen.
+No formatting or stash pointers are added for such an implicit action.
+@<Start the right hand side@>=
+ @[TeX_( "/rhsbool{/yy(1)}/to/toksa /the/toksa" );@]@;
+ @[TeX_( "/getthird{/yy(1)}/to/toksb" );@]@; /* the format pointer */
+ @[TeX_( "/getfourth{/yy(1)}/to/toksc" );@]@; /* the stash pointer */
+ @[TeX_( "/ifrhsfull" );@]@;
+ @[TeX_( " /yy0{/nx/rules{/the/yy(1)}{/the/toksb}{/the/toksc}}" );@]@;
+ @[TeX_( "/else" );@]@; /* it does not end with an action, fake one */
+ @[TeX_( " /rhscont{/yy(1)}/to/toksa" );@]@; /* rules */
+ @[TeX_( " /edef/next{/the/toksa}" );@]@;
+ @[TeX_( " /ifx/next/empty" );@]@;
+ @[TeX_( " /toksa{/emptyterm}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeXb( " /yy0{/nx/rules{/nx/rhs{/the/toksa/nx/rarhssep{0}{0}" );@]@;
+ @[TeXfo( " /nx/actbraces{}{}{0}{0}/nx/bdend}{}{/nx/rhsfulltrue}}{/the/toksb}{/the/toksc}}" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ Using standard notation, here is what the middle action
+does.
+@<Old `Insert local formatting'@>=
+ @[TeX_( "/rhscont{/yy(1)}/to{/yy(0)}" );@]@;
+ @[TeX_( "/yy0{/the/yy(0)/nx/midf/the/yy(2)}" );@]@;
+
+@ However, if the length of the rule preceding the inline action
+is not known, a different way of accessing the stack is necessary.
+@<Insert local formatting@>=
+ @[TeX_( "/bb2{/toksa}/bb1{/toksb}" );@]@;
+ @[TeX_( "/rhscont{/toksa}/to{/yy(0)}" );@]@;
+ @[TeX_( "/yy0{/the/yy(0)/nx/midf/the/toksb}" );@]@;
+
+@ No pointers are provided for an {\it implicit\/} action.
+@<Add a right hand side to a production@>=
+ @[TeX_( "/rhsbool{/yy(4)}/to/toksa /the/toksa" );@]@;
+ @[TeX_( "/ifrhsfull" );@]@;
+ @[TeX_( " /yy0{/nx/rules{/the/yy(3)/nx/rrhssep/the/yy(2)/the/yy(4)}/the/yy(2)}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /rhscont{/yy(4)}/to/toksa" );@]@;
+ @[TeX_( " /edef/next{/the/toksa}" );@]@;
+ @[TeX_( " /ifx/next/empty" );@]@;
+ @[TeX_( " /toksa{/emptyterm}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeXb( " /yy0{/nx/rules{/the/yy(3)/nx/rrhssep/the/yy(2)" );@]@;
+ @[TeXf( " /nx/rhs{/the/toksa/nx/rarhssep{0}{0}" );@]@; /* streams have already been grabbed */
+ @[TeXfo( " /nx/actbraces{}{}{0}{0}/nx/bdend}{}{/nx/rhsfulltrue}}/the/yy(2)}" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Add an optional semicolon@>=
+ @<Carry on@>@;
+
+@ @<Tokens and types...@>=
+@G
+%token PERCENT_EMPTY "%empty";
+@g
+
+@ The centerpiece of the grammar is the syntax of the right hand side
+of a production. Various `precedence hints' must be attached to an
+appropriate portion of the rule, just before an action (which can
+be inline, implicit or both in this case).
+@<Parser grammar productions@>=
+@G
+rhs:
+ {@> @<Make an empty right hand side@> @=}
+| rhs symbol named_ref.opt {@> @<Add a term to the right hand side@> @=}
+| rhs "{...}" named_ref.opt {@> @<Add an action to the right hand side@> @=}
+| rhs "%?{...}" {@> @<Add a predicate to the right hand side@> @=}
+| rhs "%empty" {@> @<Add \prodstyle{\%empty} to the right hand side@> @=}
+| rhs "%prec" symbol {@> @<Add a precedence directive to the right hand side@> @=}
+| rhs "%dprec" INT {@> @<Add a \prodstyle{\%dprec} directive to the right hand side@> @=}
+| rhs "%merge" TAG {@> @<Add a \prodstyle{\%merge} directive to the right hand side@> @=}
+;
+
+named_ref.opt:
+ {@> @<Create an empty named reference@> @=}
+| BRACKETED_ID {@> @<Create a named reference@> @=}
+;
+@g
+
+@ @<Make an empty right hand side@>=
+ @[TeX_( "/yy0{/nx/rhs{}{}{/nx/rhsfullfalse}}" );@]@;
+
+@ @<Add a term to the right hand side@>=
+ @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/edef/next{/the/toksb}" );@]@;
+ @[TeX_( "/ifx/next/empty" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /getfourth{/yy(2)}/to/toksc" );@]@;
+ @[TeX_( " /getfifth{/yy(2)}/to/toksd" );@]@;
+ @[TeX_( " /appendr/toksb{{/the/toksc}{/the/toksd}}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeXb( "/yy0{/nx/rhs{/the/toksa/the/toksb" );@]@;
+ @[TeXao( "/nx/termname{/the/yy(2)}{/the/yy(3)}}{/nx/hspace}{/nx/rhsfullfalse}}" );@]@;
+
+@ @<Add an action to the right hand side@>=
+ @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/rhsbool{/yy(1)}/to/toksb /the/toksb" );@]@;
+ @[TeX_( "/ifrhsfull" );@]@; /* the first half ends with an action */
+ @[TeX_( " /appendr/toksa{/nx/arhssep{0}{0}/nx/emptyterm}" );@]@; /* no pointers to streams */
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/edef/next{/the/toksa}" );@]@;
+ @[TeX_( "/ifx/next/empty" );@]@;
+ @[TeX_( " /toksa{/emptyterm}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* the contents of the braced code */
+ @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@; /* the format stream pointer */
+ @[TeX_( "/getthird{/yy(2)}/to/toksd" );@]@; /* the stash stream pointer */
+ @[TeXb( "/yy0{/nx/rhs{/the/toksa/nx/rarhssep{/the/toksc}{/the/toksd}" );@]@;
+ @[TeXf( " /nx/actbraces{/the/toksb}{/the/yy(3)}{/the/toksc}{/the/toksd}/nx/bdend}" );@]@;
+ @[TeXfo( " {/nx/arhssep}{/nx/rhsfulltrue}}" );@]@;
+
+@ @<Add a predicate to the right hand side@>=
+ @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/rhsbool{/yy(1)}/to/toksb /the/toksb" );@]@;
+ @[TeX_( "/ifrhsfull" );@]@; /* the first half ends with an action */
+ @[TeX_( " /appendr/toksa{/nx/arhssep{0}{0}/nx/emptyterm}" );@]@; /* no pointers to streams */
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/edef/next{/the/toksa}" );@]@;
+ @[TeX_( "/ifx/next/empty" );@]@;
+ @[TeX_( " /toksa{/emptyterm}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* the contents of the braced code */
+ @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@; /* the format stream pointer */
+ @[TeX_( "/getthird{/yy(2)}/to/toksd" );@]@; /* the stash stream pointer */
+ @[TeXb( "/yy0{/nx/rhs{/the/toksa/nx/rarhssep{/the/toksc}{/the/toksd}" );@]@;
+ @[TeXf( " /nx/bpredicate{/the/toksb}{}{/the/toksc}{/the/toksd}/nx/bdend}" );@]@;
+ @[TeXao( "{/nx/arhssep}{/nx/rhsfulltrue}}" );@]@;
+
+@ @<Add \prodstyle{\%empty} to the right hand side@>=
+ @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/edef/next{/the/toksb}" );@]@;
+ @[TeX_( "/ifx/next/empty" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /getfourth{/yy(2)}/to/toksc" );@]@;
+ @[TeX_( " /getfifth{/yy(2)}/to/toksd" );@]@;
+ @[TeX_( " /appendr/toksb{{/the/toksc}{/the/toksd}}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeXb( "/yy0{/nx/rhs{/the/toksa/the/toksb" );@]@;
+ @[TeXao( "/nx/emptyterm}{/nx/hspace}{/nx/rhsfullfalse}}" );@]@;
+
+@ @<Add a precedence directive to the right hand side@>=
+ @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/rhsbool{/yy(1)}/to/toksc /the/toksc" );@]@;
+ @[TeX_( "/ifrhsfull" );@]@;
+ @[TeX_( " /yy0{/nx/sprecop{/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */
+ @[TeX_( " /supplybdirective/toksa/yyval" );@]@; /* the directive is `absorbed' by the action */
+ @[TeX_( " /yy0{/nx/rhs{/the/toksa}{/the/toksb}{/nx/rhsfulltrue}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeXb( " /yy0{/nx/rhs{/the/toksa" );@]@;
+ @[TeXao( "/nx/sprecop{/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Add a \prodstyle{\%dprec} directive to the right hand side@>=
+ @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/rhsbool{/yy(1)}/to/toksc /the/toksc" );@]@;
+ @[TeX_( "/ifrhsfull" );@]@;
+ @[TeX_( " /yy0{/nx/dprecop{/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */
+ @[TeX_( " /supplybdirective/toksa/yyval" );@]@; /* the directive is `absorbed' by the action */
+ @[TeX_( " /yy0{/nx/rhs{/the/toksa}{/the/toksb}{/nx/rhsfulltrue}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeXb( " /yy0{/nx/rhs{/the/toksa" );@]@;
+ @[TeXao( "/nx/dprecop{/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Add a \prodstyle{\%merge} directive to the right hand side@>=
+ @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/rhsbool{/yy(1)}/to/toksc /the/toksc" );@]@;
+ @[TeX_( "/ifrhsfull" );@]@;
+ @[TeX_( " /yy0{/nx/mergeop{/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */
+ @[TeX_( " /supplybdirective/toksa/yyval" );@]@; /* the directive is `absorbed' by the action */
+ @[TeX_( " /yy0{/nx/rhs{/the/toksa}{/the/toksb}{/nx/rhsfulltrue}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeXb( " /yy0{/nx/rhs{/the/toksa" );@]@;
+ @[TeXao( "/nx/mergeop{/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Create an empty named reference@>=
+ @[TeX_( "/yy0{}" );@]@;
+
+@ @<Create a named reference@>=
+ @<Carry on@>@;
+
+@ Identifiers.
+{\it Identifiers are returned as |uniqstr| values by the scanner.
+Depending on their use, we may need to make them genuine symbols}. We,
+on the other hand, simply copy the values returned by the scanner.
+@<Parser bootstrap productions@>=
+@G
+id:
+ ID {@> @<Turn an identifier into a term@> @=}
+| CHAR {@> @<Turn a character into a term@> @=}
+;
+@g
+
+@ @<Parser common productions@>=
+ @<Definition of \prodstyle{symbol}@>@;
+
+@ @<Definition of \prodstyle{symbol}@>=
+@G
+symbol:
+ id {@> @<Turn an identifier into a symbol@> @=}
+| string_as_id {@> @<Turn a string into a symbol@> @=}
+;
+@g
+
+@ @<Parser grammar productions@>=
+@G
+@t}\vb{\inline}{@>
+id_colon:
+ ID_COLON {@> @<Prepare the left hand side@> @=}
+;
+@g
+
+@ A string used as an \prodstyle{ID}.
+@<Parser bootstrap productions@>=
+@G
+@t}\vb{\inline}{@>
+string_as_id:
+ STRING {@> @<Prepare a string for use@> @=}
+;
+@g
+
+@ The remainder of the action code is trivial, but we reserved the
+placeholders for the appropriate actions in case the parser gains some
+sophistication in processing low level types (or starts expecting
+different types from the scanner).
+@<Turn an identifier into a term@>=
+ @<Carry on@>@;
+
+@ @<Turn a character into a term@>=
+ @<Carry on@>@;
+
+@ @<Turn an identifier into a symbol@>=
+ @<Carry on@>@;
+
+@ @<Turn a string into a symbol@>=
+ @<Carry on@>@;
+
+@ @<Prepare the left hand side@>=
+ @<Carry on@>@;
+
+@ @<Prepare a string for use@>=
+ @<Carry on@>@;
+
+@ {\it Variable and value.
+The \prodstyle{STRING} form of variable is deprecated and is not \.{M4}-friendly.
+For example, \.{M4} fails for \.{\%define "[" "value"}.}
+@<Parser prologue productions@>=
+@G
+@t}\vb{\flatten\inline}{@>
+variable:
+ ID {@> @<Carry on@> @=}
+| STRING {@> @<Carry on@> @=}
+;
+
+value:
+ {@> TeX_( "/yy0{}" ); @=}
+| ID {@> @<Carry on@> @=}
+| STRING {@> @<Carry on@> @=}
+| "{...}" {@> TeX_( "/yy0{/nx/bracedvalue/the/yy(1)}" ); @=}
+;
+@g
+
+@ @<Parser common productions@>=
+@G
+@t}\vb{\flatten\inline}{@>
+epilogue.opt:
+ {@> TeX_( "/yy0{}" ); @=}
+| "%%" EPILOGUE {}
+;
+@g
+
+@ \Cee\ preamble for the grammar parser. In this case, there are no `real' actions that our
+grammar performs, only \TeX\ output, so this section is empty.
+
+@<Grammar parser \Cee\ preamble@>=
+
+@ \Cee\ postamble for the grammar parser. It is tricky to insert function definitions that use \bison's internal types,
+as they have to be inserted in a place that is aware of the internal definitions but before said
+definitions are used.
+
+@<Grammar parser \Cee\ postamble@>=
+#define YYPRINT(file, type, value) yyprint (file, type, value)
+ static void yyprint (FILE *file, int type, YYSTYPE value){}
+
+@ @<Bootstrap parser \Cee\ postamble@>=
+ @<Grammar parser \Cee\ postamble@>@;
+ @<Bootstrap token output@>@;
+
+@ @<Bootstrap token output@>=
+ void bootstrap_tokens( char *bootstrap_token_format ) {
+
+#define _register_token_d(name) fprintf( tables_out, bootstrap_token_format, #name, name, #name );
+ @<Bootstrap token list@>@;
+#undef _register_token_d
+
+ }
+
+@ \namedspot{bootstraptokens}Here is the minimal list of tokens needed
+to make the lexer operational just enough to extract the rest of the
+token information from the grammar.
+@<Bootstrap token list@>=
+ _register_token_d(ID)@;
+ _register_token_d(PERCENT_TOKEN)@;
+ _register_token_d(STRING)@;
+
+@q The tokens below are not required to make a minimal bootstrapping parser work @>
+@q but they do appear in the rules the parser will encounter while extracting @>
+@q token information. @>
+@q _register_token_d(INT) /* only encountered in GRAM_EOF definition which is never used */ @>
+@q _register_token_d(CHAR) /* \bison\ never declares character tokens */ @>
+@q _register_token_d(SEMICOLON) /* can be omitted in prologue */ @>
+@q _register_token_d(TAG) /* only encountered in the definition of PERCENT_PARAM */ @>
+
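+@ To illustrate (a sketch, not a verbatim transcript): with the default
+`bootstrap' format set up in \.{bs.w}, and assuming that \prodstyle{ID}
+has the typical (but not guaranteed) numeric value~258, the line
+\.{\_register\_token\_d(ID)} above would emit
+\.{\\expandafter\\def\\csname token\\parsernamespace ID\\endcsname\{258\}\% ID}
+into the table file.
+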
+@ Union of types.
+@<Union of grammar parser types@>=
diff --git a/support/splint/cweb/bs.w b/support/splint/cweb/bs.w
new file mode 100644
index 0000000000..a2003d81fa
--- /dev/null
+++ b/support/splint/cweb/bs.w
@@ -0,0 +1,706 @@
+@q Copyright 2012-2014, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
+@*1\bison\ specific routines.
+The placeholder code left blank in the common routines is filled in
+with the code relevant to the output of parser tables in the following sections.
+
+@*2Tables. \namedspot{bsfile}Here are all the parser table names. Some tables are not output but adding
+one to the list in the future will be easy: it does not even have to
+be done here.
+@<Parser table names@>=
+ _register_table_d(yytranslate)@;
+ _register_table_d(yyr1)@;
+ _register_table_d(yyr2)@;
+ _register_table_d(yydefact)@;
+ _register_table_d(yydefgoto)@;
+ _register_table_d(yypact)@;
+ _register_table_d(yypgoto)@;
+ _register_table_d(yytable)@;
+ _register_table_d(yycheck)@;
+ _register_table_d(yytoknum)@;
+ _register_table_d(yystos)@;
+ _register_table_d(yytname)@;
+ _register_table_d(yyprhs)@;
+ _register_table_d(yyrhs)@;
+
+@ One special table requires a little bit more preparation. This is a
+table that lists the depth of the stack before an implicit terminal. It
+is not one of the tables that is used by \bison\ itself but is needed
+if the symbolic name processing is to be implemented (\bison\ has
+access to this information `on the fly'). The `new' \bison\ (starting
+with version~\.{3.0}) does not generate |yyprhs| and |yyrhs| or any
+other arrays that contain similar information, so we fake them here if
+such a crippled version of \bison\ is used.
+
+@<Variables and types local to the parser@>=
+ unsigned int yyrthree[YYNRULES + 1] = { 0 };
+#ifdef BISON_IS_CRIPPLED
+ unsigned int yyrhs[YYNRULES + 1] = { -1 };
+ unsigned int yyprhs[YYNRULES + 1] = { 0 };
+#endif
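+
+@ A small made-up fragment (for illustration only) may help to recall
+the layout of these arrays: |yyrhs| lists the right hand side symbols
+of all the rules one after another, each rule terminated by a $-1$,
+while |yyprhs[n]| is the index in |yyrhs| at which the right hand side
+of rule~$n$ begins. For two rules whose right hand sides consist of
+the symbols $5$~$6$~$7$ and just~$7$, respectively, one would thus see
+\.{yyrhs = \{5, 6, 7, -1, 7, -1\}} and \.{yyprhs = \{0, 0, 4\}}, and
+the code below recovers the position of an implicit terminal inside a
+rule by scanning |yyrhs| starting at |yyprhs[i]|.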
+
+@ We populate this table below $\ldots$
+@<Parser defaults@>=
+#ifndef BISON_IS_CRIPPLED
+ assert( YYNRULES + 1 == sizeof(yyprhs)/sizeof(yyprhs[0]) );
+
+ { int i, j;
+
+ for ( i = 1; i <= YYNRULES; i++ ) {
+
+ for ( j = 0; yyrhs[ yyprhs[i] + j ] != -1; j++ ) {
+
+ assert( yyprhs[i] + j < sizeof(yyrhs) );
+ assert( j < yyr1[i] );
+
+ if ( @<This is an implicit term@> ) {
+
+ @<Find the rule that defines it and set |yyrthree|@>@;
+
+ }
+
+ }
+ }
+ }
+#endif
+
+@ @<This is an implicit term@>=
+ ( strlen( yytname[ yyrhs[yyprhs[i]+j] ] ) > 1 ) &&
+ ( yytname[ yyrhs[yyprhs[i]+j] ][0] == '$' ) &&
+ ( yytname[ yyrhs[yyprhs[i]+j] ][1] == '@@' )
+
+@ @<Find the rule that defines it...@>=
+ int rule_number;
+
+ for ( rule_number = 1; rule_number < YYNRULES; rule_number++ ) {
+
+ if ( yyr1[rule_number] == yyrhs[yyprhs[i]+j] ) {
+
+ yyrthree[rule_number] = j;
+ break;
+
+ }
+ }
+
+ assert( rule_number < YYNRULES );
+
+@ $\ldots$ and add its name to the list.
+@<Parser table names@>=
+ _register_table_d(yyrthree)@;
+
+@*2Actions. There are several ways of making |yyparse()| execute all portions of
+the action code. The one chosen here makes sure that none of the
+tables gets written past its last element. To see how it works, it
+might be helpful to `walk through' \bison's output to see how each
+change affects the generated parser.
+@<Output parser semantic actions@>=
+ if ( output_desc.output_actions ) {
+
+ int i, j;
+
+ fprintf( tables_out, "%s", action_desc.preamble );
+
+ if ( !bare_actions ) {
+
+ yypact[0] = YYPACT_NINF;
+ yypgoto[0] = -1;
+ yydefgoto[0] = YYFINAL;
+
+ }
+
+ for ( i = 1; i < sizeof(yyr1)/sizeof(yyr1[0]); i++ ) {
+
+ fprintf( tables_out, action_desc.act_setup, i, yyr2[i] - 1 );
+
+ if ( action_desc.print_rule ) {
+
+ action_desc.print_rule( i );
+
+ }
+
+ if ( yyr2[i] > 0 ) {
+
+ if ( action_desc.action1 ) {
+
+ fprintf( tables_out, "%s", action_desc.action1 );
+
+ }
+ }
+
+ for ( j = 2; j <= yyr2[i]; j++ ) {
+
+ if ( action_desc.actionn ) {
+
+ fprintf( tables_out, action_desc.actionn, j );
+ }
+
+ }
+
+ if ( !bare_actions ) {
+
+ yyr1[i] = YYNTOKENS;
+ yydefact[0] = i;
+ yyr2[i] = 0;
+ yyparse(YYPARSE_PARAMETERS);
+
+ }
+
+ fprintf( tables_out, action_desc.act_suffix, i, yyr2[i] - 1 );
+
+ }
+
+ fprintf( tables_out, "%s", action_desc.postamble );
+
+ if ( action_desc.cleanup ) {
+
+ action_desc.cleanup( &action_desc );
+
+ }
+
+ }
+
+@*2Constants.
+@<Parser constants@>=
+ _register_const_d(YYEMPTY)@;
+ _register_const_d(YYPACT_NINF)@;
+ _register_const_d(YYEOF)@;
+ _register_const_d(YYLAST)@;
+ _register_const_d(YYNTOKENS)@;
+ _register_const_d(YYNRULES)@;
+ _register_const_d(YYNSTATES)@;
+ _register_const_d(YYFINAL)@;
+
+@*2Tokens.
+Similar techniques are employed in token output. Tokens are parser
+specific (the scanner only needs their numeric values) so we need {\it
+some\/} flexibility to output them in a desired format. For special
+purposes (say changing the way tokens are typeset) we can control the
+format tokens are output in.
+@<Variables and types local to the parser@>=
+ char *token_format_char = NULL;
+ char *token_format_affix = NULL;
+ char *token_format_suffix = NULL;
+ char *bootstrap_token_format = NULL;
+
+@ @<Parser specific option list@>=
+ _register_option("token-format-char", required_argument, 0, TOKEN_FORMAT_CHAR, "")@;
+ _register_option("token-format-affix", required_argument, 0, TOKEN_FORMAT_AFFIX, "")@;
+ _register_option("token-format-suffix", required_argument, 0, TOKEN_FORMAT_SUFFIX, "")@;
+ _register_option("bootstrap-token-format", required_argument, 0, BOOTSTRAP_TOKEN_FORMAT, "")@;
+
+@ @<Higher index parser specific options@>=
+ TOKEN_FORMAT_CHAR,@[@]
+ TOKEN_FORMAT_AFFIX,@[@]
+ TOKEN_FORMAT_SUFFIX,@[@]
+ BOOTSTRAP_TOKEN_FORMAT,@[@]
+
+@ @<Handle parser output options@>=
+ case TOKEN_FORMAT_CHAR:@;
+ token_format_char = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) );
+ strcpy(token_format_char, optarg);
+ break;
+
+ case TOKEN_FORMAT_AFFIX:@;
+ token_format_affix = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) );
+ strcpy(token_format_affix, optarg);
+ break;
+
+ case TOKEN_FORMAT_SUFFIX:@;
+ token_format_suffix = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) );
+ strcpy(token_format_suffix, optarg);
+ break;
+
+ case BOOTSTRAP_TOKEN_FORMAT:@;
+ bootstrap_token_format = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) );
+ strcpy(bootstrap_token_format, optarg);
+ break;
+
+@ @<Parser specific output descriptor fields@>=
+ bool output_tokens:1;
+
+@ No tokens are output by default.
+@<Parser specific default outputs@>=
+ @[@].output_tokens = 0,@[@]
+
+@ The only part of the code below that needs any explanation is the
+`bootstrap' token output. In \bison\ every token has three attributes:
+its `macro name' (say, \.{STRING}) that is used by the parser code
+internally, its `print name' (\.{"string"} to continue the example)
+that \bison\ uses to print the token names in its diagnostic messages,
+and its numeric value (that can be assigned implicitly by \bison\
+itself or explicitly by the user). Only the `print names' are kept in
+the |yytname| array so to reuse the scanner used by \bison\ we either
+have to extract the token `macro names' from the \Cee\ code ourselves
+to pass them on to the lexer, or use a special `stripped down' version
+of a \bison\ grammar parser to extract the names from the parser's
+\bison\ grammar. To do this, some token names would still need to be
+known to the scanner. These tokens are selected by hand to make the
+`bootstrapping' parser operational. The token list for the \bison\
+grammar parser can be examined as part of the appropriate
+\locallink{bootstraptokens}driver file\endlink.
+@<Output parser tokens@>=
+ if ( output_desc.output_tokens ) {
+
+ int i;
+ int length;
+ char token;
+ char *token_name;
+ bool too_creative = false;
+
+ for ( i = 258; i < sizeof(yytranslate)/sizeof(yytranslate[0]) ; i++ ) {
+
+ token_name = yytname[yytranslate[i]];
+
+ if ( token_name ) {
+
+ fprintf( tables_out, token_format_affix, yytranslate[i], i );
+
+ length = 0;
+
+ while ( (token = *token_name) ) {
+
+ if ( token_format_char ) {
+
+ length += fprintf( tables_out, token_format_char, (unsigned int)token );
+
+ }
+
+ if ( token < 040 || token == 0177 ) {
+
+ too_creative = true;
+
+ }
+
+ token_name++;
+ }
+
+ fprintf( tables_out, token_format_suffix, too_creative ? ".unprintable." : yytname[yytranslate[i]] );
+
+ }
+ }
+ }
+
+#ifdef BISON_BOOTSTRAP_MODE
+ fprintf( tables_out, "\\bootstrapmodetrue\n" );
+ fprintf( tables_out, "%% token values needed to bootstrap the parser\n" );
+ bootstrap_tokens( bootstrap_token_format );
+#endif
+
+@ The size of the token name table is useful to determine, say, how
+many `named' tokens the parser uses.
+@<Output parser constants@>=
+ fprintf( tables_out, "\\constset{YYTRANSLATESIZE}{%d}%%\n", (int)(sizeof(yytranslate)/sizeof(yytranslate[0])) );
+
+@*2Output modes.
+The code below can be easily extended and modified to output parser
+tables, actions, and constants in a language of one's choice. We are
+only interested in \TeX, however, thus other modes are very
+rudimentary or non-existent at this point.
+
+@*3 Token only mode.
+Token only output mode does exactly what is expected: outputs token
+names and values in the format of your choosing.
+
+@<Parser specific output modes@>=
+ TOKEN_ONLY_OUT,@[@]
+
+@ @<Handle parser related output modes@>=
+ case TOKEN_ONLY_OUT:@;
+ @<Prepare token only output environment@>@;
+ break;
+
+@ @<Parser specific option list@>=
+ _register_option("token-only-mode", no_argument, 0, TOKEN_ONLY_MODE, "")@;
+
+@ @<Higher index parser specific options@>=
+ TOKEN_ONLY_MODE,@[@]
+
+@ @<Configure parser output modes@>=
+ case TOKEN_ONLY_MODE:@;
+ mode = TOKEN_ONLY_OUT;
+ break;
+
+@ @<Prepare token only output environment@>=
+ if ( !token_format_char ) {
+
+ token_format_char = "{%u}";
+
+ }
+
+ if ( !token_format_affix ) {
+
+ token_format_affix = "%% token: %d, token value: %d\n\\prettytoken@@{";
+
+ }
+
+ if ( !token_format_suffix ) {
+
+ token_format_suffix = "}%% %s\n";
+
+ }
+
+ output_desc.output_tokens = 1;
+
+@*3Generic output. Generic output is not programmed yet.
+
+@<Parser specific output modes@>=
+ GENERIC_OUT,@[@]
+
+@ @<Handle parser related output modes@>=
+ case GENERIC_OUT:@;
+ printf( "This mode is not supported yet\n" );
+ exit(0);
+ break;
+
+@*3\TeX\ output. The \TeX\ mode is the main reason for this software.
+
+@<Parser specific output modes@>=
+ TEX_OUT,@[@]
+
+@ @<Handle parser related output modes@>=
+ case TEX_OUT:@;
+ @<Set up \TeX\ table output for parser tables@>@;
+ @<Prepare \TeX\ format for semantic action output@>@;
+ @<Prepare \TeX\ format for parser constants@>@;
+ @<Prepare \TeX\ format for parser tokens@>@;
+ break;
+
+@ Some tables require name adjustments due to \TeX's
+reluctance to treat digits as part of a name.
+@<Set up \TeX\ table output for parser tables@>=
+#define _register_table_d(name) tex_table(name);
+ @<Table names@>@;
+#undef _register_table_d
+
+ yyr1_desc.name = "yyrone";
+ yyr2_desc.name = "yyrtwo";
+
+@ The memory allocated for the |yytname| table is released at the end.
+@<Helper functions declarations for parser output@>=
+ void yytname_cleanup( struct table_d *table );
+ int yytname_formatter_tex( FILE *stream, int index );
+ int yytname_formatter( FILE *stream, int index );
+
+@ There are a number of helper functions to output complicated names
+in \TeX. The safest way seems to be to output those as sequences of
+{\sc ASCII} codes to accommodate names like \.{\$}\.{end}
+safely. \TeX's \.{\^\^}$\ldots$ convention is supported as well.
+@<Helper functions for parser output@>=
+ void yytname_cleanup( struct table_d *table ) {
+
+ free( table->separator );
+ free( table->null );
+
+ }
+
+ int yytname_formatter_tex( FILE *stream, int index ) {
+
+ char *token_name = yytname[index];
+ unsigned char token;
+ int length = 0;
+
+ fprintf( stream, "\\addname " );
+
+ while ( (token = *token_name) ) {
+
+ if ( token < 040 || token == 0177 ) { /* unprintable characters */
+
+ fprintf( stream, "^^%c", token < 0100 ? (unsigned char)(token + 0100) : (unsigned char)(token - 100) );
+ length += 3;
+
+ } else {
+
+ fprintf( stream, "%c", token );
+ length++;
+
+ }
+
+ token_name++;
+
+ }
+ fprintf( stream, "\n" );
+
+ return length;
+
+ }
+
+ int yytname_formatter( FILE *stream, int index ) {
+
+ char *token_name;
+ unsigned char token;
+ int length = 0;
+ bool too_creative = false; /* to indicate if the name is too dangerous to print */
+
+ fprintf( stream, "\\addname" );
+
+ if ( index >= 0 ) { /* this is not the last name */
+
+ token_name = yytname[index];
+
+ if ( token_name == NULL ) {
+
+ token_name = "$impossible";
+
+ }
+
+ while ( (token = *token_name) ) {
+
+ length += fprintf( stream, "{%u}", (unsigned int)token );
+
+ if ( token < 040 || token == 0177 ) {
+
+ too_creative = true;
+
+ }
+
+ token_name++;
+
+ }
+
+ fprintf( stream, "%% %s\n", too_creative ? ".unprintable." : yytname[index] );
+
+ } else { /* this is the last name */
+
+ token_name = yytname[-index];
+
+ if ( token_name == NULL ) {
+
+ token_name = "$impossible";
+
+ }
+
+ while ( (token = *token_name) ) {
+
+ length += fprintf( stream, "{%u}", (unsigned int)token );
+ token_name++;
+
+ if ( token < 040 || token == 0177 ) {
+
+ too_creative = true;
+
+ }
+
+ }
+
+ fprintf( stream, "%% %s\n\\end\n%%\n", too_creative ? ".unprintable." :
+ ( yytname[-index] ? yytname[-index] : "end of array" ) );
+
+ }
+
+ return length;
+
+ }
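+
+@ As an example, assuming the standard {\sc ASCII} encoding, the token
+name \.{error} would be output by |yytname_formatter()| as
+\.{\\addname\{101\}\{114\}\{114\}\{111\}\{114\}\% error}
+with each brace group carrying the code of one character, leaving it
+to \TeX\ to decide how to typeset a possibly hostile name.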
+
+@ @<Set up \TeX\ table output for parser tables@>=
+ yytname_desc.preamble = "%%\n\\newtable{yytname}{}\\tempca0\\relax%% a robust way to add the yytname array\n";
+ yytname_desc.separator = NULL;
+ yytname_desc.postamble = NULL;
+ yytname_desc.null = NULL;
+ yytname_desc.null_postamble = NULL;
+ yytname_desc.optimized_numeric = NULL;
+ yytname_desc.prettify = false;
+ yytname_desc.formatter = yytname_formatter;
+
+ yytname_desc.cleanup = NULL;
+
+ output_desc.output_yytname = 1;
+
+@ @<Prepare \TeX\ format for semantic action output@>=
+
+ if ( optimize_actions ) {
+
+ action_desc.preamble = "%\n% the big switch\n%\n"@/
+ "\\catcode`\\/=0\\relax % see the documentation for an explanation of this trick\n"@/
+ "\\def\\yybigswitch#1{%%\n"@/
+ " \\csname dobisonaction\\number #1\\parsernamespace\\endcsname\n"@;
+ "}\\stashswitch{yybigswitch}%%\n";
+ action_desc.act_setup = "\n\\expandafter\\def\\csname dobisonaction%d\\parsernamespace\\endcsname{%%\n%%";
+ action_desc.act_suffix = "}%% end of rule %d\n";
+ action_desc.action1 = NULL;
+ action_desc.actionn = NULL;
+ action_desc.postamble = "\n\\catcode`\\/=12\\relax\n\n";
+ action_desc.print_rule = print_rule;
+ action_desc.cleanup = NULL;
+ output_desc.output_actions = 1;
+
+ } else {
+
+ action_desc.preamble = "%\n% the big switch\n%\n"@/
+ "\\catcode`\\/=0\\relax % see the documentation for an explanation of this trick\n"@/
+ "\\def\\yybigswitch#1{%%\n"@;
+ " \\ifcase#1\\relax\n";
+ action_desc.act_setup = " \\or %% (rule %d) ";
+ action_desc.act_suffix = "";
+ action_desc.action1 = NULL;
+ action_desc.actionn = NULL;
+ action_desc.postamble = " \\else\n \\fi\n}\\stashswitch{yybigswitch}%%\n\\catcode`\\/=12\\relax\n\n";
+ action_desc.print_rule = print_rule;
+ action_desc.cleanup = NULL;
+ output_desc.output_actions = 1;
+
+}
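+
+@ For reference, a sketch of the shape of the generated \TeX\ code
+(with the rule bodies elided): the optimized variant defines
+\.{\\yybigswitch} to expand a \.{\\csname}$\,\ldots\,$\.{\\endcsname}
+name built out of the rule number and then defines one such macro,
+\.{dobisonaction}$\langle$\\{number}$\rangle$\.{\\parsernamespace}, per
+rule, while the unoptimized variant packs all the actions into a
+single \.{\\ifcase} indexed by the same number.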
+
+@ Grammar rules are listed in a readable form alongside the action
+code to make it possible to quickly find an appropriate action. The
+rules are not output if a crippled \bison\ is used.
+@<Helper functions for parser output@>=
+ void print_rule( int n ) {
+
+ int i;
+
+ fprintf( tables_out, "%s%s: ", (n < 10 && !optimize_actions ? " " : ""), yytname[yyr1[n]] );
+
+#ifndef BISON_IS_CRIPPLED
+ i = yyprhs[n];
+
+ if ( yyrhs[i] < 0 ) {
+
+ fprintf( tables_out, "<empty>" );
+
+ } else {
+
+ while( yyrhs[i] > 0 ) {
+
+ fprintf( tables_out, "%s ", yytname[yyrhs[i]] );
+ i++;
+
+ }
+
+ }
+#endif
+
+ fprintf( tables_out, "\n" );
+
+ }
+
+@ \TeX\ constant output is another place where the techniques described above are applied.
+As before, the macro handles the repetitive work of initialization, declaration, etc.\ in
+each place where the corresponding constant is mentioned. The one exception is \.{YYPACT\_NINF},
+which has to be handled separately because the underscore in its name makes it difficult to
+use it as a command sequence name.
+\def\YYPACTxNINFxdesc{\.{YYPACT\_NINF\_}\\{desc}}
+
+@s YYPACT_NINF_desc TeX
+
+@<Prepare \TeX\ format for parser constants@>=
+#define _register_const_d(c_name) @[c_name##_desc.format = "\\constset{%s}{%d}%%\n"; \
+ c_name##_desc.name = #c_name; \
+ output_desc.output_##c_name = 1;@]
+ @<Parser constants@>@;
+#undef _register_const_d
+YYPACT_NINF_desc.name = "YYPACTNINF";
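+
+@ Thus, assuming for the sake of the example that \.{YYFINAL} is~12,
+the definitions above make this constant come out as
+\.{\\constset\{YYFINAL\}\{12\}\%} in the table file.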
+
+@ Token definitions round off the \TeX\ output mode.
+
+@<Prepare \TeX\ format for parser tokens@>=
+
+ token_format_char = NULL; /* do not output individual characters */
+
+ if ( !token_format_affix ) {
+
+ token_format_affix = "\\tokenset{%d}{%d}";
+
+ }
+
+ if ( !token_format_suffix ) {
+
+ token_format_suffix = "%% %s\n";
+
+ }
+
+
+ if ( !bootstrap_token_format ) {
+
+ bootstrap_token_format = "\\expandafter\\def\\csname token\\parsernamespace %s\\endcsname{%d}%% %s\n";
+
+ }
+
+ /* |output_desc.output_tokens = 1;| is no longer necessary as it is done entirely in \TeX */
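+
+@ Under these defaults a token such as \prodstyle{ID}, with, say,
+internal number~3 and token value~258, would come out as
+\.{\\tokenset\{3\}\{258\}\% ID}: the affix supplies the two numbers,
+the character by character output is suppressed, and the suffix
+appends the name as a comment.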
+
+@*2 Command line options.
+We start with the most obvious option, the one begging for help.
+
+@<Higher index parser specific options@>=
+ LONG_HELP,@[@]
+
+@ @<Parser specific option list@>=
+ _register_option("help", no_argument, 0, LONG_HELP, "")@;
+
+@ @<Shortcuts for command line options affecting parser output@>=
+ "h"
+
+@ @<Handle parser output options@>=
+ case 'h': /* short help */@;
+ fprintf(stderr, "Usage: %s [options] output_file\n", argv[0]);
+ exit(0);
+ break; /* should not be needed */
+
+ case LONG_HELP:@;
+ fprintf(stderr, "%s [--mode=TeX:options] output_file outputs tables\n"
+ " and constants for a TeX parser\n", argv[0]);
+ exit(0);
+ break; /* should not be needed */
+
+@ @<Parser specific option list@>=
+ _register_option("debug", optional_argument, 0, 'b', "")@;
+ _register_option("mode", required_argument, 0, 'm', "")@;
+ _register_option("table-separator", required_argument, 0, 'z', "")@;
+
+ _register_option("format", required_argument, 0, 'f', "")@; /* name? */
+ _register_option("table", required_argument, 0, 't', "")@; /* specific table */
+ _register_option("constant", required_argument, 0, 'c', "")@; /* specific constant */
+ _register_option("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */
+ _register_option("token", required_argument, 0, 'n', "")@; /* specific token */
+ _register_option("run-parse", required_argument, 0, 'p', "")@; /* run the parser */
+ _register_option("parse-file", required_argument, 0, 'i', "")@; /* input for the parser */
+
+@ The string below is a list of short options.
+
+@<Shortcuts for command line options affecting parser output@>=
+ "z:m:f:t:"
+
+@ A few options can be discussed immediately.
+
+@<Variables and types local to the parser@>=
+ char *table_separator = "%s ";
+
+@ @<Handle parser output options@>=
+ case 'm': /* output mode */@;
+ switch( optarg[0] ) {
+
+ case 'T':
+ case 't':@;
+ mode = TEX_OUT;
+ break;
+
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':@;
+ mode = GENERIC_OUT;
+ break;
+
+ default:@;
+ break;
+
+ }
+ break;
+
+ case 'z':
+ table_separator = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) );
+ strcpy(table_separator, optarg);
+ break;
diff --git a/support/splint/cweb/common.w b/support/splint/cweb/common.w
new file mode 100644
index 0000000000..3ab5af93b4
--- /dev/null
+++ b/support/splint/cweb/common.w
@@ -0,0 +1,788 @@
+@q Copyright 2012-2014, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
+@** Forcing \bison\ and \flex\ to output \TeX.
+Instead of implementing a \bison\ (or \flex) `plugin' for outputting
+\TeX\ parser, the code that follows produces a separate executable
+that outputs all the required tables after the inclusion of an
+ordinary \Cee\ parser produced by \bison\ (or a scanner produced by
+\flex). The actions in both \bison\ parser and \flex\ scanner are
+assumed to be merely |printf()| statements that output the `real'
+\TeX\ actions. The code below simply cycles through all such actions to
+output an `action switch' appropriate for use with \TeX. In every
+other respect, the included parser or scanner can use any features
+allowed in `real' parsers and scanners.
+
+@*1 Common routines.
+The `top' level of the scanner and parser `drivers' is very similar,
+and is therefore separated into a few sections that are common to both
+drivers. The layout is fairly typical and follows a standard
+`initialize-input-process-output-clean up' scheme. The logic behind each
+section of the program will be explained in detail below.
+
+The section below is called |@<\Cee\ postamble@>| because the output of
+the tables can happen only after the \bison\ (or \flex) generated
+\.{.c} file is included and all the data structures are known.
+
+The actual `assembly' of each driver has to be done separately due to
+some `singularities' of the \CWEB\ system and the design of this
+software. All the essential routines are presented in the sections
+below, though.
+@<\Cee\ postamble@>=
+
+@<Outer definitions@>;
+@<Global variables and types@>@;
+@<Auxiliary function declarations@>@;
+@<Auxiliary function definitions@>@;
+
+int main( int argc, char **argv ) {
+
+ @<Local variable and type declarations@>@;
+ @<Establish defaults@>@;
+ @<Command line processing variables@>@;
+ @<Process command line options@>@;
+
+ switch( mode ) {
+
+ @<Various output modes@>@;
+
+ default:
+ break;
+
+ }
+
+ if ( tables_out ) {
+
+ @<Perform output@>@;
+ @<Output action switch, if any@>@;
+
+ } else {
+
+ fprintf( stderr, "No output, exiting\n" );
+ exit(0);
+
+ }
+
+ @<Clean up@>@;
+
+ return 0;
+
+}
+
+@ Not all the code can be supplied at this stage (most of the routines
+here are at the `top' level so the specifics have to be `filled-in' by
+each driver), so many of the sections
+above are placeholders for the code provided by a specific
+driver. However, we still need to supply a trivial definition here to
+placate \CWEAVE\ whenever this portion of the code is used isolated in
+documentation.
+@<Various output modes@>=
+
+@ Standard library declarations for memory management routines, some
+syntactic sugar, command line processing, and variadic functions are
+all that is needed.
+@<Outer definitions@>=
+#include <stdlib.h>
+#include <stdbool.h>
+#include <stdarg.h>
+#include <assert.h>
+#include <string.h>
+
+@ This code snippet is the price paid for some poor (in my view) philosophy
+on the part of the \bison\ and \flex\ developers. There used to be an
+option in \bison\ to output just the tables and the action code but it
+had never worked correctly and it was simply dropped in the latest
+version. Instead, one can only get access to \bison's goodies as part
+of a tangled mess of |@[#define@]|'s and error processing code. Had the
+tables and the parser function itself been considered separate, well
+isolated sections of \bison's output, there would simply be no reason
+for dirty tricks like the one below, one would be able to write custom
+error processing functions, unicorns would roam the Earth and pixies
+would hand open sourced tablets to everyone. At a minimum, it would
+have been a much cleaner, modular approach.
+
+As of version~\.{3.0} of \bison\ some critical arrays, namely,
+|yyprhs| and |yyrhs| are no longer generated (even internally) which
+significantly reduces \bison's usability as a parser generator. As an
+example, the |yyrthree| array, which is necessary for processing
+`inline' actions is computed in \.{bs.w} using the two arrays
+mentioned in the previous sentence. There does not seem to be any
+other way to access this information. A number of tools (GNU and
+otherwise) have taken the path of narrowing the field of application
+to a few use cases envisioned by the maintainers. This includes
+compilers, as well.
+
+There is a strange
+reluctance on the part of the \gcc\ team to output any intermediate
+code other than the results of preprocessing and assembly. I have seen
+an argument that involves some sort of appeal to making the code
+difficult to close source but the logic of it escaped me completely
+(well, there {\it is\/} logic to it; however, choosing poor design in
+order to punish a few bad players seems like a rather inferior option).
+
+Ideally, there should be no such thing as a parser generator, or a
+compiler, for that matter: all of these are just basic table driven
+rewriting routines. Tables are hard but table driven code should not
+be. If one had access to the tables themselves, and some canonical
+examples of code driven by such tables, like |yyparse()| and
+|yylex()|, the flexibility of these tools would improve
+tremendously. Barring that, this is what we have to do {\it now}.
+
+There are several ways to gain write access to the data declared |const|
+in \Cee, like passing its address to a function with no prototype. All
+these methods have one drawback: the loopholes that make them possible
+have been steadily getting on the chopping block of the \Cee\
+standards committee. Indeed, |const| data should be constant. Even if
+one succeeds in getting access, there is no reason to believe that the
+data is not allocated in a write-only region of the memory. The
+cleanest way to get write access then is to eliminate |const|
+altogether. The code should have the same semantics after that, and
+the trick is only marginally bad.
+
+The last two definitions are less innocent (indeed, at least the
+second one is prohibited by the ISO standard (clause 6.10.8(2),
+see~\cite[ISO/C11])) but
+\gcc\ does not seem to mind, and it gets rid of warnings about
+dropping a |const| qualifier whenever an |assert| is
+encountered. Since the macro is not recursively expanded, this will
+only work if $\ldots$|FUNCTION__| is treated as a pseudo-variable, as
+it is in \gcc, not a macro.
+
+@d const
+@d __PRETTY_FUNCTION__ @[(char *)__PRETTY_FUNCTION__@]
+@d __FUNCTION__ @[(char *)__FUNCTION__@]
+
+@ The output file has to be known to both parts of the code, so it is
+declared at the very beginning of the program. We also add some
+syntactic sugar for loops.
+@q The line below is simply irresistible, one should put it on a T-shirt@>
+@s FOREVER TeX
+
+@d FOREVER for(;;)
+
+@<Common code for \Cee\ preamble@>=
+#include <stdio.h>
+ FILE *tables_out;
+
+@ The clean-up portion of the code can be left empty, as all it does
+is close the output file, which can be left to the operating system but
+we take care of it ourselves to keep our code `clean'\footnote{In case
+the reader has not noticed yet, this is a weak attempt at humor to
+break the monotony of going through the lines of \CTANGLE'd code}.
+@<Clean up@>=
+ fclose( tables_out );
+
+@ There is a descriptor controlling the output of the program as
+a whole. The code below is an example of a literate programming
+technique that will be used repeatedly to maintain large structures
+that can grow during the course of the program design. Note that the
+name of each table is only mentioned once, the rest of the code is
+generic.
+
+Technically speaking, all of this can be done with \Cee\ preprocessor
+macros of moderate complexity, taking advantage of its expansion rules
+but it is not nearly as transparent as the \CWEB\ approach.
+@<Local variable and type declarations@>=
+ struct output_d {
+
+ @<Output descriptor fields@>@;
+
+ };
+
+ struct output_d output_desc = { @<Default outputs@> };
+
+@ To declare each table field in the global output descriptor, all one has to do
+is to provide a general pattern.
+@<Output descriptor fields@>=
+#define _register_table_d(name) @[bool output_##name:1;@]
+ @<Table names@>@;
+#undef _register_table_d
+
+@ Same for assigning default values to each field.
+@<Default outputs@>=
+#define _register_table_d(name) @[.output_##name = 0,@] /* do not output any tables by default */
+ @<Table names@>@;
+#undef _register_table_d
+
+@ Each descriptor is populated using the same approach.
+@<Local variable and type declarations@>=
+#define _register_table_d(name) @[struct table_d name##_desc = {0};@]
+ @<Table names@>@;
+#undef _register_table_d
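+
+@ Outside of \CWEB\ the same trick is the familiar `X-macro' idiom of
+plain \Cee. A minimal self-contained sketch (the names below are
+invented for the illustration, with an ordinary macro standing in for
+the \CWEB\ section that holds the name list):
+
+ #include <stdbool.h>
+
+ #define TABLE_NAMES _register_table_d(yyr1) _register_table_d(yyr2)
+
+ struct output_d { /* one flag per registered table */
+ #define _register_table_d(name) bool output_##name:1;
+ TABLE_NAMES
+ #undef _register_table_d
+ };
+
+ struct output_d output_desc = { /* all tables off by default */
+ #define _register_table_d(name) .output_##name = 0,
+ TABLE_NAMES
+ #undef _register_table_d
+ };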
+
+@ The flag \.{--optimize-tables} affects the way tables are output.
+@<Global variables...@>=
+ static int optimize_tables = 0;
+
+@ It is set using the command line option below.
+@<Raw option list@>=
+ _register_option("optimize-tables", no_argument, &optimize_tables, 1, "")@;
+
+@ The reason to implement the table output routine as a macro is to avoid
+writing separate functions for tables of different types of data
+(strings as well as integers). The output is controlled by each table's
+{\it descriptor\/} defined below. A more sophisticated approach is
+possible but this code is merely a `patch' so we are not after full
+generality\footnote{A somewhat cleaner way to achieve the same effect
+is to use the \.{\_Generic} facility of \Cee11.}.
+
+@d output_table(table_desc, table_name, stream)
+ if ( output_desc.output_##table_name ) {
+
+ int i, j = 0;
+
+ if ( optimize_tables ) {
+
+ fprintf( stream, "\\setoptopt{%s}%%\n", table_desc.name );
+
+ if ( !table_desc.optimized_numeric ) {
+
+ fprintf( stream, "\\beginoptimizednonnumeric{%s}%%\n", table_desc.name );
+
+ }
+
+ for( i = 0; i < sizeof(table_name)/sizeof(table_name[0]) - 1; i++) {
+
+ if ( table_desc.formatter ) {
+
+ table_desc.formatter( stream, i );
+
+ } else {
+
+ fprintf( stream, table_desc.optimized_numeric, table_desc.name, i, table_name[i] );
+
+ }
+
+ }
+
+ if ( table_desc.formatter ) {
+
+ table_desc.formatter( stream, -i );
+
+ } else {
+
+ fprintf( stream, table_desc.optimized_numeric, table_desc.name, i, table_name[i] );
+
+ }
+
+ if ( table_desc.cleanup ) {
+
+ table_desc.cleanup( &table_desc );
+
+ }
+
+ } else {
+
+ fprintf( stream, table_desc.preamble, table_desc.name );
+
+ for( i = 0; i < sizeof(table_name)/sizeof(table_name[0]) - 1; i++) {
+
+ if ( table_desc.formatter ) {
+
+ j += table_desc.formatter( stream, i );
+
+ } else {
+
+ if ( table_name[i] ) {
+
+ j += fprintf( stream, table_desc.separator, table_name[i] );
+
+ } else {
+
+ j += fprintf( stream, "%s", table_desc.null );
+
+ }
+
+ }
+
+ if ( j > MAX_PRETTY_LINE && table_desc.prettify ) {
+
+ fprintf( stream, "\n" );
+ j = 0;
+
+ }
+ }
+
+ if ( table_desc.formatter ) {
+
+ table_desc.formatter( stream, -i );
+
+ } else {
+
+ if ( table_name[i] ) {
+
+ fprintf( stream, table_desc.postamble, table_name[i] );
+
+ } else {
+
+ fprintf( stream, "%s", table_desc.null_postamble );
+
+ }
+
+ }
+
+ if ( table_desc.cleanup ) {
+
+ table_desc.cleanup( &table_desc );
+
+ }
+ }
+ }
+
+@<Global variables and types@>=
+ struct table_d {
+
+ @<Generic table descriptor fields@>@;
+
+ };
+
+@ @<Generic table descriptor fields@>=
+ char *name;
+ char *preamble;
+ char *separator;
+ char *postamble;
+ char *null_postamble;
+ char *null;
+ char *optimized_numeric;
+ bool prettify;
+ int (*formatter)( FILE *, int );
+ void (*cleanup)( struct table_d * );
+
+@ Tables are output first. The action output code must come last since
+it changes the values of the tables to achieve its goals. Again, a
+different approach is possible, one that saves the data first, but
+simplicity was deemed more important than total generality at this
+point.
+@<Perform output@>=
+ @<Output all tables@>@;
+
+@ One more application of the `gather the names first, then process' technique.
+@<Output all tables@>=
+#define _register_table_d(name) @[output_table(name##_desc, name, tables_out);@]
+ @<Table names@>@;
+#undef _register_table_d
+
+@ Tables will be output by each driver. Placeholder here, for
+\CWEAVE's peace of mind.
+@<Table names@>=
+
+@ Action output invokes a totally new level of dirty code. If tables,
+constants, and tokens are just data structures, actions are executable
+commands. We can only hope to cycle through all the actions, which is
+enough to successfully use \bison\ and \flex\ to generate \TeX. The
+|switch| statement containing the actions is embedded in the parser
+function so to get access to each action one has to coerce |yyparse()| to
+jump to each case. Here is where we need the table manipulation. The
+appropriate code is highly specific to the program used (since \bison\
+and \flex\ parsing and scanning functions had to be `reverse
+engineered' to make them do what we want),
+so at this point we simply declare the options controlling the level
+of detail and the type of actions output.
+@<Global variables...@>=
+ static int bare_actions = 0; /* |static| to keep the name local, and |int| to pacify the compiler:
+ the option table expects an |int *| flag with a constant initializer of a compatible type */
+ static int optimize_actions = 0;
+
+@ The first of the following options allows one to output an action switch without the
+actions themselves. It is useful when one needs to output a \TeX\
+parser for a grammar file that is written in \Cee. In this case it
+will be impossible to cycle through actions (as no setup code has been
+executed), so the parser invocation is omitted.
+
+The second option splits the action switch into several macros to speed up
+the processing of the action code.
+
+The last argument of the `flexible' macro below is supposed to be an
+extended description of each option which can be later utilized by a
+|usage()| function.
+@<Raw option list@>=
+ _register_option("bare-actions", no_argument, &bare_actions, 1, "")@;
+ _register_option("optimize-actions", no_argument, &optimize_actions, 1, "")@;
+
+@ The rest of the action output code mimics that for table output, starting with
+the descriptor. To make the output format more flexible, this
+descriptor should probably be turned into a specialized routine.
+@<Global variables and types@>=
+ struct action_d {
+
+ char *preamble;
+ char *act_setup;
+ char *act_suffix;
+ char *action1;
+ char *actionn;
+ char *postamble;
+ void (*print_rule)( int );
+ void (*cleanup)( struct action_d * );
+
+ };
+
+@ @<Output descriptor fields@>=
+ bool output_actions:1;
+
+@ Nothing is output by default, including actions.
+@<Default outputs@>=
+ @[@].output_actions = 0,@[@]@;
+
+@ @<Local variable and type declarations@>=
+ struct action_d action_desc = {0};
+
+@ Each function below outputs the \TeX\ code of the appropriate
+action when the action is `run' by the action output switch.
+The main concern in designing these functions is to make the
+code easier to look at. Further explanation is given in the grammar
+file. If the parser is doing its job, this is the only place where one
+would actually see these as functions (or, rather, macros).
+
+In compliance with paragraph 6.10.8(2)\footnote{[$\ldots$] {\it Any
+other predefined macro names shall begin with a leading underscore
+followed by an uppercase letter or a second underscore.}} of the \ISO\
+\Cee11 standard the names of these macros do not start with an
+underscore, since the first letter of \.{TeX} is
+uppercase\footnote{One might wonder why one of these functions is
+defined as a \CWEB\ macro while the other is put into the preamble `by
+hand'. It really makes no difference, however, the reason the second
+macro is defined explicitly is \CWEB's lack of awareness of `variadic'
+macros which produces undesirable typesetting artefacts.}.
+\def\TeXx{\hbox{\.{TeX\_}}}
+\def\TeXa{\hbox{\.{TeXa}}}
+\def\TeXb{\hbox{\.{TeXb}}}
+\def\TeXf{\hbox{\.{TeXf}}}
+\def\TeXfo{\hbox{\.{TeXfo}}}
+\def\TeXao{\hbox{\.{TeXao}}}
+\def\TeXxx{\hbox{\.{TeX\_\_}}}
+@s TeX__ TeX
+@d TeX_( string ) fprintf( tables_out, " %s%%\n", string )
+@d TeXb( string ) TeX_( string )
+@d TeXa( string ) TeX_( string )
+@d TeXf( string ) TeX_( string )
+@d TeXfo( string ) TeX_( string )
+@d TeXao( string ) TeX_( string )
+
+@q \CWEB\ is not aware of variadic macros, so this has to be done the old way@>
+@<\Cee\ preamble@>=
+#define TeX__( string, ... ) @[fprintf( tables_out, " " string "%s\n", __VA_ARGS__, "%" )@]
+
+@ We begin with a few macros to facilitate the output
+of tables in the format that \TeX\ can understand. As there is no
+perfect way to represent an array in \TeX, a rather weak compromise
+was settled upon. Further explanation of this choice is given in the \TeX\
+file that implements the \TeX\ parser for the \bison\ input grammar.
+
+@d tex_table_generic(table_name)
+ table_name##_desc.preamble = "\\newtable{%s}{%%\n";
+ table_name##_desc.separator = "%d\\or ";
+ table_name##_desc.postamble = "%d}%%\n";
+ table_name##_desc.null_postamble = "0}%%\n";
+ table_name##_desc.null = "0\\or ";
+ table_name##_desc.optimized_numeric = "\\expandafter\\def\\csname %s\\parsernamespace %d\\endcsname{%d}%%\n";
+ table_name##_desc.prettify = true;
+ table_name##_desc.formatter = NULL;
+ table_name##_desc.cleanup = NULL;
+ output_desc.output_##table_name = 1;
+
+@d tex_table(table_name)
+ tex_table_generic(table_name);
+ table_name##_desc.name = #table_name;
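+
+@ To illustrate, a three element numeric table $\{0,3,1\}$ registered
+under the (made-up) name \.{foo} would come out in the default,
+non-optimized, mode as \.{\\newtable\{foo\}\{\%} followed by
+\.{0\\or 3\\or 1\}\%} (the final entry is attached by the postamble),
+while the optimized mode turns every entry into its own
+\.{\\expandafter\\def\\csname foo\\parsernamespace}~$n$\.{\\endcsname}
+definition.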
+
+@ An approach paralleling the table output
+scheme is taken with constants. Since constants are \Cee\ {\it
+macros\/} one has to be careful to avoid the temptation of using
+constant {\it names\/} directly as names for fields in
+structures. They will simply be replaced by the constants'
+values. When the names are concatenated with other tokens, however,
+the \Cee\ preprocessor postpones the macro expansion until the
+concatenation is complete (see clauses 6.10.3.1, 6.10.3.2, and
+6.10.3.3 of the \ISO\ \Cee\ Standard, \cite[ISO/C11]). Unless the result of the
+concatenation is still expandable, the expansion will halt.
+@<Global variables and types@>=
+ struct const_d {
+
+ char *format;
+ char *name;
+
+ };
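+
+@ A short experiment (not part of the program) shows the difference:
+
+ #define YYFINAL 12
+ #define declare_desc(c_name) int c_name##_desc;
+
+ declare_desc(YYFINAL) /* pastes first: declares |int YYFINAL_desc;| */
+ /* whereas a plain |int YYFINAL;| would expand to |int 12;| and fail */
+
+The operand of \.{\#\#} is not macro expanded before pasting, and since
+the resulting \.{YYFINAL\_desc} is not itself a macro, the expansion
+halts there, leaving a perfectly usable identifier.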
+
+@ @<Local variable and type declarations@>=
+#define _register_const_d(c_name) @[struct const_d c_name##_desc;@]
+ @<Constant names@>@;
+#undef _register_const_d
+
+@ @<Output descriptor fields@>=
+#define _register_const_d(c_name) @[bool output_##c_name:1;@]
+ @<Constant names@>@;
+#undef _register_const_d
+
+@ @<Default outputs@>=
+#define _register_const_d(c_name) @[@[@].output_##c_name = 0,@[@]@]
+ @<Constant names@>@;
+#undef _register_const_d
+
+@ @<Perform output@>=
+ fprintf( tables_out, "%%\n%% constant definitions\n%%\n" );
+ @<Output constants@>@;
+
+@ @<Output constants@>=
+{
+
+ int any_constants = 0;
+#define _register_const_d(c_name) \
+ \
+ if ( output_desc.output_##c_name ) { \
+ const_out( tables_out, c_name##_desc, c_name)@; \
+ any_constants = 1; \
+ }
+
+ @<Constant names@>@;
+
+#undef _register_const_d
+
+ if ( any_constants ); /* this is merely a placeholder statement */
+
+}
+
+@ Constants are very driver specific, so to make \CWEAVE\ happy $\ldots$
+ @<Constant names@>=
+
+@ A macro to help with constant output.
+@d const_out(stream, c_desc, c_name) fprintf(stream, c_desc.format, c_desc.name, c_name);
+
+@ Action switch output routines modify the automata tables and
+therefore have to be output last. Since action output is highly
+automaton specific, we leave this section blank here, to pacify
+\CWEAVE\ in case this file is typeset by itself.
+@<Output action switch, if any@>=
+
+@*2 Error codes.
+
+@<Global variables and types@>=
+ enum err_codes{ @<Error codes@>@, LAST_ERROR };
+
+@ @<Error codes@>=
+ NO_MEMORY, BAD_STRING, BAD_MIX_FORMAT,@[@]
+
+@ A lot more care is necessary to output the token table. A number of precautions are taken
+to ensure that a maximum possible range of names can be passed safely to \TeX. This involves some
+manipulation of \.{\\catcode}'s and control characters. The
+complicated part is left to \TeX\ so the output code can be kept
+simple. The helper function below is used to `combine' two strings.
+
+@d MAX_PRETTY_LINE 100
+
+@<Auxiliary function declarations@>=
+ char *mix_string( char *format, ... );
+
+@ @<Auxiliary function definitions@>=
+ char *mix_string( char *format, ... ) {
+
+ char *buffer;
+ size_t size = 0;
+ int length = 0;
+ int written = 0;
+ char *formatp = format;
+ va_list ap, ap_save;
+
+ va_start( ap, format );
+ va_copy( ap_save, ap );
+
+ size = strnlen( format, MAX_PRETTY_LINE * 5 );
+
+ if ( size >= MAX_PRETTY_LINE * 5 ) {
+
+ fprintf( stderr, "%s: runaway string?\n", __func__ );
+ exit(BAD_STRING);
+
+ }
+
+ while ( (formatp = strstr( formatp, "%" )) ) {
+
+ switch( formatp[1] ) {
+
+ case 's':@;
+ length = strnlen( va_arg( ap, char * ), MAX_PRETTY_LINE * 5 );
+
+ if ( length >= MAX_PRETTY_LINE * 5 ) {
+
+ fprintf( stderr, "%s: runaway string?\n", __func__ );
+ exit(BAD_STRING);
+
+ }
+
+ size += length;
+ size -= 2;
+ formatp++;
+ break;
+
+ case '%':@;
+ size--;
+ formatp += 2;
+ break;
+
+ default:
+
+ printf( "%s: cannot handle %%%c in mix string format\n", __func__, formatp[1] );
+ exit( BAD_MIX_FORMAT );
+
+ }
+
+ }
+
+ buffer = (char *)malloc( sizeof(char) * size + 1 );
+
+ if ( buffer ) {
+
+ written = vsnprintf( buffer, size + 1, format, ap_save );
+
+ if ( written < 0 || written > size ) {
+
+ fprintf( stderr, "%s: runaway string?\n", __func__ );
+ exit(BAD_STRING);
+
+ }
+
+ } else {
+
+ fprintf( stderr, "%s: failed to allocate memory for the output string\n", __func__ );
+ exit(NO_MEMORY);
+
+ }
+
+ va_end( ap );
+ va_end( ap_save );
+
+ return buffer;
+
+ }
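+
+@ A typical use (a sketch; the function only understands \.{\%s} and
+\.{\%\%} and bails out on any other conversion):
+
+ char *s = mix_string( "%% %s and %s\n", "tables", "constants" );
+ fprintf( tables_out, "%s", s ); /* outputs `% tables and constants' */
+ free( s );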
+
+@*2Initial setup. Depending on the output mode (right now only \TeX\
+and `tokens only', the latter in the \bison\ `driver', are supported), the format of each
+table, action field, and token has to be set up.
+
+@<Local variable and type declarations@>=
+ enum output_mode {@<Output modes@>@, LAST_OUT};
+
+@ And to calm down \CWEAVE\ $\ldots$
+@<Output modes@>=
+
+@ \TeX\ is the main output mode.
+@<Establish defaults@>=
+ enum output_mode mode = TEX_OUT;
+
+@*2Command line processing. This program uses a standard way of parsing the command
+line, based on |getopt_long|. At the heart of the setup are the array below with a
+couple of supporting variables.
+
+@<Outer definitions@>=
+#include <unistd.h>
+#include <getopt.h>
+#include <string.h>
+
+@ @<Local variable and type declarations@>=
+ const char *usage = "%s [options] output_file\n";
+
+@ @<Command line processing variables@>=
+ int c, option_index = 0;@#
+
+ enum higher_options{NON_OPTION = 0xFF, @<Higher index options@>@, LAST_HIGHER_OPTION};
+
+ static struct option long_options[] = { @/@[
+ @<Long options array@>@;@/
+ {0, 0, 0, 0} @]
+ };@#
+
+@ The main loop of the command line option processing follows. This
+can be used as a template for setting up the option processing. The
+specific cases are added in the course of adding new features.
+
+@<Process command line options@>=
+ opterr = 0; /* we do our own error reporting */
+
+ FOREVER {
+
+ c = getopt_long (argc, argv, ":" @<Short option list@>, long_options, &option_index);
+
+ if (c == -1) break;
+
+ switch (c) {
+
+ case 0:@; /* it is a flag, the name is kept in |long_options[option_index].name|,
+ and the value can be found in |long_options[option_index].val| */
+ break;
+
+ @t}\4{@>@<Cases affecting the whole program@>;
+ @t}\4{@>@<Cases involving specific modes@>;
+
+ case '?':@;
+ fprintf (stderr, "Unknown option: `%s', see `Usage' below\n\n", argv[optind - 1]);
+ fprintf(stderr, usage, argv[0]);
+ exit(1);
+ break;
+
+ case ':':@;
+ fprintf (stderr, "Missing argument for `%s'\n\n", argv[optind - 1]);
+ fprintf(stderr, usage, argv[0]);
+ exit(1);
+ break;
+
+ default:@;
+ printf ("warning: feature `%c' is not yet implemented\n", c);
+ }
+
+ }
+
+ if (optind >= argc)
+ {
+
+ fprintf( stderr, "No output file specified!\n" );
+
+ } else {
+
+ tables_out = fopen( argv[optind++], "w" );
+
+ }
+
+ if (optind < argc)
+ {
+
+ printf ("script files to be loaded: ");
+ while (optind < argc)
+ printf ("%s ", argv[optind++]);
+ putchar ('\n');
+
+ }
+
+@ @<Long options array@>=
+#define _register_option(name, arg_flag, loc, val, exp) @[{name, arg_flag, loc, val},@[@]@]
+ @<Raw option list@>@;
+#undef _register_option
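+
+@ For example, the registration of \.{--optimize-tables} seen earlier
+expands here into the array entry
+
+ {"optimize-tables", no_argument, &optimize_tables, 1},
+
+which lets |getopt_long| set the flag directly, so no case label is
+needed in the |switch| above.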
+
+@ In addition to spelling out the full command line option name (such
+as \.{--help}) |getopt_long| gives the user a choice of using a
+shortcut (say, \.{-h}). As individual options are treated in drivers
+themselves, there are no shortcuts to supply at this point. We leave
+this section (and a number of others) empty to be filled in with the
+driver specific code to pacify \CWEAVE.
+
+@<Short option list@>=
+
+@ Some options have one-letter `shortcuts', whereas others only exist
+in `fully spelled-out' form. To easily keep track of the latter, a
+special enumerated list is declared. To add to this list, simply add
+to the \CWEB\ section below.
+@<Higher index options@>=
+
+@ @<Cases affecting the whole program@>=
+
+@ @<Cases involving specific modes@>=
+
diff --git a/support/splint/cweb/fk.w b/support/splint/cweb/fk.w
new file mode 100644
index 0000000000..04685ff9aa
--- /dev/null
+++ b/support/splint/cweb/fk.w
@@ -0,0 +1,510 @@
+@q Copyright 2012-2014, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
+@*1 \flex\ specific routines. The output of the scanner automaton
+follows steps similar to the ones taken during the parser output.
+The major difference is in the output of actions and constants.
+@*2 Tables.
+As in the case of a parser we start with all the table names.
+@<Scanner table names@>=
+ _register_table_d(yy_accept)@;
+ _register_table_d(yy_ec)@;
+ _register_table_d(yy_meta)@;
+ _register_table_d(yy_base)@;
+ _register_table_d(yy_def)@;
+ _register_table_d(yy_nxt)@;
+ _register_table_d(yy_chk)@;
+
+@*2Actions. The scanner function, |yylex()|, has been reverse
+engineered to execute all portions of
+the action code. The method chosen here makes sure that none of the
+tables gets written past its last element.
+@<Variables and types local to the scanner driver@>=
+ int max_yybase_entry = 0;
+ int max_yyaccept_entry = 0;
+ int max_yynxt_entry = 0;
+ int max_yy_ec_entry = 0;
+
+@ The `exotic' scanner constants treated below are the constants used
+to control the scanner code itself. Unfortunately they are not given
+any names that can be used by the `driver' to output them in a simple
+way.
+@<Compute exotic scanner constants@>=
+
+ {
+ int i;
+
+ for ( i = 0; i < sizeof( yy_base )/sizeof( yy_base[0] ); i++ ) {
+
+ if ( yy_base[i] > max_yybase_entry ) {
+
+ max_yybase_entry = yy_base[i];
+
+ }
+
+ }
+
+ for ( i = 0; i < sizeof( yy_nxt )/sizeof( yy_nxt[0] ); i++ ) {
+
+ if ( yy_nxt[i] > max_yynxt_entry ) {
+
+ max_yynxt_entry = yy_nxt[i];
+
+ }
+
+ }
+
+ for ( i = 0; i < sizeof( yy_accept )/sizeof( yy_accept[0] ); i++ ) {
+
+ if ( yy_accept[i] > max_yyaccept_entry ) {
+
+ max_yyaccept_entry = yy_accept[i];
+
+ }
+ }
+
+ for ( i = 0; i < sizeof( yy_ec )/sizeof( yy_ec[0] ); i++ ) {
+
+ if ( yy_ec[i] > max_yy_ec_entry ) {
+
+ max_yy_ec_entry = yy_ec[i];
+
+ }
+
+ }
+
+ }
+
+@ @<Output scanner actions@>=
+ if ( output_desc.output_actions ) {
+
+ int i, j;
+ yyscan_t fake_scanner;
+
+ fprintf( tables_out, "%s", action_desc.preamble );
+
+ if ( !bare_actions ) {
+
+ if ( yylex_init( &fake_scanner ) ) {
+
+ printf( "Cannot initialize the scanner\n" );
+
+ }
+
+ yy_ec[0] = 0;
+ yy_base[1] = max_yybase_entry;
+ yy_chk[max_yybase_entry] = 1;
+ yy_nxt[max_yybase_entry] = 1;
+
+ }
+
+ for ( i = 1; i <= max_yyaccept_entry; i++ ) {
+
+ fprintf( tables_out, action_desc.act_setup, i );
+
+ if ( i == YY_END_OF_BUFFER ) {
+
+ fprintf( tables_out, " %% YY_END_OF_BUFFER\n%s\n", " \\yylexeofaction" );
+
+ } else {
+
+ fprintf( tables_out, "\n" );
+
+ if ( !bare_actions ) {
+
+ (( struct yyguts_t *)fake_scanner)->yy_hold_char = 0;
+ yy_accept[1] = i;
+ yylex( NULL, fake_scanner );
+
+ }
+ }
+
+ fprintf( tables_out, action_desc.act_suffix, i );
+
+ }
+
+ fprintf( tables_out, " %% end of file states:\n%s\n",
+ " %#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)"
+ );
+
+ if ( max_eof_state == 0 ) { /* in case the user has not declared any states */
+
+ max_eof_state = YY_STATE_EOF( INITIAL );
+
+ }
+
+ for ( ; i <= max_eof_state; i++ ) {
+
+ fprintf( tables_out, action_desc.act_setup, i );
+
+ if ( !bare_actions ) {
+
+ fprintf( tables_out, "\n" );
+
+ (( struct yyguts_t *)fake_scanner)->yy_hold_char = 0;
+ yy_accept[1] = i;
+ yylex( NULL, fake_scanner );
+
+ }
+
+ fprintf( tables_out, action_desc.act_suffix, i );
+
+ }
+
+ fprintf( tables_out, "%s", action_desc.postamble );
+
+ if ( action_desc.cleanup ) {
+
+ action_desc.cleanup( &action_desc );
+
+ }
+
+ }
+
+ @<Compute magic constants@>@;
+ @<Output states@>;
+ fprintf( tables_out, "\\constset{YYECMAGIC}{%d}%%\n", yy_ec_magic );
+ fprintf( tables_out, "\\constset{YYMAXEOFSTATE}{%d}%%\n", max_eof_state );
+
+@ @<Error codes@>=
+ BAD_SCANNER,@[@]
+
+@ @<Variables and types local to the scanner driver@>=
+ int yy_ec_magic;
+
+@ The `magic' constants are similar to the `exotic' ones mentioned
+above, except that the methods used to compute them rely on reverse
+engineering the scanner function. Since this changes the scanner
+tables, it has to be done after the `driver' has finished going through all
+the actions.
+@<Compute magic constants@>=
+ {
+ int i, j;
+ char fake_yytext[ YY_MORE_ADJ + 1 ];
+
+ yyscan_t yyscanner;
+ struct yyguts_t *yyg;
+
+ if ( yylex_init( &yyscanner ) ) {
+
+ printf( "Cannot initialize the scanner\n" );
+ exit( BAD_SCANNER );
+
+ }
+
+ yyg = (struct yyguts_t *)yyscanner;
+ yyg->yy_start = 0;
+ yy_set_bol(0);
+ yyg->yytext_ptr = fake_yytext;
+ yyg->yy_c_buf_p = yyg->yytext_ptr + 1 + YY_MORE_ADJ;
+
+ fake_yytext[YY_MORE_ADJ] = 0; /* |*yy_cp = 0;| */
+
+ yy_accept[0] = 0;
+ yy_base[0] = 0;
+
+ for ( i = 0; i < sizeof( yy_chk )/sizeof( yy_chk[0] ); i++ ) {
+
+ yy_chk[i] = 0;
+
+ }
+
+ for ( i = 0; i < sizeof( yy_nxt )/sizeof( yy_nxt[0] ); i++ ) {
+
+ yy_nxt[i] = i;
+
+ }
+
+ yy_ec_magic = yy_get_previous_state( yyscanner );
+
+ }
+
+@*2State names. There is no easy way to output the symbolic names for
+states, so this has to be done by hand while the actions are output. The
+state names are accumulated in a list structure and are printed out
+after the action output is complete.
+
+Note that parsing the scanner file is only partially helpful (even though the
+extended parser and scanner can recognize the \.{\%x} option). All that can
+be done is to output the state {\it names\/} but not their numerical
+values, since all such names are macros whose values are only
+known to the \flex\ generated scanner.
+
+@d Define_State( st_name, st_num ) do {
+
+ struct lexer_state_d *this_state;
+
+ this_state = malloc( sizeof(struct lexer_state_d) );
+ this_state->name = st_name;
+ this_state->value = st_num;
+ this_state->next = NULL;
+
+ if ( last_state ) {
+
+ last_state->next = this_state;
+ last_state = this_state;
+
+ } else {
+
+ last_state = state_list = this_state;
+
+ }
+
+ if ( YY_STATE_EOF( st_num ) > max_eof_state ) {
+
+ max_eof_state = YY_STATE_EOF( st_num );
+
+ }
+
+} while (0);
+
+@<Scanner variables and types for \Cee\ preamble@>=
+ int max_eof_state = 0;
+
+ struct lexer_state_d {
+
+ char *name;
+ int value;
+ struct lexer_state_d *next;
+
+ };
+
+ struct lexer_state_d *state_list = NULL;
+ struct lexer_state_d *last_state = NULL;
+
+@ @<Output states@>=
+ {
+
+ struct lexer_state_d *current_state;
+ struct lexer_state_d *next_state;
+
+ current_state = next_state = state_list;
+
+ if ( current_state ) {
+
+ fprintf( tables_out, "\\def\\setflexstates{%%\n"
+ " \\stateset{INITIAL}{%d}%%\n", INITIAL );
+
+ while ( current_state ) {
+
+ fprintf( tables_out, " \\stateset{%s}{%d}%%\n",
+ current_state->name, current_state->value);
+
+ current_state = current_state->next;
+
+ free( next_state );
+ next_state = current_state; /* the |name| field is not
+ deallocated because it is not
+ allocated on the heap */
+
+ }
+
+ fprintf( tables_out, "}%%\n%%\n" );
+
+ }
+
+ }
+
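+@ For illustration, a hypothetical scanner declaring a single user state
+with \.{\%x SC\_COMMENT} would produce output along the lines of
+\.{\\def\\setflexstates\{\\stateset\{INITIAL\}\{0\}\\stateset\{SC\_COMMENT\}\{1\}\}}
+(modulo comment characters; the actual numerical values are assigned by
+\flex).
+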
+@*2Constants.
+@<Scanner constants@>=
+ _register_const_d(YY_END_OF_BUFFER_CHAR)@;
+ _register_const_d(YY_NUM_RULES)@;
+ _register_const_d(YY_END_OF_BUFFER)@;
+
+@*2Output modes.
+The output modes are the same as those in the parser driver with some minor
+changes.
+
+@*3Generic output. Generic output is not programmed yet.
+@<Scanner specific output modes@>=
+ GENERIC_OUT,@[@]
+
+@ @<Handle scanner output modes@>=
+ case GENERIC_OUT:@;
+ printf( "This mode is not supported yet\n" );
+ exit(0);
+ break;
+
+@*3\TeX~mode. The \TeX\ mode is the main focus of this software.
+@<Scanner specific output modes@>=
+ TEX_OUT,@[@]
+
+@ @<Handle scanner output modes@>=
+ case TEX_OUT:@;
+ @<Set up \TeX\ format for scanner tables@>@;
+ @<Set up \TeX\ format for scanner actions@>@;
+ @<Prepare \TeX\ format for scanner constants@>@;
+ break;
+
+@ @<Set up \TeX\ format for scanner tables@>=
+ tex_table_generic(yy_accept);
+ yy_accept_desc.name = "yyaccept";
+ tex_table_generic(yy_ec);
+ yy_ec_desc.name = "yyec";
+ tex_table_generic(yy_meta);
+ yy_meta_desc.name = "yymeta";
+ tex_table_generic(yy_base);
+ yy_base_desc.name = "yybase";
+ tex_table_generic(yy_def);
+ yy_def_desc.name = "yydef";
+ tex_table_generic(yy_nxt);
+ yy_nxt_desc.name = "yynxt";
+ tex_table_generic(yy_chk);
+ yy_chk_desc.name = "yychk";
+
+@ @<Set up \TeX\ format for scanner actions@>=
+
+ if ( optimize_actions ) {
+
+ action_desc.preamble = "%\n% the big switch\n%\n"@/
+ "\\catcode`\\/=0\\relax\n%\n"@/
+ "\\def\\yydoactionswitch#1{%%\n"@/
+ " \\let\\yylextail\\yylexcontinue\n"@/
+ " \\csname doflexaction\\number #1\\parsernamespace\\endcsname\n"@/
+ " \\yylextail\n"@;
+ "}\\stashswitch{yydoactionswitch}%\n";
+ action_desc.act_setup = "\n\\expandafter\\def\\csname doflexaction%d\\parsernamespace\\endcsname{%%\n"
+ " \\YYRULESETUP";
+ action_desc.act_suffix = "}%% end of rule %d\n";
+ action_desc.action1 = NULL;
+ action_desc.actionn = NULL;
+ action_desc.postamble = "\\catcode`\\/=12\\relax\n%\n";
+ action_desc.print_rule = NULL;
+ action_desc.cleanup = NULL;
+ output_desc.output_actions = 1;
+
+ } else {
+
+ action_desc.preamble = "%\n% the big switch\n%\n"@/
+ "\\catcode`\\/=0\\relax\n%\n"@/
+ "\\def\\yydoactionswitch#1{%%\n \\let\\yylextail\\yylexcontinue\n"@;
+ " \\ifcase#1\\relax\n";
+ action_desc.act_setup = " \\or\n"
+ " \\YYRULESETUP %% (rule %d) ";
+ action_desc.act_suffix = " %% end of rule %d\n";
+ action_desc.action1 = NULL;
+ action_desc.actionn = NULL;
+ action_desc.postamble = " \\else\n \\fi\n \\yylextail\n}\\stashswitch{yydoactionswitch}%\n\\catcode`\\/=12\\relax\n%\n";
+ action_desc.print_rule = NULL;
+ action_desc.cleanup = NULL;
+ output_desc.output_actions = 1;
+
+ }
+
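+@ Thus, with \.{--optimize-actions} in effect, each rule, say rule~3, is
+output as a separate macro along the lines of
+\.{\\expandafter\\def\\csname doflexaction3\\parsernamespace\\endcsname\{\\YYRULESETUP ...\}}
+and dispatched by name, whereas without the optimization all the actions
+become branches of a single \.{\\ifcase}. These renditions are mere
+sketches; the format strings above spell out the exact output.
+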
+@ \TeX\ constant output is another place where the techniques described above are applied. A few names
+are handled separately, because they contain underscores.
+\def\YYxENDxOFxBUFFERxCHARxdesc{\.{YY\_END\_OF\_BUFFER\_CHAR\_}\\{desc}}
+\def\YYxNUMxRULESxdesc{\.{YY\_NUM\_RULES\_}\\{desc}}
+\def\YYxENDxOFxBUFFERxdesc{\.{YY\_END\_OF\_BUFFER\_}\\{desc}}
+
+@s YY_END_OF_BUFFER_CHAR_desc TeX
+@s YY_NUM_RULES_desc TeX
+@s YY_END_OF_BUFFER_desc TeX
+
+@<Prepare \TeX\ format for scanner constants@>=
+#define _register_const_d(c_name) @[c_name##_desc.format = "\\constset{%s}{%d}%%\n"; \
+ c_name##_desc.name = #c_name; \
+ output_desc.output_##c_name = 1;@]
+ @<Scanner constants@>@;
+#undef _register_const_d
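+/* thus |_register_const_d(YY_NUM_RULES)| above expands into assignments to
+   |YY_NUM_RULES_desc.format| and |YY_NUM_RULES_desc.name|, and sets
+   |output_desc.output_YY_NUM_RULES| to |1| */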
+
+ YY_END_OF_BUFFER_CHAR_desc.name = "YYENDOFBUFFERCHAR";
+ YY_NUM_RULES_desc.name = "YYNUMRULES";
+ YY_END_OF_BUFFER_desc.name = "YYENDOFBUFFER";
+
+@ @<Output exotic scanner constants@>=
+ fprintf( tables_out, "\\constset{YYMAXREALCHAR}{%ld}%%\n", sizeof( yy_accept )/(sizeof( yy_accept[0] )) - 1 );
+ fprintf( tables_out, "\\constset{YYBASEMAXENTRY}{%d}%%\n", max_yybase_entry );
+ fprintf( tables_out, "\\constset{YYNXTMAXENTRY}{%d}%%\n", max_yynxt_entry );
+ fprintf( tables_out, "\\constset{YYMAXRULENO}{%d}%%\n", max_yyaccept_entry );
+ fprintf( tables_out, "\\constset{YYECMAXENTRY}{%d}%%\n", max_yy_ec_entry );
+
+@*2Command line options.
+We start with the most obvious option, the one begging for help.
+
+@<Higher index scanner specific options@>=
+ LONG_HELP,@[@]
+
+@ @<Scanner specific option list@>=
+ _register_option("help", no_argument, 0, LONG_HELP, "")@;
+
+@ @<Shortcuts for command line options affecting scanner output@>=
+ "h"
+
+@ @<Handle scanner output options@>=
+ case 'h': /* short help */@;
+ fprintf(stderr, "Usage: %s [options] output_file\n", argv[0]);
+ exit(0);
+ break; /* should not be needed */
+
+ case LONG_HELP:@;
+ fprintf(stderr, "%s [--mode=TeX:options] output_file outputs tables\n"
+ " and constants for a TeX scanner\n", argv[0]);
+ exit(0);
+ break; /* should not be needed */
+
+@ @<Scanner specific option list@>=
+ _register_option("debug", optional_argument, 0, 'b', "")@;
+ _register_option("mode", required_argument, 0, 'm', "")@;
+ _register_option("table-separator", required_argument, 0, 'z', "")@;
+
+ _register_option("format", required_argument, 0, 'f', "")@; /* name? */
+ _register_option("table", required_argument, 0, 't', "")@; /* specific table */
+ _register_option("constant", required_argument, 0, 'c', "")@; /* specific constant */
+ _register_option("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */
+ _register_option("token", required_argument, 0, 'n', "")@; /* specific token */
+ _register_option("run-scan", required_argument, 0, 'p', "")@; /* run the scanner */
+ _register_option("scan-file", required_argument, 0, 'i', "")@; /* input for the scanner */
+
+@ The string below is a list of short options.
+@<Shortcuts for command line options affecting scanner output@>=
+ "b::z:m:f:t:"
+
+@ A few options can be immediately discussed.
+@<Variables and types local to the scanner driver@>=
+ int debug_level = 0;
+ char *table_separator = "%s ";
+
+@ @<Handle scanner output options@>=
+ case 'b': /* debug (level) */@;
+ debug_level = optarg ? atoi(optarg) : 1;
+ break;
+
+ case 'm': /* output mode */@;
+ switch( optarg[0] ) {
+
+ case 'T':
+ case 't':@;
+ mode = TEX_OUT;
+ break;
+
+ case 'b':
+ case 'B':
+ case 'g':
+ case 'G':@;
+ mode = GENERIC_OUT;
+ break;
+
+ default:@;
+ break;
+
+ }
+ break;
+
+ case 'z':
+ table_separator = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) );
+ strcpy(table_separator, optarg);
+ break;
diff --git a/support/splint/cweb/lo.w b/support/splint/cweb/lo.w
new file mode 100644
index 0000000000..b28711423c
--- /dev/null
+++ b/support/splint/cweb/lo.w
@@ -0,0 +1,797 @@
+@q Copyright 2012-2014 Alexander Shibakov@>
+@q Copyright 2002-2014 Free Software Foundation, Inc.@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
+@*1 The scanner for grammar syntax.
+\ifx\parsernamespace\UNDEFINED
+ \input limbo.sty
+ \input grabstates.sty
+ \immediate\openout\stlist=lo_states.h
+\fi
+The fact that \bison\ has a relatively straightforward grammar is
+due to the sophistication of its scanner. The primary reason for this
+increased complexity is \bison's awareness
+of syntax variations in its input files. In addition to the grammar
+syntax, the scanner has to be able to deal with extended \Cee\ syntax
+inside \bison's actions.
+
+Since the names of the scanner {\it states\/} reside in the common
+namespace with other variables, in order to make the \TeX\ version of
+the scanner aware of the numerical values of the states, a special
+procedure is required. It is executed as part of \flex's user
+initialization code but the data for it has to be collected
+separately. The procedure is declared in the preamble section of the scanner.
+
+Below, we follow the same convention (of italicizing the original
+comments) as in the code for the parser.
+@(lo.ll@>=
+@G
+ @> @<Grammar lexer definitions@> @=
+%{@> @<Grammar lexer \Cee\ preamble@> @=%}
+ @> @<Grammar lexer options@> @=
+%%
+ @> @<Grammar token regular expressions@> @=
+%%
+@O
+void define_all_states( void ) {
+ @<Collect state definitions for the grammar lexer@>@;
+}
+@o
+@g
+
+@ It is convenient to abbreviate some commonly used subexpressions.
+@<Grammar lexer definitions@>=
+ @<Grammar lexer states@>@;
+@G
+letter [.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_]
+notletter [^.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_]{-}[%\{]
+id {letter}({letter}|[-0-9])*
+int [0-9]+
+@g
+
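+@ Note that \.{letter} (and hence \.{id}) includes the period, so that,
+for example, \.{api.pure} is matched as a {\it single\/} identifier;
+digits (and hyphens) may appear inside an identifier but not at its start.
+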
+@ {\it Zero or more instances of backslash-newline. Following \gcc, allow
+white space between the backslash and the newline}.
+@<Grammar lexer definitions@>=
+@G
+splice (\\[ \f\t\v]*\n)*
+@g
+
+@ {\it An equal sign, with optional leading whitespaces. This is used in some
+deprecated constructs}.
+@<Grammar lexer definitions@>=
+@G
+eqopt ([[:space:]]*=)?
+@g
+
+@ This is how the code for state value output is put inside the
+routine mentioned above. The state information is collected by a
+special small scanner that is coupled with the bootstrap parser. This
+way, all the necessary token information comes `hardwired' in the
+bootstrap parser, and the small scanner itself does not use any state
+manipulation and thus can get away without any state setup. It can,
+however, scan just enough of the \flex\ syntax to extract the state
+information from it (only the state {\it names\/} are needed) and
+output it in the form of a header file for the `real' lexer output
+`driver' to use.
+@<Collect state definitions for the grammar lexer@>=
+#define _register_name( name ) @[Define_State( #name, name )@]
+#include "lo_states.h"
+#undef _register_name
+
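+@ A hypothetical fragment of \.{lo\_states.h}, as produced by the
+bootstrap pass, would thus consist of lines such as
+\.{\_register\_name( SC\_YACC\_COMMENT )}, one per state declared below;
+the macro above turns each of them into the corresponding
+\.{Define\_State} invocation.
+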
+@ {\it A \Cee-like comment in directives/rules}.
+@<Grammar lexer states@>=
+@G
+%x SC_YACC_COMMENT
+@g
+
+@ {\it Strings and characters in directives/rules}.
+@<Grammar lexer states@>=
+@G
+%x SC_ESCAPED_STRING SC_ESCAPED_CHARACTER
+@g
+
+@ {\it An identifier was just read in directives/rules. Special state
+to capture the sequence `\.{identifier:}'}.
+@<Grammar lexer states@>=
+@G
+%x SC_AFTER_IDENTIFIER
+@g
+
+@ {\it \POSIX\ says that a tag must be both an id and a \Cee\ union member, but
+historically almost any character is allowed in a tag. We
+disallow \prodstyle{NUL}, as this simplifies our implementation. We match
+angle brackets in nested pairs: several languages use them for
+generics/template types}.
+@<Grammar lexer states@>=
+@G
+%x SC_TAG
+@g
+
+@ {\it
+\def\aterm{\item{\sqbullet}\ignorespaces}%
+\setbox0=\hbox{\sqbullet\enspace}%
+\parindent=0pt
+\advance\parindent by \wd0
+Four types of user code:
+\aterm prologue (code between \.{\%\{} and \.{\%\}} in the first section, before \prodstyle{\%\%});
+
+\aterm actions, printers, union, etc.\ (code between braces in the middle section);
+
+\aterm epilogue (everything after the second \prodstyle{\%\%});
+
+\aterm predicate (code between \.{\%?\{} and \.{\}} in the middle section).
+}%
+@<Grammar lexer states@>=
+@G
+%x SC_PROLOGUE SC_BRACED_CODE SC_EPILOGUE SC_PREDICATE
+@g
+
+@ {\it \Cee\ and \Cee++ comments in code}.
+@<Grammar lexer states@>=
+@G
+%x SC_COMMENT SC_LINE_COMMENT
+@g
+
+@ {\it Strings and characters in code}.
+@<Grammar lexer states@>=
+@G
+%x SC_STRING SC_CHARACTER
+@g
+
+@ Bracketed identifiers support.
+@<Grammar lexer states@>=
+@G
+%x SC_BRACKETED_ID SC_RETURN_BRACKETED_ID
+@g
+
+@ @<Grammar lexer \Cee\ preamble@>=
+
+#include <stdint.h>
+#include <stdbool.h>
+
+@ The code for the generated scanner is highly dependent on the options
+supplied. Most of the options below are essential for the scheme
+adopted in this package to work.
+@<Grammar lexer options@>=
+@G
+%option bison-bridge
+%option noyywrap nounput noinput reentrant
+%option noyy_top_state
+%option debug
+%option stack
+%option outfile="lo.c"
+@g
+
+@*2 Tokenizing with regular expressions.
+Here is a full collection of regular expressions employed by the scanner.
+@<Grammar token regular expressions@>=
+ @<Scan grammar white space@>@;
+ @<Scan \flex\ directives and options@>@;
+ @<Scan \bison\ directives@>@;
+ @<Do not support zero characters@>@;
+ @<Scan after an identifier, check whether a colon is next@>@;
+ @<Scan bracketed identifiers@>@;
+ @<Scan a Yacc comment@>@;
+ @<Scan a \Cee\ comment@>@;
+ @<Scan a line comment@>@;
+ @<Scan a \bison\ string@>@;
+ @<Scan a character literal@>@;
+ @<Scan a tag@>@;
+ @<Decode escaped characters@>@;
+ @<Scan user-code characters and strings@>@;
+ @<Strings, comments etc.\ found in user code@>@;
+ @<Scan code in braces@>@;
+ @<Scan prologue@>@;
+ @<Scan the epilogue@>@;
+ @<Add the scanned symbol to the current string@>@;
+
+@ @<Scan grammar white space@>=
+@G
+<INITIAL,SC_AFTER_IDENTIFIER,SC_BRACKETED_ID,SC_RETURN_BRACKETED_ID>
+{
+ /* {\it Comments and white space.} */
+ "," {@> @[TeX_( "/yycomplain{stray `,' treated as white space}/yylexnext" );@]@=}
+ [ \f\n\t\v] |
+ "//".* {@> @[TeX_( "/yylexnext" );@]@=}
+@= "/*" {@> @[TeX_( "/contextstate/YYSTART /yyBEGIN{SC_YACC_COMMENT}/yylexnext" );@]@=}@>@/
+ /* {\it |@[#line@]| directives are not documented, and may be withdrawn or modified in future versions of \bison.} */
+ ^"#line "{int}(" \"".*"\"")?"\n" {@> @[TeX_( "/yylexnext" );@]@=}
+}
+@g
+
+@ {\it For directives that are also command line options, the regex must be
+\.{"\%..."} after \.{"[-\_]"}'s are removed, and the directive must match the \.{--long}
+option name, with a single string argument. Otherwise, add exceptions
+to \.{../build-aux/cross-options.pl}}. For most options the scanner
+returns a pair of pointers as the value.
+
+@<Scan \bison\ directives@>=
+@G
+<INITIAL>
+{
+ "%binary" {@> @[TeX_( "/yylexreturnptr{PERCENT_NONASSOC}" );@]@=}
+ "%code" {@> @[TeX_( "/yylexreturnptr{PERCENT_CODE}" );@]@=}
+ "%debug" {@> @[@<Set \prodstyle{\%debug} flag@>@]@=}
+ "%default-prec" {@> @[TeX_( "/yylexreturnptr{PERCENT_DEFAULT_PREC}" );@]@=}
+ "%define" {@> @[TeX_( "/yylexreturnptr{PERCENT_DEFINE}" );@]@=}
+ "%defines" {@> @[TeX_( "/yylexreturnptr{PERCENT_DEFINES}" );@]@=}
+ "%destructor" {@> @[TeX_( "/yylexreturnptr{PERCENT_DESTRUCTOR}" );@]@=}
+ "%dprec" {@> @[TeX_( "/yylexreturnptr{PERCENT_DPREC}" );@]@=}
+ "%empty" {@> @[TeX_( "/yylexreturnptr{PERCENT_EMPTY}" );@]@=}
+ "%error-verbose" {@> @[TeX_( "/yylexreturnptr{PERCENT_ERROR_VERBOSE}" );@]@=}
+ "%expect" {@> @[TeX_( "/yylexreturnptr{PERCENT_EXPECT}" );@]@=}
+ "%expect-rr" {@> @[TeX_( "/yylexreturnptr{PERCENT_EXPECT_RR}" );@]@=}
+ "%file-prefix" {@> @[TeX_( "/yylexreturnptr{PERCENT_FILE_PREFIX}" );@]@=}
+ "%fixed-output-files" {@> @[TeX_( "/yylexreturnptr{PERCENT_YACC}" );@]@=}
+ "%initial-action" {@> @[TeX_( "/yylexreturnptr{PERCENT_INITIAL_ACTION}" );@]@=}
+ "%glr-parser" {@> @[TeX_( "/yylexreturnptr{PERCENT_GLR_PARSER}" );@]@=}
+ "%language" {@> @[TeX_( "/yylexreturnptr{PERCENT_LANGUAGE}" );@]@=}
+ "%left" {@> @[TeX_( "/yylexreturnptr{PERCENT_LEFT}" );@]@=}
+ "%lex-param" {@> @[@<Return lexer parameters@>@]@=}
+ "%locations" {@> @[@<Set \prodstyle{\%locations} flag@>@]@=}
+ "%merge" {@> @[TeX_( "/yylexreturnptr{PERCENT_MERGE}" );@]@=}
+ "%name-prefix" {@> @[TeX_( "/yylexreturnptr{PERCENT_NAME_PREFIX}" );@]@=}
+ "%no-default-prec" {@> @[TeX_( "/yylexreturnptr{PERCENT_NO_DEFAULT_PREC}" );@]@=}
+ "%no-lines" {@> @[TeX_( "/yylexreturnptr{PERCENT_NO_LINES}" );@]@=}
+ "%nonassoc" {@> @[TeX_( "/yylexreturnptr{PERCENT_NONASSOC}" );@]@=}
+ "%nondeterministic-parser" {@> @[TeX_( "/yylexreturnptr{PERCENT_NONDETERMINISTIC_PARSER}" );@]@=}
+ "%nterm" {@> @[TeX_( "/yylexreturnptr{PERCENT_NTERM}" );@]@=}
+ "%output" {@> @[TeX_( "/yylexreturnptr{PERCENT_OUTPUT}" );@]@=}
+ "%param" {@> @[@<Return lexer and parser parameters@>@]@=}
+ "%parse-param" {@> @[@<Return parser parameters@>@]@=}
+ "%prec" {@> @[TeX_( "/yylexreturnptr{PERCENT_PREC}" );@]@=}
+ "%precedence" {@> @[TeX_( "/yylexreturnptr{PERCENT_PRECEDENCE}" );@]@=}
+ "%printer" {@> @[TeX_( "/yylexreturnptr{PERCENT_PRINTER}" );@]@=}
+ "%pure-parser" {@> @[@<Set \prodstyle{\%pure-parser} flag@>@]@=}
+ "%require" {@> @[TeX_( "/yylexreturnptr{PERCENT_REQUIRE}" );@]@=}
+ "%right" {@> @[TeX_( "/yylexreturnptr{PERCENT_RIGHT}" );@]@=}
+ "%skeleton" {@> @[TeX_( "/yylexreturnptr{PERCENT_SKELETON}" );@]@=}
+ "%start" {@> @[TeX_( "/yylexreturnptr{PERCENT_START}" );@]@=}
+ "%term" {@> @[TeX_( "/yylexreturnptr{PERCENT_TOKEN}" );@]@=}
+ "%token" {@> @[TeX_( "/yylexreturnptr{PERCENT_TOKEN}" );@]@=}
+ "%token-table" {@> @[TeX_( "/yylexreturnptr{PERCENT_TOKEN_TABLE}" );@]@=}
+ "%type" {@> @[TeX_( "/yylexreturnptr{PERCENT_TYPE}" );@]@=}
+ "%union" {@> @[TeX_( "/yylexreturnptr{PERCENT_UNION}" );@]@=}
+ "%verbose" {@> @[TeX_( "/yylexreturnptr{PERCENT_VERBOSE}" );@]@=}
+ "%yacc" {@> @[TeX_( "/yylexreturnptr{PERCENT_YACC}" );@]@=}
+
+ /* {\it deprecated} */
+ "%default"[-_]"prec" {@> @[TeX_( "/yypdeprecated{\\%default-prec}" );@]@=}
+ "%error"[-_]"verbose" {@> @[TeX_( "/yypdeprecated{\\%define parse.error verbose}" );@]@=}
+ "%expect"[-_]"rr" {@> @[TeX_( "/yypdeprecated{\\%expect-rr}" );@]@=}
+ "%file-prefix"{eqopt} {@> @[TeX_( "/yypdeprecated{\\%file-prefix}" );@]@=}
+ "%fixed"[-_]"output"[-_]"files" {@> @[TeX_( "/yypdeprecated{\\%fixed-output-files}" );@]@=}
+ "%name"[-_]"prefix"{eqopt} {@> @[TeX_( "/yypdeprecated{\\%name-prefix}" );@]@=}
+ "%no"[-_]"default"[-_]"prec" {@> @[TeX_( "/yypdeprecated{\\%no-default-prec}" );@]@=}
+ "%no"[-_]"lines" {@> @[TeX_( "/yypdeprecated{\\%no-lines}" );@]@=}
+ "%output"{eqopt} {@> @[TeX_( "/yypdeprecated{\\%output}" );@]@=}
+ "%pure"[-_]"parser" {@> @[TeX_( "/yypdeprecated{\\%pure-parser}" );@]@=}
+ "%token"[-_]"table" {@> @[TeX_( "/yypdeprecated{\\%token-table}" );@]@=}
+
+ /* {\it Semantic predicate.} */
+ "%?"[ \f\n\t\v]*"{" {@> @[TeX_( "/yyBEGIN{SC_PREDICATE}/yylexnext" );@]@=}
+
+  "%"{id}|"%"{notletter}([[:graph:]])+ {@> @[@<Possibly complain about a bad directive@>@]@=}
+
+ "=" {@> @[TeX_( "/yylexreturnptr{EQUAL}" );@]@=}
+ "|" {@> @[TeX_( "/yylexreturnptr{PIPE}" );@]@=}
+ ";" {@> @[TeX_( "/yylexreturnptr{SEMICOLON}" );@]@=}
+
+ {id} {@> @[@<Prepare an identifier@>@]@=}
+ {int} {@> @[TeX_( "/edef/next{/yylval{/nx/anint{/the/yytext}" );@]@;
+ @> @[TeX_( "{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @> @[TeX_( "/yylexreturn{INT}" );@]@=}
+ 0[xX][0-9abcdefABCDEF]+ {@> @[TeX_( "/edef/next{/yylval{/nx/hexint{/the/yytext}" );@]@;
+ @> @[TeX_( "{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @> @[TeX_( "/yylexreturn{INT}" );@]@=}
+
+ /* {\it Identifiers may not start with a digit. Yet, don't silently accept \.{1FOO} as \.{1 FOO}.} */
+ {int}{id} {@> @[TeX_( "/yycomplain{invalid identifier: /the/yytext}" );@]
+ @> @[TeX_( "/yyerrterminate" );@]@=}
+
+ /* {\it Characters.} */
+ "'" {@> @[TeX_( "/yyBEGIN{SC_ESCAPED_CHARACTER}/yylexnext" );@]@=}
+
+ /* {\it Strings.} */
+ "\"" {@> @[TeX_( "/yyBEGIN{SC_ESCAPED_STRING}/yylexnext" );@]@=}
+
+ /* {\it Prologue.} */
+ "%{" {@> @[@<Start assembling prologue code@>@]@=}
+
+ /* {\it Code in between braces.} Originally preceded by \.{\\STRINGGROW} but it is omitted here. */
+ "{" {@> @[TeX_( "/lonesting/z@@/yyBEGIN{SC_BRACED_CODE}/yylexnext" );@]@=}
+
+ /* {\it A type.} */
+ "<*>" {@> @[TeX_( "/yylexreturnptr{TAG_ANY}" );@]@=}
+ "<>" {@> @[TeX_( "/yylexreturnptr{TAG_NONE}" );@]@=}
+ "<" {@> @[TeX_( "/lonesting=/z@@/yyBEGIN{SC_TAG}/yylexnext" );@]@=}
+
+ "%%" {@> @[@<Switch sections@>@]@=}
+ "[" {@> @[TeX_( "/let/bracketedidstr=/empty" );@]@;
+ @> @[TeX_( "/bracketedidcontextstate/YYSTART" );@]
+ @> @[TeX_( "/yyBEGIN{SC_BRACKETED_ID}/yylexnext" );@]@=}
+
+ <<EOF>> {@> @[TeX_( "/yyterminate% EOF in INITIAL" );@]@=}
+
+ [^\[%A-Za-z0-9_<>{}\"\'*;|=/, \f\n\t\v]+|. {@> @[@<Process a bad character@>@]@=}
+}
+@g
+
+@ Some additional constructs needed to typeset simple \flex\
+declarations. This is not part of the original \bison\ scanner.
+@<Scan \flex\ directives and options@>=
+@G
+<INITIAL>
+{
+ "%option" {@> @[TeX_( "/yylexreturnptr{FLEX_OPTION}" );@]@=}
+ "%x" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_X}" );@]@=}
+ "%s" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_S}" );@]@=}
+}
+@g
+
+@ We present the `bad character' code first, before going into the details
+of character matching in the rest of the lexer.
+@<Process a bad character@>=
+ @[TeX_( "/edef/next{/nx/csname lexspecial[/the/yytextpure]/nx/endcsname}" );@]@;
+ @[TeX_( "/expandafter/expandafter/expandafter/ifx/next/relax" );@]@;
+ @[TeX_( " /iftracebadchars" );@]@;
+ @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( " /yylexreturn{$undefined}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /expandafter/lexspecialchar/expandafter{/next}{/the/yyfmark}{/the/yysmark}/yylexnext" );@]@;
+ @[TeX_( "/fi" );@]@;
+
+@ @<Set \prodstyle{\%debug} flag@>=
+ @[TeX_( "/edef/next{/yylval{{parse.trace}{debug}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yylexreturn{PERCENT_FLAG}" );@]@;
+
+@ @<Return lexer parameters@>=
+ @[TeX_( "/edef/next{/yylval{{lex-param}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yylexreturn{PERCENT_PARAM}" );@]@;
+
+@ @<Set \prodstyle{\%locations} flag@>=
+ @[TeX_( "/edef/next{/yylval{{locations}{}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yylexreturn{PERCENT_FLAG}" );@]@;
+
+@ @<Return lexer and parser parameters@>=
+ @[TeX_( "/edef/next{/yylval{{both-param}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yylexreturn{PERCENT_PARAM}" );@]@;
+
+@ @<Return parser parameters@>=
+ @[TeX_( "/edef/next{/yylval{{parse-param}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yylexreturn{PERCENT_PARAM}" );@]@;
+
+@ @<Set \prodstyle{\%pure-parser} flag@>=
+ @[TeX_( "/edef/next{/yylval{{api.pure}{pure-parser}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yylexreturn{PERCENT_FLAG}" );@]@;
+
+@ @<Possibly complain about a bad directive@>=
+ @[TeX_( "/iftracebadchars" );@]@;
+ @[TeX_( " /yycomplain{invalid directive: /the/yytext}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yylexnext" );@]@;
+
+@ @<Prepare an identifier@>=
+ @[TeX_( "/edef/next{/yylval{/nx/idit{/the/yytextpure}{/the/yytext}" );@]@;
+ @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/let/bracketedidstr=/empty" );@]@;
+ @[TeX_( "/yyBEGIN{SC_AFTER_IDENTIFIER}/yylexnext" );@]@;
+
+@ @<Switch sections@>=
+ @[TeX_( "/advance/percentpercentcount/@@ne" );@]@;
+ @[TeX_( "/ifnum/percentpercentcount=/tw@@" );@]@;
+ @[TeX_( " /yyBEGIN{SC_EPILOGUE}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yylexreturnptr{PERCENT_PERCENT}" );@]@;
+
+@ @<Start assembling prologue code@>=
+ @[TeX_( "/edef/next{/postoks{{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yyBEGIN{SC_PROLOGUE}/yylexnext" );@]@;
+
+@ {\it Supporting \.{\\0} complexifies our implementation for no expected added value}.
+
+@<Do not support zero characters@>=
+@G
+<SC_ESCAPED_CHARACTER,SC_ESCAPED_STRING,SC_TAG>
+{
+ \0 {@> @[TeX_( "/yycomplain{invalid null character}/yylexnext" );@]@=}
+}
+@g
+
+@ @<Scan after an identifier, check whether a colon is next@>=
+@G
+<SC_AFTER_IDENTIFIER>
+{
+ "[" {@> @[@<Process the bracketed part of an identifier@>@]@=}
+ ":" {@> @[@<Process a colon after an identifier@>@]@=}
+ <<EOF>> {@> @[@<End the scan with an identifier@>@]@=}
+ . {@> @[@<Process a character after an identifier@>@]@=}
+}
+@g
+
+@ @<Process the bracketed part of an identifier@>=
+ @[TeX_( "/ifx/bracketedidstr/empty" );@]@;
+ @[TeX_( " /bracketedidcontextstate/YYSTART /yyBEGIN{SC_BRACKETED_ID}" );@]@;
+ @[TeX_( " /let/next=/yylexnext" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /ROLLBACKCURRENTTOKEN" );@]@;
+ @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{ID}}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/next" );@]@;
+
+@ @<Process a colon after an identifier@>=
+ @[TeX_( "/ifx/bracketedidstr/empty" );@]@;
+ @[TeX_( " /yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yylexreturn{ID_COLON}" );@]@;
+
+@ @<Process a character after an identifier@>=
+ @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@;
+ @[TeX_( "/ifx/bracketedidstr/empty" );@]@;
+ @[TeX_( " /yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yylexreturn{ID}" );@]@;
+
+@ @<End the scan with an identifier@>=
+ @[TeX_( "/ifx/bracketedidstr/empty" );@]@;
+ @[TeX_( " /yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@;
+ @[TeX_( "/yylexreturn{ID}" );@]@;
+
+@ @<Scan bracketed identifiers@>=
+@G
+<SC_BRACKETED_ID>
+{
+ <<EOF>> {@> @[@<Complain about unexpected end of file inside brackets@>@]@=}
+ {id} {@> @[@<Process bracketed identifier@>@]@=}
+ "]" {@> @[@<Finish processing bracketed identifier@>@]@=}
+ [^\].A-Za-z0-9_/ \f\n\t\v]+|. {@> @[@<Complain about improper identifier characters@>@]@=}
+}
+@g
+
+@ @<Process bracketed identifier@>=
+ @[TeX_( "/ifx/bracketedidstr/empty" );@]@;
+ @[TeX_( " /edef/bracketedidstr{/nx/idit{/the/yytextpure}" );@]@;
+ @[TeX_( " {/the/yytext}{/the/yyfmark}{/the/yysmark}}" );@]@;
+ @[TeX_( " /let/next=/yylexnext" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /def/next{/yycomplain{unexpected " );@]@;
+ @[TeX_( " identifier in bracketed name: /the/yytext}/yylexnext}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/next" );@]@;
+
+@ @<Finish processing bracketed identifier@>=
+ @[TeX_( "/yyBEGINr/bracketedidcontextstate" );@]@;
+ @[TeX_( "/ifx/bracketedidstr/empty" );@]@;
+ @[TeX_( " /def/next{/yycomplain{an identifier expected}/yylexnext}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /ifnum/bracketedidcontextstate=/yylexstate{INITIAL}/relax" );@]@;
+ @[TeX_( " /expandafter/yylval/expandafter{/bracketedidstr}" );@]@;
+ @[TeX_( " /let/bracketedidstr=/empty" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{BRACKETED_ID}}" );@]@;
+ @[TeX_( " /else" );@]@;
+ @[TeX_( " /let/next=/yylexnext" );@]@;
+ @[TeX_( " /fi" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/next" );@]@;
+
+@ @<Complain about improper identifier characters@>=
+ @[TeX_( "/yycomplain{invalid character(s) in bracketed name: /the/yytext}/yyerrterminate" );@]@;
+
+@ @<Complain about unexpected end of file inside brackets@>=
+ @[TeX_( "/yyBEGINr/bracketedidcontextstate" );@]@;
+ @[TeX_( "/yycomplain{unexpected end of file inside brackets}/yyerrterminate" );@]@;
+
+@ @<Scan bracketed identifiers@>=
+@G
+<SC_RETURN_BRACKETED_ID>
+{
+ . {@> @[@<Return a bracketed identifier@>@]@=}
+}
+@g
+
+@ @<Return a bracketed identifier@>=
+ @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@;
+ @[TeX_( "/expandafter/yylval/expandafter{/bracketedidstr}" );@]@;
+ @[TeX_( "/let/bracketedidstr=/empty" );@]@;
+ @[TeX_( "/yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/yylexreturn{BRACKETED_ID}" );@]@;
+
+@ {\it Scanning a Yacc comment. The initial \.{/*} is already eaten}.
+@<Scan a Yacc comment@>=
+@G
+<SC_YACC_COMMENT>
+{
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
+ @> @[TeX_( " a comment}/yyerrterminate" );@]@=}
+ "*/" {@> @[TeX_( "/yyBEGINr{/contextstate}/yylexnext" );@]@=}
+ .|\n {@> @[TeX_( "/yylexnext" );@]@=}
+}
+@g
+
+@ {\it Scanning a \Cee\ comment. The initial \.{/*} is already eaten}.
+@<Scan a \Cee\ comment@>=
+@G
+<SC_COMMENT>
+{
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
+ @> @[TeX_( " a comment}/yyerrterminate" );@]@=}
+ "*"{splice}"/" {@> @[TeX_( "/STRINGGROW/yyBEGINr/contextstate/yylexnext" );@]@=}
+}
+@g
+
+@ {\it Scanning a line comment. The initial \.{//} is already eaten}.
+@<Scan a line comment@>=
+@G
+<SC_LINE_COMMENT>
+{
+ <<EOF>> {@> @[TeX_( "/yyBEGINr/contextstate /ROLLBACKCURRENTTOKEN" );@]
+ @> @[TeX_( " /yylexnext" );@]@=}
+ "\n" {@> @[TeX_( "/STRINGGROW/yyBEGINr/contextstate /yylexnext" );@]@=}
+ {splice} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+}
+@g
+
+@ {\it Scanning a \bison\ string, including its escapes.
+The initial quote is already eaten}.
+@<Scan a \bison\ string@>=
+@G
+<SC_ESCAPED_STRING>
+{
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
+ @> @[TeX_( " a string}/yyerrterminate" );@]@=}
+ "\"" {@> @[@<Finish a \bison\ string@>@]@=}
+ "\n" {@> @[TeX_( "/yycomplain{unexpected end of line in " );@]
+ @> @[TeX_( " a string}/yyerrterminate" );@]@=}
+}
+@g
+
+@ @<Finish a \bison\ string@>=
+ @[TeX_( "/STRINGFINISH" );@]@;
+ @[TeX_( "/edef/next{/yylval{/nx/stringify{/the/laststring}" );@]@;
+ @[TeX_( "{/the/laststringraw}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/yylexreturn{STRING}" );@]@;
+
+@ {\it Scanning a \bison\ character literal, decoding its escapes.
+The initial quote is already eaten}.
+@<Scan a character literal@>=
+@G
+<SC_ESCAPED_CHARACTER>
+{
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
+ @> @[TeX_( " a literal}/yyerrterminate" );@]@=}
+ "'" {@> @[@<Return an escaped character@>@]@=}
+ "\n" {@> @[TeX_( "/yycomplain{unexpected end of line in " );@]
+ @> @[TeX_( " a literal}/yyerrterminate" );@]@=}
+}
+@g
+
+@ @<Return an escaped character@>=
+ @[TeX_( "/STRINGFINISH" );@]@;
+ @[TeX_( "/edef/next{/yylval{/nx/charit{/the/laststring}{/the/laststringraw}" );@]@;
+ @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/STRINGFREE" );@]@;
+ @[TeX_( "/yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/yylexreturn{CHAR}" );@]@;
+
+@ {\it Scanning a tag. The initial angle bracket is already eaten}.
+@<Scan a tag@>=
+@G
+<SC_TAG>
+{
+ ">" {@> @[@<Finish a tag@>@]@=}
+ ([^<>]|->)+ {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ "<" {@> @[@<Raise nesting level@>@]@=}
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@]
+ @> @[TeX_( " a literal}/yyerrterminate" );@]@=}
+}
+@g
+
+@ @<Finish a tag@>=
+ @[TeX_( "/advance/lonesting/m@@ne" );@]@;
+ @[TeX_( "/ifnum/lonesting</z@@" );@]@;
+ @[TeX_( " /STRINGFINISH" );@]@;
+ @[TeX_( " /edef/next{/yylval{/nx/tagit{/the/laststring}{/the/laststringraw}" );@]@;
+ @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( " /STRINGFREE" );@]@;
+ @[TeX_( " /yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{TAG}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /STRINGGROW/let/next=/yylexnext" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/next" );@]@;
+
+@ This rule is slightly different from the one in the original scanner. Since we do not perform |yyleng| computations,
+it makes sense to raise the nesting level one step at a time.
+@<Raise nesting level@>=
+ @[TeX_( "/STRINGGROW" );@]@;
+ @[TeX_( "/advance/lonesting/@@ne" );@]@;
+ @[TeX_( "/yylexnext" );@]@;
+
+@ @<Decode escaped characters@>=
+@G
+<SC_ESCAPED_STRING,SC_ESCAPED_CHARACTER>
+{
+ \\[0-7]{1,3} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\x[0-9abcdefABCDEF]+ {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\a {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\b {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\f {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\n {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\r {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\t {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\v {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+
+ /* {\it \.{\\\\[\\"\\'?\\\\]} would be shorter, but it confuses |xgettext|.} */
+ \\("\""|"'"|"?"|"\\") {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+
+ \\(u|U[0-9abcdefABCDEF]{4})[0-9abcdefABCDEF]{4} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+ \\(.|\n) {@> @[TeX_( "/yycomplain{invalid character after " );@]
+ @> @[TeX_( " /\\-escape: /the/yytext}/yylexnext" );@]@=}
+}
+@g
+
+@ @<Scan user-code characters and strings@>=
+@G
+<SC_CHARACTER,SC_STRING>
+{
+ {splice}|\\{splice}[^\n\[\]] {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=}
+}
+
+<SC_CHARACTER>
+{
+ "'" {@> @[TeX_( "/STRINGGROW /yyBEGINr{/contextstate}/yylexnext" );@]@=}
+ \n {@> @[TeX_( "/yycomplain{unexpected end of line instead of " );@]
+ @> @[TeX_( " a character}/yyerrterminate" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file instead of " );@]
+ @> @[TeX_( " a character}/yyerrterminate" );@]@=}
+}
+
+<SC_STRING>
+{
+ "\"" {@> @[TeX_( "/STRINGGROW /yyBEGINr{/contextstate}/yylexnext" );@]@=}
+ \n {@> @[TeX_( "/yycomplain{unexpected end of line instead of " );@]
+ @> @[TeX_( " a character}/yyerrterminate" );@]@=}
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file instead of " );@]
+ @> @[TeX_( " a character}/yyerrterminate" );@]@=}
+}
+@g
+
+@ @<Strings, comments etc.\ found in user code@>=
+@G
+<SC_BRACED_CODE,SC_PROLOGUE,SC_EPILOGUE,SC_PREDICATE>
+{
+ "'" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
+ @> @[TeX_( " /yyBEGIN{SC_CHARACTER}/yylexnext" );@]@=}
+ "\"" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
+ @> @[TeX_( " /yyBEGIN{SC_STRING}/yylexnext" );@]@=}
+ "/"{splice}"*" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
+ @> @[TeX_( " /yyBEGIN{SC_COMMENT}/yylexnext" );@]@=}
+ "/"{splice}"/" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@]
+ @> @[TeX_( " /yyBEGIN{SC_LINE_COMMENT}/yylexnext" );@]@=}
+}
+@g
+
+@ {\it Scanning some code in braces (actions, predicates). The
+initial \.{\{} is already eaten}.
+@<Scan code in braces@>=
+@G
+<SC_BRACED_CODE,SC_PREDICATE>
+{
+ "{"|"<"{splice}"%" {@> @[TeX_( "/STRINGGROW /advance/lonesting/@@ne /yylexnext" );@]@=}
+ "%"{splice}">" {@> @[TeX_( "/STRINGGROW /advance/lonesting/m@@ne /yylexnext" );@]@=}
+
+ /* {\it Tokenize \.{<<\%} correctly (as \.{<<} \.{\%}) rather than incorrectly (as \.{<} \.{<\%}).} */
+ "<"{splice}"<" {@> @[TeX_( "/STRINGGROW /yylexnext" );@]@=}
+  <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file " );@]
+ @> @[TeX_( " inside braced code}/yyerrterminate" );@]@=}
+}
+
+<SC_BRACED_CODE>
+{
+ "}" {@> @[@<Add closing brace to the braced code@>@]@=}
+}
+
+<SC_PREDICATE>
+{
+ "}" {@> @[@<Add closing brace to a predicate@>@]@=}
+}
+@g
+
+@ Unlike the original lexer, we do not return the closing brace as part of the
+braced code.
+
+@<Add closing brace to the braced code@>=
+ @[TeX_( "/advance/lonesting/m@@ne" );@]@;
+ @[TeX_( "/ifnum/lonesting</z@@" );@]@;
+ @[TeX_( " /STRINGFINISH" );@]@;
+ @[TeX_( " /edef/next{/yylval{{/the/laststring}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{BRACED_CODE}}" );@]@;
+ @[TeX_( " /yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /STRINGGROW" );@]@;
+ @[TeX_( " /let/next=/yylexnext" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/next" );@]@;
+
+@ @<Add closing brace to a predicate@>=
+ @[TeX_( "/advance/lonesting/m@@ne" );@]@;
+ @[TeX_( "/ifnum/lonesting</z@@" );@]@;
+ @[TeX_( " /STRINGFINISH" );@]@;
+ @[TeX_( " /edef/next{/yylval{{/the/laststring}{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( " /yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( " /def/next{/yylexreturn{BRACED_PREDICATE}}" );@]@;
+ @[TeX_( "/else" );@]@;
+ @[TeX_( " /STRINGGROW" );@]@;
+ @[TeX_( " /let/next=/yylexnext" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/next" );@]@;
+
+@ {\it Scanning some prologue: from \.{\%\{} (already scanned) to \.{\%\}}}.
+@<Scan prologue@>=
+@G
+<SC_PROLOGUE>
+{
+ "%}" {@> @[@<Finish braced code@>@]@=}
+ <<EOF>> {@> @[TeX_( "/yycomplain{unexpected end of file " );@]
+ @> @[TeX_( " inside prologue}/yyerrterminate" );@]@=}
+}
+@g
+
+@ @<Finish braced code@>=
+ @[TeX_( "/STRINGFINISH" );@]@;
+ @[TeX_( "/edef/next{/yylval{{/the/laststring}/the/postoks{/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/yylexreturn{PROLOGUE}" );@]@;
+
+@ {\it Scanning the epilogue (everything after the second \prodstyle{\%\%}, which
+has already been eaten)}.
+@<Scan the epilogue@>=
+@G
+<SC_EPILOGUE>
+{
+ <<EOF>> {@> @[@<Handle end of file in the epilogue@>@]@=}
+}
+@g
+
+@ @<Handle end of file in the epilogue@>=
+ @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@;
+ @[TeX_( "/STRINGFINISH" );@]@;
+ @[TeX_( "/yylval=/laststring" );@]@;
+ @[TeX_( "/yyBEGIN{INITIAL}" );@]@;
+ @[TeX_( "/yylexreturn{EPILOGUE}" );@]@;
+
+@ {\it By default, grow the string obstack with the input}.
+\ifbootstrapmode % only if this file is used to extract state information
+ \immediate\closeout\stlist
+\fi
+@<Add the scanned symbol to the current string@>=
+@G
+<SC_COMMENT,SC_LINE_COMMENT,SC_BRACED_CODE,SC_PREDICATE,SC_PROLOGUE,SC_EPILOGUE,
+ SC_STRING,SC_CHARACTER,SC_ESCAPED_STRING,SC_ESCAPED_CHARACTER>. |
+ <SC_COMMENT,SC_LINE_COMMENT,SC_BRACED_CODE,SC_PREDICATE,
+ SC_PROLOGUE,SC_EPILOGUE>\n {@> @[TeX_( "/STRINGGROW /yylexnext" );@]@=}
+@g
diff --git a/support/splint/cweb/mkeparser.w b/support/splint/cweb/mkeparser.w
new file mode 100644
index 0000000000..937a9e5498
--- /dev/null
+++ b/support/splint/cweb/mkeparser.w
@@ -0,0 +1,123 @@
+% Copyright 2012-2014, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+\input limbo.sty
+\input yy.sty
+
+\let\N\textN
+
+@**Parser tables. This is the main table output code. Its core comes from the Bison
+Sourcer(er?) (\.{bs.w}) which will be included as soon as this paragraph is
+over. Since the main function of this code is to {\it dump\/} tables
+produced by \bison, and a {\it bison\/} is a large buffalo-like
+animal, {\it and\/} the only reason it has to be done this way is
+the less than optimal choices of a few developers, feel free to
+interpret the acronym as something easier to remember.
+
+@ @<Table names@>=
+ @<Parser table names@>@;
+
+@ @<Constant names@>=
+ @<Parser constants@>@;
+
+@ @<Local variable and type declarations@>=
+ @<Variables and types local to the parser@>@;
+
+@ @<Establish defaults@>=
+ @<Parser defaults@>@;
+
+@ @<Auxiliary function declarations@>=
+ @<Helper functions declarations for for parser output@>@;
+
+@ @<Auxiliary function definitions@>=
+ @<Helper functions for parser output@>@;
+
+@ @<Short option list@>=
+ @<Shortcuts for command line options affecting parser output@>@;
+
+@ @<Raw option list@>=
+ @<Parser specific option list@>@;
+
+@ @<Higher index options@>=
+ @<Higher index parser specific options@>@;
+
+@ @<Cases affecting the whole program@>=
+ @<Handle parser output options@>@;
+
+@ @<Output descriptor fields@>=
+ @<Parser specific output descriptor fields@>@;
+
+@ @<Default outputs@>=
+ @<Parser specific default outputs@>@;
+
+@ @<Various output modes@>=
+ @<Handle parser related output modes@>@;
+
+@ @<Output modes@>=
+ @<Parser specific output modes@>@;
+
+@ @<Output constants@>=
+ @<Output parser constants@>@;
+
+@ @<Cases involving specific modes@>=
+ @<Configure parser output modes@>@;
+
+@ @<Perform output@>=
+ @<Output parser tokens@>@;
+
+@ @<Output action switch, if any@>=
+ @<Output parser semantic actions@>@;
+
+@ @<\Cee\ preamble@>=
+ @<Common code for \Cee\ preamble@>@;
+
+@i common.w
+@i bs.w
+
+@*1 Parser dependent settings.
+This is it for the core table output functions.
+To make all of this into working code, in this
+case lexing and error function declarations are supplied.
+@<Auxiliary function declarations@>=
+#ifndef HAS_SCANNER
+ int yylex(void);
+ int yyerror(void);
+#endif
+
+@ @<Auxiliary function definitions@>=
+#ifndef HAS_SCANNER
+ int yylex(void){}
+ int yyerror(void){}
+#endif
+
+@ \let\B\oldB % \Cee\ mode mixes all up
+@c
+
+@<\Cee\ preamble@>@;
+
+#include PARSER_FILE
+
+@<\Cee\ postamble@>@;
+
+@** Index (for {\tt \jobname}).
+\def\readcontents{%
+ {%
+ \acrofalse
+% \def\jobname{bparser}\input bparser.toc
+% \def\jobname{ftablesout}\input ftablesout.toc
+ }%
+ \input \contentsfile
+} \ No newline at end of file
diff --git a/support/splint/cweb/mkscanner.w b/support/splint/cweb/mkscanner.w
new file mode 100644
index 0000000000..22c39e9342
--- /dev/null
+++ b/support/splint/cweb/mkscanner.w
@@ -0,0 +1,102 @@
+% Copyright 2012-2014, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+
+@**Lexer tables. This is the main table output code.
+Its core comes from the Flex Kit(ten?) (\.{fk.w})
+which will be included as soon as this paragraph is
+over. Feel free to
+interpret the acronym as something easier to remember.
+
+@<Table names@>=
+ @<Scanner table names@>@;
+
+@ @<Local variable and type declarations@>=
+ @<Variables and types local to the scanner driver@>@;
+
+@ @<Establish defaults@>=
+ @<Compute exotic scanner constants@>@;
+
+@ @<Output action switch, if any@>=
+ @<Output scanner actions@>@;
+
+@ @<Constant names@>=
+ @<Scanner constants@>@;
+
+@ @<Output constants@>=
+ @<Output exotic scanner constants@>@;
+
+@ @<\Cee\ preamble@>=
+ @<Common code for \Cee\ preamble@>@;
+ @<Scanner variables and types for \Cee\ preamble@>@;
+
+@ @<Output modes@>=
+ @<Scanner specific output modes@>@;
+
+@ @<Various output modes@>=
+ @<Handle scanner output modes@>@;
+
+@ @<Cases affecting the whole program@>=
+ @<Handle scanner output options@>@;
+
+@ @<Raw option list@>=
+ @<Scanner specific option list@>@;
+
+@ @<Higher index options@>=
+ @<Higher index scanner specific options@>@;
+
+@ @<Short option list@>=
+ @<Shortcuts for command line options affecting scanner output@>@;
+
+@i common.w
+@i fk.w
+
+@*1 Lexer dependent settings.
+This is it for the core table output functions.
+To make all of this into working code, in this
+case no function declarations need to be supplied.
+@<Auxiliary function declarations@>=
+
+@ @<Auxiliary function definitions@>=
+
+@ @<\Cee\ preamble@>=
+ void define_all_states( void );
+
+@ The lexer takes no parameters in this case but if one reuses a
+lexer written for a different purpose, the situation may be different.
+%\let\B\oldB % \Cee\ mode mixes all up
+
+@d YYPARSE_PARAMETERS
+@d YY_USER_INIT define_all_states();
+@d yyterminate() TeX_( "/yyterminate" ); return YY_NULL
+
+@c
+
+@<\Cee\ preamble@>@;
+typedef int YYSTYPE;
+#define YY_BREAK return 0;
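+/* redefining |YY_BREAK| (normally the |break;| that ends each case of the
+   action switch) makes |yylex()| return after executing a single action,
+   allowing the driver to run the actions one at a time */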
+
+#include LEXER_FILE
+
+@<\Cee\ postamble@>@;
+
+@** Index (for {\tt \jobname}).
+%\def\readcontents{%
+% {%
+% \acrofalse
+% \def\jobname{bparser}\input bparser.toc
+% }%
+% \input \contentsfile
+%} \ No newline at end of file
diff --git a/support/splint/cweb/np.w b/support/splint/cweb/np.w
new file mode 100644
index 0000000000..aa20e551c7
--- /dev/null
+++ b/support/splint/cweb/np.w
@@ -0,0 +1,380 @@
+@q Copyright 2012-2014, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
+@*1 The name parser. What follows is an example parser for name
+processing. This approach (i.e.\ using a `full blown' parser/scanner
+combination) is probably not the best way to implement such machinery,
+but its main purpose is to demonstrate a way to create a separate
+parser for local purposes.
+% We include the macros here since this file is intended to be
+% included by the documentation `aggregator' so putting bare \TeX\
+% at the beginning of the file runs the risk of producing an error
+% caused by \TeX\ material appearing inside a \Cee\ section.
+\let\currentparsernamespace\parsernamespace
+ \let\parsernamespace\smallnamespace
+ \let\hostparsernamespace\smallnamespace
+ \input stokenset.sty
+\let\parsernamespace\currentparsernamespace
+@(small_parser.yy@>=
+@G Switch to generic mode.
+%{@> @<Name parser \Cee\ preamble@> @=%}
+ @> @<Bison options@> @=
+%union {@> @<Union of parser types@> @=}
+%{@> @<Name parser \Cee\ postamble@> @=%}
+ @> @<Token and types ...@> @=
+%%
+ @> @<Parser productions@> @=
+%%
+@g
+
+@ @<Bison options@>=
+@G
+%token-table
+%debug
+%start full_name
+@g
+
+@ @<Token and types declarations@>=
+@G
+%token PERCENT_IDENTIFIER
+%token IDENTIFIER
+%token OPTIONAL
+%token NO_ATTR
+%token INTEGER
+%token EXTENDED
+%token WILDCARD
+@g
+
+@ @<Parser productions@>=
+@G
+full_name:
+ identifier_string suffixes.opt {@> @<Compose the full name@> @=}
+;
+
+identifier_string:
+ PERCENT_IDENTIFIER {@> @<Attach option name@> @=}
+| IDENTIFIER {@> @<Start with an identifier@> @=}
+| '<' IDENTIFIER '>' {@> @<Start with a tag@> @=}
+| '\'' WILDCARD '\'' {@> @<Start with a quoted string@> @=}
+| '\'' '>' '\'' {@> @<Start with a \prodstyle{'>'} string@> @=}
+| '\'' '<' '\'' {@> @<Start with a \prodstyle{'<'} string@> @=}
+| '\'' '.' '\'' {@> @<Start with a \prodstyle{'.'} string@> @=}
+| '\'' '_' '\'' {@> @<Start with an \prodstyle{'\_'} string@> @=}
+| '\'' '-' '\'' {@> @<Start with a \prodstyle{'-'} string@> @=}
+| qualifier {@> @<Turn a qualifier into an identifier@> @=}
+| identifier_string IDENTIFIER {@> @<Attach an identifier@> @=}
+| identifier_string qualifier {@> @<Attach qualifier to a name@> @=}
+| identifier_string INTEGER {@> @<Attach an integer@> @=}
+;
+
+suffixes.opt:
+ {@> TeX_( "/yy0{}" ); @=}
+| '.' {@> TeX_( "/yy0{/nx/dotsp/nx/sfxnone}" ); @=}
+| '.' suffixes {@> @<Attach suffixes@> @=}
+| '.' qualified_suffixes {@> @<Attach qualified suffixes@> @=}
+;
+
+suffixes:
+ IDENTIFIER {@> @<Start with a named suffix@> @=}
+| INTEGER {@> @<Start with a numeric suffix@> @=}
+| suffixes '.' {@> @<Add a dot separator@> @=}
+| suffixes IDENTIFIER {@> @<Attach a named suffix@> @=}
+| suffixes INTEGER {@> @<Attach integer suffix@> @=}
+| qualifier '.' {@> TeX_( "/yy0{/nx/sfxn/the/yy(1)/nx/dotsp}" ); @=}
+| suffixes qualifier '.' {@> TeX_( "/yy0{/the/yy(1)/nx/sfxn/the/yy(2)/nx/dotsp}" ); @=}
+;
+
+qualified_suffixes:
+ suffixes qualifier {@> @<Attach a qualifier@> @=}
+| qualifier {@> @<Start suffixes with a qualifier@> @=}
+;
+
+qualifier:
+ OPTIONAL {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+| NO_ATTR {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+| EXTENDED {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+;
+@g
+
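+@ As a worked example, a name such as \.{yy\_accept.opt} arrives as
+\prodstyle{IDENTIFIER} \.{.} \prodstyle{OPTIONAL} and is parsed as an
+\prodstyle{identifier\_string} followed by \prodstyle{suffixes.opt}; the
+resulting \.{\\yyval} expands to something along the lines of
+\.{\\idstr\{yy\_accept\}\{yy\_accept\}\\dotsp\\qual\{opt\}}, which is
+then handed to \.{\\namechars} for typesetting.
+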
+@ @<Compose the full name@>=
+ @[TeX_( "/yy0{/the/yy(1)/the/yy(2)}/namechars/yyval" );@]@;
+
+@ @<Attach option name@>=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/optstr{/the/toksa}{/the/toksb}}" );@]@;
+
+@ @<Start with an identifier@>=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/idstr{/the/toksa}{/the/toksb}}" );@]@;
+
+@ @<Start with a tag@>=
+ @[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/idstr{</the/toksa>}{</the/toksb>}}" );@]@;
+
+@ @<Start with a quoted string@>=
+ @[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/yy0{/nx/chstr{/the/toksa}{/the/toksb}}" );@]@;
+
+@ @<Start with a \prodstyle{'<'} string@>=
+ @[TeX_( "/yy0{/nx/chstr{<}{<}}" );@]@;
+
+@ @<Start with a \prodstyle{'>'} string@>=
+ @[TeX_( "/yy0{/nx/chstr{/greaterthan}{/greaterthan}}" );@]@;
+
+@ @<Start with an \prodstyle{'\_'} string@>=
+ @[TeX_( "/yy0{/nx/chstr{/uscoreletter}{/uscoreletter}}" );@]@;
+
+@ @<Start with a \prodstyle{'-'} string@>=
+ @[TeX_( "/yy0{/nx/chstr{-}{-}}" );@]@;
+
+@ @<Start with a \prodstyle{'.'} string@>=
+ @[TeX_( "/yy0{/nx/chstr{.}{.}}" );@]@;
+
+@ @<Turn a qualifier into an identifier@>=
+ @<Start with an identifier@>@;
+
+@ @<Attach an identifier@>=
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/appendr/toksa{/space}" );@]@;
+ @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@;
+ @[TeX_( "/concat/toksa/toksb" );@]@;
+ @[TeX_( "/getthird{/yy(1)}/to/toksb" );@]@;
+ @[TeX_( "/appendr/toksb{/space}" );@]@;
+ @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@;
+ @[TeX_( "/concat/toksb/toksc" );@]@;
+ @[TeX_( "/yy0{/nx/idstr{/the/toksa}{/the/toksb}}" );@]@;
+
+@ @<Attach qualifier to a name@>=
+ @<Attach an identifier@>
+
+@ @<Attach an integer@>=
+ @<Attach an identifier@>@;
+
+@ @<Attach suffixes@>=
+ @[TeX_( "/yy0{/nx/dotsp/the/yy(2)}" );@]@;
+
+@ @<Attach qualified suffixes@>=
+ @<Attach suffixes@>@;
+
+@ @<Start with a named suffix@>=
+ @[TeX_( "/yy0{/nx/sfxn/the/yy(1)}" );@]@;
+
+@ @<Start with a numeric suffix@>=
+ @[TeX_( "/yy0{/nx/sfxi/the/yy(1)}" );@]@;
+
+@ @<Add a dot separator@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/dotsp}" );@]@;
+
+@ @<Attach integer suffix@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/sfxi/the/yy(2)}" );@]@;
+
+@ @<Attach a named suffix@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/sfxn/the/yy(2)}" );@]@;
+
+@ @<Attach a qualifier@>=
+ @[TeX_( "/yy0{/the/yy(1)/nx/qual/the/yy(2)}" );@]@;
+
+@ @<Start suffixes with a qualifier@>=
+ @[TeX_( "/yy0{/nx/qual/the/yy(1)}" );@]@;
+
+@ \Cee\ preamble. In this case, there are no `real' actions that our
+grammar performs, only \TeX\ output, so this section is empty.
+
+@<Name parser \Cee\ preamble@>=
+
+@ \Cee\ postamble. It is tricky to insert function definitions that use \bison's internal types,
+as they have to be inserted in a place that is aware of the internal definitions but before said
+definitions are used.
+
+@<Name parser \Cee\ postamble@>=
+#define YYPRINT(file, type, value) @[yyprint (file, type, value)@]
+ static void yyprint (FILE *file, int type, YYSTYPE value){}
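+ /* a stub: there is nothing useful to print on the \Cee\ side, so token
+    values are simply omitted from \bison's debugging output */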
+
+@ Union of types.
+
+@<Union of parser types@>=
+
+@*1 The name scanner.
+%\checktabletrue
+@(small_lexer.ll@>=
+@G
+ @> @<Lexer definitions@> @=
+%{@> @<Lexer \Cee\ preamble@> @=%}
+ @> @<Lexer options@> @=
+%%
+ @> @<Regular expressions@> @=
+%%
+@O
+void define_all_states( void ) {
+ @<Collect all state definitions@>@;
+}
+@o
+@g
+
+@ @<Lexer definitions@>=
+ @<Lexer states@>@;
+@G
+letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ]
+wc ([^\\\'\"]{-}[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0-9]|\\.)
+id {letter}({letter}|[-0-9])*
+int [0-9]+
+@g
+
+@ @<Collect all state definitions@>=
+#define _register_name( name ) @[Define_State( #name, name )@]
+/* nothing for now */
+#undef _register_name
+
+@ Strings and characters in directives/rules.
+@<Lexer states@>=
+@G
+%x SC_ESCAPED_STRING SC_ESCAPED_CHARACTER
+@g
+
+@ @<Lexer \Cee\ preamble@>=
+
+#include <stdint.h>
+#include <stdbool.h>
+
+@ @<Lexer options@>=
+@G
+%option bison-bridge
+%option noyywrap nounput noinput reentrant
+%option noyy_top_state
+%option debug
+%option stack
+%option outfile="small_lexer.c"
+@g
+
+@ @<Regular expressions@>=
+ @<Scan white space@>@;
+ @<Scan identifiers@>@;
+
+@ White space skipping.
+\traceparserstatestrue
+\tracestackstrue
+\tracerulestrue
+\traceactionstrue
+\tracelookaheadtrue
+\traceparseresultstrue
+\tracebadcharstrue
+\yyflexdebugtrue
+%
+\traceparserstatesfalse
+\tracestacksfalse
+\tracerulesfalse
+\traceactionsfalse
+\tracelookaheadfalse
+\traceparseresultsfalse
+\tracebadcharsfalse
+\yyflexdebugfalse
+%
+\yyskipparsetrue
+@<Scan white space@>=
+@G
+[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=}
+@g
+
+@ This collection of regular expressions might seem redundant, and in
+its present state, it certainly is. However, should the typesetting
+style for some of the keywords need to be adjusted later on, such
+changes would be easy to implement, since the template is already
+here.
+\yyskipparsefalse % this is not necessary
+@<Scan identifiers@>=
+@G
+"%binary" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%code" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%debug" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%default-prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%define" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%defines" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%destructor" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%dprec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%empty" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%error-verbose" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%expect" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%expect-rr" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%file-prefix" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%fixed-output-files" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%initial-action" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%glr-parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%language" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%left" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%lex-param" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%locations" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%merge" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%name-prefix" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%no-default-prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%no-lines" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%nonassoc" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%nondeterministic-parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%nterm" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%output" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%param" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%parse-param" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%precedence" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%printer" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%pure-parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%require" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%right" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%skeleton" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%start" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%term" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%token" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%token-table" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%type" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%union" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%verbose" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%yacc" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%default"[-_]"prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%error"[-_]"verbose" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%expect"[-_]"rr" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%fixed"[-_]"output"[-_]"files" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%name"[-_]"prefix" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%no"[-_]"default"[-_]"prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%no"[-_]"lines" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%pure"[-_]"parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%token"[-_]"table" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+"%"({letter}|[0-9]|[-_]|"%"|[<>])+ {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=}
+
+"opt" {@> @[TeX_( "/yylexreturnval{OPTIONAL}" );@]@=}
+"na" {@> @[TeX_( "/yylexreturnval{NO_ATTR}" );@]@=}
+"ext" {@> @[TeX_( "/yylexreturnval{EXTENDED}" );@]@=}
+
+[<>._\'] {@> @[TeX_( "/yylexreturnchar" );@]@=}
+{wc} {@> @[TeX_( "/yylexreturnval{WILDCARD}" );@]@=}
+
+{id} {@> @[@<Prepare to process an identifier@>@]@=}
+{int} {@> @[TeX_( "/yylexreturnval{INTEGER}" );@]@=}
+
+"\"" {@> @[TeX_( "/yylexnext" );@]@=}
+. {@> @[@<React to a bad character@>@]@=}
+@g
+
+@ @<Prepare to process an identifier@>=
+ @[TeX_( "/yylexreturnval{IDENTIFIER}" );@]@;
+
+@ @<React to a bad character@>=
+ @[TeX_( "/iftracebadchars" );@]@;
+ @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yylexreturn{$undefined}" );@]@;
diff --git a/support/splint/cweb/philosophy.w b/support/splint/cweb/philosophy.w
new file mode 100644
index 0000000000..4d30f2d765
--- /dev/null
+++ b/support/splint/cweb/philosophy.w
@@ -0,0 +1,223 @@
+@**Philosophy.
+This section should, perhaps, be more appropriately called {\it rant\/} but
+{\it philosophy\/} sounds more academic. The design of any software involves
+numerous choices, and \splint\ is no exception. Some of these choices
+are explained in the appropriate places in the package files. This
+section collects a few `big picture' viewpoints that did not fit elsewhere.
+
+@*1 On typographic style.
+It must seem quite perplexing to some readers that a
+document with a focus on {\it pretty-printing\/} displays such a
+wanton disregard for good typographic taste. The haphazard choice of
+styles for presenting programming constructs and the random overabundance
+of fonts on almost every page are just two of the many typographic sins
+and design gaffes so amply manifested on these pages. The author has
+to take full responsibility for the lack of taste in this
+opus and has only one argument in his defense: this is not
+merely a book for a good night's read but a piece of technical
+documentation.
+
+In many ways, the goal of this document is somewhat contrary to that
+of a well-written manual: to display the main features
+prominently and in logical order. After all, this is a package that is
+intended to help {\it write\/} such manuals, so it inevitably must
+display some use cases that demonstrate a variety of typographic styles
+possible to achieve with \splint. Needless to say, {\it variety\/} and
+{\it consistency\/} seldom go hand in hand, and it is consistency that
+makes for a pretty page. One of the objectives has been to demonstrate a
+number of quite technical programming constructs, so it is assumed
+that the reader will want to look up the input
+files to see how some (however ugly or esoteric) typographic effects
+were achieved.
+
+On the other hand, to use a clich\'e, beauty is in the eye of the
+beholder, so what makes a book readable (or even beautiful) may well
+depend on the background of the reader. As an example, letterspacing
+as a typographic device is almost universally reviled in Western
+typography (aside from a few niche uses such as setting titles). In
+Russian, however (at least until recently), letterspacing has been
+routinely used for emphasis (or, as a Russian would put it,
+e$\,$m$\,$p$\,$h$\,$a$\,$s$\,$i$\,$s) in lieu of, say, {\it italics}. Before
+I hear any objections from typography purists, let me just say that
+this technique fits perfectly with the way emphasis works in Russian speech: a
+speaker slowly enunciates the sounds of each word (incidentally,
+emphasizing {\it emphasis\/} is a perfect example of why this method
+would fail in most English texts). Letterspaced
+sentences are easy to find on a page and set a special reading rhythm,
+which is an added bonus in many cases, although it does violate the
+`universally gray pages are a must' dogma.
+
+@*1 Why GPL.
+The choice of license for this project goes beyond merely showing the
+source. \TeX, by its very nature, is an open-source language, so it
+is not a matter of hiding anything from the user or a potential
+developer. The \Cee\ code is a different matter but the source is not
+that complicated. Reducing the licensing issue to the ability of
+someone else to see the source code is a great
+oversimplification. Without getting into too many details of so-called
+`open source licenses' (other than the GPL) and arguing with their advocates, let me simply
+express my lack of understanding of the argument that
+BSD-style licenses introduce more freedom by allowing a software
+vendor to incorporate BSD-licensed software into its
+products. What benefit does one derive from such an `extension' of software
+freedom? Perhaps the hope that `open source' (for lack of a
+better term) will prompt the vendor to follow the accepted free (or
+any other, for that matter!) software standards and make its software
+more interoperable with the free alternatives? A well-known software
+giant's {\it embrace, extend, extinguish\/} philosophy shows how na\"{\i}ve and
+misplaced such hopes are.
+
+I am not going to argue for the benefits of free software at length, either
+(such benefits seem self-evident to me, although the readers should
+feel free to disagree). Let me just point out that software companies
+enjoy quite a few freedoms that we, as software consumers, elect to
+afford them. Among such freedoms is the ability to renege on any
+promises made to potential users and to withdraw any guarantees that such
+users might enjoy. Free software, of course, does not provide any
+guarantees either, but `you get what you paid for'. As a result of
+such a `release of any responsibility', the claims of increased
+reliability or better support for commercial software sound a
+bit hollow. Another widespread tactic is user brainwashing and
+changing the culture (usually for the worse) in order to promote new
+`user-friendly' features of commercial software. Instead of taking
+advantage of computers as cognitive machines we have come to view
+them as advanced media players that we interact with through
+artificial, unnatural interfaces. Meaningless terminology (`UX' for
+`user experience'? What in the world is `user experience'?)
+proliferates, and programmers are happy to deceive themselves with
+their newly discovered business prowess.
+
+One would hope that the somewhat higher standards of the `real'
+manufacturers might percolate to the software world; the reality,
+however, is very different. Not only has life-cycle `engineering'
+reached the point where manufacturers can precisely predict the life
+spans of their products, but the software embedded in those products
+has become an enabling technology that makes this `life design' much
+easier.
+
+In effect, by embedding software in their products, hardware
+manufacturers not only piggyback on software's perceived complexity
+and argue that such complex systems cannot be made reliable, but also have
+an added incentive to uphold this image. The software weighs nothing,
+memory is cheap, and consumers are easy to deceive; thus `software is
+expensive' and `reliable software is prohibitively so'. Designing reliable
+software is quite possible, though: just look at programmable
+thermostats, simple cellphones, and other `invisible' gadgets we
+enjoy. The `software ideology' with its `IP' lingo is spreading like a
+virus even through the world of real things. We now expect products to
+break and are too quick to forgive sloppy engineering that goes into
+everyday things. We are also getting used to the idea that it is the
+manufacturers that get to dictate the terms of use for `their' products
+and that we are merely borrowing `their' stuff.
+
+The GPL was conceived as an antidote to this scourge. It is a
+remarkable piece of `legal engineering': a self-propagating license
+with a clearly outlined set of goals. While by itself it does not
+guarantee reliability or quality, it does inhibit the spread of the `IP'
+(which is sometimes sarcastically, though quite perceptively,
+`deabbreviated' as {\sl I}maginary {\sl P}roperty) disease through
+software.
+
+The industry has adapted, of course. So-called (non-GPL) `open source
+licenses', which are supposed to be an improvement on the GPL,
+are a sort of `immune reaction' to the free software
+movement. Convince and confuse enough apathetic users, and the
+protections granted by the GPL are no longer visible.
+
+@*1 Why not \Cee$++$ or OOP in general.
+The choice of language was mainly driven by \ae sthetic motives:
+\Cee$++$ has a bloated and confusing standard, partially supported by
+various compilers. It seems that there is no agreement on what
+\Cee$++$ really is or how to use some of its constructs. This is all
+in contrast to \Cee\ with its well-defined and concise body of
+specifications and rather well-established stylistics. The existence
+of `obfuscated \Cee' is not good evidence of deficiency, and \Cee$++$
+is definitely not immune to this malady.
+
+Object-oriented design has certainly taken on the aura of a religious
+dictate, universally adhered to and forcefully promoted by its
+followers. Unfortunately, the definition of what constitutes an
+`object-oriented' approach is rather vague. A few abstract concepts are
+commonly tossed about to give the illusion of a well-developed
+abstraction (such as `polymorphism', `encapsulation', and so on) but
+the definitions vary in both length and content, depending on the source.
+
+On a syntactic level, some features of object-oriented languages are
+undoubtedly very practical (such as the |this| pointer in \Cee$++$);
+however, many of those features can be effectively emulated with some
+clever uses of an appropriate preprocessor (there are a few
+exceptions, of course, |this| being one of them). The rest of the
+`object-oriented philosophy' is just that: a design philosophy. Before
+that we had structured programming; now there are patterns, extreme,
+agile, reactive, and so on. They might all find their uses; however, there
+are always numerous exceptions (sometimes even global variables and
+|goto|'s have their place, as well).
+
+A pedantic reader might point out a few object-oriented features even
+in the \TeX\ portion of the package and then accuse the author of
+being `inconsistent'. I am always interested in possible improvements
+in style but I am unlikely to consider any changes based solely on
+adherence to any particular design fad.
+
+In short, OOP was not shunned simply because a `non-OOP' language was
+chosen; instead, whatever approach or style was deemed most effective
+was used. The author's judgment has not always been perfect, of course,
+and, given a good reason, changes can be made, including the choice of
+the language. `Make it object-oriented' is neither a good reason nor a
+clearly defined one, however.
+
+@*1 Why not $*$\TeX.
+Simple. I never use it and have no idea how packages, classes,
+etc., are designed. I have heard it has impressive mechanisms for
+dealing with various problems commonly encountered in \TeX. Sadly, my
+knowledge of $*$\TeX\ machinery is almost nonexistent. This may change,
+but right now I have tried to make the macros as generic as possible,
+hopefully making $*$\TeX\ adaptation easy.
+
+The following quote from \cite[Ho] makes me feel particularly uneasy
+about the current state of development of various \TeX\ variants:
+``{\it Finally, to many current programmers\/ \.{WEB} source simply feels over-documented
+and even more important is that the general impression is that of a finished book:
+sometimes it seems like\/ \.{WEB} actively discourages development. This is
+a subjective point, but nevertheless a quite important one.}''
+
+{\it Discouraging development\/} seems like a good thing to
+me. Otherwise we are one step away from encouraging the writing of poor
+software with inadequate tools merely `to encourage development'.
+
+The feeling of a \.{WEB} source being {\it over-documented\/} is most
+certainly subjective, and, I am sure, not shared by all `current
+programmers'. The advantage of using \.{WEB}-like tools, however, is
+that it gives the programmer the ability to place the vital
+information where it does not distract the reader (`developer',
+`maintainer', call it whatever you like) from the logical flow of the
+code.
+
+Some of the complaints in \cite[Ho] are definitely justified,
+although it seems that a better approach would be to write an improved
+tool similar to \.{WEB}, rather than give up all the flexibility such
+a tool provides.
+
+@*1 Why \CWEB.
+\CWEB\ is not as polished as \TeX\ but it works and has a
+number of impressive features. It is, regrettably, a `niche' tool and
+a few existing extensions of \CWEB\ and software based on similar ideas
+do not enjoy the popularity they deserve. Literate philosophy has been
+largely neglected even though it seems to have a more logical
+foundation than OOP. Under these circumstances, \CWEB\ seemed to be
+the best available option.
+
+@*1 Why not GitHub, Bitbucket, etc.
+Git is an incredible tool and is used extensively in the development
+of \splint. The distribution archive is a Git repository. The use of
+centralized services such as GitHub, however, seems redundant. The
+standard cycle, `clone, modify, create a pull request', works the same even
+when `clone' is replaced by `download'. Thus, no functionality is
+lost. This might change if the popularity of the package unexpectedly
+increases.
+
+On the other hand, GitHub and its cousins are commercial entities,
+whose availability in the future is not guaranteed (nothing is
+certain, of course, no matter what distribution method is
+chosen). Keeping \splint\ as an archive of a Git repository seems like
+an efficient way of being ready for an unexpected change.
diff --git a/support/splint/cweb/references.w b/support/splint/cweb/references.w
new file mode 100644
index 0000000000..76dc718d3b
--- /dev/null
+++ b/support/splint/cweb/references.w
@@ -0,0 +1,73 @@
+@** Bibliography. This list of references is not meant to be
+exhaustive or complete. These are merely the papers and the books
+mentioned in the body of the program above. Naturally, this project
+has been influenced by many outside ideas but it would be impossible
+to list them all due to time and (human) memory limitations.
+
+{%
+\def\BASIX{{B\kern-.7ptA\kern-.7ptS\kern-.3pt\lower1.3pt\hbox{I}\kern-.3pt X}}
+\def\MF{{\tt METAFONT}}
+\def\bterm#1{\item{[#1]\namedspot{#1bibref}\quad}\ignorespaces}%
+\setbox0=\hbox{[ISO/C11]\quad}
+\parindent=0pt
+\advance\parindent by \wd0
+\ninepoint
+\smallskip
+\centerline{\dinkus}%
+\smallskip
+
+\bterm{Ah}Alfred V.~Aho et al., {\it Compilers: Principles,
+Techniques, and Tools}, Pearson Education, 2006.
+
+\bterm{Bi}Charles Donnelly and Richard Stallman, {\it Bison, The
+Yacc-compatible Parser Generator}, The Free Software Foundation, 2013.
+\url{http://www.gnu.org/software/bison/}.
+
+\bterm{DEK1}Donald E.~Knuth, {\it The \TeX book}, Addison-Wesley Reading, Massachusetts, 1984.
+
+\bterm{DEK2}Donald E.~Knuth, {\it The future of \TeX\ and \MF}, TUGboat {\bf 11} (4), p.~489, 1990.
+
+\bterm{Do}Jean-luc Doumont, {\it Pascal pretty-printing: an example of ``preprocessing with \TeX''},
+TUGboat {\bf 15} (3), 1994---Proceedings of the 1994 TUG Annual Meeting.
+
+\bterm{Er}Sebastian Thore Erdweg and Klaus Ostermann, {\it Featherweight \TeX\ and Parser Correctness},
+Proceedings of the Third International Conference on Software Language Engineering,
+pp.\ 397--416, Springer-Verlag, Berlin--Heidelberg, 2011.
+
+\bterm{Fi}Jonathan Fine, {\it The \.{\\CASE} and \.{\\FIND} macros},
+TUGboat {\bf 14} (1), pp.~35--39, 1993.
+
+\bterm{Go}Pedro Palao Gostanza, {\it Fast scanners and self-parsing in \TeX},
+TUGboat {\bf 21} (3), 2000---Proceedings of the 2000 Annual Meeting.
+
+\bterm{Gr}Andrew Marc Greene, {\it \BASIX---an interpreter written in \TeX}, TUGboat {\bf 11} (3),
+1990---Proceedings of the 1990 TUG Annual Meeting.
+
+\bterm{Ha}Hans Hagen, {\it Lua\TeX: Halfway to version~1}, TUGboat
+{\bf 30} (2), pp.~183--186, 2009. \url{http://tug.org/TUGboat/tb30-2/tb95hagen-luatex.pdf}.
+
+\bterm{Ho}Taco Hoekwater, {\it Lua\TeX\ says goodbye to Pascal}, TUGboat {\bf 30} (3),
+pp.~136--140, 2009---Euro\TeX\ 2009 Proceedings.
+
+\bterm{Ie}R.~Ierusalimschy et al., {\it Lua~5.1 Reference Manual},
+{\tt Lua.org}, August 2006. \url{http://www.lua.org/manual/5.1/}.
+
+\bterm{ISO/C11}{\it ISO/IEC 9899---Programming languages---C (C11)}, December~2011, draft available at
+\url{http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf}.
+
+\bterm{Jo}Derek M.~Jones, {\it The New C Standard: An Economic and
+Cultural Commentary}, available at \url{http://www.knosof.co.uk/cbook/cbook.html}.
+
+\bterm{La}{\it The \.{l3regex} package: regular expressions in \TeX}, The \LaTeX3\ Project.
+
+\bterm{Pa}Vern Paxson et al., {\it Lexical Analysis With Flex, for
+Flex~2.5.37}, July~2012. \url{http://flex.sourceforge.net/manual/}.
+
+\bterm{Sh}Alexander Shibakov, {\it Parsers in \TeX\ and using \CWEB\ for general pretty-printing},
+TUGboat {\bf 35} (1), 2014, available as part of the documentation supplied with \splint.
+
+\bterm{Wo}Marcin Woli\'nski, {\it {\tt Pretprin}---a \LaTeX2$\epsilon$ package for
+pretty-printing texts in formal languages},
+TUGboat {\bf 19} (3), 1998---Proceedings of the 1998 TUG Annual Meeting.
+
+} \ No newline at end of file
diff --git a/support/splint/cweb/splint.w b/support/splint/cweb/splint.w
new file mode 100644
index 0000000000..05fe5c42fc
--- /dev/null
+++ b/support/splint/cweb/splint.w
@@ -0,0 +1,102 @@
+% Copyright 2012-2015, Alexander Shibakov
+% This file is part of SPLinT
+%
+% SPLinT is free software: you can redistribute it and/or modify
+% it under the terms of the GNU General Public License as published by
+% the Free Software Foundation, either version 3 of the License, or
+% (at your option) any later version.
+%
+% SPLinT is distributed in the hope that it will be useful,
+% but WITHOUT ANY WARRANTY; without even the implied warranty of
+% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+% GNU General Public License for more details.
+%
+% You should have received a copy of the GNU General Public License
+% along with SPLinT. If not, see <http://www.gnu.org/licenses/>.
+\newwrite\gindex
+
+@i bo.x
+@i lo.x
+@i np.x
+@i common.w
+@i bs.w
+@i fk.w
+@i philosophy.w
+@i references.w
+
+\let\N\oldN
+\let\hostparsernamespace\mainnamespace % to typeset examples in the text
+ % properly
+@**Index. This section is, perhaps, the most valuable product of
+\CWEB's labors. It lists references to definitions (set in {\it
+italic}) as well as uses for each \Cee\ identifier used in the
+source. Special facilities have been added to extend indexing to
+\bison\ grammar terms and \TeX\ control sequences encountered in
+\bison\ actions. Definitions of tokens (via \prodstyle{\%token},
+\prodstyle{\%nterm} and \prodstyle{\%type} directives) are
+%$\underline{\hbox{underlined}}$
+typeset in {\bf bold}.
+The \bison\ and \TeX\ entries are put
+in distinct sections of the index in order to keep the separation
+between the \Cee\ entries and the rest. It may be worth noting that
+the {\it definition\/} of the symbol is listed under both its `macro
+name' (such as \.{CHAR}, typeset as \prodstyle{CHAR} in the case of
+the grammar below) and its `string' name, if present (to
+continue the previous example, \.{"char"} is synonymous with
+\prodstyle{CHAR} after a declaration such as `\prodstyle{\%token}
+\prodstyle{CHAR} \.{"char"}'), while the {\it use\/} of the term lists
+whichever token form was referenced at the point of use (both forms
+are accessible when the entry is typeset for the index and a macro can
+be written to mention the other form as well). The original syntax of
+\bison\ allows the programmer to declare tokens such as
+\prodstyle{'\{'} and \prodstyle{'\}'} and the indexing macros honor
+this convention even though in a typeless environment such as the
+one the present typesetting parser is operating in such declarations
+are redundant. The indexing macros also record the use of such
+character tokens. The quotes indicate
+that the `string' form of the token's name was used. A section set in
+{\it italic\/} references the point where the corresponding term
+appeared on the left hand side of a production. A production:
+\let\TeXx\TeXxi
+\def\gatoks{%
+ \omit\hfil&\omit\hfil&\omit\hfil\hbox to2em{\hfil}&\omit\hfil\cr
+ \noalign{\vskip-\baselineskip}%
+}%
+\beginmprod
+left_hand_side:
+ term.1 term.2 term.3 \{\stashed{|TeX_("/dosomething/yy(1)");|}\}
+\endmprod
+inside the \TeX\ part of a \CWEB\ section will generate several
+index entries, as well, including the entries for any
+material inside the action, mimicking \CWEB's behavior for the
+{\it inline \Cee\/} (\.{\yl}$\ldots$\.{\yl}). Such entries (except for
+the references to the \Cee\ code inside actions) are labeled with $^\circ$,
+to provide a reminder of their origin.
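+To give a concrete (and purely illustrative) example: assuming a
+declaration `\prodstyle{\%token} \prodstyle{CHAR} \.{"char"}' earlier
+in the grammar, a production
+\beginmprod
+declaration:
+ "char" identifier \{\stashed{|TeX_("/dosomething/yy(1)");|}\}
+\endmprod
+would produce a $^\circ$-labeled index entry for \.{"char"} (the form
+referenced at this point), while the token's {\it definition\/} would
+still be listed under both \prodstyle{CHAR} and \.{"char"}
+(\.{\\dosomething} is the same stand-in used above).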
+
+This parser collection, as well as the indexing facilities therein, has been
+designed to showcase the broadest range of options available to the user
+and thus does not always exhibit the sanest choices one could make (for
+example, using a full-blown parser for term {\it names\/} is poor
+design but it was picked to demonstrate multiple parsers in one
+program). The same applies to the way the index is constructed (it
+would be easy to use only the `string' name of the token when
+available, thus avoiding referencing the same token twice).
+
+\TeX\ control sequences are listed following the index of all \bison\
+entries. The two indices are separated by a {\it dinkus}
+(\dinkus). Since it is nearly impossible to determine at what point a
+\TeX\ macro is defined (and most of them are defined outside of the
+\CWEB\ sources), only their uses are listed (to be more precise, {\it
+every\/} appearance of a macro is assumed to be its use). In a few cases, a
+`graphic' representation for a control sequence is also listed (for
+example, {\termindexfalse\def\texnspace{texline}\inlineTeXx{/getfirst}} represents
+{\termindexfalse\def\texnspace{other}\inlineTeXx{/getfirst}}). The index entries are ordered alphabetically. The
+latter may not be entirely obvious in cases where the `graphical
+representation' of the corresponding token departs significantly
+from its string version (such as |TeX_("/yy(1)");|
+instead of {\def\texnspace{other}|TeX_("/yy(1)");|}).
+\closeout\gindex
+\let\inx\inxmod
+\let\fin\finmod
+\displaytokenrawtrue
+\def\topofcontents{\null\vskip-3\baselineskip\centerline{C{\sc ONTENTS} (\sc\uppercase\expandafter{\title})}\medskip}
diff --git a/support/splint/cweb/ssffo.w b/support/splint/cweb/ssffo.w
new file mode 100644
index 0000000000..56e710b8c6
--- /dev/null
+++ b/support/splint/cweb/ssffo.w
@@ -0,0 +1,118 @@
+@q Copyright 2012-2015, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+\input limbo.sty
+\def\optimization{5}
+\input yy.sty
+
+\let\oldN\N
+\let\N\textN
+\let\M\textM
+\def\hostparsernamespace{[unreacheable]}
+
+@** A simple scanner for \flex\ options.
+This is a `bare-bones' scanner for a subset of the `extended' \bison\
+grammar that parses, well, some of the `extensions', namely, the
+\flex\ state declarations. It does not use the state mechanism
+itself, and is supposed to be used with the bootstrapping parser, even
+though it is not strictly necessary. It parses state declarations as
+long as they are separated into their own \CWEB\ sections and extracts
+the {\it names\/} of the states. The \flex\ scanner output `driver'
+does the rest after including the produced header file.
+
+If reusing the existing scanner for \bison\ were not a priority, the
+proper way to design a scanner like this would be to make it a subset of the
+existing scanner code. That way, portions of the program would be made
+more reusable and the overall design more consistent.
+@s TeX_ TeX
+@(ssffo.ll@>=
+@G
+ @> @<Lexer definitions@> @=
+%{@> @<Lexer \Cee\ preamble@> @=%}
+ @> @<Lexer options@> @=
+%%
+ @> @<Regular expressions@> @=
+%%
+@g
+
+@ A couple of handy abbreviations to get started. Note that the
+definition of a letter is more restrictive in this case since we only
+need to grab the states of an existing \bison\ lexer. For a
+bootstrapping scanner like this it is beneficial to fail early while
+scanning something that lies outside its attention domain: this results in
+faster bootstrapping and a lower chance of accidentally parsing
+something that should not have been parsed. Making the syntax and the grammar
+more restrictive helps to achieve this, and makes the overall
+design simpler.
+@<Lexer definitions@>=
+@G
+letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ]
+id {letter}({letter}|[-0-9])*
+@g
+
+@ @<Lexer \Cee\ preamble@>=
+
+#include <stdint.h>
+#include <stdbool.h>
+
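+ /* this scanner defines no states of its own, so the state-definition
+    hook expected by the scanner driver remains an empty stub */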
+ void define_all_states( void ){}
+
+@ A standard combination of options to match the \TeX\ code that
+drives the scanner.
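+For the record: \.{reentrant} requests a reentrant scanner,
+\.{bison-bridge} gives |yylex| the calling convention expected by a
+\bison\ parser, and \.{noyywrap} eliminates the need for a |yywrap|
+function.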
+@<Lexer options@>=
+@G
+%option bison-bridge
+%option noyywrap nounput noinput reentrant
+%option noyy_top_state
+%option debug
+%option stack
+%option outfile="ssffo.c"
+@g
+
+@ There are not that many regular expressions to list, since the range
+of tokens recognized by this routine is not very wide.
+@<Regular expressions@>=
+ @<Scan white space@>@;
+ @<Scan identifiers@>@;
+
+@ White space skipping.
+@<Scan white space@>=
+@G
+[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=}
+@g
+
+@ The rest are either identifiers or \.{\%}-options.
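+Recall that \.{\%x} declares {\it exclusive\/} \flex\ states, while
+\.{\%s} declares {\it inclusive\/} ones, hence the two distinct
+token names.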
+@<Scan identifiers@>=
+@G
+{id} {@> @<Return an identifier@> @=}
+"%x" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_X}" );@] @=}
+"%s" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_S}" );@] @=}
+. {@> @<React to a bad character@> @=}
+@g
+
+@ @<React to a bad character@>=
+ @[TeX_( "/iftracebadchars" );@]@;
+ @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@;
+ @[TeX_( "/fi" );@]@;
+ @[TeX_( "/yylexreturn{$undefined}" );@]@;
+
+@ The lexer returns standard \.{\\yyunion} types.
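+The scanned name is recorded in both its `pure' (\.{\\yytextpure}) and
+its formatted (\.{\\yytext}) form, together with the \.{\\yyfmark} and
+\.{\\yysmark} stream marks for later use by the typesetting macros.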
+@<Return an identifier@>=
+ @[TeX_( "/edef/next{/yylval{/nx/idit{/the/yytextpure}{/the/yytext}" );@]@;
+ @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@;
+ @[TeX_( "/yylexreturn{ID}" );@]@;
+
+@** Index.
+\def\TeXx{\TeX\ material} \ No newline at end of file