From e0c6872cf40896c7be36b11dcc744620f10adf1d Mon Sep 17 00:00:00 2001 From: Norbert Preining Date: Mon, 2 Sep 2019 13:46:59 +0900 Subject: Initial commit --- support/splint/COPYING | 674 ++++++ support/splint/INSTALL | 59 + support/splint/Makefile | 39 + support/splint/README | 82 + support/splint/TODO | 27 + support/splint/VERSION | 1 + support/splint/cweb/Makefile | 115 + support/splint/cweb/bo.w | 2479 +++++++++++++++++++++ support/splint/cweb/bs.w | 706 ++++++ support/splint/cweb/common.w | 788 +++++++ support/splint/cweb/fk.w | 510 +++++ support/splint/cweb/lo.w | 797 +++++++ support/splint/cweb/mkeparser.w | 123 + support/splint/cweb/mkscanner.w | 102 + support/splint/cweb/np.w | 380 ++++ support/splint/cweb/philosophy.w | 223 ++ support/splint/cweb/references.w | 73 + support/splint/cweb/splint.w | 102 + support/splint/cweb/ssffo.w | 118 + support/splint/doc/ldman.pdf | Bin 0 -> 633311 bytes support/splint/doc/splint.pdf | Bin 0 -> 829494 bytes support/splint/doc/tb109shibakov.pdf | Bin 0 -> 265118 bytes support/splint/examples/count/count.sty | 552 +++++ support/splint/examples/expression/Makefile | 59 + support/splint/examples/expression/etoks.sty | 3 + support/splint/examples/expression/expression.sty | 25 + support/splint/examples/expression/expression.w | 262 +++ support/splint/examples/ld/Makefile | 118 + support/splint/examples/ld/ldfrontmatter.sty | 2 + support/splint/examples/ld/ldgram.w | 1083 +++++++++ support/splint/examples/ld/ldgramo.w | 1645 ++++++++++++++ support/splint/examples/ld/ldint.sty | 259 +++ support/splint/examples/ld/ldlex.w | 550 +++++ support/splint/examples/ld/ldlexo.w | 763 +++++++ support/splint/examples/ld/ldman.w | 572 +++++ support/splint/examples/ld/ldnp.w | 362 +++ support/splint/examples/ld/ldnump.w | 306 +++ support/splint/examples/ld/ldunion.sty | 351 +++ support/splint/examples/ld/lstokenset.sty | 23 + support/splint/examples/ld/ltokenset.sty | 91 + support/splint/examples/symbols/Makefile | 37 + support/splint/examples/symbols/symbols.w | 193 ++ support/splint/examples/symbols/symmap.sty | 164 ++ support/splint/examples/symbols/symtoks.sty | 8 + support/splint/examples/types/basic.sty | 464 ++++ support/splint/examples/types/test.sty | 63 + support/splint/examples/types/tree.sty | 190 ++ support/splint/examples/xxpression/Makefile | 74 + support/splint/examples/xxpression/xtoks.sty | 2 + support/splint/examples/xxpression/xxpression.sty | 27 + support/splint/examples/xxpression/xxpression.w | 365 +++ support/splint/examples/xxpression/xymmap.sty | 155 ++ support/splint/makefile.inc | 76 + support/splint/makefile.loc | 5 + support/splint/scripts/bindx.pl | 101 + support/splint/scripts/brack.pl | 105 + support/splint/scripts/cslist.pl | 40 + support/splint/scripts/unline.pl | 31 + support/splint/tex/btokenset.sty | 31 + support/splint/tex/dcols.sty | 817 +++++++ support/splint/tex/flex.sty | 547 +++++ support/splint/tex/frontmatter.sty | 2 + support/splint/tex/grabstates.sty | 69 + support/splint/tex/limbo.sty | 1372 ++++++++++++ support/splint/tex/stokenset.sty | 23 + support/splint/tex/trt1.sty | 37 + support/splint/tex/xarithm.sty | 317 +++ support/splint/tex/yxunion.sty | 33 + support/splint/tex/yy.sty | 76 + support/splint/tex/yybootstrap.sty | 70 + support/splint/tex/yyboth.sty | 181 ++ support/splint/tex/yycommon.sty | 588 +++++ support/splint/tex/yyfaststack.sty | 352 +++ support/splint/tex/yyinit.sty | 682 ++++++ support/splint/tex/yyinput.sty | 406 ++++ support/splint/tex/yymisc.sty | 1422 ++++++++++++ 
support/splint/tex/yynested.sty | 85 + support/splint/tex/yyparse.sty | 319 +++ support/splint/tex/yystype.sty | 65 + support/splint/tex/yytexlex.sty | 1145 ++++++++++ support/splint/tex/yyunion.sty | 1523 +++++++++++++ support/splint/tex/yyxunion.sty | 33 + 82 files changed, 26719 insertions(+) create mode 100644 support/splint/COPYING create mode 100644 support/splint/INSTALL create mode 100644 support/splint/Makefile create mode 100644 support/splint/README create mode 100644 support/splint/TODO create mode 100644 support/splint/VERSION create mode 100644 support/splint/cweb/Makefile create mode 100644 support/splint/cweb/bo.w create mode 100644 support/splint/cweb/bs.w create mode 100644 support/splint/cweb/common.w create mode 100644 support/splint/cweb/fk.w create mode 100644 support/splint/cweb/lo.w create mode 100644 support/splint/cweb/mkeparser.w create mode 100644 support/splint/cweb/mkscanner.w create mode 100644 support/splint/cweb/np.w create mode 100644 support/splint/cweb/philosophy.w create mode 100644 support/splint/cweb/references.w create mode 100644 support/splint/cweb/splint.w create mode 100644 support/splint/cweb/ssffo.w create mode 100644 support/splint/doc/ldman.pdf create mode 100644 support/splint/doc/splint.pdf create mode 100644 support/splint/doc/tb109shibakov.pdf create mode 100644 support/splint/examples/count/count.sty create mode 100644 support/splint/examples/expression/Makefile create mode 100644 support/splint/examples/expression/etoks.sty create mode 100644 support/splint/examples/expression/expression.sty create mode 100644 support/splint/examples/expression/expression.w create mode 100644 support/splint/examples/ld/Makefile create mode 100644 support/splint/examples/ld/ldfrontmatter.sty create mode 100644 support/splint/examples/ld/ldgram.w create mode 100644 support/splint/examples/ld/ldgramo.w create mode 100644 support/splint/examples/ld/ldint.sty create mode 100644 support/splint/examples/ld/ldlex.w create mode 100644 support/splint/examples/ld/ldlexo.w create mode 100644 support/splint/examples/ld/ldman.w create mode 100644 support/splint/examples/ld/ldnp.w create mode 100644 support/splint/examples/ld/ldnump.w create mode 100644 support/splint/examples/ld/ldunion.sty create mode 100644 support/splint/examples/ld/lstokenset.sty create mode 100644 support/splint/examples/ld/ltokenset.sty create mode 100644 support/splint/examples/symbols/Makefile create mode 100644 support/splint/examples/symbols/symbols.w create mode 100644 support/splint/examples/symbols/symmap.sty create mode 100644 support/splint/examples/symbols/symtoks.sty create mode 100644 support/splint/examples/types/basic.sty create mode 100644 support/splint/examples/types/test.sty create mode 100644 support/splint/examples/types/tree.sty create mode 100644 support/splint/examples/xxpression/Makefile create mode 100644 support/splint/examples/xxpression/xtoks.sty create mode 100644 support/splint/examples/xxpression/xxpression.sty create mode 100644 support/splint/examples/xxpression/xxpression.w create mode 100644 support/splint/examples/xxpression/xymmap.sty create mode 100644 support/splint/makefile.inc create mode 100644 support/splint/makefile.loc create mode 100755 support/splint/scripts/bindx.pl create mode 100755 support/splint/scripts/brack.pl create mode 100755 support/splint/scripts/cslist.pl create mode 100755 support/splint/scripts/unline.pl create mode 100644 support/splint/tex/btokenset.sty create mode 100644 support/splint/tex/dcols.sty create mode 100644 
support/splint/tex/flex.sty create mode 100644 support/splint/tex/frontmatter.sty create mode 100644 support/splint/tex/grabstates.sty create mode 100644 support/splint/tex/limbo.sty create mode 100644 support/splint/tex/stokenset.sty create mode 100644 support/splint/tex/trt1.sty create mode 100644 support/splint/tex/xarithm.sty create mode 100644 support/splint/tex/yxunion.sty create mode 100644 support/splint/tex/yy.sty create mode 100644 support/splint/tex/yybootstrap.sty create mode 100644 support/splint/tex/yyboth.sty create mode 100644 support/splint/tex/yycommon.sty create mode 100644 support/splint/tex/yyfaststack.sty create mode 100644 support/splint/tex/yyinit.sty create mode 100644 support/splint/tex/yyinput.sty create mode 100644 support/splint/tex/yymisc.sty create mode 100644 support/splint/tex/yynested.sty create mode 100644 support/splint/tex/yyparse.sty create mode 100644 support/splint/tex/yystype.sty create mode 100644 support/splint/tex/yytexlex.sty create mode 100644 support/splint/tex/yyunion.sty create mode 100644 support/splint/tex/yyxunion.sty (limited to 'support/splint') diff --git a/support/splint/COPYING b/support/splint/COPYING new file mode 100644 index 0000000000..94a9ed024d --- /dev/null +++ b/support/splint/COPYING @@ -0,0 +1,674 @@ + GNU GENERAL PUBLIC LICENSE + Version 3, 29 June 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/> + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU General Public License is a free, copyleft license for +software and other kinds of works. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +the GNU General Public License is intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. We, the Free Software Foundation, use the +GNU General Public License for most of our software; it applies also to +any other work released this way by its authors. You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + To protect your rights, we need to prevent others from denying you +these rights or asking you to surrender the rights. Therefore, you have +certain responsibilities if you distribute copies of the software, or if +you modify it: responsibilities to respect the freedom of others. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must pass on to the recipients the same +freedoms that you received. You must make sure that they, too, receive +or can get the source code. And you must show them these terms so they +know their rights. + + Developers that use the GNU GPL protect your rights with two steps: +(1) assert copyright on the software, and (2) offer you this License +giving you legal permission to copy, distribute and/or modify it. + + For the developers' and authors' protection, the GPL clearly explains +that there is no warranty for this free software.
For both users' and +authors' sake, the GPL requires that modified versions be marked as +changed, so that their problems will not be attributed erroneously to +authors of previous versions. + + Some devices are designed to deny users access to install or run +modified versions of the software inside them, although the manufacturer +can do so. This is fundamentally incompatible with the aim of +protecting users' freedom to change the software. The systematic +pattern of such abuse occurs in the area of products for individuals to +use, which is precisely where it is most unacceptable. Therefore, we +have designed this version of the GPL to prohibit the practice for those +products. If such problems arise substantially in other domains, we +stand ready to extend this provision to those domains in future versions +of the GPL, as needed to protect the freedom of users. + + Finally, every program is threatened constantly by software patents. +States should not allow patents to restrict development and use of +software on general-purpose computers, but in those that do, we wish to +avoid the special danger that patents applied to a free program could +make it effectively proprietary. To prevent this, the GPL assures that +patents cannot be used to render the program non-free. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 
+ + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. 
+ + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. + + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. + + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. 
+ + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. + + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. 
+ + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. + + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. 
If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). 
To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. + + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Use with the GNU Affero General Public License. 
+ + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU Affero General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the special requirements of the GNU Affero General Public License, +section 13, concerning interaction through a network will apply to the +combination as such. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU General Public License, you may choose any version ever published +by the Free Software Foundation. + + If the Program specifies that a proxy can decide which future +versions of the GNU General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. + + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program. If not, see <http://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If the program does terminal interaction, make it output a short +notice like this when it starts in an interactive mode: + + <program> Copyright (C) <year> <name of author> + This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, your program's commands +might be different; for a GUI interface, you would use an "about box". + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU GPL, see +<http://www.gnu.org/licenses/>. + + The GNU General Public License does not permit incorporating your program +into proprietary programs. If your program is a subroutine library, you +may consider it more useful to permit linking proprietary applications with +the library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. But first, please read +<http://www.gnu.org/philosophy/why-not-lgpl.html>. diff --git a/support/splint/INSTALL b/support/splint/INSTALL new file mode 100644 index 0000000000..11e85736ed --- /dev/null +++ b/support/splint/INSTALL @@ -0,0 +1,59 @@ +There is no specific installation procedure to follow for SPLinT, although +there are a few dependencies to keep in mind. To build all the tools +in the package, you will need the usual suite of build tools (gcc, +make and friends), bison and flex (which usually come separately), +cweb and cwebmac.tex (supplied by the texlive-extra-utils package in Mint, +for example), and various fonts (Cyrillic, such as lhr10, etc., found in +texlive-lang-cyrillic, and extra sizes of Computer Modern, supplied by +texlive-fonts-extra). The only purpose of the Cyrillic fonts is to supply +a decent-looking pair of `french quotes', or `guillemets', used in +./tex/btokenset.sty. + +As of version 3.0 of bison, the arrays yyprhs and yyrhs are no longer +exported as part of the bison output (indeed, they are no longer created +internally, either, so even writing a `plugin' would not help).
A very +elaborate scheme, akin to the one used to extract the actions, could be +implemented to reproduce those arrays, but this seems too high a price +to pay for the arrogance and shortsightedness of bison maintainers and +developers. Therefore, as of this release, this package is only +intended to be used with bison version 2.7 or lower. So far, the +latest release of flex (2.5.39) is still compatible with SPLinT. In +case the local version of bison (and possibly, in the future, flex) is +incompatible with SPLinT, it is recommended that a local version be +compiled and used. For this purpose, the make variables BISON_ROOT and +FLEX_ROOT can be set to the appropriate locations in makefile.loc (a +sketch is given at the end of these notes). The +appropriate versions of bison and flex can be downloaded from +http://ftp.gnu.org/gnu/bison/ and http://flex.sourceforge.net/, +respectively. The installation instructions in those packages are easy +to follow (the standard ./configure --prefix=<prefix> &&\ +make && cp src/bison <prefix> && cp -a data/* \ +<prefix>/share/bison, where <prefix> stands for the chosen installation +directory, should work). + +The arrays (yyprhs and yyrhs) only affect the error reporting and the +`symbolic switch' output. The former dependency can be eliminated +(following the route taken by bison itself); the latter, however, is a +much more serious issue. If one is not using symbolic names for +grammar terms, the arrays can be ignored. The approach taken by the +current version of bison is to use the state stack and the yystos, yyr1, +and yyr2 arrays instead. Note that this is somewhat inconsistent with +the purpose of the debugging output, since the error reporting routines +rely on the correct state of the state stack (yyssa) rather than on a +static set of grammar rules. In short, use an older version of bison. + +The setup involved is minimal, but for the build procedure to work, +all the style files (which reside in ./tex and ./cweb) should be visible +to \TeX\ (i.e. you should be able to \input them). Building everything +from scratch also requires Perl to be installed (see the scripts in the +./scripts directory). Building after `make mostlyclean' requires only +a C compiler and CWEB. + +* A note about make: while using make to perform quick rebuilds after + minor modifications mostly works, a project of this kind is not a + natural fit for make's capabilities. Make is really good at + codifying build procedures that form a tree. In the case of + SPLinT, the same commands often have to be run several times; what + is worse, running those commands repeatedly modifies the + prerequisites of earlier targets. Thus, view make as simply a + convenient way of recording every step needed to build the project + and its various parts, not as an efficient build tool: it is + always safer to say 'make distclean' and repeat the build.
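+ +For reference, here is what makefile.loc might contain when locally +compiled copies of the tools are used. This is only a sketch: the paths +below are invented placeholders, and whether a trailing slash is needed +depends on how makefile.inc consumes these variables. + + # makefile.loc: hypothetical local overrides pointing at + # locally compiled copies of bison and flex + BISON_ROOT = ${HOME}/opt/bison-2.7/ + FLEX_ROOT = ${HOME}/opt/flex-2.5.39/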
\ No newline at end of file diff --git a/support/splint/Makefile b/support/splint/Makefile new file mode 100644 index 0000000000..05209cf6f8 --- /dev/null +++ b/support/splint/Makefile @@ -0,0 +1,39 @@ +SPLINT_ROOT = $(shell pwd) +SPLINT_EXAMPLES_DIRS = expression xxpression symbols ld + +DO_SUBMAKE = for dir in ${SPLINT_EXAMPLES_DIRS}; do cd ${SPLINT_ROOT}/examples/$$dir && ${MAKE} $(1); done + +include ${SPLINT_ROOT}/makefile.inc +include ${SPLINT_ROOT}/makefile.loc + +# output a list of all control sequences defined in the package + +lists: tex/*.sty + perl scripts/cslist.pl $^ > cseqs.lst + +manual: + cd ${SPLINT_ROOT}/cweb && ${MAKE} splint.pdf + +docs: + cd ${SPLINT_ROOT}/cweb && ${MAKE} splint.pdf && ${MAKE} ssffo.pdf + $(call DO_SUBMAKE,docs) + +# clean will erase all automatically generated files in the current directory + +clean: clean_core + -rm -f cseqs.lst + +# mostlyclean will leave all the generated table and token equivalence files + +mostlyclean: + -cd cweb && ${MAKE} clean_temp && rm -f ctablesout b?out ltout smallp_out \ + smalll_out lstabout bo.c np.c + $(call DO_SUBMAKE,mostlyclean) + +# distclean will erase all automatically generated files + +distclean: clean + rm -f splint.tar.bz2 + cd cweb && ${MAKE} clean + $(call DO_SUBMAKE,clean) + diff --git a/support/splint/README b/support/splint/README new file mode 100644 index 0000000000..9a0111b681 --- /dev/null +++ b/support/splint/README @@ -0,0 +1,82 @@ +To make the licensing part clear, SPLinT is GPL v.~3: + +% Copyright 2012-2014, Alexander Shibakov +% This file is part of SPLinT +% +% SPLinT is free software: you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation, either version 3 of the License, or +% (at your option) any later version. +% +% SPLinT is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with SPLinT. If not, see <http://www.gnu.org/licenses/>. + +SPLinT is a \TeX\ package for building parsers and scanners in +\TeX. Bison and Flex are used to create the tables that drive the +automata programmed as \TeX\ macros (thus, after the parser/scanner +has been completed, it can be used in plain \TeX, without invoking any +outside software). The documentation included with the package has a +detailed description of the features of the produced parsers and +scanners. To get started, run `make' to create the manual (which can then be +found as splint.pdf in the cweb/ directory) and take a look at the +`examples' directory. Here is a short +description of the various files included in the collection.
+ +Files: + +cweb/* - executables and documentation: + bs.w - Bison Sourcer(er): table code core for bison + fk.w - Flex Kit(ten): table code core for flex + common.w - common code for table generators + bo.w - parser for the bison grammar + lo.w - lexer for the bison grammar + np.w - scanner and parser for token names + ssffo.w - lexer for state grabbing + mkeparser.w - parser output `driver' + mkscanner.w - lexer output `driver' + splint.w - documentation + +tex/* - \TeX\ macros + yy*.sty, yx*.sty, flex.sty - automata machinery + trt1.sty - `\TeX\ runtime': temporary register definitions + xarithm.sty - expandable arithmetic for parsing macros + grabstates.sty - macros for state grabbing + ?tokenset.sty - token typesetting definitions + dcols.sty - multiple column output + limbo.sty - limbo section macros + frontmatter.sty - macros to typeset the cover page + +examples/* - various examples: + count - an example of robust token counting macros. + expression - a simple expression parser built with the package. + ld - a typesetting parser for the \GNU\ linker, ld, with a very + detailed implementation manual. + symbols - a demonstration of the features of the bison parser + included in the package. + types - expandable arithmetic (e.g. addition and subtraction + macros that can be used inside \edef), tree data structure; + incomplete and slow, merely a proof of concept; only + standard plain \TeX\ is used + xxpression - an extended version of the `expression' + example above, with a demonstration of the symbolic name + mechanism setup. + +scripts/* - helper (Perl, for now) scripts to aid in pre- and + post-processing of various files + +makefile.inc - common definitions +makefile.loc - placeholder file for local make rules +COPYING - license information +INSTALL - cursory installation notes +README - this file +TODO - plans for future development +VERSION - the current version + +The most recent version of this software can be downloaded at + + math.tntech.edu/alex/splint.tar.bz2 diff --git a/support/splint/TODO b/support/splint/TODO new file mode 100644 index 0000000000..42c0b2b51f --- /dev/null +++ b/support/splint/TODO @@ -0,0 +1,27 @@ +... ongoing + +o Typo and style fixes + +... sometime in the future + +o Change generic macro names (such as \table, \symstream, etc.) to something more specific +* Add \bgrulealign and \egrulealign macros to align rules across sections (a simple way to + implement this is given as an example in the included package; a more flexible method will + have to wait) +* Rewrite yytexlex.sty for better \TeX\ pretty printing macros (* the new macros are still + not ideal but are very unlikely to change in the near future) +o Rewrite limbo.sty to provide a more logical structure +* Add indexing features to \TeXx macros (* there is a mechanism to do this) +o Change CWEB macros so that \pdfoutput=0 does not cause conflicts (this is a bug in cwebmac.tex) + +... very remote future + +o Make GLR parser generation possible (* probably not feasible with + the current state of bison design, likely to require a fork or even + a completely different tool) +o Make the `core' parsing and scanning macros prefix-expandable + +... very very remote future + +o Make automatic translation of \Cee\ into \TeX\ possible so that + bison routines can be simply translated on the fly.
\ No newline at end of file diff --git a/support/splint/VERSION b/support/splint/VERSION new file mode 100644 index 0000000000..993f095645 --- /dev/null +++ b/support/splint/VERSION @@ -0,0 +1 @@ +1.05 diff --git a/support/splint/cweb/Makefile b/support/splint/cweb/Makefile new file mode 100644 index 0000000000..ec9973fb66 --- /dev/null +++ b/support/splint/cweb/Makefile @@ -0,0 +1,115 @@ +SPLINT_ROOT = $(shell pwd)/.. + +include ${SPLINT_ROOT}/makefile.inc + +all: ${SPLINT_PTABLES} ${SPLINT_LTABLES} + +b%out: mkeparser.c b%.c + ${CC} ${BISON_STATE} -DPARSER_FILE=\"$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $< + +b%.yy: bo.x + ${CTANGLE} $< + +%yytab.tex: b%out + ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@ + +ltab.tex: ltout + ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@ + +ltout: mkscanner.c lo_states.h lo.c + ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $< + +ssffo.ll lo.ll: \ +%.ll: %.x + ${CTANGLE} $< && rm $(patsubst %.x, %.c, $^) + +lo.c: lo.l + ${FLEX} -o $@ $< + +mkscanner.c mkeparser.c: \ +%.c: %.w + ${CTANGLE} $< + +# name parser + +smallp_out: mkeparser.c small_parser.c + ${CC} ${BISON_STATE} -DPARSER_FILE=\"$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $< + +smalll_out: mkscanner.c small_lexer.c + ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $< + +small_tab.tex: smallp_out + ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@ + +small_dfa.tex: smalll_out + ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@ + +small_parser.yy small_lexer.ll: np.x + @${CTANGLE} $< + +bo.tex: bo.x + ${CWEAVE} -x $< + +splint.tex \ +splint.idx \ +splint.scn: splint.w bo.x lo.x np.x common.w bs.w fk.w philosophy.w references.w + ${CWEAVE} $< + +ssffo.tex \ +ssffo.idx ssffo.scn: ssffo.x + ${CWEAVE} $< + +bo.tok: bo.tex ltab.tex byytab.tex + ${TEX} ${MODEBOOTSTRAP} \\input $< + +ssffo.pdf: %.pdf: ${SPLINT_DOC_PREREQS_XREF} + ${PDFTEX} $*.tex + +ssffo.dvi: %.dvi: ${SPLINT_DOC_PREREQS_XREF} + ${TEX} $*.tex + +splint.gdx: %.gdx: ${SPLINT_DOC_PREREQS_XREF} + @echo "Making the bison and TeX indices ..." 
+ ${TEX} $*.tex + +splint.pdf: %.pdf: ${SPLINT_DOC_PREREQS_XREF} %.gdy + ${PDFTEX} \\input $*.tex && touch $*.gdy && touch $*.pdf + +splint.dvi: %.dvi: ${SPLINT_DOC_PREREQS_XREF} %.gdy + ${TEX} $*.tex && touch $*.gdy && touch $*.dvi + +${SPLINT_ROOT}/tex/btokenset.sty: # stupid make weirdness + @ + +# state parsing + +lstabout: mkscanner.c ssffo.c + ${CC} -DLEXER_FILE=\"$(lastword $^)\" -o $@ $< + +lstab.tex: lstabout + ${SPLINT_DRIVER_DIR}/$< --optimize-actions $@ + +lo.tex: lo.x + ${CWEAVE} $< + +lo_states.h: lo.tex lstab.tex byytab.tex + ${PDFTEX} $< + +# clean will erase all automatically generated files in the current directory + +clean: clean_core + -rm -f ctablesout b?out ltout smallp_out \ + smalll_out lstabout + +include ${SPLINT_ROOT}/makefile.loc + +# since bg.yy is not an intermediate file in examples/symbols/Makefile, repeated 'make all' +# remakes bg.yy, thereby forcing make to update byytab.tex, etc., which results in remaking +# of bo.tok, lo.tex, eventually leading to remaking of splint.pdf; +# the special target below tells make to treat bg.yy as if it were not an intermediate file + +.PRECIOUS: bg.yy bg.y + +# the files below appear as targets but are really intermediaries for other files + +.INTERMEDIATE: smallp_out smalll_out lstabout ltout splint.gdx diff --git a/support/splint/cweb/bo.w b/support/splint/cweb/bo.w new file mode 100644 index 0000000000..2185f2f65e --- /dev/null +++ b/support/splint/cweb/bo.w @@ -0,0 +1,2479 @@ +% Copyright 2012-2014, Alexander Shibakov +% Copyright 2002-2014 Free Software Foundation, Inc. +% This file is part of SPLinT +% +% SPLinT is free software: you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation, either version 3 of the License, or +% (at your option) any later version. +% +% SPLinT is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with SPLinT. If not, see <http://www.gnu.org/licenses/>. + +\input limbo.sty +\input frontmatter.sty +\def\optimization{5} +\input yy.sty +% multi-column output +\input dcols.sty + +\let\hostparsernamespace\mainnamespace % the namespace where tokens are looked up + % for typesetting purposes +\let\currentparsernamespace\parsernamespace + \let\parsernamespace\mainnamespace + \let\currenttokeneq\tokeneq + %\def\tokeneq#1#2{\prettytoken{#1}} + \let\tokeneq\prettywordpair@@ + \let\optstrextra\optstrextraesc + \input bo.tok % re-use token equivalence table to set the typesetting of tokens + \let\tokeneq\currenttokeneq + \input btokenset.sty + % index entries + \let\parsernamespace\indexpseudonamespace + \prettywordpair{emptyrhs}{$\circ$ {\rm(empty rhs)}}% + \prettywordpair{inline_action}{$\diamond$ {\rm(inline action)}}% + \prettywordpair{TOKEN}{{\tt TOKEN} {\rm(example)}}% + \prettywordpair{token}{{\tt "token"} {\rm(example)}}% +\let\parsernamespace\currentparsernamespace + +\immediate\openout\exampletable=\jobname.exl + +\def\nontitle#1{{\ttl #1}} +\def\cite[#1]{% + \def\next{#1}\setbox0=\hbox{l}% + [\ifx\next\empty$\,$\hbox{\vrule width\wd0 height\ht0 depth\dp0}$\,$\else \locallink{#1bibref}#1\endlink\fi]% +} + +\let\oldN\N +\let\N\textN +\let\M\textM + +\defreserved{Y}{\.{Y}} +\showlastactiontrue + +@**Introduction.
+\setupfootnotes
+\splint\footnote{I was tempted to call the package {\tt ParLALRgram}
+which stands for Parsing {\sc LALR} Grammars or {\tt PinT} for
+`Parsing in \TeX' but both sounded too generic.} (Simple Parsing and
+Lexing in \TeX, or, following the great GNU
+tradition of creating recursive names, \splint\ Parses Languages
+in \TeX) is a system (or
+rather a m\'elange of systems) designed to
+facilitate developing parsing macros in \TeX\ and (to a lesser
+degree) documenting parsers written in other languages. As
+an application, a parser for \bison\ input file syntax has been
+developed, along with a macro collection that makes it possible to
+design and pretty print \bison\ grammars using \CWEB.
+
+Developing software in \CWEB\ involves two programs. The first of these is
+\CTANGLE\ that outputs the actual code, intended to be in
+\Cee. In reality, \CTANGLE\ cares very little about the language it
+produces. Exceptions are \Cee\ comments and |@[#line@]| directives that might
+confuse lesser software, although \bison\ is all too happy to swallow them
+(there are also some \Cee\ specific constructs that \CTANGLE\ tries to
+recognize). \CTANGLE's main function is to rearrange the text of the
+program as written by the programmer (in a way that, hopefully,
+emphasizes the internal logic of the code) into an appropriate
+sequence (e.g.~all variable declarations must textually precede their
+use). All that is required to adapt \CTANGLE\ to produce \bison\
+output is some very rudimentary post- and pre-processing.
+
+Our main concern is thus \CWEAVE\ that not only pretty prints the
+program but also creates an index, cross-references all the
+sections, etc. Getting \CWEAVE\ to pretty print a language other than
+\Cee\ requires some additional attention. A true digital warrior would
+probably try to decipher \CWEAVE's output `in the raw' but, alas, my
+WebFu is not that strong. The loophole comes in the form of a rarely
+(for a good reason) used \CWEB\ command: the verbatim (\.{@@=...@@>})
+output. The material to be output by this construct undergoes minimal
+processing and is put inside \.{\\vb\{}$\ldots$\.{\}}. All that is
+needed now is a way to process this virtually straight text inside \TeX.
+
+@*1 Using the \bison\ parser.
+The process of using \splint\ for writing parsing macros in \TeX\ is
+treated in considerable detail later in this document. A shorter
+(somewhat outdated but still applicable) version of this
+process is outlined in \cite[Sh]. We begin,
+instead, by explaining how one such parser can be used to pretty print a
+\bison\ grammar. Following the convention mentioned above and putting
+all non-\Cee\ code inside \CWEAVE's verbatim blocks, consider the
+following (meaningless) code fragment. The fragment contains a mixture
+of \Cee\ and \bison\ code; the former appears outside of the verbatim blocks.
+\begindemo
+^@@= non_terminal: @@>
+^@@= term.1 term.2 {@@> a = b; @@=}@@>
+^@@= **H term.3 other_term {@@> $$ = $1; @@=}@@>
+^@@= **H still more terms {@@> f($1); @@=}@@>
+^@@= ; @@>
+\enddemo
+The fragment above will appear as (the output of \CTANGLE\ can be
+examined in \.{sill.y})
+@=
+@G
+non_terminal:
+ term.1 term.2 {@> a = b; @=}
+| term.3 other_term {@> $$ = $1; @=}
+| still more terms {@> f($1); @=}
+;
+@g
+
+@ $\ldots$ if the syntax is correct.
+In case it is a bit off, the parser will give up and
+you will see a different result.
The code in the fragment below is easily +recognizable, and some parts of it (all of \Cee\ code, in fact) are +still pretty printed in \CWEAVE. Only the verbatim portion is left +unprocessed. +@= +@G +whoops + term.1 term.2 {@>@+ a = b; @+@=} +| term.3 other_term {@>@+ $$ = $1; @+@=} +| still more terms {@>@+ f($1); @+@=} +; +@g + +@ The \TeX\ header that makes such output possible is quite plain. In this case +(i.e.\ this very file) it begins as +\begindemo +^\input limbo.sty +^\input frontmatter.sty +^\input yy.sty +\nooutput +\enddemo +The first two lines are presented here merely for completeness: there is +no parsing-relevant code in them. The line that +follows loads the macros that implement the parsing and scanning +machinery. This is enough to set up all the basic +mechanisms used by the parsing and lexing macros. The rest of the header +provides a few definitions to fine tune the typesetting of +grammar productions. It starts with +\begindemo +^\let\currentparsernamespace\parsernamespace +^ \let\parsernamespace\mainnamespace +^ \let\currenttokeneq\tokeneq +^ \def\tokeneq#1#2{\prettytoken{#1}} +^ \input bo.tok % re-use token equivalence table to set the typesetting of tokens +^ \let\tokeneq\currenttokeneq +^ \input btokenset.sty +\nooutput +\enddemo +We will have a chance to discuss all the \.{\\}$\ldots$\.{namespace} +macros later, at this point it will suffice to say that the lines +above are responsible for controlling the typesetting of term names. The +file \.{bo.tok} consists of a number of lines like the ones below: +\begindemo +^\tokeneq {STRING}{{34}{115}{116}{114}{105}{110}{103}{34}} +^\tokeneq {PERCENT_TOKEN}{{34}{37}{116}{111}{107}{101}{110}{34}} +\nooutput +\enddemo +The cryptic looking sequences of integers above are strings of {\sc ASCII} +codes of the letters that form the name \bison\ uses when it needs to +refer to the corresponding token (thus, the second one is +\toksa{}\numberstochars{34}{37}{116}{111}{107}{101}{110}{34}\end +\.{\the\toksa} which might help explain why such an elaborate scheme +has been chosen). The macro \.{\\tokeneq} is defined in +\.{yymisc.sty}, which in turn is input by \.{yy.sty} but what about +the token names themselves? In this case they were extracted +automatically from the \CWEB\ source file by the parser during the +\CWEAVE\ processing stage. All of these definitions can be +overwritten to get the desired output (say, one might want to typeset +\.{ID} in a roman font, as `identifier'; all that needs to be done is +a macro that says \.{\\prettywordpair\{ID\}\{\{\\rm +identifier\}\}}). The file \.{btokenset.sty} input above contains a +number of such definitions. + +@ To round off this short overview, I must mention a caveat associated +with using the macros in this collection: while one of the greatest +advantages of using \CWEB\ is its ability to rearrange the code in a +very flexible way, the parser will either give up or produce +unintended output if this feature is abused while describing the +grammar. For example, in the code below +@= +@G +next_term: + stuff @> @ @={@> a = f( x ); @=} +@g +@@; + +@ the line titled |@| is intended to be a rule defined +later. 
Notice that while it seems that the parser was able to recognize +the first code fragment as a valid \bison\ input, it misplaced the +|@|, having erroneously assumed it to be a part of +the action code for this grammar (later on we will go into the details of +why it is necessary to collect all the non-verbatim output of \CWEAVE, +even the one that contains no interesting \Cee\ code; hint: it has +something to do with money (\.{\$}), also known as math and the way +\CWEAVE\ processes the `gaps' between verbatim sections). The production +line that follows did not fare as well: the parser gave up. There +is simply no point in including such a small language fragment as a +valid input for the grammar the parser uses to process the verbatim +output. +@= +@G + more stuff in this line {@> @[b = g(y);@]@=} +@g + +@ Finally, if you forget that only the verbatim part of the output is +looked at by the parser you might get something unrecognizable, such +as +@= + but not all of it + +@ To correct this, one can provide a more complete grammar fragment to +allow the parser to complete its task successfully. In some cases, +this imposes too strict a constraint on the programmer. Instead, the +parser that pretty prints \bison\ grammars allows one to add {\it +hidden context\/} to the code fragments above. The context is added +inside \.{\\vb} sections using \CWEB's \.{@@t}$\ldots$\.{@@>} facility. The \CTANGLE\ +output is not affected by this while the code above can now be typeset as: +@= +@G +next_term: + stuff @> @t}\vb{\formatlocal{\let\peekstash\stashtoterm}}{@> @ @t}\vb{FAKE}{@> @={@> a = f( x ); @=} +@g +@@; + +@ $\ldots$ even a single line can now be displayed properly. +@= +@G +@t}\vb{\formatlocal{\skipheader} FAKE:}{@> + more stuff in this line {@> b = g( y ); @=} +@g + +@ With enough hidden context, even a small rule fragment can be +typeset as intended. The `action star' was inserted to reveal some of +the context. +@= +@G +@t}\vb{\formatlocal{\skipheader} FAKE:}{@> + but not all of it +@t}\vb{\{\stashed{$\star$}\}}{@> +@g +@ What makes all of this even more confusing is that \CTANGLE\ will +have no trouble outputting this as a(n almost, due to the +intentionally bad \.{whoops} production above) valid \bison\ file +(as can be checked by looking into \.{sill.y}). The author +happens to think that one should not fragment the software into pieces +that are too small: \bison\ is not \Cee\ so it makes sense to write +\bison\ code differently. However, if the logic behind your code +organization demands such fine fragmentation, hidden context provides +you with a tool to show it off. A look inside the source of this +document shows that adding hidden context can be a bit ugly so it is +not recommended for routine use. The short example above is output in +the file below. +@(sill.y@>= + @@; + +@*1 On debugging. This concludes a short introduction to the \bison\ +grammar pretty printing using this macro collection. It would be +incomplete, however, without a short reference to debugging\footnote{Here +we are talking about debugging the output produced by \CWEAVE\ when +the included \bison\ parser is used, {\it not\/} debugging parsers +written with the help of this software: the latter topic is covered in more +detail later on}. There is a +fair amount of debugging information that the macros can output, +unfortunately, very little of it is tailored to the {\it use\/} of the +macros in the \bison\ parser. Most of it is designed to help {\it +build\/} a new parser. 
If you find that the parser gives up too often +or even crashes (the latter is most certainly a bug in the parser +itself), the first approach is to make sure that your code {\it +compiles\/} i.e.\ forget about the printed output and try to see if +the `real' \bison\ accepts the code (just the syntax, no need to +worry about conflicts and such). + +If this does not shed any light on why the macros seem to fail, turn +on the debugging output by saying \.{\\trace$\ldots$true} for various +trace macros. This can produce {\it a lot\/} of output, even for +small fragments, so turn it on only for a section at a time. If you +need still {\it more\/} details of the inner workings of the parser +and the lexer, various other debugging conditionals are available. For +example, \.{\\yyflexdebugtrue} turns on the debugging output for the +scanner. There are a number of such conditionals that are discussed in +the commentary for the appropriate \TeX\ macros. + +Remember, what you are seeing at this point is the parsing process of +the \bison\ input file, not the one for {\it your\/} grammar (which +might not even be complete at this point). However, if this fails, you +are on your own: drop me a line if you figure out how to fix any bugs +you find. + +@*1 Terminology. We now list a few definitions of the concepts used +repeatedly in this documentation. Most of this terminology is +rather standard. Formal precision is not the goal here, and intuitive +explanations are substituted whenever possible. +{% +\def\aterm#1{\item{\sqebullet}{\ttl #1}: \ignorespaces}% +\setbox0=\hbox{\sqebullet\enspace} +\parindent=0pt +\advance\parindent by \wd0 +\smallskip +\aterm{bison parser} while, strictly speaking, not a formally defined +term, this combination will always stand for one of the parsers generated +by this package designed to parse a subset of the `official' grammar for +\bison\ input files. All of these parsers are described later in +this documentation. The term {\it main parser\/} will be +used as a substitute in example documentation for the same purpose. + +\aterm{driver} a generic but poorly defined concept. In this +documentation it is used predominantly to mean both the \Cee\ code and +the resulting executable that outputs the \TeX\ macros that contain the +parser tables, token values, etc., for the parsers built by the user. It +is understood that the \Cee\ code of the `driver' is unchanged and the +information about the parser itself is obtained by {\it including\/} the \Cee\ +file produced by \bison\ in the `driver' (see the examples supplied +with the package). + +\aterm{lexer} a synonym for {\it scanner}, a subroutine that performs the {\it +lexical analysis\/} phase of the parsing process, i.e.\ groups various +characters from the input stream into parser {\it tokens}. + +\aterm{namespace} this is an overused bit of terminology meaning a +set of names grouped together according to some relatively +well defined principle. In a language without a well developed type +system (such as \TeX) it is usually accompanied by a specially designed +naming scheme. {\it Parser namespaces\/} are commonly used in this +documentation to mean a collection of all the data structures describing a +parser and its state, including tables, stacks, etc., named by using the +`root' name (say \.{\\yytable}) and adding the name of the parser (for +example, \.{[main]}). To support this naming scheme, a number of +macros work in unison to create and rename the `data macros' accordingly. 
+ +\aterm{symbolic switch} a macro (or an associative array of macros) +that let the \TeX\ parser generated by the package associate {\it +symbolic term names\/} with the terms. Unlike the `real' parser, the +parser created with this suite requires some extra setup as explained +in the included examples (one can also consult the source for this +documentation which creates but does not use a symbolic switch). + +\aterm{symbolic term name} a (relatively new) way to refer to stack +values in \bison. In addition to using the `positional' names such as +\.{\$}$n$ to refer to term values, one can utilize the new syntax: +\.{\$}\.{[}{\it name\/}\.{]}. The `{\it name}' can be assigned by the +user or can be the name of the nonterminal or token used in the +productions. + +\aterm{term} in a narrow sense, an `element' of a grammar. Instead of +a long winded definition, an example, such as \prodstyle{ID} should +suffice. Terms are further classified into {\it terminals\/} (tokens) +and {\it nonterminals\/} (which can be intuitively thought of as +composite terms). + +\aterm{token} in short, an element of a set. Usually encoded as an +integer by most parsers, an indivisible {\it term\/} +produced for the parser by the scanner. \TeX's scanner uses a more +sophisticated token classification, for example, $($character code, +character category$)$ pairs, etc. + +} +@** Languages, scanners, parsers, and \TeX. % Or $\ldots$ +$$\vbox{\halign to\hsize{\kern-1.5pt\it#\hfil\tabskip0pt plus1fil\cr +Tokens and tables keep macros in check.\cr +Make 'em with \bison, use \.{WEAVE} as a tool.\cr +Add \TeX\ and \CTANGLE, and \Cee\ to the pool.\cr +Reduce 'em with actions, look forward, not back.\cr +Macros, productions, recursion and stack!\cr +\noalign{\vskip2pt} +\omit\hfil\eightpoint Computer generated (most likely)\cr}} +$$ +\def\recount#1{${}^{(#1)}$}% +In order to understand the parsing routines in this collection, +it would help to gain some familiarity with the internals of the +parsers produced by \bison\ for its intended target: \Cee. A person +looking inside a parser delivered by \bison\ would +quickly discover that the parsing procedure itself (|yyparse|) +occupies a rather small portion of the file. If (s)he were to further +reduce the size of the file by removing all the preprocessor +directives intended to anticipate every conceivable combination of the +operating system, compiler, and \Cee\ dialect, and various reporting +and error logging functions it would become very clear that the most +valuable product of \bison's labor is a collection of integer {\it +tables\/} that control the actions of the parser routine. Moreover, +the routine itself is an extremely concise and well-structured loop +composed of |goto|'s and a number of numerical conditionals. If one +were to think of a way of accessing arrays and processing conditionals +in the language of one's choice, once the tables produced by \bison\ +have been converted into a form suitable for the consumption by the +appropriate language engine, the parser implementation becomes +straightforward. Or nearly so. + +The {\it scanning\/} (or {\it lexing\/}) step of this process---a way +to convert a stream of symbols into a stream of integers, also +deserves some attention here. There are a number of excellent tools +written to automate this step in much the same fashion as \bison\ +automates the generation of parsers. 
One such tool, \flex, though +(in the opinion of this author) slightly lacking in the simplicity and +elegance as compared to \bison, was used to implement the lexer for +this software suite. Lexing in \TeX\ will be discussed in considerable +detail later in this manual. + +The language of interest in our case is, of course, \TeX, so our +future discussion will revolve around the five elements mentioned +above: \recount{1}data structures (mainly arrays and stacks), +\recount{2}converting +\bison's output into a form suitable for \TeX's consumption, +\recount{3}processing raw streams of \TeX's tokens and converting them into +streams of parser tokens, \recount{4}the implementation of \bison's +|yyparse| in \TeX, and, finally, \recount{5}producing \TeX\ output via {\it +syntax-directed translation} (which requires an appropriate +abstraction to represent \bison's actions inside \TeX). We shall +begin by discussing the parsing process itself. + +@*1 Arrays, stacks and the parser. +Let us briefly examine the programming environment offered by \TeX. +Designed for typesetting, \TeX's remarkable language +provides a layer of macro processing atop of a set of commands that +produce the output fulfilling its primary mission: delivering page +layouts. In The \TeX book, macro {\it expansion\/} is likened to +mastication, whereas \TeX's main product, the typographic output is the +result of its `digestion' process. Not everything that goes through +\TeX's digestive tract ends up leaving a trace on the final page: a +file full of \.{\\relax}'s will produce no output, even though +\.{\\relax} is not a macro, and thus would have to be processed by +\TeX\ at the lowest level. + +It is time to describe the details of defining suitable data structures +in \TeX. At first glance, \TeX\ provides rather standard means of +organizing and using general memory. At the core of its generic +programming environment is an array of \.{\\count}$\,n$ {\it +registers\/}, which may be viewed as general purpose integer variables +that are randomly accessible by their indices. The integer arithmetic +machinery offered by \TeX\ is spartan but is very adequate for the sort of +operations a parser would perform: mostly additions and +comparisons. + +Is the \.{\\count} array a good way to store tables in \TeX? Probably +not. The first factor is the {\it size\/} of this array: only 256 +\.{\\count} registers exist in a standard \TeX\ (the actual number of +such registers on a typical machine running \TeX\ is significantly +higher but this author is a great believer in standards, and to his +knowledge, none of the standardization efforts in the \TeX\ world has +resulted in anything even close to the definitive masterpiece that is +The \TeX book). The issue of size can be mitigated to some extent by +using a number of other similar arrays used by \TeX\ (\.{\\catcode}, +\.{\\uccode}, \.{\\dimen}, \.{\\sfcode} and others can be used for +this purpose as long as one takes care to restore the `sane' values +before control is handed off to \TeX's typesetting mechanisms). If a +table has to span several such arrays, however, the complexity of +accessing code would have to increase significantly, and the issue of +size would still haunt the programmer. + +The second factor is the use of several registers by \TeX\ for special +purposes (in addition, some of these registers can only store a +limited range of values). 
Thus, the first 10 \.{\\count} registers are +used by plain \TeX\ for (well, {\it intended\/} for, anyway) the +purposes of page accounting: their values would have to be carefully +saved and restored before and after each parsing call, +respectively. Other registers (\.{\\catcode} in particular) have even +more disrupting effects on \TeX's internal mechanisms. While all of +this can be managed (after all, using \TeX\ as an arithmetic engine +such as a parser suspends the need for any typographic or other +specialized functions controlled by these arrays), the added +complexity of using several memory banks simultaneously and the speed penalty +caused by the need to store and restore register values make this +approach much less attractive. + +What other means of storing arrays are provided by \TeX? Essentially, +only three options remain: \.{\\token} registers, macros holding whole +arrays, and associative arrays accessed through +\.{\\csname}$\,\ldots\,$\.{\\endcsname}. In the first two cases if care +is taken to store such arrays in an +appropriate form one can use \TeX's \.{\\ifcase} primitive to access +individual elements. The trade-off is the speed of such +access: it is {\it linear\/} in the size of the array for most +operations, and worse than that for others, such as removing the last +item of an array. Using clever ways +of organizing such arrays, one can improve the linear access time to +$O(\log n)$ by simply modifying the access macros but at the moment, a +straightforward \.{\\ifcase} is used after expanding a list macro or +the contents of a \.{\\token}$\,n$ register in an {\it un\/}optimized +parser. An {\it optimized\/} parser uses associative arrays. + +The array discussion above is just as applicable to {\it stacks\/} +(indeed, an array is the most common form of stack +implementation). Since stacks pop up and disappear frequently (what +else are stacks to do?), list macros are usually used to store +them. The optimized parser uses a separate \.{\\count} register to +keep track of the top of the stack in the appropriate associative +array. + +Let us now switch our attention +to the code that implements the parser and scanner {\it functions\/}. +If one has spent some time writing \TeX\ macros of any sophistication +(or any macros, for that matter) (s)he must be familiar with the general +feeling of frustration and the desire to `just call a function here and move +on'. Macros produce {\it tokens\/}, however, and tokens must either +expand to nothing or stay and be contributed to your input, or worse, +be out of place and produce an error. One way to sustain a stream +of execution with macros is {\it tail recursion\/} (i.e.~always expanding the +{\it last token left standing}). + +As we have already discussed, \bison's +|yyparse()| is a well laid out loop organized as a sequence of +|goto|'s (no reason to become religious about structured programming +here). This fact, and the following well known trick, make \Cee\ to \TeX\ +translation almost straightforward. + +% The macro mess below looks painful but this is the only place such layout is used +% The approach can be easily generalized and put in limbo.sty but it seems +% a bit redundant at this point. + +\newcount\piccount +\newdimen\lasthsize + +\setbox5=\vtop{ +\demomargin=0pt +\let\demoastyle\empty +\begindemo +^label A: ... +\nooutput +^ if**L**Krm(condition)**N +^ goto C; +\nooutput +^label B: ... +\nooutput +^ goto A; +\nooutput +^label C: ... 
+\nooutput +\enddemo +} +\dp5=\z@@ + +\setbox3=\vtop{ +\demomargin=0pt +\let\demoastyle\empty +\begindemo +^\if**L**Krm(condition)**N +^ \let\next=\labelC +^\else +^ \let\next=\labelAtail +\enddemo +} +\dp3=\z@@ + +\newdimen\lastdepth + +\def\startfitpar{% + \bgroup + \lasthsize=\hsize + \advance\lasthsize-1.5in + \vsize=\baselineskip + \topskip=\z@@ + \setbox0\box2 % empty it + % this sounds good at first but there is no good way to pull the insertions out after the + % box manipulations that follow; + % insertions will thus be contributed to whatever page was being worked on when the + % picture insertions {\it started}; hence, if these happen to start at the very top of the page, + % any insertion that follows will be contributed to the previous page; we correct this for footnotes + % below + % \holdinginserts=1 + \output{% + \global\setbox2=\vbox{ + \ifvoid2 + \else + \prevdepth=\dp2 + \unvbox2 + \fi + \lastdepth=\dp255 + \unvbox255 + % this would be tempting, however, the \eject that follows should disappear + % in addition, one really should not be playing with page breaking in the middle of + % such tricky insertions + % \penalty\outputpenalty + % \kern-\lastdepth % to make sure \baselineskip is accounted for + }% + }\eject + \output{% + \setbox0=\vbox{% + \unvbox255% + }% \lastbox would almost work ... if not for insertions + \global\advance\piccount1 + \global\setbox2=\vbox{% + \prevdepth=\dp2 \unvbox2 + \hbox to\hsize{% + \ifnum\piccount<15 + \hbox to1.5in{% + \ifnum\piccount=1 + \ \box5 + \fi + \hfill}% + \fi + \box0 \hfill + \ifnum\piccount=1 + \box3 \ % + \fi + \ifvoid\footins % reinsert footnotes + \else + \insert\footins{\unvbox\footins}% + \fi + }% + }% + }% + \parshape=15 + 0pt 2.7in + 0pt 2.7in + 0pt 2.7in + 0pt 2.7in + 0pt 2.7in + 0pt 2.7in + 0pt 2.7in + 0pt \lasthsize + 0pt \lasthsize + 0pt \lasthsize + 0pt \lasthsize + 0pt \lasthsize + 0pt \lasthsize + 0pt \lasthsize + 0pt \hsize +} + +\def\endfitpar{% + \par + \eject + \egroup + % see the comment above + % \holdinginserts=0 + \prevdepth=\dp2 + \unvbox2 +} + +\startfitpar +\noindent Given the code on the left (where |goto|'s +are the only means of branching but can appear inside conditionals), +one way to translate it into \TeX\ is to define a set of macros (call +them \.{\\labelA}, \.{\\labelAtail} and so forth for clarity) that end in +\.{\\next} (a common name for this purpose). Now, \.{\\labelA} will +implement the code that comes between \.{label A:} and \.{goto C;}, +whereas \.{\\labelAtail} is responsible for the code after \.{goto C;} +and before \.{label B:} +(provided no other |goto|'s intervene which can always be +arranged). The conditional which precedes \.{goto C;} can now be written in +\TeX\ as presented on the right, where (condition) is an appropriate +translation of the corresponding condition +in the code being translated (usually, one of `$=$' or `$\not=$'). Further +details can be extracted from the \TeX\ code that implements these +functions where the corresponding \Cee\ code is presented alongside +the macros that mimic its functionality% +\footnote{Running the risk of overloading the reader with details, the author +would like to note that the actual implementation follows a {\it slightly\/} different +route in order to avoid any \.{\\let} assignments or changing the +meaning of \.{\\next}}. 
+
This concludes an overview of the general approach. It is time to consider
the way characters get consumed
on the lower levels of the macro hierarchy and the interaction between the different
layers of the package.
\endfitpar
+
+@*1 \TeX\ into tokens.
+Thus far we have covered the ideas
+behind items \recount{1} and \recount{4} on our list. It is time to
+discuss the lowest level of processing done by these macros:
+converting \TeX's tokens into the tokens consumed by the parser,
+i.e.\ part \recount{3} of the plan. Perhaps it would be most appropriate
+to begin by defining the term {\it token}.
+
+As commonly defined, a token is simply an element of a set. Depending on
+how much structure the said set possesses, a token can be represented by
+an integer or a more complicated data structure. In the discussion
+below, we will be dealing with two kinds of tokens: the tokens
+consumed by the parsers and the \TeX\ tokens seen by the input
+routines. The latter play the role of {\it characters\/} that combine
+to become the former. \bison's internal representation for its tokens
+is non-negative integers so this is what a scanner must
+produce.
+
+\TeX's tokens are a good deal more sophisticated: they can be
+either pairs $(c_{\rm ch}, c_{\rm cat})$, where $c_{\rm ch}$ is the
+character code and $c_{\rm cat}$ is \TeX's category code ($1$ and $2$ for
+group characters, $5$ for end of line, etc.), or {\it control
+sequences\/}, such as \.{\\relax}. Some of these tokens (control
+sequences and {\it active}, i.e.~category~13 characters) can have
+complicated internal structure (expansion). The situation is further
+complicated by \TeX's \.{\\let} facility, which can create
+`character-like' control sequences, and the lack of conditionals
+to distinguish them from the `real' characters. Finally, not all pairs
+can appear as part of the input (say, there is no $(n, 0)$ token for
+any $n$, in the terminology above).
+
+The scanner expects to see {\it characters} in its input, which are
+represented by their {\sc ASCII} codes, i.e.~integers between $0$ and
+$255$ (actually, a more general notion of the Unicode character is
+supported but we will not discuss it further). Before character codes
+appear as the input to the scanner, however, and make its integer
+table-driven mechanism `tick', a lot of work must be done to collect
+and process the stream of \TeX\ tokens produced after \CWEAVE\ is done
+with your input. This work becomes further complicated when the
+typesetting routines that interpret the parser's output must sneak
+outside of the parsed stream of text (which is structured by the
+parser) and insert the original \TeX\ code produced by \CWEAVE\ into
+the page.
+
+\splint\ comes with a customizable input routine of
+moderate complexity (\.{\\yyinput}) that classifies all \TeX\ tokens
+into seven categories: `normal' spaces (i.e.~category~10 tokens,
+skipped by \TeX's parameter scanning mechanism),
+`explicit' spaces (including the control sequences \.{\\let} to \.{\ },
+as well as \.{\\\ }), groups ({\it avoid} using \.{\\bgroup} and \.{\\egroup} in
+your input but `real', \.{\{}$\ldots$\.{\}} groups are fine), active
+characters, normal characters (of all character categories that can
+appear in \TeX\ input, including \.{\$}, \.{\^}, \.{\#}, \.{a}--\.{Z},
+etc.), single letter control sequences, and multi-letter control
+sequences. Each of these categories can be processed separately to
+`fine-tune' the input routine to the problem at hand.
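+To give the flavor of such per-category processing, here is a toy
+classifier in the spirit of (but far simpler than) \.{\\yyinput}; every
+name in it is invented for this example, and the code is a sketch
+rather than a piece of the package.
+\begindemo
+^\def\toyclassify{\futurelet\next\toyclassifyi}
+^\def\toyclassifyi{%
+^  \ifcat\noexpand\next\relax % a control sequence ...
+^    \let\toyact\toycontrol
+^  \else\ifcat\noexpand\next A% ... a letter ...
+^    \let\toyact\toyletter
+^  \else % ... everything else
+^    \let\toyact\toyother
+^  \fi\fi
+^  \toyact
+^}
+\enddemo
+A realistic routine must, in addition, tell implicit spaces and braces
+apart from explicit ones, which is where most of the complexity
+discussed below comes from.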
The input +routine is not very fast, instead, flexibility was the main +goal. Therefore, if speed is desirable, a customized input routine +is a great place to start. As an example, a minimalistic +\.{\\yyinputtrivial} macro is included. + +When \.{\\yyinput} `returns' by calling \.{\\yyreturn} (which is a +macro you design), your lexing routines have access to three +registers: \.{\\yycp@@}, that holds the character value of the +character just consumed by \.{\\yyinput}, \.{\\yybyte}, that most of +the time holds the token just removed from the input, +and \.{\\yybytepure}, that (again, with very few +exceptions) holds a `normalized' version of the read character (i.e.~a +character of the same character code as \.{\\yycp@@}, and category~11 +(to be even more precise (and to use nested parentheses), `normalized' +characters have the same category code as the current category code of +\.{@@})). + +Most of the time it is the character code one needs (say, in the case +of \.{\\\{}, \.{\\\}}, \.{\\\&} and so on) but under some circumstances the +distinction is important (outside of \.{\\vb\{}$\ldots$\.{\}}, the sequence +\.{\\1} has nothing to do with the digit `\.{1}'). This mechanism +makes it easy to examine the consumed token. It also forms +the foundation of the `hidden context' passing mechanism described later. + +The remainder of this section discusses the internals of \.{\\yyinput} +and some of the design trade-offs one has to make while working on +processing general \TeX\ token streams. It is typeset in `small print' +and can be skipped if desired. +\smallskip +\begingroup +\abovedisplayskip=5pt% +\abovedisplayshortskip=2pt% +\belowdisplayskip=5pt% +\belowdisplayshortskip=2pt% +\fnotesstart=1 +\fnotesspan=2 +\noofcolumns=2 +\icgap=1em% +\eightpoint +\linecount=73 +\setmcparams +\def\.#1{{\chardef\\=`\\\chardef\&=`\&\tt #1}}% +\dsskip=0pt% +\begindoublecols +To examine every token in its path (including spaces that are easy to +skip), the input routine uses one of the two well-known {\sc \TeX}nologies: +\.{\\futurelet\\next\\examinenext} or equally effective +\hbox{\.{\\afterassignment\\next\\let={\tt\char"20}}}. +Recursively inserting one of these sequences, \.{\\yyinput} can go +through any list of tokens, as long as it knows where to stop +(i.e.~return an end of file character). The +signal to stop is provided by the \.{\\yyeof} +primitive which should not appear in any `ordinary' text +presented for parsing, other than for the purpose of providing such a +stop signal. Even the dependence on \.{\\yyeof} can be eliminated if +one is willing to invest the time in writing macros that juggle \TeX's +\.{\\token} registers and only limit oneself to input from such +registers (which is, aside from an obvious efficiency hit, a strain on +\TeX's memory, as you have to store multiple (3 in the general case) +copies of your input to be able to back up when the lexer makes a +wrong choice). There does not seem to be a way of doing it unless the +text has been stored in a \.{\\token} register first (or storing the +whole input as a {\it parameter\/} for the appropriate macro: this +scheme is remarkably powerful and leads to {\it expandable\/} versions +of very complicated macros, although the amount of effort required to +write such macros grows at a frightening rate). All of these are +non-issues for the text inside \.{\\vb\{}$\ldots$\.{\}} and the care that +\.{\\yyinput} takes in processing characters inside such lists is an +overkill. 
In a more `hostile' environment (such as the one encountered +by the now obsolete \.{\\Tex} macros), this extra attention to detail pays +off in the form of a more robust input mechanism. + +One subtlety deserves a special mention here, as it can be important +to the designer of `higher-level' scanning macros. Two types of tokens +are extremely difficult to deal with whenever \TeX's own lexing +mechanisms are used: (implicit) spaces and even more so, braces. We +will only discuss braces here, however, almost everything that follows +applies equally well to spaces (category 10 tokens to be precise), with +a few simplifications (or complications, in a couple of places). To +understand the difficulty, let's consider one of the approaches above: +$$ +\.{\\futurelet\\next\\examinenext}. +$$ +The macro \.{\\examinenext} +usually looks at \.{\\next} and inserts another macro (usually also called +\.{\\next}) at the very end of its expansion list. This macro usually +takes one parameter, to consume the next token. This mechanism works +flawlessly, until the lexer encounters a \.{\{}br\.{,}sp\.{\}}ace. The \.{\\next} +sequence, seen by \.{\\examinenext} contains a lot of information +about the brace ahead: it knows its category code (left brace, so $1$), its +character code (in case there was, say a \.{\\catcode`\\[=1{\tt\char`\ }} +earlier) but not whether it is a `real' brace (i.e.\ a character +\.{\{}$_1$) or an implicit one (a \.{\\bgroup}). There is no way to find +that out until the control sequence `launched' by \.{\\examinenext} +sees the token as a parameter. + +If the next token is a `real' brace, however, +\.{\\examinenext}'s successor will never see the token itself: the +braces are stripped by \TeX's scanning mechanism. Even if it finds a +\.{\\bgroup} as the parameter, there is no guarantee that the actual +input was not \.{\{\\bgroup\}}. One way to handle this is by using +\.{\\string} ahead of any consumption of the next token. If prior to +expanding \.{\\string} care has been taken to set the \.{\\escapechar} +appropriately (remember, we know the character code in advance), as +soon as one sees a character with \.{\\escapechar}'s character code, +(s)he knows that an implicit brace has just been seen. One added +complication to all this is that a very determined programmer can +insert an {\it active\/} character (using, say, the \.{\\uccode} +mechanism) that has the {\it same\/} character code as the {\it +brace\/} token that it has been \.{\\let} to! Setting this possibility +aside, the \.{\\string} mechanism (or, its cousin, \.{\\meaning}) is +not perfect: both produce a sequence of category 12 and 10 tokens. If +it is indeed a brace character that we just saw, we can consume the next +token and move on but what if this was a control sequence? After all, +just as easily as \.{\\string} makes a sequence into characters, +\.{\\csname}$\,\ldots\,$\.{\\endcsname} pair will make any sequence of +characters into a control sequence. Huh~$\ldots$ + +What we need is a backup mechanism: if one has a copy of the +token sequence ahead, one can use \.{\\string} to see if it is a real +brace first, and if it is, consume it and move on (the active character +case can be handled as the implicit case below, with one extra backup +to count how many tokens have been consumed). At this point one has to {\it +reinsert\/} the brace in case, at some point, a future `back up' +requires that the rest of the tokens are removed from the output (to +avoid `\.{Too many \}'s}' complaints from \TeX). 
This can be done by using
+the \.{\\iftrue\{\\else\}\\fi} trick but of course, some bookkeeping is
+needed to keep track of how far inside the brace groups we
+are.
+
+If it is an implicit brace, more work is needed: read all the
+characters that \.{\\string} produced (and maybe more), then remember
+the number of characters consumed. Remove the rest of the input using
+the method described above and restart the scanning from the same point
+knowing that the next token can be scanned as a parameter.
+
+Another strategy is to design a general enough macro that counts
+tokens in a token register and simply recount the tokens after every
+brace was consumed.
+
+Either way, it takes a lot of work. If anyone would
+like to pursue the counting strategy, simple counting macros
+are provided in \.{/examples/count/count.sty}.
+The macros in this example
+supply a very general counting mechanism that does not depend on
+\.{\\yyeof} (or {\it any\/} other token) being `special' and can count the
+tokens in any token register, as long as none of those tokens is an
+\.{\\outer} control sequence. In other words, if the macro is used
+immediately after the assignment to the token register, it should
+always produce a correct count.
+
+Needless to say, if such a general mechanism is desired, one has to
+look elsewhere. The added complications of treating spaces (\TeX\
+tends to ignore them most of the time) make this a torturous exercise
+in \TeX's macro wizardry. The included \.{\\yyinput} has two ways of
+dealing with braces: strip them or view the whole group as a
+token. Pick one or write a different \.{\\yyinput}. Spaces, implicit
+or explicit, are reported as a specially selected character code and
+consumed with a likeness of
+$$
+\hbox{\.{\\afterassignment\\moveon\\let\\next={\tt\char`\ }}}.
+$$
+
+Now that a steady stream of character codes is arriving at \.{\\yylex}
+after \.{\\yyreturn}, the job of converting it into numerical tokens
+is performed by the {\it scanner} (or {\it lexer\/}, or {\it tokenizer\/},
+or even {\it tokener}), discussed in the next section.
+\enddoublecols
+\endgroup
+
+@*1 Lexing in \TeX. In a typical system that uses a parser to process
+text, the parsing pass is usually split into several stages: the raw
+input, the lexical analysis (or simply {\it lexing}), and the parsing
+proper. The {\it lexing\/} (also called {\it scanning}; we use these
+terms interchangeably) clumps various sequences of characters into
+{\it tokens\/} to facilitate the parsing stage. The reasons for this
+particular hierarchy are largely pragmatic and are partially historic
+(there is no reason that {\it parsing\/} cannot be done in multiple
+phases, as well, although it usually isn't).
+
+If one remembers a few basic facts from formal language theory, it
+becomes obvious that a lexer, which parses {\it regular\/} languages,
+can (theoretically) be replaced by an {\sc LALR} parser, which parses {\it
+context-free\/} ones (or some subset thereof, which is
+still a superset of all regular languages). A common justification given for
+creating specialized lexers is efficiency and speed. The
+reality is somewhat more subtle. While we do care about the efficiency of
+parsing in \TeX, having a specialized scanner is important for
+a number of different reasons.
+
+The real advantage of having a dedicated scanner is the ease with which it
+can match incomplete inputs and back up.
A parser can, of course, +{\it recognize\/} any valid input that is also acceptable to a lexer, as well +as {\it reject\/} any input that does not form a valid token. Between +those two extremes, however, lies a whole realm of options that a +traditional parser will have great difficulty exploring. Thus, to +mention just one example, it +is relatively easy to set up a DFA\footnote{Which stands for +Deterministic Finite Automaton, a common (and mathematically unique) +way of implementing a scanner for regular languages. Incidentally {\sc +LALR} mentioned above is short for Look Ahead Left to Right.} +so that the {\it longest\/} +matching input is accepted. The only straightforward way to do this +with a traditional parser is to parse longer and longer inputs again +and again. While this process can be optimized to a certain degree, +the fact that a parser has a {\it stack\/} to maintain limits its +ability to back up. + +As an aside, the mechanism by which \CWEB\ assembles its `scraps' +into chunks of recognized code is essentially iterative lexing, +very similar to what a human does to make sense of complicated +texts. Instead of trying to match the longest running piece of text, +\CWEB\ simply looks for patterns to combine inputs into larger +chunks, which can later be further combined. Note that this is not +quite the same as the approach taken by, say {\sc GLR} parsers, where +the parser must match the {\it whole\/} input or declare a +failure. Where a \CWEB-type parser may settle for the first available +match (or the longest available) a {\sc GLR} parser must try {\it +all\/} possible matches or use an algorithm to reject the majority of +the ones that are bound to fail in the end. + +This `\CWEB\ way' is also different from a traditional `strict' {\sc +LR} parser/scanner approach and certainly deserves serious +consideration when the text to be parsed possesses some rigid +structure but the parser is only allowed to process it one small +fragment at a time. + +Returning to the present macro suite, the lexer produced by \flex\ +uses integer tables similar to those employed by \bison\ so the +usual {\sc\TeX}niques used in implementing \.{\\yyparse} are fully +applicable to \.{\\yylex}. + +An additional advantage provided by having a \flex\ scanner implemented +as part of the suite is the availability of the original \bison\ scanner written +in \Cee\ for the use by the macro package. + +This said, the code generated by \flex\ contains a few idiosyncrasies +not present in the \bison\ output. These `quirks' mostly involve +handling of end of input and error conditions. A quick glance at the +\.{\\yylex} implementation will reveal a rather extensive collection of +macros designed to deal with end of input actions. + +Another difficulty one has to face in translating \flex\ output into +\TeX\ is a somewhat unstructured namespace delivered in the final +output (this is partially due to the \POSIX\ standard that \flex\ +strives to follow). One consequence of this `messy' approach is that the +writer of a \flex\ scanner targeted to \TeX\ has to declare \flex\ +`states' (more properly called {\it subautomata}) twice: first for the +benefit of \flex\ itself, and then again, in the {\it \Cee\ preamble\/} +portion of the code to output the states to be used by the action code +in the lexer. \.{Define\_State($\ldots$)} macro is provided for this +purpose. This macro can be used explicitly by the programmer or be +inserted by a specially designed parser. 
+
Using \CWEB\ helps to keep these declarations together.
+
+The `hand-off' from the scanner to the parser is implemented
+through a pair of registers: \.{\\yylval}, a token register
+containing the value of the returned token, and \.{\\yychar}, a
+\.{\\count} register that contains the numerical value of the
+token to be returned.
+
+Upon matching a token, the scanner passes one crucial piece of
+information to the user: the character sequence representing the token
+just matched (\.{\\yytext}). This is not the whole story,
+though. There are three more token sequences that are made available
+to the parser writer whenever a token is matched.
+
+The first of these is simply a `normalized' version of
+\.{\\yytext} (called \.{\\yytextpure}). In most cases it
+is a sequence of \TeX\ tokens with the same character codes as the one
+in \.{\\yytext} but with their category codes set to 11. In
+cases when the tokens in \.{\\yytext} are {\it not}
+$(c_{\rm ch}, c_{\rm cat})$ pairs, a few simple
+conventions are followed, some of which will be explained below. This
+sequence is provided merely for convenience and its typical use is to
+generate a key for an associative array.
+
+The other two sequences are special `stream pointers' that provide
+access to the extended scanner mechanism in order to implement passing
+of `formatting hints' to the parser without introducing any changes to
+the original grammar. As the mechanism itself and the motivation
+behind it are somewhat subtle, let me spend a few moments discussing
+the range of formatting options desirable in a generic pretty-printer.
+
+Unlike strict parsers employed by most compilers, a parser designed
+for pretty printing cannot afford to be too picky about the structure
+of its input (\cite[Go] calls such parsers `loose'). To provide
+a simple illustration, an isolated identifier, such as `\.{lg\_integer}'
+can be a type name, a variable name, or a structure tag (in a language like
+\Cee\ for example). If one expects the pretty printer to typeset this
+identifier in a correct style, some context must be supplied, as
+well. There are several strategies a pretty printer can employ to get
+hold of the necessary context. Perhaps the simplest way to handle
+this, and to reduce the complexity of the pretty printing algorithm,
+is to insist on the user providing enough context for the parser to do
+its job. For short examples like the one above, this is an acceptable
+strategy. Unfortunately, it is easy to come up with longer snippets of
+grammatically deficient text that a pretty printer should be expected
+to handle. Some pretty printers, such as the one employed by \CWEB\
+and its ilk (the original \.{WEB}, \.{FWEB}), use a very flexible
+bottom-up technique that tries to make sense of as large a portion of
+the text as it can before outputting the result (see also \cite[Wo],
+which implements a similar algorithm in \LaTeX).
+
+The expectation is that this algorithm will handle the majority (about
+90\%? it would be interesting to carry out a study in the spirit of
+the ones discussed in \cite[Jo] to find out) of the
+cases with the remaining few left for the author to correct. The
+question is, how can such a correction be applied?
+
+\CWEB\ itself provides two rather different mechanisms for handling
+these exceptions. The first uses direct typesetting commands (for
+example, \.{@@/} and \.{@@\#} for canceling and
+introducing a line break, resp.) to change the typographic output.
+
The second (preferred) way is to supply {\it hidden context\/} to the
pretty-printer. Two commands, \.{@@;} and
+\.{@@[}$\ldots$\.{@@]} are used for this purpose. The
+former introduces a `virtual semicolon' that acts in every way like a
+real one except it is not typeset (it is not output in the source file
+generated by \CTANGLE, either, but this has nothing to do with pretty
+printing, so I will not mention \CTANGLE\ anymore). For
+instance, from the parser's point of view, if the preceding text was
+parsed as a `scrap' of type {\it exp}, the addition of \.{@@;}
+will make it into a `scrap' of type {\it stmt\/} in \CWEB's
+parlance. The second construct (\.{@@[}$\ldots$\.{@@]})
+is used to create an {\it exp\/} scrap out of whatever happens to be
+inside the brackets.
+
+This is a powerful tool at the author's disposal. Stylistically,
+this is the right way to handle exceptions as it forces the writer to
+emphasize the {\it logical\/} structure of the formal
+text. If the pretty printing style is changed
+extensively later, the texts with such hidden contexts should be able to
+survive intact in the final document (as an example, using a break
+after every statement in \Cee\ may no longer be considered
+appropriate, so any forced break introduced to support this convention
+would now have to be removed, whereas \.{@@;}'s would simply
+quietly disappear into the background).
+
+The same hidden context idea has another important advantage: with
+careful grammar fragmenting (facilitated by \CWEB's or any other
+literate programming tool's `hypertext' structure) and a more diverse
+hidden context (or even arbitrary hidden text) mechanism, it is
+possible to use a strict parser to parse incomplete language
+fragments. For example, the productions that are needed to parse
+\Cee's expressions form a complete subset of the grammar. If the
+grammar's `start' symbol is changed to {\it expression\/} (instead of
+the {\it translation-unit\/} as it is in the full \Cee\ grammar), a
+variety of incomplete \Cee\ fragments can now be parsed and
+pretty-printed. Whenever such granularity is still too `coarse',
+carefully supplied hidden context will give the pretty printer enough
+information to adequately process each fragment. A number of such {\it
+sub}-parsers can be tried on each fragment (this may sound
+computationally expensive; however, in practice, a carefully chosen
+hierarchy of parsers will finish the job rather quickly) until a
+correct parser produces the desired output (this approach is similar
+to, although not quite the same as, the one employed by the {\it General LR
+parsers}).
+
+This somewhat lengthy discussion brings us to the question directly
+related to the tools described in this article: how does one provide
+typographical hints or hidden context to the parser?
+
+One obvious solution is to build such hints directly into the
+grammar. The parser designer can, for instance, add new tokens
+(say, \.{BREAK\_LINE}) to the grammar and extend the
+production set to incorporate the new additions. The risk of
+introducing new conflicts into the grammar is low (although not
+entirely non-existent, due to the lookahead limitations of LR(1)
+grammars) and the changes required are easy, although very tedious, to
+incorporate.
+ +In addition to being labor intensive, this solution has two other +significant shortcomings: it alters the original grammar and hides its +logical structure; it also `bakes in' the pretty-printing conventions +into the language structure (making `hidden' context much less +`stealthy'). It does avoid the `synchronicity problem' mentioned +below. + +A marginally better technique is to introduce a new regular expression +recognizable by the scanner which will then do all the necessary +bookkeeping upon matching the sequence. All the difficulties with +altering the grammar mentioned above apply in this case, as well, only +at the `lexical analysis level'. At a minimum, the set of tokens +matched by the scanner would have to be changed. + +A much better approach involves inserting the hints at the input stage and +passing this information to the scanner and parser as part of the token `values'. The +hints themselves can masquerade as characters ignored by the scanner +(white space, for example) and preprocessed by a specially designed +input routine. The scanner then simply passes on the values to the +parser. This makes hints, in effect, invisible. + +The difficulty lies in synchronizing the token production with the +parser. This subtle complication is very familiar to anyone who has +designed \TeX's output routines: the parser and the lexer are not +synchronous, in the sense that the scanner might be reading several +(in the case of the general LR$(n)$ parsers) tokens ahead of the +parser before deciding on how to proceed (the same way \TeX\ can +consume a whole paragraph's worth of text before exercising its page +builder). + +If we simple-mindedly let the scanner return every hint it has encountered +so far, we may end up feeding the parser the hints meant for the token +that appears {\it after\/} the fragment the parser is currently working +on. In other words, when the scanner `backs up' it must correctly back +up the hints as well. + +This is exactly what the scanner produced by the tools in this package +does: along with the main stream of tokens meant for the parser, it +produces two hidden streams (called the \.{\\format} stream and +the \.{\\stash} stream) and provides the parser with two +strings (currently only strings of digits are used although arbitrary +sequences of \TeX\ tokens can be used as pointers) with the promise +that {\it all the `hints' between the beginning of the corresponding +stream and the point labeled by the current stream pointer appeared +among the characters up to and, possibly, including the ones matched +as the current token}. The macros to extract the relevant parts of the +streams (\.{\\yyreadfifo} and its cousins) are provided for the +convenience of the parser designer. The interested reader can consult +the input routine macros for the details of the internal +representation of the streams. + +In the interest of full disclosure, let me point out that this simple +technique introduces a significant strain on \TeX's +computational resources: the lowest level macros, the ones that handle +character input and are thus executed (sometimes multiple times), for +{\it every\/} character in the input stream are rather complicated and +therefore, slow. Whenever the use of such streams is not desired a simpler +input routine can be written to speed up the process (see +\.{\\yyinputtrivial} for a working example of such macro). 
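+To get a sense of how little a stripped-down routine needs to do,
+here is a bare-bones input loop; it is a made-up sketch, {\it not\/}
+the actual \.{\\yyinputtrivial}.
+\begindemo
+^\def\toyeof{\toyeof}% a unique, never expanded `end of input' marker
+^\def\toyinput{\futurelet\next\toyinputi}
+^\def\toyinputi{%
+^  \ifx\next\toyeof
+^    \let\next\relax % the marker stops the recursion
+^  \else
+^    \let\next\toyconsume
+^  \fi
+^  \next
+^}
+^\def\toyconsume#1{%
+^  \chardef\toycp=`#1\relax % record the character code
+^  \toyinput % tail recursion: fetch the next token
+^}
+\enddemo
+Note that grabbing the next token as a macro parameter silently skips
+spaces and strips braces, precisely the subtleties discussed above,
+and no hidden streams are maintained.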
+
Finally, while probably not directly related to the present
discussion, this approach has one more interesting feature: after the
parser is finished, the parser output and the streams exist
`statically', fully available for any last-minute preprocessing or for
debugging purposes, if necessary. Under most circumstances, the parser
output is `executed' and the macros in the output are the ones reading
the various streams using the pointers supplied at the parsing stage
(at least, this is the case for all the parsers supplied with the
package).
+
+@*1 Inside semantic actions: switch statements and `functions' in \TeX.
+Now you have a lexer for your input, and a grammar ready to be put into
+action (we will talk about actions a bit later). It is time to discuss
+how the tables produced by \bison\ get converted into \TeX\ {\it macros\/}
+that drive the parser in {\it \TeX}.
+
+The tables that drive the \bison\ input parsers
+are collected in various \.{\{b,d,f,g,n\}yytab.tex} and \.{small\_tab.tex}. Each
+one of these files contains the tables that implement a specific parser
+used during different stages of processing.
+Their exact function is well explained
+in the source file produced by \bison\ ({\it how} this is done is
+explained elsewhere; see \cite[Ah] for a good reference). It would
+suffice to mention here that there are three types of tables in this
+file: \recount{1}numerical tables such as \.{\\yytable} and
+\.{\\yycheck} (both are either \TeX's token registers in an
+unoptimized parser or associative arrays in an optimized version,
+as discussed below),
+\recount{2}a string array \.{\\yytname}, and \recount{3}an action
+switch. The action switch is what gets called when the parser does a
+{\it reduction}. It is easy to notice that the numerical tables come
+`premade' whereas the string array consisting of token names
+is difficult to recognize. This is intentional: this form of initialization
+is designed to allow the widest range of
+characters to appear inside names. The macros that do this reside in
+\.{yymisc.sty}. The generated table files also contain
+constant and token declarations used by the parser.
+
+The description of the process used to output \bison\ tables in an
+appropriate form continues in the section about
+\locallink{bsfile}outputting \TeX\ tables\endlink; we pick it up here
+with the description of the syntax-directed translation and the
+actions. The line
+$$
+\.{\\switchon\\next\\in\\currentswitch}
+$$
+is responsible for calling an appropriate action in the current
+switch, as is easy to infer. A {\it switch\/} is also a macro that
+consists of strings of \TeX\ tokens intermixed with \TeX\ macros
+inside braces. Each group of macros
+gets executed whenever the character or the group of characters in
+\.{\\next} matches a substring preceding the braced group. If there
+are two different substrings
+that match, only the earliest group of macros gets expanded.
+Before a state is
+used, a special control sequence,
+\.{\\setspecialcharsfrom\\switchname}, can be used to put the \TeX\
+tokens in a form suitable for consumption by \.{\\switchon}'s. The
+most important step it performs is that it {\it turns every token in the
+list into a character with the same character code and category
+12\/}. Thus \.{\\\{} becomes \.{\{}$_{12}$. There are other ways of
+inserting tokens into a state: enclosing a token or a string of tokens in
+\.{\\raw...\\raw} adds it to the state macro unchanged.
If you have
+a sequence of category~12 characters you want to add to the state, put
+it after \.{\\classexpand} (such sequences are usually prepared by the
+\.{\\setspecialchars} macro that uses the token tables generated by
+\bison\ from your grammar).
+
+You can give a case a readable label (say, \.{brackets}) and enclose
+this label in \.{\\raw}$\ldots$\.{\\raw}. A word of caution: an `a'
+inside of \.{\\raw}$\ldots$\.{\\raw} (which is most likely an
+\.{a}$_{11}$ unless you played with category codes before loading the
+\.{\\switchon} macros) and the one outside it are two different
+characters, as one is no longer a letter (category 11) in the eyes of
+\TeX\ whereas the other one still is. For this reason one should not
+use characters other than letters in h\.{\{}is\.{,}er\.{\}} state
+names: the way a state picks an action does not distinguish between,
+say, a `\.{(}' in `\.{(letter)}' and a stand-alone `\.{(}' and may
+pick an action that you did not intend. This applies even if `\.{(}'
+is not among the characters explicitly inserted in the state macro: if
+an action for a given character is not found in the state macro, the
+\.{\\switchon} macro will insert the current \.{\\default} action
+instead, which most often you would want to be \.{\\yylex} or
+\.{\\yyinput} (i.e.\ skip this token). If `\.{(}' or `\.{)}' matches
+the braced group that follows `\.{(letter)}', chaos may ensue (most
+likely \TeX\ will keep reading past the \.{\\end} or \.{\\yyeof} that
+should have terminated the input). Make the names of character
+categories as unique as possible: the \.{\\switchon} macro is simply a
+string-matching mechanism, with the added distinction between
+characters of different categories.
+
+Finally, the construct \.{\\statecomment}{\it
+anything\/}\.{\\statecomment} allows you to insert comments in the
+state sequence (note that the state {\it name\/} is put at the
+beginning of the state macro (by \.{\\setspecialcharsfrom})
+in the form of a special control sequence
+that expands to nothing: this elaborate scheme is needed because
+another control sequence can be \.{\\let} to the state macro, which
+makes the debugging information difficult to decipher). The debugging
+mode for the lexer implemented with these macros is activated by
+\.{\\tracedfatrue}.
+
+The functionality of the \.{\\switchon} macros (for `historical'
+reasons, one can also use \.{\\action} as a synonym) has been
+implemented in a number of other macro packages (see \cite[Fi], which
+discusses the well-known and widely used \.{\\CASE} and \.{\\FIND}
+macros). The macros in this collection have the additional property
+that the only assignments that persist after the \.{\\switchon}
+completes are the ones performed by the user code inside the selected
+case.
+
+This last property of the switch macros is implemented using another
+mechanism that is part of this macro suite: the `subroutine-like'
+macros, \.{\\begingroup}$\ldots$\.{\\tokreturn}. For examples, the
+interested reader can take a look at the macros included with the
+package. A typical use is
+\.{\\begingroup}$\ldots$\.{\\tokreturn\{\}\{\\toks0 \}\{\}}, which will
+preserve all the changes to \.{\\toks0} and have no other side effects
+(if, for example, in typical \TeX\ vernacular, \.{\\next} is used
+to implement tail recursion inside the group, after the
+\.{\\tokreturn}, \.{\\next} will still have the same value it
+had before the group was entered). This functionality comes at the
+expense of some computational efficiency.
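+
+To summarize, here is a schematic (and entirely made-up) example of
+the idioms above: the names \.{\\mathswitch}, \.{\\arrowaction}, and
+\.{\\bracketaction} are invented for this illustration, while
+\.{\\setspecialcharsfrom}, \.{\\raw}, \.{\\default}, \.{\\yyinput},
+and \.{\\switchon} are the macros just described.
+\begindemo
+^\def\mathswitch{->{\arrowaction}\raw brackets\raw{\bracketaction}}
+^\setspecialcharsfrom\mathswitch
+^\let\default\yyinput
+^\switchon\next\in\mathswitch
+\enddemo
+If \.{\\next} matches \.{->}, the first braced group is executed; the
+(carefully chosen) label \.{brackets} selects the second one; anything
+else falls through to \.{\\default}.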
+
+This covers most of the routine computations inside semantic actions;
+all that is left is a way to `tap' into the stack automaton
+built by \bison\ using an interface similar to the special
+\.{\$$n$} variables utilized by the `genuine' \bison\ parsers
+(i.e.\ written in \Cee\ or any other target language supported by
+\bison).
+
+This role is played by the several varieties of \.{\\yy$\,p$} command
+sequences (for the sake of completeness, $p$ stands for one of \.{($n$)},
+\.{[{\rm name}]}, \.{]{\rm name}[}, or $n$; here $n$ is a
+string of digits, and a `name' is any name acceptable as a symbolic
+name for a term in \bison). Instead
+of going into the minutiae of various flavors of \.{\\yy}-macros, let me
+just mention that one can get by with only two `idioms' and still
+be able to write parsers of arbitrary sophistication:
+\.{\\yy($n$)} can be treated as a token register containing the
+value of the $n$-th term of the rule's right hand side, $n>0$. The left
+hand side of a production is accessed through \.{\\yyval}. A
+convenient shortcut is \.{\\yy0\{{\rm \TeX\space material}\}}, which
+will expand the `\TeX\ material' inside the braces. Thus, a simple way
+to concatenate the values of the first two production terms is
+\.{\\yy0\{\\the\\yy(1)\\the\\yy(2)\}}. The included \bison\
+parser can also be used to provide support for `symbolic names',
+analogous to \bison's \.{{\$}[{\rm name}]}, but a
+bit more effort is required on the user's part to initialize such support.
+Using symbolic names can make the parser more readable and maintainable,
+however.
+
+There is also a \.{\\bb$\,n$} macro that provides access to the term
+values in the `natural order' (e.g.~\.{\\bb1} is the last term read). Its
+intended use is with the `inline' rules (see the main parser for
+such examples). As of version~\.{3.0}, \bison\ no longer outputs
+|yyrhs| and |yyprhs|, which makes it impossible to produce the
+|yyrthree| array necessary for processing such rules in the `left to right'
+order. One might also note that the new notation is better suited for
+the inline rules since the value that is pushed on the stack is that
+of \.{\\bb0}, i.e.~the term implicitly inserted by \bison. Be aware
+that there are no \.{\\bb[$\cdot$]} or \.{\\bb($\cdot$)} versions of
+these macros, for obvious reasons. A less obvious feature of these
+macros is their `nonexpandable' nature, which means they cannot be used
+inside \.{\\edef}. Thus, the most common use pattern is
+\.{\\bb$\,n$\{\\toks$\,m$\}} with a subsequent expansion of
+\.{\\toks$\,m$}. Making these macros expandable is certainly possible
+but does not seem crucial for the intended limited use pattern.
+
+Naturally, a parser writer may need a number of other data
+abstractions to complete the task. Since these are highly dependent on
+the nature of the processing the parser is supposed to provide, we
+refer the interested reader to the parsers included in the package as
+a source of examples of such specialized data structures.
+
+One last remark about the parser operation is worth making here:
+the parser automaton itself does not make any \.{\\global}
+assignments. This (along with some careful semantic action writing)
+can be used to `localize' the effects of the parser operation and,
+most importantly, to create `reentrant' parsers that can, e.g.\ call
+{\it themselves\/} recursively.
+
+@*1 `Optimization'.
+By default, the generated parser and scanner keep all of their tables
+in separate token registers.
Each stack is kept in a single macro (this
+description is further complicated by the support for parser {\it
+namespaces\/} that exists even for unoptimized parsers, but this
+subtlety will not be mentioned again---see the macros in the package
+for further details). Thus, every time a table
+is accessed, it has to be expanded, making the table access latency
+linear in {\it the size of the table}. The same holds for stacks and
+the action `switches', of
+course. While keeping the parser tables (which are immutable) in token
+registers does not have any better rationale than saving the control
+sequence memory (the most abundant memory in \TeX), this way of
+storing {\it stacks} does have an advantage when multiple parsers get
+to play simultaneously. All one has to do to switch from one parser to
+another is to save the state by renaming the stack control sequences
+accordingly.
+
+When the parser and scanner are `optimized', all these control
+sequences are `spread over' appropriate associative arrays. One caveat
+to be aware of: the action switches for both the parser and the scanner
+have to be output differently (a command line option is used to
+control this) for optimized and unoptimized parsers. While it is
+certainly possible to optimize only some of the parsers (if your
+document uses multiple parsers) or even only some {\it parts\/} of a given
+parser (or scanner), the details of how to do this are rather
+technical and are left for the reader to discover by reading the
+examples supplied with the package. At least at the beginning, it is
+easier to simply set the highest optimization level and use it
+consistently throughout the document.
+
+@*1 {\it \TeX\/} with a different {\sl slant} or do you C an escape?.
+%\def\texnspace{other}
+Some \TeX\ productions below probably look like alien script.
+The authors of \cite[Er] cite a number of reasons why pretty printing of
+\TeX\ in general is a nearly impossible task. The macros included with
+the package follow a very straightforward strategy and do not try to
+be very comprehensive. Instead, the burden of presenting \TeX\ code in
+a readable form is placed on the programmer. Appropriate hints can be
+supplied by means of indenting the code, using assignments ($=$) where
+appropriate, etc. If you would rather look at straight \TeX\
+instead, the line \.{\\def\\texnspace\{other\}} at the beginning of
+this section can be uncommented and
+|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );| becomes
+\def\texnspace{other}%
+|TeX_( "/noexpand/inmath{/yy0{/yy1{}}}" );|.
+\def\texnspace{texline}%
+There is, however, more to this story. A look at the actual file will
+reveal that the line above was typed as
+$$
+\.{TeX\_( "/noexpand/inmath\{/yy0\{/yy1\{\}\}\}" );}
+$$
+The `escape character' is leaning the other way!
+The lore of \TeX\ is uncompromising: `\.{\\}' is {\it the\/} escape
+character. What is the reason to avoid it in this case?
+
+The mystery is not very deep: `\.{/}' was chosen as an escape character
+by the parser macros (a quick glance at \.{?yytab.tex} will reveal as
+much). There is, of course, nothing sacred (other than tradition,
+which this author is trying his hardest to follow) about what character code
+the escape character has. The reason to look for the alternative is straightforward: `\.{\\}' is
+a special character in \Cee, as well (also an `escape', in fact). The line
+\.{TeX\_( "..." );} is a {\it macro-call\/}, but in \Cee. This
+macro simply prints out (almost `as-is') the line in
+parentheses.
An attempt at \.{TeX\_( "\\noexpand" );} would result in +\numberlinestrue +\begindemo +^ +^oexpand +\enddemo +\numberlinesfalse +Other escape combinations\footnote{Here is a full list of {\it +defined\/} escaped characters in \Cee: \.{\\a}, \.{\\b}, \.{\\f}, \.{\\n}, +\.{\\r}, \.{\\t}, \.{\\v}, \.{\\}{$[$\it octal digit$]$}, \.{\\'}, +\.{\\"}, \.{\\?}, \.{\\\\}, \.{\\x}, \.{\\u}, \.{\\U}. Note that the +last three combinations must be followed by a specific string of +characters to appear in the input without generating errors.} are +even worse: most are simply undefined. If anyone feels trapped without +an escape, however, the same line can be typed as +$$ +\.{TeX\_( "\\\\noexpand\\\\inmath\{\\\\yy0\{\\\\yy1\{\}\}\}" );} +$$ +Twice the escape! + +If one were to look closer at the code, another oddity stands +out: there are no \.{\$}'s anywhere in sight. +The big money, \.{\$} is a beloved character in +\bison. It is used in action code to reference the values of the +appropriate terms in a production. If mathematics pays your bills, use +\.{\\inmath} instead. + +@*1 The \bison\ parser(s). Let's take a short break for a broad overview of the input file. +The basic structure is that of an ordinary \bison\ file that produces +plain \Cee\ output. The \Cee\ actions, however, are programmed to output \TeX. + +@s TeX_ TeX +@s TeXa TeX +@s TeXb TeX +@s TeXf TeX +@s TeXfo TeX +@s TeXao TeX + +@(bg.yy@>= +@G Switch to generic mode. +%{@> @ @=%} + @> @ @= +%union {@> @ @=} +%{@> @ @=%} + @> @ @= +%% + @> @ @= + @> @ @= + @> @ @= +%% +@g + +@ Bootstrap mode is next. The reason for a separate bootstrap parser is to +collect the minimal amount of information to `spool up' the `production' +parsers. To understand the mechanics and the reasons behind it, consider what happens +following a declaration such as \.{\%token TOKEN "token"} +(or, as it would be typeset by the macros in this package +`\prodstyle{\%token} \.{TOKEN} \.{token}'; see the index entries for +more details)% +\idxinline{TOKEN}\idxinline{token}. +The two names for the same token are treated very differently. \.{TOKEN} becomes +an |enum| constant in the \Cee\ parser generated by \bison. Even when +that parser becomes part of the `driver' program that outputs the \TeX\ +version of the parser tables, there is no easy way to output the {\it +names\/} of the appropriate |enum| constants. The other name +(\.{"token"}) becomes an entry in the |yytname| array. These names +can be output by either the `driver' or \TeX\ itself after the +\.{\\yytname} table has been input. The scanner, on the other hand, +will use the first version (\.{TOKEN}). Therefore, it is important to +establish an equivalence between the two versions of the name. In the +`real' parser, the token values are output in a special header +file. Hence, one has to either parse the header file to establish the +equivalences or find some other means to find out the numerical values +of the tokens. + +One approach is to parse the file containing the {\it declarations\/} +and extract the equivalences between the names from it. This is the +function of the bootstrap parser. Since the lexer is reused, some +token values need to be known in advance (and the rest either ignored +or replaced by some `made up' values). These tokens are `hard coded' +into the parser file generated by \bison\ and output using a special +function. The switch `|@[#define@]@; BISON_BOOTSTRAP_MODE|' tells the `driver' +program to output the hard coded token values. 
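+
+For illustration, with the default format strings used by the \TeX\
+output mode (see the \bison\ specific routines later in this
+document), each such hard coded token produces a line like the one
+below in the generated table file (the numerical value \.{262} is made
+up for this example):
+\begindemo
+^\expandafter\def\csname token\parsernamespace STRING\endcsname{262}% STRING
+\enddemo
+This is how the `macro name' version of a token becomes known to the
+scanner in bootstrap mode.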
+@q Bizarre looking way of typing #define is due to the awkward way@> +@q \CWEB\ treats switching in and out of $-mode in inline \Cee@> + +Note that the equivalence of the two versions of token names would +have to be established every time a `string version' of a token is +declared in the \bison\ file and the `macro name version' of the token +is used by the corresponding scanner. To establish this equivalence, +however, the bootstrapping parser below is not always necessary (see +the \.{xxpression} example, specifically, the file \.{xxpression.w} in +the \.{examples} directory for an example of using a different parser +for this purpose). The reason it is necessary here is that a parser +for an appropriate subset of the \bison\ syntax is not yet available +(indeed, {\it any\/} functional parser for a \bison\ syntax subset +would have to use the same scanner (unless you want to write a custom +scanner for it), which would need to know how to output tokens, for +which it would need a parser for a subset of \bison\ syntax $\ldots$ +it is a `chicken and egg'). Hence the name `bootstrap'. Once a +functional parser for a large enough subset of the \bison\ input +grammar is operational, {\it it\/} can be used to pair up the token +names. + +The second function of the bootstrap parser is to collect information +about the scanner's states. The mechanism is slightly different for +states. While the token equivalences are collected purely in +`\TeX\ mode', the bootstrap parser collects all the state names into a +special \Cee\ header file. The reason is simple: unlike the token +values, the numerical values of the scanner states are not passed to +the `driver' program in any data structure and are instead defined as +ordinary macros. The header file is the information the `driver' file +needs to output the state values. + +An additional subtlety in the case of state value output is that the +main lexer for the \bison\ grammar utilizes states extensively and thus +cannot be easily used with the bootstrap parser before the state +values are known. The solution is to substitute a very simple scanner barely +capable of lexing state declarations. Such a scanner is implemented +in \.{ssffo.w} (the somewhat cryptic name stands for `{\bf s}imple {\bf s}canner +{\bf f}or {\bf f}lex {\bf o}ptions'). +\saveparseoutputtrue +@(bb.yy@>= +@G Switch to generic mode. +%{ + @> @ @= + @> @/#define BISON_BOOTSTRAP_MODE @= +%} + @> @ @= +%union {@> @ @=} +%{@> @ @=%} + @> @ @= +%% + @> @ @= + @> @ @= + @> @<\flex\ options parser productions@> @= + @> @ @= + @> @ @= +%% +@g + +@ The prologue parser is responsible for parsing various grammar +declarations as well as parser options. +\saveparseoutputfalse +%\traceparserstatestrue +%\tracestackstrue +%\tracerulestrue +%\traceactionstrue +\saveparseoutputtrue +@(bd.yy@>= +@G Switch to generic mode. +%{@> @ @=%} + @> @ @= +%union {@> @ @=} +%{@> @ @=%} + @> @ @= +%% + @> @@; + @> @ @= + @> @ @= +%% +@g + +@ Full \bison\ input parser is used when a complete \bison\ file is +expected. It is also capable of parsing a `skeleton' of such a file, +similar to the one that follows this paragraph. +\traceparserstatesfalse +\tracestacksfalse +\tracerulesfalse +\traceactionsfalse +\checktablefalse +\saveparseoutputfalse +@(bf.yy@>= +@G Switch to generic mode. +%{@> @ @=%} + @> @ @= +%union {@> @ @=} +%{@> @ @=%} + @> @ @= +%% + @> @ @= + @> @ @= + @> @ @= + @> @ @= +%% +@g + +@ The first two options are essential for the parser operation. 
The +start symbol can be set implicitly by listing the appropriate +production first. +@q %define lr.type canonical-lr @> +@q Make not on this and lexing too much lookahead and the \stashed trick@> +@q Explain other options @> +@= +@G +%token-table +%debug +%start input +@g + +@*2 Grammar rules. Most of the original comments present in +the grammar file used by \bison\ itself have been preserved and appear in +{\it italics\/} at the beginning of each appropriate section. + +To facilitate the {\it bootstrapping\/} of the parser (see above), some +declarations have been separated into their own sections. Also, a +number of new rules have been introduced to create a hierarchy of +`subparsers' that parse subsets of the grammar. We begin by listing +most of the tokens used by the grammar. Only the string versions are +kept in the |yytname| array, which, in part is the reason for a +special bootstrapping parser as explained earlier. +@= +@G +%token GRAM_EOF 0 "end of file" +%token STRING "string" + +%token PERCENT_TOKEN "%token" +%token PERCENT_NTERM "%nterm" + +%token PERCENT_TYPE "%type" +%token PERCENT_DESTRUCTOR "%destructor" +%token PERCENT_PRINTER "%printer" + +%token PERCENT_LEFT "%left" +%token PERCENT_RIGHT "%right" +%token PERCENT_NONASSOC "%nonassoc" +%token PERCENT_PRECEDENCE "%precedence" + +%token PERCENT_PREC "%prec" +%token PERCENT_DPREC "%dprec" +%token PERCENT_MERGE "%merge" +@g +@@; + +@ We continue with the list of tokens below, following the layout of +the original parser. +@= +@G +%token + PERCENT_CODE "%code" + PERCENT_DEFAULT_PREC "%default-prec" + PERCENT_DEFINE "%define" + PERCENT_DEFINES "%defines" + PERCENT_ERROR_VERBOSE "%error-verbose" + PERCENT_EXPECT "%expect" + PERCENT_EXPECT_RR "%expect-rr" + PERCENT_FLAG "%" + PERCENT_FILE_PREFIX "%file-prefix" + PERCENT_GLR_PARSER "%glr-parser" + PERCENT_INITIAL_ACTION "%initial-action" + PERCENT_LANGUAGE "%language" + PERCENT_NAME_PREFIX "%name-prefix" + PERCENT_NO_DEFAULT_PREC "%no-default-prec" + PERCENT_NO_LINES "%no-lines" + PERCENT_NONDETERMINISTIC_PARSER + "%nondeterministic-parser" + PERCENT_OUTPUT "%output" + PERCENT_REQUIRE "%require" + PERCENT_SKELETON "%skeleton" + PERCENT_START "%start" + PERCENT_TOKEN_TABLE "%token-table" + PERCENT_VERBOSE "%verbose" + PERCENT_YACC "%yacc" +; + +%token BRACED_CODE "{...}" +%token BRACED_PREDICATE "%?{...}" +%token BRACKETED_ID "[identifier]" +%token CHAR "char" +%token EPILOGUE "epilogue" +%token EQUAL "=" +%token ID "identifier" +%token ID_COLON "identifier:" +%token PERCENT_PERCENT "%%" +%token PIPE "|" +%token PROLOGUE "%{...%}" +%token SEMICOLON ";" +%token TAG "" +%token TAG_ANY "<*>" +%token TAG_NONE "<>" +%token INT "integer" +%token PERCENT_PARAM "%param"; +@g + +@ Extra tokens for typesetting \flex\ state +declarations and options are declared in addition to the ones that a +standard \bison\ parser recognizes. +@= +@G +%token FLEX_OPTION FLEX_STATE_X FLEX_STATE_S +@g + +@ We are ready to describe the top levels of the parse tree. The first +`sub parser' we consider is a `full' parser, that is the parser that +expects a full grammar file, complete with the prologue, declarations, +etc. This parser can be used to extract information from the grammar +that is otherwise absent from the executable code generated by +\bison. This includes, for example, the `name' part of +\.{\$}\.{[}{\rm name}\.{]}. +This parser is therefore used to generate the `symbolic +switch' to provide support for symbolic term names similar to +`genuine' \bison's \.{\$}\.{[}$\ldots$\.{]} syntax. 
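+
+As a reminder, here is a made-up fragment in ordinary \bison\ syntax
+(it is not part of this grammar) that uses the named references in
+question:
+\begindemo
+^expr[res]: expr[a] '+' term[b] { $res = $a + $b; }
+\enddemo
+The `symbolic switch' generated with the help of this parser is what
+lets the \TeX\ side resolve names such as \.{res}, \.{a}, and \.{b}.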
+@= +@G +@t}\vb{\inline}{@> +input: + prologue_declarations + "%%" grammar epilogue.opt {@> @ @=} +; +@g + +@ The action of the parser in this case is simply to separate the +accumulated `parse tree' from the auxiliary information carried by the +parser on the stack. +@= + @[TeX_( "/getsecond{/yy(3)}/to/toksa" );@]@; /* extract grammar contents */ + @[TeX_( "/yy0{/the/toksa}/table=/yy(0)" );@]@; + +@ Another subgrammar deals with the syntax of isolated \bison\ rules. This is +the most commonly used `subparser' since a rules cluster is the most +natural `unit' to include in a \CWEB\ file. +@= +@G +@t}\vb{\inline}{@> +input: + grammar epilogue.opt {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=} +; +@g + +@ The bootstrap parser has a very narrow set of goals: it is concerned +with \prodstyle{\%token} declarations only in +order to supply the token information to the lexer (since, as noted +above, such information is not kept in the |yytname| array). +The parser can also parse \prodstyle{\%nterm} declarations but the +bootstrap lexer ignores the \prodstyle{\%nterm} token, since the +\bison\ grammar does not use one. +It also extends the syntax of a \prodstyle{grammar\_declaration} by allowing a +declaration with or without a semicolon at the end (the latter is only +allowed in the prologue). This works since the token declarations have +been carefully separated from the rest of the grammar in different +\CWEB\ sections. The range of tokens output by the bootstrap +lexer is limited, hence most of the other rules are ignored. +@= +@G +@t}\vb{\inline}{@> +input: + grammar_declarations {@> TeX_( "/table=/yy(1)" ); @=} +; +@t}\vb{\resetf}{@> +grammar_declarations: + symbol_declaration semi.opt {@> @ @=} +| flex_declaration semi.opt {@> @ @=} +| grammar_declarations + symbol_declaration semi.opt {@> TeX_( "/yy0{/the/yy(1)/the/yy(2)}" ); @=} +| grammar_declarations + flex_declaration semi.opt {@> TeX_( "/yy0{/the/yy(1)/the/yy(2)}" ); @=} +; +@t}\vb{\inline\flatten}{@> +semi.opt: {} | ";" {}; +@g + +@ The following is perhaps the most common action performed by the +parser. It is done automatically by the parser code but this feature +is undocumented so we supply an explicit action in each case. +@= + @[TeX_( "/yy0{/the/yy(1)}" );@]@; + +@ Next, a subgrammar for processing prologue declarations. Finer +differentiation is possible but the `subparsers' described here work +pretty well and impose a mild style on the grammar writer. +@= +@G +@t}\vb{\inline}{@> +input: + prologue_declarations epilogue.opt {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=} +| prologue_declarations + "%%" "%%" EPILOGUE {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=} +| prologue_declarations + "%%" "%%" {@> TeX_( "/getsecond{/yy(1)}/to/table" ); @=} +; +@g + +@ {\it Declarations: before the first \prodstyle{\%\%}}. We are now +ready to deal with the specifics of the declarations themselves. The +\.{\\grammar} macro is a `structure', whose first `field' is the +grammar itself, whereas the second carries the type of the last +declaration added to the grammar. +@= +@G +prologue_declarations: + {@> TeX_( "/yy0{/nx/grammar{}{/nx/empty}}" ); @=} +| prologue_declarations + prologue_declaration {@> @ @=} +; +@g + +@ @= + @@; + +@ Here is a list of most kinds of declarations that can appear in the +prologue. The scanner returns the `stream pointers' for all the +keywords so the declaration `structures' pass on those pointers to the +grammar list. 
The original syntax has been left intact even though for +the purposes of this parser some of the inline rules are unnecessary. +@= +@G +prologue_declaration: + grammar_declaration {@> @ @=} +| "%{...%}" {@> TeX_( "/yy0{/nx/prologuecode/the/yy(1)}" ); @=} +| "%" {@> TeX_( "/yy0{/nx/optionflag/the/yy(1)}" ); @=} +| "%define" variable value {@> TeX_( "/yy0{/nx/vardef{/the/yy(2)}{/the/yy(3)}/the/yy(1)}" ); @=} +| "%defines" {@> TeX_( "/yy0{/nx/optionflag{defines}{}/the/yy(1)}" ); @=} +| "%defines" STRING {@> @[TeX_( "/toksa{defines}" );@]@+@ @=} +| "%error-verbose" {@> TeX_( "/yy0{/nx/optionflag{error verbose}{}/the/yy(1)}" ); @=} +| "%expect" INT {@> @[TeX_( "/toksa{expect}" );@]@+@ @=} +| "%expect-rr" INT {@> @[TeX_( "/toksa{expect-rr}" );@]@+@ @=} +| "%file-prefix" STRING {@> @[TeX_( "/toksa{file prefix}" );@]@+@ @=} +| "%glr-parser" {@> TeX_( "/yy0{/nx/optionflag{glr parser}{}/the/yy(1)}" ); @=} +| "%initial-action" "{...}" {@> TeX_( "/yy0{/nx/initaction/the/yy(2)}" ); @=} +| "%language" STRING {@> @[TeX_( "/toksa{language}" );@]@+@ @=} +| "%name-prefix" STRING {@> @[TeX_( "/toksa{name prefix}" );@]@+@ @=} +| "%no-lines" {@> TeX_( "/yy0{/nx/optionflag{no lines}{}/the/yy(1)}" ); @=} +| "%nondeterministic-parser" {@> TeX_( "/yy0{/nx/optionflag{nondet. parser}{}/the/yy(1)}" ); @=} +| "%output" STRING {@> @[TeX_( "/toksa{output}" );@]@+@ @=} +@t}\vb{\flatten}{@> +| "%param" {} + params {@> TeX_( "/yy0{/nx/paramdef{/the/yy(3)}/the/yy(1)}" ); @=} +@t}\vb{\fold}{@> +| "%require" STRING {@> @[TeX_( "/toksa{require}" );@]@+@ @=} +| "%skeleton" STRING {@> @[TeX_( "/toksa{skeleton}" );@]@+@ @=} +| "%token-table" {@> TeX_( "/yy0{/nx/optionflag{token table}{}/the/yy(1)}" ); @=} +| "%verbose" {@> TeX_( "/yy0{/nx/optionflag{verbose}{}/the/yy(1)}" ); @=} +| "%yacc" {@> TeX_( "/yy0{/nx/optionflag{yacc}{}/the/yy(1)}" ); @=} +| ";" {@> TeX_( "/yy0{/nx/empty}" ); @=} +; + +params: + params "{...}" {@> TeX_( "/yy0{/the/yy(1)/nx/braceit/the/yy(2)}" ); @=} +| "{...}" {@> TeX_( "/yy0{/nx/braceit/the/yy(1)}" ); @=} +; +@g + +@ This is a typical parser action: encapsulate the `type' of the +construct just parsed and attach some auxiliary info, in this case the +stream pointers. +@= + @[TeX_( "/yy0{/nx/oneparametricoption{/the/toksa}{/the/yy(2)}/the/yy(1)}" );@]@; + +@ Some extra declarations to typeset \flex\ options and +declarations. These are not part of the \bison\ syntax but their +structure is similar enough that they can be included in the grammar. +@= +@G +prologue_declaration: + flex_declaration {@> @ @=} +; +@g +@<\flex\ options parser productions@>@; + +@ The syntax of \flex\ options was extracted from \flex\ documentation +so it is not guaranteed to be correct. 
+@<\flex\ options parser productions@>= +@G +flex_declaration: + FLEX_OPTION flex_option_list {@> @ @=} +| flex_state symbols.1 {@> @ @=} +; + +flex_state: + FLEX_STATE_X {@> TeX_( "/yy0{/nx/flexxstatedecls/the/yy(1)}" ); @=} +| FLEX_STATE_S {@> TeX_( "/yy0{/nx/flexsstatedecls/the/yy(1)}" ); @=} +; + +flex_option_list: + flex_option {@> @ @=} +| flex_option_list flex_option {@> @ @=} +; + +flex_option: + ID {@> TeX_( "/yy0{/nx/flexoptionpair{/the/yy(1)}{}}" ); @=} +| ID "=" symbol {@> TeX_( "/yy0{/nx/flexoptionpair{/the/yy(1)}{/the/yy(3)}}" ); @=} +; +@g + +@ @= + @[TeX_( "/yy0{/nx/flexoptiondecls{/the/yy(2)}/the/yy(1)}" );@]@; + +@ @= + @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/getthird{/yy(1)}/to/toksc" );@]@; + @[TeX_( "/yy0{/the/toksa{/the/yy(2)}{/the/toksb}{/the/toksc}}" );@]@; + +@ @= + @[TeX_( "/getsecond{/yy(2)}/to/toksa" );@]@; /* the identifier */ + @[TeX_( "/getfourth{/toksa}/to/toksb" );@]@; /* the format pointer */ + @[TeX_( "/getfifth{/toksa}/to/toksc" );@]@; /* the stash pointer */ + @[TeX_( "/yy0{/the/yy(1)/nx/hspace{/the/toksb}{/the/toksc}/the/yy(2)}" );@]@; + +@ {\it Grammar declarations}. These declarations can appear in both +prologue and the rules sections. Their treatment is very similar to +prologue-only options. +@= +@G +grammar_declaration: + precedence_declaration {@> @ @=} +| symbol_declaration {@> @ @=} +| "%start" symbol {@> @[TeX_( "/toksa{start}" );@]@+@ @=} +| code_props_type "{...}" generic_symlist {@> @ @=} +| "%default-prec" {@> TeX_( "/yy0{/nx/optionflag{default prec.}{}/the/yy(1)}" ); @=} +| "%no-default-prec" {@> TeX_( "/yy0{/nx/optionflag{no default prec.}{}/the/yy(1)}" ); @=} +| "%code" "{...}" {@> TeX_( "/yy0{/nx/codeassoc{code}{}/the/yy(2)/the/yy(1)}" ); @=} +| "%code" ID "{...}" {@> TeX_( "/yy0{/nx/codeassoc{code}{/the/yy(2)}/the/yy(3)/the/yy(1)}" ); @=} +; + +code_props_type: + "%destructor" {@> TeX_( "/yy0{{destructor}/the/yy(1)}" ); @=} +| "%printer" {@> TeX_( "/yy0{{printer}/the/yy(1)}" ); @=} +; +@g + +@ @= + @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@; /* name of the property */ + @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* contents of the braced code */ + @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@; /* braced code format pointer */ + @[TeX_( "/getthird{/yy(2)}/to/toksd" );@]@; /* braced code stash pointer */ + @[TeX_( "/getsecond{/yy(1)}/to/tokse" );@]@; /* code format pointer */ + @[TeX_( "/getthird{/yy(1)}/to/toksf" );@]@; /* code stash pointer */ + @[TeX_( "/yy0{/nx/codepropstype{/the/toksa}{/the/toksb}{/the/yy(3)}{/the/toksc}{/the/toksd}{/the/tokse}{/the/toksf}}" );@]@; + +@ @= +@G +%token PERCENT_UNION "%union"; +@g + +@ @= +@G +@t}\vb{\inline\flatten}{@> +union_name: + {@> TeX_( "/yy0{}" ); @=} +| ID {@> @ @=} +; + +grammar_declaration: + "%union" union_name "{...}" {@> @ @=} +; + +symbol_declaration: + "%type" TAG symbols.1 {@> @ @=} +; +@t}\vb{\resetf\flatten}{@> +precedence_declaration: + precedence_declarator tag.opt symbols.prec {@> @ @=} +; + +precedence_declarator: + "%left" {@> TeX_( "/yy0{/nx/preckind{left}/the/yy(1)}" ); @=} +| "%right" {@> TeX_( "/yy0{/nx/preckind{right}/the/yy(1)}" ); @=} +| "%nonassoc" {@> TeX_( "/yy0{/nx/preckind{nonassoc}/the/yy(1)}" ); @=} +| "%precedence" {@> TeX_( "/yy0{/nx/preckind{precedence}/the/yy(1)}" ); @=} +; +@t}\vb{\inline}{@> +tag.opt: + {@> TeX_( "/yy0{}" ); @=} +| TAG {@> @ @=} +; +@g + +@ @= + @[TeX_( "/yy0{/nx/codeassoc{union}{/the/yy(2)}/the/yy(3)/the/yy(1)}" );@]@; + +@ @= + @[TeX_( 
"/yy0{/nx/typedecls{/the/yy(2)}{/the/yy(3)}/the/yy(1)}" );@]@; + +@ @= + @[TeX_( "/getthird{/yy(1)}/to/toksa" );@]@; /* format pointer */ + @[TeX_( "/getfourth{/yy(1)}/to/toksb" );@]@; /* stash pointer */ + @[TeX_( "/getsecond{/yy(1)}/to/toksc" );@]@; /* kind of precedence */ + @[TeX_( "/yy0{/nx/precdecls{/the/toksc}{/the/yy(2)}{/the/yy(3)}{/the/toksa}{/the/toksb}}" );@]@; + +@ The bootstrap grammar forms the smallest subset of the full grammar. +@= + @@; + +@ These are the two most important rules for the bootstrap parser. +@= +@G +@t}\vb{\flatten}{@> +symbol_declaration: + "%nterm" {} symbol_defs.1 {@> TeX_( "/yy0{/nx/ntermdecls{/the/yy(3)}/the/yy(1)}" ); @=} +@t}\vb{\fold\flatten}{@> +| "%token" {} symbol_defs.1 {@> TeX_( "/yy0{/nx/tokendecls{/the/yy(3)}/the/yy(1)}" ); @=} +; +@g + +@ {\it Just like \prodstyle{symbols.1} but accept \prodstyle{INT} for +the sake of \POSIX}. Perhaps the only point worth mentioning here is +the inserted separator (\.{\\hspace}). Like any other separator, it takes +two parameters, stream pointers. In this case, however, both pointers are null +since there seems to be no other meaningful assignment. If any +formatting or stash information is needed, it can be extracted by the +symbols themselves. +@= +@G +symbols.prec: + symbol.prec {@> @ @=} +| symbols.prec symbol.prec {@> TeX_( "/yy0{/the/yy(1)/nx/hspace{0}{0}/the/yy(2)}" ); @=} +; + +symbol.prec: + symbol {@> TeX_( "/yy0{/nx/symbolprec{/the/yy(1)}{}}" ); @=} +| symbol INT {@> TeX_( "/yy0{/nx/symbolprec{/the/yy(1)}{/the/yy(2)}}" ); @=} +; +@g + +@ {\it One or more symbols to be \prodstyle{\%type}'d}. +@= + @@; + +@ @= +@G +symbols.1: + symbol {@> @ @=} +| symbols.1 symbol {@> TeX_( "/yy0{/the/yy(1)/nx/hspace{0}{0}/the/yy(2)}" ); @=} +; +@g + +@ @= +@G +generic_symlist: + generic_symlist_item {@> @ @=} +| generic_symlist generic_symlist_item {@> TeX_( "/yy0{/the/yy(1)/nx/hspace{0}{0}/the/yy(2)}" ); @=} +; +@t}\vb{\flatten\inline}{@> +generic_symlist_item: + symbol {@> @ @=} +| tag {@> @ @=} +; + +tag: + TAG {@> @ @=} +| "<*>" {@> @ @=} +| "<>" {@> @ @=} +; +@g + +@ {\it One token definition}. +@= +@G +symbol_def: + TAG {@> @ @=} +@t}\vb{\flatten}{@> +| id {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{}{}}" ); @=} +| id INT {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{/the/yy(2)}{}}" ); @=} +| id string_as_id {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{}{/the/yy(2)}}" ); @=} +| id INT string_as_id {@> TeX_( "/yy0{/nx/onesymbol{/the/yy(1)}{/the/yy(2)}{/the/yy(3)}}" ); @=} +; +@g + +@ {\it One or more symbol definitions}. +@= +@G +symbol_defs.1: + symbol_def {@> @ @=} +| symbol_defs.1 symbol_def {@> @ @=} +; +@g + +@ @= + @[TeX_( "/getsecond{/yy(2)}/to/toksa" );@]@; /* the identifier */ + @[TeX_( "/getfourth{/toksa}/to/toksb" );@]@; /* the format pointer */ + @[TeX_( "/getfifth{/toksa}/to/toksc" );@]@; /* the stash pointer */ + @[TeX_( "/yy0{/the/yy(1)/nx/hspace{/the/toksb}{/the/toksc}/the/yy(2)}" );@]@; + +@ {\it The grammar section: between the two +\prodstyle{\%\%}'s}. Finally, the following few short sections define +the syntax of \bison's rules. +@= +@G +grammar: + rules_or_grammar_declaration {@> @ @=} +| grammar rules_or_grammar_declaration {@> @ @=} +; +@g + +@ {\it As a \bison\ extension, one can use the grammar declarations in the +body of the grammar}. What follows is the syntax of the right hand +side of a grammar rule. 
+@= +@G +rules_or_grammar_declaration: + rules {@> @ @=} +| grammar_declaration ";" {@> @ @=} +| error ";" {@> TeX_( "/errmessage{parsing error!}" ); @=} +; +@t}\vb{\flatten\inline}{@> +rules: + id_colon named_ref.opt {@> TeX_( "/relax" ); @=} + rhses.1 {@> @ @=} +; +@t}\vb{\resetf}{@> +rhses.1[o]: + rhs {@> @ @=} +| rhses.1[a] "|"[b] {@> @ @=}[c] + rhs[d] {@> @ @=} +| rhses.1 ";" {@> @ @=} +; +@g + +@ The next few actions describe what happens when a left hand side is +attached to a rule. +@= + @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/yy0{/nx/grammar{/the/yy(1)}{/the/toksa}}" );@]@; + +@ @= + @[TeX_( "/getthird{/yy(1)}/to/toksa" );@]@; /* type of the last rule */ + @[TeX_( "/getsecond{/yy(1)}/to/toksc" );@]@; /* accumulated rules */ + @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* type of the new rule */ + @[TeX_( "/let/default/positionswitchdefault" );@]@; + @[TeX_( "/switchon{/the/toksb}/in/positionswitch" );@]@; /* determine the position of the first token in the group */ + @[TeX_( "/edef/next{/the/toksa}" );@]@; + @[TeX_( "/edef/default{/the/toksb}" );@]@; /* reuse \.{\\default} */ + @[TeX_( "/ifx/next/default" );@]@; + @[TeX_( " /let/default/separatorswitchdefaulteq" );@]@; + @[TeX_( " /switchon{/the/toksa}/in/separatorswitcheq" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /concat/toksa/toksb" );@]@; + @[TeX_( " /let/default/separatorswitchdefaultneq" );@]@; + @[TeX_( " /switchon{/the/toksa}/in/separatorswitchneq" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yy0{/nx/grammar{/the/toksc/the/postoks/the/toksd/the/yy(2)}{/the/toksb}}" );@]@; + +@ @= + @[TeX_( "/getsecond{/yy(1)}/to/toksa" );@]@; /* \.{\\prodheader} */ + @[TeX_( "/getsecond{/toksa}/to/toksb" );@]@; /* \.{\\idit} */ + @[TeX_( "/getfourth{/toksb}/to/toksc" );@]@; /* format stream pointer */ + @[TeX_( "/getfifth{/toksb}/to/toksd" );@]@; /* stash stream pointer */ + @[TeX_( "/getthird{/yy(1)}/to/toksb" );@]@; /* \.{\\rules} */ + @[TeX_( "/yy0{/nx/oneproduction{/the/toksa/the/toksb}{/the/toksc}{/the/toksd}}" );@]@; + +@ @= + @[TeX_( "/getfourth{/yy(1)}/to/toksa" );@]@; /* format stream pointer */ + @[TeX_( "/getfifth{/yy(1)}/to/toksb" );@]@; /* stash stream pointer */ + @[TeXb( "/yy0{/nx/pcluster{/nx/prodheader{/the/yy(1)}{/the/yy(2)}" );@]@; + @[TeXao( "{/the/toksa}{/the/toksb}}{/the/yy(4)}}" );@]@; + +@ It is important to format the right hand side properly, since we +would like to indicate that an action is inlined by an +indentation. The `format' of the \.{\\rhs} `structure' includes the +stash pointers and a `boolean' to indicate whether the right hand side ends +with an action. Since the action can be implicit, this decision has to +be postponed until, say, a semicolon is seen. +No formatting or stash pointers are added for such implicit action. 
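+
+Schematically, then, a right hand side travels on the stack as
+\begindemo
+^\rhs{...terms...}{...connector...}{\rhsfullfalse}
+\enddemo
+where the first field is extracted by \.{\\rhscont}, the second by
+\.{\\rhscnct}, and the last one (the `boolean', set to
+\.{\\rhsfulltrue} or \.{\\rhsfullfalse}) by \.{\\rhsbool}; the
+\.{...} parts merely stand for the actual contents.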
+@= + @[TeX_( "/rhsbool{/yy(1)}/to/toksa /the/toksa" );@]@; + @[TeX_( "/getthird{/yy(1)}/to/toksb" );@]@; /* the format pointer */ + @[TeX_( "/getfourth{/yy(1)}/to/toksc" );@]@; /* the stash pointer */ + @[TeX_( "/ifrhsfull" );@]@; + @[TeX_( " /yy0{/nx/rules{/the/yy(1)}{/the/toksb}{/the/toksc}}" );@]@; + @[TeX_( "/else" );@]@; /* it does not end with an action, fake one */ + @[TeX_( " /rhscont{/yy(1)}/to/toksa" );@]@; /* rules */ + @[TeX_( " /edef/next{/the/toksa}" );@]@; + @[TeX_( " /ifx/next/empty" );@]@; + @[TeX_( " /toksa{/emptyterm}" );@]@; + @[TeX_( " /fi" );@]@; + @[TeXb( " /yy0{/nx/rules{/nx/rhs{/the/toksa/nx/rarhssep{0}{0}" );@]@; + @[TeXfo( " /nx/actbraces{}{}{0}{0}/nx/bdend}{}{/nx/rhsfulltrue}}{/the/toksb}{/the/toksc}}" );@]@; + @[TeX_( "/fi" );@]@; + +@ Using standard notation, here is what the middle action +does. +@= + @[TeX_( "/rhscont{/yy(1)}/to{/yy(0)}" );@]@; + @[TeX_( "/yy0{/the/yy(0)/nx/midf/the/yy(2)}" );@]@; + +@ However, if the length of the rule preceding the inline action +is not known a different way of accessing the stack is necessary. +@= + @[TeX_( "/bb2{/toksa}/bb1{/toksb}" );@]@; + @[TeX_( "/rhscont{/toksa}/to{/yy(0)}" );@]@; + @[TeX_( "/yy0{/the/yy(0)/nx/midf/the/toksb}" );@]@; + +@ No pointers are provided for an {\it implicit\/} action. +@= + @[TeX_( "/rhsbool{/yy(4)}/to/toksa /the/toksa" );@]@; + @[TeX_( "/ifrhsfull" );@]@; + @[TeX_( " /yy0{/nx/rules{/the/yy(3)/nx/rrhssep/the/yy(2)/the/yy(4)}/the/yy(2)}" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /rhscont{/yy(4)}/to/toksa" );@]@; + @[TeX_( " /edef/next{/the/toksa}" );@]@; + @[TeX_( " /ifx/next/empty" );@]@; + @[TeX_( " /toksa{/emptyterm}" );@]@; + @[TeX_( " /fi" );@]@; + @[TeXb( " /yy0{/nx/rules{/the/yy(3)/nx/rrhssep/the/yy(2)" );@]@; + @[TeXf( " /nx/rhs{/the/toksa/nx/rarhssep{0}{0}" );@]@; /* streams have already been grabbed */ + @[TeXfo( " /nx/actbraces{}{}{0}{0}/nx/bdend}{}{/nx/rhsfulltrue}}/the/yy(2)}" );@]@; + @[TeX_( "/fi" );@]@; + +@ @= + @@; + +@ @= +@G +%token PERCENT_EMPTY "%empty"; +@g + +@ The centerpiece of the grammar is the syntax of the right hand side +of a production. Various `precedence hints' must be attached to an +appropriate portion of the rule, just before an action (which can +be inline, implicit or both in this case). 
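+
+In ordinary \bison\ input such a hint looks as follows (a made-up
+fragment, not part of this grammar):
+\begindemo
+^exp: '-' exp %prec UMINUS { $$ = - $2; }
+\enddemo
+with \prodstyle{\%prec} attaching the precedence of \.{UMINUS} to the
+rule just before the action.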
+@= +@G +rhs: + {@> @ @=} +| rhs symbol named_ref.opt {@> @ @=} +| rhs "{...}" named_ref.opt {@> @ @=} +| rhs "%?{...}" {@> @ @=} +| rhs "%empty" {@> @ @=} +| rhs "%prec" symbol {@> @ @=} +| rhs "%dprec" INT {@> @ @=} +| rhs "%merge" TAG {@> @ @=} +; + +named_ref.opt: + {@> @ @=} +| BRACKETED_ID {@> @ @=} +; +@g + +@ @= + @[TeX_( "/yy0{/nx/rhs{}{}{/nx/rhsfullfalse}}" );@]@; + +@ @= + @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/edef/next{/the/toksb}" );@]@; + @[TeX_( "/ifx/next/empty" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /getfourth{/yy(2)}/to/toksc" );@]@; + @[TeX_( " /getfifth{/yy(2)}/to/toksd" );@]@; + @[TeX_( " /appendr/toksb{{/the/toksc}{/the/toksd}}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeXb( "/yy0{/nx/rhs{/the/toksa/the/toksb" );@]@; + @[TeXao( "/nx/termname{/the/yy(2)}{/the/yy(3)}}{/nx/hspace}{/nx/rhsfullfalse}}" );@]@; + +@ @= + @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/rhsbool{/yy(1)}/to/toksb /the/toksb" );@]@; + @[TeX_( "/ifrhsfull" );@]@; /* the first half ends with an action */ + @[TeX_( " /appendr/toksa{/nx/arhssep{0}{0}/nx/emptyterm}" );@]@; /* no pointers to streams */ + @[TeX_( "/fi" );@]@; + @[TeX_( "/edef/next{/the/toksa}" );@]@; + @[TeX_( "/ifx/next/empty" );@]@; + @[TeX_( " /toksa{/emptyterm}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* the contents of the braced code */ + @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@; /* the format stream pointer */ + @[TeX_( "/getthird{/yy(2)}/to/toksd" );@]@; /* the stash stream pointer */ + @[TeXb( "/yy0{/nx/rhs{/the/toksa/nx/rarhssep{/the/toksc}{/the/toksd}" );@]@; + @[TeXf( " /nx/actbraces{/the/toksb}{/the/yy(3)}{/the/toksc}{/the/toksd}/nx/bdend}" );@]@; + @[TeXfo( " {/nx/arhssep}{/nx/rhsfulltrue}}" );@]@; + +@ @= + @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/rhsbool{/yy(1)}/to/toksb /the/toksb" );@]@; + @[TeX_( "/ifrhsfull" );@]@; /* the first half ends with an action */ + @[TeX_( " /appendr/toksa{/nx/arhssep{0}{0}/nx/emptyterm}" );@]@; /* no pointers to streams */ + @[TeX_( "/fi" );@]@; + @[TeX_( "/edef/next{/the/toksa}" );@]@; + @[TeX_( "/ifx/next/empty" );@]@; + @[TeX_( " /toksa{/emptyterm}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; /* the contents of the braced code */ + @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@; /* the format stream pointer */ + @[TeX_( "/getthird{/yy(2)}/to/toksd" );@]@; /* the stash stream pointer */ + @[TeXb( "/yy0{/nx/rhs{/the/toksa/nx/rarhssep{/the/toksc}{/the/toksd}" );@]@; + @[TeXf( " /nx/bpredicate{/the/toksb}{}{/the/toksc}{/the/toksd}/nx/bdend}" );@]@; + @[TeXao( "{/nx/arhssep}{/nx/rhsfulltrue}}" );@]@; + +@ @= + @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/edef/next{/the/toksb}" );@]@; + @[TeX_( "/ifx/next/empty" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /getfourth{/yy(2)}/to/toksc" );@]@; + @[TeX_( " /getfifth{/yy(2)}/to/toksd" );@]@; + @[TeX_( " /appendr/toksb{{/the/toksc}{/the/toksd}}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeXb( "/yy0{/nx/rhs{/the/toksa/the/toksb" );@]@; + @[TeXao( "/nx/emptyterm}{/nx/hspace}{/nx/rhsfullfalse}}" );@]@; + +@ @= + @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/rhsbool{/yy(1)}/to/toksc /the/toksc" );@]@; + @[TeX_( "/ifrhsfull" );@]@; + @[TeX_( " /yy0{/nx/sprecop{/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */ + @[TeX_( " /supplybdirective/toksa/yyval" );@]@; /* the directive is `absorbed' by the action 
*/ + @[TeX_( " /yy0{/nx/rhs{/the/toksa}{/the/toksb}{/nx/rhsfulltrue}}" );@]@; + @[TeX_( "/else" );@]@; + @[TeXb( " /yy0{/nx/rhs{/the/toksa" );@]@; + @[TeXao( "/nx/sprecop{/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@; + @[TeX_( "/fi" );@]@; + +@ @= + @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/rhsbool{/yy(1)}/to/toksc /the/toksc" );@]@; + @[TeX_( "/ifrhsfull" );@]@; + @[TeX_( " /yy0{/nx/dprecop{/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */ + @[TeX_( " /supplybdirective/toksa/yyval" );@]@; /* the directive is `absorbed' by the action */ + @[TeX_( " /yy0{/nx/rhs{/the/toksa}{/the/toksb}{/nx/rhsfulltrue}}" );@]@; + @[TeX_( "/else" );@]@; + @[TeXb( " /yy0{/nx/rhs{/the/toksa" );@]@; + @[TeXao( "/nx/dprecop{/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@; + @[TeX_( "/fi" );@]@; + +@ @= + @[TeX_( "/rhscont{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/rhscnct{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/rhsbool{/yy(1)}/to/toksc /the/toksc" );@]@; + @[TeX_( "/ifrhsfull" );@]@; + @[TeX_( " /yy0{/nx/mergeop{/the/yy(3)}/the/yy(2)}" );@]@; /* reuse \.{\\yyval} */ + @[TeX_( " /supplybdirective/toksa/yyval" );@]@; /* the directive is `absorbed' by the action */ + @[TeX_( " /yy0{/nx/rhs{/the/toksa}{/the/toksb}{/nx/rhsfulltrue}}" );@]@; + @[TeX_( "/else" );@]@; + @[TeXb( " /yy0{/nx/rhs{/the/toksa" );@]@; + @[TeXao( "/nx/mergeop{/the/yy(3)}/the/yy(2)}{/the/toksb}{/nx/rhsfullfalse}}" );@]@; + @[TeX_( "/fi" );@]@; + +@ @= + @[TeX_( "/yy0{}" );@]@; + +@ @= + @@; + +@ Identifiers. +{\it Identifiers are returned as |uniqstr| values by the scanner. +Depending on their use, we may need to make them genuine symbols}. We, +on the other hand simply copy the values returned by the scanner. +@= +@G +id: + ID {@> @ @=} +| CHAR {@> @ @=} +; +@g + +@ @= + @@; + +@ @= +@G +symbol: + id {@> @ @=} +| string_as_id {@> @ @=} +; +@g + +@ @= +@G +@t}\vb{\inline}{@> +id_colon: + ID_COLON {@> @ @=} +; +@g + +@ A string used as an \prodstyle{ID}. +@= +@G +@t}\vb{\inline}{@> +string_as_id: + STRING {@> @ @=} +; +@g + +@ The remainder of the action code is trivial but we reserved the +placeholders for the appropriate actions in case the parser gains some +sophistication in processing low level types (or starts expecting +different types from the scanner). +@= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ {\it Variable and value. +The \prodstyle{STRING} form of variable is deprecated and is not \.{M4}-friendly. +For example, \.{M4} fails for \.{\%define "[" "value"}.} +@= +@G +@t}\vb{\flatten\inline}{@> +variable: + ID {@> @ @=} +| STRING {@> @ @=} +; + +value: + {@> TeX_( "/yy0{}" ); @=} +| ID {@> @ @=} +| STRING {@> @ @=} +| "{...}" {@> TeX_( "/yy0{/nx/bracedvalue/the/yy(1)}" ); @=} +; +@g + +@ @= +@G +@t}\vb{\flatten\inline}{@> +epilogue.opt: + {@> TeX_( "/yy0{}" ); @=} +| "%%" EPILOGUE {} +; +@g + +@ \Cee\ preamble for the grammar parser. In this case, there are no `real' actions that our +grammar performs, only \TeX\ output, so this section is empty. + +@= + +@ \Cee\ postamble for the grammar parser. It is tricky to insert function definitions that use \bison's internal types, +as they have to be inserted in a place that is aware of the internal definitions but before said +definitions are used. 
+
+@=
+#define YYPRINT(file, type, value) yyprint (file, type, value)
+  static void yyprint (FILE *file, int type, YYSTYPE value){}
+
+@ @=
+  @@;
+  @@;
+
+@ @=
+  void bootstrap_tokens( char *bootstrap_token_format ) {
+
+#define _register_token_d(name) fprintf( tables_out, bootstrap_token_format, #name, name, #name );
+    @@;
+#undef _register_token_d
+
+  }
+
+@ \namedspot{bootstraptokens}Here is the minimal list of tokens needed
+to make the lexer operational just enough to extract the rest of the
+token information from the grammar.
+@=
+  _register_token_d(ID)@;
+  _register_token_d(PERCENT_TOKEN)@;
+  _register_token_d(STRING)@;
+
+@q The tokens below are not required to make a minimal bootstrapping parser work @>
+@q but they do appear in the rules the parser will encounter while extracting @>
+@q token information. @>
+@q _register_token_d(INT) /* only encountered in GRAM_EOF definition which is never used */ @>
+@q _register_token_d(CHAR) /* \bison\ never declares character tokens */ @>
+@q _register_token_d(SEMICOLON) /* can be omitted in prologue */ @>
+@q _register_token_d(TAG) /* only encountered in the definition of PERCENT_PARAM */ @>
+
+@ Union of types.
+@=
diff --git a/support/splint/cweb/bs.w b/support/splint/cweb/bs.w
new file mode 100644
index 0000000000..a2003d81fa
--- /dev/null
+++ b/support/splint/cweb/bs.w
@@ -0,0 +1,706 @@
+@q Copyright 2012-2014, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see <http://www.gnu.org/licenses/>.@>
+
+@*1\bison\ specific routines.
+The placeholder code left blank in the common routines is filled in
+with the code relevant to the output of parser tables in the following sections.
+
+@*2Tables. \namedspot{bsfile}Here are all the parser table names. Some tables are not output, but adding
+one to the list in the future will be easy: it does not even have to
+be done here.
+@=
+  _register_table_d(yytranslate)@;
+  _register_table_d(yyr1)@;
+  _register_table_d(yyr2)@;
+  _register_table_d(yydefact)@;
+  _register_table_d(yydefgoto)@;
+  _register_table_d(yypact)@;
+  _register_table_d(yypgoto)@;
+  _register_table_d(yytable)@;
+  _register_table_d(yycheck)@;
+  _register_table_d(yytoknum)@;
+  _register_table_d(yystos)@;
+  _register_table_d(yytname)@;
+  _register_table_d(yyprhs)@;
+  _register_table_d(yyrhs)@;
+
+@ One special table requires a little bit more preparation. This is a
+table that lists the depth of the stack before an implicit terminal. It
+is not one of the tables that is used by \bison\ itself, but is needed
+if the symbolic name processing is to be implemented (\bison\ has
+access to this information `on the fly'). The `new' \bison\ (starting
+with version~\.{3.0}) does not generate |yyprhs| and |yyrhs| or any
+other arrays that contain similar information, so we fake them here if
+such a crippled version of \bison\ is used.
+ +@= + unsigned int yyrthree[YYNRULES + 1] = { 0 }; +#ifdef BISON_IS_CRIPPLED + unsigned int yyrhs[YYNRULES + 1] = { -1 }; + unsigned int yyprhs[YYNRULES + 1] = { 0 }; +#endif + +@ We populate this table below $\ldots$ +@= +#ifndef BISON_IS_CRIPPLED + assert( YYNRULES + 1 == sizeof(yyprhs)/sizeof(yyprhs[0]) ); + + { int i, j; + + for ( i = 1; i <= YYNRULES; i++ ) { + + for ( j = 0; yyrhs[ yyprhs[i] + j ] != -1; j++ ) { + + assert( yyprhs[i] + j < sizeof(yyrhs) ); + assert( j < yyr1[i] ); + + if ( @ ) { + + @@; + + } + + } + } + } +#endif + +@ @= + ( strlen( yytname[ yyrhs[yyprhs[i]+j] ] ) > 1 ) && + ( yytname[ yyrhs[yyprhs[i]+j] ][0] == '$' ) && + ( yytname[ yyrhs[yyprhs[i]+j] ][1] == '@@' ) + +@ @= + int rule_number; + + for ( rule_number = 1; rule_number < YYNRULES; rule_number++ ) { + + if ( yyr1[rule_number] == yyrhs[yyprhs[i]+j] ) { + + yyrthree[rule_number] = j; + break; + + } + } + + assert( rule_number < YYNRULES ); + +@ $\ldots$ and add its name to the list. +@= + _register_table_d(yyrthree)@; + +@*2Actions. There are several ways of making |yyparse()| execute all portions of +the action code. The one chosen here makes sure that none of the +tables gets written past its last element. To see how it works, it +might be helpful to `walk through' \bison's output to see how each +change affects the generated parser. +@= + if ( output_desc.output_actions ) { + + int i, j; + + fprintf( tables_out, "%s", action_desc.preamble ); + + if ( !bare_actions ) { + + yypact[0] = YYPACT_NINF; + yypgoto[0] = -1; + yydefgoto[0] = YYFINAL; + + } + + for ( i = 1; i < sizeof(yyr1)/sizeof(yyr1[0]); i++ ) { + + fprintf( tables_out, action_desc.act_setup, i, yyr2[i] - 1 ); + + if ( action_desc.print_rule ) { + + action_desc.print_rule( i ); + + } + + if ( yyr2[i] > 0 ) { + + if ( action_desc.action1 ) { + + fprintf( tables_out, "%s", action_desc.action1 ); + + } + } + + for ( j = 2; j <= yyr2[i]; j++ ) { + + if ( action_desc.actionn ) { + + fprintf( tables_out, action_desc.actionn, j ); + } + + } + + if ( !bare_actions ) { + + yyr1[i] = YYNTOKENS; + yydefact[0] = i; + yyr2[i] = 0; + yyparse(YYPARSE_PARAMETERS); + + } + + fprintf( tables_out, action_desc.act_suffix, i, yyr2[i] - 1 ); + + } + + fprintf( tables_out, "%s", action_desc.postamble ); + + if ( action_desc.cleanup ) { + + action_desc.cleanup( &action_desc ); + + } + + } + +@*2Constants. +@= + _register_const_d(YYEMPTY)@; + _register_const_d(YYPACT_NINF)@; + _register_const_d(YYEOF)@; + _register_const_d(YYLAST)@; + _register_const_d(YYNTOKENS)@; + _register_const_d(YYNRULES)@; + _register_const_d(YYNSTATES)@; + _register_const_d(YYFINAL)@; + +@*2Tokens. +Similar techniques are employed in token output. Tokens are parser +specific (the scanner only needs their numeric values) so we need {\it +some\/} flexibility to output them in a desired format. For special +purposes (say changing the way tokens are typeset) we can control the +format tokens are output in. 
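+
+For example (the numerical values are made up), the \TeX\ mode
+defaults below turn every named token into a line such as
+\begindemo
+^\tokenset{6}{262}% string
+\enddemo
+where the first number is the internal token number
+(|yytranslate[262]|) and the second is the value the scanner returns,
+while the comment records the token's `print name'.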
+@= + char *token_format_char = NULL; + char *token_format_affix = NULL; + char *token_format_suffix = NULL; + char *bootstrap_token_format = NULL; + +@ @= + _register_option("token-format-char", required_argument, 0, TOKEN_FORMAT_CHAR, "")@; + _register_option("token-format-affix", required_argument, 0, TOKEN_FORMAT_AFFIX, "")@; + _register_option("token-format-suffix", required_argument, 0, TOKEN_FORMAT_SUFFIX, "")@; + _register_option("bootstrap-token-format", required_argument, 0, BOOTSTRAP_TOKEN_FORMAT, "")@; + +@ @= + TOKEN_FORMAT_CHAR,@[@] + TOKEN_FORMAT_AFFIX,@[@] + TOKEN_FORMAT_SUFFIX,@[@] + BOOTSTRAP_TOKEN_FORMAT,@[@] + +@ @= + case TOKEN_FORMAT_CHAR:@; + token_format_char = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) ); + strcpy(token_format_char, optarg); + break; + + case TOKEN_FORMAT_AFFIX:@; + token_format_affix = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) ); + strcpy(token_format_affix, optarg); + break; + + case TOKEN_FORMAT_SUFFIX:@; + token_format_suffix = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) ); + strcpy(token_format_suffix, optarg); + break; + + case BOOTSTRAP_TOKEN_FORMAT:@; + bootstrap_token_format = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) ); + strcpy(bootstrap_token_format, optarg); + break; + +@ @= + bool output_tokens:1; + +@ No tokens are output by default. +@= + @[@].output_tokens = 0,@[@] + +@ The only part of the code below that needs any explanation is the +`bootstrap' token output. In \bison\ every token has three attributes: +its `macro name' (say, \.{STRING}) that is used by the parse code +internally, its `print name' (\.{"string"} to continue the example) +that \bison\ uses to print the token names in its diagnostic messages, +and its numeric value (that can be assigned implicitly by \bison\ +itself or explicitly by the user). Only the `print names' are kept in +the |yytname| array so to reuse the scanner used by \bison\ we either +have to extract the token `macro names' from the \Cee\ code ourselves +to pass them on to the lexer, or use a special `stripped down' version +of a \bison\ grammar parser to extract the names from the parser's +\bison\ grammar. To do this, some token names would still need to be +known to the scanner. These tokens are selected by hand to make the +`bootstrapping' parser operational. The token list for the \bison\ +grammar parser can be examined as part of the appropriate +\locallink{bootstraptokens}driver file\endlink. +@= + if ( output_desc.output_tokens ) { + + int i; + int length; + char token; + char *token_name; + bool too_creative = false; + + for ( i = 258; i < sizeof(yytranslate)/sizeof(yytranslate[0]) ; i++ ) { + + token_name = yytname[yytranslate[i]]; + + if ( token_name ) { + + fprintf( tables_out, token_format_affix, yytranslate[i], i ); + + length = 0; + + while ( (token = *token_name) ) { + + if ( token_format_char ) { + + length += fprintf( tables_out, token_format_char, (unsigned int)token ); + + } + + if ( token < 040 || token == 0177 ) { + + too_creative = true; + + } + + token_name++; + } + + fprintf( tables_out, token_format_suffix, too_creative ? ".unprintable." : yytname[yytranslate[i]] ); + + } + } + } + +#ifdef BISON_BOOTSTRAP_MODE + fprintf( tables_out, "\\bootstrapmodetrue\n" ); + fprintf( tables_out, "%% token values needed to bootstrap the parser\n" ); + bootstrap_tokens( bootstrap_token_format ); +#endif + +@ The size of the token name table is useful to determine, say, how +many `named' tokens the parser uses. 
+@=
+  fprintf( tables_out, "\\constset{YYTRANSLATESIZE}{%d}%%\n", (int)(sizeof(yytranslate)/sizeof(yytranslate[0])) );
+
+@*2Output modes.
+The code below can be easily extended and modified to output parser
+tables, actions, and constants in a language of one's choice. We are
+only interested in \TeX, however, so the other modes are very
+rudimentary or non-existent at this point.
+
+@*3 Token only mode.
+Token only output mode does exactly what is expected: it outputs token
+names and values in the format of your choosing.
+
+@=
+  TOKEN_ONLY_OUT,@[@]
+
+@ @=
+  case TOKEN_ONLY_OUT:@;
+    @@;
+    break;
+
+@ @=
+  _register_option("token-only-mode", no_argument, 0, TOKEN_ONLY_MODE, "")@;
+
+@ @=
+  TOKEN_ONLY_MODE,@[@]
+
+@ @=
+  case TOKEN_ONLY_MODE:@;
+    mode = TOKEN_ONLY_OUT;
+    break;
+
+@ @=
+  if ( !token_format_char ) {
+
+    token_format_char = "{%u}";
+
+  }
+
+  if ( !token_format_affix ) {
+
+    token_format_affix = "%% token: %d, token value: %d\n\\prettytoken@@{";
+
+  }
+
+  if ( !token_format_suffix ) {
+
+    token_format_suffix = "}%% %s\n";
+
+  }
+
+  output_desc.output_tokens = 1;
+
+@*3Generic output. Generic output is not programmed yet.
+
+@=
+  GENERIC_OUT,@[@]
+
+@ @=
+  case GENERIC_OUT:@;
+    printf( "This mode is not supported yet\n" );
+    exit(0);
+    break;
+
+@*3\TeX\ output. The \TeX\ mode is the main reason for this software.
+
+@=
+  TEX_OUT,@[@]
+
+@ @=
+  case TEX_OUT:@;
+    @@;
+    @@;
+    @@;
+    @@;
+    break;
+
+@ Some tables require name adjustments due to \TeX's
+reluctance to treat digits as part of a name.
+@=
+#define _register_table_d(name) tex_table(name);
+  @@;
+#undef _register_table_d
+
+  yyr1_desc.name = "yyrone";
+  yyr2_desc.name = "yyrtwo";
+
+@ The memory allocated for the |yytname| table is released at the end.
+@=
+  void yytname_cleanup( struct table_d *table );
+  int yytname_formatter_tex( FILE *stream, int index );
+  int yytname_formatter( FILE *stream, int index );
+
+@ There are a number of helper functions to output complicated names
+in \TeX. The safest way seems to be to output those as sequences of
+{\sc ASCII} codes to accommodate names like \.{\$}\.{end}
+safely. \TeX's \.{\^\^}$\ldots$ convention is supported as well.
+@=
+  void yytname_cleanup( struct table_d *table ) {
+
+    free( table->separator );
+    free( table->null );
+
+  }
+
+  int yytname_formatter_tex( FILE *stream, int index ) {
+
+    char *token_name = yytname[index];
+    unsigned char token;
+    int length = 0;
+
+    fprintf( stream, "\\addname " );
+
+    while ( (token = *token_name) ) {
+
+      if ( token < 040 || token == 0177 ) { /* unprintable characters: use the \TeX\ `\.{\^\^}' offset of octal 0100 */
+
+        fprintf( stream, "^^%c", token < 0100 ? (unsigned char)(token + 0100) : (unsigned char)(token - 0100) );
+        length += 3;
+
+      } else {
+
+        fprintf( stream, "%c", token );
+        length++;
+
+      }
+
+      token_name++;
+
+    }
+    fprintf( stream, "\n" );
+
+    return length;
+
+  }
+
+  int yytname_formatter( FILE *stream, int index ) {
+
+    char *token_name;
+    unsigned char token;
+    int length = 0;
+    bool too_creative = false; /* to indicate if the name is too dangerous to print */
+
+    fprintf( stream, "\\addname" );
+
+    if ( index >= 0 ) { /* this is not the last name */
+
+      token_name = yytname[index];
+
+      if ( token_name == NULL ) {
+
+        token_name = "$impossible";
+
+      }
+
+      while ( (token = *token_name) ) {
+
+        length += fprintf( stream, "{%u}", (unsigned int)token );
+
+        if ( token < 040 || token == 0177 ) {
+
+          too_creative = true;
+
+        }
+
+        token_name++;
+
+      }
+
+      fprintf( stream, "%% %s\n", too_creative ? ".unprintable."
: yytname[index] ); + + } else { /* this is the last name */ + + token_name = yytname[-index]; + + if ( token_name == NULL ) { + + token_name = "$impossible"; + + } + + while ( (token = *token_name) ) { + + length += fprintf( stream, "{%u}", (unsigned int)token ); + token_name++; + + if ( token < 040 || token == 0177 ) { + + too_creative = true; + + } + + } + + fprintf( stream, "%% %s\n\\end\n%%\n", too_creative ? ".unprintable." : + ( yytname[-index] ? yytname[-index] : "end of array" ) ); + + } + + return length; + + } + +@ @= + yytname_desc.preamble = "%%\n\\newtable{yytname}{}\\tempca0\\relax%% a robust way to add the yytname array\n"; + yytname_desc.separator = NULL; + yytname_desc.postamble = NULL; + yytname_desc.null = NULL; + yytname_desc.null_postamble = NULL; + yytname_desc.optimized_numeric = NULL; + yytname_desc.prettify = false; + yytname_desc.formatter = yytname_formatter; + + yytname_desc.cleanup = NULL; + + output_desc.output_yytname = 1; + +@ @= + + if ( optimize_actions ) { + + action_desc.preamble = "%\n% the big switch\n%\n"@/ + "\\catcode`\\/=0\\relax % see the documentation for an explanation of this trick\n"@/ + "\\def\\yybigswitch#1{%%\n"@/ + " \\csname dobisonaction\\number #1\\parsernamespace\\endcsname\n"@; + "}\\stashswitch{yybigswitch}%%\n"; + action_desc.act_setup = "\n\\expandafter\\def\\csname dobisonaction%d\\parsernamespace\\endcsname{%%\n%%"; + action_desc.act_suffix = "}%% end of rule %d\n"; + action_desc.action1 = NULL; + action_desc.actionn = NULL; + action_desc.postamble = "\n\\catcode`\\/=12\\relax\n\n"; + action_desc.print_rule = print_rule; + action_desc.cleanup = NULL; + output_desc.output_actions = 1; + + } else { + + action_desc.preamble = "%\n% the big switch\n%\n"@/ + "\\catcode`\\/=0\\relax % see the documentation for an explanation of this trick\n"@/ + "\\def\\yybigswitch#1{%%\n"@; + " \\ifcase#1\\relax\n"; + action_desc.act_setup = " \\or %% (rule %d) "; + action_desc.act_suffix = ""; + action_desc.action1 = NULL; + action_desc.actionn = NULL; + action_desc.postamble = " \\else\n \\fi\n}\\stashswitch{yybigswitch}%%\n\\catcode`\\/=12\\relax\n\n"; + action_desc.print_rule = print_rule; + action_desc.cleanup = NULL; + output_desc.output_actions = 1; + +} + +@ Grammar rules are listed in a readable form alongside the action +code to make it possible to quickly find an appropriate action. The +rules are not output if a crippled \bison\ is used. +@= + void print_rule( int n ) { + + int i; + + fprintf( tables_out, "%s%s: ", (n < 10 && !optimize_actions ? " " : ""), yytname[yyr1[n]] ); + +#ifndef BISON_IS_CRIPPLED + i = yyprhs[n]; + + if ( yyrhs[i] < 0 ) { + + fprintf( tables_out, "" ); + + } else { + + while( yyrhs[i] > 0 ) { + + fprintf( tables_out, "%s ", yytname[yyrhs[i]] ); + i++; + + } + + } +#endif + + fprintf( tables_out, "\n" ); + + } + +@ \TeX\ constant output is another place where the techniques described above are applied. +As before, the macro handles the repetitive work of initialization, declaration, etc in +each place where the corresponding constant is mentioned. The one exception is \.{YYPACT\_NINF}, +which has to be handled separately because the underscore in its name makes it difficult to +use it as a command sequence name. 
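+To give a purely hypothetical example of the format set up below, a
+registered constant \.{YYFINAL} with the value $2$ would come out as
+\.{\\constset\{YYFINAL\}\{2\}\%} in the generated file (both the name
+and the value serve as mere illustrations here).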
+\def\YYPACTxNINFxdesc{\.{YYPACT\_NINF\_}\\{desc}} + +@s YYPACT_NINF_desc TeX + +@= +#define _register_const_d(c_name) @[c_name##_desc.format = "\\constset{%s}{%d}%%\n"; \ + c_name##_desc.name = #c_name; \ + output_desc.output_##c_name = 1;@] + @@; +#undef _register_const_d +YYPACT_NINF_desc.name = "YYPACTNINF"; + +@ Token definitions round off the \TeX\ output mode. + +@= + + token_format_char = NULL; /* do not output individual characters */ + + if ( !token_format_affix ) { + + token_format_affix = "\\tokenset{%d}{%d}"; + + } + + if ( !token_format_suffix ) { + + token_format_suffix = "%% %s\n"; + + } + + + if ( !bootstrap_token_format ) { + + bootstrap_token_format = "\\expandafter\\def\\csname token\\parsernamespace %s\\endcsname{%d}%% %s\n"; + + } + + /* |output_desc.output_tokens = 1;| is no longer necessary as it is done entirely in \TeX */ + +@*2 Command line options. +We start with the most obvious option, the one begging for help. + +@= + LONG_HELP,@[@] + +@ @= + _register_option("help", no_argument, 0, LONG_HELP, "")@; + +@ @= + "h" + +@ @= + case 'h': /* short help */@; + fprintf(stderr, "Usage: %s [options] output_file\n", argv[0]); + exit(0); + break; /* should not be needed */ + + case LONG_HELP:@; + fprintf(stderr, "%s [--mode=TeX:options] output_file outputs tables\n" + " and constants for a TeX parser\n", argv[0]); + exit(0); + break; /* should not be needed */ + +@ @= + _register_option("debug", optional_argument, 0, 'b', "")@; + _register_option("mode", required_argument, 0, 'm', "")@; + _register_option("table-separator", required_argument, 0, 'z', "")@; + + _register_option("format", required_argument, 0, 'f', "")@; /* name? */ + _register_option("table", required_argument, 0, 't', "")@; /* specific table */ + _register_option("constant", required_argument, 0, 'c', "")@; /* specific constant */ + _register_option("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */ + _register_option("token", required_argument, 0, 'n', "")@; /* specific token */ + _register_option("run-parse", required_argument, 0, 'p', "")@; /* run the parser */ + _register_option("parse-file", required_argument, 0, 'i', "")@; /* input for the parser */ + +@ The string below is a list of short options. + +@= + "z:m:f:t:" + +@ A few options can be discussed immediately. + +@= + char *table_separator = "%s "; + +@ @= + case 'm': /* output mode */@; + switch( optarg[0] ) { + + case 'T': + case 't':@; + mode = TEX_OUT; + break; + + case 'b': + case 'B': + case 'g': + case 'G':@; + mode = GENERIC_OUT; + break; + + default:@; + break; + + } + break; + + case 'z': + table_separator = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) ); + strcpy(table_separator, optarg); + break; diff --git a/support/splint/cweb/common.w b/support/splint/cweb/common.w new file mode 100644 index 0000000000..3ab5af93b4 --- /dev/null +++ b/support/splint/cweb/common.w @@ -0,0 +1,788 @@ +@q Copyright 2012-2014, Alexander Shibakov@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see .@>
+
+@** Forcing \bison\ and \flex\ to output \TeX.
+Instead of implementing a \bison\ (or \flex) `plugin' for outputting
+a \TeX\ parser, the code that follows produces a separate executable
+that outputs all the required tables after the inclusion of an
+ordinary \Cee\ parser produced by \bison\ (or a scanner produced by
+\flex). The actions in both the \bison\ parser and the \flex\ scanner
+are assumed to be merely |printf()| statements that output the `real'
+\TeX\ actions. The code below simply cycles through all such actions to
+output an `action switch' appropriate for use with \TeX. In every
+other respect, the included parser or scanner can use any features
+allowed in `real' parsers and scanners.
+
+@*1 Common routines.
+The `top' level of the scanner and parser `drivers' is very similar,
+and is therefore separated into a few sections that are common to both
+drivers. The layout is fairly typical and follows a standard
+`initialize-input-process-output-clean-up' scheme. The logic behind each
+section of the program will be explained in detail below.
+
+The section below is called |@<\Cee\ postamble@>| because the output of
+the tables can happen only after the \bison\ (or \flex) generated
+\.{.c} file is included and all the data structures are known.
+
+The actual `assembly' of each driver has to be done separately due to
+some `singularities' of the \CWEB\ system and the design of this
+software. All the essential routines are presented in the sections
+below, though.
+@<\Cee\ postamble@>=
+
+@;
+@@;
+@@;
+@@;
+
+int main( int argc, char **argv ) {
+
+  @@;
+  @@;
+  @@;
+  @@;
+
+  switch( mode ) {
+
+    @@;
+
+    default:
+      break;
+
+  }
+
+  if ( tables_out ) {
+
+    @@;
+    @@;
+
+  } else {
+
+    fprintf( stderr, "No output, exiting\n" );
+    exit(0);
+
+  }
+
+  @@;
+
+  return 0;
+
+}
+
+@ Not all the code can be supplied at this stage (most of the routines
+here are at the `top' level so the specifics have to be `filled in' by
+each driver), so many of the sections
+above are placeholders for the code provided by a specific
+driver. However, we still need to supply a trivial definition here to
+placate \CWEAVE\ whenever this portion of the code is used in isolation
+in documentation.
+@=
+
+@ Standard library declarations for memory management routines, some
+syntactic sugar, command line processing, and variadic functions are
+all that is needed.
+@=
+#include
+#include
+#include
+#include
+#include
+
+@ This code snippet is a payment for some poor (in my view) philosophy
+on the part of the \bison\ and \flex\ developers. There used to be an
+option in \bison\ to output just the tables and the action code, but it
+never worked correctly and was simply dropped in the latest
+version. Instead, one can only get access to \bison's goodies as part
+of a tangled mess of |@[#define@]|'s and error processing code. Had the
+tables and the parser function itself been considered separate, well
+isolated sections of \bison's output, there would simply be no reason
+for dirty tricks like the one below: one would be able to write custom
+error processing functions, unicorns would roam the Earth, and pixies
+would hand open-sourced tablets to everyone. At a minimum, it would
+have been a much cleaner, modular approach.
+
+As of version~\.{3.0} of \bison\ some critical arrays, namely
+|yyprhs| and |yyrhs|, are no longer generated (even internally), which
+significantly reduces \bison's usability as a parser generator. As an
+example, the |yyrthree| array, which is necessary for processing
+`inline' actions, is computed in \.{bs.w} using the two arrays
+mentioned in the previous sentence. There does not seem to be any
+other way to access this information. A number of tools (GNU and
+otherwise) have taken the path of narrowing the field of application
+to a few use cases envisioned by the maintainers. This includes
+compilers as well.
+
+There is a strange
+reluctance on the part of the \gcc\ team to output any intermediate
+code other than the results of preprocessing and assembly. I have seen
+an argument that involves some sort of appeal to making the code
+difficult to close source, but the logic of it escaped me completely
+(well, there {\it is\/} logic to it; however, choosing poor design in
+order to punish a few bad players seems like a rather inferior option).
+
+Ideally, there should be no such thing as a parser generator, or a
+compiler, for that matter: all of these are just basic table-driven
+rewriting routines. Tables are hard but table-driven code should not
+be. If one had access to the tables themselves, and some canonical
+examples of code driven by such tables, like |yyparse()| and
+|yylex()|, the flexibility of these tools would improve
+tremendously. Barring that, this is what we have to do {\it now}.
+
+There are several ways to gain write access to the data declared |const|
+in \Cee, like passing its address to a function with no prototype. All
+these methods have one drawback: the loopholes that make them possible
+have been steadily getting on the chopping block of the \Cee\
+standards committee. Indeed, |const| data should be constant. Even if
+one succeeds in getting access, there is no reason to believe that the
+data is not allocated in a read-only region of the memory. The
+cleanest way to get write access then is to eliminate |const|
+altogether. The code should have the same semantics after that, and
+the trick is only marginally bad.
+
+The last two definitions are less innocent (and at least the
+second one is prohibited by the ISO standard (clause 6.10.8(2),
+see~\cite[ISO/C11])) but
+\gcc\ does not seem to mind, and it gets rid of warnings about
+dropping a |const| qualifier whenever an |assert| is
+encountered. Since the macro is not recursively expanded, this will
+only work if $\ldots$|FUNCTION__| is treated as a pseudo-variable, as
+it is in \gcc, not a macro.
+
+@d const
+@d __PRETTY_FUNCTION__ @[(char *)__PRETTY_FUNCTION__@]
+@d __FUNCTION__ @[(char *)__FUNCTION__@]
+
+@ The output file has to be known to both parts of the code, so it is
+declared at the very beginning of the program. We also add some
+syntactic sugar for loops.
+@q The line below is simply irresistible, one should put it on a T-shirt@>
+@s FOREVER TeX
+
+@d FOREVER for(;;)
+
+@=
+#include
+  FILE *tables_out;
+
+@ The clean-up portion of the code can be left empty, as all it does
+is close the output file; this could be left to the operating system, but
+we take care of it ourselves to keep our code `clean'\footnote{In case
+the reader has not noticed yet, this is a weak attempt at humor to
+break the monotony of going through the lines of \CTANGLE'd code.}.
+@=
+  fclose( tables_out );
+
+@ There is a descriptor controlling the output of the program as a whole.
+The code below is an example of a literate programming
+technique that will be used repeatedly to maintain large structures
+that can grow during the course of the program design. Note that the
+name of each table is only mentioned once; the rest of the code is
+generic.
+
+Technically speaking, all of this can be done with \Cee\ preprocessor
+macros of moderate complexity, taking advantage of their expansion
+rules, but it is not nearly as transparent as the \CWEB\ approach.
+@=
+  struct output_d {
+
+    @@;
+
+  };
+
+  struct output_d output_desc = { @ };
+
+@ To declare each table field in the global output descriptor, all one
+has to do is to provide a general pattern.
+@=
+#define _register_table_d(name) @[bool output_##name:1;@]
+  @
@; +#undef _register_table_d + +@ Same for assigning default values to each field. +@= +#define _register_table_d(name) @[.output_##name = 0,@] /* do not output any tables by default */ + @
@; +#undef _register_table_d + +@ Each descriptor is populated using the same approach. +@= +#define _register_table_d(name) @[struct table_d name##_desc = {0};@] + @
@;
+#undef _register_table_d
+
+@ The flag \.{--optimize-tables} affects the way tables are output.
+@=
+  static int optimize_tables = 0;
+
+@ It is set using the command line option below.
+@=
+  _register_option("optimize-tables", no_argument, &optimize_tables, 1, "")@;
+
+@ The reason to implement the table output routine as a macro is to avoid
+writing separate functions for tables of different types of data
+(strings as well as integers). The output is controlled by each table's
+{\it descriptor\/} defined below. A more sophisticated approach is
+possible, but this code is merely a `patch', so we are not after full
+generality\footnote{A somewhat cleaner way to achieve the same effect
+is to use the \.{\_Generic} facility of \Cee11.}.
+
+@d output_table(table_desc, table_name, stream)
+  if ( output_desc.output_##table_name ) {
+
+    int i, j = 0;
+
+    if ( optimize_tables ) {
+
+      fprintf( stream, "\\setoptopt{%s}%%\n", table_desc.name );
+
+      if ( !table_desc.optimized_numeric ) {
+
+        fprintf( stream, "\\beginoptimizednonnumeric{%s}%%\n", table_desc.name );
+
+      }
+
+      for( i = 0; i < sizeof(table_name)/sizeof(table_name[0]) - 1; i++) {
+
+        if ( table_desc.formatter ) {
+
+          table_desc.formatter( stream, i );
+
+        } else {
+
+          fprintf( stream, table_desc.optimized_numeric, table_desc.name, i, table_name[i] );
+
+        }
+
+      }
+
+      if ( table_desc.formatter ) {
+
+        table_desc.formatter( stream, -i );
+
+      } else {
+
+        fprintf( stream, table_desc.optimized_numeric, table_desc.name, i, table_name[i] );
+
+      }
+
+      if ( table_desc.cleanup ) {
+
+        table_desc.cleanup( &table_desc );
+
+      }
+
+    } else {
+
+      fprintf( stream, table_desc.preamble, table_desc.name );
+
+      for( i = 0; i < sizeof(table_name)/sizeof(table_name[0]) - 1; i++) {
+
+        if ( table_desc.formatter ) {
+
+          j += table_desc.formatter( stream, i );
+
+        } else {
+
+          if ( table_name[i] ) {
+
+            j += fprintf( stream, table_desc.separator, table_name[i] );
+
+          } else {
+
+            j += fprintf( stream, "%s", table_desc.null );
+
+          }
+
+        }
+
+        if ( j > MAX_PRETTY_LINE && table_desc.prettify ) {
+
+          fprintf( stream, "\n" );
+          j = 0;
+
+        }
+      }
+
+      if ( table_desc.formatter ) {
+
+        table_desc.formatter( stream, -i );
+
+      } else {
+
+        if ( table_name[i] ) {
+
+          fprintf( stream, table_desc.postamble, table_name[i] );
+
+        } else {
+
+          fprintf( stream, "%s", table_desc.null_postamble );
+
+        }
+
+      }
+
+      if ( table_desc.cleanup ) {
+
+        table_desc.cleanup( &table_desc );
+
+      }
+    }
+  }
+
+@=
+  struct table_d {
+
+    @@;
+
+  };
+
+@ @=
+  char *name;
+  char *preamble;
+  char *separator;
+  char *postamble;
+  char *null_postamble;
+  char *null;
+  char *optimized_numeric;
+  bool prettify;
+  int (*formatter)( FILE *, int );
+  void (*cleanup)( struct table_d * );
+
+@ Tables are output first. The action output code must come last, since
+it changes the values of the tables to achieve its goals. Again, a
+different approach, one that saves the data first, is possible, but
+simplicity was deemed more important than total generality at this
+point.
+@=
+  @@;
+
+@ One more application of the `gather the names first, then process'
+technique.
+@=
+#define _register_table_d(name) @[output_table(name##_desc, name, tables_out);@]
+  @@;
+#undef _register_table_d
+
+@ Tables will be output by each driver. A placeholder here, for
+\CWEAVE's peace of mind.
+@
= + +@ Action output invokes a totally new level of dirty code. If tables, +constants, and tokens are just data structures, actions are executable +commands. We can only hope to cycle through all the actions, which is +enough to successfully use \bison\ and \flex\ to generate \TeX. The +|switch| statement containing the actions is embedded in the parser +function so to get access to each action one has to coerce |yyparse()| to +jump to each case. Here is where we need the table manipulation. The +appropriate code is highly specific to the program used (since \bison\ +and \flex\ parsing and scanning functions had to be `reverse +engineered' to make them do what we want), +so at this point we simply declare the options controlling the level +of detail and the type of actions output. +@= + static int bare_actions = 0; /* (|static| for local variables) and |int| to pacify the compiler + (for a constant initializer and compatible type) */ + static int optimize_actions = 0; + +@ The first of the following options allows one to output an action switch without the +actions themselves. It is useful when one needs to output a \TeX\ +parser for a grammar file that is written in \Cee. In this case it +will be impossible to cycle through actions (as no setup code has been +executed), so the parser invocation is omitted. + +The second option splits the action switch into several macros to speed up +the processing of the action code. + +The last argument of the `flexible' macro below is supposed to be an +extended description of each option which can be later utilized by a +|usage()| function. +@= + _register_option("bare-actions", no_argument, &bare_actions, 1, "")@; + _register_option("optimize-actions", no_argument, &optimize_actions, 1, "")@; + +@ The rest of the action output code mimics that for table output, starting with +the descriptor. To make the output format more flexible, this +descriptor should probably be turned into a specialized routine. +@= + struct action_d { + + char *preamble; + char *act_setup; + char *act_suffix; + char *action1; + char *actionn; + char *postamble; + void (*print_rule)( int ); + void (*cleanup)( struct action_d * ); + + }; + +@ @= + bool output_actions:1; + +@ Nothing is output by default, including actions. +@= + @[@].output_actions = 0,@[@]@; + +@ @= + struct action_d action_desc = {0}; + +@ Each function below outputs the \TeX\ code of the appropriate +action when the action is `run' by the action output switch. +The main concern in designing these functions is to make the +code easier to look at. Further explanation is given in the grammar +file. If the parser is doing its job, this is the only place where one +would actually see these as functions (or, rather, macros). + +In compliance with paragraph 6.10.8(2)\footnote{[$\ldots$] {\it Any +other predefined macro names shall begin with a leading underscore +followed by an uppercase letter or a second underscore.}} of the \ISO\ +\Cee11 standard the names of these macros do not start with an +underscore, since the first letter of \.{TeX} is +uppercase\footnote{One might wonder why one of these functions is +defined as a \CWEB\ macro while the other is put into the preamble `by +hand'. It really makes no difference, however, the reason the second +macro is defined explicitly is \CWEB's lack of awareness of `variadic' +macros which produces undesirable typesetting artefacts.}. 
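+As a quick illustration of the macros defined below,
+|TeX_( "/yylexnext" )| writes the single line `\.{\ /yylexnext\%}' to
+the output file (with \.{/} acting as the escape character once the
+action switch preamble changes its \.{\\catcode}), and the variadic
+|TeX__| does the same after splicing printf-style arguments into its
+format string. The control sequence \.{/yylexnext} is simply the one
+that appears most often in the scanner actions elsewhere in this
+package; any other action text works the same way.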
+\def\TeXx{\hbox{\.{TeX\_}}} +\def\TeXa{\hbox{\.{TeXa}}} +\def\TeXb{\hbox{\.{TeXb}}} +\def\TeXf{\hbox{\.{TeXf}}} +\def\TeXfo{\hbox{\.{TeXfo}}} +\def\TeXao{\hbox{\.{TeXao}}} +\def\TeXxx{\hbox{\.{TeX\_\_}}} +@s TeX__ TeX +@d TeX_( string ) fprintf( tables_out, " %s%%\n", string ) +@d TeXb( string ) TeX_( string ) +@d TeXa( string ) TeX_( string ) +@d TeXf( string ) TeX_( string ) +@d TeXfo( string ) TeX_( string ) +@d TeXao( string ) TeX_( string ) + +@q \CWEB\ is not aware of variadic macros, so this has to be done the old way@> +@<\Cee\ preamble@>= +#define TeX__( string, ... ) @[fprintf( tables_out, " " string "%s\n", __VA_ARGS__, "%" )@] + +@ We begin with a few macros to facilitate the output +of tables in the format that \TeX\ can understand. As there is no +perfect way to represent an array in \TeX\ a rather weak compromise +was settled upon. Further explanation of this choice is given in the \TeX\ +file that implements the \TeX\ parser for the \bison\ input grammar. + +@d tex_table_generic(table_name) + table_name##_desc.preamble = "\\newtable{%s}{%%\n"; + table_name##_desc.separator = "%d\\or "; + table_name##_desc.postamble = "%d}%%\n"; + table_name##_desc.null_postamble = "0}%%\n"; + table_name##_desc.null = "0\\or "; + table_name##_desc.optimized_numeric = "\\expandafter\\def\\csname %s\\parsernamespace %d\\endcsname{%d}%%\n"; + table_name##_desc.prettify = true; + table_name##_desc.formatter = NULL; + table_name##_desc.cleanup = NULL; + output_desc.output_##table_name = 1; + +@d tex_table(table_name) + tex_table_generic(table_name); + table_name##_desc.name = #table_name; + +@ An approach paralleling the table output +scheme is taken with constants. Since constants are \Cee\ {\it +macros\/} one has to be careful to avoid the temptation of using +constant {\it names\/} directly as names for fields in +structures. They will simply be replaced by the constants' +values. When the names are concatenated with other tokens, however, +the \Cee\ preprocessor postpones the macro expansion until the +concatenation is complete (see clauses 6.10.3.1, 6.10.3.2, and +6.10.3.3 of the \ISO\ \Cee\ Standard, \cite[ISO/C11]). Unless the result of the +concatenation is still expandable, the expansion will halt. +@= + struct const_d { + + char *format; + char *name; + + }; + +@ @= +#define _register_const_d(c_name) @[struct const_d c_name##_desc;@] + @@; +#undef _register_const_d + +@ @= +#define _register_const_d(c_name) @[bool output_##c_name:1;@] + @@; +#undef _register_const_d + +@ @= +#define _register_const_d(c_name) @[@[@].output_##c_name = 0,@[@]@] + @@; +#undef _register_const_d + +@ @= + fprintf( tables_out, "%%\n%% constant definitions\n%%\n" ); + @@; + +@ @= +{ + + int any_constants = 0; +#define _register_const_d(c_name) \ + \ + if ( output_desc.output_##c_name ) { \ + const_out( tables_out, c_name##_desc, c_name)@; \ + any_constants = 1; \ + } + + @@; + +#undef _register_const_d + + if ( any_constants ); /* this is merely a placeholder statement */ + +} + +@ Constants are very driver specific, so to make \CWEAVE\ happy $\ldots$ + @= + +@ A macro to help with constant output. +@d const_out(stream, c_desc, c_name) fprintf(stream, c_desc.format, c_desc.name, c_name); + +@ Action switch output routines modify the automata tables and +therefore have to be output last. Since action output is highly +automaton specific, we leave this section blank here, to pacify +\CWEAVE\ in case this file is typeset by itself. +@= + +@*2 Error codes. 
+ +@= + enum err_codes{ @@, LAST_ERROR }; + +@ @= + NO_MEMORY, BAD_STRING, BAD_MIX_FORMAT,@[@] + +@ A lot more care is necessary to output the token table. A number of precautions are taken +to ensure that a maximum possible range of names can be passed safely to \TeX. This involves some +manipulation of \.{\\catcode}'s and control characters. The +complicated part is left to \TeX\ so the output code can be kept +simple. The helper function below is used to `combine' two strings. + +@d MAX_PRETTY_LINE 100 + +@= + char *mix_string( char *format, ... ); + +@ @= + char *mix_string( char *format, ... ) { + + char *buffer; + size_t size = 0; + int length = 0; + int written = 0; + char *formatp = format; + va_list ap, ap_save; + + va_start( ap, format ); + va_copy( ap_save, ap ); + + size = strnlen( format, MAX_PRETTY_LINE * 5 ); + + if ( size >= MAX_PRETTY_LINE * 5 ) { + + fprintf( stderr, "%s: runaway string?\n", __func__ ); + exit(BAD_STRING); + + } + + while ( (formatp = strstr( formatp, "%" )) ) { + + switch( formatp[1] ) { + + case 's':@; + length = strnlen( va_arg( ap, char * ), MAX_PRETTY_LINE * 5 ); + + if ( length >= MAX_PRETTY_LINE * 5 ) { + + fprintf( stderr, "%s: runaway string?\n", __func__ ); + exit(BAD_STRING); + + } + + size += length; + size -=2; + formatp++; + break; + + case '%':@; + size--; + formatp +=2; + + default: + + printf( "%s: cannot handle %%%c in mix string format\n", __func__, formatp[1] ); + exit( BAD_MIX_FORMAT ); + + } + + } + + buffer = (char *)malloc( sizeof(char) * size + 1 ); + + if ( buffer ) { + + written = vsnprintf( buffer, size + 1, format, ap_save ); + + if ( written < 0 || written > size ) { + + fprintf( stderr, "%s: runaway string?\n", __func__ ); + exit(BAD_STRING); + + } + + } else { + + fprintf( stderr, "%s: failed to allocate memory for the output string\n", __func__ ); + exit(NO_MEMORY); + + } + + va_end( ap ); + va_end( ap_save ); + + return buffer; + + } + +@*2Initial setup. Depending on the output mode (right now only \TeX\ +and `tokens only' (in the \bison\ `driver') are supported) the format of each table, action +field and token has to be set up. + +@= + enum output_mode {@@, LAST_OUT}; + +@ And to calm down \CWEAVE\ $\ldots$ +@= + +@ \TeX\ is the main output mode. +@= + enum output_mode mode = TEX_OUT; + +@*2Command line processing. This program uses a standard way of parsing the command +line, based on |getopt_long|. At the heart of the setup are the array below with a +couple of supporting variables. + +@= +#include +#include +#include + +@ @= + const char *usage = "%s [options] output_file\n"; + +@ @= + int c, option_index = 0;@# + + enum higher_options{NON_OPTION = 0xFF, @@, LAST_HIGHER_OPTION}; + + static struct option long_options[] = { @/@[ + @@;@/ + {0, 0, 0, 0} @] + };@# + +@ The main loop of the command line option processing follows. This +can be used as a template for setting up the option processing. The +specific cases are added to in the course of adding new features. 
+ +@= + opterr = 0; /* we do our own error reporting */ + + FOREVER { + + c = getopt_long (argc, argv, ":" @, long_options, &option_index); + + if (c == -1) break; + + switch (c) { + + case 0:@; /* it is a flag, the name is kept in |long_options[option_index].name|, + and the value can be found in |long_options[option_index].val| */ + break; + + @t}\4{@>@; + @t}\4{@>@; + + case '?':@; + fprintf (stderr, "Unknown option: `%s', see `Usage' below\n\n", argv[optind - 1]); + fprintf(stderr, usage, argv[0]); + exit(1); + break; + + case ':':@; + fprintf (stderr, "Missing argument for `%s'\n\n", argv[optind - 1]); + fprintf(stderr, usage, argv[0]); + exit(1); + break; + + default:@; + printf ("warning: feature `%c' is not yet implemented\n", c); + } + + } + + if (optind >= argc) + { + + fprintf( stderr, "No output file specified!\n" ); + + } else { + + tables_out = fopen( argv[optind++], "w" ); + + } + + if (optind < argc) + { + + printf ("script files to be loaded: "); + while (optind < argc) + printf ("%s ", argv[optind++]); + putchar ('\n'); + + } + +@ @= +#define _register_option(name, arg_flag, loc, val, exp) @[{name, arg_flag, loc, val},@[@]@] + @@; +#undef _register_option + +@ In addition to spelling out the full command line option name (such +as \.{--help}) |getopt_long| gives the user a choice of using a +shortcut (say, \.{-h}). As individual options are treated in drivers +themselves, there are no shortcuts to supply at this point. We leave +this section (and a number of others) empty to be filled in with the +driver specific code to pacify \CWEAVE. + +@= + +@ Some options have one-letter `shortcuts', whereas others only exist +in `fully spelled-out' form. To easily keep track of the latter, a +special enumerated list is declared. To add to this list, simply add +to the \CWEB\ section below. +@= + +@ @= + +@ @= + diff --git a/support/splint/cweb/fk.w b/support/splint/cweb/fk.w new file mode 100644 index 0000000000..04685ff9aa --- /dev/null +++ b/support/splint/cweb/fk.w @@ -0,0 +1,510 @@ +@q Copyright 2012-2014, Alexander Shibakov@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@> +@q GNU General Public License for more details.@> + +@q You should have received a copy of the GNU General Public License@> +@q along with SPLinT. If not, see .@> + +@*1 \flex\ specific routines. The output of the scanner automaton +follows the steps similar to the ones taken during the parser output. +The major difference is in the output of actions and constants. +@*2 Tables. +As in the case of a parser we start with all the table names. +@= + _register_table_d(yy_accept)@; + _register_table_d(yy_ec)@; + _register_table_d(yy_meta)@; + _register_table_d(yy_base)@; + _register_table_d(yy_def)@; + _register_table_d(yy_nxt)@; + _register_table_d(yy_chk)@; + +@*2Actions. The scanner function, |yylex()|, has been reverse +engineered to execute all portions of +the action code. The method chosen here makes sure that none of the +tables gets written past its last element. 
+@= + int max_yybase_entry = 0; + int max_yyaccept_entry = 0; + int max_yynxt_entry = 0; + int max_yy_ec_entry = 0; + +@ The `exotic' scanner constants treated below are the constants used +to control the scanner code itself. Unfortunately they are not given +any names that can be used by the `driver' to output them in a simple +way. +@= + + { + int i; + + for ( i = 0; i < sizeof( yy_base )/sizeof( yy_base[0] ); i++ ) { + + if ( yy_base[i] > max_yybase_entry ) { + + max_yybase_entry = yy_base[i]; + + } + + } + + for ( i = 0; i < sizeof( yy_nxt )/sizeof( yy_nxt[0] ); i++ ) { + + if ( yy_nxt[i] > max_yynxt_entry ) { + + max_yynxt_entry = yy_nxt[i]; + + } + + } + + for ( i = 0; i < sizeof( yy_accept )/sizeof( yy_accept[0] ); i++ ) { + + if ( yy_accept[i] > max_yyaccept_entry ) { + + max_yyaccept_entry = yy_accept[i]; + + } + } + + for ( i = 0; i < sizeof( yy_ec )/sizeof( yy_ec[0] ); i++ ) { + + if ( yy_ec[i] > max_yy_ec_entry ) { + + max_yy_ec_entry = yy_ec[i]; + + } + + } + + } + +@ @= + if ( output_desc.output_actions ) { + + int i, j; + yyscan_t fake_scanner; + + fprintf( tables_out, "%s", action_desc.preamble ); + + if ( !bare_actions ) { + + if ( yylex_init( &fake_scanner ) ) { + + printf( "Cannot initialize the scanner\n" ); + + } + + yy_ec[0] = 0; + yy_base[1] = max_yybase_entry; + yy_chk[max_yybase_entry] = 1; + yy_nxt[max_yybase_entry] = 1; + + } + + for ( i = 1; i <= max_yyaccept_entry; i++ ) { + + fprintf( tables_out, action_desc.act_setup, i ); + + if ( i == YY_END_OF_BUFFER ) { + + fprintf( tables_out, " %% YY_END_OF_BUFFER\n%s\n", " \\yylexeofaction" ); + + } else { + + fprintf( tables_out, "\n" ); + + if ( !bare_actions ) { + + (( struct yyguts_t *)fake_scanner)->yy_hold_char = 0; + yy_accept[1] = i; + yylex( NULL, fake_scanner ); + + } + } + + fprintf( tables_out, action_desc.act_suffix, i ); + + } + + fprintf( tables_out, " %% end of file states:\n%s\n", + " %#define YY_STATE_EOF(state) (YY_END_OF_BUFFER + state + 1)" + ); + + if ( max_eof_state == 0 ) { /* in case the user has not declared any states */ + + max_eof_state = YY_STATE_EOF( INITIAL ); + + } + + for ( ; i <= max_eof_state; i++ ) { + + fprintf( tables_out, action_desc.act_setup, i ); + + if ( !bare_actions ) { + + fprintf( tables_out, "\n" ); + + (( struct yyguts_t *)fake_scanner)->yy_hold_char = 0; + yy_accept[1] = i; + yylex( NULL, fake_scanner ); + + } + + fprintf( tables_out, action_desc.act_suffix, i ); + + } + + fprintf( tables_out, "%s", action_desc.postamble ); + + if ( action_desc.cleanup ) { + + action_desc.cleanup( &action_desc ); + + } + + } + + @@; + @; + fprintf( tables_out, "\\constset{YYECMAGIC}{%d}%%\n", yy_ec_magic ); + fprintf( tables_out, "\\constset{YYMAXEOFSTATE}{%d}%%\n", max_eof_state ); + +@ @= + BAD_SCANNER,@[@] + +@ @= + int yy_ec_magic; + +@ The `magic' constants are similar to the `exotic' ones mentioned +above except the methods used to compute them rely on reverse +engineering the scanner function. Since this changes the scanner +tables it has to be done after the `driver' has finished going through all +the actions. 
+@= + { + int i, j; + char fake_yytext[ YY_MORE_ADJ + 1 ]; + + yyscan_t yyscanner; + struct yyguts_t *yyg; + + if ( yylex_init( &yyscanner ) ) { + + printf( "Cannot initialize the scanner\n" ); + exit( BAD_SCANNER ); + + } + + yyg = (struct yyguts_t *)yyscanner; + yyg->yy_start = 0; + yy_set_bol(0); + yyg->yytext_ptr = fake_yytext; + yyg->yy_c_buf_p = yyg->yytext_ptr + 1 + YY_MORE_ADJ; + + fake_yytext[YY_MORE_ADJ] = 0; /* |*yy_cp = 0;| */ + + yy_accept[0] = 0; + yy_base[0] = 0; + + for ( i = 0; i < sizeof( yy_chk )/sizeof( yy_chk[0] ); i++ ) { + + yy_chk[i] = 0; + + } + + for ( i = 0; i < sizeof( yy_nxt )/sizeof( yy_nxt[0] ); i++ ) { + + yy_nxt[i] = i; + + } + + yy_ec_magic = yy_get_previous_state( yyscanner ); + + } + +@*2State names. There is no easy way to output the symbolic names for +states, so this has to be done by hand while the actions are output. The +state names are accumulated in a list structure and are printed out +after the action output is complete. + +Note that parsing the scanner file is only partially helpful (even though the +extended parser and scanner can recognize the \.{\%x} option). All that can +be done is output the state {\it names\/} but not their numerical +values, since all such names are macros whose values are only +known to the \flex\ generated scanner. + +@d Define_State( st_name, st_num ) do { + + struct lexer_state_d *this_state; + + this_state = malloc( sizeof(struct lexer_state_d) ); + this_state->name = st_name; + this_state->value = st_num; + this_state->next = NULL; + + if ( last_state ) { + + last_state->next = this_state; + last_state = this_state; + + } else { + + last_state = state_list = this_state; + + } + + if ( YY_STATE_EOF( st_num ) > max_eof_state ) { + + max_eof_state = YY_STATE_EOF( st_num ); + + } + +} while (0); + +@= + int max_eof_state = 0; + + struct lexer_state_d { + + char *name; + int value; + struct lexer_state_d *next; + + }; + + struct lexer_state_d *state_list = NULL; + struct lexer_state_d *last_state = NULL; + +@ @= + { + + struct lexer_state_d *current_state; + struct lexer_state_d *next_state; + + current_state = next_state = state_list; + + if ( current_state ) { + + fprintf( tables_out, "\\def\\setflexstates{%%\n" + " \\stateset{INITIAL}{%d}%%\n", INITIAL ); + + while ( current_state ) { + + fprintf( tables_out, " \\stateset{%s}{%d}%%\n", + current_state->name, current_state->value); + + current_state = current_state->next; + + free( next_state ); + next_state = current_state; /* the |name| field is not + deallocated because it is not + allocated on the heap */ + + } + + fprintf( tables_out, "}%%\n%%\n" ); + + } + + } + +@*2Constants. +@= + _register_const_d(YY_END_OF_BUFFER_CHAR)@; + _register_const_d(YY_NUM_RULES)@; + _register_const_d(YY_END_OF_BUFFER)@; + +@*2Output modes. +The output modes are the same as those in the parser driver with some minor +changes. + +@*3Generic output. Generic output is not programmed yet. +@= + GENERIC_OUT,@[@] + +@ @= + case GENERIC_OUT:@; + printf( "This mode is not supported yet\n" ); + exit(0); + break; + +@*3\TeX~mode. The \TeX\ mode is the main focus of this software. 
+@= + TEX_OUT,@[@] + +@ @= + case TEX_OUT:@; + @@; + @@; + @@; + break; + +@ @= + tex_table_generic(yy_accept); + yy_accept_desc.name = "yyaccept"; + tex_table_generic(yy_ec); + yy_ec_desc.name = "yyec"; + tex_table_generic(yy_meta); + yy_meta_desc.name = "yymeta"; + tex_table_generic(yy_base); + yy_base_desc.name = "yybase"; + tex_table_generic(yy_def); + yy_def_desc.name = "yydef"; + tex_table_generic(yy_nxt); + yy_nxt_desc.name = "yynxt"; + tex_table_generic(yy_chk); + yy_chk_desc.name = "yychk"; + +@ @= + + if ( optimize_actions ) { + + action_desc.preamble = "%\n% the big switch\n%\n"@/ + "\\catcode`\\/=0\\relax\n%\n"@/ + "\\def\\yydoactionswitch#1{%%\n"@/ + " \\let\\yylextail\\yylexcontinue\n"@/ + " \\csname doflexaction\\number #1\\parsernamespace\\endcsname\n"@/ + " \\yylextail\n"@; + "}\\stashswitch{yydoactionswitch}%\n"; + action_desc.act_setup = "\n\\expandafter\\def\\csname doflexaction%d\\parsernamespace\\endcsname{%%\n" + " \\YYRULESETUP"; + action_desc.act_suffix = "}%% end of rule %d\n"; + action_desc.action1 = NULL; + action_desc.actionn = NULL; + action_desc.postamble = "\\catcode`\\/=12\\relax\n%\n"; + action_desc.print_rule = NULL; + action_desc.cleanup = NULL; + output_desc.output_actions = 1; + + } else { + + action_desc.preamble = "%\n% the big switch\n%\n"@/ + "\\catcode`\\/=0\\relax\n%\n"@/ + "\\def\\yydoactionswitch#1{%%\n \\let\\yylextail\\yylexcontinue\n"@; + " \\ifcase#1\\relax\n"; + action_desc.act_setup = " \\or\n" + " \\YYRULESETUP %% (rule %d) "; + action_desc.act_suffix = " %% end of rule %d\n"; + action_desc.action1 = NULL; + action_desc.actionn = NULL; + action_desc.postamble = " \\else\n \\fi\n \\yylextail\n}\\stashswitch{yydoactionswitch}%\n\\catcode`\\/=12\\relax\n%\n"; + action_desc.print_rule = NULL; + action_desc.cleanup = NULL; + output_desc.output_actions = 1; + + } + +@ \TeX\ constant output is another place where the techniques described above are applied. A few names +are handled separately, because they contain underscores. +\def\YYxENDxOFxBUFFERxCHARxdesc{\.{YY\_END\_OF\_BUFFER\_CHAR\_}\\{desc}} +\def\YYxNUMxRULESxdesc{\.{YY\_NUM\_RULES\_}\\{desc}} +\def\YYxENDxOFxBUFFERxdesc{\.{YY\_END\_OF\_BUFFER\_}\\{desc}} + +@s YY_END_OF_BUFFER_CHAR_desc TeX +@s YY_NUM_RULES_desc TeX +@s YY_END_OF_BUFFER_desc TeX + +@= +#define _register_const_d(c_name) @[c_name##_desc.format = "\\constset{%s}{%d}%%\n"; \ + c_name##_desc.name = #c_name; \ + output_desc.output_##c_name = 1;@] + @@; +#undef _register_const_d + + YY_END_OF_BUFFER_CHAR_desc.name = "YYENDOFBUFFERCHAR"; + YY_NUM_RULES_desc.name = "YYNUMRULES"; + YY_END_OF_BUFFER_desc.name = "YYENDOFBUFFER"; + +@ @= + fprintf( tables_out, "\\constset{YYMAXREALCHAR}{%ld}%%\n", sizeof( yy_accept )/(sizeof( yy_accept[0] )) - 1 ); + fprintf( tables_out, "\\constset{YYBASEMAXENTRY}{%d}%%\n", max_yybase_entry ); + fprintf( tables_out, "\\constset{YYNXTMAXENTRY}{%d}%%\n", max_yynxt_entry ); + fprintf( tables_out, "\\constset{YYMAXRULENO}{%d}%%\n", max_yyaccept_entry ); + fprintf( tables_out, "\\constset{YYECMAXENTRY}{%d}%%\n", max_yy_ec_entry ); + +@*2 Command line options. +We start with the most obvious option, the one begging for help. 
+ +@= + LONG_HELP,@[@] + +@ @= + _register_option("help", no_argument, 0, LONG_HELP, "")@; + +@ @= + "h" + +@ @= + case 'h': /* short help */@; + fprintf(stderr, "Usage: %s [options] output_file\n", argv[0]); + exit(0); + break; /* should not be needed */ + + case LONG_HELP:@; + fprintf(stderr, "%s [--mode=TeX:options] output_file outputs tables\n" + " and constants for a TeX scanner\n", argv[0]); + exit(0); + break; /* should not be needed */ + +@ @= + _register_option("debug", optional_argument, 0, 'b', "")@; + _register_option("mode", required_argument, 0, 'm', "")@; + _register_option("table-separator", required_argument, 0, 'z', "")@; + + _register_option("format", required_argument, 0, 'f', "")@; /* name? */ + _register_option("table", required_argument, 0, 't', "")@; /* specific table */ + _register_option("constant", required_argument, 0, 'c', "")@; /* specific constant */ + _register_option("name-length", required_argument, 0, 'l', "")@; /* change |MAX_NAME_LENGTH| */ + _register_option("token", required_argument, 0, 'n', "")@; /* specific token */ + _register_option("run-scan", required_argument, 0, 'p', "")@; /* run the scanner */ + _register_option("scan-file", required_argument, 0, 'i', "")@; /* input for the scanner */ + +@ The string below is a list of short options. +@= + "b::z:m:f:t:" + +@ A few options can be immediately discussed. +@= + int debug_level = 0; + char *table_separator = "%s "; + +@ @= + case 'b': /* debug (level) */@; + debug_level = optarg ? atoi(optarg) : 1; + break; + + case 'm': /* output mode */@; + switch( optarg[0] ) { + + case 'T': + case 't':@; + mode = TEX_OUT; + break; + + case 'b': + case 'B': + case 'g': + case 'G':@; + mode = GENERIC_OUT; + break; + + default:@; + break; + + } + break; + + case 'z': + table_separator = (char *)malloc( (strlen(optarg) + 1)*sizeof(char) ); + strcpy(table_separator, optarg); + break; diff --git a/support/splint/cweb/lo.w b/support/splint/cweb/lo.w new file mode 100644 index 0000000000..b28711423c --- /dev/null +++ b/support/splint/cweb/lo.w @@ -0,0 +1,797 @@ +@q Copyright 2012-2014 Alexander Shibakov@> +@q Copyright 2002-2014 Free Software Foundation, Inc.@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@> +@q GNU General Public License for more details.@> + +@q You should have received a copy of the GNU General Public License@> +@q along with SPLinT. If not, see .@> + +@*1 The scanner for grammar syntax. +\ifx\parsernamespace\UNDEFINED + \input limbo.sty + \input grabstates.sty + \immediate\openout\stlist=lo_states.h +\fi +The fact that \bison\ has a relatively straightforward grammar is +due to the sophistication of its scanner. The primary reason for this +increased complexity is \bison's awareness +of syntax variations in its input files. In addition to the grammar +syntax, the parser has to be able to deal with extended \Cee\ syntax +inside \bison's actions. 
+ +Since the names of the scanner {\it states\/} reside in the common +namespace with other variables, in order to make the \TeX\ version of +the scanner aware of the numerical values of the states, a special +procedure is required. It is executed as part of \flex's user +initialization code but the data for it has to be collected +separately. The procedure is declared in the preamble section of the scanner. + +Below, we follow the same convention (of italicizing the original +comments) as in the code for the parser. +@(lo.ll@>= +@G + @> @ @= +%{@> @ @=%} + @> @ @= +%% + @> @ @= +%% +@O +void define_all_states( void ) { + @@; +} +@o +@g + +@ It is convenient to abbreviate some commonly used subexpressions. +@= + @@; +@G +letter [.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_] +notletter [^.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ_]{-}[%\{] +id {letter}({letter}|[-0-9])* +int [0-9]+ +@g + +@ {\it Zero or more instances of backslash-newline. Following \gcc, allow +white space between the backslash and the newline}. +@= +@G +splice (\\[ \f\t\v]*\n)* +@g + +@ {\it An equal sign, with optional leading whitespaces. This is used in some +deprecated constructs}. +@= +@G +eqopt ([[:space:]]*=)? +@g + +@ This is how the code for state value output is put inside the +routine mentioned above. The state information is collected by a +special small scanner that is coupled with the bootstrap parser. This +way, all the necessary token information comes `hardwired' in the +bootstrap parser, and the small scanner itself does not use any state +manipulation and thus can get away without any state setup. It can, +however, scan just enough of the \flex\ syntax to extract the state +information from it (only the state {\it names\/} are needed) and +output it in the form of a header file for the `real' lexer output +`driver' to use. +@= +#define _register_name( name ) @[Define_State( #name, name )@] +#include "lo_states.h" +#undef _register_name + +@ {\it A \Cee-like comment in directives/rules}. +@= +@G +%x SC_YACC_COMMENT +@g + +@ {\it Strings and characters in directives/rules}. +@= +@G +%x SC_ESCAPED_STRING SC_ESCAPED_CHARACTER +@g + +@ {\it A identifier was just read in directives/rules. Special state +to capture the sequence `\.{identifier:}'}. +@= +@G +%x SC_AFTER_IDENTIFIER +@g + +@ {\it \POSIX\ says that a tag must be both an id and a \Cee\ union member, but +historically almost any character is allowed in a tag. We +disallow \prodstyle{NUL}, as this simplifies our implementation. We match +angle brackets in nested pairs: several languages use them for +generics/template types}. +@= +@G +%x SC_TAG +@g + +@ {\it +\def\aterm{\item{\sqbullet}\ignorespaces}% +\setbox0=\hbox{\sqbullet\enspace}% +\parindent=0pt +\advance\parindent by \wd0 +Four types of user code: +\aterm prologue (code between \.{\%\{} \.{\%\}} in the first section, before \prodstyle{\%\%}); + +\aterm actions, printers, union, etc, (between braced in the middle section); + +\aterm epilogue (everything after the second \prodstyle{\%\%}); + +\aterm predicate (code between \.{\%?\{} and \.{\}} in middle section); +}% +@= +@G +%x SC_PROLOGUE SC_BRACED_CODE SC_EPILOGUE SC_PREDICATE +@g + +@ {\it \Cee\ and \Cee++ comments in code}. +@= +@G +%x SC_COMMENT SC_LINE_COMMENT +@g + +@ {\it Strings and characters in code}. +@= +@G +%x SC_STRING SC_CHARACTER +@g + +@ Bracketed identifiers support. 
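+(In a \bison\ grammar a bracketed identifier, such as the one in
+\.{exp[left]}, attaches an explicit name to a rule component; the two
+states declared below collect such names. The fragment \.{exp[left]}
+is, naturally, only an illustration.)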
+@= +@G +%x SC_BRACKETED_ID SC_RETURN_BRACKETED_ID +@g + +@ @= + +#include +#include + +@ The code for the generated scanner is highly dependent on the options +supplied. Most of the options below are essential for the scheme +adopted in this package to work. +@= +@G +%option bison-bridge +%option noyywrap nounput noinput reentrant +%option noyy_top_state +%option debug +%option stack +%option outfile="lo.c" +@g + +@*2 Tokenizing with regular expressions. +Here is a full collection of regular expressions employed by the scanner. +@= + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + @@; + +@ @= +@G + +{ + /* {\it Comments and white space.} */ + "," {@> @[TeX_( "/yycomplain{stray `,' treated as white space}/yylexnext" );@]@=} + [ \f\n\t\v] | + "//".* {@> @[TeX_( "/yylexnext" );@]@=} +@= "/*" {@> @[TeX_( "/contextstate/YYSTART /yyBEGIN{SC_YACC_COMMENT}/yylexnext" );@]@=}@>@/ + /* {\it |@[#line@]| directives are not documented, and may be withdrawn or modified in future versions of \bison.} */ + ^"#line "{int}(" \"".*"\"")?"\n" {@> @[TeX_( "/yylexnext" );@]@=} +} +@g + +@ {\it For directives that are also command line options, the regex must be +\.{"\%..."} after \.{"[-\_]"}'s are removed, and the directive must match the \.{--long} +option name, with a single string argument. Otherwise, add exceptions +to \.{../build-aux/cross-options.pl}}. For most options the scanner +returns a pair of pointers as the value. + +@= +@G + +{ + "%binary" {@> @[TeX_( "/yylexreturnptr{PERCENT_NONASSOC}" );@]@=} + "%code" {@> @[TeX_( "/yylexreturnptr{PERCENT_CODE}" );@]@=} + "%debug" {@> @[@@]@=} + "%default-prec" {@> @[TeX_( "/yylexreturnptr{PERCENT_DEFAULT_PREC}" );@]@=} + "%define" {@> @[TeX_( "/yylexreturnptr{PERCENT_DEFINE}" );@]@=} + "%defines" {@> @[TeX_( "/yylexreturnptr{PERCENT_DEFINES}" );@]@=} + "%destructor" {@> @[TeX_( "/yylexreturnptr{PERCENT_DESTRUCTOR}" );@]@=} + "%dprec" {@> @[TeX_( "/yylexreturnptr{PERCENT_DPREC}" );@]@=} + "%empty" {@> @[TeX_( "/yylexreturnptr{PERCENT_EMPTY}" );@]@=} + "%error-verbose" {@> @[TeX_( "/yylexreturnptr{PERCENT_ERROR_VERBOSE}" );@]@=} + "%expect" {@> @[TeX_( "/yylexreturnptr{PERCENT_EXPECT}" );@]@=} + "%expect-rr" {@> @[TeX_( "/yylexreturnptr{PERCENT_EXPECT_RR}" );@]@=} + "%file-prefix" {@> @[TeX_( "/yylexreturnptr{PERCENT_FILE_PREFIX}" );@]@=} + "%fixed-output-files" {@> @[TeX_( "/yylexreturnptr{PERCENT_YACC}" );@]@=} + "%initial-action" {@> @[TeX_( "/yylexreturnptr{PERCENT_INITIAL_ACTION}" );@]@=} + "%glr-parser" {@> @[TeX_( "/yylexreturnptr{PERCENT_GLR_PARSER}" );@]@=} + "%language" {@> @[TeX_( "/yylexreturnptr{PERCENT_LANGUAGE}" );@]@=} + "%left" {@> @[TeX_( "/yylexreturnptr{PERCENT_LEFT}" );@]@=} + "%lex-param" {@> @[@@]@=} + "%locations" {@> @[@@]@=} + "%merge" {@> @[TeX_( "/yylexreturnptr{PERCENT_MERGE}" );@]@=} + "%name-prefix" {@> @[TeX_( "/yylexreturnptr{PERCENT_NAME_PREFIX}" );@]@=} + "%no-default-prec" {@> @[TeX_( "/yylexreturnptr{PERCENT_NO_DEFAULT_PREC}" );@]@=} + "%no-lines" {@> @[TeX_( "/yylexreturnptr{PERCENT_NO_LINES}" );@]@=} + "%nonassoc" {@> @[TeX_( "/yylexreturnptr{PERCENT_NONASSOC}" );@]@=} + "%nondeterministic-parser" {@> @[TeX_( "/yylexreturnptr{PERCENT_NONDETERMINISTIC_PARSER}" );@]@=} + "%nterm" {@> @[TeX_( "/yylexreturnptr{PERCENT_NTERM}" );@]@=} + "%output" {@> @[TeX_( "/yylexreturnptr{PERCENT_OUTPUT}" );@]@=} + "%param" {@> @[@@]@=} + "%parse-param" {@> @[@@]@=} + "%prec" {@> @[TeX_( "/yylexreturnptr{PERCENT_PREC}" );@]@=} + "%precedence" {@> @[TeX_( 
"/yylexreturnptr{PERCENT_PRECEDENCE}" );@]@=} + "%printer" {@> @[TeX_( "/yylexreturnptr{PERCENT_PRINTER}" );@]@=} + "%pure-parser" {@> @[@@]@=} + "%require" {@> @[TeX_( "/yylexreturnptr{PERCENT_REQUIRE}" );@]@=} + "%right" {@> @[TeX_( "/yylexreturnptr{PERCENT_RIGHT}" );@]@=} + "%skeleton" {@> @[TeX_( "/yylexreturnptr{PERCENT_SKELETON}" );@]@=} + "%start" {@> @[TeX_( "/yylexreturnptr{PERCENT_START}" );@]@=} + "%term" {@> @[TeX_( "/yylexreturnptr{PERCENT_TOKEN}" );@]@=} + "%token" {@> @[TeX_( "/yylexreturnptr{PERCENT_TOKEN}" );@]@=} + "%token-table" {@> @[TeX_( "/yylexreturnptr{PERCENT_TOKEN_TABLE}" );@]@=} + "%type" {@> @[TeX_( "/yylexreturnptr{PERCENT_TYPE}" );@]@=} + "%union" {@> @[TeX_( "/yylexreturnptr{PERCENT_UNION}" );@]@=} + "%verbose" {@> @[TeX_( "/yylexreturnptr{PERCENT_VERBOSE}" );@]@=} + "%yacc" {@> @[TeX_( "/yylexreturnptr{PERCENT_YACC}" );@]@=} + + /* {\it deprecated} */ + "%default"[-_]"prec" {@> @[TeX_( "/yypdeprecated{\\%default-prec}" );@]@=} + "%error"[-_]"verbose" {@> @[TeX_( "/yypdeprecated{\\%define parse.error verbose}" );@]@=} + "%expect"[-_]"rr" {@> @[TeX_( "/yypdeprecated{\\%expect-rr}" );@]@=} + "%file-prefix"{eqopt} {@> @[TeX_( "/yypdeprecated{\\%file-prefix}" );@]@=} + "%fixed"[-_]"output"[-_]"files" {@> @[TeX_( "/yypdeprecated{\\%fixed-output-files}" );@]@=} + "%name"[-_]"prefix"{eqopt} {@> @[TeX_( "/yypdeprecated{\\%name-prefix}" );@]@=} + "%no"[-_]"default"[-_]"prec" {@> @[TeX_( "/yypdeprecated{\\%no-default-prec}" );@]@=} + "%no"[-_]"lines" {@> @[TeX_( "/yypdeprecated{\\%no-lines}" );@]@=} + "%output"{eqopt} {@> @[TeX_( "/yypdeprecated{\\%output}" );@]@=} + "%pure"[-_]"parser" {@> @[TeX_( "/yypdeprecated{\\%pure-parser}" );@]@=} + "%token"[-_]"table" {@> @[TeX_( "/yypdeprecated{\\%token-table}" );@]@=} + + /* {\it Semantic predicate.} */ + "%?"[ \f\n\t\v]*"{" {@> @[TeX_( "/yyBEGIN{SC_PREDICATE}/yylexnext" );@]@=} + + "%"{id}|"%"{notletter}([[:graph:]])+ {@> @[@@]@=} + + "=" {@> @[TeX_( "/yylexreturnptr{EQUAL}" );@]@=} + "|" {@> @[TeX_( "/yylexreturnptr{PIPE}" );@]@=} + ";" {@> @[TeX_( "/yylexreturnptr{SEMICOLON}" );@]@=} + + {id} {@> @[@@]@=} + {int} {@> @[TeX_( "/edef/next{/yylval{/nx/anint{/the/yytext}" );@]@; + @> @[TeX_( "{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @> @[TeX_( "/yylexreturn{INT}" );@]@=} + 0[xX][0-9abcdefABCDEF]+ {@> @[TeX_( "/edef/next{/yylval{/nx/hexint{/the/yytext}" );@]@; + @> @[TeX_( "{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @> @[TeX_( "/yylexreturn{INT}" );@]@=} + + /* {\it Identifiers may not start with a digit. Yet, don't silently accept \.{1FOO} as \.{1 FOO}.} */ + {int}{id} {@> @[TeX_( "/yycomplain{invalid identifier: /the/yytext}" );@] + @> @[TeX_( "/yyerrterminate" );@]@=} + + /* {\it Characters.} */ + "'" {@> @[TeX_( "/yyBEGIN{SC_ESCAPED_CHARACTER}/yylexnext" );@]@=} + + /* {\it Strings.} */ + "\"" {@> @[TeX_( "/yyBEGIN{SC_ESCAPED_STRING}/yylexnext" );@]@=} + + /* {\it Prologue.} */ + "%{" {@> @[@@]@=} + + /* {\it Code in between braces.} Originally preceded by \.{\\STRINGGROW} but it is omitted here. 
*/ + "{" {@> @[TeX_( "/lonesting/z@@/yyBEGIN{SC_BRACED_CODE}/yylexnext" );@]@=} + + /* {\it A type.} */ + "<*>" {@> @[TeX_( "/yylexreturnptr{TAG_ANY}" );@]@=} + "<>" {@> @[TeX_( "/yylexreturnptr{TAG_NONE}" );@]@=} + "<" {@> @[TeX_( "/lonesting=/z@@/yyBEGIN{SC_TAG}/yylexnext" );@]@=} + + "%%" {@> @[@@]@=} + "[" {@> @[TeX_( "/let/bracketedidstr=/empty" );@]@; + @> @[TeX_( "/bracketedidcontextstate/YYSTART" );@] + @> @[TeX_( "/yyBEGIN{SC_BRACKETED_ID}/yylexnext" );@]@=} + + <> {@> @[TeX_( "/yyterminate% EOF in INITIAL" );@]@=} + + [^\[%A-Za-z0-9_<>{}\"\'*;|=/, \f\n\t\v]+|. {@> @[@@]@=} +} +@g + +@ Some additional constructs needed to typeset simple \flex\ +declarations. This is not part of the original \bison\ scanner. +@= +@G + +{ + "%option" {@> @[TeX_( "/yylexreturnptr{FLEX_OPTION}" );@]@=} + "%x" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_X}" );@]@=} + "%s" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_S}" );@]@=} +} +@g + +@ We present the `bad character' code first, before going into the details +of the character matching by the rest of the lexer. +@= + @[TeX_( "/edef/next{/nx/csname lexspecial[/the/yytextpure]/nx/endcsname}" );@]@; + @[TeX_( "/expandafter/expandafter/expandafter/ifx/next/relax" );@]@; + @[TeX_( " /iftracebadchars" );@]@; + @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@; + @[TeX_( " /fi" );@]@; + @[TeX_( " /yylexreturn{$undefined}" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /expandafter/lexspecialchar/expandafter{/next}{/the/yyfmark}{/the/yysmark}/yylexnext" );@]@; + @[TeX_( "/fi" );@]@; + +@ @= + @[TeX_( "/edef/next{/yylval{{parse.trace}{debug}{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yylexreturn{PERCENT_FLAG}" );@]@; + +@ @= + @[TeX_( "/edef/next{/yylval{{lex-param}{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yylexreturn{PERCENT_PARAM}" );@]@; + +@ @= + @[TeX_( "/edef/next{/yylval{{locations}{}{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yylexreturn{PERCENT_FLAG}" );@]@; + +@ @= + @[TeX_( "/edef/next{/yylval{{both-param}{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yylexreturn{PERCENT_PARAM}" );@]@; + +@ @= + @[TeX_( "/edef/next{/yylval{{parse-param}{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yylexreturn{PERCENT_PARAM}" );@]@; + +@ @= + @[TeX_( "/edef/next{/yylval{{api.pure}{pure-parser}{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yylexreturn{PERCENT_FLAG}" );@]@; + +@ @= + @[TeX_( "/iftracebadchars" );@]@; + @[TeX_( " /yycomplain{invalid directive: /the/yytext}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yylexnext" );@]@; + +@ @= + @[TeX_( "/edef/next{/yylval{/nx/idit{/the/yytextpure}{/the/yytext}" );@]@; + @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/let/bracketedidstr=/empty" );@]@; + @[TeX_( "/yyBEGIN{SC_AFTER_IDENTIFIER}/yylexnext" );@]@; + +@ @= + @[TeX_( "/advance/percentpercentcount/@@ne" );@]@; + @[TeX_( "/ifnum/percentpercentcount=/tw@@" );@]@; + @[TeX_( " /yyBEGIN{SC_EPILOGUE}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yylexreturnptr{PERCENT_PERCENT}" );@]@; + +@ @= + @[TeX_( "/edef/next{/postoks{{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yyBEGIN{SC_PROLOGUE}/yylexnext" );@]@; + +@ {\it Supporting \.{\\0} complexifies our implementation for no expected added value}. + +@= +@G + +{ + \0 {@> @[TeX_( "/yycomplain{invalid null character}/yylexnext" );@]@=} +} +@g + +@ @= +@G + +{ + "[" {@> @[@@]@=} + ":" {@> @[@@]@=} + <> {@> @[@@]@=} + . 
{@> @[@@]@=} +} +@g + +@ @= + @[TeX_( "/ifx/bracketedidstr/empty" );@]@; + @[TeX_( " /bracketedidcontextstate/YYSTART /yyBEGIN{SC_BRACKETED_ID}" );@]@; + @[TeX_( " /let/next=/yylexnext" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /ROLLBACKCURRENTTOKEN" );@]@; + @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@; + @[TeX_( " /def/next{/yylexreturn{ID}}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/next" );@]@; + +@ @= + @[TeX_( "/ifx/bracketedidstr/empty" );@]@; + @[TeX_( " /yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yylexreturn{ID_COLON}" );@]@; + +@ @= + @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@; + @[TeX_( "/ifx/bracketedidstr/empty" );@]@; + @[TeX_( " /yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yylexreturn{ID}" );@]@; + +@ @= + @[TeX_( "/ifx/bracketedidstr/empty" );@]@; + @[TeX_( " /yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /yyBEGIN{SC_RETURN_BRACKETED_ID}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@; + @[TeX_( "/yylexreturn{ID}" );@]@; + +@ @= +@G + +{ + <> {@> @[@@]@=} + {id} {@> @[@@]@=} + "]" {@> @[@@]@=} + [^\].A-Za-z0-9_/ \f\n\t\v]+|. {@> @[@@]@=} +} +@g + +@ @= + @[TeX_( "/ifx/bracketedidstr/empty" );@]@; + @[TeX_( " /edef/bracketedidstr{/nx/idit{/the/yytextpure}" );@]@; + @[TeX_( " {/the/yytext}{/the/yyfmark}{/the/yysmark}}" );@]@; + @[TeX_( " /let/next=/yylexnext" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /def/next{/yycomplain{unexpected " );@]@; + @[TeX_( " identifier in bracketed name: /the/yytext}/yylexnext}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/next" );@]@; + +@ @= + @[TeX_( "/yyBEGINr/bracketedidcontextstate" );@]@; + @[TeX_( "/ifx/bracketedidstr/empty" );@]@; + @[TeX_( " /def/next{/yycomplain{an identifier expected}/yylexnext}" );@]@; + @[TeX_( "/else" );@]@; + @[TeX_( " /ifnum/bracketedidcontextstate=/yylexstate{INITIAL}/relax" );@]@; + @[TeX_( " /expandafter/yylval/expandafter{/bracketedidstr}" );@]@; + @[TeX_( " /let/bracketedidstr=/empty" );@]@; + @[TeX_( " /def/next{/yylexreturn{BRACKETED_ID}}" );@]@; + @[TeX_( " /else" );@]@; + @[TeX_( " /let/next=/yylexnext" );@]@; + @[TeX_( " /fi" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/next" );@]@; + +@ @= + @[TeX_( "/yycomplain{invalid character(s) in bracketed name: /the/yytext}/yyerrterminate" );@]@; + +@ @= + @[TeX_( "/yyBEGINr/bracketedidcontextstate" );@]@; + @[TeX_( "/yycomplain{unexpected end of file inside brackets}/yyerrterminate" );@]@; + +@ @= +@G + +{ + . {@> @[@@]@=} +} +@g + +@ @= + @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@; + @[TeX_( "/expandafter/yylval/expandafter{/bracketedidstr}" );@]@; + @[TeX_( "/let/bracketedidstr=/empty" );@]@; + @[TeX_( "/yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/yylexreturn{BRACKETED_ID}" );@]@; + +@ {\it Scanning a Yacc comment. The initial \.{/*} is already eaten}. +@= +@G + +{ + <> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@] + @> @[TeX_( " a comment}/yyerrterminate" );@]@=} + "*/" {@> @[TeX_( "/yyBEGINr{/contextstate}/yylexnext" );@]@=} + .|\n {@> @[TeX_( "/yylexnext" );@]@=} +} +@g + +@ {\it Scanning a \Cee\ comment. The initial \.{/*} is already eaten}. +@= +@G + +{ + <> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@] + @> @[TeX_( " a comment}/yyerrterminate" );@]@=} + "*"{splice}"/" {@> @[TeX_( "/STRINGGROW/yyBEGINr/contextstate/yylexnext" );@]@=} +} +@g + +@ {\it Scanning a line comment. The initial \.{//} is already eaten}. 
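+{\it Here, as before, \.{\{splice\}} stands for a backslash-newline line splice, in the manner of the \Cee\ preprocessor: a line comment whose last character is a \.{\\} continues on the next line, while an ordinary line end finishes the comment and returns the scanner to the saved context state.}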
+@= +@G + +{ + <> {@> @[TeX_( "/yyBEGINr/contextstate /ROLLBACKCURRENTTOKEN" );@] + @> @[TeX_( " /yylexnext" );@]@=} + "\n" {@> @[TeX_( "/STRINGGROW/yyBEGINr/contextstate /yylexnext" );@]@=} + {splice} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} +} +@g + +@ {\it Scanning a \bison\ string, including its escapes. +The initial quote is already eaten}. +@= +@G + +{ + <> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@] + @> @[TeX_( " a string}/yyerrterminate" );@]@=} + "\"" {@> @[@@]@=} + "\n" {@> @[TeX_( "/yycomplain{unexpected end of line in " );@] + @> @[TeX_( " a string}/yyerrterminate" );@]@=} +} +@g + +@ @= + @[TeX_( "/STRINGFINISH" );@]@; + @[TeX_( "/edef/next{/yylval{/nx/stringify{/the/laststring}" );@]@; + @[TeX_( "{/the/laststringraw}{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/yylexreturn{STRING}" );@]@; + +@ {\it Scanning a \bison\ character literal, decoding its escapes. +The initial quote is already eaten}. +@= +@G + +{ + <> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@] + @> @[TeX_( " a literal}/yyerrterminate" );@]@=} + "'" {@> @[@@]@=} + "\n" {@> @[TeX_( "/yycomplain{unexpected end of line in " );@] + @> @[TeX_( " a literal}/yyerrterminate" );@]@=} +} +@g + +@ @= + @[TeX_( "/STRINGFINISH" );@]@; + @[TeX_( "/edef/next{/yylval{/nx/charit{/the/laststring}{/the/laststringraw}" );@]@; + @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/STRINGFREE" );@]@; + @[TeX_( "/yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/yylexreturn{CHAR}" );@]@; + +@ {\it Scanning a tag. The initial angle bracket is already eaten}. +@= +@G + +{ + ">" {@> @[@@]@=} + ([^<>]|->)+ {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + "<" {@> @[@@]@=} + <> {@> @[TeX_( "/yycomplain{unexpected end of file in " );@] + @> @[TeX_( " a literal}/yyerrterminate" );@]@=} +} +@g + +@ @= + @[TeX_( "/advance/lonesting/m@@ne" );@]@; + @[TeX_( "/ifnum/lonesting= + @[TeX_( "/STRINGGROW" );@]@; + @[TeX_( "/advance/lonesting/@@ne" );@]@; + @[TeX_( "/yylexnext" );@]@; + +@ @= +@G + +{ + \\[0-7]{1,3} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\x[0-9abcdefABCDEF]+ {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\a {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\b {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\f {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\n {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\r {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\t {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\v {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + + /* {\it \.{\\\\[\\"\\'?\\\\]} would be shorter, but it confuses |xgettext|.} */ + \\("\""|"'"|"?"|"\\") {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + + \\(u|U[0-9abcdefABCDEF]{4})[0-9abcdefABCDEF]{4} {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} + \\(.|\n) {@> @[TeX_( "/yycomplain{invalid character after " );@] + @> @[TeX_( " /\\-escape: /the/yytext}/yylexnext" );@]@=} +} +@g + +@ @= +@G + +{ + {splice}|\\{splice}[^\n\[\]] {@> @[TeX_( "/STRINGGROW/yylexnext" );@]@=} +} + + +{ + "'" {@> @[TeX_( "/STRINGGROW /yyBEGINr{/contextstate}/yylexnext" );@]@=} + \n {@> @[TeX_( "/yycomplain{unexpected end of line instead of " );@] + @> @[TeX_( " a character}/yyerrterminate" );@]@=} + <> {@> @[TeX_( "/yycomplain{unexpected end of file instead of " );@] + @> @[TeX_( " a character}/yyerrterminate" );@]@=} +} + + +{ + "\"" {@> @[TeX_( "/STRINGGROW /yyBEGINr{/contextstate}/yylexnext" );@]@=} + \n {@> @[TeX_( "/yycomplain{unexpected end of line instead of " );@] + @> @[TeX_( " a character}/yyerrterminate" );@]@=} + <> {@> @[TeX_( 
"/yycomplain{unexpected end of file instead of " );@] + @> @[TeX_( " a character}/yyerrterminate" );@]@=} +} +@g + +@ @= +@G + +{ + "'" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@] + @> @[TeX_( " /yyBEGIN{SC_CHARACTER}/yylexnext" );@]@=} + "\"" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@] + @> @[TeX_( " /yyBEGIN{SC_STRING}/yylexnext" );@]@=} + "/"{splice}"*" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@] + @> @[TeX_( " /yyBEGIN{SC_COMMENT}/yylexnext" );@]@=} + "/"{splice}"/" {@> @[TeX_( "/STRINGGROW /contextstate/YYSTART" );@] + @> @[TeX_( " /yyBEGIN{SC_LINE_COMMENT}/yylexnext" );@]@=} +} +@g + +@ {\it Scanning some code in braces (actions, predicates). The +initial \.{\{} is already eaten}. +@= +@G + +{ + "{"|"<"{splice}"%" {@> @[TeX_( "/STRINGGROW /advance/lonesting/@@ne /yylexnext" );@]@=} + "%"{splice}">" {@> @[TeX_( "/STRINGGROW /advance/lonesting/m@@ne /yylexnext" );@]@=} + + /* {\it Tokenize \.{<<\%} correctly (as \.{<<} \.{\%}) rather than incorrectly (as \.{<} \.{<\%}).} */ + "<"{splice}"<" {@> @[TeX_( "/STRINGGROW /yylexnext" );@]@=} + <> {@> @[TeX_( "/yycomplain{unexpected end of line " );@] + @> @[TeX_( " inside braced code}/yyerrterminate" );@]@=} +} + + +{ + "}" {@> @[@@]@=} +} + + +{ + "}" {@> @[@@]@=} +} +@g + +@ Unlike the original lexer, we do not return the closing brace as part of the +braced code. + +@= + @[TeX_( "/advance/lonesting/m@@ne" );@]@; + @[TeX_( "/ifnum/lonesting= + @[TeX_( "/advance/lonesting/m@@ne" );@]@; + @[TeX_( "/ifnum/lonesting= +@G + +{ + "%}" {@> @[@@]@=} + <> {@> @[TeX_( "/yycomplain{unexpected end of file " );@] + @> @[TeX_( " inside prologue}/yyerrterminate" );@]@=} +} +@g + +@ @= + @[TeX_( "/STRINGFINISH" );@]@; + @[TeX_( "/edef/next{/yylval{{/the/laststring}/the/postoks{/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/yylexreturn{PROLOGUE}" );@]@; + +@ {\it Scanning the epilogue (everything after the second \prodstyle{\%\%}, which +has already been eaten)}. +@= +@G + +{ + <> {@> @[@@]@=} +} +@g + +@ @= + @[TeX_( "/ROLLBACKCURRENTTOKEN" );@]@; + @[TeX_( "/STRINGFINISH" );@]@; + @[TeX_( "/yylval=/laststring" );@]@; + @[TeX_( "/yyBEGIN{INITIAL}" );@]@; + @[TeX_( "/yylexreturn{EPILOGUE}" );@]@; + +@ {\it By default, grow the string obstack with the input}. +\ifbootstrapmode % only if this file is used to extract state information + \immediate\closeout\stlist +\fi +@= +@G +. | + \n {@> @[TeX_( "/STRINGGROW /yylexnext" );@]@=} +@g diff --git a/support/splint/cweb/mkeparser.w b/support/splint/cweb/mkeparser.w new file mode 100644 index 0000000000..937a9e5498 --- /dev/null +++ b/support/splint/cweb/mkeparser.w @@ -0,0 +1,123 @@ +% Copyright 2012-2014, Alexander Shibakov +% This file is part of SPLinT +% +% SPLinT is free software: you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation, either version 3 of the License, or +% (at your option) any later version. +% +% SPLinT is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with SPLinT. If not, see . + +\input limbo.sty +\input yy.sty + +\let\N\textN + +@**Parser tables. This is the main table output code. Its core comes from the Bison +Sourcer(er?) 
(\.{bs.w}) which will be included as soon as this paragraph is +over. Since the main function of this code is to {\it dump\/} tables +produced by \bison, and a {\it bison\/} is a large buffalo-like +animal, {\it and\/} the only reason it has to be done like this is due +to the less-than-optimal choices of a few developers, feel free to +interpret the acronym as something easier to remember. + +@ @
= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @<\Cee\ preamble@>= + @@; + +@i common.w +@i bs.w + +@*1 Parser dependent settings. +This is it for the core table output functions. +To make all this into a working code in this +case, lexing and error function declarations are supplied. +@= +#ifndef HAS_SCANNER + int yylex(void); + int yyerror(void); +#endif + +@ @= +#ifndef HAS_SCANNER + int yylex(void){} + int yyerror(void){} +#endif + +@ \let\B\oldB % \Cee\ mode mixes all up +@c + +@<\Cee\ preamble@>@; + +#include PARSER_FILE + +@<\Cee\ postamble@>@; + +@** Index (for {\tt \jobname}). +\def\readcontents{% + {% + \acrofalse +% \def\jobname{bparser}\input bparser.toc +% \def\jobname{ftablesout}\input ftablesout.toc + }% + \input \contentsfile +} \ No newline at end of file diff --git a/support/splint/cweb/mkscanner.w b/support/splint/cweb/mkscanner.w new file mode 100644 index 0000000000..22c39e9342 --- /dev/null +++ b/support/splint/cweb/mkscanner.w @@ -0,0 +1,102 @@ +% Copyright 2012-2014, Alexander Shibakov +% This file is part of SPLinT +% +% SPLinT is free software: you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation, either version 3 of the License, or +% (at your option) any later version. +% +% SPLinT is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with SPLinT. If not, see . + +@**Lexer tables. This is the main table output code. +Its core comes from the Flex Kit(ten?) (\.{fk.w}) +which will be included as soon as this paragraph is +over. Feel free to +interpret the acronym as something easier to remember. + +@
= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @<\Cee\ preamble@>= + @@; + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@ @= + @@; + +@i common.w +@i fk.w + +@*1 Lexer dependent settings. +This is it for the core table output functions. +To make all this into a working code in this +case, no function declarations are supplied. +@= + +@ @= + +@ @<\Cee\ preamble@>= + void define_all_states( void ); + +@ The lexer takes no parameters in this case but if one reuses a +lexer written for a different purpose, the situation may be different. +%\let\B\oldB % \Cee\ mode mixes all up + +@d YYPARSE_PARAMETERS +@d YY_USER_INIT define_all_states(); +@d yyterminate() TeX_( "/yyterminate" ); return YY_NULL + +@c + +@<\Cee\ preamble@>@; +typedef int YYSTYPE; +#define YY_BREAK return 0; + +#include LEXER_FILE + +@<\Cee\ postamble@>@; + +@** Index (for {\tt \jobname}). +%\def\readcontents{% +% {% +% \acrofalse +% \def\jobname{bparser}\input bparser.toc +% }% +% \input \contentsfile +%} \ No newline at end of file diff --git a/support/splint/cweb/np.w b/support/splint/cweb/np.w new file mode 100644 index 0000000000..aa20e551c7 --- /dev/null +++ b/support/splint/cweb/np.w @@ -0,0 +1,380 @@ +@q Copyright 2012-2014, Alexander Shibakov@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@> +@q GNU General Public License for more details.@> + +@q You should have received a copy of the GNU General Public License@> +@q along with SPLinT. If not, see .@> + +@*1 The name parser. What follows is an example parser for the name +processing. This approach (i.e. using a `full blown' parser/scanner +combination) is probably not the best way to implement such machinery +but its main purpose is to demonstrate a way to create a separate +parser for local purposes. +% We include the macros here since this file is intended to be +% included by the documentation `aggregator' so putting bare \TeX\ +% at the beginning of the file runs the risk of producing and error +% of having \TeX\ material inside a \Cee\ section. +\let\currentparsernamespace\parsernamespace + \let\parsernamespace\smallnamespace + \let\hostparsernamespace\smallnamespace + \input stokenset.sty +\let\parsernamespace\currentparsernamespace +@(small_parser.yy@>= +@G Switch to generic mode. +%{@> @ @=%} + @> @ @= +%union {@> @ @=} +%{@> @ @=%} + @> @ @= +%% + @> @ @= +%% +@g + +@ @= +@G +%token-table +%debug +%start full_name +@g + +@ @= +@G +%token PERCENT_IDENTIFIER +%token IDENTIFIER +%token OPTIONAL +%token NO_ATTR +%token INTEGER +%token EXTENDED +%token WILDCARD +@g + +@ @= +@G +full_name: + identifier_string suffixes.opt {@> @ @=} +; + +identifier_string: + PERCENT_IDENTIFIER {@> @ @=} +| IDENTIFIER {@> @ @=} +| '<' IDENTIFIER '>' {@> @ @=} +| '\'' WILDCARD '\'' {@> @ @=} +| '\'' '>' '\'' {@> @'} string@> @=} +| '\'' '<' '\'' {@> @ @=} +| '\'' '.' 
'\'' {@> @ @=} +| '\'' '_' '\'' {@> @ @=} +| '\'' '-' '\'' {@> @ @=} +| qualifier {@> @ @=} +| identifier_string IDENTIFIER {@> @ @=} +| identifier_string qualifier {@> @ @=} +| identifier_string INTEGER {@> @ @=} +; + +suffixes.opt: + {@> TeX_( "/yy0{}" ); @=} +| '.' {@> TeX_( "/yy0{/nx/dotsp/nx/sfxnone}" ); @=} +| '.' suffixes {@> @ @=} +| '.' qualified_suffixes {@> @ @=} +; + +suffixes: + IDENTIFIER {@> @ @=} +| INTEGER {@> @ @=} +| suffixes '.' {@> @ @=} +| suffixes IDENTIFIER {@> @ @=} +| suffixes INTEGER {@> @ @=} +| qualifier '.' {@> TeX_( "/yy0{/nx/sfxn/the/yy(1)/nx/dotsp}" ); @=} +| suffixes qualifier '.' {@> TeX_( "/yy0{/the/yy(1)/nx/sfxn/the/yy(2)/nx/dotsp}" ); @=} +; + +qualified_suffixes: + suffixes qualifier {@> @ @=} +| qualifier {@> @ @=} +; + +qualifier: + OPTIONAL {@> TeX_( "/yy0{/the/yy(1)}" ); @=} +| NO_ATTR {@> TeX_( "/yy0{/the/yy(1)}" ); @=} +| EXTENDED {@> TeX_( "/yy0{/the/yy(1)}" ); @=} +; +@g + +@ @= + @[TeX_( "/yy0{/the/yy(1)/the/yy(2)}/namechars/yyval" );@]@; + +@ @= + @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/yy0{/nx/optstr{/the/toksa}{/the/toksb}}" );@]@; + +@ @= + @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/getsecond{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/yy0{/nx/idstr{/the/toksa}{/the/toksb}}" );@]@; + +@ @= + @[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@; + @[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@; + @[TeX_( "/yy0{/nx/idstr{}{}}" );@]@; + +@ @= + @[TeX_( "/getfirst{/yy(2)}/to/toksa" );@]@; + @[TeX_( "/getsecond{/yy(2)}/to/toksb" );@]@; + @[TeX_( "/yy0{/nx/chstr{/the/toksa}{/the/toksb}}" );@]@; + +@ @= + @[TeX_( "/yy0{/nx/chstr{<}{<}}" );@]@; + +@ @'} string@>= + @[TeX_( "/yy0{/nx/chstr{/greaterthan}{/greaterthan}}" );@]@; + +@ @= + @[TeX_( "/yy0{/nx/chstr{/uscoreletter}{/uscoreletter}}" );@]@; + +@ @= + @[TeX_( "/yy0{/nx/chstr{-}{-}}" );@]@; + +@ @= + @[TeX_( "/yy0{/nx/chstr{.}{.}}" );@]@; + +@ @= + @@; + +@ @= + @[TeX_( "/getsecond{/yy(1)}/to/toksa" );@]@; + @[TeX_( "/appendr/toksa{/space}" );@]@; + @[TeX_( "/getfirst{/yy(2)}/to/toksb" );@]@; + @[TeX_( "/concat/toksa/toksb" );@]@; + @[TeX_( "/getthird{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/appendr/toksb{/space}" );@]@; + @[TeX_( "/getsecond{/yy(2)}/to/toksc" );@]@; + @[TeX_( "/concat/toksb/toksc" );@]@; + @[TeX_( "/yy0{/nx/idstr{/the/toksa}{/the/toksb}}" );@]@; + +@ @= + @ + +@ @= + @@; + +@ @= + @[TeX_( "/yy0{/nx/dotsp/the/yy(2)}" );@]@; + +@ @= + @@; + +@ @= + @[TeX_( "/yy0{/nx/sfxn/the/yy(1)}" );@]@; + +@ @= + @[TeX_( "/yy0{/nx/sfxi/the/yy(1)}" );@]@; + +@ @= + @[TeX_( "/yy0{/the/yy(1)/nx/dotsp}" );@]@; + +@ @= + @[TeX_( "/yy0{/the/yy(1)/nx/sfxi/the/yy(2)}" );@]@; + +@ @= + @[TeX_( "/yy0{/the/yy(1)/nx/sfxn/the/yy(2)}" );@]@; + +@ @= + @[TeX_( "/yy0{/the/yy(1)/nx/qual/the/yy(2)}" );@]@; + +@ @= + @[TeX_( "/yy0{/nx/qual/the/yy(1)}" );@]@; + +@ \Cee\ preamble. In this case, there are no `real' actions that our +grammar performs, only \TeX\ output, so this section is empty. + +@= + +@ \Cee\ postamble. It is tricky to insert function definitions that use \bison's internal types, +as they have to be inserted in a place that is aware of the internal definitions but before said +definitions are used. + +@= +#define YYPRINT(file, type, value) @[yyprint (file, type, value)@] + static void yyprint (FILE *file, int type, YYSTYPE value){} + +@ Union of types. + +@= + +@*1 The name scanner. 
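+The scanner below is even simpler than the grammar: all \bison\ style \.{\%}-keywords are returned as \.{PERCENT\_IDENTIFIER} tokens, the words \.{opt}, \.{na}, and \.{ext} become qualifiers, while integers, identifiers, and a few individual characters are passed on as themselves. As a purely illustrative example of the resulting division of labor (the name is made up), a term name such as \.{term.1} reaches the parser as the token stream $$\hbox{\.{IDENTIFIER}\ \.{'.'}\ \.{INTEGER}}$$ out of which the productions above assemble the \.{\\idstr}, \.{\\dotsp}, and \.{\\sfxi} commands that the typesetting macros consume.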
+%\checktabletrue +@(small_lexer.ll@>= +@G + @> @ @= +%{@> @ @=%} + @> @ @= +%% + @> @ @= +%% +@O +void define_all_states( void ) { + @@; +} +@o +@g + +@ @= + @@; +@G +letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ] +wc ([^\\\'\"]{-}[_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0-9]|\\.) +id {letter}({letter}|[-0-9])* +int [0-9]+ +@g + +@ @= +#define _register_name( name ) @[Define_State( #name, name )@] +/* nothing for now */ +#undef _register_name + +@ Strings and characters in directives/rules. +@= +@G +%x SC_ESCAPED_STRING SC_ESCAPED_CHARACTER +@g + +@ @= + +#include +#include + +@ @= +@G +%option bison-bridge +%option noyywrap nounput noinput reentrant +%option noyy_top_state +%option debug +%option stack +%option outfile="small_lexer.c" +@g + +@ @= + @@; + @@; + +@ White space skipping. +\traceparserstatestrue +\tracestackstrue +\tracerulestrue +\traceactionstrue +\tracelookaheadtrue +\traceparseresultstrue +\tracebadcharstrue +\yyflexdebugtrue +% +\traceparserstatesfalse +\tracestacksfalse +\tracerulesfalse +\traceactionsfalse +\tracelookaheadfalse +\traceparseresultsfalse +\tracebadcharsfalse +\yyflexdebugfalse +% +\yyskipparsetrue +@= +@G +[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=} +@g + +@ This collection of regular expressions might seem redundant, and in +its present state, it certainly is. However, if later on the +typesetting style for some of the keywords would need to be adjusted, +such changes would be easy to implement, since the template is already +here. +\yyskipparsefalse % this is not necessary +@= +@G +"%binary" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%code" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%debug" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%default-prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%define" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%defines" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%destructor" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%dprec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%empty" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%error-verbose" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%expect" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%expect-rr" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%file-prefix" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%fixed-output-files" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%initial-action" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%glr-parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%language" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%left" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%lex-param" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%locations" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%merge" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%name-prefix" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%no-default-prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%no-lines" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%nonassoc" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%nondeterministic-parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%nterm" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%output" {@> @[TeX_( 
"/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%param" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%parse-param" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%precedence" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%printer" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%pure-parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%require" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%right" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%skeleton" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%start" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%term" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%token" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%token-table" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%type" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%union" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%verbose" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%yacc" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%default"[-_]"prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%error"[-_]"verbose" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%expect"[-_]"rr" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%fixed"[-_]"output"[-_]"files" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%name"[-_]"prefix" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%no"[-_]"default"[-_]"prec" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%no"[-_]"lines" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%pure"[-_]"parser" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%token"[-_]"table" {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} +"%"({letter}|[0-9]|[-_]|"%"|[<>])+ {@> @[TeX_( "/yylexreturnval{PERCENT_IDENTIFIER}" );@]@=} + +"opt" {@> @[TeX_( "/yylexreturnval{OPTIONAL}" );@]@=} +"na" {@> @[TeX_( "/yylexreturnval{NO_ATTR}" );@]@=} +"ext" {@> @[TeX_( "/yylexreturnval{EXTENDED}" );@]@=} + +[<>._\'] {@> @[TeX_( "/yylexreturnchar" );@]@=} +{wc} {@> @[TeX_( "/yylexreturnval{WILDCARD}" );@]@=} + +{id} {@> @[@@]@=} +{int} {@> @[TeX_( "/yylexreturnval{INTEGER}" );@]@=} + +"\"" {@> @[TeX_( "/yylexnext" );@]@=} +. {@> @[@@]@=} +@g + +@ @= + @[TeX_( "/yylexreturnval{IDENTIFIER}" );@]@; + +@ @= + @[TeX_( "/iftracebadchars" );@]@; + @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yylexreturn{$undefined}" );@]@; diff --git a/support/splint/cweb/philosophy.w b/support/splint/cweb/philosophy.w new file mode 100644 index 0000000000..4d30f2d765 --- /dev/null +++ b/support/splint/cweb/philosophy.w @@ -0,0 +1,223 @@ +@**Philosophy. +This section should, perhaps, be more appropriately called {\it rant\/} but +{\it philosophy\/} sounds more academic. The design of any software involves +numerous choices, and \splint\ is no exception. Some of these choices +are explained in the appropriate places in the package files. This +section collects a few `big picture' viewpoints that did not fit elsewhere. + +@*1 On typographic style. +It must seem quite perplexing to some readers that a +document with a focus on {\it pretty-printing\/} displays such a +wanton disregard for good typographic taste. 
Haphazard choice of +styles to present programming constructs, random overabundance of +fonts on almost every single page are just a few among the many typographic sins +and design guffaws so amply manifested on these pages. The author has +to take full responsibility for the lack of taste in this +opus and has only one argument in his defense: this is not +merely a book for a good night read but a piece of technical +documentation. + +In many ways, the goal of this document is somewhat contrary to that +of a well-written manual: to display the main features +prominently and in logical order. After all, this is a package that is +intended to help {\it write\/} such manuals so it inevitably must +display some use cases that demonstrate a variety of typographic styles +possible to achieve with \splint. Needless to say, {\it variety\/} and +{\it consistency\/} seldom go hand in hand and it is consistency that +makes for a pretty page. One of the objectives has been to demonstrate a +number of quite technical programming constructs so one should keep in +mind that it is assumed that the reader will want to look up the input +files to see how some (however ugly and esoteric) typographic effects +had been achieved. + +On the other hand, to use a clich\'e, beauty is in the eyes of the +beholder so what makes a book readable (or even beautiful) may well +depend on the background of the reader. As an example, letterspacing +as a typographic device is almost universally reviled in Western +typography (aside from a few niche uses such as setting titles). In +Russian, however (at least until recently), letterspacing has been +routinely used for emphasis (or, as a Russian would put it, +e$\,$m$\,$p$\,$h$\,$a$\,$s$\,$i$\,$s) in lieu of, say, {\it italics}. Before +I hear any objections from typography purists, let me just say that +this technique fits perfectly with the way emphasis works in the Russian speech: a +speaker slowly enunciates the sounds of each word (incidentally, +emphasizing {\it emphasis\/} is a perfect example of why this method +would fail in most English texts). Letterspaced +sentences are easy to find on a page and set a special reading rhythm, +which is an added bonus in many cases, although it does violate the +`universally gray pages are a must' dogma. + +@*1 Why GPL. +The choice of license for this project goes beyond merely showing the +source. \TeX, by its very nature is an open source language, so it +is not a matter of hiding anything from the user or a potential +developer. The \Cee\ code is a different matter but the source is not +that complicated. Reducing the licensing issue to the ability of +someone else to see the source code is a great +oversimplification. Without getting into too many details of so-called +`open source licenses' (other than GPL) and arguing with their advocates, let me simply +express my lack of understanding at the arguments that purport that +BSD-style licenses introduce more freedom by allowing a software +vendor to incorporate the BSD-licensed software into their +products. What benefit does one derive from such `extension' of software +freedom? Perhaps the hope that the `open source' (for the lack of a +better term) will prompt the vendor to follow the accepted free (or +any other, for that matter!) software standards and make its software +more interoperable with the free alternatives? A well-known software +giant's {\it embrace, extend, extinguish\/} philosophy shows how na\"{\i}ve and +misplaced such hopes are. 
+ +I am not going to argue for the benefits of free software at length, either +(such benefits seem self-evident to me, although the readers should +feel free to disagree). Let me just point out that software companies +enjoy quite a few freedoms that we, as software consumers, elect to +afford them. Among such freedoms are the ability to renege on any +promises made to potential users and withdraw any guarantees that such +users might enjoy. Free software, of course, does not provide any +guarantees, either, but `you get what you paid for'. As a result of +such `release of any responsibility', the claims of increased +reliability or better support for the commercial software sound a +bit hollow. Another widespread tactic is user brainwashing and +changing the culture (usually for the worse) in order to promote new +`user-friendly' features of commercial software. Instead of taking +advantage of computers as cognitive machines, we have come to view +them as advanced media players that we interact with through +artificial, unnatural interfaces. Meaningless terminology (`UX' for +`user experience'? What in the world is `user experience'?) +proliferates, and programmers are happy to deceive themselves with +their newly discovered business prowess. + +One would hope that the somewhat higher standards of the `real' +manufacturers might percolate to the software world; however, the +reality is very different. Not only has life-cycle `engineering' +got to the point where manufacturers can predict the life spans of +their products precisely, but embedded software in those products has +become an enabling technology that makes this `life design' much +easier. + +In effect, by embedding software in their products, hardware +manufacturers not only piggy-back on software's perceived complexity, +and argue that such complex systems cannot be made reliable, they have +an added incentive to uphold this image. The software weighs nothing, +memory is cheap, consumers are easy to deceive, thus `software is +expensive' and `reliable software is prohibitively so'. Designing reliable +software is quite possible, though: just look at programmable +thermostats, simple cellphones and other `invisible' gadgets we +enjoy. The `software ideology' with its `IP' lingo is spreading like a +virus even through the world of real things. We now expect products to +break and are too quick to forgive sloppy engineering that goes into +everyday things. We are also getting used to the idea that it is the +manufacturers that get to dictate the terms of use for `their' products +and that we are merely borrowing `their' stuff. + +The GPL was conceived as an antidote to this scourge. This document is a +remarkable piece of `legal engineering': a self-propagating license +with a clearly outlined set of goals. While by itself it does not +guarantee reliability or quality, it does inhibit the spread of the `IP' +(which is sometimes sarcastically, though quite perceptively, +`deabbreviated' as {\sl I}maginary {\sl P}roperty) disease through +software. + +The industry has adapted, of course. So-called (non-GPL) `open source +licenses', which are supposed to be an improvement on GPL, +are a sort of `immune reaction' to the free software +movement. Convince and confuse enough apathetic users and the +protections granted by GPL are no longer visible. + +@*1 Why not \Cee$++$ or OOP in general.
+The choice of the language was mainly driven by \ae sthetic motives: +\Cee$++$ has a bloated and confusing standard, partially supported by +various compilers. It seems that there is no agreement on what +\Cee$++$ really is or how to use some of its constructs. This is all +in contrast to \Cee\ with its well defined and concise body of +specifications and rather well established stylistics. The existence +of `obfuscated \Cee' is not good evidence of deficiency and \Cee$++$ +is definitely not immune to this malady. + +Object oriented design has certainly taken on an aura of a religious +dictate, universally adhered to and forcefully promoted by its +followers. Unfortunately, the definition of what constitutes an +`object-oriented' approach is rather vague. A few abstract concepts are +commonly tossed about to give the illusion of a well developed +abstraction (such as `polymorphism', `encapsulation', and so on) but +definitions vary in both length and contents, depending on the source. + +On a syntactic level, some features of object-oriented languages are +undoubtedly very practical (such as a |this| pointer in \Cee$++$), +however, many of those features can be effectively emulated with some +clever uses of an appropriate preprocessor (there are a few +exceptions, of course, |this| being one of them). The rest of the +`object-oriented philosophy' is just that: a design philosophy. Before +that we had structured programming, now there are patterns, extreme, +agile, reactive, etc. They might all find their uses, however, there +are always numerous exceptions (sometimes even global variables and +|goto|'s have their place, as well). + +A pedantic reader might point out a few object-oriented features even +in the \TeX\ portion of the package and then accuse the author of +being `inconsistent'. I am always interested in possible improvements +in style but I am unlikely to consider any changes based solely on the +adherence to any particular design fad. + +In short, OOP was not shunned simply because a `non-OOP' language was +chosen, instead, whatever approach or style was deemed most effective +was used. The author's judgment has not always been perfect, of course, +and given a good reason, changes can be made, including the choice of +the language. `Make it object-oriented' is neither a good reason nor a +clearly defined one, however. + +@*1 Why not $*$\TeX. +Simple. I never use it and have no idea of how packages, classes, +etc., are designed. I have heard it has impressive mechanisms for +dealing with various problems commonly encountered in \TeX. Sadly, my +knowledge of $*$\TeX\ machinery is almost nonexistent. This may change +but right now I have tried to make the macros as generic as possible, +hopefully making $*$\TeX\ adaptation easy. + +The following quote from \cite[Ho] makes me feel particularly uneasy +about the current state of development of various \TeX\ variants: +``{\it Finally, to many current programmers\/ \.{WEB} source simply feels over-documented +and even more important is that the general impression is that of a finished book: +sometimes it seems like\/ \.{WEB} actively discourages development. This is +a subjective point, but nevertheless a quite important one.}'' + +{\it Discouraging development\/} seems like a good thing to +me. Otherwise we are one step away from encouraging writing poor +software with inadequate tools merely `to encourage development'. 
+ +The feeling of a \.{WEB} source being {\it over-documented\/} is most +certainly subjective, and, I am sure, not shared by all `current +programmers'. The advantage of using \.{WEB}-like tools, however, is +that it gives the programmer the ability to place the vital +information where it does not distract the reader (`developer', +`maintainer', call it whatever you like) from the logical flow of the +code. + +Some of the complaints in \cite[Ho] are definitely justified, +although it seems that a better approach would be to write an improved +tool similar to \.{WEB}, rather than give up all the flexibility such +a tool provides. + +@*1 Why \CWEB. +\CWEB\ is not as polished as \TeX\ but it works and has a +number of impressive features. It is, regrettably, a `niche' tool and +a few existing extensions of \CWEB\ and software based on similar ideas +do not enjoy the popularity they deserve. Literate philosophy has been +largely neglected even though it seems to have a more logical +foundation than OOP. Under these circumstances, \CWEB\ seemed to be +the best available option. + +@*1 Why not GitHub, Bitbucket, etc. +Git is an incredible tool and is used extensively in the development +of \splint. The distribution archive is a Git repository. The use of +centralized services such as GitHub, however, seems redundant. The +standard cycle, `clone-modify-create pull request' works the same even +when `clone' is replaced by `download'. Thus, no functionality is +lost. This might change if the popularity of the package unexpectedly +increases. + +On the other hand, GitHub and its cousins are commercial entities, +whose availability in the future is not guaranteed (nothing is +certain, of course, no matter what distribution method is +chosen). Keeping \splint\ as an archive of a Git repository seems like +an efficient way of being ready for an unexpected change. diff --git a/support/splint/cweb/references.w b/support/splint/cweb/references.w new file mode 100644 index 0000000000..76dc718d3b --- /dev/null +++ b/support/splint/cweb/references.w @@ -0,0 +1,73 @@ +@** Bibliography. This list of references is not meant to be +exhaustive or complete. These are merely the papers and the books +mentioned in the body of the program above. Naturally, this project +has been influenced by many outside ideas but it would be impossible +to list them all due to time and (human) memory limitations. + +{% +\def\BASIX{{B\kern-.7ptA\kern-.7ptS\kern-.3pt\lower1.3pt\hbox{I}\kern-.3pt X}} +\def\MF{{\tt METAFONT}} +\def\bterm#1{\item{[#1]\namedspot{#1bibref}\quad}\ignorespaces}% +\setbox0=\hbox{[ISO/C11]\quad} +\parindent=0pt +\advance\parindent by \wd0 +\ninepoint +\smallskip +\centerline{\dinkus}% +\smallskip + +\bterm{Ah}Alfred V.~Aho et al., {\it Compilers: Principles, +Techniques, and Tools}, Pearson Education, 2006. + +\bterm{Bi}Charles Donnelly and Richard Stallman, {\it Bison, The +Yacc-compatible Parser Generator}, The Free Software Foundation, 2013. +\url{http://www.gnu.org/software/bison/} + +\bterm{DEK1}Donald E.~Knuth, {\it The \TeX book}, Addison-Wesley Reading, Massachusetts, 1984. + +\bterm{DEK2}Donald E.~Knuth {\it The future of \TeX\ and \MF}, TUGboat {\bf 11} (4), p.~489, 1990. 
+ +\bterm{Do}Jean-luc Doumont, {\it Pascal pretty-printing: an example of ``preprocessing with \TeX''}, +TUGboat {\bf 15} (3), 1994---Proceedings of the 1994 TUG Annual Meeting + +\bterm{Er}Sebastian Thore Erdweg and Klaus Ostermann, {\it Featherweight \TeX\ and Parser Correctness}, +Proceedings of the Third International Conference on Software Language Engineering, +pp.\ 397--416, Springer-Verlag Berlin, Heidelberg {\bf 2011}. + +\bterm{Fi}Jonathan Fine, {\it The \.{\\CASE} and \.{\\FIND} macros}, +TUGboat {\bf 14} (1), pp.~35--39, 1993. + +\bterm{Go}Pedro Palao Gostanza, {\it Fast scanners and self-parsing in \TeX}, +TUGboat {\bf 21} (3), 2000---Proceedings of the 2000 Annual Meeting. + +\bterm{Gr}Andrew Marc Greene, {\it \BASIX---an interpreter written in \TeX}, TUGboat {\bf 11} (3), +1990---Proceedings of the 1990 TUG Annual Meeting. + +\bterm{Ha}Hans Hagen, {\it Lua\TeX: Halfway to version~1}, TUGboat +{\bf 30} (2), pp.~183--186, 2009. \url{http://tug.org/TUGboat/tb30-2/tb95hagen-luatex.pdf}. + +\bterm{Ho}Taco Hoekwater, {\it Lua\TeX\ says goodbye to Pascal}, TUGboat {\bf 30} (3), +pp.~136--140, 2009---Euro\TeX\ 2009 Proceedings. + +\bterm{Ie}R.~Ierusalimschy et al., {\it Lua~5.1 Reference Manual}, +{\tt Lua.org}, August 2006. \url{http://www.lua.org/manual/5.1/}. + +\bterm{ISO/C11}{\it ISO/IEC 9899---Programming languages---C (C11)}, December~2011, draft available at +\url{http://www.open-std.org/jtc1/sc22/wg14/www/docs/n1570.pdf} + +\bterm{Jo}Derek M.~Jones, {\it The New C Standard: An Economic and +Cultural Commentary}, available at \url{http://www.knosof.co.uk/cbook/cbook.html}. + +\bterm{La}{\it The \.{l3regex} package: regular expressions in \TeX}, The \LaTeX3\ Project. + +\bterm{Pa}Vern Paxson et al., {\it Lexical Analysis With Flex, for +Flex~2.5.37}, July~2012. \url{http://flex.sourceforge.net/manual/}. + +\bterm{Sh}Alexander Shibakov, {\it Parsers in \TeX\ and using \CWEB\ for general pretty-printing}, +TUGboat {\bf 35} (1), 2014, available as part of the documentation supplied with \splint. + +\bterm{Wo}Marcin Woli\'nski, {\it {\tt Pretprin}---a \LaTeX2$\epsilon$ package for +pretty-printing texts in formal languages}, +TUGboat {\bf 19} (3), 1998---Proceedings of the 1998 TUG Annual Meeting. + +} \ No newline at end of file diff --git a/support/splint/cweb/splint.w b/support/splint/cweb/splint.w new file mode 100644 index 0000000000..05fe5c42fc --- /dev/null +++ b/support/splint/cweb/splint.w @@ -0,0 +1,102 @@ +% Copyright 2012-2015, Alexander Shibakov +% This file is part of SPLinT +% +% SPLinT is free software: you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation, either version 3 of the License, or +% (at your option) any later version. +% +% SPLinT is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with SPLinT. If not, see . +\newwrite\gindex + +@i bo.x +@i lo.x +@i np.x +@i common.w +@i bs.w +@i fk.w +@i philosophy.w +@i references.w + +\let\N\oldN +\let\hostparsernamespace\mainnamespace % to typeset examples in the text + % properly +@**Index. This section is, perhaps, the most valuable product of +\CWEB's labors. 
It lists references to definitions (set in {\it +italic}) as well as uses for each \Cee\ identifier used in the +source. Special facilities have been added to extend indexing to +\bison\ grammar terms and \TeX\ control sequences encountered in +\bison\ actions. Definitions of tokens (via \prodstyle{\%token}, +\prodstyle{\%nterm} and \prodstyle{\%type} directives) are +%$\underline{\hbox{underlined}}$ +typeset in {\bf bold}. +The \bison\ and \TeX\ entries are put +in distinct sections of the index in order to keep the separation +between the \Cee\ entries and the rest. It may be worth noting that +the {\it definition\/} of the symbol is listed under both its `macro +name' (such as \.{CHAR}, typeset as \prodstyle{CHAR} in the case of +the grammar below), as well as its `string' name if present (to +continue the previous example, \.{"char"} is synonymous with +\prodstyle{CHAR} after a declaration such as `\prodstyle{\%token} +\prodstyle{CHAR} \.{"char"}'), while the {\it use\/} of the term lists +whichever token form was referenced at the point of use (both forms +are accessible when the entry is typeset for the index and a macro can +be written to mention the other form as well). The original syntax of +\bison\ allows the programmer to declare tokens such as +\prodstyle{'\{'} and \prodstyle{'\}'} and the indexing macros honor +this convention even though in a typeless environment such as the +one the present typesetting parser is operating in such declarations +are redundant. The indexing macros also record the use of such +character tokens. The quotes indicate +that the `string' form of the token's name was used. A section set in +{\it italic\/} references the point where the corresponding term +appeared on the left hand side of a production. A production: +\let\TeXx\TeXxi +\def\gatoks{% + \omit\hfil&\omit\hfil&\omit\hfil\hbox to2em{\hfil}&\omit\hfil\cr + \noalign{\vskip-\baselineskip}% +}% +\beginmprod +left_hand_side: + term.1 term.2 term.3 \{\stashed{|TeX_("/dosomething/yy(1)");|}\} +\endmprod +inside the \TeX\ part of a \CWEB\ section will generate several +index entries, as well, including the entries for any +material inside the action, mimicking \CWEB's behavior for the +{\it inline \Cee\/} (\.{\yl}$\ldots$\.{\yl}). Such entries (except for +the references to the \Cee\ code inside actions) are labeled with $^\circ$, +to provide a reminder of their origin. + +This parser collection, as well as the indexing facilities therein have been +designed to showcase the broadest range of options available to the user +and thus it does not always exhibit the most sane choices one could make (for +example, using a full blown parser for term {\it names\/} is poor +design but it was picked to demonstrate multiple parsers in one +program). The same applies to the way the index is constructed (it +would be easy to only use the `string' name of the token if +available, thus avoiding referencing the same token twice). + +\TeX\ control sequences are listed following the index of all \bison\ +entries. The two indices are separated by a {\it dinkus} +(\dinkus). Since it is nearly impossible to determine at what point a +\TeX\ macro is defined (and most of them are defined outside of the +\CWEB\ sources), only their uses are listed (to be more precise, {\it +every\/} appearance of a macro is assumed to be its use). 
In a few cases, a +`graphic' representation for a control sequence is also listed (for +example, {\termindexfalse\def\texnspace{texline}\inlineTeXx{/getfirst}} represents +{\termindexfalse\def\texnspace{other}\inlineTeXx{/getfirst}}). The index entries are ordered alphabetically. The +latter may not be entirely obvious in the cases when the `graphical +representation' of the corresponding token manifests a significant +departure from its string version (such as |TeX_("/yy(1)");| +instead of {\def\texnspace{other}|TeX_("/yy(1)");|}). +\closeout\gindex +\let\inx\inxmod +\let\fin\finmod +\displaytokenrawtrue +\def\topofcontents{\null\vskip-3\baselineskip\centerline{C{\sc ONTENTS} (\sc\uppercase\expandafter{\title})}\medskip} diff --git a/support/splint/cweb/ssffo.w b/support/splint/cweb/ssffo.w new file mode 100644 index 0000000000..56e710b8c6 --- /dev/null +++ b/support/splint/cweb/ssffo.w @@ -0,0 +1,118 @@ +@q Copyright 2012-2015, Alexander Shibakov@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@> +@q GNU General Public License for more details.@> + +@q You should have received a copy of the GNU General Public License@> +@q along with SPLinT. If not, see .@> +\input limbo.sty +\def\optimization{5} +\input yy.sty + +\let\oldN\N +\let\N\textN +\let\M\textM +\def\hostparsernamespace{[unreacheable]} + +@** A simple scanner for \flex\ options. +This is a `bare-bones' scanner for a subset of the `extended' \bison\ +grammar that parses, well, some of the `extensions', namely, the +\flex\ state declarations. It does not use the state mechanism +itself, and is supposed to be used with the bootstrapping parser, even +though it is not strictly necessary. It parses state declarations as +long as they are separated into their own \CWEB\ sections and extracts +the {\it names\/} of the states. The \flex\ scanner output `driver' +does the rest after including the produced header file. + +If reusing the existing scanner for \bison\ were not a priority, a +proper way to design a scanner like this would be to make it a subset of the +existing scanner code. This way portions of the program would be made +more reusable and the overall design made more consistent. +@s TeX_ TeX +@(ssffo.ll@>= +@G + @> @ @= +%{@> @ @=%} + @> @ @= +%% + @> @ @= +%% +@g + +@ A couple of handy abbreviations to get started. Note that the +definition of a letter is more restrictive in this case since we only +need to grab the states of an existing \bison\ lexer. For a +bootstrapping scanner like this it is beneficial to fail early while +scanning something that is not in its attention domain: it results in +faster bootstrapping and a lower chance of accidentally parsing +something that should not have been. Making the syntax and the grammar +more restrictive helps to achieve this and makes the overall +design simpler. +@= +@G +letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ] +id {letter}({letter}|[-0-9])* +@g + +@ @= + +#include +#include + + void define_all_states( void ){} + +@ A standard combination of options to match the \TeX\ code that +drives the scanner.
+@= +@G +%option bison-bridge +%option noyywrap nounput noinput reentrant +%option noyy_top_state +%option debug +%option stack +%option outfile="ssffo.c" +@g + +@ There are not that many regular expressions to list, since the range +of tokens recognized by this routine is not very wide. +@= + @@; + @@; + +@ White space skipping. +@= +@G +[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=} +@g + +@ The rest of the tokens are either identifiers or \.{\%}-options. +@= +@G +{id} {@> @ @=} +"%x" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_X}" );@] @=} +"%s" {@> @[TeX_( "/yylexreturnptr{FLEX_STATE_S}" );@] @=} +. {@> @ @=} +@g + +@ @= + @[TeX_( "/iftracebadchars" );@]@; + @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yylexreturn{$undefined}" );@]@; + +@ The lexer returns standard \.{\\yyunion} types. +@= + @[TeX_( "/edef/next{/yylval{/nx/idit{/the/yytextpure}{/the/yytext}" );@]@; + @[TeX_( " {/the/yyfmark}{/the/yysmark}}}/next" );@]@; + @[TeX_( "/yylexreturn{ID}" );@]@; + +@** Index. +\def\TeXx{\TeX\ material} \ No newline at end of file diff --git a/support/splint/doc/ldman.pdf b/support/splint/doc/ldman.pdf new file mode 100644 index 0000000000..22a07ae754 Binary files /dev/null and b/support/splint/doc/ldman.pdf differ diff --git a/support/splint/doc/splint.pdf b/support/splint/doc/splint.pdf new file mode 100644 index 0000000000..101984caa1 Binary files /dev/null and b/support/splint/doc/splint.pdf differ diff --git a/support/splint/doc/tb109shibakov.pdf b/support/splint/doc/tb109shibakov.pdf new file mode 100644 index 0000000000..094e15fb55 Binary files /dev/null and b/support/splint/doc/tb109shibakov.pdf differ diff --git a/support/splint/examples/count/count.sty b/support/splint/examples/count/count.sty new file mode 100644 index 0000000000..bb8998e64f --- /dev/null +++ b/support/splint/examples/count/count.sty @@ -0,0 +1,552 @@ +% the sequences in this file have the eventual goal of implementing macros that count and compare token sequences +% (as either parameters or contents of token registers) in an expandable manner. +% currently, a new sequence is prepared first, consisting of `markers' +% that indicate whether the token in the corresponding position of the +% original sequence is a potential blank space or brace (closing or +% opening); +% the macros then make the determination, for the last possible blank, +% of whether it is a true space or not; the goal is to make this +% macro independent of the value of \escapechar +% +% as of now the implementation is far from refined although an expandable counting macro +% can be easily produced + +\catcode`\@=11 +\input ../../tex/yycommon.sty +\input ../../tex/yymisc.sty + +%%%%%%%%%%%%% the material before this point should be included from appropriate files + +% #1 -- `call stack' +% #2 -- remaining sequence +% #3 -- `parsed' sequence + +\def\yypreparsetokensequenc@#1#2#3{% + \yystringempty{#2}{#1{#3}}{\yypreparsetokensequen@@{#1}{#2}{#3}}% +} + +\def\yypreparsetokensequen@@#1#2#3{% remaining sequence is nonempty + \yystartsinbrace{#2}{\yydealwithbracedgroup{#1}{#2}{#3}}{\yypreparsetokensequ@n@@{#1}{#2}{#3}}% +} + +\def\yydealwithbracedgroup#1#2#3{% the first token of the remaining sequence is a brace + \iffalse{\fi\yydealwithbracedgro@p#2}{#1}{#3}% +} + +\def\yydealwithbracedgro@p#1{% + \yypreparsetokensequenc@{\yyrepackagesequence}{#1}{}% +} + +% #1 -- parsed sequence +% this is a sequence to `propagate expansion' into the next parameter.
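+% (an aside, not part of the original file: the `triple \expandafter' chains +% used below let one expansion step travel past a whole prefix of tokens; +% with \def\x{xx}, expanding the first \expandafter of +%    \expandafter\expandafter\expandafter\a\expandafter\expandafter\expandafter\b\x +% expands \x once and leaves \expandafter\a\expandafter\b xx behind, +% i.e., a shorter chain of the same shape, so the step can be repeated)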
+% the same can be achieved by packaging the whole sequence with a +% \csname ... \endcsname pair and using a simple \expandafter + +\def\yyrepackagesequence#1{% + \yyrepackagesequenc@{}#1\end +} + +% #1 -- `packaged' sequence (\expandafter\expandafter\expandafter ? ...) +% #2 -- the next category 12 character or \end + +\def\yyrepackagesequenc@#1#2{% + \ifx#2\end + \yybreak{\yyrepackagesequ@nc@{#1\expandafter\expandafter\expandafter}}% + \else + \yybreak{\yyrepackagesequenc@{#1\expandafter\expandafter\expandafter#2}}% + \yycontinue +} + +% #1 -- `packaged' sequence (\expandafter\expandafter\expandafter ? ...) +% this macro is followed by the remainder of the original sequence with a so far +% unmatched right brace, the `call stack' and the parsed sequence. + +\def\yyrepackagesequ@nc@#1{% + \expandafter\expandafter\expandafter\yyrepackagesequ@nc@swap#1{\expandafter\eatone\string}% +} + +% #1 -- parsed sequence without packaging + +\def\yyrepackagesequ@nc@swap#1#{% + \yyrepackagesequ@nc@sw@p{#1}% +} + +% #1 -- parsed `inner' sequence +% #2 -- remainder of the original sequence +% #3 -- `call stack' +% #4 -- parsed sequence so far + +\def\yyrepackagesequ@nc@sw@p#1#2#3#4{% + \yypreparsetokensequenc@{#3}{#2}{#4[#1]}% +} + +% `braced group' thread ends here + +% #1 -- `call stack' +% #2 -- remaining sequence +% #3 -- `parsed' sequence + +\def\yypreparsetokensequ@n@@#1#2#3{% the remaining group in #2 is nonempty and does not start with a brace + \yystartsinspace{#2}{\yyconsumetruespace{#1}{#2}{#3}}{\yypreparsetokenseq@@n@@{#1}{#2}{#3}}% +} + +\def\yyconsumetruespace#1#2#3{% + \expandafter\yyconsumetruespac@swap\expandafter{\eatonespace#2}{#1}{#3.}% +} + +\def\yyconsumetruespac@swap#1#2#3{% + \yypreparsetokensequenc@{#2}{#1}{#3}% +} + +% `group starting with a true (character code 32, category code 10) space' thread ends here + +% #1 -- `call stack' +% #2 -- remaining sequence +% #3 -- `parsed' sequence + +\def\yypreparsetokenseq@@n@@#1#2#3{% a nonempty group, that does not start with a brace or a true space + \yymatchblankspace{#2}{\yyrescanblankspace{#2}{#1}{#3}}{\yypreparsetokens@q@@n@@{#1}{#2}{#3}}% +} + +% #1 -- remaining sequence +% #2 -- `call stack' +% #3 -- `parsed' sequence + +\def\yyrescanblankspace#1#2#3{% + \expandafter\expandafter\expandafter + \yyrescanblankspac@swap + \expandafter\expandafter\expandafter{\expandafter\yynormalizeblankspac@\meaning#1}{#2}{#3*}% +} + +\def\yyrescanblankspac@swap#1#2#3{% + \yystartsinspace{#1}{% + \expandafter\yyrescanblankspac@sw@p\expandafter{\eatonespace#1}{#2}{#3}% + }{% + \expandafter\yyrescanblankspac@sw@p\expandafter{\eatone#1}{#2}{#3}% + }% +} + +\def\yyrescanblankspac@sw@p#1#2#3{% + \yypreparsetokensequenc@{#2}{#1}{#3}% +} + +% `group starting with a blank space' ends here + +% #1 -- `call stack' +% #2 -- remaining sequence +% #3 -- `parsed' sequence + +\def\yypreparsetokens@q@@n@@#1#2#3{% nonempty group starting with a non blank, non brace token + \expandafter\yypreparsetokens@q@@n@@swap\expandafter{\eatone#2}{#1}{#30}% +} + +\def\yypreparsetokens@q@@n@@swap#1#2#3{% + \yypreparsetokensequenc@{#2}{#1}{#3}% +} + +% #1 -- `call stack' +% #2 -- remaining sequence +% #3 -- `parsed' sequence + +\def\yydebracesequence#1#2#3{% + \yybracesleft#3[\end{#1}{#2}% +} + +% #1 -- tokens preceding the brace +% #2 -- tokens following the brace +% #3 -- `call stack' +% #4 -- remaining sequence + +\def\yybracesleft#1[#2\end#3#4{% + \if9#29% + \yybreak{\yyfirstoftwo}% + \else + \yybreak{\yysecondoftwo}% + 
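+% (a note on the test above: #2 picks up the marker characters between the +% first `[' and \end; the markers never include the character `9', so +% \if9#29 succeeds exactly when #2 is empty, i.e., when the parsed sequence +% contained no opening brace marker and the `[' that was matched is the one +% appended by \yydebracesequence)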
\yycontinue{#3{#4}}{\yydebracesequenc@swap{#4}{#3}{#1#2}}% +} + +\def\yydebracesequenc@swap#1#2#3{% + \expandafter\expandafter\expandafter + \yydebracesequenc@sw@p + \expandafter\expandafter\expandafter{\yygrabprebrace#1}{#2}{#3}% +} + +\def\yydebracesequenc@sw@p#1#2#3{% + \yydebracesequ@nc@sw@p#3\end{#1}{#2}% +} + +\def\yydebracesequ@nc@sw@p#1[\end#2#3{% + \yydebracesequence{#3}{#2}{#1}% +} +\def\yygrabprebrace#1#{% + \yygrabprebrac@{#1}% +} + +\def\yygrabprebrac@#1#2{#1#2} + +% the `debracing group' ends here + +% string comparison macros below are woefully inefficient and can be replaced by a much +% shorter and faster version; they are easy to analyze, though; + +% #1 -- string of category code 12 or 10 characters +% #2 -- string of category code 12 or 10 characters + +\def\yycomparesimplestrings#1#2{% + \yystringempty{#1}{% + \yystringempty{#2}{\yyfirstoftwo}{\yysecondoftwo}% + }{\yycomparesimplestrings@{#1}{#2}}% +} + +\def\yycomparesimplestrings@#1#2{% the first string is nonempty + \yystringempty{#2}{\yysecondoftwo}{\yycomparesimplestrings@@{#1}{#2}}% +} + +\def\yycomparesimplestrings@@#1#2{% both strings are nonempty + \yystartsinspace{#1}{% + \yystartsinspace{#2}{\yyabsorbfirstspace{#1}{#2}}{\yysecondoftwo}% + }{% + \yystartsinspace{#2}{\yysecondoftwo}{\yyabsorbfirstnonspace{#1}{#2}}% + } +} + +\def\yyabsorbfirstspace#1#2{% + \expandafter\yyabsorbfirstspac@swap\expandafter{\eatonespace#1}{#2}% +} + +\def\yyabsorbfirstspac@swap#1#2{% + \expandafter\yyabsorbfirst@swap\expandafter{\eatonespace#2}{#1}% +} + +\def\yyabsorbfirstnonspace#1#2{% + \expandafter\yyabsorbfirstnonspac@swap\expandafter{\eatone#1}{#2}% +} + +\def\yyabsorbfirstnonspac@swap#1#2{% + \expandafter\yyabsorbfirst@swap\expandafter{\eatone#2}{#1}% +} + +\def\yyabsorbfirst@swap#1#2{% + \yycomparesimplestrings{#2}{#1}% +} + +% `compare strings of category code 12' thread ends here + +% #1 -- a balanced sequence of tokens +% #2 -- a `parsed version' of the same sequence +% +% #1 should be such that \eatone#1 does not lead to an immediate error + +\def\yycomparetailsignatures#1#2{% + \yypreparsetokensequenc@{\yycomparesimplestrings{#2}}{#1}{}% +} + +% #1 -- `call stack' +% #2 -- remaining sequence +% #3 -- remaining parsed sequence +% #4 -- resolved sequence + +\def\yyresolvespaces#1#2#3#4{% + \yystringempty{#2}{#1{#4}}{\yyresolveoneblank#3\end{#2}{#4}{#1}}% +} + +\def\yyresolveoneblank#1{% + \if#1*% + \expandafter\yyfirstoftwo + \else + \expandafter\yysecondoftwo + \fi + {\yyresolveonebl@nk}{% + \ifx#1.% + \expandafter\yyfirstoftwo + \else + \expandafter\yysecondoftwo + \fi + {\yyresolveonebl@nk}% + {\yyconsumebothnonblanks}% + }% +} + +% #1 -- the remainder of the parsed sequence +% #2 -- remaining sequence +% #3 -- resolved sequence + +\def\yyresolveonebl@nk#1\end#2#3{% + \expandafter\yycomparetailsignatures\expandafter{\eatone#2}{#1}% + {\expandafter\yyresolvespac@sswap\expandafter{\eatone#2}{#1}{#3+}}% + {\expandafter\yyresolveonebl@nklight\expandafter{\eatone#2}{#1}{#3-}}% +} + +% #1 -- remaining sequence +% #2 -- remaining parsed sequence +% #3 -- resolved sequence +% #4 -- `call stack' + +\def\yyresolvespac@sswap#1#2#3#4{% + \yyresolvespaces{#4}{#1}{#2}{#3}% +} + +\def\yyconsumebothnonblanks#1\end#2#3{% + \expandafter\yyconsumebothnonblanksswap\expandafter{\eatone#2}{#1}{#30}% +} + +\def\yyconsumebothnonblanksswap#1#2#3#4{% + \yyresolvespaces{#4}{#1}{#2}{#3}% +} + +% #1 -- remaining sequence +% #2 -- the remainder of the parsed sequence +% #3 -- resolved sequence + +\def\yyresolveonebl@nklight#1#2#3{% + 
\yyresolveonebl@nklighttest#2\end{#1}{#3}% +} + +\def\yyresolveonebl@nklighttest#1#2\end#3#4{% + \yycomparetailsignatures{#3}{#2}% + {% + \if#1*% + \expandafter\yyfirstoftwo + \else + \expandafter\yysecondoftwo + \fi + {\yyresolvespac@sswap{#3}{#2}{#4+}}% + {\yyresolvespac@sswap{#3}{#2}{#40}}% + }% + {\yyresolveonebl@nklight{#3}{#2}{#4-}}% +} + +% `space resolution code' ends here + +% #1 -- remaining parsed sequence +% #2 -- analysed sequence + +\def\yyanalysetokens@#1#2{% + \yystringempty{#1}{{#2}}% + {\yyanalysetok@ns@#1\end{#2}}% +} + +\def\yyanalysetok@ns@#1#2\end{% + \ifx#1.% + \expandafter\yyfirstoftwo + \else + \expandafter\yysecondoftwo + \fi + {\yygrabablank{#2}}% + {% + \ifx#1[% not a space, an opening brace + \expandafter\yyfirstoftwo + \else + \expandafter\yysecondoftwo + \fi + {% + \yydisableobrace{#2}% + }{% + \ifx#1]% not a space, a closing brace + \expandafter\yyfirstoftwo + \else + \expandafter\yysecondoftwo + \fi + {% + \yydisablecbrace{#2}% + }{% neither space nor brace + \yygrabtokenraw{#2}% + }% + }% + }% +} + +% #1 -- remaining parsed sequence +% #2 -- analysed sequence +% #3 -- next token + +\def\yygrabtokenraw#1#2#3{% + \expandafter\yyanalysetokens@swap\expandafter{\meaning#3}{#1}{#2}% +} + +\def\yyanalysetokens@swap#1#2#3{% + \yyanalysetokens@{#2}{#3t#1e}% +} + +\def\yygrabablank#1#2 {% + \yyanalysetokens@{#1}{#2s0e}% +} + +% #1 -- remaining parsed sequence +% #2 -- analysed sequence + +\def\yydisablecbrace#1#2{% + \yydisablecbrac@{}#1\relax#2\end +} + + +\def\yydisablecbrac@#1#2{% + \ifx#2\end + \yybreak{\yydisablecbrac@@{#1\expandafter\expandafter\expandafter}}% + \else + \yybreak{\yydisablecbrac@{#1\expandafter\expandafter\expandafter#2}}% + \yycontinue +} + +\def\yydisablecbrac@@#1{% + \expandafter\expandafter\expandafter + \yydisablecbrace@@@#1\end + \expandafter\expandafter\expandafter + {\iffalse}\fi\string +} + +\def\yydisablecbrace@@@#1\relax#2\end#3{% + \yystartsinspace{#3}% + {\expandafter\yyanalysetok@nsswap\expandafter{\eatonespace#3}{#1}{#2c1e}}% + {\expandafter\yyanalysetok@nsswap\expandafter{\eatone#3}{#1}{#2c2e}}% +} + +\def\yyanalysetok@nsswap#1#2#3{% + \iffalse{\fi\yyanalysetokens@{#2}{#3}#1}% +} + +% #1 -- remaining parsed sequence +% #2 -- analysed sequence + +\def\yydisableobrace#1#2{% + \yydisableobrac@{}#1\relax#2\end +} + + +\def\yydisableobrac@#1#2{% + \ifx#2\end + \yybreak{\yydisableobrac@@{#1\expandafter\expandafter\expandafter}}% + \else + \yybreak{\yydisableobrac@{#1\expandafter\expandafter\expandafter#2}}% + \yycontinue +} + +\def\yydisableobrac@@#1{% + \expandafter\expandafter\expandafter + \yydisableobrace@@@#1\end + \expandafter\expandafter\expandafter + {\iffalse}\fi\string +} + +\def\yydisableobrace@@@#1\relax#2\end#3{% + \yystartsinspace{#3}% + {\expandafter\yyanalysetok@nsswap\expandafter{\eatonespace#3}{#1}{#2o1e}}% + {\expandafter\yyanalysetok@nsswap\expandafter{\eatone#3}{#1}{#2o2e}}% +} + +% \ifx1\expandafter\eatone\string\11% check if \escapechar is nontrivial + +\uccode`\ =`\- + +\uppercase{\def\dotspace{ }} + +\toksa\expandafter\expandafter\expandafter{\expandafter\meaning\dotspace} + +\toksb{-} + +\toksc{#2} + +\toksd\toksa + +\yyreplacestring\toksb\in\toksa\with\toksc + +\toksc{} +\yyreplacestring\toksb\in\toksd\with\toksc + +\expandafter\def\expandafter\yymatchblankspac@\expandafter#\expandafter1\the\toksd{% + \yystringempty{#1}{\expandafter\yysecondofthree\expandafter{\string}}% + {\expandafter\yythirdofthree\expandafter{\string}}% +} + +\edef\yymatchblankspace#1{% is it \catcode 10 token? 
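+% (added note: \toksd was loaded above with the tail of \meaning\dotspace,
+% i.e. the characters `blank space' followed by a space; matching \meaning#1
+% against this template detects any token that TeX reports as a blank space,
+% keeping the test independent of the value of \escapechar, as promised at
+% the top of the file)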
+ \noexpand\iffalse{\noexpand\fi
+ \noexpand\expandafter
+ \noexpand\yymatchblankspac@
+ \noexpand\meaning#1\the\toksd}%
+}
+
+% the idea behind the sequence below is that a leading character of category code 10
+% is replaced either by a character of category code 10 and character code 32 or a character
+% of category code 12 and character code other than 32
+% note that while it is tempting to replace the definition below by something that ends in
+% ... blank space #2{ ... with the hope of absorbing the result of \meaning in one step,
+% this will not give the desired result in the case of an active character,
+% say, `~' that had been \let to the normal blank space
+
+\expandafter\def\expandafter\yynormalizeblankspac@\expandafter#\expandafter1\the\toksd{}
+
+%% test code begins here
+
+\tracingmacros=3
+\tracingonline=3
+
+\catcode`\ =13\relax%
+\def\actspace{ }%
+\catcode`\ =10\relax%
+
+\catcode`\.=13\relax%
+\def\actdotspace{.}%
+\catcode`\.=12\relax%
+
+\edef\makefunkydotspace{\let\expandafter\noexpand\actdotspace= \dotspace}
+\edef\makefunkyspace{\let\expandafter\noexpand\actspace= \space}
+
+\makefunkyspace
+\makefunkydotspace
+
+\def\identity#1{#1}
+
+%\def\tempseq{\space\dotspace\space\space\dotspace\expandafter\noexpand\actspace\expandafter\noexpand\actdotspace
+% \end\noexpand\fi\noexpand\else\noexpand\iffalse}
+%\def\tempseq{\space\dotspace e}
+
+%\edef\mypars@{%
+% \noexpand\yypreparsetokensequenc@{\noexpand\identity}%
+% {\tempseq}%
+% {}%
+%}
+
+%\edef\myparse{\mypars@}
+
+%\toksa\expandafter{\myparse}
+
+%\showthe\toksa
+
+%\edef\myresolve{\noexpand\yyresolvespaces{\errmessage}%
+% {\tempseq}%
+% {\the\toksa}{}
+%}
+
+%\show\myresolve
+
+%\myresolve
+
+\catcode`\<=1
+\catcode`\>=2
+\uccode`\<=32
+\uccode`\>=32
+
+\uppercase{\edef\temptest{<> \space\space\dotspace\expandafter\noexpand\actspace\expandafter\noexpand\actdotspace{{} {{}{{ u o l k kk
+ \end\noexpand\fi\noexpand\else\noexpand\iffalse{}} }}}}}
+
+%\uppercase{\edef\temptest{\dotspace E <>}}
+
+\show\temptest
+
+\def\displaypreparse#1{%
+ \expandafter\errmessage\expandafter{\romannumeral-1\yypreparsetokensequenc@{\yyanalysetokens@}{#1}{}{}#1}%
+}
+
+%\def\displaydebraceparse#1{%
+% \yypreparsetokensequenc@{\yydebracesequence{\displayseq}{#1}}{#1}{}%
+%}
+
+%\def\displayseq#1{\toksa{#1}\showthe\toksa}
+
+\expandafter\displaypreparse\expandafter{\temptest}
+
+\errmessage{stop!}
+
+%\expandafter\displaydebraceparse\expandafter{\temptest}
+
+\end
diff --git a/support/splint/examples/expression/Makefile b/support/splint/examples/expression/Makefile
new file mode 100644
index 0000000000..0e2f33c2ba
--- /dev/null
+++ b/support/splint/examples/expression/Makefile
@@ -0,0 +1,59 @@
+# this Makefile uses a flat directory structure for demonstration
+# purposes; the main directory (../..) contains a slightly more
+# modular organization.
+
+SPLINT_ROOT = $(shell pwd)/../..
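+
+# (added note) makefile.inc and makefile.loc are expected to define the
+# tools and file lists used below (CC, FLEX, CTANGLE, CWEAVE, PDFTEX,
+# UNLINE, SPLINT_XPTABLES, SPLINT_XLTABLES); see ${SPLINT_ROOT}.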
+
+include ${SPLINT_ROOT}/makefile.inc
+include ${SPLINT_ROOT}/makefile.loc
+
+# rules specific to this example
+
+PARSER = expp
+LEXER = expl
+
+all: expression.pdf ptab.tex ltab.tex
+
+step1: expp.y expl.l
+step2: ptabout ltabout
+step3: ltab.tex ptab.tex
+
+ptabout: ${SPLINT_ROOT}/cweb/mkeparser.c ${PARSER}.c
+	${CC} -DPARSER_FILE=\"../examples/expression/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $<
+
+ptab.tex: ptabout
+	$< --optimize-actions $@
+
+ltabout: ${SPLINT_ROOT}/cweb/mkscanner.c ${LEXER}.c
+	${CC} -DLEXER_FILE=\"../examples/expression/$(lastword $^)\" -o $@ $<
+
+ltab.tex: ltabout
+	$< --optimize-actions $@
+
+${LEXER}.c: ${LEXER}.l
+	${FLEX} -o $@ $<
+
+test.tex ${PARSER}.yy ${LEXER}.ll: expression.x
+	@${CTANGLE} $<
+	${UNLINE} test.txx test.tex
+
+expression.tex: expression.x
+	@${CWEAVE} $<
+
+expression.pdf: expression.tex ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}/cweb/bo.tok etoks.sty
+	@${PDFTEX} $<
+
+test: test.tex ptab.tex ltab.tex
+	tex test.tex
+
+docs: expression.pdf
+
+mostlyclean: clean_temp
+	-rm -f ptabout ltabout expression.c *.txx
+
+clean: clean_core
+	-rm -f ptabout ltabout *.txx
+
+distclean: clean
+	cd ${SPLINT_ROOT} && ${MAKE} clean
+
diff --git a/support/splint/examples/expression/etoks.sty b/support/splint/examples/expression/etoks.sty
new file mode 100644
index 0000000000..7326245338
--- /dev/null
+++ b/support/splint/examples/expression/etoks.sty
@@ -0,0 +1,3 @@
+\prettywordpair{IDENTIFIER}{{$\langle$\rm identifier$\rangle$}}
+\prettywordpair{INTEGER}{{\bf int}}
+\prettywordpair{BOGUS}{{\tt Oh, @\%\$\&*!}}
diff --git a/support/splint/examples/expression/expression.sty b/support/splint/examples/expression/expression.sty
new file mode 100644
index 0000000000..9f9195aafb
--- /dev/null
+++ b/support/splint/examples/expression/expression.sty
@@ -0,0 +1,25 @@
+\def\optimization{5}
+
+\input yycommon.sty % general routines for stack and array access
+\input yymisc.sty % helper macros (stack manipulation, table processing, value stack pointers)
+\input yyinput.sty % input functions
+\input yyparse.sty % parser machinery
+\input flex.sty % lexer functions
+\input yyfaststack.sty % sped up stack access functions
+\input yyboth.sty % parser initialization, optimization
+
+\let\yylexreturn\yylexreturnregular
+\let\setflexstates\relax
+\let\parsernamespace\empty
+
+\genericparser
+ name: main,
+ ptables: ptab.tex,
+ ltables: ltab.tex,
+ tokens: {},
+ asetup: {},
+ dsetup: {},
+ rsetup: {},
+ optimization: \optimizeall;%
+
+\tomainparser
diff --git a/support/splint/examples/expression/expression.w b/support/splint/examples/expression/expression.w
new file mode 100644
index 0000000000..c775741184
--- /dev/null
+++ b/support/splint/examples/expression/expression.w
@@ -0,0 +1,262 @@
+@q Copyright 2012-2014, Alexander Shibakov@>
+@q This file is part of SPLinT@>
+
+@q SPLinT is free software: you can redistribute it and/or modify@>
+@q it under the terms of the GNU General Public License as published by@>
+@q the Free Software Foundation, either version 3 of the License, or@>
+@q (at your option) any later version.@>
+
+@q SPLinT is distributed in the hope that it will be useful,@>
+@q but WITHOUT ANY WARRANTY; without even the implied warranty of@>
+@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@>
+@q GNU General Public License for more details.@>
+
+@q You should have received a copy of the GNU General Public License@>
+@q along with SPLinT. If not, see .@>
+\input limbo.sty
+\def\optimization{5}
+\input yy.sty
+
+@** Parser file.
This is an example parser for expressions. It takes
+advantage of some of the features of \splint\ generated parsers,
+although anything that takes more than a straightforward setup is
+omitted.
+
+The top-level structure of the input file presents no surprises and is
+presented below.
+\let\currentparsernamespace\parsernamespace
+ \let\parsernamespace\smallnamespace
+ \let\hostparsernamespace\smallnamespace
+ \input etoks.sty
+\let\parsernamespace\currentparsernamespace
+\def\texnspace{[other]}
+@s TeX_ TeX
+
+@(expp.yy@>=
+@G Switch to generic mode.
+%{@> @<\.{expression} parser \Cee\ preamble@> @=%}
+ @> @ @=
+%union {@> @ @=}
+%{@> @<\.{expression} parser \Cee\ postamble@> @=%}
+ @> @ @=
+%%
+ @> @ @=
+%%
+@g
+
+@ The \prodstyle{\%token-table} option is not merely a debugging help,
+as it is in the case of the `real' \bison\ parsers and cannot be
+omitted. The name table it is responsible for setting up is used as
+a set of keys for various associative arrays. Token declarations are
+parsed by a bootstrap parser during the \TeX\ processing stage to
+establish equivalences between the names kept in |yytname| and the
+macro names used internally by the parsers built by \bison. The reason
+this is necessary is not very complicated: either version of the token
+name can be used in the grammar while the `driver' program
+(\.{mkeparser.c}) only has access to the names in |yytname|. In
+general, this is important whenever the grammar uses a different set of
+token names from the lexer or when diagnostic messages are output. An
+important case is the symbolic name switch: before the rules can be
+listed to create the switch, the token numerical values must be
+known. If the parser is only aware of the |yytname| listed names and the
+grammar being parsed uses the `internal' names, the listing macros
+will fail. The array |yytname| is used in a few functions inside the
+`driver', as well, so omitting this option would make building the
+parser impossible.
+@=
+@G
+%token-table
+%debug
+%start value
+@g
+
+@ To continue the token name discussion, this parser uses internal
+names only so this is what will appear in the |yytname| array. No
+bootstrapping is necessary. The typesetting of the tokens can be
+adjusted using \.{\\prettywordpair} macros (see the included
+\.{etoks.sty} file for examples).
+@=
+@G
+%token IDENTIFIER
+%token INTEGER
+%token BOGUS
+@g
+
+@ Here is the whole grammar, simply additive expressions with two
+levels of precedence.
+@=
+@G
+value:
+ expression {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+;
+
+expression:
+ term {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+| expression '+' term {@> @ @=}
+;
+
+term:
+ atom {@> TeX_( "/yy0{/the/yy(1)}" ); @=}
+| term '*' atom {@> @ @=}
+;
+
+atom:
+ IDENTIFIER {@> @ @=}
+| INTEGER {@> @ @=}
+| '(' expression ')' {@> TeX_( "/yy0{/the/yy(2)}" ); @=}
+;
+@g
+
+@ @=
+ @[TeX_( "/tempca/the/yy(1)/relax" );@]@;
+ @[TeX_( "/tempcb/the/yy(3)/relax" );@]@;
+ @[TeX_( "/advance/tempca by /tempcb" );@]@;
+ @[TeX_( "/yy0{/the/tempca}" );@]@;
+
+@ @=
+ @[TeX_( "/tempca/the/yy(1)/relax" );@]@;
+ @[TeX_( "/tempcb/the/yy(3)/relax" );@]@;
+ @[TeX_( "/multiply/tempca by /tempcb" );@]@;
+ @[TeX_( "/yy0{/the/tempca}" );@]@;
+
+@ @=
+ @[TeX_( "/getsecond{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/toksb/expandafter/expandafter/expandafter{/expandafter" );@]@;
+ @[TeX_( " /number/csname/the/toksa/endcsname}" );@]@;
+ @[TeX_( "/yy0{/the/toksb}" );@]@;
+
+@ @=
+ @[TeX_( "/getfirst{/yy(1)}/to/toksa" );@]@;
+ @[TeX_( "/yy0{/the/toksa}" );@]@;
+
+@ \Cee\ preamble.
In this case, there are no `real' actions that our +grammar performs, only \TeX\ output, so this section is empty. + +@<\.{expression} parser \Cee\ preamble@>= + +@ \Cee\ postamble. It is tricky to insert function definitions that use \bison's internal types, +as they have to be inserted in a place that is aware of the internal definitions but before said +definitions are used. + +@<\.{expression} parser \Cee\ postamble@>= +#define YYPRINT(file, type, value) @[yyprint (file, type, value)@] + static void yyprint (FILE *file, int type, YYSTYPE value){} + +@ Union of types. Empty as well. + +@= + +@** The lexer file. The scanner for the grammar above is even +simpler. Identifiers are interpreted as variable names that expand to +appropriate values. +%\checktabletrue +@(expl.ll@>= +@G + @> @ @= +%{@> @ @=%} + @> @ @= +%% + @> @ @= +%% + +@ @= +@G +letter [_abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ] +id {letter}({letter}|[-0-9])* +int [0-9]+ +@g + +@ @= + +#include +#include + + void define_all_states( void ){} + +@ @= +@G +%option bison-bridge +%option noyywrap nounput noinput reentrant +%option noyy_top_state +%option debug +%option stack +%option outfile="expl.c" +@g + +@ @= + @@; + @@; + +@ White space skipping. +\traceparserstatestrue +\tracestackstrue +\tracerulestrue +\traceactionstrue +\tracelookaheadtrue +\traceparseresultstrue +\tracebadcharstrue +\yyflexdebugtrue +% +\traceparserstatesfalse +\tracestacksfalse +\tracerulesfalse +\traceactionsfalse +\tracelookaheadfalse +\traceparseresultsfalse +\tracebadcharsfalse +\yyflexdebugfalse +@= +@G +[ \f\n\t\v] {@> @[TeX_( "/yylexnext" );@]@=} +@g + +@ @= +@G +{id} {@> @[TeX_( "/yylexreturnval{IDENTIFIER}" );@]@=} +{int} {@> @[TeX_( "/yylexreturnval{INTEGER}" );@]@=} +[+*()] {@> @[TeX_( "/yylexreturnchar" );@]@=} +. {@> @[@@]@=} +@g + +@ @= + @[TeX_( "/iftracebadchars" );@]@; + @[TeX_( " /yycomplain{invalid character(s): /the/yytext}" );@]@; + @[TeX_( "/fi" );@]@; + @[TeX_( "/yylexreturn{$undefined}" );@]@; + +@*Test file. The test file includes a handy list of debugging options +that can be activated to see the inner workings of the parser and +scanner routines. +@(test.txx@>= +@G +\input expression.sty + +\iffalse + \tracedfatrue + \traceparserstatestrue + \tracestackstrue + \tracerulestrue + \traceactionstrue + \tracelookaheadtrue + \traceparseresultstrue + \tracebadcharstrue + \yyflexdebugtrue + \yyinputdebugtrue +\fi + +\def\varone{10} +\def\expression{1 + 3 * ( 5 + 7 ) + varone} +\basicparserinit\expandafter\yyparse \expression \yyeof\yyeof\endparseinput\endparse + +{ + \newlinechar`^^J + \immediate\write16{^^Jexpression: \expression^^Jthe value: \the\yyval^^J^^J} +} + +\bye +@g + +@**Index. +\immediate\closeout\exampletable +\def\Tex{\TeX\ output} +\def\TeXx{\TeX\ output} diff --git a/support/splint/examples/ld/Makefile b/support/splint/examples/ld/Makefile new file mode 100644 index 0000000000..a23f191b9c --- /dev/null +++ b/support/splint/examples/ld/Makefile @@ -0,0 +1,118 @@ +# this Makefile uses a flat directory structure for demonstration +# purposes; the main directory (../..) contains a slightly more +# modular organization. + +SPLINT_ROOT = $(shell pwd)/../.. 
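+
+# (added note) the stems below name the pieces of this example: ldp is the
+# bison grammar, ldl the flex scanner, and ldman the typeset manual; judging
+# by the rules that follow, the TeX parser and lexer tables (ptab.tex,
+# ltab.tex) are produced in two stages: ctangle extracts the .yy/.ll
+# sources, and the mkeparser/mkscanner drivers write the tables in TeX form.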
+ +PARSER = ldp +LEXER = ldl +MANUAL = ldman + +LD_CORE_PREREQS = ${SPLINT_XTEXSTYLES} ${SPLINT_XPTABLES} ${SPLINT_XLTABLES} ${SPLINT_ROOT}/cweb/bo.tok +LD_DOC_PREREQS = %.tex %.sty ${PARSER}.tok ldunion.sty ldint.sty ldfrontmatter.sty ${LD_CORE_PREREQS} ptab.tex ltab.tex ld_small_tab.tex +LD_DOC_PREREQS += ld_small_dfa.tex ltokenset.sty lstokenset.sty +LD_DOC_PREREQS_XREF = ${LD_DOC_PREREQS} %.scn %.idx + +include ${SPLINT_ROOT}/makefile.inc +include ${SPLINT_ROOT}/makefile.loc + +# rules specific to this example + +ptabout: ${SPLINT_ROOT}/cweb/mkeparser.c ${PARSER}.c + ${CC} -DPARSER_FILE=\"../examples/ld/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $< + +ptab.tex: ptabout + $< --optimize-actions --optimize-tables $@ + +ltabout: ${SPLINT_ROOT}/cweb/mkscanner.c ldl_states.h ${LEXER}.c + ${CC} -DLEXER_FILE=\"../examples/ld/$(lastword $^)\" -o $@ $< + +ltab.tex: ltabout + $< --optimize-actions --optimize-tables $@ + +${LEXER}.c: ${LEXER}.l + ${FLEX} -o $@ $< + +${PARSER}.yy : ldgram.x + @${CTANGLE} $< + +${LEXER}.ll: ldlex.x + @${CTANGLE} $< + +${MANUAL}.stx: ${MANUAL}.x ldgram.x ldlex.x ldlexo.x ldgramo.x ldnp.x + @${CTANGLE} $< + +# term name parser for ld grammar + +ldsmallp_out: ${SPLINT_ROOT}/cweb/mkeparser.c ld_small_parser.c + ${CC} ${BISON_STATE} -DPARSER_FILE=\"../examples/ld/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $< + +ldsmalll_out: ${SPLINT_ROOT}/cweb/mkscanner.c ld_small_lexer.c + ${CC} -DLEXER_FILE=\"../examples/ld/$(lastword $^)\" -o $@ $< + +ld_small_tab.tex: ldsmallp_out + $< --optimize-actions --optimize-tables $@ + +ld_small_dfa.tex: ldsmalll_out + $< --optimize-actions --optimize-tables $@ + +ld_small_parser.yy \ +ld_small_lexer.ll: ldnp.x + @${CTANGLE} $< + +# numeric parser for ld grammar + +ldnump_out: ${SPLINT_ROOT}/cweb/mkeparser.c ld_num_parser.c + ${CC} ${BISON_STATE} -DPARSER_FILE=\"../examples/ld/$(lastword $^)\" -DYYPARSE_PARAMETERS= -o $@ $< + +ldnuml_out: ${SPLINT_ROOT}/cweb/mkscanner.c ld_num_lexer.c + ${CC} -DLEXER_FILE=\"../examples/ld/$(lastword $^)\" -o $@ $< + +ld_num_tab.tex: ldnump_out + $< --optimize-actions --optimize-tables $@ + +ld_num_dfa.tex: ldnuml_out + $< --optimize-actions --optimize-tables $@ + +ld_num_parser.yy \ +ld_num_lexer.ll: ldnump.x + @${CTANGLE} $< + +${MANUAL}.tex: ${MANUAL}.x ldgram.x ldlex.x ldlexo.x ldgramo.x ldnp.x + ${CWEAVE} $< + +${MANUAL}.sty: ${MANUAL}.stx + ${UNLINE} $< $@ + +${MANUAL}.gdx: %.gdx: ${LD_DOC_PREREQS_XREF} ${PARSER}.tok + @echo "Making the bison and TeX indices ..." 
+ ${TEX} $*.tex + +${MANUAL}.pdf: %.pdf: ${LD_DOC_PREREQS_XREF} %.gdy + ${PDFTEX} $< && touch $*.gdy && touch $*.pdf + +#${PARSER}.tok \ +#ldl_states.h: ${MANUAL}.tex ${MANUAL}.sty ${LD_CORE_PREREQS} +# ${TEX} ${MODEBOOTSTRAP} \\input $< + +${PARSER}.tok: ldgram.tex ${MANUAL}.sty ${LD_CORE_PREREQS} + ${TEX} ${MODEBOOTSTRAP} \\input $< + +ldl_states.h: ldlex.tex ${MANUAL}.sty ${LD_CORE_PREREQS} + ${TEX} ${MODEBOOTSTRAP} \\input $< + +ldgram.tex ldlex.tex: \ +%.tex: %.x + ${CWEAVE} -x $< + +docs: ${MANUAL}.pdf + +mostlyclean: clean_temp + -rm -f ptabout ltabout ldsmalll_out ldsmallp_out ldnump_out ldnuml_out ${MANUAL}.c + +clean: clean_core + -rm -f ptabout ltabout ldsmalll_out ldsmallp_out ldnump_out ldnuml_out *.stx ldman.sty + +distclean: clean + cd ${SPLINT_ROOT} && ${MAKE} clean + diff --git a/support/splint/examples/ld/ldfrontmatter.sty b/support/splint/examples/ld/ldfrontmatter.sty new file mode 100644 index 0000000000..e4fd94bcf2 --- /dev/null +++ b/support/splint/examples/ld/ldfrontmatter.sty @@ -0,0 +1,2 @@ +% this file is left blank on purpose to avoid referencing non-free fonts +% it would contain the \TeX\ macros that generate the cover page for \ld\ parser manual diff --git a/support/splint/examples/ld/ldgram.w b/support/splint/examples/ld/ldgram.w new file mode 100644 index 0000000000..4fb9a9a876 --- /dev/null +++ b/support/splint/examples/ld/ldgram.w @@ -0,0 +1,1083 @@ +@q Copyright 2012-2015 Alexander Shibakov@> +@q Copyright 2002-2014 Free Software Foundation, Inc.@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@> +@q GNU General Public License for more details.@> + +@q You should have received a copy of the GNU General Public License@> +@q along with SPLinT. If not, see .@> + +@** The parser. +\ifx\parsernamespace\UNDEFINED + \def\tokendeffile{ldp.tok}% + \input ldman.sty + \input limbo.sty + \input dcols.sty + \setupfootnotes + \def\MRI{} + \def\ld{} +\fi +\immediate\openout\exampletable=\jobname.exl\relax% +The outline of the grammar file below does not reveal anything unusual +in the general layout of \ld\ grammar. The first section lists all the +token definitions, \prodstyle{\%union} styles, and some \Cee\ +code. The original comments that come with the grammar file of the +linker have been mostly left intact. They are typeset in {\it +italics\/} to make them easy to recognize. + +@s TeX_ TeX +@s TeXa TeX +@s TeXb TeX +@s TeXf TeX +@s TeXfo TeX +@s TeXao TeX + +@(ldp.yy@>= +@G Switch to generic mode. +%{@> @<\ld\ parser \Cee\ preamble@> @=%} + @> @<\ld\ parser \bison\ options@> @= +%union {@> @ @=} +%{@> @<\ld\ parser \Cee\ postamble@> @=%} + @> @ @= +%% + @> @<\ld\ parser productions@> @= +%% +@g + +@ Among the options listed in this section, \prodstyle{\%token-table} +is the most critical for the proper operation of the parser and must be enabled +to supply the token information to the lexer (the traditional way +of passing this information along is to use a \Cee\ header file with +the appropriate definitions). 
The start symbol does not have to be
+given explicitly and can be indicated by listing the appropriate rules
+at the beginning.
+
+Most other sections of the grammar file, with the exception of the
+rules, are either empty or hold placeholder values. The functionality
+provided by the code in these sections in the case of a \Cee\ parser
+is supplied by the \TeX\ macros in \.{ldman.sty}.
+@<\ld\ parser \bison\ options@>=
+@G
+%token-table
+%debug
+%start script_file
+@g
+
+@ @<\ld\ parser \Cee\ preamble@>=
+
+@ @=
+
+@ @<\ld\ parser \Cee\ postamble@>=
+#define YYPRINT(file, type, value) yyprint (file, type, value)
+ static void yyprint (FILE *file, int type, YYSTYPE value){}
+
+@ @<\ld\ parser productions@>=
+ @<\GNU\ \ld\ script rules@>@;
+ @@;
+
+@ The tokens are declared first. This section is also used to supply
+numerical token values to the lexer by the original parser, as well as
+the bootstrapping phase of the typesetting parser. Unlike the native (\Cee) parser for
+\ld, the typesetting parser has no need for the type of each token
+(rather, the type consistency is based on the weak dynamic type system
+coded in \.{yyunion.sty} and \.{ldunion.sty}). Thus all the tokens used
+by the \ld\ parser are put in a single list.
+@=
+@G
+%token INT
+%token NAME LNAME
+%token END
+%token ALIGN_K BLOCK BIND QUAD SQUAD LONG SHORT BYTE
+%token SECTIONS PHDRS INSERT_K AFTER BEFORE
+%token DATA_SEGMENT_ALIGN DATA_SEGMENT_RELRO_END DATA_SEGMENT_END
+%token SORT_BY_NAME SORT_BY_ALIGNMENT SORT_NONE
+%token SORT_BY_INIT_PRIORITY
+%token '{' '}'
+%token SIZEOF_HEADERS OUTPUT_FORMAT FORCE_COMMON_ALLOCATION OUTPUT_ARCH
+%token INHIBIT_COMMON_ALLOCATION
+%token SEGMENT_START
+%token INCLUDE
+%token MEMORY
+%token REGION_ALIAS
+%token LD_FEATURE
+%token NOLOAD DSECT COPY INFO OVERLAY
+%token DEFINED TARGET_K SEARCH_DIR MAP ENTRY
+%token NEXT
+%token SIZEOF ALIGNOF ADDR LOADADDR MAX_K MIN_K
+%token STARTUP HLL SYSLIB FLOAT NOFLOAT NOCROSSREFS
+%token ORIGIN FILL
+%token LENGTH CREATE_OBJECT_SYMBOLS INPUT GROUP OUTPUT CONSTRUCTORS
+%token ALIGNMOD AT SUBALIGN HIDDEN PROVIDE PROVIDE_HIDDEN AS_NEEDED
+%token CHIP LIST SECT ABSOLUTE LOAD NEWLINE ENDWORD ORDER NAMEWORD ASSERT_K
+%token LOG2CEIL FORMAT PUBLIC DEFSYMEND BASE ALIAS TRUNCATE REL
+%token INPUT_SCRIPT INPUT_MRI_SCRIPT INPUT_DEFSYM CASE EXTERN START
+%token VERS_TAG VERS_IDENTIFIER
+%token GLOBAL LOCAL VERSIONK INPUT_VERSION_SCRIPT
+%token KEEP ONLY_IF_RO ONLY_IF_RW SPECIAL INPUT_SECTION_FLAGS ALIGN_WITH_INPUT
+%token EXCLUDE_FILE
+%token CONSTANT
+%token INPUT_DYNAMIC_LIST
+%right PLUSEQ MINUSEQ MULTEQ DIVEQ '=' LSHIFTEQ RSHIFTEQ ANDEQ OREQ '?' ':' UNARY
+%left OROR ANDAND '|' '^' '&' EQ NE '<' '>' LE GE LSHIFT RSHIFT '+' '-' '*' '/' '%' '('
+@g
+
+@*1 Grammar rules, an overview.
+The first natural step in transforming an existing parser into a
+`parser stack' for pretty printing is to understand the `anatomy' of
+the grammar. Not every grammar is suitable for such a transformation
+and in almost every case, some modifications are needed. The
+parser and lexer implementation for \ld\ is not terrible although it does
+have some idiosyncrasies that could have been eliminated by a careful
+grammar redesign. Instead of invasive rewriting of significant
+portions of the grammar, the approach taken here merely omits some
+rules and partitions the grammar into several subsets, each of which
+is supposed to handle a well-defined logical section of an \ld\ script
+file.
+
+One example of a trick used by the \ld\ parser that is not appropriate for a
+pretty printing grammar implements a way of handling the choice of the
+format of an input file. After a command line option that selects the
+input format has been read (or the format has been determined using
+some other method), the first token output by the lexer branches the
+parser to the appropriate portion of the full grammar.
+
+Since the token never appears as part of the input file there is no
+need to include this part of the main grammar for the purposes of
+typesetting.
+\traceparserstatestrue
+\tracestackstrue
+\tracerulestrue
+\traceactionstrue
+\tracelookaheadtrue
+\traceparseresultstrue
+\tracebadcharstrue
+\yyflexdebugtrue
+%\checktabletrue
+%
+\traceparserstatesfalse
+\tracestacksfalse
+\tracerulesfalse
+\traceactionsfalse
+\tracelookaheadfalse
+\traceparseresultsfalse
+\tracebadcharsfalse
+\yyflexdebugfalse
+\checktablefalse
+%
+\saveparseoutputfalse
+@=
+@G
+file:
+ INPUT_SCRIPT script_file
+ | INPUT_MRI_SCRIPT mri_script_file
+ | INPUT_VERSION_SCRIPT version_script_file
+ | INPUT_DYNAMIC_LIST dynamic_list_file
+ | INPUT_DEFSYM defsym_expr
+;
+@g
+
+@ @=
+@G
+@t}\vb{\inline\flatten}{@>
+filename:
+ NAME {@>@[TeX_( "/yy0{/noexpand/ldfilename{/the/yy(1)}}" );@]@=}
+;
+@g
+
+@ The simplest parser subset is intended to parse symbol definitions
+given in the command line that invokes the linker. Creating a parser
+for it involves almost no extra effort so we leave it in.
+
+Note that the simplicity is somewhat deceptive as the syntax of
+\prodstyle{exp} is rather complex. That part of the grammar is needed
+elsewhere, however, so symbol definitions parsing costs almost nothing
+on top of the already required effort. The only practical use for this
+part of the \ld\ grammar is presenting examples in text.
+
+The\namedspot{pingpong} \TeX\ macro \.{\\ldlex@@defsym} switches the lexer state to
+\.{DEFSYMEXP} (see \locallink{stateswitchers}all the state switching
+macros\endlink\ in the chapter about the lexer implementation
+below). Switching lexer states from the parser presents some
+difficulties which can be overcome by careful design. For example, the
+state switching macros can be invoked before the lexer is called and
+initialized (when the parser performs a {\it default action\/}).
+@=
+@G
+defsym_expr:
+ {@>@[TeX_( "/ldlex@@defsym" );@]@=}
+ NAME '=' exp {@>@[TeX_( "/ldlex@@popstate" );@]@=}
+ ;
+@g
+
+@ {\it Syntax within an \MRI\ script file}\footnote{As explained at the
+beginning of this chapter, the text in {\it italics\/} was taken from
+the original comments by \ld\ parser and lexer programmers.}. The parser for typesetting
+is only intended to process \GNU\ \ld\ scripts and does not concern
+itself with any additional compatibility modes. For this reason, all
+support for \MRI\ style scripts has been omitted. One use for the
+section below is a small demonstration of the formatting tools that
+change the output of the \bison\ parser.
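+For illustration (an aside added here, not part of the original
+commentary): the inserts of the form \.{@@t\}\\vb\{\\inline\\flatten\}\{@@>}
+that punctuate the rules below appear to be such formatting tools:
+\.{\\inline} and \.{\\flatten} seem to compress a production's right-hand
+sides onto single lines, while \.{\\resetf} restores the default layout.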
+@<\MRI\ style script rules@>= +@G +@t}\vb{\inline\flatten}{@> +mri_script_file: + {@>@[TeX_( "/ldlex@@mri@@script" );@]@=} + mri_script_lines {@>@[TeX_( "/ldlex@@popstate" );@]@=} + ; + +mri_script_lines: + mri_script_lines mri_script_command NEWLINE + | + ; +@t}\vb{\resetf}{@> +mri_script_command: + CHIP exp + | CHIP exp ',' exp + | NAME {} + | LIST {} + | ORDER ordernamelist {} + | ENDWORD {} +@t}\vb{\flatten}{@> + | PUBLIC NAME '=' exp {} + | PUBLIC NAME ',' exp {} + | PUBLIC NAME exp +@t}\vb{\resetf}{@> + {} + | FORMAT NAME {} +@t}\vb{\flatten}{@> + | SECT NAME ',' exp {} + | SECT NAME exp {} + | SECT NAME '=' exp +@t}\vb{\resetf}{@> + {} +@t}\vb{\flatten}{@> + | ALIGN_K NAME '=' exp {} + | ALIGN_K NAME ',' exp +@t}\vb{\resetf}{@> + {} +@t}\vb{\flatten}{@> + | ALIGNMOD NAME '=' exp {} + | ALIGNMOD NAME ',' exp {} +@t}\vb{\resetf}{@> + {} + | ABSOLUTE mri_abs_name_list + | LOAD mri_load_name_list + | NAMEWORD NAME {} +@t}\vb{\flatten}{@> + | ALIAS NAME ',' NAME {} + | ALIAS NAME ',' INT {} +@t}\vb{\resetf}{@> + {} + | BASE exp {} + | TRUNCATE INT {} + | CASE casesymlist + | EXTERN extern_name_list +@t}\vb{\flatten}{@> + | INCLUDE filename {@>@@=} + mri_script_lines END +@t}\vb{\resetf}{@> + {@>@@=} + | START NAME {} + | + ; +@t}\vb{\inline\flatten}{@> +ordernamelist: + ordernamelist ',' NAME {} + | ordernamelist NAME {} + | + ; + +mri_load_name_list: + NAME {} + | mri_load_name_list ',' NAME {} + ; + +mri_abs_name_list: + NAME {} + | mri_abs_name_list ',' NAME {} + ; + +casesymlist: + {} + | NAME + | casesymlist ',' NAME + ; +@g + +@ {\it Parsed as expressions so that commas separate entries.} The +core of the parser consists of productions describing \GNU\ \ld\ linker +scripts. The first rule is common to both \MRI\ and \GNU\ formats. +@<\GNU\ \ld\ script rules@>= +@G +extern_name_list: + {@>@[TeX_( "/ldlex@@expression" );@]@=} + extern_name_list_body {@>@[TeX_( "/ldlex@@popstate" );@]@=} + ; + +extern_name_list_body: + NAME {} + | extern_name_list_body NAME {} + | extern_name_list_body ',' NAME {} + ; + +@ The top level productions simply define a script file as a list of +script commands. +@<\GNU\ \ld\ script rules@>= +@G +script_file: + {@>@[TeX_( "/ldlex@@both" );@]@=} + ifile_list {@>@[TeX_( "/getsecond{/yy(2)}/to/ldcmds/ldlex@@popstate" );@]@=} + ; + +ifile_list: + ifile_list ifile_p1 {@>@@=} + | {@>@[TeX_( "/yy0{{}{}}" );@]@=} + ; +@g + +@ @= + @[TeX_( "/getfirst{/yy(1)}/to/toksa/getsecond{/yy(1)}/to/toksb" );@]@; + @[TeX_( "/getfirst{/yy(2)}/to/toksc/getsecond{/yy(2)}/to/toksd" );@]@; + @[TeXb( "/yytoksempty{/toksb}{/yy0{/the/yy(2)}}" );@]@; + @[TeXao( "{/yy0{{/the/toksc}{/the/toksb/noexpand/ldcommandseparator{/the/toksa}{/the/toksc}/the/toksd}}}" );@]@; + +@*1 Script internals. +There are a number of different commands. For typesetting purposes, +the handling of most of these can be significantly +simplified. In the \prodstyle{GROUP} command there is no need to +perform any actions upon entering the group, for +instance. \prodstyle{INCLUDE} presents a special challenge. In +the original grammar this command is followed by a general list of script +commands (the contents of the included file) terminated by +\prodstyle{END}. The `magic' of opening the file and inserting its +contents into the stream being parsed is performed by the lexer and +the parser in the +background. The typesetting parser, on the other hand, only has to +typeset the \prodstyle{INCLUDE} command itself and has no need for +opening and parsing the file being included. 
We can simply change the +grammar rule to omit the follow up script commands but that would +require altering the existing grammar. \namedspot{pretendbuffersw}Since the command list +(\prodstyle{ifile\_list}) is allowed to be empty, +we simply \locallink{pretendbufferswlex}fake\endlink\ the +inclusion of the file in the lexer by immediately outputting +\prodstyle{END} upon entering the appropriate lexer state. One +advantage in using this approach is the ability, when desired, to +examine the included file for possible cross-referencing information. + +Each command is packaged with a qualifier that records its type for +the rule that adds the fragment to the script file. +@<\GNU\ \ld\ script rules@>= +@G +ifile_p1: + memory {@>@[TeX_( "/yy0{{mem}{/the/yy(1)}}" );@]@=} + | sections {@>@[TeX_( "/yy0{{sect}{/the/yy(1)}}" );@]@=} + | phdrs + | startup + | high_level_library + | low_level_library + | floating_point_support + | statement_anywhere {@>@[TeX_( "/yy0{{stmt}{/noexpand/ldstatement{/the/yy(1)}}}" );@]@=} + | version + | ';' {@>@[TeX_( "/yy0{{none}{}}" );@]@=} + | TARGET_K '(' NAME ')' {} + | SEARCH_DIR '(' filename ')' {} + | OUTPUT '(' filename ')' {} + | OUTPUT_FORMAT '(' NAME ')' {} + | OUTPUT_FORMAT '(' NAME ',' + NAME ',' NAME ')' {} + | OUTPUT_ARCH '(' NAME ')' {} + | FORCE_COMMON_ALLOCATION {} + | INHIBIT_COMMON_ALLOCATION {} + | INPUT '(' input_list ')' + | GROUP {} + '(' input_list ')' {} + | MAP '(' filename ')' {} + | INCLUDE filename {@>@@=} + ifile_list END {@>@@=} + | NOCROSSREFS '(' + nocrossref_list ')' {} + | EXTERN '(' extern_name_list ')' + | INSERT_K AFTER NAME {} + | INSERT_K BEFORE NAME {} + | REGION_ALIAS '(' NAME ',' NAME ')' {} + | LD_FEATURE '(' NAME ')' {} + ; + +input_list: + NAME {} + | input_list ',' NAME {} + | input_list NAME {} + | LNAME {} + | input_list ',' LNAME {} + | input_list LNAME {} + | AS_NEEDED '(' {} + input_list ')' {} + | input_list ',' AS_NEEDED '(' {} + input_list ')' {} + | input_list AS_NEEDED '(' {} + input_list ')' {} + ; + +sections: + SECTIONS '{' sec_or_group_p1 '}' {@>@[TeX_( "/yy0{/noexpand/ldsections{/the/yy(3)}}" );@]@=} + ; + +sec_or_group_p1: + sec_or_group_p1 section {@>@@=} + | sec_or_group_p1 statement_anywhere {@>@@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +statement_anywhere: + ENTRY '(' NAME ')' {} + | assignment end {@>@@=} + | ASSERT_K {@>@[TeX_( "/ldlex@@expression" );@]@=} + '(' exp ',' NAME ')' {@>@[TeX_( "/ldlex@@popstate" );@]@=} + ; +@g + +@ @= + @[TeXb( "/yytoksempty{/yy(1)}{/yy0{/the/yy(2)}}" );@]@; + @[TeXao( "{/yy0{/the/yy(1)/noexpand/ldsectionseparator/the/yy(2)}}" );@]@; + +@ @= + @[TeXb( "/yytoksempty{/yy(1)}{/yy0{/the/yy(2)}}" );@]@; + @[TeXao( "{/yy0{/the/yy(1)/noexpand/ldsectionseparator/ldstatement{/the/yy(2)}}}" );@]@; + +@ This is the default action performed by the parser when the parser +writer does not supply one. For a minor gain in efficiency, this +definition can be made empty. +@= + @[TeX_( "/yy0{/the/yy(1)}" );@]@; + +@ @= + @[TeX_( "/ldlex@@script" );@]@; + @[TeX_( "/ldfile@@open@@command@@file{/yy(2)}" );@]@; + +@ @= + @[TeX_( "/yy0{/noexpand/ldinclude{/the/yy(2)}}/ldlex@@popstate" );@]@; + +@ @= + @[TeX_( "/yy0{{inc}{/noexpand/ldinclude{/the/yy(2)}}}/ldlex@@popstate" );@]@; + +@ \tracebadcharstrue +{\it The \prodstyle{'*'} and \prodstyle{'?'} cases are there because the lexer returns them as +separate tokens rather than as \prodstyle{NAME}.} +\tracebadcharsfalse +@= +@G +wildcard_name: + NAME {@>@@=} + | '*' {@>@[TeX_( "/yy0{{*}{*}}" );@]@=} + | '?' 
{@>@[TeX_( "/yy0{{?}{?}}" );@]@=} + ; +@g + +@ @= +@G +wildcard_spec: + wildcard_name {} + | EXCLUDE_FILE '(' exclude_name_list ')' wildcard_name + {} + | SORT_BY_NAME '(' wildcard_name ')' {} + | SORT_BY_ALIGNMENT '(' wildcard_name ')' + {} + | SORT_NONE '(' wildcard_name ')' + {} + | SORT_BY_NAME '(' SORT_BY_ALIGNMENT '(' wildcard_name ')' ')' + {} + | SORT_BY_NAME '(' SORT_BY_NAME '(' wildcard_name ')' ')' + {} + | SORT_BY_ALIGNMENT '(' SORT_BY_NAME '(' wildcard_name ')' ')' + {} + | SORT_BY_ALIGNMENT +@t}\vb{\breakline}{@> + '(' SORT_BY_ALIGNMENT '(' wildcard_name ')' ')' + {} + | SORT_BY_NAME +@t}\vb{\breakline}{@> + '(' EXCLUDE_FILE '(' exclude_name_list ')' wildcard_name ')' + {} + | SORT_BY_INIT_PRIORITY '(' wildcard_name ')' + {} + ; + +sect_flag_list: + NAME {} + | sect_flag_list '&' NAME {} + ; + +sect_flags: + INPUT_SECTION_FLAGS '(' sect_flag_list ')' {} + ; + +exclude_name_list: + exclude_name_list wildcard_name {} + | wildcard_name {} + ; + +file_NAME_list: + file_NAME_list opt_comma wildcard_spec {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/ldspace/noexpand/ldregexp{/the/yy(2)}}" );@]@=} + | wildcard_spec {@>@[TeX_( "/yy0{/noexpand/ldregexp{/the/yy(1)}}" );@]@=} + ; + +input_section_spec_no_keep: + NAME {} + | sect_flags NAME {} + | '[' file_NAME_list ']' {} + | sect_flags '[' file_NAME_list ']' {} + | wildcard_spec '(' file_NAME_list ')' {@>@[TeX_( "/yy0{/noexpand/ldregexp{/the/yy(1)}(/the/yy(3))}" );@]@=} + | sect_flags wildcard_spec '(' file_NAME_list ')' + {} + ; +@g + +@ @= +@G +input_section_spec: + input_section_spec_no_keep {@>@@=} + | KEEP '(' {} + input_section_spec_no_keep ')' {@>@[TeX_( "/yy0{/mathop{/hbox{/noexpand/ttl keep}(/the/yy(4))}}" );@]@=} + ; + +statement: + assignment end + | CREATE_OBJECT_SYMBOLS {} + | ';' {@>@[TeX_( "/yy0{}" );@]@=} + | CONSTRUCTORS {} + | SORT_BY_NAME '(' CONSTRUCTORS ')' {} + | input_section_spec + | length '(' mustbe_exp ')' {} + | FILL '(' fill_exp ')' {} + | ASSERT_K {@>@[TeX_( "/ldlex@@expression" );@]@=} + '(' exp ',' NAME ')' end {@>@[TeX_( "/ldlex@@popstate" );@]@=} + | INCLUDE filename {@>@@=} + statement_list_opt END {@>@@=} + ; + +statement_list: + statement_list statement {@>@@=} + | statement {@>@@=} + ; + +statement_list_opt: + {@>@[TeX_( "/yy0{}" );@]@=} + | statement_list {@>@@=} + ; +@g + +@ @= + @[TeX_( "/yy0{/the/yy(1)/yytoksempty{/yy(2)}{}{/yytoksempty{/yy(1)}{}{/noexpand/ldor}{/the/yy(2)}}}" );@] + +@ @= + @[TeX_( "/yy0{/yytoksempty{/yy(1)}{}{{/the/yy(1)}}}" );@] + +@ @= +@G +@t}\vb{\inline\flatten}{@> +length: + QUAD {} + | SQUAD {} + | LONG {} + | SHORT {} + | BYTE {} + ; + +fill_exp: + mustbe_exp {@>@@=} + ; + +fill_opt: + '=' fill_exp {@>@[TeX_( "/yy0{/noexpand/ldfill{/the/yy(2)}}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +@t}\vb{\resetf}{@> +assign_op: + PLUSEQ {@>@[TeX_( "/yy(0)={/MRL{+{/K}}}" );@]@=} + | MINUSEQ {@>@[TeX_( "/yy(0)={/MRL{-{/K}}}" );@]@=} +@t}\vb{\inline\flatten}{@> + | MULTEQ {@>@[TeX_( "/yy(0)={/MRL*{/K}}" );@]@=} + | DIVEQ {@>@[TeX_( "/yy(0)={/MRL{{/div}{/K}}}" );@]@=} + | LSHIFTEQ {@>@[TeX_( "/yy(0)={/MRL{{/ll}{/K}}}" );@]@=} + | RSHIFTEQ {@>@[TeX_( "/yy(0)={/MRL{{/gg}{/K}}}" );@]@=} + | ANDEQ {@>@[TeX_( "/yy(0)={/Xandxeq}" );@]@=} + | OREQ {@>@[TeX_( "/yy(0)={/Xorxeq}" );@]@=} + ; + +end: ';' | ',' + ; + +opt_comma: ',' | + ; +@t}\vb{\resetf}{@> +@g + +@ Assignments are not expressions as in \Cee. 
+@= +@G +assignment: + NAME '=' mustbe_exp {@>@@=} + | NAME assign_op mustbe_exp {@>@@=} + | HIDDEN '(' NAME '=' mustbe_exp ')' {} + | PROVIDE '(' NAME '=' mustbe_exp ')' {} + | PROVIDE_HIDDEN '(' NAME '=' mustbe_exp ')' {} + ; +@g + +@ @= + @[TeX_( "/yy0{/noexpand/ldassignment{/noexpand/ldregexp{/the/yy(1)}}{/K}{/the/yy(3)}}" );@] + +@ @= + @[TeX_( "/yy0{/noexpand/ldassignment{/noexpand/ldregexp{/the/yy(1)}}{/the/yy(2)}{/the/yy(3)}}" );@] + +@ @= +@G +memory: + MEMORY '{' memory_spec_list_opt '}' {@>@[TeX_( "/yy0{/noexpand/ldmemory{/the/yy(3)}}" );@]@=} + ; + +memory_spec_list_opt: + memory_spec_list {@>@@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +memory_spec_list: + memory_spec_list opt_comma memory_spec {@>@[TeX_( "/yy0{/the/yy(1)/the/yy(3)}" );@]@=} + | memory_spec {@>@[TeX_( "/yy0{/the/yy(1)}" );@]@=} + ; + + +memory_spec: + NAME {} + attributes_opt ':' + origin_spec +@t}\vb{\breakline}{@> + opt_comma length_spec {@>@[TeX_( "/yy0{/noexpand/ldmemoryspec{/the/yy(1)}{/the/yy(3)}{/the/yy(5)}{/the/yy(7)}}" );@]@=} + | INCLUDE filename {@>@@=} + memory_spec_list_opt END {@>@@=} + ; +@g + +@ @= +@G +origin_spec: + ORIGIN '=' mustbe_exp {@>@[TeX_( "/yy0{/noexpand/ldoriginspec{/the/yy(3)}}" );@]@=} + ; + +length_spec: + LENGTH '=' mustbe_exp {@>@[TeX_( "/yy0{/noexpand/ldlengthspec{/the/yy(3)}}" );@]@=} + ; + +attributes_opt: + + {@>@[TeX_( "/yy0{}" );@]@=} + | '(' attributes_list ')' {@>@[TeX_( "/yy0{/the/yy(2)}" );@]@=} + ; + +attributes_list: + attributes_string {@>@[TeX_( "/yy0{/the/yy(1)}" );@]@=} + | attributes_list attributes_string {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/ldspace/the/yy(2)}" );@]@=} + ; + +attributes_string: + NAME {@>@[TeX_( "/yy0{/noexpand/ldattributes{/the/yy(1)}}" );@]@=} + | '!' NAME {@>@[TeX_( "/yy0{/noexpand/ldattributesneg{/the/yy(2)}}" );@]@=} + ; + +startup: + STARTUP '(' filename ')' {} + ; + +high_level_library: + HLL '(' high_level_library_NAME_list ')' + | HLL '(' ')' {} + ; + +high_level_library_NAME_list: + high_level_library_NAME_list opt_comma filename + {} + | filename {} + ; + +low_level_library: + SYSLIB '(' low_level_library_NAME_list ')' {} + ; + +low_level_library_NAME_list: + low_level_library_NAME_list opt_comma filename + {} + | {} + ; + +floating_point_support: + FLOAT {} + | NOFLOAT {} + ; + +nocrossref_list: + {} + | NAME nocrossref_list {} + | NAME ',' nocrossref_list {} + ; + +mustbe_exp: + {@>@[TeX_( "/ldlex@@expression" );@]@=} + exp {@>@[TeX_( "/ldlex@@popstate/yy0{/the/yy(2)}" );@]@=} + ; +@g + +@*1 {\ifheader\ninepoint\fi\prodstyle{SECTIONS}} and expressions. +The linker supports an extensive range of expressions. The precedence +mechanism provided by \bison\ is used to present the composition of +expressions out of simpler chunks and basic building blocks tied +together by algebraic operations. +@= +@G +exp : + '-' exp %prec UNARY {@>@[TeX_( "/yy0{{-/the/yy(2)}}" );@]@=} + | '(' exp ')' {@>@[TeX_( "/yy0{(/the/yy(2))}" );@]@=} + | NEXT '(' exp ')' %prec UNARY {@>@[TeX_( "/yy0{/hbox{/nx/ssf next}(/the/yy(3))}" );@]@=} + | '!' 
exp %prec UNARY {@>@[TeX_( "/yy0{{/noexpand/CM/the/yy(2)}}" );@]@=} + | '+' exp %prec UNARY {@>@[TeX_( "/yy0{{+/the/yy(2)}}" );@]@=} + | '~' exp %prec UNARY {@>@[TeX_( "/yy0{{/noexpand/R/the/yy(2)}}" );@]@=} + | exp '*' exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/times/the/yy(3)}" );@]@=} + | exp '/' exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand///the/yy(3)}" );@]@=} + | exp '%' exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/div/the/yy(3)}" );@]@=} + | exp '+' exp {@>@[TeX_( "/yy0{/the/yy(1)+/the/yy(3)}" );@]@=} + | exp '-' exp {@>@[TeX_( "/yy0{/the/yy(1)-/the/yy(3)}" );@]@=} + | exp LSHIFT exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/ll/the/yy(3)}" );@]@=} + | exp RSHIFT exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/gg/the/yy(3)}" );@]@=} + | exp EQ exp {@>@[TeX_( "/yy0{/the/yy(1)=/the/yy(3)}" );@]@=} + | exp NE exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/not=/the/yy(3)}" );@]@=} + | exp LE exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/leq/the/yy(3)}" );@]@=} + | exp GE exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/geq/the/yy(3)}" );@]@=} + | exp '<' exp {@>@[TeX_( "/yy0{/the/yy(1)' exp {@>@[TeX_( "/yy0{/the/yy(1)>/the/yy(3)}" );@]@=} + | exp '&' exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/AND/the/yy(3)}" );@]@=} + | exp '^' exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/XOR/the/yy(3)}" );@]@=} + | exp '|' exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/OR/the/yy(3)}" );@]@=} + | exp '?' exp ':' exp {@>@@=} + | exp ANDAND exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/V/the/yy(3)}" );@]@=} + | exp OROR exp {@>@[TeX_( "/yy0{/the/yy(1)/noexpand/W/the/yy(3)}" );@]@=} +@g + +@ More atomic expression types specific to the linker's function. +@= +@G +exp : + DEFINED '(' NAME ')' {} + | INT {} + | SIZEOF_HEADERS {} + | ALIGNOF '(' NAME ')' {} + | SIZEOF '(' NAME ')' {} + | ADDR '(' NAME ')' {} + | LOADADDR '(' NAME ')' {} + | CONSTANT '(' NAME ')' {} + | ABSOLUTE '(' exp ')' {} + | ALIGN_K '(' exp ')' {@>@[TeX_( "/yy0{/nx/mathop{/hbox{/nx/ssf align}}(/the/yy(3))}" );@]@=} + | ALIGN_K '(' exp ',' exp ')' {} + | DATA_SEGMENT_ALIGN '(' exp ',' exp ')' {} + | DATA_SEGMENT_RELRO_END '(' exp ',' exp ')' {} + | DATA_SEGMENT_END '(' exp ')' {} + | SEGMENT_START '(' NAME ',' exp ')' {} + | BLOCK '(' exp ')' {} + | NAME {@>@[TeX_( "/yy0{/noexpand/ldregexp{/the/yy(1)}}" );@]@=} + | MAX_K '(' exp ',' exp ')' {} + | MIN_K '(' exp ',' exp ')' {} + | ASSERT_K '(' exp ',' NAME ')' {} + | ORIGIN '(' NAME ')' {} + | LENGTH '(' NAME ')' {} + | LOG2CEIL '(' exp ')' {} + ; +@g + +@ @= +@q TeX_( "/yy0{/hbox{/nx/ttl let }/nx/xi(0)=/the/yy(5): /nx/xi(/nx/CM0)=/the/yy(3)/hbox{ /nx/ttl do }/xi(/the/yy(1))}" );@> +@[TeX_( "/yy0{/hbox{/nx/ttl do }/xi(/the/yy(1))/hbox{ /nx/ttl where }" );@> +@[TeX_( " {/let/nx/{/nx/bigbracedel/nx/xi(x)=/nx/cases{/the/yy(5)& if /inmath{x=0}/cr/the/yy(3)& if /inmath{x/nx/not=0}}}}" );@] + +@ @= +@G +memspec_at_opt: + AT '>' NAME {@>@[TeX_( "/yy0{/the/yy(3)}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +opt_at: + AT '(' exp ')' {@>@[TeX_( "/yy0{/the/yy(3)}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +opt_align: + ALIGN_K '(' exp ')' {@>@[TeX_( "/yy0{/the/yy(3)}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +opt_align_with_input: + ALIGN_WITH_INPUT {@>@[TeX_( "/yy0{align with input}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +opt_subalign: + SUBALIGN '(' exp ')' {@>@[TeX_( "/yy0{/the/yy(3)}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +sect_constraint: + ONLY_IF_RO {@>@[TeX_( "/yy0{only/_if/_ro}" );@]@=} + | ONLY_IF_RW {@>@[TeX_( "/yy0{only/_if/_rw}" );@]@=} + | SPECIAL {@>@[TeX_( "/yy0{special}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; 
+
+@g
+
+@ {\it The \prodstyle{GROUP} case is just enough to support the \gcc\
+|svr3.ifile| script. It is not intended to be full support. I'm
+not even sure what \prodstyle{GROUP} is supposed to mean.} A careful
+analysis of the productions below reveals some
+pitfalls in the parser/lexer interaction setup that uses the state switching
+macros (or functions in the case of the original parser). The switch
+to the \.{EXPRESSION} state at the end of the production for
+\prodstyle{section} is invoked before \prodstyle{opt\_comma} which can
+be empty. This means that the next (lookahead) token (which could be a
+\prodstyle{NAME} in a different context) might be read before the
+lexer is in the appropriate state. In practice, the names of the sections
+and other \prodstyle{NAME}s are usually pretty straightforward so this
+parser idiosyncrasy is unlikely to lead to a genuine problem. Since
+the goal was to keep the original grammar intact as much as possible,
+it was decided to leave this production unchanged.
+@=
+@G
+section:
+ NAME {@>@[TeX_( "/ldlex@@expression" );@]@=}
+ opt_exp_with_type opt_at opt_align opt_align_with_input
+@t}\vb{\breakline}{@>
+ opt_subalign {@>@[TeX_( "/ldlex@@popstate/ldlex@@script" );@]@=}
+ sect_constraint
+ '{' {}
+ statement_list_opt
+ '}' {@>@[TeX_( "/ldlex@@popstate/ldlex@@expression" );@]@=}
+ memspec_opt memspec_at_opt phdr_opt fill_opt
+ {@>@[TeX_( "/ldlex@@popstate" );@]@=}
+ opt_comma {@>@@=}
+ | OVERLAY {@>@[TeX_( "/ldlex@@expression" );@]@=}
+ opt_exp_without_type opt_nocrossrefs opt_at opt_subalign
+ {@>@[TeX_( "/ldlex@@popstate/ldlex@@script" );@]@=}
+ '{' {}
+ overlay_section '}' {@>@[TeX_( "/ldlex@@popstate/ldlex@@expression" );@]@=}
+ memspec_opt memspec_at_opt phdr_opt fill_opt
+ {@>@[TeX_( "/ldlex@@popstate" );@]@=}
+ opt_comma {@>@@=}
+ | GROUP {@>@[TeX_( "/ldlex@@expression" );@]@=}
+ opt_exp_with_type {@>@[TeX_( "/ldlex@@popstate" );@]@=}
+ '{' sec_or_group_p1 '}'
+ | INCLUDE filename {@>@@=}
+ sec_or_group_p1 END
+ {@>@@=}
+ ;
+@g
+
+@ @=
+ @[TeXb( "/yy0{/nx/ldnamedsection{/the/yy(1)}{/the/yy(3)}{/the/yy(4)}" );@]@;
+ @[TeXf( " {{/the/yy(5)}{/the/yy(6)}{/the/yy(7)}}" );@]@;/* alignment */
+ @[TeXf( " {/the/yy(9)}{/the/yy(12)}" );@]@;
+ @[TeXfo( " {{/the/yy(15)}{/the/yy(16)}{/the/yy(17)}{/the/yy(18)}}}" );@]@; /*memory specifiers */
+
+@ @=
+
+@ @=
+@G
+type:
+ NOLOAD {@>@[TeX_( "/yy0{noload}" );@]@=}
+ | DSECT {@>@[TeX_( "/yy0{dsect}" );@]@=}
+ | COPY {@>@[TeX_( "/yy0{copy}" );@]@=}
+ | INFO {@>@[TeX_( "/yy0{info}" );@]@=}
+ | OVERLAY {@>@[TeX_( "/yy0{overlay}" );@]@=}
+ ;
+
+atype:
+ '(' type ')' {@>@[TeX_( "/yy0{/noexpand/ldtype{/the/yy(2)}}" );@]@=}
+ | '(' ')' {@>@[TeX_( "/yy0{/noexpand/ldtype{}}" );@]@=}
+ | {@>@[TeX_( "/yy0{}" );@]@=}
+ ;
+@g
+
+@ {\it The \prodstyle{BIND} cases are to support the \gcc\ |svr3.ifile|
+script. They aren't intended to implement full support for the
+\prodstyle{BIND} keyword.
I'm not even sure what \prodstyle{BIND} +is supposed to mean.} +@= +@G +opt_exp_with_type: + exp atype ':' {@>@[TeX_( "/yy0{{}{/the/yy(1)}{}{}{/the/yy(2)}}" );@]@=} + | atype ':' {@>@[TeX_( "/yy0{{}{}{}{}{/the/yy(1)}}" );@]@=} + | BIND '(' exp ')' atype ':' {@>@[TeX_( "/yy0{{bind}{/the/yy(3)}{}{}{/the/yy(5)}}" );@]@=} + | BIND '(' exp ')' BLOCK '(' exp ')' atype ':' + {@>@[TeX_( "/yy0{{bind}{/the/yy(3)}{block}{/the/yy(7)}{/the/yy(9)}}" );@]@=} + ; + +opt_exp_without_type: + exp ':' {} + | ':' {} + ; + +opt_nocrossrefs: + {} + | NOCROSSREFS {} + ; + +memspec_opt: + '>' NAME {@>@[TeX_( "/yy0{/the/yy(2)}" );@]@=} + | {@>@[TeX_( "/yy0{}" );@]@=} + ; + +phdr_opt: + {@>@[TeX_( "/yy0{}" );@]@=} + | phdr_opt ':' NAME {@>@@=} + ; + +overlay_section: + + | overlay_section + NAME {@>@[TeX_( "/ldlex@@script" );@]@=} + '{' statement_list_opt '}' {@>@[TeX_( "/ldlex@@popstate/ldlex@@expression" );@]@=} + phdr_opt fill_opt {@>@[TeX_( "/ldlex@@popstate" );@]@=} + opt_comma + ; +@g + +@ @= + @[TeXb( "/yytoksempty{/yy(1)}{/yy0{{/the/yy(3)}}}" );@]@; + @[TeXao( "{/yy0{/the/yy(1)/noexpand/ldor{/the/yy(3)}}}" );@]@; + +@ @= +@G +phdrs: + PHDRS '{' phdr_list '}' + ; + +phdr_list: + + | phdr_list phdr + ; + +phdr: + NAME {@>@[TeX_( "/ldlex@@expression" );@]@=} + phdr_type phdr_qualifiers {@>@[TeX_( "/ldlex@@popstate" );@]@=} + ';' {} + ; + +phdr_type: + exp {} + ; + +phdr_qualifiers: + {} + | NAME phdr_val phdr_qualifiers {} + | AT '(' exp ')' phdr_qualifiers {} + ; + +phdr_val: + {} + | '(' exp ')' {} + ; +@g + +@*1 Other types of script files. At present time other script types +are ignored, although some of the rules are used in linker scripts +that are processed by the parser. +@= +@G +dynamic_list_file: + {@>@[TeX_( "/ldlex@@version@@file" );@]@=} + dynamic_list_nodes {@>@[TeX_( "/ldlex@@popstate" );@]@=} + ; + +dynamic_list_nodes: + dynamic_list_node + | dynamic_list_nodes dynamic_list_node + ; + +dynamic_list_node: + '{' dynamic_list_tag '}' ';' + ; + +dynamic_list_tag: + vers_defns ';' {} + ; +@g + +@ {\it This syntax is used within an external version script file.} +@= +@G +version_script_file: + {@>@[TeX_( "/ldlex@@version@@file" );@]@=} + vers_nodes {@>@[TeX_( "/ldlex@@popstate" );@]@=} + ; +@g + +@ {\it This is used within a normal linker script file.} +@= +@G +version: + {@>@[TeX_( "/ldlex@@version@@script" );@]@=} + VERSIONK '{' vers_nodes '}' {@>@[TeX_( "/ldlex@@popstate" );@]@=} + ; + +vers_nodes: + vers_node + | vers_nodes vers_node + ; + +vers_node: + '{' vers_tag '}' ';' {} + | VERS_TAG '{' vers_tag '}' ';' {} + | VERS_TAG '{' vers_tag '}' verdep ';' {} + ; + +verdep: + VERS_TAG {} + | verdep VERS_TAG {} + ; + +vers_tag: + {} + | vers_defns ';' {} + | GLOBAL ':' vers_defns ';' {} + | LOCAL ':' vers_defns ';' {} + | GLOBAL ':' vers_defns ';' LOCAL ':' vers_defns ';' + {} + ; + +vers_defns: + VERS_IDENTIFIER {} + | NAME {} + | vers_defns ';' VERS_IDENTIFIER {} + | vers_defns ';' NAME {} + | vers_defns ';' EXTERN NAME '{' {} + vers_defns opt_semicolon '}' {} + | EXTERN NAME '{' {} + vers_defns opt_semicolon '}' {} + | GLOBAL {} + | vers_defns ';' GLOBAL {} + | LOCAL {} + | vers_defns ';' LOCAL {} + | EXTERN {} + | vers_defns ';' EXTERN {} + ; +@t}\vb{\inline\flatten}{@> +opt_semicolon: + + | ';' + ; +@g diff --git a/support/splint/examples/ld/ldgramo.w b/support/splint/examples/ld/ldgramo.w new file mode 100644 index 0000000000..3069bb71eb --- /dev/null +++ b/support/splint/examples/ld/ldgramo.w @@ -0,0 +1,1645 @@ +@q Copyright 2015 Alexander Shibakov@> +@q Copyright 2002-2015 Free Software 
Foundation, Inc.@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the@> +@q GNU General Public License for more details.@> + +@q You should have received a copy of the GNU General Public License@> +@q along with SPLinT. If not, see .@> + +@*1 The original parser. {\it Here we present the full grammar of \ld, including some actions. +The grammar is split into sections but otherwise is reproduced exactly. +In addition to improving readability, such splitting allows \CWEB\ to process the code in +manageable increments. An observant reader will notice the difficulty +\CWEAVE\ is having with typesetting the structure tags that have the same +name as the structure variables of the appropriate type. This is a well-known +defect in \CWEAVE's design (see the requisite documentation) left +uncorrected to discourage the poor programming practice.} + +@s bfd_boolean int +@s bfd_vma int +@s fill_type int +@s lang_nocrossref int +@s lang_output_section_phdr_list int +@s bfd_elf_version_deps int +@s bfd_elf_version_expr int +@s bfd_elf_version_tree int +@s etree_union int +@s wildcard_spec int +@s section_type int +@s lang_memory_region_type int + +@= +@G +%{ + @>@<\Cee\ setup for \ld\ grammar@>@; +%} +%union { +@O + bfd_vma integer; + struct big_int + { + bfd_vma integer; + char *str; + } bigint; + fill_type *fill; + char *name; + const char *cname; + struct wildcard_spec wildcard; + struct wildcard_list *wildcard_list; + struct name_list *name_list; + struct flag_info_list *flag_info_list; + struct flag_info *flag_info; + int token; + union etree_union *etree; + struct phdr_info + { + bfd_boolean filehdr; + bfd_boolean phdrs; + union etree_union *at; + union etree_union *flags; + } phdr; + struct lang_nocrossref *nocrossref; + struct lang_output_section_phdr_list *section_phdr; + struct bfd_elf_version_deps *deflist; + struct bfd_elf_version_expr *versyms; + struct bfd_elf_version_tree *versnode; +@o + } + @>@@; +%% + @>@@; +%% +@g + +@ {\it The \Cee\ code is left mostly intact (with the exception of a few +comments) although it does not show up in the final output. The parts +that {\it are\/} typeset represent the semantics that is reproduced in +the typesetting parser. This includes all the state switching, as well +as some other actions that affect the parser-lexer interaction (such +as opening a new input buffer). The only exception to this rule is the +code for the \MRI\ script section of the grammar. 
It is reproduced +mostly as an example of a pretty-printed grammar, since otherwise +\MRI\ scripts are completely ignored by the typesetting parser.} +\def\initbootstrap{\restorecslist{parser-prototypes}\yyunion}% + +@<\Cee\ setup for \ld\ grammar@>= +#define DONTDECLARE_MALLOC + +#include "sysdep.h" +#include "bfd.h" +#include "bfdlink.h" +#include "ld.h" +#include "ldexp.h" +#include "ldver.h" +#include "ldlang.h" +#include "ldfile.h" +#include "ldemul.h" +#include "ldmisc.h" +#include "ldmain.h" +#include "mri.h" +#include "ldctor.h" +#include "ldlex.h" + +#ifndef YYDEBUG +#define YYDEBUG 1 +#endif + +static enum section_type sectype; +static lang_memory_region_type *region; + +bfd_boolean ldgram_had_keep = FALSE; +char *ldgram_vers_current_lang = NULL; + +#define ERROR_NAME_MAX 20 +static char *error_names[ERROR_NAME_MAX]; +static int error_index; +#define PUSH_ERROR(x) if (error_index < ERROR_NAME_MAX) error_names[error_index] = x; error_index++; +#define POP_ERROR() @[error_index--;@] + +@ {\it The token definitions and the corresponding \prodstyle{\%union} +styles are intermixed, which makes sense in the traditional style of a +\bison\ script. When \CWEB\ is used, however, it helps to introduce the +code in small, manageable sections and take advantage of \CWEB's cross-referencing +facilities to provide cues on the relationships between various parts +of the code.} +@= +@G +%type exp opt_exp_with_type mustbe_exp opt_at phdr_type phdr_val +%type opt_exp_without_type opt_subalign opt_align +%type fill_opt fill_exp +%type exclude_name_list +%type file_NAME_list +%type sect_flag_list +%type sect_flags +%type memspec_opt casesymlist +%type memspec_at_opt +%type wildcard_name +%type wildcard_spec +%token INT +%token NAME LNAME +%type length +%type phdr_qualifiers +%type nocrossref_list +%type phdr_opt +%type opt_nocrossrefs + +%right PLUSEQ MINUSEQ MULTEQ DIVEQ '=' LSHIFTEQ RSHIFTEQ ANDEQ OREQ +%right '?'
':' +%left OROR +%left ANDAND +%left '|' +%left '^' +%left '&' +%left EQ NE +%left '<' '>' LE GE +%left LSHIFT RSHIFT + +%left '+' '-' +%left '*' '/' '%' + +%right UNARY +%token END +%left '(' +%token ALIGN_K BLOCK BIND QUAD SQUAD LONG SHORT BYTE +%token SECTIONS PHDRS INSERT_K AFTER BEFORE +%token DATA_SEGMENT_ALIGN DATA_SEGMENT_RELRO_END DATA_SEGMENT_END +%token SORT_BY_NAME SORT_BY_ALIGNMENT SORT_NONE +%token SORT_BY_INIT_PRIORITY +%token '{' '}' +%token SIZEOF_HEADERS OUTPUT_FORMAT FORCE_COMMON_ALLOCATION OUTPUT_ARCH +%token INHIBIT_COMMON_ALLOCATION +%token SEGMENT_START +%token INCLUDE +%token MEMORY +%token REGION_ALIAS +%token LD_FEATURE +%token NOLOAD DSECT COPY INFO OVERLAY +%token DEFINED TARGET_K SEARCH_DIR MAP ENTRY +%token NEXT +%token SIZEOF ALIGNOF ADDR LOADADDR MAX_K MIN_K +%token STARTUP HLL SYSLIB FLOAT NOFLOAT NOCROSSREFS +%token ORIGIN FILL +%token LENGTH CREATE_OBJECT_SYMBOLS INPUT GROUP OUTPUT CONSTRUCTORS +%token ALIGNMOD AT SUBALIGN HIDDEN PROVIDE PROVIDE_HIDDEN AS_NEEDED +%type assign_op atype attributes_opt sect_constraint opt_align_with_input +%type filename +%token CHIP LIST SECT ABSOLUTE LOAD NEWLINE ENDWORD ORDER NAMEWORD ASSERT_K +%token LOG2CEIL FORMAT PUBLIC DEFSYMEND BASE ALIAS TRUNCATE REL +%token INPUT_SCRIPT INPUT_MRI_SCRIPT INPUT_DEFSYM CASE EXTERN START +%token VERS_TAG VERS_IDENTIFIER +%token GLOBAL LOCAL VERSIONK INPUT_VERSION_SCRIPT +%token KEEP ONLY_IF_RO ONLY_IF_RW SPECIAL INPUT_SECTION_FLAGS ALIGN_WITH_INPUT +%token EXCLUDE_FILE +%token CONSTANT +%type vers_defns +%type vers_tag +%type verdep +%token INPUT_DYNAMIC_LIST +@g + +@ {\it The original \Cee\ code has been preserved and presented along with +the grammar rules in the next two sections (the \Cee\ code has not +been deleted in the subsequent sections either; it is just not +typeset).} +\saveparseoutputfalse +@= +@G +file: + INPUT_SCRIPT script_file + | INPUT_MRI_SCRIPT mri_script_file + | INPUT_VERSION_SCRIPT version_script_file + | INPUT_DYNAMIC_LIST dynamic_list_file + | INPUT_DEFSYM defsym_expr + ; + + +filename: NAME; + + +defsym_expr: + { @> ldlex_defsym(); @=} + NAME '=' exp + { +@O + ldlex_popstate(); +@o + lang_add_assignment (exp_defsym ($2, $4)); + } + ; + +@g + +@ Syntax within an \MRI\ script file.
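+{\it To give the reader a taste of the syntax parsed by the rules
+below, here is a tiny \MRI\ style script; the file names and the
+numbers in it are, of course, made up:}
+$$\vbox{\halign{\.{#}\hfil\cr
+CHIP 68000\cr
+PUBLIC start = 0x2000\cr
+ORDER .text, .data\cr
+LOAD main.o, sub.o\cr
+END\cr
+}}$$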
+@= +@G +mri_script_file: + { +@O + @[ldlex_mri_script ();@] +@o + PUSH_ERROR (_("MRI style script")); + } + mri_script_lines + { +@O + ldlex_popstate (); +@o + mri_draw_tree (); + POP_ERROR (); + } + ; + +mri_script_lines: + mri_script_lines mri_script_command NEWLINE + | + ; + +@g + +@ @= +@G +mri_script_command: + CHIP exp + | CHIP exp ',' exp + | NAME { @> @ @=} + | LIST { @> config.map_filename = "-"; @=} + | ORDER ordernamelist + | ENDWORD + | PUBLIC NAME '=' exp + { @> mri_public($2, $4); @=} + | PUBLIC NAME ',' exp + { @> mri_public($2, $4); @=} + | PUBLIC NAME exp + { @> mri_public($2, $3); @=} + | FORMAT NAME + { @> mri_format($2); @=} + | SECT NAME ',' exp + { @> mri_output_section($2, $4); @=} + | SECT NAME exp + { @> mri_output_section($2, $3); @=} + | SECT NAME '=' exp + { @> mri_output_section($2, $4); @=} + | ALIGN_K NAME '=' exp + { @> mri_align($2,$4); @=} + | ALIGN_K NAME ',' exp + { @> mri_align($2,$4); @=} + | ALIGNMOD NAME '=' exp + { @> mri_alignmod($2,$4); @=} + | ALIGNMOD NAME ',' exp + { @> mri_alignmod($2,$4); @=} + | ABSOLUTE mri_abs_name_list + | LOAD mri_load_name_list + | NAMEWORD NAME + { @> mri_name($2); @=} + | ALIAS NAME ',' NAME + { @> mri_alias($2,$4,0); @=} + | ALIAS NAME ',' INT + { @> mri_alias ($2, 0, (int) $4.integer); @=} + | BASE exp + { @> mri_base($2); @=} + | TRUNCATE INT + { @> mri_truncate ((unsigned int) $2.integer); @=} + | CASE casesymlist + | EXTERN extern_name_list + | INCLUDE filename + { @> ldlex_script (); ldfile_open_command_file($2); @=} + mri_script_lines END + { @> ldlex_popstate (); @=} + | START NAME + { @> lang_add_entry ($2, FALSE); @=} + | + ; + +ordernamelist: + ordernamelist ',' NAME { @> mri_order($3); @=} + | ordernamelist NAME { @> mri_order($2); @=} + | + ; + +mri_load_name_list: + NAME { @> mri_load($1); @=} + | mri_load_name_list ',' NAME { @> mri_load($3); @=} + ; + +mri_abs_name_list: + NAME { @> mri_only_load($1); @=} + | mri_abs_name_list ',' NAME { @> mri_only_load($3); @=} + ; + +casesymlist: + { @> $$ = NULL; @=} + | NAME + | casesymlist ',' NAME + ; + +@g + +@ {\it Here is one way to deal with code that is too long to fit in an action.} +@= + einfo(_("%P%F: unrecognised keyword in MRI style script '%s'\n"),$1); + +@ Parsed as expressions so that commas separate entries. 
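+{\it For instance (with made-up symbol names), both of the forms below
+are accepted, and the two styles mix freely:}
+$$\hbox{\.{EXTERN (\_start, \_\_bss\_start)} \quad and \quad \.{EXTERN (\_start \_\_bss\_start)}}$$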
+@= +@G +extern_name_list: + {@> @[ldlex_expression ();@]@=} + extern_name_list_body + {@> @[ldlex_popstate ();@]@=} + +extern_name_list_body: + NAME + { ldlang_add_undef ($1, FALSE); } + | extern_name_list_body NAME + { ldlang_add_undef ($2, FALSE); } + | extern_name_list_body ',' NAME + { ldlang_add_undef ($3, FALSE); } + ; + +script_file: + {@> @[ldlex_both();@]@=} + ifile_list + {@> @[ldlex_popstate();@]@=} + ; + +ifile_list: + ifile_list ifile_p1 + | + ; + +@g + +@ {\it All the commands that can appear in a standard linker script.}% +@= +@G +ifile_p1: + memory + | sections + | phdrs + | startup + | high_level_library + | low_level_library + | floating_point_support + | statement_anywhere + | version + | ';' + | TARGET_K '(' NAME ')' + { lang_add_target($3); } + | SEARCH_DIR '(' filename ')' + { ldfile_add_library_path ($3, FALSE); } + | OUTPUT '(' filename ')' + { lang_add_output($3, 1); } + | OUTPUT_FORMAT '(' NAME ')' + { lang_add_output_format ($3, (char *) NULL, + (char *) NULL, 1); } + | OUTPUT_FORMAT '(' NAME ',' NAME ',' NAME ')' + { lang_add_output_format ($3, $5, $7, 1); } + | OUTPUT_ARCH '(' NAME ')' + { ldfile_set_output_arch ($3, bfd_arch_unknown); } + | FORCE_COMMON_ALLOCATION + { command_line.force_common_definition = TRUE ; } + | INHIBIT_COMMON_ALLOCATION + { command_line.inhibit_common_definition = TRUE ; } + | INPUT '(' input_list ')' + | GROUP + { lang_enter_group (); } + '(' input_list ')' + { lang_leave_group (); } + | MAP '(' filename ')' + { lang_add_map($3); } + | INCLUDE filename + { +@O + ldlex_script (); ldfile_open_command_file($2); +@o + } + ifile_list END + {@> @[ldlex_popstate ();@]@=} + | NOCROSSREFS '(' nocrossref_list ')' + { + lang_add_nocrossref ($3); + } + | EXTERN '(' extern_name_list ')' + | INSERT_K AFTER NAME + { lang_add_insert ($3, 0); } + | INSERT_K BEFORE NAME + { lang_add_insert ($3, 1); } + | REGION_ALIAS '(' NAME ',' NAME ')' + { lang_memory_region_alias ($3, $5); } + | LD_FEATURE '(' NAME ')' + { lang_ld_feature ($3); } + ; + +@g + +@ @= +@G +input_list: + NAME + { lang_add_input_file($1,lang_input_file_is_search_file_enum, + (char *)NULL); } + | input_list ',' NAME + { lang_add_input_file($3,lang_input_file_is_search_file_enum, + (char *)NULL); } + | input_list NAME + { lang_add_input_file($2,lang_input_file_is_search_file_enum, + (char *)NULL); } + | LNAME + { lang_add_input_file($1,lang_input_file_is_l_enum, + (char *)NULL); } + | input_list ',' LNAME + { lang_add_input_file($3,lang_input_file_is_l_enum, + (char *)NULL); } + | input_list LNAME + { lang_add_input_file($2,lang_input_file_is_l_enum, + (char *)NULL); } + | AS_NEEDED '(' + { $$ = input_flags.add_DT_NEEDED_for_regular; + input_flags.add_DT_NEEDED_for_regular = TRUE; } + input_list ')' + { input_flags.add_DT_NEEDED_for_regular = $3; } + | input_list ',' AS_NEEDED '(' + { $$ = input_flags.add_DT_NEEDED_for_regular; + input_flags.add_DT_NEEDED_for_regular = TRUE; } + input_list ')' + { input_flags.add_DT_NEEDED_for_regular = $5; } + | input_list AS_NEEDED '(' + { $$ = input_flags.add_DT_NEEDED_for_regular; + input_flags.add_DT_NEEDED_for_regular = TRUE; } + input_list ')' + { input_flags.add_DT_NEEDED_for_regular = $4; } + ; + +sections: + SECTIONS '{' sec_or_group_p1 '}' + ; + +sec_or_group_p1: + sec_or_group_p1 section + | sec_or_group_p1 statement_anywhere + | + ; + +statement_anywhere: + ENTRY '(' NAME ')' + { lang_add_entry ($3, FALSE); } + | assignment end + | ASSERT_K {@>@[ldlex_expression ();@>@=} '(' exp ',' NAME ')' + { +@O + ldlex_popstate (); +@o + 
lang_add_assignment (exp_assert ($4, $6)); + } + ; + +@g + +@ \tracebadcharstrue +The \prodstyle{'*'} and \prodstyle{'?'} cases are there because the lexer returns them as +separate tokens rather than as \prodstyle{NAME}. +\tracebadcharsfalse +@= +@G +wildcard_name: + NAME + { + $$ = $1; + } + | '*' + { + $$ = "*"; + } + | '?' + { + $$ = "?"; + } + ; + +@g + +@ @= +@G +wildcard_spec: + wildcard_name + { + $$.name = $1; + $$.sorted = none; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | EXCLUDE_FILE '(' exclude_name_list ')' wildcard_name + { + $$.name = $5; + $$.sorted = none; + $$.exclude_name_list = $3; + $$.section_flag_list = NULL; + } + | SORT_BY_NAME '(' wildcard_name ')' + { + $$.name = $3; + $$.sorted = by_name; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | SORT_BY_ALIGNMENT '(' wildcard_name ')' + { + $$.name = $3; + $$.sorted = by_alignment; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | SORT_NONE '(' wildcard_name ')' + { + $$.name = $3; + $$.sorted = by_none; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | SORT_BY_NAME '(' SORT_BY_ALIGNMENT '(' wildcard_name ')' ')' + { + $$.name = $5; + $$.sorted = by_name_alignment; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | SORT_BY_NAME '(' SORT_BY_NAME '(' wildcard_name ')' ')' + { + $$.name = $5; + $$.sorted = by_name; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | SORT_BY_ALIGNMENT '(' SORT_BY_NAME '(' wildcard_name ')' ')' + { + $$.name = $5; + $$.sorted = by_alignment_name; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | SORT_BY_ALIGNMENT '(' SORT_BY_ALIGNMENT '(' wildcard_name ')' ')' + { + $$.name = $5; + $$.sorted = by_alignment; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + | SORT_BY_NAME '(' EXCLUDE_FILE '(' exclude_name_list ')' wildcard_name ')' + { + $$.name = $7; + $$.sorted = by_name; + $$.exclude_name_list = $5; + $$.section_flag_list = NULL; + } + | SORT_BY_INIT_PRIORITY '(' wildcard_name ')' + { + $$.name = $3; + $$.sorted = by_init_priority; + $$.exclude_name_list = NULL; + $$.section_flag_list = NULL; + } + ; + +sect_flag_list: NAME + { + struct flag_info_list *n; + n = ((struct flag_info_list *) xmalloc (sizeof *n)); + if ($1[0] == '!') + { + n->with = without_flags; + n->name = &$1[1]; + } + else + { + n->with = with_flags; + n->name = $1; + } + n->valid = FALSE; + n->next = NULL; + $$ = n; + } + | sect_flag_list '&' NAME + { + struct flag_info_list *n; + n = ((struct flag_info_list *) xmalloc (sizeof *n)); + if ($3[0] == '!') + { + n->with = without_flags; + n->name = &$3[1]; + } + else + { + n->with = with_flags; + n->name = $3; + } + n->valid = FALSE; + n->next = $1; + $$ = n; + } + ; + +sect_flags: + INPUT_SECTION_FLAGS '(' sect_flag_list ')' + { + struct flag_info *n; + n = ((struct flag_info *) xmalloc (sizeof *n)); + n->flag_list = $3; + n->flags_initialized = FALSE; + n->not_with_flags = 0; + n->only_with_flags = 0; + $$ = n; + } + ; + +exclude_name_list: + exclude_name_list wildcard_name + { + struct name_list *tmp; + tmp = (struct name_list *) xmalloc (sizeof *tmp); + tmp->name = $2; + tmp->next = $1; + $$ = tmp; + } + | + wildcard_name + { + struct name_list *tmp; + tmp = (struct name_list *) xmalloc (sizeof *tmp); + tmp->name = $1; + tmp->next = NULL; + $$ = tmp; + } + ; + +file_NAME_list: + file_NAME_list opt_comma wildcard_spec + { + struct wildcard_list *tmp; + tmp = (struct wildcard_list *) xmalloc (sizeof *tmp); + 
tmp->next = $1; + tmp->spec = $3; + $$ = tmp; + } + | + wildcard_spec + { + struct wildcard_list *tmp; + tmp = (struct wildcard_list *) xmalloc (sizeof *tmp); + tmp->next = NULL; + tmp->spec = $1; + $$ = tmp; + } + ; + +input_section_spec_no_keep: + NAME + { + struct wildcard_spec tmp; + tmp.name = $1; + tmp.exclude_name_list = NULL; + tmp.sorted = none; + tmp.section_flag_list = NULL; + lang_add_wild (&tmp, NULL, ldgram_had_keep); + } + | sect_flags NAME + { + struct wildcard_spec tmp; + tmp.name = $2; + tmp.exclude_name_list = NULL; + tmp.sorted = none; + tmp.section_flag_list = $1; + lang_add_wild (&tmp, NULL, ldgram_had_keep); + } + | '[' file_NAME_list ']' + { + lang_add_wild (NULL, $2, ldgram_had_keep); + } + | sect_flags '[' file_NAME_list ']' + { + struct wildcard_spec tmp; + tmp.name = NULL; + tmp.exclude_name_list = NULL; + tmp.sorted = none; + tmp.section_flag_list = $1; + lang_add_wild (&tmp, $3, ldgram_had_keep); + } + | wildcard_spec '(' file_NAME_list ')' + { + lang_add_wild (&$1, $3, ldgram_had_keep); + } + | sect_flags wildcard_spec '(' file_NAME_list ')' + { + $2.section_flag_list = $1; + lang_add_wild (&$2, $4, ldgram_had_keep); + } + ; +@g + +@ @= +@G +input_section_spec: + input_section_spec_no_keep + | KEEP '(' + { ldgram_had_keep = TRUE; } + input_section_spec_no_keep ')' + { ldgram_had_keep = FALSE; } + ; + +statement: + assignment end + | CREATE_OBJECT_SYMBOLS + { + lang_add_attribute(lang_object_symbols_statement_enum); + } + | ';' + | CONSTRUCTORS + { + + lang_add_attribute(lang_constructors_statement_enum); + } + | SORT_BY_NAME '(' CONSTRUCTORS ')' + { + constructors_sorted = TRUE; + lang_add_attribute (lang_constructors_statement_enum); + } + | input_section_spec + | length '(' mustbe_exp ')' + { + lang_add_data ((int) $1, $3); + } + + | FILL '(' fill_exp ')' + { + lang_add_fill ($3); + } + | ASSERT_K {@>@[ldlex_expression ();@]@=} '(' exp ',' NAME ')' end + { +@O + ldlex_popstate (); +@o + lang_add_assignment (exp_assert ($4, $6)); + } + | INCLUDE filename + {@> @[ldlex_script (); ldfile_open_command_file($2);@]@=} + statement_list_opt END + {@> @[ldlex_popstate ();@]@=} + ; +@t}\vb{\flatten\inline\squashtermstrue}{@> +statement_list: + statement_list statement + | statement + ; + +statement_list_opt: + + | statement_list + ; + +length: + QUAD + { $$ = $1; } + | SQUAD + { $$ = $1; } + | LONG + { $$ = $1; } + | SHORT + { $$ = $1; } + | BYTE + { $$ = $1; } + ; + +fill_exp: + mustbe_exp + { + $$ = exp_get_fill ($1, 0, "fill value"); + } + ; + +fill_opt: + '=' fill_exp + { $$ = $2; } + | { $$ = (fill_type *) 0; } + ; + +assign_op: + PLUSEQ + { $$ = '+'; } + | MINUSEQ + { $$ = '-'; } + | MULTEQ + { $$ = '*'; } + | DIVEQ + { $$ = '/'; } + | LSHIFTEQ + { $$ = LSHIFT; } + | RSHIFTEQ + { $$ = RSHIFT; } + | ANDEQ + { $$ = '&'; } + | OREQ + { $$ = '|'; } + + ; +end: ';' | ',' + ; +@t}\vb{\resetf}{@> + +assignment: + NAME '=' mustbe_exp + { + lang_add_assignment (exp_assign ($1, $3, FALSE)); + } + | NAME assign_op mustbe_exp + { + lang_add_assignment (exp_assign ($1, + exp_binop ($2, + exp_nameop (NAME, + $1), + $3), FALSE)); + } + | HIDDEN '(' NAME '=' mustbe_exp ')' + { + lang_add_assignment (exp_assign ($3, $5, TRUE)); + } + | PROVIDE '(' NAME '=' mustbe_exp ')' + { + lang_add_assignment (exp_provide ($3, $5, FALSE)); + } + | PROVIDE_HIDDEN '(' NAME '=' mustbe_exp ')' + { + lang_add_assignment (exp_provide ($3, $5, TRUE)); + } + ; + +@t}\vb{\inline\flatten}{@> +opt_comma: + ',' | ; +@t}\vb{\resetf}{@> +memory: + MEMORY '{' memory_spec_list_opt '}' + ; + 
+memory_spec_list_opt: memory_spec_list | ; + +memory_spec_list: + memory_spec_list opt_comma memory_spec + | memory_spec + ; + + +memory_spec: NAME + { region = lang_memory_region_lookup ($1, TRUE); } + attributes_opt ':' + origin_spec opt_comma length_spec + {} + | INCLUDE filename + {@> @[ldlex_script (); ldfile_open_command_file($2);@]@=} + memory_spec_list_opt END + {@> @[ldlex_popstate ();@]@=} + ; +@g + +@ @= +@G +origin_spec: + ORIGIN '=' mustbe_exp + { + region->origin = exp_get_vma ($3, 0, "origin"); + region->current = region->origin; + } + ; + +length_spec: + LENGTH '=' mustbe_exp + { + region->length = exp_get_vma ($3, -1, "length"); + } + ; + +attributes_opt: + + { /* dummy action to avoid bison 1.25 error message */ } + | '(' attributes_list ')' + ; + +attributes_list: + attributes_string + | attributes_list attributes_string + ; + +attributes_string: + NAME + { lang_set_flags (region, $1, 0); } + | '!' NAME + { lang_set_flags (region, $2, 1); } + ; + +startup: + STARTUP '(' filename ')' + { lang_startup($3); } + ; + +high_level_library: + HLL '(' high_level_library_NAME_list ')' + | HLL '(' ')' + { ldemul_hll((char *)NULL); } + ; + +high_level_library_NAME_list: + high_level_library_NAME_list opt_comma filename + { ldemul_hll($3); } + | filename + { ldemul_hll($1); } + + ; + +low_level_library: + SYSLIB '(' low_level_library_NAME_list ')' + ; low_level_library_NAME_list: + low_level_library_NAME_list opt_comma filename + { ldemul_syslib($3); } + | + ; + +floating_point_support: + FLOAT + { lang_float(TRUE); } + | NOFLOAT + { lang_float(FALSE); } + ; + +nocrossref_list: + + { + $$ = NULL; + } + | NAME nocrossref_list + { + struct lang_nocrossref *n; + + n = (struct lang_nocrossref *) xmalloc (sizeof *n); + n->name = $1; + n->next = $2; + $$ = n; + } + | NAME ',' nocrossref_list + { + struct lang_nocrossref *n; + + n = (struct lang_nocrossref *) xmalloc (sizeof *n); + n->name = $1; + n->next = $3; + $$ = n; + } + ; + +mustbe_exp: {@> @[ldlex_expression ();@]@=} + exp + {@> @[ldlex_popstate ();@]@= $$=$2;} + ; + +@g + +@ {\it Rich expression syntax reproducing the one in \Cee.} +@= +@G +exp : + '-' exp %prec UNARY + { $$ = exp_unop ('-', $2); } + | '(' exp ')' + { $$ = $2; } + | NEXT '(' exp ')' %prec UNARY + { $$ = exp_unop ((int) $1,$3); } + | '!' exp %prec UNARY + { $$ = exp_unop ('!', $2); } + | '+' exp %prec UNARY + { $$ = $2; } + | '~' exp %prec UNARY + { $$ = exp_unop ('~', $2);} +@t}\vb{\flatten\inline}{@> + | exp '*' exp + { $$ = exp_binop ('*', $1, $3); } + | exp '/' exp + { $$ = exp_binop ('/', $1, $3); } + | exp '%' exp + { $$ = exp_binop ('%', $1, $3); } + | exp '+' exp + { $$ = exp_binop ('+', $1, $3); } + | exp '-' exp +@t}\vb{\resetf}{@> + { $$ = exp_binop ('-' , $1, $3); } +@t}\vb{\flatten\inline}{@> + | exp LSHIFT exp + { $$ = exp_binop (LSHIFT , $1, $3); } + | exp RSHIFT exp + { $$ = exp_binop (RSHIFT , $1, $3); } + | exp EQ exp + { $$ = exp_binop (EQ , $1, $3); } + | exp NE exp + { $$ = exp_binop (NE , $1, $3); } + | exp LE exp +@t}\vb{\resetf}{@> + { $$ = exp_binop (LE , $1, $3); } +@t}\vb{\flatten\inline}{@> + | exp GE exp + { $$ = exp_binop (GE , $1, $3); } + | exp '<' exp + { $$ = exp_binop ('<' , $1, $3); } + | exp '>' exp + { $$ = exp_binop ('>' , $1, $3); } + | exp '&' exp + { $$ = exp_binop ('&' , $1, $3); } + | exp '^' exp +@t}\vb{\resetf}{@> + { $$ = exp_binop ('^' , $1, $3); } +@t}\vb{\flatten\inline}{@> + | exp '|' exp + { $$ = exp_binop ('|' , $1, $3); } + | exp '?' exp ':' exp + { $$ = exp_trinop ('?' 
, $1, $3, $5); } + | exp ANDAND exp + { $$ = exp_binop (ANDAND , $1, $3); } + | exp OROR exp +@t}\vb{\resetf}{@> + { $$ = exp_binop (OROR , $1, $3); } +@t}\vb{\flatten\inline}{@> + | DEFINED '(' NAME ')' + { $$ = exp_nameop (DEFINED, $3); } + | INT + { $$ = exp_bigintop ($1.integer, $1.str); } + | SIZEOF_HEADERS +@t}\vb{\resetf}{@> + { $$ = exp_nameop (SIZEOF_HEADERS,0); } +@t}\vb{\flatten\inline}{@> + | ALIGNOF '(' NAME ')' + { $$ = exp_nameop (ALIGNOF,$3); } + | SIZEOF '(' NAME ')' +@t}\vb{\resetf}{@> + { $$ = exp_nameop (SIZEOF,$3); } +@t}\vb{\flatten\inline}{@> + | ADDR '(' NAME ')' + { $$ = exp_nameop (ADDR,$3); } + | LOADADDR '(' NAME ')' +@t}\vb{\resetf}{@> + { $$ = exp_nameop (LOADADDR,$3); } +@t}\vb{\flatten\inline}{@> + | CONSTANT '(' NAME ')' + { $$ = exp_nameop (CONSTANT,$3); } + | ABSOLUTE '(' exp ')' + { $$ = exp_unop (ABSOLUTE, $3); } + | ALIGN_K '(' exp ')' +@t}\vb{\resetf}{@> + { $$ = exp_unop (ALIGN_K,$3); } +@t}\vb{\flatten\inline}{@> + | ALIGN_K '(' exp ',' exp ')' + { $$ = exp_binop (ALIGN_K,$3,$5); } + | DATA_SEGMENT_ALIGN '(' exp ',' exp ')' +@t}\vb{\resetf}{@> + { $$ = exp_binop (DATA_SEGMENT_ALIGN, $3, $5); } + | DATA_SEGMENT_RELRO_END '(' exp ',' exp ')' + { $$ = exp_binop (DATA_SEGMENT_RELRO_END, $5, $3); } + | DATA_SEGMENT_END '(' exp ')' + { $$ = exp_unop (DATA_SEGMENT_END, $3); } + | SEGMENT_START '(' NAME ',' exp ')' + { $$ = exp_binop (SEGMENT_START, + $5, + exp_nameop (NAME, $3)); } + | BLOCK '(' exp ')' + { $$ = exp_unop (ALIGN_K,$3); } + | NAME + { $$ = exp_nameop (NAME,$1); } + | MAX_K '(' exp ',' exp ')' + { $$ = exp_binop (MAX_K, $3, $5 ); } + | MIN_K '(' exp ',' exp ')' + { $$ = exp_binop (MIN_K, $3, $5 ); } + | ASSERT_K '(' exp ',' NAME ')' + { $$ = exp_assert ($3, $5); } + | ORIGIN '(' NAME ')' + { $$ = exp_nameop (ORIGIN, $3); } + | LENGTH '(' NAME ')' + { $$ = exp_nameop (LENGTH, $3); } + | LOG2CEIL '(' exp ')' + { $$ = exp_unop (LOG2CEIL, $3); } + ; + +@g + +@ @= +@G +memspec_at_opt: + AT '>' NAME { $$ = $3; } + | { $$ = 0; } + ; + +opt_at: + AT '(' exp ')' { $$ = $3; } + | { $$ = 0; } + ; + +opt_align: + ALIGN_K '(' exp ')' { $$ = $3; } + | { $$ = 0; } + ; + +opt_align_with_input: + ALIGN_WITH_INPUT { $$ = ALIGN_WITH_INPUT; } + | { $$ = 0; } + ; + +opt_subalign: + SUBALIGN '(' exp ')' { $$ = $3; } + | { $$ = 0; } + ; + +sect_constraint: + ONLY_IF_RO { $$ = ONLY_IF_RO; } + | ONLY_IF_RW { $$ = ONLY_IF_RW; } + | SPECIAL { $$ = SPECIAL; } + | { $$ = 0; } + ; + +@g + +@ The \prodstyle{GROUP} case is just enough to support the \gcc\ +|svr3.ifile| script. It is not intended to be full +support. I'm not even sure what \prodstyle{GROUP} is supposed +to mean. 
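+{\it As a quick illustration, here is a made-up output section
+definition that exercises several of the optional clauses in the first
+rule below (\.{ram} and \.{rom} are memory regions, and \.{text} is a
+program header, all assumed to be defined elsewhere in the script):}
+$$\hbox{\.{.text 0x1000 : AT (0x8000) \{ *(.text) \} > ram AT> rom :text =0xff}}$$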
+@= +@G +section: NAME {@> @[ldlex_expression();@]@=} + opt_exp_with_type + opt_at + opt_align +@t}\vb{\breakline}{@> + opt_align_with_input + opt_subalign {@> @[ldlex_popstate (); ldlex_script ();@]@=} + sect_constraint + '{' + { + lang_enter_output_section_statement($1, $3, + sectype, + $5, $7, $4, $9, $6); + } + statement_list_opt + '}' {@> @[ldlex_popstate (); ldlex_expression ();@]@=} + memspec_opt memspec_at_opt phdr_opt fill_opt + { +@O + ldlex_popstate (); +@o + lang_leave_output_section_statement ($18, $15, $17, $16); + } + opt_comma + {} + | OVERLAY + {@> @[ldlex_expression ();@]@=} + opt_exp_without_type opt_nocrossrefs +@t}\vb{\breakline}{@> + opt_at opt_subalign + {@> @[ldlex_popstate (); ldlex_script ();@]@=} + '{' + { + lang_enter_overlay ($3, $6); + } + overlay_section + '}' + {@> @[ldlex_popstate (); ldlex_expression ();@]@=} + memspec_opt memspec_at_opt phdr_opt fill_opt + { +@O + ldlex_popstate (); +@o + lang_leave_overlay ($5, (int) $4, + $16, $13, $15, $14); + } + opt_comma + | GROUP {@> @[ldlex_expression ();@]@=} + opt_exp_with_type + { +@O + ldlex_popstate (); +@o + lang_add_assignment (exp_assign (".", $3, FALSE)); + } + '{' sec_or_group_p1 '}' + | INCLUDE filename + {@> @[ldlex_script (); ldfile_open_command_file($2);@]@=} + sec_or_group_p1 END + {@> @[ldlex_popstate ();@]@=} + ; +@t}\vb{\flatten\inline}{@> +type: + NOLOAD { sectype = noload_section; } + | DSECT { sectype = noalloc_section; } + | COPY { sectype = noalloc_section; } + | INFO { sectype = noalloc_section; } + | OVERLAY { sectype = noalloc_section; } + ; + +atype: + '(' type ')' + | { sectype = normal_section; } + | '(' ')' { sectype = normal_section; } + ; + +@g + +@ The \prodstyle{BIND} cases are to support the \gcc\ |svr3.ifile| +script. They aren't intended to implement full +support for the \prodstyle{BIND} keyword. I'm not even sure +what \prodstyle{BIND} is supposed to mean. 
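+{\it Thus, a section may be given an explicit type: in the made-up
+fragment below the output section is marked \prodstyle{NOLOAD} while
+its address is left out:}
+$$\hbox{\.{.bss (NOLOAD) : \{ *(.bss) *(COMMON) \}}}$$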
+@= +@G + +opt_exp_with_type: + exp atype ':' { $$ = $1; } + | atype ':' { $$ = (etree_type *)NULL; } + | BIND '(' exp ')' atype ':' { $$ = $3; } + | BIND '(' exp ')' BLOCK '(' exp ')' atype ':' + { $$ = $3; } + ; + +opt_exp_without_type: + exp ':' { $$ = $1; } + | ':' { $$ = (etree_type *) NULL; } + ; + +opt_nocrossrefs: + + { $$ = 0; } + | NOCROSSREFS + { $$ = 1; } + ; + +memspec_opt: + '>' NAME + { $$ = $2; } + | { $$ = DEFAULT_MEMORY_REGION; } + ; + +phdr_opt: + + { + $$ = NULL; + } + | phdr_opt ':' NAME + { + struct lang_output_section_phdr_list *n; + + n = ((struct lang_output_section_phdr_list *) + xmalloc (sizeof *n)); + n->name = $3; + n->used = FALSE; + n->next = $1; + $$ = n; + } + ; + +overlay_section: + + | overlay_section + NAME + { +@O + ldlex_script (); +@o + lang_enter_overlay_section ($2); + } + '{' statement_list_opt '}' + {@> @[ldlex_popstate (); ldlex_expression ();@]@=} + phdr_opt fill_opt + { +@O + ldlex_popstate (); +@o + lang_leave_overlay_section ($9, $8); + } + opt_comma + ; +@g + +@ @= +@G +phdrs: + PHDRS '{' phdr_list '}' + ; + +phdr_list: + + | phdr_list phdr + ; + +phdr: + NAME {@> @[ldlex_expression ();@]@=} + phdr_type phdr_qualifiers {@> @[ldlex_popstate ();@]@=} + ';' + { + lang_new_phdr ($1, $3, $4.filehdr, $4.phdrs, $4.at, + $4.flags); + } + ; + +phdr_type: + exp + { + $$ = $1; + + if ($1->type.node_class == etree_name + && $1->type.node_code == NAME) + { + const char *s; + unsigned int i; + static const char * const phdr_types[] = + { + "PT_NULL", "PT_LOAD", "PT_DYNAMIC", + "PT_INTERP", "PT_NOTE", "PT_SHLIB", + "PT_PHDR", "PT_TLS" + }; + + s = $1->name.name; + for (i = 0; + i < sizeof phdr_types / sizeof phdr_types[0]; + i++) + if (strcmp (s, phdr_types[i]) == 0) + { + $$ = exp_intop (i); + break; + } + if (i == sizeof phdr_types / sizeof phdr_types[0]) + { + if (strcmp (s, "PT_GNU_EH_FRAME") == 0) + $$ = exp_intop (0x6474e550); + else if (strcmp (s, "PT_GNU_STACK") == 0) + $$ = exp_intop (0x6474e551); + else + { + einfo (_("\ +%X%P:%S: unknown phdr type `%s' (try integer literal)\n"), + NULL, s); + $$ = exp_intop (0); + } + } + } + } + ; + +phdr_qualifiers: + + { + memset (&$$, 0, sizeof (struct phdr_info)); + } + | NAME phdr_val phdr_qualifiers + { + $$ = $3; + if (strcmp ($1, "FILEHDR") == 0 && $2 == NULL) + $$.filehdr = TRUE; + else if (strcmp ($1, "PHDRS") == 0 && $2 == NULL) + $$.phdrs = TRUE; + else if (strcmp ($1, "FLAGS") == 0 && $2 != NULL) + $$.flags = $2; + else + einfo (_("%X%P:%S: PHDRS syntax error at `%s'\n"), + NULL, $1); + } + | AT '(' exp ')' phdr_qualifiers + { + $$ = $5; + $$.at = $3; + } + ; + +phdr_val: + + { + $$ = NULL; + } + | '(' exp ')' + { + $$ = $2; + } + ; + +dynamic_list_file: + { +@O + ldlex_version_file (); +@o + PUSH_ERROR (_("dynamic list")); + } + dynamic_list_nodes + { +@O + ldlex_popstate (); +@o + POP_ERROR (); + } + ; + +dynamic_list_nodes: + dynamic_list_node + | dynamic_list_nodes dynamic_list_node + ; + +dynamic_list_node: + '{' dynamic_list_tag '}' ';' + ; + +dynamic_list_tag: + vers_defns ';' + { + lang_append_dynamic_list ($1); + } + ; + +@g + +@ This syntax is used within an external version script file. +@= +@G + +version_script_file: + { +@O + ldlex_version_file (); +@o + PUSH_ERROR (_("VERSION script")); + } + vers_nodes + { +@O + ldlex_popstate (); +@o + POP_ERROR (); + } + ; +@g + +@ This is used within a normal linker script file. 
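+{\it A typical (again, made-up) \.{VERSION} block with two version
+nodes, the second of which depends on the first, may look like this:}
+$$\vbox{\halign{\.{#}\hfil\cr
+VERSION \{\cr
+VERS\_1.0 \{ global: foo; bar; local: *; \};\cr
+VERS\_1.1 \{ baz; \} VERS\_1.0;\cr
+\}\cr
+}}$$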
+@= +@G +version: + { +@O + ldlex_version_script (); +@o + } + VERSIONK '{' vers_nodes '}' + { +@O + ldlex_popstate (); +@o + } + ; + +vers_nodes: + vers_node + | vers_nodes vers_node + ; + +vers_node: + '{' vers_tag '}' ';' + { + lang_register_vers_node (NULL, $2, NULL); + } + | VERS_TAG '{' vers_tag '}' ';' + { + lang_register_vers_node ($1, $3, NULL); + } + | VERS_TAG '{' vers_tag '}' verdep ';' + { + lang_register_vers_node ($1, $3, $5); + } + ; + +verdep: + VERS_TAG + { + $$ = lang_add_vers_depend (NULL, $1); + } + | verdep VERS_TAG + { + $$ = lang_add_vers_depend ($1, $2); + } + ; + +vers_tag: + + { + $$ = lang_new_vers_node (NULL, NULL); + } + | vers_defns ';' + { + $$ = lang_new_vers_node ($1, NULL); + } + | GLOBAL ':' vers_defns ';' + { + $$ = lang_new_vers_node ($3, NULL); + } + | LOCAL ':' vers_defns ';' + { + $$ = lang_new_vers_node (NULL, $3); + } + | GLOBAL ':' vers_defns ';' LOCAL ':' vers_defns ';' + { + $$ = lang_new_vers_node ($3, $7); + } + ; + +vers_defns: + VERS_IDENTIFIER + { + $$ = lang_new_vers_pattern (NULL, $1, ldgram_vers_current_lang, FALSE); + } + | NAME + { + $$ = lang_new_vers_pattern (NULL, $1, ldgram_vers_current_lang, TRUE); + } + | vers_defns ';' VERS_IDENTIFIER + { + $$ = lang_new_vers_pattern ($1, $3, ldgram_vers_current_lang, FALSE); + } + | vers_defns ';' NAME + { + $$ = lang_new_vers_pattern ($1, $3, ldgram_vers_current_lang, TRUE); + } + | vers_defns ';' EXTERN NAME '{' + { + $$ = ldgram_vers_current_lang; + ldgram_vers_current_lang = $4; + } + vers_defns opt_semicolon '}' + { + struct bfd_elf_version_expr *pat; + for (pat = $7; pat->next != NULL; pat = pat->next); + pat->next = $1; + $$ = $7; + ldgram_vers_current_lang = $6; + } + | EXTERN NAME '{' + { + $$ = ldgram_vers_current_lang; + ldgram_vers_current_lang = $2; + } + vers_defns opt_semicolon '}' + { + $$ = $5; + ldgram_vers_current_lang = $4; + } + | GLOBAL + { + $$ = lang_new_vers_pattern (NULL, "global", ldgram_vers_current_lang, FALSE); + } + | vers_defns ';' GLOBAL + { + $$ = lang_new_vers_pattern ($1, "global", ldgram_vers_current_lang, FALSE); + } + | LOCAL + { + $$ = lang_new_vers_pattern (NULL, "local", ldgram_vers_current_lang, FALSE); + } + | vers_defns ';' LOCAL + { + $$ = lang_new_vers_pattern ($1, "local", ldgram_vers_current_lang, FALSE); + } + | EXTERN + { + $$ = lang_new_vers_pattern (NULL, "extern", ldgram_vers_current_lang, FALSE); + } + | vers_defns ';' EXTERN + { + $$ = lang_new_vers_pattern ($1, "extern", ldgram_vers_current_lang, FALSE); + } + ; +@t}\vb{\inline\flatten}{@> +opt_semicolon: + + | ';' + ; + +@g + +@q%%@> + +@ \Cee\ sugar. +@c +void +yyerror(arg) + const char *arg; +{ + if (ldfile_assumed_script) + einfo (_("%P:%s: file format not recognized; treating as linker script\n"), + ldlex_filename ()); + if (error_index > 0 && error_index < ERROR_NAME_MAX) + einfo ("%P%F:%S: %s in %s\n", NULL, arg, error_names[error_index - 1]); + else + einfo ("%P%F:%S: %s\n", NULL, arg); +} + diff --git a/support/splint/examples/ld/ldint.sty b/support/splint/examples/ld/ldint.sty new file mode 100644 index 0000000000..478bdef17d --- /dev/null +++ b/support/splint/examples/ld/ldint.sty @@ -0,0 +1,259 @@ +% Copyright 2012-2015, Alexander Shibakov +% This file is part of SPLinT +% +% SPLinT is free software: you can redistribute it and/or modify +% it under the terms of the GNU General Public License as published by +% the Free Software Foundation, either version 3 of the License, or +% (at your option) any later version. 
+% +% SPLinT is distributed in the hope that it will be useful, +% but WITHOUT ANY WARRANTY; without even the implied warranty of +% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +% GNU General Public License for more details. +% +% You should have received a copy of the GNU General Public License +% along with SPLinT. If not, see . + +% Simple integer parsing macros to display various styles of integers +% used in ld scripts. Instead of creating a dedicated parser for this +% task, it was decided to implement it as a set of expandable +% macros. The degree of complexity of both implementations would +% probably be about the same, and a true parser approach would be more +% flexible. The choice was made in favor of a macro implementation to +% ensure expandability and speed. + +\def\ldintiterator#1#2#3#4#5#6#7#8{% + % #1 a control sequence to insert at the end + % #2 first divisor + % #3 second divisor + % #4 first remainder + % #5 second remainder + % #6 read digits + % #7 last read digit + % #8 current digit + \ifcat\noexpand#8\relax + \yybreak{#1{#2}{#3}{#4}{#5}{#6}{#7}}% + \else + \yybreak{\expandafter\lditeratormodone\expandafter{\number\incrementmod{#4}{#2}}{#5}{#3}{#1}{#2}{#3}{#6#7}{#8}}% + \yycontinue +} + +\def\incrementmod#1#2{% + \expandafter\incr@mentmod\expandafter{\number\xincrement{#1}}{#2}% +} + +\def\incr@mentmod#1#2{% + \ifnum#1=#2 + \yybreak{0}% + \else + \yybreak{#1}% + \yycontinue +} + +\def\decrementmod#1#2{% + \ifnum#1=\z@ + \yybreak{\xdecrement{#2}}% + \else + \yybreak{\xdecrement{#1}}% + \yycontinue +} + +\def\lditeratormodone#1#2#3{% + \expandafter\lditeratormodtwo\expandafter{\number\incrementmod{#2}{#3}}{#1}% +} + +\def\lditeratormodtwo#1#2#3#4#5{% + \ldintiterator{#3}{#4}{#5}{#2}{#1}% +} + +\def\ldintegerspacingmodone#1#2#3#4#5#6#7#8#9{% + % #1 control sequence to insert at the end + % #2 first divisor + % #3 second divisor + % #4 number of digits mod #2 + % #5 number of digits mod #3 + % #6 digits read + % #7 separator + % #8 last digit + % #9 current digit + \ifcat\noexpand#9\relax + \yybreak{#1{#2}{#3}{#4}{#5}{#6}{#8}}% + \else + \yybreak{\ldintegerspacingmodon@{#1}{#2}{#3}{#4}{#5}{#6}{#7}{#8}{#9}}% + \yycontinue +} + +\def\ldintegerspacingmodon@#1#2#3#4#5#6#7#8#9{% + \ifnum#4=\z@ + \yybreak{% + \yystringempty{#8}{% + \expandafter\ldintegerspacingmod@n@\expandafter{\number\decrementmod{#4}{#2}}{#1}{#2}{#3}{#5}{}{#7}{#9}% + }{% + \expandafter\ldintegerspacingmod@n@\expandafter{\number\decrementmod{#4}{#2}}{#1}{#2}{#3}{#5}{#6#8#7}{#7}{#9}% + }% + }% + \else + \yybreak{% + \expandafter\ldintegerspacingmod@n@\expandafter{\number\decrementmod{#4}{#2}}{#1}{#2}{#3}{#5}{#6#8}{#7}{#9}% + }% + \yycontinue +} + +\def\ldintegerspacingmod@n@#1#2#3#4#5#6#7#8{% + \ldintegerspacingmodone{#2}{#3}{#4}{#1}{#5}{#6}{#7}{#8}% +} + +\def\showiteratorresults#1#2#3#4#5#6{% + \errmessage{mod #1: #3, mod #2: #4, digits: #5, last digit: #6}% +} + +\def\showseparatorresults#1#2#3#4#5#6#7{% + \errmessage{digits after separation: \yystringempty{#1}{}{[#1]}#6.#7}% +} + +\def\ldseparatemodone#1#2#3#4#5#6#7{% + \expandafter\yystringempty\expandafter{\romannumeral-"0#7}{% + \ldintegerspacingmodone{#1{}}{#2}{#3}{#4}{#5}{}{\ldintsep}{}#6#7\end + }{% + \ldseparatewithsuffix{#1}{#2}{#3}{#4}{#5}{#6}{#7}% + }% +} + +\def\ldseparatewithsuffix#1#2#3#4#5#6#7{% + \ifcase\csname ldsuffix#7\endcsname\space + \expandafter\ldseparatewiths@ffix\expandafter{\number\decrementmod{#4}{#2}}{#1}{#2}{#3}{#5}{#6}{#7}% + \or +
\expandafter\ldseparatewiths@ffix\expandafter{\number\decrementmod{#5}{#3}}{#1}{#3}{#2}{#4}{#6}{#7}% + \else + \fi +} + +\def\ldseparatewiths@ffix#1#2#3#4#5#6#7{% + \ldintegerspacingmodone{#2{#7}}{#3}{#4}{#1}{#5}{}{\ldintsep}{}#6\end +} + +\def\ldsuffixD{0} +\def\ldsuffixO{0} +\def\ldsuffixB{1} +\def\ldsuffixH{1} +\def\ldsuffixX{1} +% +\def\ldradixD{} +\def\ldradixO{8} +\def\ldradixB{01} +\def\ldradixH{16} +\def\ldradixX{16} +% +\def\ldsuffixK{0} +\def\ldsuffixM{0} +\def\ldsuffixG{0} + +\let\ldintsep\relax + +\def\intprefix#1{% analyzing the prefix of an integer + \intpr@fix#1..\end +} + +\def\intpr@fix#1#2#3\end{% + \if#10% + \if#2X% prefix 0X + 1% hex number + \else + \if#2.% + 4% zero + \else + 3% octal number + \fi + \fi + \else + \if#1$% + 2% hex number + \else + 0% decimal number (no prefix) + \fi + \fi +} + +\def\ldsciinteger#1{% + \ifcase\intprefix{#1} + % decimal number (no prefix) + \lddecsplitws{#1}{}% + \or % hex number (0X) + \expandafter\ldhexsplitws\expandafter{\eattwo#1}{16}% + \or % hex number ($) + \expandafter\ldhexsplitws\expandafter{\eatone#1}{16}% + \or % octal number + \lddecsplitws{#1}{8}% + \or % zero + 0% + \else + \fi +} + +\def\ldhexsplitws#1#2{% + \ldintiterator{\ldseparatemodone{\displayinteger{#2}}}{4}{4}{0}{0}{}{}#1\end +} + +\def\lddecsplitws#1#2{% + \ldintiterator{\ldseparatemodone{\displayinteger{#2}}}{3}{3}{0}{0}{}{}#1\end +} + +\def\ldbasedinteger#1{% + \ldintiterator{\ldseparatewithsuffix{\displayintegerws}}{3}{4}{0}{0}{}{}#1\end +} + +\def\displayinteger#1#2#3#4#5#6#7#8{% + {\def\ldintsep{$\,$}\hbox{\tt#7#8${}_{#1}\yystringempty{#2}{}{\,\hbox{\tt#2}}$}}% +} + +\def\displayintegerws#1#2#3#4#5#6#7{% + {\def\ldintsep{$\,$}\hbox{\tt#6#7${}_{\yystringempty{#1}{}{\csname ldradix#1\endcsname}}$}}% +} + +% typesetting examples in text + +\def\beginldprod{% +% \par + \begingroup + \b@ginldprod +} + +\long\def\b@ginldprod#1\endprod{% + \setldproduction{#1}% + \endgroup +% \par +} + +\long\def\setldproduction#1{% + \def\termidxrank{5}% + \def\headeridxrank{4}% + \def\defidxrank{3}% + \def\texcsidxrank{5}% + \ninepoint + \let\returnexplicitspace\splitexplicitspace + \let\acharswitch\texcharadjust + \let\onecharswitch\texcsadjust + \let\yyinputgroup\yyinputldgroup + \toldparser + \ldparserinit + \yyparse#1\yyeof\yyeof\endparseinput\endparse + \ifyyparsefail % revert to generic macros if parsing failed + \yybreak{\toks0{#1}\errmessage{failed to parse: \the\toks0}}% + \else % Stage three, process the parsed table + \yybreak{% + {% + \restorecslist{ld-display}\ldunion + \the\ldcmds +% \par + \the\lddisplay + }% + }% + \yycontinue +} + +% to make it possible to write {...} without changes + +\def\yyinputldgroup#1{% + \yyinput\{#1\}% +} + diff --git a/support/splint/examples/ld/ldlex.w b/support/splint/examples/ld/ldlex.w new file mode 100644 index 0000000000..9d1c256e62 --- /dev/null +++ b/support/splint/examples/ld/ldlex.w @@ -0,0 +1,550 @@ +@q Copyright 2012-2015 Alexander Shibakov@> +@q Copyright 2002-2014 Free Software Foundation, Inc.@> +@q This file is part of SPLinT@> + +@q SPLinT is free software: you can redistribute it and/or modify@> +@q it under the terms of the GNU General Public License as published by@> +@q the Free Software Foundation, either version 3 of the License, or@> +@q (at your option) any later version.@> + +@q SPLinT is distributed in the hope that it will be useful,@> +@q but WITHOUT ANY WARRANTY; without even the implied warranty of@> +@q MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the@> +@q GNU General Public License for more details.@> + +@q You should have received a copy of the GNU General Public License@> +@q along with SPLinT. If not, see .@> + +@** The lexer. +\ifx\parsernamespace\UNDEFINED + \input ldman.sty + \input limbo.sty + \input dcols.sty + \setupfootnotes + \def\MRI{} + \def\ld{} +\fi +The lexer used by \ld\ is almost straightforward. There are a few +facilities (\Cee\ header files, some output functions) needed by the +lexer that are conveniently coded into the \Cee\ code run by the +driver routines. These facilities make the lexer more complex than it +should have been, but the function of each one can be easily clarified +using this documentation and by occasionally referring to the manual for +the \bison\ parser, which is part of this distribution. +@(ldl.ll@>= +@G + @> @<\ld\ lexer definitions@> @= +%{@> @<\ld\ lexer \Cee\ preamble@> @=%} + @> @<\ld\ lexer options@> @= +%% + @> @<\ld\ token regular expressions@> @= +%% +@O +void define_all_states( void ) { + @@; +} +@o +@g + +@ @<\ld\ lexer options@>= +@G +%option bison-bridge +%option noyywrap nounput noinput reentrant +%option noyy_top_state +%option debug +%option stack +%option outfile="ldl.c" +@g + +@ @<\ld\ lexer \Cee\ preamble@>= +#include +#include + +@ @= +#define _register_name( name ) @[Define_State( #name, name )@] +#include "ldl_states.h" +#undef _register_name + +@ \yyskipparsetrue The character classes used by the scanner as well as +the lexer state declarations have been put in the definitions section of +the input file. No attempt has been made to clean up the definitions +of the character classes. +@<\ld\ lexer definitions@>= +@<\ld\ lexer states@>@; +@G +CMDFILENAMECHAR [_a-zA-Z0-9\/\.\\_\+\$\:\[\]\\\,\=\&\!\<\>\-\~] +CMDFILENAMECHAR1 [_a-zA-Z0-9\/\.\\_\+\$\:\[\]\\\,\=\&\!\<\>\~] +FILENAMECHAR1 [_a-zA-Z\/\.\\\$\_\~] +SYMBOLCHARN [_a-zA-Z\/\.\\\$\_\~0-9] +FILENAMECHAR [_a-zA-Z0-9\/\.\-\_\+\=\$\:\[\]\\\,\~] +WILDCHAR [_a-zA-Z0-9\/\.\-\_\+\=\$\:\[\]\\\,\~\?\*\^\!] +WHITE [ \t\n\r]+ +NOCFILENAMECHAR [_a-zA-Z0-9\/\.\-\_\+\$\:\[\]\\\~] +V_TAG [.$_a-zA-Z][._a-zA-Z0-9]* +V_IDENTIFIER [*?.$_a-zA-Z\[\]\-\!\^\\]([*?.$_a-zA-Z0-9\[\]\-\!\^\\]|::)* +@g + +@ The lexer uses different sets of rules depending on the context and the current state. +These can be changed from the lexer itself or externally by the parser +(as is the case in the \ld\ +implementation). \locallink{stateswitchers}Later\endlink, a number of +helper macros implement state switching so that the state names are +very rarely used explicitly. Keeping all the state declarations in the +same section simplifies the job of the +\locallink{bootstrapstates}bootstrap parser\endlink, as well. +\ifbootstrapmode\immediate\openout\stlist=ldl_states.h\fi +\yyskipparsefalse +@<\ld\ lexer states@>= +@G +%s SCRIPT +%s EXPRESSION +%s BOTH +%s DEFSYMEXP +%s MRI +%s VERS_START +%s VERS_SCRIPT +%s VERS_NODE +@g + +@*1 Macros for lexer functions. +The \locallink{pingpong}state switching\endlink\ `ping-pong' between the lexer and the parser aside, +the \ld\ lexer is very traditional. One implementation choice +deserving some attention is the treatment of comments by the lexer. The +difficulty of implementing \Cee\ style comment lexing using regular +expressions is well known, so an often-used alternative is a +special function that simply skips to the end of the comment. This is +exactly what the \ld\ lexer does with an aptly named |comment()| +function. The typesetting parser uses the \.{\\ldcomment} macro for +the same purpose.
For the curious, here is a \flex\ style regular +expression defining \Cee\ comments\footnote{Taken from W.~McKeeman's site +at +\url{http://www.cs.dartmouth.edu/~mckeeman/cs118/assignments/comment.html} +and adapted to \flex\ syntax.}: +$$ +\hbox{\.{"/*" ("/"\yl[\^*/]\yl"*"+[\^*/])* "*"+ "/"}} +$$ +This expression does not handle {\it every\/} practical situation, +however, since it assumes that the end-of-line character can be +matched like any other. Neither does it detect some often-made +mistakes such as attempting to nest comments. A few minor +modifications can fix this deficiency, as well as add some error +handling; however, for the sake of consistency, the approach taken +here mirrors the one in the original \ld. + +The top level of the \.{\\ldcomment} macro simply bypasses the state +setup of the lexer and enters a `|while| loop' in the input +routine. This macro is a reasonable approximation of the functionality +provided by |comment()|. +@= +@G +\def\ldcomment{% + \let\oldyyreturn\yyreturn + \let\oldyylextail\yylextail + \let\yylextail\yymatch %/* start inputting characters until {\tt *}{\tt /} is seen */ + \let\yyreturn\ldcommentskipchars +} +@g + +@ The rest of the |while| loop merely waits for the \.{*/} combination. +@= +@G +\def\ldcommentskipchars{% + \ifnum\yycp@@=`* + \yybreak{\let\yyreturn\ldcommentseekslash\yyinput}% + %/* {\tt *} found, look for {\tt /} */ + \else + \yybreak{\yyinput}% %/* keep skipping characters */ + \yycontinue +}% + +\def\ldcommentseekslash{% + \ifnum\yycp@@=`/ + \yybreak{\ldcommentfinish}%/* {\tt /} found, exit */ + \else + \ifnum\yycp@@=`* + \yybreak@@{\yyinput}% %/* keep skipping {\tt *}'s looking for a {\tt /} */ + \else + \yybreak@@{\let\yyreturn\ldcommentskipchars\yyinput}% + %/* found a character other than {\tt *} or {\tt /} */ + \fi + \yycontinue +}% +@g + +@ Once the end of the comment has been found, resume lexing the input +stream. +@= +@G +\def\ldcommentfinish{% + \let\yyreturn\oldyyreturn + \let\yylextail\oldyylextail + \yylextail +} +@g + +@ The semantics of the macros defined above do not quite match that +of the |comment()| function. The most significant difference is that +the portion of the action following \.{\\ldcomment} expands {\it +before\/} the comment characters are skipped. In most applications, +|comment()| is the last function called, so this would not limit the use +of \.{\\ldcomment} too dramatically. + +A more intuitive and easier-to-use version of \.{\\ldcomment} is +possible, however, if \.{\\yylextail} is not used inside actions (in the case of +an `optimized' lexer the restriction is even weaker, namely, +\.{\\yylextail} merely has to be absent in the portion of the action +following \.{\\ldcomment}). +@= +@G +\def\ldcomment#1\yylextail{% + \let\oldyyreturn\yyreturn + \def\yylexcontinuation{#1\yylextail}% + \let\yyreturn\ldcommentskipchars %/* start inputting characters until {\tt *}{\tt /} is seen */ + \yymatch +} + +\def\ldcommentfinish{% + \let\yyreturn\oldyyreturn + \yylexcontinuation +} +@g + +@ \namedspot{pretendbufferswlex}The same idea can be applied to +`\locallink{pretendbuffersw}pretend buffer switching\endlink'. Whenever +the `real' \ld\ parser encounters an \prodstyle{INCLUDE} command, it +switches the input buffer for the lexer and waits for the lexer to +return the tokens from the file it just opened. When the lexer scans +the end of the included file, it returns a special token, \prodstyle{END}, that +completes the appropriate production and lets the parser continue with +its job.
We would like to simulate the file inclusion by inserting the +appropriate end-of-file marker for the lexer (a double +\.{\\yyeof}). After the relevant production completes, the marker +has to be cleaned up from the input stream (the lexer is designed to +leave it intact). The macros below handle this task. +@= +@G +\def\ldcleanyyeof#1\yylextail{% + \let\oldyyinput\yyinput + \def\yyinput\yyeof\yyeof{\let\yyinput\oldyyinput#1\yylextail}% + \yymatch +} +@g + +@*1 Regular expressions. +The `heart' of any lexer is the collection of regular expressions that +describe the {\it tokens\/} of the appropriate language. The variety of +tokens recognized by \ld\ is quite extensive and is described in the +sections that follow. + +Variable names and algebraic operations come first. +@<\ld\ token regular expressions@>= +@G +"/*" {@> @[TeX_( "/ldcomment/yylexnext" );@]@=} +"-" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"+" {@> @[TeX_( "/yylexreturnchar" );@]@=} +{FILENAMECHAR1}{SYMBOLCHARN}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=} +"=" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"$"([0-9A-Fa-f])+ {@> @ @=} +([0-9A-Fa-f])+(H|h|X|x|B|b|O|o|D|d) {@> @@=} +((("$"|0[xX])([0-9A-Fa-f])+)|(([0-9])+))(M|K|m|k)? { + @> @@=} +"]" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"[" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"<<=" {@> @[TeX_( "/yylexreturnptr{LSHIFTEQ}" );@]@=} +">>=" {@> @[TeX_( "/yylexreturnptr{RSHIFTEQ}" );@]@=} +"||" {@> @[TeX_( "/yylexreturnptr{OROR}" );@]@=} +"==" {@> @[TeX_( "/yylexreturnptr{EQ}" );@]@=} +"!=" {@> @[TeX_( "/yylexreturnptr{NE}" );@]@=} +">=" {@> @[TeX_( "/yylexreturnptr{GE}" );@]@=} +"<=" {@> @[TeX_( "/yylexreturnptr{LE}" );@]@=} +"<<" {@> @[TeX_( "/yylexreturnptr{LSHIFT}" );@]@=} +">>" {@> @[TeX_( "/yylexreturnptr{RSHIFT}" );@]@=} +"+=" {@> @[TeX_( "/yylexreturnptr{PLUSEQ}" );@]@=} +"-=" {@> @[TeX_( "/yylexreturnptr{MINUSEQ}" );@]@=} +"*=" {@> @[TeX_( "/yylexreturnptr{MULTEQ}" );@]@=} +"/=" {@> @[TeX_( "/yylexreturnptr{DIVEQ}" );@]@=} +"&=" {@> @[TeX_( "/yylexreturnptr{ANDEQ}" );@]@=} +"|=" {@> @[TeX_( "/yylexreturnptr{OREQ}" );@]@=} +"&&" {@> @[TeX_( "/yylexreturnptr{ANDAND}" );@]@=} +">" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"," {@> @[TeX_( "/yylexreturnchar" );@]@=} +"&" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"|" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"~" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"!" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"?" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"*" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"+" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"-" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"/" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"%" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"<" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"=" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"}" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"{" {@> @[TeX_( "/yylexreturnchar" );@]@=} +")" {@> @[TeX_( "/yylexreturnchar" );@]@=} +"(" {@> @[TeX_( "/yylexreturnchar" );@]@=} +":" {@> @[TeX_( "/yylexreturnchar" );@]@=} +";" {@> @[TeX_( "/yylexreturnchar" );@]@=} +@g + +@ The bulk of tokens produced by the lexer are the keywords used +inside script files. File name syntax is listed as well, along with +miscellanea such as whitespace and version symbols.
+@<\ld\ token regular expressions@>= +@G +"MEMORY" {@> @[TeX_( "/yylexreturnptr{MEMORY}" );@]@=} +"REGION_ALIAS" {@> @[TeX_( "/yylexreturnptr{REGION_ALIAS}" );@]@=} +"LD_FEATURE" {@> @[TeX_( "/yylexreturnptr{LD_FEATURE}" );@]@=} +"ORIGIN" {@> @[TeX_( "/yylexreturnptr{ORIGIN}" );@]@=} +"VERSION" {@> @[TeX_( "/yylexreturnptr{VERSIONK}" );@]@=} +"BLOCK" {@> @[TeX_( "/yylexreturnptr{BLOCK}" );@]@=} +"BIND" {@> @[TeX_( "/yylexreturnptr{BIND}" );@]@=} +"LENGTH" {@> @[TeX_( "/yylexreturnptr{LENGTH}" );@]@=} +"ALIGN" {@> @[TeX_( "/yylexreturnptr{ALIGN_K}" );@]@=} +"DATA_SEGMENT_ALIGN" {@> @[TeX_( "/yylexreturnptr{DATA_SEGMENT_ALIGN}" );@]@=} +"DATA_SEGMENT_RELRO_END" {@> @[TeX_( "/yylexreturnptr{DATA_SEGMENT_RELRO_END}" );@]@=} +"DATA_SEGMENT_END" {@> @[TeX_( "/yylexreturnptr{DATA_SEGMENT_END}" );@]@=} +"ADDR" {@> @[TeX_( "/yylexreturnptr{ADDR}" );@]@=} +"LOADADDR" {@> @[TeX_( "/yylexreturnptr{LOADADDR}" );@]@=} +"ALIGNOF" {@> @[TeX_( "/yylexreturnptr{ALIGNOF}" );@]@=} +"MAX" {@> @[TeX_( "/yylexreturnptr{MAX_K}" );@]@=} +"MIN" {@> @[TeX_( "/yylexreturnptr{MIN_K}" );@]@=} +"LOG2CEIL" {@> @[TeX_( "/yylexreturnptr{LOG2CEIL}" );@]@=} +"ASSERT" {@> @[TeX_( "/yylexreturnptr{ASSERT_K}" );@]@=} +"ENTRY" {@> @[TeX_( "/yylexreturnptr{ENTRY}" );@]@=} +"EXTERN" {@> @[TeX_( "/yylexreturnptr{EXTERN}" );@]@=} +"NEXT" {@> @[TeX_( "/yylexreturnptr{NEXT}" );@]@=} +"sizeof_headers" {@> @[TeX_( "/yylexreturnptr{SIZEOF_HEADERS}" );@]@=} +"SIZEOF_HEADERS" {@> @[TeX_( "/yylexreturnptr{SIZEOF_HEADERS}" );@]@=} +"SEGMENT_START" {@> @[TeX_( "/yylexreturnptr{SEGMENT_START}" );@]@=} +"MAP" {@> @[TeX_( "/yylexreturnptr{MAP}" );@]@=} +"SIZEOF" {@> @[TeX_( "/yylexreturnptr{SIZEOF}" );@]@=} +"TARGET" {@> @[TeX_( "/yylexreturnptr{TARGET_K}" );@]@=} +"SEARCH_DIR" {@> @[TeX_( "/yylexreturnptr{SEARCH_DIR}" );@]@=} +"OUTPUT" {@> @[TeX_( "/yylexreturnptr{OUTPUT}" );@]@=} +"INPUT" {@> @[TeX_( "/yylexreturnptr{INPUT}" );@]@=} +"GROUP" {@> @[TeX_( "/yylexreturnptr{GROUP}" );@]@=} +"AS_NEEDED" {@> @[TeX_( "/yylexreturnptr{AS_NEEDED}" );@]@=} +"DEFINED" {@> @[TeX_( "/yylexreturnptr{DEFINED}" );@]@=} +"CREATE_OBJECT_SYMBOLS" {@> @[TeX_( "/yylexreturnptr{CREATE_OBJECT_SYMBOLS}" );@]@=} +"CONSTRUCTORS" {@> @[TeX_( "/yylexreturnptr{CONSTRUCTORS}" );@]@=} +"FORCE_COMMON_ALLOCATION" {@> @[TeX_( "/yylexreturnptr{FORCE_COMMON_ALLOCATION}" );@]@=} +"INHIBIT_COMMON_ALLOCATION" {@> @[TeX_( "/yylexreturnptr{INHIBIT_COMMON_ALLOCATION}" );@]@=} +"SECTIONS" {@> @[TeX_( "/yylexreturnptr{SECTIONS}" );@]@=} +"INSERT" {@> @[TeX_( "/yylexreturnptr{INSERT_K}" );@]@=} +"AFTER" {@> @[TeX_( "/yylexreturnptr{AFTER}" );@]@=} +"BEFORE" {@> @[TeX_( "/yylexreturnptr{BEFORE}" );@]@=} +"FILL" {@> @[TeX_( "/yylexreturnptr{FILL}" );@]@=} +"STARTUP" {@> @[TeX_( "/yylexreturnptr{STARTUP}" );@]@=} +"OUTPUT_FORMAT" {@> @[TeX_( "/yylexreturnptr{OUTPUT_FORMAT}" );@]@=} +"OUTPUT_ARCH" {@> @[TeX_( "/yylexreturnptr{OUTPUT_ARCH}" );@]@=} +"HLL" {@> @[TeX_( "/yylexreturnptr{HLL}" );@]@=} +"SYSLIB" {@> @[TeX_( "/yylexreturnptr{SYSLIB}" );@]@=} +"FLOAT" {@> @[TeX_( "/yylexreturnptr{FLOAT}" );@]@=} +"QUAD" {@> @[TeX_( "/yylexreturnptr{QUAD}" );@]@=} +"SQUAD" {@> @[TeX_( "/yylexreturnptr{SQUAD}" );@]@=} +"LONG" {@> @[TeX_( "/yylexreturnptr{LONG}" );@]@=} +"SHORT" {@> @[TeX_( "/yylexreturnptr{SHORT}" );@]@=} +"BYTE" {@> @[TeX_( "/yylexreturnptr{BYTE}" );@]@=} +"NOFLOAT" {@> @[TeX_( "/yylexreturnptr{NOFLOAT}" );@]@=} +"NOCROSSREFS" {@> @[TeX_( "/yylexreturnptr{NOCROSSREFS}" );@]@=} +"OVERLAY" {@> @[TeX_( "/yylexreturnptr{OVERLAY}" );@]@=} +"SORT_BY_NAME" {@> @[TeX_( 
"/yylexreturnptr{SORT_BY_NAME}" );@]@=} +"SORT_BY_ALIGNMENT" {@> @[TeX_( "/yylexreturnptr{SORT_BY_ALIGNMENT}" );@]@=} +"SORT" {@> @[TeX_( "/yylexreturnptr{SORT_BY_NAME}" );@]@=} +"SORT_BY_INIT_PRIORITY" {@> @[TeX_( "/yylexreturnptr{SORT_BY_INIT_PRIORITY}" );@]@=} +"SORT_NONE" {@> @[TeX_( "/yylexreturnptr{SORT_NONE}" );@]@=} +"NOLOAD" {@> @[TeX_( "/yylexreturnptr{NOLOAD}" );@]@=} +"DSECT" {@> @[TeX_( "/yylexreturnptr{DSECT}" );@]@=} +"COPY" {@> @[TeX_( "/yylexreturnptr{COPY}" );@]@=} +"INFO" {@> @[TeX_( "/yylexreturnptr{INFO}" );@]@=} +"OVERLAY" {@> @[TeX_( "/yylexreturnptr{OVERLAY}" );@]@=} +"ONLY_IF_RO" {@> @[TeX_( "/yylexreturnptr{ONLY_IF_RO}" );@]@=} +"ONLY_IF_RW" {@> @[TeX_( "/yylexreturnptr{ONLY_IF_RW}" );@]@=} +"SPECIAL" {@> @[TeX_( "/yylexreturnptr{SPECIAL}" );@]@=} +"o" {@> @[TeX_( "/yylexreturnptr{ORIGIN}" );@]@=} +"org" {@> @[TeX_( "/yylexreturnptr{ORIGIN}" );@]@=} +"l" {@> @[TeX_( "/yylexreturnptr{LENGTH}" );@]@=} +"len" {@> @[TeX_( "/yylexreturnptr{LENGTH}" );@]@=} +"INPUT_SECTION_FLAGS" {@> @[TeX_( "/yylexreturnptr{INPUT_SECTION_FLAGS}" );@]@=} +"INCLUDE" {@> @[TeX_( "/yylexreturnptr{INCLUDE}" );@]@=} +"PHDRS" {@> @[TeX_( "/yylexreturnptr{PHDRS}" );@]@=} +"AT" {@> @[TeX_( "/yylexreturnptr{AT}" );@]@=} +"ALIGN_WITH_INPUT" {@> @[TeX_( "/yylexreturnptr{ALIGN_WITH_INPUT}" );@]@=} +"SUBALIGN" {@> @[TeX_( "/yylexreturnptr{SUBALIGN}" );@]@=} +"HIDDEN" {@> @[TeX_( "/yylexreturnptr{HIDDEN}" );@]@=} +"PROVIDE" {@> @[TeX_( "/yylexreturnptr{PROVIDE}" );@]@=} +"PROVIDE_HIDDEN" {@> @[TeX_( "/yylexreturnptr{PROVIDE_HIDDEN}" );@]@=} +"KEEP" {@> @[TeX_( "/yylexreturnptr{KEEP}" );@]@=} +"EXCLUDE_FILE" {@> @[TeX_( "/yylexreturnptr{EXCLUDE_FILE}" );@]@=} +"CONSTANT" {@> @[TeX_( "/yylexreturnptr{CONSTANT}" );@]@=} +"#".*\n? {@> @[TeX_( "/yylexnext" );@]@=} +"\n" {@> @[TeX_( "/yylexreturnptr{NEWLINE}" );@]@=} +"*".* {@> @[TeX_( "/yylexnext" );@]@=} +";".* {@> @[TeX_( "/yylexnext" );@]@=} +"END" {@> @[TeX_( "/yylexreturnptr{ENDWORD}" );@]@=} +"ALIGNMOD" {@> @[TeX_( "/yylexreturnptr{ALIGNMOD}" );@]@=} +"ALIGN" {@> @[TeX_( "/yylexreturnptr{ALIGN_K}" );@]@=} +"CHIP" {@> @[TeX_( "/yylexreturnptr{CHIP}" );@]@=} +"BASE" {@> @[TeX_( "/yylexreturnptr{BASE}" );@]@=} +"ALIAS" {@> @[TeX_( "/yylexreturnptr{ALIAS}" );@]@=} +"TRUNCATE" {@> @[TeX_( "/yylexreturnptr{TRUNCATE}" );@]@=} +"LOAD" {@> @[TeX_( "/yylexreturnptr{LOAD}" );@]@=} +"PUBLIC" {@> @[TeX_( "/yylexreturnptr{PUBLIC}" );@]@=} +"ORDER" {@> @[TeX_( "/yylexreturnptr{ORDER}" );@]@=} +"NAME" {@> @[TeX_( "/yylexreturnptr{NAMEWORD}" );@]@=} +"FORMAT" {@> @[TeX_( "/yylexreturnptr{FORMAT}" );@]@=} +"CASE" {@> @[TeX_( "/yylexreturnptr{CASE}" );@]@=} +"START" {@> @[TeX_( "/yylexreturnptr{START}" );@]@=} +"LIST".* {@> @[TeX_( "/yylexreturnptr{LIST}" );@]@=} +"SECT" {@> @[TeX_( "/yylexreturnptr{SECT}" );@]@=} +"ABSOLUTE" {@> @[TeX_( "/yylexreturnptr{ABSOLUTE}" );@]@=} +"end" {@> @[TeX_( "/yylexreturnptr{ENDWORD}" );@]@=} +"alignmod" {@> @[TeX_( "/yylexreturnptr{ALIGNMOD}" );@]@=} +"align" {@> @[TeX_( "/yylexreturnptr{ALIGN_K}" );@]@=} +"chip" {@> @[TeX_( "/yylexreturnptr{CHIP}" );@]@=} +"base" {@> @[TeX_( "/yylexreturnptr{BASE}" );@]@=} +"alias" {@> @[TeX_( "/yylexreturnptr{ALIAS}" );@]@=} +"truncate" {@> @[TeX_( "/yylexreturnptr{TRUNCATE}" );@]@=} +"load" {@> @[TeX_( "/yylexreturnptr{LOAD}" );@]@=} +"public" {@> @[TeX_( "/yylexreturnptr{PUBLIC}" );@]@=} +"order" {@> @[TeX_( "/yylexreturnptr{ORDER}" );@]@=} +"name" {@> @[TeX_( "/yylexreturnptr{NAMEWORD}" );@]@=} +"format" {@> @[TeX_( "/yylexreturnptr{FORMAT}" );@]@=} +"case" {@> @[TeX_( "/yylexreturnptr{CASE}" 
);@]@=} +"extern" {@> @[TeX_( "/yylexreturnptr{EXTERN}" );@]@=} +"start" {@> @[TeX_( "/yylexreturnptr{START}" );@]@=} +"list".* {@> @[TeX_( "/yylexreturnptr{LIST}" );@]@=} +"sect" {@> @[TeX_( "/yylexreturnptr{SECT}" );@]@=} +"absolute" {@> @[TeX_( "/yylexreturnptr{ABSOLUTE}" );@]@=} +{FILENAMECHAR1}{NOCFILENAMECHAR}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=} +{FILENAMECHAR1}{FILENAMECHAR}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=} +"-l"{FILENAMECHAR}+ {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=} +{FILENAMECHAR1}{NOCFILENAMECHAR}* {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=} +"-l"{NOCFILENAMECHAR}+ {@> @[TeX_( "/yylexreturnval{NAME}" );@]@=} +