author     Norbert Preining <norbert@preining.info>  2019-09-02 13:46:59 +0900
committer  Norbert Preining <norbert@preining.info>  2019-09-02 13:46:59 +0900
commit     e0c6872cf40896c7be36b11dcc744620f10adf1d (patch)
tree       60335e10d2f4354b0674ec22d7b53f0f8abee672 /web/noweb/contrib
Initial commit
Diffstat (limited to 'web/noweb/contrib')
-rw-r--r--  web/noweb/contrib/Makefile  13
-rw-r--r--  web/noweb/contrib/README  9
-rw-r--r--  web/noweb/contrib/avs/email  2
-rw-r--r--  web/noweb/contrib/avs/filelist.txt  59
-rw-r--r--  web/noweb/contrib/avs/ftpsites.txt  65
-rw-r--r--  web/noweb/contrib/avs/generate.ksh  114
-rw-r--r--  web/noweb/contrib/avs/howto386.txt  254
-rw-r--r--  web/noweb/contrib/avs/icon.1  330
-rw-r--r--  web/noweb/contrib/avs/jrtex12a.avs  1054
-rw-r--r--  web/noweb/contrib/avs/make_ico.awk  50
-rw-r--r--  web/noweb/contrib/avs/make_lib.awk  12
-rw-r--r--  web/noweb/contrib/avs/make_src.awk  72
-rw-r--r--  web/noweb/contrib/avs/make_xdo.awk  14
-rw-r--r--  web/noweb/contrib/avs/mks42bug.0d  128
-rw-r--r--  web/noweb/contrib/avs/mksfixes.ksh  15
-rw-r--r--  web/noweb/contrib/avs/myenv.ksh  38
-rw-r--r--  web/noweb/contrib/avs/norman1.txt  136
-rw-r--r--  web/noweb/contrib/avs/nw_c.bat  63
-rw-r--r--  web/noweb/contrib/avs/nwicon.bat  43
-rw-r--r--  web/noweb/contrib/avs/nwinst.ksh  9
-rw-r--r--  web/noweb/contrib/avs/readme  39
-rw-r--r--  web/noweb/contrib/avs/report1.bug  97
-rw-r--r--  web/noweb/contrib/conrado/Makefile  15
-rw-r--r--  web/noweb/contrib/conrado/README  38
-rw-r--r--  web/noweb/contrib/conrado/algoritmos.sty  169
-rwxr-xr-x  web/noweb/contrib/conrado/d2tex  144
-rw-r--r--  web/noweb/contrib/conrado/email  1
-rw-r--r--  web/noweb/contrib/conrado/hospital.nw  165
-rw-r--r--  web/noweb/contrib/conrado/keywords.tex  59
-rw-r--r--  web/noweb/contrib/davelove/Makefile  6
-rw-r--r--  web/noweb/contrib/davelove/README  2
-rw-r--r--  web/noweb/contrib/davelove/email  1
-rw-r--r--  web/noweb/contrib/davelove/subref.doc  235
-rw-r--r--  web/noweb/contrib/fischer/README  40
-rwxr-xr-x  web/noweb/contrib/fischer/noscript-0.1/noscript  15
-rw-r--r--  web/noweb/contrib/fischer/noscript-0.1/test-none.nw  8
-rw-r--r--  web/noweb/contrib/fischer/noscript-0.1/test-py.nw  23
-rw-r--r--  web/noweb/contrib/fischer/noscript-0.1/test-sh.nw  10
-rw-r--r--  web/noweb/contrib/gregory/README  2
-rw-r--r--  web/noweb/contrib/gregory/dots.nw  154
-rw-r--r--  web/noweb/contrib/gregory/email  1
-rw-r--r--  web/noweb/contrib/jobling/Makefile  34
-rw-r--r--  web/noweb/contrib/jobling/README  21
-rw-r--r--  web/noweb/contrib/jobling/correct-refs.bbl  36
-rw-r--r--  web/noweb/contrib/jobling/correct-refs.nw  391
-rw-r--r--  web/noweb/contrib/jobling/email  1
-rw-r--r--  web/noweb/contrib/jonkrom/Makefile  15
-rw-r--r--  web/noweb/contrib/jonkrom/README  4
-rw-r--r--  web/noweb/contrib/jonkrom/email  1
-rw-r--r--  web/noweb/contrib/jonkrom/noxref.nw  491
-rw-r--r--  web/noweb/contrib/kaelin/README  19
-rw-r--r--  web/noweb/contrib/kaelin/email  1
-rw-r--r--  web/noweb/contrib/kaelin/pp.nw  616
-rw-r--r--  web/noweb/contrib/kostas/C++_translation_table  64
-rw-r--r--  web/noweb/contrib/kostas/C_translation_table  55
-rw-r--r--  web/noweb/contrib/kostas/Makefile  75
-rw-r--r--  web/noweb/contrib/kostas/Makefile.gnu  75
-rw-r--r--  web/noweb/contrib/kostas/Makefile.make  75
-rw-r--r--  web/noweb/contrib/kostas/README  14
-rw-r--r--  web/noweb/contrib/kostas/WHATS_NEW  46
-rw-r--r--  web/noweb/contrib/kostas/defns.nw  33
-rw-r--r--  web/noweb/contrib/kostas/email  1
-rw-r--r--  web/noweb/contrib/kostas/icon_translation_table  140
-rw-r--r--  web/noweb/contrib/kostas/math_translation_table  68
-rw-r--r--  web/noweb/contrib/kostas/mathdefs.nw  24
-rw-r--r--  web/noweb/contrib/kostas/oot_translation_table  137
-rw-r--r--  web/noweb/contrib/kostas/ootdefs.nw  198
-rw-r--r--  web/noweb/contrib/kostas/pp.nw  541
-rw-r--r--  web/noweb/contrib/leew/Makefile  6
-rw-r--r--  web/noweb/contrib/leew/README  12
-rw-r--r--  web/noweb/contrib/leew/custom-code/README.custom-code  17
-rwxr-xr-x  web/noweb/contrib/leew/custom-code/custom-code  12
-rw-r--r--  web/noweb/contrib/leew/custom-code/example.nw  101
-rw-r--r--  web/noweb/contrib/leew/custom-code/example.pdf  bin 0 -> 46036 bytes
-rw-r--r--  web/noweb/contrib/leew/custom-code/example.tex  105
-rw-r--r--  web/noweb/contrib/leew/custom-code/n  20
-rw-r--r--  web/noweb/contrib/leew/email  2
-rw-r--r--  web/noweb/contrib/leew/nobrace.nw  321
-rw-r--r--  web/noweb/contrib/leew/nocond.nw  375
-rw-r--r--  web/noweb/contrib/leew/strhack.nw  32
-rw-r--r--  web/noweb/contrib/leyn/README  23
-rw-r--r--  web/noweb/contrib/leyn/email  1
-rwxr-xr-x  web/noweb/contrib/leyn/notangleall  27
-rwxr-xr-x  web/noweb/contrib/leyn/ttroots  41
-rw-r--r--  web/noweb/contrib/norman/Makefile  10
-rw-r--r--  web/noweb/contrib/norman/README  6
-rw-r--r--  web/noweb/contrib/norman/cleanchunks.nw  42
-rw-r--r--  web/noweb/contrib/norman/email  1
-rwxr-xr-x  web/noweb/contrib/norman/generate-to  24
-rw-r--r--  web/noweb/contrib/norman/htmlgif/htmlgif.icn  54
-rw-r--r--  web/noweb/contrib/norman/htmlgif/newer.c  19
-rwxr-xr-x  web/noweb/contrib/norman/htmlgif/pstopbm  86
-rw-r--r--  web/noweb/contrib/norman/moddate.nw  79
-rw-r--r--  web/noweb/contrib/norman/numarkup/Makefile  37
-rw-r--r--  web/noweb/contrib/norman/numarkup/numarkup.bbl  58
-rw-r--r--  web/noweb/contrib/norman/numarkup/numarkup.nw  1264
-rw-r--r--  web/noweb/contrib/norman/pp/mkfile  24
-rw-r--r--  web/noweb/contrib/norman/pp/pp.nw  314
-rw-r--r--  web/noweb/contrib/norman/scopehack.icn  44
-rw-r--r--  web/noweb/contrib/partingr/README  148
-rwxr-xr-x  web/noweb/contrib/partingr/TeXthings  52
-rw-r--r--  web/noweb/contrib/partingr/addscore.nw  183
-rw-r--r--  web/noweb/contrib/partingr/email  2
-rwxr-xr-x  web/noweb/contrib/partingr/mm2mx63  160
-rwxr-xr-x  web/noweb/contrib/partingr/mm2mx64  173
-rwxr-xr-x  web/noweb/contrib/partingr/mm2mx65  179
-rwxr-xr-x  web/noweb/contrib/partingr/mm2tex  41
-rwxr-xr-x  web/noweb/contrib/partingr/mx2tex31  130
-rw-r--r--  web/noweb/contrib/partingr/nwindex.tex  134
-rwxr-xr-x  web/noweb/contrib/partingr/nwnweave  2
-rwxr-xr-x  web/noweb/contrib/partingr/nwtangle  2
-rwxr-xr-x  web/noweb/contrib/partingr/nwweave  2
-rwxr-xr-x  web/noweb/contrib/partingr/xpand  20
-rw-r--r--  web/noweb/contrib/rsc/README  1
-rw-r--r--  web/noweb/contrib/rsc/email  1
-rw-r--r--  web/noweb/contrib/rsc/rc/cpif.nw  47
-rw-r--r--  web/noweb/contrib/rsc/rc/emptydefn.nw  10
-rw-r--r--  web/noweb/contrib/rsc/rc/mkfile  28
-rw-r--r--  web/noweb/contrib/rsc/rc/noidx.nw  432
-rw-r--r--  web/noweb/contrib/rsc/rc/noindex.nw  194
-rw-r--r--  web/noweb/contrib/rsc/rc/noroots.nw  16
-rw-r--r--  web/noweb/contrib/rsc/rc/notangle.nw  51
-rw-r--r--  web/noweb/contrib/rsc/rc/nountangle.nw  93
-rw-r--r--  web/noweb/contrib/rsc/rc/noweave.nw  594
-rw-r--r--  web/noweb/contrib/rsc/rc/noweave.simple  56
-rw-r--r--  web/noweb/contrib/rsc/rc/noweb.nw  63
-rw-r--r--  web/noweb/contrib/rsc/rc/toascii.nw  279
-rw-r--r--  web/noweb/contrib/rsc/rc/tohtml.nw  362
-rw-r--r--  web/noweb/contrib/rsc/rc/totex.nw  312
-rw-r--r--  web/noweb/contrib/rsc/rc/unmarkup.nw  53
-rw-r--r--  web/noweb/contrib/ydirson/Makefile  11
-rw-r--r--  web/noweb/contrib/ydirson/README  32
-rw-r--r--  web/noweb/contrib/ydirson/email  1
-rwxr-xr-x  web/noweb/contrib/ydirson/enscript-html  150
-rwxr-xr-x  web/noweb/contrib/ydirson/guesslang  57
-rwxr-xr-x  web/noweb/contrib/ydirson/inheritlang  76
136 files changed, 14184 insertions, 0 deletions
diff --git a/web/noweb/contrib/Makefile b/web/noweb/contrib/Makefile
new file mode 100644
index 0000000000..0e048da8ea
--- /dev/null
+++ b/web/noweb/contrib/Makefile
@@ -0,0 +1,13 @@
+SHELL=/bin/sh
+LIB=/dev/null # to be overridden
+ICONC=icont # to be overridden
+DIRS=davelove jonkrom leew norman
+
+# don't do kostas; it requires gnu make (ugh)
+
+all: ; for i in $(DIRS); do (cd $$i; make ICONC=$(ICONC) all); done
+install: ; for i in $(DIRS); do (cd $$i; make LIB=$(LIB) BIN=$(BIN) install); done
+source: ; for i in $(DIRS); do (cd $$i; make source); done
+clean: ; for i in $(DIRS); do (cd $$i; make clean); done
+clobber: clean
+
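+# A sketch of typical use; the install paths below are examples only,
+# not part of the distribution (LIB, BIN, and ICONC are meant to be
+# overridden on the command line):
+#   make ICONC=icont all
+#   make LIB=/usr/local/lib/noweb BIN=/usr/local/bin install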
diff --git a/web/noweb/contrib/README b/web/noweb/contrib/README
new file mode 100644
index 0000000000..7b5d7c97ed
--- /dev/null
+++ b/web/noweb/contrib/README
@@ -0,0 +1,9 @@
+These directories contain software that was contributed by users of
+noweb. Each directory contains a file called `email' that has the
+email address of the contributor. I don't pretend to have copyright
+rights, and I don't provide any warranty or any support. If you find
+something useful here, we're both pleased.
+
+Some contributed software contains binaries or is too big to be
+included in the standard noweb distribution. In such cases, the
+directories here may contain only pointers to anonymous ftp sites.
diff --git a/web/noweb/contrib/avs/email b/web/noweb/contrib/avs/email
new file mode 100644
index 0000000000..9c280ceffc
--- /dev/null
+++ b/web/noweb/contrib/avs/email
@@ -0,0 +1,2 @@
+avs@daimi.aau.dk (Alexandre Valente Sousa)
+If this doesn't work (in a couple of years it won't) try avs@monet.inescn.pt
diff --git a/web/noweb/contrib/avs/filelist.txt b/web/noweb/contrib/avs/filelist.txt
new file mode 100644
index 0000000000..73ee43dd75
--- /dev/null
+++ b/web/noweb/contrib/avs/filelist.txt
@@ -0,0 +1,59 @@
+File list for noweb/contrib/avs
+This is not in Noweb 2.7a but it has been proposed to be in the contrib dir
+Thus these files are from ftp.daimi.aau.dk:/pub/empl/avs/avs386_noweb27a.tar.gz
+
+Although the target is Dos I use .tar.gz format instead of .arj or .zip,
+because you need DJGPP (which has gzip) and MKS (which has tar) to use this
+anyway. For your convenience, in case you have only MKS so far and will be
+using my instructions to get DJGPP, I supply the file
+ftp.daimi.aau.dk:/pub/empl/avs/gzip386.exe (you should rename it to gzip.exe)
+
+ CHANGE THIS:
+myenv.ksh --> edit this file for your environment (Korn shell script)
+
+ DOCUMENTATION:
+readme --> brief description
+email --> my contact info
+norman1.txt --> mail sent to Norman Ramsey describing this Dos HOWTO
+filelist.txt --> this file
+howto386.txt --> documentation/troubleshooting/explanation/Dos recipe
+ftpsites.txt --> where to get additional software that might be required
+
+ ADDITIONAL DOCUMENTATION:
+icon.1 --> NROFF processed man file for Icon (because we are using it)
+jrtex12a.avs --> excellent J. Refling's PC386 LaTeX2e HOWTO annotated by me
+mks42bug.0d --> text file with my personal list of MKS 4.2 bugs
+report1.bug --> bug report for noweb 2.7a
+
+ BUILD & INSTALL SCRIPTS (used by myenv.ksh):
+mksfixes.ksh --> fix noweb.ksh and cpif.ksh for the MKS Toolkit 4.2
+generate.ksh --> generates 'automate.bat' (the build/install script)
+nwicon.bat --> Make Icon code (the '\\\\' hack)
+nw_c.bat --> Patch DJGPP bug & Make C code & avoid out of memory errors
+nwinst.ksh --> install Noweb (just to separate install from build)
+make_ico.awk --> patches noweb/src/icon/makefile for Ms-Dos
+make_src.awk --> patches noweb/src/makefile for Ms-Dos
+make_xdo.awk --> patches noweb/src/xdoc/makefile for Ms-Dos
+make_lib.awk --> patches noweb/src/lib/makefile for Ms-Dos
+
+ 2 17 122 email
+ 59 353 2735 filelist.txt
+ 63 412 2978 ftpsites.txt
+ 114 687 4883 generate.ksh
+ 254 2270 13918 howto386.txt
+ 330 1243 10462 icon.1
+ 1054 5692 43470 jrtex12a.avs
+ 50 297 1694 make_ico.awk
+ 12 80 498 make_lib.awk
+ 72 509 2832 make_src.awk
+ 14 104 593 make_xdo.awk
+ 128 1219 7347 mks42bug.0d
+ 14 110 714 mksfixes.ksh
+ 38 321 1923 myenv.ksh
+ 136 1409 8168 norman1.txt
+ 62 288 2077 nw_c.bat
+ 43 201 1411 nwicon.bat
+ 9 33 253 nwinst.ksh
+ 39 302 1807 readme
+ 97 796 4554 report1.bug
+ 2590 16343 112439 total
diff --git a/web/noweb/contrib/avs/ftpsites.txt b/web/noweb/contrib/avs/ftpsites.txt
new file mode 100644
index 0000000000..8abcbd4bdc
--- /dev/null
+++ b/web/noweb/contrib/avs/ftpsites.txt
@@ -0,0 +1,65 @@
+NOTE: The information below is from May 30, 1995. It is likely to be
+obsolete. And to appease the maintainers of CTAN, references to ftp
+dot tex dot ac dot uk have been removed.
+----------------------------------------------------------------
+These are the anonymous ftp sites that I prefer (I am in Europe and these are
+the fastest sites for me)
+
+Noweb 2.7a (only at this site as of 29-May-95)
+ftp.shsu.edu:/pub/tex/web
+
+Noweb 2.7
+(No longer available. Try https://github.com/nrnrnr/noweb, tag v2_7,
+or better yet, a version that is up to date.)
+
+How to build Noweb 2.7a for Dos + PC386 + MKS toolkit
+ftp.daimi.aau.dk:/pub/empl/avs/avs386_noweb27a.tar.gz
+(my site, just for completeness, you might also want to look there for a newer
+version in case some bug in my scripts has been reported and fixed)
+
+emTeX 386 3.1415 [3c-beta12], LaTeX2e (1-Dec-94 patch level 1), Dvips 5.54
+(several ftp sites must be used, see the file 'jrtex12a.avs')
+
+Chicago LaTeX package (required by noweb/src/xdoc/guide.tex)
+CTAN
+Assuming that you followed the instructions from the file 'jrtex12a.avs', put
+the 3 files (chicago.bst, chicago.sty, chicagoa.bst) in
+?:/emtex/texinputs/local
+
+DJGPP (GNU software Dos port for PC 386)
+ftp.funet.fi:/pub/mirrors/oak.oakland.edu/Simtel/msdos/djgpp (empty on
+21-Mar-95 due to a disk crash)
+or
+omnigate.clarkson.edu:/pub/msdos/djgpp
+or
+oak.oakland.edu:/pub/msdos/djgpp
+The easiest way is to get the README's and FAQ (readme.1st, readme.dj,
+djgpp.faq), to read them, and then to get the installation program
+(install.exe, install.dat) and the remaining binaries and docs (the minimum set
+of files that you need is the first FAQ entry, you will also need the Make
+module, and if you have a 386 without a coprocessor the 80387 emulator)
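+(for example, a typical anonymous-ftp session might look like the following;
+the exact file names change between releases, so check the directory listing:
+    ftp oak.oakland.edu
+    cd /pub/msdos/djgpp
+    bin
+    get readme.1st
+)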
+
+Binaries for Icon 9.0 for MsDos 386/486:
+cs.arizona.edu:/icon/packages/msdos/de-386.lzh
+(if you don't have lha.exe to unpack the archive you should also get it from
+there)
+
+The MKS Toolkit for Dos is commercial software (sorry)
+I don't know which is the latest version; I use 4.2 (Oct-93)
+Tel: (519) 884-2251 Mortice Kern Systems Inc.
+Fax: (519) 884-8861 35 King Street North,
+Technical Advice: (519) 884-2270 Waterloo, Ontario,
+Internet: inquiry@mks.com N2J 2W9
+CompuServe User ID: 73260,1043 CANADA
+BIX User Name: mks
+
+GhostScript 3.12
+ftp.funet.fi:/gnu/ghostscript3/aladdin
+If you don't have a PostScript printer you might want to use GhostScript, which
+can take PostScript and translate it for your printer (presuming your printer is
+somehow supported). See 'jrtex12a.avs' for a list of the devices in the gs.exe,
+gs386.exe, gswin.exe, and gswin32s.exe binaries. Notice that gs.exe crawls when
+processing noweave output (after intermediate processing by latex and dvips),
+while gs386.exe is about 40 times faster (about 1 page/second on a 486
+DX2-80). This speed difference does not apply to normal PostScript code; it is
+caused by something in the noweb code
diff --git a/web/noweb/contrib/avs/generate.ksh b/web/noweb/contrib/avs/generate.ksh
new file mode 100644
index 0000000000..8bf6050166
--- /dev/null
+++ b/web/noweb/contrib/avs/generate.ksh
@@ -0,0 +1,114 @@
+# do not use directly, it is better to edit the file 'myenv.ksh'
+
+if [ ! -e ../../contrib/avs/$0.ksh -a ! -e ../../contrib/avs/$0 ]
+then
+ echo Wrong dir, must run \'$0\' from noweb/contrib/avs dir
+ exit 1
+fi
+
+if [ -z "$7" ]
+then
+ echo Usage: $0 BIN LIB MAN TEXINPUTS GMAKEPATH TMP ICONTRANSLATORPATH
+ echo If your environment is OK this installs noweb 2.7a on your PC386
+ echo "(icont.exe, iconx.exe, ixhdr.exe in dir e:\\b), e.g."
+ echo " $0 i:/b g:/usr/local/lib/noweb g:/man h:/emtex/texinputs/local j:/djgpp/bin/make.exe d:/tmp e:/b/icont.exe"
+ echo "(the '.exe' in make.exe and icont.exe is not necessary)"
+ exit 1
+fi
+
+cd ../..
+# now one is at ./noweb
+
+echo "Renaming src/makefile src/icon/makefile src/install src/xdoc/makefile src/lib/makefile src/awkname to *.old:"
+for f in src/makefile src/icon/makefile src/install src/xdoc/makefile src/lib/makefile src/awkname
+do
+ if [ -e $f.old ]
+ then
+ echo File \'$f.old\' already exists, skipped
+ else
+ mv $f $f.old
+ fi
+done
+echo Done!
+
+echo "Adding Dos specific makefiles and scripts for the combination MKS/DJGPP/ICONT:"
+awk -f contrib/avs/make_ico.awk src/icon/makefile.old > src/icon/makefile
+awk -f contrib/avs/make_src.awk src/makefile.old > src/makefile
+awk -f contrib/avs/make_xdo.awk src/xdoc/makefile.old > src/xdoc/makefile
+awk -f contrib/avs/make_lib.awk src/lib/makefile.old > src/lib/makefile
+cp -p contrib/avs/nw_c.bat src
+cp -p contrib/avs/nwinst.ksh src
+cp -p contrib/avs/nwicon.bat src
+echo Done!
+
+cd src
+echo "Adapting awkname for Dos and changing the awk name to 'awk' in all scripts:"
+sed "s@new=/tmp/\$\$.new; old=/tmp/\$\$.old@new=$6/\$\$.new; old=$6/\$\$.old@" <awkname.old >awkname
+# because the script has no '.ksh' extension one has to use 'awkname.'
+sh -c "./awkname. awk"
+echo Done!
+
+echo "Touch'ing all *.nw source code to avoid potential date/time problems:"
+echo "(all of it gets dummy date 12:00 23-Feb-95)"
+find . -name "*.nw" -exec touch -t 9502231200 "{}" \;
+echo Done!
+
+echo "Touch'ing all *.1 man pages to avoid potential date/time problems:"
+echo "(all of it gets dummy date 12:00 24-Feb-95)"
+find xdoc -name "*.1" -exec touch -t 9502241200 "{}" \;
+echo Done!
+
+cd ../contrib/avs
+echo "Generating 'contrib/avs/automate.bat' (to avoid out of memory errors):"
+echo "@echo off" >automate.bat
+echo "REM This file was generated by $0" >>automate.bat
+echo "cd ..\\\\..\\\\src" >> automate.bat
+
+echo "echo ***" >>automate.bat
+echo "echo *** Make icon code" >>automate.bat
+echo "echo ***" >>automate.bat
+
+# Trying to get something like:
+# call nwicon i:/b g:/usr/local/lib/noweb j:\djgpp\bin\make.exe e:\\\\b\\\\icont.exe
+# The '\c' arg to echo means not to add a \n at the end
+# Careful not to allow echo to interpret e.g. djgpp\bin as having an embedded backspace (\b)
+# 'sed' adds a \n line to its output, that's why tr was needed
+echo call nwicon $1 $2 \\c >>automate.bat
+echo $5 \\c | sed 's#/#\\#g' | tr -d '\015\012' >>automate.bat
+echo $7 | sed 's#/#\\\\\\\\#g' >>automate.bat
+
+echo if errorlevel 1 goto FAILURE >>automate.bat
+echo "echo ***" >>automate.bat
+echo "echo *** Make C code" >>automate.bat
+echo "echo ***" >>automate.bat
+echo set DJGPPMAKE=$5 >>automate.bat
+echo call nw_c $5 | sed 's@/@\\@g' >>automate.bat
+echo if errorlevel 1 goto FAILURE >>automate.bat
+echo "echo ***" >>automate.bat
+echo "echo *** Installing noweb" >>automate.bat
+echo "echo ***" >>automate.bat
+echo sh -c \"./nwinst.ksh $1 $2 $3 $4\" >>automate.bat
+echo if errorlevel 1 goto FAILURE >>automate.bat
+echo "echo ***" >>automate.bat
+echo "echo *** Fixing $1/cpif.ksh and $1/noweb.ksh as documented in 'howto386.txt'" >>automate.bat
+echo "echo ***" >>automate.bat
+echo "cd ..\\\\contrib\\\\avs" >>automate.bat
+echo sh -c \"./mksfixes.ksh $1 $6\" >>automate.bat
+echo if errorlevel 1 goto FAILURE >>automate.bat
+echo "echo Success, noweb 2.7a built & installed! Now use 'man noweb'" >>automate.bat
+echo 'echo Noweb 2.7| banner -c n | sed' "'s/[ ]$//'" >>automate.bat
+echo goto THEEND >>automate.bat
+echo :FAILURE >>automate.bat
+echo "echo Previous command failed (non 0 exit code), out of memory?" >>automate.bat
+echo echo Aborting... sorry you have to manually fix the problem >>automate.bat
+echo "echo (and after that go to noweb/contrib/avs and rerun automate.bat)" >>automate.bat
+echo :THEEND >>automate.bat
+
+echo "****"
+echo "The file 'automate.bat' has been generated, now LEAVE the Korn shell (to avoid"
+echo "out of memory errors) and run it. If 'automate.bat' fails at some point, e.g."
+echo "out of memory, then you may try to fix the problem by hand e.g. calling the C"
+echo "compiler directly by looking at the previous output from Make, and then to"
+echo "rerun 'automate'. You can run 'automate.bat' as many times as you need until"
+echo "you reach the end with success"
diff --git a/web/noweb/contrib/avs/howto386.txt b/web/noweb/contrib/avs/howto386.txt
new file mode 100644
index 0000000000..49dedea19d
--- /dev/null
+++ b/web/noweb/contrib/avs/howto386.txt
@@ -0,0 +1,254 @@
+** Recipe for building and installing noweb 2.7a in a PC386 running Dos
+** Recipe version 0.3 (30-May-95), report problems to avs@daimi.aau.dk
+
+This recipe assumes its support files are at './noweb/contrib/avs'. If they are
+not (as is the case with the noweb 2.7a distribution) then just unpack
+noweb27a.tar.gz (official distribution) and avs386_noweb27a.tar.gz (my
+contribution) from the same base dir
+
+I'm only doing a minimum patch of the noweb source files and installation
+scripts to get a successful install (binaries plus support files plus man
+pages). I do not patch things that I am not likely to need like support for
+'make clean' or changing the '.nw' files in the sources dir. Also I didn't try
+to install the contributed software from the contrib dir. After noweb is
+successfully installed just remove the distribution files from your system (it
+is better to archive it somewhere, there are some docs, examples and contribs
+that might be useful later)
+
+Bugs/problems with noweb 2.7a: see the file 'report1.bug'
+
+This recipe supports Awk but that option is not thoroughly tested. Why should I
+use Awk if the Noweb distribution explicitly says that the Awk versions of the
+tools are untested and so probably have bugs? Compiling Icon for Ms-Dos is not
+easy but one can ftp the binaries (only 512 KB)
+
+** History:
+
+0.3 recipe for noweb 2.7 (30-May-95)
+0.2 recipe for noweb 2.7 (26-Mar-95), internal use only
+0.1 recipe for noweb 2.6c (12-Dec-94), internal use only
+
+**** Software ****
+
+Requires:
+- MKS Toolkit 4.2 for Dos (older versions of MKS might not work)
+- the DJGPP port of GNU gcc, GNU make, and GNU gzip
+- version 9.0 of Icon for MsDos 386/486,
+- LaTeX (LaTeX 2.09 or LaTeX2e)
+
+LaTeX: I use emTeX 386; the installation is anything but straightforward. If
+you don't already have LaTeX on your PC the best way to get it up and
+running is by using John Refling's jrtex12a.txt ("How I installed emtex,
+latex2e, mf, dvips, on a 386 with postscript or hplaser"). I supply an
+annotated copy of that document with some corrections and some additions
+made by me in the file 'jrtex12a.avs'
+
+Because MKS does not have nroff, an nroff-processed man page for Icon is
+provided here. (I also provide mks42bug.0d which has a list of all the MKS 4.2
+bugs that I am aware of, some of them required a fix in this recipe)
+
+See the file 'ftpsites.txt' for download info for the additional software
+
+**** Install ****
+
+NOTE: my MKS installation allows me to switch easily between using the MKS
+Korn-shell (sh.exe) and the Ms-Dos command interpreter (command.com), i.e. I
+boot with command.com and then I run login to get the Korn shell and by logging
+out I am back in command.com. You need the ability to switch between using
+command.com and sh.exe because some software has problems with the MKS pathname
+separator '/'. This is why the steps normally specify if they are to be run
+under command.com or sh.exe. If nothing is said it should work under
+either. Also command.com uses much less memory than sh.exe; for instance I am
+unable to compile the C code when running under sh.exe (this has to do with the
+640KB MsDos limit, I have lots of extended memory and a good memory manager)
+
+Change to some temporary directory (if '.' is that dir then all files will be
+under './noweb') and extract the distribution (the extension .tar.gz was
+changed to .tgz to comply with Ms-Dos filenames, 386avs27.tgz is
+avs386_noweb27.tar.gz):
+
+cd ...
+gzip -dc noweb27.tgz | tar xvf -
+gzip -dc 386avs27.tgz | tar xvf -
+ (my files are not part of noweb 2.7)
+cd ./noweb/contrib/avs
+
+All my files are at ./noweb/contrib/avs, see the file 'filelist.txt'
+
+If you feel lucky, edit the site-specific line of the file 'myenv.ksh'
+(i.e. specify the paths where things are, and where things will go) and run
+it. Then run the generated file 'automate.bat'. If things work as they ought
+to, you can stop reading.
+
+**** Troubleshooting ****
+
+Print this file and go through it step by step (I'm explaining what 'myenv.ksh',
+'automate.bat' and the scripts called by these are doing)
+
+a) change to ./noweb/src directory and edit 'awkname'
+ (this script uses '/tmp', if your tmp dir is not in the same drive as noweb,
+ replace in line 8
+ new=/tmp/$$.new; old=/tmp/$$.old
+ with e.g.
+ new=c:/tmp/$$.new; old=c:/tmp/$$.old
+   (Notice that although one is using Icon instead of Awk, line 26 of
+   noweb.ksh, line 7 of noroots.ksh, and line 32 of nountang.ksh use Awk
+ anyway, so it's easier to update the awk name everywhere)
+
+b) run the shell script 'awkname' under the Korn shell with 'awk' as argument
+ (because the Korn shell awk is named awk). Notice that because the awkname
+ script does not have a .ksh extension one has to explicitly supply a '.' as
+ the extension name, otherwise the shell won't find it, e.g.
+ cd ./noweb/src; ./awkname. awk
+
+c) rename src/makefile src/icon/makefile src/install src/xdoc/makefile
+ src/awkname to *.old. And then run the awk scripts to patch the makefiles
+ (each action in each script has a comment explaining what it is doing and
+ why). The file 'install' had to be renamed to avoid confusing MKS make when
+ it tries to run 'make install'.
+ awk -f contrib/avs/make_ico.awk src/icon/makefile.old > src/icon/makefile
+ awk -f contrib/avs/make_src.awk src/makefile.old > src/makefile
+ awk -f contrib/avs/make_xdo.awk src/xdoc/makefile.old > src/xdoc/makefile
+
+d) copy the scripts to the /noweb/src dir (you need to run them from there)
+ cp -p contrib/avs/nw_c.bat src
+ cp -p contrib/avs/nwinst.ksh src
+ cp -p contrib/avs/nwicon.bat src
+
+e) run nwicon.bat under command.com, 1st without args to get a usage
+   screen, and then provide the correct args (careful with the number and use
+   of slashes and backslashes). This compiles the Icon code
+ (Concerning the location of your icon binaries: if it is d:/bin/icont.exe
+ and d:/bin/ixhdr.exe then use d:\\\\bin\\\\icont, '.exe' extension not
+ necessary. Sorry about the \\\\, but believe me that's the only way I
+ managed for it to work, see below for an explanation. Give also the
+ location of your DJGPP make, and the locations where later on you want to
+ install the noweb LIB and the noweb BIN)
+
+f) run nw_c.bat under command.com without args to get the usage screen, then
+ use the 'set' command and then rerun nw_c.bat. This compiles the C code
+ (if I try to run under the shell instead of command.com and the makefile
+ tries to compile a C program I get out of memory errors, i.e. this is the
+   place where you need more free RAM; 600000 bytes free is enough)
+ (the full pathname of DJGPP Make is given twice, the one with Unix style
+ slashes goes in the environment var and the one with MsDos style
+ backslashes is given as arg. Notice that an environment variable called
+ DJGPPMAKE is set, make sure that you have enough space for it, otherwise
+ increase the value used in the /E:### option of the SHELL command in
+ your config.sys)
+ (nw_c.bat also patches src/c/finduses.c because of a problem with DJGPP
+ tmpfile() in libc.a, the screen will show what is being changed. This
+ might not be necessary under a newer version of DJGPP (I did not yet
+ install the last DJGPP maintenance patches) but it is better to do it
+ anyway because tempnam() uses the environment var TMPDIR, while
+ tmpfile() doesn't)
+
+g) run nwinst.ksh under the Korn shell (sh.exe); there is a usage screen. You
+ can also run it from command.com using 'sh -c "./nwinst.ksh ..."'. This
+ installs the icon based version of noweb (some files will need to be patched
+ afterwards, see below).
+ (because an Unix style shell and MKS make are needed)
+ (make sure that MKS make is the 1st make in your path, see Technical
+ Notes below)
+ (if you prefer to use Awk instead of Icon update the scripts yourself.
+ Notice that although Awk is slower than Icon, MKS has a 32 bits version
+ of Awk which might be good enough, and MKS has also an Awk compiler
+ (awkc, see 'man awkc'), thus by removing from the shell scripts the
+       awk code, putting it into a file, compiling it, and updating the shell
+ scripts to use that executable you might get good performance, I didn't
+ try to use Awk thus I don't know)
+ (in noweb27.tar.gz in the file "install.dos" there are some references
+ to an MKS awk bug in handling backslashes in gsub(), I was unable to find
+      the code that it referred to in the noweb distribution (did it only apply
+ to an older version of noweb?), but anyhow you should check it out if you
+ are going to use awk)
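+      (an illustrative sketch only of that awkc route, with a hypothetical
+      file name; see 'man awkc' for the exact options:
+          awk -f myprog.awk data   # interpreted: the awk source is parsed on every run
+          awkc myprog.awk          # compile once with the MKS awk compiler
+          myprog data              # run the compiled executable instead
+      )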
+
+h) fix cpif.ksh and noweb.ksh by running mksfixes.ksh (from noweb/contrib/avs),
+   there is a usage screen. This edits the noweb.ksh in the BIN location
+ specified to nwinst.ksh and removes twice (in lines 20 and 25) the
+ PATH="$PATH:$LIB"
+ which is not necessary and causes the error 'cannot execute go32' at run-time
+ (go32 is the Dos extender used by DJGPP). It also edits cpif.ksh (also in the
+ BIN location) and:
+ - removes the PATH statement in line 8 (because 'PATH=/bin:/usr/bin' is
+ probably wrong for your system)
+ - adds a full pathname (with a drive letter) to "new=/tmp/$$" in line 20
+ (e.g. "new=d:/tmp/$$"), because otherwise the script will fail when run
+ from a drive which has no tmp dir
+ - because of a bug in MKS 4.2 changes line 28 from
+ -eq0|-ne1|*2) cp $new $i
+ to
+ -eq0|-ne1|*2|*3) cp $new $i
+ Maybe you would like to check out if that bug applies to your system,
+ in MKS 4.2 the program cmp ("cmp.exe") gives an exit code of 3 if one of
+ the files to be compared cannot be opened or doesn't exist, this is in
+ contrast with the MKS man page which says that an error code of 2 is
+ given (to fix this bug is very important otherwise e.g. the command noweb
+ will never create the TeX file)
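+   (a one-line sketch of that last change, assuming line 28 of cpif.ksh
+   reads exactly as quoted above:
+      sed 's/-eq0|-ne1|\*2)/-eq0|-ne1|*2|*3)/' cpif.ksh >cpif.tmp &&
+      mv cpif.tmp cpif.ksh
+   mksfixes.ksh applies this and the other edits for you)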
+
+i) copy './noweb/contrib/avs/icon.1' to '.../yourman/cat1' and 'mks42bug.0d' to
+ '.../yourman/cat0' and add '.../yourman' to your MANPATH.
+
+j) test the installation by running noweb/examples/...
+ (you must run noweb under the Korn shell. I have enough free RAM to run
+ everything including the Dos extenders of Icon, DJGPP Emacs, emTeX, etc.
+   This probably works because 'mem' run from the Korn Shell reports
+ 590272 free bytes (of the 640 KB), and I have enough free EMS. I use
+ QEMM 7.5 as the memory manager)
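+   (a minimal smoke test, sketched with a generic file name; both commands
+   should run without errors once the install is correct:
+       noweave example.nw > example.tex    # weave the documentation
+       notangle example.nw > example.code  # extract the root code chunk
+   )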
+
+**** Technical Notes: ****
+
+- DJGPP Make can be in your path but it can't be 1st, otherwise install.ksh
+  does not work
+- noweb/install renamed to noweb/install.old to avoid confusing MKS Make when
+ trying to execute 'make install'
+- it is assumed that MKS Make is the 1st Make in your path
+- nw_c.bat does not run coff2exe, thus you CANNOT test the binaries BEFORE
+ you install them, i.e. you don't get the .exe files. This avoids the need to
+ change the src/c/makefile
+ When you run install.ksh, coff2exe will be run for all binaries (i.e. a stub
+ will be added to them which will call the Dos extender go32.exe)
+- if you have problems with the 127 chars PATH limit, consider the following:
+ Command.com 6.0 can take a PATH longer than 127 chars by setting it in the
+ config.sys (but it is not possible to change it afterwards from e.g.
+ autoexec.bat or the command line), also some programs might crash with this
+ extra long path and one can only see the 1st 122 chars (although all are used
+ in a search)
+ The Ndos shareware command interpreter can have a PATH 256 chars long.
+
+- main changes made to the Unix noweb/src/makefile to create the Dos Makefile:
+ (see also make_src.awk)
+a) in some places of the makefile quotes had to be removed (e.g. instead of
+ "CFLAGS=$(CFLAGS)" one has to use CFLAGS=$(CFLAGS), this is caused by the
+ use of 'command.com' as the shell while building noweb). Of course if using
+   more than one CFLAGS value (i.e. embedded blanks) this won't work (the
+   easiest, if dirty, fix is to add them directly to the makefile in that case)
+b) MKS strip cannot be used on the DJGPP .exe, otherwise go32 would fail
+c) it is a hybrid makefile: 'make all' only works with DJGPP Make and
+ 'command.com', while 'make install' only works with MKS Make and the MKS
+ Korn Shell
+d) 'SHELL=/bin/sh' had to be disabled (e.g. if MKS $ROOTDIR is not '/')
+e) links (used in the man pages) not supported under MsDos, copy was used
+f) there is also an external 'cd.exe' in the MKS Toolkit (don't ask me what
+   it can be used for, it is just a source of problems). The problem is
+   that MKS make will use it instead of the internal Korn-shell cd command
+ unless the line to be executed is such that it requires the Korn-shell to
+ interpret it. This is why:
+ cd c; coff2exe nt markup mnt finduses
+ won't work, but
+ cd "c"; coff2exe nt markup mnt finduses
+ works fine
+
+- main changes made to the noweb/src/icon/makefile (see also make_ico.awk):
+a) cannot run under the Korn shell otherwise icont.exe becomes confused about
+ the current directory
+b) when executing icont.exe, if argv[0] has slashes (instead of backslashes)
+   icont.exe runs OK but at the end is unable to run ixhdr.exe, so it fails
+c) I got several spurious CPU locks when calling make from make while executing
+   icont.exe; that's why only one make is used
+d) icont must be replaced with the full path (with backslashes) to itself
+e) the '-I' option of icont.exe is not documented in the Icon man page
+ (see section 3 of document IPD248a from the Icon project), it produces a
+ non-executable icode file (.icx) which can be run by iconx.exe
+
+**** The End ****
diff --git a/web/noweb/contrib/avs/icon.1 b/web/noweb/contrib/avs/icon.1
new file mode 100644
index 0000000000..04c5465e6d
--- /dev/null
+++ b/web/noweb/contrib/avs/icon.1
@@ -0,0 +1,330 @@
+ICON(1)                       13 March 1993                          IPD219
+
+NAME
+     icon - interpret or compile Icon programs
+
+SYNOPSIS
+     icont [ option ... ] file ... [ -x arg ... ]
+     iconc [ option ... ] file ... [ -x arg ... ]
+
+DESCRIPTION
+     icont and iconc each convert an Icon source program into executable
+     form. icont translates quickly and provides interpretive execution.
+     iconc takes longer to compile but produces programs that execute
+     faster. icont and iconc for the most part can be used
+     interchangeably.
+
+     This manual page describes both icont and iconc. Where there are
+     differences in usage between icont and iconc, these are noted.
+
+     File Names: Files whose names end in .icn are assumed to be Icon
+     source files. The .icn suffix may be omitted; if it is not present, it
+     is supplied. The character - can be used to indicate an Icon source
+     file given in standard input. Several source files can be given on
+     the same command line; if so, they are combined to produce a single
+     program.
+
+     The name of the executable file is the base name of the first input
+     file, formed by deleting the suffix, if present. stdin is used for
+     source programs given in standard input.
+
+     Processing: As noted in the synopsis above, icont and iconc accept
+     options followed by file names, optionally followed by -x and
+     arguments. If -x is given, the program is executed automatically and
+     any following arguments are passed to it.
+
+     icont: The processing performed by icont consists of two phases:
+     translation and linking. During translation, each Icon source file is
+     translated into an intermediate language called ucode. Two ucode files
+     are produced for each source file, with base names from the source
+     file and suffixes .u1 and .u2. During linking, the one or more pairs
+     of ucode files are combined to produce a single icode file. The ucode
+     files are deleted after the icode file is created.
+
+     Processing by icont can be terminated after translation by the -c
+     option. In this case, the ucode files are not deleted. The names of
+     .u1 files from previous translations can be given on the icont command
+     line. These files and the corresponding .u2 files are included in the
+     linking phase after the translation of any source files. The suffix
+     .u can be used in place of .u1; in this case the 1 is supplied
+     automatically. Ucode files that are explicitly named are not deleted.
+
+     iconc: The processing performed by iconc consists of two phases: code
+     generation and compilation and linking. The code generation phase
+     produces C code, consisting of a .c and a .h file, with the base name
+     of the first source file. These files are then compiled and linked to
+     produce an executable binary file. The C files normally are deleted
+     after compilation and linking.
+
+     Processing by iconc can be terminated after code generation by the -c
+     option. In this case, the C files are not deleted.
+
+OPTIONS
+     The following options are recognized by icont and iconc:
+
+     -c   Stop after producing intermediate files and do not delete them.
+
+     -e file
+          Redirect standard error output to file.
+
+     -m   Preprocess each .icn source file with the m4(1) macro processor.
+
+     -o name
+          Name the output file name.
+
+     -s   Suppress informative messages. Normally, both informative
+          messages and error messages are sent to standard error output.
+
+     -t   Arrange for &trace to have an initial value of -1 when the
+          program is executed and, for iconc, enable debugging features.
+
+     -u   Issue warning messages for undeclared identifiers in the program.
+
+     -E   Direct the results of preprocessing to standard output and
+          inhibit further processing.
+
+     The following additional options are recognized by iconc:
+
+     -f string
+          Enable features as indicated by the letters in string:
+
+          a    all, equivalent to delns
+
+          d    enable debugging features: display(), name(), variable(),
+               error trace back, and the effect of -f n (see below)
+
+          e    enable error conversion
+
+          l    enable large-integer arithmetic
+
+          n    produce code that keeps track of line numbers and file names
+               in the source code
+
+          s    enable full string invocation
+
+     -n string
+          Disable specific optimizations. These are indicated by the
+          letters in string:
+
+          a    all, equivalent to cest
+
+          c    control flow optimizations other than switch statement
+               optimizations
+
+          e    expand operations in-line when reasonable (keywords are
+               always put in-line)
+
+          s    optimize switch statements associated with operation
+               invocations
+
+          t    type inference
+
+     -p arg
+          Pass arg on to the C compiler used by iconc
+
+     -r path
+          Use the run-time system at path, which must end with a slash.
+
+     -v i
+          Set verbosity level of informative messages to i
+
+     -C prg
+          Have iconc use the C compiler given by prg
+
+ENVIRONMENT VARIABLES
+     When an Icon program is executed, several environment variables are
+     examined to determine certain execution parameters. Values in
+     parentheses are the default values.
+
+     BLKSIZE (65000)
+          The initial size of the allocated block region, in bytes.
+
+     COEXPSIZE (2000)
+          The size, in words, of each co-expression block.
+
+     DBLIST
+          The location of data bases for iconc to search before the
+          standard one. The value of DBLIST should be a blank-separated
+          string of the form p1 p2 ... pn where the pi name directories.
+
+     ICONCORE
+          If set, a core dump is produced for error termination.
+
+     ICONX
+          The location of iconx, the executor for icode files, is built
+          into an icode file when it is produced. This location can be
+          overridden by setting the environment variable ICONX. If ICONX
+          is not set and iconx is not found on the built-in path, PATH is
+          searched for it. If this environment variable is set, it
+          specifies the location of iconx to use to execute an icode file.
+
+     IPATH
+          The location of ucode files specified in link declarations for
+          icont. IPATH is a blank-separated list of directories. The
+          current directory is always searched first, regardless of the
+          value of IPATH.
+
+     LPATH
+          The location of source files specified in link declarations for
+          iconc. LPATH is otherwise similar to IPATH.
+
+     MSTKSIZE (10000)
+          The size, in words, of the main interpreter stack for icont.
+
+     NOERRBUF
+          By default, &errout is buffered. If this variable is set,
+          &errout is not buffered.
+
+     QLSIZE (5000)
+          The size, in bytes, of the region used for pointers to strings
+          during garbage collection.
+
+     STRSIZE (65000)
+          The initial size of the string space, in bytes.
+
+     TRACE
+          The initial value of &trace. If this variable has a value, it
+          overrides the translation-time -t option.
+
+FILES
+     icont          Icon translator
+     iconc          Icon compiler
+     iconx          Icon executor
+
+SEE ALSO
+     The Icon Programming Language, Ralph E. Griswold and Madge T.
+     Griswold, Prentice-Hall Inc., Englewood Cliffs, New Jersey, Second
+     Edition, 1990. Version 8.10 of Icon, Ralph E. Griswold, Clinton L.
+     Jeffery, and Gregg M. Townsend, IPD212, Department of Computer
+     Science, The University of Arizona, 1993. Using Version 8.10 of the
+     Icon Compiler, Ralph E. Griswold, IPD214, Department of Computer
+     Science, The University of Arizona, 1993. m4(1), icon_vt(1)
+
+LIMITATIONS AND BUGS
+     The icode files for the interpreter do not stand alone; the Icon
+     run-time system (iconx) must be present. Stack overflow is checked
+     using a heuristic that is not always effective. If the -m option is
+     used, line numbers reported in error messages and tracing messages
+     are from the file after, not before, preprocessing.
+
+                                          Formatted: December 9, 1994
+
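(As a quick sanity check of the Icon tools described in the man page above,
a hello-world run might look like this; hello.icn is a hypothetical file
name. Make a small file called hello.icn containing:

    procedure main()
       write("hello, world")
    end

then 'icont hello.icn -x' translates the program and, because of -x, runs it
immediately.)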
diff --git a/web/noweb/contrib/avs/jrtex12a.avs b/web/noweb/contrib/avs/jrtex12a.avs
new file mode 100644
index 0000000000..a9d7131aee
--- /dev/null
+++ b/web/noweb/contrib/avs/jrtex12a.avs
@@ -0,0 +1,1054 @@
+**** NOTE: this is the superb document 'jrtex12a' from John Refling. I just
+**** fixed a couple of bugs and added a few comments (for my own use). The
+**** changes are marked with '****'. The biggest problem was that his setup
+**** didn't contemplate installing emTeX in another drive than C, nor did it
+**** contemplate putting the fonts generated on demand on yet another drive
+**** (handy if one wants emTeX to be in a READONLY drive)
+****
+**** If you only have a 386 without coprocessor be careful with some
+**** delete instructions (which assume you have a 387 or a 486DX)
+****
+**** Known problems:
+**** If running under the MKS Toolkit shell, font generation on demand may
+**** fail. I don't have this problem any more (don't know what fixed it,
+**** the new version of emTeX?) but I had it in the past. Just do the font
+**** generation under 'command.com' and then go back to the Korn shell
+**** avs@daimi.aau.dk (18-Mar-95)
+
+How I installed emtex, latex2e, mf, dvips, on a 386 w/ postscript or hplaser
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ John Refling, University of California
+ jprefling@lbl.gov [DO NOT SEND REQUESTS HERE]
+ date: 13 January 1995, Version: 12a
+
+Archivists
+~~~~~~~~~~
+This file should be stored on your machine with a basename of 'jrtex12a',
+in a place related to tex. A one-line description of this guide might be:
+"Describes install of emtex, latex2e, hplj & PS on PC"
+
+How to get latest
+~~~~~~~~~~~~~~~~~
+This will be available at simtel mirrors such as wuarchive.wustl.edu
+and oak.oakland.edu in/or near the /pub/msdos/tex directory, and perhaps
+CTAN machines. A similar file which describes an older
+version of emtex installation (pre latex-2e) has basename jrhelp11.
+
+What is this
+~~~~~~~~~~~~
+This is a short account of my experiences installing emtex on a few different
+computers, with a few different printers.
+
+Hardware you NEED to follow this guide
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+a 386 or greater computer with reasonable (4 MB) RAM, about 35 MB free space at
+ peak for the install (for easy installation, although you can get by with
+ much less if you are willing to really work at it), reduced to ~10 MB
+ disk space permanently (less w/o postscript support and the docs),
+ plus fonts (about 5 MB max).
+
+a reasonable dos version (best if you can recall and edit commands)
+
+a laser printer (postscript or pcl hplaserjet or hplaserjet iv, are tested).
+
+What you get
+~~~~~~~~~~~~
+emtex system (tex, latex2e, mf, screen previewer, postscript support)
+dynamically generated fonts if you wish (that's right, I don't generate them
+ until I use `em)
+The emtex system is a great tex / latex system.
+
+A note on the version numbers
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+The base name of this document is jrtex which takes 5 characters out of 8
+possible. The next two digits correspond to the betatest version of emtex,
+currently 12. Unfortunately, other pieces of the emtex package change without
+notice and so it is hard to pin a version number on the entire package, which
+I could carry over to this document, plus I might have revisions. The final
+character in this help file's name will serve as a minor revision indicator.
+
+Even so, there might be minor discrepancies between what you see here
+and what is on the net. Perhaps one of the best ways to check for
+revisions on the net would be to check the file's size and/or it's date.
+However, I just don't see any good way to build this info into the name
+of this document. I have included the file size of files obtained from
+the net. When the size indicated is different from the size you received,
+you need to be cautious... most likely a new version has been added to the
+distribution, and you need to take that into account. Sometimes in the
+betatest subdirectory there are multiple versions of the same thing... use
+the latest... even if I don't in this document!
+
+How to do it
+~~~~~~~~~~~~
+Print out a copy of this file, and follow along with it. CROSS OFF STEPS
+WHEN COMPLETED. You need to be able to transfer files from the internet
+to your PC. You also need pkunzip, and know how to use it. If you use
+unzip, the options are slightly different.
+
+What you don't get
+~~~~~~~~~~~~~~~~~~
+This worked for me, but I can't guarantee it for you. You take all
+responsibility for implementing this. You are assumed to know enough
+about what is going on to not do something dangerous even if it says to
+do so here. Besides, someone could have changed this before you got it.
+ALSO, I DON'T COVER MIGRATION FROM THE 2.09 VERSION OF LATEX TO THE NEW
+2E VERSION. I ASSUME A NEW INSTALLATION! Also, this is only how I did
+it! and is not an official guideline, or recommendation. It is possible
+that something in here may guide you contrary to some of the published
+or yet to be published standards. This is provided in the hope that it
+may be useful, in the event that you are having trouble with your
+installation. THAT'S IT. You have been warned!
+
+Introduction
+~~~~~~~~~~~~
+The emtex distribution was a little confusing to me at first, since I am
+used to obtaining one compressed distribution archive file and working with
+that. When a new version is distributed, one goes back to the ftp site and
+gets the compressed file with the next higher version number.
+
+With emtex, things are spread across directories, not only in terms of package
+components, but also in terms of revision levels. The most serious drawback
+of this method is that it is difficult to determine the revision level of
+programs, or where a particular revision level is. The simpliest way to
+install it is just to install ALL of the archives in order, and let
+them overwrite [hopefully] older versions. Then, you will have the latest
+versions, and you delete what you don't want.
+
+1. OBTAIN THE DISTRIBUTION AND EXTRACT IT AND ARCHIVE IT FOR LATER
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+a. cd \
+
+b. use ftp to connect to a distribution machine, ftp.dante.de,
+ ftp.shsu.edu, wuarchive.wustl.edu, etc, and get to the emtex directory
+ something like /tex-archive/systems/msdos/emtex. We wish to grab all the
+ files in the disk1, disk2, disk3, disk4, disk5, betatest subdirs.
+ MAKE SURE YOU SET BINARY MODE BEFORE THE TRANSFER OF BINARY FILES.
+ One way to do it all is as follows (assuming that the remote ftp server
+ has on-the-fly compression):
+
+ ftp ftp.shsu.edu
+**** ftp.dante.de is too slow
+ cd /tex-archive/systems/msdos
+ bin
+ hash
+ get emtex.zip [15,173,126]
+**** get emtex.tar.gz [11,859,740]
+
+ This will recursively get everything. Last time I tried it, it ended up
+ being a 14,992,564 byte file, and took 60 minutes via ftp.
+
+ Then, get the latex2e stuff:
+
+ cd /tex-archive/macros/latex
+ get base.zip [771,461]
+ cd packages
+ get tools.zip [221,517]
+**** get graphics.tar.gz [80,020]
+**** 'graphics' has epsfig (allows including Encapsulated PostScript files in LaTeX docs)
+ quit
+
+c. Then on the pc:
+
+ mkdir \emtmp
+ cd \emtmp
+ pkunzip \emtex.zip [or unzip -j \emtex.zip]
+ cd \
+
+ You will need 15 + 15 = 30 MB disk space for this process. Read the
+ readme.tst and note the date in the upper right-hand corner. For my
+ installation it was 12-Dec-94. After success, you may delete \emtex.zip
+ and get 15 MB disk space back. Then goto step d.
+**** version of 03-Feb-95 (includes web.zip with weave, tangle and pooltype)
+
+ If this does not work [because you don't have enough temporary disk space
+ for the one huge file, or if the ftp server can't compress into one huge
+ file], you have to get all the files individually. The ones we are most
+ interested in are listed in item 5 below with their locations in [].
+
+d. Do the following in order [if you use unzip, do not use the -d]:
+
+ pkunzip -d \emtmp\tex1.zip [disk1, 375854]
+ pkunzip -d \emtmp\tex2.zip [disk1, 268240]
+ pkunzip -d \emtmp\latex1.zip [disk2, 248823]
+ pkunzip -d \emtmp\latex2.zip [disk2, 238131]
+ pkunzip -d \emtmp\blatex.zip [disk2, 231677]
+ pkunzip -d \emtmp\makeindx.zip [disk2, 52717]
+ pkunzip -d \emtmp\bmf1.zip [disk3, 266527]
+ pkunzip -d \emtmp\bmf2.zip [disk5, 271791]
+ pkunzip -d \emtmp\texcad.zip [disk3, 120547]
+ pkunzip -d \emtmp\mf1.zip [disk4, 249916]
+ pkunzip -d \emtmp\mf2.zip [disk4, 344463]
+ pkunzip -d \emtmp\mf3.zip [disk4, 278545]
+ pkunzip -d \emtmp\mfware1.zip [disk4, 327215]
+ pkunzip -d \emtmp\mfware2.zip [disk5, 140361]
+ pkunzip -d \emtmp\btex1.zip [disk5, 265159]
+ pkunzip -d \emtmp\btex2.zip [disk5, 274600]
+ pkunzip -d \emtmp\misc_mf.zip [disk5, 36349]
+ pkunzip -d -o \emtmp\mfb5.zip [betatest, 1044415]
+ pkunzip -d -o \emtmp\mfjob11n.zip [betatest, ?]
+ pkunzip -d -o \emtmp\texb12.zip [betatest, 1120943]
+ pkunzip -d -o \emtmp\bibtexb1.zip [betatest, 167593]
+ pkunzip -d -o \emtmp\fontl12a.zip [betatest, 88321]
+ pkunzip -d -o \emtmp\dvid15g1.zip [betatest, 1213323]
+ pkunzip -d -o \emtmp\dvid15g2.zip [betatest, 727077]
+
+ Answer yes to any overwrites.
+
+ move \emtmp\readme.tst \emtex\doc
+ move \emtmp\readme.eng \emtex\doc\english
+
+ Extracting these files generates another 18 MB of files. If this is
+ successful, you may now delete everything in \emtmp, as well as \emtex.zip.
+
+ If you run out of space in the process, you may delete each .zip you
+ have extracted successfully. After extracting ALL the .zips, you may
+ delete \emtex.zip.
+
+e. Get rid of stuff (I save only the 386 stuff):
+ cd \emtex
+ del *.cmd [os/2 stuff]
+ del bmf*.* [big mf]
+ del blatex.bat [big latex]
+ del btex*.* [big tex]
+ del mf.exe [small mf]
+ del mf186.exe [small mf]
+ del mf286.exe [small mf]
+ del tex.exe [small tex]
+ del tex186.exe [small tex]
+ del tex286.exe [small tex]
+ del texp.exe [os/2 stuff]
+ del texfmts [small tex]
+ rmdir texfmts [small tex]
+ del mfbases [small tex]
+ rmdir mfbases [small tex]
+****
+ del remove [intnl files]
+ rmdir remove [intnl files]
+**** Might not be a good idea to remove, tells from where a given file came
+ del mfp*.* [os/2 stuff]
+ del dvipm*.* [os/2 stuff]
+ del mfjob1.ovl [small mfjob]
+ del mfjob2.ovl [small mfjob]
+ del ask.exe
+ del bibtex.exe [small bibt]
+****
+ del book
+ rmdir book
+**** book has subdirs (skip these steps, german subdir will be deleted below)
+
+ IF YOU REALLY ARE GOING TO USE A DOT MATRIX PRINTER, YOU MIGHT KEEP THESE:
+ del dvidot*.* [dot matrix]
+ del gh.*
+ del prtfx*.*
+ del prtitoh.*
+ del prtlq*.*
+ del prtp6*.*
+ del prtsty.bat
+ del pcx*.*
+ del prtaiw.bat
+ del makedot.exe
+
+ I DON'T HAVE LIMITED MEMORY, SO DELETE
+ del vs.bat [ltd memory vers]
+ del dviscrs.exe [ltd memory vers]
+
+ OLD FORMATS FOR FONTS, IF YOU ARE STARTING NEW, YOU DON'T WANT THEM:
+ del chtopx.exe
+ del gftopxl.exe
+ del gftype.exe
+ del pktopx.exe
+ del pxtoch.exe
+ del pxtopk.exe
+
+ I HAVE A COPROCESSOR SO DON'T NEED THE COPROCESSOR-LESS VERSIONS
+ del dviscr.exe
+ del dvihplj.exe
+
+ I DON'T HAVE A VIKING DISPLAY
+ del dvivik.exe
+
+ SAVE SOME MORE SPACE since I regenerate the bases & formats later
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ cd \emtex\bmfbases
+ del *.cmd
+ del *.log
+ del *.bas
+ edit bas.bat and change "bmf" to "\emtex\mf386" in all cases (2)
+**** use only 'mf386' (i.e. ?:/emtex will be in the PATH)
+
+ cd \emtex\btexfmts
+ del *.cmd
+ del *.log
+ del *.fmt
+ edit fmt.bat and change "btex" to "\emtex\tex386" in all cases (2)
+ edit slifmt.bat and change "btex" to "\emtex\tex386" in all cases (1)
+**** use only 'tex386' (i.e. ?:/emtex will be in the PATH)
+
+ I delete some dot-matrix printer configuration files. Actually, all I
+ keep in the \emtex\data\ subdirectory is:
+ dvidrv.err fax.cnf fontlist lj.cnf ljh.cnf newfonts.sub oldfonts.sub
+
+ I choose to get rid of a few of the doc files in German to save space:
+ del \emtex\doc\german
+ rmdir \emtex\doc\german
+ del \emtex\book\german
+ rmdir \emtex\book\german
+
+ and get rid of some more o/s-2 stuff:
+ del \emtex\help
+ rmdir \emtex\help
+
+f. FINALLY, ARCHIVE IT:
+ cd \
+ pkzip -rP em941212 \emtex\*.* [941212 = date in readme.tst]
+ copy em941212.zip [your archive place]
+ copy base.zip [your archive place]
+ copy tools.zip [your archive place]
+
+ These should fit on two 3" floppies. This is what I use to archive the
+ distribution. The em941212.zip will have to be split across the disks. I
+ picked 941212 as the version number since that is the date in the readme.tst
+ file. When the date changes, change the number.
+
+g. This will give you the following software and versions:
+
+ bibtex32.exe 0.99c [3c-beta1, have to dig thru exe to find vers]
+ dvidrv ?
+ dvihplj 1.5g
+**** dvihplj7 1.5g
+ dviscr 1.5g
+**** dviscr 1.5a
+ emx.exe 0.8h (rev 16) [have to dig thru exe to find vers]
+**** emx.exe 0.8h (rev 18)
+ fontlib 1.2a
+ gftodvi.exe 3.0 [1g]
+ gftopk.exe 3.2 [1j]
+**** gftopk.exe 2.3 [1j]
+ makeindx.exe 2.11
+ maketcp.exe 1.1c
+ mf386.exe 2.71 [3c-beta5]
+ mfjob.exe 1.1n
+ mft.exe 2.0 [1e]
+ pktogf.exe 1.0 [1c]
+ pktype.exe 2.3 [1c]
+ tex386.exe 3.1415 [3c-beta12]
+ texcad.exe 2.8
+
+
+2. RESTORING THE ARCHIVE
+~~~~~~~~~~~~~~~~~~~~~~~~
+If you are continuing from above skip to the next section. If you are
+restoring em941212 from the archive you made earlier, do that now, and:
+
+pkunzip -d em941212.zip
+
+3. COMPLETING THE INSTALLATION OF LATEX2e
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Make sure that the following is in \config.sys: (the numbers can be larger)
+
+shell=command.com /e:1024 /p
+buffers=20
+files=20
+
+if tex complains that it can't open an output file, increase the files setting.
+
+Edit \autoexec.bat and append the following to the path statement:
+;\emtex
+
+also add the following lines
+
+set texinput=c:\emtex\texinput!
+set dvidrvfonts=c:\texfonts
+set dviscr=-s2
+set mfjobopt=-3
+
+You may insert the contents of \emtex\set-tex.bat into your autoexec.bat,
+although it is not necessary, since the default paths are hard coded into
+the .exe files. However, if you change any paths later, you will have to
+edit \emtex\set-tex.bat to reflect them, and execute \emtex\set-tex.bat
+each time you boot, or automatic subdirectory searching will not work.
+Make sure that the ! follows the `texinput' as in the first line above.
+Also, the last two are not in set-tex.bat, so you have to add them anyway.
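+
+For example (a sketch of the search rules: one `!' searches one level of
+subdirectories, `!!' searches all levels):
+
+set texinput=c:\emtex\texinput!     [also finds c:\emtex\texinput\local]
+set texinput=c:\emtex\texinput!!    [searches any deeper levels too]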
+
+You should add the following line to your autoexec.bat, with the appropriate
+number for the lpt port. This prevents prthplj & prthpljh from aborting when
+you take too long to refill the printer's paper:
+mode lpt1 retry=r
+
+create \emtex\tex.bat in path:
+tex386 &plain %1 %2 %3 %4 %5 %6 %7 %8 %9
+
+create \emtex\latex.bat in path:
+tex386 &latex %1 %2 %3 %4 %5 %6 %7 %8 %9
+
+create \emtex\slitex.bat in path:
+tex386 &splain %1 %2 %3 %4 %5 %6 %7 %8 %9
+
+or add them to a ced-like alias file. If you don't know what that is, don't
+ask. You can now type tex, latex, or slitex followed by the filename to
+process a document. If the filename's extension is .tex, it may be omitted.
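+
+For example, to process a (hypothetical) document mydoc.tex:
+
+latex mydoc       [the same as: latex mydoc.tex]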
+
+reboot now!
+
+EXTRACT LATEX2E
+~~~~~~~~~~~~~~~
+cd \
+pkunzip -d \base
+cd \base
+\emtex\tex386 /i unpack.ins
+
+replace any existing files in the following (to override them):
+move *.ltx \emtex\btexfmts
+move ltxcheck.tex \emtex\texinput
+move testpage.tex \emtex\texinput
+move docstrip.tex \emtex\texinput
+move *.cls \emtex\texinput
+move *.clo \emtex\texinput
+move *.sty \emtex\texinput
+move *.fd \emtex\texinput
+move *.def \emtex\texinput
+move *.cfg \emtex\texinput
+
+mkdir \emtex\makeindx
+move *.ist \emtex\makeindx
+
+mkdir \emtex\doc\base
+move *.tex \emtex\doc\base [save and read these docs]
+move *.txt \emtex\doc\base [save and read these docs]
+**** move *.err \emtex\doc\base [LaTeX companion & LaTeX book errata]
+
+mkdir \emtex\texinput\local
+**** For local files; found by the automatic one-level subdirectory search ('!')
+
+cd \
+del base.zip
+del base
+rmdir base
+
+BUILD THE CM BASES
+~~~~~~~~~~~~~~~~~~
+cd \emtex\bmfbases
+bas
+
+BUILD THE TEX AND LATEX2e FORMATS
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+cd \emtex\btexfmts
+fmt
+slifmt
+del lplain.* [this is the old 2.09 format replaced later on]
+\emtex\tex386 /i latex.ltx [this is the new latex 2e format file]
+
+cd \
+latex ltxcheck [all should be ok]
+**** Complains about missing AMS fonts (this was to be expected)
+
+
+EXTRACT THE MAINZ TOOLS
+~~~~~~~~~~~~~~~~~~~~~~~
+cd \
+pkunzip -d \tools
+cd tools
+latex tools.ins
+del temp.tex
+mkdir \emtex\texinput\tools
+move *.sty \emtex\texinput\tools
+move *.tex \emtex\texinput\tools
+**** move *.txt \emtex\doc\tools
+
+mkdir \emtex\doc\tools
+move *.dtx \emtex\doc\tools [these are all the docs for tools]
+
+cd \
+del tools.zip
+del tools
+rmdir tools
+
+TESTING THE LATEX INSTALLATION
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+mkdir \test
+cd \test
+
+Make a small file called cm.tex containing:
+
+\documentclass{article}
+\begin{document}
+Hello world...........$\sum$
+\end{document}
+
+and latex it by typing `latex cm'. It should not complain.
+
+
+4. VIEWING THE RESULTS
+~~~~~~~~~~~~~~~~~~~~~~
+By latexing the above test document we have used only the font metric
+information stored in the TFM files (in \emtex\tfm). You cannot print or
+view the results until you put fonts on your computer and/or printer. The
+only exception to this is postscript printers or viewers, which have their
+fonts built in. More on that later.
+
+If you have a postscript printer, you may use its internal fonts, you may
+download fonts (e.g. CM [computer modern]), or you may do a combination of
+the two. You may also choose to use a postscript previewer for screen
+previewing (e.g. ghostscript). Or, you may choose to use the CM fonts that
+come with latex for screen previewing and use a dvi previewer (e.g. dviscr).
+
+If you have a reasonably recent HP pcl laser printer (any of the laser jets
+excluding the laser jet 1; the laser jet 1+ is ok, although it has limited
+memory) or a laser printer that emulates an HP laser printer, you will want
+to use the CM fonts.
+
+The new HPLJ IV has its own scaleable fonts, so perhaps you will be able to
+use those internal fonts much like with postscript printers. You would need
+the TFM files describing the fonts inside the printer. I haven't done much
+work with the HP LJ IV so it is possible this has already been done. I don't
+discuss it here.
+
+Moving on in the choices, if you choose to use CM for either printing or
+display, then you need to obtain the actual fonts. You can either get
+them from the net, generate them all at once, or generate them on demand
+and then save them for future use. I do the latter. On a 486-33 MHz
+it only takes a minute or so to generate a font in a particular size.
+All these options are described next.
+
+Pick one, and skip to a), b), c), d), e), or f) below.
+**** Best choice is step c), generate fonts on demand
+
+a. CM fonts from the net, for screen viewing & 300 dpi hplj printing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ mkdir \texfonts
+ cd \texfonts
+ ftp ftp.shsu.edu [or other site]
+ cd tex-archive/systems/msdos/emtex-fonts
+ bin
+ hash
+ get lj_fonts.zip [this will be about 5 MB]
+ quit
+
+ pkunzip lj_fonts *.fli [or unzip -j lj_fonts *.fli]
+
+ Archive lj_fonts.zip somewhere for future use.
+
+ To display the document on your screen, use v <filename>. v.bat is a batch
+ file in \emtex which calls dviscr. If you have a math coprocessor and
+ deleted dviscr.exe and kept dviscr7.exe, edit v.bat to call dviscr7.exe
+ instead of dviscr.exe. Example:
+
+ cd \test
+ v test
+
+ I add a /pt to \emtex\data\lj.cnf to prevent all the .dlg files.
+
+ You can also put the one line contents of v.bat into a ced-like program. If
+ you don't know what that is, skip it.
+
+ Read the documentation on dvi viewers in \emtex\doc\english\dvidrv.doc
+
+b. CM fonts from the net, for screen viewing & 600 dpi hplj IV printing
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ I haven't tried this, since I couldn't find the .fli files for the HPLJ 4 on
+ the net. They would be rather large (up to 4x larger than the 300dpi---twice
+ in each dimension, 2 dimensions). If you do find them, edit
+ \emtex\v.bat to use @ljh.cnf and follow along similarly to the previous
+ section for 300dpi fonts, but edit \emtex\ljh.cnf, instead.
+
+ Or, you may want to dynamically generate the fonts (below)...
+
+
+c. Dynamically generating the CM fonts on demand, 300 dpi for hplj & screen
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ This is how I do it. Append the following line to \emtex\data\lj.cnf
+ /fb
+
+ I also add a /pt to the file to prevent all the .dlg files.
+
+ You may wish to comment out the /pl line with a %, since you will not be
+ using the font libraries, as was done in 4a.
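+
+ After these edits the end of \emtex\data\lj.cnf would look something like
+ this (a sketch; the rest of the file stays as it was):
+
+ % /pl=...       [the font library line, now commented out]
+ /pt
+ /fb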
+
+ Make sure that `set mfjobopt=-3' is in your autoexec.bat, or that it is
+ set before trying to generate the fonts. This tells mfjob to use mf386.exe
+ instead of mf.exe.
+
+ cd \test
+ v test
+
+ should generate 2 fonts and then display the dvi file on your screen. The
+ next time you do `v test' the fonts will already be generated. v.bat is a
+ batch file in \emtex which calls dviscr. If you have a math coprocessor,
+ edit v.bat to call dviscr7.exe. You can also put the one line contents of
+ v.bat into a ced-like line. The fonts go to \texfonts\pixel.lj\300dpi.
+
+
+d. Dynamically generating the CM fonts on demand, 600 dpi for hplj IV & screen
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ Append the following line to \emtex\data\ljh.cnf
+ /fb
+
+ & comment out the /pl line with a %. Also edit v.bat to use ljh.cnf instead
+ of lj.cnf. v.bat is a batch file in \emtex which calls dviscr. If you
+ have a math coprocessor, edit v.bat to call dviscr7.exe. I also add a /pt
+ to the config file to prevent all the .dlg files.
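+
+ After these edits the one-line v.bat might read something like this
+ (a sketch; the exact options in your copy may differ):
+
+ dviscr7 @ljh.cnf %1 %2 %3 %4 %5 %6 %7 %8 %9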
+
+ Make sure that `set mfjobopt=-3' is in your autoexec.bat, or that it is
+ set before trying to generate the fonts. This tells mfjob to use mf386.exe
+ instead of mf.exe.
+
+ cd \test
+ v test
+
+ should generate 2 fonts and display the dvi file on your screen. The fonts
+ go to \texfonts\pixel.ljh\600dpi. Next time, it won't have to generate
+ these fonts.
+
+e. Locally generating all 300dpi fonts for HP
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ cd \emtex\mfjob
+ mfjob all [wait several hours]
+ cd \
+ del \newfonts\tfm
+ rmdir \newfonts\tfm
+ del \newfonts\log
+ rmdir \newfonts\log
+ move newfonts texfonts
+
+f. Locally generating all 600dpi fonts for HP IV
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ cd \emtex\mfjob
+ edit modes.mfj and change `def m=[lj]' to `def m=[ljh]' at end
+ mfjob all [wait several hours]
+ cd \
+ del \newfonts\tfm
+ rmdir \newfonts\tfm
+ del \newfonts\log
+ rmdir \newfonts\log
+ move newfonts texfonts
+
+
+5. Printing using CM fonts and HPLJ or HPLJ IV
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Use prthplj.bat and prthpljh.bat respectively. You may want to edit
+them and change the output file /po=lpt1:, etc. If you have a math coprocessor,
+edit prthplj.bat and prthpljh.bat to call dvihplj7.exe. If you are using
+the HPLJ IV, add /og=600 to \emtex\prthpljh.bat on the same line.
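+
+A sketch of what the edited line in \emtex\prthpljh.bat might look like for
+an HPLJ IV with a math coprocessor (illustrative only; keep whatever other
+options your copy already has):
+
+dvihplj7 @ljh.cnf /og=600 /po=lpt1: %1 %2 %3 %4 %5 %6 %7 %8 %9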
+
+These routines use the same fonts and config files as the screen previewer,
+so once the fonts are generated by the screen viewer, they are generated for
+the printer too.
+
+If you have an HP printer, you are now done with the installation.
+
+6. PSNFSS & DVIPS for postscript printers (skip if you don't have a PS printer)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+a. Using CM fonts only, and not the internal PS fonts of your printer
+
+ If all you want to do is use the CM fonts (and not the fonts in your
+ postscript printer), you may stop here. However, this is kind of a waste,
+ because the extra money you paid for the postscript printer gives you a
+ lot of advanced features and fonts. On the other hand, with those features
+ come more installation difficulties, i.e., the rest of this document. So
+ no one would blame you for stopping here.
+
+ If you want to stop here and generate CM fonts and download them to the
+ printer as a bit-map each time you print, do the following:
+
+ cd \
+ ftp wuarchive.wustl.edu
+**** 4.8 KB/s
+ cd /mirrors/msdos/postscript
+**** cd /systems/ibmpc/simtel/msdos/postscrp
+ bin
+ hash
+ get dvips554.zip [592,893 bytes, or latest]
+ quit
+
+ pkzip -d \dvips554.zip *.tfm [delete the .tfm's from the zip]
+ pkzip -d \dvips554.zip *.vf [delete the .vf's from the zip]
+ pkunzip -d \dvips554.zip
+
+ cd \emtex\texinput\dvips
+ del times.sty
+ del palatino.sty
+ del bookman.sty
+ del chancery.sty
+ del avantgar.sty
+ del lucida.sty
+ del ncs.sty
+ del psfonts.sty
+ del psgreek.sty
+
+ The above represent old style files incompatible with latex2e and psnfss2e.
+
+ mkdir \emtex\doc\dvips
+ move dvi*.tex \emtex\doc\dvips
+
+ The fonts for the Apple Laserwriters are the same as for the HP LJ, since
+ the engines are the same [write black]. Choosing dvihplj or dvips formats
+ the bit maps properly in each printer's language. This is where the
+ difference is. Thus CM fonts obtained in 4a, 4b, 4e, or 4f are suitable
+ for dvips. Follow those instructions if you haven't already for the
+ screen fonts. If you have already obtained or generated the fonts, or set
+ up dynamic (on-the-fly) font generation for the screen, you are ready
+ to go:
+**** The problem is that the Dvips config files assume drive C unless
+**** told otherwise (dvips won't find 'tex.pro', see page 37 Dvips manual)
+**** Assuming emTeX in drive D, fonts in drive E:
+**** set texconfig=d:\emtex\ps
+**** set emtexdir=d:\emtex
+**** set dvipsheader=d:\emtex\ps
+**** set texfonts=e:\texfonts
+**** set texpks=e:\texfonts\pixel.lj\%bdpi\%f.pk
+**** (put these in a batch file, remember to use '%%' to mean '%' in the
+**** texpks value)
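+**** e.g. the last line, written inside such a batch file, becomes:
+****   set texpks=e:\texfonts\pixel.lj\%%bdpi\%%f.pk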
+
+ cd \test
+ dvips cm
+ copy cm.ps lpt1: [or where ever your printer is]
+
+ You may need an iteration of `mfjob dvips' to generate fonts. You may
+ stop here. Delete dvips.mfj after doing mfjob.
+
+
+b. Using the internal PS fonts of your printer, and CM for occasional math
+
+ If you wish to take advantage of the native fonts in
+ your postscript printer, continue on.
+
+ Do the steps in a) above.
+
+ To use native fonts, you insert one command, \usepackage{font}, in your
+ latex document after the \documentclass{} command, where font is one of
+ times, palatino, helvet, avant, newcent, or bookman. To get these
+ psnfss2e packages:
+
+ cd \
+ ftp ftp.shsu.edu [or other site]
+ cd tex-archive/macros/latex/packages
+ bin
+ hash
+ get psnfss.zip [724,648]
+ quit
+
+ pkunzip -d \psnfss.zip
+ cd \psnfss
+ latex psfonts.ins [installation]
+**** answer y to "do you have the 36 common PS fonts or do you intend to install them"
+ mkdir \emtex\texinput\ps
+ move *.sty \emtex\texinput\ps
+ mkdir \emtex\doc\ps
+ move psnfss2e.tex \emtex\doc\ps
+ move psfonts.dtx \emtex\doc\ps
+ cd \
+ del \psnfss
+ rmdir \psnfss
+
+ get the latex2e tfm, vf, fd and other associated files for adobe ps fonts:
+
+ ftp ftp.shsu.edu [or other site]
+ cd tex-archive/fonts/metrics
+ bin
+ hash
+ get adobe.zip [1,895,291 bytes]
+ quit
+
+ cd \
+ pkunzip -d \adobe.zip
+
+ edit \emtex\ps\config.ps and uncomment (delete the *) & change the line:
+ T c:\emtex\tfm;c:\emtex\tfm\ps;c:\emtex\tfm\local
+**** Change all the following paths (V, P, L, S, H) also because they assume drive C
+
+ There are 6 packages (times, palatino, helvet, avant, newcent, bookman) which
+ you use to select the primary postscript fonts for your document. These in
+ turn call upon the following 7 native postscript fonts in the printer:
+ Helvetica, AvantGarde, Times, Palatino, NewCenturySchoolbook, Bookman, and
+ Courier. Your printer and screen previewer should have these to work properly
+ with the 6 packages (they are part of the standard 35 fonts). This info came
+ from Table 1 of psnfss2e.tex (page 4). The file adobe.zip that you just
+ obtained contains fd, tfm, and vf data for these and many other postscript
+ fonts. I will only cover the 7 fonts used by the 6 packages.
+
+ move \adobe\helvetic\fd\*.fd \emtex\texinput\ps
+ move \adobe\avantgar\fd\*.fd \emtex\texinput\ps
+**** md emtex\tfm\ps
+**** This requires adding this path to the environment var TEXTFM
+ move \adobe\times\fd\*.fd \emtex\texinput\ps
+ move \adobe\palatino\fd\*.fd \emtex\texinput\ps
+ move \adobe\newcentu\fd\*.fd \emtex\texinput\ps
+ move \adobe\bookman\fd\*.fd \emtex\texinput\ps
+ move \adobe\courier\fd\*.fd \emtex\texinput\ps
+
+ mkdir \texfonts\vf
+ move \adobe\helvetic\vf\*.vf \texfonts\vf
+ move \adobe\avantgar\vf\*.vf \texfonts\vf
+ move \adobe\times\vf\*.vf \texfonts\vf
+ move \adobe\palatino\vf\*.vf \texfonts\vf
+ move \adobe\newcentu\vf\*.vf \texfonts\vf
+ move \adobe\bookman\vf\*.vf \texfonts\vf
+ move \adobe\courier\vf\*.vf \texfonts\vf
+
+ move \adobe\helvetic\tfm\*.tfm \emtex\tfm\ps
+ move \adobe\avantgar\tfm\*.tfm \emtex\tfm\ps
+ move \adobe\times\tfm\*.tfm \emtex\tfm\ps
+ move \adobe\palatino\tfm\*.tfm \emtex\tfm\ps
+ move \adobe\newcentu\tfm\*.tfm \emtex\tfm\ps
+ move \adobe\bookman\tfm\*.tfm \emtex\tfm\ps
+ move \adobe\courier\tfm\*.tfm \emtex\tfm\ps
+
+ move \adobe\helvetic\phv.map \emtex\ps
+ move \adobe\avantgar\pag.map \emtex\ps
+ move \adobe\times\ptm.map \emtex\ps
+ move \adobe\palatino\ppl.map \emtex\ps
+ move \adobe\newcentu\pnc.map \emtex\ps
+ move \adobe\bookman\pbk.map \emtex\ps
+ move \adobe\courier\pcr.map \emtex\ps
+
+ cd \
+ del adobe.zip
+ del adobe
+ rmdir adobe
+
+ cd \emtex\ps
+ del psfonts.map
+ ren *.map *.x
+ copy phv.x + pag.x + ptm.x + ppl.x + pnc.x + pbk.x + pcr.x psfonts.map
+ del *.x
+
+ cd \test and create a file called ps.tex
+ \documentclass{article}
+ \usepackage{times}
+ \begin{document}
+ Hello world...........$\sum$
+ \end{document}
+
+ dvips ps
+
+ this should work. The math needs to use the old CM fonts, a small subset
+ of which may need to be generated on the fly or downloaded. dvips should
+ have created an instruction file for mfjob to create the missing math
+ fonts. If all is well, run
+
+ mfjob dvips
+ dvips ps
+ copy ps.ps lpt1: [or whatever is your printer port]
+
+ and you should have the complete document. I create everything on demand,
+ when dvips complains, using mfjob dvips as above.
+
+ Note, as I mentioned before, you can't view the printer's postscript fonts
+ on your screen with the dvi previewer. The missing fonts appear as boxes on
+ the screen, but print fine on a postscript printer.
+
+ You can look at the document on the screen using ghostscript or another
+ postscript previewer, which allows you to look at any included postscript
+ figures too, but involves a few more hours of install time (if you've
+ gotten this far then you can handle anything!).
+
+ Check out gs386.exe on wuarchive.wustl.edu.
+
+
+7. CUSTOM FONTS
+~~~~~~~~~~~~~~~
+To use a custom font, say APL, get the .mf file off the net, and
+put it in a \emtex\mfinput\local subdirectory. As an example, use cmapl10.mf
+from the CTAN archive in /tex-archive/fonts/apl:
+
+cd \emtex\mfinput\local
+mf386 \mode=hplaser; \nonstopmode; mag=1.0; input cmapl10
+
+move cmapl10.tfm \emtex\tfm\local
+
+To look at it, for example, do:
+
+tex testfont
+and reply:
+cmapl10
+\table
+\end
+
+then
+v testfont
+
+and let it generate the fonts and look at them! To use them in a document,
+read The TeXbook.
+
+You can print out the result on an HP printer:
+dvihplj testfont
+
+You can print out the result on an HP IV printer:
+dvihpljh testfont
+
+You can print out the result on a postscript printer:
+dvips testfont
+
+(c) 1995 John P. Refling, All rights reserved.
+
+
+ # - # - #
+
+
+
+
+ printing
+ /|\
+/- / | \
+| / | \
+| / | \
+|printer hp / | hpIV \ ps
+| / | \
+| / | \
+\- / | \
+ / | \
+/- / / \ / \
+|font CM / CM / \ HP CM / \ PS /w CM
+|- / / ? / \
+ / / / \
+ prthplj prthpljh dvips1 dvips2
+ 300 600 300 300
+/- |\ /| |\ /|
+| | \ LIB OTF / | | \ LIB OTF / |
+| OTF | \ / | LIB OTF | \ / |
+| | \ / | | \ / |
+|source | \ | | \ |
+| | / \ | | / \ |
+| | / \ | | / \ |
+| | / \ | | / \ |
+\- |/ \| |/ \|
+
+ 4c,4d download=4a or 4b 6a+(4c or 4d) download=6b+(4a or 4b)
+ genall=4e or 4f genall=6b+(4e or 4f)
+
+
+Type of printer
+~~~~~~~~~~~~~~~
+hp = PCL (hplaserjet 1+ up to, but not including, the HP Laserjet IV)
+hpIV = PJL (hplaserjet IV, w/o postscript)
+ps = any postscript printer (including hplaserjet IV w/postscript)
+
+Choice of font
+~~~~~~~~~~~~~~
+CM = Computer Modern fonts, freely available, and included with emtex
+ (either generated locally with metafont, or downloaded off the net,
+ see source of CM font below)
+HP = scaleable fonts inside the HP laserjet IV printer
+PS = scaleable postscript fonts inside a postscript printer, however, when
+ you are using math, it will revert to CM for the math fonts. See source
+ of CM fonts below.
+
+Source of CM fonts (for CM or CM for postscript math)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+OTF = On the fly font generation (initially slower, small space rqmt)
+LIB = downloaded libraries of all the fonts (faster, large space rqmt ~5 MB)
+
+
+
+What to use
+~~~~~~~~~~~
+prthplj = from emtex
+prthpljh = from emtex
+dvips1 = with OTF CM generation from MakeTeXPK, or PK libraries
+dvips2 = mostly internal postscript fonts, but some of OTF CM generation
+ for fonts missing from postscript.
+? = no solution that I am aware of, but I didn't look very hard
+
+(c) 1995 John P. Refling. All rights reserved.
+
+**** All lines below this one represent my setup, they are not from John Refling:
+**** (they are not all marked with '****' to avoid cluttering)
+
+**** Encapsulated PostScript support:
+To be able to use the epsfig package, expand 'graphics.tar.gz' somewhere,
+read the '00readme.txt' and then run:
+('?' is your drive letter; the file is named .tgz because .tar.gz is an
+invalid name in Ms-Dos)
+
+gzip -dc graphics.tgz | tar xvf -
+latex graphics.ins
+md ?:\emtex\texinputs\graphics
+copy *.def ?:\emtex\texinputs\graphics
+copy *.sty ?:\emtex\texinputs\graphics
+copy 00readme.txt ?:\emtex\texinputs\graphics
+echo \ExecuteOptions{dvips} > ?:\emtex\texinputs\graphics\color.cfg
+echo \ExecuteOptions{dvips} > ?:\emtex\texinputs\graphics\graphics.cfg
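+
+A minimal test document for epsfig might look like this (a sketch; fig1.eps
+is a hypothetical figure file):
+
+\documentclass{article}
+\usepackage{epsfig}
+\begin{document}
+\epsfig{file=fig1.eps,width=0.5\textwidth}
+\end{document}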
+
+**** I put this in my 'autoexec.bat':
+
+rem emTeX, don't change, just update DVIDRVFONTS, EMTEXDRIVE, PATH and edit the
+rem file '?:\emtex\ps\config.ps' and fix its T, V, P, L, S, H paths
+set DVIDRVFONTS=G:\texfonts
+set EMTEXDRIVE=H
+echo Calling %EMTEXDRIVE%:\emtex\set-tex.bat
+if exist %EMTEXDRIVE%:\emtex\set-tex.bat goto SETTEX
+echo File not found!
+pause
+:SETTEX
+call %EMTEXDRIVE%:\emtex\set-tex.bat
+SET MFINPUT=%MFINPUT%;g:\usr\local\lib\tex\mfinput
+
+**** And I put this in the 'set-tex.bat' file in the emtex dir:
+
+rem emTeX env vars setup, see also jrtex12a.txt.
+rem If changing location update:
+rem a) env vars 'EMTEXDRIVE' (emTeX drive) and 'DVIDRVFONTS' (TeX fonts path)
+rem b) edit the file 'emtex\ps\config.ps' and fix the paths T, V, P, L, S, H
+if %DVIDRVFONTS%.==. goto NOENVVAR
+if %emtexdrive%.==. goto NOENVVAR
+rem Append `!' to a directory name to get one-level subdirectory search
+rem Append `!!' to a directory name to let emTeX search all subdirectories
+SET TEXINPUT=%emtexdrive%:\EMTEX\TEXINPUT!
+SET TEXFMT=%emtexdrive%:\EMTEX\TEXFMTS
+SET BTEXFMT=%emtexdrive%:\EMTEX\BTEXFMTS
+SET TEXTFM=%emtexdrive%:\EMTEX\TFM;%emtexdrive%:\EMTEX\TFM\PS
+SET MFINPUT=%emtexdrive%:\EMTEX\MFINPUT
+SET MFBAS=%emtexdrive%:\EMTEX\MFBASES
+SET BMFBAS=%emtexdrive%:\EMTEX\BMFBASES
+SET MFJOB=%emtexdrive%:\EMTEX\MFJOB
+SET BIBINPUT=%emtexdrive%:\EMTEX\BIBINPUT
+SET DVIDRVINPUT=%emtexdrive%:\MYTEX;%emtexdrive%:\EMTEX\DOC
+rem SET DVIDRVGRAPH=C:\MYTEX;%emtexdrive%:\EMTEX\DOC\GR$r
+SET DVIDRVGRAPH=%emtexdrive%:\EMTEX\DOC\GR$r
+set dviscr=-s2
+set mfjobopt=-3
+rem DVIPS 5.54 (also possible to only use TEXCONFIG and to set rest in cfg file)
+set texconfig=%emtexdrive%:\emtex\ps
+set emtexdir=%emtexdrive%:\emtex
+set dvipsheaders=%emtexdrive%:\emtex\ps
+set texfonts=%emtexdrive%:\emtex\tfm;%emtexdrive%:\emtex\tfm\ps
+set texpks=%DVIDRVFONTS%\pixel.lj\%%bdpi\%%f.pk
+goto THEEND
+:NOENVVAR
+echo %0: ERROR, env vars DVIDRVFONTS and EMTEXDRIVE not set!
+pause
+:THEEND
+
+**** GhostScript (PostScript level 2 interpreter/previewer)
+**** The current version is 3.12 (Oct-94)
+**** ftp.funet.fi:/pub/gnu/ghostscript3/aladdin
+There are several Dos/Win binaries:
+ 1) gs.exe was compiled for the following devices:
+ vga ega svga16 vesa atiw tseng tvga deskjet
+ djet500 laserjet ljetplus ljet2p ljet3 ljet4 cdeskjet cdjcolor
+ cdjmono cdj550 pj pjxl bj10e bj200 epson eps9high
+ ibmpro gifmono gif8 pcxmono pcxgray pcx16 pcx256 pcx24b
+ 2) gs386.exe was compiled for the following devices:
+ vga ega svga16 atiw tseng tvga deskjet djet500
+ laserjet ljetplus ljet2p ljet3 ljet4 cdeskjet cdjcolor cdjmono
+ cdj550 paintjet pjetxl epson eps9high ibmpro bj10e bj200
+ gifmono gif8 tiffg3 faxg3 pcxmono pcxgray pcx16 pcx256
+ pcx24b pbm pbmraw pgm pgmraw ppm ppmraw bitcmyk
+ 3) I don't know what gswin.exe was compiled for (use the PostScript command
+ 'devicenames ==' to find out; using the option '-h' won't work here)
+ 4) gswin32s.exe was compiled (not by Aladdin) for the following devices:
+ mswin mswinprn deskjet djet500 laserjet ljetplus ljet2p
+ ljet3 ljet4 cdeskjet cdjcolor cdjmono cdj550 paintjet pjetxl epson
+ eps9high ibmpro st800 bj10e bj200 gifmono gif8 tiffg3
+ tiffg4 bmpmono bmp16 bmp256 bmp16m pcxmono
+ pcx16 pcx256 pbm pbmraw pgm pgmraw ppm ppmraw
+If you need something that is not in here you will have to compile GhostScript
+yourself (if the device code already exists just add it to the makefile;
+otherwise you'll have to write it yourself, and there is documentation on how
+to do that), or, if your printer is supported by Windows, use gswin?.exe with
+the device 'mswinprn'.
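+
+A typical previewing session is then just (a sketch; assumes gs386.exe is in
+the PATH and the default screen device is used):
+
+gs386 support.ps
+
+(type 'quit' at the GS> prompt to leave the interpreter)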
+
+**** end of this file
diff --git a/web/noweb/contrib/avs/make_ico.awk b/web/noweb/contrib/avs/make_ico.awk
new file mode 100644
index 0000000000..f422176173
--- /dev/null
+++ b/web/noweb/contrib/avs/make_ico.awk
@@ -0,0 +1,50 @@
+# edits noweb/src/icon/makefile for Ms-Dos + PC386 + Icon 386 9.0 + DJGPP + MKS 4.2
+# tested with noweb 2.7a
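+# Typical use (a sketch; the generated install script normally does this):
+#   mv makefile makefile.old
+#   awk -f make_ico.awk makefile.old > makefile
+# (the other make_*.awk filters in this directory are used the same way)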
+
+BEGIN { print "# generated MsDos makefile, original in makefile.old" }
+
+/SHELL=/ { $0 = "# " $0 } # disable SHELL def
+
+/BINEXECS=/ { # add .exe extension
+ s = "";
+ for (k = 1; k <= NF; ++k)
+ s = s sprintf("%s.exe ", $k);
+ $0 = s
+}
+
+function splitLineTooLong() { # appends to strings s1 & s2 (does not initialize them)
+ for (k = 1; k <= NF; ++k)
+ if (match($k, "\\."))
+ s1 = s1 $k " ";
+ else
+ s2 = s2 $k ".exe ";
+ }
+/LIBEXECS=/ { # split in 2 parts (to avoid 128 chars command.com overflow) and add .exe if no extension is provided
+ s1 = ""; s2 = "";
+ if ($NF == "\\") { # tackles problem of a '\' meaning continue in next line
+ $NF = "";
+ NF = NF - 1;
+ splitLineTooLong();
+ getline; # read next line due to '\' continuation char
+ }
+ splitLineTooLong();
+ printf("LIBEXECS2=%s\n", s1);
+ $0 = s2;
+}
+
+/^EXECS=/ { $0 = $0 " $(LIBEXECS2)" } # because now LIBEXECS is split into LIBEXECS and LIBEXECS2
+
+/cp \$\(LIBEXECS\)/ { printf("\tcp $(LIBEXECS2) $(LIB)\n"); } # the new LIBEXECS2 also need to be copied
+
+/\/bin\/rm/ { $1 = "\trm" } # rm might not be at "/bin/rm", remember to add the tab \t
+
+/\$\(ICON.\) -o/ {
+ if (!match($3, "\\.")) { # if no extension add .exe
+ sub(/[a-z0-9]+/, "&.exe", $3);
+ $1 = "\t" $1 " -I" # add -I option to icon translator (see Icon 386 9.0 Ms-Dos docs)
+ }
+ }
+
+/^[a-z0-9]+: [a-z0-9]+\.icn/ && NF == 2 { sub(/[a-z0-9]+/, "&.exe", $1) } # add .exe
+
+{ print $0 } # prints the line (which might have been changed)
diff --git a/web/noweb/contrib/avs/make_lib.awk b/web/noweb/contrib/avs/make_lib.awk
new file mode 100644
index 0000000000..84e2c15bd6
--- /dev/null
+++ b/web/noweb/contrib/avs/make_lib.awk
@@ -0,0 +1,12 @@
+# edits noweb/src/lib/makefile for Ms-Dos + PC386 + Icon 386 9.0 + DJGPP + MKS 4.2
+# tested with noweb 2.7a
+
+BEGIN { print "# generated MsDos makefile, original in makefile.old"; }
+
+/SHELL\=/ { $0 = "# " $0 } # disable SHELL def
+
+/cp unmarkup emptydefn toascii \$/ { # add an extension .ksh) when copying
+ $0 = "\tcp unmarkup $(LIB)/unmarkup.ksh\n\tcp emptydefn $(LIB)/emptydefn.ksh\n\tcp toascii $(LIB)/toascii.ksh"
+}
+
+{ print $0 } # prints the line (which might have been changed)
diff --git a/web/noweb/contrib/avs/make_src.awk b/web/noweb/contrib/avs/make_src.awk
new file mode 100644
index 0000000000..d9dec16af4
--- /dev/null
+++ b/web/noweb/contrib/avs/make_src.awk
@@ -0,0 +1,72 @@
+# edits noweb/src/makefile for Ms-Dos + PC386 + Icon 386 9.0 + DJGPP + MKS 4.2
+# tested with noweb 2.7a
+
+BEGIN { print "# generated MsDos makefile, original in makefile.old" }
+
+/SHELL\=/ { $0 = "# " $0 } # disable SHELL def
+
+/for i in shell lib xdoc tex;/ { # new fix for noweb 2.7a (not needed in 2.7)
+ $0 = "\tcd shell\n\tmake all\n\tcd ..\n\tcd lib\n\tmake all\n\tcd ..\n\tcd xdoc\n\tmake all\n\tcd ..\n\tcd tex\n\tmake all\n\tcd ..\n"
+}
+
+/cd ([a-z]+)|(\$\(LIBSRC\)); make/ { # fix problems with quotes and 'cd' and explode into 3 lines
+ if ($NF == "all") {
+ for (k = 1; k <= NF; ++k) gsub("\"", "", $k); # remove quotes
+ sub(/;/, "", $2); # remove semicolon
+ s = ""; for (k = 4; k <= NF; ++k) s = s " " $k; # group 'Make' args in a single string (for the sprintf)
+ $0 = sprintf("cd %s\n\t$(DJGPPMAKE) %s\n\tcd ..", $2, s);
+ } else
+ if ($NF == "install")
+ sub("^[^;]+", " \"&\"", $2); # add quotes (which need a shell) to force use of shell internal 'cd' instead of bin/cd.exe
+ $0 = "\t" $0;
+}
+
+/\/dev\/null/ { sub(/\/dev\/null/, "NUL") } # Ms-Dos uses NUL to mean /dev/null
+
+/strip/ {
+ sub(/strip/, "# strip"); # remove the strip, MKS strip would ruin the binaries
+ $0 = $0 "\n\tcd \"c\"; coff2exe nt markup mnt finduses"; # and in next line add the coff2exe command (see DJGPP docs)
+}
+
+/chmod \+x/ { $0 = "\t# " $0 } # disable chmod (not necessary and sometimes tries to 'chmod +x foo' instead of 'chmod +x foo.ksh')
+
+/cp / { # add an eventual extension (.exe or .ksh) when copying
+ if (match($2, "^c/")) {
+ $1 = "\t" $1;
+ for (k = 2; k < NF; ++k) # NB: 1st & last field not processed
+ $k = $k ".exe";
+ } else {
+ if ($NF == "$(LIB)") { # add .ksh and split in several lines
+ s = "";
+ for (k = 2; k < NF; ++k) {
+ baseName = substr($k, 1 + match($k, "/"));
+ s = s sprintf("\tcp %s %s/%s.ksh", $k, $NF, baseName);
+ if (k != NF-1)
+ s = s "\n";
+ }
+ $0 = s;
+ }
+ }
+}
+
+/install: install-code install-man install-tex/ { $3 = "install-preformat-man" } # MKS has no NROFF
+
+/sed / { # all files processed by sed require something to be fixed for MsDos
+ if (match($0, "\$\(BIN\)"))
+ $NF = $NF ".ksh" # add .ksh extension
+ else {
+ if (match($0, "gzip")) { # remove gzip of man pages, MKS supports compressed man pages but in .dbz not in .gz format
+ sub("\| gzip", ""); # remove call to gzip
+ sub("\.gz$", "", $NF); # remove .gz extension
+ } else
+ $0 = "#" $0; # Disable because MKS does not support NROFF
+ }
+ $0 = "\t" $0;
+}
+
+/; ln / { # no links in MsDos
+ sub(/; ln /, "; cp -p "); # replace link (ln) with copy (cp)
+ gsub(/\.gz/, ""); # gzip compressed man pages not used
+}
+
+{ print $0 } # prints the line (which might have been changed)
diff --git a/web/noweb/contrib/avs/make_xdo.awk b/web/noweb/contrib/avs/make_xdo.awk
new file mode 100644
index 0000000000..b59ccefb5d
--- /dev/null
+++ b/web/noweb/contrib/avs/make_xdo.awk
@@ -0,0 +1,14 @@
+# edits noweb/src/xdoc/makefile for Ms-Dos + PC386 + Icon 386 9.0 + DJGPP + MKS 4.2
+# tested with noweb 2.7a
+
+BEGIN { print "# generated MsDos makefile, original in makefile.old"; }
+
+/SHELL\=/ { $0 = "# " $0 } # disable SHELL def
+
+/WWW=/ { $0 = "WWW=../.." } # Put World Wide Web files in noweb because the dir $(HOME)/www/noweb might not exist
+
+/\/bin\/rm/ { sub(/\/bin\/rm/, "rm") } # rm might not be at "/bin/rm"
+
+/\.ps\.gz/ { sub(/\.ps\.gz/, ".pgz"); } # MsDos limitation, use extension .pgz instead of .ps.gz
+
+{ print $0 } # prints the line (which might have been changed)
diff --git a/web/noweb/contrib/avs/mks42bug.0d b/web/noweb/contrib/avs/mks42bug.0d
new file mode 100644
index 0000000000..f6ba43e2dd
--- /dev/null
+++ b/web/noweb/contrib/avs/mks42bug.0d
@@ -0,0 +1,128 @@
+ MKS Toolkit 4.2 Dos Bugs
+
+Environment: 486DX2-80 VLbus, 32 MB RAM, 2 EIDE HDs, ATI Mach64 GPT 2048K VLbus
+Sblaster 16 ASP, Logitech serial Mouseman, Ms-Dos6.22, WfW3.11 (32 bits disk
+access/32 bits file access), QEMM7.5, Stacker4.0 for OS/2 & Dos,
+OS/2 boot manager (Dos/Win, Warp, Linux)
+
+1- bin32/diff.exe doesn't do wild card expansion correctly under command.com
+ (i.e. doesn't use glob.exe correctly). It works fine under the Korn shell.
+ No problems with bin/diff.exe
+ (as it can be much faster than the 16 bits diff, e.g. 5s against 18s, I
+ just renamed it to diff32.exe)
+ BUT: it seems that in some cases with QEMM 7.5 diff32 does work with
+ wild card expansion, and it is diff.exe that crashes when wild card expansion
+ is used and the files differ (QEMM says invalid ...). It seems that if
+ the 2 files are identical there is no problem! Under a clean boot there is
+ no crash, but then there is nothing to check whether invalid instructions
+ are used. See the 2 files MOD_FBUS.LST and LIXO for an example of a crash!
+
+2- bin/find.exe with the option -exec doesn't always work correctly under
+ command.com (Ms-Dos 6.2 or Ms-Dos 6.22). The 1st exec works, the 2nd gives a
+ memory allocation error (under Ms-Dos 6.2) or an unspecified error (under
+ Ms-Dos 6.22).
+ E.g.:
+ find . -type f -exec ls -l {} ;
+ always works, but in
+ find . -name "*.zip" -exec ls -l {} ;
+ only the first exec works, the 2nd gives the error message
+ find: cannot execute "ls": not a directory or path not found
+ and find aborts.
+ The same behaviour occurs when executing non-MKS commands.
+ No problems if executing under the Korn shell.
+ The find.exe from version 2.3 of the MKS toolkit works fine under
+ command.com, thus this is a new bug.
+
+3- the Korn shell crashes if one presses ctrl-BREAK (even ctrl-alt-del doesn't
+ work). Under windows it "violates system integrity" and windows terminates
+ it and asks one to reboot the machine because it may be unstable. Under
+ Qemm, Qemm also complains in the same way. The setting of 'break' in
+ config.sys/autoexec.bat (i.e. ON/OFF) is not relevant here
+
+4- there are several bugs in bin/pax.exe
+ a) -i never works (i.e. whether one writes a name, presses '.' or
+ presses return it always is unable to create that file)
+ b) -s only works partially, most of the time (using s/.../.../p to see
+ what happens) it does erroneous substitutions. It seems to work if
+ all ocurrences of '.' or '/' on the original name are matched by
+ using a '.' (e.g. never use a regular expression that matches both the
+ basename and the extension)
+ c) using '-f path' if path has a backslash anywhere then it crashes
+ (e.g. using 'pax -f g:\users\foo.tar' from the DOS prompt). The
+ crash looks like an infinite loop. If running in a Dos window then it
+ is possible to kill that window without crashing the system
+ d) this is not really a bug but a thing that would come handy. It would be
+ good if pax discarded the sufix ',v' from the filenames, this because
+ Unix RCS adds that kind of sufix and when extracting it creates an
+ invalid filename. In the current version 'RCS/filename.txt,v' extracts
+ correctly (because it already has a 3 char extension thus the ',v' is
+ discarded) but RCS/filename.c,v doesn't extract at all (and as there
+ are bugs in the option -s it is hard to extract those filenames)
+ (a good thing with tar/pax is that when creating a tar file if one
+ specifies a longer filename or a filename with a mixture of upper/lower
+ case the file will be stored in the tar file with that name, this
+ simplifies going back and forward between Ms-Dos/Unix if after truncation
+ and case convertion no filenames are equal. Thus it would be also good
+ if it did accept ',v' when creating a tar file under Ms-Dos)
+#
+# How to extract RCS files from a tar file if they have ',v' on their name
+#
+# Warning: pax has bugs, -i doesn't work, -s only works if all occurrences of
+# either '.' or '/' in the expression to match are matched by using '.'
+# Assuming filenames of the type 'cpnauto/RCS/*.*,v', extracts to 'rcs/*.*'
+# BUG: a filename without an extension (e.g. cpnauto/RCS/makefile,v') loses
+# the last char (i.e. becomes 'rcs/makefil')
+#
+# Don't try to automate (e.g. to use args, the bugs in the handling of -s
+# will probably defeat any attempts)
+#
+pax -f /cpnauto.tar >cpnauto.lst
+pax -krvf /cpnauto.tar -s '/cpnauto\(.\)RCS.\([a-z0-9_]*\).\([a-z]*\),v/rcs\1\2.\3/p' `grep RCS cpnauto.lst`
+
+5- bin/tar.exe crashes (infinite loop?) when given wildcards, e.g.
+ 'tar tvf *.tar' from the Dos prompt (command.com) and there is a single
+ file with extension .tar in that dir, no problem if using the full
+ filename [check if it is the same as 4-c)]. It also crashes if a backslash
+ is given in a pathname (infinite loop)
+
+6- there is no online man page for dircmp(1) although 'man tkerrat' says there
+ is. Anyhow dircmp(1) works
+
+ Usage: dircmp [-ds] dir1 dir2
+
+ -d -> works only for text files, shows differences in diff format
+ -s -> silent mode (non verbose, only the differences)
+
+7- 'make -f makefile' doesn't work, it always tries to use makefile.mak as
+ the makefile. Probably because 'whence -v makefile' says that makefile
+ is a function. Not checked any further
+
+8- man who(1) says that login(1) only uses the file $ROOTDIR/etc/utmp if
+ it already exists, this is not true, it is created if it does not exist.
+ Anyhow one can still install MKS in a READONLY drive because if etc/utmp is
+ set to READONLY it is not changed and there is no error message (it is not
+ enough to only have the drive but not the file write protected, in that case
+ one gets an error message: Write protect error writing drive ?)
+
+9- bin/cmp.exe gives an exit code of 3 if one of the files to be compared cannot
+ be open or doesn't exist, this is in contrast with the MKS man page (and with
+ Unix) which says that an error code of 2 is to be given. If only 1 filename
+ or more than 2 filenames are given, an exit code of 2 occurs; if one of the
+ files does not exist (the 1st or the 2nd) an exit code of 3 occurs
+
+10- the date of the '.' and '..' entries given by 'ls -al' is wrong, it's
+ always 31-Dec-79 (i.e. 0, because the PC time starts at 1-Jan-80). In other
+ words if current dir is 'foo/bar' then to get the creation date of dir
+ 'bar' one has to do 'ls -ld ../bar' instead of just 'ls -ld .'
+
+11- 'vi -x pathname' where pathname is not from the current dir
+ or has dir names (e.g. ./filename.txt)
+ Under command.com do not use '/' but '\' otherwise one gets
+ 0 lines 0 chars (even if the file exists). Under the Korn shell
+ do not use '\\' but use '/' otherwise 0 lines 0 chars.
+ This has something to do with the implicit call to crypt (caused by
+ the -x).
+ This bug is not very serious because usually one uses '\' under
+ command.com and '/' under the korn shell, still all the MKS toolkit
+ commands are supposed to accept both (and I usually use '/' even
+ under command.com)
diff --git a/web/noweb/contrib/avs/mksfixes.ksh b/web/noweb/contrib/avs/mksfixes.ksh
new file mode 100644
index 0000000000..6ca89997de
--- /dev/null
+++ b/web/noweb/contrib/avs/mksfixes.ksh
@@ -0,0 +1,15 @@
+if [ -z "$1" -o -z "$2" ]
+then
+ echo Usage $0 BIN TMP >&2
+ echo Fixes "'BIN/cpif.ksh'" for use with MKS Toolkit "(see 'man mks42bug', cmp entry)" >&2
+ echo "Fixes 'BIN/noweb.ksh' for use with MKS toolkit (the PATH problem, see howto386.txt)" >&2
+ echo TMP is for later use by cpif.ksh i.e. at run-time >&2
+ echo "Changes only line 8 (if it has 'PATH='), line 20 (if it has 'new=') and line 28 (if it has '-eq0')" >&2
+ exit 1
+fi
+
+cat $1/cpif.ksh | sed -e '8s/\(PATH=.*\)/#\1/' -e '20s@\(new=.*\)@new='$2'/$$@' -e '28s/-eq0.*/ -eq0|-ne1|*2|*3) cp $new $i/' > $2/cpif.tmp
+mv $2/cpif.tmp $1/cpif.ksh
+
+cat $1/noweb.ksh | sed '21,26s/PATH="$PATH:$LIB"//' > $2/noweb.tmp
+mv $2/noweb.tmp $1/noweb.ksh \ No newline at end of file
diff --git a/web/noweb/contrib/avs/myenv.ksh b/web/noweb/contrib/avs/myenv.ksh
new file mode 100644
index 0000000000..d390df0c59
--- /dev/null
+++ b/web/noweb/contrib/avs/myenv.ksh
@@ -0,0 +1,38 @@
+# You should only edit the line ' myargs=...', see below
+
+echo Builds and installs Noweb 2.7 from source code
+echo "Full documentation in 'howto386.txt'"
+echo Assumptions:
+echo "1- There is free environment space (between 30 and 40 bytes)"
+echo "2- DJGPP is installed (gcc, go32, coff2exe, make)"
+echo "3- MKS Toolkit 4.2 (or above?) for Dos is installed"
+echo "4- The MKS Toolkit Make is the 1st make in your PATH env var"
+echo "5- Icon 9.0 translator binaries are installed"
+echo "6- You have enough free Ram (around 600000 bytes)"
+echo "7- Your paths are not too long to break some script"
+echo " (i.e. 128 bytes Dos command line limit)"
+echo "8- To fully use Noweb, LaTeX2e already is or WILL be installed"
+echo "9- You are running a not too old Dos version (e.g. 'call batchfile')"
+
+# Edit the 'myArgs=...' line to adapt for your environment:
+# Use always FULLPATHS, i.e. with DRIVE LETTER
+# Use only slashes '/' in pathnames, backslashes won't work
+# BIN is where the noweb binaries will be installed
+# LIB is where the noweb support files will be installed
+# MAN is where to put the man pages (MANPATH env var or Mks ROOTDIR/etc)
+# DJGPPmake is the fullpath to Gnu make (does not have to be in your PATH)
+# TMP is a temporary directory to be used by Noweb at run-time
+# ICON is the fullpath to icont (the Icon translator)
+# (in both DJGPPmake and ICON the '.exe' is not necessary,
+# remove it if you have problems with names too long)
+
+theArgs="BIN LIB MAN TEXINPUTS DJGPPmake TMP ICON"
+ myArgs="i:/b g:/usr/local/lib/noweb g:/man h:/emtex/texinputs/local j:/djgpp/bin/make.exe d:/tmp e:/b/icont.exe"
+
+echo "generate $theArgs"
+echo "generate $myArgs"
+read f?"Check if the line above is OK for your machine, continue (y/n)? "
+if [ "$f" = "y" -o "$f" = "Y" -o "$f" = "yes" -o "$f" = "YES" ]
+then
+ ./generate.ksh $myArgs
+fi
diff --git a/web/noweb/contrib/avs/norman1.txt b/web/noweb/contrib/avs/norman1.txt
new file mode 100644
index 0000000000..5db826cb87
--- /dev/null
+++ b/web/noweb/contrib/avs/norman1.txt
@@ -0,0 +1,136 @@
+Hi Norman
+
+I'm one of your noweb fans (one of these days you'll get a postcard from
+Denmark) since version 2.6c. I use it in several Unix systems (Sun4, Solaris,
+HPUX, Linux) and in a PC running Dos. I am able to use Noweb in the same way
+under both Dos and Unix. This means Dos+Icon+LaTeX2e in all platforms.
+
+When I downloaded Noweb 2.6c I had problems compiling it for Dos. In a way the
+information on how to do it was there (e.g. the 'install.dos' file from Lee
+Wittenburg), but it just gave general guidelines and it used Awk instead of
+Icon. To be able to compile and use Icon took one day's work and several
+machine crashes. So I wrote a kind of HOWTO for my own use when the next
+version came around.
+
+Noweb 2.7 should have been easy, but it took a couple of hours because the
+directory hierarchy had changed. Now there are Dos binaries on the
+distribution, but they have a couple of problems (besides being from 2.6c).
+
+Today I just downloaded Noweb 2.7a. It has binaries for both 2.6c and 2.7, but
+that version 2.7 is for the Watcom compiler (I have it but it is not installed)
+and it uses Perl, which is nice but is risky if you change your code.
+After half an hour I had updated my scripts to cope with version 2.7a and
+after that I just compiled the source and got 2.7a Dos binaries.
+
+So I cleaned up my HOWTO and I'm asking if you would be interested in putting
+it in the next noweb distribution. Here is why you might be interested:
+
+1- no binaries, only text files i.e. reduced space usage. You already have Dos
+binaries in your distribution and you wouldn't want more. For those users that
+don't manage to build it, I offer to supply ftp access to the binaries (all
+they have to do is ask). Another advantage is that my scripts might work
+without changes in the next Noweb version (if you don't change the directory
+hierarchy nor the part of the makefiles that I'm patching nor add anything new
+that requires patching). Gzip compressed the files take about 40 KB, expanded
+about 112 KB. Notice that the scripts have comments and check for errors trying
+to fail gracefully, otherwise they would be one fourth as big. Also more than
+two thirds is documentation files
+
+2- the noweb/src/install.dos doesn't say enough (in my case it required 1 day's
+work), noweb/binaries/dos-2.6c.zip is already two versions behind and
+although it uses Icon it does not use Icon for a PC 386 (or above) thus memory
+problems are to be expected. Also only some of the shell scripts were
+translated to C++, thus using noweb at work (Unix) and home (Dos) wouldn't be
+exactly the same. The same applies to the Dos binaries for version 2.7, using
+Perl might make a difference
+
+3- minimum hardware/software requirements are a PC386 and the MKS Toolkit
+(commercial software). Any user that is thinking about using Noweb in an
+e.g. PC 286 is not doing serious work (or is brave). Thus asking for a 386 as a
+minimum is reasonable. It is a pity that one has to rely on commercial software
+but although there are some shareware Unix shells, I have no experience with
+them thus I don't know how good they are. The MKS toolkit is very popular and
+the price tag is quite reasonable (I think around $250). It closely follows
+Posix, there are versions for Dos, OS/2 and Win NT. It offers a Korn shell and
+all the standard Unix utilities, e.g. find, tar, man, awk, sed, vi, make. The
+only thing missing is multitasking (I manage to get a kind of multitasking by
+running several Dos boxes under Windows, but that is another story). One can
+even use '/' instead of '\'.
+
+4- if the user has the above then the missing parts can be downloaded. I supply
+ftp sites and directories where to find DJGPP binaries (the PC386 Dos port of
+gcc, gmake, gzip, etc), Icon PC386 binaries, and LaTeX2e PC386 binaries (I
+supply an original document of John Refling annotated by me because the
+installation for a normal user is a nightmare, with that document it is
+straightforward, my annotations only fix some small bugs in that document and
+explain a few more points). As an option GhostScript can also be used
+
+5- after the user has MKS, DJGPP, Icon, and LaTeX2e (latex2e can be installed
+after noweb) properly installed, to get noweb 2.7a up and running all he/she
+has to do is to download it and to edit a one line script (in the same fashion
+as your noweb/src/nwmake) to specify the paths.
+
+6- that one line script works as follows: it supplies the args to another
+script which generates a third script. If the user is lucky running that script
+is enough (it works for me). Otherwise he/she has to look at that script and
+compare it with my document which explains ALL the steps required for fixing
+Noweb to run under Dos limitations. The generated script begins by renaming
+some of your makefiles and replacing them with equivalent makefiles (patched
+with awk scripts to circumvent Dos problems), then it uses GNU Make and Icon to
+compile the Icon sources, GNU Make and gcc to compile the C sources, and MKS
+Make and the Korn shell to install noweb. It might seem contrived but it avoids
+out of memory errors and I want to use your makefiles (i.e. slightly patched
+versions of them), not to supply my own makefiles which would be more sensitive
+to changes in your source code
+
+7- for troubleshooting I supply a document that explains all the steps for
+building Noweb, and I also supply a list of the MKS bugs that I discovered (one
+of them while compiling Noweb). I also discovered a couple of bugs/problems in
+your code, all of them trivial except one (the missing chunk problem, this is
+easy to reproduce in Dos but tough in Unix). See the file
+noweb/contrib/avs/report1.bug
+
+8- notice that the user gets EVERYTHING, e.g. 'cd noweb/src/xdoc; latex guide;
+dvips guide' works (any missing METAFONT fonts will be generated on the
+fly). Then if he/she does not have a PostScript printer he/she can get
+GhostScript (mentioned in the ftpsites.txt) to translate for the printer at
+hand. The same applies to all Noweb commands (they work as in Unix). This
+allows using Noweb for doing serious literate programming on a Dos PC all the
+way down to the printing process.
+
+9- I assume that the next version won't change the directory hierarchy thus my
+scripts might work without change or will need half an hour work to update them
+(I had to update my 2.7 scripts to 2.7a because you changed sensitive parts of
+your makefiles)
+
+10- I don't supply makefiles for commercial C compilers. As noweb is free I'd
+rather use as much free software as possible. MKS is the unfortunate exception.
+
+The tar file (39 KB) ftp.daimi.aau.dk:/pub/empl/avs/avs386_noweb27a.tar.gz
+has files that expand into noweb/contrib/avs (I copied the style of the other
+contributions, e.g. readme, email). By uncompressing noweb27a.tar.gz and
+avs386_noweb27a.tar.gz from the same directory and by editing a single line of
+my script 'noweb/contrib/avs/myenv.ksh' one can compile Noweb 2.7a for Dos.
+Here is what that line looks like in my case:
+ generate i:/bin g:/usr/local/lib/noweb g:/man h:/emtex/texinputs/local j:/djgpp/bin/make.exe d:/tmp e:/bin/icont.exe
+
+If you think this is interesting you might
+a) mention it in comp.programming.literate
+b) add it to the noweb 2.7b distribution, after all now this allows me to build
+ and install Noweb 2.7a from the 2 tar files in just under 3 minutes ;-)
+
+Regards
+/avs
+
+P.S.: I myself switched to Linux but up to now Noweb under Dos has helped me a
+lot. Although I won't be using Noweb under Dos so much in the future I'm still
+willing to support it (i.e. any eventual updates to my recipe). And if you ever
+give me a prerelease of a new Noweb (with a week's notice) I will supply you
+with either the Dos binaries for incorporating into the official distribution
+or better still with updated patches/scripts
+
+P.S.: if you want the noweb 2.7a Dos binaries just ask
+
+P.S.: remember that the files in avs386_noweb27a.tar.gz were Dos manipulated.
+ To see any of those files under Unix (e.g. report1.bug) remember to do
+ tr -d '\015' < infile > outfile
diff --git a/web/noweb/contrib/avs/nw_c.bat b/web/noweb/contrib/avs/nw_c.bat
new file mode 100644
index 0000000000..d62641151e
--- /dev/null
+++ b/web/noweb/contrib/avs/nw_c.bat
@@ -0,0 +1,63 @@
+@echo off
+if %1.==. goto USAGE
+if %DJGPPMAKE%.==. goto NOENVVAR
+goto DOIT
+
+:NOENVVAR
+echo Aborting, environment var DJGPPMAKE not set
+goto THEEND
+:USAGE
+echo %0 DJGPPmakePath
+echo ** Use backslash path in arg & slash path in DJGPPMAKE env var, e.g.
+echo set DJGPPMAKE=j:/djgpp/bin/make
+echo %0 j:\djgpp\bin\make
+goto THEEND
+
+:FAILURE0
+echo Failed to patch src/c/finduses.c (probably the source lines to patch are
+echo not any more at lines 44 and 65)
+goto THEEND
+:FAILURE2
+echo Failed to patch src/c/finduses.c, reason unknown
+goto THEEND
+
+:DOIT
+rem Requires DJGPP port of GNU gcc (gcc, make)
+rem Beware of the Ms-Dos command line 128 chars limit!
+rem MKS make won't do, use DJGPP make!
+
+rem Use UNIX style pathnames in DJGPPMAKE path, otherwise DJGPP make chokes!
+rem This is used for make to launch submakes assuming that you might have 2
+rem different makes in your path, the MKS make (which we don't want to use)
+rem and the DJGPP make (which we want to use)
+
+rem Avoid using broken tmpfile() function from DJGPP 'libc.a'
+if not exist c\finduses.old cp c/finduses.c c/finduses.old
+if errorlevel 1 goto THEEND
+echo Patching lines 44 and 65 of src/c/finduses.c (DJGPP tmpfile() broken!)
+sed '44s/FILE \*tmp = tmpfile()/char *tmpName;FILE*tmp=fopen(tmpName=tempnam(".",NULL),"w+")/' c/finduses.old>c\finduses.tmp
+diff c/finduses.old c/finduses.tmp
+if errorlevel 2 goto FAILURE2
+if errorlevel 1 goto STEP2
+if errorlevel 0 goto FAILURE0
+:STEP2
+sed '65s/add_use_markers(tmp, stdout);/add_use_markers(tmp, stdout); remove(tmpName);/' c/finduses.tmp > c\finduses.new
+diff c/finduses.tmp c/finduses.new
+if errorlevel 2 goto FAILURE2
+if errorlevel 1 goto STEP3
+if errorlevel 0 goto FAILURE0
+:STEP3
+rm c/finduses.tmp
+if errorlevel 1 goto THEEND
+cmp -s c/finduses.c c/finduses.new
+if errorlevel 1 goto DIFFERENT
+rm c/finduses.new
+goto THEMAKE
+:DIFFERENT
+mv c/finduses.new c/finduses.c
+
+:THEMAKE
+rem Use Ms-Dos style pathnames here, otherwise command.com chokes!
+@echo on
+%1 CC=gcc
+:THEEND
diff --git a/web/noweb/contrib/avs/nwicon.bat b/web/noweb/contrib/avs/nwicon.bat
new file mode 100644
index 0000000000..d416013c2c
--- /dev/null
+++ b/web/noweb/contrib/avs/nwicon.bat
@@ -0,0 +1,43 @@
+@echo off
+if %1.==. goto USAGE
+if %2.==. goto USAGE
+if %3.==. goto USAGE
+if %4.==. goto USAGE
+if not %5.==. goto USAGE
+
+if exist ..\..\noweb\src\%0 goto DOIT
+if exist ..\..\noweb\src\%0.bat goto DOIT
+echo '..\..\noweb\src\%0' not found!
+cd
+echo Bad startup dir? Aborting.
+echo Change to ./noweb/src and run the file installed by msdosfix.bat in there
+echo (you cannot use the original in noweb\contrib\avs\nwicon.bat)
+goto THEEND
+:DOIT
+
+rem This represents 'make iconlib', but only like this I could put it to work
+cd icon
+rem j:\djgpp\bin\make ICONC=e:\\\\b\\\\icont ICONT=e:\\\\b\\\\icont
+%3 ICONC=%4 ICONT=%4
+if errorlevel 1 goto THEEND
+cp -p totex.exe ../lib
+if errorlevel 1 goto THEEND
+cp -p tohtml.exe ../lib
+if errorlevel 1 goto THEEND
+cp -p noidx.exe ../lib
+if errorlevel 1 goto THEEND
+cp -p noindex.exe ../shell
+if errorlevel 1 goto THEEND
+rem j:\djgpp\bin\make ICONC=e:\\\\b\\\\icont ICONT=e:\\\\b\\\\icont LIB=g:/usr/local/lib/noweb BIN=i:/b install
+%3 ICONC=%4 ICONT=%4 LIB=%2 BIN=%1 install
+if errorlevel 1 goto THEEND
+cd ..
+goto THEEND
+
+:USAGE
+if not %1.==. echo Wrong usage: %0 %1 %2 %3 %4 %5 %6 %7 %8 %9
+echo Usage: %0 BIN LIB DJGPPmakeBackslashPath 4BackslashedIconTranslatorPath
+echo e.g. %0 i:/b g:/usr/local/lib/noweb j:\djgpp\bin\make.exe e:\\\\b\\\\icont.exe
+echo If you are running this as part of another script abort with CTRL-C
+pause
+:THEEND
diff --git a/web/noweb/contrib/avs/nwinst.ksh b/web/noweb/contrib/avs/nwinst.ksh
new file mode 100644
index 0000000000..6e11001f5a
--- /dev/null
+++ b/web/noweb/contrib/avs/nwinst.ksh
@@ -0,0 +1,9 @@
+export SHELL=${SHELL:=$ROOTDIR/bin/sh.exe}
+if [ -z "$1" ]
+then
+ echo "Usage: $0 BIN LIB MAN TEXINPUTS"
+ echo "-- Installs noweb using icon"
+ exit 1
+fi
+
+make CC="gcc" BIN=$1 LIB=$2 MAN=$3 TEXINPUTS=$4 LIBSRC=icon install
diff --git a/web/noweb/contrib/avs/readme b/web/noweb/contrib/avs/readme
new file mode 100644
index 0000000000..f284d69d52
--- /dev/null
+++ b/web/noweb/contrib/avs/readme
@@ -0,0 +1,39 @@
+version 0.3 (30-May-95)
+
+How to install noweb 2.7a in a PC386 or above running Dos if you have Mortice
+Kern Systems' MKS Toolkit 4.2 for Dos. The rest of the software can be obtained
+by ftp (GNU DJGPP, Icon binaries, LaTeX2e, GhostScript). Complete details on
+how to get and install everything are provided
+
+ a) Look at filelist.txt, and if you need something look at ftpsites.txt
+ b) To install noweb if you are lucky you just have to edit 'myenv.ksh' and
+ to run it, e.g. from the Ms-Dos command.com prompt do:
+ cd noweb\contrib\avs
+ edit myenv.ksh
+ sh -c ./myenv.ksh
+ automate
+ c) Test the installation by doing (from the Korn shell prompt):
+ cd noweb/src/tex
+ noweb support.nw
+ latex support
+ latex support
+ latex support
+ v support
+ (assuming your dvi viewer is a batch file called v.bat)
+ dvips -o support.ps support
+ d) See if you have the same problem as I do. If section 2.2 is missing
+ from page 7, chunk 9b from page 9 and chunk 28b from page 28 then
+ you also have the problem that some code/doc chunks come out as white
+ space. The current fix is to add a newline at the start/end of the chunk
+ to make a paragraph. See report1.bug
+ e) Further technical details on howto386.txt
+
+If you still run into trouble using this recipe, contact me. As a last resort
+I'm willing to supply you by anonymous ftp with everything you need to run
+noweb 2.7a (except the MKS toolkit which is commercial software, i.e. I can
+supply binaries for LaTeX2e, Icon 9.0 and Noweb 2.7 that fit in 4 * 1.44 MB
+floppies)
+
+My recipe has only been tested in my machine (which is heavily loaded, dozens
+of TSR's and has recent versions of most software). I will appreciate receiving
+comments that allow it to run smoothly in other environments
diff --git a/web/noweb/contrib/avs/report1.bug b/web/noweb/contrib/avs/report1.bug
new file mode 100644
index 0000000000..c19c2abcd4
--- /dev/null
+++ b/web/noweb/contrib/avs/report1.bug
@@ -0,0 +1,97 @@
+Noweb 2.7a bug report (avs@daimi.aau.dk, 29-May-95)
+
+*****************
+"strip problem in hpux8"
+
+In noweb/src/Makefile the strip command in an HPUX8 machine causes the make to
+fail. Commenting out the 'strip' fixes the problem
+
+*****************
+\n problem in -option output of 'noweave -h'
+
+The 'Add \noweboptions{opt} to ...' should be 'Add noweboptions{opt} to ...'
+
+*****************
+Dates problem in source code distribution (as of 29-May-95)
+
+I used ftp get with on the fly compression of the noweb dir to a .tar.gz file
+thus I got the original dates (I didn't cause the dates problem)
+
+The .nw files are not always older than the generated files and the same
+applies to the man page files (regarding the nroff processed man pages). The
+problem seems to be that the dates are identical instead of say 1 second older.
+A simple fix is:
+
+cd noweb/src
+find . -name "*.nw" -exec touch -t 9502231200 "{}" \;
+find xdoc -name "*.1" -exec touch -t 9502241200 "{}" \;
+
+*****************
+Style hook in 'src/tex/support.nw' page 21
+
+The margin note style hook in page 21 is wrong handed (should have come on the
+right instead of on the left)
+
+*****************
+There is a problem with noweb 2.7a/LaTeX: some chunks might disappear from the
+LaTeX output, i.e. they are in the output .tex file but in the .dvi file they
+come out as white space (sometimes this is noticed because it seems strange
+that a page has a big block of white space in the middle, but if it is in the
+end of the page the disappearance might go unnoticed)
+
+A similar problem was found with noweb 2.6c and 2.7 (I didn't report it
+before). I was once able under Unix (HPUX8) to see the problem, but
+unfortunately I'm unable to reproduce it (sorry!). I can see it under Dos with
+e.g in 2.7a
+ cd noweb/src/tex
+ noweb support.nw
+ latex support
+ latex support
+ latex support
+ v support
+ (v is the Dos equivalent of xdvi)
+Section 2.2 on page 7 and chunks 9b and 28b come out as a white
+rectangle in the middle of the page. If you want, I can send you the .dvi
+files (Dos generated) that show the disappearance. This is a subtle problem
+
+The problem can always be solved by adding an empty newline (to make a
+paragraph). In my noweb source file the problem causes a chunk of
+documentation to disappear. A doc chunk comes after a code chunk: if it
+started with '@ ...', i.e. a '@' followed by a blank followed by the text,
+the chunk came out as white space, but if I added a newline after the '@'
+(i.e. started the text on the next line) everything was OK (the lost chunk
+could be seen in the .tex file, it just didn't make it to the .dvi file).
+This didn't happen everywhere or always, only in some doc or code chunks and
+only in some particular circumstances, i.e. adding a couple of lines several
+pages earlier (which changed the page layout on the problematic page) made
+the problem disappear
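+
+For example, a sketch of the workaround (the text itself is made up):
+
+    @ This doc chunk sometimes vanished from the .dvi output ...
+
+    @
+    This doc chunk, with its text starting on the next line, came out fine.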
+
+Using diff to compare the .tex outputs I noticed that the only difference is
+that the \n makes its way into the .tex output, thus the problem does not seem
+to be with the noweb scripts but with the noweb LaTeX support code (nwmac.tex
+or noweb.sty)
+
+Or maybe it's a LaTeX bug (it is NOT just a bug in the emTeX Dos port, because
+the first time I saw this it was on a Unix machine running LaTeX 2.09)
+
+Other symptoms:
+	a) using the .dvi file (Dos generated) and viewing it under Linux
+	caused a crash (floating point exception) when viewing the affected
+	pages (7, 9, 28). No problem if those pages were skipped (i.e. never
+	displayed on the screen). When, with a now lost .nw file,
+	I saw chunks disappearing under HPUX8 I also got floating point
+	exceptions and core dumps (until I made a slight change in the page
+	layout, which made the core dumps disappear; the missing
+	chunks still happened later on). Viewing the .dvi file under Dos does
+	not cause any crashes, only a white rectangle in the affected area
+	b) using the .tex file (Dos generated) caused no problem under Linux
+
+My tentative diagnosis:
+	- it is not a LaTeX2e-specific problem (I saw it under LaTeX 2.09)
+	- it is not a Dos-specific problem (I saw it under HPUX8)
+	- it is not a noweb/noweave problem (if the .tex output is processed
+	  on another machine it works fine)
+	- it is either in the LaTeX support file noweb.sty or in LaTeX itself;
+	  still, it is a machine-dependent problem, rare under Unix, frequent under Dos
+
+If I manage to pinpoint the problem, or reproduce it under Unix, I'll let you
+know
diff --git a/web/noweb/contrib/conrado/Makefile b/web/noweb/contrib/conrado/Makefile
new file mode 100644
index 0000000000..f7a9ee59e2
--- /dev/null
+++ b/web/noweb/contrib/conrado/Makefile
@@ -0,0 +1,15 @@
+LIB=/dev/null # to be overridden by install
+
+.SUFFIXES: .nw .icn
+.nw.icn: ; notangle -L'#line %-1L "%F"%N' $*.nw | cpif $*.icn
+
+all: d2tex
+source: d2tex
+install:
+ cp d2tex $(LIB)/dijkstra.filter
+
+# TeX files.
+hospital.tex: hospital.nw d2tex
+ noweave -delay -filter ./d2tex hospital.nw > hospital.tex
+clean:
+ /bin/rm -f hospital.tex *.dvi *.aux *.log *.blg *.bbl *~
diff --git a/web/noweb/contrib/conrado/README b/web/noweb/contrib/conrado/README
new file mode 100644
index 0000000000..c21ed85589
--- /dev/null
+++ b/web/noweb/contrib/conrado/README
@@ -0,0 +1,38 @@
+This is the README file for the collection of files related to d2tex.
+These files are:
+
+- algoritmos.sty:
+defines the 'code' environment and several other
+related environments (code*, algorithm, ...); the implementation is
+not documented and can be difficult to understand, but I have
+documented the main features of the environments.
+- d2tex:
+a noweb filter (written in awk) that prettyprints; for instance,
+ /* this is a comment */ is replaced by \COMMENT{this is a comment}
+ <= is replaced by \le
+ PROCEDURE initialize is replaced by \PROCEDURE |initialize|
+
+this script does not parse the input; it only uses pattern-matching,
+but produces reasonable output if some rules of thumb are followed.
+d2tex has a list of keywords built in.
+
+- keywords.tex:
+
+contains TeX macro definitions for producing keywords in boldface,
+special symbols (such as the boxes between guarded commands), etc.; most
+of the macros include the indentation macros \tab and \untab; \tab sets
+a tab stop and moves the margin tab to the right, while \untab removes a
+tab stop and moves the margin tab to the left. Since the 'code'
+environment is based upon the tabbing environment, it is useful for
+the keyword macros (such as \WHILE) to include these tab/untab
+macros (see the sketch after this list). \tab and \untab can also be
+used inside the 'code' environment when needed, for instance, to
+override the default indentation or to correct minor mistakes.
+
+- hospital.nw:
+a weird example of the type of code that d2tex is 'able' to
+prettyprint; the chunk names and identifiers are in Catalan and the
+code is not complete.
+
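+A sketch of the \tab/\untab pattern inside keyword macros (these two
+definitions are taken verbatim from keywords.tex):
+
+ \def\WHILE{{\bf whi}\tab{\bf le\ }}   sets a tab stop; the body indents
+ \def\ENDWHILE{\untab{\bf end}}        removes it; the margin moves back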
+
diff --git a/web/noweb/contrib/conrado/algoritmos.sty b/web/noweb/contrib/conrado/algoritmos.sty
new file mode 100644
index 0000000000..23bbc711f3
--- /dev/null
+++ b/web/noweb/contrib/conrado/algoritmos.sty
@@ -0,0 +1,169 @@
+\catcode`@=11
+%
+% \listofalgorithms
+%
+% this macro is like \listoffigures or \listoftables; it prepares a
+% list of algorithms/programs using an auxiliary file (.lop)
+% CUSTOMIZATION: the banner of the list is a string defined by \listalgorithmsname;
+% the default value is ``List of Algorithms''
+
+\def\listofalgorithms{\@restonecolfalse\if@twocolumn\@restonecoltrue\onecolumn
+ \fi\chapter*{\listalgorithmsname\@mkboth
+ {\uppercase{\listalgorithmsname}}{\uppercase{\listalgorithmsname}}}
+ \addcontentsline{toc}{chapter}{\listalgorithmsname}
+ {\ssp\@starttoc{lop}}\if@restonecol
+ \twocolumn\fi}
+\let\l@algorithm\l@figure
+
+\def\listalgorithmsname{List of Algorithms}
+
+% \begin{algorithm} ... \end{algorithm}
+%
+% the 'algorithm' environment encloses a float object (like figure or table)
+% algorithm's get their own numbering; captions begin with
+% ``Algorithm'', then the number, and then the text;
+% and there is an entry in the .lop file for each 'algorithm'
+% CUSTOMIZATION: the first word in a caption is given by \algorithmname
+
+\newcounter{algorithm}
+\def\thealgorithm{\@arabic\c@algorithm}
+\def\fps@algorithm{tbp}
+\def\ftype@algorithm{4}
+\def\ext@algorithm{lop}
+\def\fnum@algorithm{\algorithmname\ \thealgorithm}
+\def\algorithm{\@float{algorithm}}
+\def\endalgorithm{\end@float}
+\@namedef{algorithm*}{\@dblfloat{algorithm}}
+\@namedef{endalgorithm*}{\end@dblalgorithm}
+
+\def\algorithmname{Algorithm}
+
+% \begin{code} ... \end{code}
+%
+% this environment is a slight modification of the tabbing environment.
+% the specific characteristics are:
+% 0) it has a parameter for xref information
+% 1) there is a small skip between the paragraph above and the first
+% line of the code.
+% 2) a rule is drawn at the beginning and at the end of the code.
+% 3) all lines are typeset in math mode
+% 4) |<something>| prints <something> in \rm if issued inside math
+% mode (for instance, inside this environment);
+% and in \sl if issued outside math mode
+% 5) _ can be used instead of \_ inside the code environments and in
+% text; it produces subscripts in math mode as usual
+% 6) <return>'s are obeyed; if you want to break lines in the source
+% but not in the final document terminate the line with a comment char %
+% 7) you can leave a blank line using \\
+%
+% it also introduces a command \numberlines that has no effect in this
+% environment, but it has in the *-form of code
+% CUST: \algosep and \algoruleheight are the length parameters that
+% give the amount of skip between the previous paragraph and the
+% beginning of code, and the width of the rule.
+% Both have default value 0pt. \algofontsize allows changing the font size
+% of the text written inside a code environment. The default is \footnotesize
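+%
+% A minimal usage sketch (the keyword macros, e.g. \WHILE and \DO, come
+% from keywords.tex; the mandatory argument carries xref information
+% and may be left empty):
+%
+% \begin{code}{}
+% i := 1;
+% \WHILE i <= n \DO i := i + 1 \ENDWHILE
+% \end{code}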
+
+
+\def\numberlines{}
+\newlength{\algosep}
+\newlength{\algoruleheight}
+\def\rulecode{\rule{\textwidth}{\algoruleheight}}
+\def\sepcode{\vspace{\algosep}}
+\def\algofontsize{\footnotesize}
+\newif\ifinsidecode
+\insidecodefalse
+
+\def\@programcr{$\@tabcr$}
+
+{\catcode`\;=\active \gdef;{\semicolon\;}}
+\mathchardef\semicolon="603B
+
+\catcode`\_=\active \gdef_{\ifinsidecode\_\else\ifmmode\sb\else\_\fi\fi}
+
+\catcode`\|=12\relax
+\def\origbar{|}
+\catcode`\|=\active
+
+\def|{\ifx\@sharp\relax\origbar\else\@dovar\fi}
+\def\@dovar#1#2|{#1\ifmmode{\mbox{\rm #2}}\else{\sl #2}\fi}
+
+
+\def\@tabcommandson{% activate tabbing commands:
+ % \tab sets a tab stop and adds one to the margin tab:
+ \def\tab{$\=\+$}
+ % Must finish current field before testing if at margin:
+ \def\@finishfield{\@stopfield\@addfield\@startfield}
+ % \untab removes one tab stop and moves left if at the beginning of a line:
+ \def\untab{$\@finishfield\@ifatmargin\@ltab\else\relax\fi\-$}
+ % \@marginspace adds an extra space unless there is no text on the line:
+ \def\@marginspace{$\@finishfield$\@ifatmargin\relax\else\ \fi}}
+
+\def\@tabcommandsoff{% deactivate tabbing commands:
+ \let\tab=\relax
+ \let\@finishfield=\relax
+ \let\untab=\relax
+ \let\@marginspace=\ % never at margin thus always a space
+}
+\@tabcommandsoff
+
+\def\code#1{\par\sepcode
+\noindent\rulecode\algofontsize\numberlines\insidecodetrue
+\@tabcommandson \obeycr
+\lineskip \z@\let\>\@rtab\let\<\@ltab\let\=\@settab
+ \let\+\@tabplus\let\-\@tabminus\let\t\tab\let\u\untab
+\let\\=\@programcr
+\global\@hightab\@firsttab \global\@nxttabmar\@firsttab
+\dimen\@firsttab\@totalleftmargin \global\@tabpush0
+\global\@rjfieldfalse \trivlist
+\item[]\if@minipage\else\vskip\parskip\fi
+\setbox\@tabfbox\hbox{\rlap{\indent\hskip\@totalleftmargin
+\the\everypar}}\def\@itemfudge{\box\@tabfbox}\@startline\ignorespaces
+$\@gobblecr
+}
+
+\def\endcode{$\@stopline\ifnum\@tabpush > 0 \@badpoptabs
+\fi\endtrivlist\noindent\@tabcommandsoff
+\rulecode
+\sepcode
+}
+
+% \begin{code*} ... \end{code*}
+%
+% the *-form of code numbers sequentially each line of the code (lines
+% are separated by \\'s)
+% CUST: the label of each line can be easily changed, by redefining
+% the macro \codelinelabel and/or the macro \thecodeline; the counter
+% is codeline.
+
+\newcounter{codeline}
+\def\thecodeline{\arabic{codeline}}
+\def\codelinelabel{\thecodeline.\ \ }
+
+\@namedef{code*}{\def\numberlines{\setcounter{codeline}{0}
+\def\@stopline{\addtocounter{codeline}{1}
+\unskip\@stopfield\if@rjfield \global\@rjfieldfalse
+ \@tempdima\@totalleftmargin \advance\@tempdima\linewidth
+\hbox to\@tempdima{\@itemfudge\codelinelabel\hskip\dimen\@curtabmar
+ \box\@curline\hfil\box\@curfield}\else\@addfield
+ \hbox{\@itemfudge\codelinelabel\hskip\dimen\@curtabmar\box\@curline}\fi}
+}\code}
+\@namedef{endcode*}{\endcode}
+
+\catcode`@=12
+
+% \begin{cntcode} ... \end{cntcode}
+%
+% a slight variation of the code environment, cntcode centers its
+% contents; it should be used only if there is no page break in
+% the middle of the code; that means that its usefulness is limited to
+% small chunks of code, specifications, short declarations, ...
+\newlength{\codewidth}
+\setlength{\codewidth}{0.7\textwidth}
+\newenvironment{cntcode}{\def\sepcode{}\def\rulecode{}\centering
+\begin{minipage}[t]{\codewidth}
+\begin{code}}{\end{code}\end{minipage}
+\par}
+
+
+
diff --git a/web/noweb/contrib/conrado/d2tex b/web/noweb/contrib/conrado/d2tex
new file mode 100755
index 0000000000..5b807b7ce0
--- /dev/null
+++ b/web/noweb/contrib/conrado/d2tex
@@ -0,0 +1,144 @@
+#! /bin/sh
+KEYGEN=$(mktemp)
+trap "rm -f $KEYGEN; exit 1" 1 2 3 15
+cat > $KEYGEN <<END_OF_FILE
+COMMENT#1
+ASSERT#1
+PROGRAM
+ENDPROGRAM
+USES
+ENDUSES
+MODULE
+ENDMODULE
+SPECIFICATION
+ENDSPECIFICATION
+IMPLEMENTATION
+ENDIMPLEMENTATION
+IMPORT
+ENDIMPORT
+TYPE
+ENDTYPE
+VAR
+ENDVAR
+CONST
+ENDCONST
+ARRAY
+RECORD
+ENDRECORD
+BEGIN
+IF
+ELSE
+ENDIF
+THEN
+SKIP
+WHILE
+ENDWHILE
+DO
+DDO
+ENDDO
+FORALL
+FOR
+ENDFOR
+ENDFORALL
+PARALLEL
+REPEAT
+UNTIL
+AND
+OR
+NOT
+CAT
+OF
+IN
+DIV
+MOD
+PROCEDURE
+ENDPROCEDURE
+FUNCTION
+ENDFUNCTION
+RETURNS
+INP
+OUTP
+INOUTP
+PRIVATE
+@@@
+END_OF_FILE
+
+nawk '
+BEGIN { code=0 ; quoting=0 ; inside_comm=0
+ while ((getline kw < "'"$KEYGEN"'") >0)
+ keywords[kw]++
+ }
+/^@begin code/ {
+ printf "@literal \\begin{code}{%s}\\let\\maybehbox=\\hbox\n", substr($0, 13)
+ code=1; next
+}
+/^@end code/ { code=0 ; printf "@literal \\end{code}\n"; next}
+/^@quote$/ { quoting = 1 }
+/^@endquote$/ { quoting = 0 }
+/^@text / { if (code) { print_code(substr($0, 7)) }
+ else { print }
+ next
+ }
+{print}
+function print_code(line) {
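+  # Split "line" into a code part (temp) and a comment part (comm),
+  # wrapping comments in \COMMENT{...}; inside_comm tracks /* ... */
+  # comments that span several input lines.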
+ temp = line;
+ comm = "";
+ if (temp ~ /\/\*/)
+ { r = index(temp, "/*");
+ inside_comm = 1; comm = "\\COMMENT{" substr(temp, r+3);
+ if (comm ~ /\*\//) { inside_comm = 0; comm = comm "}"; temp = ""; }
+ }
+ if (temp ~ /\*\//)
+ { comm = temp "}"; temp = ""; inside_comm = 0; }
+
+ sub(/\*\//,"",comm);
+ sub(/\/\*.*/,"",temp);
+
+ if (temp != "" && inside_comm == 0)
+ {
+ temp = operators(temp);
+ temp = substitute_keywords(temp);
+ temp = mark_vars_types(temp);
+ }
+ line = temp comm;
+ print "@literal " line;
+}
+
+function substitute_keywords(s, kw) {
+ for (kw in keywords)
+ if (s ~ kw)
+ {
+ gsub("(^|[ \\t]+)" kw "($|[ \\t]+)", " \\" kw " ", s);
+ gsub("(^|[ \\t]+)" kw ";", " \\" kw ";", s);
+ gsub("(^|[ \\t]+)" kw "\\.", " \\" kw ".", s);
+ gsub("[(]" kw, "(\\" kw, s);
+ gsub("," kw, ",\\" kw, s);
+ }
+ return s
+}
+
+function mark_vars_types(s ) {
+ gsub(/\\OF[ \t\n]*[a-zA-Z_]+/,"|&|", s);
+ gsub(/\\RETURN[ \t\n]*[a-zA-Z_]+/,"|&|", s);
+ gsub(/[a-zA-Z_]+[ \t\n]*=[ \t\n](\\RECORD|\\ARRAY)/,"|&|", s);
+ gsub(/[a-zA-Z_]+[ \t\n]*=[ \t\n]\\{/,"|&|",s);
+ gsub(/:[\t\n ]*[a-zA-Z_]+/, "|&|" ,s);
+ gsub(/[a-zA-Z_]+[ \\t\\n]*\(/,"|&|",s);
+ gsub(/\(\|/, "|(", s);
+ gsub(/\|:[ \t\n]*/, ":|", s);
+ gsub(/\|\\de[ \t\n]*/, "\\de |", s);
+ gsub(/\|\\RETURN[ \t\n]*/, "\\RETURN |", s);
+ gsub(/\=[\t\n ]*\\ARRAY\|/, "| = \\ARRAY", s);
+ gsub(/\=[\t\n ]*\\RECORD\|/, "| = \\RECORD", s);
+ gsub(/\=[\t\n ]*\\{\|/, "| = \\{", s);
+ return s
+}
+
+function operators(s ) {
+ gsub(/ >= /," \\ge ", s);
+ gsub(/ <= /, " \\le ", s);
+ gsub(/ != /, " \\not= ", s);
+ return s;
+}
+' "$@"
+rm -f $KEYGEN
diff --git a/web/noweb/contrib/conrado/email b/web/noweb/contrib/conrado/email
new file mode 100644
index 0000000000..ef96d5bb44
--- /dev/null
+++ b/web/noweb/contrib/conrado/email
@@ -0,0 +1 @@
+conrado@moon.upc.es (Conrado Martinez-Parra)
diff --git a/web/noweb/contrib/conrado/hospital.nw b/web/noweb/contrib/conrado/hospital.nw
new file mode 100644
index 0000000000..46b5d9fe1f
--- /dev/null
+++ b/web/noweb/contrib/conrado/hospital.nw
@@ -0,0 +1,165 @@
+\documentstyle[noweb,algoritmos]{article}
+\input{keywords}
+\title{Hospital General}
+\author{Conrado Mart\'{\i}nez-Parra}
+\pagestyle{plain}
+\begin{document}
+\maketitle
+
+@ The problem.
+
+
+A hospital has $np$ floors ($1\le np\le |MAX_PISOS|$),
+the $i$-th floor has $nh_i$ rooms
+($1\le nh_i\le |MAX_HAB|$) and the $j$-th room of the $i$-th floor
+has $nll_{i,j}$ beds ($1\le nll_{i,j}\le |MAX_LLITS|$). The number of
+beds that are occupied by patients in the $j$-th room of the $i$-th
+floor in a given moment is denoted by $oc_{i,j}$.
+
+\smallskip
+
+The patients are distributed in the hospital according to one of the
+following three categories: {\tt child}, {\tt men}, {\tt women}.
+All the patients in a given room must belong to the same category.
+
+Consider the following type definitions:
+<<definicio de tipus>>=
+TYPE operation_class = { ingress, dismin, null };
+ cathegory = { child, women, men };
+
+ id_llit = RECORD
+ pis, hab, llit : integer;
+/* any bed can be identified by a tuple
+(floor number, room number, bed number) */
+ ENDRECORD;
+
+ operation = RECORD
+ class : operation_class;
+		cat_ingress_patient : cathegory;
+/* used only in ingress operations */
+ vacant_bed : id_llit;
+/* used only for dismin operations */
+ ENDRECORD;
+
+ENDTYPE
+@
+
+
+@ The solution.
+<<definicio de tipus>>=
+TYPE hospital = RECORD
+ nr_pisos : integer;
+ pisos : ARRAY [1..MAX_PISOS] OF pis;
+	 /* since the type 'array [...] of pis' is completely
+	 incidental, we do not define it separately; we do the
+	 same in the definition of the type 'pis' */
+ ENDRECORD;
+ pis = RECORD
+ nr_habitacions : integer;
+ habs : ARRAY [1..MAX_HABIT] OF pis;
+ ENDRECORD;
+
+ENDTYPE
+
+@
+For each of the rooms we will need to know: 1) how many beds there
+are; 2) how many beds are occupied; 3) the category of the patients
+(empty, if there are none); 4) the state of each of the beds. We only
+need to know the category of one patient, since all the patients in a
+room have the same category. We therefore propose the following definition:
+
+<<inicialitzar hospital>>=
+PROCEDURE inicialitzar(OUTP H : hospital)
+VAR i,j,k : integer
+ENDVAR
+
+ llegir(H.nr_pisos);
+
+	/* we have read the number of floors of the hospital */
+
+ i:= 1;
+
+ WHILE i <= H.nr_pisos DO
+
+		llegir(H.pisos[i].nr_habitacions);
+ i:= i + 1
+
+ ENDWHILE;
+
+	/* we have read the number of rooms on each floor */
+
+ i:= 1;
+
+ WHILE i <= H.nr_pisos DO
+
+ j:= 1;
+ WHILE j <= H.pisos[i].nr_habitacions DO
+
+			llegir(H.pisos[i].habs[j].nr_llits);
+			/* we have read the number of beds in each room */
+ H.pisos[i].habs[j].nr_ocupats:= 0;
+ H.pisos[i].habs[j].cat_pacients:= buida;
+
+ k:= 1;
+ WHILE k <= H.pisos[i].habs[j].nr_llits DO
+
+ H.pisos[i].habs[j].ocupat[k]:= false;
+ k:= k + 1
+
+ ENDWHILE;
+
+			/* we have made the assignments that mark the room
+			as empty */
+ j:= j + 1
+
+ ENDWHILE;
+
+ i:= i + 1
+
+ ENDWHILE
+
+ENDPROCEDURE
+@
+
+<<*>>=
+PROGRAM Hospital
+
+<<definicio de tipus>>
+
+VAR op : operacio;
+ H : hospital;
+ENDVAR
+
+ inicialitzar_hospital(H);
+
+ llegir_operacio(op);
+
+ WHILE op.classe != nul DO
+
+ tractar_operacio(op,H);
+ llegir_operacio(op)
+
+ ENDWHILE
+
+ENDPROGRAM.
+
+@
+
+<<tractar l'operacio en curs $op$, modificant l'estat de l'hospital $H$>>=
+PROCEDURE tractar_operacio(INP op : operacio; INOUTP H : hospital)
+VAR ll_asgn: id_llit;
+ENDVAR
+
+ IF op.classe = ingres THEN ingres(op,cat_pacient_ingressat, H, ll_asgn);
+ escriure("Ingres:",ll_asgn.pis,ll_asgn.hab,
+ ll_asgn.llit);
+
+ ELSE op.classe = baixa THEN donar_baixa(op.llit_abandonat,H);
+
+ ENDIF
+
+ENDPROCEDURE;
+
+@
+\end{document}
diff --git a/web/noweb/contrib/conrado/keywords.tex b/web/noweb/contrib/conrado/keywords.tex
new file mode 100644
index 0000000000..7dc7c2968c
--- /dev/null
+++ b/web/noweb/contrib/conrado/keywords.tex
@@ -0,0 +1,59 @@
+\def\COMMENT#1{\{\parbox[t]{\codewidth}{\rm #1\}}}
+\def\ASSERT#1{\{\mbox{\rm #1}\}}
+\def\PROGRAM{{\bf pro}\tab{\bf gram\ }}
+\def\ENDPROGRAM{\untab{\bf end}}
+\def\USES{{\bf uses\ }\tab}
+\def\ENDUSES{\untab{\bf end}}
+\def\MODULE{{\bf mod}\tab{\bf ule\ }}
+\def\ENDMODULE{\untab{\bf end}}
+\def\SPECIFICATION{{\bf spe}\tab{\bf cification\ }}
+\def\ENDSPECIFICATION{\untab{\bf end}}
+\def\IMPLEMENTATION{{\bf imp}\tab{\bf lementation\ }}
+\def\ENDIMPLEMENTATION{\untab{\bf end}}
+\def\IMPORT{{\bf imp}\tab{\bf orts\ }}
+\def\ENDIMPORT{\untab{\bf end}}
+\def\TYPE{{\bf type\ }\tab}
+\def\ENDTYPE{\untab}
+\def\VAR{{\bf var\ }\tab}
+\def\ENDVAR{\untab}
+\def\CONST{{\bf const\ }\tab}
+\def\ENDCONST{\untab}
+\def\ARRAY{{\bf array\ }}
+\def\RECORD{\tab{\bf rec}\tab{\bf ord\ }}
+\def\ENDRECORD{\untab{\bf end}$\-$}
+\def\BEGIN{}
+\def\IF{{\bf if }\tab\ \ \tab}
+\def\ELSE{\untab\untab [\!]$\>$}
+\def\ENDIF{\untab\untab {\bf fi}}
+\def\THEN{\ \longrightarrow\ \ \tab}
+\def\SKIP{\emptyset}
+\def\WHILE{{\bf whi}\tab{\bf le\ }}
+\def\ENDWHILE{\untab{\bf end}}
+\def\DO{{\bf\ do\ }}
+\def\DDO{{\bf do\ }\tab}
+\def\ENDDO{\untab{\bf end}}
+\def\FORALL{{\bf for}\tab{\bf\ all\ }}
+\def\FOR{{\bf for}\tab\ }
+\def\ENDFOR{\untab{\bf end}}
+\def\ENDFORALL{\untab{\bf end}}
+\def\PARALLEL{{\bf\ parallel\ }}
+\def\REPEAT{{\bf rep}\tab{\bf eat\ }}
+\def\UNTIL{\untab{\bf until\ }}
+\def\AND{\mathbin{\hbox{\bf and}}}
+\def\OR{\mathbin{\hbox{\bf or}}}
+\def\NOT{\mathop{\hbox{\bf not}}}
+\def\CAT{\mathbin{\&}}
+\def\OF{{\bf\ of\ }}
+\def\IN{{\bf\ in\ }}
+\def\DIV{\mathbin{\hbox{\bf div}}}
+\def\MOD{\mathbin{\hbox{\bf mod}}}
+\def\PROCEDURE{{\bf pro}\tab{\bf cedure\ }}
+\def\ENDPROCEDURE{\untab{\bf end}}
+\def\FUNCTION{{\bf fun}\tab{\bf ction\ }}
+\def\ENDFUNCTION{\untab{\bf end}}
+\def\RETURNS{{\bf return\ }}
+\def\INP{{\bf in\ }}
+\def\OUTP{{\bf out\ }}
+\def\INOUTP{{\bf in/out\ }}
+\def\PRIVATE{{\bf private\ }}
+
diff --git a/web/noweb/contrib/davelove/Makefile b/web/noweb/contrib/davelove/Makefile
new file mode 100644
index 0000000000..5a349d23c1
--- /dev/null
+++ b/web/noweb/contrib/davelove/Makefile
@@ -0,0 +1,6 @@
+SHELL=/bin/sh
+all:
+source:
+install:
+clean:
+ /bin/rm -f *.dvi *.log *.aux
diff --git a/web/noweb/contrib/davelove/README b/web/noweb/contrib/davelove/README
new file mode 100644
index 0000000000..2ca66a6bac
--- /dev/null
+++ b/web/noweb/contrib/davelove/README
@@ -0,0 +1,2 @@
+subref.doc is a literate version of the code that noxref uses to
+number definitions 7a, 7b, 7c, and so on.
diff --git a/web/noweb/contrib/davelove/email b/web/noweb/contrib/davelove/email
new file mode 100644
index 0000000000..eeaf4fb95f
--- /dev/null
+++ b/web/noweb/contrib/davelove/email
@@ -0,0 +1 @@
+d.love@daresbury.ac.uk
diff --git a/web/noweb/contrib/davelove/subref.doc b/web/noweb/contrib/davelove/subref.doc
new file mode 100644
index 0000000000..93a5c48d8d
--- /dev/null
+++ b/web/noweb/contrib/davelove/subref.doc
@@ -0,0 +1,235 @@
+%<*x> ^^A -*-latex-*-
+% [Standard D. Carlisle boilerplate.]
+% This file may be used without modification as a style (.sty) file.
+%
+% If you have Mittelbach's doc.sty, this file may be formatted with a
+% command like:
+% latex subref.sty
+%
+% If you have the Mittelbach/Duchier/Braams docstrip utility, you may
+% produce a faster loading .sty file.
+% Rename this file to: subref.doc
+% Then run this file through *plain* TeX:
+% tex subref.doc
+% This should produce the file subref.sty.
+% If you do not have plain TeX on your system, you can trick LaTeX into
+% doing the work as follows:
+% latex \def\fmtname{plain} \input subref.doc
+% Note that you may need to quote the arguments here to stop your
+% operating system treating the \ characters incorrectly.
+%
+% latex subref.doc
+% will produce a typeset version of the documentation, as above.
+%
+% [Although this is a fairly trivial style, it is for a literate
+% programming task, so it had better be written literately, i.e. with the
+% `doc' option.]
+%
+% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+\def\plain{plain}\ifx\fmtname\plain\csname fi\endcsname
+ \def\batchfile{subref.doc}
+ \input docstrip
+ \preamble
+
+ Copyright D.Love, SERC Daresbury Laboratory, 1993
+ The `doc' version of this style is re-distributable and usable
+ under conditions of the GNU copyleft, but please mark any changes,
+ list them here and report any major enhancements to the author.
+ Do not distribute the stripped version of this file.
+
+ \endpreamble
+ \generateFile{subref.sty}{t}{\from{subref.doc}{}}
+ \endinput
+\fi
+%
+\ifcat a\noexpand @\let\next\relax\else\def\next{%
+ \documentstyle[doc%,a4
+ ,subref]{article}\MakePercentIgnore}\fi\next
+%
+%\def\eatmodule<#1>{}\eatmodule
+%</x>
+% \def\fileversion{1.0}
+% \def\filedate{7/7/93}
+% \def\docdate {7/7/93}
+% \CheckSum{113}
+%% \CharacterTable
+%% {Upper-case \A\B\C\D\E\F\G\H\I\J\K\L\M\N\O\P\Q\R\S\T\U\V\W\X\Y\Z
+%% Lower-case \a\b\c\d\e\f\g\h\i\j\k\l\m\n\o\p\q\r\s\t\u\v\w\x\y\z
+%% Digits \0\1\2\3\4\5\6\7\8\9
+%% Exclamation \! Double quote \" Hash (number) \#
+%% Dollar \$ Percent \% Ampersand \&
+%% Acute accent \' Left paren \( Right paren \)
+%% Asterisk \* Plus \+ Comma \,
+%% Minus \- Point \. Solidus \/
+%% Colon \: Semicolon \; Less than \<
+%% Equals \= Greater than \> Question mark \?
+%% Commercial at \@ Left bracket \[ Backslash \\
+%% Right bracket \] Circumflex \^ Underscore \_
+%% Grave accent \` Left brace \{ Vertical bar \|
+%% Right brace \} Tilde \~}
+%%
+% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%
+% \textwidth=355pt ^^A Allow macrocode text with 72 columns.
+% \CodelineIndex ^^A Code lines numbered.
+% \DisableCrossrefs ^^A No Cross references.
+% \MakeShortVerb{\"} ^^A "\foo" works like \verb+\foo+
+%
+% \title{{\tt subref.sty}:\\Counting references on pages\thanks{This
+% file has version number \fileversion{} dated \filedate{}. The
+% documentation was last revised on \docdate.}}
+% \author{Dave Love}
+% \date{}
+% \begin{document}
+% \maketitle
+% \begin{abstract}
+% \noindent This \LaTeX{} style option
+% provides a mechanism for defining `page
+% sub-references' using "\sublabel{foo}" referenced with
+% "\subpageref{foo}". Sub-references will be numbered like these real
+% examples: \subpageref{ref:foo}, \subpageref{ref:bar},
+% \subpageref{ref:baz}\sublabel{ref:foo}\sublabel{ref:bar}\sublabel{ref:baz}
+% etc.\ unless there is only one on the page, in which case the letter
+% will be dropped like this: \subpageref{ref:fred}.
+% \end{abstract}
+%
+% \subsection*{Usage}
+%
+% For use in "noweb", Norman Ramsey requires:\DescribeMacro{\subpageref}
+% \begin{quote}
+% What's wanted is a latex macro "\subpageref{quux}" that produces
+% either a page number (for a page containing only one definition) or
+% a page number followed by a, b, c, etc\dots
+% \end{quote}
+% To be able to use "\subpageref" we must define the label with
+% "\sublabel"\DescribeMacro{\sublabel}, used like label. (Using
+% "\ref" \DescribeMacro{\ref} with a label defined by "\sublabel" will
+% produce the sub-reference number, by the way, and "\pageref"
+% \DescribeMacro{\pageref} works as expected.) Note that
+% "\subpageref" is robust and "\ref" and "\pageref" are defined to be
+% robust also, as they will be in future \LaTeX{} releases.
+% Incidentally, these expand to the relevant text plus "\null"---you
+% might want to strip this off, e.g.\ for sorting lists.
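+% For example, a minimal sketch (the label names are invented):
+% \begin{quote}
+%   "\sublabel{lab:a}" \dots{} "\sublabel{lab:b}" \dots{} see pages
+%   "\subpageref{lab:a}" and "\subpageref{lab:b}".
+% \end{quote}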
+%
+% \StopEventually
+%
+% \subsection*{Code}
+%
+% There are various ways we might tackle this task (which is made
+% non-trivial by the well-known asynchrony of (La)\TeX's output
+% routine), but they all must depend on hacks in the ".aux" file or a
+% similar one.
+% Joachim Schrod's "fnpag.sty" does the same sort of thing differently
+% to this \LaTeX-specific approach. See "latex.tex" for enlightenment
+% on the cross-referencing mechanism and the \LaTeX{} internals used
+% below.
+% \begin{macro}{\subpageref}
+% The "\subpageref" macro first does a normal "\pageref". If the
+% reference is actually defined, it then goes on to check whether the
+% control sequence "2on"\meta{page referenced} is defined and sets the
+% "\ref" value to get "a" etc.\ if so. The magic, of course, is in
+% defining the "2on" bit appropriately.
+% \begin{macrocode}
+\newcommand{\subpageref}[1]{%
+ \pageref{#1}%
+ \@ifundefined{r@#1}%
+ {}%
+ {\@ifundefined{2on\@pageref{#1}}%
+ {}%
+ {\ref{#1}}}}
+% \end{macrocode}
+% \end{macro}
+% \begin{macro}{\@pageref}
+% This is like "\pageref", but expands to "\relax" without a warning
+% if the reference is undefined.
+% \begin{macrocode}
+\def\@pageref#1{\expandafter\expandafter\expandafter
+ \@cdr\csname r@#1\endcsname\@nil}
+% \end{macrocode}
+% \end{macro}
+% \begin{macro}{\sublabel}
+% This is like the usual "\label" command, except that it writes
+% "\newsublabel" onto the ".aux" file rather than "\newlabel".
+% \begin{macrocode}
+\newcommand{\sublabel}[1]{%
+ \@bsphack\if@filesw {\let\thepage\relax
+ \def\protect{\noexpand\noexpand\noexpand}%
+ \edef\@tempa{\write\@auxout{\string
+ \newsublabel{#1}{{}{\thepage}}}}%
+ \expandafter}\@tempa
+ \if@nobreak \ifvmode\nobreak\fi\fi\fi\@esphack}
+% \end{macrocode}
+% \end{macro}
+% \begin{macro}{\newsublabel}
+% This is the macro that does the important work. It is called with the
+% same sort of arguments as "\newlabel": the first argument is the
+% label name and the second is "{"\meta{ref value}"}{"\meta{page
+% number}"}". (Note that the only definition here which needs to be
+% global is the one which is, and that "\global" is redefined by
+% "\enddocument", which will bite you if you use it\dots.)
+%
+% First we extract the page number into "\this@page".
+% \begin{macrocode}
+\newcommand{\newsublabel}[2]{%
+ \edef\this@page{\@cdr#2\@nil}%
+% \end{macrocode}
+% Then we see whether it's greater than the value of "\last@page"
+% which was stashed away by the last "\newsublabel" (or is zero if
+% this is the first one). If the page has changed, we reset the
+% counter "\sub@page" telling us how many sub-labels there have been
+% on the page.
+% \begin{macrocode}
+ \ifnum\this@page>\last@page
+ \sub@page=0\relax
+ \fi
+ \last@page=\this@page
+ \advance\sub@page by 1
+% \end{macrocode}
+% If we've had at least two on the page, we define the "2on"\meta{page
+% no.} macro to indicate the fact.
+% \begin{macrocode}
+ \ifnum\sub@page=2
+ \global\@namedef{2on\this@page}{}%
+ \fi
+% \end{macrocode}
+% Then we write a normal "\newlabel" with the sub-reference as the
+% normal reference value in the second argument.
+% \begin{macrocode}
+ \edef\@tempa{\noexpand\newlabel{#1}%
+ {{\@alph{\number\sub@page}}{\this@page}}}%
+ \@tempa}
+% \end{macrocode}
+% \end{macro}
+% \begin{macro}{\last@page}
+% \begin{macro}{\sub@page}
+% We need to define these counters. "\last@page" could be a
+% suitably-initialised macro instead.
+% \begin{macrocode}
+\newcount\last@page
+\newcount\sub@page
+% \end{macrocode}
+% \end{macro}
+% \end{macro}
+% \begin{macro}{\pageref}
+% \begin{macro}{\ref}
+% Let's use Rainer's new expandable definitions of "\ref" and
+% "\pageref" to minimise the risk of nasty surprises.
+% \begin{macrocode}
+%% RmS 92/08/14: made \ref and \pageref robust
+\def\ref#1{\@ifundefined{r@#1}{{\reset@font\bf ??}\@warning
+ {Reference `#1' on page \thepage \space
+ undefined}}{\expandafter\expandafter\expandafter
+ \@car\csname r@#1\endcsname
+ \@nil\null}}
+\def\pageref#1{\@ifundefined{r@#1}{{\reset@font\bf ??}\@warning
+ {Reference `#1' on page \thepage \space
+ undefined}}{\expandafter\expandafter\expandafter
+ \@cdr\csname r@#1\endcsname
+ \@nil\null}}
+% \end{macrocode}
+% \end{macro}
+% \end{macro}\sublabel{ref:fred}
+% \Finale
+% \end{document}
+%
+\endinput
diff --git a/web/noweb/contrib/fischer/README b/web/noweb/contrib/fischer/README
new file mode 100644
index 0000000000..12150874a3
--- /dev/null
+++ b/web/noweb/contrib/fischer/README
@@ -0,0 +1,40 @@
+Date: Wed, 23 Feb 2011 23:02:00 -0500
+From: Greyson Fischer <greyson@foosoft.us>
+To: nr@cs.tufts.edu
+Subject: noweb + interpreter line = noscript
+
+
+Dear Norman,
+
+I have been a literate programmer for a few years now. Although I must
+limit my use of it most of the time due to corporate pressure, it comes
+in extremely handy for particularly new or challenging tasks. Of course
+I use noweb for many of my literate programs, preferring it over even
+cweb for C and C++ (sure, 'int' isn't bold, but at least it's indented
+the way I like to read).
+
+I found myself repeating a pattern when it came to writing in
+interpreted languages; specifically those that use a shebang to specify
+their interpreter. I write the literate script, tangle it, and then copy
+the result off to be used. The problem with this approach comes when I
+want, or need, to edit the script again later. Although I know it came
+from noweb (usually because of the complete lack of comments) I couldn't
+always track down the original source in a timely manner, leading me to
+make changes to the derived script, rather than the source.
+
+So, I came up with a simple fix. noscript tangles a document on the fly
+(assuming the first line has %!) and executes it inline with the
+specified interpreter.
+
+To use it, take a noweb document (for example: myscript.nw) which
+tangles <<*>> into, for example, a shell script. Add a "%!/bin/sh" at
+the top of the file. Run 'noscript myscript.nw'. Done, the script has
+been executed.
+
+I've attached my version 0.1 in case you're interested (along with some
+trivial test documents). It's made it quite a bit easier for me to keep
+the document and the script together. Perhaps you, or one of your users,
+might find it of use.
+
+Cheers,
+Greyson Fischer
diff --git a/web/noweb/contrib/fischer/noscript-0.1/noscript b/web/noweb/contrib/fischer/noscript-0.1/noscript
new file mode 100755
index 0000000000..7b2a559005
--- /dev/null
+++ b/web/noweb/contrib/fischer/noscript-0.1/noscript
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+SCRIPT="$1"; shift
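+# Extract the interpreter from the leading '%!' line; /bin/sh is
+# rewritten to '/bin/sh -s' so that the tangled script, arriving on
+# standard input, still sees its command-line arguments.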
+INTERP=`sed -ne '
+ s!/bin/sh!/bin/sh -s!;
+ 1s/^%!//p
+ ' "$SCRIPT"`
+
+case "$INTERP" in
+ '')
+ echo 'No interpreter line (%!) found!' >&2
+ exit 1
+ ;;
+ *) notangle "$SCRIPT" | exec $INTERP - "$@" ;;
+esac
diff --git a/web/noweb/contrib/fischer/noscript-0.1/test-none.nw b/web/noweb/contrib/fischer/noscript-0.1/test-none.nw
new file mode 100644
index 0000000000..eb54b3864a
--- /dev/null
+++ b/web/noweb/contrib/fischer/noscript-0.1/test-none.nw
@@ -0,0 +1,8 @@
+@ This is a test script which displays the ability of my new noweb-script to
+actually execute a script which is embedded in a noweb styled file. This test
+case ensures that nothing is run when there is no interpreter specified.
+
+<<*>>=
+/bin/false -- nothing here.
+
+@ And that should do it.
diff --git a/web/noweb/contrib/fischer/noscript-0.1/test-py.nw b/web/noweb/contrib/fischer/noscript-0.1/test-py.nw
new file mode 100644
index 0000000000..113b5a8e0f
--- /dev/null
+++ b/web/noweb/contrib/fischer/noscript-0.1/test-py.nw
@@ -0,0 +1,23 @@
+%!/usr/bin/env python
+
+@ This is a test script which displays the ability of my new noweb-script to
+actually execute a script which is embedded in a noweb styled file.
+
+<<*>>=
+<<imports>>
+
+print "Hello Python World!"
+print "Args:", sys.argv[1:]
+for n in xrange( 0, 10 ):
+ <<Do something with numbers>>
+print
+
+@ And that should do it, now we just need to do something with the numbers:
+
+<<Do something with numbers>>=
+print n,
+
+@ Oh yeah, I need to import [[sys]] as well
+
+<<imports>>=
+import sys
diff --git a/web/noweb/contrib/fischer/noscript-0.1/test-sh.nw b/web/noweb/contrib/fischer/noscript-0.1/test-sh.nw
new file mode 100644
index 0000000000..18f29b2efd
--- /dev/null
+++ b/web/noweb/contrib/fischer/noscript-0.1/test-sh.nw
@@ -0,0 +1,10 @@
+%!/bin/sh
+
+@ This is a test script which displays the ability of my new noweb-script to
+actually execute a script which is embedded in a noweb styled file.
+
+<<*>>=
+echo "Hello Bourne World!"
+echo "Args: $*"
+
+@ And that should do it.
diff --git a/web/noweb/contrib/gregory/README b/web/noweb/contrib/gregory/README
new file mode 100644
index 0000000000..ae55232fb7
--- /dev/null
+++ b/web/noweb/contrib/gregory/README
@@ -0,0 +1,2 @@
+dots.nw enables the use of trailing dots in chunk names. It does the same
+job as the example program `disambiguate.nw', but it is written in perl.
diff --git a/web/noweb/contrib/gregory/dots.nw b/web/noweb/contrib/gregory/dots.nw
new file mode 100644
index 0000000000..b7d1f650dc
--- /dev/null
+++ b/web/noweb/contrib/gregory/dots.nw
@@ -0,0 +1,154 @@
+\section*{Resolving trailing dots\ldots}
+Gregory Tucker-Kellogg\\
+gtk@walsh.med.harvard.edu
+\subsection*{Introduction}
+
+Unlike \verb|WEB|, \verb|noweb| does not allow the use of trailing
+dots in chunk (section) names. \verb|Dots| corrects for this. It is
+similar but not identical to \verb|disambiguate|, an \verb|Icon|
+program to accomplish the same task. \verb|Dots| is written in
+\verb|perl|.
+
+Before it does much else, \verb|noweb| creates a markup description of
+a source file. That markup description is passed along (in both
+\verb|noweave| and \verb|notangle|) to other programs in the pipeline
+(\verb|totex| for \verb|noweave| and \verb|nt| for \verb|notangle|).
+\verb|Dots| intervenes after the markup stage as a filter. The chunk
+name references are passed in the form described in Ramsey's paper,
+i.e.,
+\begin{quote}
+\leavevmode\rlap{\begin{tabular}{ll}
+\tt @defn {\rm\it name}&The code chunk named {\rm\it name} is being defined\\
+\tt @use {\rm\it name}&A reference to code chunk named {\rm\it name}\\
+\end{tabular}}
+\end{quote}
+If trailing dots are used in a chunk name, they will be passed along
+at the markup stage verbatim without any attempt at resolution.
+That's where \verb|dots| comes in.
+
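+It can be run like any other noweb filter, e.g.\ (a sketch; the file
+names are invented):
+\begin{verbatim}
+noweave -filter ./dots doc.nw > doc.tex
+\end{verbatim}
+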
+We require two passes over the noweb code as passed through
+\verb|markup|. The first pass picks out all of the unambiguous chunk
+names and stores them in associative arrays. In between the passes,
+we expand the ambiguous names and do some simple error checking. The
+second pass does a simple replace on incomplete names and writes
+output to the next stage of the pipeline.
+
+The choices for handling the input stream seem to be between sucking
+the whole markup into memory at once (as \verb|disambiguate| does) or
+storing the markup as a temporary file between the passes. The second
+is slower but will not break as the file gets bigger. We'll choose
+the first for now.
+
+\subsection*{Program outline}
+<<*>>=
+#!/usr/local/bin/perl
+while (<>) { # the first pass takes the input from STDIN
+ <<create lists of identifiers>>
+}
+<<resolve ambiguities in identifier names>>
+<<printout while replacing those with trailing dots>>
+@
+
+\subsection*{Representation}
+
+What's the best structure for the list of chunk names? It could just be a
+normal array, except we would have to check if a given name is already
+defined before adding it too the list. We could make an associative
+array, except we really don't have a key to associate. On the other
+hand, we could make a single associative array of names with
+associations ``complete'' and ``incomplete'' depending on the
+presence of dots. This would require no checking on predefinitions,
+and a key-sorted list brings up each full chunk name as the {\em next}
+member of the list for which [[$completion{$identifier}=$complete]].
+
+<<create lists of identifiers>>=
+if (/^@(defn|use)\s(.*)$/) { # we've found a name of some sort
+ if (($truncated = $2) =~ s/\.\.\.$//) { # this one ends in dots.
+ $completion{$truncated} ="incomplete";
+ $truncations{$.-1} = $truncated;
+ $usage_type{$.-1} = $1;
+ }
+ else {$completion{$2} ="complete";}
+ }
+	push(@lines,$_);
+
+@
+
+\subsection*{Chunkname resolution}
+The associative array [[%completion]] contains all of the names. The
+associative array [[%truncations]] contains the line numbers of the
+names with trailing dots. We can change the values of [[%completion]]
+from ``complete'' and ``incomplete'' to a number representing the
+index of the appropriate completion. If there is more than one, we
+can print out a warning but still resolve on the closest name.
+
+<<resolve ambiguities in identifier names>>=
+@namelist = sort(keys(%completion));
+$j = $i = 0;
+while ($i < $#namelist) { #collect all the ambiguities in a row
+ while ($completion{$namelist[$j]} eq "incomplete") {
+ $ambiguity_found = 1;
+	$j = $j + 1;
+ }
+ <<check for remaining ambiguity>>
+ foreach $name (@namelist[$i..$j]) {
+ $completion{$name} = $namelist[$j];
+ }
+ $j=$i=$j + 1;
+ undef($ambiguity_found);
+}
+@
+
+
+After we've gotten the expansions of abbreviated chunk names, we still
+might run into a problem. First, if no correct expansion was
+established, we might just misassign the abbreviation. Second, the
+expansion might still be ambiguous if more than one complete expansion
+can give the same abbreviation. The first case is a fatal error. The second
+can be resolved by seeing if a complete chunkname immediately
+following the first completion is a solution. If so, we take the
+first completion anyway but print a warning for the user.
+
+<<check for remaining ambiguity>>=
+if (defined $ambiguity_found) {
+ $suggested = $namelist[$j];
+ $nextchance = $namelist[$j+1];
+ foreach $name (@namelist[$i..$j-1]) {
+ if (substr($suggested,0,length($name)) ne $name) {
+ die "FATAL ERROR: can't resolve @<<$name...>>\n"
+ }
+ }
+ if ($completion{$nextchance} eq "complete") {
+ foreach $name (@namelist[$i..$j-1]) {
+ if (substr($nextchance,0,length($name)) eq $name) {
+ print STDERR "WARNING--Ambiguous chunkname:\n";
+ print STDERR "\t<<${name}...@>> could be either\n";
+ print STDERR "\t<<$suggested@>> or\n\t<<$nextchance@>>\n";
+ print STDERR "I will use <<$suggested@>>\n"
+ }
+ }
+ }
+}
+@
+
+
+\subsection*{Printout}
+Finally, the [[%truncations]] and [[%usage_type]] arrays are put to
+work. We use the line numbers (as [[keys()]]) to pull up the
+truncations, and then associate truncations with completed names.
+Since we found everything on the first pass we don't have to scan
+each line for a [[@defn]] or [[@use]] statement. Note: this part of
+the program, analogous to {\em pass2} in \verb|disambiguate|, is
+different from \verb|disambiguate|, which went through a search on
+the second pass. If we decided to store the markup in a temporary
+file after the first pass to save memory, we would change this section
+for blockwise printout. We still would not be forced to scan each
+line.
+
+<<printout while replacing those with trailing dots>>=
+foreach $trunc_line (sort(keys(%truncations))) {
+ $lines[$trunc_line] =
+ "\@$usage_type{$trunc_line} $completion{$truncations{$trunc_line}}\n";
+}
+print @lines;
+@
diff --git a/web/noweb/contrib/gregory/email b/web/noweb/contrib/gregory/email
new file mode 100644
index 0000000000..04b61f60a9
--- /dev/null
+++ b/web/noweb/contrib/gregory/email
@@ -0,0 +1 @@
+gtk@walsh.med.harvard.edu (Gregory Tucker-Kellogg)
diff --git a/web/noweb/contrib/jobling/Makefile b/web/noweb/contrib/jobling/Makefile
new file mode 100644
index 0000000000..5f91558927
--- /dev/null
+++ b/web/noweb/contrib/jobling/Makefile
@@ -0,0 +1,34 @@
+PROG = correct-refs
+DOCSRC = $(PROG).tex
+PROGSRC = $(PROG).csh
+SCRIPTS = list-anchors.awk awk-scripts.awk
+
+all: correct-refs.tex correct-refs.csh all-scripts
+
+correct-refs.tex: correct-refs.nw
+ noweave -delay -index $< > $@
+
+correct-refs.csh: correct-refs.nw
+ notangle -Rcorrect-refs.csh $< | cpif $@
+ chmod +x $@
+
+all-scripts: correct-refs.nw
+ notangle -Rlist-anchors.awk $< | cpif list-anchors.awk
+ notangle -Rawk-scripts.awk $< | cpif awk-scripts.awk
+ touch all-scripts
+
+install:
+ cp correct-refs.csh $(HOME)/bin
+ cp *.awk $(HOME)/lib
+
+tidy:
+ -rm *~ *% *.bak *.log *.blg
+
+clean: tidy
+ -rm *.ps *.dvi *.toc *.aux *.bbl *.dep $(PROG).shar
+
+realclean: clean
+ -rm $(DOCSRC) $(PROGSRC) $(SCRIPTS)
+
+shar:
+ shar README Makefile $(PROG).nw > $(PROG).shar
diff --git a/web/noweb/contrib/jobling/README b/web/noweb/contrib/jobling/README
new file mode 100644
index 0000000000..c522c48295
--- /dev/null
+++ b/web/noweb/contrib/jobling/README
@@ -0,0 +1,21 @@
+Correct-refs:
+============
+
+A set of awk scripts, and a csh driver for them, to correct the internal
+anchors and links produced by noweave -latex+html followed by latex2html.
+
+This is necessary because the design of the html back-ends for
+noweave assumes that the resulting HTML file will be the single
+document produced by latex2html -split 0, but this is not such
+a good idea if the document is large (e.g. the nuweb example).
+By default latex2html will split documents into nodes at
+section boundaries --- but if the document contains links,
+the names have to be changed from "#name" to "noden.html#name"
+(see the example below).
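+
+For example (anchor and node names invented here), a link written as
+<a href="#intro"> must become <a href="node3.html#intro"> once
+latex2html has placed the anchor in node3.html.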
+
+The full documentation is contained in correct-refs.nw. Comments
+to
+
+Chris P. Jobling, University of Wales, Swansea
+C.P.Jobling@Swansea.ac.uk
+
+
diff --git a/web/noweb/contrib/jobling/correct-refs.bbl b/web/noweb/contrib/jobling/correct-refs.bbl
new file mode 100644
index 0000000000..dbeac6d068
--- /dev/null
+++ b/web/noweb/contrib/jobling/correct-refs.bbl
@@ -0,0 +1,36 @@
+\begin{thebibliography}{1}
+
+\bibitem{awk-book}
+Alfred~V. Aho, Brian~W. Kernighan, and Peter~J. Weinberger.
+\newblock {\em The {AWK} Programming Language}.
+\newblock Addison Wesley, Reading, MA, USA, 1988.
+
+\bibitem{gawk-manual}
+Diane~Barlow Close, Arnold~D. Robbins, Paul~H. Rubin, and Richard Stallman.
+\newblock {\em The {GAWK} Manual}.
+\newblock Free Software Foundation, 675 Massachusetts Avenue, Cambridge, MA
+ 02139, USA, version 0.11 beta edition, October 1989.
+
+\bibitem{nutshell-unix}
+Daniel Gilly.
+\newblock {\em UNIX in a Nutshell}.
+\newblock O'Reilly \& Associates, Sebastopol, CA, USA, {System V} edition,
+ 1992.
+
+\bibitem{ramsey94}
+Norman Ramsey.
+\newblock Literate programming simplified.
+\newblock {\em IEEE Software}, pages 97--105, September 1994.
+
+\bibitem{perl-llama}
+Randal~L. Schwartz.
+\newblock {\em Learning perl}.
+\newblock O'Reilly \& Associates, Sebastopol, CA, USA, 1993.
+
+
+\bibitem{perl-camel}
+Larry Wall and Randal~L. Schwartz.
+\newblock {\em Programming perl}.
+\newblock O'Reilly \& Associates, Sebastopol, CA, USA, 1991.
+
+\end{thebibliography}
diff --git a/web/noweb/contrib/jobling/correct-refs.nw b/web/noweb/contrib/jobling/correct-refs.nw
new file mode 100644
index 0000000000..6ab051e9a1
--- /dev/null
+++ b/web/noweb/contrib/jobling/correct-refs.nw
@@ -0,0 +1,391 @@
+%========================================================================%
+% @noweb-file{
+% author = "C.P. Jobling",
+% version = "$Revision: 1.5 $",
+% date = "$Date: 1995/04/01 13:54:43 $,
+% filename = "correct-refs.nw",
+% address = "Control and computer aided engineering
+% Department of Electrical and Electronic Engineering
+% University of Wales, Swansea
+% Singleton Park
+% Swansea SA2 8PP
+% Wales, UK",
+% telephone = "+44-792-295580",
+% FAX = "+44-792-295686",
+% checksum = "",
+% email = "C.P.Jobling@Swansea.ac.uk",
+% codetable = "ISO/ASCII",
+% keywords = "",
+% supported = "yes",
+% abstract = "Postprocessing routines to correct link
+% errors introduced by \verb|rawhtml| when
+% anchors are moved into sub-nodes of the
+% main HTML document.",
+% docstring = "The checksum field above contains a CRC-16
+% checksum as the first value, followed by the
+% equivalent of the standard UNIX wc (word
+% count) utility output of lines, words, and
+% characters. This is produced by Robert
+% Solovay's checksum utility.",
+% }
+%========================================================================
+\documentclass[a4paper]{article}
+\usepackage{noweb,html,multicol}
+
+\newcommand{\noweb}{\texttt{noweb}}
+\newcommand{\command}{\texttt{correct-xref}}
+
+\title{\command \\
+ Correct HTML In-Document Anchors and Hyperlinks}
+\author{C.P. Jobling \\
+University of Wales, Swansea \\
+(C.P.Jobling@Swansea.ac.uk)}
+\date{$Date: 1995/04/01 13:54:43 $ \\
+Version $Revision: 1.5 $}
+
+\begin{document}
+\maketitle
+\begin{abstract}
+Postprocessing routines to correct link
+errors introduced by \verb|rawhtml| when
+anchors are moved into sub-nodes of the
+main HTML document.
+\end{abstract}
+
+\tableofcontents
+<<Copyright>>=
+# This file is part of the correct-refs.nw package which is
+# Copyright (C) C.P. Jobling, 1995
+#
+# The code may be freely used for any purpose whatever
+# provided that it distributed with
+# the noweb source correct-refs.nw and that this copyright
+# notice is kept intact.
+#
+# Please report any problems, errors or suggestions to
+# Chris Jobling, University of Wales, Swansea
+# email: C.P.Jobling@Swansea.ac.uk
+# www-home page: http://faith.swan.ac.uk/chris.html/cpj.html
+#
+# $Id: correct-refs.nw,v 1.5 1995/04/01 13:54:43 eechris Exp $
+@
+
+\section{Purpose}
+When a \LaTeX $+$ HTML document is processed by \LaTeX 2HTML the
+resulting HTML document consists of a set of smaller documents, called
+``nodes''. Cross-referencing between nodes works well if \LaTeX{}
+\verb|\label| and \verb|\ref| commands have been used to establish
+them. However, if the \verb|rawhtml| environment is used to create
+HTML anchors, e.g.
+\begin{verbatim}
+I want
+\begin{rawhtml}
+<a name="anchor">this</a>
+\end{rawhtml}
+to be an anchor.
+ :
+ :
+And now I want to go back to the anchor:
+follow this
+\begin{rawhtml}
+<a href="#anchor">link</a>
+\end{rawhtml}
+\end{verbatim}
+will only work if the link and the anchor happen to be in the same
+document. This can only be guaranteed when the \verb|-split 0| option
+is used.
+
+Why is this a problem? The answer is that I am interested in
+``literate programming'' using \noweb{} \cite{ramsey94}, which has an
+option to create a \LaTeX{} file in which fancy mathematics, tables
+and figures accompany the code chunks, and in which the code and
+variable cross-references are formatted using \verb|rawhtml|. These
+cross-references are exactly
+as described above. For largeish programs, particularly those that make
+heavy use of mathematics, tables and figures in the documentation sections
+(exactly the kind that I write!), it is inconvenient to have a single
+HTML file, so I want to have the document split into pieces. Hence, I
+have to postprocess the resulting HTML files changing links so that
+the node-name of the anchor is included.
+
+For the previous example, say
+\begin{verbatim}
+I want
+\begin{rawhtml}
+<a name="anchor">this</a>
+\end{rawhtml}
+to be an anchor.
+\end{verbatim}
+ends up as:
+\begin{verbatim}
+I want
+<a name="anchor">this</a>
+to be an anchor.
+\end{verbatim}
+in \texttt{node1.html}
+
+Then any links to this anchor which are of the form
+\begin{verbatim}
+And now I want to go back to the anchor:
+follow this
+<a href="#anchor">link</a>
+\end{verbatim}
+Have to be changed to:
+\begin{verbatim}
+And now I want to go back to the anchor:
+follow this
+<a href="node1.html#anchor">link</a>
+\end{verbatim}
+in every node (except node1.html itself).
+
+In best UNIX and \noweb{} tradition, the tools to achieve this are
+written in a mixture of \texttt{csh} \cite[Chapter 5]{nutshell-unix}
+and \texttt{awk} \cite{awk-book,gawk-manual}\cite[Chapter 10]{nutshell-unix},
+although one day I might redo them in
+\texttt{perl} \cite{perl-llama,perl-camel} for better portability.
+
+\section{Usage}
+The command to correct the cross-references is
+\begin{quote}
+\command{} {\it html-dir}
+\end{quote}
+where {\it html-dir} is the document directory created by \LaTeX 2HTML
+from {\it html-dir.tex}.
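+
+For instance (directory name invented), if \LaTeX 2HTML produced the
+directory {\it mydoc} from {\it mydoc.tex}, one would run
+\begin{quote}
+\command{} {\it mydoc}
+\end{quote}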
+
+\section{The Code}
+
+\subsection{Finding the anchors}
+On examining the HTML created by \LaTeX 2HTML, I noticed that all
+anchors created from \LaTeX{} \verb|\ref|, \verb|\tableofcontents|,
+\verb|\index| and by the cross-referencing done by \LaTeX 2HTML itself
+are of the form
+\begin{verbatim}
+<a name=sometext>blah, blah</a>
+\end{verbatim}
+That is, the label name is not enclosed in double quotes. So, provided
+that the anchors that you create in your \verb|rawhtml| environments
+are of the form
+\begin{verbatim}
+<a name="sometext">Blah, blah</a>
+\end{verbatim}
+\command{} will be able to distinguish between anchors and links
+created by \LaTeX 2HTML and anchors and links created by \noweb{} or
+by the author of the original \LaTeX{} document.
+
+Here is an \texttt{awk} script to extract all the anchors from a
+series of documents and to output them in a list in the form
+\begin{verbatim}
+filename:anchor-name1
+filename:anchor-name2
+\end{verbatim}
+
+<<list-anchors.awk>>=
+# list-anchors.awk --- process a set of .html files and list
+# anchors in form filename:anchor-name
+# usage: [gn]awk -f list-anchors.awk *.html
+#
+<<Copyright>>
+{ <<Throw away [[<meta name=""]] stuff>>
+ for (i = 1; i <= NF; i++)
+ <<Find and print anchors>>
+}
+@
+
+\LaTeX 2HTML adds \verb|<meta name="">| tags to the head of all nodes.
+These could confuse the anchor finder, so we have to throw them away.
+
+<<Throw away [[<meta name=""]] stuff>>=
+if ($1 != "<meta")
+@
+
+
+This code just compares each field in the line with the pattern
+\verb|^name=".*"$| and when it finds one, strips it to leave just the
+anchor name and writes the filename and anchor name on {\it stdout}.
+<<Find and print anchors>>=
+if ($i ~ /^(name|NAME)\=\".*\".*$/) {
+ anchor = $i
+ sub(/^(name|NAME)\=\"/,"",anchor)
+ sub(/\".*$/,"",anchor)
+ printf("%s:%s\n",FILENAME,anchor)
+}
+@
+
+To use this program to create the anchor list:
+<<Create the list of anchors>>=
+cd $DOC
+echo Creating list of anchors
+gawk -f $LIB/list-anchors.awk *.html | sort -u >! anchor-list
+@
+
+\subsection{Generating link editing scripts}
+
+In order to correct the links in the HTML files, we now use the {\it
+ anchor-list} to control the creation of a set of \texttt{awk}
+scripts, one per html file, which will edit the links and replace
+\begin{verbatim}
+<a href="#anchor">link</a>
+\end{verbatim}
+by
+\begin{verbatim}
+<a href="node.html#anchor">link</a>
+\end{verbatim}
+unless the anchor and link happen to be in the same file.
+
+The processing is again done with a \texttt{[gn]awk} script.
+
+<<awk-scripts.awk>>=
+# awk-scripts.awk --- process a file containing node:anchor
+# information to create a set of awk
+# scripts to correct the links in the nodes.
+# usage: [gn]awk -f awk-scripts.awk anchor-list
+#
+<<Copyright>>
+BEGIN {FS = ":"}
+{
+ <<Collect file names and anchors>>
+}
+END {
+ <<produce \texttt{awk} scripts>>
+}
+@
+
+The first part of this script just reads the entries in the {\it
+ anchor-list} and stores the information in arrays for use later.
+
+<<Collect file names and anchors>>=
+if (! ($1 in files)) {
+ files[$1] = NR
+}
+prefix[NR] = $1
+anchor[NR] = $2
+@
+
+To produce the \texttt{awk} scripts we loop over each unique file name
+encountered:
+<<produce \texttt{awk} scripts>>=
+for (file in files) {
+ <<Open new \texttt{awk} script>>
+ <<Write \texttt{awk} commands for each anchor>>
+}
+@
+
+To create a new \texttt{awk} script:
+<<Open new \texttt{awk} script>>=
+<<Create file name for \texttt{awk} script>>
+<<Open file>>
+@
+
+The first thing we need to do is to take the name of the HTML file
+and replace its extension with \texttt{.awk}.
+
+<<Create file name for \texttt{awk} script>>=
+filename = file
+sub(/\..*$/,".awk",filename)
+@
+
+Next we open the file using redirection:
+<<Open file>>=
+printf("# awk script to correct HTML links in %s\n",file) > filename
+@
+
+To create the \texttt{awk} code, we have to loop over each item in the
+list of anchor names. We throw away any that have the same filename as
+the currently open file. It will be OK to leave these as
+\begin{verbatim}
+<a href="#anchor">link</a>
+\end{verbatim}
+because the anchor is in the same file as the link. The rest get the
+file-name prepended onto each use of the anchor name in all links.
+When this is run, the output will be:
+\begin{verbatim}
+{ gsub(/href=\"#anchor-1\"/,"href=\"node1.html#anchor-1\"") }
+{ gsub(/href=\"#anchor-2\"/,"href=\"node1.html#anchor-2\"") }
+ :
+ :
+{ gsub(/href=\"#anchor-n\"/,"href=\"nodem.html#anchor-n\"") }
+{ print $0 }
+\end{verbatim}
+The actual string printing command doesn't look much like this because
+of all the escaping that has to be done.
+
+<<Write \texttt{awk} commands for each anchor>>=
+for (i = 1; i <= NR; i++) {
+ <<Reject links to anchors in current file>>
+ printf("{ gsub(/href=\\\"#%s\\\"/,\"href=\\\"%s#%s\\\"\") }\n",
+ anchor[i],prefix[i],anchor[i]) >> filename
+}
+printf("{ print $0 }") >> filename
+close(filename)
+@
+
+<<Reject links to anchors in current file>>=
+if (prefix[i] != file)
+@
+
+To create the \texttt{awk} scripts
+<<Create \texttt{awk} scripts for HTML files>>=
+echo Creating awk scripts
+gawk -f $LIB/awk-scripts.awk anchor-list
+@
+
+<<Use the \texttt{awk} scripts to correct HTML nodes>>=
+echo Processing HTML nodes
+foreach f (*.awk)
+ set root=$f:r
+ set tmpfile=`mktemp --suffix=.html`
+ echo -n Processing $root.html
+ gawk -f $f < $root.html >! $tmpfile
+ echo "..." Done
+ cp $root.html $root.html.bak
+ cp $tmpfile $root.html
+end
+
+@
+
+\subsection{\texttt{csh} Script to do anchor correction}
+<<correct-refs.csh>>=
+#!/usr/bin/csh
+# correct-refs.csh - CSH script file to correct HTML links and anchors
+# usage: correct-refs HTMLDIR
+<<Copyright>>
+unalias rm
+set LIB = $HOME/lib
+set DOC = $argv[1]
+echo Correcting anchors in HTML dir $DOC
+<<Create the list of anchors>>
+<<Create \texttt{awk} scripts for HTML files>>
+<<Use the \texttt{awk} scripts to correct HTML nodes>>
+<<Clean up>>
+@
+
+<<Clean up>>=
+rm *.awk
+rm anchor-list
+echo Done!
+@
+
+\bibliographystyle{plain}
+\bibliography{refs}
+
+\section*{Code Chunks}
+\nowebchunks
+
+\section*{Revision History}
+
+\begin{description}
+\item[1.3 to 1.4] The {\tt sed} script didn't work for the long
+  anchor and link names generated by \noweb{}. The substitute command string
+ was too long apparently! I changed the {\tt sed} scripts to {\tt awk
+ scripts}. Also tidied a few bits of the documentation.
+\end{description}
+\end{document}
+
+
+
+
+
+
+
+
+
+
diff --git a/web/noweb/contrib/jobling/email b/web/noweb/contrib/jobling/email
new file mode 100644
index 0000000000..4d70f5cc4b
--- /dev/null
+++ b/web/noweb/contrib/jobling/email
@@ -0,0 +1 @@
+C.P.Jobling@Swansea.ac.uk
diff --git a/web/noweb/contrib/jonkrom/Makefile b/web/noweb/contrib/jonkrom/Makefile
new file mode 100644
index 0000000000..12485906bc
--- /dev/null
+++ b/web/noweb/contrib/jonkrom/Makefile
@@ -0,0 +1,15 @@
+LIB=/dev/null # override for installation
+SHELL=/bin/sh
+all: noxref.krom
+ chmod +x noxref.krom
+
+install:
+ cp noxref.krom $(LIB)
+
+source: noxref.krom
+
+noxref.krom: noxref.nw
+ notangle -Rnoxref noxref.nw > noxref.krom
+
+clean:
+ /bin/rm -f *.tex *.dvi *.ilg *.idx *.aux *.log *.blg *.bbl *~ *.ind noxref.krom
diff --git a/web/noweb/contrib/jonkrom/README b/web/noweb/contrib/jonkrom/README
new file mode 100644
index 0000000000..8d294db2b9
--- /dev/null
+++ b/web/noweb/contrib/jonkrom/README
@@ -0,0 +1,4 @@
+An altered version of noxref (written in awk) that claims to support
+the same \chunklist semantics as the Icon version, plus boilerplate
+for supporting DOS.
+
diff --git a/web/noweb/contrib/jonkrom/email b/web/noweb/contrib/jonkrom/email
new file mode 100644
index 0000000000..1b7699c64c
--- /dev/null
+++ b/web/noweb/contrib/jonkrom/email
@@ -0,0 +1 @@
+jgk@jet.uk
diff --git a/web/noweb/contrib/jonkrom/noxref.nw b/web/noweb/contrib/jonkrom/noxref.nw
new file mode 100644
index 0000000000..06480117a8
--- /dev/null
+++ b/web/noweb/contrib/jonkrom/noxref.nw
@@ -0,0 +1,491 @@
+%=============================================================================
+% ----------------------------------------------------------------------------
+% Noxref, the cross referencing program for Noweb
+
+\documentstyle[noweb]{article}
+%\RCSdef $Id: noxref.nw,v 1.2 1993/05/06 18:15:40 jgk Exp $
+
+% ----------------------------------------------------------------------------
+%\title{{\tt Noxref\thanks{\RCSId},}
+\title{{\tt Noxref\thanks{Id: noxref.nw,v 1.2 1993/05/06 18:15:40 jgk Exp},}
+ the cross referencing program for {\tt Noweb}}
+\author{\setcounter{footnote}{6}%
+ JG Krom\thanks{JET Joint Undertaking, e-mail address: jgk@jet.uk}}
+
+\date{Printed: \today}
+
+% ----------------------------------------------------------------------------
+\makeindex
+\begin{document}
+\maketitle
+
+% ----------------------------------------------------------------------------
+% A bit of jargon:
+\newcommand{\noweb}{{\tt noweb}}
+\newcommand{\noweave}{{\tt noweave}}
+\newcommand{\notangle}{{\tt notangle}}
+\newcommand{\noxref}{{\tt noxref}} \newcommand{\Noxref}{{\tt Noxref}}
+\newcommand{\markup}{{\tt markup}}
+\newcommand{\awk}{{\rm awk}}
+\newcommand{\unix}{{\sc unix}}
+\newcommand{\dos}{{\sc msdos}}
+\newcommand{\chklist}{{$\setminus$\tt nowebchunks}}
+%----------------------------------------------------------------------------
+\section{Introduction}
+
+N Ramsey presents in [2] a clean, language-independent system for
+literate programming, called \noweb. One of the components of
+\noweave, the ``weave'' program for this system, is the \noxref\ program.
+
+This system has been ported to \dos\ by L Wittenberg.
+
+The author of this paper customised this \noxref\ program. The purpose
+of this paper is to describe these customisations. In order to
+implement these customisations in a ``literate programming'' style, the
+code written by the above-mentioned authors is included in this
+document.
+
+% ----------------------------------------------------------------------------
+\section{Problem Definition}
+
+The \noxref\ program consists of an \awk\ [1] program, driven by a
+\unix\ shell script or, as appropriate, by a \dos\ batch file. This
+\noxref\ program adds page number references to the usage and
+definitions of the code chunks in a ``woven'' printing of a literate
+program.
+
+A feature that is available in other implementations of the \noxref\
+program, the alphabetical chunk list, was missing from the \awk\
+implementation of this program. As this feature seemed useful, it is
+implemented as an addition to the existing \noxref\ \awk\ program.
+
+This \noxref\ program is the proper place to create the chunk list,
+since all the information required for the list is already collected
+by this program.
+
+The chunk list should take a form similar to a table of contents:
+chunk names, in the ``\LA{chunk name}\RA{}'' format, on the left hand
+side of the page, a list of page numbers on the right hand side of the
+page and leaders between the two. The list of page numbers should first
+contain the pages on which the chunk is defined and then the pages on
+which the chunk is used. Root chunks are to be indicated with the word
+``Root''. Chunks that are used, but not defined, are marked with the
+word ``Undefined''.
+
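+For example, an entry for a chunk defined on page~3 and used on
+pages 5 and~7 might be rendered roughly as
+\begin{verbatim}
+<chunk name> ......................... 3, 5, 7
+\end{verbatim}
+with the definition page printed underlined, as implemented below.
+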
+This whole chunk list, formatted using \LaTeX\ commands, replaces any
+line, in the original source, containing only the word ``\chklist''.
+
+\Noxref\ was, and still is, intended as a stage in the \noweave\
+pipeline. This means that it will receive input in the ``marked-up''
+format generated by the \markup\ program. The output of \noxref\
+should also be in this format.
+
+% ----------------------------------------------------------------------------
+\pagebreak
+\section{Web Structure}
+This document describes three different files for two different environments:
+\begin{enumerate}
+\item [[noxref]] The executable shell script for use under \unix.
+ This file includes the awk script.
+\item [[noxref.bat]] The executable batch file for use under \dos.
+ This file calls upon the following file.
+\item [[noxref.awk]] The awk source code used or included by the
+ files above.
+\end{enumerate}
+Each of these can be generated by specifying the required filename as
+the root chunk name when executing the \notangle\ program on this web.
+To obtain the \dos\ batch file, the following command should be executed:
+\begin{center}
+[[notangle -Rnoxref.bat noxref.nw > noxref.bat]]
+\end{center}
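+The \unix\ shell script is obtained analogously:
+\begin{center}
+[[notangle -Rnoxref noxref.nw > noxref]]
+\end{center}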
+Users of these shell scripts might have to adapt the [[awk]] program
+name in these scripts to match their local system configuration.
+<<noxref>>=
+#!/bin/sh
+# $Id: noxref.nw,v 1.2 1993/05/06 18:15:40 jgk Exp $
+nawk '<<noxref.awk>>'
+<<noxref.bat>>=
+@echo off
+REM # $Id: noxref.nw,v 1.2 1993/05/06 18:15:40 jgk Exp $
+REM The NOWEB environment variable must be set to the directory
+REM where NOXREF.AWK is. It must end in '/' or '\' as required
+REM by the AWK interpreter in use.
+awk -f %NOWEB%noxref.awk
+<<noxref.awk>>=
+# $Id: noxref.nw,v 1.2 1993/05/06 18:15:40 jgk Exp $
+<<Noxref awk source>>
+<<Noxref awk chunk list additions>>
+@
+% ----------------------------------------------------------------------------
+\section{The AWK Source Code}
+This is mostly Ramsey's original code. The fragment that has been
+changed is included as the chunk: \LA{Find and process the \chklist\
+request}\RA. Module label generation has been upgraded to the
+algorithm used in N~Ramsey's last release of the \noweb\ system.
+<<Noxref awk source>>=
+BEGIN { defns[0] = 0 ; uses[0] = 0 ; dcounts[0] = 0 ; firstdef[0] = 0;
+ ucounts[0] = 0 ; idtable[0] = 0 ; keycounts[0] = 0 ;
+ firstdefnout[0] = 0; filetable[0] = 0 }
+{ lines[nextline++] = $0 }
+/^@defn / { logname("DEFN", defns, dcounts, substr($0, 7)) }
+/^@use / { logname("USE", uses, ucounts, substr($0, 6)) }
+/^@file / { curfile = modid(substr($0, 7) substr($0, 10, 3)) }
+<<Noxref awk source>>=
+function logname(which, tbl, counts, name, id) {
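+  # Record one definition ("DEFN") or use ("USE") of a chunk name, and
+  # plant a \label line so LaTeX can supply the page number on a later pass.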
+ counts[name] = counts[name] + 1
+ id = which curfile "-" modid(name) "-" counts[name]
+ tbl[name] = tbl[name] id " "
+ lines[nextline++] = "@literal \\label{" id "}"
+ if (which == "DEFN" && firstdef[name] == "") firstdef[name] = id
+}
+<<Noxref awk source>>=
+function modid(name, key) {
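+  # Derive a short, unique label key from a chunk name: replace characters
+  # that are unsafe in labels, squeeze long names down to six characters,
+  # and append a counter to keep the keys unique.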
+ if (idtable[name] == "") {
+ key = name
+ gsub(/[\[\]\\{} -]/, "*", key)
+ if (length(key) > 6) key = substr(key,1,3) substr(key, length(key)-2, 3)
+ keycounts[key] = keycounts[key] + 1
+ idtable[name] = key "-" keycounts[key]
+ }
+ return idtable[name]
+}
+<<Noxref awk source>>=
+END {
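+  # Second pass: re-emit the buffered lines, decorating @defn and @use
+  # with page references and adding also-defined/used-in annotations.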
+ for (i=0; i < nextline; i++) {
+ name = substr(lines[i], 2)
+ name = substr(name, 1, index(name, " ")-1)
+ arg = substr(lines[i], length(name)+3)
+ if (name == "defn") {
+ thischunk = arg
+ printf "@defn %s~{\\footnotesize\\rm\\pageref{%s}}\n", arg, firstdef[arg]
+ } else if (name == "use") {
+ if (firstdef[arg] != "")
+ printf "@use %s~{\\footnotesize\\rm\\pageref{%s}}\n", arg, firstdef[arg]
+ else
+ printf "@use %s~{\\footnotesize\\rm (never defined)}\n", arg
+ } else if (name == "end") {
+ if (substr(arg, 1, 4) == "code" && firstdefnout[thischunk] == 0) {
+ firstdefnout[thischunk] = 1
+ n = split(defns[thischunk], a)
+ if (n > 1) {
+ printf "@literal \\nwalsodefined{"
+ for (j = 2; j <= n; j++)
+ printf "\\\\{%s}", a[j]
+ printf "}\n@nl\n"
+ }
+ if (uses[thischunk] != "") {
+ printf "@literal \\nwused{"
+ n = split(uses[thischunk], a)
+ for (j = 1; j <= n; j++)
+ printf "\\\\{%s}", a[j]
+ printf "}\n@nl\n"
+ } else
+ printf "@literal \\nwnotused\n@nl\n"
+ }
+ print lines[i]
+ }
+ <<Find and process the \chklist\ request>>
+ else
+ print lines[i]
+ delete lines[i]
+ }
+}
+@ Finding the \chklist\ command is straightforward: it must be on a
+\verb+@text+ line. The unclean way of using a chunk to insert an
+[[else]]~[[if]] clause in a larger [[if -- else if -- else]]
+structure should be noted.
+<<Find and process the \chklist\ request>>=
+else if (name == "text") {
+ if (arg == "\\nowebchunks")
+ printChunkList()
+ else
+ print lines[i]
+}
+@ If the keyword has been found, the function [[printChunkList()]] is
+called to do the actual printing.
+% ----------------------------------------------------------------------------
+\section{The Chunk List Additions}
+These additions consist, except for the chunk \LA{Find and process the
+\chklist\ request}\RA\ mentioned above, of two functions: one to sort
+the list of chunks, and one to print this list.
+
+<<Noxref awk chunk list additions>>=
+<<Sort chunk list>>
+<<Print chunk list>>
+@
+% ----------------------------------------------------------------------------
+\subsection{The Sorting Routine}
+This function implements essentially a simple insertion sort. If
+performance becomes a problem, some effort could be invested to use a
+better algorithm, but that seems unnecessary at the moment.
+
+\subsubsection{The Sorting Function Framework}
+Two global variables, [[nextFreeIdx]] and [[sortedNames]], carry the
+results of this function.
+
+The [[sortedNames]] array is empty to start with, except for the
+first element, which contains the null string as a sentinel; no
+string compares less than the null string. The invariant on this
+array is that it will always contain chunk names in sorted order,
+with the lowest (according to the awk comparison rules) coming first.
+
+The invariant on [[nextFreeIdx]] is that it always contains the index
+number of the next free element in the array.
+<<Sort chunk list>>=
+function sortChunkNames( <<Sort --- Local Variables>>)
+{
+ sortedNames[0] = ""
+ nextFreeIdx=1; # The next index to use (range 1--N)
+ <<Run through the [[chunkname]]s as stored in [[defns]] array>>
+ <<Run through the [[chunkname]]s as stored in [[uses]] array>>
+}
+@
+\subsubsection{Scan the Arrays}
+All chunk names have been stored in the [[defns]] array when they were
+defined. Using the ``{\tt for \em xyz \tt in \em arrayname}'' feature of
+awk, it is possible to step through all elements of the array. The
+zero element in the arrays would confuse the sorting algorithm, so these
+elements have to be discarded.
+<<Run through the [[chunkname]]s as stored in [[defns]] array>>=
+for (chunkname in defns) {
+ if (chunkname != 0) {
+ <<Insert in proper place in sorted array>>
+ }
+}
+<<Sort --- Local Variables>>=
+chunkname,
+@ All names that have been used are stored in the array [[uses]]. This
+array has to be scanned for chunk names that might have been used, but
+that were not defined. Such chunks should also be included in the chunk
+list, so they are inserted in the [[sortedNames]] array.
+<<Run through the [[chunkname]]s as stored in [[uses]] array>>=
+for (chunkname in uses) {
+ if ((chunkname != 0) && !(chunkname in defns)) {
+ <<Insert in proper place in sorted array>>
+ }
+}
+@
+\subsubsection{Insert into the Sorted Array}
+The proper place for the insertion is found by scanning the sorted
+array from the end to the beginning. The local variable [[idx]] is
+used for this scan; it will always point at a possible insertion
+location. [[nextFreeIdx]] is incremented, since it is now known that
+there is another element which will be inserted.
+
+If the element before the current scan location is greater than the
+chunkname to be inserted, then the chunkname will be inserted before
+that element. The scanned element should be moved one position up (to
+the current insertion location) at this point.
+
+Otherwise, the chunk name should come after the element before the
+scan location, i.e., it should be inserted at the current position.
+[[idx]] is pushed to the end condition, to stop the scan over the
+sorted array and to get a new chunk name.
+<<Insert in proper place in sorted array>>=
+for ( idx = nextFreeIdx++ ; idx>0 ; idx-- ) {
+ if ( sortedNames[idx-1] > chunkname )
+ sortedNames[idx] = sortedNames[idx-1] ;
+ else {
+ sortedNames[idx] = chunkname ;
+ idx = -1 ;
+ }
+}
+<<Sort --- Local Variables>>=
+idx
+@
+% ----------------------------------------------------------------------------
+\subsection{Print the Chunk List}
+The function to print the chunk list first calls upon the sorting
+function to get the names in order. It then inserts, if required,
+some heading material and lastly prints the names.
+<<Print chunk list>>=
+function printChunkList( <<Print --- Local Variables>> )
+{
+ sortChunkNames() ;
+ <<Optional Header material>>
+ <<Header material>>
+ <<Print Loop>>
+ <<Closing Material>>
+}
+@
+% ----------------------------------------------------------------------------
+\subsubsection{The Printing Loop}
+This loop steps through the indices of the sorted names array, up to
+the next free index number. It prints each name, using the \markup\
+\verb+@use+ directive, followed by a row of dots. The printing of the
+page numbers, root markers etc. is delegated to the chunk \LA{print page
+numbers etc.}\RA.
+<<Print Loop>>=
+for (idx=1; idx<nextFreeIdx; idx++) {
+ print "@use " sortedNames[idx] ;
+ print "@literal \\dotfill" ;
+ <<Print page numbers etc.>>
+ print "@nl" ;
+}
+<<Print --- Local Variables>>=
+idx,
+@ When printing the page references, the following cases should be considered.
+\begin{itemize}
+\item If a name does not appear in the [[uses]] array, it must have been
+ in the [[defns]] array. It is therefore a root chunk.
+\item If a name does not appear in the [[defns]] array, it is undefined in
+ the source file currently processed.
+\item If a name is defined, print the page references of the definitions.
+\item If a name is used, print the usage page references.
+\end{itemize}
+<<Print page numbers etc.>>=
+if (uses[sortedNames[idx]] == "") {
+ print "@literal { \\rm Root}," ;
+}
+if (defns[sortedNames[idx]] == "") {
+ print "@literal { \\rm Undefined}" ;
+}
+else {
+ <<Print definition page numbers>>
+}
+if (uses[sortedNames[idx]] != "") {
+ <<Print usage page numbers>>
+}
+@
+\paragraph{Definition References.}
+Definition page references are derived from the [[defns]] arrays, by
+splitting them into fields with the [[split]] function. The first one
+should not be preceded by a ``,'' (comma character), but all subsequent
+numbers (if any) should have a comma in front of them. Page references
+for the definitions are printed underlined.
+<<Print definition page numbers>>=
+n = split(defns[sortedNames[idx]], a)
+print "@literal { \\underline{\\pageref{" a[1] "}}}";
+if (2 <= n)
+ for (j = 2; j <= n; j++)
+ print "@literal {, \\underline{\\pageref{" a[j] "}}}";
+<<Print --- Local Variables>>=
+n, a, j
+@
+\paragraph{Usage References.}
+The page references for the places where the chunks are used are
+obtained from the [[uses]] array. These always have
+preceding text (definition page references or the word ``Undefined''),
+so these should always have a ``,'' in front of them.
+<<Print usage page numbers>>=
+n = split(uses[sortedNames[idx]], a)
+for (j = 1; j <= n; j++)
+ print "@literal {\\rm, \\pageref{" a[j] "}}";
+@
+\paragraph{A Small Test.}
+Both chunk names should appear in the chunk list: one
+marked as ``Root'', the other as ``Undefined''.
+<<An unused (therefore root) chunk>>=
+<<An undefined chunk>>
+@
+\pagebreak
+\subsubsection{List Opening and Closing Definitions}
+If required, some commands could be included to generate a chapter or
+section heading above the chunk list. However, the author of this
+code prefers to have such sectioning commands under the control of the
+final document source file.
+
+Users who prefer to have these section commands automatically
+generated (like the Icon implementation of the \noxref\ program does)
+can redefine \LA{Optional Header material}\RA\ to be equal to the
+current definition of \LA{Not used header material}\RA.
+<<Optional Header material>>=
+<<Not used header material>>=
+print "@literal \\ifx\\chapter\\undefined\\section*"
+print "@literal {Alphabetical List of Chunk Names}" ;
+print "@literal \\else\\chapter"
+print "@literal {Alphabetical List of Chunk Names}" ;
+print "@literal \\fi"
+print "@nl" ;
+@ The following header material is required; it sets up the
+environment for the list.
+<<Header material>>=
+print "@literal {\\obeylines" ;
+print "@literal \\setlength{\\parindent}{0mm}" ;
+print "@literal \\setlength{\\parskip}{1.4ex}" ;
+print "@nl" ;
+<<Closing Material>>=
+print "@literal }" ;
+@
+% ----------------------------------------------------------------------------
+\newpage
+\section{References}
+% This is faked (ie, not a real LaTeX bibliography), since this file
+% is likely to get included in other files, with other bibliographies.
+{
+\begin{description}
+\sfcode`\.=1000\relax
+
+\item[{\rm [1]~~~}]
+Aho AV., Kernighan BW., Weinberger PJ. 1988,
+{\sl The AWK Programming Language,}
+Addison-Wesley.
+
+\item[{\rm [2]~~~}]
+Ramsey N. 1992--1993,
+``Literate Programming Tools Need Not Be Complex,''
+To be published in {\sl IEEE Software.} 1993.
+
+\end{description}
+}
+
+
+% ----------------------------------------------------------------------------
+% This should go in RCS.sty!
+\newenvironment{RCSlog}%
+{\begin{trivlist} \item[]%
+\setlength{\parindent}{0mm}%
+\setlength{\parskip}{3ex}%
+\catcode`\$=12%
+\hbadness=10000\ignorespaces\obeycr}%
+{\end{trivlist}}
+
+\section{RCS Maintained Log}
+\begin{RCSlog}
+$Log: noxref.nw,v $
+Revision 1.2 1993/05/06 18:15:40 jgk
+Moved from using bold to underlining for the page references
+of definitions. (On advice of Lee Wittenberg.)
+A few linguistic improvements. RCS ID strings included in progs.
+
+Revision 1.1 1993/05/01 21:08:21 JG~Krom
+Initial revision
+
+A version with the same code, but some errors and typos
+in the documentation text was known as:
+Revision 1.1 1993/04/28 17:03:23 jgk
+Initial revision
+
+This file was derived from:
+``NOXREF.BAT'' by L~Wittenberg and ``noxref'' By N~Ramsey.
+(No change log was available for these files.)
+
+And from:
+Log: noxref.awk
+Revision 1.5 1993/04/23 12:52:16 JG~Krom
+On advice of Lee Wittenberg, used the new way of label generation.
+
+Revision 1.4 1993/04/20 22:41:44 JG~Krom
+Improved layout of chunklist.
+
+Revision 1.3 1993/04/11 17:47:38 JG~Krom
+Indicate root chunks in the chunklist.
+
+Revision 1.2 1993/04/11 15:52:53 JG~Krom
+First stab at the chunklist command.
+
+Revision 1.1 1992/10/21 17:00:00 LEEW
+checked in with -k by JG~Krom at 1993/04/10 16:53:28
+
+Which in turn was also derived from: ``noxref'' By N~Ramsey.
+\end{RCSlog}
+% ----------------------------------------------------------------------------
+\newpage
+\section{Alphabetical List of Chunk Names}
+\nowebchunks
+% ----------------------------------------------------------------------------
+\input{noxref.ind}
+% ----------------------------------------------------------------------------
+\end{document}
+% End of noweb code
+%=============================================================================
diff --git a/web/noweb/contrib/kaelin/README b/web/noweb/contrib/kaelin/README
new file mode 100644
index 0000000000..f8be279ff9
--- /dev/null
+++ b/web/noweb/contrib/kaelin/README
@@ -0,0 +1,19 @@
+Prettyprinters for Icon and C++ based on Kostas's work, but using Computer Modern fonts.
+
+There is no Makefile.
+Type
+> noweave -x -delay pp.nw > pp.tex < /* creates pp.tex */
+> latex pp.tex < /* creates pp.dvi and warnings */
+> latex pp.tex < /* creates final pp.dvi */
+to get documentation and
+> noweb -t pp.nw < /* creates cnw.icn and inw.icn */
+> iconc cnw.icn < /* creates cnw */
+> iconc inw.icn < /* creates inw */
+to get filters named cnw and inw.
+(Maybe you have to use full path-names for noweave, noweb, latex and iconc)
+
+For installation: Look at the other Makefiles in contrib/*
+
+Cleaning up is trivial: Remove pp.aux, pp.log, pp.tex, inw.icn and cnw.icn
diff --git a/web/noweb/contrib/kaelin/email b/web/noweb/contrib/kaelin/email
new file mode 100644
index 0000000000..8790d4fd1f
--- /dev/null
+++ b/web/noweb/contrib/kaelin/email
@@ -0,0 +1 @@
+Kaelin Colclasure <kaelin@bridge.com>
diff --git a/web/noweb/contrib/kaelin/pp.nw b/web/noweb/contrib/kaelin/pp.nw
new file mode 100644
index 0000000000..dd06757340
--- /dev/null
+++ b/web/noweb/contrib/kaelin/pp.nw
@@ -0,0 +1,616 @@
+% For LaTeX input: noweave -x -delay filename.nw > filename.tex
+% For source code: noweb -t filename.nw
+\documentstyle[noweb,11pt]{article}
+\pagestyle{noweb}
+\noweboptions{longchunks,smallcode}
+
+\newcommand{\CEE}{{\sc C\spacefactor1000}}
+\newcommand{\CPP}{{\sc C\PP\spacefactor1000}}
+\newcommand{\ICON}{{\sc Icon\spacefactor1000}}
+\newcommand{\PP}{\kern.5pt\raisebox{.4ex}
+ {$\scriptscriptstyle+\kern-1pt+$}\kern.5pt}
+
+\begin{document}
+\title{Beyond the {\tt \symbol{"5C}tt} Font\\{\large A {\tt noweb} Extension}}
+\author{Kaelin L. Colclasure \\ {\tt kaelin@bridge.com}}
+\date{Preliminary Draft}
+\maketitle
+
+\section{Introduction}
+This document contains two filters for {\tt noweave}, written in \ICON,
+which add basic pretty-printing for \CPP\ and \ICON\ code to {\tt noweb}'s
+repetoir of functionality. The bulk of the code herein is derived from
+\cite{kostas}, and at least a nodding familiarity with that work is assumed
+by this documentation. A working knowledge of \ICON\ is, of course, a must.
+Refer to \cite{griswold} as necessary.
+
+\subsection{Design philosophy}
+While I relish the idea of inflicting my own code formatting
+preferences on the unsuspecting masses, I was less enthusiastic
+towards the prospect of writing a scanner for each target language.
+Besides (I adeptly rationalized), so doing would violate one of the
+fundamental tenets of {\tt noweb}-- that the programmer maintain
+control over the formatting of code.
+
+Thus, this implementation does not address indentation or line-breaking
+within code chunks. Like its predecessor, it is limited to bolding target
+language keywords, operator substitution and other such in-line markup
+generation. However, even these restricted transformations can have a
+marked effect on the clarity of exposition of the source code.\footnote{This
+is particularly true for languages like Icon which have a plethora of
+multi-character operators.}
+
+\subsection{Why a new implementation?}
+While \cite{kostas} provides an easily-extensible implementation, it also
+has one unfortunate limitation-- the technique used to distinguish between
+operators and comment delimiters relies upon those categories consisting of
+disjoint character sets\footnote{More precisely, it relies upon the {\em
+start sets} of those two categories of tokens being disjoint.} in the target
+language. While true for the target languages Kostas implemented, this
+assertion does not hold for a large class of commonly used languages. The
+\CEE\ language, for instance, uses \verb|/*| \ldots \verb|*/| to bracket
+comments even though both \verb|/| and \verb|*| are operators as well.
+
+While obviously a problem for me, since my first target language was \CPP,
+this could undoubtedly have been redressed with relatively minor surgery to
+\cite{kostas}. However, once I'd familiarized myself with Kostas' code, I
+felt compelled to add some additional tweaks and features of my own. The
+principal enhancements provided are as follows:
+\begin{itemize}
+\item A more versatile scheme for handling token recognition. This not only
+addresses the problem outlined above, but makes it possible to do target
+language dependent processing during scanning as well as during
+initialization.
+\item More robust handling of string constants. Kostas did not deal with
+escaped embedded quoting characters at all.
+\item Pretty-printing of [[[[quoted]]]] code as well as code in {\tt noweb}
+chunks.
+\item Use of \ICON's conditional compilation facilities to allow all the code
+for different targets to reside in one file.\footnote{Well, {\em I} like it
+better this way\ldots}
+\end{itemize}
+Along the way, there have been some steps backwards as well:
+\begin{itemize}
+\item I make no provisions for multi-line comments at all. I
+personally use them only for commenting out code regions, and in that
+context special treatment is undesirable.\footnote{This does not
+preclude other target language implementations based upon this work
+from using target language dependent code to deal with multi-line
+comments.}
+\item Font utilization has been restricted to the Computer Modern fonts
+provided with all implementations of \TeX\ (primarily because my
+site has only those fonts).
+\item Due to my ignorance of ``vanilla'' \TeX, the markup code generated
+is \LaTeX\ dependent (I was tempted to add this as an item to the list
+above, but decided to refrain from provoking any nasty EMail).
+\end{itemize}
+No doubt I have introduced some new bugs as well, but discovering those is
+left as an exercise for the reader.
+
+\section{Implementation}
+Like Kostas, I have split the implementation of each target filter into a
+common part and a target dependent part. However, in contrast with
+\cite{kostas}, I have kept all the pieces together in one {\tt noweb} input
+file. Readers interested only in adding a new target should read \S
+\ref{reqs} and then skim \S \ref{c} and \S \ref{icon} for complete examples.
+
+\subsection{Target requirements\label{reqs}}
+\paragraph{Minimal implementation:}
+Each target implementation must provide at least the following:\footnote{In
+point of fact, the list of keywords could be left null. It merely provides
+a convenient way to populate the [[translation]] table with the
+language's reserved words, which will likely all receive the same markup
+treatment. However, here again, Icon is an exception\ldots}
+\begin{itemize}
+\item A root chunk which generates the \ICON\ source file for the target
+language filter.
+\item A table containing all of the target language's ``interesting''
+character sequences and either their typographic translation {\em or an \ICON\
+procedure for deriving it}.
+\item A list of keywords.
+\item A list of operators. For our purposes here, comment-delimiting
+punctuation marks are considered operators.
+\end{itemize}
+Variables to hold the required table and lists exist at global scope.
+<<Global declarations>>=
+global translation # Table of typographic translations
+global keyword_list, operator_list # List of keywords, operators
+@
+
+\paragraph{The [[translation]] table:} The table of typographic substitutions
+is built by the following code chunk during program initialization:
+<<Define the [[translation]] table>>=
+translation := table();
+<<Keywords>>
+<<Operators>>
+@
+
+\paragraph{Initial character sets:}
+The {\em start sets} for the two classes of tokens are defined globally in
+the following two chunks. Note that [[op_chars]] is automatically derived
+from the list of operators already defined above. However, some target
+languages will require adjustments to the definition of
+[[id_chars]].\footnote{Note that the Icon implementation does {\em not}
+add $\&$ to this list even though it prefixes some identifiers. We treat
+it always as an operator instead.}
+<<Global declarations>>=
+global id_chars, op_chars
+@
+<<Define initial character sets>>=
+id_chars := &letters ++ &digits ++ '_'
+op_chars := ''
+every op := !operator_list do op_chars ++:= cset(op[1])
+@
+
+\paragraph{The [[begin_token]] character set:}
+The next two chunks globally define the {\em start set} for {\bf all}
+interesting tokens. In general this will be \mbox{[[id_chars]] $\cup$
+[[op_chars]]}, but in some cases it may be desirable to add additional
+elements to this set.
+<<Global declarations>>=
+global begin_token
+@
+<<Define the [[begin_token]] character set>>=
+begin_token := id_chars ++ op_chars
+@
+
+\paragraph{Procedures:}
+\ICON\ procedures referenced in the [[translation]] table must, of
+course, be defined by the target implementation. When invoked, such a
+procedure will receive one argument-- the token which caused it to be
+invoked.
+
+Additionally, the [[TeXify]] procedure's [[case]] statement includes a
+target language dependent chunk which may be used to implement anything I've
+forgotten or neglected (like multi-line comments). The \CEE/\CPP
+implementation provides a skeletal example of how this might be used to
+process \CEE preprocessor directives.\footnote{It takes the trouble to find
+them, but then merely writes them verbatim to the output with no special
+formatting. This could have been more easily accomplished with a [[procedure]]
+entry in the [[translation]] table.}
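+
+Such a procedure has the following general shape (a hypothetical sketch;
+the real examples in this document are [[comment_eol]] and
+[[quoted_c_string]]):
+\begin{verbatim}
+procedure embolden(token)
+   # "token" is the text that triggered the invocation
+   return "{\\bf{}" || token || "}"
+end
+\end{verbatim}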
+
+\subsection{Target independent code}
+
+\subsubsection{The [[main]] procedure}
+
+<<Procedure [[main]]>>=
+procedure main()
+ <<Define the [[translation]] table>>
+ <<Define initial character sets>>
+ <<Define the [[begin_token]] character set>>
+
+ <<Emit special \LaTeX\ definitions>>
+ <<Process each input line through the [[filter]] procedure>>
+ return
+end
+@
+<<Emit special \LaTeX\ definitions>>=
+write("@literal \\def\\begcom{\\begingroup\\rm" ||
+ "\\catcode`\\$=3\\catcode`\\^=7\\catcode`\\_=8{}}")
+write("@literal \\def\\endcom{\\endgroup}")
+@
+<<Process each input line through the [[filter]] procedure>>=
+while line := read() do
+ line ? (="@" & filter(tab(upto(' ') | 0), if =" " then tab(0) else &null))
+@
+
+\subsubsection{The [[filter]] procedure}
+
+<<Procedure [[filter]]>>=
+procedure filter(name, arg)
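+# Dispatch on the noweb keyword at the start of each input line; @text
+# lines inside code chunks (and quoted code) go through TeXify, while
+# all other lines are copied through unchanged.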
+static kind
+static whitespace
+static code_in_line
+ initial {
+ whitespace := ' \t'
+ }
+ case name of {
+ "begin": {
+ arg ? kind := tab(many(&letters))
+ copyline(name, arg)
+ }
+ "defn" | "literal" | "use": {
+ code_in_line := 1
+ copyline(name, arg)
+ }
+ "endquote": {
+ kind := "docs"
+ copyline(name, arg)
+ }
+ "nl": {
+ if \kind == "code" & /code_in_line then
+ write("@literal \\smallskip\\eatline")
+ copyline(name, arg)
+ code_in_line := &null
+ }
+ "quote": {
+ kind := "code"
+ copyline(name, arg)
+ }
+ "text": {
+ if \kind == "code" then {
+ if *(cset(arg) -- whitespace) > 0 then code_in_line := 1
+ TeXify(arg)
+ }
+ else copyline(name, arg)
+ }
+ default: copyline(name, arg)
+ }
+ return
+end
+@
+<<Procedure [[copyline]]>>=
+procedure copyline(name, arg)
+ return write("@", name, (" " || \arg) | "")
+end
+@
+
+\subsubsection{The [[TeXify]] procedure}
+
+<<Procedure [[TeXify]]>>=
+procedure TeXify(arg)
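+# Scan the line, copying ordinary text through preTeX and replacing
+# each interesting token with its typographic translation.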
+ writes("@literal ")
+ arg ? {
+ while writes(preTeX(tab(upto(begin_token)))) do case &pos + 1 of {
+ <<Language-dependent \TeX ify chunk>>
+ any(id_chars): <<Identifier or numeric constant>>
+ any(op_chars): <<Operator>>
+ default: stop("\n** Error at input pos ", &pos)
+ }
+ writes(preTeX(tab(0)))
+ }
+ write()
+ return
+end
+@
+<<Identifier or numeric constant>>=
+{
+ token := tab(many(id_chars))
+ <<Write [[token]] or its typographic translation>>
+}
+@
+<<Operator>>=
+{
+ token := tab(match(!operator_list) | &pos + 1)
+ <<Write [[token]] or its typographic translation>>
+}
+@
+<<Write [[token]] or its typographic translation>>=
+trans := translation[token]
+case type(trans) of {
+ "procedure": writes(trans(token))
+ "null": writes(preTeX(token))
+ default: writes("\\mbox{" || trans || "}")
+}
+@
+<<Procedure [[preTeX]]>>=
+procedure preTeX(arg)
+static TeX, hex
+ initial {
+ TeX := '\\${}&#^_%'
+ hex := table();
+ hex["\\"] := "5C"; hex["$"] := "24"; hex["{"] := "7B"; hex["}"] := "7D"
+ hex["&"] := "26"; hex["#"] := "23"; hex["^"] := "5E"; hex["_"] := "5F"
+ hex["%"] := "25"
+ }
+ str := ""
+ every c := !arg do
+ str ||:= if *(cset(c) ** TeX) > 0 then "\\symbol{\"" || hex[c] || "}"
+ else c
+ return str
+end
+@
+
+\subsubsection{Target utility procedures}
+
+<<Procedure [[comment_eol]]>>=
+procedure comment_eol(arg)
+ return "\\begcom" || arg || tab(0) || "\\endcom"
+end
+@
+
+\paragraph{The [[quoted_c_string]] procedure:}
+This procedure matches a \CEE/\CPP-style
+string constant which may contain embedded quotes escaped by a backslash
+(\verb|\|) character. We want string constants to be typeset with
+\verb|\verb*| (\`a la {\tt CWEB}). The result looks like
+\verb*|"a string constant"| which makes counting multiple embedded spaces
+{\em much} easier.
+<<Procedure [[quoted_c_string]]>>=
+procedure quoted_c_string(arg)
+local c, str
+ c := cset(arg)
+ str := tab(upto(c))
+ if \str then while str[-1] == "\\" & (*str < 2 | str[-2] ~== "\\") do
+ str ||:= tab(&pos + 1) || tab(upto(c))
+ else str := ""
+ str ||:= tab(&pos + 1) # Pick up closing quote
+ return "\\verb*\^K" || arg || str || "\^K"
+end
+@ Note the use of ASCII {\tt VT} control characters to bracket the
+\verb|\verb*| environment. Any target implementation which utilizes this
+procedure must include [[write("@literal \\catcode`^^K=3")]] in the \LaTeX\
+special definitions chunk. This is a gross hack, but it's made necessary by
+the fact that a string literal could conceivably contain {\bf all} of the
+printable ASCII characters. We therefore arbitrarily choose a control
+character which we deem unlikely to be found in a string
+literal.\footnote{Incidentally, my original choice was NUL, but this proved
+problematic for {\tt vi} users because that editor stripped the NULs away
+when the {\tt .tex} file was edited by hand.}
+
+\subsection{\protect\CEE/\protect\CPP\ code markup\label{c}}
+
+<<cnw.icn>>=
+$define LANG_CPLUSPLUS
+<<Global declarations>>
+<<Procedure [[main]]>>
+<<Procedure [[filter]]>>
+<<Procedure [[copyline]]>>
+<<Procedure [[preTeX]]>>
+<<Procedure [[TeXify]]>>
+
+<<Procedure [[comment_eol]]>>
+<<Procedure [[quoted_c_string]]>>
+@
+<<Keywords>>=
+$ifdef LANG_CPLUSPLUS
+keyword_list := [
+ "asm","auto","break","case","catch","char","class","const",
+ "continue","default","delete","do","double","else","enum","extern",
+ "float","for","friend","goto","if","inline","int","long",
+ "new","operator","private","protected","public","register","return","short",
+ "signed","sizeof","static","struct","switch","template","this","throw",
+ "try","typedef","union","unsigned","virtual","void","volatile","while"
+]
+every key := !keyword_list do translation[key] := "{\\bf{}" || key || "}"
+$endif
+@
+<<Operators>>=
+$ifdef LANG_CPLUSPLUS
+operator_list := [
+ "<\<=",">>=","->","++","--","<\<",">>","<=",">=","==","!=","&&","||",
+ "*=","/=","%=","+=","-=","&=","^=","|=","()","[]","//","/*","*/",
+ "!","%","^","&","*","(",")","-","+","=","{","}","|","~","[","]","<",">",
+ "?","/","'","\""
+]
+translation["<\<="] := "\\protect\\OPASSIGN{\\ll}"
+translation[">>="] := "\\protect\\OPASSIGN{\\gg}"
+translation["->"] := "\^K\\rightharpoonup\^K"
+translation["++"] := "\\protect\\PP"
+translation["--"] := "\\protect\\MM"
+translation["<\<"] := "\^K\\ll\^K"
+translation[">>"] := "\^K\\gg\^K"
+translation["<="] := "\^K\\leq\^K"
+translation[">="] := "\^K\\geq\^K"
+translation["=="] := "\^K\\equiv\^K"
+translation["!="] := "\^K\\neq\^K"
+translation["&&"] := "\^K\\wedge\^K"
+translation["||"] := "\^K\\vee\^K"
+translation["*="] := "\\protect\\OPASSIGN{\\ast}"
+translation["/="] := "\\protect\\OPASSIGN{\\div}"
+translation["%="] := "\\protect\\OPASSIGN{" || preTeX("%") || "}"
+translation["+="] := "\\protect\\OPASSIGN{+}"
+translation["-="] := "\\protect\\OPASSIGN{-}"
+translation["&="] := "\\protect\\OPASSIGN{" || preTeX("&") || "}"
+translation["^="] := "\\protect\\OPASSIGN{\\oplus}"
+translation["|="] := "\\protect\\OPASSIGN{\\mid}"
+translation["()"] := "\^K(\\;)\^K"
+translation["[]"] := "\^K[\\;]\^K"
+translation["//"] := comment_eol
+
+translation["!"] := "\^K\\neg\^K"
+translation["%"] := "\^K" || preTeX("%") || "\^K"
+translation["^"] := "\^K\\oplus\^K"
+translation["&"] := "\^K" || preTeX("&") || "\^K"
+translation["*"] := "\^K\\ast\^K"
+translation["="] := "\^K\\leftarrow\^K"
+translation["{"] := "\\boldmath\^K\\{\^K"
+translation["}"] := "\\boldmath\^K\\}\^K"
+translation["|"] := "\^K\\mid\^K"
+translation["~"] := "\^K\\sim\^K"
+translation["/"] := "\^K\\div\^K"
+translation["'"] := quoted_c_string
+translation["\""] := quoted_c_string
+
+every op := !operator_list do /translation[op] := "\^K" || op || "\^K"
+$endif
+@
+<<Language-dependent \TeX ify chunk>>=
+$ifdef LANG_CPLUSPLUS
+any(cpp_mark): writes(preTeX(tab(0)))
+$endif
+@
+<<Global declarations>>=
+$ifdef LANG_CPLUSPLUS
+global cpp_mark
+$endif
+@
+<<Define initial character sets>>=
+$ifdef LANG_CPLUSPLUS
+cpp_mark := '#'
+$endif
+@
+<<Define the [[begin_token]] character set>>=
+$ifdef LANG_CPLUSPLUS
+begin_token ++:= cpp_mark
+$endif
+@
+<<Emit special \LaTeX\ definitions>>=
+$ifdef LANG_CPLUSPLUS
+write("@literal \\catcode`^^K=3")
+write("@literal \\newcommand{\\MM}{\\kern.5pt\\raisebox{.4ex}" ||
+ "{\^K\\scriptscriptstyle-\\kern-1pt-\^K}\\kern.5pt}")
+write("@literal \\newcommand{\\PP}{\\kern.5pt\\raisebox{.4ex}" ||
+ "{\^K\\scriptscriptstyle+\\kern-1pt+\^K}\\kern.5pt}")
+write("@literal \\newcommand{\\OPASSIGN}[1]{\\raisebox{-.4ex}" ||
+ "{\^K\\stackrel{\\scriptscriptstyle\\,#1}{\\leftarrow}\^K}}")
+$endif
+@
+
+\subsection{\protect\ICON\ code markup\label{icon}}
+
+<<inw.icn>>=
+$define LANG_ICON
+<<Global declarations>>
+<<Procedure [[main]]>>
+<<Procedure [[filter]]>>
+<<Procedure [[copyline]]>>
+<<Procedure [[preTeX]]>>
+<<Procedure [[TeXify]]>>
+
+<<Procedure [[comment_eol]]>>
+<<Procedure [[quoted_c_string]]>>
+<<\ICON\ [[prefixed_keyword_check]] procedure>>
+@
+<<Global declarations>>=
+$ifdef LANG_ICON
+global an_word_list, ss_word_list
+$endif
+@
+<<Keywords>>=
+$ifdef LANG_ICON
+keyword_list := [
+ "by","break","case","create","default","do","else","end",
+ "every","fail","global","if","initial","link","local","next",
+ "not","of","procedure","record","repeat","return","static","suspend",
+ "to","then","while","until"
+]
+an_word_list := [
+ "ascii","clock","collections","cset","current","date","dateline","digits",
+ "error","errornumber","errortext","errorvalue","errout","fail","features",
+ "file","host","input","lcase","letters","level","line","main","null",
+ "output","pos","random","regions","source","storage","subject","time",
+ "trace","ucase","version",
+# Added in Version 8.10
+ "allocated","e","phi","pi","progname",
+# Added by X interface
+ "col","control","interval","ldrag","lpress","lrelease","mdrag","meta",
+ "mpress","mrelease","resize","rdrag","row","rpress","rrelease","shift",
+ "window","x","y"
+]
+ss_word_list := [
+# Translator directives for Version 8.10
+ "define","else","endif","ifdef","ifndef","include","line","undef",
+]
+every key := !keyword_list do translation[key] := "{\\bf{}" || key || "}"
+$endif
+@
+
+<<Operators>>=
+$ifdef LANG_ICON
+operator_list := [
+ "#","~===:=","<\<=:=",">>=:=","~==:=","|||:=","===:=","~===","<=:=",
+ ">=:=","~=:=","++:=","--:=","**:=","||:=","<\<:=","==:=",">>:=",
+ "<\<=",">>=","~==","|||",":=:","<->","===","+:=","-:=","*:=","/:=","%:=",
+ "^:=","<:=","=:=",">:=","@:=","&:=","?:=","()","[]",
+ "<=",">=","~=","++","--","**","||","<\<","==",">>",":=","<-","{","}","|",
+ "+","-","?","~","=","!","@","^","*",".","/","\\","%","<","&","$","'","\""
+]
+
+translation["#"] := comment_eol
+translation["~===:="] := "\\protect\\OPASSIGN{\\not\\equiv}"
+translation["<\<=:="] := "\\protect\\OPASSIGN{\\preceq}"
+translation[">>=:="] := "\\protect\\OPASSIGN{\\succeq}"
+translation["~==:="] := "\\protect\\OPASSIGN{\\not\\approx}"
+translation["|||:="] := "\\protect\\LONGOPASSIGN{[\\:]\\Join}"
+translation["===:="] := "\\protect\\OPASSIGN{\\equiv}"
+translation["~==="] := "\^K\\not\\equiv\^K"
+translation["<=:="] := "\\protect\\OPASSIGN{\\leq}"
+translation[">=:="] := "\\protect\\OPASSIGN{\\geq}"
+translation["~=:="] := "\\protect\\OPASSIGN{\\neq}"
+translation["++:="] := "\\protect\\OPASSIGN{\\uplus}"
+translation["--:="] := "\\protect\\OPASSIGN{\\ni}"
+translation["**:="] := "\\protect\\OPASSIGN{\\in}"
+translation["||:="] := "\\protect\\OPASSIGN{\\Join}"
+translation["<\<:="] := "\\protect\\OPASSIGN{\\prec}"
+translation["==:="] := "\\protect\\OPASSIGN{\\approx}"
+translation[">>:="] := "\\protect\\OPASSIGN{\\succ}"
+
+translation["<\<="] := "\^K\\preceq\^K"
+translation[">>="] := "\^K\\succeq\^K"
+translation["~=="] := "\^K\\not\\approx\^K"
+translation["|||"] := "\\protect\\OPSTACK{[\\:]}{\\Join}"
+translation[":=:"] := "\^K\\leftrightarrow\^K"
+translation["<->"] := "\^K\\Leftrightarrow\^K"
+translation["==="] := "\^K\\equiv\^K"
+translation["+:="] := "\\protect\\OPASSIGN{+}"
+translation["-:="] := "\\protect\\OPASSIGN{-}"
+translation["*:="] := "\\protect\\OPASSIGN{\\ast}"
+translation["/:="] := "\\protect\\OPASSIGN{\\div}"
+translation["%:="] := "\\protect\\OPASSIGN{" || preTeX("%") || "}"
+translation["^:="] := "\\protect\\OPASSIGN{\\uparrow}"
+translation["<:="] := "\\protect\\OPASSIGN{<}"
+translation["=:="] := "\\protect\\OPASSIGN{=}"
+translation[">:="] := "\\protect\\OPASSIGN{>}"
+translation["@:="] := "\\protect\\OPASSIGN{\\partial}"
+translation["&:="] := "\\protect\\OPASSIGN{\\wedge}"
+translation["?:="] := "\\protect\\OPASSIGN{\\wr}"
+translation["()"] := "\^K(\\;)\^K"
+translation["[]"] := "\^K[\\;]\^K"
+translation["<="] := "\^K\\leq\^K"
+translation[">="] := "\^K\\geq\^K"
+translation["~="] := "\^K\\neq\^K"
+translation["++"] := "\^K\\uplus\^K"
+translation["--"] := "\^K\\ni\^K"
+translation["**"] := "\^K\\in\^K"
+translation["||"] := "\^K\\Join\^K"
+translation["<\<"] := "\^K\\prec\^K"
+translation["=="] := "\^K\\approx\^K"
+translation[">>"] := "\^K\\succ\^K"
+translation[":="] := "\^K\\leftarrow\^K"
+translation["<-"] := "\^K\\Leftarrow\^K"
+translation["{"] := "\\boldmath\^K\\{\^K"
+translation["}"] := "\\boldmath\^K\\}\^K"
+translation["|"] := "\^K\\vee\^K"
+translation["?"] := "\^K\\wr\^K"
+translation["~"] := "\^K\\ni\^K"
+translation["!"] := "\^K\\forall\^K"
+translation["^"] := "\^K\\uparrow\^K"
+translation["*"] := "\^K\\ast\^K"
+translation["\\"] := "\^K\\exists\^K"
+translation["%"] := "\^K" || preTeX("%") || "\^K"
+translation["&"] := prefixed_keyword_check
+translation["$"] := prefixed_keyword_check
+translation["'"] := quoted_c_string
+translation["\""] := quoted_c_string
+
+every op := !operator_list do /translation[op] := "\^K" || op || "\^K"
+$endif
+@
+<<\ICON\ [[prefixed_keyword_check]] procedure>>=
+procedure prefixed_keyword_check(arg)
+local keyword_list, keyword, result
+ keyword_list := (arg == "&", an_word_list) | ss_word_list
+ keyword := tab(match(!keyword_list)) | &null
+ if \keyword then result := "{\\bf{}" || preTeX(arg) || keyword || "}"
+ else result := "\^K" || ((arg == "&", "\\wedge") | preTeX(arg)) || "\^K"
+ return result
+end
+@
+<<Emit special \LaTeX\ definitions>>=
+$ifdef LANG_ICON
+write("@literal \\catcode`^^K=3")
+write("@literal \\newcommand{\\LONGOPASSIGN}[1]{\\raisebox{-.4ex}" ||
+ "{\^K\\stackrel{\\scriptscriptstyle\\,#1}{\\longleftarrow}\^K}}")
+write("@literal \\newcommand{\\OPASSIGN}[1]{\\raisebox{-.4ex}" ||
+ "{\^K\\stackrel{\\scriptscriptstyle\\,#1}{\\leftarrow}\^K}}")
+write("@literal \\newcommand{\\OPSTACK}[2]{\\raisebox{-.4ex}" ||
+ "{\^K\\stackrel{\\scriptscriptstyle#1}{#2}\^K}}")
+$endif
+@
+
+\section{Chunks}
+\nowebchunks
+
+\begin{thebibliography}{Mmoo}
+\bibitem[Gris]{griswold}Ralph E. Griswold and Madge T. Griswold. {\em
+The Icon Programming Language}. Prentice-Hall, Englewood Cliffs, New
+Jersey, 1983.
+\bibitem[Oiko]{kostas}Kostas N. Oikonomou. {\em Extending Noweb With
+Some Typesetting}. Unpublished. Included in {\tt contrib} directory
+of the standard {\tt noweb} distribution.
+\end{thebibliography}
+\end{document}
+% Local Variables:
+% outline-regexp: "\\([\\\]\\(sub\\)*sec\\)\\|\\(<[^>]+>>=\\)"
+% End:
diff --git a/web/noweb/contrib/kostas/C++_translation_table b/web/noweb/contrib/kostas/C++_translation_table
new file mode 100644
index 0000000000..1653f51057
--- /dev/null
+++ b/web/noweb/contrib/kostas/C++_translation_table
@@ -0,0 +1,64 @@
+# This file defines translations into \TeX\ code for keywords of C++. It also defines
+# translations for special tokens, such as <=.
+
+# Initialize the translation table to contain nulls.
+translation := table()
+
+# Reserved words.
+translation["asm"] := "{\\ttb{}asm}"
+translation["auto"] := "{\\ttb{}auto}"
+translation["break"] := "{\\ttb{}break}"
+translation["case"] := "{\\ttb{}case}"
+translation["char"] := "{\\ttb{}char}"
+translation["class"] := "{\\ttb{}class}"
+translation["const"] := "{\\ttb{}const}"
+translation["continue"] := "{\\ttb{}continue}"
+translation["default"] := "{\\ttb{}default}"
+translation["delete"] := "{\\ttb{}delete}"
+translation["do"] := "{\\ttb{}do}"
+translation["double"] := "{\\ttb{}double}"
+translation["else"] := "{\\ttb{}else}"
+translation["enum"] := "{\\ttb{}enum}"
+translation["extern"] := "{\\ttb{}extern}"
+translation["float"] := "{\\ttb{}float}"
+translation["for"] := "{\\ttb{}for}"
+translation["friend"] := "{\\ttb{}friend}"
+translation["goto"] := "{\\ttb{}goto}"
+translation["if"] := "{\\ttb{}if}"
+translation["inline"] := "{\\ttb{}inline}"
+translation["int"] := "{\\ttb{}int}"
+translation["long"] := "{\\ttb{}long}"
+translation["new"] := "{\\ttb{}new}"
+translation["operator"] := "{\\ttb{}operator}"
+translation["overload"] := "{\\ttb{}overload}"
+translation["private"] := "{\\ttb{}private}"
+translation["protected"] := "{\\ttb{}protected}"
+translation["public"] := "{\\ttb{}public}"
+translation["register"] := "{\\ttb{}register}"
+translation["return"] := "{\\ttb{}return}"
+translation["short"] := "{\\ttb{}short}"
+translation["sizeof"] := "{\\ttb{}sizeof}"
+translation["static"] := "{\\ttb{}static}"
+translation["struct"] := "{\\ttb{}struct}"
+translation["switch"] := "{\\ttb{}switch}"
+translation["this"] := "{\\ttb{}this}"
+translation["typedef"] := "{\\ttb{}typedef}"
+translation["union"] := "{\\ttb{}union}"
+translation["unsigned"] := "{\\ttb{}unsigned}"
+translation["virtual"] := "{\\ttb{}virtual}"
+translation["void"] := "{\\ttb{}void}"
+translation["while"] := "{\\ttb{}while}"
+
+# Translations for operators.
+translation["{"] := "\\{"
+translation["}"] := "\\}"
+translation["<"] := "\\(<\\)"
+translation[">"] := "\\(>\\)"
+translation["<<"] := "\\(\\ll\\)"
+translation[">>"] := "\\(\\gg\\)"
+translation["!="] := "\\(\\neq\\)"
+translation["&&"] := "\\(\\land\\)"
+translation["||"] := "\\(\\lor\\)"
+translation["<="] := "\\(\\le\\)"
+translation[">="] := "\\(\\ge\\)"
+translation["->"] := "\\(\\to\\)"
diff --git a/web/noweb/contrib/kostas/C_translation_table b/web/noweb/contrib/kostas/C_translation_table
new file mode 100644
index 0000000000..a283eb89ce
--- /dev/null
+++ b/web/noweb/contrib/kostas/C_translation_table
@@ -0,0 +1,55 @@
+# This file defines translations into \TeX\ code for keywords of C. It also defines
+# translations for special tokens, such as <=.
+
+# Initialize the translation table to contain nulls.
+translation := table()
+
+# Reserved words.
+translation["auto"] := "{\\ttb{}auto}"
+translation["break"] := "{\\ttb{}break}"
+translation["case"] := "{\\ttb{}case}"
+translation["char"] := "{\\ttb{}char}"
+translation["continue"] := "{\\ttb{}continue}"
+translation["default"] := "{\\ttb{}default}"
+translation["do"] := "{\\ttb{}do}"
+translation["double"] := "{\\ttb{}double}"
+translation["else"] := "{\\ttb{}else}"
+translation["enum"] := "{\\ttb{}enum}"
+translation["extern"] := "{\\ttb{}extern}"
+translation["float"] := "{\\ttb{}float}"
+translation["for"] := "{\\ttb{}for}"
+translation["goto"] := "{\\ttb{}goto}"
+translation["if"] := "{\\ttb{}if}"
+translation["int"] := "{\\ttb{}int}"
+translation["long"] := "{\\ttb{}long}"
+translation["register"] := "{\\ttb{}register}"
+translation["return"] := "{\\ttb{}return}"
+translation["short"] := "{\\ttb{}short}"
+translation["sizeof"] := "{\\ttb{}sizeof}"
+translation["static"] := "{\\ttb{}static}"
+translation["struct"] := "{\\ttb{}struct}"
+translation["switch"] := "{\\ttb{}switch}"
+translation["typedef"] := "{\\ttb{}typedef}"
+translation["union"] := "{\\ttb{}union}"
+translation["unsigned"] := "{\\ttb{}unsigned}"
+translation["void"] := "{\\ttb{}void}"
+translation["while"] := "{\\ttb{}while}"
+
+# Pre-processor directives
+translation["#define"] := "{#\\ttb{}define}"
+translation["#include"] := "{#\\ttb{}include}"
+
+# Translations for operators.
+translation["{"] := "\\{"
+translation["}"] := "\\}"
+translation["<"] := "\\(<\\)"
+translation[">"] := "\\(>\\)"
+translation["<<"] := "\\(\\ll\\)"
+translation[">>"] := "\\(\\gg\\)"
+translation["!="] := "\\(\\neq\\)"
+translation["&&"] := "\\(\\land\\)"
+translation["||"] := "\\(\\lor\\)"
+translation["<="] := "\\(\\le\\)"
+translation[">="] := "\\(\\ge\\)"
+translation["->"] := "\\(\\to\\)"
+
diff --git a/web/noweb/contrib/kostas/Makefile b/web/noweb/contrib/kostas/Makefile
new file mode 100644
index 0000000000..27bbffad71
--- /dev/null
+++ b/web/noweb/contrib/kostas/Makefile
@@ -0,0 +1,75 @@
+# Only works with GNU make.
+
+LIB=/opt/noweb/lib
+ICONC=icont
+# This is supposed to be the defns.nw file in the icon directory of the distribution.
+defns=defns.nw
+TANGLE=notangle
+WEAVE=noweave -delay -filter icon.filter -index
+
+.SUFFIXES: .nw .icn .tex .dvi
+
+
+all: C.filter C++.filter icon.filter oot.filter math.filter\
+ autodefs.oot autodefs.math
+
+install:
+ mv *.filter $(LIB)
+ mv autodefs.* $(LIB)
+
+
+# TeX files.
+%.tex : %.nw
+ $(WEAVE) $< > $@
+pp.tex: pp.nw
+ noweave -delay -autodefs icon -filter icon.filter -index pp.nw > pp.tex
+%.dvi : %.tex
+ latex $<
+# Don't delete the intermediate .tex file.
+.PRECIOUS : %.tex
+
+
+# Icon files.
+C.icn: pp.nw C_translation_table
+ $(TANGLE) -R"C" pp.nw > $@
+C++.icn: pp.nw C++_translation_table
+ $(TANGLE) -R"C++" pp.nw > $@
+icon.icn: pp.nw icon_translation_table
+ $(TANGLE) -R"Icon" pp.nw > $@
+oot.icn: pp.nw oot_translation_table
+ $(TANGLE) -R"OOT" pp.nw > $@
+math.icn: pp.nw math_translation_table
+ $(TANGLE) -R"Mathematica" pp.nw > $@
+
+ootdefs.icn: ootdefs.nw
+ $(TANGLE) $< $(defns) > $@
+mathdefs.icn: mathdefs.nw
+ $(TANGLE) $< $(defns) > $@
+
+
+# Executables: filters.
+%.filter : %.icn
+ $(ICONC) -o $@ $<
+
+# Executables: autodefs.
+autodefs.oot: ootdefs.icn
+ $(ICONC) -o autodefs.oot ootdefs.icn
+autodefs.math: mathdefs.icn
+ $(ICONC) -o autodefs.math mathdefs.icn
+
+
+# Cleaning: remove all files that can be recreated from noweb sources.
+nowebs := $(wildcard *.nw)
+rem := $(nowebs:.nw=.icn)
+rem := $(rem) $(nowebs:.nw=.tex)
+rem := $(rem) $(nowebs:.nw=.log)
+rem := $(rem) $(nowebs:.nw=.aux)
+rem := $(rem) $(nowebs:.nw=.toc)
+
+
+# Also remove the Icon files for the filters.
+clean:
+ -rm -f $(rem) C.icn C++.icn icon.icn oot.icn math.icn *.filter autodefs.*
+
+
+
diff --git a/web/noweb/contrib/kostas/Makefile.gnu b/web/noweb/contrib/kostas/Makefile.gnu
new file mode 100644
index 0000000000..27bbffad71
--- /dev/null
+++ b/web/noweb/contrib/kostas/Makefile.gnu
@@ -0,0 +1,75 @@
+# Only works with GNU make.
+
+LIB=/opt/noweb/lib
+ICONC=icont
+# This is supposed to be the defns.nw file in the icon directory of the distribution.
+defns=defns.nw
+TANGLE=notangle
+WEAVE=noweave -delay -filter icon.filter -index
+
+.SUFFIXES: .nw .icn .tex .dvi
+
+
+all: C.filter C++.filter icon.filter oot.filter math.filter\
+ autodefs.oot autodefs.math
+
+install:
+ mv *.filter $(LIB)
+ mv autodefs.* $(LIB)
+
+
+# TeX files.
+%.tex : %.nw
+ $(WEAVE) $< > $@
+pp.tex: pp.nw
+ noweave -delay -autodefs icon -filter icon.filter -index pp.nw > pp.tex
+%.dvi : %.tex
+ latex $<
+# Don't delete the intermediate .tex file.
+.PRECIOUS : %.tex
+
+
+# Icon files.
+C.icn: pp.nw C_translation_table
+ $(TANGLE) -R"C" pp.nw > $@
+C++.icn: pp.nw C++_translation_table
+ $(TANGLE) -R"C++" pp.nw > $@
+icon.icn: pp.nw icon_translation_table
+ $(TANGLE) -R"Icon" pp.nw > $@
+oot.icn: pp.nw oot_translation_table
+ $(TANGLE) -R"OOT" pp.nw > $@
+math.icn: pp.nw math_translation_table
+ $(TANGLE) -R"Mathematica" pp.nw > $@
+
+ootdefs.icn: ootdefs.nw
+ $(TANGLE) $< $(defns) > $@
+mathdefs.icn: mathdefs.nw
+ $(TANGLE) $< $(defns) > $@
+
+
+# Executables: filters.
+%.filter : %.icn
+ $(ICONC) -o $@ $<
+
+# Executables: autodefs.
+autodefs.oot: ootdefs.icn
+ $(ICONC) -o autodefs.oot ootdefs.icn
+autodefs.math: mathdefs.icn
+ $(ICONC) -o autodefs.math mathdefs.icn
+
+
+# Cleaning: remove all files that can be recreated from noweb sources.
+nowebs := $(wildcard *.nw)
+rem := $(nowebs:.nw=.icn)
+rem := $(rem) $(nowebs:.nw=.tex)
+rem := $(rem) $(nowebs:.nw=.log)
+rem := $(rem) $(nowebs:.nw=.aux)
+rem := $(rem) $(nowebs:.nw=.toc)
+
+
+# Also remove the Icon files for the filters.
+clean:
+ -rm -f $(rem) C.icn C++.icn icon.icn oot.icn math.icn *.filter autodefs.*
+
+
+
diff --git a/web/noweb/contrib/kostas/Makefile.make b/web/noweb/contrib/kostas/Makefile.make
new file mode 100644
index 0000000000..27bbffad71
--- /dev/null
+++ b/web/noweb/contrib/kostas/Makefile.make
@@ -0,0 +1,75 @@
+# Only works with GNU make.
+
+LIB=/opt/noweb/lib
+ICONC=icont
+# This is supposed to be the defns.nw file in the icon directory of the distribution.
+defns=defns.nw
+TANGLE=notangle
+WEAVE=noweave -delay -filter icon.filter -index
+
+.SUFFIXES: .nw .icn .tex .dvi
+
+
+all: C.filter C++.filter icon.filter oot.filter math.filter\
+ autodefs.oot autodefs.math
+
+install:
+ mv *.filter $(LIB)
+ mv autodefs.* $(LIB)
+
+
+# TeX files.
+%.tex : %.nw
+ $(WEAVE) $< > $@
+pp.tex: pp.nw
+ noweave -delay -autodefs icon -filter icon.filter -index pp.nw > pp.tex
+%.dvi : %.tex
+ latex $<
+# Don't delete the intermediate .tex file.
+.PRECIOUS : %.tex
+
+
+# Icon files.
+C.icn: pp.nw C_translation_table
+ $(TANGLE) -R"C" pp.nw > $@
+C++.icn: pp.nw C++_translation_table
+ $(TANGLE) -R"C++" pp.nw > $@
+icon.icn: pp.nw icon_translation_table
+ $(TANGLE) -R"Icon" pp.nw > $@
+oot.icn: pp.nw oot_translation_table
+ $(TANGLE) -R"OOT" pp.nw > $@
+math.icn: pp.nw math_translation_table
+ $(TANGLE) -R"Mathematica" pp.nw > $@
+
+ootdefs.icn: ootdefs.nw
+ $(TANGLE) $< $(defns) > $@
+mathdefs.icn: mathdefs.nw
+ $(TANGLE) $< $(defns) > $@
+
+
+# Executables: filters.
+%.filter : %.icn
+ $(ICONC) -o $@ $<
+
+# Executables: autodefs.
+autodefs.oot: ootdefs.icn
+ $(ICONC) -o autodefs.oot ootdefs.icn
+autodefs.math: mathdefs.icn
+ $(ICONC) -o autodefs.math mathdefs.icn
+
+
+# Cleaning: remove all files that can be recreated from noweb sources.
+nowebs := $(wildcard *.nw)
+rem := $(nowebs:.nw=.icn)
+rem := $(rem) $(nowebs:.nw=.tex)
+rem := $(rem) $(nowebs:.nw=.log)
+rem := $(rem) $(nowebs:.nw=.aux)
+rem := $(rem) $(nowebs:.nw=.toc)
+
+
+# Also remove the Icon files for the filters.
+clean:
+ -rm -f $(rem) C.icn C++.icn icon.icn oot.icn math.icn *.filter autodefs.*
+
+
+
diff --git a/web/noweb/contrib/kostas/README b/web/noweb/contrib/kostas/README
new file mode 100644
index 0000000000..d26e9c78b5
--- /dev/null
+++ b/web/noweb/contrib/kostas/README
@@ -0,0 +1,14 @@
+This directory contains noweb programs (written in Icon) that extend the basic noweb
+with some pretty-printing capabilities. By this we don't mean formatting, but just
+printing keywords in bold, comments in roman, mathematical operators using special
+symbols, etc. The pretty-printers work as noweb filters.
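+
+For example, to weave a C program through the C pretty-printer, put the
+filter on noweave's pipeline (-filter before -index):
+
+ noweave -autodefs c -filter C.filter -index t.nw > t.tex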
+
+There are 4 pretty-printers here, all generated from the same file, pp.nw. The
+pretty-printers are for C/C++, Icon, Turing (OOT), and Mathematica. pp.nw contains
+full documentation, and is written in such a way that adding another language
+should be easy. (Let me know if you find it isn't so.)
+
+The pretty-printers work only with LaTeX2e, and you must have the cmttb10 font.
+
+There are also filters that write noweb index entries (autodefs) for Turing and
+Mathematica.
diff --git a/web/noweb/contrib/kostas/WHATS_NEW b/web/noweb/contrib/kostas/WHATS_NEW
new file mode 100644
index 0000000000..e08b859d56
--- /dev/null
+++ b/web/noweb/contrib/kostas/WHATS_NEW
@@ -0,0 +1,46 @@
+WHAT'S NEW:
+
+1) Indexing: the pretty-printers now fix up some of the deficiencies of noweb's
+indexing mechanism.
+
+"finduses" generates spurious references to identifiers when it sees a string
+matching an identifier inside a comment or string literal. For example, try
+
+ noweave -autodefs c -index t.nw > t.tex
+ latex t
+ latex t
+
+on the file
+
+<<Section A>>=
+int num;
+<<Section B>>=
+/* num is the number ... */
+<<Section C>>=
+printf("num is the number of things\n");
+@
+\nowebindex
+
+noweave (finduses) will generate a reference to num in each of sections B and C.
+
+Now try
+
+ noweave -autodefs c -filter C.filter -index t.nw > t.tex
+ latex t
+ latex t
+
+(NOTE: -filter first, -index next!)
+You get a pretty-printed version without these spurious references; all the
+pretty-printer filters suppress them.
+
+
+2) A bug in pretty-printing operators has been fixed. For example, if ``=='' is
+meant to be typeset as ``\equiv'', say, but ``==='' is supposed to be left alone, the
+old version would typeset ``==='' as ``\equiv=''.
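+
+As an illustrative Icon sketch of the fix (not the actual pp.nw code; the
+names here are made up): the list of special tokens is kept sorted
+longest-first, so scanning matches the longest token:
+
+  special := ["===", "=="]               # longest tokens first
+  "===" ? writes(tab(match(!special)))   # writes "===", not "=="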
+
+
+3) For those who know about OOT: autodefs.oot no longer indexes identifiers
+declared in subprograms. This reduces clutter. However, it can index such
+identifiers selectively if the user supplies an argument to the filter: the
+name of a file listing subprogram names, one per line. (See ootdefs.nw for
+details.)
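+
+For example, with a (hypothetical) file subs.txt listing subprogram names:
+
+  noweave -autodefs "oot subs.txt" -filter oot.filter -index t.nw > t.tex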
diff --git a/web/noweb/contrib/kostas/defns.nw b/web/noweb/contrib/kostas/defns.nw
new file mode 100644
index 0000000000..0d3326c446
--- /dev/null
+++ b/web/noweb/contrib/kostas/defns.nw
@@ -0,0 +1,33 @@
+<<*>>=
+procedure go()
+ local line
+ while line := read() do {
+ apply(prepass, line)
+ write(line)
+ apply(postpass, line)
+ }
+end
+
+procedure apply(pass, line)
+ line ? (="@" & pass(tab(upto(' ')|0), if =" " then tab(0) else &null))
+end
+@
+[[indextext]] is a hack to introduce suitable ``[[@index nl]],'' but it
+messes up the line counts!
+<<*>>=
+procedure writedefn(defn)
+ static indextext
+ initial indextext := ""
+ if /defn then
+ *indextext > 0 & <<flush index>>
+ else {
+ if *indextext + *defn > 65 then <<flush index>>
+ write("@index defn ", defn)
+ indextext ||:= " " || defn
+ }
+ return
+end
+<<flush index>>=
+{ # write("@index nl") # don't!
+ indextext := ""
+}
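+@ This file is not tangled on its own; the Makefile tangles it together with
+a language-specific file, for example
+
+  notangle mathdefs.nw defns.nw > mathdefs.icn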
diff --git a/web/noweb/contrib/kostas/email b/web/noweb/contrib/kostas/email
new file mode 100644
index 0000000000..fec98b1e32
--- /dev/null
+++ b/web/noweb/contrib/kostas/email
@@ -0,0 +1 @@
+ko@surya.ho.att.com
diff --git a/web/noweb/contrib/kostas/icon_translation_table b/web/noweb/contrib/kostas/icon_translation_table
new file mode 100644
index 0000000000..d3b8099666
--- /dev/null
+++ b/web/noweb/contrib/kostas/icon_translation_table
@@ -0,0 +1,140 @@
+# This file defines translations into \TeX\ code for reserved words and keywords of Icon.
+# It also defines translations for special tokens, such as <=.
+
+
+# Initialize the translation table to contain nulls.
+translation := table()
+
+
+# Reserved words.
+translation["by"] := "{\\ttb{}by}"
+translation["break"] := "{\\ttb{}break}"
+translation["case"] := "{\\ttb{}case}"
+translation["create"] := "{\\ttb{}create}"
+translation["default"] := "{\\ttb{}default}"
+translation["do"] := "{\\ttb{}do}"
+translation["else"] := "{\\ttb{}else}"
+translation["end"] := "{\\ttb{}end}"
+translation["every"] := "{\\ttb{}every}"
+translation["global"] := "{\\ttb{}global}"
+translation["fail"] := "{\\ttb{}fail}"
+translation["if"] := "{\\ttb{}if}"
+translation["initial"] := "{\\ttb{}initial}"
+translation["link"] := "{\\ttb{}link}"
+translation["local"] := "{\\ttb{}local}"
+translation["next"] := "{\\ttb{}next}"
+translation["not"] := "{\\ttb{}not}"
+translation["of"] := "{\\ttb{}of}"
+translation["procedure"] := "{\\ttb{}procedure}"
+translation["record"] := "{\\ttb{}record}"
+translation["repeat"] := "{\\ttb{}repeat}"
+translation["return"] := "{\\ttb{}return}"
+translation["static"] := "{\\ttb{}static}"
+translation["suspend"] := "{\\ttb{}suspend}"
+translation["to"] := "{\\ttb{}to}"
+translation["then"] := "{\\ttb{}then}"
+translation["while"] := "{\\ttb{}while}"
+translation["until"] := "{\\ttb{}until}"
+
+# Icon keywords.
+translation["&ascii"] := "{\\ttb{}\&ascii}"
+translation["&clock"] := "{\\ttb{}\&clock}"
+translation["&collections"] := "{\\ttb{}\&collections}"
+translation["&cset"] := "{\\ttb{}\&cset}"
+translation["&current"] := "{\\ttb{}\&current}"
+translation["&date"] := "{\\ttb{}\&date}"
+translation["&dateline"] := "{\\ttb{}\&dateline}"
+translation["&digits"] := "{\\ttb{}\&digits}"
+translation["&error"] := "{\\ttb{}\&error}"
+translation["&errornumber"] := "{\\ttb{}\&errornumber}"
+translation["&errortext"] := "{\\ttb{}\&errortext}"
+translation["&errorvalue"] := "{\\ttb{}\&errorvalue}"
+translation["&errout"] := "{\\ttb{}\&errout}"
+translation["&fail"] := "{\\ttb{}\&fail}"
+translation["&features"] := "{\\ttb{}\&features}"
+translation["&file"] := "{\\ttb{}\&file}"
+translation["&host"] := "{\\ttb{}\&host}"
+translation["&input"] := "{\\ttb{}\&input}"
+translation["&lcase"] := "{\\ttb{}\&lcase}"
+translation["&letters"] := "{\\ttb{}\&letters}"
+translation["&level"] := "{\\ttb{}\&level}"
+translation["&line"] := "{\\ttb{}\&line}"
+translation["&main"] := "{\\ttb{}\&main}"
+translation["&null"] := "{\\ttb{}\&null}"
+translation["&output"] := "{\\ttb{}\&output}"
+translation["&pos"] := "{\\ttb{}\&pos}"
+translation["&random"] := "{\\ttb{}\&random}"
+translation["&regions"] := "{\\ttb{}\&regions}"
+translation["&source"] := "{\\ttb{}\&source}"
+translation["&storage"] := "{\\ttb{}\&storage}"
+translation["&subject"] := "{\\ttb{}\&subject}"
+translation["&time"] := "{\\ttb{}\&time}"
+translation["&trace"] := "{\\ttb{}\&trace}"
+translation["&ucase"] := "{\\ttb{}\&ucase}"
+translation["&version"] := "{\\ttb{}\&version}"
+
+# Added in Version 8.10.
+translation["&allocated"] := "{\\ttb{}\&allocated}"
+translation["&e"] := "{\\ttb{}\&e}"
+translation["&phi"] := "{\\ttb{}\&phi}"
+translation["&pi"] := "{\\ttb{}\&pi}"
+translation["&progname"] := "{\\ttb{}\&progname}"
+
+# Added by the X interface.
+#translation["&col"] := "{\\ttb{}\&col}"
+#translation["&control"] := "{\\ttb{}\&control}"
+#translation["&interval"] := "{\\ttb{}\&interval}"
+#translation["&ldrag"] := "{\\ttb{}\&ldrag}"
+#translation["&lpress"] := "{\\ttb{}\&lpress}"
+#translation["&lrelease"] := "{\\ttb{}\&lrelease}"
+#translation["&mdrag"] := "{\\ttb{}\&mdrag}"
+#translation["&meta"] := "{\\ttb{}\&meta}"
+#translation["&mpress"] := "{\\ttb{}\&mpress}"
+#translation["&mrelease"] := "{\\ttb{}\&mrelease}"
+#translation["&resize"] := "{\\ttb{}\&resize}"
+#translation["&rdrag"] := "{\\ttb{}\&rdrag}"
+#translation["&row"] := "{\\ttb{}\&row}"
+#translation["&rpress"] := "{\\ttb{}\&rpress}"
+#translation["&rrelease"] := "{\\ttb{}\&rrelease}"
+#translation["&shift"] := "{\\ttb{}\&shift}"
+#translation["&window"] := "{\\ttb{}\&window}"
+#translation["&x"] := "{\\ttb{}\&x}"
+#translation["&y"] := "{\\ttb{}\&y}"
+
+
+# Translator directives (V8.10).
+translation["$include"] := "{\\ttb{}\\$include}"
+translation["$line"] := "{\\ttb\\$line}"
+translation["$define"] := "{\\ttb\\$define}"
+translation["$undef"] := "{\\ttb\\$undef}"
+translation["$ifdef"] := "{\\ttb\\$ifdef}"
+translation["$ifndef"] := "{\\ttb\\$ifndef}"
+translation["$else"] := "{\\ttb\\$else}"
+translation["$endif"] := "{\\ttb\\$endif}"
+
+
+# Translations for operators, and other good stuff.
+translation["{"] := "\\{"
+translation["}"] := "\\}"
+translation["\\"] := "\\verb|\\|"
+translation["<"] := "\\(<\\)"
+translation[">"] := "\\(>\\)"
+translation["<="] := "\\(\\le\\)"
+translation[">="] := "\\(\\ge\\)"
+translation["~="] := "\\(\\neq\\)"
+translation["++"] := "\\(\\cup\\)"
+translation["**"] := "\\(\\cap\\)"
+translation["--"] := "\\(\\setminus\\)"
+translation["&"] := "\\(\\land\\)" # Conjunction
+translation["|"] := "\\(\\lor\\)" # Alternation
+translation[">>"] := "\\(\\succ\\)"
+translation["<<"] := "\\(\\prec\\)"
+translation["||"] := "\\(\\Vert\\)"
+translation[">>="] := "\\(\\succeq\\)"
+translation["<<="] := "\\(\\preceq\\)"
+#translation["=="] := ?
+#translation["~=="] := ?
+translation["==="] := "\\(\\equiv\\)"
+translation["~==="] := "\\(\\not\\equiv\\)"
+translation[":=:"] := "\\(\\leftrightarrow\\)"
+translation["<->"] := "\\(\\leftrightarrow\\)"
diff --git a/web/noweb/contrib/kostas/math_translation_table b/web/noweb/contrib/kostas/math_translation_table
new file mode 100644
index 0000000000..cc57c4e995
--- /dev/null
+++ b/web/noweb/contrib/kostas/math_translation_table
@@ -0,0 +1,68 @@
+# This file defines translations into \TeX\ code for some of the most common Mathematica
+# keywords. Not all of them, because there are too many.
+# It also defines translations for special tokens, such as <=.
+
+
+# Initialize the translation table to contain nulls.
+translation := table()
+
+# Keywords.
+translation["Abort"] := "{\\ttb{}Abort}"
+translation["And"] := "{\\ttb{}And}"
+translation["Append"] := "{\\ttb{}Append}"
+translation["AppendTo"] := "{\\ttb{}AppendTo}"
+translation["Apply"] := "{\\ttb{}Apply}"
+translation["Array"] := "{\\ttb{}Array}"
+translation["Assert"] := "{\\ttb{}Assert}" # This is mine.
+translation["Begin"] := "{\\ttb{}Begin}"
+translation["BeginPackage"] := "{\\ttb{}BeginPackage}"
+translation["Block"] := "{\\ttb{}Block}"
+translation["Break"] := "{\\ttb{}Break}"
+translation["Chop"] := "{\\ttb{}Chop}"
+translation["Continue"] := "{\\ttb{}Continue}"
+translation["Do"] := "{\\ttb{}Do}"
+translation["End"] := "{\\ttb{}End}"
+translation["EndPackage"] := "{\\ttb{}EndPackage}"
+# My addition:
+translation["ExitWhen"] := "{\\ttb{}ExitWhen}"
+translation["False"] := "{\\ttb{}False}"
+translation["For"] := "{\\ttb{}For}"
+translation["Function"] := "{\\ttb{}Function}"
+translation["If"] := "{\\ttb{}If}"
+translation["Join"] := "{\\ttb{}Join}"
+translation["Length"] := "{\\ttb{}Length}"
+# My addition:
+translation["Loop"] := "{\\ttb{}Loop}"
+translation["Map"] := "{\\ttb{}Map}"
+translation["Module"] := "{\\ttb{}Module}"
+translation["Needs"] := "{\\ttb{}Needs}"
+translation["Not"] := "{\\ttb{}Not}"
+translation["Part"] := "{\\ttb{}Part}"
+translation["Prepend"] := "{\\ttb{}Prepend}"
+translation["Print"] := "{\\ttb{}Print}"
+translation["Return"] := "{\\ttb{}Return}"
+translation["Scan"] := "{\\ttb{}Scan}"
+translation["Switch"] := "{\\ttb{}Switch}"
+translation["Table"] := "{\\ttb{}Table}"
+translation["Take"] := "{\\ttb{}Take}"
+translation["True"] := "{\\ttb{}True}"
+translation["Union"] := "{\\ttb{}Union}"
+translation["Which"] := "{\\ttb{}Which}"
+translation["While"] := "{\\ttb{}While}"
+
+
+# Translations for operators, etc.
+translation["{"] := "\\{"
+translation["}"] := "\\}"
+translation["<"] := "\\(<\\)"
+translation[">"] := "\\(>\\)"
+translation["!="] := "\\(\\neq\\)"
+translation["=="] := "\\(\\equiv\\)"
+translation["<="] := "\\(\\le\\)"
+translation[">="] := "\\(\\ge\\)"
+translation["->"] := "\\(\\rightarrow\\)"
+translation["&&"] := "\\(\\land\\)"
+translation["||"] := "\\(\\lor\\)"
+translation["**"] := "\\(\\otimes\\)"
+translation["<>"] := "\\(\\bowtie\\)"
+
diff --git a/web/noweb/contrib/kostas/mathdefs.nw b/web/noweb/contrib/kostas/mathdefs.nw
new file mode 100644
index 0000000000..e77f7d3845
--- /dev/null
+++ b/web/noweb/contrib/kostas/mathdefs.nw
@@ -0,0 +1,24 @@
+\section{Finding \textsl{Mathematica} definitions}
+
+This will simply recognize definitions made with ``:=''.
+<<*>>=
+procedure main(args)
+ go()
+end
+<<*>>=
+procedure postpass(name, arg)
+ static kind, id
+ initial {kind := "bogus"; id := &letters ++ &digits}
+ case name of {
+ "begin" : arg ? kind := tab(upto(' ')|0)
+ "text" : if kind == "code" then
+ arg ? if s := tab(find(":=")) then
+ {s ? {tab(many(' ')); writedefn(tab(many(id)))}
+ }
+ }
+ return
+end
+
+procedure prepass(name, arg)
+ if name == "end" then writedefn(&null) # force newline
+end
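+@ Once installed as autodefs.math in noweb's library directory, this filter
+would typically be invoked as (t.nw is a stand-in name)
+
+  noweave -autodefs math -filter math.filter -index t.nw > t.tex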
diff --git a/web/noweb/contrib/kostas/oot_translation_table b/web/noweb/contrib/kostas/oot_translation_table
new file mode 100644
index 0000000000..0ca9078412
--- /dev/null
+++ b/web/noweb/contrib/kostas/oot_translation_table
@@ -0,0 +1,137 @@
+# This file defines translations into \TeX\ code for keywords of OOT (Object-Oriented
+# Turing). It also defines translations for special tokens, such as <=.
+
+# Initialize the translation table to contain nulls.
+translation := table()
+
+# Reserved words.
+translation["addressint"] := "{\\ttb{}addressint}"
+translation["all"] := "{\\ttb{}all}"
+translation["and"] := "\\(\\land\\)"
+translation["anyclass"] := "{\\ttb{}anyclass}"
+translation["array"] := "{\\ttb{}array}"
+translation["assert"] := "{\\ttb{}assert}"
+translation["begin"] := "{\\ttb{}begin}"
+translation["bind"] := "{\\ttb{}bind}"
+translation["body"] := "{\\ttb{}body}"
+translation["boolean"] := "{\\ttb{}boolean}"
+translation["by"] := "{\\ttb{}by}"
+translation["case"] := "{\\ttb{}case}"
+translation["char"] := "{\\ttb{}char}"
+translation["cheat"] := "{\\ttb{}cheat}"
+translation["checked"] := "{\\ttb{}checked}"
+translation["class"] := "{\\ttb{}class}"
+translation["close"] := "{\\ttb{}close}"
+translation["collection"] := "{\\ttb{}collection}"
+translation["const"] := "{\\ttb{}const}"
+translation["decreasing"] := "{\\ttb{}decreasing}"
+translation["deferred"] := "{\\ttb{}deferred}"
+translation["div"] := "{\\ttb{}div}"
+translation["else"] := "{\\ttb{}else}"
+translation["elsif"] := "{\\ttb{}elsif}"
+translation["end"] := "{\\ttb{}end}"
+translation["enum"] := "{\\ttb{}enum}"
+translation["exit"] := "{\\ttb{}exit}"
+translation["export"] := "{\\ttb{}export}"
+translation["external"] := "{\\ttb{}external}"
+translation["false"] := "{\\ttb{}false}"
+translation["flexible"] := "{\\ttb{}flexible}"
+translation["for"] := "{\\ttb{}for}"
+translation["fork"] := "{\\ttb{}fork}"
+translation["forward"] := "{\\ttb{}forward}"
+translation["free"] := "{\\ttb{}free}"
+translation["function"] := "{\\ttb{}function}"
+translation["get"] := "{\\ttb{}get}"
+translation["if"] := "{\\ttb{}if}"
+translation["implement"] := "{\\ttb{}implement}"
+translation["import"] := "{\\ttb{}import}"
+translation["in"] := "\\(\\in\\)"
+translation["include"] := "{\\ttb{}include}"
+translation["inherit"] := "{\\ttb{}inherit}"
+translation["init"] := "{\\ttb{}init}"
+translation["int"] := "{\\ttb{}int}"
+translation["int1"] := "{\\ttb{}int1}"
+translation["int2"] := "{\\ttb{}int2}"
+translation["int4"] := "{\\ttb{}int4}"
+translation["invariant"] := "{\\ttb{}invariant}"
+translation["label"] := "{\\ttb{}label}"
+translation["loop"] := "{\\ttb{}loop}"
+translation["mod"] := "{\\ttb{}mod}"
+translation["module"] := "{\\ttb{}module}"
+translation["monitor"] := "{\\ttb{}monitor}"
+translation["nat"] := "{\\ttb{}nat}"
+translation["nat1"] := "{\\ttb{}nat1}"
+translation["nat2"] := "{\\ttb{}nat2}"
+translation["nat4"] := "{\\ttb{}nat4}"
+translation["new"] := "{\\ttb{}new}"
+translation["nil"] := "{\\ttb{}nil}"
+translation["not"] := "\\(\\neg\\)\\kern-0.3em"
+translation["of"] := "{\\ttb{}of}"
+translation["opaque"] := "{\\ttb{}opaque}"
+translation["open"] := "{\\ttb{}open}"
+translation["or"] := "\\(\\lor\\)"
+translation["pause"] := "{\\ttb{}pause}"
+translation["pervasive"] := "{\\ttb{}pervasive}"
+translation["pointer"] := "{\\ttb{}pointer}"
+translation["post"] := "{\\ttb{}post}"
+translation["pre"] := "{\\ttb{}pre}"
+translation["procedure"] := "{\\ttb{}procedure}"
+translation["process"] := "{\\ttb{}process}"
+translation["put"] := "{\\ttb{}put}"
+translation["quit"] := "{\\ttb{}quit}"
+translation["read"] := "{\\ttb{}read}"
+translation["real"] := "{\\ttb{}real}"
+translation["real4"] := "{\\ttb{}real4}"
+translation["real8"] := "{\\ttb{}real8}"
+translation["record"] := "{\\ttb{}record}"
+translation["register"] := "{\\ttb{}register}"
+translation["result"] := "{\\ttb{}result}"
+translation["return"] := "{\\ttb{}return}"
+translation["seek"] := "{\\ttb{}seek}"
+translation["set"] := "{\\ttb{}set}"
+#translation["shl"] := "\\(\\triangleleft\\)"
+translation["shl"] := "{\\ttb{}shl}"
+#translation["shr"] := "\\(\\triangleright\\)"
+translation["shr"] := "{\\ttb{}shr}"
+translation["signal"] := "{\\ttb{}signal}"
+translation["skip"] := "{\\ttb{}skip}"
+translation["string"] := "{\\ttb{}string}"
+translation["tag"] := "{\\ttb{}tag}"
+translation["tell"] := "{\\ttb{}tell}"
+translation["then"] := "{\\ttb{}then}"
+translation["to"] := "{\\ttb{}to}"
+translation["true"] := "{\\ttb{}true}"
+translation["type"] := "{\\ttb{}type}"
+translation["unchecked"] := "{\\ttb{}unchecked}"
+translation["union"] := "{\\ttb{}union}"
+translation["unit"] := "{\\ttb{}unit}"
+translation["unqualified"] := "{\\ttb{}unqualified}"
+translation["var"] := "{\\ttb{}var}"
+translation["wait"] := "{\\ttb{}wait}"
+translation["when"] := "{\\ttb{}when}"
+translation["write"] := "{\\ttb{}write}"
+translation["xor"] := "\\(\\oplus\\)"
+
+# Translations for operators.
+translation["<"] := "\\(<\\)"
+translation[">"] := "\\(>\\)"
+translation["<="] := "\\(\\le\\)"
+translation[">="] := "\\(\\ge\\)"
+translation["=>"] := "\\(\\Rightarrow\\)"
+translation["**"] := "\\^{}"
+translation["~="] := "\\(\\neq\\)"
+translation["->"] := "\\(\\triangleright\\)"
+
+
+# Pre-processor directives
+translation["#else"] := "{#\\ttb{}else}"
+translation["#elsif"] := "{#\\ttb{}elsif}"
+translation["#end"] := "{#\\ttb{}end}"
+translation["#if"] := "{#\\ttb{}if}"
+translation["#macro"] := "{#\\ttb{}macro}"
+
+
+# Turing Plus
+#translation["child"] := "{\\ttb{}child}"
+#translation["stub"] := "{\\ttb{}stub}"
+
diff --git a/web/noweb/contrib/kostas/ootdefs.nw b/web/noweb/contrib/kostas/ootdefs.nw
new file mode 100644
index 0000000000..dcbf8aedea
--- /dev/null
+++ b/web/noweb/contrib/kostas/ootdefs.nw
@@ -0,0 +1,198 @@
+% -*-lang : icon-*-
+
+\documentclass [11pt] {article}
+\usepackage {noweb}
+\usepackage {fullpage}
+\pagestyle {noweb}
+
+\title {Automatic \texttt{noweb} Indexing for OOT Identifiers}
+\author{Kostas N. Oikonomou \\ \textsf{ko@surya.ho.att.com}}
+
+\begin {document}
+\maketitle
+
+@ This code builds a [[noweb]] filter which produces automatic index entries for OOT
+identifiers. It is used with the generic file [[icon/defns.nw]]. (The Makefile takes
+care of this.)\\
+\textsc{Note}: There are some good examples here of how someone used to programming
+in Turing can screw up in Icon. They have to do with assignment (Icon may succeed or
+fail), and with booleans.
+<<*>>=
+<<Finding OOT definitions>>
+<<Other routines needed by [[defns.nw]]>>
+@
+
+
+@
+\section {Finding OOT definitions}
+
+[[begin_decl]] is a set of reserved words that signal the beginning of a declaration.
+Ideally, after encountering a token in [[begin_decl]], we would find the identifier
+and write an index entry. However, some declarations are tricky:
+\begin {enumerate}
+ \item {\ttb{}deferred procedure} P
+ \item {\ttb{}const pervasive} k := 3
+ \item {\ttb{}external} \texttt{"}f\texttt{"} {\ttb{}function} F
+ \item {\ttb{}var} x, y, z : {\ttb{}real}
+ \item {\ttb{}import} a, b, {\ttb{}var} u \% No declarations here!
+ \item We normally don't index identifiers declared inside procedures or functions.
+ This can be changed, if desired, by making [[ie_list]] empty. However, it is
+ also possible to invoke the filter with an argument \texttt{\itshape filename},
+ where \texttt{\itshape filename\/} contains subprogram names one per line. Then
+ the local declarations in these subprograms {\em will\/} be indexed\footnote {For
+ this to work, invoke [[noweave]] as \texttt{noweave -autodefs "oot {\itshape
+ filename\/}" -filter oot.filter -index}}.
+\end {enumerate}
+Handling these cases makes life a bit more difficult. We do it by setting up various
+categories of tokens (Icon lists).
+@
+
+
+<<Set up token categories>>=
+id_chars := &letters ++ &digits ++ '_'
+begin_decl := ["class", "const", "function", "module", "monitor", "procedure",
+ "process", "type", "var"]
+qualifier1 := ["deferred", "external", "forward"] # Cases 1, 3
+qualifier2 := ["pervasive"] # Case 2
+ie_list := ["import", "export"]
+begin_subprogram := ["procedure", "function"]
+@
+
+@
+
+
+@
+<<Finding OOT definitions>>=
+procedure postpass(name, arg)
+ static kind, id_chars, begin_decl, qualifier1, qualifier2, ie_list,
+ begin_subprogram, in_ie_list, in_subprogram, sub_name
+ local token
+ initial {
+ <<Set up token categories>>
+ <<Initialize static variables>>
+ }
+ if name == "begin" then
+ arg ? kind := tab(upto(' ')|0)
+ else
+ if name == "text" & kind == "code" then
+ arg ? {<<Go for the identifiers>>}
+ return
+end
+@
+
+@
+<<Initialize static variables>>=
+kind := "bogus"
+in_ie_list := &null
+in_subprogram := &null
+@
+
+@
+<<Go for the identifiers>>=
+tab(many(' '))
+token := tab(many(id_chars)) # May be null.
+if /in_ie_list & /in_subprogram then # ``/'' is Turing's \textbf{not}.
+ case token of {
+ !ie_list : in_ie_list := "true"
+ !qualifier1 : {
+ <<If \textbf{external}, skip the quoted string>>
+ <<Find the identifiers and write index entries>>}
+ !begin_subprogram : <<Index [[sub_name]] and remember it>>
+ !begin_decl : {
+ tab(match(!qualifier2)); tab(many(' ')) # Case 2
+ <<Find the identifiers and write index entries>>}
+ }
+else {
+ <<Check for end of \textbf{import/export} list>>
+ <<Check for end of subprogram>>}
+@
+
+
+@ This is case 3. A quoted string may or may not be there!
+<<If \textbf{external}, skip the quoted string>>=
+if token == "external" then {
+ tab(many(' '))
+ # Icon note: the next expression fails if one of the sub-expressions fails.
+ tab(match('"')) & tab(many(id_chars)) & tab(match('"'))
+ tab(many(' '))
+}
+@
+
+@ This is case 6. [[sub_name]] is the subprogram's identifier.
+<<Index [[sub_name]] and remember it>>=
+{tab(many(' '))
+ sub_name := tab(many(id_chars))
+ writedefn(sub_name) # Defined in \texttt{defns.nw}
+ # \texttt{sub\_name ~== !subs\_to\_index} doesn't work. Why does it work in \texttt{case}?
+ if not member(set(subs_to_index), sub_name) then in_subprogram:= "true"}
+@
+
+@ This is case 4.
+Multiple identifiers can occur only in \textbf{var} or \textbf{const} declarations.
+<<Find the identifiers and write index entries>>=
+repeat {
+ writedefn(tab(many(id_chars))) # Defined in \texttt{defns.nw}
+ tab(many(' '))
+ if not tab(match(",")) then break
+ tab(many(' '))
+}
+@
+
+
+@ We assume that every line of a multi-line \textbf{import/export} list ends with a
+comma, unless it is the last line. So if [[in_ie_list]] is set, we check the last
+non-blank character of the line. If it is not a ``,'' we set [[in_ie_list]] to null.
+Clearly, the Turing compiler will accept multi-line lists even though their lines do
+not end with a comma, so this method of detecting the end of the list is not
+foolproof. It is, however, simple.
+<<Check for end of \textbf{import/export} list>>=
+# This is not Turing! \texttt{in\_ie\_list := trim(arg)[-1] == ","} is wrong!
+if \in_ie_list then
+ if arg == "" | trim(arg)[-1] ~== "," then in_ie_list := &null
+@
+
+@
+<<Check for end of subprogram>>=
+if \in_subprogram then
+ if \token == "end" then
+ {tab(many(' ')); if tab(match(sub_name)) then in_subprogram := &null}
+@
+
+
+
+@
+\section {Procedures [[main]] and [[prepass]]}
+
+This is copied (modified) from [[icon/icondefs.nw]].
+<<Other routines needed by [[defns.nw]]>>=
+global subs_to_index
+procedure main(args)
+ local name, f
+ <<Initialize [[subs_to_index]]>>
+ go()
+end
+procedure prepass(name, arg)
+ if name == "end" then writedefn(&null) # Force newline.
+end
+@
+
+@
+<<Initialize [[subs_to_index]]>>=
+subs_to_index := []
+f := open(args[1], "r")
+if \f then {
+ every put(subs_to_index, !f) # Neat! See p. 132 of the Icon book.
+ writes(&errout, "(Indexing subprograms")
+ every writes(&errout, " ", !subs_to_index)
+ write(&errout, ")")
+ close(f)
+}
+@
+
+
+
+%@
+%\section {Index}
+%\nowebindex
+
+\end {document} \ No newline at end of file
diff --git a/web/noweb/contrib/kostas/pp.nw b/web/noweb/contrib/kostas/pp.nw
new file mode 100644
index 0000000000..05e6c18254
--- /dev/null
+++ b/web/noweb/contrib/kostas/pp.nw
@@ -0,0 +1,541 @@
+% -*-lang : icon-*-
+
+\documentclass [11pt] {article}
+\usepackage {noweb}
+\usepackage {fullpage}
+\pagestyle {noweb}
+
+\title {Extending Noweb With Some Typesetting}
+\author{Kostas N. Oikonomou \\ \textsf{ko@surya.ho.att.com}}
+
+\begin {document}
+\maketitle
+\tableofcontents
+
+@
+\section {Introduction}
+
+This is a pretty-printer, written in Icon, for the [[noweb]] system. It can typeset
+reserved words, comments, quoted strings, and special (e.g.\ mathematical) symbols
+of the target language in an almost arbitrary
+way\footnote{It is also possible to typeset identifiers arbitrarily.}. This
+generality is achieved by the brute-force method of looking up the translation of
+(\TeX\ code for) a token in a table. See \S\ref{sec:lang} for the languages that can
+be handled. Adding a new language entails making additions \emph{only} to
+\S\ref{sec:lang}. All the material in \S\ref{sec:ind} is language-independent, and
+should not be touched.
+
+The pretty-printer's design is based on the following two premises:
+\begin {itemize}
+ \item It should be as independent of the target language as possible, and
+ \item We don't want to write a full-blown scanner for the target language.
+\end {itemize}
+Strings of characters of the target language which we want to typeset specially are
+called ``interesting tokens''. Having had some experience with Web and SpiderWeb, we
+define three categories of interesting tokens:
+\begin {enumerate}
+ \item Reserved words of the target language: we want to typeset them in bold, say.
+  \item Other strings that we want to typeset specially: e.g.\ $\le$ for [[<=]].
+ \item Comment and quoting tokens (characters): we want what follows them or what is
+ enclosed by them to be typeset literally.
+\end {enumerate}
+In addition, comments are typeset in roman font, and math mode is active in comments.
+
+A table [[translation]] defines a translation into \TeX\ code for every interesting
+token in the target language. Here is an excerpt from the translation table for
+Object-Oriented Turing:
+\begin {center}
+ \begin {tabular}{l}
+ [[translation["addressint"] := "{\\ttb{}addressint}"]] \\
+ [[translation["all"] := "{\\ttb{}all}"]] \\
+ [[translation["and"] := "\\(\\land\\)"]] \\
+ [[translation["anyclass"] := "{\\ttb{}anyclass}"]] \\
+ [[translation["array"] := "{\\ttb{}array}"]] \\
+ [[translation["~="] := "\\(\\neq\\)"]]
+ \end {tabular}
+\end {center}
+(Here the control sequence \verb+\ttb+ selects the bold typewriter font
+cmttb10\footnote{The empty group \{\} serves to separate the control sequence from
+its argument without introducing an extra space.}.) We use four sets of strings to
+define the tokens in categories 2 and 3:
+\begin {center}
+ [[special]], [[comment1]], [[comment2]], [[quote2]].
+\end {center}
+[[comment1]] is for unbalanced comment strings (e.g.\ the character [[%]] in Turing
+and [[#]] in Icon), [[comment2]] is for balanced comment strings (e.g.\ [[/*]] and
+[[*/]]), and [[quote2]] is for literal quotes, such as [["]], which we assume to be
+balanced.
+
+Our approach to recognizing the interesting tokens while scanning a line is to have
+a set of characters [[interesting]] (an Icon cset) containing all the characters by
+which an interesting token may begin. [[interesting]] is the union of
+\begin {itemize}
+ \item the cset defining the characters which may begin a reserved word, and
+ \item the cset containing the initial characters of all strings in the special,
+ comment, and quote sets.
+\end {itemize}
+The basic idea is this: given a line of text, we scan up to a character in
+[[interesting]], and, depending on what this character is, we may try to complete the
+token by further scanning. If we succeed, we look up the token in the
+[[translation]] table, and if the token is found, we output its translation,
+otherwise we output the token itself unchanged. When comment or quote tokens are
+recognized, further processing of the line may stop altogether, or be suspended
+until a matching token is found.
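+As a minimal sketch of this idea (illustrative only, using the names
+[[interesting]], [[id_chars]], [[special]], and [[translation]] defined below;
+the real loop, with comment and quote handling, is in \S\ref{sec:ind}):
+\begin{verbatim}
+   line ? while writes(tab(upto(interesting))) do {
+      token := tab(many(id_chars)) |    # complete a word, or
+               tab(match(!special)) |   # match a special token, or
+               move(1)                  # else pass the character through
+      writes(\translation[token] | token)
+   }                                    # the tail of the line follows the loop
+\end{verbatim}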
+
+
+\section {Languages}
+\label{sec:lang}
+The languages handled at the moment are C, C++, Icon, Object-Oriented Turing (OOT),
+and Mathematica. Looking at the structure that follows should make it clear that
+adding another language should be easy. Only this section has to be touched.
+<<C>>=
+<<[[main]] for C>>
+<<Language-independent procedures>>
+@
+<<C++>>=
+<<[[main]] for C++>>
+<<Language-independent procedures>>
+@
+<<OOT>>=
+<<[[main]] for OOT>>
+<<Language-independent procedures>>
+@
+<<Icon>>=
+<<[[main]] for Icon>>
+<<Language-independent procedures>>
+@
+<<Mathematica>>=
+<<[[main]] for Mathematica>>
+<<Language-independent procedures>>
+@
+
+
+@
+\subsection {Main Procedures}
+
+<<[[main]] for C>>=
+procedure main(args)
+ <<Local variables>>
+ $include "C_translation_table"
+ <<C interesting tokens>>
+ <<Emit special {\TeX} definitions>>
+ while line := read() do filter(line)
+end
+@
+@
+<<[[main]] for C++>>=
+procedure main(args)
+ <<Local variables>>
+ $include "C++_translation_table"
+ <<C interesting tokens>>
+ <<Emit special {\TeX} definitions>>
+ while line := read() do filter(line)
+end
+@
+@
+<<[[main]] for OOT>>=
+procedure main(args)
+ <<Local variables>>
+ $include "oot_translation_table"
+ <<OOT interesting tokens>>
+ <<Emit special {\TeX} definitions>>
+ while line := read() do filter(line)
+end
+@
+@
+<<[[main]] for Icon>>=
+procedure main(args)
+ <<Local variables>>
+ $include "icon_translation_table"
+ <<Icon interesting tokens>>
+ <<Emit special {\TeX} definitions>>
+ while line := read() do filter(line)
+end
+@
+@
+<<[[main]] for Mathematica>>=
+procedure main(args)
+ <<Local variables>>
+ $include "math_translation_table"
+ <<Mathematica interesting tokens>>
+ <<Emit special {\TeX} definitions>>
+ while line := read() do filter(line)
+end
+@
+
+
+@
+\subsection {Definition of the Interesting Tokens}
+\label{sec:int}
+
+\textsc{Note}: all of the lists [[special]] must be arranged so that longest tokens
+come first!
+<<OOT interesting tokens>>=
+res_word_chars := &letters ++ '#'
+id_chars := res_word_chars ++ &digits ++ '_'
+comment1 := ["%"] # Unbalanced comment
+comment2 := [["/*", "*/"]] # Balanced comment. This is a set of \emph{pairs}.
+quote2 := [["\"","\""]] # Balanced quote
+# The special tokens must be sorted so that longest strings come first!
+special := ["~=", "**", ">=", "<=", "=>", "->", ">", "<"]
+<<Detecting the beginning of a token>>
+@
+@ Icon presents an interesting problem, unresolved at this time. See \S\ref{sec:todo}.
+<<Icon interesting tokens>>=
+res_word_chars := &letters ++ '&$'
+id_chars := res_word_chars ++ &digits ++ '_'
+comment1 := ["#"]
+comment2 := [[]]
+quote2 := [["\"","\""], ["\'","\'"]]
+special := ["\\", "||", ">=", "<=", "=>", "~=", "++", "**", "--", "{", "}", "<", ">"]
+<<Detecting the beginning of a token>>
+@
+<<Mathematica interesting tokens>>=
+res_word_chars := &letters ++ '&$'
+id_chars := res_word_chars ++ &digits
+comment1 := []
+comment2 := [["(*", "*)"]]
+quote2 := [["\"", "\""]]
+special := ["!=", "==", ">=", "<=", "->", "||", "&&", "<>", "**", "{", "}", "<", ">"]
+<<Detecting the beginning of a token>>
+@
+<<C interesting tokens>>=
+res_word_chars := &letters ++ '#'
+id_chars := res_word_chars ++ &digits ++ '_'
+comment1 := []
+comment2 := [["/*", "*/"]]
+quote2 := [["\"","\""],["\'","\'"]]
+special := ["!=", ">=", "<=", "@<<", "@>>", "||", "&&", "->", "{", "}", "<", ">"]
+<<Detecting the beginning of a token>>
+@
+
+
+
+@
+\section {Language-Independent Pretty-Printing}
+\label{sec:ind}
+<<Language-independent procedures>>=
+<<Global variables>>
+<<Procedure [[filter]]>>
+<<Procedure [[TeXify]]>>
+@
+
+@
+\subsection {Detecting the Beginning of a Token}
+
+For each interesting category define a cset containing the characters by which a
+token in that category may begin.
+<<Detecting the beginning of a token>>=
+begin_comment1 := begin_comment2 := begin_quote2 := begin_special := ''
+every e := !comment1 do begin_comment1 ++:= cset(e[1])
+every e := !comment2 do begin_comment2 ++:= cset(e[1][1])
+every e := !quote2 do begin_quote2 ++:= cset(e[1])
+every e := !special do begin_special ++:= cset(e[1])
+<<Check that these sets are disjoint>>
+interesting := res_word_chars ++ begin_comment1 ++ begin_comment2 ++
+ begin_quote2 ++ begin_special
+@
+@ The token recognition method used in procedure [[TeXify]] is based on the
+assumption that the above sets are mutually disjoint, and that they also do not
+intersect the set [[id_chars]]. If this assumption does not hold, the results are
+unpredictable.
+<<Check that these sets are disjoint>>=
+I := begin_comment1 ** begin_comment2 ** begin_quote2 ** begin_special ** id_chars
+if *I ~= 0 then stop ("** Pretty-printer problem: the characters in the set ",
+ image(I), "\n may begin tokens in more than one category!")
+@
+
+@ Local and global variables for the [[main]] procedures.
+\enlargethispage*{1cm}
+<<Local variables>>=
+local line, special_set
+<<Global variables>>=
+global translation, res_word_chars, id_chars, special, comment1, comment2, quote2
+global interesting, begin_special, begin_comment1, begin_comment2, begin_quote2
+@
+
+
+@
+\subsection {The procedure TeXify}
+
+This procedure formats [[@text]] lines in the [[noweb]] file. It is called by
+procedure [[filter]]. Note that every \TeX{}ified line is a ``literal'' in
+[[noweb]]'s sense.
+<<Procedure [[TeXify]]>>=
+procedure TeXify(line, p0)
+ <<Local variables for [[TeXify]]>>
+ writes("@literal ")
+ line ?
+ {if \in_comment1 then
+ <<Write unbalanced comment text>>
+ else if \in_comment2 then
+ <<Write balanced comment text>>
+ else if \in_quote then
+ <<Write quoted text>>
+ else
+ <<Not inside a comment or quote>>
+ # Write the remainder of the line, if any.
+ writes(tab(0))
+ }
+ write()
+end
+@
+@
+<<Not inside a comment or quote>>=
+while writes(tab(upto(interesting))) do
+ case &pos+1 of {
+ # To understand the \texttt{&pos+1}, look at Icon's semantics of \texttt{case} and of \texttt{any}.
+ any(id_chars) : <<Identifier or reserved word>>
+ any(begin_special) : <<Possible ``special'' token>>
+ any(begin_comment1) : <<Possible unbalanced comment>>
+ any(begin_comment2) : <<Possible balanced comment>>
+ any(begin_quote2) : <<Possible quote>>
+ default : <<Internal error!>>
+ }
+@
+@ Well, if we got here there's something wrong in the scanning algorithm. [[p0]] is
+the position in the line of the source file where the argument [[line]] of [[TeXify]]
+begins.
+<<Internal error!>>=
+stop("\n** Error in pretty-printer procedure TeXify:\n input line ", line_num,
+ ", column ", p0+&pos-2)
+# Note: this is the column in the Emacs sense, i.e.\ the first character is in column 0.
+@
+
+
+@
+\subsubsection {Handling the interesting tokens}
+
+All identifiers will be matched (and some non-identifiers, such as explicit numeric
+constants), but the [[translation]] table defines \TeX\ code for reserved words
+only. Matching this larger set allows one to include translations for some
+identifiers too.
+
+As an exercise in understanding Icon's semantics, spend some time figuring out why
+saying {\ttfamily writes(if t := translation[token] then t else token)} will
+\emph{not} work here\footnote{Hint: consider the case in which no translation is
+defined for the token.}.
+<<Identifier or reserved word>>=
+{token := tab(many(id_chars))
+ t := translation[token]
+ writes(if \t then t else token)}
+@
+
+@ There are two issues here. Suppose our set [[special]] contains both [[=]] and
+[[==]], and it does not contain [[=-]]. What happens when we encounter [[=]]?
+First, we have to be sure that this is not the string [[==]]. So (a) we must match
+the {\em longest\/} token in [[special]], in case a special token is a prefix of
+another special token. Second, we must check that we do not have the string [[=-]],
+since this is not a special token. So (b) we must check that a token in [[special]]
+is followed by a proper terminating character.
+
+To ensure (a), [[match(!special)]] will match the longest token if the list
+[[special]] is arranged so that longest tokens come first, as noted in
+\S\ref{sec:int}. To ensure (b) define the cset [[not_special]]:
+<<Global variables>>=
+global not_special
+<<Detecting the beginning of a token>>=
+special_set := ''
+every e := !special do special_set ++:= cset(e)
+not_special := ~special_set
+@
+
+@ We will {\em assume\/} that if a token in [[special]] is followed by a character in
+[[not_special]] (or the end of the line), then it is a legitimate special token. So
+<<Possible ``special'' token>>=
+if (token := tab(match(!special)) & (any(not_special) | pos(0))) then
+ writes(translation[token])
+else
+ writes(move(1))
+@
+
+
+@
+\subsubsection {Comments and quotes}
+
+Procedures [[filter]] and [[TeXify]] interact via the variables [[in_comment]] and
+[[in_quote]] in handling comments and quotes. This is because the [[finduses]] and
+[[noidx]] filters are language-independent, and so can insert spurious [[@index]] and
+[[@xref]] lines in the middle of commented or quoted text of the target language.
+While this is merely an annoyance with balanced quotes and comments, it causes a real
+problem with unbalanced comments, in that [[TeXify]] cannot detect the end of an
+\emph{unbalanced} comment. This must be done by [[filter]], when it encounters a
+[[@nl]] line. See \S\ref{sec:filter} for some more details.
+<<Global variables>>=
+global in_comment1, in_comment2, in_quote
+@
+
+
+@ If we match a token in [[comment1]], we output it and the rest of the line as is,
+but in [[\rm]] font. Within a comment, characters special to \TeX\ are active,
+e.g. \verb+$x^2$+ will produce $x^2$. A problem with this is that if you comment out
+the (C) line \verb+printf("Hi there!\n")+, \TeX\ will complain that [[\n]] is an
+undefined control sequence.
+<<Possible unbalanced comment>>=
+if writes(tab(match(!comment1))) then
+ {in_comment1 := "yes"
+ writes("\\begcom{}" || tab(0))
+ break} # We let \texttt{filter} detect the end of the comment.
+else
+ writes(move(1)) # The character wasn't the beginning of a comment token.
+@
+
+
+@ If we are at this point, it is not necessarily true that we have found a comment.
+For example, in \textsl{Mathematica} comments begin with a [[(]], which may also
+appear in [[x+(y+z)]]. The additional complexity comes from the fact that we have to
+handle comments extending over many lines.
+<<Possible balanced comment>>=
+{every c := !comment2 do
+ # The conjunction is needed here!
+ {writes(c_open := tab(match(c[1]))) & c_close := c[2] & break}
+ if \c_open then
+ {in_comment2 := "yes"
+ writes("\\begcom{}")
+ <<Write balanced comment text>>}
+ else
+ writes(move(1)) # The character wasn't the beginning of a comment after all.
+}
+@
+@ Quoted strings may extend over multiple lines. Except for the formatting, we handle
+them like balanced comments.
+<<Possible quote>>=
+{every q := !quote2 do
+ {writes(q_open := tab(match(q[1]))) & q_close := q[2] & break}
+ if \q_open then
+ {in_quote := "yes"
+ <<Write quoted text>>}
+ else
+ writes(move(1)) # The character wasn't the beginning of a quoting token.
+}
+@
+@
+<<Write unbalanced comment text>>=
+writes(tab(0))
+@
+@
+<<Write balanced comment text>>=
+{if writes(tab(find(c_close))) then # Comment ends here
+ {writes("\\endcom{}" || move(*c_close))
+ in_comment2 := &null}
+ else # Comment doesn't close on this line
+ writes(tab(0))
+}
+@
+@ After encountering a quote we write literally, except that we precede every
+character special to \TeX\ by a backslash and follow it by an empty group. (This is
+necessary for the characters ``\~{}'' and ``\^{}''.)
+<<Write quoted text>>=
+{q := tab(find(q_close)|0) # $q$ doesn't include the closing quote.
+ q ? {while writes(tab(upto(TeXspecial))) do writes("\\" || move(1) || "{}")
+ writes(tab(0))}
+ if writes(tab(match(q_close))) then # Quote ends on this line
+ in_quote := &null
+}
+@
+@
+<<Local variables for [[TeXify]]>>=
+local token, c, t, q, c_open
+static c_close, q_close, TeXspecial
+initial {TeXspecial := '\\${}&#^_%~'} # The cset of characters treated specially by \TeX.
+@
+
+
+
+@
+\subsection {Filtering the Input}
+\label{sec:filter}
+
+First we set up the typewriter bold font [[\ttb]], corresponding to cmttb10. Then we
+define the macros [[\begcom]] (begin comment) and [[\endcom]]. [[\begcom]]
+\begin {itemize}
+ \item switches to [[\rmfamily]],
+ \item activates [[$]] by changing its catcode to 3,
+ \item makes the characters ``\texttt{\^{}}'' and ``[[_]]'' active for superscripts
+ and subscripts,
+ \item changes the catcode of the space character to 10. This way comments will be
+ typeset normally, and not as if [[\obeyspaces]] were active.
+\end {itemize}
+<<Emit special {\TeX} definitions>>=
+write("@literal \\DeclareFontShape{OT1}{cmtt}{bx}{n}{ <-> cmttb10 }{}")
+write("@nl")
+write("@literal \\def\\ttb{\\bfseries}")
+write("@nl")
+write("@literal \\def\\begcom{\\begingroup\\rmfamily \\catcode`\\$=3_
+ \\catcode`\\^=7 \\catcode`\\_=8 \\catcode`\\ =10}")
+write("@nl")
+write("@literal \\def\\endcom{\\endgroup}")
+write("@nl")
+@
+
+
+@ Procedure [[filter]] is straightforward, except that it interacts with [[TeXify]]
+when it comes to comments and quotes. [[line_num]] is used by both.
+<<Global variables>>=
+global line_num
+<<Emit special {\TeX} definitions>>=
+line_num := 0
+<<Procedure [[filter]]>>=
+procedure filter(line)
+ static kind # Local and static.
+ local keyword, rest, p0
+ line_num := line_num + 1 # line no. in the input file; used by \texttt{TeXify}.
+ line ? (keyword := tab(upto(' ')|0) &
+ rest := if tab(match(" ")) then {p0 := &pos; tab(0)} else &null)
+ case keyword of {
+ "@begin" : {rest ? kind := tab(many(&letters))
+ write(line)}
+ "@text" : if \kind == "code" then TeXify(rest,p0) else write(line)
+ "@nl" : {if \in_comment1 then # This must be an unbalanced comment.
+ {write("@literal \\endcom{}"); in_comment1 := &null}
+ write(line)}
+ "@index" | "@xref" : <<Not if in comment or quote!>>
+ default : write(line)
+ }
+ return
+end
+@
+
+@ Don't output spurious [[@index]] or [[@xref]] lines when in a comment or quote.
+([[@index]] is produced by [[finduses]] and [[@xref]] by [[noidx]].)
+This works only if the language filter is run {\em before\/} [[noidx]].
+<<Not if in comment or quote!>>=
+if /in_comment1 & /in_comment2 & /in_quote then write(line)
+@
+
+
+
+@
+\section {To do}
+\label{sec:todo}
+
+We have the following unresolved issue, exemplified by Icon. The current filter
+translates the symbol ``\&'' as ``$\land$'', even though ``\&'' is {\em not\/} in
+Icon's [[special]]. This happens because ``\&'' is in Icon's [[res_word_chars]], and
+a translation for it is defined in [[icon_translation_table]]. So when [[TeXify]]
+encounters it, it recognizes it as an Icon reserved word, and uses the translation
+defined for it. Now if this translation is not wanted, remove ``\&'' from
+[[icon_translation_table]] and don't bother me any more. However, if this
+translation is ok, we have an inconsistency, in that ``\&'' is not in [[special]].
+
+While this is not a real problem, achieving consistency (which may be needed in a
+more general case) is not so easy. If we add ``\&'' to [[special]], the check in
+$\langle$\textit{Check that these sets are disjoint\/}$\rangle$ will fail. To fix
+this, we could
+\begin {enumerate}
+ \item Add a constraint to the recognition of a reserved word: it has to be
+ a token of length $>1$.
+ \item Revise the [[case]] structure in $\langle$\textit{Not inside a comment or
+ quote\/}$\rangle$, as it will no longer work.
+\end {enumerate}
+We could also consider having a separate translation table for special tokens.
+@
+
+
+@
+\appendix
+\section {Index}
+\nowebindex
+
+
+\end {document} \ No newline at end of file
diff --git a/web/noweb/contrib/leew/Makefile b/web/noweb/contrib/leew/Makefile
new file mode 100644
index 0000000000..975bf88864
--- /dev/null
+++ b/web/noweb/contrib/leew/Makefile
@@ -0,0 +1,6 @@
+SHELL=/bin/sh
+all:
+install:
+source:
+clean:
+	/bin/rm -f nocond *.dvi *.log *.aux *.toc *.tex nocond.1
diff --git a/web/noweb/contrib/leew/README b/web/noweb/contrib/leew/README
new file mode 100644
index 0000000000..337fb068c9
--- /dev/null
+++ b/web/noweb/contrib/leew/README
@@ -0,0 +1,12 @@
+Lee Wittenberg has kindly provided his port of noweb to DOS, which
+includes executable binaries. It once appeared in the DOS
+subdirectory of the standard noweb distribution, but it got superseded
+by somebody else's port later on.
+
+nocond is a slick noweb filter that supports conditional code in a
+nicer way than ugly old #ifdef's.
+
+nobrace looks for unbalanced braces in code chunks.
+
+pretty-comment uses LaTeX to typeset inline comments...
+and it's written in SNOBOL!
diff --git a/web/noweb/contrib/leew/custom-code/README.custom-code b/web/noweb/contrib/leew/custom-code/README.custom-code
new file mode 100644
index 0000000000..06367f1ceb
--- /dev/null
+++ b/web/noweb/contrib/leew/custom-code/README.custom-code
@@ -0,0 +1,17 @@
+The "custom-code" script is a simple noweb filter that allows for some
+very simple typesetting of code chunks. It simply inserts a
+
+ \bgroup\nwcustomcode ... \egroup
+
+wrapper around everything in code chunks and quoted code. The user
+simply defines the \nwcustomcode macro to typeset the code
+appropriately. This is primarily useful for programming languages like
+Neliac (and, perhaps, APL), which use specialized character sets and
+don't look quite right when rendered in ``standard'' ASCII characters.
+
+The "example.nw" file demonstrates "custom-code" in action. Simply
+
+ noweave -t4 -delay -index example.nw -filter custom-code >example.tex
+
+and run the result through LaTeX for a sample of the same code both
+with and without the custom typesetting.
diff --git a/web/noweb/contrib/leew/custom-code/custom-code b/web/noweb/contrib/leew/custom-code/custom-code
new file mode 100755
index 0000000000..4d9ce45104
--- /dev/null
+++ b/web/noweb/contrib/leew/custom-code/custom-code
@@ -0,0 +1,12 @@
+#!/bin/awk -f
+BEGIN { needcontrol = 0; }
+/^@begin code / { needcontrol = 1 }
+/^@quote$/ { needcontrol = 1 }
+/^@text / { if (needcontrol) {
+ print "@literal \\bgroup\\nwcustomcode{}"
+ }
+ needcontrol = 0
+ }
+/^@end code / { print "@literal \\egroup{}" }
+/^@endquote$/ { print "@literal \\egroup{}" }
+ { print }
diff --git a/web/noweb/contrib/leew/custom-code/example.nw b/web/noweb/contrib/leew/custom-code/example.nw
new file mode 100644
index 0000000000..c6a0b252f5
--- /dev/null
+++ b/web/noweb/contrib/leew/custom-code/example.nw
@@ -0,0 +1,101 @@
+% to weave:
+% noweave -t4 -delay -index example.nw -filter custom-code >example.tex
+%
+\documentclass{article}
+\usepackage{noweb}
+\noweboptions{smallcode}
+\topmargin=0pt
+\textheight=9in
+\def\sub#1{$_{#1}$}
+\def\minus{-}
+\def\plus{+}
+\def\equals{=}
+\def\lt{<}
+\def\gt{>}
+\def\neliac{%
+ \catcode`_=\active%
+ \catcode`|=\active%
+ \catcode`&=\active%
+ \catcode`*=\active%
+ \catcode`'=\active%
+ \catcode`^=\active%
+ \catcode`-=\active%
+ \catcode`+=\active%
+ \catcode`==\active%
+ \catcode`<=\active%
+ \catcode`>=\active%
+ \catcode`/=\active%
+}
+\bgroup\neliac
+\global\def\nwcustomcode{\neliac%
+ \global\def_##1{\sub{##1}}%
+ \global\def|{$\cup$}%
+ \global\def&{$\cap$}%
+ \global\def*{$\times$}%
+ \global\def'{$\vert$}%
+ \global\def^{$\uparrow$}%
+ \global\def+{$\plus$}%
+ \global\def={$\equals$}%
+ \global\def/##1{\(\ifx##1=\ne\else\slash##1\fi\)}%
+ \global\def<##1{\(\ifx##1=\le\else\lt##1\fi\)}%
+ \global\def>##1{\(\ifx##1=\ge\else\gt##1\fi\)}%
+ \global\def-##1{\(\ifx##1>\to\else\minus##1\fi\)}%
+}
+\egroup
+
+\begin{document}
+Here is some actual code from the original Neliac compiler for the
+Univac M-460 ``Countess'' computer (written in Neliac, of course):
+<<*>>=
+DEBUG SCAN:
+i = 0: standard compiling location -> i; ;
+j = 0: obj prog std last address -> j; ;
+i = i(1)j{ [i] = straight jump function | [i] = return jump function:
+ fault 9. ; [i](15 -> 29) = 61000_8 & [i](0 -> 14) - bias -> k /= 0:
+ { [k] = 0 | [k] = straight jump function: fault 10. ; }; ;
+l'oop exit: }. check key sets, turn off flex, clear indices,
+key[2] /= 0: dump name lists and stop. exit.
+F'AULT 9:
+start flex, carriage return upper case, 69 -> lower loop limit,
+72 -> upper loop limit, dump a title,
+n = 177_8(1)0{ undefined name location[n] = i:
+ write undefined name, continue. ; },
+C'ONTINUE: write address, loop exit.
+F'AULT 10:
+start flex, carriage return upper case,
+77 -> lower loop limit, 82 -> upper loop limit, dump a title,
+n = 777_8(1)0{ name address[n] - bias = k: write name, go on. ; },
+k -> upper dump buffer[1], dump five number,
+G'O ON: write address, loop exit.
+W'RITE ADDRESS:
+{ 73 -> lower loop limit, 76 -> upper loop limit, dump a title,
+i -> upper dump buffer[1], dump five numbers, }. e'xit: . .
+@
+And here is the very same code without the \verb"custom-code"
+typesetting:\let\nwcustomcode=\relax
+<<*>>=
+DEBUG SCAN:
+i = 0: standard compiling location -> i; ;
+j = 0: obj prog std last address -> j; ;
+i = i(1)j{ [i] = straight jump function | [i] = return jump function:
+ fault 9. ; [i](15 -> 29) = 61000_8 & [i](0 -> 14) - bias -> k /= 0:
+ { [k] = 0 | [k] = straight jump function: fault 10. ; }; ;
+l'oop exit: }. check key sets, turn off flex, clear indices,
+key[2] /= 0: dump name lists and stop. exit.
+F'AULT 9:
+start flex, carriage return upper case, 69 -> lower loop limit,
+72 -> upper loop limit, dump a title,
+n = 177_8(1)0{ undefined name location[n] = i:
+ write undefined name, continue. ; },
+C'ONTINUE: write address, loop exit.
+F'AULT 10:
+start flex, carriage return upper case,
+77 -> lower loop limit, 82 -> upper loop limit, dump a title,
+n = 777_8(1)0{ name address[n] - bias = k: write name, go on. ; },
+k -> upper dump buffer[1], dump five number,
+G'O ON: write address, loop exit.
+W'RITE ADDRESS:
+{ 73 -> lower loop limit, 76 -> upper loop limit, dump a title,
+i -> upper dump buffer[1], dump five numbers, }. e'xit: . .
+@ \relax
+\end{document}
diff --git a/web/noweb/contrib/leew/custom-code/example.pdf b/web/noweb/contrib/leew/custom-code/example.pdf
new file mode 100644
index 0000000000..51145950df
--- /dev/null
+++ b/web/noweb/contrib/leew/custom-code/example.pdf
Binary files differ
diff --git a/web/noweb/contrib/leew/custom-code/example.tex b/web/noweb/contrib/leew/custom-code/example.tex
new file mode 100644
index 0000000000..b039a96f3f
--- /dev/null
+++ b/web/noweb/contrib/leew/custom-code/example.tex
@@ -0,0 +1,105 @@
+% to weave:% ===> this file was generated automatically by noweave --- better not edit it
+% noweave -t4 -delay -index example.nw -filter custom-code >example.tex
+%
+\documentclass{article}
+\usepackage{noweb}
+\noweboptions{smallcode}
+\topmargin=0pt
+\textheight=9in
+\def\sub#1{$_{#1}$}
+\def\minus{-}
+\def\plus{+}
+\def\equals{=}
+\def\lt{<}
+\def\gt{>}
+\def\neliac{%
+ \catcode`_=\active%
+ \catcode`|=\active%
+ \catcode`&=\active%
+ \catcode`*=\active%
+ \catcode`'=\active%
+ \catcode`^=\active%
+ \catcode`-=\active%
+ \catcode`+=\active%
+ \catcode`==\active%
+ \catcode`<=\active%
+ \catcode`>=\active%
+ \catcode`/=\active%
+}
+\bgroup\neliac
+\global\def\nwcustomcode{\neliac%
+ \global\def_##1{\sub{##1}}%
+ \global\def|{$\cup$}%
+ \global\def&{$\cap$}%
+ \global\def*{$\times$}%
+ \global\def'{$\vert$}%
+ \global\def^{$\uparrow$}%
+ \global\def+{$\plus$}%
+ \global\def={$\equals$}%
+ \global\def/##1{\(\ifx##1=\ne\else\slash##1\fi\)}%
+ \global\def<##1{\(\ifx##1=\le\else\lt##1\fi\)}%
+ \global\def>##1{\(\ifx##1=\ge\else\gt##1\fi\)}%
+ \global\def-##1{\(\ifx##1>\to\else\minus##1\fi\)}%
+}
+\egroup
+
+\begin{document}
+Here is some actual code from the original Neliac compiler for the
+Univac M-460 ``Countess'' computer (written in Neliac, of course):
+\nwfilename{example.nw}\nwbegincode{1}\sublabel{NW36h5rr-1p0Y9w-1}\nwmargintag{{\nwtagstyle{}\subpageref{NW36h5rr-1p0Y9w-1}}}\moddef{*~{\nwtagstyle{}\subpageref{NW36h5rr-1p0Y9w-1}}}\endmoddef\nwstartdeflinemarkup\nwprevnextdefs{\relax}{NW36h5rr-1p0Y9w-2}\nwenddeflinemarkup
+\bgroup\nwcustomcode{}DEBUG SCAN:
+i = 0: standard compiling location -> i; ;
+j = 0: obj prog std last address -> j; ;
+i = i(1)j\{ [i] = straight jump function | [i] = return jump function:
+ fault 9. ; [i](15 -> 29) = 61000_8 & [i](0 -> 14) - bias -> k /= 0:
+ \{ [k] = 0 | [k] = straight jump function: fault 10. ; \}; ;
+l'oop exit: \}. check key sets, turn off flex, clear indices,
+key[2] /= 0: dump name lists and stop. exit.
+F'AULT 9:
+start flex, carriage return upper case, 69 -> lower loop limit,
+72 -> upper loop limit, dump a title,
+n = 177_8(1)0\{ undefined name location[n] = i:
+ write undefined name, continue. ; \},
+C'ONTINUE: write address, loop exit.
+F'AULT 10:
+start flex, carriage return upper case,
+77 -> lower loop limit, 82 -> upper loop limit, dump a title,
+n = 777_8(1)0\{ name address[n] - bias = k: write name, go on. ; \},
+k -> upper dump buffer[1], dump five number,
+G'O ON: write address, loop exit.
+W'RITE ADDRESS:
+\{ 73 -> lower loop limit, 76 -> upper loop limit, dump a title,
+i -> upper dump buffer[1], dump five numbers, \}. e'xit: . .
+\egroup{}\nwalsodefined{\\{NW36h5rr-1p0Y9w-2}}\nwnotused{*}\nwendcode{}\nwbegindocs{2}\nwdocspar
+And here is the very same code without the \verb"custom-code"
+typesetting:\let\nwcustomcode=\relax
+\nwenddocs{}\nwbegincode{3}\sublabel{NW36h5rr-1p0Y9w-2}\nwmargintag{{\nwtagstyle{}\subpageref{NW36h5rr-1p0Y9w-2}}}\moddef{*~{\nwtagstyle{}\subpageref{NW36h5rr-1p0Y9w-1}}}\plusendmoddef\nwstartdeflinemarkup\nwprevnextdefs{NW36h5rr-1p0Y9w-1}{\relax}\nwenddeflinemarkup
+\bgroup\nwcustomcode{}DEBUG SCAN:
+i = 0: standard compiling location -> i; ;
+j = 0: obj prog std last address -> j; ;
+i = i(1)j\{ [i] = straight jump function | [i] = return jump function:
+ fault 9. ; [i](15 -> 29) = 61000_8 & [i](0 -> 14) - bias -> k /= 0:
+ \{ [k] = 0 | [k] = straight jump function: fault 10. ; \}; ;
+l'oop exit: \}. check key sets, turn off flex, clear indices,
+key[2] /= 0: dump name lists and stop. exit.
+F'AULT 9:
+start flex, carriage return upper case, 69 -> lower loop limit,
+72 -> upper loop limit, dump a title,
+n = 177_8(1)0\{ undefined name location[n] = i:
+ write undefined name, continue. ; \},
+C'ONTINUE: write address, loop exit.
+F'AULT 10:
+start flex, carriage return upper case,
+77 -> lower loop limit, 82 -> upper loop limit, dump a title,
+n = 777_8(1)0\{ name address[n] - bias = k: write name, go on. ; \},
+k -> upper dump buffer[1], dump five number,
+G'O ON: write address, loop exit.
+W'RITE ADDRESS:
+\{ 73 -> lower loop limit, 76 -> upper loop limit, dump a title,
+i -> upper dump buffer[1], dump five numbers, \}. e'xit: . .
+\egroup{}\nwendcode{}
+
+\nwixlogsorted{c}{{*}{NW36h5rr-1p0Y9w-1}{\nwixd{NW36h5rr-1p0Y9w-1}\nwixd{NW36h5rr-1p0Y9w-2}}}%
+\nwbegindocs{4}\relax
+\end{document}
+\nwenddocs{}
diff --git a/web/noweb/contrib/leew/custom-code/n b/web/noweb/contrib/leew/custom-code/n
new file mode 100644
index 0000000000..b2016f1622
--- /dev/null
+++ b/web/noweb/contrib/leew/custom-code/n
@@ -0,0 +1,20 @@
+Date: Wed, 13 May 2009 19:32:13 -0400
+From: Alec <alec@deviant-logic.net>
+To: Norman Ramsey <nr@cs.tufts.edu>
+Subject: Re: A couple links
+
+Actually, the pain and suffering from the python article
+turns out to be found at his followup post on tail calls on
+his regular blog:
+http://neopythonic.blogspot.com/2009/04/tail-recursion-elimination.html
+
+It's truly thrilling.
+
+-A
+
+On Wed, May 6, 2009 at 9:44 PM, Norman Ramsey
+<nr@cs.tufts.edu> wrote:
+> Thanks!
+>
+> N
+>
diff --git a/web/noweb/contrib/leew/email b/web/noweb/contrib/leew/email
new file mode 100644
index 0000000000..e6186cf003
--- /dev/null
+++ b/web/noweb/contrib/leew/email
@@ -0,0 +1,2 @@
+Lee Wittenberg <leew@alumni.stanford.edu>
+
diff --git a/web/noweb/contrib/leew/nobrace.nw b/web/noweb/contrib/leew/nobrace.nw
new file mode 100644
index 0000000000..1190f8a291
--- /dev/null
+++ b/web/noweb/contrib/leew/nobrace.nw
@@ -0,0 +1,321 @@
+%
+% to tangle:
+% notangle -t4 -L nobrace.nw > nobrace.icn
+% to weave:
+% noweave -t4 -delay -autodefs icon -index nobrace.nw > nobrace.tex
+% to create the manpage:
+% notangle -Rnobrace.1 nobrace.nw > nobrace.1
+%
+\documentclass{article}
+
+\usepackage{noweb,multicol}
+\noweboptions{longchunks} % noweave -option longchunks would be
+ % better, but won't work with -delay, and
+ % we need stuff before \begin{document}
+
+% show spaces in string constants
+\global\let\xsetup=\setupcode
+\bgroup
+ \catcode`\"=\active\gdef\setupcode{\xsetup
+ \catcode`\"=\active\def"##1"{\char`\"\xxx{##1}\char`\"}}%
+\egroup
+\bgroup
+ \catcode`\ =\active\gdef\xxx#1{{\catcode`\ =\active\chardef ='40#1}}%
+\egroup
+
+\def\noweb/{\texttt{noweb}}
+\def\nobrace/{\texttt{nobrace}}
+\def\notangle/{\texttt{notangle}}
+\def\noweave/{\texttt{noweave}}
+
+\title {A Filter For Matching Braces in \noweb/ Programs%
+ \thanks{Copyright \copyright~1996 by Lee Wittenberg.
+ Although this software is freely distributable, it is not in
+ the public domain. It is provided ``as is'' and without any
+ express or implied warranties, including, without limitation,
+ the implied warranties of merchantability and fitness for a
+ particular purpose.}}
+\author {Lee Wittenberg\\\tt leew@pilot.njin.net}
+
+\pagestyle{noweb}
+\begin{document}
+\maketitle
+@ \iffalse
+%
+% We don't want a troff man page woven by TeX, do we?
+%
+<<nobrace.1>>=
+.TH NOBRACE 1 "local 4/9/96"
+.SH NAME
+nobrace \- check noweb chunks for brace mismatches
+.SH SYNOPSIS
+.B nobrace
+[ brace-pair ... ]
+.SH DESCRIPTION
+.I nobrace
+is a filter designed to work with
+.I notangle(1)
+or
+.I noweave(1)
+to ensure that the braces in each code chunk are balanced.
+.I nobrace
+generates warning messages on the standard error stream for each
+chunk with unbalanced braces.
+
+If no brace pairs are specified on the command line,
+.I nobrace
+will check parentheses, square brackets, and curly braces.
+.SH BUGS
+.I nobrace
+is naive about braces in string constants, comments, etc.
+.PP
+No provision is made for multiple character braces, so C-style
+comments cannot be checked (nor can Algol-like \fBbegin\fP's and
+\fBend\fP's).
+.PP
+This manual page would be better if its author knew more about troff
+and the -man macros.
+.SH SEE ALSO
+.I notangle(1), noweave(1)
+.SH AUTHOR
+Lee Wittenberg. Internet address: \fBleew@pilot.njin.net\fP
+@ \fi
+@
+\section{Introduction}
+Many literate programming authorities
+consider it good practice for each code chunk definition to be syntactically
+and semantically complete in itself, with each chunk use representing
+a complete entity (statement, expression, etc.). To be complete, the
+braces in a chunk must be balanced. This web provides a
+\noweb/ filter that warns the user about mismatched braces in chunks.
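+A typical invocation (an illustrative command line; any \notangle/
+or \noweave/ pipeline works the same way) is
+\begin{quote}
+\verb"notangle -filter nobrace foo.nw > foo.c"
+\end{quote}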
+@
+\section{The Program}
+The main program reads and echoes each
+line of the standard input (so it will be invisible in the pipeline),
+processing only relevant markup lines.
+<<*>>=
+procedure main(args)
+ <<Initialization>>
+ while inputline := read() do {
+ write(inputline)
+ inputline ? {
+ <<Process relevant markup lines>>
+ {} # for final else
+ }
+ }
+end
+@
+Each command-line
+argument is taken to be a pair of braces, the open brace first. We
+construct two tables, [[pair]], and [[delta]], which are used to keep
+the brace balancing straight and separate (we don't want `[[{]]'
+matching `[[)]]'). We use the [[braces]] cset for scanning the text
+lines in code chunks.
+
+If no command line arguments are specified, we assume `\verb"() [] {}"'.
+<<Initialization>>=
+pair := table("")
+delta := table(0)
+braces := ''
+if *args = 0 then
+ args := ["()", "[]", "{}"]
+every p := !args do {
+ braces ++:= p
+ every pair[!p] := p
+ delta[p[1]] := +1
+ delta[p[2]] := -1
+}
+@ %def pair delta braces
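+For instance, to check angle brackets in addition to the defaults,
+one might write (an illustrative command line; the quoting keeps the
+shell from interpreting the brace pairs)
+\begin{quote}
+\verb"notangle -filter "\verb"""\verb"nobrace '()' '[]' '{}' '<>'"\verb"""\verb" foo.nw > foo.c"
+\end{quote}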
+@
+\section{Relevant Markup}
+Our \emph{raison d'\^etre} is to match braces in code chunks.
+Each [[@text]] line in a code chunk is scanned for braces, which we
+attempt to balance.
+<<Process relevant markup lines>>=
+if ="@text " then {
+ if \code then {
+ line := tab(0)
+ every p := upto(braces, line) do {
+ b := line[p]
+ <<Balance brace [[b]] at [[p]]>>
+ }
+ }
+} else
+@
+Whenever we enter a code chunk, we need to set our [[code]] flag,
+<<Process relevant markup lines>>=
+if ="@begin code " then {
+ code := 1
+} else
+@ \noindent
+and reset it whenever we leave. Whenever a code chunk ends (thus
+ending a definition), we also need to check for any remaining unmatched
+braces in that chunk.
+<<Process relevant markup lines>>=
+if ="@end code " then {
+ code := &null
+ <<Check for unmatched braces>>
+} else
+@
+All webs start with a text chunk, not code.
+<<Initialization>>=
+code := &null
+@ %def code
+@
+The variables [[curr_chunkname]], [[curr_filename]], and [[curr_line]]
+help keep track of where mismatches are found.
+@ \noindent
+We initialize them to ``safe'' values, just in case.
+<<Initialization>>=
+curr_line := 1
+curr_filename := "Standard Input"
+curr_chunkname := "***Unknown Chunk***"
+@ %def curr_filename curr_line curr_chunkname
+@
+Newlines simply increase the [[curr_line]] count.
+<<Process relevant markup lines>>=
+if ="@nl" & pos(0) then {
+ curr_line +:= 1
+} else
+@
+Whenever we get a new file name, we make note of it.
+<<Process relevant markup lines>>=
+if ="@file " then {
+ curr_filename := tab(0)
+ curr_line := 1
+} else
+@
+The [[@line]] directive can be used to adjust the current line
+number in a source file. We hear and obey.
+<<Process relevant markup lines>>=
+if ="@line " then {
+ curr_line := integer(tab(0))
+} else
+@
+New chunk definitions give us a new [[curr_chunkname]].
+``[[<<Check for unmatched braces>>]]''
+is here because it is not illegal for a single code chunk to have more
+than one definition. None of the standard tools produce anything like
+that, but we allow for this (remote) possibility in the interests of
+defensive programming.
+<<Process relevant markup lines>>=
+if ="@defn " then {
+ <<Check for unmatched braces>>
+ curr_chunkname := tab(0)
+} else
+@
+\section{Dealing With Braces}
+Whenever we see an opening brace, we push its location
+on the appropriate stack,
+and when we see a closing brace, we pop the
+corresponding opening brace's location
+(we use [[pull]] instead of [[pop]] to keep the list sorted; we use
+list concatenation instead of [[put]] because of Icon's table
+initialization semantics).
+
+If there is no opening brace to pop,
+we've found a mismatched closing brace.
+We add each line containing a brace to the [[error_line]] table
+because it may be needed for a warning message.
+<<Balance brace [[b]] at [[p]]>>=
+if delta[b] > 0 then {
+# put(bstack[pair[b]], loc(curr_line, p))
+ bstack[pair[b]] |||:= [loc(curr_line, p)]
+} else {
+ pull(bstack[pair[b]]) |
+ {<<Note brace error at [[curr_line]], [[p]]>>}
+}
+error_line[curr_line] := line
+@
+A brace's location is a line number and a column position in that
+line.
+<<*>>=
+record loc(line,col)
+@
+We keep all brace errors in a sorted list, [[error_list]].
+<<Note brace error at [[curr_line]], [[p]]>>=
+put(error_list, loc(curr_line, p))
+@
+The brace stacks are initially empty, as is the error list.
+<<Initialization>>=
+bstack := table([])
+error_list := []
+error_line := table("")
+@ %def bstack error_list error_line
+@
+If either the error list or any of the brace stacks is not empty, we
+have mismatched braces.
+<<Check for unmatched braces>>=
+if (*error_list | *!bstack) ~= 0 then {
+ <<Generate warning messages>>
+}
+@
+We merge [[error_list]] with all the brace stacks to create a single
+(sorted) list of mismatched brace locations. We write a single
+warning message for the chunk, followed by all the lines with
+mismatched braces. When we're finished, we clear the error list and
+the brace stacks for the next chunk definition.
+<<Generate warning messages>>=
+every error_list := merge(!bstack, error_list)
+write(&errout, "Warning: Mismatched braces in @<<",
+ curr_chunkname, ">>",
+ if curr_filename ~== ""
+ then " (" || curr_filename || ")"
+ else "",
+ ":")
+<<Display all relevant lines with mismatched braces marked>>
+error_list := []
+bstack := table([])
+@
+For each line represented in the error list, we print the line with a
+marker line under it. We use the `\verb"^"' character to mark the
+position of each mismatched brace. Each line is prefixed with its
+line number.
+<<Display all relevant lines with mismatched braces marked>>=
+lineno := 0;
+every e := !error_list do {
+ if e.line ~= lineno then {
+ if lineno ~=0 then
+ write(&errout, marker)
+ lineno := e.line
+ write(&errout, right(e.line || ": ", 10),
+ error_line[e.line])
+ marker := repl(" ", 10)
+ }
+ marker := left(marker, e.col+10-1) || "^"
+}
+write(&errout, marker)
+@
+The [[merge]] procedure merges two sorted lists of [[loc]]'s. We plow
+through both lists more or less in parallel, adding the earliest
+brace location to the result list. When [[a]] is exhausted,
+the remaining elements of [[b]] are concatenated to the result. Thus,
+the longer list should always be passed as the second parameter if
+possible.
+<<*>>=
+procedure merge(a, b)
+ local i, j, result
+ result := []
+ i := j := 1
+ while a[i] do {
+ if a[i].line > b[j].line |
+ (a[i].line = b[j].line & a[i].col > b[j].col) then {
+ put(result, b[j])
+ j +:= 1
+ } else {
+ put(result, a[i])
+ i +:= 1
+ }
+ }
+ return result ||| b[j:0]
+end
+@
+\appendix
+\section{Chunk Index}
+\nowebchunks
+\section{Identifier Index}
+\begin{multicols}{2}
+\nowebindex
+\end{multicols}
+@
+\end{document}
diff --git a/web/noweb/contrib/leew/nocond.nw b/web/noweb/contrib/leew/nocond.nw
new file mode 100644
index 0000000000..50eb35bc64
--- /dev/null
+++ b/web/noweb/contrib/leew/nocond.nw
@@ -0,0 +1,375 @@
+%
+% $Header: d:/noweb/work/RCS/nocond.nw%v 1.4 1995/07/29 17:14:49 LEEW Exp LEEW $
+% $Workfile$
+%
+% to tangle the sed script
+% notangle -t4 -R"sed script" nocond.nw > nocond
+% to tangle the shell script:
+% notangle -t4 -R"shell script" nocond.nw > nocond
+% to tangle the awk program
+% notangle -t4 -Rnocond.awk nocond.nw > nocond.awk
+% (use -filter "nocond MKS AWKC" if necessary)
+% to weave:
+% noweave -t4 -delay -x nocond.nw > nocond.tex
+%
+\documentstyle[noweb,twoside]{article}
+\noweboptions{longchunks}
+\let\nwnotused=\nwoutput
+\oddsidemargin=63pt % standard LaTeX margins don't work well for 2-sided webs
+\evensidemargin=63pt
+
+\ifx\LaTeXe\undefined\def\LaTeXe{\LaTeX2e}\fi % for old installations
+\def\noweb/{{\tt noweb}}
+\def\nocond/{{\tt nocond}}
+\def\notangle/{{\tt notangle}}
+\def\noweave/{{\tt noweave}}
+
+\title {A Filter For Conditional Tangling in \noweb/%
+ \thanks{Copyright \copyright~1994, 1995 by Lee Wittenberg.
+ Although this software is freely distributable, it is not in
+ the public domain. It is provided ``as is'' and without any
+ express or implied warranties, including, without limitation,
+ the implied warranties of merchantability and fitness for a
+ particular purpose.}}
+\author {Lee Wittenberg\\\tt leew@pilot.njin.net}
+
+%\input{nocondmac.tex}
+\bgroup
+\catcode`\@=11
+\global\let\nc@LA=\LA
+\gdef\nocondmark#1)){\/{\bf[\negthinspace[#1]\negthinspace]}}%
+\global\let\nc@notused=\nwnotused
+\global\let\nc@output=\nwoutput
+\gdef\nc@rootchunk#1{\nwcodecomment{\nocondrootnote}%
+ \global\let\nwnotused=\nc@notused
+ \global\let\nwoutput=\nc@output
+}%
+\gdef\nocondrootnote{Conditional definition.}%
+% for noweb 2.6 (bug, since fixed?)
+\global\@namedef{r@???}{{0}{\nocondxref}}
+% for noweb 2.5:
+\global\@namedef{r@nw@notdef}{{0}{\nocondxref}}
+\gdef\nocondxref{(conditional)}
+\global\let\nc@nwixlog=\nwixlogsorted
+\gdef\nwixlogsorted#1#2{
+\ifx#1c%
+ \immediate\write\@auxout{\string\bgroup\string\catcode`\string\(=\string\active}
+\fi
+ \nc@nwixlog{#1}{#2}
+\ifx#1c%
+ \immediate\write\@auxout{\string\egroup}
+\fi}%
+\catcode`\(=\active
+\gdef\LA{\nc@LA
+ \catcode`\(=\active
+ \def(##1{\ifx##1(\global\let\nwnotused=\nc@rootchunk
+ \global\let\nwoutput=\nc@rootchunk
+ \nocondmark
+ \else\char`\(##1\fi}%
+}%
+\egroup
+\pagestyle{noweb}
+\begin{document}
+\maketitle
+@ \iffalse
+%
+% We don't want a troff man page woven by TeX, do we?
+%
+<<nocond.1>>=
+.TH NOCOND 1 "local 8/1/94"
+.SH NAME
+nocond \- provide noweb with conditional tangling
+.SH SYNOPSIS
+.B nocond
+version
+.br
+\fBawk -f nocond.awk\fP version
+.SH DESCRIPTION
+.I nocond
+is a filter designed to work with
+.I notangle(1)
+to provide it with a simple
+conditional capability. Chunk definitions may be
+marked as conditional by including a version name
+wrapped in double parentheses as part of the chunk name.
+.PP
+.I nocond
+concatenates its command line arguments
+(with a single space between each argument) to form
+a version name, and removes matching conditional marks
+from chunk definition names so
+.I notangle(1)
+will include the chunks as part of the appropriate
+definition.
+.PP
+.I nocond
+also provides a file of TeX macros, \fInocondmac.tex\fP, which
+will nicely typeset conditional chunk names.
+.SH EXAMPLE
+Suppose that a Pascal web (\fIpgm.nw\fP) uses the chunk
+.IP
+\fB@<<\fPOpen the output file\fB@>>\fP
+.PP
+The author can provide multiple definitions of this chunk:
+.IP
+\fB@<<\fPOpen the output file ((UCSD Pascal))\fB@>>=\fP
+.nf
+REWRITE(outfile, 'XYZ.DAT');
+\fB@<<\fPOpen the output file ((Turbo Pascal))\fB@>>=\fP
+ASSIGN(outfile, 'XYZ.DAT');
+REWRITE(outfile);
+.fi
+.PP
+To tangle the UCSD Pascal version, the command line
+.IP
+notangle -filter "nocond UCSD Pascal" pgm.nw > pgm.pas
+.PP
+will suffice. The Turbo Pascal version can be tangled
+similarly.
+.SH SEE ALSO
+.I notangle(1)
+.SH AUTHOR
+Lee Wittenberg. Internet address: \fBleew@pilot.njin.net\fP
+@ \fi
+@
+\section{Introduction}
+This program is a very simple filter that provides \notangle/
+with a simple conditional capability. It should be
+written in {\tt sed}, but non-Unix versions
+of that venerable utility are not as readily available as they
+should be, so we are using Awk instead (however, see
+section~\ref{sed script}).
+
+\section{The Awk Program}
+The Awk program simply passes all its input lines to the
+standard output. However, when it encounters a chunk
+definition name, it first removes any conditional marks that
+match the version specified on the command line.
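+For example, with version \verb"Turbo Pascal", the markup line
+\begin{quote}
+\verb"@defn Open the output file ((Turbo Pascal))"
+\end{quote}
+becomes simply
+\begin{quote}
+\verb"@defn Open the output file"
+\end{quote}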
+<<nocond.awk>>=
+<<Version control info>>
+BEGIN{
+ <<System-dependent initialization>>
+ <<Initialization>>
+}
+<<Remove desired conditional marks from any chunk definition names>>
+{print}
+<<Version control info>>
+@
+Chunk definition names are prefixed with the markup code
+`\verb*"@defn "', and [[gsub]] is just made for this kind of
+work.
+<<Remove desired conditional marks from any chunk definition names>>=
+/^@defn / { gsub(pattern, "", $0) }
+@
+We want to remove marks surrounded by `\verb"(("' and
+`\verb"))"'. We need the backslashes in the pattern so
+Awk doesn't treat the parentheses as grouping symbols.
+<<Initialization>>=
+<<Use [[ARGV]] to determine the [[version]] desired>>
+pattern = " *\(\(" version "\)\)"
+@
+Some command processors are not very friendly about dealing
+with command line arguments containing spaces, so rather than
+require the version name to be supplied as a single argument,
+we treat all the arguments as a single, multi\-word one (with
+single spaces between the words). We then set [[ARGC]] to~1 to
+prevent Awk from trying to re-use the arguments as filenames.
+<<Use [[ARGV]] to determine the [[version]] desired>>=
+version = ARGV[1]
+for (i = 2; i < ARGC; i++) {
+ version = version " " ARGV[i]
+}
+ARGC = 1
+@
+\subsection{System Dependencies}
+The MKS Awk compiler tends to get confused about command line
+arguments, even though the interpreter has no problems. The
+following kludge seems to take care of it (don't ask):
+<<System-dependent initialization ((MKS AWKC))>>=
+ARGV[1]
+@
+It's likely that other system-dependencies will arise as
+\nocond/ is tried with other versions of Awk. A bit
+depressing, but that's what the tool was designed for, and it's
+kind of nice to use it to implement itself. Since not every Awk
+requires special initialization, we provide a null chunk
+to avoid ``undefined chunk'' complaints from \notangle/.
+<<System-dependent initialization>>=
+@
+\section{The Shell Script}
+Unix users can use the following shell script as a \notangle/
+filter:
+<<shell script>>=
+nawk '<<nocond.awk>>'
+@
+\section{A {\tt sed} Script}
+\label{sed script}
+{\sc Gnu} Awk, running under Linux, doesn't seem amenable to any
+patches that will make the above Awk program work correctly (the
+[[gsub]] function seems to be a sore spot in many Awk implementations).
+A {\tt sed} script, therefore, seems to be a necessity. The following
+does the trick.
+<<sed script>>=
+<<Version control info>>
+sed "/^@defn/s/ *(($*))//"
+@
+\section{Weaving a Conditional Web}
+Some people think the double parentheses don't look very
+good in woven output, and that the version name should stand
+out a bit from the chunk name. We provide the macro file
+\verb"nocondmac.tex" for those with such beliefs. These macros
+should be usable both in plain \TeX\ and \LaTeX, but have only
+been tested with the latter. They seem to work okay in \LaTeXe{}
+(in both native and compatibility modes), as well.
+
+We simply redefine the meaning of \noweb/'s [[\LA]] macro to
+make `\verb"("' an active character that typesets stuff in
+{\tt ((~$\ldots$~))} nicely and leaves other parentheses alone.
+As long as [[\LA]] exists and contains a \verb"\bgroup" this
+ought to work.
+<<nocondmac.tex>>=
+\bgroup
+\catcode`\@=11
+\global\let\nc@LA=\LA
+<<Useful macros>>
+<<Make `[[(]]' active>>
+\gdef\LA{\nc@LA
+ <<Make `[[(]]' active>>
+ \def(##1{\ifx##1(<<Adjust root chunk footnote>>\nocondmark
+ \else\char`\(##1\fi}%
+}%
+\egroup
+<<Make `[[(]]' active>>=
+\catcode`\(=\active
+@
+The real work will be done by [[\nocondmark]]. This is the
+only macro that should be changed if you want to adjust the way
+conditionals are typeset.
+<<Useful macros>>=
+\gdef\nocondmark#1)){\/{\bf[\negthinspace[#1]\negthinspace]}}%
+@
+In \LaTeX\ webs,
+the cross-reference footnotes for root chunks are generated by
+[[\nwnotused]] or [[\nwoutput]], depending on whether the woven
+output was generated with the \notangle/ or \noweb/ command.
+We note the original definitions, but change them to
+print `Conditional definition.' when a chunk name includes
+{\tt ((~$\ldots$~))}.
+<<Useful macros>>=
+\ifx\nwnotused\undefined\else
+ \global\let\nc@notused=\nwnotused
+\fi
+\ifx\nwoutput\undefined\else
+ \global\let\nc@output=\nwoutput
+\fi
+<<Adjust root chunk footnote>>=
+\ifx\nc@rootchunk\undefined\else
+\global\let\nwnotused=\nc@rootchunk
+\global\let\nwoutput=\nc@rootchunk
+\fi
+@
+The macro [[\nc@rootchunk]] is defined so that it resets
+[[\nwnotused]] and [[\nwoutput]] when it's finished, so that
+real root chunks will have the proper footnote. We use
+[[\nocondrootnote]] so that the conditional footnote can
+easily be customized.
+<<Useful macros>>=
+\ifx\nwnotused\undefined\else
+ \ifx\nwoutput\undefined\else
+ \gdef\nc@rootchunk#1{\nwcodecomment{\nocondrootnote}%
+ \global\let\nwnotused=\nc@notused
+ \global\let\nwoutput=\nc@output
+ }%
+ \gdef\nocondrootnote{Conditional definition.}%
+\fi\fi
+@
+In a web with conditional definitions, chunks that appear to be
+undefined are actually conditionally defined, so we change the
+`never defined' message to a more meaningful `conditional'.
+<<Useful macros>>=
+\ifx\documentstyle\undefined\else % LaTeX only
+% noweb 2.5:
+\global\@namedef{r@nw@notdef}{{0}{\nocondxref}}
+% noweb 2.6: (bug, since fixed?)
+\global\@namedef{r@???}{{0}{\nocondxref}}
+\gdef\nocondxref{(conditional)}
+\fi
+@
+The chunk index is a bit of a problem because \TeX\ assigns
+catcodes when a token is first read, but the chunk index is
+read in as part of the \verb".aux" file, when `\verb"("' is not
+an active character. We fix [[\nwixlogsorted]] so it will
+change the catcode of `\verb"("' temporarily for chunk index
+info in the \verb".aux" file.
+<<Useful macros>>=
+\ifx\nwixlogsorted\undefined\else
+ \global\let\nc@nwixlog=\nwixlogsorted
+ \gdef\nwixlogsorted#1#2{
+ \ifx#1c
+ \immediate\write\@auxout{\string\bgroup
+ \string\catcode`\string\(=\string\active}%
+ \fi
+ \nc@nwixlog{#1}{#2}
+ \ifx#1c
+ \immediate\write\@auxout{\string\egroup}%
+ \fi
+ }%
+\fi
+@
+On the other hand, all we need do for
+[[\nowebchunks@external]] is to set the catcode. Note
+that the \verb"externalindex" option must be set {\em after\/}
+executing \verb"\input nocondmac.tex" for things to work
+properly.
+<<Useful macros>>=
+\ifx\nowebchunks@external\undefined\else
+ \global\let\nc@chunks@external=\nowebchunks@external
+ \gdef\nowebchunks@external{%
+ \bgroup
+ <<Make `[[(]]' active>>
+ \nc@chunks@external
+ \egroup
+ }%
+\fi
+@
+\appendix
+\section {Language and Version Control Tools}
+This puts revision information in the program so we can make
+sure things don't get ``out of sync.''
+<<Version control info>>=
+# $Header: d:/noweb/work/RCS/nocond.nw%v 1.4 1995/07/29 17:14:49 LEEW Exp LEEW $
+@
+\section {Chunk Index}
+\nowebchunks
+%\twocolumn[\section{Identifier Index}] % no point, really, for this web
+%\nowebindex
+@
+\end{document}
+% $Log: nocond.nw%v $
+% Revision 1.4 1995/07/29 17:14:49 LEEW
+% Added sed script; minor cosmetic changes
+%
+% Revision 1.3 1994/09/11 18:06:26 LEEW
+% Fixed macros for noweb 2.6c
+%
+% Revision 1.2 1994/08/05 20:46:48 LEEW
+% Added macros to typeset chunk index correctly.
+%
+% Revision 1.1 1994/08/01 14:05:33 LEEW
+% Changed manpage to troff format.
+% Spiffed up nocondmac macros.
+% Removed non-standard macro packages.
+%
+% Revision 1.0 1994/06/20 17:47:55 LEEW
+% First public version.
+%
+% Revision 0.3 1994/06/20 17:31:49 LEEW
+% Added TeX macros
+%
+% Revision 0.2 1994/06/20 16:36:59 LEEW
+% Awk script complete
+%
+% Revision 0.1 1994/06/20 14:54:49 LEEW
+% Manpage only.
+%
+% $Header: d:/noweb/work/RCS/nocond.nw%v 1.4 1995/07/29 17:14:49 LEEW Exp LEEW $
diff --git a/web/noweb/contrib/leew/strhack.nw b/web/noweb/contrib/leew/strhack.nw
new file mode 100644
index 0000000000..4b0e3fff9d
--- /dev/null
+++ b/web/noweb/contrib/leew/strhack.nw
@@ -0,0 +1,32 @@
+\documentstyle[11pt,noweb]{article}
+\noweboptions{longchunks,noidentxref,smallcode}
+\pagestyle{noweb}
+
+\def\noweb/{{\tt noweb}}
+
+\title{A Hack for Typesetting Strings in \noweb/\thanks{This
+code is hereby placed in the public domain.}}
+\author{Lee Wittenberg\\Kean College of New Jersey\\Union, NJ
+07083\\\tt leew@pilot.njin.net}
+
+\begin{document}
+\maketitle
+
+The following macros adjust things so that \noweb/ will use
+``visible spaces'' in double-quoted strings within code chunks.
+The same effect can be
+achieved for single-quoted strings by replacing each occurrence
+of `[["]]', below, with `[[']]'.
+It doesn't work within \verb"[["~\ldots~\verb"]]" (although I
+can't figure out why).
+<<*>>=
+\global\let\xsetup=\setupcode
+\bgroup
+ \catcode`\"=\active\gdef\setupcode{\xsetup
+ \catcode`\"=\active\def"##1"{\char`\"\xxx{##1}\char`\"}}%
+\egroup
+\bgroup
+ \catcode`\ =\active\gdef\xxx#1{{\catcode`\ =\active\chardef ='40#1}}%
+\egroup
+@
+\end{document}
diff --git a/web/noweb/contrib/leyn/README b/web/noweb/contrib/leyn/README
new file mode 100644
index 0000000000..79d24b3a3b
--- /dev/null
+++ b/web/noweb/contrib/leyn/README
@@ -0,0 +1,23 @@
+ttroots
+=======
+
+-Allows underscores in root chunks that are written to disk.
+-All root chunks are printed out in the LaTeX document as
+ upright verbatim names.
+
+
+notangleall
+===========
+
+-creates all required directories for the code chunks
+-tangles all root chunks (as noweb -t)
+-makes all marked scripts executable. The marking is done
+ as follows:
+  %unix chmod +x chunk_name
+ <<chunk_name>>=
+ ...
+ @
+
+ Can also be used for other UNIX commands that should be
+ executed after the tangling.
+
diff --git a/web/noweb/contrib/leyn/email b/web/noweb/contrib/leyn/email
new file mode 100644
index 0000000000..f330ba5499
--- /dev/null
+++ b/web/noweb/contrib/leyn/email
@@ -0,0 +1 @@
+Francky.Leyn@esat.kuleuven.ac.be
diff --git a/web/noweb/contrib/leyn/notangleall b/web/noweb/contrib/leyn/notangleall
new file mode 100755
index 0000000000..9ab9f75e8d
--- /dev/null
+++ b/web/noweb/contrib/leyn/notangleall
@@ -0,0 +1,27 @@
+#! /bin/sh
+#
+# notangleall
+#
+# -creates all required directories for the code chunks
+# -tangles all root chunks (as noweb -t)
+# -makes all marked scripts executable. The marking is done
+#  as follows:
+#   %unix chmod +x chunk_name
+# <<chunk_name>>=
+# ...
+# @
+#
+# Can also be used for other UNIX commands that should be
+# executed after the tangling.
+
+
+echo 'making required subdirs'
+noroots $1 | \
+ sed '/<<[^/]*>>/d ; s/<<\(.*\)\/[^\/]*>>/\1/' | \
+ sort | uniq | sed 's/\(.*\)/mkdir -p \1 ;/' | sh
+
+echo 'extracting code chunks'
+noweb -t $1
+
+echo 'making scripts executable'
+egrep '^%unix' $1 | sed 's/%unix //; s/.*/& ;/' | sh
diff --git a/web/noweb/contrib/leyn/ttroots b/web/noweb/contrib/leyn/ttroots
new file mode 100755
index 0000000000..b03bc91922
--- /dev/null
+++ b/web/noweb/contrib/leyn/ttroots
@@ -0,0 +1,41 @@
+#! /bin/sh
+#
+# ttroots
+#
+# -Allows underscores in root chunks that are written to disk.
+# -All root chunks are printed out in the LaTeX document as
+# upright verbatim names.
+
+gawk '
+ { line[NR] = $0 ; }
+# a root chunk name can not contain spaces
+$1 == "@use" && NF == 2 { used[$2] = 1 ; next ; }
+$1 == "@defn" && NF == 2 { defined[$2] = 1 ; }
+
+END {
+ # determine root chunks
+ for (i in defined)
+ if (!(i in used))
+ root_chunks[i] = 1 ;
+
+ # root chunk substitutions
+ # Root chunk names can be used in 3 contexts:
+ # @defn name
+ # @xref notused name
+ # @xref chunkbegin label name
+ for (i=1; i<=NR; i++) {
+ if (line[i] ~ /^(@xref notused|@xref chunkbegin|@defn)/) {
+ nr = split(line[i], array, " ") ;
+ stat = array[1] ;
+ name = array[nr] ;
+      if ((stat == "@xref" || (stat == "@defn" && nr == 2)) && (name in root_chunks)) {
+ replace = " \\textup{\\texttt{"name"}}" ;
+ gsub("_", "\\_", replace) ;
+ gsub(" "name, replace, line[i]) ;
+ } ;
+ } ;
+ print line[i] ;
+ }
+}
+'
diff --git a/web/noweb/contrib/norman/Makefile b/web/noweb/contrib/norman/Makefile
new file mode 100644
index 0000000000..471f71e82b
--- /dev/null
+++ b/web/noweb/contrib/norman/Makefile
@@ -0,0 +1,10 @@
+LIB=/dev/null # to be overridden
+DIRS=numarkup
+
+all: ; for i in $(DIRS); do (cd $$i; make ICONC=$(ICONC) ICONT=$(ICONT) all); done
+install: ; for i in $(DIRS); do (cd $$i; make LIB=$(LIB) BIN=$(BIN) install); done
+source: ; for i in $(DIRS); do (cd $$i; make source); done
+clean: ; for i in $(DIRS); do (cd $$i; make clean); done
+iconlib: # cheap hack for slackmake
+ true
+
diff --git a/web/noweb/contrib/norman/README b/web/noweb/contrib/norman/README
new file mode 100644
index 0000000000..722d895409
--- /dev/null
+++ b/web/noweb/contrib/norman/README
@@ -0,0 +1,6 @@
+cleanchunks.nw escapes special characters in chunk names
+moddate.nw uses file modification time as date (POSIX)
+numarkup contains a substitute for markup that works with nuweb files
+pp contains a very simple prettyprinter
+scopehack.icn A hack for scoping files; see the FAQ
+generate-to A hack for renumbering lines in tools like yacc
diff --git a/web/noweb/contrib/norman/cleanchunks.nw b/web/noweb/contrib/norman/cleanchunks.nw
new file mode 100644
index 0000000000..9334c11225
--- /dev/null
+++ b/web/noweb/contrib/norman/cleanchunks.nw
@@ -0,0 +1,42 @@
+\section{Code to clean up special characters in chunk names}
+
+Some people want to use {\tt noweb} to browse big pieces of legacy
+code without having to touch any of it by hand. Since legacy code
+sometimes uses underscores and other special characters in chunk
+names, it makes sense to have a filter to escape special characters in
+chunk names.
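+For example, a chunk with the (hypothetical) name \verb+lex_state.h+
+arrives on the markup line
+\begin{quote}
+\verb+@defn lex_state.h+
+\end{quote}
+and leaves as
+\begin{quote}
+\verb+@defn lex{\tt\char95}state.h+
+\end{quote}
+so the underscore can be typeset safely in the woven document.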
+<<*>>=
+procedure main()
+ local tag, line
+ while line := read() do
+ line ?
+ if tag := =("@defn " | "@use ") then
+ write(tag, TeXliteral(tab(0)))
+ else
+ write(line)
+end
+@
+<<'\\{}$&#^_ ~%'>>=
+<<*>>=
+procedure TeXliteral(arg)
+ static nospace, code, TeXspecials
+ initial { codes := ["\\", 92, "{", 123, "}", 125, "$", 36, "&", 38, "#", 35, "^", 94,
+ "_", 95, "%", 37, "~", 126]
+ code := table()
+ TeXspecials := '\\{}$&#^_~%'
+ while (c := get(codes), n := get(codes)) do code[c] := string(n)
+ if c := !TeXspecials & c ~== " " & not member(code, c) then
+ stop("internal error, character-code mismatch, report a bug!")
+ }
+ s := ""
+ arg ? {
+ while s ||:= tab(upto(TeXspecials)) do {
+ c := move(1)
+ if member(code, c) then
+ s ||:= "{\\tt\\char" || code[c] || "}"
+ else
+ s ||:= "\\" || c
+ }
+ return s || tab(0)
+ }
+end
diff --git a/web/noweb/contrib/norman/email b/web/noweb/contrib/norman/email
new file mode 100644
index 0000000000..d87018308d
--- /dev/null
+++ b/web/noweb/contrib/norman/email
@@ -0,0 +1 @@
+nr@eecs.harvard.edu
diff --git a/web/noweb/contrib/norman/generate-to b/web/noweb/contrib/norman/generate-to
new file mode 100755
index 0000000000..9bc8f65e6e
--- /dev/null
+++ b/web/noweb/contrib/norman/generate-to
@@ -0,0 +1,24 @@
+#!/usr/bin/env lua5.1
+
+-- Usage: $0 filename
+-- Reads from stdin and writes to filename, renumbering directives
+-- that say "generated code"
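+--
+-- Illustrative example: running `generate-to out.c`, a directive that
+-- lands on line 7 of out.c,
+--   #line 1 "generated code"
+-- is rewritten as
+--   #line 7 "out.c"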
+
+assert(#arg == 1, 'Usage: $0 outfilename')
+local filename = assert(arg[1])
+
+local f = assert(io.open(filename, 'w'))
+
+local n = 0 -- how many lines have already been written to f
+local function rewrite()
+ return string.format('#line %d "%s"', n, filename)
+end
+for l in io.lines() do
+ n = n + 1
+ l = l:gsub('%#line%s+%d+%s*"generated code"', rewrite, 1)
+ f:write(l, '\n')
+end
+
+f:close()
+
+ 
\ No newline at end of file
diff --git a/web/noweb/contrib/norman/htmlgif/htmlgif.icn b/web/noweb/contrib/norman/htmlgif/htmlgif.icn
new file mode 100644
index 0000000000..1a681be801
--- /dev/null
+++ b/web/noweb/contrib/norman/htmlgif/htmlgif.icn
@@ -0,0 +1,54 @@
+# htmlgif -- convert eps references to gifs
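+#
+# Illustrative example: an input line such as
+#   see <a href=fig.eps>PostScript figure fig.eps</a>
+# is rewritten as
+#   see <img inline src="fig.gif" alt="[GIF derived from PostScript figure fig.eps]"></a>
+# and fig.gif is (re)generated from fig.eps when it is out of date.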
+
+procedure newer(target, prereq)
+ return system("newer " || target || " " || prereq) == 0
+end
+
+procedure main(args)
+ if *args > 0 then every cvt(open(!args))
+ else cvt(&input)
+ return
+end
+
+procedure cvt(file)
+ while line := read(file) do
+ line ?
+ if (pre := tab(find("<a ")), optwhite(), ="<a href=", optwhite(),
+ ps := tab(upto(' \t>')), optwhite(), =">",
+ tab(lookfor(s := "PostScript figure " || (ps|strip_quotes(ps)))),
+ =s, any(' ]<>"'), tab(find("</a>")), post := tab(0)) then
+ {
+ ps := strip_quotes(ps)
+ gif := suffex(ps) || ".gif"
+ write(pre, "<img inline src=", quote(gif),
+ " alt=", quote("[GIF derived from PostScript figure " || ps || "]"), ">",
+ post)
+ if not newer(gif, ps) then
+ system("pstopbm " || ps || " | ppmtogif | giftool -rgb white > " || gif)
+ } else {
+ write(line)
+ }
+ return
+end
+
+procedure lookfor(s)
+ suspend find(s)
+end
+
+procedure strip_quotes(s)
+ return if s[1] == s[-1] == "\"" then s[2:-1] else s
+end
+
+procedure suffex(s)
+ static nodot
+ initial nodot := ~ '.'
+ s ? return if (b := tab(upto('.')), =".", tab(many(nodot)), pos(0)) then b else s
+end
+
+procedure quote(s)
+ return "\"" || s || "\""
+end
+
+procedure optwhite()
+ suspend tab(many(' \t')) | ""
+end
diff --git a/web/noweb/contrib/norman/htmlgif/newer.c b/web/noweb/contrib/norman/htmlgif/newer.c
new file mode 100644
index 0000000000..9b741bd6f9
--- /dev/null
+++ b/web/noweb/contrib/norman/htmlgif/newer.c
@@ -0,0 +1,19 @@
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+
+int main(int argc, char *argv[]) {
+ struct stat b1, b2;
+
+ if (argc != 3) {
+ fprintf(stderr, "Usage: %s file1 file2\n", argv[0]);
+ exit (-1);
+ } else if (stat(argv[1],&b1) < 0) {
+ perror(argv[1]);
+ exit(-2);
+ } else if (stat(argv[2],&b2) < 0) {
+ perror(argv[2]);
+ exit(-2);
+ } else exit(b1.st_mtime > b2.st_mtime ? 0 : 1);
+}
diff --git a/web/noweb/contrib/norman/htmlgif/pstopbm b/web/noweb/contrib/norman/htmlgif/pstopbm
new file mode 100755
index 0000000000..eeb65ff385
--- /dev/null
+++ b/web/noweb/contrib/norman/htmlgif/pstopbm
@@ -0,0 +1,86 @@
+#!/bin/sh
+# exec gs -sDEVICE=djet500 -sOutputFile=- -q -dNOPAUSE "$@"
+
+device=pbm
+case $0 in
+ *pbm) device=pbm ;;
+ *ppm) device=ppm ;;
+esac
+
+border=10
+leftborder=$border
+rightborder=$border
+upperborder=$border
+lowerborder=$border
+translate=yes
+showpage=showpage
+
+while [ $# -gt 0 ]; do
+ case $1 in
+ -border) border="$2"
+ leftborder=$border
+ rightborder=$border
+ upperborder=$border
+ lowerborder=$border
+ shift ;;
+ -upper) upperborder="$2" ; shift ;;
+ -lower) lowerborder="$2" ; shift ;;
+ -left) leftborder="$2" ; shift ;;
+ -right) rightborder="$2" ; shift ;;
+ -notrans) translate= ;;
+ -showpage) showpage=showpage ;;
+ -noshowpage) showpage= ;;
+ -*) echo "Unknown option $1" 1>&2 ; exit 1;;
+ *) break ;;
+ esac
+ shift
+done
+
+tmp=$(mktemp)
+tmpa=$(mktemp --suffix=.a)
+if [ $# -eq 0 ]; then cat > $tmp; else cat "$@" > $tmp; fi
+
+if echo "$@" | fgrep .eps > /dev/null; then
+ echo showpage >> $tmp
+fi
+
+set foo `psbb $tmp`
+shift
+
+if [ -n "$translate" -a $# -eq 4 ]; then
+
+ llx="$1" lly="$2" urx="$3" ury="$4"
+ llx=`expr $llx - $leftborder`
+ lly=`expr $lly - $lowerborder`
+ urx=`expr $urx + $rightborder`
+ ury=`expr $ury + $upperborder`
+ width=`expr $urx - $llx`
+ height=`expr $ury - $lly`
+
+ awk '{print}
+ /^%%EndComments/ { printf "%d neg %d neg translate\n", '"$llx, $lly"' }
+ ' $tmp > $tmpa
+ echo "$showpage" |
+ gs -q -g"${width}x$height" -sDEVICE=$device -sOutputFile=- -dNOPAUSE $tmpa -
+else
+ echo "$showpage" |
+ gs -q -sDEVICE=$device -sOutputFile=- -dNOPAUSE $tmp -
+fi
+
+# rm -f $tmp $tmpa
+
+exit 0
+
+#### old version
+
+
+
+if [ $# -eq 0 ]; then
+ tmp=$(mktemp)
+ cat > $tmp
+ gs -q -sDEVICE=$device -sOutputFile=- -dNOPAUSE -dMAGSTEP=1.0 $tmp
+else
+ gs -q -sDEVICE=$device -sOutputFile=- -dNOPAUSE -dMAGSTEP=1.0 "$@"
+fi
+rm -rf $tmp
+
diff --git a/web/noweb/contrib/norman/moddate.nw b/web/noweb/contrib/norman/moddate.nw
new file mode 100644
index 0000000000..bf43e8fe4f
--- /dev/null
+++ b/web/noweb/contrib/norman/moddate.nw
@@ -0,0 +1,79 @@
+% -*- mode: Noweb; noweb-code-mode: c-mode -*-
+\section{Using file modification dates}
+
+\date{October 31, 1996}
+
+This \texttt{noweb} filter
+sets the date to the modification date of the file being woven.
+It relies on the convention that
+\begin{quote}
+\verb+\date{+\emph{mumble}\verb+}+
+\end{quote}
+appears on a line by itself. \emph{Mumble} stands for any string.
+The filter replaces \emph{mumble} with the modification date and time
+of the file as announced by \verb+@file+.
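+For example (an illustrative line; the actual string comes from
+[[stat]]), the markup line
+\begin{quote}
+\verb+@text \date{\today}+
+\end{quote}
+would be replaced by something like
+\begin{quote}
+\verb+@text \date{Thu Oct 31 13:06:13 1996}\def\today{...}+
+\end{quote}
+with the same time string repeated inside \verb+\def\today+.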
+
+The filter uses POSIX functions, and it uses the \texttt{noweb} input
+stuff, so it has to be linked with
+\texttt{getline.o}, \texttt{columns.o}, and \texttt{errors.o} from the
+\texttt{noweb} distribution.
+<<moddate.c*>>=
+<<includes>>
+<<local procs>>
+main() {
+ char *s;
+ char *file = NULL;
+ while (s = getline(stdin))
+ if (match("@file ", s)) {
+ printf("%s", s);
+ file = get_file_name(s);
+ } else if (matches_date(s) && file != NULL)
+ print_modification_time(file);
+ else
+ printf("%s", s);
+}
+@
+Matching ideas are stolen from Icon.
+<<local procs>>=
+match(char *pattern, char *s) {
+ return !strncmp(pattern, s, strlen(pattern));
+}
+<<local procs>>=
+matches_date(char *s) {
+ return match("@text \\date{", s) && s[strlen(s)-2] == '}';
+}
+@
+Allocate space for a file name and strip the trailing newline.
+<<local procs>>=
+char *get_file_name(char *s) {
+ char *p = (char *)malloc(strlen(s)); /* wastes some characters */
+ assert(p);
+ strcpy(p, s + strlen("@file "));
+ p[strlen(p)-1] = 0; /* trim newline */
+ return p;
+}
+@
+To get a nicer format for the time, I would replace the call to
+[[asctime]] with something else.
+<<local procs>>=
+void print_modification_time(char *file) {
+ struct stat buf;
+ int n = stat(file, &buf);
+ char *time;
+ if (n) {fprintf(stderr, "could not stat %s\n", file); exit(1); }
+ time = asctime(localtime(&buf.st_mtime));
+ if (time[strlen(time)-1] == '\n')
+ time[strlen(time)-1] = 0;
+ printf("@text \\date{%s}\\def\\today{%s}\n", time, time);
+}
+@
+<<includes>>=
+#include <time.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <string.h>
+#include <stdio.h>
+#include "getline.h"
+
diff --git a/web/noweb/contrib/norman/numarkup/Makefile b/web/noweb/contrib/norman/numarkup/Makefile
new file mode 100644
index 0000000000..9165670393
--- /dev/null
+++ b/web/noweb/contrib/norman/numarkup/Makefile
@@ -0,0 +1,37 @@
+LIB=/dev/null # to be overridden
+CC = cc
+CFLAGS = -O
+
+TARGET = numarkup
+OBJS = main.o pass1.o latex.o input.o scraps.o names.o arena.o global.o
+
+.SUFFIXES: .nw
+.nw.c: ; notangle -R"$@"'*' -L $< | cpif $@
+.nw.h: ; notangle -R"$@" $< | cpif $@
+
+all:
+ noweb -t numarkup.nw
+ make $(TARGET)
+
+install:
+ noweb -t numarkup.nw
+ make $(TARGET)
+ strip $(TARGET)
+ cp $(TARGET) $(LIB)
+
+source: main.c pass1.c latex.c input.c scraps.c names.c arena.c global.c
+
+clean:
+ rm -f *.o *.c *.h *.tex *.log *.dvi *~ *.blg $(TARGET) *.html *~
+
+$(OBJS): global.h
+
+$(TARGET): $(OBJS)
+ $(CC) -o $(TARGET) $(OBJS)
+
+numarkup.html: numarkup.nw
+ noweave -filter l2h -html -index numarkup.nw > numarkup.html
+
+numarkup.tex: numarkup.nw
+ noweb -o numarkup.nw
+
diff --git a/web/noweb/contrib/norman/numarkup/numarkup.bbl b/web/noweb/contrib/norman/numarkup/numarkup.bbl
new file mode 100644
index 0000000000..224d2050b3
--- /dev/null
+++ b/web/noweb/contrib/norman/numarkup/numarkup.bbl
@@ -0,0 +1,58 @@
+\begin{thebibliography}{10}
+
+\bibitem{aho:75}
+Alfred~V. Aho and Margaret~J. Corasick.
+\newblock Efficient string matching: An aid to bibliographic search.
+\newblock {\em Communications of the ACM}, 18(6):333--340, June 1975.
+
+\bibitem{hanson:90}
+David~R. Hanson.
+\newblock Fast allocation and deallocation of memory based on object lifetimes.
+\newblock {\em Software -- Practice and Experience}, 20(1):5--12, January 1990.
+
+\bibitem{knuth:84}
+Donald~E. Knuth.
+\newblock Literate programming.
+\newblock {\em The Computer Journal}, 27(2):97--111, May 1984.
+
+\bibitem{metafont:program}
+Donald~E. Knuth.
+\newblock {\em {{\small\sf METAFONT:}} The Program}.
+\newblock Computers \& Typesetting. Addison-Wesley, 1986.
+
+\bibitem{tex:program}
+Donald~E. Knuth.
+\newblock {\em {{\TeX}}: The Program}.
+\newblock Computers \& Typesetting. Addison-Wesley, 1986.
+
+\bibitem{texbook}
+Donald~E. Knuth.
+\newblock {\em The {{\TeX}}book}.
+\newblock Computers \& Typesetting. Addison-Wesley, 1986.
+
+\bibitem{latex}
+Leslie Lamport.
+\newblock {\em {{\LaTeX:}} A Document Preparation System}.
+\newblock Addison-Wesley, 1986.
+
+\bibitem{levy:90}
+Silvio Levy and Donald~E. Knuth.
+\newblock {{\tt CWEB}} user manual: The {{\small CWEB}} system of structured
+ documentation.
+\newblock Technical Report {\small STAN}-{\small CS}-83-977, Stanford
+ University, October 1990.
+\newblock Available for anonymous ftp from {\tt labrea.stanford.edu} in
+ directory {\tt pub/cweb}.
+
+\bibitem{noweb}
+Norman Ramsey.
+\newblock Literate-programming tools need not be complex.
+\newblock Submitted to IEEE Software, August 1992.
+
+\bibitem{funnelweb}
+Ross~N. Williams.
+\newblock {FunnelWeb} user's manual, May 1992.
+\newblock Available for anonymous ftp from {\tt sirius.itd.adelaide.edu.au} in
+ directory {\tt pub/funnelweb}.
+
+\end{thebibliography}
diff --git a/web/noweb/contrib/norman/numarkup/numarkup.nw b/web/noweb/contrib/norman/numarkup/numarkup.nw
new file mode 100644
index 0000000000..e47c5a356c
--- /dev/null
+++ b/web/noweb/contrib/norman/numarkup/numarkup.nw
@@ -0,0 +1,1264 @@
+\documentstyle[noweb]{report}
+
+\title{Nuweb front end for noweb}
+\author{Norman Ramsey\\(from code by Preston Briggs)}
+
+\begin{document}
+\pagenumbering{roman}
+\maketitle
+\tableofcontents
+
+\chapter{Introduction}
+\pagenumbering{arabic}
+
+This code reads one or more nuweb files and produces noweb intermediate code
+ (as described in the {\em Noweb Hackers' Guide}) on
+standard output.
+It was created by modifying version 0.87 of nuweb.
+
+
+
+\chapter{The Overall Structure}
+
+Processing a web requires two major steps:
+\begin{enumerate}
+\item Read the source, accumulating file names, macro names, scraps,
+and lists of cross-references.
+This pass is needed so we can disambiguate scrap names.
+\item Reread the source, transforming it to noweb form on standard output.
+\end{enumerate}
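+For example (illustrative), a nuweb macro definition such as
+\begin{verbatim}
+@d max @{100@}
+\end{verbatim}
+comes out as noweb intermediate code along the lines of
+\begin{verbatim}
+@begin code 1
+@defn max
+@nl
+@text 100
+@end code 1
+\end{verbatim}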
+
+
+\section{Files}
+
+I have divided the program into several files for quicker
+recompilation during development.
+<<global.h>>=
+<<Include files>>
+<<Type declarations>>
+<<Global variable declarations>>
+<<Function prototypes>>
+@
+We'll need at least three of the standard system include files.
+<<Include files>>=
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <ctype.h>
+@ %def FILE stderr exit fprintf fputs fopen fclose getc putc strlen toupper isupper islower isgraph isspace tempnam remove malloc size_t
+@
+\newpage
+\noindent
+I also like to use [[TRUE]] and [[FALSE]] in my code.
+I'd use an [[enum]] here, except that some systems seem to provide
+definitions of [[TRUE]] and [[FALSE]] by default. The following
+code seems to work on all the local systems.
+<<Type declarations>>=
+#ifndef FALSE
+#define FALSE 0
+#endif
+#ifndef TRUE
+#define TRUE (!0)
+#endif
+@
+\subsection{The Main Files}
+
+The code is divided into four main files (introduced here) and five
+support files (introduced in the next section).
+The file [[main.c]] will contain the driver for the whole program
+(see Section~\ref{main-routine}).
+<<main.c*>>=
+#include "global.h"
+@
+The first pass over the source file is contained in [[pass1.c]].
+It handles collection of all the file names, macros names, and scraps
+(see Section~\ref{pass-one}).
+<<pass1.c*>>=
+#include "global.h"
+@
+The [[.tex]] file is created during a second pass over the source
+file. The file [[latex.c]] contains the code controlling the
+construction of the [[.tex]] file
+(see Section~\ref{latex-file}).
+<<latex.c*>>=
+#include "global.h"
+@
+\subsection{Support Files}
+
+The support files contain a variety of support routines used to define
+and manipulate the major data abstractions.
+The file [[input.c]] holds all the routines used for referring to
+source files (see Section~\ref{source-files}).
+<<input.c*>>=
+#include "global.h"
+@
+Creation and lookup of scraps is handled by routines in [[scraps.c]]
+(see Section~\ref{scraps}).
+<<scraps.c*>>=
+#include "global.h"
+@
+The handling of file names and macro names is detailed in [[names.c]]
+(see Section~\ref{names}).
+<<names.c*>>=
+#include "global.h"
+@
+Memory allocation and deallocation is handled by routines in [[arena.c]]
+(see Section~\ref{memory-management}).
+<<arena.c*>>=
+#include "global.h"
+@
+Finally, for best portability, I seem to need a file containing
+(useless!) definitions of all the global variables.
+<<global.c*>>=
+#include "global.h"
+<<Global variable definitions>>
+@
+\section{The Main Routine} \label{main-routine}
+
+The main routine is quite simple in structure.
+It wades through the optional command-line arguments,
+then handles any files listed on the command line.
+<<main.c*>>=
+void main(argc, argv)
+ int argc;
+ char **argv;
+{
+ int arg = 1;
+ <<Interpret command-line arguments>>
+ <<Process the remaining arguments (file names)>>
+ exit(0);
+}
+@
+\subsection{Command-Line Arguments}
+
+There is one possible command-line argument:
+\begin{description}
+\item[\tt -v] The verbose flag. Forces output of progress reports.
+\end{description}
+
+Global flags are declared for each of the arguments.
+<<Global variable declarations>>=
+extern int verbose_flag; /* if TRUE, write progress information */
+@
+The flags are all initialized for correct default behavior.
+
+<<Global variable definitions>>=
+int verbose_flag = FALSE;
+@
+We save the invocation name of the command in a global variable
+[[command_name]] for use in error messages.
+<<Global variable declarations>>=
+extern char *command_name;
+<<Global variable definitions>>=
+char *command_name = NULL;
+@
+The invocation name is conventionally passed in [[argv[0]]].
+<<Interpret command-line arguments>>=
+command_name = argv[0];
+@
+We need to examine the remaining entries in [[argv]], looking for
+command-line arguments.
+<<Interpret command-line arguments>>=
+while (arg < argc) {
+ char *s = argv[arg];
+ if (*s++ == '-') {
+ <<Interpret the argument string [[s]]>>
+ arg++;
+ }
+ else break;
+}
+@
+Several flags can be stacked behind a single minus sign; therefore,
+we've got to loop through the string, handling them all.
+<<Interpret the argument string [[s]]>>=
+{
+ char c = *s++;
+ while (c) {
+ switch (c) {
+ case 'v': verbose_flag = TRUE;
+ break;
+ default: fprintf(stderr, "%s: unexpected argument ignored. ",
+ command_name);
+ fprintf(stderr, "Usage is: %s [-v] file...\n",
+ command_name);
+ break;
+ }
+ c = *s++;
+ }
+}
+@
+\subsection{File Names}
+
+We expect at least one file name. While a missing file name might be
+ignored without causing any problems, we take the opportunity to report
+the usage convention.
+<<Process the remaining arguments (file names)>>=
+{
+ if (arg >= argc) {
+ fprintf(stderr, "%s: expected a file name. ", command_name);
+ fprintf(stderr, "Usage is: %s [-v] file-name...\n", command_name);
+ exit(-1);
+ }
+ do {
+ <<Handle the file name in [[argv[arg]]]>>
+ arg++;
+ } while (arg < argc);
+}
+@
+The code to handle a particular file name is rather more tedious than
+the actual processing of the file. A file name may be an arbitrarily
+complicated path name, with an optional extension. If no extension is
+present, we add [[.w]] as a default. The extended path name will be
+kept in a local variable [[source_name]].
+<<Handle the file name in [[argv[arg]]]>>=
+{
+ char source_name[100];
+ <<Build [[source_name]]>>
+ <<Process a file>>
+}
+@
+I bump the pointer [[p]] through all the characters in [[argv[arg]]],
+copying all the characters into [[source_name]] (via the pointer
+[[q]]).
+
+At each slash, I reset the pointer [[dot]], so that dots appearing in
+leading directory names are ignored.
+
+The pointer [[dot]] is thus left pointing at the file name extension,
+if present. If there is no extension, we add [[.w]] to the source
+name.
+<<Build [[source_name]]>>=
+{
+ char *p = argv[arg];
+ char *q = source_name;
+ char *dot = NULL;
+ char c = *p++;
+ while (c) {
+ *q++ = c;
+ if (c == '/') {
+ dot = NULL;
+ }
+ else if (c == '.')
+ dot = q - 1;
+ c = *p++;
+ }
+ *q = '\0';
+ if (!dot) {
+ *q++ = '.';
+ *q++ = 'w';
+ *q = '\0';
+ }
+}
+@
+Now that we're finally ready to process a file, it's not really too
+complex. We bundle most of the work into routines [[pass1]]
+(see Section~\ref{pass-one}) and [[write_tex]] (see
+Section~\ref{latex-file}). After we're finished with a
+particular file, we must remember to release its storage (see
+Section~\ref{memory-management}).
+<<Process a file>>=
+{
+ pass1(source_name);
+ write_tex(source_name, "bogus");
+ arena_free();
+}
+@
+\section{Pass One} \label{pass-one}
+
+During the first pass, we scan the file, saving names so we'll be able to
+disambiguate them later.
+<<Function prototypes>>=
+extern void pass1();
+@
+The routine [[pass1]] takes a single argument, the name of the
+source file. It opens the file, then initializes the roots of the
+file-name tree and the macro-name tree (see Section~\ref{names}).
+After completing this preparation, we make a pass over the file,
+filling in all our data structures.
+<<pass1.c*>>=
+void pass1(file_name)
+ char *file_name;
+{
+ if (verbose_flag)
+ fprintf(stderr, "reading %s\n", file_name);
+ source_open(file_name);
+ macro_names = NULL;
+ file_names = NULL;
+ <<Scan the source file, looking for at-sequences>>
+}
+@
+The only things we look for in the first pass are the command
+sequences. All ordinary text is skipped entirely.
+<<Scan the source file, looking for at-sequences>>=
+{
+ int c = source_get();
+ while (c != EOF) {
+ if (c == '@')
+ <<Scan at-sequence>>
+ c = source_get();
+ }
+}
+@
+Only four of the at-sequences are interesting during the first pass.
+We skip past others immediately, warning if unexpected sequences are
+discovered.
+<<Scan at-sequence>>=
+{
+ c = source_get();
+ switch (c) {
+ case 'O':
+ case 'o': <<Build output file definition>>
+ break;
+ case 'D':
+ case 'd': <<Build macro definition>>
+ break;
+ case '@':
+ case 'u':
+ case 'm':
+ case 'f': /* ignore during this pass */
+ break;
+ default: fprintf(stderr,
+ "%s: unexpected @ sequence ignored (%s, line %d)\n",
+ command_name, source_name, source_line);
+ break;
+ }
+}
+@
+\subsection{Accumulating Definitions}
+
+There are two steps required to handle a definition:
+\begin{enumerate}
+\item Build an entry for the name so we can look it up later.
+\item Skip past the scrap.
+\end{enumerate}
+We go through the same steps for both file names and macro names.
+<<Build output file definition>>=
+{
+ collect_file_name();
+ collect_scrap();
+}
+<<Build macro definition>>=
+{
+ collect_macro_name();
+ collect_scrap();
+}
+@
+\section{Writing the Latex File} \label{latex-file}
+
+The second pass (invoked via a call to [[write_tex]]) copies most of
+the text from the source file straight to standard output, as noweb
+markup.
+Definitions are formatted slightly and cross-reference information is
+printed out.
+
+Note that all the formatting is handled in this section.
+If you don't like the format of definitions or indices or whatever,
+it'll be in this section somewhere. Similarly, if someone wanted to
+modify nuweb to work with a different typesetting system, this would
+be the place to look.
+
+<<Function prototypes>>=
+extern void write_tex();
+@
+We need a few local function declarations before we get into the body
+of [[write_tex]].
+<<latex.c*>>=
+static void copy_scrap(); /* formats the body of a scrap */
+@
+The routine [[write_tex]] takes two file names as parameters: the
+name of the web source file and the name of the [[.tex]] output file.
+<<latex.c*>>=
+void write_tex(file_name, tex_name)
+ char *file_name;
+ char *tex_name;
+{
+ if (verbose_flag)
+ fprintf(stderr, "writing %s\n", "standard output");
+ source_open(file_name);
+ printf("@file %s\n", file_name);
+ <<Copy [[source_file]] into standard output, transforming to noweb>>
+}
+@
+We make our second (and final) pass through the source web, this time
+copying characters straight to the standard output. However, we keep
+an eye peeled for [[@]]~characters, which signal a command sequence.
+
+We keep track of state.
+
+<<Copy [[source_file]] into standard output, transforming to noweb>>=
+{
+ int scraps = 1;
+ int c = source_get();
+ int docs_begun = 0;
+ while (c != EOF) {
+ if (c == '@') {
+ <<Interpret at-sequence>>
+ } else {
+ <<begin documentation chunk>>
+ if (c == '\n')
+ printf("\n@nl\n@text ");
+ else
+ putchar(c);
+ c = source_get();
+ }
+ }
+}
+<<begin documentation chunk>>=
+if (!docs_begun) {
+ docs_begun = 1;
+ printf("@begin docs %d\n", ++chunk_count);
+ printf("@text ");
+}
+@
+
+Counting chunks needs a global variable.
+<<Global variable declarations>>=
+
+extern int chunk_count;
+<<Global variable definitions>>=
+
+int chunk_count = 0;
+<<end documentation chunk>>=
+
+if (docs_begun) {
+ printf("\n@end docs %d\n", chunk_count);
+ docs_begun = 0;
+}
+<<Interpret at-sequence>>=
+{
+ int big_definition = FALSE;
+ c = source_get();
+ switch (c) {
+ case 'O': big_definition = TRUE;
+ case 'o': <<end documentation chunk>>
+ <<Write output file definition>>
+ break;
+ case 'D': big_definition = TRUE;
+ case 'd': <<end documentation chunk>>
+ <<Write macro definition>>
+ break;
+ case 'f': <<Write index of file names>>
+ break;
+ case 'm': <<Write index of macro names>>
+ break;
+ case 'u': <<Write index of user-specified names>>
+ break;
+ case '@': <<begin documentation chunk>> putchar(c); /* fall through */
+ default: c = source_get();
+ break;
+ }
+}
+@
+Macro and file definitions are formatted nearly identically.
+<<Write output file definition>>=
+{
+ Name *name = collect_file_name();
+ printf("@begin code %d\n", ++chunk_count);
+ printf("@defn %s%s\n@nl\n", name->spelling, name->debug_flag ? "*" : "");
+ copy_scrap();
+ <<Finish the scrap environment>>
+}
+<<Write macro definition>>=
+{
+ Name *name = collect_macro_name();
+ printf("@begin code %d\n", ++chunk_count);
+ printf("@defn %s\n@nl\n", name->spelling);
+ copy_scrap();
+ <<Finish the scrap environment>>
+}
+<<Finish the scrap environment>>=
+{
+ printf("\n@end code %d\n", chunk_count);
+ do
+ c = source_get();
+ while (isspace(c)); /* may not be appropriate for noweb */
+}
+@
+\subsubsection{Formatting a Scrap}
+<<latex.c*>>=
+static void copy_scrap()
+{
+ int c = source_get();
+ int indent = 0;
+ printf("@text ");
+ while (1) {
+ switch (c) {
+ case '@': <<Check at-sequence for end-of-scrap>>
+ break;
+ case '\n': printf("\n@nl\n@text ");
+ indent = 0;
+ break;
+ case '\t': <<Expand tab into spaces>>
+ break;
+ default: putchar(c);
+ indent++;
+ break;
+ }
+ c = source_get();
+ }
+}
+<<Expand tab into spaces>>=
+{
+ int delta = 8 - (indent % 8);
+ indent += delta;
+ while (delta > 0) {
+ putchar(' ');
+ delta--;
+ }
+}
+<<Check at-sequence for end-of-scrap>>=
+{
+ c = source_get();
+ switch (c) {
+ case '@': putchar('@');
+ break;
+ case '|': printf("\n");
+ <<print out index entries>> /* fall through */
+ case '}': return;
+ case '<': <<Format macro name>>
+ break;
+ default : fprintf(stderr, "%s: unexpected @%c in scrap (%s, %d)\n",
+ command_name, c, source_name, source_line);
+ exit(-1);
+ }
+}
+<<print out index entries>>=
+{
+ do {
+ char new_name[100];
+ char *p = new_name;
+ do
+ c = source_get();
+ while (isspace(c));
+ if (c != '@') {
+ Name *name;
+ do {
+ *p++ = c;
+ c = source_get();
+ } while (c != '@' && !isspace(c));
+ *p = '\0';
+ printf("@index defn %s\n", new_name);
+ }
+ } while (c != '@');
+ printf("@text "); /* maintain invariant, even though no more text is coming */
+ c = source_get();
+ if (c != '}') {
+ fprintf(stderr, "%s: unexpected @%c in scrap (%s, %d)\n",
+ command_name, c, source_name, source_line);
+ exit(-1);
+ }
+}
+<<Format macro name>>=
+{
+ Name *name = collect_scrap_name();
+ printf("\n@use %s\n@text ", name->spelling);
+}
+@
+\subsection{Generating the Indices}
+
+<<Write index of file names>>=
+{
+ /* noweb doesn't do files; they're all macros */
+ c = source_get();
+}
+<<Write index of macro names>>=
+{
+ <<begin documentation chunk>>
+ printf("\\nowebchunks ");
+ c = source_get();
+}
+<<Write index of user-specified names>>=
+{
+ <<begin documentation chunk>>
+ printf("\\nowebindex ");
+ c = source_get();
+}
+@
+\chapter{The Support Routines}
+
+\section{Source Files} \label{source-files}
+
+\subsection{Global Declarations}
+
+We need two routines to handle reading the source files.
+<<Function prototypes>>=
+extern void source_open(); /* pass in the name of the source file */
+extern int source_get(); /* no args; returns the next char or EOF */
+@
+There are also two global variables maintained for use in error
+messages and such.
+<<Global variable declarations>>=
+extern char *source_name; /* name of the current file */
+extern int source_line; /* current line in the source file */
+<<Global variable definitions>>=
+char *source_name = NULL;
+int source_line = 0;
+@
+\subsection{Local Declarations}
+
+
+<<input.c*>>=
+static FILE *source_file; /* the current input file */
+static int source_peek;
+static int double_at;
+static int include_depth;
+<<input.c*>>=
+struct {
+ FILE *file;
+ char *name;
+ int line;
+} stack[10];
+@
+\subsection{Reading a File}
+
+The routine [[source_get]] returns the next character from the
+current source file. It notices newlines and keeps the line counter
+[[source_line]] up to date. It also catches [[EOF]] and watches
+for [[@]]~characters. All other characters are immediately returned.
+<<input.c*>>=
+int source_get()
+{
+ int c = source_peek;
+ switch (c) {
+ case EOF: <<Handle [[EOF]]>>
+ return c;
+ case '@': <<Handle an ``at'' character>>
+ return c;
+ case '\n': source_line++;
+ default: source_peek = getc(source_file);
+ return c;
+ }
+}
+@
+This whole [[@]]~character handling mess is pretty annoying.
+I want to recognize [[@i]] so I can handle include files correctly.
+At the same time, it makes sense to recognize illegal [[@]]~sequences
+and complain; this avoids ever having to check anywhere else.
+Unfortunately, I need to avoid tripping over the [[@@]]~sequence;
+hence this whole unsatisfactory [[double_at]] business.
+<<Handle an ``at'' character>>=
+{
+ c = getc(source_file);
+ if (double_at) {
+ source_peek = c;
+ double_at = FALSE;
+ c = '@';
+ }
+ else
+ switch (c) {
+ case 'i': <<Open an include file>>
+ break;
+ case 'f': case 'm': case 'u':
+ case 'd': case 'o': case 'D': case 'O':
+ case '{': case '}': case '<': case '>': case '|':
+ source_peek = c;
+ c = '@';
+ break;
+ case '@': source_peek = c;
+ double_at = TRUE;
+ break;
+ default: fprintf(stderr, "%s: bad @ sequence (%s, line %d)\n",
+ command_name, source_name, source_line);
+ exit(-1);
+ }
+}
+<<Open an include file>>=
+{
+ char name[100];
+ if (include_depth >= 10) {
+ fprintf(stderr, "%s: include nesting too deep (%s, %d)\n",
+ command_name, source_name, source_line);
+ exit(-1);
+ }
+ <<Collect include-file name>>
+ stack[include_depth].name = source_name;
+ stack[include_depth].file = source_file;
+ stack[include_depth].line = source_line + 1;
+ include_depth++;
+ source_line = 1;
+ source_name = save_string(name);
+ source_file = fopen(source_name, "r");
+ if (!source_file) {
+ fprintf(stderr, "%s: can't open include file %s\n",
+ command_name, source_name);
+ exit(-1);
+ }
+ source_peek = getc(source_file);
+ c = source_get();
+}
+<<Collect include-file name>>=
+{
+ char *p = name;
+ do
+ c = getc(source_file);
+ while (c == ' ' || c == '\t');
+ while (isgraph(c)) {
+ *p++ = c;
+ c = getc(source_file);
+ }
+ *p = '\0';
+ if (c != '\n') {
+ fprintf(stderr, "%s: unexpected characters after file name (%s, %d)\n",
+ command_name, source_name, source_line);
+ exit(-1);
+ }
+}
+@
+If an [[EOF]] is discovered, the current file must be closed and
+input from the next stacked file must be resumed. If no more files are
+on the stack, the [[EOF]] is returned.
+<<Handle [[EOF]]>>=
+{
+ fclose(source_file);
+ if (include_depth) {
+ include_depth--;
+ source_file = stack[include_depth].file;
+ source_line = stack[include_depth].line;
+ source_name = stack[include_depth].name;
+ source_peek = getc(source_file);
+ c = source_get();
+ }
+}
+@
+\subsection{Opening a File}
+
+The routine [[source_open]] takes a file name and tries to open the
+file. If unsuccessful, it complains and halts. Otherwise, it sets
+[[source_name]], [[source_line]], and [[double_at]].
+<<input.c*>>=
+void source_open(name)
+ char *name;
+{
+ source_file = fopen(name, "r");
+ if (!source_file) {
+ fprintf(stderr, "%s: couldn't open %s\n", command_name, name);
+ exit(-1);
+ }
+ source_name = name;
+ source_line = 1;
+ source_peek = getc(source_file);
+ double_at = FALSE;
+ include_depth = 0;
+}
+@
+\section{Scraps} \label{scraps}
+<<scraps.c*>>=
+void collect_scrap()
+{
+ char *source = save_string(source_name);
+ int line = source_line;
+ <<Accumulate scrap and return>>
+}
+<<Accumulate scrap and return>>=
+{
+ int c = source_get();
+ while (1) {
+ switch (c) {
+ case EOF: fprintf(stderr, "%s: unexpected EOF in scrap (%s, %d)\n",
+ command_name, source, line);
+ exit(-1);
+ case '@': <<Handle at-sign during scrap accumulation>>
+ break;
+ default: c = source_get();
+ break;
+ }
+ }
+}
+<<Handle at-sign during scrap accumulation>>=
+{
+ c = source_get();
+ switch (c) {
+ case '@': c = source_get();
+ break;
+ case '|': <<skip user-specified index entries>>
+ case '}': return;
+ case '<': <<Handle macro invocation in scrap>>
+ break;
+ default : fprintf(stderr, "%s: unexpected @%c in scrap (%s, %d)\n",
+ command_name, c, source_name, source_line);
+ exit(-1);
+ }
+}
+<<skip user-specified index entries>>=
+{
+ do {
+ do
+ c = source_get();
+ while (c != '@');
+ c = source_get();
+ } while (c == '@');
+ if (c != '}') {
+ fprintf(stderr, "%s: unexpected @%c in scrap (%s, %d)\n",
+ command_name, c, source_name, source_line);
+ exit(-1);
+ }
+}
+<<Handle macro invocation in scrap>>=
+{
+ (void) collect_scrap_name();
+ c = source_get();
+}
+<<Function prototypes>>=
+extern void collect_scrap();
+@
+\section{Names} \label{names}
+<<Type declarations>>=
+typedef struct name {
+ char *spelling;
+ struct name *llink;
+ struct name *rlink;
+ int mark;
+ char tab_flag;
+ char indent_flag;
+ char debug_flag;
+} Name;
+<<Global variable declarations>>=
+extern Name *file_names;
+extern Name *macro_names;
+<<Global variable definitions>>=
+Name *file_names = NULL;
+Name *macro_names = NULL;
+<<Function prototypes>>=
+extern Name *collect_file_name();
+extern Name *collect_macro_name();
+extern Name *collect_scrap_name();
+extern Name *name_add();
+extern Name *prefix_add();
+extern char *save_string();
+<<names.c*>>=
+enum { LESS, GREATER, EQUAL, PREFIX, EXTENSION };
+
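+/* A name ending in a blank is an abbreviation (written with a
+   trailing "..." in the source).  compare() treats that blank
+   specially: PREFIX means y abbreviates x, and EXTENSION means x is
+   an abbreviation that y extends. */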
+static int compare(x, y)
+ char *x;
+ char *y;
+{
+ int len, result;
+ int xl = strlen(x);
+ int yl = strlen(y);
+ int xp = x[xl - 1] == ' ';
+ int yp = y[yl - 1] == ' ';
+ if (xp) xl--;
+ if (yp) yl--;
+ len = xl < yl ? xl : yl;
+ result = strncmp(x, y, len);
+ if (result < 0) return GREATER;
+ else if (result > 0) return LESS;
+ else if (xl < yl) {
+ if (xp) return EXTENSION;
+ else return LESS;
+ }
+ else if (xl > yl) {
+ if (yp) return PREFIX;
+ else return GREATER;
+ }
+ else return EQUAL;
+}
+@ %def compare LESS GREATER EQUAL PREFIX EXTENSION
+<<names.c*>>=
+char *save_string(s)
+ char *s;
+{
+ char *new = (char *) arena_getmem((strlen(s) + 1) * sizeof(char));
+ strcpy(new, s);
+ return new;
+}
+@ %def save_string
+<<names.c*>>=
+static int ambiguous_prefix();
+
+Name *prefix_add(root, spelling)
+ Name **root;
+ char *spelling;
+{
+ Name *node = *root;
+ while (node) {
+ switch (compare(node->spelling, spelling)) {
+ case GREATER: root = &node->rlink;
+ break;
+ case LESS: root = &node->llink;
+ break;
+ case EQUAL: return node;
+ case EXTENSION: node->spelling = save_string(spelling);
+ return node;
+ case PREFIX: <<Check for ambiguous prefix>>
+ return node;
+ }
+ node = *root;
+ }
+ <<Create new name entry>>
+}
+@ %def prefix_add
+@
+Since a very short prefix might match more than one macro name, I need
+to check for other matches to avoid mistakes. Basically, I simply
+continue the search down {\em both\/} branches of the tree.
+
+<<Check for ambiguous prefix>>=
+{
+ if (ambiguous_prefix(node->llink, spelling) ||
+ ambiguous_prefix(node->rlink, spelling))
+ fprintf(stderr,
+ "%s: ambiguous prefix @<%s...@> (%s, line %d)\n",
+ command_name, spelling, source_name, source_line);
+}
+<<names.c*>>=
+static int ambiguous_prefix(node, spelling)
+ Name *node;
+ char *spelling;
+{
+ while (node) {
+ switch (compare(node->spelling, spelling)) {
+ case GREATER: node = node->rlink;
+ break;
+ case LESS: node = node->llink;
+ break;
+ case EQUAL:
+ case EXTENSION:
+ case PREFIX: return TRUE;
+ }
+ }
+ return FALSE;
+}
+@ %def ambiguous_prefix
+<<names.c*>>=
+Name *name_add(root, spelling)
+ Name **root;
+ char *spelling;
+{
+ Name *node = *root;
+ while (node) {
+ int result = strcmp(node->spelling, spelling);
+ if (result > 0)
+ root = &node->llink;
+ else if (result < 0)
+ root = &node->rlink;
+ else
+ return node;
+ node = *root;
+ }
+ <<Create new name entry>>
+}
+@ %def name_add
+<<Create new name entry>>=
+{
+ node = (Name *) arena_getmem(sizeof(Name));
+ node->spelling = save_string(spelling);
+ node->mark = FALSE;
+ node->llink = NULL;
+ node->rlink = NULL;
+ node->tab_flag = TRUE;
+ node->indent_flag = TRUE;
+ node->debug_flag = FALSE;
+ *root = node;
+ return node;
+}
+@
+The name is terminated by whitespace. We also check for ``per-file''
+flags (so [[@o foo.c -d @{]] would turn on [[debug_flag]] for
+[[foo.c]]), and keep skipping white space until we reach the scrap.
+<<names.c*>>=
+Name *collect_file_name()
+{
+ Name *new_name;
+ char name[100];
+ char *p = name;
+ int start_line = source_line;
+ int c = source_get();
+ while (isspace(c))
+ c = source_get();
+ while (isgraph(c)) {
+ *p++ = c;
+ c = source_get();
+ }
+ if (p == name) {
+ fprintf(stderr, "%s: expected file name (%s, %d)\n",
+ command_name, source_name, start_line);
+ exit(-1);
+ }
+ *p = '\0';
+ new_name = name_add(&file_names, name);
+ <<Handle optional per-file flags>>
+ if (c != '@' || source_get() != '{') {
+ fprintf(stderr, "%s: expected @{ after file name (%s, %d)\n",
+ command_name, source_name, start_line);
+ exit(-1);
+ }
+ return new_name;
+}
+@ %def collect_file_name
+<<Handle optional per-file flags>>=
+{
+ while (1) {
+ while (isspace(c))
+ c = source_get();
+ if (c == '-') {
+ c = source_get();
+ do {
+ switch (c) {
+ case 't': new_name->tab_flag = FALSE;
+ break;
+ case 'd': new_name->debug_flag = TRUE;
+ break;
+ case 'i': new_name->indent_flag = FALSE;
+ break;
+ default : fprintf(stderr, "%s: unexpected per-file flag (%s, %d)\n",
+ command_name, source_name, source_line);
+ break;
+ }
+ c = source_get();
+ } while (!isspace(c));
+ }
+ else break;
+ }
+}
+@
+The name is terminated by \verb+\n+ or \verb+@{+; we keep skipping until we see \verb+@{+.
+<<names.c*>>=
+Name *collect_macro_name()
+{
+ char name[100];
+ char *p = name;
+ int start_line = source_line;
+ int c = source_get();
+ while (isspace(c))
+ c = source_get();
+ while (c != EOF) {
+ switch (c) {
+ case '@': <<Check for terminating at-sequence and return name>>
+ break;
+ case '\t':
+ case ' ': *p++ = ' ';
+ do
+ c = source_get();
+ while (c == ' ' || c == '\t');
+ break;
+ case '\n': <<Skip until scrap begins, then return name>>
+ default: *p++ = c;
+ c = source_get();
+ break;
+ }
+ }
+ fprintf(stderr, "%s: expected macro name (%s, %d)\n",
+ command_name, source_name, start_line);
+ exit(-1);
+ return NULL; /* unreachable return to avoid warnings on some compilers */
+}
+@ %def collect_macro_name
+<<Check for terminating at-sequence and return name>>=
+{
+ c = source_get();
+ switch (c) {
+ case '@': *p++ = c;
+ break;
+ case '{': <<Cleanup and install name>>
+ default: fprintf(stderr,
+ "%s: unexpected @%c in macro name (%s, %d)\n",
+ command_name, c, source_name, start_line);
+ exit(-1);
+ }
+}
+<<Cleanup and install name>>=
+{
+ if (p > name && p[-1] == ' ')
+ p--;
+ if (p - name > 3 && p[-1] == '.' && p[-2] == '.' && p[-3] == '.') {
+ p[-3] = ' ';
+ p -= 2;
+ }
+ if (p == name || name[0] == ' ') {
+ fprintf(stderr, "%s: empty scrap name (%s, %d)\n",
+ command_name, source_name, source_line);
+ exit(-1);
+ }
+ *p = '\0';
+ return prefix_add(&macro_names, name);
+}
+<<Skip until scrap begins, then return name>>=
+{
+ do
+ c = source_get();
+ while (isspace(c));
+ if (c != '@' || source_get() != '{') {
+ fprintf(stderr, "%s: expected @{ after macro name (%s, %d)\n",
+ command_name, source_name, start_line);
+ exit(-1);
+ }
+ <<Cleanup and install name>>
+}
+@
+The name is terminated by \verb+@>+.
+<<names.c*>>=
+Name *collect_scrap_name()
+{
+ char name[100];
+ char *p = name;
+ int c = source_get();
+ while (c == ' ' || c == '\t')
+ c = source_get();
+ while (c != EOF) {
+ switch (c) {
+ case '@': <<Look for end of scrap name and return>>
+ break;
+ case '\t':
+ case ' ': *p++ = ' ';
+ do
+ c = source_get();
+ while (c == ' ' || c == '\t');
+ break;
+ default: if (!isgraph(c)) {
+ fprintf(stderr,
+ "%s: unexpected character in macro name (%s, %d)\n",
+ command_name, source_name, source_line);
+ exit(-1);
+ }
+ *p++ = c;
+ c = source_get();
+ break;
+ }
+ }
+ fprintf(stderr, "%s: unexpected end of file (%s, %d)\n",
+ command_name, source_name, source_line);
+ exit(-1);
+ return NULL; /* unreachable return to avoid warnings on some compilers */
+}
+@ %def collect_scrap_name
+<<Look for end of scrap name and return>>=
+{
+ c = source_get();
+ switch (c) {
+ case '@': *p++ = c;
+ c = source_get();
+ break;
+ case '>': <<Cleanup and install name>>
+ default: fprintf(stderr,
+ "%s: unexpected @%c in macro name (%s, %d)\n",
+ command_name, c, source_name, source_line);
+ exit(-1);
+ }
+}
+@
+\section{Memory Management} \label{memory-management}
+
+I manage memory using a simple scheme inspired by Hanson's idea of
+{\em arenas\/}~\cite{hanson:90}.
+Basically, I allocate all the storage required when processing a
+source file (primarily for names and scraps) using calls to
+[[arena_getmem(n)]], where [[n]] specifies the number of bytes to
+be allocated. When the storage is no longer required, the entire arena
+is freed with a single call to [[arena_free()]]. Both operations
+are quite fast.
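+For illustration, a client might use the pair like this (a sketch
+only; the real clients are the scrap and name routines above):
+\begin{verbatim}
+/* hypothetical client, not part of this program: allocate freely
+   during one pass over a source, then discard it all at once */
+void one_pass(name)
+  char *name;
+{
+  char *copy;
+  source_open(name);
+  copy = save_string(source_name);  /* copy lives in the arena */
+  while (source_get() != EOF)
+    ;                               /* names and scraps pile up... */
+  arena_free();                     /* ...and vanish together */
+}
+\end{verbatim}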
+<<Function prototypes>>=
+extern void *arena_getmem();
+extern void arena_free();
+<<arena.c*>>=
+typedef struct chunk {
+ struct chunk *next;
+ char *limit;
+ char *avail;
+} Chunk;
+@ %def Chunk
+@
+We define an empty chunk called [[first]]. The variable [[arena]] points
+at the current chunk of memory; it's initially pointed at [[first]].
+As soon as some storage is required, a ``real'' chunk of memory will
+be allocated and attached to [[first->next]]; storage will be
+allocated from the new chunk (and later chunks if necessary).
+<<arena.c*>>=
+static Chunk first = { NULL, NULL, NULL };
+static Chunk *arena = &first;
+@ %def first arena
+@
+\subsection{Allocating Memory}
+
+The routine [[arena_getmem(n)]] returns a pointer to (at least)
+[[n]] bytes of memory. Note that [[n]] is rounded up to ensure
+that returned pointers are always aligned. We align to the nearest
+8~byte boundary, since that'll satisfy the more common 2-byte and
+4-byte alignment restrictions too; a request for 13~bytes, for
+example, actually consumes 16.
+
+<<arena.c*>>=
+void *arena_getmem(n)
+ size_t n;
+{
+ char *q;
+ char *p = arena->avail;
+ n = (n + 7) & ~7; /* ensuring alignment to 8 bytes */
+ q = p + n;
+ if (q <= arena->limit) {
+ arena->avail = q;
+ return p;
+ }
+ <<Find a new chunk of memory>>
+}
+@ %def arena_getmem
+@
+If the current chunk doesn't have adequate space (at least [[n]]
+bytes) we examine the rest of the list of chunks (starting at
+[[arena->next]]) looking for a chunk with adequate space. If [[n]]
+is very large, we may not find it right away or we may not find a
+suitable chunk at all.
+<<Find a new chunk of memory>>=
+{
+ Chunk *ap = arena;
+ Chunk *np = ap->next;
+ while (np) {
+ char *v = sizeof(Chunk) + (char *) np;
+ if (v + n <= np->limit) {
+ np->avail = v + n;
+ arena = np;
+ return v;
+ }
+ ap = np;
+ np = ap->next;
+ }
+ <<Allocate a new chunk of memory>>
+}
+@
+If there isn't a suitable chunk of memory on the free list, then we
+need to allocate a new one.
+<<Allocate a new chunk of memory>>=
+{
+  size_t m = n + 10000;
+  np = (Chunk *) malloc(m);
+  if (!np) {
+    fprintf(stderr, "%s: out of memory\n", command_name);
+    exit(-1);
+  }
+ np->limit = m + (char *) np;
+ np->avail = n + sizeof(Chunk) + (char *) np;
+ np->next = NULL;
+ ap->next = np;
+ arena = np;
+ return sizeof(Chunk) + (char *) np;
+}
+@
+\subsection{Freeing Memory}
+
+To free all the memory in the arena, we need only point [[arena]]
+back to the first empty chunk. The chunks themselves are never
+returned to the system; they stay on the list and are reused by
+later calls to [[arena_getmem]].
+<<arena.c*>>=
+void arena_free()
+{
+ arena = &first;
+}
+@ %def arena_free
+@
+\chapter{Indices} \label{indices}
+
+\section{Chunks}
+
+\nowebchunks
+
+\section{Identifiers}
+
+Knuth prints his index of identifiers in a two-column format.
+I could force this automatically by emitting the [[\twocolumn]]
+command, but this has the side effect of forcing a new page.
+Therefore, it seems better to leave this up to the user.
+
+\nowebindex
+
+\bibliographystyle{plain}
+\bibliography{literate}
+
+\end{document}
diff --git a/web/noweb/contrib/norman/pp/mkfile b/web/noweb/contrib/norman/pp/mkfile
new file mode 100644
index 0000000000..894316536f
--- /dev/null
+++ b/web/noweb/contrib/norman/pp/mkfile
@@ -0,0 +1,24 @@
+ICONT=icont
+
+%: %.icn
+ $ICONT $target
+
+%.icn: %.nw
+ notangle -L'#line %-1L "%F"%N' $prereq > $target
+
+all:V: pp pp.ps pp.html
+
+$BIN/nwpp: pp.icn
+ $ICONT -o $target $prereq
+
+pp.tex: pp.nw
+ noweave -delay -autodefs icon -index $prereq > $target
+
+pp.html: pp.nw
+ noweave -filter l2h -delay -autodefs icon -index -html $prereq > $target
+
+clean:V:
+	rm -f *~ *.tex *.dvi *.icn *.u1 *.u2 *.aux *.toc *.log
+
+clobber:V: clean
+ rm -f *.ps *.dvi *.html pp
diff --git a/web/noweb/contrib/norman/pp/pp.nw b/web/noweb/contrib/norman/pp/pp.nw
new file mode 100644
index 0000000000..b8260d226b
--- /dev/null
+++ b/web/noweb/contrib/norman/pp/pp.nw
@@ -0,0 +1,314 @@
+% -*- mode: Noweb; noweb-code-mode: icon-mode -*-
+
+\documentclass {article}
+\usepackage {noweb}
+
+\title {Simple prettyprinting with Noweb}
+\author{Norman Ramsey\\ \texttt{nr@eecs.harvard.edu}}
+
+\begin {document}
+@
+\maketitle
+
+\section {Introduction}
+
+This is a pretty-printer, written as a filter for the noweb
+literate-programming tool.
+The prettyprinter does not touch indentation and line breaks; what it
+does is break each code line into tokens, then reformat the tokens.
+Some of the prettyprinter's capabilities are specified in a
+translation table.
+This table is written in a file, which must be named as the first argument.
+The prettyprinter will:
+\begin{itemize}
+\item format special tokens as specified in the parameter file
+\item keep track of which tokens need to be in math mode, and take
+care of it
+\item change underscores to subscripts in the names of identifiers
+\end{itemize}
+The prettyprinter doesn't do a great job with quoted strings, and it
+doesn't do anything intelligent with comments.
+Users are invited to improve these aspects.
+
+Using the prettyprinter requires changing the {\TeX} code that noweb
+runs at the start of a code chunk. This may do the job:
+\begin{verbatim}
+\usepackage{noweb}
+\let\originalprime='
+\def\setupcode{\catcode`\ =10 \catcode`\'=13 \regressprime}
+{\catcode`\'=\active
+ \makeatletter
+ \gdef\regressprime{\def'{^\bgroup\prim@s}}}
+\let\Tt\relax
+\end{verbatim}
+
+The prettyprinter uses the ``finduses'' model of symbols, alphanumerics, and
+delimiters.
+A token is
+\begin{itemize}
+\item whitespace,
+\item a maximal string of symbols,
+\item a maximal string of alphanumerics,
+\item a single delimiter, or
+\item a string that begins with a delimiter and appears in the
+translation table.
+\end{itemize}
+<<*>>=
+global alphanum, symbols # anything else is a delimiter
+@ The defaults are as in ``finduses.''
+<<initialization>>=
+alphanum := &letters ++ &digits ++ '_\'@#'
+symbols := '!%^&*-+:=|~<>./?`'
+@
+All tokens become {\TeX} strings, and we track three kinds.
+<<*>>=
+record space(string) # white space
+record math(string) # string to appear in math mode
+record nonmath(string) # string to appear outside of math mode
+@ Space between two math tokens goes in math mode; space adjacent to a
+nonmath token goes in nonmath mode.
+@
+Sometimes we have to convert something to math mode.
+<<*>>=
+procedure mathcvt(s)
+ return case type(s) of {
+ "math" | "space" : s
+ "nonmath" : math("\\mbox{" || s.string || "}")
+ }
+ stop("bad math conversion of ", image(s))
+end
+procedure mathstring(s)
+ return mathcvt(s).string
+end
+@
+A table [[translation]] defines a translation into \TeX\ code for every interesting
+token in the target language.
+The table is a sequence of lines of the form
+\begin{quote}
+\begin{tabular}{ll}
+\verb+$+\emph{token} \emph{translation}&A math-mode token\\
+\verb+-+\emph{token} \emph{translation}&A non-math token\\
+\verb+A+\emph{chars}&List of all characters to be considered alphanumerics\\
+\verb+S+\emph{chars}&List of all characters to be considered symbols\\
+\end{tabular}
+\end{quote}
+Tokens, including identifiers and symbols, are considered to be
+math-mode tokens unless the translation table specifies otherwise.
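+For instance, here are three lines from the table used at the end of
+this document: the [[-]] entries set non-math keywords, and the
+[[$]] entries set math-mode symbols.
+\begin{verbatim}
+-if \textbf{if}
+$<= \le
+$:= \mathrel{:=}
+\end{verbatim}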
+<<*>>=
+procedure read_translation(fname)
+ local f, line, k, v, t
+ f := open(fname) | stop("Cannot open file ", fname)
+ t := table()
+ while line := read(f) do
+ line ?
+ case move(1) of {
+ "$" : { tab(many(' \t')); k := tab(upto(' \t')); tab(many(' \t')); v := tab(0)
+ t[k] := math(v) }
+ "-" : { tab(many(' \t')); k := tab(upto(' \t')); tab(many(' \t')); v := tab(0)
+ t[k] := nonmath(v) }
+ "A" : alphanum := cset(tab(0))
+ "S" : symbols := cset(tab(0))
+ default : stop("Table entry must begin with $ or - or A or S")
+ }
+ close(f)
+ return t
+end
+@
+The rest is uninteresting Icon code, which surely could be better documented.
+<<*>>=
+global trans
+procedure main(args)
+ local curline, curmath
+ <<initialization>>
+ trans := read_translation(get(args)) | stop("Must specify translation table")
+ <<add \TeX\ specials to [[trans]]>>
+ dtrans := table()
+ every k := key(trans) & not any(symbols, k) & not any(alphanum, k) do
+ dtrans[k] := trans[k]
+ curline := []
+ code := &null
+ while line := read() do
+ line ? { <<consume input>> }
+end
+@
+Instead of escaping the {\TeX} specials, I just put them in the
+translation table if they aren't already.
+<<add \TeX\ specials to [[trans]]>>=
+every c := !"{}#$%^&_" do /trans[c] := math("\\" || c)
+/trans["\\"] := math("\\backslash ")
+@
+We accumulate tokens into [[curline]], then emit them when we reach
+the end of a line or the end of code.
+<<consume input>>=
+="@" | stop("Malformed line in noweb pipeline")
+keyword := tab(upto(' ')|0)
+value := if pos(0) then &null else (=" ", tab(0))
+case keyword of {
+ "begin" : {if match("code", value) then code := 1 else code := &null
+ write(line)}
+ "end" : { <<drain accumulation>>; code := &null; write(line) }
+ "quote" : {code := 1; write(line)}
+ "endquote" : {<<drain accumulation>>; code := &null; write(line)}
+ "text" : if \code then {<<accumulate [[value]]>>} else write(line)
+ "nl" | "use" : { <<drain accumulation>>; write(line) }
+ default : write(line)
+}
+@
+Converting text to tokens is the heart of the algorithm.
+This code looks at the first character and finds maximal sequences.
+Digit sequences are treated specially.
+Strings with single or double quotes are hacked in.
+<<accumulate [[value]]>>=
+value ?
+ while not pos(0) do
+ if any(' \t') then put(curline, space(tab(many(' \t'))))
+ else if any(alphanum) then { # maximal alphanumeric string
+ id := tab(many(alphanum))
+ put(curline, xform_alphanum(id))
+ } else if any(symbols) then { # maximal symbol string
+ id := tab(many(symbols))
+ put(curline, xform_symbols(id))
+ } else if delim := =("\"" | "'") then {
+ put(curline, xform_literal(delim || tab(find(delim)) || =delim))
+ } else if =(id := key(dtrans)) then { # if delimiter starts table string, xlate
+ put(curline, dtrans[id])
+ } else { # single delimiter character
+ put(curline, math(move(1)))
+ }
+@
+Underscores become subscripts, initial hats become hats, and we wrap
+long strings in \verb+\mathit+ unless they are strings of digits.
+Leading underscores are botched.
+<<*>>=
+procedure xform_alphanum(id)
+ local base
+ if \trans[id] then return trans[id]
+ if id[1] == "^" then # scope is to end of symbol
+ return math("\\nwpphat{" || mathstring(xform_alphanum(id[2:0])) || "}")
+ id ?
+ if *(base := tab(upto('_'))) > 0 & move(1) & not pos(0) then
+ return math(mathstring(xform_alphanum(base)) || "_" ||
+ mathstring(xform_alphanum(tab(0))))
+ else
+ return math(mathwrap(tab(0)))
+end
+procedure mathwrap(s)
+ if *s = 1 then return s
+ else if s ? (tab(upto('\'') == 2), tab(many('\'')), pos(0)) then
+ return "{" || s || "}"
+ else if upto(~&digits, s) then return "{\\mathit{" || s || "}}"
+ else return s # numbers don't get italic
+end
+@
+Symbols don't get any of this massaging.
+<<*>>=
+procedure xform_symbols(id)
+ if \trans[id] then return trans[id]
+ return math(id)
+end
+@
+I haven't tested any of this literal jazz.
+<<*>>=
+procedure xform_literal(id)
+ static chars
+ initial chars := "=|+-@!$#" || &letters || &digits
+ if c := !chars & not(find(c, id)) then
+ return nonmath("\\verb" || c || id || c)
+ else
+ return nonmath("\\texttt{" || id || "}")
+end
+@
+To emit tokens, I track mathness, and I turn it on and off
+appropriately.
+I also make sure to get space outside of math mode wherever
+appropriate, so it will show up.
+(In the chunks below, [[/curmath]] succeeds only when [[curmath]] is
+null and [[\curmath]] only when it is non-null, so each [[\(]] is
+balanced by exactly one [[\)]].)
+<<drain accumulation>>=
+if *curline > 0 then {
+ writes("@literal ")
+ curmath := &null
+ while t := get(curline) do
+ case type(t) of {
+ "math" : { <<ensure math>>; writes(t.string) }
+ "nonmath" : { <<ensure non-math>>; writes(t.string) }
+ "space" : { if /curmath then writes(repl("{\\ }", *t.string))
+ else if type(curline[1]) == "math" then writes(t.string)
+ else { <<ensure non-math>>; writes(repl("{\\ }", *t.string)) }
+ }
+ default : stop("This can't happen --- bad token ", image(t))
+ }
+ <<ensure non-math>>
+ write()
+}
+<<ensure math>>=
+/curmath := 1 & writes("\\(")
+<<ensure non-math>>=
+\curmath := &null & writes("\\)")
+@
+\section{Example}
+Here's a fragment of source code I used in a paper:
+\begin{verbatim}
+fun simple () =
+ let (b_I --> PC := target_I | I_c) = tgt[PC]
+ in if [[b_I]] then
+ PC := [[target_I]] | [[I_c]]
+ else
+ PC := succ(PC) | [[I_c]]
+ fi
+ ; simple()
+ end
+\end{verbatim}
+Here's the corresponding output\ifhtml
+, which looks pretty stupid in HTML because it's intended for {\TeX}\fi:
+\begin{trivlist}
+\item \obeylines
+\textbf{fun}\ \({\mathit{simple}} () \equiv \)
+\ \ \textbf{let}\ \((b_I \mathbin{\rightarrow} {\mathit{PC}} \mathrel{:=} {\mathit{target}}_I \mathrel{|} I_c) \equiv {\mathit{tgt}}[{\mathit{PC}}]\)
+\ \ \textbf{in}\ \ \textbf{if}\ \([\![b_I]\!]\)\ \textbf{then}
+\ \ \ \ \ \ \ \ \({\mathit{PC}} \mathrel{:=} [\![{\mathit{target}}_I]\!] \mathrel{|} [\![I_c]\!]\)
+\ \ \ \ \ \ \textbf{else}
+\ \ \ \ \ \ \ \ \({\mathit{PC}} \mathrel{:=} {\mathit{succ}}({\mathit{PC}}) \mathrel{|} [\![I_c]\!]\)
+\ \ \ \ \ \ \textbf{fi}
+\ \ \ \ \ \ \(; {\mathit{simple}}()\)
+\ \ \textbf{end}
+\end{trivlist}
+@
+And finally,
+here's the translation table I used:
+{\small
+\begin{verbatim}
+A^_'@ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789#
+S!%&*-+:=|~<>./?`
+$true \textbf{true}
+$false \textbf{false}
+-if \textbf{if}
+-then \textbf{then}
+-else \textbf{else}
+-fi \textbf{fi}
+-fun \textbf{fun}
+-let \textbf{let}
+-in \textbf{in}
+-end \textbf{end}
+$@[[ [\![
+$@]] ]\!]
+$:= \mathrel{:=}
+$andalso \land
+$--> \mathbin{\rightarrow}
+$= \equiv
+$== =
+$| \mathrel{|}
+$~ \mathord{-}
+$not \lnot
+$!= \ne
+$<= \le
+$>= \ge
+$... \bullet
+\end{verbatim}
+\par}
+
+
+\appendix
+\section {Index}
+\nowebindex
+\nowebchunks
+@
+\end {document}
diff --git a/web/noweb/contrib/norman/scopehack.icn b/web/noweb/contrib/norman/scopehack.icn
new file mode 100644
index 0000000000..d837a2ff61
--- /dev/null
+++ b/web/noweb/contrib/norman/scopehack.icn
@@ -0,0 +1,44 @@
+# scopehack: a replacement for totex that splits its output into multiple files
+
+global totex
+
+procedure main(args)
+ totex := "PATH=/usr/local/noweb/lib:/usr/lib/noweb:$PATH totex"
+ every totex ||:= " '" || !args || "'"
+
+    lines := []
+    file := &null		# no @file line seen yet
+
+ while line := read() do {
+ line ?
+ if ="@fatal" then exit(1)
+ else if ="@file " then
+ if =\file & pos(0) then # no change
+ &null
+ else {
+ flush(file, lines)
+ file := tab(0)
+ }
+ put(lines, line)
+ }
+ flush(file, lines)
+end
+
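+# send the lines gathered for one @file through a totex pipe,
+# writing <basename>.tex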
+procedure flush(file, lines)
+ if /file & *lines > 0 then
+ stop("First line is not @file")
+ else if *lines = 0 then
+ return
+ else {
+ outfile := suffex(file) || ".tex"
+ p := open(totex || " > " || outfile, "wp") | stop ("cannot run ", totex)
+ while write(p, get(lines))
+ close(p)
+ return
+ }
+end
+
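+# strip the last .extension from a file name, if there is one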
+procedure suffex(s)
+ return reverse (reverse(s) ? {tab(upto('.')) & ="."; tab(0)})
+end
+
diff --git a/web/noweb/contrib/partingr/README b/web/noweb/contrib/partingr/README
new file mode 100644
index 0000000000..a0f06fecb0
--- /dev/null
+++ b/web/noweb/contrib/partingr/README
@@ -0,0 +1,148 @@
+This is plain TeX indexing and cross-reference support for noweb.
+
+total 38
+-rwxr-x--- 1 partingr 959 Aug 2 14:10 TeXthings
+-rwxr-x--- 1 partingr 5544 Aug 2 14:19 addscore.nw
+-rwxr-xr-x 1 partingr 4672 Aug 2 13:35 mm2mx63
+-rwxr-xr-x 1 partingr 5033 Aug 2 13:35 mm2mx64
+-rwxr-xr-x 1 partingr 5180 Aug 2 13:35 mm2mx65
+-rwxr-xr-x 1 partingr 1445 Aug 2 13:37 mm2tex
+-rwxr-xr-x 1 partingr 4109 Aug 2 13:35 mx2tex31
+-rwxr-xr-x 1 partingr 4055 Aug 2 13:35 nwindex.tex
+-rwxr-x--- 1 partingr 67 Aug 2 14:14 nwnweave
+-rwxr-xr-x 1 partingr 35 Aug 2 13:35 nwtangle
+-rwxr-xr-x 1 partingr 64 Aug 2 13:35 nwweave
+-rwxr-xr-x 1 partingr 524 Aug 2 13:58 xpand
+
+This is what each file does/is:
+
+TeXthings perl 'header' file. needs to be somewhere
+ perl will find it when it executes mm2mx63
+ or mx2tex31 or mm2tex.
+
+mm2mx63 version 6.3 of mm2mx. converts mm files
+ (those created by markup) into mx files
+ (which is my modified markup file).
+ sectionref macros are aaa,aab,aac,aad
+ reads from STDIN and outputs to STDOUT
+ see below for cli options.
+
+mm2mx64 version 6.4 of mm2mx. virtually the same
+ as mm2mx63, but the sectionref macros that
+ get created have different names. default
+		is za,zb,zc etc. (I have lots of 2 character
+ macros for TeX so I don't use this so much,
+ but it might be useful...)
+
+mm2mx65 same as mm2mx63, but converts @<name@> in
+ documentation chunks into a @use reference
+ so you get the reference style in the
+ documentation, if you see what I mean.
+ EG. "see @<main@>" will become
+ "see <main 1a>" if main is defined in 1a
+ (with the proper typesetting for the module
+ name of course).
+
+mx2tex31 converts mx files into TeX. see below for
+ cli options.
+
+mm2tex weaver for normal markup files. tends to be
+ faster than awk because perl semi-compiles
+		its programs before execution.
+
+nwindex.tex TeX macros for indexing.
+
+nwnweave shell script to weave a file with certain
+ cli options.
+
+nwweave shell script to weave a file with certain
+ cli options.
+
+xpand expands ...>> references in markup files.
+ !only *after* a full name has been seen!
+
+CLI OPTIONS:
+
+mm2mx63 -i create an identifier index from @ %def lines
+ -m create a module index
+ -n case insensitive module name matching
+ (nb all module names come out in lower case)
+
+mx2tex31 -i create the indexes (from the mx file)
+ -n name set the name of the output files. input is
+ from STDIN, output is to name.tex and
+ name.texnique.
+ -s hack for 'only first definition gets the full
+ list of defining chunks'.
+ (look at output to see the difference)
+ -f name takes the markup file from name.markup and
+ outputs to name.tex and name.texnique
+ -h help message
+ -q no output to terminal at all
+
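+[for example, a weave along these lines should work, assuming
+ foo.markup was produced by noweb's markup program:
+
+	mm2mx63 -i -m < foo.markup | mx2tex31 -i -n foo
+
+ which writes foo.tex, foo.texnique, foo.ids and foo.mods.
+ this pipeline is only a sketch; nwweave and nwnweave wrap up
+ similar invocations]
+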
+NWINDEX.TEX
+
+When mx2tex31 creates a TeX file, it inserts 'hooks' into the code
+so that the chunk references can be printed out according to the
+user's preferences.
+
+For all the hooks, \list contains a list of the defining chunks
+of the named chunk and \ulist contains a list of the chunks the
+named chunk is used in.
+(These lists are from Appendix D of The TeXBook. Items are
+separated by \\ and contained in braces, so a list containing 1,2,3
+would be defined as \def\list{\\{1}\\{2}\\{3}}.)
+
+For a chunk defined in 1a and 1b, and used in 1c
+\list={\\{1a}\\{1b}}
+\ulist={\\{1c}}
+
+Three hooks are provided...
+
+\inmodname is called just before the right-angle of the chunk name
+\beforecode is called just after the chunk name (and angles) have
+ been set
+\aftercode is called just before \nwendcode{}
+
+The default definitions:
+\inmodname = put the *first* defining chunk xref in chunk names
+\beforecode = empty
+\aftercode = 'This definition is continued in section...' if the name
+ is defined in more than one chunk and
+ 'This code is used in section...'
+
+[look at the output to see what happens]
+This gives output like the LaTeX cross-referencing (and like CWEB).
+
+There are macros in nwindex.tex for printing out lists neatly
+(ie with commas and 'and' at the end) and for putting
+'section' or 'sections' at the front of the list.
+
+[if you need more info, either email me or wait for the documentation
+ which should be finished this week]
+
+INDEXES
+
+if asked to, mx2tex31 creates two files: name.ids and name.mods
+ids - identifier index
+mods - module index
+
+It *doesn't* include these by default in the TeX file. you have
+to ask for them by putting \printindex{ids} or \printindex{mods}
+
+you have to ask for them because (IMHO) indexes aren't vitally
+important while a program is being developed or for small
+programs.
+
+the indexes are created in separate files to allow other tools
+to create them independently of mx2tex31. for example, a
+C program could create name.ids by parsing the code chunks
+for identifiers etc. (like CWEB/WEB do), which would be a
+better way of indexing than @ %def lines...
+
+
+BTW I apologise for this documentation. I haven't had a
+chance to get a proper version written yet.
+
+Do people want a TeXinfo version of the documentation or
+just a plain TeX file?
diff --git a/web/noweb/contrib/partingr/TeXthings b/web/noweb/contrib/partingr/TeXthings
new file mode 100755
index 0000000000..35f537a93f
--- /dev/null
+++ b/web/noweb/contrib/partingr/TeXthings
@@ -0,0 +1,52 @@
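+# rewrite each noweb [[...]] quote as \code{}...\edoc{}, TeX-escaping
+# the quoted text and allowing extra ]'s as in [[a[i]]]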
+sub convquotes
+{
+ local($line)=@_;
+ local($pre,$mid,$end,$obrace,$cbrace);
+
+ $obrace=index($line,'[[',0);
+ while($obrace!=-1)
+ {
+ $pre=substr($line,0,$obrace);
+ $cbrace=index($line,']]',$obrace);
+ while(substr($line,$cbrace+2,1) eq ']')
+ { $cbrace++;
+ }
+ $mid=substr($line,$obrace+2,$cbrace-$obrace-2);
+ $end=substr($line,$cbrace+2);
+ $line=$pre . "\\code{}" . &TeXliteral($mid) . "\\edoc{}" . $end;
+ $obrace=index($line,'[[',0);
+ }
+ return $line;
+}
+
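+# put a backslash in front of every \, { and } in the line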
+sub escapebslash
+{
+ local($line)=@_;
+
+ $line=~s/([\\\{\}])/\n\1/g;
+ $line=~s/\n/\\/g;
+ return $line;
+}
+
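+# make a string safe for TeX: specials become {\charNN} groups and
+# each space becomes a control space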
+sub TeXliteral
+{
+ local($_)=@_;
+
+ s/\\/<\\char92>/g;
+ s/\}/<\\char125}/g;
+ s/\{/{\\char123}/g;
+ s/<\\char/{\\char/g;
+ s/\{\\char92>/{\\char92}/g;
+ s/\$/{\\char36}/g;
+ s/&/{\\char38}/g;
+ s/#/{\\char35}/g;
+ s/\^/{\\char94}/g;
+ s/_/{\\char95}/g;
+ s/%/{\\char37}/g;
+ s/~/{\\char126}/g;
+ s/ /\\ /g;
+ return $_;
+}
+
+1;
+
diff --git a/web/noweb/contrib/partingr/addscore.nw b/web/noweb/contrib/partingr/addscore.nw
new file mode 100644
index 0000000000..e0e5d9eb79
--- /dev/null
+++ b/web/noweb/contrib/partingr/addscore.nw
@@ -0,0 +1,183 @@
+\def\musecs{$\mu$secs}
+\itemwidth=.25in
+@
+\section{Archery Database: {\tt AddScore}}
+This perl script adds a score (or scores) to the archery database.
+It provides only a basic user interface.
+
+<<addscore>>=
+<<setup>>
+<<main program loop>>
+<<subroutines>>
+@
+
+\section{Setting up the database and script}
+Because much of the code is shared between this family of scripts,
+some of the code is in subroutine files. We [[require]] these.
+
+<<setup>>=
+require "custom" || die "can't open custom routines library";
+require "archsubs" || die "can't open subroutines library";
+@ %def &readrounds &init
+
+Early versions of this script allowed you to abort an entry by not
+typing anything at a prompt. However, this became cumbersome when only
+the third prompt would let you do this. So we now trap [[SIGINT]]
+and point it to the same abort routine.
+
+<<setup>>=
+$SIG{'INT'}='abort';
+@ %def $SIG
+
+We need to know what date it is today (for the default database and
+date) so we get this from the operating system via the [[gmtime()]]
+call.
+
+<<setup>>=
+$start=time;
+($ts,$tmi,$th,$tmd,$tmo,$ty,@junk)=gmtime($start);
+@ %def $start $ts $tmi $th $tmd $tmo $ty @junk | $start gmtime()
+
+Now we read in the rounds database, and open the relevant dbm file for
+the archery database. [[&init]] handles the command line options that
+relate to the person and year that are asked for.
+
+<<setup>>=
+&readrounds;
+do &init;
+@ %def | &readrounds &init
+\section{The main body of the program}
+The main part of the program just loops, asking the user for
+a score to be entered and then prompting for another go.
+
+Unfortunately, we still treat dates as strings (due to problems in the
+conversion of dates to \musecs\ and back) so all this data cannot
+be accessed by date order.
+
+\subsection{Outline of main loop}
+<<main...>>=
+while(1)
+{
+ <<date entry and validation>>
+ <<round entry and validation>>
+ <<score entry and validation>>
+ <<create and store dbm entry>>
+ <<prompt user for another go>>
+}
+@
+
+We close the dbm database explicitly here, just to be on the safe side.
+<<main...>>=
+dbmclose(scores);
+@
+\subsection{Entering the date}
+<<date...>>=
+ dateloop:
+ while(1)
+ {
+ print "Enter date ($tmd-$tmo-$year) - ";
+ $date=scalar(<STDIN>);
+ chop $date;
+ $date=join('-',$tmd,$tmo) unless $date;
+ if($date=~/(\d{1,2})-(\d{1,2})/)
+ {
+ $dt_ntrd=1;
+ $dindex=sprintf("%02d:%02d:%04d",$1,$2,$year);
+ $usecs=&retime($1,$2,$year);
+
+ ($xts,$xtmi,$xth,$xtmd,$xtmo,$xty,@junk)=gmtime($usecs);
+ printf "%02d-%02d-%04d\n",$xtmd,$xtmo,$xty;
+
+ last dateloop;
+ }
+ else { print "invalid date\n"; }
+ }
+@ %def dateloop: $date $dt_ntrd $dindex $usecs $xts $xtmi $xth $xtmd $xtmo $xty @junk | STDIN &retime() gmtime()
+\subsection{Entering the round}
+<<round...>>=
+ $round=&getround;
+ ($rtype,@rdists)=split(/,/,@rounds{$round});
+	$mult=($rtype eq 'y'?9:10);
+@ %def $round @rdists $mult | $rtype @rounds{} &getround
+\subsection{Entering the score}
+<<score...>>=
+ until(defined($sc_ntrd))
+ {
+ print 'Enter scores (100,100,100) - ';
+ $sclist=scalar(<STDIN>);
+ chop $sclist;
+ &abort unless $sclist;
+ @rscores=split(/,/,$sclist);
+ if(2*$#rscores==$#rdists-1) {$sc_ntrd=1;}
+ else { print "wrong number of scores\n"; }
+ for($i=0;$i<=$#rscores;$i++)
+ {
+ $ms=@rdists[2*$i]*$mult;
+ if(@rscores[$i]>$ms)
+ { printf "invalid: %d>possible ($ms)\n",@rscores[$i];
+ $sc_ntrd=$undefined;
+ }
+ }
+ }
+@ %def $sclist @rscores $ms $sc_ntrd | STDIN $mult @rdists $undefined
+\subsection{Storing the data}
+The dbm entries are created in a fairly naive fashion at the moment.
+The round name and scores are turned into a CSV string. Future versions
+will almost certainly use templates and packing to reduce the amount
+of information that is stored.
+
+<<create...>>=
+ $entry=join(',',$round,@rscores);
+ @scores{$dindex}=$entry;
+@ %def $entry @scores | $round @rscores $dindex
+\subsection{Prompt the user}
+Now we prompt the user to see if they want to enter another score.
+The default answer (ie just pressing [[RETURN]]) is yes on the
+assumption that more than one score will be entered at a time. It
+will be changed so that a score entered with today's date will
+change the default to no on the assumption that the rest of the
+database is up to date.
+
+<<prompt...>>=
+ $loopagain=&yesno("Another score",'y');
+ last unless $loopagain;
+ undef $sc_ntrd;
+@ %def $loopagain | &yesno() $sc_ntrd
+\section{Subroutines}
+\subsection{[[retime]]}
+This subroutine is supposed to convert a date in dd-mm-yyyy form into
+the number of \musecs\ since 1-1-1970, but it sometimes gets it
+mysteriously wrong, so we don't use it for indexing yet.
+
+<<subroutines>>=
+sub retime
+{ local($d,$m,$y)=@_;
+ local($wm,$md,$yd);
+
+ $wm=$m-3;
+ if($wm<0) { $wm+=12; $y--; } # modified month number
+ $md=int(30.6*$wm+.5); # month day
+ $yd=int(365.25*($y-1970)+$md+$d)+60; # actual day number
+ 86400*$yd;
+}
+@ %def $d $m $y $wm $md $yd
+\section{Things to do}
+\itemize
+\item
+The way the rounds are stored and accessed needs to be changed.
+`Compiling' the rounds into a more compact form would save space,
+and possibly make things a little faster.
+
+\item
+A better user interface would also be a good idea. Possibly making
+the program run under a different screen mode for larger text.
+
+\item
+Adding a confirm just before the data is inserted into the dbm database.
+
+\enditemize
+
+\chapter{Variables}
+\printindex{ids}
+\chapter{Modules}
+\printindex{mods}
diff --git a/web/noweb/contrib/partingr/email b/web/noweb/contrib/partingr/email
new file mode 100644
index 0000000000..d80033763c
--- /dev/null
+++ b/web/noweb/contrib/partingr/email
@@ -0,0 +1,2 @@
+Robert Partington <rjp@browser.org>
+
diff --git a/web/noweb/contrib/partingr/mm2mx63 b/web/noweb/contrib/partingr/mm2mx63
new file mode 100755
index 0000000000..41e7f3ca67
--- /dev/null
+++ b/web/noweb/contrib/partingr/mm2mx63
@@ -0,0 +1,160 @@
+#!/usr/common/bin/perl
+do "getopts.pl" || die "$!";
+do Getopts('imnd:l:');
+
+do "TeXthings";
+
+$sectionref='aaa'; $i=0;
+if($opt_l)
+ { open(LOGFILE,">$opt_l") || die "$!"; }
+
+while(<>)
+{
+# s/\n$//;
+ s/^@//;
+# expand wildcard references here, then process as normal
+ if(/^(use|defn) (.*)\.{3}$/)
+ { print LOGFILE "Wildcard `$2...', expands to " if $opt_l;
+	{ $mod=$2;	# the wildcard prefix, used in the messages below
+	  print LOGFILE "Wildcard `$mod...', expands to " if $opt_l;
+ if($#matches>0)
+ {
+ print LOGFILE "[",join('][',@matches),"] " if $opt_l;
+ print STDERR "Ambiguous module name `$mod...', line $i\n";
+ print STDERR "Matches: [",join('][',@matches),"]\n";
+ print STDERR "Using `",@matches[0],"'\n";
+ #die "\nAmbiguous module name `$mod...', line $i";
+ }
+ elsif($#matches==-1)
+ {
+ die "\nNo match for name `$mod...', line $i";
+ }
+ $mn=@matches[0];
+ if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ print LOGFILE $mn,"\n" if $opt_l;
+ $_="$1 $mn";
+ }
+
+# process a <<defn>>=
+ if(/^defn (.*)$/)
+ { print LOGFILE "Defining chunk `$1' with macro $sectionref\n" if $opt_l;
+ $md=$1; $mn=$1; if($opt_n) { $md=~tr/A-Z/a-z/; }
+ $mt=$md;
+ $mt=~s/([*+.?{}()])/\\\1/g;
+ if($known!~/¤$mt/) { $known=$known . "¤$md"; }
+ @names{$md}=1; $currentmod=$md;
+ @defines{$md}=@defines{$md} . "\\\\{\\xp\\$sectionref}";
+ #if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ @lines[$i]="defn $sectionref $mn\n"; $oldref=$sectionref;
+ $sectionref++;
+ push(@uses,$i);
+ $indxing=0;
+ }
+# process a <<use>>
+ elsif(/^use (.*)$/)
+ { print LOGFILE "Using chunk `$1' in chunk $oldref\n" if $opt_l;
+ $md=$1; $mn=$1; if($opt_n) { $md=~tr/A-Z/a-z/; }
+ $mt=$md;
+ $mt=~s/([*+.?{}()])/\\\1/g;
+ if($known!~/¤$mt/) { $known=$known . "¤$md"; }
+ @reference{$md}=@reference{$md} . "\\\\{\\xp\\$oldref}";
+ #if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ @lines[$i]="use $oldref $mn\n";
+ push(@uses,$i);
+ $indxing=0;
+ }
+# process identifier information
+ elsif(/^index (nl|defn |use )(.*)/)
+ {
+ if($2 eq '|') { $indxing=1; }
+ else
+ { if($2)
+ { print LOGFILE "Identifier `$2' indexed as " if $opt_l;
+ if($1 eq "defn " && !$indxing)
+ { print LOGFILE "defined [$oldref]\n" if $opt_l;
+ $style="";
+ }
+ else
+ { print LOGFILE "used [$oldref]\n" if $opt_l;
+ $style="\\it";
+ }
+ @variables{$2}=@variables{$2} . ",\\thinspace{$style\\xp\\$oldref}";
+ }
+ }
+ $i--; # don't put this line in the file here
+ }
+# stick the line in the array
+ else
+ { @lines[$i]=$_; }
+ $i++;
+}
+
+if($opt_l)
+ {
+ print LOGFILE "\n\nList of modules currently defined\n";
+ print LOGFILE join("\n",sort keys(defines));
+ print LOGFILE "\n\nList of modules currently referenced\n";
+ print LOGFILE join("\n",sort keys(reference));
+ print LOGFILE "\n\n";
+ }
+
+foreach(keys(reference))
+ { @longlist{$_}=@reference{$_}; }
+
+foreach(keys(defines))
+ { @longlist{$_}=@longlist{$_} . ('%' . @defines{$_}); }
+
+foreach(@uses)
+ {
+ $ref=@lines[$_];
+ $ref=~/^(use|defn) (...) (.*)/;
+ $mn=$3; $dr=$2; $ac=$1;
+ $defns=@defines{$mn};
+ print LOGFILE "Module $mn " if $opt_l;
+ if($ac eq 'defn')
+ { $uses=@reference{$mn}; $uses="{$uses}|";
+ print LOGFILE "defined at line $_, $uses\n" if $opt_l;
+ }
+ else
+ { $uses='';
+ print LOGFILE "referenced at line $_\n" if $opt_l;
+ }
+ print LOGFILE "Line $_ modified to `$ac $dr {$defns} $uses$mn'\n" if $opt_l;
+ $mn=&convquotes($mn);
+ @lines[$_]="$ac $dr|{$defns}|$uses $mn\n";
+ }
+
+print STDOUT "header tex \n",@lines;
+
+# now @longlist{MOD} contains a list of all the references to <MOD>
+# sort them to make them look pretty
+
+if($opt_m)
+ {
+ print LOGFILE "Making module index...\n" if $opt_l;
+ print "index mods\n";
+ foreach(sort keys(longlist))
+ {
+ $defns=@defines{$_};
+ $defns=~s/^,\\\\thinspace//;
+ print LOGFILE "Module <$_ $defns> ",@reference{$_},"\n" if $opt_l;
+ # first we print the module name and defining numbers
+ print "entry {\\LA ",&convquotes($_),"\\ \\xwp{$defns}\\RA}\\quad";
+ # now we print the bit after that : assume foot=cmr8
+ print "{\\foot\\xtc{",@reference{$_},"}}\n";
+ }
+ print "end index\n";
+ }
+
+if($opt_i)
+ {
+ print "index ids\n";
+ foreach(sort keys(variables))
+ {
+ $vars=@variables{$_};
+ $vars=~s/^,\\thinspace//;
+ print "entry {\\code ",&TeXliteral($_),"\\edoc} :\\quad",$vars,"\n";
+ }
+ print "end index\n";
+ }
+
+print STDOUT "trailer tex\n";
diff --git a/web/noweb/contrib/partingr/mm2mx64 b/web/noweb/contrib/partingr/mm2mx64
new file mode 100755
index 0000000000..58f26356b5
--- /dev/null
+++ b/web/noweb/contrib/partingr/mm2mx64
@@ -0,0 +1,173 @@
+#!/usr/common/bin/perl
+do "getopts.pl" || die "$!";
+do Getopts('imnd:l:');
+
+do "TeXthings";
+
+$sectionref='a'; $i=0;
+if($opt_l)
+ { open(LOGFILE,">$opt_l") || die "$!"; }
+
+while(<>)
+{
+# s/\n$//;
+ s/^@//;
+# expand wildcard references here, then process as normal
+ if(/^(use|defn) (.*)\.{3}$/)
+ { print LOGFILE "Wildcard `$2...', expands to " if $opt_l;
+	{ $mod=$2;	# the wildcard prefix, used in the messages below
+	  print LOGFILE "Wildcard `$mod...', expands to " if $opt_l;
+ if($#matches>0)
+ {
+ print LOGFILE "[",join('][',@matches),"] " if $opt_l;
+ print STDERR "Ambiguous module name `$mod...', line $i\n";
+ print STDERR "Matches: [",join('][',@matches),"]\n";
+ print STDERR "Using `",@matches[0],"'\n";
+ #die "\nAmbiguous module name `$mod...', line $i";
+ }
+ elsif($#matches==-1)
+ {
+ die "\nNo match for name `$mod...', line $i";
+ }
+ $mn=@matches[0];
+ if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ print LOGFILE $mn,"\n" if $opt_l;
+ $_="$1 $mn";
+ }
+
+# process a <<defn>>=
+ if(/^defn (.*)$/)
+ { print LOGFILE "Defining chunk `$1' with macro $sectionref\n" if $opt_l;
+ $md=$1; $mn=$1; if($opt_n) { $md=~tr/A-Z/a-z/; }
+ $mt=$md;
+ $mt=~s/([*+.?{}()])/\\\1/g;
+ if($known!~/¤$mt/) { $known=$known . "¤$md"; }
+ @names{$md}=1; $currentmod=$md;
+ @defines{$md}=@defines{$md} . "\\\\{\\xp\\z$sectionref}";
+ #if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ @lines[$i]="defn z$sectionref $mn\n"; $oldref=$sectionref;
+ $sectionref++;
+ push(@uses,$i);
+ $indxing=0;
+ }
+# process a <<use>>
+ elsif(/^use (.*)$/)
+ { print LOGFILE "Using chunk `$1' in chunk $oldref\n" if $opt_l;
+ $md=$1; $mn=$1; if($opt_n) { $md=~tr/A-Z/a-z/; }
+ $mt=$md;
+ $mt=~s/([*+.?{}()])/\\\1/g;
+ if($known!~/¤$mt/) { $known=$known . "¤$md"; }
+ @reference{$md}=@reference{$md} . "\\\\{\\xp\\z$oldref}";
+ #if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ @lines[$i]="use z$oldref $mn\n";
+ push(@uses,$i);
+ $indxing=0;
+ }
+# process identifier information
+ elsif(/^index (nl|defn |use )(.*)/)
+ {
+ if($2 eq '|') { $indxing=1; }
+ else
+ { if($2)
+ { print LOGFILE "Identifier `$2' indexed as " if $opt_l;
+ if($1 eq "defn " && !$indxing)
+ { print LOGFILE "defined [$oldref]\n" if $opt_l;
+ $style="";
+ }
+ else
+ { print LOGFILE "used [$oldref]\n" if $opt_l;
+ $style="\\it";
+ }
+ @variables{$2}=@variables{$2} . ",\\thinspace{$style\\xp\\z$oldref}";
+ }
+ }
+ $i--; # don't put this line in the file here
+ }
+# stick the line in the array
+ else { @lines[$i]=$_; }
+ $i++;
+}
+
+if($opt_l)
+ {
+ print LOGFILE "\n\nList of modules currently defined\n";
+ print LOGFILE join("\n",sort keys(defines));
+ print LOGFILE "\n\nList of modules currently referenced\n";
+ print LOGFILE join("\n",sort keys(reference));
+ print LOGFILE "\n\n";
+ }
+
+foreach(keys(reference))
+ { @longlist{$_}=@reference{$_}; }
+
+foreach(keys(defines))
+ { @longlist{$_}=@longlist{$_} . ('%' . @defines{$_}); }
+
+foreach(@uses)
+ {
+ $ref=@lines[$_];
+ $ref=~/^(use|defn) (z[a-z]+) (.*)/;
+ $mn=$3; $dr=$2; $ac=$1;
+ $defns=@defines{$mn};
+ print LOGFILE "Module $mn " if $opt_l;
+ if($ac eq 'defn')
+ { $uses=@reference{$mn}; $uses="{$uses}|";
+ print LOGFILE "defined at line $_, $uses\n" if $opt_l;
+ }
+ else
+ { $uses='';
+ print LOGFILE "referenced at line $_\n" if $opt_l;
+ }
+ print LOGFILE "Line $_ modified to `$ac $dr {$defns} $uses$mn'\n" if $opt_l;
+ $mn=&convquotes($mn);
+ @lines[$_]="$ac $dr|{$defns}|$uses $mn\n";
+ }
+
+print STDOUT "header tex \n",@lines;
+
+# now @longlist{MOD} contains a list of all the references to <MOD>
+# sort them to make them look pretty
+
+if($opt_m)
+ {
+ print LOGFILE "Making module index...\n" if $opt_l;
+ print "index mods\n";
+ foreach(sort keys(longlist))
+ {
+ $defns=@defines{$_};
+ $defns=~s/^,\\\\thinspace//;
+ print LOGFILE "Module <$_ $defns> ",@reference{$_},"\n" if $opt_l;
+ # first we print the module name and defining numbers
+ print "entry {\\LA ",&convquotes($_),"\\ \\xwp{$defns}\\RA}\\quad";
+ # now we print the bit after that : assume foot=cmr8
+ print "{\\foot\\xtc{",@reference{$_},"}}\n";
+ }
+ print "end index\n";
+ }
+
+if($opt_i)
+ {
+ print "index ids\n";
+ foreach(sort keys(variables))
+ {
+ $vars=@variables{$_};
+ $vars=~s/^,\\thinspace//;
+ print "entry {\\code ",&TeXliteral($_),"\\edoc} :\\quad",$vars,"\n";
+ }
+ print "end index\n";
+ }
+
+print STDOUT "trailer tex\n";
+
+sub usage_info
+{
+ local($line)=@_;
+ @ixrefs=sort(split(/%/,@reference{$line}));
+ if($#ixrefs==-1)
+ { return "This code is never referenced. It may be a root module.";}
+ elsif($#ixrefs==0)
+ { return "This code is used in section ",@ixrefs[0]; }
+ else
+ { $lastref=pop(@ixrefs);
+ return "This code is used in sections ",join(",\\,",@ixrefs)," and $lastref";
+ }
+}
diff --git a/web/noweb/contrib/partingr/mm2mx65 b/web/noweb/contrib/partingr/mm2mx65
new file mode 100755
index 0000000000..7c7809b4b8
--- /dev/null
+++ b/web/noweb/contrib/partingr/mm2mx65
@@ -0,0 +1,179 @@
+#!/usr/common/bin/perl
+do "getopts.pl" || die "$!";
+do Getopts('imnd:l:');
+
+do "TeXthings";
+
+$sectionref='aaa'; $i=0;
+if($opt_l)
+ { open(LOGFILE,">$opt_l") || die "$!"; }
+
+while(<>)
+{
+# s/\n$//;
+ s/^@//;
+ if(/@</)
+ { ($pre,$mid,$end)=&convxrefs($_);
+ @lines[$i]=$pre; $i++;
+ $_=$mid; # need to expand the module name here
+ }
+# expand wildcard references here, then process as normal
+ if(/^(use|defn) (.*)\.{3}$/)
+	{ $mod=$2;	# the wildcard prefix, used in the messages below
+	  print LOGFILE "Wildcard `$mod...', expands to " if $opt_l;
+ @matches=grep(/^$2.*/i,split(/¤/,$known));
+ if($#matches>0)
+ {
+ print LOGFILE "[",join('][',@matches),"] " if $opt_l;
+ print STDERR "Ambiguous module name `$mod...', line $i\n";
+ print STDERR "Matches: [",join('][',@matches),"]\n";
+ print STDERR "Using `",@matches[0],"'\n";
+ #die "\nAmbiguous module name `$mod...', line $i";
+ }
+ elsif($#matches==-1)
+ {
+ die "\nNo match for name `$mod...', line $i";
+ }
+ $mn=@matches[0];
+ if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ print LOGFILE $mn,"\n" if $opt_l;
+ $_="$1 $mn";
+ }
+
+# process a <<defn>>=
+ if(/^defn (.*)$/)
+ { print LOGFILE "Defining chunk `$1' with macro $sectionref\n" if $opt_l;
+ $md=$1; $mn=$1; if($opt_n) { $md=~tr/A-Z/a-z/; }
+ $mt=$md;
+ $mt=~s/([*+.?{}()])/\\\1/g;
+ if($known!~/¤$mt/) { $known=$known . "¤$md"; }
+ @names{$md}=1; $currentmod=$md;
+ @defines{$md}=@defines{$md} . "\\\\{\\xp\\$sectionref}";
+ #if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ @lines[$i]="defn $sectionref $mn\n"; $oldref=$sectionref;
+ $sectionref++;
+ push(@uses,$i);
+ $indxing=0;
+ }
+# process a <<use>>
+ elsif(/^use (.*)$/)
+ { print LOGFILE "Using chunk `$1' in chunk $oldref\n" if $opt_l;
+ $md=$1; $mn=$1; if($opt_n) { $md=~tr/A-Z/a-z/; }
+ $mt=$md;
+ $mt=~s/([*+.?{}()])/\\\1/g;
+ if($known!~/¤$mt/) { $known=$known . "¤$md"; }
+ @reference{$md}=@reference{$md} . "\\\\{\\xp\\$oldref}";
+ #if($mn=~/\[\[/) { $mn=&convquotes($mn); }
+ @lines[$i]="use $oldref $mn\n";
+ push(@uses,$i);
+ $indxing=0;
+ }
+# process identifier information
+ elsif(/^index (nl|defn |use )(.*)/)
+ {
+ if($2 eq '|') { $indxing=1; }
+ else
+ { if($2)
+ { print LOGFILE "Identifier `$2' indexed as " if $opt_l;
+ if($1 eq "defn " && !$indxing)
+ { print LOGFILE "defined [$oldref]\n" if $opt_l;
+ $style="";
+ }
+ else
+ { print LOGFILE "used [$oldref]\n" if $opt_l;
+ $style="\\it";
+ }
+ @variables{$2}=@variables{$2} . ",\\thinspace{$style\\xp\\$oldref}";
+ }
+ }
+ $i--; # don't put this line in the file here
+ }
+# stick the line in the array
+ else
+ { @lines[$i]=$_; }
+ $i++;
+ if($end) { @lines[$i]=$end; $i++; undef $end; }
+}
+
+if($opt_l)
+ {
+ print LOGFILE "\n\nList of modules currently defined\n";
+ print LOGFILE join("\n",sort keys(defines));
+ print LOGFILE "\n\nList of modules currently referenced\n";
+ print LOGFILE join("\n",sort keys(reference));
+ print LOGFILE "\n\n";
+ }
+
+foreach(keys(reference))
+ { @longlist{$_}=@reference{$_}; }
+
+foreach(keys(defines))
+ { @longlist{$_}=@longlist{$_} . ('%' . @defines{$_}); }
+
+foreach(@uses)
+ {
+ $ref=@lines[$_];
+ $ref=~/^(use|defn) (...) (.*)/;
+ $mn=$3; $dr=$2; $ac=$1;
+ $defns=@defines{$mn};
+ print LOGFILE "Module $mn " if $opt_l;
+ if($ac eq 'defn')
+ { $uses=@reference{$mn}; $uses="{$uses}|";
+ print LOGFILE "defined at line $_, $uses\n" if $opt_l;
+ }
+ else
+ { $uses='';
+ print LOGFILE "referenced at line $_\n" if $opt_l;
+ }
+ print LOGFILE "Line $_ modified to `$ac $dr {$defns} $uses$mn'\n" if $opt_l;
+ $mn=&convquotes($mn);
+ @lines[$_]="$ac $dr|{$defns}|$uses $mn\n";
+ }
+
+print STDOUT "header tex \n",@lines;
+
+# now @longlist{MOD} contains a list of all the references to <MOD>
+# sort them to make them look pretty
+
+if($opt_m)
+ {
+ print LOGFILE "Making module index...\n" if $opt_l;
+    print "index mods\n";
+ foreach(sort keys(longlist))
+ {
+ $defns=@defines{$_};
+ $defns=~s/^,\\\\thinspace//;
+ print LOGFILE "Module <$_ $defns> ",@reference{$_},"\n" if $opt_l;
+ # first we print the module name and defining numbers
+ print "entry {\\LA ",&convquotes($_),"\\ \\xwp{$defns}\\RA}\\quad";
+ # now we print the bit after that : assume foot=cmr8
+ print "{\\foot\\xtc{",@reference{$_},"}}\n";
+ }
+ print "end index\n";
+ }
+
+if($opt_i)
+ {
+ print "index ids\n";
+ foreach(sort keys(variables))
+ {
+ $vars=@variables{$_};
+ $vars=~s/^,\\thinspace//;
+ print "entry {\\code ",&TeXliteral($_),"\\edoc} :\\quad",$vars,"\n";
+ }
+ print "end index\n";
+ }
+
+print STDOUT "trailer tex\n";
+
+sub convxrefs
+ {
+ local($l)=@_;
+ local($found,$output);
+ $found=index($l,'@<');
+ $lost=index($l,'@>',$found);
+ $pre=substr($l,0,$found) . "\n"; # before the use
+ $mid="use " . substr($l,$found+2,$lost-$found-2) . "\n";
+ $end="text " . substr($l,$lost+2);
+ substr($l,$found,$lost+2-$found)="";
+ return $pre,$mid,$end;
+ }
diff --git a/web/noweb/contrib/partingr/mm2tex b/web/noweb/contrib/partingr/mm2tex
new file mode 100755
index 0000000000..79fd7a9c3d
--- /dev/null
+++ b/web/noweb/contrib/partingr/mm2tex
@@ -0,0 +1,41 @@
+#!/usr/common/bin/perl
+do "TeXthings" || die "$!";
+print "\\input nwmac ";
+while(<>)
+{
+ if(/^@begin code (.*)$/) { print "\\nwbegincode{$1}"; $code=1; $text=5; }
+ elsif(/^@end code/) { print "\\nwendcode{}\\filbreak$defing"; $code=0; }
+ elsif(/^@begin docs (.*)$/) { print "\\nwbegindocs{$1}"; $text=0; $textmode=0; }
+ elsif(/^@end docs/) { print "\\nwenddocs{}"; }
+ elsif(/^@text (.*)$/)
+ { $text+=length $1;
+ if($code==1) { print &escapebslash($1); }
+ elsif($quoting==1) { print &TeXliteral($1); }
+ else { print $1; }
+ $textmode=1 if $text>0;
+ }
+ elsif(/^@nl$/)
+ { if($code==0)
+ { if($text==0)
+ { if($textmode==1) { print "\\nwdocspar\\noindent\n"; }
+ else { print "\n"; }
+ $textmode=1; $text=1;
+ }
+ else { print "\n"; }
+ }
+ elsif($quoting) { print "\\nwnewline"; }
+ else { if($text>0) { print "\n"; } }
+ }
+ elsif(/^@defn (.*)$/)
+ { $name=$1;
+ print "\\moddef{",&convquotes($name),"}\\",@defns{$name},"endmoddef";
+ @defns{$name}='plus';
+ }
+ elsif(/^@use (.*)$/)
+ { print "\\LA{}",&convquotes($1),"\\RA{}"; }
+ elsif(/^@quote$/) { $quoting=1; print "{\\tt "; }
+ elsif(/^@endquote$/) { $quoting=0; print "}"; $textmode=0; }
+ elsif(/^@file (.*)$/) { $filename=$1; print "\\filename{$filename}"; }
+ elsif(/^@literal (.*)$/) { print "$1"; }
+}
+print "\\bye\n";
diff --git a/web/noweb/contrib/partingr/mx2tex31 b/web/noweb/contrib/partingr/mx2tex31
new file mode 100755
index 0000000000..e7e3ef674d
--- /dev/null
+++ b/web/noweb/contrib/partingr/mx2tex31
@@ -0,0 +1,130 @@
+#!/usr/common/bin/perl
+do "TeXthings";
+do "getopts.pl" || die "$!";
+do Getopts('n:idf:hqst');
+
+if($opt_h)
+ {
+ print STDERR <<ENDOFHELP;
+
+Usage: mx2tex [options]
+
+mx2tex takes a file created by mm2mx and converts it into TeX.
+
+Options: d debugging information
+    f <name>	use this file for the job (from <name>.markup to <name>.tex/texnique)
+ h this help text
+ i create the indexes
+ n <name> use this as the name for the output files
+ default: woven
+ q operate quietly (no output)
+ s only have full list of defines for first chunk
+ENDOFHELP
+ exit;
+ }
+
+unless($opt_q)
+ {
+ print STDERR "mx2tex version 3, 1994 by Rob Partington\n";
+ if($opt_d) { push(@options,"debugging"); }
+ if($opt_f) { push(@options,"file:$opt_f"); }
+ if($opt_i) { push(@options,"indexes"); }
+ if($opt_n) { push(@options,"name:$opt_n"); }
+ if($opt_s) { push(@options,"first define"); }
+ if($opt_t) { push(@options,"force write"); }
+   print STDERR "Options: ",join(' + ',sort(@options)),"\n";
+ undef @options;
+ }
+
+$macrofile="nwindex"; if($opt_d) { $macrofile="nwidxmac"; }
+
+$filename=$opt_f; if($opt_n) { $filename=$opt_n; }
+
+open(TEX,">$filename.texnique") || die "$!";
+
+if($opt_f) { push(@ARGV,"$filename.markup"); }
+
+unless(-e "$filename.tex" && !$opt_t)
+ {
+ open(TEXCNTL,">$filename.tex") || die "$!";
+ print TEXCNTL <<EOTEX;
+\\input $macrofile
+\\def\\defined{}\\init\\output={\\plainoutput\\global\\subpageref=97}
+{\\def\\shipout{\\message{[p\\the\\pageno]}\\setbox0}
+\\input \\jobname.texnique \\vfill\\supereject}
+\\init{\\gdef\\passtwo{}\\input \\jobname.texnique }
+\\end
+EOTEX
+ close(TEXCNTL);
+ }
+
+
+$code=0; $text=1; $ignore=0;
+whileloop:
+while(<>)
+{
+ if(/^begin code (.*)$/) { $delayed="\\nwbegincode{$1}"; $code=1; $text=5; }
+ elsif(/^end code/) { print TEX "\\nwendcode{}\\filbreak$defing"; $code=0; }
+ elsif(/^begin docs (.*)$/) { print TEX "\\nwbegindocs{$1}"; $text=0; $textmode=0; }
+ elsif(/^end docs/) { print TEX "\\nwenddocs{}"; }
+ elsif(/^text (.*)$/)
+ { $text+=length $1;
+ if($code==1) { print TEX &escapebslash($1); }
+ elsif($quoting==1) { print TEX &TeXliteral($1); }
+ else { print TEX $1; }
+ $textmode=1 if $text>0;
+ }
+ elsif(/^nl$/)
+ { if($code==0)
+ { if($text==0)
+ { if($textmode==1) { print TEX "\\nwdocspar\\noindent\n"; }
+ else { print TEX "\n"; }
+ $textmode=1; $text=1;
+ }
+ else { print TEX "\n"; }
+ }
+ elsif($quoting) { print TEX "\\nwnewline"; }
+ else { if($text>0) { print TEX "\n"; } }
+ }
+ elsif(/^defn ([a-z]+)\|(.*)\|(.*)\| (.*)$/)
+ { $xref=$1; $name=$2;
+ $defing="\\makeref{$1}";
+ $deflist=$2; $uselist=$3;
+ if($opt_s && @defns{$name} eq 'plus')
+ {
+ if(($firstr=index($deflist,'}\\'))!=-1)
+ { $deflist=substr($deflist,0,$firstr) . '}}'; }
+ if(($firstr=index($uselist,'}\\'))!=-1)
+ { $uselist=substr($uselist,0,$firstr) . '}}'; }
+ }
+
+ print TEX "$delayed\\def\\list$deflist\\def\\ulist$uselist",
+ "\\moddef{\\xp{\\$1}}{",&convquotes($4),"}",
+ "\\inmodname\\",@defns{$name},"endmoddef";
+ $ignore=1;
+ @defns{$name}='plus';
+ }
+ elsif(/^use ([a-z]+)\|(.*)\| (.*)$/)
+ {
+ $deflist=$2;
+ if($opt_s)
+ { $deflist=~s/^({\\\\\{.*\}).*\}$/\1\}/;}
+ print TEX "\\LA{}",&convquotes($3),"\\def\\list",$deflist,"\\inmodname\\RA{}";
+ }
+ elsif(/^quote$/) { $quoting=1; print TEX "{\\tt "; }
+ elsif(/^endquote$/) { $quoting=0; print TEX "}"; $textmode=0; }
+ elsif(/^file (.*)$/) { print TEX "\\filename{$1}"; }
+ elsif(/^literal (.*)$/) { print TEX "$1"; }
+ elsif(/^entry (.*)$/ && $opt_i) { print INDEX "$1\n"; }
+ elsif(/^index (.*)$/ && $opt_i)
+ { print STDERR "creating index `$filename.$1'\n" unless $opt_q;
+ open(INDEX,">$filename.$1");
+ print INDEX "\\begingroup\\parindent=0pt\\obeylines%\n";
+ }
+ elsif(/^end index$/ && $opt_i)
+ {
+ print INDEX "\\endgroup\n";
+ close(INDEX);
+ }
+}
+print TEX "\\passfin\n";
diff --git a/web/noweb/contrib/partingr/nwindex.tex b/web/noweb/contrib/partingr/nwindex.tex
new file mode 100644
index 0000000000..bff95acd7b
--- /dev/null
+++ b/web/noweb/contrib/partingr/nwindex.tex
@@ -0,0 +1,134 @@
+\ifx\NwInDeXLoaded\undefined\relax\else\endinput\fi
+\def\NwInDeXLoaded{}
+\ifx\moddef\undefined\input nwmac \fi
+
+% last minute panic comments
+
+% font for cross references
+\font\foot=cmr8
+
+% \init re-initialises stuff for each pass
+\def\initnwindex{\global\pageno=1\global\subpageref=97}
+\let\init=\initnwindex
+
+% macros to place information in the left margin
+% from The TeXBook
+\def\strutdepth{\dp\strutbox}
+\def\marginalnote#1{\strut\vadjust{\kern-\strutdepth\specialstar{#1}}}
+\def\specialstar#1{\vtop to \strutdepth{
+ \baselineskip\strutdepth
+ \vss\llap{#1\quad}\null}}
+
+% redefine moddef to take an argument (this section's code)
+\def\moddef#1{\vskip3pt\leavevmode\marginalnote{{\bf#1}}\kern-\codemargin \LA}
+
+% make the contents file immediately open
+\immediate\openout\cont=\contentsfile
+\immediate\write\cont{\string\catcode`\string\@=11}% a hack to make contents
+ % take stuff in plain.tex
+
+% redefine \nwendcode to provide the \aftercode hook
+\def\nwendcode{\aftercode{}\endgroup}
+
+% \subpageref is the letter part of the code
+\newcount\subpageref \subpageref=97
+
+% an entry in an index (AFAIK this is unused)
+\def\index#1#2{\line{\hskip.5in{\vbox{{\ignorespaces#1}\hskip4pt #2.}\hss}}}
+
+% advance the \subpageref, going to 'A' if 'z' was the last one
+\def\nextref{\global\advance\subpageref by 1\ifnum\subpageref=123\subpageref=65\fi}
+
+% \xp expands to #1 if #1 is defined or \relax if it is not
+\def\xp#1{\ifx#1\undefined\relax\else#1\fi}
+
+% AFAIK this is unused
+\let\ag=\aftergroup
+
+% \xref is called each time a code is defined - sort of a hook
+\def\xref#1{}
+
+% \defined is a defined macro!
+\def\defined{}
+
+% AFAIK this is unused
+\def\killpage{\setbox0=\box255\deadcycles=0 \global\subpageref=97\global\advance\pageno by 1}
+
+% a big macro for the end of a pass
+\def\passfin{%
+ \ifx\passtwo\defined
+ \write\cont{}% ensure that the contents file isn't empty after pass2
+ \closeout\cont
+ \vfil\eject\pageno=-1 % new page causes contents to be really closed
+ \topofcontents\readcontents\botofcontents
+ \else
+ \vfill\supereject
+ \fi
+}
+
+% define a reference on pass one only
+\def\makeref#1{\ifx\passtwo\undefined
+ \edef\next{\gdef\csname#1\endcsname{\folio\char\the\subpageref}}
+ \next\xref{\csname#1\endcsname}\nextref\fi}
+
+% list macros from appendix D, The TeXBook
+\toksdef\ta=0 \toksdef\tb=2 %
+\long\def\leftappenditem#1\to#2{\ta={\\{#1}}\tb=\expandafter{#2}%
+ \edef#2{\the\ta\the\tb}}
+
+\def\lop#1\to#2{\expandafter\lopoff#1\lopoff#1#2}
+
+\long\def\lopoff\\#1#2\lopoff#3#4{\def#4{#1}\def#3{#2}}
+
+\def\cardinality#1\to#2{#2=0 \long\def\\##1{\global\advance#2 by 1}#1}
+\def\list{}
+\def\ulist{}
+\newcount\listlength
+
+% write out a nicely formatted list
+\def\writeoutlist#1#2#3{\cardinality#1\to\listlength
+\def\\##1{\advance\listlength by -1\relax##1\ifnum\listlength>1 #2%
+\else\ifnum\listlength=1 #3\fi\fi}#1}
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+% \prettylist{list}{one}{many}{between}{end}
+% 1 -> {one}E1
+% 2 -> {many}E1{end}E2
+% 3 -> {many}E1{between}E2{end}E3
+% etc.
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+\def\prettylist#1#2#3#4#5{%
+\cardinality#1\to\listlength\ifnum\listlength>1 #3\else#2\fi
+\writeoutlist{#1}{#4}{#5}}
+
+\def\writeplain#1{\prettylist#1{}{}{,\thinspace{}}{,\thinspace{}}}
+\def\writelist#1{\prettylist#1{}{}{,\thinspace{}}{ and }}
+\def\writesections#1{\prettylist#1{section }{sections }{,\thinspace }{\ and }}
+
+\def\usage#1{This code is used in \writesections#1.}
+\def\defs#1{\cardinality#1\to\listlength\ifnum\listlength>1{%
+{\lop#1\to\hello This definition is continued in \writesections#1.}}
+\fi}
+
+\def\first#1{\ifx\list\defined\else\ {{\lop#1\to\hello \hello}}\fi}
+\def\thiscode#1{\ifx#1\defined Root module (never referenced in this document)%
+\else\usage#1\fi}
+
+% \thiscode and \writeplain can't take direct lists as parameters, so
+% fake it with these two
+\def\xtc#1{\def\list{#1}\thiscode\list}
+\def\xwp#1{\def\list{#1}\writeplain\list}
+
+% hooks for cross-references
+\def\inmodname{\ifx\list\defined\else\thinspace{\foot\first\list}\fi}
+\def\beforecode{}
+\def\aftercode{\vbox{\kern3pt\hbox{{\foot\defs\list}}\kern-2pt\hbox{{\foot\thiscode\ulist}}\kern3pt}}
+
+% \printindex - check for file existing before \input'ing it
+\newwrite\filecheck
+\def\printindex#1{\openin\filecheck=\jobname.#1
+\ifeof\filecheck\message{[#1 file missing]}{\tt noweb} has no index `#1'\else
+\closein\filecheck\input \jobname.#1
+\fi
+}
diff --git a/web/noweb/contrib/partingr/nwnweave b/web/noweb/contrib/partingr/nwnweave
new file mode 100755
index 0000000000..d78a2af3af
--- /dev/null
+++ b/web/noweb/contrib/partingr/nwnweave
@@ -0,0 +1,2 @@
+#!/bin/csh
+markup $1.nw | mm2mx63 -n -i -m | mx2tex31 -i -n $1 -s
diff --git a/web/noweb/contrib/partingr/nwtangle b/web/noweb/contrib/partingr/nwtangle
new file mode 100755
index 0000000000..b9b115bd01
--- /dev/null
+++ b/web/noweb/contrib/partingr/nwtangle
@@ -0,0 +1,2 @@
+#!/bin/csh
+markup $1.nw | nt -R'$2'
diff --git a/web/noweb/contrib/partingr/nwweave b/web/noweb/contrib/partingr/nwweave
new file mode 100755
index 0000000000..0108ccf4aa
--- /dev/null
+++ b/web/noweb/contrib/partingr/nwweave
@@ -0,0 +1,2 @@
+#!/bin/csh
+markup $1.nw | mm2mx63 -i -m | mx2tex31 -i -n $1 -s
diff --git a/web/noweb/contrib/partingr/xpand b/web/noweb/contrib/partingr/xpand
new file mode 100755
index 0000000000..772375ff54
--- /dev/null
+++ b/web/noweb/contrib/partingr/xpand
@@ -0,0 +1,20 @@
+#!/usr/common/bin/perl
+while(<>)
+{
+# expand wildcard references here, then process as normal
+ if(/^@(use|defn) (.*)$/)
+ {
+ $action=$1; $mod=$2;
+ if($mod=~s/\.{3}$//)	# trailing ... marks an abbreviated chunk name
+ { @matches=grep(/^$mod/i,split(/¤/,$known));
+ if($#matches>0)
+ { die "\nAmbiguous module name `$mod...'"; }
+ elsif($#matches==-1)
+ { die "\nNo match for name `$mod...', line $. "; }
+ else
+ { $mn=$matches[0]; $_="@$action $mn\n"; }
+ }
+ else { $known=$known . "¤$mod"; }
+ }
+ print STDOUT $_;
+}
diff --git a/web/noweb/contrib/rsc/README b/web/noweb/contrib/rsc/README
new file mode 100644
index 0000000000..76e57bbd0c
--- /dev/null
+++ b/web/noweb/contrib/rsc/README
@@ -0,0 +1 @@
+These scripts support use of Noweb under Plan 9.
diff --git a/web/noweb/contrib/rsc/email b/web/noweb/contrib/rsc/email
new file mode 100644
index 0000000000..2e53c38912
--- /dev/null
+++ b/web/noweb/contrib/rsc/email
@@ -0,0 +1 @@
+Russ Cox <rsc@plan9.bell-labs.com>
diff --git a/web/noweb/contrib/rsc/rc/cpif.nw b/web/noweb/contrib/rsc/rc/cpif.nw
new file mode 100644
index 0000000000..e4bb15e512
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/cpif.nw
@@ -0,0 +1,47 @@
+<<cpif>>=
+#!/bin/rc
+#
+# Based on shell script by Norman Ramsey
+# Translated from sh to rc by Russ Cox
+#
+# see /sys/src/cmd/noweb/COPYRIGHT
+#
+# cpif [ -eq -ne ] file...
+# copy standard input to each of the named files
+# if new * old is true or old doesn't exist;
+# * defaults to -ne
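+#
+# typical use (illustrative, not from the original script):
+#	notangle -Rmain.c foo.nw | cpif main.c
+# copies the tangled output onto main.c only when it differs, so that
+# mk need not rebuild everything that depends on it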
+
+rfork en
+
+# set -x
+op=-ne
+if(~ $1 -eq -ne){
+ op=$1
+ shift
+}
+if(~ $1 -* || ~ $#* 0) {
+ echo 'Usage: '$0' [-eq -ne] file...' >[1=2]
+ exit usage
+}
+
+new=`{mktemp}
+if(~ $#new 0) {
+	echo $0': Cannot create temporary file' >[1=2]
+	exit mktemp
+}
+
+# trap 'rm -f $new; exit 1' 1 2 15 # clean up files
+
+cat >$new
+for(i) {
+ cmp -s $new $i
+
+ switch($op^$status) {
+ # differed but we wanted same
+ case -eq*differ
+ ;
+ # didn't differ but we wanted different
+ case -ne
+ ;
+ # got what we wanted or perhaps an error
+ case *
+ cp $new $i
+ }
+}
+rm -f $new
diff --git a/web/noweb/contrib/rsc/rc/emptydefn.nw b/web/noweb/contrib/rsc/rc/emptydefn.nw
new file mode 100644
index 0000000000..33901008d0
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/emptydefn.nw
@@ -0,0 +1,10 @@
+<<emptydefn>>=
+#!/bin/rc
+#
+# notangle filter that makes the definition of an empty chunk @<<>>=
+# stand for a continuation of the previous chunk definition.
+
+awk 'BEGIN { lastdefn = "@defn " }
+/^@defn $/ { print lastdefn; next }
+/^@defn / { lastdefn = $0 }
+{ print }' $*
diff --git a/web/noweb/contrib/rsc/rc/mkfile b/web/noweb/contrib/rsc/rc/mkfile
new file mode 100644
index 0000000000..a2165adb42
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/mkfile
@@ -0,0 +1,28 @@
+
+RCTARG=cpif noroots noweave notangle nountangle
+RCLIBTARG=emptydefn noidx noindex toascii tohtml totex unmarkup noweb
+AWKTARG=noidx.awk noindex.awk toascii1.awk toascii2.awk tohtml.awk
+
+%: %.nw
+ notangle -R$stem $stem.nw >$stem
+
+%2.awk: %.nw
+ notangle -R$target $stem.nw >$target
+%1.awk: %.nw
+ notangle -R$target $stem.nw >$target
+%.awk: %.nw
+ notangle -R$target $stem.nw >$target
+
+TARG=$RCTARG $RCLIBTARG $AWKTARG
+
+default:V: $TARG
+
+clean:V:
+ rm $TARG
+
+install:V:
+ cp $RCTARG /rc/bin
+ cp $RCLIBTARG /sys/lib/noweb/bin/rc
+ chmod 775 /rc/bin/^($RCTARG)
+ chmod 775 /sys/lib/noweb/bin/rc/^($RCLIBTARG)
+ cp $AWKTARG /sys/lib/noweb
diff --git a/web/noweb/contrib/rsc/rc/noidx.nw b/web/noweb/contrib/rsc/rc/noidx.nw
new file mode 100644
index 0000000000..672badb84f
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/noidx.nw
@@ -0,0 +1,432 @@
+\documentstyle[noweb]{article}
+\pagestyle{noweb}
+\begin{document}
+@
+\section{Cross-reference and index support}
+Here it is.
+<<noidx>>=
+#!/bin/rc
+#
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+
+delay=0
+anchordist=0
+while(! ~ $#* 0) {
+ switch($1){
+ case -delay
+ delay=1
+ case -docanchor
+ anchordist=$2
+ shift
+ case *
+ echo 'cannot happen -- '^$1^' passed to noidx' >[1=2]
+ exit cannothappen
+ }
+ shift
+}
+awk -f /sys/lib/noweb/noidx.awk -v 'delay='$delay -v 'anchordist='$anchordist
+@
+<<noidx.awk>>=
+<<functions>>
+BEGIN { <<initialization>> nextline = 0 }
+<<first pass>>
+{ lines[nextline] = $0; nextline++ }
+END {
+ for (i = 0; i < nextline; i ++) {
+ <<second pass over [[lines[i]]]>>
+ delete lines[i]
+ }
+ if (!delay) <<write trailers>>
+}
+@ %def lines nextline
+<<initialization>>=
+curfile = "standard input?"
+lastchunkbegin = "never any chunks?" ;
+<<initialization>>=
+allchunks[0] = 0 ; allidents[0] = 0 ; indexlabels[0] = 0
+defanchors[0] = 0 ; uses[0] = 0 ; anchorlabel[0] = 0 ; indexanchorlabel[0] = 0
+@ %def allchunks allidents indexlabels defanchors uses anchorlabel indexanchorlabel
+<<first pass>>=
+/^@file / { curfile = uniqueid(substr($0, 7)) }
+/^@begin / { lastchunkbegin = $0 }
+/^@end docs / { if (anchordist > 0) <<insert and set [[lastanchorlabel]]>> }
+/^@end code / { lastanchorlabel = "" }
+@ %def curfile lastchunkbegin lastanchorlabel
+<<first pass>>=
+/^@defn / { arg = substr($0, 7)
+ allchunks[arg] = 1
+ lastdefnlabel = newdefnlabel(arg)
+ slipin("@xref label " lastdefnlabel)
+ if (lastanchorlabel == "") lastanchorlabel = lastdefnlabel
+ if (anchorlabel[arg] == "") anchorlabel[arg] = lastanchorlabel
+ addlabel(defanchors, arg, lastanchorlabel)
+ addud(chunkud, "defn", arg, lastanchorlabel)
+ thisusecount = 0
+ }
+/^@use / { if (lastchunkbegin ~ /^@begin code /) {
+ arg = substr($0, 6)
+ allchunks[arg] = 1
+ slipin("@xref label " lastdefnlabel "-u" (++thisusecount))
+ addlabel(uses, arg, lastanchorlabel)
+ addud(chunkud, "use", arg, lastanchorlabel)
+ }
+ }
+@ %def allchunks lastdefnlabel
+<<first pass>>=
+/^@index use / { arg = substr($0, 12)
+ allidents[arg] = 1
+ if (lastanchorlabel != "") addud(indexud, "use", arg, lastanchorlabel)
+ }
+/^@index defn / { arg = substr($0, 13)
+ <<handle index definition of [[arg]]>>
+ }
+/^@index localdefn / { arg = substr($0, 18)
+ <<handle index definition of [[arg]]>>
+ }
+<<handle index definition of [[arg]]>>=
+allidents[arg] = 1
+if (lastanchorlabel != "") {
+ l = lastanchorlabel
+} else {
+ l = newdocslabel()
+ slipin("@xref label " l)
+}
+addud(indexud, "defn", arg, l)
+if (indexanchorlabel[arg] == "") indexanchorlabel[arg] = l
+slipin("@xref ref " l) # bug fix
+@ %def allidents indexanchorlabel
+The bug fix\label{multi-def-bug}
+alluded to above occurs when there are multiple definitions of an identifier.
+In this case, we can't just use [[indexanchorlabel[id]]], because that refers only to
+the first definition. In the {\TeX} back end, that leads to bogus
+tags like \hbox{\it x \small\rm 7b 7b 7b} instead of \hbox{\it x
+\small\rm 7b 9 12a}; the first tag is repeated again and again.
+Because the tag for the current [[@defn]] is lost by the time pass~2
+rolls around, we have to slip it in on pass~1.
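+For concreteness, here is the shape of the transformation (the input line is
+invented; [[NWD1]] is what [[newdocslabel]] yields on its first call).  An
+identifier defined in documentation, where no anchor exists yet, arrives as
+\begin{verbatim}
+@index defn x
+\end{verbatim}
+and leaves pass~1 as
+\begin{verbatim}
+@xref label NWD1
+@xref ref NWD1
+@index defn x
+\end{verbatim}
+so pass~2 and the back ends can refer to the definition by its label.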
+@
+<<insert and set [[lastanchorlabel]]>>=
+{ n = anchordist
+ lastanchorlabel = newdocslabel()
+ for(i = nextline - 1; i >= 0; i--) {
+ if (n == 0 || lines[i] ~ /^@begin docs /) {
+ insertafter(i, "@xref label " lastanchorlabel)
+ i = -1 # cause loop to terminate
+ } else if (lines[i] == "@nl") {
+ n--
+ }
+ }
+}
+<<functions>>=
+function insertafter(i, s, n) {
+ for(n = nextline++; n - 1 > i; n--) lines[n] = lines[n-1]
+ lines[n] = s
+}
+@
+In the awk version, [[slipin]] is called {\em before} the current line is
+added to [[lines]].
+<<functions>>=
+function slipin(s) {
+ lines[nextline++] = s
+}
+<<initialization>>=
+thesedefns[0] = 0; theseuses[0] = 0 ;
+<<second pass over [[lines[i]]]>>=
+line = lines[i]
+if (line ~ /^@begin /) {
+ if (delay && lastchunkbegin == line) <<write trailers>>
+ print line
+ for (x in thesedefns) delete thesedefns[x]
+ for (x in theseuses) delete theseuses[x]
+ thischunk = ""
+} else if (line ~ /^@defn /) {
+ thischunk = substr(line, 7)
+ printf "@xref ref %s\n", anchorlabel[thischunk]
+ print line
+} else if (line ~ /^@use /) {
+ arg = substr(line, 6)
+ printf "@xref ref %s\n", (anchorlabel[arg] == "" ? "nw@notdef" : anchorlab
+l[arg])
+ print line
+} else if (line ~ /^@index defn /) {
+ arg = substr(line, 13)
+ thesedefns[arg] = 1
+ # no xref ref because of bug fix
+ # if (indexanchorlabel[arg] != "")
+ # printf "@xref ref %s\n", indexanchorlabel[arg]
+ print line
+} else if (line ~ /^@index localdefn /) {
+ arg = substr(line, 18)
+ thesedefns[arg] = 1
+ # no xref ref because of bug fix
+ # if (indexanchorlabel[arg] != "")
+ # printf "@xref ref %s\n", indexanchorlabel[arg]
+ print line
+} else if (line ~ /^@index use /) {
+ arg = substr(line, 12)
+ theseuses[arg] = 1
+ if (indexanchorlabel[arg] != "")
+ printf "@xref ref %s\n", indexanchorlabel[arg]
+ print line
+} else if (line ~ /^@end code/) {
+ <<write cross-reference>>
+ print line
+} else if (line ~ /^@text /) {
+ # grotesque hack to get indexes in HTML
+ if (thischunk == "") { # docs mode
+ arg = substr(line, 7)
+ if (arg == "<nowebchunks>") lognowebchunks()
+ else if (arg == "<nowebindex>") lognowebindex()
+ else print line
+ } else {
+ print line
+ }
+} else {
+ print line
+}
+@ %def thesedefns theseuses
+The case of the [[@index defn]] is the one case where we don't emit a
+reference, because the reference has to go in earlier. See
+page~\pageref{multi-def-bug} for an explanation.
+<<write cross-reference>>=
+defout[thischunk]++
+<<write index cross-reference>>
+if (defout[thischunk] == 1) {<<write chunk cross-reference>>}
+if (defout[thischunk] > 1)
+ printf "@xref prevdef %s\n", listget(defanchors[thischunk], defout[thischunk
+-1)
+if (defout[thischunk] < defcount[thischunk])
+ printf "@xref nextdef %s\n", listget(defanchors[thischunk], defout[thischunk
++1)
+<<write index cross-reference>>=
+for (x in thesedefns)
+ delete theseuses[x]
+delete thesedefns[0]
+n = alphasort(thesedefns)
+if (n > 0) {
+ print "@index begindefs"
+ for (j = 0; j < n; j++) {
+ m = split(indexud[sorted[j]], a)
+ for (k = 1; k <= m; k++)
+ if (a[k] ~ /^use/)
+ printf "@index isused %s\n", substr(a[k], 5, length(a[k])-5)
+ printf "@index defitem %s\n", sorted[j]
+ delete sorted[j]
+ }
+ print "@index enddefs"
+}
+<<write index cross-reference>>=
+delete theseuses[0]
+n = alphasort(theseuses)
+if (n > 0) {
+ print "@index beginuses"
+ for (j = 0; j < n; j++) {
+ m = split(indexud[sorted[j]], a)
+ for (k = 1; k <= m; k++)
+ if (a[k] ~ /^defn/)
+ printf "@index isdefined %s\n", substr(a[k], 6, length(a[k])-6)
+ printf "@index useitem %s\n", sorted[j]
+ delete sorted[j]
+ }
+ print "@index enduses"
+}
+<<write chunk cross-reference>>=
+if (defcount[thischunk] > 1) {
+ print "@xref begindefs"
+ n = split(defanchors[thischunk], a)
+ for (j = 2; j <= n; j++) printf "@xref defitem %s\n", a[j]
+ print "@xref enddefs"
+
+}
+if (uses[thischunk] != "") {
+ print "@xref beginuses"
+ n = split(uses[thischunk], a)
+ for (j = 1; j <= n; j++) printf "@xref useitem %s\n", a[j]
+ print "@xref enduses"
+} else {
+ printf "@xref notused %s\n", thischunk
+}
+<<functions>>=
+function newdefnlabel(arg, label) {
+ defcount[arg] = defcount[arg] + 1
+ label = "NW" curfile "-" uniqueid(arg) "-" alphacode(defcount[arg])
+ return label
+}
+@ %def newdefnlabel
+<<initialization>>=
+defcount[0] = 0 ;
+<<functions>>=
+function newdocslabel() {
+ newdocslabelcount++
+ return "NWD" alphacode(newdocslabelcount)
+}
+@ %def newdocslabel
+<<functions>>=
+function addlabel(tbl, arg, label, marker) {
+ marker = " " label
+ if (!tailmatch(tbl[arg], marker))
+ tbl[arg] = tbl[arg] marker
+ return label
+}
+@ %def addlabel
+<<functions>>=
+function tailmatch(string, tail, pos) {
+ pos = length(string) - length(tail) + 1
+ if (pos > 0 && substr(string, pos) == tail)
+ return 1
+ else
+ return 0
+}
+@ %def tailmatch
+<<functions>>=
+function addud(udlist, name, arg, label, s) {
+ s = " " name "{" label "}"
+ if (!tailmatch(udlist[arg], s))
+ udlist[arg] = udlist[arg] s
+}
+@ %def addud
+<<functions>>=
+function listget(l, i, n, a) {
+ n = split(l, a)
+ return a[i]
+}
+@ %def listget
+<<initialization>>=
+udlist[0] = 0 ;
+@
+[[uniqueid]] eliminates both {\TeX} and HTML specials.
+Escaping the [[/]] in the character class in the regexp pattern works
+around a bug in many awks.
+Unpalatable, but what can one do?
+<<functions>>=
+function uniqueid(name, key) {
+ if (uidtable[name] == "") {
+ key = make_key(name)
+ # gsub(/[\]\[ \\{}`#%&~_^<>"-]/, "*", key) # old
+ gsub(/[^a-zA-Z0-9!$()*+,.\/:;=?@|]/, "*", key)
+ keycounts[key] = keycounts[key] + 1
+ uidtable[name] = key
+ if (keycounts[key] > 1)
+ uidtable[name] = uidtable[name] "." alphacode(keycounts[key])
+ }
+ return uidtable[name]
+}
+@ %def uniqueid
+<<functions>>=
+function make_key(name, key, l) {
+ l = length(name)
+ sub(/^.*\//, "", name)
+ key = substr(name, 1, 3)
+ if (l >= 3) key = key alphacode(l)
+ return key
+}
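+@ For example (the name is invented, but the values follow from the rules
+above): [[src/hash.nw]] has length 11, so [[make_key]] strips the directory,
+keeps [[has]], and appends [[alphacode(11)]], i.e.\ [[B]], giving the key
+[[hasB]].  A second name that happened to produce the same key would come out
+as [[hasB.2]] via [[keycounts]].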
+<<initialization>>=
+uidtable[0] = 0
+keycounts[0] = 0 ;
+<<write trailers>>=
+{ print "@nl"
+ print "@nl"
+ lognowebchunks()
+ lognowebindex()
+}
+@
+Now, a special hack, so we can write this stuff in the right place on pass 2.
+<<functions>>=
+function lognowebchunks(l, j, n, x) {
+ if (loggednowebchunks > 0) return
+ loggednowebchunks = 1
+ delete allchunks[0]
+ n = alphasort(allchunks)
+ print "@xref beginchunks"
+ for (j = 0; j < n; j++) {
+ name = sorted[j]; delete sorted[j]
+ printf "@xref chunkbegin %s %s\n",
+ (anchorlabel[name] != "" ? anchorlabel[name] : "nw@notdef"), name
+ m = split(chunkud[name], a)
+ for (k = 1; k <= m; k++)
+ if (a[k] ~ /^use/)
+ printf "@xref chunkuse %s\n", substr(a[k], 5, length(a[k])-5)
+ else if (a[k] ~ /^defn/)
+ printf "@xref chunkdefn %s\n", substr(a[k], 6, length(a[k])-6)
+ print "@xref chunkend"
+ }
+ print "@xref endchunks"
+}
+@ %def lognowebchunks
+<<functions>>=
+function lognowebindex(l, j, n, x) {
+ if (loggednowebindex > 0) return
+ loggednowebindex = 1
+ delete allidents[0]
+ n = alphasort(allidents)
+ print "@index beginindex"
+ for (j = 0; j < n; j++) {
+ name = sorted[j]; delete sorted[j]
+ printf "@index entrybegin %s %s\n",
+ (indexanchorlabel[name] != "" ? indexanchorlabel[name] : "nw@notdef"), name
+ m = split(indexud[name], a)
+ for (k = 1; k <= m; k++)
+ if (a[k] ~ /^use/)
+ printf "@index entryuse %s\n", substr(a[k], 5, length(a[k])-5)
+ else if (a[k] ~ /^defn/)
+ printf "@index entrydefn %s\n", substr(a[k], 6, length(a[k])-6)
+ print "@index entryend"
+ }
+ print "@index endindex"
+}
+@ %def lognowebindex
+<<functions>>=
+function alphasort(a, x, n) {
+ n = 0
+ for (x in a)
+ n = insertitem(x, n)
+ return n
+}
+function insertitem(x, n, i, tmp) {
+ sorted[n] = x
+ sortkeys[n] = sortkey(x)
+ i = n
+ while (i > 0 && (sortkeys[i] < sortkeys[i-1] ||
+ sortkeys[i] == sortkeys[i-1] && sorted[i] < sorted[i-1])) {
+ tmp = sortkeys [i]; sortkeys [i] = sortkeys [i-1]; sortkeys [i-1] = tmp
+ tmp = sorted[i]; sorted[i] = sorted[i-1]; sorted[i-1] = tmp
+ i = i - 1
+ }
+ return n + 1
+}
+@ %def alphasort insertitem
+<<initialization>>=
+sorted[0] = 0; sortkeys[0] = 0;
+<<functions>>=
+function sortkey(name, s) {
+ s = name
+ gsub(/[^a-zA-Z ]/, "", s)
+ return s
+}
+@ %def sortkey
+<<functions>>=
+function alphacode(n) {
+ if (n < 0)
+ return "-" alphacode(-n)
+ else if (n >= alphacodelen)
+ return alphacode(n / alphacodelen) alphacode(n % alphacodelen)
+ else
+ return substr(alphacodes, n+1, 1)
+}
+@ %def alphacode
+<<initialization>>=
+alphacodes = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
+alphacodelen = length(alphacodes) ;
+@
+\section{List of chunks}
+\nowebchunks
+
+\twocolumn
+\section{Index}
+\nowebindex
+@
+\end{document}
diff --git a/web/noweb/contrib/rsc/rc/noindex.nw b/web/noweb/contrib/rsc/rc/noindex.nw
new file mode 100644
index 0000000000..193f2c6429
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/noindex.nw
@@ -0,0 +1,194 @@
+This program is similar to [[makeindex]] in that it grovels through [[.aux]]
+files looking for index information, which it writes to a [[.nwi]] file.
+It's used when [[noweave -indexfrom]] is used on many files separately;
+it combines the separate indices into a single, correctly sorted index.
+That index file is read by [[\nowebindex*]].
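+For concreteness (the label and file names here are invented): a line such as
+[[\nwixadds{c}{{main program}}{NW1-mai9-1}]] in [[doc.aux]] becomes a sorted
+[[\nwixaddsx{c}...]] entry in [[doc.nwi]] when one runs [[noindex doc]].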
+<<noindex>>=
+#!/bin/rc
+#
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+
+if(! ~ $#* 1) {
+ echo 'Usage: '^`{basename $0}^' file[.tex]' >[1=2]
+ exit usage
+}
+
+awk -f /sys/lib/noweb/noindex.awk -v 'infile='$1 >[1=2]
+
+<<noindex.awk>>=
+BEGIN { # infile is supplied by the rc wrapper via -v 'infile='$1
+ <<main code>>
+ exit
+}
+<<functions>>
+<<main code>>=
+if (infile ~ /\.tex$/) {
+ infile = substr(infile, 1, length(infile)-4) ".aux"
+} else if (infile !~ /\.aux$/) {
+ infile = infile ".aux"
+}
+idx[0] = ""
+delete idx[0]
+gobble(infile)
+alphasort(idx)
+outname = substr(infile, 1, length(infile)-4) ".nwi"
+last = ""
+for (i = 0; i < count; i++) {
+ out = stripcount(sorted[i])
+ if (last != out) {
+ print out > outname
+ last = out
+# <show sort key [[i]]>
+ }
+}
+<<show sort key [[i]]>>=
+key = sortkeys[i]
+sub(/^\n+/, "", key)
+sub(/\n.*$/, "", key)
+print "% " key > outname
+<<functions>>=
+function gobble(name, infile, rc, tag) {
+ for (rc = (getline line < name); rc > 0; rc = (getline line < name)) {
+ if (line ~ /^\\@input{[^}]*}$/)
+ gobble(substr(line, 9, length(line)-9))
+ else if (line ~ /^\\nwixadds{/) {
+ count++
+ tag = "000000" count
+ tag = substr(tag, length(tag)-6+1)
+ idx[count] = tag " " substr(line, 11)
+ }
+ }
+ if (rc < 0) print "No file " name "."
+ else close(name)
+ return
+}
+<<functions>>=
+function stripcount(s) {
+ sub(/^[0-9]+/, "", s)
+ sub(/ +/, "", s)
+ return "\\nwixaddsx{" s
+}
+<<functions>>=
+function alphasort(a, x, n) {
+ n = 0
+ for (x in a)
+ n = insertitem(a[x], n)
+ finish_sorting(n)
+ return n
+}
+function insertitem(x, n, i, tmp) {
+ sorted[n] = x
+ sortkeys[n] = sortkey(x)
+ return n+1
+}
+<<functions>>=
+function finish_sorting(n) {
+ firstwork = nextwork = 0
+ addquick(0, n)
+ while(nextwork > firstwork)
+ qsort()
+}
+<<functions>>=
+function addquick(l, r) {
+ workq[nextwork++] = l
+ workq[nextwork++] = r
+}
+<<get [[l]] and [[r]] out of work queue>>=
+l = workq[firstwork]
+delete workq[firstwork]
+firstwork++
+r = workq[firstwork]
+delete workq[firstwork]
+firstwork++
+<<functions>>=
+function qsort(l, r, mid, i, last) {
+ <<get [[l]] and [[r]] out of work queue>>
+ if (r - l < 10)
+ isort(l, r)
+ else {
+ mid = l + int((r - l) * rand())
+ swap(l, mid)
+ last = l
+ for (i = l+1; i < r; i++)
+ if (sortkeys[i] < sortkeys[l] ||
+ sortkeys[i] == sortkeys[l] && sorted[i] < sorted[l])
+ swap(++last, i)
+ swap(l, last)
+ addquick(l, last)
+ addquick(last+1, r)
+ }
+}
+<<functions>>=
+function isort(l, r, n) {
+ for (n = l + 1; n < r; n++)
+ for (i = n; i > l && (sortkeys[i] < sortkeys[i-1] ||
+ sortkeys[i] == sortkeys[i-1] && sorted[i] < sorted[i-1]); i--)
+ swap(i, i-1)
+}
+<<functions>>=
+function swap(i, j, tmp) {
+ tmp = sortkeys [i]; sortkeys [i] = sortkeys [j]; sortkeys [j] = tmp
+ tmp = sorted[i]; sorted[i] = sorted[j]; sorted[j] = tmp
+}
+<<functions>>=
+function sortkey(s, count) {
+ match(s, /^[0-9]+/)
+ count = substr(s, RSTART, RLENGTH)
+ sub(/^[0-9]+ */, "", s)
+ if (s ~ /c}/) {
+ return firstkey(substr(s, 3)) "\n" count
+ } else if (s ~ /i}/) {
+ return firstkey(substr(s, 3)) "\n" count
+ } else {
+ print "sortkey handed non-chunk and non-index: " s
+ exit 1
+ }
+}
+<<functions>>=
+function firstkey(s, r, openbrace) {
+ if (s !~ /^{{/) {
+ <<complain about format and exit>>
+ }
+ sub (/^{{/, "", s)
+ gsub(/\\([a-zA-Z]+|.) */, "", s) # kill control sequences
+ openbrace = 1
+ r = ""
+ while (openbrace > 0)
+ if (match(s, /^[^{}]*/) <= 0)
+ openbrace--
+ else {
+ r = r substr(s, RSTART, RLENGTH)
+ c = substr(s, RSTART+RLENGTH, 1)
+ s = substr(s, RSTART+RLENGTH+1)
+ if (c == "}") openbrace--
+ else openbrace++
+ if (openbrace > 0) r = r c
+ }
+ return alphabet(r) "\n" r
+}
+<<complain about format and exit>>=
+print "key \"" substr(s, 1, 6) "...\" is ill-formatted"
+exit 1
+<<functions>>=
+function alphabet(s, r) {
+ r = ""
+ while (match(s, /[a-zA-Z \t]/) > 0) {
+ s = substr(s, RSTART)
+ c = substr(s, 1, 1)
+ if (c == " " || c == "\t") {
+ r = r " "
+ sub(/^[ \t]+/, "", s)
+ } else {
+ match(s, /^[a-zA-Z]+/)
+ r = r substr(s, RSTART, RLENGTH)
+ s = substr(s, RSTART+RLENGTH)
+ }
+ }
+ sub(/^ +/, "", r)
+ return r
+}
+@
+\section{Index}
+\nowebindex
diff --git a/web/noweb/contrib/rsc/rc/noroots.nw b/web/noweb/contrib/rsc/rc/noroots.nw
new file mode 100644
index 0000000000..74f4a792a1
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/noroots.nw
@@ -0,0 +1,16 @@
+<<noroots>>=
+#!/bin/rc
+#
+# Copyright 1991 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+#
+# set -x
+/sys/lib/noweb/bin/$objtype/markup $* | awk '
+/^@quote$/,/^@endquote$/ { next }
+/^@defn / { chunk=substr($0,7) ; defined[chunk]=1 }
+/^@use / { chunk=substr($0,6) ; used[chunk]=1 }
+END {
+ for (chunk in defined) {
+ if (defined[chunk]==1 && used[chunk]==0) printf "@<<%s>>\n", chunk
+ }
+}'
diff --git a/web/noweb/contrib/rsc/rc/notangle.nw b/web/noweb/contrib/rsc/rc/notangle.nw
new file mode 100644
index 0000000000..476534ca27
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/notangle.nw
@@ -0,0 +1,51 @@
+<<notangle>>=
+#!/bin/rc
+# Copyright 1991 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+#
+# Translated from sh to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+#
+
+rfork en
+bind -b /sys/lib/texmf/bin/$objtype /bin
+bind -b /sys/lib/texmf/bin/rc /bin
+
+LIB=/sys/lib/texmf/noweb
+markup=markup
+opt=()
+arg=()
+markopt=()
+filters=()
+
+while(! ~ $#* 0) {
+ switch($1) {
+ case -m -m3 -awk -icn -icon -pascal -c -c++ -f77 -f90 -tex -w[0-9][0-9]*
+ ;
+ case -t
+ ;
+ case -t*
+ markopt=($markopt -t)
+ opt=($opt $1)
+ case -filter
+ filters=($filters $2)
+ shift
+ case -markup
+ markup=$2
+ shift
+ case -
+ arg=($arg $1)
+ case -L*
+ opt=($opt -t $1)
+ markopt=($markopt -t)
+ case -*
+ opt=($opt $1)
+ case *
+ arg=($arg $1)
+ }
+ shift
+}
+
+$markup $markopt $arg | $filters nt $opt
+exit $status
diff --git a/web/noweb/contrib/rsc/rc/nountangle.nw b/web/noweb/contrib/rsc/rc/nountangle.nw
new file mode 100644
index 0000000000..207eb5e08c
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/nountangle.nw
@@ -0,0 +1,93 @@
+<<nountangle>>=
+#!/bin/rc
+#
+# Copyright 1991 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+#
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+
+# set -x
+rfork en
+bind -b /sys/lib/noweb/bin/$objtype /bin
+bind -b /sys/lib/noweb/bin/rc /bin
+
+markup=markup
+opt=''
+arg=''
+filters=''
+width=72
+subst='gsub("\\*/", "* /", s)'
+format='/* %%-%ds */'
+
+while(! ~ $#* 0) {
+ switch($1) {
+ case -ml -m3
+ format='(* %%-%ds *)'
+ subst='gsub("\\*\\)", "* )", s) gsub("\\(\\*", "( *", s)'
+ case -awk -icn -icon
+ format='# %%-%ds' ; subst=' '
+ case -lisp -scm
+ format=';;; %%-%ds' ; subst=' '
+ case -c++
+ format='// %%-%ds' ; subst=' '
+ case -c
+ format='/* %%-%ds */' ; subst='gsub("\\*/", "* /", s)'
+ case -pascal
+ format='{ %%-%ds }' ; subst='gsub("[{}]", "-", s)'
+ case -f77
+ format='C %%-%ds' ; subst=' '
+ case -f90
+ format='! %%-%ds' ; subst=' '
+ case -tex
+ format='%%%% %%-%ds' ; subst=' '
+ case -L*
+ # deliberately ignore requests for #line
+ case -w[0-9][0-9]*
+ width=`{echo $1 | sed 's/^-w//'}
+ case -filter
+ filters=' | '$filters' '$2 ; shift
+ case -markup
+ markup=$2 ; shift
+ case -
+ arg=$arg' '$1
+ case -*
+ opt=$opt' '$1
+ case *
+ arg=$arg' '$1
+ }
+ shift
+}
+
+eval $markup^' '^$arg^' '^$filters |
+awk 'BEGIN { line = 0; capture = 0; format=sprintf("'$format'",'$width') }
+
+function comment(s) {
+ '$subst'
+ return sprintf(format,s)
+}
+
+function grab(s) {
+ if (capture==0) print
+ else holding[line] = holding[line] s
+}
+
+/^@end doc/ { capture = 0; holding[++line] = "" ; next }
+/^@begin doc/ { capture = 1; next }
+
+/^@text / { grab(substr($0,7)); next}
+/^@quote$/ { grab("[[") ; next}
+/^@endquote$/ { grab("]]") ; next}
+
+/^@nl$/ { if (capture !=0 ) {
+ holding[++line] = ""
+ } else if (defn_pending != 0) {
+ print "@nl"
+ for (i=0; i<=line && holding[i] ~ /^ *$/; i++) i=i
+ for (; i<=line; i++) printf "@text %s\n@nl\n", comment(holding[i])
+ line = 0; holding[0] = ""
+ defn_pending = 0
+ } else print
+ next
+ }
+
+/^@defn / { holding[line] = holding[line] "<"substr($0,7)">=" # (line should be blank)
+ print ; defn_pending = 1 ; next }
+{ print }' |
+eval nt^' '^$opt
+
diff --git a/web/noweb/contrib/rsc/rc/noweave.nw b/web/noweb/contrib/rsc/rc/noweave.nw
new file mode 100644
index 0000000000..46b49ae81e
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/noweave.nw
@@ -0,0 +1,594 @@
+\section{Weaving a {\tt noweb} file into a \TeX file}
+The copyright applies both to the {\tt noweb} source and to the
+generated shell script.
+<<copyright notice>>=
+# Copyright 1991-1997 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+# $Id: noweave.nw,v 1.6 1998/08/17 00:10:34 nr Exp nr $
+#
+# Translated from sh to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+@
+Here's the organization of the source:
+<<noweave>>=
+#!/bin/rc
+<<copyright notice>>
+rfork en
+<<initialization>>
+<<set up /bin union>>
+<<scan options and arguments>>
+{
+ <<emit markup on standard output>>
+ status=''
+} | {
+ args=(`{echo $noindex $delay $shortxref})
+ eval $backend $args
+}
+exit $status
+<<if verbose, show back end>>=
+if(! ~ $verbose '')
+ echo $backend $noindex $delay $shortxref >[1=2]
+@
+The first item of initialization is to locate the {\tt noweb} library.
+<<initialization>>=
+LIB=/sys/lib/noweb
+@
+We need to add the {\tt noweb} bin directories to the union mount on {\tt /bin}.
+<<set up /bin union>>=
+bind -b $LIB/bin/$objtype /bin
+bind -b $LIB/bin/rc /bin
+@
+We continue with initialization.
+We use strings throughout rather than {\tt rc} lists,
+since we're just going to echo it anyway, and it makes
+keeping the filterlist easy.
+<<initialization>>=
+markup=markup
+backend=totex
+wrapper=''
+delay=''
+args=''
+markopts=''
+noweboptions=''
+autodefs=''
+verbose=''
+shortxref=''
+noquote=-noquote
+docanchor=''
+noindex=-noindex
+filterlist=()
+# following supported by change in totex back end
+noquote=''
+@
+I make two passes over the arguments so as not to require that options
+be given in a certain order.
+<<scan options and arguments>>=
+saveargs=($*)
+arg=''
+while(! ~ $#* 0) {
+ switch($1) {
+ <<first pass {\tt noweave} options>>
+ case -
+ arg=$arg^' '$1
+ case -*
+ echo $0': unrecognized option '$1 >[1=2]
+ <<show usage>>
+ exit usage
+ case *
+ arg=$arg^' '$1
+ }
+ shift
+}
+<<insist first-pass options are self-consistent>>
+if(~ $wrapper '')
+ wrapper=latex
+
+*=$saveargs
+shift
+
+while(! ~ $#* 0) {
+ switch($1) {
+ <<second pass {\tt noweave} options>>
+ }
+ shift
+}
+
+<<add [[$newopt]] to [[noweboptions]]>>=
+if(~ $noweboptions '')
+ noweboptions=$newopt
+if not
+ noweboptions=$noweboptions','$newopt
+
+<<first pass {\tt noweave} options>>=
+case -latex
+ if(! ~ $wrapper none)
+ wrapper=latex
+case -tex
+ wrapper=tex
+case -html
+ if(! ~ $wrapper none)
+ wrapper=html
+ backend='tohtml -localindex'
+ noquote=''
+ docanchor='-docanchor 10'
+case -latex+html
+ if(! ~ $wrapper none)
+ wrapper=latex
+ backend='tohtml -localindex -raw'
+ noquote=''
+ docanchor='-docanchor 10'
+case -ascii
+ wrapper=none
+ backend=toascii
+case -troff
+ backend=toroff
+case -n
+ wrapper=none
+case -backend
+ backend=$2
+ shift
+case -markup
+ markup=$2
+ shift
+@
+Note some versions of echo break on [[echo "-n..."]], echoing nothing
+at all. The leading space is claimed to prevent this problem.
+<<option printout for usage>>=
+echo '-latex Emit LaTeX with headers and trailers (default).' >[1=2]
+echo '-tex Emit plain TeX with headers and trailers.' >[1=2]
+echo '-html Emit HTML with headers and trailers.' >[1=2]
+echo '-latex+html Assume LaTeX in documentation, but use HTML in code.' >[1=2]
+# echo '-ascii Emit ASCII.' >[1=2]
+echo '-troff Emit troff (actually GNU groff).' >[1=2]
+echo ' -n Don''t use any header or trailer.' >[1=2]
+echo '-markup frontend Parse input with frontend (e.g., numarkup).' >[1=2]
+@ \iffalse
+<<noweave man page option table>>=
+.TP
+.B \-latex
+Emit LaTeX, including wrapper in
+.B article
+style with the
+.B noweb
+package and page style. (Default)
+.TP
+.B \-tex
+Emit plain TeX, including wrapper with
+.B nwmac
+macros.
+.TP
+.B \-html
+Emit HTML, using HTML wrapper.
+The output is uninteresting without \fB-index\fP or \fB-x\fP.
+The tags \fB<nowebchunks>\fP and \fB<nowebindex>\fP, on lines by themselves,
+produce a list of chunks and an index of identifiers, respectively.
+If these tags are not present, the list and index are placed at the end of the
+file.
+.TP
+.B \-latex+html
+Assume documentation chunks are LaTeX, but generate HTML for code chunks,
+suitably marked so conversion with
+.I latex2html(1)
+yields reasonable output.
+A LaTeX wrapper is implied, but can be turned off with \fB-n\fP.
+.I Use of this option is
+.B deprecated;
+use
+.B \-html
+with
+.B "\-filter l2h"
+instead.
+<<noweave man page option table>>=
+.TP
+.B \-troff
+Emit
+.IR troff (1)
+markup (with no wrapper).
+The result should be processed with
+.IR noroff (1).
+Bug reports for
+.B \-troff
+to Arnold Robbins
+.B <arnold@skeeve.com>.
+<<bogus noweave man page option table>>=
+.TP
+.B \-ascii
+Emit ASCII (with no wrapper).
+Bug reports for
+.B \-ascii
+to Phil Bewig
+.B <pbewig@netcom.com>.
+<<noweave man page option table>>=
+.TP
+.B \-n
+Don't use any wrapper (header or trailer).
+This option is useful when \fInoweave\fP's output will be
+a part of a larger document.
+See also
+.B \-delay.
+@ \fi
+A common bug seems to be using both [[-x]] and [[-index]] on the same
+command line, so I complain about it.
+<<insist first-pass options are self-consistent>>=
+if(! ~ $using_xref '' && ! ~ $using_index '') {
+ echo $0': you may not use -x with -index or -indexfrom (drop the -x)' >[1=2]
+ exit -x-index
+}
+<<initialization>>=
+using_index=''
+using_xref=''
+@
+<<first pass {\tt noweave} options>>=
+case -filter
+ shift
+case -x
+ using_xref=1
+case -index
+ noindex=''
+ using_index=1
+case -indexfrom
+ shift
+ noindex=''
+ using_index=1
+<<second pass {\tt noweave} options>>=
+case -filter
+ newfilter=$2
+ shift
+ <<add [[$newfilter]]>>
+case -x
+ newfilter='finduses noquote'
+ <<add [[$newfilter]]>>
+case -index
+ newfilter='finduses '^$noquote
+ <<add [[$newfilter]]>>
+ newfilter='noidx '^$docanchor^' '^$delay
+ <<add [[$newfilter]]>>
+case -indexfrom
+ newfilter='finduses '^$noquote^' '^$2
+ <<add [[$newfilter]]>>
+ newfilter='noidx '^$docanchor^' '^$delay
+ <<add [[$newfilter]]>>
+ shift
+<<option printout for usage>>=
+echo '-x Use the default cross-referencer (needs LaTeX or HTML).' >[1=2]
+echo '-index Create index using identifiers defined in input files.' >[1=2]
+echo '-indexfrom defs Create index of identifiers listed in file defs.' >[1=2]
+echo '-filter cmd Filter through ''cmd'' before weaving; cmd could prettyprint' >[1=2]
+echo ' or perform other functions.' >[1=2]
+@ \iffalse
+<<noweave man page indexing options>>=
+.TP
+.B \-x
+For
+.I LaTeX,
+add a page number to each chunk name identifying the location of that
+chunk's definition, and emit cross-reference information relating definitions
+nd uses.
+For
+.I HTML,
+create hypertext links between uses and definitions of chunks.
+When
+.B noweave -x
+is used with
+.I LaTeX,
+the control sequence
+.B "\\\\nowebchunks"
+expands to a sorted list of all code chunks.
+.TP
+.B \-index
+Build cross-reference information (or hypertext links) for identifiers defined
+by
+.br
+.B "@ %def"
+.I identifiers
+.br
+Definitions are those found in input files.
+Requires
+.I LaTeX
+or
+.I HTML.
+.B \-index
+implies
+.B \-x;
+including both will generate strange-looking output.
+.I noweave
+does not generate
+cross-references to identifiers that appear in quoted code (\fB@[[\fP...\fB@]]\fP),
+but it does generate hypertext links.
+When
+.B noweave -index
+is used with
+.I LaTeX,
+the control sequence
+.B "\\\\nowebindex"
+expands to an index of identifiers.
+.TP
+.B \-indexfrom \fIindex\fP
+Like
+.B \-index,
+but the identifiers to be indexed are taken from file \fIindex\fP.
+See
+.I noindex(1).
+<<noweave man page option table>>=
+.TP
+.B \-filter \fIcmd\fP
+Filters the
+.I noweb
+source through
+.I cmd
+after converting it to tool form and before converting to
+.I TeX.
+.I noweave
+looks for
+.I cmd
+first on the user's
+.B PATH,
+then in
+.B |LIBDIR|.
+Such filters
+can be used to add features to
+.I noweave;
+for an example, see
+.B |LIBDIR|/noxref.krom.
+.I Noweave
+supports up to four filters; one can get more by shell trickery,
+for example, \fB-filter "icon.filter | noidx"\fP.
+The \fB-autodefs\fP,
+\fB-x\fP, \fB-index\fP, and \fB-indexfrom\fP options are implemented as filters.
+Filters are executed with the shell's
+.B eval
+command, so
+.I cmd
+should be quoted accordingly.
+<<description of -markup option>>
+@ \fi
+Note that it would be appropriate to look for autodefs
+using [[[ -x $newfilter ]]],
+but that stupid DEC Ultrix doesn't support [[test -x]], so the best I can
+do in a portable way is [[test -r]].
+<<first pass {\tt noweave} options>>=
+case -autodefs
+ newfilter='autodefs.'^$2
+ if(test -r $newfilter) {
+ <<add [[$newfilter]]>>
+ }
+ if not {
+ echo $0^': don''t know how to find definitions for '^$2
+ exit defns
+ }
+ shift
+
+case -showautodefs
+ <<print all legal [[-autodefs]] or complain>>
+ exit complain
+<<option printout for usage>>=
+echo '-autodefs lang Source is in language ''lang''; find definitions automatically.' >[1=2]
+echo '-showautodefs List languages that can be used with -autodefs' >[1=2]
+@ \iffalse
+<<noweave man page indexing options>>=
+.TP
+.B \-autodefs \fIlang\fP
+Discover identifier definitions automatically.
+Code in chunks must be in language \fIlang\fP.
+Permissible \fIlang\fPs vary but may include
+.B tex
+or
+.B icon.
+Useless without
+.B \-index,
+which it must precede.
+.TP
+.B \-showautodefs
+Show values of \fIlang\fP usable with \fB-autodefs\fP.
+@ \fi
+Same note as above regarding [[test -x]] vs [[test -r]].
+<<print all legal [[-autodefs]] or complain>>=
+foundautodefs=no
+for(i in $LIB/autodefs.*) {
+ if(test -r $i) {
+ echo 'Supports -autodefs '^$i | sed 's!'$LIB'/autodefs\.!!' >[1=2]
+ foundautodefs=yes
+ }
+}
+if(~ $foundautodefs no)
+ echo 'Does not support -autodefs' >[1=2]
+@
+Here's an embarrassing hack --- if we spot \verb+-option shortxref+
+or \verb+-option longxref+ on the
+command line, we pass something suitable to the back end, in case we're doing
+HTML.
+<<first pass {\tt noweave} options>>=
+case -option
+ newopt=$2
+ shift
+ if(~ $newopt shortxref)
+ shortxref=-shortxref
+ if(~ $newopt longxref)
+ shortxref=-longxref
+ <<add [[$newopt]] to [[noweboptions]]>>
+<<option printout for usage>>=
+echo '-option opt Add \noweboptions{opt} to header (latex only)' >[1=2]
+@ \iffalse
+<<noweave man page option table>>=
+.TP
+.B \-option \fIopt\fP
+Adds \fB\enoweboptions{\fP\fIopt\fP\fB}\fP to the
+.I LaTeX
+header.
+See
+.I nowebstyle(1)
+for values of
+.I opt.
+Normally useful only with the
+.B \-latex
+option, but
+.B "\-option longxref"
+works black magic with
+.B \-html.
+@ \fi
+<<first pass {\tt noweave} options>>=
+# case -nodelay
+# delay=''
+case -delay
+ delay=-delay
+ wrapper=none
+<<option printout for usage>>=
+echo '-delay Delay markup until after first documentation chunk.' >[1=2]
+@ \iffalse
+<<noweave man page option table>>=
+.TP
+.B \-delay
+By default,
+.I noweave
+puts file-name and other information into the output before the first chunk
+of the program.
+.B \-delay
+delays that information until after the first documentation chunk, making
+act a little bit like the
+.I WEB
+``limbo.''
+The option is typically used to enable a user to put a specialized
+.I LaTeX
+.B "\\\\documentclass"
+command and other preamble material in the first documentation chunk.
+This option also forces trailing cross-referencing information to be emitted
+just before the final chunk, instead of at the end of the document;
+the final chunk is expected to contain
+.B "\\\\end{document}."
+The
+.B \-delay
+option implies the
+.B \-n
+option.
+@ \fi
+% .TP
+% .B \-nodelay
+% Don't delay, put file-name and other information right after header. (Default)
+% @ \fi
+<<first pass {\tt noweave} options>>=
+case -t*
+ markopts=$markopts' '$1
+<<option printout for usage>>=
+echo '-tk Expand tab stops every k columns' >[1=2]
+echo '-t Copy tabs to the output' >[1=2]
+@ \iffalse
+<<noweave man page option table>>=
+.TP
+.B \-t\fIk\fP
+Expand tabs with stops every \fIk\fP columns.
+(Default is to expand every 8 columns.)
+.TP
+.B \-t
+Copy tabs to the output.
+@ \fi
+<<first pass {\tt noweave} options>>=
+case -v
+ echo 'RCS id $Id: noweave.nw,v 1.6 1998/08/17 00:10:34 nr Exp nr $' >[1=2]
+ verbose=1
+<<option printout for usage>>=
+echo '-v Print pipeline and RCS info on standard error' >[1=2]
+@ \iffalse
+<<noweave man page option table>>=
+.TP
+.B \-v
+Print the pipeline and RCS info on standard error.
+@ \fi
+\iffalse
+<<man page: WEAVING section>>=
+Output from \fInoweave\fP can
+be used in \fITeX\fP documents that
+.B "\\\\input nwmac,"
+in \fILaTeX\fP documents that use the
+.B noweb
+package (see \fInowebstyle(1)\fP),
+and in \fIHTML\fP documents to be browsed with
+.I Mosaic(1).
+.I Noweave
+treats code chunks somewhat like
+.I LaTeX list environments.
+If the ``\fB@ \fP'' that terminates a code chunk is followed immediately by text,
+that text follows the code chunk without a paragraph break.
+If the rest of the line is blank,
+.I noweave
+puts
+.I TeX
+into ``vertical mode,'' and later text starts a fresh, indented paragraph.
+.PP
+No page breaks occur in the middle of code chunks unless necessary to avoid
+an overfull vbox.
+The documentation chunk immediately preceding a code chunk appears on
+the same page as that code chunk unless doing so would violate the previous rule.
+.PP
+.I Noweave
+inserts no extra newlines in its \fITeX\fP output, so the line numbers given
+in
+.I TeX
+error messages are the same as those in the input file.
+.PP
+.I noweave
+has
+options that dictate choice of
+formatter
+and that support different formatting idioms and tools.
+Basic options are described here; options related to index
+and cross-reference information are described in the
+INDEXING AND CROSS-REFERENCE section.
+<<noweave man page option table>>
+@
+<<man page: INDEXING AND CROSS-REFERENCE section>>=
+
+@ \fi
+<<add [[$newfilter]]>>=
+filterlist=($filterlist $newfilter)
+<<show usage>>=
+echo 'Usage: '$0' [options] [files]' >[1=2]
+echo 'Options recognized are:' >[1=2]
+<<option printout for usage>>
+@
+To avoid inserting any extra newlines into the output,
+I use [[@literal]] to insert headers and trailers.
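+(For instance, with the default [[latex]] wrapper and no options, the code
+below emits [[@header latex]] as its first line and [[@trailer latex]] as its
+last; these values are read off the code and shown only for illustration.)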
+<<emit markup on standard output>>=
+header=''
+# whatis wrapper
+# whatis arg
+switch($wrapper) {
+case none
+ ;
+case latex
+ header='@header '^$wrapper^' '^$noweboptions
+ trailer='@trailer '^$wrapper
+case *
+ header='@header '^$wrapper^$arg
+}
+if(! ~ $header '')
+ echo $header
+<<if verbose, make noise about pipeline>>
+<<if verbose, show back end>>
+if(~ $#filterlist 0)
+ filterlist=cat
+pipeline='| '^$filterlist
+pipeline=cat^$"pipeline
+
+# whatis pipeline
+argx=(`{echo $markopts $arg})
+# whatis argx
+$markup $argx | eval $pipeline
+if(! ~ $trailer '')
+ echo $trailer
+<<if verbose, make noise about pipeline>>=
diff --git a/web/noweb/contrib/rsc/rc/noweave.simple b/web/noweb/contrib/rsc/rc/noweave.simple
new file mode 100644
index 0000000000..2f2bb84a3e
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/noweave.simple
@@ -0,0 +1,56 @@
+#!/bin/rc
+
+rfork en
+
+LIB=/sys/lib/noweb
+
+bind -b /sys/lib/noweb/bin/$objtype /bin
+bind -b /sys/lib/noweb/bin/rc /bin
+
+files=()
+for(i) {
+ switch($i) {
+ case -*
+ ;
+ case *
+ files=($files $i)
+ }
+}
+
+markup $files | awk '
+BEGIN { code=0
+ print "\\documentstyle{article}"
+ print "\\newcommand{\\fragment}[1]{{\\sl$\\langle$#1\\/$\\ran
+le$}}"
+ print "\\begin{document}"
+ }
+END { if (code) print "\\end{trivlist}"
+ print "\\end{document}"
+ }
+/^@quote$/ { printf "\\verb@"}
+/^@endquote$/ { printf "@" }
+/^@begin code/ { if (!code) print "\\begin{trivlist}\\raggedright\\obeylines\\leftskip=2em\\small\\item[]%"; code=1 }
+/^@end code/ { }
+/^@begin docs/ { if (code) print "\\end{trivlist}"; code=0 }
+/^@end docs/ { }
+/^@defn / { gsub(/\[\[/, "\\verb@"); gsub(/]]/, "@")
+ name = substr($0,7)
+ printf "\\hspace{-2em}"
+ printf "\\fragment{%s}", name
+ defs[name] += 1
+ if (defs[name] > 1)
+ printf "$+\\!\\!\\equiv$"
+ else
+ printf "$\\equiv$"
+ printf "\\index{\\fragment{%s}}", name
+ }
+/^@use / { gsub(/\[\[/, "\\verb@"); gsub(/]]/, "@")
+ name = substr($0,6)
+ printf "\\fragment{%s}", name
+ printf "\\index{\\fragment{%s}}", name
+ }
+/^@literal / { printf "%s", substr($0, 10) }
+/^@nl$/ { print ""}
+/^@text / { if (code) printf "\\verb@%s@", substr($0,7)
+ else printf "%s", substr($0,7) }'
diff --git a/web/noweb/contrib/rsc/rc/noweb.nw b/web/noweb/contrib/rsc/rc/noweb.nw
new file mode 100644
index 0000000000..e790c47e42
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/noweb.nw
@@ -0,0 +1,63 @@
+<<noweb>>=
+#!/bin/rc
+# Copyright 1991 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+#
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+
+rfork en
+bind -b /sys/lib/noweb/bin/$objtype /bin
+bind -b /sys/lib/noweb/bin/rc /bin
+
+markup=markup
+mntopt=-L
+status=0
+tex=1
+output=1
+
+break=no
+while(! ~ $#* 0 && ~ $break no) {
+ switch($1) {
+ case -to -ot
+ tex=''
+ output=''
+ shift
+ case -t
+ tex=''
+ shift
+ case -o
+ output=''
+ shift
+ case -L*
+ mntopt=$1
+ shift
+ case -markup
+ markup=$2
+ shift
+ shift
+ case -*
+ echo unrecognized option $1 >[1=2]
+ exit usage
+ case *
+ break=yes
+ }
+}
+
+for(source) {
+ if(test -n $output) {
+ eval $markup' -t '$source' | mnt -t8 '$mntopt' -all' || status=1
+ }
+ if(test -n $tex) {
+ texname=`{echo $source | sed '/\./s/\.[^.]*$//'}
+ texname=$texname.tex
+ eval $markup' '$source | finduses -noquote | noidx -delay |
+ awk '{print}
+ /^@defn [^ ]*$/ { print "@literal \\let\\nwnotused=\\nwoutput{}" }' |
+ totex -delay | cpif $texname || status=1
+ }
+}
+
+exit $status
diff --git a/web/noweb/contrib/rsc/rc/toascii.nw b/web/noweb/contrib/rsc/rc/toascii.nw
new file mode 100644
index 0000000000..3805fa4bf2
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/toascii.nw
@@ -0,0 +1,279 @@
+[[Toascii]] is a [[noweb]] back end for formatting text as a plain ascii file.
+It was written by Phil Bewig (pbewig@netcom.com) on March 31, 1995, and
+contributed to Norman Ramsey's [[noweb]] literate programming system.
+@
+The main program is shown below. Option [[-delay]] is processed, for
+compatibility with other back ends, but ignored; since the initial document
+chunk used with [[-delay]] normally contains only [[TeX]] formatting commands
+in limbo, and since those commands will be deleted before formatting, there is
+no need to handle [[-delay]].
+<<toascii>>=
+#!/bin/rc
+#
+# Based on shell script by Phil Bewig
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+#
+delay=0
+noindex=0
+for(i) {
+ switch($i){
+ case -delay
+ delay=1
+ case -noindex
+ noindex=1
+ case *
+ echo 'cannot happen' >[1=2]
+ exit coredump
+ }
+}
+<<arrange temporary files>>
+<<invoke first pass using parameter>>
+<<invoke second pass using file>>
+exit $status
+@ %def delay noindex
+[[Toascii]] uses two temporary files, one for storing the text between passes
+and one for communicating the conversion of labels to tags. The files are
+named here, and disposal of the file on exit from [[toascii]] is arranged.
+Also arranged here is a temporary file for storage of the awk program on an
+ugly system, as discussed below.
+<<arrange temporary files>>=
+awkfile=`{mktemp}
+textfile=`{mktemp}
+tagsfile=`{mktemp}
+if(~ $#awkfile 0 || ~ $#textfile 0 || ~ $#tagsfile 0) {
+	echo $0': Cannot create temporary file' >[1=2]
+	exit mktemp
+}
+@ %def textfile tagsfile awkfile
+The actual formatting of the text, code, and index entries is done by various
+unix text processing commands in pipelines. There are four formatting pipes:
+tfmt, which formats text, cfmt, which formats code, xfmt, which formats index
+entries within the running text, and zfmt, which formats the lists of chunks
+and identifiers at the end of the text. The formatters established below are
+only suggestions, and may be modified to suit local taste (and the presence of
+various text processing commands on the local machine!); in particular, [[c]]
+programmers may want to format code with cb or indent. The sed patterns below
+insert four blank spaces at the beginning of the line.
+<<initialize formatters>>=
+tfmt"detex | fmt -l 79"
+cfmt="cat"
+xfmt="cat"
+zfmt="cat"
+@ %def tfmt cfmt xfmt zfmt
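+For instance (a local variation, not part of the distributed defaults), code
+could be indented four spaces by setting [[cfmt="sed 's/^/    /'"]] rather
+than [[cat]]; this is presumably the sed pattern alluded to above.
+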
+Forgiving systems allow the awk program to be specified as a parameter to the
+awk interpreter; ugly systems require that it be placed in a temporary file.
+The chunks below implement both options.
+<<invoke first pass using parameter>>=
+awk '<<first pass>>'
+<<invoke second pass using parameter>>=
+awk '<<second pass>>' -v 'noindex='$noindex $textfile
+<<toascii1.awk>>=
+<<first pass>>
+<<invoke first pass using file>>=
+awk -f toascii1.awk
+<<toascii2.awk>>=
+<<second pass>>
+<<invoke second pass using file>>=
+awk -f /sys/lib/noweb/toascii2.awk -v 'noindex='$noindex $textfile
+@
+The first pass is responsible for extracting [[label]]s and assigning them
+section numbers, which are used for cross-referencing in the second pass. The
+first pass also removes from the input file those lines which are not used by
+the second pass.
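+For concreteness (label and section number invented): each [[@xref label]]
+line yields one [[tagsfile]] line of the form [[NW1-mai9-1 3]], mapping a
+label to the number of the section that defines it.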
+The environment is gotten from the associative array [[ENVIRON]].
+<<initialize environment>>=
+textfile=ENVIRON["textfile"]
+tagsfile=ENVIRON["tagsfile"]
+<<first pass>>=
+BEGIN { <<initialize environment>> }
+/^@begin code/ { ++secno }
+/^@xref label/ { print $3, secno >tagsfile }
+/^@((begin|end) (docs|code))/ { print >textfile }
+/^@(text|nl|defn|use)/ { print >textfile }
+/^@xref (ref|notused)/ { print >textfile }
+/^@xref (begin|end)(defs|uses)/ { print >textfile }
+/^@xref (def|use)item/ { print >textfile}
+/^@xref ((begin|end)chunks)|(chunk(begin|use|defn|end))/ { print >textfile }
+/^@index (begin|end)(defs|uses)/ { print >textfile }
+/^@index (is(us|defin)ed)|((def|use)item)/ { print >textfile }
+/^@index ((begin|end)index)|(entry(begin|use|defn|end))/ { print >textfile }
+@
+The second pass performs formatting. After looking up the temp file names and
+formatters in the environment and reading the [[tagsfile]] created in the first
+pass, the second pass processes each input command in the body of the awk
+[[pattern-action]] processing loop.
+<<second pass>>=
+BEGIN {
+ <<initialize environment>>
+ <<initialize formatters>>
+ while (getline <tagsfile > 0)
+ tag[$1] = $2
+ close(tagsfile)
+}
+<<process [[noweb]] commands>>
+/^@fatal / { exit 1 }
+END {
+ close(out)
+}
+<<functions>>
+@ %def tag
+The rest of the program consists of a series of awk [[pattern-action]]
+statements which each process a particular type of [[noweb]] pipeline command.
+They are discussed in related groups, and all collected in a single chunk. We
+begin with the commands that process the text of the document and code chunks.
+The basic strategy is always write text to [[out]] and open and close various
+pipes as needed. Variable [[code]] is true only within code chunks, and
+[[secno]] numbers the sections as they appear. Function [[endcode()]] closes
+the code pipeline at the end of a code section or whenever the first indexing
+command appears.
+<<process [[noweb]] commands>>=
+/^@begin docs/ { out = tfmt }
+/^@end docs/ { close(out) }
+/^@begin code/ { out = cfmt; code = 1; ++secno }
+/^@end code/ { endcode(); close(out); printf "\n" }
+/^@text/ { printf "%s", substr($0, 7) | out }
+/^@nl/ { # printf "(->%s)", formatname(out) | out ;
+ printf "\n" | out }
+@ %def out secno code
+<<functions>>=
+function endcode() {
+ if (code == 1) {
+ code = 0
+ close(out)
+ out = xfmt
+ printf "\n" | out } }
+@ %def endcode
+Definitions and uses of code chunks are handled below. Variable [[defn[name]]]
+is set to a plus sign after a definition is printed, so that continuations of
+the definition are properly identified. Variable [[lastxrefref]] is the tag
+associated with the most-recently-seen cross-reference label, and refers to the
+section number of the original definition of the code chunk. Definition lines
+are printed directly, without passing through any of the formatters defined
+above.
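+For example (the rendering is invented here; [[chunkname]] is defined later in
+this file), a second definition of a chunk [[main]] first defined in section~2
+would print roughly as [[### 5 ### <main 2>+=]].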
+<<process [[noweb]] commands>>=
+/^@xref ref/ { lastxrefref = tag[substr($0, 11)] }
+/^@defn/ { name = convquote(substr($0, 7))
+ printf "\n### %d ### %s%s=",
+ secno, chunkname(name, lastxrefref), defn[name]
+ defn[name] = "+" }
+/^@use/ { name = convquote(substr($0, 6))
+ printf "%s", chunkname(name, lastxrefref) | out }
+@ %def lastxref name defn
+There are three messages related to the definition and use of code chunks which
+may appear in the output: "This definition continued in ...", "This code used
+in ...", and "This code not used in this document." These messages are printe
+
+by the following code.
+<<process [[noweb]] commands>>=
+/^@xref begindefs/ { endcode()
+ printf "This definition continued in" | out }
+/^@xref beginuses/ { endcode()
+ printf "This code used in" | out }
+/^@xref notused/ { endcode()
+ print "This code not used in this document." | out }
+/^@xref (def|use)item/ { addlist(tag[$3]) }
+/^@xref end(defs|uses)/ { printlist() }
+@
+Processing of the [[noweb]] commands which produce the identifier definition
+message "Defines: ... used in ..." is performed by the following code. The
+[[if]] in [[@index isused]] prevents index definitions from pointing to
+themselves.
+<<process [[noweb]] commands>>=
+$0 ~ /^@index begindefs/ && !noindex {
+ endcode()
+ print "Defines:" | out }
+
+$0 ~ /^@index isused/ && !noindex {
+ if (tag[$3] != lastxrefref) addlist(tag[$3]) }
+
+$0 ~ /^@index defitem/ && !noindex {
+ printf " %s,", $3 | out
+ if (nlist == 0) printf " not used in this document.\n" | out
+ else { printf " used in" | out; printlist() } }
+@
+Processing of the [[noweb]] commands which produce the identifier usage message
+"Uses ..." is performed by the following code.
+<<process [[noweb]] commands>>=
+$0 ~ /^@index beginuses/ && !noindex { endcode(); printf "Uses" | out }
+$0 ~ /^@index isdefined/ && !noindex { lastuse = tag[$3] }
+$0 ~ /^@index useitem/ && !noindex { addlist(sprintf("%s %s", $3, lastuse)) }
+$0 ~ /^@index enduses/ && !noindex { printlist() }
+@ %def lastuse
+The [[noweb]] commands which print the list of chunks at the end of the
+document are processed by the following code.
+<<process [[noweb]] commands>>=
+/^@xref beginchunks/ { close(out); out = zfmt
+ print "List of code chunks\n" | out }
+/^@xref chunkbegin/ { name = convquote(substr($0, length($3) + 19))
+ printf "%s\n", chunkname(name, tag[$3]) | out }
+/^@xref chunkuse/ { addlist(tag[$3]) }
+/^@xref chunkdefn/ { }
+/^@xref chunkend/ { if (nlist == 0)
+ print " Not used in this document." | out
+ else { printf " Used in" | out; printlist() } }
+/^@xref endchunks/ { }
+@
+The [[noweb]] commands which print the list of identifiers at the end of the
+document are processed by the following code.
+<<process [[noweb]] commands>>=
+$0 ~ /^@index beginindex/ && !noindex { print "\nList of identifiers (definition in parentheses)\n" | out }
+$0 ~ /^@index entrybegin/ && !noindex { name = substr($0, length($3) + 20)
+ lastdefn = tag[$3]
+ printf "%s: ", $4 | out }
+$0 ~ /^@index entryuse/ && !noindex { addlist(tag[$3]) }
+$0 ~ /^@index entrydefn/ && !noindex { }
+$0 ~ /^@index entryend/ && !noindex { for (i = 1; i <= nlist; i++)
+ if (list[i] == lastdefn)
+ sub(/.*/, "(&)", list[i])
+ if (nlist == 0)
+ print "Not used." | out
+ else printlist() }
+$0 ~ /^@index endindex/ && !noindex { }
+@
+Several of the cross-reference and indexing commands use the [[addlist(s)]] and
+[[printlist()]] functions to manage the printing of lists of code sections and
+variable names: [[addlist(s)]] adds string [[s]] to a queued [[list]] waiting
+to be printed and [[printlist()]] prints the [[list]], appropriately formatted
+with commas. These two functions are described below.
+<<functions>>=
+function addlist(s, i) {
+ for (i = 1; i <= nlist; i++)
+ if (s == list[i]) return
+ list[++nlist] = s }
+
+function printlist( i) {
+ if (nlist == 1) printf " %s.\n", list[1] | out
+ else if (nlist == 2) printf " %s and %s.\n", list[1], list[2] | out
+ else {
+ for (i = 1; i < nlist; i++)
+ printf " %s,", list[i] | out
+ printf " and %s.\n", list[nlist] | out }
+ for (i in list) delete list[i]
+ nlist = 0 }
+@ %def list nlist addlist printlist
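+The comma-and-``and'' layout is easy to check in isolation; the sketch below
+(a POSIX awk assumed) inlines the body of [[printlist]], with [[out]] piped
+to [[cat]] purely for the demonstration:
+
+  awk 'BEGIN { out = "cat"
+               list[1] = 7; list[2] = 9; list[3] = 12; nlist = 3
+               if (nlist == 1)      printf " %s.\n", list[1] | out
+               else if (nlist == 2) printf " %s and %s.\n", list[1], list[2] | out
+               else { for (i = 1; i < nlist; i++) printf " %s,", list[i] | out
+                      printf " and %s.\n", list[nlist] | out } }'
+  # prints " 7, 9, and 12."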
+Chunk names which appear in definitions and uses of chunks consist of text
+which may contain quoted code embedded between double square brackets. Quoted
+code in text chunks is handled by the [[@quote ... @endquote]] mechanism, but
+quoted code in chunk names must be handled explicitly by the back end. The
+function below does what is needed.
+<<functions>>=
+function convquote(s) { gsub(/\[\[|\]\]/, "", s); return s }
+@ %def convquote
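+A one-line check of the behaviour (a POSIX awk assumed):
+
+  awk 'function convquote(s) { gsub(/\[\[|\]\]/, "", s); return s }
+       BEGIN { print convquote("uses [[getline]] and [[close]]") }'
+  # prints "uses getline and close"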
+
+<<functions>>=
+function chunkname(name, number) {
+ if (number == 0)
+ return sprintf("<%s>", name)
+ else
+ return sprintf("<%s %d>", name, number)
+}
+@ %def chunkname
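+A sketch of the two cases (a POSIX awk assumed; the chunk name is invented):
+
+  awk 'function chunkname(name, number) {
+         if (number == 0) return sprintf("<%s>", name)
+         else             return sprintf("<%s %d>", name, number) }
+       BEGIN { print chunkname("main program", 0)
+               print chunkname("main program", 7) }'
+  # prints "<main program>" and "<main program 7>"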
+@
+<nowebchunks>
+<nowebindex>
diff --git a/web/noweb/contrib/rsc/rc/tohtml.nw b/web/noweb/contrib/rsc/rc/tohtml.nw
new file mode 100644
index 0000000000..46b1bc3e3b
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/tohtml.nw
@@ -0,0 +1,362 @@
+\documentstyle[noweb]{article}
+\pagestyle{noweb}
+\begin{document}
+\section{Converting {\tt noweb} markup to {\tt HTML}}
+This copyright applies both to the {\tt noweb} source and to the
+generated shell script.
+Thanks to Bill Trost for getting me started with an early version.
+<<copyright notice>>=
+# Copyright 1994 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+#
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+
+<<tohtml>>=
+#!/bin/rc
+<<copyright notice>>
+# Do not try to understand this file! Look at lib/tohtml.nw in the noweb source!
+
+delay=0
+raw=0
+localindex=0
+noindex=0
+for(i) {
+ switch($i) {
+ case -delay
+ delay=1
+ case -raw
+ raw=1
+ case -localindex
+ if(~ $noindex 0)
+ localindex=1
+ case -noindex
+ localindex=0
+ noindex=1
+ }
+}
+
+awk -f /sys/lib/noweb/tohtml.awk \
+	-v 'delay='$delay -v 'raw='$raw -v 'localindex='$localindex -v 'noindex='$noindex
+<<tohtml.awk>>=
+<<awk program for conversion to {\tt HTML}>>
+@
+The [[-raw]] option brackets HTML with [[\begin{rawhtml}]] and
+[[\end{rawhtml}]]; the purpose is to embed HTML in a {\LaTeX}
+document before converting the document with {\tt latex2html}.
+[[braw]] and [[eraw]] hold those delimiters (or else empty strings).
+<<awk program for conversion to {\tt HTML}>>=
+<<functions>>
+BEGIN { <<initialization>> }
+!doneraw { # do not do in BEGIN because not all awks assign variables yet
+ if (raw) { braw = "\\begin{rawhtml}"; eraw = "\\end{rawhtml}" }
+ else braw = eraw = ""
+ doneraw = 1
+}
+<<patterns>>
+END { print "" }
+@
+[[ecode]] is the marker used at the end of the current code chunk.
+If there is no cross-reference stuff at the end, we just use [[</pre>]];
+otherwise we terminate whatever environment is used for the cross-reference stuff.
+<<patterns>>=
+/^@begin code / { code = 1; printf "%s<pre>", braw; ecode = "</pre>" }
+/^@end code /   { code = 0; previscode = 1; <<dump pending cross-reference info>>
+ printf "%s%s", ecode, eraw
+ }
+@
+We want to try to avoid emitting paragraph elements when the
+preceding chunk is a code chunk, as tracked by [[previscode]].
+Also, if we do slip in a paragraph, we may use the {\LaTeX} style.
+<<patterns>>=
+/^@begin docs / { if (previscode) printf "%s", (raw ? "\\par" : "<p>")
+ previscode = text = 0
+ }
+@
+Sometimes it happens that a document-chunk anchor is put in a document chunk that
+contains no text. In that case, we put in a phony anchor at the end of the chunk
+so we won't lose the cross-reference.
+<<patterns>>=
+/^@end docs / { if (lastxreflabel != "")
+ printf "%s%s%s\n", braw, linklabel(lastxreflabel, "*"), er
+w
+ lastxreflabel = ""
+ }
+@
+Normally, if there's a pending anchor, we put it on the first available text line.
+<<patterns>>=
+/^@text / { line = substr($0, 7); text += length(line)
+ if (code) {
+ if (lastindexref != "" && line ~ /[^ \t]/) {
+ printf "%s", linkto(lastindexref, line)
+ lastindexref = ""
+ } else {
+ printf "%s", escapeSpecials(line)
+ }
+ } else if (quoting) {
+ if (line ~ /[^ \t]/) {
+ printf "%s", linklabelto(lastxreflabel, lastindexref,
+ escapeSpecials(line))
+ lastindexref = lastxreflabel = ""
+ } else {
+ printf "%s", escapeSpecials(line)
+ }
+ } else {
+ if (lastxreflabel != "" && line ~ /[^ \t]/) {
+ <<print docs anchor>>
+ lastxreflabel = ""
+ } else {
+ printf "%s", line
+ }
+ }
+ }
+@
+We anchor on the first nonblank character of the line, unless that's
+a \TeX\ control sequence or an SGML tag.
+In that case we insert a {\tt*} to anchor to.
+None of this crap would be necessary if HTML could anchor to empty text.
+<<print docs anchor>>=
+match(line, /^[ \t]*/)
+blanks = substr(line, RSTART, RLENGTH)
+line = substr(line, RSTART+RLENGTH)
+if (line ~ /^[{}\\<&]/) {
+ char = "*"
+} else {
+ char = substr(line, 1, 1)
+ line = substr(line, 2)
+}
+printf "%s%s%s%s%s", braw, blanks, linklabel(lastxreflabel, char), eraw, line
+if (lastxreflabel != "") defns_above[lastxreflabel] = 1
+<<patterns>>=
+/^@nl$/ { print "" }
+/^@defn / { thischunk = name = substr($0, 7)
+ if (lastxreflabel != "") defns_above[lastxreflabel] = 1
+                   writechunk(lastxreflabel, lastxrefref, "dfn", name, defns[name] "=")
+ <<clear [[lastxref*]]>>
+ defns[name] = "+"
+ }
+<<initialization>>=
+defns[0] = 0
+defns_above[0] = 0
+<<patterns>>=
+/^@use / { writechunk(lastxreflabel, lastxrefref, "i", substr($0, 6), "") }
+@
+Writing a chunk involves creating an anchor for it.
+<<functions>>=
+function writechunk(label, ref, tag, name, suffix) {
+ printf "%s",
+         linklabelto(label, ref, sgmlwrap(tag, "&lt;" convquotes(name) "&gt;" suffix))
+}
+@
+<<patterns>>=
+/^@quote$/ { quoting = 1 ; printf "%s<code>", braw }
+/^@endquote$/ { quoting = 0 ; printf "</code>%s", eraw }
+/^@file / { filename = substr($0, 7); <<clear [[lastxref*]]>> }
+/^@literal / { printf "%s", substr($0, 10) }
+/^@header html / { <<write HTML header>> }
+/^@trailer html$/ { <<write HTML trailer>> }
+@
+<<write HTML header>>=
+printf "<html><head><title>%s</title></head><body>", substr($0, 14)
+
+<<write HTML trailer>>=
+print "</body></html>"
+@
+<<patterns>>=
+/^@xref label / { lastxreflabel = substr($0, 13) }
+/^@xref ref / { lastxrefref = substr($0, 11) }
+/^@xref prevdef/ { pendingprev = substr($0, 15) }
+/^@xref nextdef/ { pendingnext = substr($0, 15) }
+/^@xref beginuses/ { useitems = "" }
+/^@xref useitem / { useitems = useitems " " substr($0, 15) }
+/^@xref enduses/ { useitemstab[thischunk] = useitems }
+/^@xref notused / { <<code-to-blockquote>>
+ printf "This code is written to a file (or else not used
+.<p>"
+ }
+<<initialization>>=
+useitemstab[0] = 0
+<<clear [[lastxref*]]>>=
+lastxreflabel = lastxrefref = ""
+<<dump pending cross-reference info>>=
+useitemscount = split(useitemstab[thischunk], a)
+if (pendingprev != "" || pendingnext != "" || useitemscount > 0) {
+ <<code-to-blockquote>>
+ <<write out uses with links>>
+ if (useitemscount > 0 && (pendingprev != "" || pendingnext != ""))
+ printf "; "
+ p = useitemscount > 0 ? "previous" : "Previous"
+ n = useitemscount > 0 ? "next" : "Next"
+ if (pendingprev != "")
+ if (pendingnext != "")
+ printf "%s and %s definitions", linkto(pendingprev, p), linkto(pendingne
+t, "next")
+ else
+ printf "%s definition", linkto(pendingprev, p)
+ else
+ if (pendingnext != "")
+ printf "%s definition", linkto(pendingnext, n)
+ pendingprev = pendingnext = ""
+ useitems = ""
+ print ".<p>"
+}
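+@ For a hypothetical chunk that is defined in more than one place and used
+twice, the goo above might produce, after the code:
+``Used above (1), below (2); previous and next definitions.''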
+<<write out uses with links>>=
+useprefix = "Used "
+for (j = 1; j <= useitemscount; j++) {
+ if (defns_above[a[j]] > 0)
+ usedir = "above"
+ else
+ usedir = "below"
+ printf "%s%s", useprefix, linkto(a[j], usedir (useitemscount > 1 ? " (" j ")
+ : ""))
+ useprefix = ", "
+}
+@
+The hack here is to put the supplementary information in a blockquote area
+after the code.
+<<code-to-blockquote>>=
+if (ecode == "</pre>") {
+ printf "</pre><blockquote>"
+ ecode = "</blockquote>"
+}
+@
+The HTML back end ignores [[@xref begindefs]], [[@xref defitem]], and
+[[@xref enddefs]]; it uses the [[nextdef]] and [[prevdef]] links instead.
+<<patterns>>=
+/^@xref (begindefs|defitem|enddefs)/ { }
+/^@xref beginchunks$/ { printf "%s<ul>\n", braw }
+/^@xref chunkbegin / { label = $3; name = substr($0, 19 + length(label))
+ printf "<li>"; comma = ": "; count = 0
+ writechunk("", label, "i", name, "")
+ }
+/^@xref chunkuse /   { printf "%s%s", comma, linkto(substr($0, 16), "U" ++count)
+ comma = ", "
+ }
+/^@xref chunkdefn /  { printf "%s%s", comma, linkto(substr($0, 17), "D" ++count)
+ comma = ", "
+ }
+/^@xref chunkend$/ { print "" }
+/^@xref endchunks$/ { printf "</ul>%s\n", eraw }
+<<patterns>>=
+/^@index beginindex$/ { if (!noindex) { printf "%s<ul>\n", braw } }
+/^@index entrybegin / { if (!noindex) {
+ label = $3; name = substr($0, 20 + length(label))
+ printf "<li>"; comma = ": "; count = 0
+ printf "%s",
+ linklabelto("NWI-" escapeSpecials(name), label, na
+e)
+
+ } }
+/^@index entryuse / { if (!noindex) {
+ printf "%s%s", comma, linkto(substr($0, 17), "U" ++co
+nt)
+ comma = ", "
+ } }
+/^@index entrydefn / { if (!noindex) {
+ printf "%s%s", comma, linkto(substr($0, 18), "D" ++
+ount)
+ comma = ", "
+ } }
+/^@index entryend$/ { if (!noindex) { print "" } }
+/^@index endindex$/ { if (!noindex) { printf "</ul>%s\n", eraw } }
+@
+The local identifier cross-reference doesn't show each use; it just shows
+the identifiers that are defined, with links to the full index.
+<<patterns>>=
+/^@index use/ { lastindexref = lastxrefref; lastxrefref = "" }
+/^@index defn/ { <<clear [[lastxref*]]>> }
+/^@index localdefn/ { <<clear [[lastxref*]]>> }
+/^@index nl/ { } # do nothing -- destroys line numbering
+/^@index begindefs/ { if (localindex) {
+ <<code-to-blockquote>>; printf "Defines"; comma = " "
+} }
+/^@index isused / { }
+/^@index defitem / { if (localindex) {
+ arg = substr($0, 16)
+ printf "%s%s", comma,
+ linkto("NWI-" escapeSpecials(arg), sgmlwrap("code", escapeSpecials(arg)
+)
+ comma = ", "
+} }
+/^@index enddefs/ { if (localindex) { printf " (links are to index).<p>\n" }
+}
+/^@index (beginuses|isdefined|useitem|enduses)/ { } # use local links
+@
+\subsection{Support functions}
+Here's all our anchor support goo.
+<<functions>>=
+function linklabelto(label, ref, contents, s) {
+ s = label != "" || ref != "" ? "<a" : ""
+ if (label != "") s = s " name=" image(label)
+ if (ref != "") s = s " href=" image("#" ref)
+ s = s (label != "" || ref != "" ? ">" : "")
+ s = s contents
+ s = s (label != "" || ref != "" ? "</a>" : "")
+ return s
+}
+
+function linkto(ref, contents) {
+ return linklabelto("", ref, contents)
+}
+
+function linklabel(label, contents) {
+ return linklabelto(label, "", contents)
+}
+@
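+For example (the label is invented), [[linkto("NWtoh-3", "next")]] returns
+[[<a href="#NWtoh-3">next</a>]], while [[linklabel("NWtoh-3", "*")]] returns
+[[<a name="NWtoh-3">*</a>]].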
+Another support function is used for wrapping tags around text:
+<<functions>>=
+function sgmlwrap(tag, s) {
+ return "<" tag ">" s "</" tag ">"
+}
+<<functions>>=
+function image(s) {
+ gsub(/"/, "\\\"", s)
+ return "\"" s "\""
+}
+@
+Lucky for us, {\tt HTML} has few special characters. Unlucky for us,
+we have to deal with each one separately. Nothing much to whine
+about, really.
+<<functions>>=
+function escapeSpecials (l) {
+ gsub(/&/, "\\&amp;", l)
+ gsub(/</, "\\&lt;", l)
+ gsub(/>/, "\\&gt;", l)
+ gsub(/"/, "\\&quot;", l)
+ return l
+}
+@
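+A quick standalone check of the escaping (a POSIX awk assumed):
+
+  awk 'function escapeSpecials(l) {
+         gsub(/&/, "\\&amp;", l);  gsub(/</, "\\&lt;", l)
+         gsub(/>/, "\\&gt;", l);   gsub(/"/, "\\&quot;", l)
+         return l }
+       BEGIN { print escapeSpecials("a < b && c > \"d\"") }'
+  # prints "a &lt; b &amp;&amp; c &gt; &quot;d&quot;"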
+A special function is used to implement {\tt noweb}'s quoting
+convention within chunk names.
+<<functions>>=
+function convquotes(s, r, i, line) {
+ r = ""
+ while (i = index(s, "[[")) {
+ r = r substr(s, 1, i-1) "<code>"
+ s = substr(s, i+2)
+ if (i = match(s, "\\]\\]+")) {
+ line = substr(s, 1, i-1+RLENGTH-2)
+      # line = escapeSpecials(line)   # destroys internal markup --- do not call
+ r = r line "</code>"
+ s = substr(s, i+RLENGTH)
+ } else {
+ r = r s "</code>"
+ s = ""
+ }
+ }
+ return r s
+}
+@
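+For instance, a chunk name written as {\tt uses [[getline]] here} comes back
+from [[convquotes]] as [[uses <code>getline</code> here]].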
+\end{document}
diff --git a/web/noweb/contrib/rsc/rc/totex.nw b/web/noweb/contrib/rsc/rc/totex.nw
new file mode 100644
index 0000000000..160a364341
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/totex.nw
@@ -0,0 +1,312 @@
+\section{Converting {\tt noweb} markup to {\TeX} markup}
+The copyright applies both to the {\tt noweb} source and to the
+generated shell script.
+<<copyright notice>>=
+# Copyright 1991 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+#
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+
+<<totex>>=
+#!/bin/rc
+<<copyright notice>>
+# Don't try to understand this file! Look at lib/totex.nw in the noweb source!
+
+delay=0
+noindex=0
+for(i) {
+ switch($i) {
+ case -delay
+ delay=1
+ case -noindex
+ noindex=1
+ case *
+ echo 'This can''t happen -- '$i' passed to totex' >[1=2]
+ exit cannothappen
+ }
+}
+<<invoke awk program using file>>
+@
+On a forgiving system, we make the awk program an argument:
+<<invoke awk program using parameter>>=
+awk '<<awk program for conversion to {\TeX}>>' -v 'delay='$delay -v 'noindex='$noindex
+@
+On an ugly system, we have to put it in a file.
+<<invoke awk program using file>>=
+awk -f /sys/lib/noweb/totex.awk -v 'delay='$delay -v 'noindex='$noindex
+<<totex.awk>>=
+<<awk program for conversion to {\TeX}>>
+@
+The markup carefully adds no newlines not already present in the input,
+so that the line numbers of the {\TeX} file will be the same as the
+numbers of the corresponding {\tt noweb} file.
+The variables are:
+\begin{description}
+\item[\tt code] Nonzero if converting a code chunk.
+\item[\tt quoting] Nonzero if quoting code in documentation.
+\item[\tt text] Number of characters written since start of
+ documentation chunk.
+\end{description}
+[[text]] is used to write [[\par]] if a newline appears at the
+beginning of a documentation chunk without any intervening text.
+This subtle trick preserves new-paragraph semantics without requiring
+the insertion of a blank line that would throw off the line count.
+<<awk program for conversion to {\TeX}>>=
+BEGIN { code=0 ; quoting=0 ; text=1; <<initialization>> }
+/^@begin code/ { code=1 ; printf "\\nwbegincode{%s}", substr($0, 13) }
+/^@end code/ { code=0 ; printf "\\nwendcode{}"; lastdefnlabel = "" }
+<<special patterns for document chunk 0>>
+/^@begin docs/     { text=0 ; printf "\\nwbegindocs{%s}", substr($0, 13) }
+/^@end docs/ { printf "\\nwenddocs{}" }
+/^@text / { line = substr($0, 7) ; text += length - 6
+ if (code) printf "%s", escape_brace_bslash(line)
+ else if (quoting) printf "%s", TeXliteral(line)
+ else printf "%s", line
+ }
+/^@nl$/ { if (!code) {<<print [[\nwdocspar]] if no text>>}
+ if (quoting) printf "\\nwnewline"
+ printf "\n"
+ }
+/^@defn /          { name = substr($0, 7); <<defn of [[name]], with cross-reference>> }
+/^@use / { printf "\\LA{}%s%s\\RA{}",
+ convquotes(substr($0, 6)), <<optional ref tag>>
+ }
+/^@quote$/ { quoting = 1 ; printf "{\\tt{}" }
+/^@endquote$/ { quoting = 0 ; printf "}" }
+/^@file / { filename = substr($0, 7); <<clear [[lastxref*]]>>
+ if (!delay) printf "\\nwfilename{%s}", filename
+ }
+/^@literal / { printf "%s", substr($0, 10) }
+/^@header latex / { <<write {\LaTeX} header>> }
+/^@header tex / { printf "\\input nwmac " }
+/^@trailer latex$/ { print "\\end{document}" }
+/^@trailer tex$/ { print "\\bye" }
+<<xref patterns>>
+<<index patterns>>
+END { printf "\n" }
+<<functions>>
+@
+<<print [[\nwdocspar]] if no text>>=
+if (text==0) printf "\\nwdocspar"
+text=1
+@
+Delaying markup is handled by special patterns for the first document chunk.
+Because several {\tt noweb} files can be marked up at once, there can be
+several document chunks numbered 0.
+The later ones are given no special treatment by the simple expedient of
+turning [[delay]] off after the first one.
+<<special patterns for document chunk 0>>=
+/^@begin docs 0$/ { if (delay) next }
+/^@end docs 0$/ { if (delay) {
+ printf "\\nwfilename{%s}", filename; delay=0; next
+ } }
+@
+<<defn of [[name]], with cross-reference>>=
+if (lastxreflabel != "") {
+ printf "\\sublabel{%s}", lastxreflabel
+ printf "\\nwmargintag{%s}", label2tag(lastxreflabel)
+}
+printf "\\moddef{%s%s}\\%sendmoddef", convquotes(name), <<optional ref tag>>,
+efns[name]
+lastdefnlabel = lastxreflabel
+<<clear [[lastxref*]]>>
+defns[name] = "plus"
+<<optional ref tag>>=
+(lastxrefref != "" ? ("~" label2tag(lastxrefref)) : "")
+<<functions>>=
+function label2tag(label) {
+ return "{\\nwtagstyle{}\\subpageref{" label "}}"
+}
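+@ For example (the label is invented), [[label2tag("NW1a2b-3")]] expands to
+[[{\nwtagstyle{}\subpageref{NW1a2b-3}}]].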
+<<initialization>>=
+defns[0] = 0
+@
+<<write {\LaTeX} header>>=
+printf "\\documentclass{article}\\usepackage{noweb}\\pagestyle{noweb}\\nowebop
+ions{%s}%s",
+ substr($0, 15), "\\begin{document}"
+@
+\subsection{Cross-reference and index support}
+<<xref patterns>>=
+/^@xref label / { lastxreflabel = substr($0, 13) }
+/^@xref ref / { lastxrefref = substr($0, 11) }
+/^@xref begindefs$/ { printf "\\nwalsodefined{" }
+/^@xref defitem / { printf "\\\\{%s}", substr($0, 15) }
+/^@xref enddefs$/ { printf "}" }
+/^@xref beginuses$/ { printf "\\nwused{" }
+/^@xref useitem / { printf "\\\\{%s}", substr($0, 15) }
+/^@xref enduses$/ { printf "}" }
+/^@xref notused / { printf "\\nwnotused{%s}", TeXliteral(substr($0, 15)) }
+/^@xref nextdef / { }
+/^@xref prevdef / { }
+<<clear [[lastxref*]]>>=
+lastxreflabel = lastxrefref = ""
+<<index patterns>>=
+/^@index nl$/ { print (code ? "\\eatline" : "%") }
+/^@index defn / {
+ if (!noindex) { arg = substr($0, 13); <<handle index defn of [[arg]]>
+ } }
+/^@index localdefn / {
+ if (!noindex) { arg = substr($0, 18); <<handle index defn of [[arg]]>
+ } }
+/^@index use / {
+ if (!noindex) { arg = substr($0, 12); <<handle index use of [[arg]]>>
+} }
+@
+Nothing is involved in handling definitions and uses unless there are
+cross-reference labels pending.
+An index definition or use has its own [[@xref label]] only if it's in
+documentation; if it's in code, we use the anchor label of the definition.
+(You don't have to know that to understand what happens here, but I thought
+you might like to.)
+<<handle index defn of [[arg]]>>=
+if (lastxreflabel != "") printf "\\nosublabel{%s}", lastxreflabel
+if (lastxrefref != "")
+ printf "\\nwindexdefn{%s}{%s}{%s}", TeXliteral(arg), indexlabel(arg), lastxr
+fref
+<<clear [[lastxref*]]>>
+@
+The {\LaTeX} back end ignores uses in code; they get bundled up by a previous filter
+(the cross-referencer) and handled elsewhere.
+<<handle index use of [[arg]]>>=
+if (!code) {
+ if (lastxreflabel != "") printf "\\protect\\nosublabel{%s}", lastxreflabel
+ if (lastxrefref != "")
+ printf "\\protect\\nwindexuse{%s}{%s}{%s}",
+ TeXliteral(arg), indexlabel(arg), lastxrefref
+}
+<<clear [[lastxref*]]>>
+@
+Here's the local identifier cross-reference that appears at the end of a code chunk.
+We guard everything with \LA{}SI\RA, as before.
+<<index patterns>>=
+/^@index begindefs$/ { if (!noindex) { printf "\\nwidentdefs{" } }
+/^@index isused / { if (!noindex) { } } # handled by latex
+/^@index defitem /   { if (!noindex) { i = substr($0,16); <<write [[i]] with [[\\]]>> } }
+/^@index enddefs$/ { if (!noindex) { printf "}" } }
+/^@index beginuses$/ { if (!noindex) { printf "\\nwidentuses{"; ucount = 0 } }
+/^@index isdefined / { if (!noindex) { } } # latex finds the definitions
+/^@index useitem /   { if (!noindex) { i = substr($0, 16); <<write [[i]] with [[\\]]>>
+ ulist[ucount++] = i
+ } }
+/^@index enduses$/ { if (!noindex) { printf "}"; <<write [[ulist]]>> } }
+<<initialization>>=
+ulist[0] = 0
+<<write [[i]] with [[\\]]>>=
+printf "\\\\{{%s}{%s}}", TeXliteral(i), indexlabel(i)
+<<write [[ulist]]>>=
+if (lastdefnlabel != "") {
+ for (j = 0; j < ucount; j++)
+ printf "\\nwindexuse{%s}{%s}{%s}",
+ TeXliteral(ulist[j]), indexlabel(ulist[j]), lastdefnlabel
+}
+@
+\subsubsection{The list of chunks and the index}
+The treatments of the list of chunks and the index are similar.
+Both use [[\nwixlogsorted]], which writes magic goo into the {\tt .aux} file.
+The real cross-referencing is done by the underlying {\LaTeX} code.
+<<xref patterns>>=
+/^@xref beginchunks$/ { }
+/^@xref chunkbegin /  { label = $3; name = substr($0, 19 + length(label))
+ printf "\\nwixlogsorted{c}{{%s}{%s}{",
+ convquotes(name), label
+ }
+/^@xref chunkuse / { printf "\\nwixu{%s}", substr($0, 16) }
+/^@xref chunkdefn / { printf "\\nwixd{%s}", substr($0, 17) }
+/^@xref chunkend$/ { print "}}%" }
+/^@xref endchunks$/ { }
+<<index patterns>>=
+/^@index beginindex$/ { if (!noindex) { } }
+/^@index entrybegin / { if (!noindex) { label = $3; name = substr($0, 20 + length(label))
+                          printf "\\nwixlogsorted{i}{{%s}{%s}}%%\n",
+                                 TeXliteral(name), indexlabel(name)
+ } }
+/^@index entryuse / { if (!noindex) { } } # handled by latex
+/^@index entrydefn / { if (!noindex) { } } # handled by latex
+/^@index entryend$/ { if (!noindex) { } }
+/^@index endindex$/ { if (!noindex) { } }
+
+@
+\subsection{Miscellany}
+I first insert a newline before the special characters, then change the
+newline to a backslash. I can't do the backslash directly because
+[[\&]] means a literal ampersand.
+<<functions>>=
+function escape_brace_bslash(line) {
+ gsub(/[\\{}]/, "\n&", line)
+ gsub(/\n/, "\\", line)
+ return line
+}
+@
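+A standalone check of the two-step trick (a POSIX awk assumed):
+
+  awk 'function escape_brace_bslash(line) {
+         gsub(/[\\{}]/, "\n&", line)
+         gsub(/\n/, "\\", line)
+         return line }
+       BEGIN { print escape_brace_bslash("s[i] = {a} \\ b") }'
+  # prints "s[i] = \{a\} \\ b"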
+A special function is used to implement {\tt noweb}'s quoting
+convention within chunk names.
+<<functions>>=
+function convquotes(s, r, i) {
+ r = ""
+ while (i = index(s, "[[")) {
+ r = r substr(s, 1, i-1) "\\code{}"
+ s = substr(s, i+2)
+ if (i = match(s, "\\]\\]+")) {
+ r = r TeXliteral(substr(s, 1, i-1+RLENGTH-2)) "\\edoc{}"
+ s = substr(s, i+RLENGTH)
+ } else {
+ r = r s "\\edoc{}"
+ s = ""
+ }
+ }
+ return r s
+}
+<<functions>>=
+function indexlabel(ident, l) {
+ l = ident
+ gsub(/:/, ":col", l) # must be first (colon)
+ gsub(/ /, ":sp", l) # space
+ gsub(/#/, ":has", l) # hash
+ gsub(/\$/, ":do", l) # dollar
+ gsub(/%/, ":pe", l) # percent
+ gsub(/&/, ":am", l) # ampersand
+	gsub(/,/, ":com", l)	# comma
+ gsub(/\\/, ":bs", l) # backslash
+ gsub(/\^/, ":hat", l) # hat
+ gsub(/_/, ":un", l) # underscore
+ gsub(/{/, ":lb", l) # left brace
+ gsub(/}/, ":rb", l) # right brace
+ gsub(/~/, ":ti", l) # tilde
+ return l
+}
+@ %def indexlabel
+@
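+For example, [[indexlabel("hash_table$size")]] yields
+[[hash:untable:dosize]]; because the colon substitution runs first, the
+colons introduced by the later substitutions are left alone.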
+Because latex2e uses [[`]] as an active character, I have to use
+decimal character codes for the specials.
+<<functions>>=
+function TeXliteral(arg) {
+ gsub(/\\/, "<\\char92>", arg)
+ gsub(/}/, "<\\char125}", arg)
+ gsub(/{/, "{\\char123}", arg)
+ gsub(/<\\char/, "{\\char", arg)
+ gsub(/{\\char92>/, "{\\char92}", arg)
+ gsub(/\$/, "{\\char36}", arg)
+ gsub(/&/, "{\\char38}", arg)
+ gsub(/#/, "{\\char35}", arg)
+ gsub(/\^/, "{\\char94}", arg)
+ gsub(/_/, "{\\char95}", arg)
+ gsub(/%/, "{\\char37}", arg)
+ gsub(/~/, "{\\char126}", arg)
+ gsub(/ /, "\\ ", arg)
+ return arg
+}
+@ %def TeXliteral
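+@ For example, [[TeXliteral("a_b{c}")]] comes out as
+[[a{\char95}b{\char123}c{\char125}]].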
+
diff --git a/web/noweb/contrib/rsc/rc/unmarkup.nw b/web/noweb/contrib/rsc/rc/unmarkup.nw
new file mode 100644
index 0000000000..9d04b3389d
--- /dev/null
+++ b/web/noweb/contrib/rsc/rc/unmarkup.nw
@@ -0,0 +1,53 @@
+<<unmarkup>>=
+#!/bin/rc
+#
+# Copyright 1991 by Norman Ramsey. All rights reserved.
+# See file /sys/src/cmd/noweb/COPYRIGHT for more information.
+#
+# Translated to rc by Russ Cox
+# bugs -> rsc@plan9.bell-labs.com
+#
+
+awk '
+BEGIN {
+ rcsid = "$Id: unmarkup,v 1.5 1999/02/16 21:11:54 nr Exp nr $"
+ rcsname = "$Name: v2_9a $"
+}
+/^@begin docs 0$/ { next }
+/^@begin docs / { printf "@ " }
+/^@begin code / { code = 1 }
+/^@end [cd]o[dc][es] / {
+ code = 0
+ if (dangling_text) printf "\n"
+ dangling_text = 0
+ printf "%s", deflines
+ if (defline != "") printf "%s\n", defline
+ deflines = "" ; defline = ""
+ }
+/^@defn / { printf "@<<%s>>=", substr($0,7) }
+/^@text $/ {next}
+/^@text / {
+ gsub("@<<", "@@<<");
+ gsub("@>>", "@@>>");
+ if (!(code || quoting)) {
+ gsub(/\[\[/, "@[[");
+ gsub(/\]\]/, "@]]");
+ }
+ printf "%s", substr($0,7)
+ dangling_text = 1
+}
+/^@quote$/ { printf("[["); dangling_text = 1; quoting = 1 }
+/^@endquote$/ { printf("]]"); dangling_text = 1; quoting = 0 }
+/^@nl$/ { printf "\n"; dangling_text = 0}
+
+/^@index defn / {
+ if (defline == "") defline = "@ %def"
+ defline = defline " " substr($0, 13)
+}
+/^@index nl$/ {
+ deflines = deflines defline "\n"
+ defline = ""
+}
+/^@use / { printf "@<<%s>>", substr($0,6)
+ dangling_text = 1
+ }' $* | sed 's/^@ $/@/'
diff --git a/web/noweb/contrib/ydirson/Makefile b/web/noweb/contrib/ydirson/Makefile
new file mode 100644
index 0000000000..a6a6dadc99
--- /dev/null
+++ b/web/noweb/contrib/ydirson/Makefile
@@ -0,0 +1,11 @@
+LIB=/dev/null # to be overridden
+
+FILTERS = guesslang inheritlang enscript-html
+
+# nothing to tangle or weave
+all:
+source:
+clean:
+
+install:
+ cp -p $(FILTERS) $(LIB)
diff --git a/web/noweb/contrib/ydirson/README b/web/noweb/contrib/ydirson/README
new file mode 100644
index 0000000000..54b5b43c71
--- /dev/null
+++ b/web/noweb/contrib/ydirson/README
@@ -0,0 +1,32 @@
+guesslang <list of root chunks>
+ Attempts to set the @language of given root chunks.
+ Note: Currently only inspects '#!' lines, not filename.
+inheritlang
+ Propagates @language to non-root chunks.
+enscript-html <enscript flags>
+ Uses enscript(1) to pretty-print chunks in HTML according to @language.
+ Most useful enscript flags include --color and --style=...
+
+ Note: Should ultimately work with all languages
+ supported by enscript, but needs extra info about how
+ to mangle @use clauses. If it complains "Don't know
+ how to mangle @use" for your language, you can edit
+ mangle_use() and demangle_use() to turn the @use
+	clause into a meaningful language clause, and then
+ convert back in @use form.
+
+ Note: Supports all highlighting styles of enscript
+ 1.6.4, but the regexp in demangle_use() may need to be
+ adapted in the future.
+
+Typical use looks like:
+ noweave -html \
+ -filter "guesslang ${NOWEBOUTSRC} | inheritlang | enscript-html" \
+ -x <your.nw>
+
+Be sure to specify -x or possibly other filters *after* the -filter,
+since the pretty-printer does not preserve the position of remaining
+directives (esp. @xref) within code chunks.
+
+Sample output is viewable at
+http://ydirson.free.fr/en/software/noweb/dh-kpatches.html
diff --git a/web/noweb/contrib/ydirson/email b/web/noweb/contrib/ydirson/email
new file mode 100644
index 0000000000..4d569e5f14
--- /dev/null
+++ b/web/noweb/contrib/ydirson/email
@@ -0,0 +1 @@
+ydirson@altern.org
diff --git a/web/noweb/contrib/ydirson/enscript-html b/web/noweb/contrib/ydirson/enscript-html
new file mode 100755
index 0000000000..1f179bef8a
--- /dev/null
+++ b/web/noweb/contrib/ydirson/enscript-html
@@ -0,0 +1,150 @@
+#!/usr/bin/perl -w
+
+# Noweb filter which calls enscript to prettyprint according to
+# @language directives (see guesslang and inheritlang filters to have
+# those directives automatically generated).
+
+# Copyright (c) 2003 by Yann Dirson <ydirson@altern.org>
+
+# Distribute under the terms of the GNU General Public Licence,
+# version 2.
+
+# FIXME:
+# - @use in code chunks is not supported for all @language's yet
+# => find a way to plug external data ?
+# - when a perl chunk ends with comment lines, we get enscript
+# trailers in woven output
+
+use strict;
+use File::Temp qw(tempfile);
+
+my $mangledID='__NOWEB__mangled__use__';
+sub mangle_use {
+ my ($usedchunk, $lang) = @_;
+
+ if (grep { $lang eq $_ } ('perl', 'c', 'c++') ) {
+ return "$mangledID (\"$usedchunk\")\n";
+ } else {
+ die "Don't know how to mangle \@use for language $lang";
+ }
+}
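+# For example (the chunk name is hypothetical), mangle_use('parse args', 'perl')
+# returns the line
+#   __NOWEB__mangled__use__ ("parse args")
+# which enscript pretty-prints as ordinary code; demangle_use() below
+# recognizes it afterwards and splits the result back into prefix,
+# chunk name, and suffix.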
+
+sub demangle_use {
+ my ($mangled, $lang) = @_;
+
+ if (grep { $lang eq $_ } ('perl', 'c', 'c++') ) {
+ $mangled =~ m|^(.*)$mangledID \((?:<B>)?(?:<FONT.*>)?\&quot;(.*)\&quot;(?:</FONT>)?(?:</B>)?\)(.*)$|;
+ return ($1, $2, $3);
+ } else {
+ die "Don't know how to demangle \@use for language $lang";
+ }
+}
+
+
+# Find out languages supported by the available version of enscript
+
+my @knownlangs;
+open (LANGS, 'enscript --help-highlight | grep ^Name: |') or
+ die "enscript --help-highlight failed: $!";
+while (<LANGS>) {
+ chomp;
+ @_ = split /\s+/;
+ push @knownlangs, $_[1];
+}
+
+
+while (<STDIN>) {
+ if (m/^\@begin code/) {
+
+ # we found a code chunk, now bufferize its contents until
+ # @language, or until @end if no @language is there. Store in
+	# $event which of these 2 events just occurred
+
+ my (@buffer, $event);
+ push @buffer, $_;
+ while (defined($_ = <STDIN>) and
+ not ((m/^\@end code / and $event = [1]) or
+ (m/^\@language (.*)/ and $event = [2, $1])) ) {
+ push @buffer, $_;
+ }
+ die "$0 hit EOF before seing \@end code or \@language" unless defined $event;
+
+ if ($event->[0] == 1) {
+ # we got @end first, everything read goes through unmodified
+
+ push @buffer, $_; # the @end line
+ # no declared language: dump @buffer
+ foreach (@buffer) { print; }
+ } else {
+ # we found @language...
+
+ # check that language is supported
+ my $lang = $event->[1];
+ if (grep { $_ eq $lang } @knownlangs ) {
+ # language is supported
+
+	    # (implicitly) drop @language from output, read remainder
+ my $chunknum;
+ while (defined($_ = <STDIN>) and not (m/^\@end code (.*)/ and $chunknum = $1)) {
+ push @buffer, $_;
+ }
+ # we don't want "@end code" in the buffer, delay its output
+ my $endcode = $_;
+
+ # transform the code chunk to be accepted by enscript, and
+ # store it into an auto-unlinked temporary file
+
+ my $tmp = new File::Temp();
+ # demangle @-directives into something suitable for enscript
+ foreach (@buffer) {
+ if (m/^\@text (.*)/ ) {
+ print $tmp $1;
+ } elsif (m/^\@nl$/) {
+ print $tmp "\n";
+ } elsif (m/^\@use (.*)/) {
+ print $tmp mangle_use ($1, $lang);
+ } else {
+ print;
+ }
+ }
+
+ # pipe, remangle
+ open PRETTY, "enscript --highlight=$lang --language=html " .
+ join (' ', @ARGV) .
+ " --silent -o - $tmp |" or
+ die "enscript failed: $!";
+ {
+ my $started = undef;
+ while (<PRETTY>) {
+ if (m|^<PRE>$|) {
+ $started = 1;
+ next;
+ }
+ if (m|^</PRE>$|) {
+ last;
+ }
+
+ if (m/$mangledID/) {
+ my ($prefix, $use, $suffix) = demangle_use ($_, $lang);
+ print "\@literal $prefix\n" if $prefix ne '';
+ print "\@use $use\n" ;
+ print "\@literal $suffix\n" if $suffix ne '';
+ next;
+ }
+ print "\@literal $_\@nl\n" if defined $started;
+ }
+ }
+ close PRETTY;
+ close $tmp; # auto-unlinked
+
+ print $endcode;
+ } else {
+ push @buffer, $_; # the @language line
+ # unsupported language: dump @buffer
+ foreach (@buffer) { print; }
+ }
+ }
+ } else {
+ print $_;
+ }
+}
diff --git a/web/noweb/contrib/ydirson/guesslang b/web/noweb/contrib/ydirson/guesslang
new file mode 100755
index 0000000000..d659e57ee2
--- /dev/null
+++ b/web/noweb/contrib/ydirson/guesslang
@@ -0,0 +1,57 @@
+#!/usr/bin/perl -w
+
+# Noweb filter which attempts to add @language directives to root
+# chunks named on command-line.
+
+# Copyright (c) 2003 by Yann Dirson <ydirson@altern.org>
+
+# Distribute under the terms of the GNU General Public Licence,
+# version 2.
+
+# TODO: Currently only looks at the 1st line of the chunk, expecting to
+# find a standard UN*X "#!" declaration. Still has to look at file
+# names/suffixes as well.
+
+use strict;
+use File::Basename;
+
+my @roots = @ARGV;
+
+my %interpreters = (
+ '[gn]awk' => 'awk',
+ 'perl[0-9.]*' => 'perl',
+ 'python[0-9.]*' => 'python',
+ '(wish|tclsh)[0-9.]*' => 'tcl',
+ '(|k|ba|z)sh' => 'sh',
+ );
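+# For example, a root chunk beginning "#!/usr/local/bin/gawk" matches
+# the '[gn]awk' pattern above and is tagged "@language awk", while
+# "#!/bin/zsh" falls under '(|k|ba|z)sh' and becomes "@language sh".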
+
+while (<STDIN>) {
+ if (m/^\@defn (.*)$/ and grep (/^$1$/, @roots)) {
+ my $language;
+ my $defn = $_;
+
+ # FIXME: should lookup a filename-based hash first (or second ?)
+
+ # memorize all lines until we can guess the language
+ my @buffer;
+ while (defined($_ = <STDIN>) and not m/^\@text/) {
+ push @buffer, $_;
+ }
+ # we have found the 1st @text line in the file, go see if it's a hash-bang
+ if (m/^\@text #!\s*(\/\S+)/) {
+ my $interp = basename ($1);
+
+ # lookup in our knowledge base
+ foreach my $re (keys %interpreters) {
+ $language=$interpreters{$re} if $interp =~ m/^$re$/;
+ }
+ # default to interpreter's name (FIXME: ugly, should be optional)
+ $language = $interp unless defined $language;
+ }
+ #
+ print $defn;
+ print "\@language $language\n" if defined $language;
+ print @buffer;
+ }
+ print $_;
+}
diff --git a/web/noweb/contrib/ydirson/inheritlang b/web/noweb/contrib/ydirson/inheritlang
new file mode 100755
index 0000000000..f71cce0a61
--- /dev/null
+++ b/web/noweb/contrib/ydirson/inheritlang
@@ -0,0 +1,76 @@
+#!/usr/bin/perl -w
+
+# Noweb filter to propagate @language directive from a chunk to used
+# chunks. Assumes that root chunks already have a @language directive
+# (see guesslang filter). Takes no argument.
+
+# Copyright (c) 2003 by Yann Dirson <ydirson@altern.org>
+
+# Distribute under the terms of the GNU General Public Licence,
+# version 2.
+
+use strict;
+
+my (%chunklangs, %chunkchildren);
+
+# FIXME: we could bufferize as needed, if we want to grow more complex
+my @data = <STDIN>;
+
+# register the chunk hierarchy
+{
+ my $thischunk = undef;
+ foreach (@data) {
+ if (m/^\@end code/) { # this one first to limit to code chunks
+ $thischunk = undef;
+ } elsif (m/^\@use (.*)$/) {
+ push @{$chunkchildren{$thischunk}}, $1 if defined $thischunk;
+ } elsif (m/^\@defn (.*)$/) {
+ $thischunk = $1;
+ } elsif (m/^\@language (.*)$/) {
+ die "\@language without a \@defn: $_" unless defined $thischunk;
+ $chunklangs{$thischunk} = $1;
+ }
+ }
+}
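+# For example (names hypothetical), if root chunk "script" is tagged
+# "@language sh" and uses chunk "helpers", the loop above leaves
+#   %chunklangs     = ('script' => 'sh')
+#   %chunkchildren  = ('script' => ['helpers'])
+# and propagate('script') then records 'sh' for 'helpers' as well.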
+
+# propagate to argument's children
+sub propagate {
+ my ($thischunk) = @_;
+ if (defined $chunklangs{$thischunk}) {
+ foreach my $child (@{$chunkchildren{$thischunk}}) {
+ if (defined $chunklangs{$child}) {
+ if ($chunklangs{$child} eq $chunklangs{$thischunk}) {
+ print STDERR "Notice: chunk used more than once: \`$child'\n";
+ } else {
+ die "Chunk cannot inherits languages \`$chunklangs{$child}' and " .
+ "\`$chunklangs{$thischunk}': \`$child'\n";
+ }
+ } else {
+ $chunklangs{$child} = $chunklangs{$thischunk};
+ }
+
+ # recurse
+ propagate($child);
+ }
+ } else {
+ print STDERR "Warning: could not infer language for \`$thischunk'\n";
+ }
+}
+
+# propagate from all known chunks
+foreach my $chunk (keys %chunklangs) {
+ propagate($chunk);
+}
+
+# output
+foreach (@data) {
+ if (m/^\@defn (.*)$/) {
+ print $_;
+ print "\@language $chunklangs{$1}\n" if (defined $chunklangs{$1})
+ } elsif (m/^\@language /) {
+ # Do not output twice. Since we already asserted consistency we can
+ # simply ignore this one.
+ } else {
+ print $_;
+ }
+}