author     Karl Berry <karl@freefriends.org>   2019-02-28 15:11:56 +0000
committer  Karl Berry <karl@freefriends.org>   2019-02-28 15:11:56 +0000
commit     cc17ad601696f5cf966c8a0b9d79144cd42828a6
tree       f6007b9b2f886d942f9b30b4797e8718001f65bd /Master/texmf-dist/doc/context/sources
parent     e298746bb92cdf7e87bc3b5b9bd973b6c429a47f
context for tl19
git-svn-id: svn://tug.org/texlive/trunk@50165 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Master/texmf-dist/doc/context/sources')
97 files changed, 22572 insertions, 7678 deletions
diff --git a/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-demo-rule.lua b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-demo-rule.lua new file mode 100644 index 00000000000..667d1e43c90 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-demo-rule.lua @@ -0,0 +1,47 @@ +local startactualtext = backends.codeinjections.startunicodetoactualtext +local stopactualtext = backends.codeinjections.stopunicodetoactualtext + +return function(specification) + local features = specification.features.normal + local name = features.original or "dejavu-serif" + local option = features.option -- we only support "line" + local size = specification.size -- always set + local detail = specification.detail -- e.g. default + if detail then + name = name .. "*" .. detail + end + local f, id = fonts.constructors.readanddefine(name,size) + if f then + f.properties.name = specification.name + f.properties.virtualized = true + f.fonts = { + { id = id }, + } + for s in string.gmatch("aeuioy",".") do + local n = utf.byte(s) + local c = f.characters[n] + if c then + local w = c.width or 0 + local h = c.height or 0 + local d = c.depth or 0 + if option == "line" then + f.characters[n].commands = { + { "special", "pdf:direct:" .. startactualtext(n) }, + { "rule", option == "line" and size/10, w }, + { "special", "pdf:direct:" .. stopactualtext() }, + } + else + f.characters[n].commands = { + { "special", "pdf:direct:" .. startactualtext(n) }, + { "down", d }, + { "rule", h + d, w }, + { "special", "pdf:direct:" .. stopactualtext() }, + } + end + else + -- probably a real bad font + end + end + end + return f +end diff --git a/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-extensions.tex b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-extensions.tex index ace0f771e87..afe6fd82386 100644 --- a/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-extensions.tex +++ b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-extensions.tex @@ -718,6 +718,8 @@ the comma also pulls the preceding character into the margin. \getbuffer [example] \stopplacefigure +\stopsubsubject + \stopsection \startsection[title=Expansion] @@ -822,6 +824,8 @@ You can see what happens in \in {figure} [expansion:visualized]. \getbuffer [example] \stopplacefigure +\stopsubsubject + \startsubsubject[title=Expansion and kerning] When we expand glyphs we also need to look at the font kerns between them. 
In the @@ -1763,7 +1767,6 @@ only when we touch a space: }, }, } - \stopluacode \stopbuffer @@ -2220,9 +2223,11 @@ fonts.handlers.otf.addfeature { { type = "pair", data = { - [<char|code>] = { [<char|code>] = { - false | { <value>, <value>, <value>, <value> }, - false | { <value>, <value>, <value>, <value> } + [<char|code>] = { + [<char|code>] = { + false | { <value>, <value>, <value>, <value> }, + false | { <value>, <value>, <value>, <value> } + } } } }, diff --git a/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-features.tex b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-features.tex index 73d1cd96df1..bf9f39385df 100644 --- a/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-features.tex +++ b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-features.tex @@ -2667,7 +2667,7 @@ We can enable and disable features any time in the input by using the \startbuffer \definefont [WeirdShapes] - [file:linlibertiner*default] + [file:libertiner*default] \definefontfeature [hist] diff --git a/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-tricks.tex b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-tricks.tex index b8903b5edee..65b210e2c48 100644 --- a/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-tricks.tex +++ b/Master/texmf-dist/doc/context/sources/general/fonts/fonts/fonts-tricks.tex @@ -386,3 +386,5 @@ and in the distribution. \stopsection \stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/about/about-metafun.tex b/Master/texmf-dist/doc/context/sources/general/manuals/about/about-metafun.tex index d289dd80303..bc298e9954d 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/about/about-metafun.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/about/about-metafun.tex @@ -727,15 +727,15 @@ We load the data in datasets: \startbuffer \startMPcode - lua.mp.datasets.load("foo","foo.tmp") ; - lua.mp.datasets.load("bar","bar.tmp") ; + lua.mp.datasets("load","foo","foo.tmp") ; + lua.mp.datasets("load","bar","bar.tmp") ; fill area - lua.mp.datasets.foo.Line() + lua.mp.datasets("foo","line") xysized (HSize/2-EmWidth,10ExHeight) withpen pencircle scaled .25ExHeight withcolor green/2 ; fill area - lua.mp.datasets.bar.Line() + lua.mp.datasets("bar","line") xysized (HSize/2-EmWidth,10ExHeight) shifted (HSize/2+EmWidth,0) withpen pencircle scaled .25ExHeight @@ -755,8 +755,8 @@ following is valid: \starttyping \startMPcode - lua.mp.datasets.load("foo.tmp") ; - lua.mp.datasets.load("bar.tmp") ; + lua.mp.datasets("load","foo.tmp") ; + lua.mp.datasets("load","bar.tmp") ; \stopMPcode \stoptyping diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/cld/cld-moreonfunctions.tex b/Master/texmf-dist/doc/context/sources/general/manuals/cld/cld-moreonfunctions.tex index fab22515eac..da3d6fe4653 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/cld/cld-moreonfunctions.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/cld/cld-moreonfunctions.tex @@ -173,24 +173,27 @@ example of why coding in \TEX\ makes sense as it looks more intuitive: \test{test 4 \test{test 5} test 6} \stoptyping -There is also another mechanism available. In the next example the second -argument is actually a string. 
- -\starttyping -local nested = context.nested - -context.test("test 8",nested.test("test 9"),"test 10") -\stoptyping - -There is a pitfall here: a nested context command needs to be flushed explicitly, -so in the case of: - -\starttyping -context.nested.test("test 9") -\stoptyping - -a string is created but nothing ends up at the \TEX\ end. Flushing is up to you. -Beware: \type {nested} only works with the regular \CONTEXT\ catcode regime. +The \type {context.nested} variant is now an alias to \type {context.delayed} and +no longer builds a string representation. + +% There is also another mechanism available. In the next example the second +% argument is actually a string. +% +% \starttyping +% local nested = context.nested +% +% context.test("test 8",nested.test("test 9"),"test 10") +% \stoptyping +% +% There is a pitfall here: a nested context command needs to be flushed explicitly, +% so in the case of: +% +% \starttyping +% context.nested.test("test 9") +% \stoptyping +% +% a string is created but nothing ends up at the \TEX\ end. Flushing is up to you. +% Beware: \type {nested} only works with the regular \CONTEXT\ catcode regime. \stopsection diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/graphics/graphics.tex b/Master/texmf-dist/doc/context/sources/general/manuals/graphics/graphics.tex new file mode 100644 index 00000000000..7c1000c4e15 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/graphics/graphics.tex @@ -0,0 +1,415 @@ +% language=uk + +\usemodule[article-basic] +\usemodule[abbreviations-smallcaps] +\usemodule[setups-basics] +\usemodule[scite] + +% \setupbodyfont +% [dejavu] + +\loadsetups[context-en] + +\definecolor + [mysetupscolora] + [a=1, + t=.25, + r=.5, + g=.5] + +\definecolor + [mysetupscolorb] + [a=1, + t=.25, + g=.25, + b=.25] + +\definetextbackground + [mysetups] + [before=\blank, + after=\blank, + topoffset=10pt, + leftoffset=10pt, + location=paragraph, + backgroundcolor=mysetupscolora, + backgroundcolor=mysetupscolorb, + frame=off] + +\startsetups xml:setups:start + \starttextbackground[mysetups] +\stopsetups + +\startsetups xml:setups:stop + \stoptextbackground +\stopsetups + +\starttext + +\startbuffer[image] + \startluacode + + local min, max, random = math.min, math.max, math.random + + -- kind of self-explaining: + + local xsize = 210 + local ysize = 297 + local colordepth = 1 + local usemask = true + local colorspace = "rgb" + + -- initialization: + + local bitmap = graphics.bitmaps.new(xsize,ysize,colorspace,colordepth,usemask) + + -- filling the bitmap: + + local data = bitmap.data + local mask = bitmap.mask + local minmask = 100 + local maxmask = 200 + + for i=1,ysize do + local d = data[i] + local m = mask[i] + for j=1,xsize do + d[j] = { i, max(i,j), j, min(i,j) } + m[j] = random(minmask,maxmask) + end + end + + -- flushing the lot: + + graphics.bitmaps.tocontext(bitmap) + + \stopluacode +\stopbuffer + +\definelayer + [page] + [width=\paperwidth, + height=\paperheight] + +\setlayer + [page] + {\scale + [width=\paperwidth] + {\ignorespaces + \getbuffer[image]% + \removeunwantedspaces}} + +\setlayer + [page] + [preset=rightbottom, + hoffset=10mm, + voffset=45mm] + {\scale + [width=.6\paperwidth] + {Graphics}} + +% \setlayer +% [page] +% [preset=righttop, +% hoffset=10mm, +% voffset=20mm] +% {\rotate{\scale +% [width=.3\paperheight] +% {\ConTeXt\ MkIV}}} + +\setlayer + [page] + [preset=rightbottom, + hoffset=10mm, + voffset=20mm] + {\scale + [width=.6\paperwidth] + {Hans Hagen}} + +\startpagemakeup + 
\flushlayer[page] + \vfill +\stoppagemakeup + +\startsubject[title=Introduction] + +This manual is about integrating graphics your document. Doing this is not really +that complex so this manual will be short. Because graphic inclusion is related +to the backend some options will discussed. It's typical one of these manuals +that can grow over time. + +\stopsubject + +\startsubject[title=Basic formats] + +In \TEX\ a graphic is not really known as graphic. The core task of the engine is +to turn input into typeset paragraphs. By the time that happens the input has +become a linked list of so called nodes: glyphs, kerns, glue, rules, boxes and a +couple of more items. But, when doing the job, \TEX\ is only interested in +dimensions. + +In traditional \TEX\ an image inclusion happens via the extension primitive +\type {\special}, so you can think of something: + +\starttyping +\vbox to 10cm {% + \hbox to 4cm {% + \special{image foo.png width 4cm height 10cm}% + \hss + }% +} +\stoptyping + +When typesetting \TEX\ sees a box and uses its dimensions. It doesn't care what +is inside. The special itself is just a so called whatsit that is not +interpreted. When the page is eventually shipped out, the \DVI|-|to|-|whatever +driver interprets the special's content and embeds the image. + +It will be clear that this will only work correctly when the image dimensions are +communicated. That can happen in real dimensions, but using scale factors is also +a variant. In the latter case one has to somehow determine the original dimensions +in order to calculate the scale factor. When you embed \EPS\ images, which is the +usual case in for instance \DVIPS, you can use \TEX\ macros to figure out the +(high res) boundingbox, but for bitmaps that often meant that some external +program had to do the analysis. + +It sounds complex but in practice this was all quite doable. I say \quote {was} +because nowadays most \TEX\ users use an engine like \PDFTEX\ that doesn't need +an external program for generating the final output format. As a consequence it +has built-in support for analyzing and including images. There are additional +primitives that analyze the image and additional ones that inject them. + +\starttyping +\pdfximage + {foo.png}% +\pdfrefximage + \pdflastximage + width 4cm + height 10cm +\relax +\stoptyping + +A difference with traditional \TEX\ is that one doesn't need to wrap them into a +box. This is easier on the user (not that it matters much as often a macro +package hides this) but complicates the engine because suddenly it has to check a +so called extension whatsit node (representing the image) for dimensions. + +Therefore in \LUATEX\ this model has been replaced by one where an image +internally is a special kind of rule, which in turn means that the code for +checking the whatsit could go away as rules are already taken into account. The +same is true for reuseable boxes (xforms in \PDF\ speak). + +\starttyping +\useimageresource + {foo.png}% +\saveimageresource + \lastsavedimageresourceindex + width 4cm + height 10cm +\relax +\stoptyping + +While \DVIPS\ supported \EPS\ images, \PDFTEX\ and \LUATEX\ natively support +\PNG, \JPG\ en \PDF\ inclusion. The easiest to support is \JPG\ because the PDF\ +format supports so called \JPG\ compression in its full form. The engine only has +to pass the image blob plus a bit of extra information. Analyzing the file for +resolution, dimensions and colorspace is relative easy: consult some tables that +have this info and store it. 
No special libraries are needed for this kind of +graphics. + +A bit more work is needed for \PDF\ images. A \PDF\ file is a collection of +(possibly compressed) objects. These objects can themselves refer to other +objects so basically we we have a tree of objects. This means that when we embed +a page from a \PDF\ file, we start with embedding the (content stream of the) +page object and then embed all the objects it refers to, which is a recursive +process because those objects themselves can refer to objects. In the process we +keep track of which objects are copied so that when we include another page we +don't copy duplicates. + +A dedicated library is used for opening the file, and looking for objects that +tell us the dimensions and fetching objects that we need to embed. In \PDFTEX\ +the poppler library is used, but in \LUATEX\ we have switched to pplib which is +specially made for this engine (by Pawel Jackowski) as a consequence of some +interchange that we had at the 2018 Bacho\TEX\ meeting. This change of library +gives us a greater independency and a much smaller code base. After all, we only +need access to \PDF\ files and its objects. + +One can naively think that \PNG\ inclusion is as easy as \JPG\ inclusion because +\PDF\ supports \PNG\ compression. Well, this is indeed true, but it only supports +so called \PNG\ filter based compression. The image blob in a \PNG\ file +describes pixels in rows and columns where each row has a filter byte telling how +that row is to be interpreted. Pixel information can be derived from preceding +pixels, pixels above it, or a combination. Also some averaging can come into +play. This way repetitive information can (for instance) become for instance a +sequence of zeros because no change in pixel values took place. And such a +sequence can be compressed very well which is why the whole blob is compressed +with zlib. + +In \PDF\ zlib compression can be applied to object streams so that bit is +covered. In addition a stream can be \PNG\ compressed, which means that it can +have filter bytes that need to be interpreted. But the \PNG\ can do more: the +image blob is actual split in chunks that need to be assembled. The image +information can be interlaced which means that the whole comes in 7~seperate +chunks thet get overlayed in increasing accuracy. Then there can be an image mask +part of the blob and that mask needs to be separated in \PDF\ (think of +transparency). Pixels can refer to a palette (grayscale or color) and pixels can +be codes in~1, 2, 4, 8 or 16~bits where color images can have 3~bytes. When +multiple pixels are packed into one byte they need to be expanded. + +This all means that embedding \PNG\ file can demand a conversion and when you +have to do that each run, it has a performance hit. Normally, in a print driven +workflow, one will have straightforward \PNG\ images: 1 byte or 3 bytes which no +mask and not interlaced. These can be transferred directly to the \PDF\ file. In +all other cases it probably makes sense to convert the images beforehand (to +simple \PNG\ or just \PDF). + +So, to summarize the above: a modern \TEX\ engine supports image inclusion +natively but for \PNG\ images you might need to convert them beforehand if +runtime matters and one has to run many times. 
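When deciding whether such a beforehand conversion pays off, it can help to inspect an image first. The following is a minimal sketch that uses the analysis commands documented later in this manual; the file name \type {cow.pdf} is just a stand-in:

\starttyping
\getfiguredimensions[cow.pdf]

natural size: \figurewidth\ by \figureheight, pages: \noffigurepages
\stoptyping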
+ +\stopsubject + +\startsubject[title=Inclusion] + +The command to include an image is: + +\showsetup{externalfigure} + +and its related settings are: + +\showsetup{setupexternalfigure} + +So you can say: + +\starttyping[option=TEX] +\externalfigure[cow.pdf][width=4cm] +\stoptyping + +The suffix is optional, which means that this will also work: + +\starttyping[option=TEX] +\externalfigure[cow][width=4cm] +\stoptyping + +\stopsubject + +\startsubject[title=Defining] + +{\em todo} + +\showsetup{useexternalfigure} +\showsetup{defineexternalfigure} +\showsetup{registerexternalfigure} + +\stopsubject + +\startsubject[title=Analyzing] + +{\em todo} + +\showsetup{getfiguredimensions} + +\showsetup{figurefilename} +\showsetup{figurefilepath} +\showsetup{figurefiletype} +\showsetup{figurefullname} +\showsetup{figureheight} +\showsetup{figurenaturalheight} +\showsetup{figurenaturalwidth} +\showsetup{figuresymbol} +\showsetup{figurewidth} + +\showsetup{noffigurepages} + +\stopsubject + +\startsubject[title=Collections] + +{\em todo} + +\showsetup{externalfigurecollectionmaxheight} +\showsetup{externalfigurecollectionmaxwidth} +\showsetup{externalfigurecollectionminheight} +\showsetup{externalfigurecollectionminwidth} +\showsetup{externalfigurecollectionparameter} +\showsetup{startexternalfigurecollection} + +\stopsubject + +\startsubject[title=Conversion] + +{\em todo} + +\stopsubject + +\startsubject[title=Figure databases] + +{\em todo} + +\showsetup{usefigurebase} + +\stopsubject + +\startsubject[title=Overlays] + +{\em todo} + +\showsetup{overlayfigure} +\showsetup{pagefigure} + +\stopsubject + +\startsubject[title=Scaling] + +Images are normally scaled proportionally but if needed you can give an +explicit height and width. The \type {\scale} command shares this property +and can be used to scale in the same way as \type {\externalfigure}. I will +illustrate this with an example. + +You can define your own bitmaps, like I did with the cover of this manual: + +\typebuffer[image][option=LUA] + +The actually inclusion of this image happened with: + +\starttyping[option=TEX] +\scale + [width=\paperwidth] + {\getbuffer[image]} +\stoptyping + +\stopsubject + +% \startsubject[title=The backend] +% +% Traditionally \TEX\ sees an image as just a box with dimensions and in \LUATEX\ +% it is actually a special kind of rule that carries information about what to +% inject in the final (\PDF) file. In regular \LUATEX\ the core formats \type +% {pdf}, \type {png}, \type {jpg} and \type {jp2} are dealt with by the backend but +% in \CONTEXT\ we can use \LUA\ instead. We might default to that method at some +% point but for now you need to enable that explicitly: +% +% \starttyping[option=TEX] +% \enabledirectrive[graphics.pdf.uselua] +% \enabledirectrive[graphics.jpg.uselua] +% \enabledirectrive[graphics.jp2.uselua] +% \enabledirectrive[graphics.png.uselua] +% \stoptyping +% +% All four can be enabled with: +% +% \starttyping[option=TEX] +% \enabledirectrive[graphics.uselua] +% \stoptyping +% +% Performance|-|wise only \PNG\ inclusion can be less efficient, but only when you +% use interlaced images or large images with masks. It makes no real sense in a +% professional workflow to use the (larger) interlaced images, and masks are seldom +% used at high resolutions, so in practice one will not really notice loss of +% performance. +% +% The advantage of this method is that we can provide more options, intercept bad +% images that make the backend abort and lessen the dependency on libraries. 
+% +% \stopsubject + +\stoptext diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-actions.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-actions.tex new file mode 100644 index 00000000000..a5caf2c0bdf --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-actions.tex @@ -0,0 +1,204 @@ +% language=uk + +\environment interaction-style + +\usemodule[references-identify] + +\startcomponent interaction-actions + +\startchapter[title=Actions] + +The reference mechanism not only deals with the more traditional cross +references, but also takes care of navigation, launching applications (although +that is often limited by viewers), running \JAVASCRIPT, etc. By integrating these +features in one mechanism, we limit the number of commands needed for hyperlinks, +menus and buttons. Normally such actions are driven by the \type {\goto} command, +but you can also use buttons: + +\starttyping +\goto[inner reference] +\goto[outer reference::] +\goto[outer reference::inner reference] +\stoptyping + +The inner reference is normally a user defined one, for instance a reference to a +named location like a chapter or figure. The outer reference refers to a file or +\URL and is normally defined at the document level and is accessed by the \type +{::}. By using symbolic names updating them becomes easier. + +There are also predefined references, like \type {nextpage} to go to the next +page or \type {forward} to cycle, \type {nextcontents} for the next level table +of contents in a linked list of such tables, etc. Some keywords are actually +shortcuts to actions that are delegated to the viewer. Here you need to keep in +mind that nowadays we're talking of \PDF\ viewers, but originally (\MKII) we also +supported \DVI\ viewers. A special class of references are the viewer control +ones, like \type {CloseDocument} or \type {PreviousJump}. They can be recognized +by their capitals. + +When we speak of a reference, we actually refer to a whole bunch of possible +references. We already mentioned inner and outer references, but special actions +are also possible. These are actually plugins. Examples are the \JAVASCRIPT\ and +\URL\ plugins. The interface evolved a bit over a few decades but most has been +there right from the start, which is why we keep it as is. Actually, there is not +that much new functionality added in \MKIV, although the implementation was +mostly rewritten. Here is a overview of the syntax, just to give you an idea. + +\starttyping +\goto[inner] +\goto[inner(foo,bar)] +\goto[inner{argument,argument}] +\goto[inner{argument}] +\goto[outer::] +\goto[outer::inner] +\goto[outer::special(operation{argument,argument})] +\goto[outer::special(operation)] +\goto[outer::special()] +\goto[outer::inner{argument}] +\goto[special(operation{argument})] +\goto[special(operation{argument,argument})] +\goto[special(operation)] +\goto[special(outer::operation)] +\goto[special(operation)] +\goto[special(operation(whatever))] +\goto[special(operation{argument,argument{whatever}})] +\goto[special(operation{argument{whatever}})] +\stoptyping + +There can be multiple actions, separated by a comma, think of: go to the page +with label \quote {foo} and start video \quote {bar}. + +\showsetup{goto} + +Examples of operations are \type {page}, \type {program}, \type {action}, \type +{url} and \type {JS}. 
\footnote {There are a few more operations but not all make +sense at the user level.} The \type {page} operation accepts a pagenumber as +well as relevant keywords. One can prefix a pagenumber by a file or \URL\ tag. +The \type {program} operation starts up a program. It is an example of an old +feature that has proven to be unstable, simply because viewers change behaviour +over time. + +\showsetup{definereference} + +The built|-|in actions are interfaces via shortcuts with camelcase names. In most +cases the name is a good indication of what to expect: + +\starttyping +\definereference [CloseDocument] [action(close)] +\definereference [ExitViewer] [action(exit)] +\definereference [FirstPage] [action(first)] +\definereference [LastPage] [action(last)] +\definereference [NextJump] [action(forward)] +\definereference [NextPage] [action(next)] +\definereference [PauseMovie] [action(pausemovie)] +\definereference [PauseSound] [action(pausesound)] +\definereference [PauseRendering] [action(pauserendering)] +\definereference [PreviousJump] [action(backward)] +\definereference [PreviousPage] [action(previous)] +\definereference [PrintDocument] [action(print)] +\definereference [SaveForm] [action(exportform)] +\definereference [LoadForm] [action(importform)] +\definereference [ResetForm] [action(resetform)] +\definereference [ResumeMovie] [action(resumemovie)] +\definereference [ResumeSound] [action(resumesound)] +\definereference [ResumeRendering] [action(resumerendering)] +\definereference [SaveDocument] [action(save)] +\definereference [SaveNamedDocument][action(savenamed)] +\definereference [OpenNamedDocument][action(opennamed)] +\definereference [SearchDocument] [action(search)] +\definereference [SearchAgain] [action(searchagain)] +\definereference [StartMovie] [action(startmovie)] +\definereference [StartSound] [action(startsound)] +\definereference [StartRendering] [action(startrendering)] +\definereference [StopMovie] [action(stopmovie)] +\definereference [StopSound] [action(stopsound)] +\definereference [StopRendering] [action(stoprendering)] +\definereference [SubmitForm] [action(submitform)] +\definereference [ToggleViewer] [action(toggle)] +\definereference [ViewerHelp] [action(help)] +\definereference [HideField] [action(hide)] +\definereference [ShowField] [action(show)] +\definereference [GotoPage] [action(gotopage)] +\definereference [Query] [action(query)] +\definereference [QueryAgain] [action(queryagain)] +\definereference [FitWidth] [action(fitwidth)] +\definereference [FitHeight] [action(fitheight)] +\definereference [ShowThumbs] [action(thumbnails)] +\definereference [ShowBookmarks] [action(bookmarks)] +\definereference [HideLayer] [action(hidelayer)] +\definereference [VideLayer] [action(videlayer)] +\definereference [ToggleLayer] [action(togglelayer)] +\stoptyping + +In the \type {java-imp-*.mkiv} files you will find examples of similar shortcuts, +for instance: + +\starttyping +\definereference [SetupStepper] [JS(SetupStepper{step,50})] +\definereference [ResetStepper] [JS(ResetStepper)] +\definereference [CheckStepper] [JS(CheckStepper{\StepCounter})] +\definereference [InvokeStepper] [JS(InvokeStepper)] +\stoptyping + +Other examples of redefined references are: + +\starttyping +\definereference [firstpage] [page(firstpage)] +\definereference [previouspage] [page(previouspage)] +\definereference [nextpage] [page(nextpage)] +\definereference [lastpage] [page(lastpage)] +\definereference [forward] [page(forward)] +\definereference [backward] [page(backward)] 
+\definereference [firstsubpage] [page(firstsubpage)] +\definereference [previoussubpage] [page(previoussubpage)] +\definereference [nextsubpage] [page(nextsubpage)] +\definereference [lastsubpage] [page(lastsubpage)] +\stoptyping + +Some of these actions expect arguments, for instance: + +\starttyping +\goto{start}[StartMovie{mymovie}] +\stoptyping + +You can load the module \type {references-identify} which gives you +a command: + +\startbuffer +\showreference[page(123),StartMovie{mymovie}] +\stopbuffer + +\typebuffer \getbuffer + +\startbuffer +\showreference[JS(Forget_Changes),CloseDocument] +\stopbuffer + +\typebuffer \getbuffer + +\startbuffer +\showreference[manual::contents] +\stopbuffer + +\typebuffer \getbuffer + +You should be careful with colons in references. This gives you an idea how +\CONTEXT\ interprets what you requested. + +\starttabulate[|T|p|] +\NC prefix:whatever \NC The \type {prefix} creates a namespace. When references are + resolved and there is no hit a lookup without prefix + takes place. \NC \NR +\NC document::whatever \NC The \type {document} is a symbolic reference to an external + resource. This is explained elsewhere. \NC \NR +\NC component:::whatever \NC The \type {component} is a symbolic reference to a component in + a product. References defined in such a component are loaded on + demand. \NC \NR +\stoptabulate + +% url(http://a,b.c) +% url(http://a,b.c) +% url(http://a.b.c) + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-annotations.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-annotations.tex new file mode 100644 index 00000000000..e61a0953d3d --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-annotations.tex @@ -0,0 +1,106 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-annotations + +\startchapter[title=Annotations] + +Before we discuss interactive features (in following chapter) a few words will be +spent on so called annotations. This term is used in Acrobat and is somewhat +confusing as hyperlinks conceptually are not really annotations while comments +are. The term relates to the way \PDF\ files can have added functionality. It +might help understand the following chapters better when you know what model is +used inside a \PDF. + +If you open a \PDF\ file in an editor you will finds lots of objects. Numbers are +an object, as are strings and booleans. Symbols (represented as strings with a +leading slash) are also objects. Objects can be collected in indexed tables +(arrays) and hash tables (dictionaries). Serialized arrays are bounded by square +brackets: + +\starttyping +[ (value1) (value2) ] +[ 1 2 3 ] +[ 1 2 (value1) (value3) true /foo ] +\stoptyping + +and hashes by double angle brackets: + +\starttyping +<< + /Key1 (value1) + /Key2 (value2) + /Key3 123 + /Key4 true + /Key5 [ 1 2 3 4 ] +>> +\stoptyping + +A \PDF\ file is a collection of objects: + +\starttyping +1 0 obj + ... +endobj +\stoptyping + +Instead of a number, string, boolean, array or dictionary an object can also be +a stream of bytes: + +\starttyping +1 0 obj << /Length 123 >> +stream +... 123 bytes ... +endstream +endobj +\stoptyping + +Objects can refer to each other and can be looked up via a so called xref table. 
+We refer to object with number one and revision zero as follows: + +\starttyping +/foo 1 0 R +\stoptyping + +When an object is updated it can be added to the end of the file and the version +number can get bumped. A program that does something with the \PDF\ is supposed +to do something clever with these numbers. More often the revision stays zero. + +A document is normally a sequence of pages. When a file is opened the cross +reference table is loaded and the so called catalog is looked up. From there +pages can be found. Pages have a content stream and can refer to resources, like +fonts, special color spaces, complex objects (xforms) and among other things +annotations. + +The page is rendered from the content stream but that stream has no information +about hyperlinks and such. The \type {/Link} annotation objects that implement +interactivity are independent and kind of layered over the rendered page. They +describe rectangular areas that a viewer can use to intercept mouse clicks. If +you want to see where the actions happens, search for \type {/Dest} and \type +{/Annot} in an (uncompressed) \PDF file. When you process a file with + +\starttyping +\nopdfcompression +\stoptyping + +you get an uncompressed file and with an appropriate editor you can see where +annotations end up. + +The main reason for mentioning these details is that when you are checking a file +for problems you need to look for annotation objects instead of the page stream. +You also need to realize that these annotations in no way interfere with the page +stream. They only exist because a viewer can use that information to add +functionality. Their reference point is the lower left corner of the page. This +is a conceptual difference with \HTML\ where hyperlinks are often in|-|line and +part of the content stream. Both approaches have their advantages and +disadvantages. From the perspective of quality typesetting it makes much sense to +have them overlayed and explicitly defined (in terms of dimensions) but users +will of course in most cases define them inline. This means that in order to make +the \PDF\ some analyzing and juggling has to take place. In \CONTEXT\ we always +have done as much as possible at the \TEX\ (therefore bypassing some limitations +in the engine) end in \MKIV\ we don't use the engine's features at all. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-attachments.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-attachments.tex new file mode 100644 index 00000000000..6ae9b63103c --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-attachments.tex @@ -0,0 +1,174 @@ +% language=uk + +% Written while intermittently watching those always positive "Walk off the Earth" +% videos to keep me in the mood. They make what's normally not my kind of music +% into something very interesting. I hope that this chapter is kind of interesting +% in the end too. + +\environment interaction-style + +\enabletrackers[attachments.anchors] + +\setupattachment + [location=inmargin] + +\startcomponent interaction-attachments + +\startchapter[title=Attachments] + +Attachments are (normally) embedded files that the reader can extract. A viewer +can decide to just show the content or call an associated program to deal with +the file (which one depends on the operating system). 
As with other annotations +they started out depicted by symbols but then browsers started showing them in +lists next to the displayed page. + +\startbuffer +\attachment + [attachment 1] + [file=interaction-attached-001.txt, + title=Just some text, + width=2em, + height=2em, + author=Hans, + subtitle=Plain text] + +\attachment + [attachment 2] + [file=cow.mp, + title=Just a graphic, + author=Hans, + subtitle=Some MetaPost, + method=hidden] +\stopbuffer + +\typebuffer \getbuffer + +These two attachments differ in one aspect: the second one is hidden, i.e. it has +no icon in the text. However, the attachment is in the file and is (often) +visible in the side bar. The symbol for the visible one is in the margin, which +is achieved with: + +\starttyping +\setupattachment + [location=inmargin] +\stoptyping + +You can use your own icon, for instance: + +\startbuffer +\startuniqueMPgraphic{cow}{height,s:color} + loadfigure "cow.mp" number 1 ; + refill currentpicture withcolor "\MPvar{color}" ; + currentpicture := currentpicture ysized \MPvar{height} ; +\stopuniqueMPgraphic + +\definesymbol + [attachment-normal] + [\uniqueMPgraphic{cow}{height=4ex,color=darkblue}] +\definesymbol + [attachment-down] + [\uniqueMPgraphic{cow}{height=4ex,color=darkyellow}] +\stopbuffer + +\typebuffer \getbuffer + +% \startattachment[hello][symbol={attachment-normal,attachment-down}] +% oeps +% \stopattachment + +\attachment + [symbol={attachment-normal,attachment-down}, + file=cow.pdf, + title=A cow, + author=Hans, + subtitle=graphic] + +This time we get a cow as icon and the cow is also embedded as image. When +writing this manual a click in Sumatra just opens the \PDF\ file, but when I +embed an mp3 file, a save-as window pops up. + +The previous examples directly injected the attachment using the \type +{\attachment} commands with the appropriate arguments. You can add titles, define +a name that will be used when the attachment is saved: + +\starttyping +\attachment[sometag][extra specs] +\attachment[test.tex] +\attachment[file=test.tex] +\attachment[file=test.tex,method=hidden] +\attachment[name=newname,file=test.tex] +\attachment[title=mytitle,name=newname,file=test.tex] +\stoptyping + +but there's also a more indirect way, for instance here we define some +attachments: + +\starttyping +\defineattachment[whatever-1][file=test.tex] +\defineattachment[whatever-2][file=test.tex,method=hidden] +\stoptyping + +that later can be called up with: + +\starttyping +\attachment[whatever-1][method=hidden] +\attachment[whatever-2] +\stoptyping + +where of course hidden is to be omitted when you want an icon. 
The commands that +are used to define and tune an instance are: + +\showsetup{defineattachment} + +\showsetup{setupattachment} + +There is one predefined instance: + +\starttyping +\defineattachment[attachment] +\stoptyping + +So we have: + +\showsetup{startattachment:instance} + +\showsetup{attachment:instance} + +Yet another level of abstraction can be achieved with: + +\showsetup{registerattachment} + +For example: + +\starttyping +\registerattachment + [sometag] + [name=fool.txt, + file=foo.txt, + title=Fool me, + subtitle=Not you, + author= Joker] +\stoptyping + +This is the \MKIV\ replacement for the \MKII\ method: + +\starttyping +\useattachment[test.tex] +\useattachment[whatever][test.tex] +\useattachment[whatever][newname][test.tex] +\useattachment[whatever][title][newname][test.tex] +\stoptyping + +or with all options: + +\starttyping +\useattachment[name][file][author][title][subtitle] +\stoptyping + +The \type {\use...} variant stays around for old times sake and just maps onto +\type {\registerattachment}, you can better use that one because it frees you +from remembering which arguments is what for. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-bookmarks.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-bookmarks.tex new file mode 100644 index 00000000000..04dd3f89d05 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-bookmarks.tex @@ -0,0 +1,75 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-bookmarks + +\startchapter[title=Bookmarks] + +Bookmarks are a sort of table of contents displayed by the viewer and as such +they take up extra space on the screen. You need to turn on interaction in +order to get bookmark data embedded in the document. + +\starttyping +\placebookmarks + [chapter,section,subsection,mylist] + [chapter] +\stoptyping + +A bookmark list is added to the document only when interaction is enabled. The +list in the first argument are bookmarked while the second argument specifies +what bookmark (sub)trees are opened. if you don't get what you expect, check your +document structure! Also, use the \type {\start}|-|\type {stop} alternatives. + +Bookmarks are taken from the section title, but you can overload the title +as follows: + +\starttyping +\startchapter[title=Foot,bookmark=food] + ... +\stopchapter +\stoptyping + +If you have a more complex typeset title you can also try: + +\starttyping +\enabledirectives[references.bookmarks.preroll] +\stoptyping + +From \MKII\ we inherit the option to overload the last set bookmark but the +previously mentioned approach is better. + +\starttyping +\chapter {the first chapter} +\bookmark {the first bookmark} +\stoptyping + +You can add entries to a bookmark list: + +\starttyping +\bookmark[mylist]{whatever} +\stoptyping + +This assumes that you have defined the list. + +\showsetup {bookmark} + +If you want to have the bookmark tab open when you start a document, you +can say: + +\starttyping +\setupinteractionscreen[option=bookmark] +\stoptyping + +There are only a few options that you can use. The \type {number} parameter can +be used to hide section numbers. The \type {sectionblock} parameter controls the +addition of section block entries, something that can be handy when you have +multiple section blocks with similar section titles. With \type {force} you force +an entry to the file, bypassing mechanisms that to be clever. 
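A minimal sketch of how these parameters can be combined follows; the values are only an illustration, see the setup definition below for what is actually accepted:

\starttyping
\setupbookmark
  [number=no,       % hide section numbers in the bookmark list
   sectionblock=no, % no extra section block entries
   force=yes]       % always write an entry, bypassing the normal checks
\stoptyping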
+ +\showsetup{setupbookmark} + +\stopchapter + +\stopcomponent + diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-buttons.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-buttons.tex new file mode 100644 index 00000000000..d083a61a774 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-buttons.tex @@ -0,0 +1,98 @@ +\environment interaction-style + +\startcomponent interaction-buttons + +\startchapter[title={Buttons}] + +There is not much to tell about buttons. They are clickable areas on the screen +that when clicked on bring you some location or invoke some action in the viewer, +for instance triggered by a \JAVASCRIPT. As usual with many commands, you can +define categories of buttons and set them up globally or per category. + +\showsetup{definebutton} + +\showsetup{setupbutton} + +The default button command is: + +\showsetup{button} + +Buttons are an example of a construct that builds upon \type {\framed} so the +keys that apply there also apply to buttons. You can enable or disable buttons +with the \type {state} parameter. As usual there are a \type {style} and \type +{color} parameters and an additional \type {contrastcolor} option for tuning the +color of a button which action let you stay on the same page. Actually, when you +do stay on the same page, the \type {samepage} parameter let you control if the +button should be empty, hidden or whatever. + +\starttabulate[|B|c|c|c|] +\NC \BC frame \BC text \BC shown \NC \NR +\NC \type {yes} \NC + \NC + \NC + \NC \NR % 0 +\NC \type {empty} \NC + \NC - \NC + \NC \NR % 1 +\NC \type {no} \NC - \NC - \NC + \NC \NR % 2 +\NC \type {none} \NC - \NC - \NC - \NC \NR % 3 +\NC \type {normal} \NC + \NC + \NC + \NC \NR % 1 +\NC \type {default} \NC + \NC + \NC + \NC \NR % 1 +\stoptabulate + +Here is an example of a button: + +\startbuffer +\button + [background=color,backgroundcolor=darkred, + style=bold,color=white, + framecolor=blue,rulethickness=2pt, + width=3cm,height=1.5cm] + {go to the next page} + [nexpage] +\stopbuffer + +\typebuffer + +This colorful button shows up as: + +\startlinecorrection +\getbuffer +\stoplinecorrection + +When you use interaction in presentations you might want to make the page +and|/|or text area active. Here is an example. + +\starttyping +\defineoverlay + [PrevPage] + [\overlaybutton{PrevPage}] + +\setupbackgrounds + [page] + [background=PrevPage] + +\setuptexttexts + [\overlaybutton{NextPage}] +\stoptyping + +We provide two variants: the normal one with square brackets, but also a more +direct one that accepts curly braces, which is handy when you pass an overlay +button as argument. + +\showsetup {overlaybutton} +\showsetup {overlaybutton:direct} + +The difference in usage is shown here: + +\starttyping +\setuptexttexts [\overlaybutton{NextPage}] +\setuptexttexts[{\overlaybutton[NextPage]}] +\stoptyping + +An overlay button adapts its size to the current overlay so you don't need to +worry about passing dimensions. + +It is possible to define more complex buttons, like roll|-|over buttons or +buttons that change appearance when you clock on them. These are more resource +hungry and also depend on the viewer. These will discussed in the chapter about +widgets. 
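To round off, a button can also trigger one of the predefined viewer actions from the chapter on actions; a small sketch, with arbitrary dimensions and colors:

\starttyping
\button
  [width=3cm,height=1cm,
   background=color,backgroundcolor=darkred,
   style=bold,color=white]
  {close this document}
  [CloseDocument]
\stoptyping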
+ +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-comments.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-comments.tex new file mode 100644 index 00000000000..b19894d577d --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-comments.tex @@ -0,0 +1,194 @@ +\environment interaction-style + +\enabletrackers[comments.anchors] + +\startcomponent interaction-comments + +\setupcomment + [location=inmargin] + +\startchapter[title={Comments}] + +Many \PDF\ viewers support text annotations. These are small notes that can be +popped up. In \CONTEXT\ we call them comments, because often that's what they are +used for. Comments evolved from simple ones using a limited encoding into more +advanced ones with representations. A comment looks like: + +\startbuffer +\startcomment + Hello beautiful world! +\stopcomment +\stopbuffer + +\typebuffer + +\getbuffer + +When you open a document with comment you will likely see some symbol depicting +it. But, it's one of those features that is viewer dependent so when it looks odd +or unexpected, check in \ACROBAT\ first. The position and size can differ per +viewer and when you zoom in the size can either stay the same or scale. The +viewer can show the pop up text at the same location or someplace else. Although +in principle there is control over this, my experience is that viewers (also +Acrobat) keep changing this (not always for the best). Just assume the worst: it +will never look good and although for a while we kept up with viewers, the +inconsistency (and accumulated waste of time) led us to the current minimalistic +approach. + +By default, in \CONTEXT\ comments are placed at the spot a bit raised. In this +document we put them in the margin, by saying: + +\starttyping +\setupcomment + [location=inmargin] +\stoptyping + +Comments can have titles and properties but not all viewers support properties. +Contrary to other environments, the first argument is not a category but a title. +This because we are compatible with \MKII. + +\startbuffer +\startcomment[french] + In France they use «angle bracket glyphs» in subsentences. +\stopcomment + +\startcomment[accents][color=darkgreen] + You can used an àçéñţêð character too. +\stopcomment +\stopbuffer + +\typebuffer \getbuffer + +And normally empty lines are also supported (again this can differ per viewer): + +\startbuffer +\startcomment[lines][color=darkblue] + How about an + + empty line? +\stopcomment +\stopbuffer + +\typebuffer \getbuffer + +As we can see here, comments are sort of stacked. These examples also show that +we can pass an optional title and set up some characteristics. An inline comment +is defined with \type {\comment}: + +\startbuffer +\comment {How I hate those notes spoiling the layout.} Maybe some day +I can convince myself to add some features \comment {Think of comment classes +that can be turned on and off and get their own colors.} related to version +control. +\stopbuffer + +\typebuffer + +\inlinebuffer\ Comments hide part of the text and thereby are to be used with +care. Until now I never used them. Anyhow, from now on, one can happily use: + +You can use other symbols than the default, and a couple are predefined in +the standard: {\tt \cldcontext {table.concat (lpdf.commentsymbols(), ", ")}}. + +\startbuffer +\startcomment[symbol=Help] + Do we want this kind of rubish? 
+\stopcomment +\stopbuffer + +You can also use your own symbols: + +% \definesymbol [comment-normal][{\externalfigure[cow.pdf]}] +% \definesymbol [comment-down] [{\externalfigure[cow.pdf]}] +% +% \unexpanded\def\CowSymbol#1#2% +% {\scale +% [height=#1] +% {\startMPcode +% loadfigure "cow.mp" number 1 ; +% refill currentpicture withcolor #2 ; +% \stopMPcode}} +% +% \definesymbol [comment-normal] [\CowSymbol{4ex}{darkred}] +% \definesymbol [comment-down] [\CowSymbol{4ex}{darkgreen}] + +\startbuffer +\startuniqueMPgraphic{cow}{height,s:color} + loadfigure "cow.mp" number 1 ; + refill currentpicture withcolor "\MPvar{color}" ; + currentpicture := currentpicture ysized \MPvar{height} ; +\stopuniqueMPgraphic + +\definesymbol + [comment-normal] + [\uniqueMPgraphic{cow}{height=4ex,color=darkred}] +\definesymbol + [comment-down] + [\uniqueMPgraphic{cow}{height=4ex,color=darkgreen}] + +\stopbuffer + +\typebuffer \getbuffer + +\startbuffer +\startcomment[hello][symbol={comment-normal,comment-down}] + oeps +\stopcomment +\stopbuffer + +\typebuffer \getbuffer + +Again the way this shows up depends on the viewer capabilities so there might be +a fallback on the normal comment symbol. You can influence the size of the image +(icon): + +\startbuffer +\startcomment[hello] + [symbol={comment-normal,comment-down},width=\marginwidth] + oeps +\stopcomment +\stopbuffer + +\typebuffer \getbuffer + +There are some options that you can use for finetuning the comments. + +\showsetup{setupcomment} + +A new instance is defined with: + +\showsetup{definecomment} + +The default instance is predefined by + +\starttyping +\definecomment[comment] +\stoptyping + +You can define your own instances: + +\starttyping +\definecomment[mycomment] +\stoptyping + +The generated commands have a syntax like: + +\showsetup{startcomment:instance} + +and: + +\showsetup{comment:instance} + +Most fields explain themselves. With \type {state} you can disable this feature. +Comments can be hidden in which there is no icon shown. The \type {nx} and \type +{ny} fields determine the size of the popup. + +In case you wonder where the yellow backgrounds come from, here is the trick: + +\starttyping +\enabletrackers[comments.anchors] +\stoptyping + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-contents.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-contents.tex new file mode 100644 index 00000000000..0b59c1cbb00 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-contents.tex @@ -0,0 +1,11 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-contents + +\starttitle[title=Contents] + \placelist[chapter][criterium=text] +\stoptitle + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-enabling.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-enabling.tex new file mode 100644 index 00000000000..8e31ccbe6f7 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-enabling.tex @@ -0,0 +1,117 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-enabling + +\startchapter[title=Enabling] + +Interaction is turned off by default. Of course cross referencing +work without interaction but there are no hyperlinks. 
You turn on interaction with the \type {\setupinteraction} command:

\showsetup {setupinteraction}

The \type {state} key is the switch you need to use. In addition you might want
to set up the style and color.

\starttyping
\setupinteraction
  [state=start,
   style=,
   color=,
   contrastcolor=]
\stoptyping

This is the least intrusive way to get interaction in your document. By default
the style is bold and the \type {color} defaults to green. The \type
{contrastcolor} is used when a hyperlink refers to the same page and defaults to
red. A neutral setup makes sense because nowadays the reader kind of knows what
can be clicked on.

The \type {title}, \type {subtitle}, \type {author}, \type {date} and \type
{keyword} parameters are passed to the document and will show up when you request
document information.

The \type {openaction} parameter can for instance be used to start at a specific
page, while the \type {closeaction} can be used to trigger a \JAVASCRIPT\ cleanup
script. The \type {openpageaction} and \type {closepageaction} can for instance
initialize and reset states, something we do in some presentation styles.

The \type {click} parameter controls how a viewer responds to pressing a mouse
button on an annotation: highlight or not. The \type {display} parameter
determines if a cross document link opens in the current window.

The \type {menu} parameter is a quick way to disable menus, of which there can be
many: at each side of the page, stacked or not, etc. The \type {symbolset}
determines the look and feel of symbols used in for instance menus, buttons and
status bars.

The \type {page} parameter is a bit special, and its function is an inheritance
from the early days. Some \DVI\ and \PDF\ viewers supported named destinations,
others only page references. This parameter can be used to force one or the
other. There was a time when there was a limit on the number of named references,
so using page references was the only option. \footnote {We're talking of 1995
when we made documents of many thousands of pages with tens of thousands of
hyperlinks, cross linked tables of contents, registers, active graphics, etc.\
Think of dictionaries used in very specific projects, or quality assurance
manuals.}

Personally I consider an electronic document an entity to be seen full screen on
a dedicated device. However, some users prefer the target of a link to fit the
width of the screen or the like. The \type {focus} parameter can (within
reasonable bounds) provide this. The \type {focusoffset} is then used to keep
things visually convenient.

The \type {height} and \type {depth} parameters are sort of special and probably
never used. When we go back in time, to when we started adding interactivity,
there were a few issues that needed to be dealt with:

\startitemize[packed]
\startitem
    We need to make sure that there is something to click on, so we add some
    offset when needed.
\stopitem
\startitem
    We need to handle nested hyperlinks, which is why \CONTEXT\ didn't use the
    link features of for instance \PDFTEX\ but built its own.
\stopitem
\startitem
    Hyperlinks should break properly across lines without side effects, again a
    reason for bypassing some of the \TEX\ engine's behaviour.
\stopitem
\startitem
    We have to make sure that there is at least a consistent height and depth
    of hyperlinks. Tight links with viewer supplied bounding boxes to click on
    just look really bad! So, we had to do better.
\stopitem
\stopitemize

Normally the two mentioned parameters are not used. However, their values will
kick in when we say \type {\setfalse \locationstrut}, in which case the given
height and depth will be used. Some advice: don't mess with this. We only have
this because it permits special effects.

If you want to see what the targets (destinations) and sources (references) of
links are, you can say:

\starttyping
\enabletrackers[nodes.references,nodes.destinations]
\stoptyping

The \type {fieldlayer} parameter can be used to set a so called viewer layer, so
that you can hide the fields (given that a viewer supports that). The \type
{calculate} parameter can associate a calculator (initializer) with the fields.

You can create an interaction environment with:

\showsetup {defineinteraction}

which then can be used with:

\showsetup {startinteraction}

\stopchapter

\stopcomponent

diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-hyperlinks.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-hyperlinks.tex
new file mode 100644
index 00000000000..f204d502e18
--- /dev/null
+++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-hyperlinks.tex
@@ -0,0 +1,32 @@
% language=uk

\environment interaction-style

\startcomponent interaction-hyperlinks

\startchapter[title=Hyperlinks]

A hyperlink is something that you click on and that brings you to another spot
in the document. The regular links are a side effect of references. The most
commonly used references are:

\starttyping
\in{figure}[fig:foo]
\at{page}[fig:foo]
\about[fig:foo]
\stoptyping

The first argument is what gets prepended to the number and the whole can be
clicked on. Here we create a namespace with \type {fig:}. This can be somewhat
confusing with respect to prefixes but normally the resolver does a direct
lookup first.

\showsetup{at}
\showsetup{in}
\showsetup{about}

\stopchapter

\stopcomponent

diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-importing.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-importing.tex
new file mode 100644
index 00000000000..8c36128f6b5
--- /dev/null
+++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-importing.tex
@@ -0,0 +1,73 @@
% language=uk

\environment interaction-style

\startcomponent interaction-importing

\startchapter[title=Importing]

This is a very short chapter that deals with external figures. Normally an image
is a graphic with possibly some text. There are however workflows where one
includes pages from other documents. Such documents can contain cross references,
bookmarks, comments and|/|or fields. Normally annotations of any kind are ignored
and for good reason: they assume the whole document to be there, not just one or
a few pages. Merging references for instance is a source of clashes, not only for
named ones but also for page references.

But when you {\em know} what you're doing, as for instance Taco (who requested
this feature) does, there is a way to merge annotations. This is controlled by
the interaction keys in \type {externalfigure}:

\starttyping
\externalfigure[somedoc][page=1,interaction=yes]
\externalfigure[somedoc][page=2,interaction={reference,bookmark}]
\stoptyping

However, only references and bookmarks are officially supported! The other
annotations are possible but the code is experimental and will be finished
when we find a good reason for it.

\starttabulate[|B|p|]
\FL
\NC \type {reference} \NC named and page references and urls \NC \NR
\NC \type {comment} \NC comments if possible with relevant icon \NC \NR
\NC \type {bookmark} \NC text bookmarks that refer to pages \NC \NR
\NC \type {field} \NC widgets but only within reason \NC \NR
\NC \type {layer} \NC viewer layers \NC \NR
\ML
\NC \type {yes} \NC named and page references, urls and bookmarks \NC \NR
\NC \type {all} \NC all annotations \NC \NR
\LL
\stoptabulate

If things don't work out well, imagine for a while what is involved in supporting
this: analyzing a page from a document, remapping the annotations onto some
\CONTEXT\ mechanism, making sure that we don't get clashes, and keeping overhead
acceptable.

Because this is a somewhat tricky feature, tracing can help you to identify
problems: \typ {figures.merging}, \typ {figures.links}, \typ {figures.comments},
\typ {figures.fields} and \typ {figures.outlines}.

Another complication when including pages can be the presence of so called marked
content in the page stream. There is experimental support for removing those, but
right now (2018) you need to enable this explicitly:

\starttyping
\enabledirectives[graphics.pdf.uselua]
\enabledirectives[graphics.pdf.stripmarked]
%enabledirectives[graphics.pdf.recompress]
\stoptyping

This will delegate inclusion from the backend to \LUA. This might become the
default as it is just as efficient as using the backend. That way we can filter
the content stream. \footnote {We might add a callback to \LUATEX\ for filtering
the content stream (not hard to do, but post version 1.10).}

\stopchapter

\stopcomponent

diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-introduction.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-introduction.tex
new file mode 100644
index 00000000000..a6f44b05b1c
--- /dev/null
+++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-introduction.tex
@@ -0,0 +1,58 @@
% language=uk

\environment interaction-style

\startcomponent interaction-introduction

\startchapter[title=Introduction]

This document introduces the cross reference mechanism, viewer control, fill||in
fields, \JAVASCRIPT\ support, comments, attachments and more. It is a rewrite of
the \MKII\ widgets manual. There is (always) more available than discussed in
manuals, so if you miss something, take a look at the test suite or, when you're
brave, peek into the source code, as there can be examples there.

Interactivity has always been available in \CONTEXT\ and in fact it was one of
the reasons for writing it. In for instance the YandY \WINDOWS\ previewer, one
could have hyperlinks and we used that for a while when checking documents. Later
Acrobat showed up and \PDF\ stepwise added interactive features that we supported
right from the start. Unfortunately there is a viewer dependency and the
documentation of \PDF\ lagged behind, so solutions based on trial and error could
stop working in a follow|-|up version of \PDF. Some features disappeared or
became so limited that they effectively became useless. Especially multi||media
has a reputation of unreliability.
Because open source viewers never really catched up +(at least not in this area) the momentum was lost to make sure that documents +could have audio and video embedded in reliable ways. Even forms and basic +\JAVASCRIPT\ control of for instance layers is often missing. + +That said, we do support a lot but can support more when it makes sense. Deep +down in \CONTEXT\ we always had the mechanisms to deal with this, so extensions +are not that hard to program. Somehow we thought that publishers would like these +features but that never really was the case, so there was no pressure from that +end. Most features are user driven or just there because at some point we wanted +to make some fancy presentation. In fact, the \type {s-present-*} files provide +examples of interactivity. + +The original \PDF\ reference was a couple of hundred pages and looked quite nice. +A later print has many more pages and still looks ok, but nowadays we have to do +with a \PDF\ document. If you want to see what \PDF\ supports you can study this +(now about) 750 page standard. It is, being an \ISO\ standard, not public but +you can probably find a (maybe older) copy someplace on the web. + +When reading this manual you need to keep in mind that we assume that you design +a decent layout and when you make something for an electronic medium, we hope +that you pay attention to the way you can enhance accessibility. + +If you miss something here, don't hesitate to ask for clarification, or even +better, provide an example that we then can use to discuss (an aspect of) some +mechanism. + +\stopchapter + +\stopcomponent + +% A nice double page e-ink device can be seen at: +% +% https://www.youtube.com/watch?v=QdOXCn1vvzI : +% +% vkgoeswild: Playing Pink Floyds Great Gig in the Sky on Imperial by Bösendorfer diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-javascript.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-javascript.tex new file mode 100644 index 00000000000..64e78446493 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-javascript.tex @@ -0,0 +1,136 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-javascript + +\startchapter[title={JavaScript}] + +Annotations can be controlled with \JAVASCRIPT\ but it really depends on the +viewer if it works out well. Using these scripts is a multi||step process where +common functions and data structures can be shared and collected in preambles: + +\starttyping +\startJSpreamble {name} + MyCounter = 0 ; +\stopJSpreamble +\stoptyping + +The more action oriented scripts are defined as: + +\starttyping +\startJScode {increment} + MyCounter = MyCounter + 1 ; // or: ++MyCounter ; +\stopJScode +\stoptyping + +This script is executed with: + +\starttyping +\goto {advance by one} [JS(increment)] +\stoptyping + +Nicer is to define a function: + +\starttyping +\startJSpreamble {helpers} used now + function Increment(n) { + MyCounter = MyCounter + n ; + } +\stopJSpreamble +\stoptyping + +and then say: + +\starttyping +\goto {advance by one} [JS(Increment{5})] +\stoptyping + +The distribution contains a collection of scripts that can be preloaded and used +when needed. You can recognize the files by the \type {java-imp-} prefix. 
To +prevent all preambles ending up in the \PDF\ file, we can say: + +\starttyping +\startJSpreamble {something} used later +\stopJSpreamble +\stoptyping + +We already saw that one can also say \type {used now} and there's also a way +to filter specific preambles on usage: + +\starttyping +\startJScode {mything} uses {something} +\stopJScode +\stoptyping + +One should be aware of the fact that there is no decent way to check if every +script is all right! Even worse, the \JAVASCRIPT\ interpreter currently used in +the \ACROBAT\ tools is not reentrant, and breaks down on typos + +The full repertoire of commands is: + +\showsetup{startJScode} + +\showsetup{startJSpreamble} + +\showsetup{addtoJSpreamble} + +\showsetup{setJSpreamble} + +As we're into \LUA\ and because \LUA\ is so lightweight I've wondered several +times now if it would make sense to embed \LUA\ in \PDF\ viewers. After all, +annotations are an extension mechanism. In the early days of \PDF\ this was +actually quite doable because \ACROBAT\ reader (and exchange) had a plugin model. +However, the more functionality ended up in the program, the least interesting +(and popular) the plugins mechanism became. Some open source viewers have an +\API\ so in principle adding the lightweight \LUA\ interpreter (of course with +\LPEG, and quite probably without file \IO) is possible. It has been discussed at +a recent \CONTEXT\ meeting, so who knows \unknown For now we're stuck with +\JAVASCRIPT. + +An example of \JAVASCRIPT\ usage is the following, where we load a video and add +some controls. Beware that this kind of functionality is very viewer dependent +and therefore also very unstable over time. Even worse, if you look at the loaded +\JAVASCRIPT\ file you will notice a dependency on soon obsolete (in \ACROBAT\ at +least) shockwave support. First we load a library that will predefine a video +graphic: and then create an instance: + +\starttyping +\useJSscripts[vplayer] + +\setupinteraction + [state=start] + +\externalfigure + [shockwave] + [frame=on, + width=480pt, + height=270pt, + file=test.mp4, + label=foo] +\stoptyping + +The controls are defined with: + +\starttyping +\goto{START} [JS(StartShockwave{foo})] +\goto{REWIND}[JS(RewindShockwave{foo})] +\goto{PAUSE} [JS(PauseShockwave{foo})] +\goto{STOP} [JS(StopShockwave{foo})] +\stoptyping + +or, as we have some defined reference shortcuts: + +\starttyping +\goto{START} [StartShockwave{foo}] +\goto{REWIND}[RewindShockwave{foo}] +\goto{PAUSE} [PauseShockwave{foo}] +\goto{STOP} [StopShockwave{foo}] +\stoptyping + +It's actually not that hard to add all kind of functionality if only we could be +sure of stable support and continuity. 
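To wrap up the preamble and code machinery discussed above, here is a minimal
sketch (not taken from the distribution, so consider it untested) that combines
a preamble, a snippet that uses it, and a link that triggers the snippet:

\starttyping
\startJSpreamble {demo} used later
    var DemoCount = 0 ;
\stopJSpreamble

\startJScode {bumpdemo} uses {demo}
    DemoCount += 1 ;
\stopJScode

\goto {bump the counter} [JS(bumpdemo)]
\stoptyping

Because the preamble is flagged \type {used later}, it only ends up in the \PDF\
file when some code actually uses it, as is the case here.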
+ +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-menus.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-menus.tex new file mode 100644 index 00000000000..6e002b5cd52 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-menus.tex @@ -0,0 +1,12 @@ +\environment interaction-style + +\startcomponent interaction-menus + +\startchapter[title={Menus}] + + {\em This chapter will discuss interaction menus that normally end up in the + margins.} + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-progress.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-progress.tex new file mode 100644 index 00000000000..c077337b329 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-progress.tex @@ -0,0 +1,11 @@ +\environment interaction-style + +\startcomponent interaction-progress + +\startchapter[title={Progress}] + + {\em This chapter will discuss progress bars.} + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-structure.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-structure.tex new file mode 100644 index 00000000000..793139d6c0d --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-structure.tex @@ -0,0 +1,76 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-structure + +\startchapter[title=Structure] + +There is a lot of structure in \CONTEXT: + +\startitemize[packed] +\startitem document structure: projects, products, components, environments \stopitem +\startitem sectioning, with or without numbers (visible), support for lists and userdata \stopitem +\startitem lists, most often related to sections, but there are more \stopitem +\startitem registers \stopitem +\startitem itemized lists \stopitem +\startitem images, \METAPOST\ graphics, different types of tables \stopitem +\startitem typographical objects: constructions, descriptions and enumerations \stopitem +\startitem notes, like footnotes, endnotes, linenotes \stopitem +\startitem marginal notes \stopitem +\startitem formulas (and subformulas) \stopitem +\startitem text areas, layers, overlays \stopitem +\startitem graphic placement with captions and references \stopitem +\startitem cross references to most structural components \stopitem +\startitem bibliographic databases and citations \stopitem +\blank +\startitem \unknown\ and more \unknown \stopitem +\stopitemize + +Most of them in some way carry information about their location in the document +and on the page, and sometimes their exact position. This also means that we can +use that information for annotations. But most users will use the standard +functionality. + +\starttyping +\startsection[title=Whatever] + ... +\stopsection +\stoptyping + +In addition to typesetting this will add the title to a list. In order to do that +some anchor has to be placed in the text, because we need to register the exact +location in order to get the right pagenumber after \TEX\ has broken the flow into +pages. + +\starttyping +\placelist[section][criterium=text] +\stoptyping + +This will place a list of all sections. 
If you want the whole entry to be a +clickable areas, you can say: + +\starttyping +\placelist[section][interaction=all] +\stoptyping + +Otherwise only clicking on the title will bring you to the spot. If you also say: + +\starttyping +\setuphead[interaction=list] +\stoptyping + +Clicking on the head will bring you back to the table of contents. There are +special list rendering alternatives for interactive documents (\typ +{alternative=e} onwards). You can use the \type {list} and \type {bookmark} +parameters to a section head to deviate from the given \type {title}. + +Many commands accept a \type {reference} as optional argument and when you use +an alternative with key|/|values a \type {reference} key will do the job. + +{\em What should go into this chapter.} + +\stopchapter + +\stopcomponent + diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-style.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-style.tex new file mode 100644 index 00000000000..e9922fb1415 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-style.tex @@ -0,0 +1,59 @@ +\startenvironment interaction-style + +\usemodule[abr-02] +\usemodule[scite] +\usemodule[setups-basics] + +\loadsetups[context-en] + +\setupbodyfont[plex,rm] + +\setupwhitespace + [big] + +\setuptolerance + [verytolerant,stretch] + +\setuppagenumbering + [alternative=doublesided] + +\setuplayout + [footer=0cm, + backspace=3cm, + cutspace=2cm, + width=middle, + height=middle, + margin=1.75cm, + margindistance=5mm] + +\setupfloats + [ntop=100] + +\setupinteraction + [state=start, + style=, + color=, + contrastcolor=] + +\setuphead + [interaction=list] + +\setuphead + [chapter] + [header=high, + style=\bfd] + +\setuphead + [section] + [style=\bfb] + +% \definecolor[backcolor][r=.8,b=.7,g=.8] +\definecolor[backcolor][s=.8] + +\setupframedtext + [setuptext] + [frame=off, + background=color, + backgroundcolor=backcolor] + +\stopenvironment diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-tagging.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-tagging.tex new file mode 100644 index 00000000000..d38c922f2b9 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-tagging.tex @@ -0,0 +1,26 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-tagging + +\startchapter[title=Tagging] + +In a tagged \PDF\ document the page stream is enriched (or polluted) by marks +that indicate what kind of content is involved. + +\starttyping +\setuptagging[state=start] +\stoptyping + +Only the \type {state} parameters is of general use. + +\showsetup {setuptagging} + +The \TEX\ end of this mechanism overlaps with the export related tagging so when +you add your own elements that will be reflected in the tagging. And indeed: the +keyword here is structure. The worse the structure, the worse the tagging. 
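A minimal sketch, just to show the idea (and assuming that the \XML\ export,
enabled with the \type {export} backend key, indeed taps into the same structure
information, as suggested above): a document that sticks to the regular
structure commands gets its tags more or less for free.

\starttyping
\setuptagging
  [state=start]

\setupbackend
  [export=yes] % assumption: the export shares the structure information

\starttext
    \startchapter[title=Example]
        \startitemize
            \startitem the first item  \stopitem
            \startitem the second item \stopitem
        \stopitemize
    \stopchapter
\stoptext
\stoptyping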
+ +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-titlepage.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-titlepage.tex new file mode 100644 index 00000000000..43b29baf961 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-titlepage.tex @@ -0,0 +1,47 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-titlepage + +\startMPpage + + StartPage ; + numeric w, h, sh ; transform t ; path p ; pair c ; + w := PaperWidth ; + h := PaperHeight ; + p := Page ; + fill p withcolor .4white ; + sh := define_linear_shade(llcorner p, urcorner p, .6yellow, .6blue) ; + c := center boundingbox outlinetext.p("\ss\bf INTERACTIVITY") ; + set_grid(w, h, w/8, h/8) ; + forever : + if new_on_grid(uniformdeviate w,uniformdeviate h) : + t := identity + scaled (3+uniformdeviate 3) + shifted ((dx,dy) - c) + ; + draw outlinetext.b + ("\bf\ss INTERACTIVITY") + (transformed t withshade sh) + (transformed t withpen pencircle scaled 3 withcolor .6white) + ; + fi ; + exitif grid_full ; + endfor ; + draw anchored.lrt ( + textext("\bf\ss \ConTeXt") xsized .6PaperWidth, + lrcorner p shifted (-10mm,40mm) + ) withcolor white ; + draw anchored.lrt ( + textext("\bf\ss Hans Hagen") xsized .6PaperWidth, + lrcorner p shifted (-10mm,15mm) + ) withcolor white ; + StopPage; + +\stopMPpage + +\page[empty] +\page[right] + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-transitions.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-transitions.tex new file mode 100644 index 00000000000..0cde2804076 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-transitions.tex @@ -0,0 +1,64 @@ +% language=uk + +\environment interaction-style + +\startcomponent interaction-transitions + +\startchapter[title={Page transitions}] + +I'm not sure if this feature is still used but in the early days of \PDF\ support +in \TEX, users loved it. I never really used it myself. Page transitions only +make sense in presentations, and unfortunately the ones provided by the Acrobat +viewers are just ugly. Anyhow, one automatically gets them by saying: + +\starttyping +\setuppagetransitions[random] +\stoptyping + +This way one gets random transitions. Resetting transitions is done by: + +\starttyping +\setuppagetransitions[reset] +\stoptyping + +If needed one can specify transitions but I strongly advice against this, because +these commands are very viewer dependant, therefore: if in despair, use numbers! 
+By default, the next set is used, and one can access them by number, + +\starttabulate[|c|l|] +\HL +\NC \bf number \NC \bf transition effects \NC\NR +\HL +\NC 1\quad 2 \NC \type{{split,in,vertical}} + \type{{split,in,horizontal}} \NC\NR +\NC 3\quad 4 \NC \type{{split,out,vertical}} + \type{{split,out,horizontal}} \NC\NR +\NC 5\quad 6 \NC \type{{blinds,horizontal}} + \type{{blinds,vertical}} \NC\NR +\NC 7\quad 8 \NC \type{{box,in}} + \type{{box,out}} \NC\NR +\NC 9\quad10\quad11\quad12 \NC \type{{wipe,east}} + \type{{wipe,west}} + \type{{wipe,north}} + \type{{wipe,south}} \NC\NR +\NC 13 \NC \type{dissolve} \NC\NR +\NC 14\quad15 \NC \type{{glitter,east}} + \type{{glitter,south}} \NC\NR +\HL +\stoptabulate + +The next settings are all valid: + +\starttyping +\setuppagetransitions +\setuppagetransitions[1] +\setuppagetransitions[3,5,8,random] +\stoptyping + +Valid setups are: + +\setup{setuppagetransitions} + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-widgets.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-widgets.tex new file mode 100644 index 00000000000..e4fa136c9c5 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction-widgets.tex @@ -0,0 +1,977 @@ +\environment interaction-style + +\startcomponent interaction-widgets + +\startchapter[title={Widgets}] + + {\em This chapter will discuss forms and special buttons.} + +\stopchapter + +\stopcomponent + +% Below comes from the old widgets manual: + +\def\fillinfield#1{} + +% \useJSscripts[fld] +% \setupinteraction[state=start,closeaction=ForgetAll] + +\section{Fill||in fields} + +Fields come in many disguises. Currently \CONTEXT\ supports +the field types provided by \PDF, which in turn are derived +from \HTML. Being a static format and not a programming +language, \PDF\ only provides the interface. Entering data +is up to the viewer and validation to the built in +\JAVASCRIPT\ interpreter. The next paragraph shows an +application. + +\startbuffer +A few years back, \TEX\ could only produce \fillinfield [dvi] +{\DVI} output, but nowadays, thanks to \fillinfield {Han The +Thanh}, we can also directly produce \fillinfield [pdf] {\PDF}! +Nice eh? Actually, while the first field module was prototyped +in \ACROBAT, the current implementation was debugged in +\fillinfield [pdfTeX] {\PDFTEX}. Field support in \fillinfield +[ConTeXt] {\CONTEXT} is rather advanced and complete and all +kind of fields are supported. One can hook in appearances, and +validation \fillinfield [JavaScripts] {\JAVASCRIPT}'s. Fields +can be cloned and copied, where the latter saves some space. By +using \fillinfield {objects} when suited, this module saves +space anyway. +\stopbuffer + +\getbuffer + +This paragraph is entered in the source file as: + +\typebuffer + +I leave it to the imagination of the user how +\type{\fillinfield} is implemented, but trust me, the +definition is rather simple and is based on the macros +mentioned below. + +Because I envision documents with many thousands of fields, +think for instance of tutorials, I rather early decided to +split the definition from the setup. Due to the fact that +while typesetting a field upto three independant instances +of \type{\framed} are called, we would end up with about +150~hash entries per field, while in the current +implementation we only need a few. Each field can inherit +its specific settings from the setup group it belongs to. 
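Coming back to the \type {\fillinfield} command used above: its actual
definition is not shown here, but a hypothetical, much simplified wrapper along
the following lines gives the idea. The name \type {\MyFillInField} and the
\type {FillInSetup} class are invented for the occasion, and the optional key
argument as well as duplicate names are conveniently ignored:

\starttyping
\setupfield
  [FillInSetup]
  [width=fit,
   height=1.2\lineheight,
   frame=off]

\unexpanded\def\MyFillInField#1%
  {\definefield[fillin:#1][line][FillInSetup][][#1]%
   \field[fillin:#1]}
\stoptyping

The real command is of course more clever about naming and reuse.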
+ +Let's start with an example of a {\em radio} field. In fact +this is a collection of fields. Such a field is defined +with: + +\startbuffer +\definefield + [Logos] [radio] [LogoSetup] + [ConTeXt,PPCHTEX,TeXUtil] [PPCHTEX] +\stopbuffer + +\typebuffer + +\getbuffer + +Here the fourth argument specifies the subfields and the +last argument tells which one to use as default. We have to +define the subfields separately: + +\startbuffer +\definesubfield [ConTeXt] [] [ConTeXtLogo] +\definesubfield [PPCHTEX] [] [PPCHTEXLogo] +\definesubfield [TeXUtil] [] [TeXUtilLogo] +\stopbuffer + +\typebuffer + +\getbuffer + +The second argument specifies the setup. In this example +the setup (\type {LogoSetup}) is inherited from the main +field. The third arguments tells \CONTEXT\ how the fields +look like when turned on. These appearances are to be +defined as symbols: + +\startbuffer +\definesymbol [ConTeXtLogo] [{\externalfigure[mp-cont.502]}] +\definesymbol [PPCHTEXLogo] [{\externalfigure[mp-cont.503]}] +\definesymbol [TeXUtilLogo] [{\externalfigure[mp-cont.504]}] +\stopbuffer + +\typebuffer + +\getbuffer + +Before we typeset the fields, we specify some settings to use: + +\startbuffer +\setupfield [LogoSetup] + [width=4cm, + height=4cm, + frame=off, + background=screen] +\stopbuffer + +\typebuffer + +\getbuffer + +Finally we can typeset the fields: + +\startbuffer +\hbox to \hsize + {\hss\field[ConTeXt]\hss\field[PPCHTEX]\hss\field[TeXUtil]\hss} +\stopbuffer + +\typebuffer + +This shows up as: + +\startbaselinecorrection +\getbuffer +\stopbaselinecorrection + +An important characteristic of field is cloning cq.\ +copying, as demonstrated below: + +% \tracefieldstrue + +\startbuffer[symbol] +\definesymbol [yes-a] [$\times$] +\definesymbol [yes-b] [$\star$] +\definesymbol [nop-a] [$\bullet$] +\definesymbol [nop-b] [$-$] +\stopbuffer + +\getbuffer[symbol] + +\startbuffer[define] +\definefield [example-1] [radio] [setup 1] [ex-a,ex-b,ex-c] [ex-c] +\definesubfield [ex-a,ex-b,ex-c] [setup 1] [yes-a,nop-a] +\stopbuffer + +\getbuffer[define] + +\startbuffer[clone] +\clonefield [ex-a] [ex-p] [setup 2] [yes-b,nop-b] +\clonefield [ex-b] [ex-q] [setup 2] [yes-b,nop-b] +\clonefield [ex-c] [ex-r] [setup 2] [yes-b,nop-b] +\stopbuffer + +\getbuffer[clone] + +\startbuffer[copy] +\copyfield [ex-a] [ex-x] +\copyfield [ex-b] [ex-y] +\copyfield [ex-c] [ex-z] +\stopbuffer + +\getbuffer[copy] + +\startbuffer[setup] +\setupfield [setup 1] [width=1cm,height=1cm,framecolor=red] +\setupfield [setup 2] [width=.75cm,height=.75cm] +\stopbuffer + +\getbuffer[setup] + +\startbuffer[field] +\hbox to \hsize + {\field[ex-a]\hfil\field[ex-b]\hfil\field[ex-c]\hfil\hfil + \field[ex-p]\hfil\field[ex-q]\hfil\field[ex-r]\hfil\hfil + \field[ex-x]\hfil\field[ex-y]\hfil\field[ex-z]} +\stopbuffer + +\startbaselinecorrection \getbuffer[field] \stopbaselinecorrection + +The next table shows the relations between these fields of type radio: + +\startbaselinecorrection +\showfields +\stopbaselinecorrection + +This table is generated by \type {\showfields} and can be +used to check the relations between fields, but only when +we have set \type {\tracefieldstrue}. Radio fields have the +most complicated relationships of fields, due to the fact +that only one of them can be activated (on). By saying +\type {\logfields} one can write the current field +descriptions to the file \type {fields.log}. + +Here we used some \TEX\ mathematical symbols. These are +functional but sort of dull, so later we will define a more +suitable visualization. 
+ +\typebuffer[symbol] + +The parent fields were defined by: + +\typebuffer[define] + +and the clones, which can have their own appearance, by: + +\typebuffer[clone] + +The copies are defined using: + +\typebuffer[copy] + +using the setups + +\typebuffer[setup] + +Finally all these fields are called using \type {\field}: + +\typebuffer[field] + +Now we will define a so called {\em check} field. This +field looks like a radio field but is independant of +others. First we define some suitable symbols: + +\startbuffer +\definesymbol [yes] [{\externalfigure[mp-cont.502]}] +\definesymbol [no] [] +\stopbuffer + +\getbuffer + +\typebuffer + +A check field is defined as: + +\startbuffer +\definefield [check-me] [check] [setup 3] [yes,no] [no] +\stopbuffer + +\getbuffer + +\typebuffer + +This time we say \type{\field[check-me]} and get: + +\startbuffer +\setupfield + [setup 3] + [width=2cm, height=2cm, + rulethickness=3pt, corner=round, framecolor=red] +\stopbuffer + +\getbuffer + +\startlinecorrection +\hbox to \hsize{\hss\field[check-me]\hss} +\stoplinecorrection + +As setup we used: + +\typebuffer + +We already saw an example of a {\em line} field. By default +such a line field looks like: + +\startbuffer[define] +\definefield [Email] [line] [ShortLine] [] [pragma@wxs.nl] +\stopbuffer + +\getbuffer[define] + +\startbuffer[field] +\field [Email] [your email] +\stopbuffer + +\startbaselinecorrection \getbuffer[field] \stopbaselinecorrection + +We defined this field as: + +\typebuffer[define] + +and called it using a second, optional, argument: + +\typebuffer[field] + +As shown, we can influence the way such a field is typeset. +It makes for instance sense to use a monospaced typeface +and limit the height. When we set up a field, apart from +the setup class we pass some general characteristics, and +three more detailed definitions, concerning the +surrounding, the label and the field itself. 
+ +\startbuffer +\setupfield + [ShortLine] + [label,frame,horizontal] + [offset=4pt,height=fit,framecolor=green, + background=screen,backgroundscreen=.80] + [height=18pt,width=80pt,align=middle, + background=screen,backgroundscreen=.90,frame=off] + [height=18pt,width=80pt,color=red,align=right,style=type, + background=screen,backgroundscreen=.90,frame=off] +\stopbuffer + +\typebuffer + +So now we get: + +\getbuffer + +\startbuffer[mainmail] +\definemainfield [MainMail] [line] [ShortLine] [] [pragma@wxs.nl] +\stopbuffer + +\getbuffer[mainmail] + +\startlinecorrection +\field [MainMail] [your email] +\stoplinecorrection + +Such rather long definitions can be more sparse when we set +up all fields at once, like: + +\startbuffer +\setupfields + [label,frame,horizontal] + [offset=4pt,height=fit,framecolor=green, + background=screen,backgroundscreen=.80] + [height=18pt,width=80pt, + background=screen,backgroundscreen=.90,frame=off] + [height=18pt,width=80pt,color=red,align=middle, + background=screen,backgroundscreen=.90,frame=off] +\stopbuffer + +\typebuffer + +So given that we have defined field \type {MainMail} we can +say: + +\startbuffer[MP] +\startuniqueMPgraphic{button} + path p ; p := fullcircle xyscaled (OverlayWidth,OverlayHeight) ; + fill p withcolor (.8,.8,.8) ; + draw p withcolor OverlayColor withpen pencircle scaled 3 ; +\stopuniqueMPgraphic + +\defineoverlay [normalbutton] [\uniqueMPgraphic{button}] +\stopbuffer + +\getbuffer[MP] + +\startbuffer +\setupfield [LeftLine] + [background=normalbutton, backgroundcolor=darkgreen, + offset=2ex, height=7ex, width=.25\hsize, + style=type, frame=off, align=left] +\setupfield [MiddleLine] + [background=normalbutton, backgroundcolor=darkgreen, + offset=2ex, height=7ex, width=.25\hsize, + style=type, frame=off, align=middle] +\setupfield [RightLine] + [background=normalbutton, backgroundcolor=darkgreen, + offset=2ex, height=7ex, width=.25\hsize, + style=type, frame=off, align=right] + +\clonefield [MainMail] [LeftMail] [LeftLine] +\clonefield [MainMail] [MiddleMail] [MiddleLine] +\clonefield [MainMail] [RightMail] [RightLine] +\stopbuffer + +\typebuffer + +\getbuffer + +We get get three connected fields: + +\startlinecorrection +\hbox to \hsize + {\field[LeftMail]\hss\field[MiddleMail]\hss\field[RightMail]} +\stoplinecorrection + +(Keep in mind that in \CONTEXT\ left aligned comes down to +using \type {\raggedleft}, which can be confusing, but +history cannot be replayed.) + +By the way, this shape was generated by \METAPOST\ using +the overlay mechanism: + +\typebuffer[MP] + +Due to the fact that a field can have several modes (loner, +parent, clone or copy), one cannot define a clone or copy +when the parent field is already typeset. When one knows in +advance that there will be clones or copies, one should +use: + +\typebuffer[mainmail] + +Now we can define copies, clones and even fields with the +same name, also when the original already is typeset. Use +\type {\showfields} to check the status of fields. When in +this table the mode is typeset slanted, the field is not +yet typeset. + +The values set up with \type {\setupfield} are inherited by +all following setup commands. One can reset these default +values by: + +\startbuffer +\setupfields[reset] +\stopbuffer + +\typebuffer + +\getbuffer + +When we want more than one line, we use a {\em text} field. +Like the previous fields, text must be entered in the +viewer specific encoding, in our case, \PDF\ document +encoding. 
To free users from thinking of encoding, +\CONTEXT\ provides a way to force at least the accented +glyphs into a text field in a for \TEX\ users familiar way: + +\setupfields + [horizontal] + [offset=4pt,height=fit,framecolor=green, + background=screen,backgroundscreen=.80] + [height=18pt,width=80pt, + background=screen,backgroundscreen=.90,frame=off] + [height=18pt,width=80pt,color=red,align=middle, + background=screen,backgroundscreen=.90,frame=off] + +\definefield + [SomeField] [text] [TextSetup] + [hi there, try \string\\ \string " e or + \string\\ \string~ n to get \"e or \~n] + +\setupfield + [TextSetup] + [label,frame,horizontal][framecolor=green][] + [height=80pt,width=240pt,style=\sl,align=middle, + enterregion=JS(Initialize_TeX_Key), + afterkey=JS(Convert_TeX_Key), + validate=JS(Convert_TeX_String)] + +\startbaselinecorrection +\field[SomeField][Just Some Text] +\stopbaselinecorrection + +Now, how is this done? Defining the field is not that hard: + +\starttyping +\definefield [SomeField] [text] [TextSetup] [default text] +\stoptyping + +The conversion is taken care of by a \JAVASCRIPT's. We can assign such +scripts to mouse and keyboard events, like in: + +\starttyping +\setupfield + [TextSetup][...][...][...] + [...., + regionin=JS(Initialize_TeX_Key), + afterkey=JS(Convert_TeX_Key), + validate=JS(Convert_TeX_String)] +\stoptyping + +The main reason for using the \type{JS(...)} method here is +that this permits future extensions and looks familiar at +the same time. Depending on the assignments, one can +convert after each keypress and|/|or after all text is +entered. + +\startbuffer +\definefield + [Ugly] [choice] [UglySetup] + [ugly,awful,bad] [ugly] +\setupfield + [UglySetup] + [width=6em, + height=1.2\lineheight, + location=low] +\stopbuffer + +\getbuffer + +We've arrived at another class of fields: {\em choice}, +{\em pop||up} and {\em combo} fields. All those are menu +based (and \vbox{\field[Ugly]}). This in||line menu was +defined as: + +\typebuffer + +\startbuffer +\definefield + [Ugly2] [popup] [UglySetup] + [ugly,awful,bad] [ugly] +\definefield + [Ugly3] [combo] [UglySetup] + [ugly,{AWFUL=>awful},bad] [ugly] +\stopbuffer + +\getbuffer + +Pop||up fields look like: \vbox{\field[Ugly2]} and combo +fields permit the user to enter his or her own option: +\vbox{\field[Ugly3]}. The amount of typographic control +over these three type of fields is minimal, but one can +specify what string to show and what string results: + +\typebuffer + +Here \type{AWFUL} is shown and when selected becomes the +choice \type{awful}. Just in case one wonders why we use +\type{=>}, well, it just looks better and the direction +shows what value will be output. + +\startbuffer[uglies] +\definefieldset [AllUglies] [Ugly, Ugly2, Ugly3] +\stopbuffer + +\getbuffer[uglies] + +\startbuffer +One can for instance \goto {reset the form} [ResetForm] or +\goto {part of the form} [ResetForm{AllUglies}]. This last +sentence was typed in as: +\stopbuffer + +A special case of the check type field is a pure {\em push} +field. Such a field has no export value and has only use as +a pure interactive element. For the moment, let's forget +about that one. + +Before we demonstrate our last type of fields and show some +more tricky things, we need to discuss what to do with the +information provided by filling in the fields. There are +several actions available related to fields. 
+ +\getbuffer + +\typebuffer + +Hereby \type {AllUglies} is a set of fields to be defined +on forehand, using + +\typebuffer[uglies] + +In a similar way one can \goto {submit some or all fields} +[SubmitForm] using the \type {SubmitForm} directive. This +action optionally can take two arguments, the first being +the destination, the second a list of fields to submit, for +instance: + +\starttyping +\button{submit}[SubmitForm{mailto::pragma@wxs.nl,AllUglies}] +\stoptyping + +Once the fields are submitted (or saved in a file), we can +convert the resulting \FDF\ file into something \TEX\ with +the perl program \type{fdf2tex}. One can use \type +{\ShowFDFFields{filename}} to typeset the values. If you do +not want to run the \PERL\ converter from within \TEX, say +\type {\runFDFconverterfalse}. In that case, the (stil) +less robust \TEX\ based converter will be used. + +I already demonstrated how to attach scripts to events, but +how about changing the appearance of the button itself? +Consider the next definitions: + +\startbuffer +\definesymbol [my-y] [$\times$] +\definesymbol [my-r] [?] +\definesymbol [my-d] [!] + +\definefield + [my-check] [check] [my-setup] + [{my-y,my-r,my-d},{,my-r,my-d}] +\stopbuffer + +\typebuffer + +\getbuffer + +Here we omitted the default value, which always is {\em no} +by default. The setup can look like this: + +\startbuffer +\setupfield + [my-setup] + [width=1.5cm, height=1.5cm, + frame=on, framecolor=red, rulethickness=1pt, + backgroundoffset=2pt, background=screen, backgroundscreen=.85] +\stopbuffer + +\typebuffer + +\getbuffer + +Now when this field shows up, watch what happens when the +mouse enters the region and what when we click. + +\startbaselinecorrection +\hbox to \hsize{\hss\field[my-check]\hss} +\stopbaselinecorrection + +So, when instead of something \type{[yes,no]} we give +triplets, the second element of such a triplet declares the +roll||over appearance and the third one the push||down +appearance. The braces are needed! + +One application of appearances is to provide help or +additional information. Consider the next definition: + +\startbuffer +\definefield [Help] [check] [HelpSetup] [helpinfo] [helpinfo] +\stopbuffer + +\typebuffer + +\getbuffer + +This means as much as: define a check field, typeset this +field using the help specific setup and let \type{helpinfo} +be the on||value as well as the default. Here we use the +next setup: + +\startbuffer +\setupfields + [reset] +\setupfield + [HelpSetup] + [width=fit,height=fit,frame=off,option={readonly,hidden}] +\stopbuffer + +\typebuffer + +\getbuffer + +We didn't use options before, but here we have to make sure +that users don't change the content of the field and by +default we don't want to show this field at all. The actual +text is defined as a symbol: + +\startbuffer +\definesymbol [helpinfo] [\SomeHelpText] + +\def\SomeHelpText% + {\framed + [width=\leftmarginwidth,height=fit,align=middle,style=small, + frame=on,background=color,backgroundcolor=white,framecolor=red] + {Click on the hide button to remove this screen}} +\stopbuffer + +\typebuffer + +\getbuffer + +\startbuffer +\inmargin {\fitfield[Help]} Now we can put the button somewhere and +turn the help on or off by saying \goto {Hide Help} [HideField{Help}] +or \goto {Show Help} [ShowField{Help}]. Although it's better to put +these commands in a dedicated part of the screen. And try \goto +{Help} [JS(Toggle_Hide{Help})]. 
+\stopbuffer + +\getbuffer + +We can place a field anywhere on the page, for instance by +using the \type {\setup...texts} commands. Here we simply +said: + +\typebuffer + +When one uses for instance \type {\setup...texts}, one +often wants the help text to show up on every next page. +This can be accomplished by saying: + +\starttyping +\definemainfield [Help] [check] [HelpSetup] [helpinfo] [helpinfo] +\stoptyping + +Every time such a field is called again, a new copy is +generated automatically. Because fields use the +objectreference mechanism and because such copies need to +be known to their parent, field inclusion is a multi||pass +typesetting job (upto 4 passes can be needed!). + +When possible, appearances are shared between fields, +mainly because this saves space, but at the cost of extra +object references. This feature is not that important for +straight forward forms, but has some advantages when +composing more complicated (educational) documents. + +Let us now summarize the commands we have available for +defining and typesetting fields. The main definition macro +is: + +\setup{definefield} + +and for radiofields we need to define the components by: + +\setup{definesubfield} + +Fields can be cloned and copied, where the latter can not +be set up independently. + +\setup{clonefield} + +\setup{copyfield} + +Fields can be grouped, and such a group can have its own +settings. Apart from copied fields, we can define the +layout of a field and set options using: + +\setup{setupfield} + +Such a group inherits its settings from the general setup +command: + +\setup{setupfields} + +Fields are placed using one of: + +\setup{field} + +or + +\setup{fitfield} + +Some pages back I showed an example of: + +\setup{fillinfield} + +Finally there are two commands to trace fields. These +commands only make sense when one already has said: \type +{\tracefieldstrue}. + +\setup{showfields} + +\setup{logfields} + +\section{Tooltips} + +\startbuffer +Chinese people seem to have no problems in recognizing their many +different pictorial glyphs. \tooltip [left] {Western} {European +and American} people however seem to have problems in understanding +what all those \tooltip [middle] {icons} {small graphics} on their +computer screens represent. But, instead of standardizing on a set +of icons, computer programmers tend to fill the screen with so +called tooltips. Well, \tooltip {\CONTEXT} {a \TEX\ macro package} +can do tooltips too, and although a good design can do without them, +\TEX\ at least can typeset them correctly. +\stopbuffer + +\getbuffer + +The previous paragraph has three of such tooltips under +{\em western}, {\em icons} and {\em \CONTEXT}, each aligned +differently. We just typed: + +\typebuffer + +This is an official command, and thereby we can show its +definition: + +\setup{tooltip} + +\section{Fieldstacks} + +In due time I will provide more dedicated field commands. +Currently apart from \type {\fillinfield} and \type +{\tooltip} we have \type {\fieldstack}. Let's spend a few +words on those now. 
+ +\startbuffer[somemap1] +\useexternalfigure [map -- -- --] [euro-10] [width=.3\hsize] +\useexternalfigure [map nl -- --] [euro-11] [map -- -- --] +\useexternalfigure [map nl de --] [euro-12] [map -- -- --] +\useexternalfigure [map nl de en] [euro-13] [map -- -- --] + +\definesymbol [map -- -- --] [{\externalfigure[map -- -- --]}] +\definesymbol [map nl -- --] [{\externalfigure[map nl -- --]}] +\definesymbol [map nl de --] [{\externalfigure[map nl de --]}] +\definesymbol [map nl de en] [{\externalfigure[map nl de en]}] + +\stopbuffer + +\startbuffer[somemap2] +\definefieldstack + [somemap] + [map -- -- --, map nl -- --, map nl de --, map nl de en] + [frame=on] +\stopbuffer + +\startbuffer[somemap3] +\placefigure + [left][fig:somemap] + {Do you want to see what interfaces + are available? Just click \goto + {here} [JS(Walk_Field{somemap})] + a few times!} + {\fieldstack[somemap]} +\stopbuffer + +\getbuffer[somemap1,somemap2,somemap3] + +One can abuse field for educational purposes. Take for +instance \in{figure}[fig:somemap]. In this figure we can +sort of walk over different alternatives of the same +graphic. This illustration was typeset by saying: + +\typebuffer[somemap3] + +However, before we can ask for such a map, we need to +define a field set, which in fact is a list of symbols to +show. This list is defined using: + +\typebuffer[somemap2] + +which in turn is preceded by: + +\typebuffer[somemap1] + +\startbuffer +\placefigure + [here][fig:anothermap] + {Choose \goto {one} [JS(Set_Field{anothermap,2})] country, + \goto {two} [JS(Set_Field{anothermap,3})] countries, + \goto {three} [JS(Set_Field{anothermap,4})] countries or + \goto {no} [JS(Set_Field{anothermap,1})] countries at all.} + {\fieldstack + [anothermap] + [map -- -- --, map nl -- --, map nl de --, map nl de en] + [frame=on]} +\stopbuffer + +\getbuffer + +A slightly different illustration is shown in +\in{figure}[fig:anothermap]. Here we use the same symbols +but instead say: + +\typebuffer + +As one can see, we can can skip the definition and pass it +directly, but I wouldn't call that beautiful. + +The formal definitions are: + +\setup{definefieldstack} + +\setup{fieldstack} + +Instead if stacking fields, you can of course also put them +alongside. This makes sense when you want to use dedicated +(visible) captions for each image. 
+ +\startbuffer +\useexternalfigure [europe] [euro-10] [width=.3\hsize] +\useexternalfigure [holland] [euro-nl] [europe] +\useexternalfigure [germany] [euro-de] [europe] +\useexternalfigure [england] [euro-en] [europe] + +\definesymbol [europe] [{\externalfigure[europe]}] +\definesymbol [holland] [{\externalfigure[holland]}] +\definesymbol [germany] [{\externalfigure[germany]}] +\definesymbol [england] [{\externalfigure[england]}] + +\definefield + [interface] [radio] [map] + [england,germany,holland] [holland] + +\definesubfield [holland] [] [holland,europe] +\definesubfield [germany] [] [germany,europe] +\definesubfield [england] [] [england,europe] + +\setupfield[map][frame=off] +\stopbuffer + +\typebuffer + +\getbuffer + +We can for instance typeset the fields by saying: + +\startbuffer +\startcombination[3] + {\fitfield[holland]} {Dutch Interface} + {\fitfield[germany]} {German Interface} + {\fitfield[england]} {English Interface} +\stopcombination +\stopbuffer + +\typebuffer + +\startbaselinecorrection +\getbuffer +\stopbaselinecorrection + + + +%D \starttyping +%D \defineviewerlayer[test] +%D +%D \startviewerlayer[test]Hide Me\stopviewerlayer +%D +%D \defineoverlay +%D [WithTest] +%D [{\overlayrollbutton[HideLayer{test}][VideLayer{test}]}] +%D +%D \framed[background=WithTest]{toggle} +%D \stoptyping + + + +% \setupinteraction[state=start] +% +% \definepushbutton [reset] +% +% \startuniqueMPgraphic{whatever}{color} +% fill fullcircle xysized (OverlayWidth,OverlayHeight) withcolor \MPvar{color} ; +% \stopuniqueMPgraphic +% +% \definepushsymbol [reset] [n] [\uniqueMPgraphic{whatever}{color=red}] +% \definepushsymbol [reset] [r] [\uniqueMPgraphic{whatever}{color=green}] +% \definepushsymbol [reset] [d] [\uniqueMPgraphic{whatever}{color=blue}] +% +% \starttext +% \startTEXpage +% \pushbutton [reset] [page(2)] +% \stopTEXpage +% \startTEXpage +% \pushbutton [reset] [page(1)] +% \stopTEXpage +% \stoptext + + +% \setupinteraction[state=start] +% +% \definepushbutton [reset] +% +% \startuniqueMPgraphic{whatever}{color} +% fill fullcircle xysized (OverlayWidth,OverlayHeight) withcolor \MPvar{color} ; +% \stopuniqueMPgraphic +% +% \definepushsymbol [reset] [n] [\uniqueMPgraphic{whatever}{color=red}] +% \definepushsymbol [reset] [r] [\uniqueMPgraphic{whatever}{color=green}] +% \definepushsymbol [reset] [d] [\uniqueMPgraphic{whatever}{color=blue}] +% +% \starttext +% \startTEXpage +% \pushbutton [reset] [page(2)] +% \stopTEXpage +% \startTEXpage +% \pushbutton [reset] [page(1)] +% \stopTEXpage +% \stoptext + +% \appendtoks +% \let\startrob\scrn_menu_rob_start +% \let\stoprob \relax +% \let\rob \scrn_menu_rob_direct +% \to \everysetmenucommands + +% \protect \endinput diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction.tex b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction.tex new file mode 100644 index 00000000000..077e4abd8fb --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/interaction/interaction.tex @@ -0,0 +1,34 @@ +\environment interaction-style + +\setuplayout[bottom=1cm,bottomdistance=1cm] +\setupbottomtexts[\infofont work in progress] + +\startdocument + + \component interaction-titlepage + + \startfrontmatter + \component interaction-contents + \component interaction-introduction + \stopfrontmatter + + \startbodymatter + \component interaction-annotations + \component interaction-enabling + \component interaction-actions + \component interaction-hyperlinks + \component interaction-structure + 
\component interaction-comments + \component interaction-attachments + \component interaction-bookmarks + \component interaction-javascript + \component interaction-buttons + \component interaction-menus + \component interaction-progress + \component interaction-widgets + \component interaction-transitions + \component interaction-importing + \component interaction-tagging + \stopbodymatter + +\stopdocument diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/languages/languages-numbering.tex b/Master/texmf-dist/doc/context/sources/general/manuals/languages/languages-numbering.tex index 3464826df6b..9752b62710a 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/languages/languages-numbering.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/languages/languages-numbering.tex @@ -296,6 +296,10 @@ can also as a specific one, so {\em jalali} \date [y=1395, m=4, d=18] \typebuffer \startnarrower \getbuffer \stopnarrower +For time we have \type {\currenttime} and here the specification is just an \type +{h}, \type {m} and whatever connects them. Both date and time are +pre|-|configured in the language definition file \type {lang-def}. + \stopsection % \startsection[title=Counters] diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-contents.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-contents.tex index 6d06b3ef082..2582a81c72b 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-contents.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-contents.tex @@ -1,5 +1,4 @@ \environment luatex-style -\environment luatex-logos \startcomponent luatex-contents diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-enhancements.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-enhancements.tex index e0119bf7e98..b81e1dbdae5 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-enhancements.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-enhancements.tex @@ -1,22 +1,23 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-enhancements \startchapter[reference=enhancements,title={Basic \TEX\ enhancements}] -\section{Introduction} +\startsection[title={Introduction}] + +\startsubsection[title={Primitive behaviour}] From day one, \LUATEX\ has offered extra features compared to the superset of -\PDFTEX\ and \ALEPH. This has not been limited to the possibility to execute -\LUA\ code via \type {\directlua}, but \LUATEX\ also adds functionality via new -\TEX|-|side primitives or extensions to existing ones. +\PDFTEX, which includes \ETEX, and \ALEPH. This has not been limited to the +possibility to execute \LUA\ code via \prm {directlua}, but \LUATEX\ also adds +functionality via new \TEX|-|side primitives or extensions to existing ones. When \LUATEX\ starts up in \quote {iniluatex} mode (\type {luatex -ini}), it defines only the primitive commands known by \TEX82 and the one extra command -\type {\directlua}. As is fitting, a \LUA\ function has to be called to add the +\prm {directlua}. As is fitting, a \LUA\ function has to be called to add the extra primitives to the user environment. 
The simplest method to get access to all of the new primitive commands is by adding this line to the format generation file: @@ -25,7 +26,7 @@ file: \directlua { tex.enableprimitives('',tex.extraprimitives()) } \stoptyping -But be aware that the curly braces may not have the proper \type {\catcode} +But be aware that the curly braces may not have the proper \prm {catcode} assigned to them at this early time (giving a \quote {Missing number} error), so it may be needed to put these assignments before the above line: @@ -36,44 +37,62 @@ it may be needed to put these assignments before the above line: More fine|-|grained primitives control is possible and you can look up the details in \in {section} [luaprimitives]. For simplicity's sake, this manual -assumes that you have executed the \type {\directlua} command as given above. +assumes that you have executed the \prm {directlua} command as given above. The startup behaviour documented above is considered stable in the sense that there will not be backward|-|incompatible changes any more. We have promoted some -rather generic \PDFTEX\ primitives to core \LUATEX\ ones, and the ones inherited -frome \ALEPH\ (\OMEGA) are also promoted. Effectively this means that we now only -have the \type {tex}, \type {etex} and \type {luatex} sets left. +rather generic \PDFTEX\ primitives to core \LUATEX\ ones, and the few that we +inherited from \ALEPH\ (\OMEGA) are also promoted. Effectively this means that we +now only have the \type {tex}, \type {etex} and \type {luatex} sets left. In \in {Chapter} [modifications] we discuss several primitives that are derived from \PDFTEX\ and \ALEPH\ (\OMEGA). Here we stick to real new ones. In the chapters on fonts and math we discuss a few more new ones. -\section{Version information} +\stopsubsection + +\startsubsection[title={Version information}] -\subsection {\type {\luatexbanner}, \type {\luatexversion} and \type {\luatexrevision}} +\startsubsubsection[title={\lpr {luatexbanner}, \lpr {luatexversion} and \lpr {luatexrevision}}] + +\topicindex{version} +\topicindex{banner} There are three new primitives to test the version of \LUATEX: -\starttabulate[|l|pl|pl|] -\BC primitive \BC explanation \BC value \NC \NR -\NC \type {\luatexbanner} \NC the banner reported on the command line \NC \luatexbanner \NC \NR -\NC \type {\luatexversion} \NC a combination of major and minor number \NC \the\luatexversion \NC \NR -\NC \type {\luatexrevision} \NC the revision number, the current value is \NC \luatexrevision \NC \NR +\unexpanded\def\VersionHack#1% otherwise different luatex and luajittex runs + {\ctxlua{% + local banner = "\luatexbanner" + local banner = string.match(banner,"(.+)\letterpercent(") or banner + context(string.gsub(banner ,"jit",""))% + }} + +\starttabulate[|l|l|pl|] +\DB primitive \BC value + \BC explanation \NC \NR +\TB +\NC \lpr {luatexbanner} \NC \VersionHack{\luatexbanner} + \NC the banner reported on the command line \NC \NR +\NC \lpr {luatexversion} \NC \the\luatexversion + \NC a combination of major and minor number \NC \NR +\NC \lpr {luatexrevision} \NC \luatexrevision + \NC the revision number, the current value is \NC \NR +\LL \stoptabulate The official \LUATEX\ version is defined as follows: \startitemize \startitem - The major version is the integer result of \type {\luatexversion} divided by + The major version is the integer result of \lpr {luatexversion} divided by 100. The primitive is an \quote {internal variable}, so you may need to prefix - its use with \type {\the} depending on the context. 
+ its use with \prm {the} depending on the context. \stopitem \startitem - The minor version is the two-digit result of \type {\luatexversion} modulo 100. + The minor version is the two|-|digit result of \lpr {luatexversion} modulo 100. \stopitem \startitem - The revision is the given by \type {\luatexrevision}. This primitive expands to + The revision is reported by \lpr {luatexrevision}. This primitive expands to a positive integer. \stopitem \startitem @@ -82,16 +101,28 @@ The official \LUATEX\ version is defined as follows: \stopitem \stopitemize -\subsection{\type {\formatname}} +\stopsubsubsection + +\startsubsubsection[title={\lpr {formatname}}] -The \type {\formatname} syntax is identical to \type {\jobname}. In \INITEX, the -expansion is empty. Otherwise, the expansion is the value that \type {\jobname} had +\topicindex{format} + +The \lpr {formatname} syntax is identical to \prm {jobname}. In \INITEX, the +expansion is empty. Otherwise, the expansion is the value that \prm {jobname} had during the \INITEX\ run that dumped the currently loaded format. You can use this token list to provide your own version info. -\section{\UNICODE\ text support} +\stopsubsubsection + +\stopsubsection + +\stopsection -\subsection {Extended ranges} +\startsection[title={\UNICODE\ text support}] + +\startsubsection[title={Extended ranges}] + +\topicindex{\UNICODE} Text input and output is now considered to be \UNICODE\ text, so input characters can use the full range of \UNICODE\ ($2^{20}+2^{16}-1 = \hbox{0x10FFFF}$). Later @@ -101,22 +132,25 @@ always converted to a suitable graphic representation of that character in a specific font. However, while processing a list of to|-|be|-|typeset nodes, its contents may still be seen as a character. Inside \LUATEX\ there is no clear separation between the two concepts. Because the subtype of a glyph node can be -changed in \LUA\ it is up to the user: subtypes larger than 255 indicate that +changed in \LUA\ it is up to the user. Subtypes larger than 255 indicate that font processing has happened. A few primitives are affected by this, all in a similar fashion: each of them has -to accommodate for a larger range of acceptable numbers. For instance, \type -{\char} now accepts values between~0 and $1{,}114{,}111$. This should not be a +to accommodate for a larger range of acceptable numbers. For instance, \prm +{char} now accepts values between~0 and $1{,}114{,}111$. This should not be a problem for well|-|behaved input files, but it could create incompatibilities for input that would have generated an error when processed by older \TEX|-|based -engines. The affected commands with an altered initial (left of the equals sign) -or secondary (right of the equals sign) value are: \type {\char}, \type -{\lccode}, \type {\uccode}, \type {\catcode}, \type {\sfcode}, \type {\efcode}, -\type {\lpcode}, \type {\rpcode}, \type {\chardef}. +engines. The affected commands with an altered initial (left of the equal sign) +or secondary (right of the equal sign) value are: \prm {char}, \prm {lccode}, +\prm {uccode}, \lpr {hjcode}, \prm {catcode}, \prm {sfcode}, \lpr {efcode}, \lpr +{lpcode}, \lpr {rpcode}, \prm {chardef}. As far as the core engine is concerned, all input and output to text files is \UTF-8 encoded. Input files can be pre|-|processed using the \type {reader} -callback. This will be explained in a later chapter. +callback. This will be explained in \in {section} [iocallback]. 
Normalization of +the \UNICODE\ input is on purpose not built|-|in and can be handled by a macro +package during callback processing. We have made some practical choices and the +user has to live with those. Output in byte|-|sized chunks can be achieved by using characters just outside of the valid \UNICODE\ range, starting at the value $1{,}114{,}112$ (0x110000). When @@ -129,58 +163,106 @@ are considered \quote {safe} and therefore printed as|-|is. You can disable escaping with \type {texio.setescape(false)} in which case you get the normal characters on the console. -Normalization of the \UNICODE\ input can be handled by a macro package during -callback processing (this will be explained in \in {section} [iocallback]). +\stopsubsection -\subsection{\type {\Uchar}} +\startsubsection[title={\lpr {Uchar}}] -The expandable command \type {\Uchar} reads a number between~0 and $1{,}114{,}111$ +\topicindex{\UNICODE} + +The expandable command \lpr {Uchar} reads a number between~0 and $1{,}114{,}111$ and expands to the associated \UNICODE\ character. -\section{Extended tables} +\stopsubsection + +\startsubsection[title={Extended tables}] All traditional \TEX\ and \ETEX\ registers can be 16-bit numbers. The affected commands are: \startfourcolumns -\starttyping -\count -\dimen -\skip -\muskip -\marks -\toks -\countdef -\dimendef -\skipdef -\muskipdef -\toksdef -\insert -\box -\unhbox -\unvbox -\copy -\unhcopy -\unvcopy -\wd -\ht -\dp -\setbox -\vsplit -\stoptyping +\startlines +\prm {count} +\prm {dimen} +\prm {skip} +\prm {muskip} +\prm {marks} +\prm {toks} +\prm {countdef} +\prm {dimendef} +\prm {skipdef} +\prm {muskipdef} +\prm {toksdef} +\prm {insert} +\prm {box} +\prm {unhbox} +\prm {unvbox} +\prm {copy} +\prm {unhcopy} +\prm {unvcopy} +\prm {wd} +\prm {ht} +\prm {dp} +\prm {setbox} +\prm {vsplit} +\stoplines \stopfourcolumns Because font memory management has been rewritten, character properties in fonts -are no longer shared among fonts instances that originate from the same metric -file. +are no longer shared among font instances that originate from the same metric +file. Of course we share fonts in the backend when possible so that the resulting +\PDF\ file is as efficient as possible, but for instance also expansion and +protrusion no longer use copies as in \PDFTEX. + +\stopsubsection -\section{Attributes} +\stopsection -\subsection{Attribute registers} +\startsection[title={Attributes}] + +\startsubsection[title={Nodes}] + +\topicindex {nodes} + +When \TEX\ reads input it will interpret the stream according to the properties +of the characters. Some signal a macro name and trigger expansion, others open +and close groups, trigger math mode, etc. What's left over becomes the typeset +text. Internally we get linked list of nodes. Characters become \nod {glyph} +nodes that have for instance a \type {font} and \type {char} property and \typ +{\kern 10pt} becomes a \nod {kern} node with a \type {width} property. Spaces are +alien to \TEX\ as they are turned into \nod {glue} nodes. So, a simple paragraph +is mostly a mix of sequences of \nod {glyph} nodes (words) and \nod {glue} nodes +(spaces). + +The sequences of characters at some point are extended with \nod {disc} nodes +that relate to hyphenation. After that font logic can be applied and we get a +list where some characters can be replaced, for instance multiple characters can +become one ligature, and font kerns can be injected. This is driven by the +font properties. 
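
Since this is the first place where node lists are described, a small illustration may help; it is an editorial sketch, not part of the patch. It registers a pre_linebreak_filter callback at the bare engine level (macro packages normally wrap callback registration in their own interface) and simply counts the glyph and glue nodes of each paragraph; node.id, node.traverse, callback.register and texio.write_nl are the documented library calls used here.

\starttyping
\directlua {
  local glyph_id = node.id("glyph")
  local glue_id  = node.id("glue")
  callback.register("pre_linebreak_filter", function(head)
    --[[ walk the list that is about to be broken into lines ]]
    local glyphs, glues = 0, 0
    for n in node.traverse(head) do
      if n.id == glyph_id then
        glyphs = glyphs + 1
      elseif n.id == glue_id then
        glues = glues + 1
      end
    end
    texio.write_nl("paragraph: " .. glyphs .. " glyphs and " .. glues .. " glues")
    --[[ returning true keeps the list as it is ]]
    return true
  end)
}
\stoptyping

Returning true tells the engine to keep the list untouched, so this sketch only reports and never changes the typeset result.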
+ +Boxes (like \prm {hbox} and \prm {vbox}) become \nod {hlist} or \nod {vlist} +nodes with \type {width}, \type {height}, \type {depth} and \type {shift} +properties and a pointer \type {list} to its actual content. Boxes can be +constructed explicitly or can be the result of subprocesses. For instance, when +lines are broken into paragraphs, the lines are a linked list of \nod {hlist} +nodes. + +So, to summarize: all that you enter as content eventually becomes a node, often +as part of a (nested) list structure. They have a relative small memory footprint +and carry only the minimal amount of information needed. In traditional \TEX\ a +character node only held the font and slot number, in \LUATEX\ we also store some +language related information, the expansion factor, etc. Now that we have access +to these nodes from \LUA\ it makes sense to be able to carry more information +with an node and this is where attributes kick in. + +\stopsubsection + +\startsubsection[title={Attribute registers}] + +\topicindex {attributes} Attributes are a completely new concept in \LUATEX. Syntactically, they behave a lot like counters: attributes obey \TEX's nesting stack and can be used after -\type {\the} etc.\ just like the normal \type {\count} registers. +\prm {the} etc.\ just like the normal \prm {count} registers. \startsyntax \attribute <16-bit number> <optional equals> <32-bit number>!crlf @@ -202,13 +284,22 @@ attached to all nodes created in their scope. These can then be queried from any attributes for node list processing from \LUA\ is given in~\in {chapter}[nodes]. Attributes are stored in a sorted (sparse) linked list that are shared when -possible. This permits efficient testing and updating. +possible. This permits efficient testing and updating. You can define many +thousands of attributes but normally such a large number makes no sense and is +also not that efficient because each node carries a (possibly shared) link to a +list of currently set attributes. But they are a convenient extension and one of +the first extensions we implemented in \LUATEX. + +\stopsubsection -\subsection{Box attributes} +\startsubsection[title={Box attributes}] + +\topicindex {attributes} +\topicindex {boxes} Nodes typically receive the list of attributes that is in effect when they are created. This moment can be quite asynchronous. For example: in paragraph -building, the individual line boxes are created after the \type {\par} command has +building, the individual line boxes are created after the \prm {par} command has been processed, so they will receive the list of attributes that is in effect then, not the attributes that were in effect in, say, the first or third line of the paragraph. @@ -229,28 +320,64 @@ incompatibility is mostly due to the fact that separate specials and literals ar a more unnatural approach to colors than attributes. It is possible to fine-tune the list of attributes that are applied to a \type -{hbox}, \type {vbox} or \type {vtop} by the use of the keyword \type {attr}. An -example: +{hbox}, \type {vbox} or \type {vtop} by the use of the keyword \type {attr}. The +\type {attr} keyword(s) should come before a \type {to} or \type {spread}, if +that is also specified. 
An example is: -\starttyping -\attribute2=5 +\startbuffer[tex] +\attribute997=123 +\attribute998=456 \setbox0=\hbox {Hello} -\setbox2=\hbox attr1=12 attr2=-"7FFFFFFF{Hello} -\stoptyping +\setbox2=\hbox attr 999 = 789 attr 998 = -"7FFFFFFF{Hello} +\stopbuffer + +\startbuffer[lua] + for b=0,2,2 do + for a=997, 999 do + tex.sprint("box ", b, " : attr ",a," : ",tostring(tex.box[b] [a])) + tex.sprint("\\quad\\quad") + tex.sprint("list ",b, " : attr ",a," : ",tostring(tex.box[b].list[a])) + tex.sprint("\\par") + end + end +\stopbuffer + +\typebuffer[tex] + +Box 0 now has attributes 997 and 998 set while box 2 has attributes 997 and 999 +set while the nodes inside that box will all have attributes 997 and 998 set. +Assigning the maximum negative value causes an attribute to be ignored. + +To give you an idea of what this means at the \LUA\ end, take the following +code: + +\typebuffer[lua] -This will set the attribute list of box~2 to $1=12$, and the attributes of box~0 -will be $2=5$. As you can see, assigning the maximum negative value causes an -attribute to be ignored. +Later we will see that you can access properties of a node. The boxes here are so +called \nod {hlist} nodes that have a field \type {list} that points to the +content. Because the attributes are a list themselves you can access them by +indexing the node (here we do that with \type {[a]}. Running this snippet gives: -The \type {attr} keyword(s) should come before a \type {to} or \type {spread}, if -that is also specified. +\start + \getbuffer[tex] + \startpacked \tt + \ctxluabuffer[lua] + \stoppacked +\stop -\section{\LUA\ related primitives} +Because some values are not set we need to apply the \type {tostring} function +here so that we get the word \type {nil}. -\subsection{\type {\directlua}} +\stopsubsection + +\stopsection + +\startsection[title={\LUA\ related primitives}] + +\startsubsection[title={\prm {directlua}}] In order to merge \LUA\ code with \TEX\ input, a few new primitives are needed. -The primitive \type {\directlua} is used to execute \LUA\ code immediately. The +The primitive \prm {directlua} is used to execute \LUA\ code immediately. The syntax is \startsyntax @@ -261,10 +388,10 @@ syntax is The \syntax {<general text>} is expanded fully, and then fed into the \LUA\ interpreter. After reading and expansion has been applied to the \syntax {<general text>}, the resulting token list is converted to a string as if it was -displayed using \type {\the\toks}. On the \LUA\ side, each \type {\directlua} -block is treated as a separate chunk. In such a chunk you can use the \type -{local} directive to keep your variables from interfering with those used by the -macro package. +displayed using \type {\the\toks}. On the \LUA\ side, each \prm {directlua} block +is treated as a separate chunk. In such a chunk you can use the \type {local} +directive to keep your variables from interfering with those used by the macro +package. The conversion to and from a token list means that you normally can not use \LUA\ line comments (starting with \type {--}) within the argument. As there typically @@ -281,15 +408,16 @@ say: \stoptyping Then \LUA\ line comments can be used, since \TEX\ does not replace line endings -with spaces. +with spaces. Of course such an approach depends on the macro package that you +use. 
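
A small aside on the remark about Lua line comments (an editorial sketch, not part of the commit): because the whole \directlua argument reaches Lua as a single line once line endings have become spaces, a block comment is a safe alternative that needs no newline tricks at all.

\starttyping
\directlua {
  --[[ a block comment is safe here: it still ends at the closing
       brackets after TeX has turned the line endings into spaces ]]
  tex.sprint("the answer is " .. (6 * 7))
}
\stoptyping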
-Likewise, the \syntax {<16-bit number>} designates a name of a \LUA\ chunk and is +The \syntax {<16-bit number>} designates a name of a \LUA\ chunk and is taken from the \type {lua.name} array (see the documentation of the \type {lua} table further in this manual). When a chunk name starts with a \type {@} it will be displayed as a file name. This is a side effect of the way \LUA\ implements error handling. -The \type {\directlua} command is expandable. Since it passes \LUA\ code to the +The \prm {directlua} command is expandable. Since it passes \LUA\ code to the \LUA\ interpreter its expansion from the \TEX\ viewpoint is usually empty. However, there are some \LUA\ functions that produce material to be read by \TEX, the so called print functions. The most simple use of these is \type @@ -320,10 +448,10 @@ will result in \getbuffer -Note that the expansion of \type {\directlua} is a sequence of characters, not of +Note that the expansion of \prm {directlua} is a sequence of characters, not of tokens, contrary to all \TEX\ commands. So formally speaking its expansion is null, but it places material on a pseudo-file to be immediately read by \TEX, as -\ETEX's \type {\scantokens}. For a description of print functions look at \in +\ETEX's \prm {scantokens}. For a description of print functions look at \in {section} [sec:luaprint]. Because the \syntax {<general text>} is a chunk, the normal \LUA\ error handling @@ -337,15 +465,14 @@ can break up \LUATEX\ pretty bad. If you are not careful while working with the node list interface, you may even end up with assertion errors from within the \TEX\ portion of the executable. -The behaviour documented in the above subsection is considered stable in the sense -that there will not be backward-incompatible changes any more. +\stopsubsection -\subsection{\type {\latelua}} +\startsubsection[title={\lpr {latelua} and \lpr {lateluafunction}}] -Contrary to \type {\directlua}, \type {\latelua} stores \LUA\ code in a whatsit +Contrary to \prm {directlua}, \lpr {latelua} stores \LUA\ code in a whatsit that will be processed at the time of shipping out. Its intended use is a cross -between \PDF\ literals (often available as \type {\pdfliteral}) and the -traditional \TEX\ extension \type {\write}. Within the \LUA\ code you can print +between \PDF\ literals (often available as \orm {pdfliteral}) and the +traditional \TEX\ extension \prm {write}. Within the \LUA\ code you can print \PDF\ statements directly to the \PDF\ file via \type {pdf.print}, or you can write to other output streams via \type {texio.write} or simply using \LUA\ \IO\ routines. @@ -356,12 +483,19 @@ routines. \stopsyntax Expansion of macros in the final \type {<general text>} is delayed until just -before the whatsit is executed (like in \type {\write}). With regard to \PDF\ -output stream \type {\latelua} behaves as \PDF\ page literals. The \syntax +before the whatsit is executed (like in \prm {write}). With regard to \PDF\ +output stream \lpr {latelua} behaves as \PDF\ page literals. The \syntax {name <general text>} and \syntax {<16-bit number>} behave in the same way as -they do for \type {\directlua} +they do for \prm {directlua}. + +The \lpr {lateluafunction} primitive takes a number and is similar to \lpr +{luafunction} but gets delated to shipout time. It's just there for completeness. 
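
To make the difference between \directlua and \latelua a bit more tangible, here is a minimal editorial sketch (not part of the patch), assuming the PDF backend is active: the code is stored in a whatsit and only runs when the page that carries it is shipped out, at which point pdf.print writes straight into the page stream.

\starttyping
some text on the page
\latelua {
  --[[ this code runs when the page is shipped out, not here ]]
  texio.write_nl("shipping out a page, adding a literal")
  pdf.print("0.75 g")
}
\stoptyping

The literal 0.75 g is only a harmless placeholder; any PDF operators could be written at this point.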
+ +\stopsubsection + +\startsubsection[title={\lpr {luaescapestring}}] -\subsection{\type {\luaescapestring}} +\topicindex {escaping} This primitive converts a \TEX\ token sequence so that it can be safely used as the contents of a \LUA\ string: embedded backslashes, double and single quotes, @@ -375,23 +509,24 @@ sequence is fully expanded. \stopsyntax Most often, this command is not actually the best way to deal with the -differences between the \TEX\ and \LUA. In very short bits of \LUA\ -code it is often not needed, and for longer stretches of \LUA\ code it -is easier to keep the code in a separate file and load it using \LUA's -\type {dofile}: +differences between \TEX\ and \LUA. In very short bits of \LUA\ code it is often +not needed, and for longer stretches of \LUA\ code it is easier to keep the code +in a separate file and load it using \LUA's \type {dofile}: \starttyping \directlua { dofile('mysetups.lua') } \stoptyping -\subsection{\type {\luafunction} and \type {\luafunctioncall}} +\stopsubsection -The \type {\directlua} commands involves tokenization of its argument (after +\startsubsection[title={\lpr {luafunction}, \lpr {luafunctioncall} and \lpr {luadef}}] + +The \prm {directlua} commands involves tokenization of its argument (after picking up an optional name or number specification). The tokenlist is then converted into a string and given to \LUA\ to turn into a function that is -called. The overhead is rather small but when you use this primitive hundreds of -thousands of times, it can become noticeable. For this reason there is a variant -call available: \type {\luafunction}. This command is used as follows: +called. The overhead is rather small but when you have millions of calls it can +have some impact. For this reason there is a variant call available: \lpr +{luafunction}. This command is used as follows: \starttyping \directlua { @@ -417,73 +552,117 @@ in the following example the number \type {8} gets typeset. } \stoptyping -The \type {\luafunctioncall} primitive does the same but is unexpandable, for instance -in an \type {\edef}. +The \lpr {luafunctioncall} primitive does the same but is unexpandable, for +instance in an \prm {edef}. In addition \LUATEX\ provides a definer: + +\starttyping + \luadef\MyFunctionA 1 + \global\luadef\MyFunctionB 2 +\protected\global\luadef\MyFunctionC 3 +\stoptyping -\section {Alignments} +You should really use these commands with care. Some references get stored in +tokens and assume that the function is available when that token expands. On the +other hand, as we have tested this functionality in relative complex situations +normal usage should not give problems. -\subsection{\tex {alignmark}} +\stopsubsection -This primitive duplicates the functionality of \type {#} inside alignment -preambles. +\startsubsection[title={\lpr {luabytecode} and \lpr {luabytecodecall}}] -\subsection{\tex {aligntab}} +Analogue to the function callers discussed in the previous section we have byte +code callers. Again the call variant is unexpandable. + +\starttyping +\directlua { + lua.bytecode[9998] = function(s) + tex.sprint(s*token.scan_int()) + end + lua.bytecode[5555] = function(s) + tex.sprint(s*token.scan_dimen()) + end +} +\stoptyping + +This works with: + +\starttyping +\luabytecode 9998 5 \luabytecode 5555 5sp +\luabytecodecall9998 5 \luabytecodecall5555 5sp +\stoptyping -This primitive duplicates the functionality of \type {&} inside alignments and -preambles. 
+The variable \type {s} in the code is the number of the byte code register that +can be used for diagnostic purposes. The advantage of bytecode registers over +function calls is that they are stored in the format (but without upvalues). -\section{Catcode tables} +\stopsubsection + +\stopsection + +\startsection[title={Catcode tables}] + +\startsubsection[title={Catcodes}] + +\topicindex {catcodes} Catcode tables are a new feature that allows you to switch to a predefined catcode regime in a single statement. You can have a practically unlimited number of different tables. This subsystem is backward compatible: if you never use the following commands, your document will not notice any difference in behaviour compared to traditional \TEX. The contents of each catcode table is independent -from any other catcode tables, and their contents is stored and retrieved from -the format file. +from any other catcode table, and its contents is stored and retrieved from the +format file. -\subsection{\type {\catcodetable}} +\stopsubsection + +\startsubsection[title={\lpr {catcodetable}}] \startsyntax \catcodetable <15-bit number> \stopsyntax -The primitive \type {\catcodetable} switches to a different catcode table. Such a +The primitive \lpr {catcodetable} switches to a different catcode table. Such a table has to be previously created using one of the two primitives below, or it has to be zero. Table zero is initialized by \INITEX. -\subsection{\type {\initcatcodetable}} +\stopsubsection + +\startsubsection[title={\lpr {initcatcodetable}}] \startsyntax \initcatcodetable <15-bit number> \stopsyntax -The primitive \type {\initcatcodetable} creates a new table with catcodes identical -to those defined by \INITEX: - -\starttabulate[|r|l|l|l|] -\NC 0 \NC \tttf \letterbackslash \NC \NC \type {escape} \NC\NR -\NC 5 \NC \tttf \letterhat\letterhat M \NC return \NC \type {car_ret} \NC\NR -\NC 9 \NC \tttf \letterhat\letterhat @ \NC null \NC \type {ignore} \NC\NR -\NC 10 \NC \tttf <space> \NC space \NC \type {spacer} \NC\NR -\NC 11 \NC {\tttf a} \endash\ {\tttf z} \NC \NC \type {letter} \NC\NR -\NC 11 \NC {\tttf A} \endash\ {\tttf Z} \NC \NC \type {letter} \NC\NR -\NC 12 \NC everything else \NC \NC \type {other} \NC\NR -\NC 14 \NC \tttf \letterpercent \NC \NC \type {comment} \NC\NR -\NC 15 \NC \tttf \letterhat\letterhat ? \NC delete \NC \type {invalid_char} \NC\NR +The primitive \lpr {initcatcodetable} creates a new table with catcodes +identical to those defined by \INITEX. The new catcode table is allocated +globally: it will not go away after the current group has ended. If the supplied +number is identical to the currently active table, an error is raised. The +initial values are: + +\starttabulate[|c|c|l|l|] +\DB catcode \BC character \BC equivalent \BC category \NC \NR +\TB +\NC 0 \NC \tttf \letterbackslash \NC \NC \type {escape} \NC \NR +\NC 5 \NC \tttf \letterhat\letterhat M \NC return \NC \type {car_ret} \NC \NR +\NC 9 \NC \tttf \letterhat\letterhat @ \NC null \NC \type {ignore} \NC \NR +\NC 10 \NC \tttf <space> \NC space \NC \type {spacer} \NC \NR +\NC 11 \NC {\tttf a} \endash\ {\tttf z} \NC \NC \type {letter} \NC \NR +\NC 11 \NC {\tttf A} \endash\ {\tttf Z} \NC \NC \type {letter} \NC \NR +\NC 12 \NC everything else \NC \NC \type {other} \NC \NR +\NC 14 \NC \tttf \letterpercent \NC \NC \type {comment} \NC \NR +\NC 15 \NC \tttf \letterhat\letterhat ? 
\NC delete \NC \type {invalid_char} \NC \NR +\LL \stoptabulate -The new catcode table is allocated globally: it will not go away after the -current group has ended. If the supplied number is identical to the currently -active table, an error is raised. +\stopsubsection -\subsection{\type {\savecatcodetable}} +\startsubsection[title={\lpr {savecatcodetable}}] \startsyntax \savecatcodetable <15-bit number> \stopsyntax -\type {\savecatcodetable} copies the current set of catcodes to a new table with +\lpr {savecatcodetable} copies the current set of catcodes to a new table with the requested number. The definitions in this new table are all treated as if they were made in the outermost level. @@ -491,55 +670,78 @@ The new table is allocated globally: it will not go away after the current group has ended. If the supplied number is the currently active table, an error is raised. -\section{Suppressing errors} +\stopsubsection + +\stopsection -\subsection{\type {\suppressfontnotfounderror}} +\startsection[title={Suppressing errors}] + +\startsubsection[title={\lpr {suppressfontnotfounderror}}] + +\topicindex {errors} + +If this integer parameter is non|-|zero, then \LUATEX\ will not complain about +font metrics that are not found. Instead it will silently skip the font +assignment, making the requested csname for the font \prm {ifx} equal to \prm +{nullfont}, so that it can be tested against that without bothering the user. \startsyntax \suppressfontnotfounderror = 1 \stopsyntax -If this integer parameter is non|-|zero, then \LUATEX\ will not complain about -font metrics that are not found. Instead it will silently skip the font -assignment, making the requested csname for the font \type {\ifx} equal to \type -{\nullfont}, so that it can be tested against that without bothering the user. +\stopsubsection + +\startsubsection[title={\lpr {suppresslongerror}}] -\subsection{\type {\suppresslongerror}} +\topicindex {errors} + +If this integer parameter is non|-|zero, then \LUATEX\ will not complain about +\prm {par} commands encountered in contexts where that is normally prohibited +(most prominently in the arguments of macros not defined as \prm {long}). \startsyntax \suppresslongerror = 1 \stopsyntax -If this integer parameter is non|-|zero, then \LUATEX\ will not complain about -\type {\par} commands encountered in contexts where that is normally prohibited -(most prominently in the arguments of non-long macros). +\stopsubsection -\subsection{\type {\suppressifcsnameerror}} +\startsubsection[title={\lpr {suppressifcsnameerror}}] -\startsyntax -\suppressifcsnameerror = 1 -\stopsyntax +\topicindex {errors} If this integer parameter is non|-|zero, then \LUATEX\ will not complain about -non-expandable commands appearing in the middle of a \type {\ifcsname} expansion. +non-expandable commands appearing in the middle of a \prm {ifcsname} expansion. Instead, it will keep getting expanded tokens from the input until it encounters -an \type {\endcsname} command. If the input expansion is unbalanced with respect -to \type {\csname} \ldots \type {\endcsname} pairs, the \LUATEX\ process may hang +an \prm {endcsname} command. If the input expansion is unbalanced with respect +to \prm {csname} \ldots \prm {endcsname} pairs, the \LUATEX\ process may hang indefinitely. 
-\subsection{\type {\suppressoutererror}} - \startsyntax -\suppressoutererror = 1 +\suppressifcsnameerror = 1 \stopsyntax +\stopsubsection + +\startsubsection[title={\lpr {suppressoutererror}}] + +\topicindex {errors} + If this new integer parameter is non|-|zero, then \LUATEX\ will not complain -about \type {\outer} commands encountered in contexts where that is normally +about \prm {outer} commands encountered in contexts where that is normally prohibited. -\subsection{\type {\suppressmathparerror}} +\startsyntax +\suppressoutererror = 1 +\stopsyntax + +\stopsubsection + +\startsubsection[title={\lpr {suppressmathparerror}}] + +\topicindex {errors} +\topicindex {math} -The following setting will permit \type {\par} tokens in a math formula: +The following setting will permit \prm {par} tokens in a math formula: \startsyntax \suppressmathparerror = 1 @@ -553,41 +755,30 @@ $ x + 1 = a $ \stoptyping -\subsection{\type {\suppressprimitiveerror}} +\stopsubsection + +\startsubsection[title={\lpr {suppressprimitiveerror}}] + +\topicindex {errors} +\topicindex {primitives} When set to a non|-|zero value the following command will not issue an error: -\starttyping +\startsyntax \suppressprimitiveerror = 1 \primitive\notaprimitive -\stoptyping - -\section {Math} - -\subsection{Extensions} +\stopsyntax -We will cover math in its own chapter because not only the font subsystem and -spacing model have been enhanced (thereby introducing many new primitives) but -also because some more control has been added to existing functionality. +\stopsubsection -\subsection{\type {\matheqnogapstep}} +\stopsection -By default \TEX\ will add one quad between the equation and the number. This is -hard coded. A new primitive can control this: +\startsection[title={Fonts}] -\startsyntax -\matheqnogapstep = 1000 -\stopsyntax +\startsubsection[title={Font syntax}] -Because a math quad from the math text font is used instead of a dimension, we -use a step to control the size. A value of zero will suppress the gap. The step -is divided by 1000 which is the usual way to mimmick floating point factors in -\TEX. - -\section{Fonts} - -\subsection{Font syntax} +\topicindex {fonts} \LUATEX\ will accept a braced argument as a font name: @@ -598,20 +789,26 @@ is divided by 1000 which is the usual way to mimmick floating point factors in This allows for embedded spaces, without the need for double quotes. Macro expansion takes place inside the argument. -\subsection{\type {\fontid}} +\stopsubsection + +\startsubsection[title={\lpr {fontid} and \lpr {setfontid}}] \startsyntax \fontid\font \stopsyntax This primitive expands into a number. It is not a register so there is no need to -prefix with \type {\number} (and using \type {\the} gives an error). The currently +prefix with \prm {number} (and using \prm {the} gives an error). The currently used font id is \fontid\font. Here are some more: -\starttabulate[|l|c|] -\NC \type {\bf} \NC \bf \fontid\font \NC \NR -\NC \type {\it} \NC \it \fontid\font \NC \NR -\NC \type {\bi} \NC \bi \fontid\font \NC \NR +\starttabulate[|l|c|c|] +\DB style \BC command \BC font id \NC \NR +\TB +\NC normal \NC \type {\tf} \NC \bf \fontid\font \NC \NR +\NC bold \NC \type {\bf} \NC \bf \fontid\font \NC \NR +\NC italic \NC \type {\it} \NC \it \fontid\font \NC \NR +\NC bold italic \NC \type {\bi} \NC \bi \fontid\font \NC \NR +\LL \stoptabulate These numbers depend on the macro package used because each one has its own way @@ -620,12 +817,15 @@ order of loading fonts. 
For instance, when in \CONTEXT\ virtual math \UNICODE\ fonts are used, we can easily get over a hundred ids in use. Not all ids have to be bound to a real font, after all it's just a number. -\subsection{\type {\setfontid}} +The primitive \lpr {setfontid} can be used to enable a font with the given id, +which of course needs to be a valid one. -The primitive \type {\setfontid} can be used to enable a font with the given id -(which of course needs to be a valid one). +\stopsubsection -\subsection{\type {\noligs} and \type {\nokerns}} +\startsubsection[title={\lpr {noligs} and \lpr {nokerns}}] + +\topicindex {ligatures+suppress} +\topicindex {kerns+suppress} These primitives prohibit ligature and kerning insertion at the time when the initial node list is built by \LUATEX's main control loop. You can enable these @@ -642,15 +842,19 @@ kerning functions, i.e.\ by assigning dummy functions to their associated callbacks. Keep in mind that when you define a font (using \LUA) you can also omit the kern and ligature tables, which has the same effect as the above. -\subsection{\type{\nospaces}} +\stopsubsection + +\startsubsection[title={\type{\nospaces}}] + +\topicindex {spaces+suppress} -This new primitive can be used to overrule the usual \type {\spaceskip} -related heuristics when a space character is seen in a text flow. The -value~\type{1} triggers no injection while \type{2} results in injection of -a zero skip. Below we see the results for four characters separated by a +This new primitive can be used to overrule the usual \prm {spaceskip} related +heuristics when a space character is seen in a text flow. The value~\type{1} +triggers no injection while \type{2} results in injection of a zero skip. In \in +{figure} [fig:nospaces] we see the results for four characters separated by a space. -\startlinecorrection +\startplacefigure[reference=fig:nospaces,title={The \lpr {nospaces} options.}] \startcombination[3*2] {\ruledhbox to 5cm{\vtop{\hsize 10mm\nospaces=0\relax x x x x \par}\hss}} {\type {0 / hsize 10mm}} {\ruledhbox to 5cm{\vtop{\hsize 10mm\nospaces=1\relax x x x x \par}\hss}} {\type {1 / hsize 10mm}} @@ -659,24 +863,30 @@ space. {\ruledhbox to 5cm{\vtop{\hsize 1mm\nospaces=1\relax x x x x \par}\hss}} {\type {1 / hsize 1mm}} {\ruledhbox to 5cm{\vtop{\hsize 1mm\nospaces=2\relax x x x x \par}\hss}} {\type {2 / hsize 1mm}} \stopcombination -\stoplinecorrection +\stopplacefigure + +\stopsubsection + +\stopsection + +\startsection[title={Tokens, commands and strings}] -\section{Tokens, commands and strings} +\startsubsection[title={\lpr {scantextokens}}] -\subsection{\type {\scantextokens}} +\topicindex {tokens+scanning} -The syntax of \type {\scantextokens} is identical to \type {\scantokens}. This -primitive is a slightly adapted version of \ETEX's \type {\scantokens}. The +The syntax of \lpr {scantextokens} is identical to \prm {scantokens}. This +primitive is a slightly adapted version of \ETEX's \prm {scantokens}. The differences are: \startitemize \startitem - The last (and usually only) line does not have a \type {\endlinechar} + The last (and usually only) line does not have a \prm {endlinechar} appended. \stopitem \startitem - \type {\scantextokens} never raises an EOF error, and it does not execute - \type {\everyeof} tokens. + \lpr {scantextokens} never raises an EOF error, and it does not execute + \prm {everyeof} tokens. 
\stopitem \startitem There are no \quote {\unknown\ while end of file \unknown} error tests @@ -685,7 +895,10 @@ differences are: \stopitem \stopitemize -\subsection{\type {\toksapp}, \type {\tokspre}, \type {\etoksapp} and \type {\etokspre}} +\stopsubsection + +\startsubsection[title={\lpr {toksapp}, \lpr {tokspre}, \lpr {etoksapp}, \lpr {etokspre}, +\lpr {gtoksapp}, \lpr {gtokspre}, \lpr {xtoksapp}, \lpr {xtokspre}}] Instead of: @@ -700,15 +913,17 @@ you can use: \stoptyping The \type {pre} variants prepend instead of append, and the \type {e} variants -expand the passed general text. +expand the passed general text. The \type {g} and \type {x} variants are global. + +\stopsubsection -\subsection{\type {\csstring}, \type {\begincsname} and \type {\lastnamedcs}} +\startsubsection[title={\prm {csstring}, \lpr {begincsname} and \lpr {lastnamedcs}}] -These are somewhat special. The \type {\csstring} primitive is like -\type {\string} but it omits the leading escape character. This can be -somewhat more efficient that stripping it of afterwards. +These are somewhat special. The \prm {csstring} primitive is like +\prm {string} but it omits the leading escape character. This can be +somewhat more efficient than stripping it afterwards. -The \type {\begincsname} primitive is like \type {\csname} but doesn't create +The \lpr {begincsname} primitive is like \prm {csname} but doesn't create a relaxed equivalent when there is no such name. It is equivalent to \starttyping @@ -718,10 +933,8 @@ a relaxed equivalent when there is no such name. It is equivalent to \stoptyping The advantage is that it saves a lookup (don't expect much speedup) but more -important is that it avoids using the \type {\if}. - -The \type {\lastnamedcs} is one that should be used with care. The above -example could be written as: +important is that it avoids using the \prm {if} test. The \lpr {lastnamedcs} +is one that should be used with care. The above example could be written as: \starttyping \ifcsname foo\endcsname @@ -731,9 +944,13 @@ example could be written as: This is slightly more efficient than constructing the string twice (deep down in \LUATEX\ this also involves some \UTF8 juggling), but probably more relevant is -that it saves a few tokens and can make code a bit more more readable. +that it saves a few tokens and can make code a bit more readable. + +\stopsubsection -\subsection{\type {\clearmarks}} +\startsubsection[title={\lpr {clearmarks}}] + +\topicindex {marks} This primitive complements the \ETEX\ mark primitives and clears a mark class completely, resetting all three connected mark texts to empty. It is an @@ -743,71 +960,241 @@ immediate command. \clearmarks <16-bit number> \stopsyntax -\subsection{\type{\letcharcode}} +\stopsubsection + +\startsubsection[title={\lpr {alignmark} and \lpr {aligntab}}] + +The primitive \lpr {alignmark} duplicates the functionality of \type {#} inside +alignment preambles, while \lpr {aligntab} duplicates the functionality of \type +{&}. -This primitive is still experimental but can be used to assign a meaning to an active -character, as in: +\stopsubsection + +\startsubsection[title={\lpr {letcharcode}}] + +This primitive can be used to assign a meaning to an active character, as in: \starttyping \def\foo{bar} \letcharcode123=\foo \stoptyping -This can be a bit nicer that using the uppercase tricks (using the property of -\type {\uppercase} that it treats active characters special). 
+This can be a bit nicer than using the uppercase tricks (using the property of +\prm {uppercase} that it treats active characters special). -\section{Boxes, rules and leaders} +\stopsubsection -\subsection{\type {\outputbox}} +\startsubsection[title={\lpr {glet}}] -\startsyntax -\outputbox = 65535 -\stopsyntax +This primitive is similar to: + +\starttyping +\protected\def\glet{\global\let} +\stoptyping + +but faster (only measurable with millions of calls) and probably more convenient +(after all we also have \type {\gdef}). + +\stopsubsection + +\startsubsection[title={\lpr {expanded}, \lpr {immediateassignment} and \lpr {immediateassigned}}] + +\topicindex {expansion} + +The \lpr {expanded} primitive takes a token list and expands it content which can +come in handy: it avoids a tricky mix of \prm {expandafter} and \prm {noexpand}. +You can compare it with what happens inside the body of an \prm {edef}. But this +kind of expansion it still doesn't expand some primitive operations. + +\startbuffer +\newcount\NumberOfCalls + +\def\TestMe{\advance\NumberOfCalls1 } + +\edef\Tested{\TestMe foo:\the\NumberOfCalls} +\edef\Tested{\TestMe foo:\the\NumberOfCalls} +\edef\Tested{\TestMe foo:\the\NumberOfCalls} + +\meaning\Tested +\stopbuffer + +\typebuffer + +The result is a macro that has the not expanded code in its body: + +\getbuffer + +Instead we can define \tex {TestMe} in a way that expands the assignment +immediately. You need of course to be aware of preventing look ahead interference +by using a space or \tex {relax} (often an expression works better as it doesn't +leave an \tex {relax}). + +\startbuffer +\def\TestMe{\immediateassignment\advance\NumberOfCalls1 } + +\edef\Tested{\TestMe foo:\the\NumberOfCalls} +\edef\Tested{\TestMe foo:\the\NumberOfCalls} +\edef\Tested{\TestMe foo:\the\NumberOfCalls} + +\meaning\Tested +\stopbuffer + +\typebuffer + +This time the counter gets updates and we don't see interference in the +resulting \tex {Tested} macro: + +\getbuffer + +Here is a somewhat silly example of expanded comparison: + +\startbuffer +\def\expandeddoifelse#1#2#3#4% + {\immediateassignment\edef\tempa{#1}% + \immediateassignment\edef\tempb{#2}% + \ifx\tempa\tempb + \immediateassignment\def\next{#3}% + \else + \immediateassignment\def\next{#4}% + \fi + \next} + +\edef\Tested + {(\expandeddoifelse{abc}{def}{yes}{nop}/% + \expandeddoifelse{abc}{abc}{yes}{nop})} + +\meaning\Tested +\stopbuffer + +\typebuffer + +It gives: + +\getbuffer + +A variant is: + +\starttyping +\def\expandeddoifelse#1#2#3#4% + {\immediateassigned{ + \edef\tempa{#1}% + \edef\tempb{#2}% + }% + \ifx\tempa\tempb + \immediateassignment\def\next{#3}% + \else + \immediateassignment\def\next{#4}% + \fi + \next} +\stoptyping + +The possible error messages are the same as using assignments in preambles of +alignments and after the \prm {accent} command. The supported assignments are the +so called prefixed commands (except box assignments). + +\stopsubsection + +\startsubsection[title={\lpr {ifcondition}}] + +\topicindex {conditions} + +This is a somewhat special one. When you write macros conditions need to be +properly balanced in order to let \TEX's fast branch skipping work well. This new +primitive is basically a no||op flagged as a condition so that the scanner can +recognize it as an if|-|test. However, when a real test takes place the work is +done by what follows, in the next example \tex {something}. 
+ +\starttyping +\unexpanded\def\something#1#2% + {\edef\tempa{#1}% + \edef\tempb{#2} + \ifx\tempa\tempb} + +\ifcondition\something{a}{b}% + \ifcondition\something{a}{a}% + true 1 + \else + false 1 + \fi +\else + \ifcondition\something{a}{a}% + true 2 + \else + false 2 + \fi +\fi +\stoptyping + +If you are familiar with \METAPOST, this is a bit like \type {vardef} where the macro +has a return value. Here the return value is a test. + +\stopsubsection -This new integer parameter allows you to alter the number of the box that will be +\stopsection + +\startsection[title={Boxes, rules and leaders}] + +\startsubsection[title={\lpr {outputbox}}] + +\topicindex {output} + +This integer parameter allows you to alter the number of the box that will be used to store the page sent to the output routine. Its default value is 255, and the acceptable range is from 0 to 65535. -\subsection{\type {\vpack}, \type {\hpack} and \type {\tpack}} +\startsyntax +\outputbox = 12345 +\stopsyntax + +\stopsubsection -These three primitives are like \type {\vbox}, \type {\hbox} and \type {\vtop} +\startsubsection[title={\prm {vpack}, \prm {hpack} and \prm {tpack}}] + +These three primitives are like \prm {vbox}, \prm {hbox} and \prm {vtop} but don't apply the related callbacks. -\subsection{\type {\vsplit}} +\stopsubsection + +\startsubsection[title={\prm {vsplit}}] + +\topicindex {splitting} -The \type {\vsplit} primitive has to be followed by a specification of the -required height. As alternative for the \type {to} keyword you can use \type -{upto} to get a split of the given size but result has the natural dimensions -then. +The \prm {vsplit} primitive has to be followed by a specification of the required +height. As alternative for the \type {to} keyword you can use \type {upto} to get +a split of the given size but result has the natural dimensions then. -\subsection{Images and Forms} +\stopsubsection + +\startsubsection[title={Images and reused box objects},reference=sec:imagedandforms] These two concepts are now core concepts and no longer whatsits. They are in fact now implemented as rules with special properties. Normal rules have subtype~0, saved boxes have subtype~1 and images have subtype~2. This has the positive side -effect that whenever we need to take content with dimensions into account, when we -look at rule nodes, we automatically also deal with these two types. +effect that whenever we need to take content with dimensions into account, when +we look at rule nodes, we automatically also deal with these two types. The syntax of the \type {\save...resource} is the same as in \PDFTEX\ but you should consider them to be backend specific. This means that a macro package should treat them as such and check for the current output mode if applicable. 
-Here are the equivalents: -\starttabulate[|l|l|] -\NC \type {\saveboxresource} \EQ \type {\pdfxform} \NC \NR -\NC \type {\saveimageresource} \EQ \type {\pdfximage} \NC \NR -\NC \type {\useboxresource} \EQ \type {\pdfrefxform} \NC \NR -\NC \type {\useimageresource} \EQ \type {\pdfrefximage} \NC \NR -\NC \type {\lastsavedboxresourceindex} \EQ \type {\pdflastxform} \NC \NR -\NC \type {\lastsavedimageresourceindex} \EQ \type {\pdflastximage} \NC \NR -\NC \type {\lastsavedimageresourcepages} \EQ \type {\pdflastximagepages} \NC \NR +\starttabulate[|l|p|] +\DB command \BC explanation \NC \NR +\TB +\NC \lpr {saveboxresource} \NC save the box as an object to be included later \NC \NR +\NC \lpr {saveimageresource} \NC save the image as an object to be included later \NC \NR +\NC \lpr {useboxresource} \NC include the saved box object here (by index) \NC \NR +\NC \lpr {useimageresource} \NC include the saved image object here (by index) \NC \NR +\NC \lpr {lastsavedboxresourceindex} \NC the index of the last saved box object \NC \NR +\NC \lpr {lastsavedimageresourceindex} \NC the index of the last saved image object \NC \NR +\NC \lpr {lastsavedimageresourcepages} \NC the number of pages in the last saved image object \NC \NR +\LL \stoptabulate \LUATEX\ accepts optional dimension parameters for \type {\use...resource} in the same format as for rules. With images, these dimensions are then used instead of -the ones given to \type {\useimageresource} but the original dimensions are not -overwritten, so that a \type {\useimageresource} without dimensions still -provides the image with dimensions defined by \type {\saveimageresource}. These -optional parameters are not implemented for \type {\saveboxresource}. +the ones given to \lpr {useimageresource} but the original dimensions are not +overwritten, so that a \lpr {useimageresource} without dimensions still +provides the image with dimensions defined by \lpr {saveimageresource}. These +optional parameters are not implemented for \lpr {saveboxresource}. \starttyping \useimageresource width 20mm height 10mm depth 5mm \lastsavedimageresourceindex @@ -820,35 +1207,51 @@ is the \type {type} key. When set to non|-|zero the \type {/Type} entry is omitted. A value of 1 or 3 still writes a \type {/BBox}, while 2 or 3 will write a \type {/Matrix}. -\subsection{\type {\nohrule} and \type {\novrule}} +\stopsubsection + +\startsubsection[title={\lpr {nohrule} and \lpr {novrule}}] + +\topicindex {rules} Because introducing a new keyword can cause incompatibilities, two new primitives -were introduced: \type {\nohrule} and \type {\novrule}. These can be used to +were introduced: \lpr {nohrule} and \lpr {novrule}. These can be used to reserve space. This is often more efficient than creating an empty box with fake -dimensions). +dimensions. + +\stopsubsection -\subsection{\type {\gleaders}} +\startsubsection[title={\lpr {gleaders}}] + +\topicindex {leaders} This type of leaders is anchored to the origin of the box to be shipped out. So -they are like normal \type {\leaders} in that they align nicely, except that the +they are like normal \prm {leaders} in that they align nicely, except that the alignment is based on the {\it largest\/} enclosing box instead of the {\it smallest\/}. The \type {g} stresses this global nature. 
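
As a quick illustration of the box resource commands listed earlier in this section (an editorial sketch, not part of the commit; the macro name \snippetobject is made up for the example): the box is converted to an object once and the resulting index is then referenced twice.

\starttyping
\setbox0=\hbox{a reusable snippet}
\saveboxresource0
\edef\snippetobject{\number\lastsavedboxresourceindex}

% the same object is now referenced twice, the box itself is gone
first  use: \useboxresource\snippetobject \par
second use: \useboxresource\snippetobject
\stoptyping

Storing the index in a macro mirrors the way \pdflastxform was typically combined with \pdfrefxform in \PDFTEX.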
-\section {Languages} +\stopsubsection + +\stopsection -\subsection{\type {\hyphenationmin}} +\startsection[title={Languages}] + +\startsubsection[title={\lpr {hyphenationmin}}] + +\topicindex {languages} +\topicindex {hyphenation} This primitive can be used to set the minimal word length, so setting it to a value of~$5$ means that only words of 6 characters and more will be hyphenated, of course -within the constraints of the \type {\lefthyphenmin} and \type {\righthyphenmin} +within the constraints of the \prm {lefthyphenmin} and \prm {righthyphenmin} values (as stored in the glyph node). This primitive accepts a number and stores the value with the language. -\subsection{\type {\boundary}, \type {\noboundary}, \type {\protrusionboundary} and \type -{\wordboundary}} +\stopsubsection + +\startsubsection[title={\prm {boundary}, \prm {noboundary}, \prm {protrusionboundary} and \prm {wordboundary}}] -The \type {\noboundary} commands used to inject a whatsit node but now injects a normal -node with type \type {boundary} and subtype~0. In addition you can say: +The \prm {noboundary} command is used to inject a whatsit node but now injects a normal +node with type \nod {boundary} and subtype~0. In addition you can say: \starttyping x\boundary 123\relax y @@ -858,31 +1261,57 @@ This has the same effect but the subtype is now~1 and the value~123 is stored. The traditional ligature builder still sees this as a cancel boundary directive but at the \LUA\ end you can implement different behaviour. The added benefit of passing this value is a side effect of the generalization. The subtypes~2 and~3 -are used to control protrusion and word boundaries in hyphenation. +are used to control protrusion and word boundaries in hyphenation and have +related primitives. -\section{Control and debugging} +\stopsubsection -\subsection {Tracing} +\stopsection -If \type {\tracingonline} is larger than~2, the node list display will also print +\startsection[title={Control and debugging}] + +\startsubsection[title={Tracing}] + +\topicindex {tracing} + +If \prm {tracingonline} is larger than~2, the node list display will also print the node number of the nodes. -\subsection{\type {\outputmode} and \type {\draftmode}} +\stopsubsection -The \type {\outputmode} variable tells \LUATEX\ what it has to produce: +\startsubsection[title={\lpr {outputmode}}] + +\topicindex {output} +\topicindex {backend} + +The \lpr {outputmode} variable tells \LUATEX\ what it has to produce: \starttabulate[|l|l|] +\DB value \BC output \NC \NR +\TB \NC \type {0} \NC \DVI\ code \NC \NR \NC \type {1} \NC \PDF\ code \NC \NR +\LL \stoptabulate -The value of the \type {\draftmode} counter signals the backend if it should -output less. The \PDF\ backend accepts a value of~$1$, while the \DVI\ backend -ignores the value. +\stopsubsection + +\startsubsection[title={\lpr {draftmode}}] -\section {Files} +The value of the \lpr {draftmode} counter signals the backend if it should output +less. The \PDF\ backend accepts a value of~1, while the \DVI\ backend ignores the +value. This is no critical feature so we can remove it in future versions when it +can make the backend cleaner. -\subsection{File syntax} +\stopsubsection + +\stopsection + +\startsection[title={Files}] + +\startsubsection[title={File syntax}] + +\topicindex {files+names} \LUATEX\ will accept a braced argument as a file name: @@ -894,26 +1323,49 @@ ignores the value. This allows for embedded spaces, without the need for double quotes. Macro expansion takes place inside the argument. 
-The \type {\tracingfonts} primitive that has been inherited from \PDFTEX\ has +The \lpr {tracingfonts} primitive that has been inherited from \PDFTEX\ has been adapted to support variants in reporting the font. The reason for this extension is that a csname not always makes sense. The zero case is the default. -\starttabulate[|T||] -\NC 0 \EQ \type{\foo xyz} \NC \NR -\NC 1 \EQ \type{\foo (bar)} \NC \NR -\NC 2 \EQ \type{<bar> xyz} \NC \NR -\NC 3 \EQ \type{<bar @ ..pt> xyz} \NC \NR -\NC 4 \EQ \type{<id>} \NC \NR -\NC 5 \EQ \type{<id: bar>} \NC \NR -\NC 6 \EQ \type{<id: bar @ ..pt> xyz} \NC \NR +\starttabulate[|l|l|] +\DB value \BC reported \NC \NR +\TB +\NC \type{0} \NC \type{\foo xyz} \NC \NR +\NC \type{1} \NC \type{\foo (bar)} \NC \NR +\NC \type{2} \NC \type{<bar> xyz} \NC \NR +\NC \type{3} \NC \type{<bar @ ..pt> xyz} \NC \NR +\NC \type{4} \NC \type{<id>} \NC \NR +\NC \type{5} \NC \type{<id: bar>} \NC \NR +\NC \type{6} \NC \type{<id: bar @ ..pt> xyz} \NC \NR +\LL \stoptabulate -\subsection{Writing to file} +\stopsubsection + +\startsubsection[title={Writing to file}] + +\topicindex {files+writing} -You can now open upto 127 files with \type {\openout}. When no file is open +You can now open upto 127 files with \prm {openout}. When no file is open writes will go to the console and log. As a consequence a system command is no longer possible but one can use \type {os.execute} to do the same. +\stopsubsection + +\stopsection + +\startsection[title={Math}] + +\topicindex {math} + +We will cover math extensions in its own chapter because not only the font +subsystem and spacing model have been enhanced (thereby introducing many new +primitives) but also because some more control has been added to existing +functionality. Much of this relates to the different approaches of traditional +\TEX\ fonts and \OPENTYPE\ math. 
+ +\stopsection + \stopchapter \stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-firstpage.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-firstpage.tex new file mode 100644 index 00000000000..772fbb3fe2a --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-firstpage.tex @@ -0,0 +1,32 @@ +\startcomponent luatex-firstpage + +\startstandardmakeup + + \start + \raggedleft + \definedfont[Bold*default at 48pt] + \setupinterlinespace + \blue \documentvariable{manual} \endgraf Reference \endgraf Manual \endgraf + \stop + + \vfill + + \definedfont[Bold*default at 12pt] + + \starttabulate[|l|l|] + \NC copyright \EQ Lua\TeX\ development team \NC \NR + \NC more info \EQ www.luatex.org \NC \NR + \NC version \EQ \currentdate \doifsomething{\documentvariable{snapshot}}{(snapshot \documentvariable{snapshot})} \NC \NR + \stoptabulate + +\stopstandardmakeup + +\setupbackgrounds + [leftpage] + [setups=pagenumber:left] + +\setupbackgrounds + [rightpage] + [setups=pagenumber:right] + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-fonts.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-fonts.tex index ddb64d94630..d49c63afecb 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-fonts.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-fonts.tex @@ -1,29 +1,31 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-fonts \startchapter[reference=fonts,title={Font structure}] -\section {The font tables} +\startsection[title={The font tables}] + +\topicindex {fonts} +\topicindex {fonts+tables} All \TEX\ fonts are represented to \LUA\ code as tables, and internally as \CCODE~structures. All keys in the table below are saved in the internal font -structure if they are present in the table returned by the \type {define_font} +structure if they are present in the table returned by the \cbk {define_font} callback, or if they result from the normal \TFM|/|\VF\ reading routines if there -is no \type {define_font} callback defined. +is no \cbk {define_font} callback defined. The column \quote {\VF} means that this key will be created by the \type {font.read_vf()} routine, \quote {\TFM} means that the key will be created by the -\type {font.read_tfm()} routine, and \quote{used} means whether or not the -\LUATEX\ engine itself will do something with the key. - -The top|-|level keys in the table are as follows: +\type {font.read_tfm()} routine, and \quote {used} means whether or not the +\LUATEX\ engine itself will do something with the key. 
The top|-|level keys in +the table are as follows: -\starttabulate[|l|c|c|c|l|p|] -\BC key \BC vf \BC tfm \BC used \BC value type \BC description \NC \NR +\starttabulate[|l|c|c|c|l|pl|] +\DB key \BC vf \BC tfm \BC used \BC value type \BC description \NC \NR +\TB \NC \type{name} \NC yes \NC yes \NC yes \NC string \NC metric (file) name \NC \NR \NC \type{area} \NC no \NC yes \NC yes \NC string \NC (directory) location, typically empty \NC \NR \NC \type{used} \NC no \NC yes \NC yes \NC boolean \NC indicates usage (initial: false) \NC \NR @@ -42,52 +44,61 @@ The top|-|level keys in the table are as follows: \NC \type{fullname} \NC no \NC no \NC yes \NC string \NC output font name, used as a fallback in the \PDF\ output if the \type {psname} is not set \NC \NR \NC \type{header} \NC yes \NC no \NC no \NC string \NC header comments, if any \NC \NR -\NC \type{hyphenchar} \NC no \NC no \NC yes \NC number \NC default: \TEX's \type {\hyphenchar} \NC \NR +\NC \type{hyphenchar} \NC no \NC no \NC yes \NC number \NC default: \TEX's \prm {hyphenchar} \NC \NR \NC \type{parameters} \NC no \NC yes \NC yes \NC hash \NC default: 7 parameters, all zero \NC \NR -\NC \type{size} \NC no \NC yes \NC yes \NC number \NC loaded (at) size. (default: same as designsize) \NC \NR -\NC \type{skewchar} \NC no \NC no \NC yes \NC number \NC default: \TEX's \type {\skewchar} \NC \NR +\NC \type{size} \NC no \NC yes \NC yes \NC number \NC the required scaling (by default the same as designsize) \NC \NR +\NC \type{skewchar} \NC no \NC no \NC yes \NC number \NC default: \TEX's \prm {skewchar} \NC \NR \NC \type{type} \NC yes \NC no \NC yes \NC string \NC basic type of this font \NC \NR \NC \type{format} \NC no \NC no \NC yes \NC string \NC disk format type \NC \NR \NC \type{embedding} \NC no \NC no \NC yes \NC string \NC \PDF\ inclusion \NC \NR \NC \type{filename} \NC no \NC no \NC yes \NC string \NC the name of the font on disk \NC \NR \NC \type{tounicode} \NC no \NC yes \NC yes \NC number \NC When this is set to~1 \LUATEX\ assumes per|-|glyph tounicode entries are present in the font. 
\NC \NR -\NC \type{stretch} \NC no \NC no \NC yes \NC number \NC the \quote {stretch} value from \type - {\expandglyphsinfont} \NC \NR -\NC \type{shrink} \NC no \NC no \NC yes \NC number \NC the \quote {shrink} value from \type - {\expandglyphsinfont} \NC \NR -\NC \type{step} \NC no \NC no \NC yes \NC number \NC the \quote {step} value from \type - {\expandglyphsinfont} \NC \NR +\NC \type{stretch} \NC no \NC no \NC yes \NC number \NC the \quote {stretch} value from \lpr {expandglyphsinfont} \NC \NR +\NC \type{shrink} \NC no \NC no \NC yes \NC number \NC the \quote {shrink} value from \lpr {expandglyphsinfont} \NC \NR +\NC \type{step} \NC no \NC no \NC yes \NC number \NC the \quote {step} value from \lpr {expandglyphsinfont} \NC \NR \NC \type{expansion_factor} \NC no \NC no \NC no \NC number \NC the actual expansion factor of an expanded font \NC \NR -\NC \type{attributes} \NC no \NC no \NC yes \NC string \NC the \type {\pdffontattr} \NC \NR +\NC \type{attributes} \NC no \NC no \NC yes \NC string \NC the \orm {pdffontattr} \NC \NR \NC \type{cache} \NC no \NC no \NC yes \NC string \NC This key controls caching of the \LUA\ table on the \TEX\ end where \type {yes} means: use a reference to the table that is passed to \LUATEX\ (this is the - default), and no \type {no} means: don't store the + default), and \type {no} means: don't store the table reference, don't cache any \LUA\ data for this font while \type {renew} means: don't store the table reference, but save a reference to the table that is - created at the first access to one of its fields in font. - Note: the saved reference is thread|-|local, so be - careful when you are using coroutines: an error will be - thrown if the table has been cached in one thread, but - you reference it from another thread. \NC \NR + created at the first access to one of its fields in the + font. \NC \NR \NC \type{nomath} \NC no \NC no \NC yes \NC boolean \NC This key allows a minor speedup for text fonts. If it is present and true, then \LUATEX\ will not check the character entries for math|-|specific keys. \NC \NR \NC \type{oldmath} \NC no \NC no \NC yes \NC boolean \NC This key flags a font as representing an old school \TEX\ math font and disables the \OPENTYPE\ code path. \NC \NR -\NC \type{slant} \NC no \NC no \NC yes \NC number \NC This has the same semantics as the \type {SlantFont} - operator in font map files. \NC \NR -\NC \type{extent} \NC no \NC no \NC yes \NC number \NC This has the same semantics as the \type {ExtendFont} - operator in font map files. \NC \NR +\NC \type{slant} \NC no \NC no \NC yes \NC number \NC This parameter will tilt the font and + does the same as \type {SlantFont} in the map file for + \TYPEONE\ fonts. \NC \NR +\NC \type{extend} \NC no \NC no \NC yes \NC number \NC This parameter will scale the font horizontally and + does the same as \type {ExtendFont} in the map file for + \TYPEONE\ fonts. \NC \NR +\NC \type{squeeze} \NC no \NC no \NC yes \NC number \NC This parameter will scale the font vertically and has + no equivalent in the map file. \NC \NR +\NC \type{width} \NC no \NC no \NC yes \NC number \NC The backend will inject \PDF\ operators that set the + penwidth. The value is (as usual in \TEX) divided by 1000. + It works with the \type {mode} file. \NC \NR +\NC \type{mode} \NC no \NC no \NC yes \NC number \NC The backend will inject \PDF\ operators that relate to the + drawing mode with 0~being a fill, 1~being an outline, + 2~both draw and fill and 3~no painting at all. 
\NC \NR +\LL \stoptabulate +The saved reference in the \type {cache} option is thread|-|local, so be careful +when you are using coroutines: an error will be thrown if the table has been +cached in one thread, but you reference it from another thread. + The key \type {name} is always required. The keys \type {stretch}, \type {shrink}, \type {step} only have meaning when used together: they can be used to -replace a post|-|loading \type {\expandglyphsinfont} command. The \type -{auto_expand} option is not supported in \LUATEX. In fact, the primitives -that create expanded or protruding copies are probably only useful when used with +replace a post|-|loading \lpr {expandglyphsinfont} command. The \type +{auto_expand} option is not supported in \LUATEX. In fact, the primitives that +create expanded or protruding copies are probably only useful when used with traditional fonts because all these extra \OPENTYPE\ properties are kept out of the picture. The \type {expansion_factor} is value that can be present inside a font in \type {font.fonts}. It is the actual expansion factor (a value between @@ -112,16 +123,14 @@ makes sure that the font's definition is written to the output file (\DVI\ or signalling the \quote {normal} direction for this font. There are sixteen possibilities: -\starttabulate[|c|c|c|c|] -\BC number \BC meaning \BC number \BC meaning \NC \NR -\NC \type{0} \NC \type{LT} \NC \type {8} \NC \type{TT} \NC \NR -\NC \type{1} \NC \type{LL} \NC \type {9} \NC \type{TL} \NC \NR -\NC \type{2} \NC \type{LB} \NC \type{10} \NC \type{TB} \NC \NR -\NC \type{3} \NC \type{LR} \NC \type{11} \NC \type{TR} \NC \NR -\NC \type{4} \NC \type{RT} \NC \type{12} \NC \type{BT} \NC \NR -\NC \type{5} \NC \type{RL} \NC \type{13} \NC \type{BL} \NC \NR -\NC \type{6} \NC \type{RB} \NC \type{14} \NC \type{BB} \NC \NR -\NC \type{7} \NC \type{RR} \NC \type{15} \NC \type{BR} \NC \NR +\starttabulate[|Tc|c|Tc|c|Tc|c|Tc|c|] +\DB \# \BC dir \BC \# \BC dir \BC \# \BC dir \BC \# \BC dir \NC \NR +\TB +\NC 0 \NC LT \NC 4 \NC RT \NC 8 \NC TT \NC 12 \NC BT \NC \NR +\NC 1 \NC LL \NC 5 \NC RL \NC 9 \NC TL \NC 13 \NC BL \NC \NR +\NC 2 \NC LB \NC 6 \NC RB \NC 10 \NC TB \NC 14 \NC BB \NC \NR +\NC 3 \NC LR \NC 7 \NC RR \NC 11 \NC TR \NC 15 \NC BR \NC \NR +\LL \stoptabulate These are \OMEGA|-|style direction abbreviations: the first character indicates @@ -138,7 +147,8 @@ gives a nicer user interface. The names and their internal remapping are: \starttabulate[|l|c|] -\BC name \BC remapping \NC \NR +\DB name \BC remapping \NC \NR +\TB \NC \type {slant} \NC 1 \NC \NR \NC \type {space} \NC 2 \NC \NR \NC \type {space_stretch} \NC 3 \NC \NR @@ -146,6 +156,7 @@ The names and their internal remapping are: \NC \type {x_height} \NC 5 \NC \NR \NC \type {quad} \NC 6 \NC \NR \NC \type {extra_space} \NC 7 \NC \NR +\LL \stoptabulate The keys \type {type}, \type {format}, \type {embedding}, \type {fullname} and @@ -159,37 +170,27 @@ virtual character whose ligatures and kerns are used to handle word boundary processing. \type {right_boundary} is similar but not actually used for anything (yet). -Other index keys are ignored. - Each character hash itself is a hash. For example, here is the character \quote -{f} (decimal 102) in the font \type {cmr10 at 10pt}: +{f} (decimal 102) in the font \type {cmr10 at 10pt}. The numbers that represent +dimensions are in scaled points. 
\starttyping [102] = { - ['width'] = 200250, - ['height'] = 455111, - ['depth'] = 0, - ['italic'] = 50973, - ['kerns'] = { + ["width"] = 200250, + ["height"] = 455111, + ["depth"] = 0, + ["italic"] = 50973, + ["kerns"] = { [63] = 50973, [93] = 50973, [39] = 50973, [33] = 50973, [41] = 50973 }, - ['ligatures'] = { - [102] = { - ['char'] = 11, - ['type'] = 0 - }, - [108] = { - ['char'] = 13, - ['type'] = 0 - }, - [105] = { - ['char'] = 12, - ['type'] = 0 - } + ["ligatures"] = { + [102] = { ["char"] = 11, ["type"] = 0 }, + [108] = { ["char"] = 13, ["type"] = 0 }, + [105] = { ["char"] = 12, ["type"] = 0 } } } \stoptyping @@ -197,16 +198,17 @@ Each character hash itself is a hash. For example, here is the character \quote The following top|-|level keys can be present inside a character hash: \starttabulate[|l|c|c|c|l|p|] -\BC key \BC vf \BC tfm \BC used \BC type \BC description \NC\NR +\DB key \BC vf \BC tfm \BC used \BC type \BC description \NC\NR +\TB \NC \type{width} \NC yes \NC yes \NC yes \NC number \NC character's width, in sp (default 0) \NC\NR \NC \type{height} \NC no \NC yes \NC yes \NC number \NC character's height, in sp (default 0) \NC\NR \NC \type{depth} \NC no \NC yes \NC yes \NC number \NC character's depth, in sp (default 0) \NC\NR \NC \type{italic} \NC no \NC yes \NC yes \NC number \NC character's italic correction, in sp (default zero) \NC\NR \NC \type{top_accent} \NC no \NC no \NC maybe \NC number \NC character's top accent alignment place, in sp (default zero) \NC\NR \NC \type{bot_accent} \NC no \NC no \NC maybe \NC number \NC character's bottom accent alignment place, in sp (default zero) \NC\NR -\NC \type{left_protruding} \NC no \NC no \NC maybe \NC number \NC character's \type {\lpcode} \NC\NR -\NC \type{right_protruding} \NC no \NC no \NC maybe \NC number \NC character's \type {\rpcode} \NC\NR -\NC \type{expansion_factor} \NC no \NC no \NC maybe \NC number \NC character's \type {\efcode} \NC\NR +\NC \type{left_protruding} \NC no \NC no \NC maybe \NC number \NC character's \lpr {lpcode} \NC\NR +\NC \type{right_protruding} \NC no \NC no \NC maybe \NC number \NC character's \lpr {rpcode} \NC\NR +\NC \type{expansion_factor} \NC no \NC no \NC maybe \NC number \NC character's \lpr {efcode} \NC\NR \NC \type{tounicode} \NC no \NC no \NC maybe \NC string \NC character's \UNICODE\ equivalent(s), in \UTF|-|16BE hexadecimal format \NC\NR \NC \type{next} \NC no \NC yes \NC yes \NC number \NC the \quote {next larger} character index \NC\NR \NC \type{extensible} \NC no \NC yes \NC yes \NC table \NC the constituent parts of an extensible recipe \NC\NR @@ -217,19 +219,17 @@ The following top|-|level keys can be present inside a character hash: \NC \type{commands} \NC yes \NC no \NC yes \NC array \NC virtual font commands \NC\NR \NC \type{name} \NC no \NC no \NC no \NC string \NC the character (\POSTSCRIPT) name \NC\NR \NC \type{index} \NC no \NC no \NC yes \NC number \NC the (\OPENTYPE\ or \TRUETYPE) font glyph index \NC\NR -\NC \type{used} \NC no \NC yes \NC yes \NC boolean \NC typeset already (default: false)? \NC\NR +\NC \type{used} \NC no \NC yes \NC yes \NC boolean \NC typeset already (default: false) \NC\NR \NC \type{mathkern} \NC no \NC no \NC yes \NC table \NC math cut-in specifications \NC\NR +\LL \stoptabulate The values of \type {top_accent}, \type {bot_accent} and \type {mathkern} are -used only for math accent and superscript placement, see the \at {math chapter} -[math] in this manual for details. 
- -The values of \type {left_protruding} and \type {right_protruding} are used only -when \type {\protrudechars} is non-zero. - -Whether or not \type {expansion_factor} is used depends on the font's global -expansion settings, as well as on the value of \type {\adjustspacing}. +used only for math accent and superscript placement, see \at {page} [math] in +this manual for details. The values of \type {left_protruding} and \type +{right_protruding} are used only when \lpr {protrudechars} is non-zero. Whether +or not \type {expansion_factor} is used depends on the font's global expansion +settings, as well as on the value of \lpr {adjustspacing}. The usage of \type {tounicode} is this: if this font specifies a \type {tounicode=1} at the top level, then \LUATEX\ will construct a \type {/ToUnicode} @@ -245,24 +245,28 @@ enclosing angle brackets. For instance the \type {tounicode} for a \type {fi} ligature would be \type {00660069}. When you pass a number the conversion will be done for you. -The presence of \type {extensible} will overrule \type {next}, if that is also -present. It in in turn can be overruled by \type {vert_variants}. - -The \type {extensible} table is very simple: +A math character can have a \type {next} field that points to a next larger +shape. However, the presence of \type {extensible} will overrule \type {next}, if +that is also present. The \type {extensible} field in turn can be overruled by +\type {vert_variants}, the \OPENTYPE\ version. The \type {extensible} table is +very simple: \starttabulate[|l|l|p|] -\BC key \BC type \BC description \NC\NR +\DB key \BC type \BC description \NC\NR +\TB \NC \type{top} \NC number \NC top character index \NC\NR \NC \type{mid} \NC number \NC middle character index \NC\NR \NC \type{bot} \NC number \NC bottom character index \NC\NR \NC \type{rep} \NC number \NC repeatable character index \NC\NR +\LL \stoptabulate The \type {horiz_variants} and \type {vert_variants} are arrays of components. Each of those components is itself a hash of up to five keys: \starttabulate[|l|l|p|] -\BC key \BC type \BC explanation \NC \NR +\DB key \BC type \BC explanation \NC \NR +\TB \NC \type{glyph} \NC number \NC The character index. Note that this is an encoding number, not a name. \NC \NR \NC \type{extender} \NC number \NC One (1) if this part is repeatable, zero (0) otherwise. \NC \NR \NC \type{start} \NC number \NC The maximum overlap at the starting side (in scaled points). \NC \NR @@ -270,11 +274,12 @@ Each of those components is itself a hash of up to five keys: \NC \type{advance} \NC number \NC The total advance width of this item. It can be zero or missing, then the natural size of the glyph for character \type {component} is used. \NC \NR +\LL \stoptabulate The \type {kerns} table is a hash indexed by character index (and \quote {character index} is defined as either a non|-|negative integer or the string -value \type {right_boundary}), with the values the kerning to be applied, in +value \type {right_boundary}), with the values of the kerning to be applied, in scaled points. 
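To make this a bit more concrete, here is a sketch of a character entry that carries a \type {tounicode} string and a small \type {kerns} table; the dimensions, the glyph index and the kern values are made up and only serve as an illustration of the keys described above.

\starttyping
local characters = { }        -- the characters table of a font being defined

characters[0xFB01] = {        -- the fi ligature
    width     = 382293,       -- all dimensions in scaled points (made up)
    height    = 455111,
    depth     = 0,
    index     = 192,          -- hypothetical glyph index in the font file
    tounicode = "00660069",   -- maps back to "fi" in utf-16be hexadecimal
    kerns     = {
        [string.byte(".")] = -20480,  -- kern when followed by a period
        [string.byte("?")] =  15360,  -- kern when followed by a question mark
    },
}
\stoptyping

As mentioned above, the string \type {right_boundary} can be used as an index in the \type {kerns} table as well.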
The \type {ligatures} table is a hash indexed by character index (and \quote @@ -283,23 +288,25 @@ value \type {right_boundary}), with the values being yet another small hash, wit two fields: \starttabulate[|l|l|p|] -\BC key \BC type \BC description \NC \NR +\DB key \BC type \BC description \NC \NR +\TB \NC \type{type} \NC number \NC the type of this ligature command, default 0 \NC \NR \NC \type{char} \NC number \NC the character index of the resultant ligature \NC \NR +\LL \stoptabulate -The \type {char} field in a ligature is required. - -The \type {type} field inside a ligature is the numerical or string value of one -of the eight possible ligature types supported by \TEX. When \TEX\ inserts a new -ligature, it puts the new glyph in the middle of the left and right glyphs. The -original left and right glyphs can optionally be retained, and when at least one -of them is kept, it is also possible to move the new \quote {insertion point} -forward one or two places. The glyph that ends up to the right of the insertion -point will become the next \quote {left}. +The \type {char} field in a ligature is required. The \type {type} field inside a +ligature is the numerical or string value of one of the eight possible ligature +types supported by \TEX. When \TEX\ inserts a new ligature, it puts the new glyph +in the middle of the left and right glyphs. The original left and right glyphs +can optionally be retained, and when at least one of them is kept, it is also +possible to move the new \quote {insertion point} forward one or two places. The +glyph that ends up to the right of the insertion point will become the next +\quote {left}. \starttabulate[|l|c|l|l|] -\BC textual (Knuth) \BC number \BC string \BC result \NC\NR +\DB textual (Knuth) \BC number \BC string \BC result \NC\NR +\TB \NC \type{l + r =: n} \NC 0 \NC \type{=:} \NC \type{|n} \NC\NR \NC \type{l + r =:| n} \NC 1 \NC \type{=:|} \NC \type{|nr} \NC\NR \NC \type{l + r |=: n} \NC 2 \NC \type{|=:} \NC \type{|ln} \NC\NR @@ -308,6 +315,7 @@ point will become the next \quote {left}. \NC \type{l + r |=:> n} \NC 6 \NC \type{|=:>} \NC \type{l|n} \NC\NR \NC \type{l + r |=:|> n} \NC 7 \NC \type{|=:|>} \NC \type{l|nr} \NC\NR \NC \type{l + r |=:|>> n} \NC 11 \NC \type{|=:|>>} \NC \type{ln|r} \NC\NR +\LL \stoptabulate The default value is~0, and can be left out. That signifies a \quote {normal} @@ -316,7 +324,12 @@ indicates the final insertion point. The \type {commands} array is explained below. -\section {Real fonts} +\stopsection + +\startsection[title={Real fonts}] + +\topicindex {fonts+real} +\topicindex {fonts+virtual} Whether or not a \TEX\ font is a \quote {real} font that should be written to the \PDF\ document is decided by the \type {type} value in the top|-|level font @@ -325,141 +338,150 @@ inclusion mechanism will attempt to add the needed font object definitions to th \PDF. Values for \type {type} are: \starttabulate[|l|p|] -\BC value \BC description \NC\NR +\DB value \BC description \NC\NR +\TB \NC \type{real} \NC this is a base font \NC\NR \NC \type{virtual} \NC this is a virtual font \NC\NR +\LL \stoptabulate The actions to be taken depend on a number of different variables: \startitemize[packed] \startitem - Whether the used font fits in an 8-bit encoding scheme or not. + Whether the used font fits in an 8-bit encoding scheme or not. This is true for + traditional \TEX\ fonts that communicate via \TFM\ files. \stopitem \startitem - The type of the disk font file. 
+ The type of the disk font file, for instance a bitmap file or an outline + \TYPEONE, \TRUETYPE\ or \OPENTYPE\ font. \stopitem \startitem - The level of embedding requested. + The level of embedding requested, although in most cases a subset of + characters is embedded. The times when nothing got embedded are (in our + opinion at least) basically gone. \stopitem \stopitemize A font that uses anything other than an 8-bit encoding vector has to be written -to the \PDF\ in a different way. - -The rule is: if the font table has \type {encodingbytes} set to~2, then this is a -wide font, in all other cases it isn't. The value~2 is the default for \OPENTYPE\ -and \TRUETYPE\ fonts loaded via \LUA. For \TYPEONE\ fonts, you have to set \type -{encodingbytes} to~2 explicitly. For \PK\ bitmap fonts, wide font encoding is not -supported at all. - -If no special care is needed, \LUATEX\ currently falls back to the -mapfile|-|based solution used by \PDFTEX\ and \DVIPS. This behaviour might -silently be removed in the future, in which case the related primitives and \LUA\ -functions will become no|-|ops. - -If a \quote {wide} font is used, the new subsystem kicks in, and some -extra fields have to be present in the font structure. In this case, \LUATEX\ -does not use a map file at all. - -The extra fields are: \type {format}, \type {embedding}, \type {fullname}, \type -{cidinfo} (as explained above), \type {filename}, and the \type {index} key in -the separate characters. - -Values for \type {format} are: +to the \PDF\ in a different way. When the font table has \type {encodingbytes} +set to~2, then it is a wide font, in all other cases it isn't. The value~2 is the +default for \OPENTYPE\ and \TRUETYPE\ fonts loaded via \LUA. For \TYPEONE\ fonts, +you have to set \type {encodingbytes} to~2 explicitly. For \PK\ bitmap fonts, +wide font encoding is not supported at all. + +If no special care is needed, \LUATEX\ falls back to the mapfile|-|based solution +used by \PDFTEX\ and \DVIPS, so that legacy fonts are supported transparently. If +a \quote {wide} font is used, the new subsystem kicks in, and some extra fields +have to be present in the font structure. In this case, \LUATEX\ does not use a +map file at all. These extra fields are: \type {format}, \type {embedding}, \type +{fullname}, \type {cidinfo} (as explained above), \type {filename}, and the \type +{index} key in the separate characters. + +The \type {format} variable can have the following values. \type {type3} fonts +are provided for backward compatibility only, and do not support the new wide +encoding options. \starttabulate[|l|p|] -\BC value \BC description \NC \NR +\DB value \BC description \NC \NR +\TB \NC \type{type1} \NC this is a \POSTSCRIPT\ \TYPEONE\ font \NC \NR \NC \type{type3} \NC this is a bitmapped (\PK) font \NC \NR \NC \type{truetype} \NC this is a \TRUETYPE\ or \TRUETYPE|-|based \OPENTYPE\ font \NC \NR \NC \type{opentype} \NC this is a \POSTSCRIPT|-|based \OPENTYPE\ font \NC \NR +\LL \stoptabulate -\type {type3} fonts are provided for backward compatibility only, and do not -support the new wide encoding options. 
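As a sketch, with invented names and values, the extra top|-|level fields for such a wide font could look like this; the \type {cidinfo} keys shown are an assumption and the \type {characters} table is left empty here.

\starttyping
local f = {
    name          = "somefont",              -- hypothetical metric name
    fullname      = "SomeFont-Regular",      -- used as postscript/pdf name
    filename      = "somefont-regular.otf",  -- the font file on disk
    type          = "real",
    format        = "opentype",              -- postscript based opentype
    embedding     = "subset",                -- see the table below
    encodingbytes = 2,                       -- flag this as a wide font
    cidinfo       = {
        registry   = "Adobe",
        ordering   = "Identity",
        supplement = 0,
    },
    characters    = { },                     -- per-glyph entries, each with an index
}
\stoptyping

The possible values for the two string fields \type {format} and \type {embedding} are given in the surrounding tables.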
- -Values for \type {embedding} are: +Valid values for the \type {embedding} variable are: \starttabulate[|l|p|] -\BC value \BC description \NC \NR +\DB value \BC description \NC \NR +\TB \NC \type{no} \NC don't embed the font at all \NC \NR \NC \type{subset} \NC include and atttempt to subset the font \NC \NR \NC \type{full} \NC include this font in its entirety \NC \NR +\LL \stoptabulate -The other fields are used as follows: The \type {fullname} will be the +The other fields are used as follows. The \type {fullname} will be the \POSTSCRIPT|/|\PDF\ font name. The \type {cidinfo} will be used as the character -set (the CID \type {/Ordering} and \type {/Registry} keys). The \type {filename} +set: the CID \type {/Ordering} and \type {/Registry} keys. The \type {filename} points to the actual font file. If you include the full path in the \type {filename} or if the file is in the local directory, \LUATEX\ will run a little -bit more efficient because it will not have to re|-|run the \type {find_xxx_file} +bit more efficient because it will not have to re|-|run the \type {find_*_file} callback in that case. Be careful: when mixing old and new fonts in one document, it is possible to create \POSTSCRIPT\ name clashes that can result in printing errors. When this -happens, you have to change the \type {fullname} of the font. +happens, you have to change the \type {fullname} of the font to a more unique +one. Typeset strings are written out in a wide format using 2~bytes per glyph, using the \type {index} key in the character information as value. The overall effect is like having an encoding based on numbers instead of traditional (\POSTSCRIPT) -name|-|based reencoding. The way to get the correct \type {index} numbers for -\TYPEONE\ fonts is by loading the font via \type {fontloader.open} and use the table -indices as \type {index} fields. +name|-|based reencoding. One way to get the correct \type {index} numbers for +\TYPEONE\ fonts is by loading the font via \type {fontloader.open} and use the +table indices as \type {index} fields. In order to make sure that cut and paste of the final document works okay you can -best make sure that there is a \type {tounicode} vector enforced. +best make sure that there is a \type {tounicode} vector enforced. Not all \PDF\ +viewers handle this right so take \ACROBAT\ as reference. + +\stopsection -\section[virtualfonts]{Virtual fonts} +\startsection[reference=virtualfonts,title={Virtual fonts}] \subsection{The structure} +\topicindex {fonts+virtual} + You have to take the following steps if you want \LUATEX\ to treat the returned -table from \type {define_font} as a virtual font: +table from \cbk {define_font} as a virtual font: \startitemize[packed] \startitem - Set the top|-|level key \type {type} to \type {virtual}. + Set the top|-|level key \type {type} to \type {virtual}. In most cases it's + optional because we look at the \type {commands} entry anyway. \stopitem \startitem - Make sure there is at least one valid entry in \type {fonts} (see below). + Make sure there is at least one valid entry in \type {fonts} (see below), + although recent versions of \LUATEX\ add a default entry when this table is + missing. \stopitem \startitem - Give a \type {commands} array to every character (see below). + Add a \type {commands} array to those characters that matter. A virtual + character can itself point to virtual characters but be careful with nesting + as you can create loops and overflow the stack (which often indicates an + error anyway). 
\stopitem \stopitemize The presence of the toplevel \type {type} key with the specific value \type {virtual} will trigger handling of the rest of the special virtual font fields in the table, but the mere existence of 'type' is enough to prevent \LUATEX\ from -looking for a virtual font on its own. - -Therefore, this also works \quote {in reverse}: if you are absolutely certain -that a font is not a virtual font, assigning the value \type {base} or \type -{real} to \type {type} will inhibit \LUATEX\ from looking for a virtual font -file, thereby saving you a disk search. +looking for a virtual font on its own. This also works \quote {in reverse}: if +you are absolutely certain that a font is not a virtual font, assigning the value +\type {real} to \type {type} will inhibit \LUATEX\ from looking for a virtual +font file, thereby saving you a disk search. This only matters when we load a +\TFM\ file. -The \type {fonts} is another \LUA\ array. The values are one- or two|-|key +The \type {fonts} is an (indexed) \LUA\ table. The values are one- or two|-|key hashes themselves, each entry indicating one of the base fonts in a virtual font. In case your font is referring to itself, you can use the \type {font.nextid()} function which returns the index of the next to be defined font which is probably -the currently defined one. - -An example makes this easy to understand +the currently defined one. So, a table looks like this: \starttyping fonts = { - { name = 'ptmr8a', size = 655360 }, - { name = 'psyr', size = 600000 }, - { id = 38 } + { name = "ptmr8a", size = 655360 }, + { name = "psyr", size = 600000 }, + { id = 38 } } \stoptyping -says that the first referenced font (index 1) in this virtual font is \type -{ptrmr8a} loaded at 10pt, and the second is \type {psyr} loaded at a little over -9pt. The third one is previously defined font that is known to \LUATEX\ as font id -\quote {38}. - +The first referenced font (at index~1) in this virtual font is \type {ptrmr8a} +loaded at 10pt, and the second is \type {psyr} loaded at a little over 9pt. The +third one is a previously defined font that is known to \LUATEX\ as font id~38. The array index numbers are used by the character command definitions that are part of each character. @@ -468,7 +490,8 @@ with the first entry representing a command and the extra items being the parameters to that command. The allowed commands and their arguments are: \starttabulate[|l|l|l|p|] -\BC command name \BC arguments \BC type \BC description \NC \NR +\DB command \BC arguments \BC type \BC description \NC \NR +\TB \NC \type{font} \NC 1 \NC number \NC select a new font from the local \type {fonts} table \NC \NR \NC \type{char} \NC 1 \NC number \NC typeset this character number from the current font, and move right by the character's width \NC \NR @@ -481,18 +504,20 @@ parameters to that command. The allowed commands and their arguments are: \NC \type{rule} \NC 2 \NC 2 numbers \NC output a rule $ht*wd$, and move right. 
\NC \NR \NC \type{down} \NC 1 \NC number \NC move down on the page \NC \NR \NC \type{right} \NC 1 \NC number \NC move right on the page \NC \NR -\NC \type{special} \NC 1 \NC string \NC output a \type {\special} command \NC \NR +\NC \type{special} \NC 1 \NC string \NC output a \prm {special} command \NC \NR \NC \type{pdf} \NC 2 \NC 2 strings \NC output a \PDF\ literal, the first string is one of \type {origin}, \type {page}, \type {text}, \type {font}, \type {direct} or \type {raw}; if you have one string only \type {origin} is assumed \NC \NR -\NC \type{lua} \NC 1 \NC string \NC execute a \LUA\ script (at \type {\latelua} time) \NC \NR -\NC \type{image} \NC 1 \NC image \NC output an image (the argument can be either an \type - {<image>} variable or an \type {image_spec} table) \NC \NR +\NC \type{lua} \NC 1 \NC string, + function \NC execute a \LUA\ script when the glyph is embedded; in case of a + function it gets the font id and character code passed \NC \NR +\NC \type{image} \NC 1 \NC image \NC output an image (the argument can be either an \type {<image>} variable or an \type {image_spec} table) \NC \NR \NC \type{comment} \NC any \NC any \NC the arguments of this command are ignored \NC \NR +\LL \stoptabulate When a font id is set to~0 then it will be replaced by the currently assigned -font id. This prevents the need for hackery with future id's (normally one could +font id. This prevents the need for hackery with future id's. Normally one could use \type {font.nextid} but when more complex fonts are built in the meantime other instances could have been loaded. @@ -500,24 +525,24 @@ The \type {pdf} option also accepts a \type {mode} keyword in which case the third argument sets the mode. That option will change the mode in an efficient way (passing an empty string would result in an extra empty lines in the \PDF\ file. This option only makes sense for virtual fonts. The \type {font} mode only -makes sense in virtual fonts. - -These modes are somewhat fuzzy and partially inherited from \PDFTEX. +makes sense in virtual fonts. Modes are somewhat fuzzy and partially inherited +from \PDFTEX. \starttabulate[|l|p|] -\BC mode \BC description \NC \NR +\DB mode \BC description \NC \NR +\TB \NC \type {origin} \NC enter page mode and set the position \NC \NR \NC \type {page} \NC enter page mode \NC \NR \NC \type {text} \NC enter text mode \NC \NR \NC \type {font} \NC enter font mode (kind of text mode, only in virtual fonts) \NC \NR \NC \type {always} \NC finish the current string and force a transform if needed \NC \NR \NC \type {raw} \NC finish the current string \NC \NR +\LL \stoptabulate -You always need to check what \PDF\ code is generated because there can be all kind of -interferences with optimizations in the backend and fonts are complicated anyway. - -Here is a rather elaborate glyph commands example: +You always need to check what \PDF\ code is generated because there can be all +kind of interferences with optimization in the backend and fonts are complicated +anyway. Here is a rather elaborate glyph commands example using such keys: \starttyping ... 
@@ -526,6 +551,7 @@ commands = { { "right", 5000 }, -- move right about 0.08pt { "font", 3 }, -- select the fonts[3] entry { "char", 97 }, -- place character 97 (ASCII 'a') + -- { "slot", 2, 97 }, -- an alternative for the previous two { "pop" }, -- go all the way back { "down", -200000 }, -- move upwards by about 3pt { "special", "pdf: 1 0 0 rg" } -- switch to red color @@ -539,7 +565,7 @@ commands = { The default value for \type {font} is always~1 at the start of the \type {commands} array. Therefore, if the virtual font is essentially only a -re|-|encoding, then you do usually not have create an explicit \quote {font} +re|-|encoding, then you do usually not have created an explicit \quote {font} command in the array. Rules inside of \type {commands} arrays are built up using only two dimensions: @@ -572,6 +598,8 @@ characters that are already present cannot be altered). \subsection{Example virtual font} +\topicindex {fonts+virtual} + Finally, here is a plain \TEX\ input file with a virtual font demonstration: \startbuffer @@ -579,58 +607,65 @@ Finally, here is a plain \TEX\ input file with a virtual font demonstration: callback.register('define_font', function (name,size) if name == 'cmr10-red' then - f = font.read_tfm('cmr10',size) - f.name = 'cmr10-red' - f.type = 'virtual' - f.fonts = {{ name = 'cmr10', size = size }} + local f = font.read_tfm('cmr10',size) + f.name = 'cmr10-red' + f.type = 'virtual' + f.fonts = { + { name = 'cmr10', size = size } + } for i,v in pairs(f.characters) do - if (string.char(i)):find('[tacohanshartmut]') then + if string.char(i):find('[tacohanshartmut]') then v.commands = { - {'special','pdf: 1 0 0 rg'}, - {'char',i}, - {'special','pdf: 0 g'}, + { "special", "pdf: 1 0 0 rg" }, + { "char", i }, + { "special", "pdf: 0 g" }, } - else - v.commands = {{'char',i}} end end + return f else - f = font.read_tfm(name,size) + return font.read_tfm(name,size) end - return f end ) } -\font\myfont = cmr10-red at 10pt \myfont This is a line of text \par -\font\myfontx= cmr10 at 10pt \myfontx Here is another line of text \par +\font\myfont = cmr10-red at 10pt \myfont This is a line of text \par +\font\myfontx = cmr10 at 10pt \myfontx Here is another line of text \par \stopbuffer \typebuffer -\section{The \type {vf} library} +\stopsection + +\startsection[title={The \type {vf} library}] The \type {vf} library can be used when \LUA\ code, as defined in the \type {commands} of the font, is executed. The functions provided are similar as the commands: \type {char}, \type {down}, \type {fontid}, \type {image}, \type -{node}, \type {nop}, \type {pop}, \type {push}, \type {right}, \type {rule}, -\type {special} and \type {pdf}. This library has been present for a while but -not been advertised and tested much, if only because it's easy to define an -invalid font (or mess up the \PDF\ stream). Keep in mind that the \LUA\ snippets -are executed each time when a character is output. +{node}, \type {nop}, \type {pop}, \type {push}, \type {right}, \nod {rule}, \type +{special} and \type {pdf}. This library has been present for a while but not been +advertised and tested much, if only because it's easy to define an invalid font +(or mess up the \PDF\ stream). Keep in mind that the \LUA\ snippets are executed +each time when a character is output. 
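As a sketch of how these helpers can be combined with the \type {lua} glyph command described earlier (when a function is used it receives the font id and the character code), a virtual character could be set up as follows; the dimensions are arbitrary.

\starttyping
local characters = { }  -- the characters table of a virtual font being defined

characters[65] = {      -- "A", with made-up dimensions
    width    = 655360,
    height   = 455111,
    depth    = 0,
    commands = {
        { "lua", function(fontid, charcode)
            vf.special("pdf: 1 0 0 rg")  -- switch the fill color to red
            vf.char(charcode)            -- inject the glyph itself
            vf.special("pdf: 0 g")       -- and back to black
        end },
    },
}
\stoptyping

This mirrors the red|-|character demo above, but does the colour switching from \LUA\ at embedding time instead of with explicit \type {special} commands in the table.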
+ +\stopsection +\startsection[title={The \type {font} library}] -\section{The \type {font} library} +\topicindex {fonts+library} The font library provides the interface into the internals of the font system, -and also it contains helper functions to load traditional \TEX\ font metrics +and it also contains helper functions to load traditional \TEX\ font metrics formats. Other font loading functionality is provided by the \type {fontloader} library that will be discussed in the next section. \subsection{Loading a \TFM\ file} -The behavior documented in this subsection is considered stable in the sense that -there will not be backward-incompatible changes any more. +\topicindex {fonts+tfm} + +The behaviour documented in this subsection is considered stable in the sense that +there will not be backward|-|incompatible changes any more. \startfunctioncall <table> fnt = @@ -649,11 +684,10 @@ The number is a bit special: \stopitem \stopitemize -The internal structure of the metrics font table that is returned is explained in -\in {chapter} [fonts]. - \subsection{Loading a \VF\ file} +\topicindex {fonts+vf} + The behavior documented in this subsection is considered stable in the sense that there will not be backward-incompatible changes any more. @@ -663,10 +697,12 @@ there will not be backward-incompatible changes any more. \stopfunctioncall The meaning of the number \type {s} and the format of the returned table are -similar to the ones in the \type {read_tfm()} function. +similar to the ones in the \type {read_tfm} function. \subsection{The fonts array} +\topicindex {fonts+virtual} + The whole table of \TEX\ fonts is accessible from \LUA\ using a virtual array. \starttyping @@ -674,9 +710,8 @@ font.fonts[n] = { ... } <table> f = font.fonts[n] \stoptyping -See \in {chapter} [fonts] for the structure of the tables. Because this is a -virtual array, you cannot call \type {pairs} on it, but see below for the \type -{font.each} iterator. +Because this is a virtual array, you cannot call \type {pairs} on it, but see +below for the \type {font.each} iterator. The two metatable functions implementing the virtual array are: @@ -686,9 +721,15 @@ font.setfont(<number> n, <table> f) \stopfunctioncall Note that at the moment, each access to the \type {font.fonts} or call to \type -{font.getfont} creates a \LUA\ table for the whole font. This process can be quite -slow. In a later version of \LUATEX, this interface will change (it will start -using userdata objects instead of actual tables). +{font.getfont} creates a \LUA\ table for the whole font unless you cached it. +This process can be quite slow. + +\startfunctioncall +<table> p = font.getparameters(<number> n) +\stopfunctioncall + +This one will return a table of the parameters as known to \TEX. These can be +different from the ones in the cached table. Also note the following: assignments can only be made to fonts that have already been defined in \TEX, but have not been accessed {\it at all\/} since that @@ -709,6 +750,8 @@ changed) or \type {nil} (not a valid font at all). \subsection{Defining a font directly} +\topicindex {fonts+define} + You can define your own font into \type {font.fonts} by calling this function: \startfunctioncall @@ -717,9 +760,8 @@ You can define your own font into \type {font.fonts} by calling this function: \stopfunctioncall The return value is the internal id number of the defined font (the index into -\type {font.fonts}). If the font creation fails, an error is raised. 
The table -is a font structure, as explained in \in {chapter} [fonts]. An alternative call -is: +\type {font.fonts}). If the font creation fails, an error is raised. The table is +a font structure. An alternative call is: \startfunctioncall <number> i = @@ -730,6 +772,8 @@ Where the first argument is a reserved font id (see below). \subsection{Extending a font} +\topicindex {fonts+extend} + Within reasonable bounds you can extend a font after it has been defined. Because some properties are best left unchanged this is limited to adding characters. @@ -747,6 +791,8 @@ a more drastic replacer.) \subsection{Projected next font id} +\topicindex {fonts+id} + \startfunctioncall <number> i = font.nextid() @@ -763,36 +809,37 @@ This can be handy when you create complex virtual fonts. font.nextid(true) \stopfunctioncall -\subsection{Font id} +\subsection{Font ids} + +\topicindex {fonts+id} +\topicindex {fonts+current} \startfunctioncall <number> i = font.id(<string> csname) \stopfunctioncall -This returns the font id associated with \type {csname} string, or $-1$ if \type +This returns the font id associated with \type {csname}, or $-1$ if \type {csname} is not defined. -\subsection{Currently active font} - \startfunctioncall -<number> i = font.current() -font.current(<number> i) +<number> i = + font.max() \stopfunctioncall -This gets or sets the currently used font number. - -\subsection{Maximum font id} +This is the largest used index in \type {font.fonts}. \startfunctioncall -<number> i = - font.max() +<number> i = font.current() +font.current(<number> i) \stopfunctioncall -This is the largest used index in \type {font.fonts}. +This gets or sets the currently used font number. \subsection{Iterating over all fonts} +\topicindex {fonts+iterate} + \startfunctioncall for i,v in font.each() do ... @@ -804,6 +851,8 @@ value is the index in \type {font.fonts}, the second the font itself, as a \LUA\ table. The indices are listed incrementally, but they do not always form an array of consecutive numbers: in some cases there can be holes in the sequence. +\stopsection + \stopchapter \stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-introduction.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-introduction.tex index d0899147da2..5ed80f4c176 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-introduction.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-introduction.tex @@ -1,7 +1,6 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-introduction @@ -15,9 +14,9 @@ Additional reference material is published in journals of user groups and It took about a decade to reach stable version 1.0, but for good reason. Successive versions brought new functionality, more control, some cleanup of -internals and experimental features evolved into stable ones or were dropped. +internals. Experimental features evolved into stable ones or were dropped. Already quite early \LUATEX\ could be used for production and it was used on a -daily basis by the authors. Successive versions sometimes demanded a adaption to +daily basis by the authors. Successive versions sometimes demanded an adaption to the \LUA\ interfacing, but the concepts were unchanged. The current version can be considered stable in functionality and there will be no fundamental changes. Of course we then can decide to move towards version 2.00 with different @@ -26,9 +25,10 @@ properties. 
Don't expect \LUATEX\ to behave the same as \PDFTEX ! Although the core functionality of that 8 bit engine was starting point, it has been combined with the directional support of \OMEGA\ (\ALEPH). But, \LUATEX\ can behave different -due to its wide (32 bit) characters, many registers and large memory support. -There is native \UTF\ input, support for large (more that 8 bit) fonts, and the -math machinery is tuned for \OPENTYPE\ math. There is support for directional +due to its wide (32 bit) characters, many registers and large memory support. The +\PDF\ code produced differs from \PDFTEX\ but users will normally not notice +that. There is native \UTF\ input, support for large (more than 8 bit) fonts, and +the math machinery is tuned for \OPENTYPE\ math. There is support for directional typesetting too. The log output can differ from other engines and will likely differ more as we move forward. When you run plain \TEX\ for sure \LUATEX\ runs slower than \PDFTEX\ but when you run for instance \CONTEXT\ \MKIV\ in many cases @@ -47,27 +47,29 @@ The organization of the source code is adapted so that it can glue all these components together. We continue cleaning up side effects of the accumulated code in \TEX\ engines (especially code that is not needed any longer). -\startitemize[packed] +\startitemize [unpacked] \startitem - Most of \PDFTEX\ version 1.40.9, converted to \CCODE. Some experimental - features have been removed and some utility macros are not inherited as - their functionality can be done in \LUA. The number of backend interface - commands has been reduced to a few. The extensions are separated from the - core (which we keep close to the original \TEX\ core). Some mechanisms - like expansion and protrusion can behave different from the original due - to some cleanup and optimization. Some whatsit based functionality (image - support and reusable content) is now core functionality. + We started out with most of \PDFTEX\ version 1.40.9. The code base was + converted to \CCODE\ and split in modules. Experimental features were + removed and utility macros are not inherited because their functionality + can be programmed in \LUA. The number of backend interface commands has + been reduced to a few. The so called extensions are separated from the + core (which we try to keep close to the original \TEX\ core). Some + mechanisms like expansion and protrusion can behave different from the + original due to some cleanup and optimization. Some whatsit based + functionality (image support and reusable content) is now core + functionality. We don't stay in sync with \PDFTEX\ development. \stopitem \startitem - The direction model and some other bits from \ALEPH\ RC4 (derived from - \OMEGA) is included. The related primitives are part of core \LUATEX\ but - at the node level directional support is no longer based on so called - whatsits but on real nodes. In fact, whatsits are now only used for - backend specific extensions. + The direction model from \ALEPH\ RC4 (which is derived from \OMEGA) is + included. The related primitives are part of core \LUATEX\ but at the + node level directional support is no longer based on so called whatsits + but on real nodes with relevant properties. The number of directions is + limited to the useful set and the backend has been made direction aware. \stopitem \startitem Neither \ALEPH's I/O translation processes, nor tcx files, nor \ENCTEX\ - can be used, these encoding|-|related functions are superseded by a + are available. 
These encoding|-|related functions are superseded by a \LUA|-|based solution (reader callbacks). In a similar fashion all file \IO\ can be intercepted. \stopitem @@ -80,14 +82,18 @@ code in \TEX\ engines (especially code that is not needed any longer). \startitem There are various \TEX\ extensions but only those that cannot be done using the \LUA\ interfaces. The math machinery often has two code paths: - one traditional and the other more suitable for wide \OPENTYPE\ fonts. + one traditional and the other more suitable for wide \OPENTYPE\ fonts. Here + we follow the \MICROSOFT\ specifications as much as possible. Some math + functionality has been opened up a bit so that users have more control. \stopitem \startitem The fontloader uses parts of \FONTFORGE\ 2008.11.17 combined with additional code specific for usage in a \TEX\ engine. We try to minimize specific font support to what \TEX\ needs: character references and dimensions and delegate everything else to \LUA. That way we keep \TEX\ - open for extensions without touching the core. + open for extensions without touching the core. In order to minimize + dependencies at some point we may decide to make this an optional + library. \stopitem \startitem The \METAPOST\ library is integral part of \LUATEX. This gives \TEX\ some @@ -95,33 +101,71 @@ code in \TEX\ engines (especially code that is not needed any longer). Again \LUA\ is used as glue between the frontend and backend. Further development of \METAPOST\ is closely related to \LUATEX. \stopitem + \startitem + The virtual font technology that comes with \TEX\ has been integrated + into the font machinery in a way that permits creating virtual fonts + at runtime. Because \LUATEX\ can also act as a \LUA\ interpreter this + means that a complete \TEX\ workflow can be built without the need for + additional programs. + \stopitem + \startitem + The versions starting from 1.09 no longer use the poppler library for + inclusion but a lightweight dedicated one. This removes a dependency but + also makes the inclusion code of \LUATEX\ different from \PDFTEX. In fact + it was already much different due to the \LUA\ image interfacing. + \stopitem \stopitemize We try to keep upcoming versions compatible but intermediate releases can contain -experimental features. A general rule is that versions that end up on \TEX live -and|/|or are released around \CONTEXT\ meetings are stable. Future versions will -probably become a bit leaner and meaner. Some libraries might become external as -we don't want to bloat the binary and also don't want to add more hard coded -solutions. After all, with \LUA\ you can extend the core functionality. The less -dependencies, the better. - -The \TEXLIVE\ version is to be considered the current stable version. Any version -between the yearly \TEXLIVE\ releases are to be considered beta. The beta -releases are normally available via the \CONTEXT\ distribution channels (the -garden and so called minimals). - -\blank[1*big] - -Hans Hagen, Harmut Henkel, \crlf -Taco Hoekwater \& Luigi Scarso +experimental features. A general rule is that versions that end up on \TEXLIVE\ +and|/|or are released around \CONTEXT\ meetings are stable. Any version between +the yearly \TEXLIVE\ releases are to be considered beta and in the repository end +up as trunk releases. We have an experimental branch that we use for development +but there is no support for any of its experimental features. 
Intermediate +releases (from trunk) are normally available via the \CONTEXT\ distribution +channels (the garden and so called minimals). + +Version 1.10 is more or less an endpoint in development: this is what you get. +Because not only \CONTEXT, that we can adapt rather easily, uses \LUATEX, we +cannot change fundamentals without unforeseen consequences. By now it has been +proven that \LUA\ can be used to extend the core functionality so there is no +need to add more, and definitely no hard coded solutions for (not so) common +problems. Of course there will be bug fixes, maybe some optimization, and there +might even be some additions or non|-|intrusive improvements, but only after +testing outside the stable release. After all, the binary is already more than +large enough and there is not that much to gain. + +You might find \LUA\ helpers that are not yet documented. These are considered +experimental, although when you encounter them in a \CONTEXT\ version that has +been around for a while you can assume that they will stay. Of course it can just +be that we forgot to document them yet. + +A manual like this is not really meant as tutorial, for that we refer to +documents that ship with \CONTEXT, articles, etc. It is also never complete +enough for all readers. We try to keep up but the reader needs to realize that +it's all volunteer work done in spare time. And for sure, complaining about a bad +manual or crappy documentation will not really motivate us to spend more time on +it. That being said, we hope that this document is useful. + +\blank[big] + +\testpage[5] + +\startlines +Hans Hagen +Harmut Henkel +Taco Hoekwater +Luigi Scarso +\stoplines \blank[3*big] -\starttabulate +\starttabulate[|||] \NC Version \EQ \currentdate \NC \NR -\NC \LUATEX \EQ version \cldcontext{status.luatex_version/100}, - revision \cldcontext{status.luatex_revision}, - number \cldcontext{environment.luatexversion} \NC \NR +\NC \LUATEX \EQ \cldcontext{LUATEXENGINE} % + \cldcontext{LUATEXVERSION} / % + \cldcontext{LUATEXFUNCTIONALITY} + \NC \NR \NC \CONTEXT \EQ MkIV \contextversion \NC \NR \stoptabulate diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-languages.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-languages.tex index 365e87f266c..d4a7bda607a 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-languages.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-languages.tex @@ -1,19 +1,22 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-languages \startchapter[reference=languages,title={Languages, characters, fonts and glyphs}] +\startsection[title={Introduction}] + +\topicindex {languages} + \LUATEX's internal handling of the characters and glyphs that eventually become typeset is quite different from the way \TEX82 handles those same objects. The easiest way to explain the difference is to focus on unrestricted horizontal mode (i.e.\ paragraphs) and hyphenation first. Later on, it will be easy to deal with the differences that occur in horizontal and math modes. -In \TEX82, the characters you type are converted into \type {char_node} records +In \TEX82, the characters you type are converted into \type {char} node records when they are encountered by the main control loop. 
\TEX\ attaches and processes the font information while creating those records, so that the resulting \quote {horizontal list} contains the final forms of ligatures and implicit kerning. @@ -21,7 +24,7 @@ This packaging is needed because we may want to get the effective width of for instance a horizontal box. When it becomes necessary to hyphenate words in a paragraph, \TEX\ converts (one -word at time) the \type {char_node} records into a string by replacing ligatures +word at time) the \type {char} node records into a string by replacing ligatures with their components and ignoring the kerning. Then it runs the hyphenation algorithm on this string, and converts the hyphenated result back into a \quote {horizontal list} that is consecutively spliced back into the paragraph stream. @@ -29,14 +32,14 @@ Keep in mind that the paragraph may contain unboxed horizontal material, which then already contains ligatures and kerns and the words therein are part of the hyphenation process. -Those \type {char_node} records are somewhat misnamed, as they are glyph +Those \type {char} node records are somewhat misnamed, as they are glyph positions in specific fonts, and therefore not really \quote {characters} in the -linguistic sense. There is no language information inside the \type {char_node} +linguistic sense. There is no language information inside the \type {char} node records at all. Instead, language information is passed along using \type -{language whatsit} records inside the horizontal list. +{language whatsit} nodes inside the horizontal list. In \LUATEX, the situation is quite different. The characters you type are always -converted into \type {glyph_node} records with a special subtype to identify them +converted into \nod {glyph} node records with a special subtype to identify them as being intended as linguistic characters. \LUATEX\ stores the needed language information in those records, but does not do any font|-|related processing at the time of node creation. It only stores the index of the current font and a @@ -48,93 +51,83 @@ font information in the whole list (creating ligatures and adjusting kerning), and finally it adjusts all the subtype identifiers so that the records are \quote {glyph nodes} from now on. -\section[charsandglyphs]{Characters and glyphs} +\stopsection + +\startsection[title={Characters, glyphs and discretionaries},reference=charsandglyphs] + +\topicindex {characters} +\topicindex {glyphs} +\topicindex {hyphenation} -\TEX82 (including \PDFTEX) differentiates between \type {char_node}s and \type -{lig_node}s. The former are simple items that contained nothing but a \quote +\TEX82 (including \PDFTEX) differentiates between \type {char} nodes and \type +{lig} nodes. The former are simple items that contained nothing but a \quote {character} and a \quote {font} field, and they lived in the same memory as tokens did. The latter also contained a list of components, and a subtype indicating whether this ligature was the result of a word boundary, and it was stored in the same place as other nodes like boxes and kerns and glues. In \LUATEX, these two types are merged into one, somewhat larger structure called -a \type {glyph_node}. Besides having the old character, font, and component -fields, and the new special fields like \quote {attr} (see~\in {section} -[glyphnodes]), these nodes also contain: +a \nod {glyph} node. 
Besides having the old character, font, and component +fields there are a few more, like \quote {attr} that we will see in \in {section} +[glyphnodes], these nodes also contain a subtype, that codes four main types and +two additional ghost types. For ligatures, multiple bits can be set at the same +time (in case of a single|-|glyph word). \startitemize - -\startitem A subtype, split into four main types: - - \startitemize - \startitem - \type {character}, for characters to be hyphenated: the lowest bit - (bit 0) is set to 1. - \stopitem - \startitem - \type {glyph}, for specific font glyphs: the lowest bit (bit 0) is - not set. - \stopitem - \startitem - \type {ligature}, for ligatures (bit 1 is set) - \stopitem - \startitem - \type {ghost}, for \quote {ghost objects} (bit 2 is set) - \stopitem - \stopitemize - - The latter two make further use of two extra fields (bits 3 and 4): - - \startitemize - \startitem - \type {left}, for ligatures created from a left word boundary and for - ghosts created from \type {\leftghost} - \stopitem - \startitem - \type {right}, for ligatures created from a right word boundary and - for ghosts created from \type {\rightghost} - \stopitem - \stopitemize - - For ligatures, both bits can be set at the same time (in case of a - single|-|glyph word). - -\stopitem - -\startitem - \type {glyph_node}s of type \quote {character} also contain language data, - split into four items that were current when the node was created: the - \type {\setlanguage} (15 bits), \type {\lefthyphenmin} (8 bits), \type - {\righthyphenmin} (8 bits), and \type {\uchyph} (1 bit). -\stopitem - + \startitem + \type {character}, for characters to be hyphenated: the lowest bit + (bit 0) is set to 1. + \stopitem + \startitem + \nod {glyph}, for specific font glyphs: the lowest bit (bit 0) is + not set. + \stopitem + \startitem + \type {ligature}, for constructed ligatures bit 1 is set. + \stopitem + \startitem + \type {ghost}, for so called \quote {ghost objects} bit 2 is set. + \stopitem + \startitem + \type {left}, for ligatures created from a left word boundary and for + ghosts created from \lpr {leftghost} bit 3 gets set. + \stopitem + \startitem + \type {right}, for ligatures created from a right word boundary and + for ghosts created from \lpr {rightghost} bit 4 is set. + \stopitem \stopitemize +The \nod {glyph} nodes also contain language data, split into four items that +were current when the node was created: the \prm {setlanguage} (15~bits), \prm +{lefthyphenmin} (8~bits), \prm {righthyphenmin} (8~bits), and \prm {uchyph} +(1~bit). + Incidentally, \LUATEX\ allows 16383 separate languages, and words can be 256 characters long. The language is stored with each character. You can set -\type {\firstvalidlanguage} to for instance~1 and make thereby language~0 +\prm {firstvalidlanguage} to for instance~1 and make thereby language~0 an ignored hyphenation language. -The new primitive \type {\hyphenationmin} can be used to signal the minimal length -of a word. This value stored with the (current) language. +The new primitive \lpr {hyphenationmin} can be used to signal the minimal length +of a word. This value is stored with the (current) language. 
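The language related data that is carried by each glyph node can be inspected from \LUA. The following sketch assumes the usual glyph node field names (\type {char}, \type {lang}, \type {left}, \type {right} and \type {uchyph}) and simply reports them for every glyph in a node list.

\starttyping
local glyph_id = node.id("glyph")

local function report_language_data(head)
    for g in node.traverse_id(glyph_id, head) do
        texio.write_nl(string.format(
            "U+%05X lang=%d lefthyphenmin=%d righthyphenmin=%d uchyph=%d",
            g.char, g.lang, g.left, g.right, g.uchyph))
    end
end
\stoptyping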
-Because the \type {\uchyph} value is saved in the actual nodes, its handling is -subtly different from \TEX82: changes to \type {\uchyph} become effective +Because the \prm {uchyph} value is saved in the actual nodes, its handling is +subtly different from \TEX82: changes to \prm {uchyph} become effective immediately, not at the end of the current partial paragraph. Typeset boxes now always have their language information embedded in the nodes themselves, so there is no longer a possible dependency on the surrounding -language settings. In \TEX82, a mid-paragraph statement like \type {\unhbox0} would -process the box using the current paragraph language unless there was a -\type {\setlanguage} issued inside the box. In \LUATEX, all language variables are -already frozen. +language settings. In \TEX82, a mid|-|paragraph statement like \type {\unhbox0} +would process the box using the current paragraph language unless there was a +\prm {setlanguage} issued inside the box. In \LUATEX, all language variables +are already frozen. In traditional \TEX\ the process of hyphenation is driven by \type {lccode}s. In \LUATEX\ we made this dependency less strong. There are several strategies possible. When you do nothing, the currently used \type {lccode}s are used, when loading patterns, setting exceptions or hyphenating a list. -When you set \type {\savinghyphcodes} to a value larger than zero the current set +When you set \prm {savinghyphcodes} to a value greater than zero the current set of \type {lccode}s will be saved with the language. In that case changing a \type {lccode} afterwards has no effect. However, you can adapt the set with: @@ -144,96 +137,88 @@ of \type {lccode}s will be saved with the language. In that case changing a \typ This change is global which makes sense if you keep in mind that the moment that hyphenation happens is (normally) when the paragraph or a horizontal box is -constructed. When \type {\savinghyphcodes} was zero when the language got +constructed. When \prm {savinghyphcodes} was zero when the language got initialized you start out with nothing, otherwise you already have a set. -When a \type {\hjcode} is larger than $0$ but smaller than $32$ is indicates the +When a \lpr {hjcode} is greater than 0 but less than 32 is indicates the to be used length. In the following example we map a character (\type {x}) onto another one in the patterns and tell the engine that \type {œ} counts as one character. Because traditionally zero itself is reserved for inhibiting -hyphenation, a value of $32$ counts as zero. - -\starttyping -% assuming french patterns: -foobar % foo-bar - -\hjcode`x=`o - -fxxbar % fxx-bar - -\lefthyphenmin3 - -œdipus % œdi-pus - -\lefthyphenmin4 - -œdipus % œdipus - -\hjcode`œ=2 - -œdipus % œdi-pus - -\hjcode`i=32 -\hjcode`d=32 - -œdipus % œdipus -\stoptyping +hyphenation, a value of 32 counts as zero. 
+ +Here are some examples (we assume that French patterns are used): + +\starttabulate[||||] +\NC \NC \type{foobar} \NC \type{foo-bar} \NC \NR +\NC \type{\hjcode`x=`o} \NC \type{fxxbar} \NC \type{fxx-bar} \NC \NR +\NC \type{\lefthyphenmin3} \NC \type{œdipus} \NC \type{œdi-pus} \NC \NR +\NC \type{\lefthyphenmin4} \NC \type{œdipus} \NC \type{œdipus} \NC \NR +\NC \type{\hjcode`œ=2} \NC \type{œdipus} \NC \type{œdi-pus} \NC \NR +\NC \type{\hjcode`i=32 \hjcode`d=32} \NC \type{œdipus} \NC \type{œdipus} \NC \NR +\NC +\stoptabulate Carrying all this information with each glyph would give too much overhead and -also make the process of setting up thee codes more complex. A solution with +also make the process of setting up these codes more complex. A solution with \type {hjcode} sets was considered but rejected because in practice the current approach is sufficient and it would not be compatible anyway. Beware: the values are always saved in the format, independent of the setting -of \type {\savinghyphcodes} at the moment the format is dumped. +of \prm {savinghyphcodes} at the moment the format is dumped. A boundary node normally would mark the end of a word which interferes with for -instance discretionary injection. For this you can use the \type {\wordboundary} -as trigger. Here are a few examples of usage: +instance discretionary injection. For this you can use the \prm {wordboundary} +as a trigger. Here are a few examples of usage: \startbuffer discrete---discrete \stopbuffer -\typebuffer \start \dontcomplain \hsize 1pt \getbuffer \par \stop +\typebuffer \startnarrower \dontcomplain \hsize 1pt \getbuffer \par \stopnarrower \startbuffer discrete\discretionary{}{}{---}discrete \stopbuffer -\typebuffer \start \dontcomplain \hsize 1pt \getbuffer \par \stop +\typebuffer \startnarrower \dontcomplain \hsize 1pt \getbuffer \par \stopnarrower \startbuffer discrete\wordboundary\discretionary{}{}{---}discrete \stopbuffer -\typebuffer \start \dontcomplain \hsize 1pt \getbuffer \par \stop +\typebuffer \startnarrower \dontcomplain \hsize 1pt \getbuffer \par \stopnarrower \startbuffer discrete\wordboundary\discretionary{}{}{---}\wordboundary discrete \stopbuffer -\typebuffer \start \dontcomplain \hsize 1pt \getbuffer \par \stop +\typebuffer \startnarrower \dontcomplain \hsize 1pt \getbuffer \par \stopnarrower \startbuffer discrete\wordboundary\discretionary{---}{}{}\wordboundary discrete \stopbuffer -\typebuffer \start \dontcomplain \hsize 1pt \getbuffer \par \stop +\typebuffer \startnarrower \dontcomplain \hsize 1pt \getbuffer \par \stopnarrower We only accept an explicit hyphen when there is a preceding glyph and we skip a -sequence of explicit hyphens as that normally indicates a \type {--} or \type +sequence of explicit hyphens since that normally indicates a \type {--} or \type {---} ligature in which case we can in a worse case usage get bad node lists later on due to messed up ligature building as these dashes are ligatures in base -fonts. This is a side effect of the separating the hyphenation, ligaturing and +fonts. This is a side effect of separating the hyphenation, ligaturing and kerning steps. -The start and end of a characters is signalled by a glue, penalty, kern or boundary -node. But by default also a hlist, vlist, rule, dir, whatsit, ins, and adjust node -indicate a start or end. 
You can omit the last set from the test by setting -\type {\hyphenationbounds} to a non|-|zero value: +The start and end of a sequence of characters is signalled by a \nod {glue}, \nod +{penalty}, \nod {kern} or \nod {boundary} node. But by default also a \nod +{hlist}, \nod {vlist}, \nod {rule}, \nod {dir}, \nod {whatsit}, \nod {ins}, and +\nod {adjust} node indicate a start or end. You can omit the last set from the +test by setting \lpr {hyphenationbounds} to a non|-|zero value: -\starttabulate[|l|l|] +\starttabulate[|c|l|] +\DB value \BC behaviour \NC \NR +\TB \NC \type{0} \NC not strict \NC \NR \NC \type{1} \NC strict start \NC \NR \NC \type{2} \NC strict end \NC \NR \NC \type{3} \NC strict start and strict end \NC \NR +\LL \stoptabulate The word start is determined as follows: \starttabulate[|l|l|] +\DB node \BC behaviour \NC \NR +\TB \BC boundary \NC yes when wordboundary \NC \NR \BC hlist \NC when hyphenationbounds 1 or 3 \NC \NR \BC vlist \NC when hyphenationbounds 1 or 3 \NC \NR @@ -244,11 +229,14 @@ The word start is determined as follows: \BC math \NC skipped \NC \NR \BC glyph \NC exhyphenchar (one only) : yes (so no -- ---) \NC \NR \BC otherwise \NC yes \NC \NR +\LL \stoptabulate The word end is determined as follows: \starttabulate[|l|l|] +\DB node \BC behaviour \NC \NR +\TB \BC boundary \NC yes \NC \NR \BC glyph \NC yes when different language \NC \NR \BC glue \NC yes \NC \NR @@ -261,10 +249,11 @@ The word end is determined as follows: \BC whatsit \NC when hyphenationbounds 2 or 3 \NC \NR \BC ins \NC when hyphenationbounds 2 or 3 \NC \NR \BC adjust \NC when hyphenationbounds 2 or 3 \NC \NR +\LL \stoptabulate -\in{Figures}[hb:1] upto \in[hb:5] show some examples. In all cases we set the min -values to 1 and make sure that the words hyphenate at each character. +\in {Figures} [hb:1] upto \in [hb:5] show some examples. In all cases we set the +min values to 1 and make sure that the words hyphenate at each character. \hyphenation{o-n-e t-w-o} @@ -283,12 +272,13 @@ values to 1 and make sure that the words hyphenate at each character. \startplacefigure[reference=hb:1,title={\type{one}}] \startcombination[4*1] - {\SomeTest{0}{one}} {\type{0}} - {\SomeTest{1}{one}} {\type{1}} - {\SomeTest{2}{one}} {\type{2}} - {\SomeTest{3}{one}} {\type{3}} + {\SomeTest{0}{one}} {\type{0}} + {\SomeTest{1}{one}} {\type{1}} + {\SomeTest{2}{one}} {\type{2}} + {\SomeTest{3}{one}} {\type{3}} \stopcombination \stopplacefigure + \startplacefigure[reference=hb:2,title={\type{one\null two}}] \startcombination[4*1] {\SomeTest{0}{one\null two}} {\type{0}} @@ -297,6 +287,7 @@ values to 1 and make sure that the words hyphenate at each character. {\SomeTest{3}{one\null two}} {\type{3}} \stopcombination \stopplacefigure + \startplacefigure[reference=hb:3,title={\type{\null one\null two}}] \startcombination[4*1] {\SomeTest{0}{\null one\null two}} {\type{0}} @@ -305,6 +296,7 @@ values to 1 and make sure that the words hyphenate at each character. {\SomeTest{3}{\null one\null two}} {\type{3}} \stopcombination \stopplacefigure + \startplacefigure[reference=hb:4,title={\type{one\null two\null}}] \startcombination[4*1] {\SomeTest{0}{one\null two\null}} {\type{0}} @@ -313,6 +305,7 @@ values to 1 and make sure that the words hyphenate at each character. 
{\SomeTest{3}{one\null two\null}} {\type{3}} \stopcombination \stopplacefigure + \startplacefigure[reference=hb:5,title={\type{\null one\null two\null}}] \startcombination[4*1] {\SomeTest{0}{\null one\null two\null}} {\type{0}} @@ -330,7 +323,7 @@ deal differently with (a sequence of) explicit hyphens. We already have added some control over aspects of the hyphenation and yet another one concerns automatic hyphens (e.g.\ \type {-} characters in the input). -When \type {\automatichyphenmode} has a value of 0, a hyphen will be turned into +When \lpr {automatichyphenmode} has a value of 0, a hyphen will be turned into an automatic discretionary. The snippets before and after it will not be hyphenated. A side effect is that a leading hyphen can lead to a split but one will seldom run into that situation. Setting a pre and post character makes this @@ -359,12 +352,6 @@ before--after \par before---after \par \stopbuffer -We show three samples: - -Input A: \typebuffer[a] -Input B: \typebuffer[b] -Input C: \typebuffer[c] - \startbuffer[demo] \startcombination[nx=4,ny=3,location=top] {\framed[align=normal,strut=no,top=\vskip.5ex,bottom=\vskip.5ex]{\automatichyphenmode\zerocount \hsize6em \getbuffer[a]}} {A~0~6em} @@ -382,104 +369,139 @@ Input C: \typebuffer[c] \stopcombination \stopbuffer -\startplacefigure[reference=automatic:1,title={The automatic modes \type {0} (default), \type {1} and \type {2}, with a \type {\hsize} +\startplacefigure[locationreference=automatichyphenmode:1,title={The automatic modes \type {0} (default), \type {1} and \type {2}, with a \prm {hsize} of 6em and 2pt (which triggers a linebreak).}] \dontcomplain \tt \getbuffer[demo] \stopplacefigure -\startplacefigure[reference=automatic:2,title={The automatic modes \type {0} (default), \type {1} and \type {2}, with \type -{\preexhyphenchar} and \type {\postexhyphenchar} set to characters \type {A} and \type {B}.}] +\startplacefigure[reference=automatichyphenmode:2,title={The automatic modes \type {0} (default), \type {1} and \type {2}, with \lpr {preexhyphenchar} and \lpr {postexhyphenchar} set to characters \type {A} and \type {B}.}] \postexhyphenchar`A\relax \preexhyphenchar `B\relax \dontcomplain \tt \getbuffer[demo] \stopplacefigure -As with primitive companions of other single character commands, the \type {\-} -command has a more verbose primitive version in \type {\explicitdiscretionary} +In \in {figure} [automatichyphenmode:1] \in {and} [automatichyphenmode:2] we show +what happens with three samples: + +Input A: \typebuffer[a] +Input B: \typebuffer[b] +Input C: \typebuffer[c] + +As with primitive companions of other single character commands, the \prm {-} +command has a more verbose primitive version in \lpr {explicitdiscretionary} and the normally intercepted in the hyphenator character \type {-} (or whatever -is configured) is available as \type {\automaticdiscretionary}. +is configured) is available as \lpr {automaticdiscretionary}. -\section{The main control loop} +\stopsection + +\startsection[title={The main control loop}] + +\topicindex {main loop} +\topicindex {hyphenation} In \LUATEX's main loop, almost all input characters that are to be typeset are -converted into \type {glyph} node records with subtype \quote {character}, but +converted into \nod {glyph} node records with subtype \quote {character}, but there are a few exceptions. -First, the \type {\accent} primitives creates nodes with subtype \quote {glyph} -instead of \quote {character}: one for the actual accent and one for the -accentee. 
The primary reason for this is that \type {\accent} in \TEX82 is -explicitly dependent on the current font encoding, so it would not make much -sense to attach a new meaning to the primitive's name, as that would invalidate -many old documents and macro packages. \footnote {Of course, modern packages will -not use the \type {\accent} primitive at all but try to map directly on composed -characters.} A secondary reason is that in \TEX82, \type {\accent} prohibits -hyphenation of the current word. Since in \LUATEX\ hyphenation only takes place -on \quote {character} nodes, it is possible to achieve the same effect. - -This change of meaning did happen with \type {\char}, that now generates \quote -{glyph} nodes with a character subtype. In traditional \TEX\ there was a strong -relationship between the 8|-|bit input encoding, hyphenation and glyphs taken -from a font. In \LUATEX\ we have \UTF\ input, and in most cases this maps -directly to a character in a font, apart from glyph replacement in the font -engine. If you want to access arbitrary glyphs in a font directly you can always -use \LUA\ to do so, because fonts are available as \LUA\ table. - -Second, all the results of processing in math mode eventually become nodes with -\quote {glyph} subtypes. - -Third, the \ALEPH|-|derived commands \type {\leftghost} and \type {\rightghost} -create nodes of a third subtype: \quote {ghost}. These nodes are ignored -completely by all further processing until the stage where inter|-|glyph kerning -is added. - -Fourth, automatic discretionaries are handled differently. \TEX82 inserts an -empty discretionary after sensing an input character that matches the \type -{\hyphenchar} in the current font. This test is wrong in our opinion: whether or -not hyphenation takes place should not depend on the current font, it is a -language property. \footnote {When \TEX\ showed up we didn't have \UNICODE\ yet -and being limited to eight bits meant that one sometimes had to compromise -between supporting character input, glyph rendering, hyphenation.} - -In \LUATEX, it works like this: if \LUATEX\ senses a string of input characters -that matches the value of the new integer parameter \type {\exhyphenchar}, it will -insert an explicit discretionary after that series of nodes. Initex sets the \type -{\exhyphenchar=`\-}. Incidentally, this is a global parameter instead of a -language-specific one because it may be useful to change the value depending on -the document structure instead of the text language. - -The insertion of discretionaries after a sequence of explicit hyphens happens at -the same time as the other hyphenation processing, {\it not\/} inside the main -control loop. - -The only use \LUATEX\ has for \type {\hyphenchar} is at the check whether a word -should be considered for hyphenation at all. If the \type {\hyphenchar} of the -font attached to the first character node in a word is negative, then hyphenation -of that word is abandoned immediately. This behaviour is added for backward -compatibility only, and the use of \type {\hyphenchar=-1} as a means of -preventing hyphenation should not be used in new \LUATEX\ documents. - -Fifth, \type {\setlanguage} no longer creates whatsits. The meaning of \type -{\setlanguage} is changed so that it is now an integer parameter like all others. -That integer parameter is used in \type {\glyph_node} creation to add language -information to the glyph nodes. 
In conjunction, the \type {\language} primitive is -extended so that it always also updates the value of \type {\setlanguage}. - -Sixth, the \type {\noboundary} command (that prohibits word boundary processing -where that would normally take place) now does create nodes. These nodes are -needed because the exact place of the \type {\noboundary} command in the input -stream has to be retained until after the ligature and font processing stages. - -Finally, there is no longer a \type {main_loop} label in the code. Remember that -\TEX82 did quite a lot of processing while adding \type {char_nodes} to the -horizontal list? For speed reasons, it handled that processing code outside of -the \quote {main control} loop, and only the first character of any \quote {word} -was handled by that \quote {main control} loop. In \LUATEX, there is no longer a -need for that (all hard work is done later), and the (now very small) bits of -character|-|handling code have been moved back inline. When \type -{\tracingcommands} is on, this is visible because the full word is reported, -instead of just the initial character. - -Because we tend to make hard codes behaviour configurable a few new primitives +\startitemize[n] + +\startitem + The \prm {accent} primitive creates nodes with subtype \quote {glyph} + instead of \quote {character}: one for the actual accent and one for the + accentee. The primary reason for this is that \prm {accent} in \TEX82 is + explicitly dependent on the current font encoding, so it would not make much + sense to attach a new meaning to the primitive's name, as that would + invalidate many old documents and macro packages. A secondary reason is that + in \TEX82, \prm {accent} prohibits hyphenation of the current word. Since + in \LUATEX\ hyphenation only takes place on \quote {character} nodes, it is + possible to achieve the same effect. Of course, modern \UNICODE\ aware macro + packages will not use the \prm {accent} primitive at all but try to map + directly on composed characters. + + This change of meaning did happen with \prm {char}, that now generates + \quote {glyph} nodes with a character subtype. In traditional \TEX\ there was + a strong relationship between the 8|-|bit input encoding, hyphenation and + glyphs taken from a font. In \LUATEX\ we have \UTF\ input, and in most cases + this maps directly to a character in a font, apart from glyph replacement in + the font engine. If you want to access arbitrary glyphs in a font directly + you can always use \LUA\ to do so, because fonts are available as \LUA\ + table. +\stopitem + +\startitem + All the results of processing in math mode eventually become nodes with + \quote {glyph} subtypes. In fact, the result of processing math is just + a regular list of glyphs, kerns, glue, penalties, boxes etc. +\stopitem + +\startitem + The \ALEPH|-|derived commands \lpr {leftghost} and \lpr {rightghost} + create nodes of a third subtype: \quote {ghost}. These nodes are ignored + completely by all further processing until the stage where inter|-|glyph + kerning is added. +\stopitem + +\startitem + Automatic discretionaries are handled differently. \TEX82 inserts an empty + discretionary after sensing an input character that matches the \prm + {hyphenchar} in the current font. This test is wrong in our opinion: whether + or not hyphenation takes place should not depend on the current font, it is a + language property. 
\footnote {When \TEX\ showed up we didn't have \UNICODE\ + yet and being limited to eight bits meant that one sometimes had to + compromise between supporting character input, glyph rendering, hyphenation.} + + In \LUATEX, it works like this: if \LUATEX\ senses a string of input + characters that matches the value of the new integer parameter \prm + {exhyphenchar}, it will insert an explicit discretionary after that series of + nodes. Initially \TEX\ sets the \type {\exhyphenchar=`\-}. Incidentally, this + is a global parameter instead of a language-specific one because it may be + useful to change the value depending on the document structure instead of the + text language. + + The insertion of discretionaries after a sequence of explicit hyphens happens + at the same time as the other hyphenation processing, {\it not\/} inside the + main control loop. + + The only use \LUATEX\ has for \prm {hyphenchar} is at the check whether a + word should be considered for hyphenation at all. If the \prm {hyphenchar} + of the font attached to the first character node in a word is negative, then + hyphenation of that word is abandoned immediately. This behaviour is added + for backward compatibility only, and the use of \type {\hyphenchar=-1} as a + means of preventing hyphenation should not be used in new \LUATEX\ documents. +\stopitem + +\startitem + The \prm {setlanguage} command no longer creates whatsits. The meaning of + \prm {setlanguage} is changed so that it is now an integer parameter like all + others. That integer parameter is used in \type {\glyph_node} creation to add + language information to the glyph nodes. In conjunction, the \prm {language} + primitive is extended so that it always also updates the value of \prm + {setlanguage}. +\stopitem + +\startitem + The \prm {noboundary} command (that prohibits word boundary processing + where that would normally take place) now does create nodes. These nodes are + needed because the exact place of the \prm {noboundary} command in the + input stream has to be retained until after the ligature and font processing + stages. +\stopitem + +\startitem + There is no longer a \type {main_loop} label in the code. Remember that + \TEX82 did quite a lot of processing while adding \type {char_nodes} to the + horizontal list? For speed reasons, it handled that processing code outside + of the \quote {main control} loop, and only the first character of any \quote + {word} was handled by that \quote {main control} loop. In \LUATEX, there is + no longer a need for that (all hard work is done later), and the (now very + small) bits of character|-|handling code have been moved back inline. When + \prm {tracingcommands} is on, this is visible because the full word is + reported, instead of just the initial character. 
+\stopitem + +\stopitemize + +Because we tend to make hard coded behaviour configurable a few new primitives have been added: \starttyping @@ -489,50 +511,59 @@ have been added: \stoptyping The first parameter has the following consequences for automatic discs (the ones -resulting from an \type {\exhyphenchar}: +resulting from an \prm {exhyphenchar}: \starttabulate[|c|l|l|] -\BC mode \BC automatic disc \type{-} \BC explicit disc \type{\-} \NC \NR -\HL -\NC \type{0} \NC \type {\exhyphenpenalty} \NC \type {\exhyphenpenalty} \NC \NR -\NC \type{1} \NC \type {\hyphenpenalty} \NC \type {\hyphenpenalty} \NC \NR -\NC \type{2} \NC \type {\exhyphenpenalty} \NC \type {\hyphenpenalty} \NC \NR -\NC \type{3} \NC \type {\hyphenpenalty} \NC \type {\exhyphenpenalty} \NC \NR -\NC \type{4} \NC \type {\automatichyphenpenalty} \NC \type {\explicithyphenpenalty} \NC \NR -\NC \type{5} \NC \type {\exhyphenpenalty} \NC \type {\explicithyphenpenalty} \NC \NR -\NC \type{6} \NC \type {\hyphenpenalty} \NC \type {\explicithyphenpenalty} \NC \NR -\NC \type{7} \NC \type {\automatichyphenpenalty} \NC \type {\exhyphenpenalty} \NC \NR -\NC \type{8} \NC \type {\automatichyphenpenalty} \NC \type {\hyphenpenalty} \NC \NR +\DB mode \BC automatic disc \type {-} \BC explicit disc \prm{-} \NC \NR +\TB +\NC \type{0} \NC \prm {exhyphenpenalty} \NC \prm {exhyphenpenalty} \NC \NR +\NC \type{1} \NC \prm {hyphenpenalty} \NC \prm {hyphenpenalty} \NC \NR +\NC \type{2} \NC \prm {exhyphenpenalty} \NC \prm {hyphenpenalty} \NC \NR +\NC \type{3} \NC \prm {hyphenpenalty} \NC \prm {exhyphenpenalty} \NC \NR +\NC \type{4} \NC \lpr {automatichyphenpenalty} \NC \lpr {explicithyphenpenalty} \NC \NR +\NC \type{5} \NC \prm {exhyphenpenalty} \NC \lpr {explicithyphenpenalty} \NC \NR +\NC \type{6} \NC \prm {hyphenpenalty} \NC \lpr {explicithyphenpenalty} \NC \NR +\NC \type{7} \NC \lpr {automatichyphenpenalty} \NC \prm {exhyphenpenalty} \NC \NR +\NC \type{8} \NC \lpr {automatichyphenpenalty} \NC \prm {hyphenpenalty} \NC \NR +\LL \stoptabulate -other values do what we always did in \LUATEX: insert \type {\exhyphenpenalty}. +other values do what we always did in \LUATEX: insert \prm {exhyphenpenalty}. -\section[patternsexceptions]{Loading patterns and exceptions} +\stopsection -The hyphenation algorithm in \LUATEX\ is quite different from the one in \TEX82, -although it uses essentially the same user input. +\startsection[title={Loading patterns and exceptions},reference=patternsexceptions] -After expansion, the argument for \type {\patterns} has to be proper \UTF8 with -individual patterns separated by spaces, no \type {\char} or \type {\chardef}d -commands are allowed. The current implementation quite strict and will reject all -non|-|\UNICODE\ characters. +\topicindex {hyphenation} +\topicindex {hyphenation+patterns} +\topicindex {hyphenation+exceptions} +\topicindex {patterns} +\topicindex {exceptions} -Likewise, the expanded argument for \type {\hyphenation} also has to be proper -\UTF8, but here a bit of extra syntax is provided: +Although we keep the traditional approach towards hyphenation (which is still +superior) the implementation of the hyphenation algorithm in \LUATEX\ is quite +different from the one in \TEX82. + +After expansion, the argument for \prm {patterns} has to be proper \UTF8 with +individual patterns separated by spaces, no \prm {char} or \prm {chardef}d +commands are allowed. The current implementation is quite strict and will reject +all non|-|\UNICODE\ characters. 
Likewise, the expanded argument for \prm +{hyphenation} also has to be proper \UTF8, but here a bit of extra syntax is +provided: \startitemize[n] \startitem - Three sets of arguments in curly braces (\type {{}{}{}}) indicates a desired - complex discretionary, with arguments as in \type {\discretionary}'s command in + Three sets of arguments in curly braces (\type {{}{}{}}) indicate a desired + complex discretionary, with arguments as in \prm {discretionary}'s command in normal document input. \stopitem \startitem - A \type {-} indicates a desired simple discretionary, cf.\ \type {\-} and \type - {\discretionary{-}{}{}} in normal document input. + A \type {-} indicates a desired simple discretionary, cf.\ \type {\-} and + \type {\discretionary{-}{}{}} in normal document input. \stopitem \startitem - Internal command names are ignored. This rule is provided especially for \type - {\discretionary}, but it also helps to deal with \type {\relax} commands that + Internal command names are ignored. This rule is provided especially for \prm + {discretionary}, but it also helps to deal with \prm {relax} commands that may sneak in. \stopitem \startitem @@ -540,22 +571,24 @@ Likewise, the expanded argument for \type {\hyphenation} also has to be proper \stopitem \stopitemize -The expanded argument is first converted back to a space-separated string while +The expanded argument is first converted back to a space|-|separated string while dropping the internal command names. This string is then converted into a dictionary by a routine that creates key|-|value pairs by converting the other listed items. It is important to note that the keys in an exception dictionary can always be generated from the values. Here are a few examples: \starttabulate[|l|l|l|] -\BC value \BC implied key (input) \NC effect \NC\NR +\DB value \BC implied key (input) \BC effect \NC\NR +\TB \NC \type {ta-ble} \NC table \NC \type {ta\-ble} ($=$ \type {ta\discretionary{-}{}{}ble}) \NC\NR \NC \type {ba{k-}{}{c}ken} \NC backen \NC \type {ba\discretionary{k-}{}{c}ken} \NC\NR +\LL \stoptabulate The resultant patterns and exception dictionary will be stored under the language -code that is the present value of \type {\language}. +code that is the present value of \prm {language}. -In the last line of the table, you see there is no \type {\discretionary} command +In the last line of the table, you see there is no \prm {discretionary} command in the value: the command is optional in the \TEX-based input syntax. The underlying reason for that is that it is conceivable that a whole dictionary of words is stored as a plain text file and loaded into \LUATEX\ using one of the @@ -573,20 +606,64 @@ actual explicit hyphen character if needed). For example, this matches the word \hyphenation{multi{-}{}{-}word{-}{}{-}boun-daries} \stoptyping -The motivation behind the \ETEX\ extension \type {\savinghyphcodes} was that +The motivation behind the \ETEX\ extension \prm {savinghyphcodes} was that hyphenation heavily depended on font encodings. This is no longer true in \LUATEX, and the corresponding primitive is basically ignored. Because we now -have \type {hjcode}, the case relate codes can be used exclusively for \type -{\uppercase} and \type {\lowercase}. - -\section{Applying hyphenation} +have \lpr {hjcode}, the case relate codes can be used exclusively for \prm +{uppercase} and \prm {lowercase}. + +The three curly brace pair pattern in an exception can be somewhat unexpected so +we will try to explain it by example. 
The pattern \type {foo{}{}{x}bar} pattern +creates a lookup \type {fooxbar} and the pattern \type {foo{}{}{}bar} creates +\type {foobar}. Then, when a hit happens there is a replacement text (\type {x}) +or none. Because we introduced penalties in discretionary nodes, the exception +syntax now also can take a penalty specification. The value between square brackets +is a multiplier for \lpr {exceptionpenalty}. Here we have set it to 10000 so +effectively we get 30000 in the example. + +\def\ShowSample#1#2% + {\startlinecorrection[blank] + \hyphenation{#1}% + \exceptionpenalty=10000 + \bTABLE[foregroundstyle=type] + \bTR + \bTD[align=middle,nx=4] \type{#1} \eTD + \eTR + \bTR + \bTD[align=middle] \type{10em} \eTD + \bTD[align=middle] \type {3em} \eTD + \bTD[align=middle] \type {0em} \eTD + \bTD[align=middle] \type {6em} \eTD + \eTR + \bTR + \bTD[width=10em]\vtop{\hsize 10em 123 #2 123\par}\eTD + \bTD[width=10em]\vtop{\hsize 3em 123 #2 123\par}\eTD + \bTD[width=10em]\vtop{\hsize 0em 123 #2 123\par}\eTD + \bTD[width=10em]\vtop{\setupalign[verytolerant,stretch]\rmtf\hsize 6em 123 #2 #2 #2 #2 123\par}\eTD + \eTR + \eTABLE + \stoplinecorrection} + +\ShowSample{x{a-}{-b}{}x{a-}{-b}{}x{a-}{-b}{}x{a-}{-b}{}xx}{xxxxxx} +\ShowSample{x{a-}{-b}{}x{a-}{-b}{}[3]x{a-}{-b}{}[1]x{a-}{-b}{}xx}{xxxxxx} + +\ShowSample{z{a-}{-b}{z}{a-}{-b}{z}{a-}{-b}{z}{a-}{-b}{z}z}{zzzzzz} +\ShowSample{z{a-}{-b}{z}{a-}{-b}{z}[3]{a-}{-b}{z}[1]{a-}{-b}{z}z}{zzzzzz} + +\stopsection + +\startsection[title={Applying hyphenation}] + +\topicindex {hyphenation+how it works} +\topicindex {hyphenation+discretionaries} +\topicindex {discretionaries} The internal structures \LUATEX\ uses for the insertion of discretionaries in words is very different from the ones in \TEX82, and that means there are some noticeable differences in handling as well. First and foremost, there is no \quote {compressed trie} involved in hyphenation. -The algorithm still reads \PATGEN-generated pattern files, but \LUATEX\ uses a +The algorithm still reads pattern files generated by \PATGEN, but \LUATEX\ uses a finite state hash to match the patterns against the word to be hyphenated. This algorithm is based on the \quote {libhnj} library used by \OPENOFFICE, which in turn is inspired by \TEX. @@ -605,12 +682,12 @@ of the implementation: \stopitem \startitem Because there is no \quote {trie preparation} stage, language patterns never - become frozen. This means that the primitive \type {\patterns} (and its \LUA\ + become frozen. This means that the primitive \prm {patterns} (and its \LUA\ counterpart \type {lang.patterns}) can be used at any time, not only in ini\TEX. \stopitem \startitem - Only the string representation of \type {\patterns} and \type {\hyphenation} is + Only the string representation of \prm {patterns} and \prm {hyphenation} is stored in the format file. At format load time, they are simply re|-|evaluated. It follows that there is no real reason to preload languages in the format file. In fact, it is usually not a good idea to do so. It is @@ -618,23 +695,27 @@ of the implementation: needed. 
\stopitem \startitem - \LUATEX\ uses the language-specific variables \type {\prehyphenchar} and \type - {\posthyphenchar} in the creation of implicit discretionaries, instead of - \TEX82's \type {\hyphenchar}, and the values of the language|-|specific variables - \type {\preexhyphenchar} and \type {\postexhyphenchar} for explicit + \LUATEX\ uses the language-specific variables \lpr {prehyphenchar} and \lpr + {posthyphenchar} in the creation of implicit discretionaries, instead of + \TEX82's \prm {hyphenchar}, and the values of the language|-|specific + variables \lpr {preexhyphenchar} and \lpr {postexhyphenchar} for explicit discretionaries (instead of \TEX82's empty discretionary). \stopitem \startitem - The value of the two counters related to hyphenation, \type {\hyphenpenalty} - and \type {\exhyphenpenalty}, are now stored in the discretionary nodes. This - permits a local overload for explicit \type {\discretionary} commands. The + The value of the two counters related to hyphenation, \prm {hyphenpenalty} + and \prm {exhyphenpenalty}, are now stored in the discretionary nodes. This + permits a local overload for explicit \prm {discretionary} commands. The value current when the hyphenation pass is applied is used. When no callbacks are used this is compatible with traditional \TEX. When you apply the \LUA\ \type {lang.hyphenate} function the current values are used. \stopitem +\startitem + The hyphenation exception dictionary is maintained as key|-|value hash, and + that is also dynamic, so the \type {hyph_size} setting is not used either. +\stopitem \stopitemize -Because we store penalties in the disc node the \type {\discretionary} command has +Because we store penalties in the disc node the \prm {discretionary} command has been extended to accept an optional penalty specification, so you can do the following: @@ -657,18 +738,18 @@ inserted at the left-hand side of a word). Word boundaries are no longer implied by font switches, but by language switches. One word can have two separate fonts and still be hyphenated correctly (but it -can not have two different languages, the \type {\setlanguage} command forces a +can not have two different languages, the \prm {setlanguage} command forces a word boundary). All languages start out with \type {\prehyphenchar=`\-}, \type {\posthyphenchar=0}, \type {\preexhyphenchar=0} and \type {\postexhyphenchar=0}. When you assign the values of one of these four parameters, you are actually changing the settings -for the current \type {\language}, this behaviour is compatible with \type {\patterns} -and \type {\hyphenation}. +for the current \prm {language}, this behaviour is compatible with \prm {patterns} +and \prm {hyphenation}. \LUATEX\ also hyphenates the first word in a paragraph. Words can be up to 256 -characters long (up from 64 in \TEX82). Longer words generate an error right now, -but eventually either the limitation will be removed or perhaps it will become +characters long (up from 64 in \TEX82). Longer words are ignored right now, but +eventually either the limitation will be removed or perhaps it will become possible to silently ignore the excess characters (this is what happens in \TEX82, but there the behaviour cannot be controlled). @@ -677,10 +758,12 @@ that this function expects to receive a list of \quote {character} nodes. It wil not operate properly in the presence of \quote {glyph}, \quote {ligature}, or \quote {ghost} nodes, nor does it know how to deal with kerning. 
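For illustration, a minimal sketch of a \type {hyphenate} callback that simply
delegates to \type {lang.hyphenate} could look as follows; it assumes that, at
this point, the list indeed still consists of \quote {character} nodes:

\starttyping
callback.register("hyphenate", function(head, tail)
    -- the language is taken from the glyph nodes themselves, so no
    -- language object needs to be passed here
    lang.hyphenate(head, tail)
end)
\stoptyping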
-The hyphenation exception dictionary is maintained as key|-|value hash, and that -is also dynamic, so the \type {hyph_size} setting is not used either. +\stopsection -\section{Applying ligatures and kerning} +\startsection[title={Applying ligatures and kerning}] + +\topicindex {ligatures} +\topicindex {kerning} After all possible hyphenation points have been inserted in the list, \LUATEX\ will process the list to convert the \quote {character} nodes into \quote {glyph} @@ -689,24 +772,28 @@ ligatures are processed, then all kerning information is applied to the result list. But those two stages are somewhat dependent on each other: If the used font makes it possible to do so, the ligaturing stage adds virtual \quote {character} nodes to the word boundaries in the list. While doing so, it removes and -interprets \type {\noboundary} nodes. The kerning stage deletes those word +interprets \prm {noboundary} nodes. The kerning stage deletes those word boundary items after it is done with them, and it does the same for \quote {ghost} nodes. Finally, at the end of the kerning stage, all remaining \quote {character} nodes are converted to \quote {glyph} nodes. -This work separation is worth mentioning because, if you overrule from \LUA\ only +This word separation is worth mentioning because, if you overrule from \LUA\ only one of the two callbacks related to font handling, then you have to make sure you perform the tasks normally done by \LUATEX\ itself in order to make sure that the other, non|-|overruled, routine continues to function properly. -Work in this area is not yet complete, but most of the possible cases are handled -by our rewritten ligaturing engine. At some point all of the possible inputs will -become supported. \footnote {Not all of this makes sense because we nowadays have -\OPENTYPE\ fonts and ligature building can happen in ,any different ways there.} +Although we could improve the situation the reality is that in modern \OPENTYPE\ +fonts ligatures can be constructed in many ways: by replacing a sequence of +characters by one glyph, or by selectively replacing individual glyphs, or by +kerning, or any combination of this. Add to that contextual analysis and it will +be clear that we have to let \LUA\ do that job instead. The generic font handler +that we provide (which is part of \CONTEXT) distinguishes between base mode +(which essentially is what we describe here and which delegates the task to \TEX) +and node mode (which deals with more complex fonts. -For example, take the word \type {office}, hyphenated \type {of-fice}, using a -\quote {normal} font with all the \type {f}-\type {f} and \type {f}-\type {i} -type ligatures: +Let's look at an example. Take the word \type {office}, hyphenated \type +{of-fice}, using a \quote {normal} font with all the \type {f}-\type {f} and +\type {f}-\type {i} type ligatures: \starttabulate[|l|l|] \NC initial \NC \type {{o}{f}{f}{i}{c}{e}} \NC\NR @@ -734,11 +821,15 @@ the top-level discretionary that resulted from the first hyphenation point. 
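Before turning to that nested solution in tabular form, note that such
discretionaries can also be inspected from \LUA; the following sketch only
assumes the standard \type {node} library and prints the \type {pre}, \type
{post} and \type {replace} fields of each discretionary in a list:

\starttyping
local DISC  = node.id("disc")
local GLYPH = node.id("glyph")

local function fieldtext(n) -- collect the characters of a sublist
    if not n then return "" end
    local t = { }
    for g in node.traverse_id(GLYPH, n) do
        t[#t+1] = utf8.char(g.char)
    end
    return table.concat(t)
end

local function showdiscs(head)
    for d in node.traverse_id(DISC, head) do
        print(fieldtext(d.pre), fieldtext(d.post), fieldtext(d.replace))
    end
end
\stoptyping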
Here is that nested solution again, in a different representation: +\testpage[4] + \starttabulate[|l|c|c|c|c|c|c|] -\NC \BC pre \BC \BC post \BC \BC replace \BC \NC \NR +\DB \BC pre \BC \BC post \BC \BC replace \BC \NC \NR +\TB \NC topdisc \NC \type {f-} \NC (1) \NC \NC sub 1 \NC \NC sub 2 \NC \NR \NC sub 1 \NC \type {f-} \NC (2) \NC \type {i} \NC (3) \NC \type {<fi>} \NC (4) \NC \NR \NC sub 2 \NC \type {<ff>-} \NC (5) \NC \type {i} \NC (6) \NC \type {<ffi>} \NC (7) \NC \NR +\LL \stoptabulate When line breaking is choosing its breakpoints, the following fields will @@ -763,21 +854,23 @@ the first node). One can observe that the \type {of-f-ice} and \type {off-ice} cases both end with the same actual post replacement list (\type {i}), and that this would be the -case even if that \type {i} was the first item of a potential following ligature -like \type {ic}. This allows \LUATEX\ to do away with one of the fields, and thus -make the whole stuff fit into just two discretionary nodes. +case even if \type {i} was the first item of a potential following ligature like +\type {ic}. This allows \LUATEX\ to do away with one of the fields, and thus make +the whole stuff fit into just two discretionary nodes. The mapping of the seven list fields to the six fields in this discretionary node pair is as follows: \starttabulate[|l|c|c|] -\BC field \BC description \NC \NC \NR +\DB field \BC description \NC \NC \NR +\TB \NC \type {disc1.pre} \NC \type {f-} \NC (1) \NC \NR \NC \type {disc1.post} \NC \type {<fi>} \NC (4) \NC \NR \NC \type {disc1.replace} \NC \type {<ffi>} \NC (7) \NC \NR \NC \type {disc2.pre} \NC \type {f-} \NC (2) \NC \NR \NC \type {disc2.post} \NC \type {i} \NC (3,6) \NC \NR \NC \type {disc2.replace} \NC \type {<ff>-} \NC (5) \NC \NR +\LL \stoptabulate What is actually generated after ligaturing has been applied is therefore: @@ -806,13 +899,21 @@ mapping a sequence of glyphs onto one glyph, but also by selective replacement a kerning. This means that the above examples are just representing the traditional approach. -\section{Breaking paragraphs into lines} +\stopsection + +\startsection[title={Breaking paragraphs into lines}] -This code is still almost unchanged, but because of the above|-|mentioned changes +\topicindex {linebreaks} +\topicindex {paragraphs} +\topicindex {discretionaries} + +This code is almost unchanged, but because of the above|-|mentioned changes with respect to discretionaries and ligatures, line breaking will potentially be different from traditional \TEX. The actual line breaking code is still based on the \TEX82 algorithms, and it does not expect there to be discretionaries inside -of discretionaries. +of discretionaries. But, as patterns evolve and font handling can influence +discretionaries, you need to be aware of the fact that long term consistency is not +an engine matter only. But that situation is now fairly common in \LUATEX, due to the changes to the ligaturing mechanism. And also, the \LUATEX\ discretionary nodes are implemented @@ -826,10 +927,19 @@ The combined effect of these two differences is that \LUATEX\ does not always us all of the potential breakpoints in a paragraph, especially when fonts with many ligatures are used. Of course kerning also complicates matters here. -\section{The \type {lang} library} +\stopsection + +\startsection[title={The \type {lang} library}][library=lang] + +\subsection {\type {new} and \type {id}} -This library provides the interface to \LUATEX's structure -representing a language, and the associated functions. 
+\topicindex {languages+library} + +\libindex {new} +\libindex {id} + +This library provides the interface to \LUATEX's structure representing a +language, and the associated functions. \startfunctioncall <language> l = lang.new() @@ -837,106 +947,141 @@ representing a language, and the associated functions. \stopfunctioncall This function creates a new userdata object. An object of type \type {<language>} -is the first argument to most of the other functions in the \type {lang} -library. These functions can also be used as if they were object methods, using -the colon syntax. - -Without an argument, the next available internal id number will be assigned to -this object. With argument, an object will be created that links to the internal -language with that id number. +is the first argument to most of the other functions in the \type {lang} library. +These functions can also be used as if they were object methods, using the colon +syntax. Without an argument, the next available internal id number will be +assigned to this object. With argument, an object will be created that links to +the internal language with that id number. \startfunctioncall <number> n = lang.id(<language> l) \stopfunctioncall -returns the internal \type {\language} id number this object refers to. +The number returned is the internal \prm {language} id number this object refers to. + +\subsection {\type {hyphenation}} + +\libindex {hyphenation} + +You can hyphenate a string directly with: \startfunctioncall <string> n = lang.hyphenation(<language> l) lang.hyphenation(<language> l, <string> n) \stopfunctioncall -Either returns the current hyphenation exceptions for this language, or adds new -ones. The syntax of the string is explained in~\in {section} +\subsection {\type {clear_hyphenation} and \type {clean}} + +\libindex {clear_hyphenation} +\libindex {clean} + +This either returns the current hyphenation exceptions for this language, or adds +new ones. The syntax of the string is explained in~\in {section} [patternsexceptions]. \startfunctioncall lang.clear_hyphenation(<language> l) \stopfunctioncall -Clears the exception dictionary (string) for this language. +This call clears the exception dictionary (string) for this language. \startfunctioncall <string> n = lang.clean(<language> l, <string> o) <string> n = lang.clean(<string> o) \stopfunctioncall -Creates a hyphenation key from the supplied hyphenation value. The syntax of the -argument string is explained in~\in {section} [patternsexceptions]. This function -is useful if you want to do something else based on the words in a dictionary -file, like spell|-|checking. +This function creates a hyphenation key from the supplied hyphenation value. The +syntax of the argument string is explained in \in {section} [patternsexceptions]. +This function is useful if you want to do something else based on the words in a +dictionary file, like spell|-|checking. + +\subsection {\type {patterns} and \type {clear_patterns}} + +\libindex {patterns} +\libindex {clear_patterns} \startfunctioncall <string> n = lang.patterns(<language> l) lang.patterns(<language> l, <string> n) \stopfunctioncall -Adds additional patterns for this language object, or returns the current set. -The syntax of this string is explained in~\in {section} [patternsexceptions]. +This adds additional patterns for this language object, or returns the current +set. The syntax of this string is explained in \in {section} +[patternsexceptions]. 

\startfunctioncall
lang.clear_patterns(<language> l)
\stopfunctioncall

-Clears the pattern dictionary for this language.
+This can be used to clear the pattern dictionary for a language.
+
+\subsection {\type {hyphenationmin}}
+
+\libindex {hyphenationmin}
+
+This function sets (or gets) the value of the \TEX\ parameter
+\type {\hyphenationmin}.

\startfunctioncall
-<number> n = lang.prehyphenchar(<language> l)
-lang.prehyphenchar(<language> l, <number> n)
+<number> n = lang.hyphenationmin(<language> l)
+lang.hyphenationmin(<language> l, <number> n)
\stopfunctioncall

-Gets or sets the \quote {pre|-|break} hyphen character for implicit hyphenation
-in this language (initially the hyphen, decimal 45).
+\subsection {\type {[pre|post][ex|]hyphenchar}}
+
+\libindex {prehyphenchar}
+\libindex {posthyphenchar}
+\libindex {preexhyphenchar}
+\libindex {postexhyphenchar}

\startfunctioncall
+<number> n = lang.prehyphenchar(<language> l)
+lang.prehyphenchar(<language> l, <number> n)
+
<number> n = lang.posthyphenchar(<language> l)
lang.posthyphenchar(<language> l, <number> n)
\stopfunctioncall

-Gets or sets the \quote {post|-|break} hyphen character for implicit hyphenation
-in this language (initially null, decimal~0, indicating emptiness).
+These two are used to get or set the \quote {pre|-|break} and \quote
+{post|-|break} hyphen characters for implicit hyphenation in this language. The
+initial values are decimal 45 (hyphen) and decimal~0 (indicating emptiness).

\startfunctioncall
<number> n = lang.preexhyphenchar(<language> l)
lang.preexhyphenchar(<language> l, <number> n)
-\stopfunctioncall

-Gets or sets the \quote {pre|-|break} hyphen character for explicit hyphenation
-in this language (initially null, decimal~0, indicating emptiness).
-
-\startfunctioncall
<number> n = lang.postexhyphenchar(<language> l)
lang.postexhyphenchar(<language> l, <number> n)
\stopfunctioncall

-Gets or sets the \quote {post|-|break} hyphen character for explicit hyphenation
-in this language (initially null, decimal~0, indicating emptiness).
+These get or set the \quote {pre|-|break} and \quote {post|-|break} hyphen
+characters for explicit hyphenation in this language. Both are initially
+decimal~0 (indicating emptiness).
+
+\subsection {\type {hyphenate}}
+
+\libindex {hyphenate}
+
+The next call inserts hyphenation points (discretionary nodes) in a node list. If
+\type {tail} is given as argument, processing stops on that node. Currently,
+\type {success} is always true if \type {head} (and \type {tail}, if specified)
+are proper nodes, regardless of possible other errors.

\startfunctioncall
<boolean> success = lang.hyphenate(<node> head)
<boolean> success = lang.hyphenate(<node> head, <node> tail)
\stopfunctioncall

-Inserts hyphenation points (discretionary nodes) in a node list. If \type {tail}
-is given as argument, processing stops on that node. Currently, \type {success}
-is always true if \type {head} (and \type {tail}, if specified) are proper nodes,
-regardless of possible other errors.
-
Hyphenation works only on \quote {characters}, a special subtype of all the glyph
nodes with the node subtype having the value \type {1}. Glyph nodes with
different subtypes are not processed. See \in {section} [charsandglyphs] for
more details.
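The language|-|specific quantities discussed in the previous subsections can all
be set through the same object interface; here is a small sketch with arbitrary
values:

\starttyping
local l = lang.new()
l:prehyphenchar(0x2010)  -- the character put in front of the break
l:posthyphenchar(0)      -- nothing at the start of the next line
l:hyphenationmin(5)      -- only consider words of five characters or more
print(l:id(), l:prehyphenchar(), l:hyphenationmin())
\stoptyping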
+\subsection {\type {[set|get]hjcode}} + +\libindex {sethjcode} +\libindex {gethjcode} + The following two commands can be used to set or query hj codes: \startfunctioncall @@ -945,7 +1090,9 @@ lang.sethjcode(<language> l, <number> char, <number> usedchar) \stopfunctioncall When you set a hjcode the current sets get initialized unless the set was already -initialized due to \type {\savinghyphcodes} being larger than zero. +initialized due to \prm {savinghyphcodes} being larger than zero. + +\stopsection \stopchapter diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-logos.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-logos.tex index 7406dd60206..3172336ec3f 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-logos.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-logos.tex @@ -1,5 +1,7 @@ \startenvironment luatex-logos +\usemodule[abr-02] + \logo[DFONT] {dfont} \logo[CFF] {cff} \logo[CMAP] {CMap} diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-lua.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-lua.tex index 82b060440b1..f9107fa1f96 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-lua.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-lua.tex @@ -1,15 +1,17 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-lua -\startchapter[reference=lua,title={\LUA\ general}] +\startchapter[reference=lua,title={Using \LUATEX}] -\section[init]{Initialization} +\startsection[title={Initialization},reference=init] -\subsection{\LUATEX\ as a \LUA\ interpreter} +\startsubsection[title={\LUATEX\ as a \LUA\ interpreter}] + +\topicindex {initialization} +\topicindex {\LUA+interpreter} There are some situations that make \LUATEX\ behave like a standalone \LUA\ interpreter: @@ -35,7 +37,11 @@ positive values, just like the \LUA\ interpreter. in effect, a somewhat bulky stand alone \LUA\ interpreter with a bunch of extra preloaded libraries. -\subsection{\LUATEX\ as a \LUA\ byte compiler} +\stopsubsection + +\startsubsection[title={\LUATEX\ as a \LUA\ byte compiler}] + +\topicindex {\LUA+byte code} There are two situations that make \LUATEX\ behave like the \LUA\ byte compiler: @@ -46,9 +52,15 @@ There are two situations that make \LUATEX\ behave like the \LUA\ byte compiler: In this mode, \LUATEX\ is exactly like \type {luac} from the stand alone \LUA\ distribution, except that it does not have the \type {-l} switch, and that it -accepts (but ignores) the \type {--luaconly} switch. +accepts (but ignores) the \type {--luaconly} switch. The current version of \LUA\ +can dump bytecode using \type {string.dump} so we might decide to drop this +version of \LUATEX. -\subsection{Other commandline processing} +\stopsubsection + +\startsubsection[title={Other commandline processing}] + +\topicindex {command line} When the \LUATEX\ executable starts, it looks for the \type {--lua} command line option. If there is no \type {--lua} option, the command line is interpreted in a @@ -56,6 +68,8 @@ similar fashion as the other \TEX\ engines. Some options are accepted but have n consequence. 
The following command|-|line options are understood: \starttabulate[|l|p|] +\DB commandline argument \BC explanation \NC \NR +\TB \NC \type{--credits} \NC display credits and exit \NC \NR \NC \type{--debug-format} \NC enable format debugging \NC \NR \NC \type{--draftmode} \NC switch on draft mode i.e.\ generate no output in \PDF\ mode \NC \NR @@ -65,112 +79,137 @@ consequence. The following command|-|line options are understood: \NC \type{--halt-on-error} \NC stop processing at the first error\NC \NR \NC \type{--help} \NC display help and exit \NC\NR \NC \type{--ini} \NC be \type {iniluatex}, for dumping formats \NC\NR -\NC \type{--interaction=STRING} \NC set interaction mode: \type {batchmode}, \type {nonstopmode}, \type {scrollmode} or \type {errorstopmode} \NC \NR +\NC \type{--interaction=STRING} \NC set interaction mode: \type {batchmode}, \type {nonstopmode}, + \type {scrollmode} or \type {errorstopmode} \NC \NR \NC \type{--jobname=STRING} \NC set the job name to \type {STRING} \NC \NR -\NC \type{--kpathsea-debug=NUMBER} \NC set path searching debugging flags according to the bits of \type {NUMBER} \NC \NR +\NC \type{--kpathsea-debug=NUMBER} \NC set path searching debugging flags according to the bits of + \type {NUMBER} \NC \NR \NC \type{--lua=FILE} \NC load and execute a \LUA\ initialization script \NC\NR -\NC \type{--[no-]mktex=FMT} \NC disable/enable \type {mktexFMT} generation with \type {FMT} is \type {tex} or \type {tfm} \NC \NR +\NC \type{--[no-]mktex=FMT} \NC disable/enable \type {mktexFMT} generation with \type {FMT} is + \type {tex} or \type {tfm} \NC \NR \NC \type{--nosocket} \NC disable the \LUA\ socket library \NC\NR -\NC \type{--output-comment=STRING} \NC use \type {STRING} for \DVI\ file comment instead of date (no effect for \PDF) \NC \NR +\NC \type{--output-comment=STRING} \NC use \type {STRING} for \DVI\ file comment instead of date (no + effect for \PDF) \NC \NR \NC \type{--output-directory=DIR} \NC use \type {DIR} as the directory to write files to \NC \NR -\NC \type{--output-format=FORMAT} \NC use \type {FORMAT} for job output; \type {FORMAT} is \type {dvi} or \type {pdf} \NC \NR +\NC \type{--output-format=FORMAT} \NC use \type {FORMAT} for job output; \type {FORMAT} is \type {dvi} + or \type {pdf} \NC \NR \NC \type{--progname=STRING} \NC set the program name to \type {STRING} \NC \NR \NC \type{--recorder} \NC enable filename recorder \NC \NR \NC \type{--safer} \NC disable easily exploitable \LUA\ commands \NC\NR \NC \type{--[no-]shell-escape} \NC disable/enable system calls \NC \NR -\NC \type{--shell-restricted} \NC restrict system calls to a list of commands given in \type {texmf.cnf} \NC \NR +\NC \type{--shell-restricted} \NC restrict system calls to a list of commands given in \type + {texmf.cnf} \NC \NR \NC \type{--synctex=NUMBER} \NC enable \type {synctex} \NC \NR \NC \type{--utc} \NC use utc times when applicable \NC \NR \NC \type{--version} \NC display version and exit \NC \NR +\LL \stoptabulate -Some of the traditional flags are just ignored: \type {--etex}, \type -{--translate-file}, \type {--8bit}. \type {--[no-]parse-first-line}, \type -{--default-translate-file}. Also, we no longer support write18 because \type -{os.execute} can do the same. +We don't support \prm {write} 18 because \type {os.execute} can do the same. It +simplifies the code and makes more write targets possible. 
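A call into the \type {os} library is indeed all it takes; whether a sketch like
the following actually runs the command depends on the shell escape settings
described further on, and the command itself is only a placeholder:

\starttyping
local ok, how, status = os.execute("mpost --version") -- placeholder command
if not ok then
    texio.write_nl("log", "external command failed or was not permitted")
end
\stoptyping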
-The value to use for \type {\jobname} is decided as follows: +The value to use for \prm {jobname} is decided as follows: \startitemize \startitem If \type {--jobname} is given on the command line, its argument will be the - value for \type {\jobname}, without any changes. The argument will not be + value for \prm {jobname}, without any changes. The argument will not be used for actual input so it need not exist. The \type {--jobname} switch only - controls the \type {\jobname} setting. + controls the \prm {jobname} setting. \stopitem \startitem - Otherwise, \type {\jobname} will be the name of the first file that is read + Otherwise, \prm {jobname} will be the name of the first file that is read from the file system, with any path components and the last extension (the part following the last \type {.}) stripped off. \stopitem \startitem - An exception to the previous point: if the command line goes into interactive - mode (by starting with a command) and there are no files input via \type - {\everyjob} either, then the \type {\jobname} is set to \type {texput} as a - last resort. + There is an exception to the previous point: if the command line goes into + interactive mode (by starting with a command) and there are no files input + via \prm {everyjob} either, then the \prm {jobname} is set to \type + {texput} as a last resort. \stopitem \stopitemize The file names for output files that are generated automatically are created by attaching the proper extension (\type {log}, \type {pdf}, etc.) to the found -\type {\jobname}. These files are created in the directory pointed to by \type +\prm {jobname}. These files are created in the directory pointed to by \type {--output-directory}, or in the current directory, if that switch is not present. -\blank - Without the \type {--lua} option, command line processing works like it does in -any other web2c-based typesetting engine, except that \LUATEX\ has a few extra -switches. - -If the \type {--lua} option is present, \LUATEX\ will enter an alternative mode -of command line processing in comparison to the standard web2c programs. - -In this mode, a small series of actions is taken in order. First, it will parse -the command line as usual, but it will only interpret a small subset of the -options immediately: \type {--safer}, \type {--nosocket}, \type -{--[no-]shell-escape}, \type {--enable-write18}, \type {--disable-write18}, \type -{--shell-restricted}, \type {--help}, \type {--version}, and \type {--credits}. - -Next \LUATEX\ searches for the requested \LUA\ initialization script. If it -cannot be found using the actual name given on the command line, a second attempt -is made by prepending the value of the environment variable \type {LUATEXDIR}, if -that variable is defined in the environment. - -Then it checks the various safety switches. You can use those to disable some -\LUA\ commands that can easily be abused by a malicious document. At the moment, -\type {--safer} \type {nil}s the following functions: - -\starttabulate[|l|l|] -\BC library \BC functions \NC \NR -\NC \type {os} \NC \type {execute} \type {exec} \type {spawn} \type {setenv} \type {rename} \type {remove} \type {tmpdir} \NC \NR -\NC \type {io} \NC \type {popen} \type {output} \type {tmpfile} \NC \NR -\NC \type {lfs} \NC \type {rmdir} \type {mkdir} \type {chdir} \type {lock} \type {touch} \NC \NR -\stoptabulate +any other \WEBC|-|based typesetting engine, except that \LUATEX\ has a few extra +switches and lacks some others. 
Also, if the \type {--lua} option is present, +\LUATEX\ will enter an alternative mode of command line processing in comparison +to the standard \WEBC\ programs. In this mode, a small series of actions is taken +in the following order: -Furthermore, it disables loading of compiled \LUA\ libraries and it makes \type -{io.open()} fail on files that are opened for anything besides reading. +\startitemize[n] -When \LUATEX\ starts it set the locale to a neutral value. If for some reason you -use \type {os.locale}, you need to make sure you \type {nil} it afterwards -because otherwise it can interfere with code that for instance generates dates. -You can nil the locale with +\startitem + First, it will parse the command line as usual, but it will only interpret a + small subset of the options immediately: \type {--safer}, \type {--nosocket}, + \type {--[no-]shell-escape}, \type {--enable-write18}, \type + {--disable-write18}, \type {--shell-restricted}, \type {--help}, \type + {--version}, and \type {--credits}. +\stopitem -\starttyping -os.setlocale(nil.nil) -\stoptyping +\startitem + Next \LUATEX\ searches for the requested \LUA\ initialization script. If it + cannot be found using the actual name given on the command line, a second + attempt is made by prepending the value of the environment variable \type + {LUATEXDIR}, if that variable is defined in the environment. +\stopitem + +\startitem + Then it checks the various safety switches. You can use those to disable some + \LUA\ commands that can easily be abused by a malicious document. At the + moment, \type {--safer} \type {nil}s the following functions: + + \blank + + \starttabulate[|c|l|] + \DB library \BC functions \NC \NR + \TB + \NC \type {os} \NC \type {execute} \type {exec} \type {spawn} \type {setenv} + \type {rename} \type {remove} \type {tmpdir} \NC \NR + \NC \type {io} \NC \type {popen} \type {output} \type {tmpfile} \NC \NR + \NC \type {lfs} \NC \type {rmdir} \type {mkdir} \type {chdir} \type {lock} + \type {touch} \NC \NR + \LL + \stoptabulate -The \type {--nosocket} option makes the socket library unavailable, so that \LUA\ -cannot use networking. + \blank -The switches \type {--[no-]shell-escape}, \type {--[enable|disable]-write18}, and -\type {--shell-restricted} have the same effects as in \PDFTEX, and additionally -make \type {io.popen()}, \type {os.execute}, \type {os.exec} and \type {os.spawn} -adhere to the requested option. + Furthermore, it disables loading of compiled \LUA\ libraries and it makes + \type {io.open()} fail on files that are opened for anything besides reading. +\stopitem + +\startitem + When \LUATEX\ starts it sets the \type {locale} to a neutral value. If for + some reason you use \type {os.locale}, you need to make sure you \type {nil} + it afterwards because otherwise it can interfere with code that for instance + generates dates. You can ignore the \type {locale} with: -Next the initialization script is loaded and executed. From within the script, -the entire command line is available in the \LUA\ table \type {arg}, beginning with -\type {arg[0]}, containing the name of the executable. As consequence warnings -about unrecognized options are suppressed. + \starttyping + os.setlocale(nil,nil) + \stoptyping + + The \type {--nosocket} option makes the socket library unavailable, so that \LUA\ + cannot use networking. 
+
+    The switches \type {--[no-]shell-escape}, \type {--[enable|disable]-write18}, and
+    \type {--shell-restricted} have the same effects as in \PDFTEX, and additionally
+    make \type {io.popen()}, \type {os.execute}, \type {os.exec} and \type {os.spawn}
+    adhere to the requested option.
+\stopitem
+
+\startitem
+    Next the initialization script is loaded and executed. From within the
+    script, the entire command line is available in the \LUA\ table \type {arg},
+    beginning with \type {arg[0]}, containing the name of the executable. As a
+    consequence, warnings about unrecognized options are suppressed.
+\stopitem
+
+\stopitemize
 
 Command line processing happens very early on. So early, in fact, that none of
 \TEX's initializations have taken place yet. For that reason, the tables that
@@ -179,7 +218,7 @@ deal with typesetting, like \type {tex}, \type {token}, \type {node} and
 are \type {nil}'d). Special care is taken that \type {texio.write} and \type
 {texio.write_nl} function properly, so that you can at least report your actions
 to the log file when (and if) it eventually becomes opened (note that \TEX\ does
-not even know its \type {\jobname} yet at this point).
+not even know its \prm {jobname} yet at this point).
 
 Everything you do in the \LUA\ initialization script will remain visible during
 the rest of the run, with the exception of the \TEX\ specific libraries like
@@ -205,17 +244,34 @@ finished: in order to initialize the built|-|in \KPATHSEA\ library properly,
 check \type {--progname}, or \type {--ini} and \type {--fmt}, if \type
 {--progname} is missing.
 
-\section{\LUA\ behaviour}
+\stopsubsection
+
+\stopsection
+
+\startsection[title={\LUA\ behaviour}]
+
+\startsubsection[title={The \LUA\ version}]
+
+\topicindex {\LUA+libraries}
+\topicindex {\LUA+extensions}
+
+We currently use \LUA\ 5.3 and will follow developments of the language but
+normally with some delay. Therefore the user needs to keep an eye on (subtle)
+differences in successive versions of the language. Also, \LUAJITTEX\ lags behind
+in the sense that \LUAJIT\ is not in sync with regular \LUA\ development. Here is
+an example of one aspect.
 
 \LUA s \type {tostring} function (and \type {string.format}) may return values in
 scientific notation, thereby confusing the \TEX\ end of things when it is used as
-the right|-|hand side of an assignment to a \type {\dimen} or \type {\count}.
+the right|-|hand side of an assignment to a \prm {dimen} or \prm {count}. The
+output of these serializers also depends on the \LUA\ version, so in \LUA\ 5.3 you
+can get different output than from 5.2.
 
-Loading dynamic \LUA\ libraries will fail if there are two \LUA\ libraries loaded
-at the same time (which will typically happen on \type {win32}, because there is
-one \LUA\ 5.3 inside \LUATEX, and another will likely be linked to the \DLL\ file
-of the module itself).
+\stopsubsection
+
+\startsubsection[title={Integration in the \TDS\ ecosystem}]
 
+The main \TEX\ distributions follow the \TEX\ directory structure (\TDS).
 \LUATEX\ is able to use the kpathsea library to find \type {require()}d modules.
 For this purpose, \type {package.searchers[2]} is replaced by a different loader
 function, that decides at runtime whether to use kpathsea or the built|-|in core
@@ -226,6 +282,10 @@ Initialization of \KPATHSEA\ can happen either implicitly (when \LUATEX\ starts
 up and the startup script has not set \type {texconfig.kpse_init} to false), or
 explicitly by calling the \LUA\ function \type {kpse.set_program_name()}.
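+
+For instance, a minimal \type {--lua} startup script could initialize
+\KPATHSEA\ explicitly and then perform a lookup; the file name \type {init.lua}
+and the lookup itself are just an illustration:
+
+\starttyping
+-- init.lua: explicit initialization of the built-in kpathsea library
+kpse.set_program_name("luatex")
+-- once initialized, lookups work as usual:
+local found = kpse.find_file("texmf.cnf","cnf")
+texio.write_nl("texmf.cnf: " .. (found or "not found"))
+\stoptyping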
+\stopsubsection + +\startsubsection[title={Loading libraries}] + \LUATEX\ is able to use dynamically loadable \LUA\ libraries, unless \type {--safer} was given as an option on the command line. For this purpose, \type {package.searchers[3]} is replaced by a different loader function, that @@ -233,12 +293,10 @@ decides at runtime whether to use \KPATHSEA\ or the built|-|in core \LUA\ function. It uses \KPATHSEA\ when that is already initialized at that point in time, otherwise it reverts to using the normal \type {package.cpath} loader. -This functionality required an extension to kpathsea: - -\startnarrower -There is a new kpathsea file format: \type {kpse_clua_format} that searches for -files with extension \type {.dll} and \type {.so}. The \type {texmf.cnf} setting -for this variable is \type {CLUAINPUTS}, and by default it has this value: +This functionality required an extension to kpathsea. There is a new kpathsea +file format: \type {kpse_clua_format} that searches for files with extension +\type {.dll} and \type {.so}. The \type {texmf.cnf} setting for this variable is +\type {CLUAINPUTS}, and by default it has this value: \starttyping CLUAINPUTS=.:$SELFAUTOLOC/lib/{$progname,$engine,}/lua// @@ -248,12 +306,18 @@ This path is imperfect (it requires a \TDS\ subtree below the binaries directory), but the architecture has to be in the path somewhere, and the currently simplest way to do that is to search below the binaries directory only. Of course it no big deal to write an alternative loader and use that in a macro -package. +package. One level up (a \type {lib} directory parallel to \type {bin}) would +have been nicer, but that is not doable because \TEXLIVE\ uses a \type +{bin/<arch>} structure. + +Loading dynamic \LUA\ libraries will fail if there are two \LUA\ libraries loaded +at the same time (which will typically happen on \type {win32}, because there is +one \LUA\ 5.3 inside \LUATEX, and another will likely be linked to the \DLL\ file +of the module itself). -One level up (a \type {lib} directory parallel to \type {bin}) would have been -nicer, but that is not doable because \TEXLIVE\ uses a \type {bin/<arch>} -structure. -\stopnarrower +\stopsubsection + +\startsubsection[title={Executing programs}] In keeping with the other \TEX|-|like programs in \TEXLIVE, the two \LUA\ functions \type {os.execute} and \type {io.popen}, as well as the two new functions \type @@ -265,26 +329,29 @@ the command line option \type {--luaonly} was not given), it will only run the four functions above if the matching \type {texmf.cnf} variable(s) or their \type {texconfig} (see \in {section} [texconfig]) counterparts allow execution of the requested system command. In \quote {script interpreter} runs of \LUATEX, these -settings have no effect, and all four functions function as normal. - -The \type {f:read("*line")} and \type {f:lines()} functions from the io library -have been adjusted so that they are line|-|ending neutral: any of \type {LF}, -\type {CR} or \type {CR+LF} are acceptable line endings. - -\type {luafilesystem} has been extended: there are two extra boolean functions -(\type {lfs.isdir(filename)} and \type {lfs.isfile(filename)}) and one extra -string field in its attributes table (\type {permissions}). There is an -additional function \type {lfs.shortname()} which takes a file name and returns -its short name on \type {win32} platforms. On other platforms, it just returns -the given argument. The file name is not tested for existence. 
Finally, for
-non|-|\type {win32} platforms only, there is the new function \type
-{lfs.readlink()} hat takes an existing symbolic link as argument and returns its
-content. It returns an error on \type {win32}.
-
-The \type {string} library has an extra function: \type {string.explode(s[,m])}.
-This function returns an array containing the string argument \type {s} split
-into sub-strings based on the value of the string argument \type {m}. The second
-argument is a string that is either empty (this splits the string into
+settings have no effect, and all four functions have their original meaning.
+
+Some libraries have a few more functions, either coded in \CCODE\ or in \LUA. For
+instance, when we started with \LUATEX\ we added some helpers to the \type
+{luafilesystem} namespace \type {lfs}. The two boolean functions \type
+{lfs.isdir} and \type {lfs.isfile} were speedy and better variants of what could
+be done with \type {lfs.attributes}. The additional function \type
+{lfs.shortname} takes a file name and returns its short name on \type {win32}
+platforms. Finally, for non|-|\type {win32} platforms only, we provided \type
+{lfs.readlink} that takes an existing symbolic link as argument and returns its
+name. However, the \type {lfs} library evolved so we have dropped these in favour
+of pure \LUA\ variants. The \type {shortname} helper is obsolete and now just
+returns the name.
+
+\stopsubsection
+
+\startsubsection[title={Multibyte \type {string} functions}]
+
+The \type {string} library has a few extra functions, for example \libidx
+{string} {explode}. This function takes up to two arguments: \type
+{string.explode(s[,m])} and returns an array containing the string argument \type
+{s} split into sub-strings based on the value of the string argument \type {m}.
+The second argument is a string that is either empty (this splits the string into
 characters), a single character (this splits on each occurrence of that
 character, possibly introducing empty strings), or a single character followed by
 the plus sign \type {+} (this special version does not create empty sub-strings).
@@ -293,7 +360,9 @@ The default value for \type {m} is \quote {\type { +}} (multiple spaces). Note:
 written in \TEX\ macros.
 
 The \type {string} library also has six extra iterators that return strings
-piecemeal:
+piecemeal: \libidx {string} {utfvalues}, \libidx {string} {utfcharacters},
+\libidx {string} {characters}, \libidx {string} {characterpairs}, \libidx
+{string} {bytes} and \libidx {string} {bytepairs}.
 
 \startitemize
 \startitem
@@ -303,7 +372,7 @@ piecemeal:
   \type {string.utfcharacters(s)}: a string with a single \UTF-8 token in it
 \stopitem
 \startitem
   \type {string.characters(s)}: a string containing one byte
 \stopitem
 \startitem
   \type {string.characterpairs(s)}: two strings each containing one byte or an
@@ -323,7 +392,8 @@ are useful especially in the conversion of \UTF16 encoded data into \UTF8.
 
 There is also a two|-|argument form of \type {string.dump()}. The second argument
 is a boolean which, if true, strips the symbols from the dumped data. This
-matches an extension made in \type {luajit}.
+matches an extension made in \type {luajit}. This is typically a function that
+gets adapted as \LUA\ itself progresses.
 
 The \type {string} library functions \type {len}, \type {lower}, \type {sub} etc.\
 are not \UNICODE|-|aware.
For strings in the \UTF8 encoding, i.e., strings @@ -338,7 +408,8 @@ interpretation of character classes in \type {unicode.utf8} functions refer to the library sources at \hyphenatedurl {http://luaforge.net/projects/sln}. Version 5.3 of \LUA\ provides some native \UTF8 support but we have added a few -similar helpers too: +similar helpers too: \libidx {string} {utfvalue}, \libidx {string} {utfcharacter} +and \libidx {string} {utflength}. \startitemize \startitem @@ -354,13 +425,18 @@ similar helpers too: \stopitem \stopitemize -These three functions are relative fast and don't do much checking. They can be used -as building blocks for other helpers. +These three functions are relative fast and don't do much checking. They can be +used as building blocks for other helpers. +\stopsubsection -\blank +\startsubsection[title={Extra \type {os} library functions}] -The \type {os} library has a few extra functions and variables: +The \type {os} library has a few extra functions and variables: \libidx {os} +{selfdir}, \libidx {os} {exec}, \libidx {os} {spawn}, \libidx {os} {setenv}, +\libidx {os} {env}, \libidx {os} {gettimeofday}, \libidx {os} {times}, \libidx +{os} {tmpdir}, \libidx {os} {type}, \libidx {os} {name} and \libidx {os} {uname}, +that we will discuss here. \startitemize @@ -373,25 +449,37 @@ The \type {os} library has a few extra functions and variables: \type {os.exec(commandline)} is a variation on \type {os.execute}. Here \type {commandline} can be either a single string or a single table. - If the argument is a table \LUATEX\ first checks if there is a value at - integer index zero. If there is, this is the command to be executed. - Otherwise, it will use the value at integer index one. If neither are - present, nothing at all happens. - - The set of consecutive values starting at integer~1 in the table are the - arguments that are passed on to the command (the value at index~1 becomes - \type {arg[0]}). The command is searched for in the execution path, so there - is normally no need to pass on a fully qualified path name. - - If the argument is a string, then it is automatically converted into a table - by splitting on whitespace. In this case, it is impossible for the command - and first argument to differ from each other. - - In the string argument format, whitespace can be protected by putting (part - of) an argument inside single or double quotes. One layer of quotes is - interpreted by \LUATEX, and all occurrences of \type {\"}, \type {\'} or \type - {\\} within the quoted text are unescaped. In the table format, there is no - string handling taking place. + \startitemize + + \startitem + If the argument is a table \LUATEX\ first checks if there is a value at + integer index zero. If there is, this is the command to be executed. + Otherwise, it will use the value at integer index one. If neither are + present, nothing at all happens. + \stopitem + + \startitem + The set of consecutive values starting at integer~1 in the table are the + arguments that are passed on to the command (the value at index~1 becomes + \type {arg[0]}). The command is searched for in the execution path, so + there is normally no need to pass on a fully qualified path name. + \stopitem + + \startitem + If the argument is a string, then it is automatically converted into a + table by splitting on whitespace. In this case, it is impossible for the + command and first argument to differ from each other. 
+    \stopitem
+
+    \startitem
+      In the string argument format, whitespace can be protected by putting
+      (part of) an argument inside single or double quotes. One layer of quotes
+      is interpreted by \LUATEX, and all occurrences of \type {\"}, \type {\'}
+      or \type {\\} within the quoted text are unescaped. In the table format,
+      there is no string handling taking place.
+    \stopitem
+
+    \stopitemize
 
   This function normally does not return control back to the \LUA\ script: the
   command will replace the current process. However, it will return the two
@@ -470,14 +558,86 @@ The \type {os} library has a few extra functions and variables:
 \stopitem
 
 \startitem
-  \type {os.uname()} returns a table with specific operating system
+  \type {os.uname} returns a table with specific operating system
   information acquired at runtime. The keys in the returned table are all
   string valued, and their names are: \type {sysname}, \type {machine}, \type
   {release}, \type {version}, and \type {nodename}.
 \stopitem
 
 \stopitemize
 
+\stopsubsection
+
+\startsubsection[title={Binary input from files with \type {fio}}]
+
+There is a whole set of helpers for reading numbers and strings from a file:
+\libidx {fio} {readcardinal1}, \libidx {fio} {readcardinal2}, \libidx {fio}
+{readcardinal3}, \libidx {fio} {readcardinal4}, \libidx {fio}
+{readcardinaltable}, \libidx {fio} {readinteger1}, \libidx {fio} {readinteger2},
+\libidx {fio} {readinteger3}, \libidx {fio} {readinteger4}, \libidx {fio}
+{readintegertable}, \libidx {fio} {readfixed2}, \libidx {fio} {readfixed4},
+\libidx {fio} {read2dot14}, \libidx {fio} {setposition}, \libidx {fio}
+{getposition}, \libidx {fio} {skipposition}, \libidx {fio} {readbytes}, \libidx
+{fio} {readbytetable}. They work on normal \LUA\ file handles.
+
+%libidx{fio}{readline}
+%libidx{fio}{recordfilename}
+%libidx{fio}{checkpermission}
+
+This library provides a set of functions for reading numbers from a file,
+in addition to the regular \type {io} library functions.
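+
+As a small sketch of intended usage (the file name is made up), one can for
+instance read a length|-|prefixed blob like this:
+
+\starttyping
+local f = io.open("data.bin","rb")
+if f then
+    local n     = fio.readcardinal4(f)   -- a 4 byte unsigned length field
+    local bytes = fio.readbytetable(f,n) -- the payload, as a table of bytes
+    f:close()
+end
+\stoptyping
+
+The available readers and positioning helpers are the following: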
+
+\starttabulate
+\NC \type{readcardinal1(f)}         \NC a 1 byte unsigned integer \NC \NR
+\NC \type{readcardinal2(f)}         \NC a 2 byte unsigned integer \NC \NR
+\NC \type{readcardinal3(f)}         \NC a 3 byte unsigned integer \NC \NR
+\NC \type{readcardinal4(f)}         \NC a 4 byte unsigned integer \NC \NR
+\NC \type{readcardinaltable(f,n,b)} \NC \type {n} cardinals of \type {b} bytes \NC \NR
+\NC \type{readinteger1(f)}          \NC a 1 byte signed integer \NC \NR
+\NC \type{readinteger2(f)}          \NC a 2 byte signed integer \NC \NR
+\NC \type{readinteger3(f)}          \NC a 3 byte signed integer \NC \NR
+\NC \type{readinteger4(f)}          \NC a 4 byte signed integer \NC \NR
+\NC \type{readintegertable(f,n,b)}  \NC \type {n} integers of \type {b} bytes \NC \NR
+\NC \type{readfixed2(f)}            \NC a 2 byte float (used in font files) \NC \NR
+\NC \type{readfixed4(f)}            \NC a 4 byte float (used in font files) \NC \NR
+\NC \type{read2dot14(f)}            \NC a 2 byte float (used in font files) \NC \NR
+\NC \type{setposition(f,p)}         \NC goto position \type {p} \NC \NR
+\NC \type{getposition(f)}           \NC get the current position \NC \NR
+\NC \type{skipposition(f,n)}        \NC skip \type {n} positions \NC \NR
+\NC \type{readbytes(f,n)}           \NC \type {n} bytes \NC \NR
+\NC \type{readbytetable(f,n)}       \NC \type {n} bytes \NC \NR
+\stoptabulate
+
+\stopsubsection
+
+\startsubsection[title={Binary input from strings with \type {sio}}]
+
+A similar set of functions as in the \type {fio} library is available in the \type
+{sio} library: \libidx {sio} {readcardinal1}, \libidx {sio} {readcardinal2},
+\libidx {sio} {readcardinal3}, \libidx {sio} {readcardinal4}, \libidx {sio}
+{readcardinaltable}, \libidx {sio} {readinteger1}, \libidx {sio} {readinteger2},
+\libidx {sio} {readinteger3}, \libidx {sio} {readinteger4}, \libidx {sio}
+{readintegertable}, \libidx {sio} {readfixed2}, \libidx {sio} {readfixed4},
+\libidx {sio} {read2dot14}, \libidx {sio} {setposition}, \libidx {sio}
+{getposition}, \libidx {sio} {skipposition}, \libidx {sio} {readbytes} and
+\libidx {sio} {readbytetable}. Here the first argument is a string instead of a
+file handle. More details can be found in the previous section.
+
+\stopsubsection
+
+\startsubsection[title={Hashes conform \type {sha2}}]
+
+This library is a side effect of the \type {pdfe} library that needs such
+helpers. The \libidx{sha2}{digest256}, \libidx{sha2}{digest384} and
+\libidx{sha2}{digest512} functions accept a string and return a string with the
+hash.
+
+\stopsubsection
+
+\startsubsection[title={Locales}]
+
+\index {locales}
+
 In stock \LUA, many things depend on the current locale. In \LUATEX, we can't do
 that, because it makes documents unportable. While \LUATEX\ is running it forces
 the following locale settings:
 
 \starttyping
 LC_CTYPE=C
 LC_COLLATE=C
 LC_NUMERIC=C
 \stoptyping
 
-\section {\LUA\ modules}
+\stopsubsection
+
+\stopsection
+
+\startsection[title={\LUA\ modules}]
+
+\topicindex {\LUA+libraries}
+\topicindex {\LUA+modules}
 
 Some modules that are normally external to \LUA\ are statically linked in with
 \LUATEX, because they offer useful functionality:
 
 \startitemize
 
 \startitem
+  \type {lpeg}, by Roberto Ierusalimschy, \hyphenatedurl
+  {http://www.inf.puc-rio.br/~roberto/lpeg/lpeg.html}. This library is not
+  \UNICODE|-|aware, but interprets strings on a byte|-|per|-|byte basis.
This + mainly means that \type {lpeg.S} cannot be used with \UTF8 characters encoded + in more than two bytes, and thus \type {lpeg.S} will look for one of those + two bytes when matching, not the combination of the two. The same is true for + \type {lpeg.R}, although the latter will display an error message if used + with multibyte characters. Therefore \type {lpeg.R('aä')} results in the + message \type {bad argument #1 to 'R' (range must have two characters)}, + since to \type {lpeg}, \type {ä} is two 'characters' (bytes), so \type {aä} + totals three. In practice this is no real issue and with some care you can + deal with \UNICODE\ just fine. +\stopitem + +\startitem \type {slnunicode}, from the \type {selene} libraries, \hyphenatedurl {http://luaforge.net/projects/sln}. This library has been slightly extended so that the \type {unicode.utf8.*} functions also accept the first 256 values of plane~18. This is the range \LUATEX\ uses for raw binary output, as - explained above. + explained above. We have no plans to provide more like this because you can + basically do all that you want in \LUA. \stopitem \startitem @@ -514,20 +697,6 @@ Some modules that are normally external to \LUA\ are statically linked in with \stopitem \startitem - \type {lpeg}, by Roberto Ierusalimschy, \hyphenatedurl - {http://www.inf.puc-rio.br/~roberto/lpeg/lpeg.html}. This library is not - \UNICODE|-|aware, but interprets strings on a byte|-|per|-|byte basis. This - mainly means that \type {lpeg.S} cannot be used with \UTF8 characters encoded - in more than two bytes, and thus \type {lpeg.S} will look for one of those - two bytes when matching, not the combination of the two. The same is true for - \type {lpeg.R}, although the latter will display an error message if used - with multibyte characters. Therefore \type {lpeg.R('aä')} results in the - message \type {bad argument #1 to 'R' (range must have two characters)}, - since to \type {lpeg}, \type {ä} is two 'characters' (bytes), so \type {aä} - totals three. In practice this is no real issue. -\stopitem - -\startitem \type {lzlib}, by Tiago Dionizio, \hyphenatedurl {http://luaforge.net/projects/lzlib/}. \stopitem @@ -546,11 +715,12 @@ Some modules that are normally external to \LUA\ are statically linked in with \stopitemize -At some point (this also depends on distributions) \LUATEX\ might have these -libraries loaded on demand. For this reason you can best use \type {require} to -make sure they are loaded. +\stopsection -\section{Testing} +\startsection[title={Testing}] + +\topicindex {testing} +\topicindex {\PDF+date} For development reasons you can influence the used startup date and time. This can be done in two ways. @@ -579,17 +749,11 @@ When Universal Time is needed, you can pass the flag \type {utc} to the engine. property also works when the date and time are set by \LUATEX\ itself. It has a complementary entry \type {use_utc_time} in the \type {texconfig} table. -\startnotabene - To some extend a cleaner solution would be to have a flag that disables all - variable data in one go (like filenames and so) but we just follow the method - implemented in \PDFTEX\ where primitives are used to influence other - properties. -\stopnotabene - -\startnotabene - In \CONTEXT\ we provide the command line argument \type {--nodates} that does - bit more disabling of dates. -\stopnotabene +There is some control possible, for instance prevent filename to be written to +the \PDF\ file. This is discussed elsewhere. 
In \CONTEXT\ we provide the command +line argument \type {--nodates} that does a bit more disabling of dates. + +\stopsection \stopchapter diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-math.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-math.tex index 8ccae83f3ec..4623ce706d7 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-math.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-math.tex @@ -1,12 +1,15 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-math \startchapter[reference=math,title={Math}] +\startsection[title={Traditional alongside \OPENTYPE}] + +\topicindex {math} + The handling of mathematics in \LUATEX\ differs quite a bit from how \TEX82 (and therefore \PDFTEX) handles math. First, \LUATEX\ adds primitives and extends some others so that \UNICODE\ input can be used easily. Second, all of \TEX82's @@ -16,18 +19,133 @@ make it easier to use \OPENTYPE\ math fonts. And finally, there are some extensions that have been proposed or considered in the past that are now added to the engine. -\section{The current math style} +\stopsection + +\startsection[title={Unicode math characters}] + +\topicindex {math+\UNICODE} +\topicindex {\UNICODE+math} + +Character handling is now extended up to the full \UNICODE\ range (the \type {\U} +prefix), which is compatible with \XETEX. + +The math primitives from \TEX\ are kept as they are, except for the ones that +convert from input to math commands: \type {mathcode}, and \type {delcode}. These +two now allow for a 21-bit character argument on the left hand side of the equals +sign. + +Some of the new \LUATEX\ primitives read more than one separate value. This is +shown in the tables below by a plus sign. + +The input for such primitives would look like this: + +\starttyping +\def\overbrace{\Umathaccent 0 1 "23DE } +\stoptyping + +The altered \TEX82 primitives are: + +\starttabulate[|l|l|r|c|l|r|] +\DB primitive \BC min \BC max \BC \kern 2em \BC min \BC max \NC \NR +\TB +\NC \prm {mathcode} \NC 0 \NC 10FFFF \NC = \NC 0 \NC 8000 \NC \NR +\NC \prm {delcode} \NC 0 \NC 10FFFF \NC = \NC 0 \NC FFFFFF \NC \NR +\LL +\stoptabulate + +The unaltered ones are: + +\starttabulate[|l|l|r|] +\DB primitive \BC min \BC max \NC \NR +\TB +\NC \prm {mathchardef} \NC 0 \NC 8000 \NC \NR +\NC \prm {mathchar} \NC 0 \NC 7FFF \NC \NR +\NC \prm {mathaccent} \NC 0 \NC 7FFF \NC \NR +\NC \prm {delimiter} \NC 0 \NC 7FFFFFF \NC \NR +\NC \prm {radical} \NC 0 \NC 7FFFFFF \NC \NR +\LL +\stoptabulate + +For practical reasons \prm {mathchardef} will silently accept values larger +that \type {0x8000} and interpret it as \lpr {Umathcharnumdef}. This is needed +to satisfy older macro packages. 
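+
+As a small illustration of these ranges: the classic assignments still work
+(the values below are simply the plain \TEX\ ones), but the character on the
+left hand side may now be anything up to \type {"10FFFF}:
+
+\starttyping
+\mathcode `\+ = "202B   % class 2 (binary), family 0, slot "2B
+\delcode  `\( = "028300 % small "28 in family 0, large "00 in family 3
+\stoptyping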
+ +The following new primitives are compatible with \XETEX: + +% somewhat fuzzy: + +\starttabulate[|l|l|r|c|l|r|] +\DB primitive \BC min \BC max \BC \kern 2em \BC min \BC max \NC \NR +\TB +\NC \lpr {Umathchardef} \NC 0+0+0 \NC 7+FF+10FFFF \NC \NC \NC \NC \NR +\NC \lpr {Umathcharnumdef}\rlap{\high{5}} \NC -80000000 \NC 7FFFFFFF \NC \NC \NC \NC \NR +\NC \lpr {Umathcode} \NC 0 \NC 10FFFF \NC = \NC 0+0+0 \NC 7+FF+10FFFF \NC \NR +\NC \lpr {Udelcode} \NC 0 \NC 10FFFF \NC = \NC 0+0 \NC FF+10FFFF \NC \NR +\NC \lpr {Umathchar} \NC 0+0+0 \NC 7+FF+10FFFF \NC \NC \NC \NC \NR +\NC \lpr {Umathaccent} \NC 0+0+0 \NC 7+FF+10FFFF \NC \NC \NC \NC \NR +\NC \lpr {Udelimiter} \NC 0+0+0 \NC 7+FF+10FFFF \NC \NC \NC \NC \NR +\NC \lpr {Uradical} \NC 0+0 \NC FF+10FFFF \NC \NC \NC \NC \NR +\NC \lpr {Umathcharnum} \NC -80000000 \NC 7FFFFFFF \NC \NC \NC \NC \NR +\NC \lpr {Umathcodenum} \NC 0 \NC 10FFFF \NC = \NC -80000000 \NC 7FFFFFFF \NC \NR +\NC \lpr {Udelcodenum} \NC 0 \NC 10FFFF \NC = \NC -80000000 \NC 7FFFFFFF \NC \NR +\LL +\stoptabulate + +Specifications typically look like: + +\starttyping +\Umathchardef\xx="1"0"456 +\Umathcode 123="1"0"789 +\stoptyping + +The new primitives that deal with delimiter|-|style objects do not set up a +\quote {large family}. Selecting a suitable size for display purposes is expected +to be dealt with by the font via the \lpr {Umathoperatorsize} parameter. + +For some of these primitives, all information is packed into a single signed +integer. For the first two (\lpr {Umathcharnum} and \lpr {Umathcodenum}), the +lowest 21 bits are the character code, the 3 bits above that represent the math +class, and the family data is kept in the topmost bits. This means that the values +for math families 128--255 are actually negative. For \lpr {Udelcodenum} there +is no math class. The math family information is stored in the bits directly on +top of the character code. Using these three commands is not as natural as using +the two- and three|-|value commands, so unless you know exactly what you are +doing and absolutely require the speedup resulting from the faster input +scanning, it is better to use the verbose commands instead. + +The \lpr {Umathaccent} command accepts optional keywords to control various +details regarding math accents. See \in {section} [mathacc] below for details. + +There are more new primitives and all of these will be explained in following +sections: + +\starttabulate[|l|l|] +\DB primitive \BC value range (in hex) \NC \NR +\TB +\NC \lpr {Uroot} \NC 0 + 0--FF + 10FFFF \NC \NR +\NC \lpr {Uoverdelimiter} \NC 0 + 0--FF + 10FFFF \NC \NR +\NC \lpr {Uunderdelimiter} \NC 0 + 0--FF + 10FFFF \NC \NR +\NC \lpr {Udelimiterover} \NC 0 + 0--FF + 10FFFF \NC \NR +\NC \lpr {Udelimiterunder} \NC 0 + 0--FF + 10FFFF \NC \NR +\LL +\stoptabulate + +\stopsection + +\startsection[title={Math styles}] + +\subsection{\lpr {mathstyle}} + +\topicindex {math+styles} It is possible to discover the math style that will be used for a formula in an expandable fashion (while the math list is still being read). To make this -possible, \LUATEX\ adds the new primitive: \type {\mathstyle}. This is a \quote -{convert command} like e.g. \type {\romannumeral}: its value can only be read, +possible, \LUATEX\ adds the new primitive: \lpr {mathstyle}. This is a \quote +{convert command} like e.g. \prm {romannumeral}: its value can only be read, not set. -\subsection{\type {\mathstyle}} - The returned value is between 0 and 7 (in math mode), or $-1$ (all other modes). 
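+
+A quick way to inspect the raw value is to just report it; a small sketch:
+
+\starttyping
+\message{outside math: \mathstyle}   % reports -1
+$\message{inline math: \mathstyle}$  % reports 2 (text style)
+\stoptyping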
-For easy testing, the eight math style commands have been altered so that the can +For easy testing, the eight math style commands have been altered so that they can be used as numeric values, so you can write code like this: \starttyping @@ -48,16 +166,15 @@ thereby reusing numbers) because the number that got used is stored and used in the second pass (so changing \type {\fam 12} mid|-|formula spoils over to preceding use of that family). -The style switching primitives like \type {\textstyle} are turned into nodes so -the styles set there are frozen. The \type {\mathchoice} primitive results in -four lists being constructed of which one is used in the second pass. The fact -that some automatic styles are not yet known also means that the \type -{\mathstyle} primitive expands to the current style which can of course be -different from the one really used. It's a snapshot of the first pass state. As a -consequence in the following example you get a style number (first pass) typeset -that can actually differ from the used style (second pass). In the case of a math -choice used ungrouped, the chosen style is used after the choice too, unless you -group. +The style switching primitives like \prm {textstyle} are turned into nodes so the +styles set there are frozen. The \prm {mathchoice} primitive results in four +lists being constructed of which one is used in the second pass. The fact that +some automatic styles are not yet known also means that the \lpr {mathstyle} +primitive expands to the current style which can of course be different from the +one really used. It's a snapshot of the first pass state. As a consequence in the +following example you get a style number (first pass) typeset that can actually +differ from the used style (second pass). In the case of a math choice used +ungrouped, the chosen style is used after the choice too, unless you group. \startbuffer[1] [a:\mathstyle]\quad @@ -120,134 +237,39 @@ This gives: \blank $\displaystyle \getbuffer[1]$ \blank \blank $\textstyle \getbuffer[1]$ \blank -Using \type {\begingroup} \unknown\ \type {\endgroup} instead gives: +Using \prm {begingroup} \unknown\ \prm {endgroup} instead gives: \blank $\displaystyle \getbuffer[2]$ \blank \blank $\textstyle \getbuffer[2]$ \blank -This might look wrong but it's just a side effect of \type {\mathstyle} expanding +This might look wrong but it's just a side effect of \lpr {mathstyle} expanding to the current (first pass) style and the number being injected in the list that gets converted in the second pass. It all makes sense and it illustrates the importance of grouping. In fact, the math choice style being effective afterwards has advantages. It would be hard to get it otherwise. -\subsection{\type {\Ustack}} +\subsection{\lpr {Ustack}} + +\topicindex {math+stacks} There are a few math commands in \TEX\ where the style that will be used is not -known straight from the start. These commands (\type {\over}, \type {\atop}, -\type {\overwithdelims}, \type {\atopwithdelims}) would therefore normally return -wrong values for \type {\mathstyle}. To fix this, \LUATEX\ introduces a special -prefix command: \type {\Ustack}: +known straight from the start. These commands (\prm {over}, \prm {atop}, +\prm {overwithdelims}, \prm {atopwithdelims}) would therefore normally return +wrong values for \lpr {mathstyle}. 
To fix this, \LUATEX\ introduces a special +prefix command: \lpr {Ustack}: \starttyping $\Ustack {a \over b}$ \stoptyping -The \type {\Ustack} command will scan the next brace and start a new math group +The \lpr {Ustack} command will scan the next brace and start a new math group with the correct (numerator) math style. -\section{Unicode math characters} - -Character handling is now extended up to the full \UNICODE\ range (the \type {\U} -prefix), which is compatible with \XETEX. - -The math primitives from \TEX\ are kept as they are, except for the ones that -convert from input to math commands: \type {mathcode}, and \type {delcode}. These -two now allow for a 21-bit character argument on the left hand side of the equals -sign. - -Some of the new \LUATEX\ primitives read more than one separate value. This is -shown in the tables below by a plus sign in the second column. - -The input for such primitives would look like this: - -\starttyping -\def\overbrace{\Umathaccent 0 1 "23DE } -\stoptyping - -The altered \TEX82 primitives are: - -\starttabulate[|l|l|r|c|l|r|] -\BC primitive \BC min \BC max \BC \kern 2em \BC min \BC max \NC \NR -\NC \type {\mathcode} \NC 0 \NC 10FFFF \NC = \NC 0 \NC 8000 \NC \NR -\NC \type {\delcode} \NC 0 \NC 10FFFF \NC = \NC 0 \NC FFFFFF \NC \NR -\stoptabulate - -The unaltered ones are: - -\starttabulate[|l|l|r|] -\BC primitive \BC min \BC max \NC \NR -\NC \type {\mathchardef} \NC 0 \NC 8000 \NC \NR -\NC \type {\mathchar} \NC 0 \NC 7FFF \NC \NR -\NC \type {\mathaccent} \NC 0 \NC 7FFF \NC \NR -\NC \type {\delimiter} \NC 0 \NC 7FFFFFF \NC \NR -\NC \type {\radical} \NC 0 \NC 7FFFFFF \NC \NR -\stoptabulate +\subsection{Cramped math styles} -For practical reasons \type {\mathchardef} will silently accept values larger -that \type {0x8000} and interpret it as \type {\Umathcharnumdef}. This is needed -to satisfy older macro packages. - -The following new primitives are compatible with \XETEX: - -% somewhat fuzzy: - -\starttabulate[|l|l|r|c|l|r|] -\BC primitive \BC min \BC max \BC \kern 2em \BC min \BC max \NC \NR -\NC \type {\Umathchardef} \NC 0+0+0 \NC 7+FF+10FFFF\rlap{\high{1}} \NC \NC \NC \NC \NR -\NC \type {\Umathcharnumdef}\rlap{\high{5}} \NC -80000000 \NC 7FFFFFFF\rlap{\high{3}} \NC \NC \NC \NC \NR -\NC \type {\Umathcode} \NC 0 \NC 10FFFF \NC = \NC 0+0+0 \NC 7+FF+10FFFF\rlap{\high{1}} \NC \NR -\NC \type {\Udelcode} \NC 0 \NC 10FFFF \NC = \NC 0+0 \NC FF+10FFFF\rlap{\high{2}} \NC \NR -\NC \type {\Umathchar} \NC 0+0+0 \NC 7+FF+10FFFF \NC \NC \NC \NC \NR -\NC \type {\Umathaccent} \NC 0+0+0 \NC 7+FF+10FFFF\rlap{\high{2,4}} \NC \NC \NC \NC \NR -\NC \type {\Udelimiter} \NC 0+0+0 \NC 7+FF+10FFFF\rlap{\high{2}} \NC \NC \NC \NC \NR -\NC \type {\Uradical} \NC 0+0 \NC FF+10FFFF\rlap{\high{2}} \NC \NC \NC \NC \NR -\NC \type {\Umathcharnum} \NC -80000000 \NC 7FFFFFFF\rlap{\high{3}} \NC \NC \NC \NC \NR -\NC \type {\Umathcodenum} \NC 0 \NC 10FFFF \NC = \NC -80000000 \NC 7FFFFFFF\rlap{\high{3}} \NC \NR -\NC \type {\Udelcodenum} \NC 0 \NC 10FFFF \NC = \NC -80000000 \NC 7FFFFFFF\rlap{\high{3}} \NC \NR -\stoptabulate - -Specifications typically look like: - -\starttyping -\Umathchardef\xx="1"0"456 -\Umathcode 123="1"0"789 -\stoptyping - -Note 1: The new primitives that deal with delimiter|-|style objects do not set up a -\quote {large family}. Selecting a suitable size for display purposes is expected -to be dealt with by the font via the \type {\Umathoperatorsize} parameter (more -information can be found in a following section). 
- -Note 2: For these three primitives, all information is packed into a single -signed integer. For the first two (\type {\Umathcharnum} and \type -{\Umathcodenum}), the lowest 21 bits are the character code, the 3 bits above -that represent the math class, and the family data is kept in the topmost bits -(This means that the values for math families 128--255 are actually negative). -For \type {\Udelcodenum} there is no math class. The math family information is -stored in the bits directly on top of the character code. Using these three -commands is not as natural as using the two- and three|-|value commands, so -unless you know exactly what you are doing and absolutely require the speedup -resulting from the faster input scanning, it is better to use the verbose -commands instead. - -Note 3: The \type {\Umathaccent} command accepts optional keywords to control -various details regarding math accents. See \in {section} [mathacc] below for -details. - -New primitives that exist in \LUATEX\ only (all of these will be explained -in following sections): - -\starttabulate[|l|l|] -\BC primitive \BC value range (in hex) \NC \NR -\NC \type {\Uroot} \NC 0+0--FF+10FFFF$^2$ \NC \NR -\NC \type {\Uoverdelimiter} \NC 0+0--FF+10FFFF$^2$ \NC \NR -\NC \type {\Uunderdelimiter} \NC 0+0--FF+10FFFF$^2$ \NC \NR -\NC \type {\Udelimiterover} \NC 0+0--FF+10FFFF$^2$ \NC \NR -\NC \type {\Udelimiterunder} \NC 0+0--FF+10FFFF$^2$ \NC \NR -\stoptabulate - -\section{Cramped math styles} +\topicindex {math+styles} +\topicindex {math+spacing} +\topicindex {math+cramped} \LUATEX\ has four new primitives to set the cramped math styles directly: @@ -282,20 +304,23 @@ are described as follows: style if the original style was cramped. \stopitem \startitem - Formulas under a \type {\sqrt} or \type {\overline} are in cramped style. + Formulas under a \type {\sqrt} or \prm {overline} are in cramped style. \stopitem \stopitemize In \LUATEX\ one can set the styles in more detail which means that you sometimes -have to set both normal and cramped styles to get the effect you want. If we -force styles in the script using \type {\scriptstyle} and \type {\crampedscriptstyle} -we get this: +have to set both normal and cramped styles to get the effect you want. (Even) if +we force styles in the script using \prm {scriptstyle} and \lpr +{crampedscriptstyle} we get this: \startbuffer[demo] \starttabulate +\DB style \BC example \NC \NR +\TB \NC default \NC $b_{x=xx}^{x=xx}$ \NC \NR \NC script \NC $b_{\scriptstyle x=xx}^{\scriptstyle x=xx}$ \NC \NR \NC crampedscript \NC $b_{\crampedscriptstyle x=xx}^{\crampedscriptstyle x=xx}$ \NC \NR +\LL \stoptabulate \stopbuffer @@ -310,7 +335,7 @@ Now we set the following parameters \typebuffer[setup] -This gives: +This gives a different result: \start\getbuffer[setup,demo]\stop @@ -329,69 +354,77 @@ Now we get: \start\getbuffer[setup,demo]\stop -\section{Math parameter settings} +\stopsection + +\startsection[title={Math parameter settings}] + +\subsection {Many new \lpr {Umath*} primitives} + +\topicindex {math+parameters} In \LUATEX, the font dimension parameters that \TEX\ used in math typesetting are now accessible via primitive commands. In fact, refactoring of the math engine -has resulted in many more parameters than were accessible before. +has resulted in many more parameters than were not accessible before. 
\starttabulate -\BC primitive name \BC description \NC \NR -\NC \type {\Umathquad} \NC the width of 18 mu's \NC \NR -\NC \type {\Umathaxis} \NC height of the vertical center axis of +\DB primitive name \BC description \NC \NR +\TB +\NC \lpr {Umathquad} \NC the width of 18 mu's \NC \NR +\NC \lpr {Umathaxis} \NC height of the vertical center axis of the math formula above the baseline \NC \NR -\NC \type {\Umathoperatorsize} \NC minimum size of large operators in display mode \NC \NR -\NC \type {\Umathoverbarkern} \NC vertical clearance above the rule \NC \NR -\NC \type {\Umathoverbarrule} \NC the width of the rule \NC \NR -\NC \type {\Umathoverbarvgap} \NC vertical clearance below the rule \NC \NR -\NC \type {\Umathunderbarkern} \NC vertical clearance below the rule \NC \NR -\NC \type {\Umathunderbarrule} \NC the width of the rule \NC \NR -\NC \type {\Umathunderbarvgap} \NC vertical clearance above the rule \NC \NR -\NC \type {\Umathradicalkern} \NC vertical clearance above the rule \NC \NR -\NC \type {\Umathradicalrule} \NC the width of the rule \NC \NR -\NC \type {\Umathradicalvgap} \NC vertical clearance below the rule \NC \NR -\NC \type {\Umathradicaldegreebefore}\NC the forward kern that takes place before placement of - the radical degree \NC \NR -\NC \type {\Umathradicaldegreeafter} \NC the backward kern that takes place after placement of - the radical degree \NC \NR -\NC \type {\Umathradicaldegreeraise} \NC this is the percentage of the total height and depth of - the radical sign that the degree is raised by; it is - expressed in \type {percents}, so 60\% is expressed as the - integer $60$ \NC \NR -\NC \type {\Umathstackvgap} \NC vertical clearance between the two - elements in a \type {\atop} stack \NC \NR -\NC \type {\Umathstacknumup} \NC numerator shift upward in \type {\atop} stack \NC \NR -\NC \type {\Umathstackdenomdown} \NC denominator shift downward in \type {\atop} stack \NC \NR -\NC \type {\Umathfractionrule} \NC the width of the rule in a \type {\over} \NC \NR -\NC \type {\Umathfractionnumvgap} \NC vertical clearance between the numerator and the rule \NC \NR -\NC \type {\Umathfractionnumup} \NC numerator shift upward in \type {\over} \NC \NR -\NC \type {\Umathfractiondenomvgap} \NC vertical clearance between the denominator and the rule \NC \NR -\NC \type {\Umathfractiondenomdown} \NC denominator shift downward in \type {\over} \NC \NR -\NC \type {\Umathfractiondelsize} \NC minimum delimiter size for \type {\...withdelims} \NC \NR -\NC \type {\Umathlimitabovevgap} \NC vertical clearance for limits above operators \NC \NR -\NC \type {\Umathlimitabovebgap} \NC vertical baseline clearance for limits above operators \NC \NR -\NC \type {\Umathlimitabovekern} \NC space reserved at the top of the limit \NC \NR -\NC \type {\Umathlimitbelowvgap} \NC vertical clearance for limits below operators \NC \NR -\NC \type {\Umathlimitbelowbgap} \NC vertical baseline clearance for limits below operators \NC \NR -\NC \type {\Umathlimitbelowkern} \NC space reserved at the bottom of the limit \NC \NR -\NC \type {\Umathoverdelimitervgap} \NC vertical clearance for limits above delimiters \NC \NR -\NC \type {\Umathoverdelimiterbgap} \NC vertical baseline clearance for limits above delimiters \NC \NR -\NC \type {\Umathunderdelimitervgap} \NC vertical clearance for limits below delimiters \NC \NR -\NC \type {\Umathunderdelimiterbgap} \NC vertical baseline clearance for limits below delimiters \NC \NR -\NC \type {\Umathsubshiftdrop} \NC subscript drop for boxes and subformulas \NC \NR -\NC \type 
{\Umathsubshiftdown} \NC subscript drop for characters \NC \NR -\NC \type {\Umathsupshiftdrop} \NC superscript drop (raise, actually) for boxes and subformulas \NC \NR -\NC \type {\Umathsupshiftup} \NC superscript raise for characters \NC \NR -\NC \type {\Umathsubsupshiftdown} \NC subscript drop in the presence of a superscript \NC \NR -\NC \type {\Umathsubtopmax} \NC the top of standalone subscripts cannot be higher than this - above the baseline \NC \NR -\NC \type {\Umathsupbottommin} \NC the bottom of standalone superscripts cannot be less than - this above the baseline \NC \NR -\NC \type {\Umathsupsubbottommax} \NC the bottom of the superscript of a combined super- and subscript - be at least as high as this above the baseline \NC \NR -\NC \type {\Umathsubsupvgap} \NC vertical clearance between super- and subscript \NC \NR -\NC \type {\Umathspaceafterscript} \NC additional space added after a super- or subscript \NC \NR -\NC \type {\Umathconnectoroverlapmin}\NC minimum overlap between parts in an extensible recipe \NC \NR +\NC \lpr {Umathoperatorsize} \NC minimum size of large operators in display mode \NC \NR +\NC \lpr {Umathoverbarkern} \NC vertical clearance above the rule \NC \NR +\NC \lpr {Umathoverbarrule} \NC the width of the rule \NC \NR +\NC \lpr {Umathoverbarvgap} \NC vertical clearance below the rule \NC \NR +\NC \lpr {Umathunderbarkern} \NC vertical clearance below the rule \NC \NR +\NC \lpr {Umathunderbarrule} \NC the width of the rule \NC \NR +\NC \lpr {Umathunderbarvgap} \NC vertical clearance above the rule \NC \NR +\NC \lpr {Umathradicalkern} \NC vertical clearance above the rule \NC \NR +\NC \lpr {Umathradicalrule} \NC the width of the rule \NC \NR +\NC \lpr {Umathradicalvgap} \NC vertical clearance below the rule \NC \NR +\NC \lpr {Umathradicaldegreebefore}\NC the forward kern that takes place before placement of + the radical degree \NC \NR +\NC \lpr {Umathradicaldegreeafter} \NC the backward kern that takes place after placement of + the radical degree \NC \NR +\NC \lpr {Umathradicaldegreeraise} \NC this is the percentage of the total height and depth of + the radical sign that the degree is raised by; it is + expressed in \type {percents}, so 60\% is expressed as the + integer $60$ \NC \NR +\NC \lpr {Umathstackvgap} \NC vertical clearance between the two + elements in a \prm {atop} stack \NC \NR +\NC \lpr {Umathstacknumup} \NC numerator shift upward in \prm {atop} stack \NC \NR +\NC \lpr {Umathstackdenomdown} \NC denominator shift downward in \prm {atop} stack \NC \NR +\NC \lpr {Umathfractionrule} \NC the width of the rule in a \prm {over} \NC \NR +\NC \lpr {Umathfractionnumvgap} \NC vertical clearance between the numerator and the rule \NC \NR +\NC \lpr {Umathfractionnumup} \NC numerator shift upward in \prm {over} \NC \NR +\NC \lpr {Umathfractiondenomvgap} \NC vertical clearance between the denominator and the rule \NC \NR +\NC \lpr {Umathfractiondenomdown} \NC denominator shift downward in \prm {over} \NC \NR +\NC \lpr {Umathfractiondelsize} \NC minimum delimiter size for \type {\...withdelims} \NC \NR +\NC \lpr {Umathlimitabovevgap} \NC vertical clearance for limits above operators \NC \NR +\NC \lpr {Umathlimitabovebgap} \NC vertical baseline clearance for limits above operators \NC \NR +\NC \lpr {Umathlimitabovekern} \NC space reserved at the top of the limit \NC \NR +\NC \lpr {Umathlimitbelowvgap} \NC vertical clearance for limits below operators \NC \NR +\NC \lpr {Umathlimitbelowbgap} \NC vertical baseline clearance for limits below operators \NC \NR +\NC 
\lpr {Umathlimitbelowkern} \NC space reserved at the bottom of the limit \NC \NR +\NC \lpr {Umathoverdelimitervgap} \NC vertical clearance for limits above delimiters \NC \NR +\NC \lpr {Umathoverdelimiterbgap} \NC vertical baseline clearance for limits above delimiters \NC \NR +\NC \lpr {Umathunderdelimitervgap} \NC vertical clearance for limits below delimiters \NC \NR +\NC \lpr {Umathunderdelimiterbgap} \NC vertical baseline clearance for limits below delimiters \NC \NR +\NC \lpr {Umathsubshiftdrop} \NC subscript drop for boxes and subformulas \NC \NR +\NC \lpr {Umathsubshiftdown} \NC subscript drop for characters \NC \NR +\NC \lpr {Umathsupshiftdrop} \NC superscript drop (raise, actually) for boxes and subformulas \NC \NR +\NC \lpr {Umathsupshiftup} \NC superscript raise for characters \NC \NR +\NC \lpr {Umathsubsupshiftdown} \NC subscript drop in the presence of a superscript \NC \NR +\NC \lpr {Umathsubtopmax} \NC the top of standalone subscripts cannot be higher than this + above the baseline \NC \NR +\NC \lpr {Umathsupbottommin} \NC the bottom of standalone superscripts cannot be less than + this above the baseline \NC \NR +\NC \lpr {Umathsupsubbottommax} \NC the bottom of the superscript of a combined super- and subscript + be at least as high as this above the baseline \NC \NR +\NC \lpr {Umathsubsupvgap} \NC vertical clearance between super- and subscript \NC \NR +\NC \lpr {Umathspaceafterscript} \NC additional space added after a super- or subscript \NC \NR +\NC \lpr {Umathconnectoroverlapmin}\NC minimum overlap between parts in an extensible recipe \NC \NR +\LL \stoptabulate Each of the parameters in this section can be set by a command like this: @@ -403,22 +436,9 @@ Each of the parameters in this section can be set by a command like this: they obey grouping, and you can use \type {\the\Umathquad\displaystyle} if needed. -\section{Skips around display math} +\subsection{Font|-|based math parameters} -The injection of \type {\abovedisplayskip} and \type {\belowdisplayskip} is not -symmetrical. An above one is always inserted, also when zero, but the below is -only inserted when larger than zero. Especially the later makes it sometimes hard -to fully control spacing. Therefore \LUATEX\ comes with a new directive: \type -{\mathdisplayskipmode}. The following values apply: - -\starttabulate -\NC 0 \NC normal \TEX\ behaviour \NC \NR -\NC 1 \NC always (same as 0) \NC \NR -\NC 2 \NC only when not zero \NC \NR -\NC 3 \NC never, not even when not zero \NC \NR -\stoptabulate - -\section{Font-based Math Parameters} +\topicindex {math+parameters} While it is nice to have these math parameters available for tweaking, it would be tedious to have to set each of them by hand. For this reason, \LUATEX\ @@ -432,121 +452,118 @@ case no attention is paid to which family is being assigned to: the \type {MathConstants} tables in the last assigned family sets all parameters. In the table below, the one|-|letter style abbreviations and symbolic tfm font -dimension names match those using in the \TeX book. Assignments to \type -{\textfont} set the values for the cramped and uncramped display and text styles, -\type {\scriptfont} sets the script styles, and \type {\scriptscriptfont} sets -the scriptscript styles, so we have eight parameters for three font sizes. In the +dimension names match those used in the \TeX book. 
Assignments to \prm +{textfont} set the values for the cramped and uncramped display and text styles, +\prm {scriptfont} sets the script styles, and \prm {scriptscriptfont} sets the +scriptscript styles, so we have eight parameters for three font sizes. In the \TFM\ case, assignments only happen in family~2 and family~3 (and of course only for the parameters for which there are font dimensions). Besides the parameters below, \LUATEX\ also looks at the \quote {space} font dimension parameter. For math fonts, this should be set to zero. -\start +\def\MathLine#1#2#3#4#5% + {\TB + \NC \llap{\high{\tx #2\enspace}}\ttbf \string #1 \NC \tt #5 \NC \NR + \NC \tx #3 \NC \tt #4 \NC \NR} -\switchtobodyfont[8pt] - -\starttabulate[|l|l|l|p|] -\BC variable \BC style \BC default value opentype \BC default value tfm \NC \NR -\NC \type {\Umathaxis} \NC -- \NC AxisHeight \NC axis_height \NC \NR -\NC \type {\Umathoperatorsize} \NC D, D' \NC DisplayOperatorMinHeight \NC $^6$ \NC \NR -\NC \type {\Umathfractiondelsize} \NC D, D' \NC FractionDelimiterDisplayStyleSize$^9$ \NC delim1 \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC FractionDelimiterSize$^9$ \NC delim2 \NC \NR -\NC \type {\Umathfractiondenomdown} \NC D, D' \NC FractionDenominatorDisplayStyleShiftDown \NC denom1 \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC FractionDenominatorShiftDown \NC denom2 \NC \NR -\NC \type {\Umathfractiondenomvgap} \NC D, D' \NC FractionDenominatorDisplayStyleGapMin \NC 3*default_rule_thickness \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC FractionDenominatorGapMin \NC default_rule_thickness \NC \NR -\NC \type {\Umathfractionnumup} \NC D, D' \NC FractionNumeratorDisplayStyleShiftUp \NC num1 \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC FractionNumeratorShiftUp \NC num2 \NC \NR -\NC \type {\Umathfractionnumvgap} \NC D, D' \NC FractionNumeratorDisplayStyleGapMin \NC 3*default_rule_thickness \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC FractionNumeratorGapMin \NC default_rule_thickness \NC \NR -\NC \type {\Umathfractionrule} \NC -- \NC FractionRuleThickness \NC default_rule_thickness \NC \NR -\NC \type {\Umathskewedfractionhgap} \NC -- \NC SkewedFractionHorizontalGap \NC math_quad/2 \NC \NR -\NC \type {\Umathskewedfractionvgap} \NC -- \NC SkewedFractionVerticalGap \NC math_x_height \NC \NR -\NC \type {\Umathlimitabovebgap} \NC -- \NC UpperLimitBaselineRiseMin \NC big_op_spacing3 \NC \NR -\NC \type {\Umathlimitabovekern} \NC -- \NC 0$^1$ \NC big_op_spacing5 \NC \NR -\NC \type {\Umathlimitabovevgap} \NC -- \NC UpperLimitGapMin \NC big_op_spacing1 \NC \NR -\NC \type {\Umathlimitbelowbgap} \NC -- \NC LowerLimitBaselineDropMin \NC big_op_spacing4 \NC \NR -\NC \type {\Umathlimitbelowkern} \NC -- \NC 0$^1$ \NC big_op_spacing5 \NC \NR -\NC \type {\Umathlimitbelowvgap} \NC -- \NC LowerLimitGapMin \NC big_op_spacing2 \NC \NR -\NC \type {\Umathoverdelimitervgap} \NC -- \NC StretchStackGapBelowMin \NC big_op_spacing1 \NC \NR -\NC \type {\Umathoverdelimiterbgap} \NC -- \NC StretchStackTopShiftUp \NC big_op_spacing3 \NC \NR -\NC \type {\Umathunderdelimitervgap} \NC-- \NC StretchStackGapAboveMin \NC big_op_spacing2 \NC \NR -\NC \type {\Umathunderdelimiterbgap} \NC-- \NC StretchStackBottomShiftDown \NC big_op_spacing4 \NC \NR -\NC \type {\Umathoverbarkern} \NC -- \NC OverbarExtraAscender \NC default_rule_thickness \NC \NR -\NC \type {\Umathoverbarrule} \NC -- \NC OverbarRuleThickness \NC default_rule_thickness \NC \NR -\NC \type {\Umathoverbarvgap} \NC -- \NC OverbarVerticalGap \NC 3*default_rule_thickness \NC \NR -\NC \type {\Umathquad} \NC -- \NC 
<font_size(f)>$^1$ \NC math_quad \NC \NR -\NC \type {\Umathradicalkern} \NC -- \NC RadicalExtraAscender \NC default_rule_thickness \NC \NR -\NC \type {\Umathradicalrule} \NC -- \NC RadicalRuleThickness \NC <not set>$^2$ \NC \NR -\NC \type {\Umathradicalvgap} \NC D, D' \NC RadicalDisplayStyleVerticalGap \NC (default_rule_thickness+\crlf - (abs(math_x_height)/4))$^3$ \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC RadicalVerticalGap \NC (default_rule_thickness+\crlf - (abs(default_rule_thickness)/4))$^3$ \NC \NR -\NC \type {\Umathradicaldegreebefore} \NC -- \NC RadicalKernBeforeDegree \NC <not set>$^2$ \NC \NR -\NC \type {\Umathradicaldegreeafter} \NC -- \NC RadicalKernAfterDegree \NC <not set>$^2$ \NC \NR -\NC \type {\Umathradicaldegreeraise} \NC -- \NC RadicalDegreeBottomRaisePercent \NC <not set>$^{2,7}$ \NC \NR -\NC \type {\Umathspaceafterscript} \NC -- \NC SpaceAfterScript \NC script_space$^4$ \NC \NR -\NC \type {\Umathstackdenomdown} \NC D, D' \NC StackBottomDisplayStyleShiftDown \NC denom1 \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC StackBottomShiftDown \NC denom2 \NC \NR -\NC \type {\Umathstacknumup} \NC D, D' \NC StackTopDisplayStyleShiftUp \NC num1 \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC StackTopShiftUp \NC num3 \NC \NR -\NC \type {\Umathstackvgap} \NC D, D' \NC StackDisplayStyleGapMin \NC 7*default_rule_thickness \NC \NR -\NC \NC T, T', S, S', SS, SS' \NC StackGapMin \NC 3*default_rule_thickness \NC \NR -\NC \type {\Umathsubshiftdown} \NC -- \NC SubscriptShiftDown \NC sub1 \NC \NR -\NC \type {\Umathsubshiftdrop} \NC -- \NC SubscriptBaselineDropMin \NC sub_drop \NC \NR -\NC \type {\Umathsubsupshiftdown} \NC -- \NC SubscriptShiftDownWithSuperscript$^8$ \NC \NC \NR -\NC \NC \NC \quad\ or SubscriptShiftDown \NC sub2 \NC \NR -\NC \type {\Umathsubtopmax} \NC -- \NC SubscriptTopMax \NC (abs(math_x_height * 4) / 5) \NC \NR -\NC \type {\Umathsubsupvgap} \NC -- \NC SubSuperscriptGapMin \NC 4*default_rule_thickness \NC \NR -\NC \type {\Umathsupbottommin} \NC -- \NC SuperscriptBottomMin \NC (abs(math_x_height) / 4) \NC \NR -\NC \type {\Umathsupshiftdrop} \NC -- \NC SuperscriptBaselineDropMax \NC sup_drop \NC \NR -\NC \type {\Umathsupshiftup} \NC D \NC SuperscriptShiftUp \NC sup1 \NC \NR -\NC \NC T, S, SS, \NC SuperscriptShiftUp \NC sup2 \NC \NR -\NC \NC D', T', S', SS' \NC SuperscriptShiftUpCramped \NC sup3 \NC \NR -\NC \type {\Umathsupsubbottommax} \NC -- \NC SuperscriptBottomMaxWithSubscript \NC (abs(math_x_height * 4) / 5) \NC \NR -\NC \type {\Umathunderbarkern} \NC -- \NC UnderbarExtraDescender \NC default_rule_thickness \NC \NR -\NC \type {\Umathunderbarrule} \NC -- \NC UnderbarRuleThickness \NC default_rule_thickness \NC \NR -\NC \type {\Umathunderbarvgap} \NC -- \NC UnderbarVerticalGap \NC 3*default_rule_thickness \NC \NR -\NC \type {\Umathconnectoroverlapmin} \NC -- \NC MinConnectorOverlap \NC 0$^5$ \NC \NR +\starttabulate[|l|l|] +\DB variable / style \BC tfm / opentype \NC \NR +\MathLine{\Umathaxis} {} {} {AxisHeight} {axis_height} +\MathLine{\Umathoperatorsize} {6} {D, D'} {DisplayOperatorMinHeight} {\emdash} +\MathLine{\Umathfractiondelsize} {9} {D, D'} {FractionDelimiterDisplayStyleSize} {delim1} +\MathLine{\Umathfractiondelsize} {9} {T, T', S, S', SS, SS'}{FractionDelimiterSize} {delim2} +\MathLine{\Umathfractiondenomdown} {} {D, D'} {FractionDenominatorDisplayStyleShiftDown}{denom1} +\MathLine{\Umathfractiondenomdown} {} {T, T', S, S', SS, SS'}{FractionDenominatorShiftDown} {denom2} +\MathLine{\Umathfractiondenomvgap} {} {D, D'} {FractionDenominatorDisplayStyleGapMin} 
{3*default_rule_thickness} +\MathLine{\Umathfractiondenomvgap} {} {T, T', S, S', SS, SS'}{FractionDenominatorGapMin} {default_rule_thickness} +\MathLine{\Umathfractionnumup} {} {D, D'} {FractionNumeratorDisplayStyleShiftUp} {num1} +\MathLine{\Umathfractionnumup} {} {T, T', S, S', SS, SS'}{FractionNumeratorShiftUp} {num2} +\MathLine{\Umathfractionnumvgap} {} {D, D'} {FractionNumeratorDisplayStyleGapMin} {3*default_rule_thickness} +\MathLine{\Umathfractionnumvgap} {} {T, T', S, S', SS, SS'}{FractionNumeratorGapMin} {default_rule_thickness} +\MathLine{\Umathfractionrule} {} {} {FractionRuleThickness} {default_rule_thickness} +\MathLine{\Umathskewedfractionhgap} {} {} {SkewedFractionHorizontalGap} {math_quad/2} +\MathLine{\Umathskewedfractionvgap} {} {} {SkewedFractionVerticalGap} {math_x_height} +\MathLine{\Umathlimitabovebgap} {} {} {UpperLimitBaselineRiseMin} {big_op_spacing3} +\MathLine{\Umathlimitabovekern} {1} {} {0} {big_op_spacing5} +\MathLine{\Umathlimitabovevgap} {} {} {UpperLimitGapMin} {big_op_spacing1} +\MathLine{\Umathlimitbelowbgap} {} {} {LowerLimitBaselineDropMin} {big_op_spacing4} +\MathLine{\Umathlimitbelowkern} {1} {} {0} {big_op_spacing5} +\MathLine{\Umathlimitbelowvgap} {} {} {LowerLimitGapMin} {big_op_spacing2} +\MathLine{\Umathoverdelimitervgap} {} {} {StretchStackGapBelowMin} {big_op_spacing1} +\MathLine{\Umathoverdelimiterbgap} {} {} {StretchStackTopShiftUp} {big_op_spacing3} +\MathLine{\Umathunderdelimitervgap} {} {} {StretchStackGapAboveMin} {big_op_spacing2} +\MathLine{\Umathunderdelimiterbgap} {} {} {StretchStackBottomShiftDown} {big_op_spacing4} +\MathLine{\Umathoverbarkern} {} {} {OverbarExtraAscender} {default_rule_thickness} +\MathLine{\Umathoverbarrule} {} {} {OverbarRuleThickness} {default_rule_thickness} +\MathLine{\Umathoverbarvgap} {} {} {OverbarVerticalGap} {3*default_rule_thickness} +\MathLine{\Umathquad} {1} {} {<font_size(f)>} {math_quad} +\MathLine{\Umathradicalkern} {} {} {RadicalExtraAscender} {default_rule_thickness} +\MathLine{\Umathradicalrule} {2} {} {RadicalRuleThickness} {<not set>} +\MathLine{\Umathradicalvgap} {3} {D, D'} {RadicalDisplayStyleVerticalGap} {default_rule_thickness+abs(math_x_height)/4} +\MathLine{\Umathradicalvgap} {3} {T, T', S, S', SS, SS'}{RadicalVerticalGap} {default_rule_thickness+abs(default_rule_thickness)/4} +\MathLine{\Umathradicaldegreebefore}{2} {} {RadicalKernBeforeDegree} {<not set>} +\MathLine{\Umathradicaldegreeafter} {2} {} {RadicalKernAfterDegree} {<not set>} +\MathLine{\Umathradicaldegreeraise} {2,7}{} {RadicalDegreeBottomRaisePercent} {<not set>} +\MathLine{\Umathspaceafterscript} {4} {} {SpaceAfterScript} {script_space} +\MathLine{\Umathstackdenomdown} {} {D, D'} {StackBottomDisplayStyleShiftDown} {denom1} +\MathLine{\Umathstackdenomdown} {} {T, T', S, S', SS, SS'}{StackBottomShiftDown} {denom2} +\MathLine{\Umathstacknumup} {} {D, D'} {StackTopDisplayStyleShiftUp} {num1} +\MathLine{\Umathstacknumup} {} {T, T', S, S', SS, SS'}{StackTopShiftUp} {num3} +\MathLine{\Umathstackvgap} {} {D, D'} {StackDisplayStyleGapMin} {7*default_rule_thickness} +\MathLine{\Umathstackvgap} {} {T, T', S, S', SS, SS'}{StackGapMin} {3*default_rule_thickness} +\MathLine{\Umathsubshiftdown} {} {} {SubscriptShiftDown} {sub1} +\MathLine{\Umathsubshiftdrop} {} {} {SubscriptBaselineDropMin} {sub_drop} +\MathLine{\Umathsubsupshiftdown} {8} {} {SubscriptShiftDownWithSuperscript} {\emdash} +\MathLine{\Umathsubtopmax} {} {} {SubscriptTopMax} {abs(math_x_height*4)/5} +\MathLine{\Umathsubsupvgap} {} {} {SubSuperscriptGapMin} 
{4*default_rule_thickness} +\MathLine{\Umathsupbottommin} {} {} {SuperscriptBottomMin} {abs(math_x_height/4)} +\MathLine{\Umathsupshiftdrop} {} {} {SuperscriptBaselineDropMax} {sup_drop} +\MathLine{\Umathsupshiftup} {} {D} {SuperscriptShiftUp} {sup1} +\MathLine{\Umathsupshiftup} {} {T, S, SS,} {SuperscriptShiftUp} {sup2} +\MathLine{\Umathsupshiftup} {} {D', T', S', SS'} {SuperscriptShiftUpCramped} {sup3} +\MathLine{\Umathsupsubbottommax} {} {} {SuperscriptBottomMaxWithSubscript} {abs(math_x_height*4)/5} +\MathLine{\Umathunderbarkern} {} {} {UnderbarExtraDescender} {default_rule_thickness} +\MathLine{\Umathunderbarrule} {} {} {UnderbarRuleThickness} {default_rule_thickness} +\MathLine{\Umathunderbarvgap} {} {} {UnderbarVerticalGap} {3*default_rule_thickness} +\MathLine{\Umathconnectoroverlapmin}{5} {} {MinConnectorOverlap} {0} +\LL \stoptabulate -\stop - -Note 1: \OPENTYPE\ fonts set \type {\Umathlimitabovekern} and \type -{\Umathlimitbelowkern} to zero and set \type {\Umathquad} to the font size of the +Note 1: \OPENTYPE\ fonts set \lpr {Umathlimitabovekern} and \lpr +{Umathlimitbelowkern} to zero and set \lpr {Umathquad} to the font size of the used font, because these are not supported in the \type {MATH} table, -Note 2: Traditional \TFM\ fonts do not set \type {\Umathradicalrule} because +Note 2: Traditional \TFM\ fonts do not set \lpr {Umathradicalrule} because \TEX82\ uses the height of the radical instead. When this parameter is indeed not set when \LUATEX\ has to typeset a radical, a backward compatibility mode will kick in that assumes that an oldstyle \TEX\ font is used. Also, they do not set -\type {\Umathradicaldegreebefore}, \type {\Umathradicaldegreeafter}, and \type -{\Umathradicaldegreeraise}. These are then automatically initialized to +\lpr {Umathradicaldegreebefore}, \lpr {Umathradicaldegreeafter}, and \lpr +{Umathradicaldegreeraise}. These are then automatically initialized to $5/18$quad, $-10/18$quad, and 60. -Note 3: If \TFM\ fonts are used, then the \type {\Umathradicalvgap} is not set +Note 3: If \TFM\ fonts are used, then the \lpr {Umathradicalvgap} is not set until the first time \LUATEX\ has to typeset a formula because this needs parameters from both family~2 and family~3. This provides a partial backward -compatibility with \TEX82, but that compatibility is only partial: once the \type -{\Umathradicalvgap} is set, it will not be recalculated any more. +compatibility with \TEX82, but that compatibility is only partial: once the \lpr +{Umathradicalvgap} is set, it will not be recalculated any more. -Note 4: When \TFM\ fonts are used a similar situation arises with respect to -\type {\Umathspaceafterscript}: it is not set until the first time \LUATEX\ has -to typeset a formula. This provides some backward compatibility with \TEX82. But -once the \type {\Umathspaceafterscript} is set, \type {\scriptspace} will never -be looked at again. +Note 4: When \TFM\ fonts are used a similar situation arises with respect to \lpr +{Umathspaceafterscript}: it is not set until the first time \LUATEX\ has to +typeset a formula. This provides some backward compatibility with \TEX82. But +once the \lpr {Umathspaceafterscript} is set, \prm {scriptspace} will never be +looked at again. -Note 5: Traditional \TFM\ fonts set \type {\Umathconnectoroverlapmin} to zero +Note 5: Traditional \TFM\ fonts set \lpr {Umathconnectoroverlapmin} to zero because \TEX82\ always stacks extensibles without any overlap. 
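All these parameters can also be set explicitly, per style, which is how a macro
package can tweak the outcome when a font's values are not to its liking. A
small illustration, where the parameter and style names are real but the
dimensions are just made up:

\starttyping
% overrule the fraction rule thickness; the values here are only illustrative
\Umathfractionrule\displaystyle      =0.6pt
\Umathfractionrule\textstyle         =0.6pt
\Umathfractionrule\scriptstyle       =0.5pt
\Umathfractionrule\scriptscriptstyle =0.4pt
% the cramped styles (\crampedtextstyle etc.) have their own values
\stoptyping

Each of the eight styles keeps its own value, so a setup that wants consistency
typically loops over all of them.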
-Note 6: The \type {\Umathoperatorsize} is only used in \type {\displaystyle}, and -is only set in \OPENTYPE\ fonts. In \TFM\ font mode, it is artificially set to -one scaled point more than the initial attempt's size, so that always the \quote +Note 6: The \lpr {Umathoperatorsize} is only used in \prm {displaystyle}, and is +only set in \OPENTYPE\ fonts. In \TFM\ font mode, it is artificially set to one +scaled point more than the initial attempt's size, so that always the \quote {first next} will be tried, just like in \TEX82. -Note 7: The \type {\Umathradicaldegreeraise} is a special case because it is the -only parameter that is expressed in a percentage instead of as a number of scaled +Note 7: The \lpr {Umathradicaldegreeraise} is a special case because it is the +only parameter that is expressed in a percentage instead of a number of scaled points. Note 8: \type {SubscriptShiftDownWithSuperscript} does not actually exist in the @@ -557,16 +574,198 @@ Note 9: \type {FractionDelimiterDisplayStyleSize} and \type {FractionDelimiterSize} do not actually exist in the \quote {standard} \OPENTYPE\ math font Cambria, but were useful enough to be added. -\section{Nolimit correction} +\stopsection + +\startsection[title={Math spacing}] + +\subsection{Inline surrounding space} + +\topicindex {math+spacing} + +Inline math is surrounded by (optional) \prm {mathsurround} spacing but that is a fixed +dimension. There is now an additional parameter \lpr {mathsurroundskip}. When set to a +non|-|zero value (or zero with some stretch or shrink) this parameter will replace +\prm {mathsurround}. By using an additional parameter instead of changing the nature +of \prm {mathsurround}, we can remain compatible. In the meantime a bit more +control has been added via \lpr {mathsurroundmode}. This directive can take 6 values +with zero being the default behaviour. + +\start + +\def\OneLiner#1#2% + {\NC \type{#1} + \NC \dontleavehmode\inframed[align=normal,offset=0pt,frame=off]{\mathsurroundmode#1\relax\hsize 100pt x$x$x} + \NC \dontleavehmode\inframed[align=normal,offset=0pt,frame=off]{\mathsurroundmode#1\relax\hsize 100pt x $x$ x} + \NC #2 + \NC \NR} + +\startbuffer +\mathsurround 10pt +\mathsurroundskip20pt +\stopbuffer + +\typebuffer \getbuffer + +\starttabulate[|c|c|c|pl|] +\DB mode \BC x\$x\$x \BC x \$x\$ x \BC effect \NC \NR +\TB +\OneLiner{0}{obey \prm {mathsurround} when \lpr {mathsurroundskip} is 0pt} +\OneLiner{1}{only add skip to the left} +\OneLiner{2}{only add skip to the right} +\OneLiner{3}{add skip to the left and right} +\OneLiner{4}{ignore the skip setting, obey \prm {mathsurround}} +\OneLiner{5}{disable all spacing around math} +\OneLiner{6}{only apply \lpr {mathsurroundskip} when also spacing} +\OneLiner{7}{only apply \lpr {mathsurroundskip} when no spacing} +\LL +\stoptabulate + +\stop + +Method six omits the surround glue when there is (x)spacing glue present while +method seven does the opposite, the glue is only applied when there is (x)space +glue present too. Anything more fancy, like checking the begining or end of a +paragraph (or edges of a box) would not be robust anyway. If you want that you +can write a callback that runs over a list and analyzes a paragraph. Actually, in +that case you could also inject glue (or set the properties of a math node) +explicitly. So, these modes are in practice mostly useful for special purposes +and experiments (they originate in a tracker item). 
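A typical application is a small amount of flexible space at both sides of
inline formulas; the amounts below are of course arbitrary:

\starttyping
\mathsurround     0pt                    % the fixed kern is not used any more
\mathsurroundskip 2pt plus 1pt minus 1pt % glue instead of a fixed kern
\mathsurroundmode 3                      % apply the skip at the left and right
\stoptyping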
Keep in mind that this glue +is part of the math node and not always treated as normal glue: it travels with +the begin and end math nodes. Also, method 6 and 7 will zero the skip related +fields in a node when applicable in the first occasion that checks them +(linebreaking or packaging). + +\subsection{Pairwise spacing} + +\topicindex {math+spacing} + +Besides the parameters mentioned in the previous sections, there are also 64 new +primitives to control the math spacing table (as explained in Chapter~18 of the +\TEX book). The primitive names are a simple matter of combining two math atom +types, but for completeness' sake, here is the whole list: + +\starttwocolumns +\startlines +\lpr {Umathordordspacing} +\lpr {Umathordopspacing} +\lpr {Umathordbinspacing} +\lpr {Umathordrelspacing} +\lpr {Umathordopenspacing} +\lpr {Umathordclosespacing} +\lpr {Umathordpunctspacing} +\lpr {Umathordinnerspacing} +\lpr {Umathopordspacing} +\lpr {Umathopopspacing} +\lpr {Umathopbinspacing} +\lpr {Umathoprelspacing} +\lpr {Umathopopenspacing} +\lpr {Umathopclosespacing} +\lpr {Umathoppunctspacing} +\lpr {Umathopinnerspacing} +\lpr {Umathbinordspacing} +\lpr {Umathbinopspacing} +\lpr {Umathbinbinspacing} +\lpr {Umathbinrelspacing} +\lpr {Umathbinopenspacing} +\lpr {Umathbinclosespacing} +\lpr {Umathbinpunctspacing} +\lpr {Umathbininnerspacing} +\lpr {Umathrelordspacing} +\lpr {Umathrelopspacing} +\lpr {Umathrelbinspacing} +\lpr {Umathrelrelspacing} +\lpr {Umathrelopenspacing} +\lpr {Umathrelclosespacing} +\lpr {Umathrelpunctspacing} +\lpr {Umathrelinnerspacing} +\lpr {Umathopenordspacing} +\lpr {Umathopenopspacing} +\lpr {Umathopenbinspacing} +\lpr {Umathopenrelspacing} +\lpr {Umathopenopenspacing} +\lpr {Umathopenclosespacing} +\lpr {Umathopenpunctspacing} +\lpr {Umathopeninnerspacing} +\lpr {Umathcloseordspacing} +\lpr {Umathcloseopspacing} +\lpr {Umathclosebinspacing} +\lpr {Umathcloserelspacing} +\lpr {Umathcloseopenspacing} +\lpr {Umathcloseclosespacing} +\lpr {Umathclosepunctspacing} +\lpr {Umathcloseinnerspacing} +\lpr {Umathpunctordspacing} +\lpr {Umathpunctopspacing} +\lpr {Umathpunctbinspacing} +\lpr {Umathpunctrelspacing} +\lpr {Umathpunctopenspacing} +\lpr {Umathpunctclosespacing} +\lpr {Umathpunctpunctspacing} +\lpr {Umathpunctinnerspacing} +\lpr {Umathinnerordspacing} +\lpr {Umathinneropspacing} +\lpr {Umathinnerbinspacing} +\lpr {Umathinnerrelspacing} +\lpr {Umathinneropenspacing} +\lpr {Umathinnerclosespacing} +\lpr {Umathinnerpunctspacing} +\lpr {Umathinnerinnerspacing} +\stoplines +\stoptwocolumns + +These parameters are of type \prm {muskip}, so setting a parameter can be done +like this: + +\starttyping +\Umathopordspacing\displaystyle=4mu plus 2mu +\stoptyping + +They are all initialized by \type {initex} to the values mentioned in the table +in Chapter~18 of the \TEX book. + +Note 1: for ease of use as well as for backward compatibility, \prm {thinmuskip}, +\prm {medmuskip} and \prm {thickmuskip} are treated specially. In their case a +pointer to the corresponding internal parameter is saved, not the actual \prm +{muskip} value. This means that any later changes to one of these three +parameters will be taken into account. + +Note 2: Careful readers will realise that there are also primitives for the items +marked \type {*} in the \TEX book. These will not actually be used as those +combinations of atoms cannot actually happen, but it seemed better not to break +orthogonality. They are initialized to zero. 
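As with the example above, each style is set separately. For instance, giving
the spacing between an ordinary atom and a relation a bit more stretch in text
style only could be done as follows (the values are merely illustrative):

\starttyping
\Umathordrelspacing\textstyle=5mu plus 3mu minus 2mu
\Umathrelordspacing\textstyle=5mu plus 3mu minus 2mu
% display, script and scriptscript style keep their own settings
\stoptyping

Parameters that are initialized to one of \type {\thinmuskip}, \type
{\medmuskip} or \type {\thickmuskip} keep tracking those registers as note~1
explains, while an explicit assignment like this one stores a fixed value.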
+ +\subsection{Skips around display math} + +\topicindex {math+spacing} + +The injection of \prm {abovedisplayskip} and \prm {belowdisplayskip} is not +symmetrical. An above one is always inserted, also when zero, but the below is +only inserted when larger than zero. Especially the latter makes it sometimes hard +to fully control spacing. Therefore \LUATEX\ comes with a new directive: \lpr +{mathdisplayskipmode}. The following values apply: + +\starttabulate[|c|l|] +\DB value \BC meaning \NC \NR +\TB +\NC 0 \NC normal \TEX\ behaviour \NC \NR +\NC 1 \NC always (same as 0) \NC \NR +\NC 2 \NC only when not zero \NC \NR +\NC 3 \NC never, not even when not zero \NC \NR +\LL +\stoptabulate + +\subsection {Nolimit correction} -There are two extra math parameters \type {\Umathnolimitsupfactor} and \type -{\Umathnolimitsubfactor} that were added to provide some control over how limits +\topicindex {math+limits} + +There are two extra math parameters \lpr {Umathnolimitsupfactor} and \lpr +{Umathnolimitsubfactor} that were added to provide some control over how limits are spaced (for example the position of super and subscripts after integral -operators). They relate to an extra parameter \type {\mathnolimitsmode}. The half -corrections are what happens when scripts are placed on above and below. The +operators). They relate to an extra parameter \lpr {mathnolimitsmode}. The half +corrections are what happens when scripts are placed above and below. The problem with italic corrections is that officially that correction italic is used for above|/|below placement while advanced kerns are used for placement at the -right end. The question is: how often is this implemented, and if so, does the +right end. The question is: how often is this implemented, and if so, do the kerns assume correction too. Anyway, with this parameter one can control it. \starttabulate[|l|ck1|ck1|ck1|ck1|ck1|ck1|] @@ -609,13 +808,15 @@ When the mode is set to one, the math parameters are used. This way a macro package writer can decide what looks best. Given the current state of fonts in \CONTEXT\ we currently use mode 1 with factor 0 for the superscript and 750 for the subscripts. Positive values are used for both parameters but the subscript -shifts to the left. A \type {\mathnolimitsmode} larger that 15 is considered to +shifts to the left. A \lpr {mathnolimitsmode} larger that 15 is considered to be a factor for the subscript correction. This feature can be handy when experimenting. -\section{Math italic mess} +\subsection {Math italic mess} + +\topicindex {math+italics} -The \type {\mathitalicsmode} parameter can be set to~1 to force italic correction +The \lpr {mathitalicsmode} parameter can be set to~1 to force italic correction before noads that represent some more complex structure (read: everything that is not an ord, bin, rel, open, close, punct or inner). We show a Cambria example. @@ -641,20 +842,26 @@ example. This kind of parameters relate to the fact that italic correction in \OPENTYPE\ math is bound to fuzzy rules. So, control is the solution. -\section{Script boxes} +\subsection {Script and kerning} -If you want typeset text in math macro packages often provide something \type -{\text} which obeys the script sizes. As the definition can be anything there is -a good change that the kerning doesn't come out well when used in a script. Given -that the first glyph ends up in an \type {\hbox} we have some control over this. -And, as a bonus we also added control over the normal sublist kerning. 
The \type -{\mathscriptboxmode} parameter defaults to~1. +\topicindex {math+kerning} +\topicindex {math+scripts} -\starttabulate[|l|l|] +If you want to typeset text in math macro packages often provide something \type +{\text} which obeys the script sizes. As the definition can be anything there is +a good chance that the kerning doesn't come out well when used in a script. Given +that the first glyph ends up in a \prm {hbox} we have some control over this. +And, as a bonus we also added control over the normal sublist kerning. The \lpr +{mathscriptboxmode} parameter defaults to~1. + +\starttabulate[|c|l|] +\DB value \BC meaning \NC \NR +\TB \NC \type {0} \NC forget about kerning \NC \NR \NC \type {1} \NC kern math sub lists with a valid glyph \NC \NR \NC \type {2} \NC also kern math sub boxes that have a valid glyph \NC \NR \NC \type {2} \NC only kern math sub boxes with a boundary node present\NC \NR +\LL \stoptabulate Here we show some examples. Of course this doesn't solve all our problems, if @@ -675,12 +882,12 @@ italics, while other fonts can lack kerns. \unexpanded\def\Show#1#2#3% {\doifelsenothing{#3} - {\small\typeinlinebuffer[#1]} + {\small\tx\typeinlinebuffer[#1]} {\doifelse{#3}{-} - {\small\type{mode #2}} + {\small\bf\tt mode #2} {\switchtobodyfont[#3]\showfontkerns\showglyphs\mathscriptboxmode#2\relax\inlinebuffer[#1]}}} -\starttabulate[|lT|c|c|c|c|c|] +\starttabulate[|lBT|c|c|c|c|c|] \NC \NC \Show{1}{0}{} \NC\Show{1}{1}{} \NC \Show{2}{1}{} \NC \Show{2}{2}{} \NC \Show{3}{3}{} \NC \NR \NC \NC \Show{1}{0}{-} \NC\Show{1}{1}{-} \NC \Show{2}{1}{-} \NC \Show{2}{2}{-} \NC \Show{3}{3}{-} \NC \NR \NC modern \NC \Show{1}{0}{modern} \NC\Show{1}{1}{modern} \NC \Show{2}{1}{modern} \NC \Show{2}{2}{modern} \NC \Show{3}{3}{modern} \NC \NR @@ -690,15 +897,129 @@ italics, while other fonts can lack kerns. \NC dejavu \NC \Show{1}{0}{dejavu} \NC\Show{1}{1}{dejavu} \NC \Show{2}{1}{dejavu} \NC \Show{2}{2}{dejavu} \NC \Show{3}{3}{dejavu} \NC \NR \stoptabulate -\section{Unscaled fences} +Kerning between a character subscript is controlled by \lpr {mathscriptcharmode} +which also defaults to~1. + +Here is another example. Internally we tag kerns as italic kerns or font kerns +where font kerns result from the staircase kern tables. In 2018 fonts like Latin +Modern and Pagella rely on cheats with the boundingbox, Cambria uses staircase +kerns and Lucida a mixture. Depending on how fonts evolve we might add some more +control over what one can turn on and off. 
+ +\def\MathSample#1#2#3% + {\NC + #1 \NC + #2 \NC + \showglyphdata \switchtobodyfont[#2,17.3pt]$#3T_{f}$ \NC + \showglyphdata \switchtobodyfont[#2,17.3pt]$#3\gamma_{e}$ \NC + \showglyphdata \switchtobodyfont[#2,17.3pt]$#3\gamma_{ee}$ \NC + \showglyphdata \switchtobodyfont[#2,17.3pt]$#3T_{\tf fluff}$ \NC + \NR} + +\starttabulate[|Tl|Tl|l|l|l|l|] + \FL + \MathSample{normal}{modern} {\mr} + \MathSample{} {pagella} {\mr} + \MathSample{} {cambria} {\mr} + \MathSample{} {lucidaot}{\mr} + \ML + \MathSample{bold} {modern} {\mb} + \MathSample{} {pagella} {\mb} + \MathSample{} {cambria} {\mb} + \MathSample{} {lucidaot}{\mb} + \LL +\stoptabulate + +\subsection{Fixed scripts} + +We have three parameters that are used for this fixed anchoring: + +\starttabulate[|c|l|] +\DB parameter \BC register \NC \NR +\NC $d$ \NC \lpr {Umathsubshiftdown} \NC \NR +\NC $u$ \NC \lpr {Umathsupshiftup} \NC \NR +\NC $s$ \NC \lpr {Umathsubsupshiftdown} \NC \NR +\LL +\stoptabulate + +When we set \lpr {mathscriptsmode} to a value other than zero these are used +for calculating fixed positions. This is something that is needed for instance +for chemistry. You can manipulate the mentioned variables to achieve different +effects. + +\def\SampleMath#1% + {$\mathscriptsmode#1\mathupright CH_2 + CH^+_2 + CH^2_2$} + +\starttabulate[|c|c|c|p|] +\DB mode \BC down \BC up \BC example \NC \NR +\TB +\NC 0 \NC dynamic \NC dynamic \NC \SampleMath{0} \NC \NR +\NC 1 \NC $d$ \NC $u$ \NC \SampleMath{1} \NC \NR +\NC 2 \NC $s$ \NC $u$ \NC \SampleMath{2} \NC \NR +\NC 3 \NC $s$ \NC $u + s - d$ \NC \SampleMath{3} \NC \NR +\NC 4 \NC $d + (s-d)/2$ \NC $u + (s-d)/2$ \NC \SampleMath{4} \NC \NR +\NC 5 \NC $d$ \NC $u + s - d$ \NC \SampleMath{5} \NC \NR +\LL +\stoptabulate + +The value of this parameter obeys grouping but applies to the whole current +formula. + +% if needed we can put the value in stylenodes but maybe more should go there -The \type {\mathdelimitersmode} primitive is experimental and deals with the -following (potential) problems. Three bits can be set. The first bit prevents -an unwanted shift when the fence symbol is not scaled (a cambria side effect). The -second bit forces italic correction between a preceding character ordinal and -the fenced subformula, while the third bit turns that subformula into a ordinary -so that the same spacing applies as with unfenced variants. Here we show Cambria -(with \type {\mathitalicsmode} enabled). +\subsection{Penalties: \lpr {mathpenaltiesmode}} + +\topicindex {math+penalties} + +Only in inline math penalties will be added in a math list. You can force +penalties (also in display math) by setting: + +\starttyping +\mathpenaltiesmode = 1 +\stoptyping + +This primnitive is not really needed in \LUATEX\ because you can use the callback +\cbk {mlist_to_hlist} to force penalties by just calling the regular routine +with forced penalties. However, as part of opening up and control this primitive +makes sense. As a bonus we also provide two extra penalties: + +\starttyping +\prebinoppenalty = -100 % example value +\prerelpenalty = 900 % example value +\stoptyping + +They default to inifinite which signals that they don't need to be inserted. When +set they are injected before a binop or rel noad. This is an experimental feature. + +\subsection{Equation spacing: \lpr {matheqnogapstep}} + +By default \TEX\ will add one quad between the equation and the number. This is +hard coded. 
A new primitive can control this: + +\startsyntax +\matheqnogapstep = 1000 +\stopsyntax + +Because a math quad from the math text font is used instead of a dimension, we +use a step to control the size. A value of zero will suppress the gap. The step +is divided by 1000 which is the usual way to mimmick floating point factors in +\TEX. + +\stopsection + +\startsection[title={Math constructs}] + +\subsection {Unscaled fences} + +\topicindex {math+fences} + +The \lpr {mathdelimitersmode} primitive is experimental and deals with the +following (potential) problems. Three bits can be set. The first bit prevents an +unwanted shift when the fence symbol is not scaled (a cambria side effect). The +second bit forces italic correction between a preceding character ordinal and the +fenced subformula, while the third bit turns that subformula into an ordinary so +that the same spacing applies as with unfenced variants. Here we show Cambria +(with \lpr {mathitalicsmode} enabled). \starttexdefinition Whatever #1 \NC \type{\mathdelimitersmode = #1} @@ -716,127 +1037,34 @@ so that the same spacing applies as with unfenced variants. Here we show Cambria \stop So, when set to 7 fenced subformulas with unscaled delimiters come out the same -as unfenced ones. This can be handy for cases where one is forced to use \type -{\left} and \type {\right} always because of unpredictable content. As said, it's -an experimental features (which somehow fits in the exceptional way fences are -dealt with in the engine). The full list of flags is given in the next table: - -\starttabulate[|T|l|] -\NC "01 \NC don't apply the usual shift \NC \NR -\NC "02 \NC apply italic correction when possible \NC \NR -\NC "04 \NC force a ordinary subformula \NC \NR -\NC "08 \NC no shift when a base character \NC \NR -\NC "10 \NC only shift when an extensible \NC \NR +as unfenced ones. This can be handy for cases where one is forced to use \prm +{left} and \prm {right} always because of unpredictable content. As said, it's an +experimental feature (which somehow fits in the exceptional way fences are dealt +with in the engine). The full list of flags is given in the next table: + +\starttabulate[|c|l|] +\DB value \BC meaning \NC \NR +\TB +\NC \type{"01} \NC don't apply the usual shift \NC \NR +\NC \type{"02} \NC apply italic correction when possible \NC \NR +\NC \type{"04} \NC force an ordinary subformula \NC \NR +\NC \type{"08} \NC no shift when a base character \NC \NR +\NC \type{"10} \NC only shift when an extensible \NC \NR +\LL \stoptabulate -The effect can depend on the font (and for Cambria one can use for instance \type -{"16}). +The effect can depend on the font (and for Cambria one can use for instance \type {"16}). -\section{Math spacing setting} +\subsection[mathacc]{Accent handling} -Besides the parameters mentioned in the previous sections, there are also 64 new -primitives to control the math spacing table (as explained in Chapter~18 of the -\TEX book). 
The primitive names are a simple matter of combining two math atom -types, but for completeness' sake, here is the whole list: - -\starttwocolumns -\starttyping -\Umathordordspacing -\Umathordopspacing -\Umathordbinspacing -\Umathordrelspacing -\Umathordopenspacing -\Umathordclosespacing -\Umathordpunctspacing -\Umathordinnerspacing -\Umathopordspacing -\Umathopopspacing -\Umathopbinspacing -\Umathoprelspacing -\Umathopopenspacing -\Umathopclosespacing -\Umathoppunctspacing -\Umathopinnerspacing -\Umathbinordspacing -\Umathbinopspacing -\Umathbinbinspacing -\Umathbinrelspacing -\Umathbinopenspacing -\Umathbinclosespacing -\Umathbinpunctspacing -\Umathbininnerspacing -\Umathrelordspacing -\Umathrelopspacing -\Umathrelbinspacing -\Umathrelrelspacing -\Umathrelopenspacing -\Umathrelclosespacing -\Umathrelpunctspacing -\Umathrelinnerspacing -\Umathopenordspacing -\Umathopenopspacing -\Umathopenbinspacing -\Umathopenrelspacing -\Umathopenopenspacing -\Umathopenclosespacing -\Umathopenpunctspacing -\Umathopeninnerspacing -\Umathcloseordspacing -\Umathcloseopspacing -\Umathclosebinspacing -\Umathcloserelspacing -\Umathcloseopenspacing -\Umathcloseclosespacing -\Umathclosepunctspacing -\Umathcloseinnerspacing -\Umathpunctordspacing -\Umathpunctopspacing -\Umathpunctbinspacing -\Umathpunctrelspacing -\Umathpunctopenspacing -\Umathpunctclosespacing -\Umathpunctpunctspacing -\Umathpunctinnerspacing -\Umathinnerordspacing -\Umathinneropspacing -\Umathinnerbinspacing -\Umathinnerrelspacing -\Umathinneropenspacing -\Umathinnerclosespacing -\Umathinnerpunctspacing -\Umathinnerinnerspacing -\stoptyping -\stoptwocolumns - -These parameters are of type \type {\muskip}, so setting a parameter can be done -like this: - -\starttyping -\Umathopordspacing\displaystyle=4mu plus 2mu -\stoptyping - -They are all initialized by \type {initex} to the values mentioned in the table -in Chapter~18 of the \TEX book. - -Note 1: for ease of use as well as for backward compatibility, \type -{\thinmuskip}, \type {\medmuskip} and \type {\thickmuskip} are treated -especially. In their case a pointer to the corresponding internal parameter is -saved, not the actual \type {\muskip} value. This means that any later changes to -one of these three parameters will be taken into account. - -Note 2: Careful readers will realise that there are also primitives for the items -marked \type {*} in the \TEX book. These will not actually be used as those -combinations of atoms cannot actually happen, but it seemed better not to break -orthogonality. They are initialized to zero. - -\section[mathacc]{Math accent handling} +\topicindex {math+accents} \LUATEX\ supports both top accents and bottom accents in math mode, and math accents stretch automatically (if this is supported by the font the accent comes from, of course). Bottom and combined accents as well as fixed-width math accents -are controlled by optional keywords following \type {\Umathaccent}. +are controlled by optional keywords following \lpr {Umathaccent}. -The keyword \type {bottom} after \type {\Umathaccent} signals that a bottom accent +The keyword \type {bottom} after \lpr {Umathaccent} signals that a bottom accent is needed, and the keyword \type {both} signals that both a top and a bottom accent are needed (in this case two accents need to be specified, of course). 
@@ -852,7 +1080,7 @@ A simple example: If a math top accent has to be placed and the accentee is a character and has a non-zero \type {top_accent} value, then this value will be used to place the -accent instead of the \type {\skewchar} kern used by \TEX82. +accent instead of the \prm {skewchar} kern used by \TEX82. The \type {top_accent} value represents a vertical line somewhere in the accentee. The accent will be shifted horizontally such that its own \type @@ -861,7 +1089,7 @@ accentee. The accent will be shifted horizontally such that its own \type followed by its italic correction is used instead. The vertical placement of a top accent depends on the \type {x_height} of the -font of the accentee (as explained in the \TEX book), but if value that turns out +font of the accentee (as explained in the \TEX book), but if a value turns out to be zero and the font had a \type {MathConstants} table, then \type {AccentBaseHeight} is used instead. @@ -870,28 +1098,30 @@ correction takes place. Possible locations are \type {top}, \type {bottom}, \type {both} and \type {center}. When no location is given \type {top} is assumed. An additional -parameter \type {fraction} can be specified followed by a number; a value of for +parameter \nod {fraction} can be specified followed by a number; a value of for instance 1200 means that the criterium is 1.2 times the width of the nucleus. The fraction only applies to the stepwise selected shapes and is mostly meant for the \type {overlay} location. It also works for the other locations but then it concerns the width. -\section{Math root extension} +\subsection{Radical extensions} + +\topicindex {math+radicals} -The new primitive \type {\Uroot} allows the construction of a radical noad -including a degree field. Its syntax is an extension of \type {\Uradical}: +The new primitive \lpr {Uroot} allows the construction of a radical noad +including a degree field. Its syntax is an extension of \lpr {Uradical}: \starttyping \Uradical <fam integer> <char integer> <radicand> \Uroot <fam integer> <char integer> <degree> <radicand> \stoptyping -The placement of the degree is controlled by the math parameters \type -{\Umathradicaldegreebefore}, \type {\Umathradicaldegreeafter}, and \type -{\Umathradicaldegreeraise}. The degree will be typeset in \type -{\scriptscriptstyle}. +The placement of the degree is controlled by the math parameters \lpr +{Umathradicaldegreebefore}, \lpr {Umathradicaldegreeafter}, and \lpr +{Umathradicaldegreeraise}. The degree will be typeset in \prm +{scriptscriptstyle}. -\section{Math kerning in super- and subscripts} +\subsection{Super- and subscripts} The character fields in a \LUA|-|loaded \OPENTYPE\ math font can have a \quote {mathkern} table. The format of this table is the same as the \quote {mathkern} @@ -950,11 +1180,15 @@ next higher height and kern pair, or the highest one in the character (if there value high enough in the character), or simply zero (if the character has no math kern pairs at all). 
-\section{Scripts on horizontally extensible items like arrows} +\subsection{Scripts on extensibles} -The primitives \type {\Uunderdelimiter} and \type {\Uoverdelimiter} allow the +\topicindex {math+scripts} +\topicindex {math+delimiters} +\topicindex {math+extensibles} + +The primitives \lpr {Uunderdelimiter} and \lpr {Uoverdelimiter} allow the placement of a subscript or superscript on an automatically extensible item and -\type {\Udelimiterunder} and \type {\Udelimiterover} allow the placement of an +\lpr {Udelimiterunder} and \lpr {Udelimiterover} allow the placement of an automatically extensible item as a subscript or superscript on a nucleus. The input: @@ -972,18 +1206,18 @@ $\Udelimiterunder 0 "2194 {\hbox{\strut delimiterunder}}$ \blank \startnarrower \getbuffer \stopnarrower \blank -The vertical placements are controlled by \type {\Umathunderdelimiterbgap}, \type -{\Umathunderdelimitervgap}, \type {\Umathoverdelimiterbgap}, and \type -{\Umathoverdelimitervgap} in a similar way as limit placements on large operators. -The superscript in \type {\Uoverdelimiter} is typeset in a suitable scripted style, -the subscript in \type {\Uunderdelimiter} is cramped as well. +The vertical placements are controlled by \lpr {Umathunderdelimiterbgap}, \lpr +{Umathunderdelimitervgap}, \lpr {Umathoverdelimiterbgap}, and \lpr +{Umathoverdelimitervgap} in a similar way as limit placements on large operators. +The superscript in \lpr {Uoverdelimiter} is typeset in a suitable scripted style, +the subscript in \lpr {Uunderdelimiter} is cramped as well. These primitives accepts an option \type {width} specification. When used the also optional keywords \type {left}, \type {middle} and \type {right} will determine what happens when a requested size can't be met (which can happen when we step to successive larger variants). -An extra primitive \type {\Uhextensible} is available that can be used like this: +An extra primitive \lpr {Uhextensible} is available that can be used like this: \startbuffer $\Uhextensible width 10cm 0 "2194$ @@ -1008,38 +1242,11 @@ $\Uhextensible width 1pt middle 0 "2194$ font metrics are involved we have a different code path for traditional fonts end \OPENTYPE\ fonts. -\section {Extracting values} - -You can extract the components of a math character. Say that we have defined: +\subsection{Fractions} -\starttyping -\Umathcode 1 2 3 4 -\stoptyping - -then - -\starttyping -[\Umathcharclass1] [\Umathcharfam1] [\Umathcharslot1] -\stoptyping +\topicindex {math+fractions} -will return: - -\starttyping -[2] [3] [4] -\stoptyping - -These commands are provides as convenience. Before they came available you could -do the following: - -\starttyping -\def\Umathcharclass{\directlua{tex.print(tex.getmathcode(token.scan_int())[1])}} -\def\Umathcharfam {\directlua{tex.print(tex.getmathcode(token.scan_int())[2])}} -\def\Umathcharslot {\directlua{tex.print(tex.getmathcode(token.scan_int())[3])}} -\stoptyping - -\section{fractions} - -The \type {\abovewithdelims} command accepts a keyword \type {exact}. When issued +The \prm {abovewithdelims} command accepts a keyword \type {exact}. When issued the extra space relative to the rule thickness is not added. One can of course use the \type {\Umathfraction..gap} commands to influence the spacing. Also the rule is still positioned around the math axis. @@ -1053,7 +1260,7 @@ vertical gap for skewed fractions. Of course some guessing is needed in order to implement something that uses them. 
And so we now provide a primitive similar to the other fraction related ones but with a few options so that one can influence the rendering. Of course a user can also mess around a bit with the parameters -\type {\Umathskewedfractionhgap} and \type {\Umathskewedfractionvgap}. +\lpr {Umathskewedfractionhgap} and \lpr {Umathskewedfractionvgap}. The syntax used here is: @@ -1064,7 +1271,7 @@ The syntax used here is: where the options can be \type {noaxis} and \type {exact}. By default we add half the axis to the shifts and by default we zero the width of the middle character. -For Latin Modern The result looks as follows: +For Latin Modern the result looks as follows: \def\ShowA#1#2#3{$x + { {#1} \Uskewed / #3 {#2} } + x$} \def\ShowB#1#2#3{$x + { {#1} \Uskewedwithdelims / () #3 {#2} } + x$} @@ -1099,7 +1306,85 @@ For Latin Modern The result looks as follows: \stoptabulate \stop -\section {Last lines} +\subsection {Delimiters: \type{\Uleft}, \prm {Umiddle} and \prm {Uright}} + +\topicindex {math+delimiters} + +Normally you will force delimiters to certain sizes by putting an empty box or +rule next to it. The resulting delimiter will either be a character from the +stepwise size range or an extensible. The latter can be quite differently +positioned than the characters as it depends on the fit as well as the fact if +the used characters in the font have depth or height. Commands like (plain \TEX +s) \type {\big} need use this feature. In \LUATEX\ we provide a bit more control +by three variants that support optional parameters \type {height}, \type {depth} +and \type {axis}. The following example uses this: + +\startbuffer +\Uleft height 30pt depth 10pt \Udelimiter "0 "0 "000028 +\quad x\quad +\Umiddle height 40pt depth 15pt \Udelimiter "0 "0 "002016 +\quad x\quad +\Uright height 30pt depth 10pt \Udelimiter "0 "0 "000029 +\quad \quad \quad +\Uleft height 30pt depth 10pt axis \Udelimiter "0 "0 "000028 +\quad x\quad +\Umiddle height 40pt depth 15pt axis \Udelimiter "0 "0 "002016 +\quad x\quad +\Uright height 30pt depth 10pt axis \Udelimiter "0 "0 "000029 +\stopbuffer + +\typebuffer + +\startlinecorrection +\ruledhbox{\mathematics{\getbuffer}} +\stoplinecorrection + +The keyword \type {exact} can be used as directive that the real dimensions +should be applied when the criteria can't be met which can happen when we're +still stepping through the successively larger variants. When no dimensions are +given the \type {noaxis} command can be used to prevent shifting over the axis. + +You can influence the final class with the keyword \type {class} which will +influence the spacing. The numbers are the same as for character classes. + +\stopsection + +\startsection[title={Extracting values}] + +\subsection{Codes} + +\topicindex {math+codes} + +You can extract the components of a math character. Say that we have defined: + +\starttyping +\Umathcode 1 2 3 4 +\stoptyping + +then + +\starttyping +[\Umathcharclass1] [\Umathcharfam1] [\Umathcharslot1] +\stoptyping + +will return: + +\starttyping +[2] [3] [4] +\stoptyping + +These commands are provides as convenience. 
Before they come available you could +do the following: + +\starttyping +\def\Umathcharclass{\directlua{tex.print(tex.getmathcode(token.scan_int())[1])}} +\def\Umathcharfam {\directlua{tex.print(tex.getmathcode(token.scan_int())[2])}} +\def\Umathcharslot {\directlua{tex.print(tex.getmathcode(token.scan_int())[3])}} +\stoptyping + +\subsection {Last lines} + +\topicindex {math+last line} There is a new primitive to control the overshoot in the calculation of the previous line in mid|-|paragraph display math. The default value is 2 times @@ -1121,32 +1406,41 @@ get the length of the last line, the following will often work too: \relax} \stoptyping -\section {Other Math changes} +\stopsection + +\startsection[title={Math mode}] + +\subsection {Verbose versions of single|-|character math commands} -\subsection {Verbose versions of single-character math commands} +\topicindex {math+styles} \LUATEX\ defines six new primitives that have the same function as \type {^}, \type {_}, \type {$}, and \type {$$}: \starttabulate[|l|l|] -\BC primitive \BC explanation \NC \NR -\NC \type {\Usuperscript} \NC Duplicates the functionality of \type {^} \NC \NR -\NC \type {\Usubscript} \NC Duplicates the functionality of \type {_} \NC \NR -\NC \type {\Ustartmath} \NC Duplicates the functionality of \type {$}, % $ +\DB primitive \BC explanation \NC \NR +\TB +\NC \lpr {Usuperscript} \NC duplicates the functionality of \type {^} \NC \NR +\NC \lpr {Usubscript} \NC duplicates the functionality of \type {_} \NC \NR +\NC \lpr {Ustartmath} \NC duplicates the functionality of \type {$}, % $ when used in non-math mode. \NC \NR -\NC \type {\Ustopmath} \NC Duplicates the functionality of \type {$}, % $ +\NC \lpr {Ustopmath} \NC duplicates the functionality of \type {$}, % $ when used in inline math mode. \NC \NR -\NC \type {\Ustartdisplaymath} \NC Duplicates the functionality of \type {$$}, % $$ +\NC \lpr {Ustartdisplaymath} \NC duplicates the functionality of \type {$$}, % $$ when used in non-math mode. \NC \NR -\NC \type {\Ustopdisplaymath} \NC Duplicates the functionality of \type {$$}, % $$ +\NC \lpr {Ustopdisplaymath} \NC duplicates the functionality of \type {$$}, % $$ when used in display math mode. \NC \NR +\LL \stoptabulate -The \type {\Ustopmath} and \type {\Ustopdisplaymath} primitives check if the current +The \lpr {Ustopmath} and \lpr {Ustopdisplaymath} primitives check if the current math mode is the correct one (inline vs.\ displayed), but you can freely intermix the four mathon|/|mathoff commands with explicit dollar sign(s). -\subsection{Script commands \type {\Unosuperscript} and \type {\Unosubscript}} +\subsection{Script commands \lpr {Unosuperscript} and \lpr {Unosubscript}} + +\topicindex {math+styles} +\topicindex {math+scripts} These two commands result in super- and subscripts but with the current style (at the time of rendering). So, @@ -1164,71 +1458,17 @@ $ results in \inlinebuffer[script]. +\subsection{Allowed math commands in non|-|math modes} -\subsection{Allowed math commands in non-math modes} - -The commands \type {\mathchar}, and \type {\Umathchar} and control sequences that -are the result of \type {\mathchardef} or \type {\Umathchardef} are also -acceptable in the horizontal and vertical modes. In those cases, the \type -{\textfont} from the requested math family is used. - -\section{Math surrounding skips} - -Inline math is surrounded by (optional) \type {\mathsurround} spacing but that is fixed -dimension. There is now an additional parameter \type {\mathsurroundskip}. 
When set to a -non|-|zero value (or zero with some stretch or shrink) this parameter will replace -\type {\mathsurround}. By using an additional parameter instead of changing the nature -of \type {\mathsurround}, we can remain compatible. In the meantime a bit more -control has been added via \type {\mathsurroundmode}. This directive can take 6 values -with zero being the default behaviour. - -\start +\topicindex {math+text} +\topicindex {text+math} -\def\OneLiner#1#2% - {\NC \type{#1} - \NC \dontleavehmode\inframed[align=normal,offset=0pt,frame=off]{\mathsurroundmode#1\relax\hsize 100pt x$x$x} - \NC \dontleavehmode\inframed[align=normal,offset=0pt,frame=off]{\mathsurroundmode#1\relax\hsize 100pt x $x$ x} - \NC #2 - \NC \NR} +The commands \prm {mathchar}, and \lpr {Umathchar} and control sequences that are +the result of \prm {mathchardef} or \lpr {Umathchardef} are also acceptable in +the horizontal and vertical modes. In those cases, the \prm {textfont} from the +requested math family is used. -\startbuffer -\mathsurround 10pt -\mathsurroundskip20pt -\stopbuffer - -\typebuffer \getbuffer - -\starttabulate[|c|c|c|pl|] -\HL -\BC mode \BC \type {x$x$x} \BC \type {x $x$ x} \BC effect \NC \NR -\HL -\OneLiner{0}{obey \type {\mathsurround} when \type {\mathsurroundskip} is 0pt} -\OneLiner{1}{only add skip to the left} -\OneLiner{2}{only add skip to the right} -\OneLiner{3}{add skip to the left and right} -\OneLiner{4}{ignore the skip setting, obey \type {\mathsurround}} -\OneLiner{5}{disable all spacing around math} -\OneLiner{6}{only apply \type {\mathsurroundskip} when also spacing} -\OneLiner{7}{only apply \type {\mathsurroundskip} when no spacing} -\HL -\stoptabulate - -\stop - -Method six omits the surround glue when there is (x)spacing glue present while -method seven does the opposite, the glue is only applied when there is (x)space -glue present too. Anything more fancy, like checking the begining or end of a -paragraph (or edges of a box) would not be robust anyway. If you want that you -can write a callback that runs over a list and analyzes a paragraph. Actually, in -that case you could also inject glue (or set the properties of a math node) -explicitly. So, these modes are in practice mostly useful for special purposes -and experiments (they originate in a tracker item). Keep in mind that this glue -is part of the math node and not always treated as normal glue: it travels with -the begin and end math nodes. Also, method 6 and 7 will zero the skip related -fields in a node when applicable in the first occasion that checks them -(linebreaking or packaging). - -% \section{Math todo} +% \startsection[title={Math todo}] % % The following items are still todo. % @@ -1249,194 +1489,158 @@ fields in a node when applicable in the first occasion that checks them % Support for multi|-|line displays using \MATHML\ style alignment points. % \stopitem % \stopitemize +% +% \stopsection -\subsection {Delimiters: \type{\Uleft}, \type {\Umiddle} and \type {\Uright}} - -Normally you will force delimiters to certain sizes by putting an empty box or -rule next to it. The resulting delimiter will either be a character from the -stepwise size range or an extensible. The latter can be quite differently -positioned that the characters as it depends on the fit as well as the fact if -the used characters in the font have depth or height. Commands like (plain \TEX -s) \type {\big} need use this feature. 
In \LUATEX\ we provide a bit more control -by three variants that supporting optional parameters \type {height}, \type -{depth} and \type {axis}. The following example uses this: - -\startbuffer -\Uleft height 30pt depth 10pt \Udelimiter "0 "0 "000028 -\quad x\quad -\Umiddle height 40pt depth 15pt \Udelimiter "0 "0 "002016 -\quad x\quad -\Uright height 30pt depth 10pt \Udelimiter "0 "0 "000029 -\quad \quad \quad -\Uleft height 30pt depth 10pt axis \Udelimiter "0 "0 "000028 -\quad x\quad -\Umiddle height 40pt depth 15pt axis \Udelimiter "0 "0 "002016 -\quad x\quad -\Uright height 30pt depth 10pt axis \Udelimiter "0 "0 "000029 -\stopbuffer - -\typebuffer - -\startlinecorrection -\ruledhbox{\mathematics{\getbuffer}} -\stoplinecorrection - -The keyword \type {exact} can be used as directive that the real dimensions -should be applied when the criteria can't be met which can happen when we're -still stepping through the successively larger variants. When no dimensions are -given the \type {noaxis} command can be used to prevent shifting over the axis. +\stopsection -You can influence the final class with the keyword \type {class} which will -influence the spacing. The numbers are the same as for character classes. +\startsection[title={Goodies}] -\subsection{Fixed scripts} +\subsection {Flattening: \lpr {mathflattenmode}} -We have three parameters that are used for this fixed anchoring: +\topicindex {math+flattening} -\starttabulate[|l|l|] -\NC $d$ \NC \type {\Umathsubshiftdown} \NC \NR -\NC $u$ \NC \type {\Umathsupshiftup} \NC \NR -\NC $s$ \NC \type {\Umathsubsupshiftdown} \NC \NR -\stoptabulate +The \TEX\ math engine collapses \type {ord} noads without sub- and superscripts +and a character as nucleus. and which has the side effect that in \OPENTYPE\ mode +italic corrections are applied (given that they are enabled). -When we set \type {\mathscriptsmode} to a value other than zero these are used -for calculating fixed positions. This is something that is needed for instance -for chemistry. You can manipulate the mentioned variables to achive different -effects. +\startbuffer[sample] +\switchtobodyfont[modern] +$V \mathbin{\mathbin{v}} V$\par +$V \mathord{\mathord{v}} V$\par +\stopbuffer -\def\SampleMath#1% - {$\mathscriptsmode#1\mathupright CH_2 + CH^+_2 + CH^2_2$} +\typebuffer[sample] -\starttabulate[|c|c|c|l|] -\BC mode \BC down \BC up \BC \NC \NR -\NC 0 \NC dynamic \NC dynamic \NC \SampleMath{0} \NC \NR -\NC 1 \NC $d$ \NC $u$ \NC \SampleMath{1} \NC \NR -\NC 2 \NC $s$ \NC $u$ \NC \SampleMath{2} \NC \NR -\NC 3 \NC $s$ \NC $u + s - d$ \NC \SampleMath{3} \NC \NR -\NC 4 \NC $d + (s-d)/2$ \NC $u + (s-d)/2$ \NC \SampleMath{4} \NC \NR -\NC 5 \NC $d$ \NC $u + s - d$ \NC \SampleMath{5} \NC \NR -\stoptabulate +This renders as: -The value of this parameter obeys grouping but applies to the whole current -formula. +\blank \start \mathflattenmode\plusone \getbuffer[sample] \stop \blank -% if needed we can put the value in stylenodes but maybe more should go there +When we set \lpr {mathflattenmode} to 31 we get: -\subsection{Penalties: \type {\mathpenaltiesmode}} +\blank \start \mathflattenmode\numexpr1+2+4+8+16\relax \getbuffer[sample] \stop \blank -Only in inline math penalties will be added in a math list. You can force -penalties (also in display math) by setting: +When you see no difference, then the font probably has the proper character +dimensions and no italic correction is needed. For Latin Modern (at least till +2018) there was a visual difference. 
In that respect this parameter is not always +needed unless of course you want efficient math lists anyway. -\starttyping -\mathpenaltiesmode = 1 -\stoptyping - -This primnitive is not really needed in \LUATEX\ because you can use the callback -\type {mlist_to_hlist} to force penalties by just calling the regular routine -with forced penalties. However, as part of opening up and control this primitive -makes sense. As a bonus we also provide two extra penalties: +You can influence flattening by adding the appropriate number to the value of the +mode parameter. The default value is~1. -\starttyping -\prebinoppenalty = -100 % example value -\prerelpenalty = 900 % example value -\stoptyping +\starttabulate[|Tc|c|] +\DB mode \BC class \NC \NR +\TB +\NC 1 \NC ord \NC \NR +\NC 2 \NC bin \NC \NR +\NC 4 \NC rel \NC \NR +\NC 8 \NC punct \NC \NR +\NC 16 \NC inner \NC \NR +\LL +\stoptabulate -They default to inifinite which signals that they don't need to be inserted. When -set they are injected before a binop or rel noad. This is an experimental feature. +\subsection {Less Tracing} -\subsection {Tracing} +\topicindex {math+tracing} Because there are quite some math related parameters and values, it is possible to limit tracing. Only when \type {tracingassigns} and|/|or \type {tracingrestores} are set to~2 or more they will be traced. -\subsection {Math options} +\subsection {Math options with \lpr {mathoption}} The logic in the math engine is rather complex and there are often no universal solutions (read: what works out well for one font, fails for another). Therefore -some variations in the implementation will be driven by options for which a new -primitive \type {\mathoption} has been introduced (so that we don't end up with -many new commands). The approach of options also permits us to see what effect a -specific solution has. - -\subsubsection {\type {\mathoption old}} +some variations in the implementation are driven by parameters (modes). In +addition there is a new primitive \lpr {mathoption} which will be used for +testing. Don't rely on any option to be there in a production version as they are +meant for development. This option was introduced for testing purposes when the math engine got split code paths and it forces the engine to treat new fonts as old ones with respect to italic correction etc. There are no guarantees given with respect to the final -result and unexpected side effects are not seens as bugs as they relate to font -properties. +result and unexpected side effects are not seen as bugs as they relate to font +properties. Ther eis currently only one option: \startbuffer \mathoption old 1 \stopbuffer The \type {oldmath} boolean flag in the \LUA\ font table is the official way to -force old treatment as it's bound to fonts. - -\subsubsection {\type {\mathoption noitaliccompensation}} - -This option compensates placement for characters with a built|-|in italic -correction. 
- -\startbuffer -{\showboxes\int}\quad -{\showboxes\int_{|}^{|}}\quad -{\showboxes\int\limits_{|}^{|}} -\stopbuffer - -\typebuffer - -Gives (with computer modern that has such italics): - -\startlinecorrection[blank] - \switchtobodyfont[modern] - \startcombination[nx=2,ny=2,distance=5em] - {\mathoption noitaliccompensation 0\relax \mathematics{\getbuffer}} - {\nohyphens\type{0:inline}} - {\mathoption noitaliccompensation 0\relax \mathematics{\displaymath\getbuffer}} - {\nohyphens\type{0:display}} - {\mathoption noitaliccompensation 1\relax \mathematics{\getbuffer}} - {\nohyphens\type{1:inline}} - {\mathoption noitaliccompensation 1\relax \mathematics{\displaymath\getbuffer}} - {\nohyphens\type{1:display}} - \stopcombination -\stoplinecorrection - -\subsubsection {\type {\mathoption nocharitalic}} +force old treatment as it's bound to fonts. Like with all options we may +temporarily introduce with this command this feature is not meant for production. -When two characters follow each other italic correction can interfere. The -following example shows what this option does: - -\startbuffer -\catcode"1D443=11 -\catcode"1D444=11 -\catcode"1D445=11 -P( PP PQR -\stopbuffer - -\typebuffer - -Gives (with computer modern that has such italics): - -\startlinecorrection[blank] - \switchtobodyfont[modern] - \startcombination[nx=2,ny=2,distance=5em] - {\mathoption nocharitalic 0\relax \mathematics{\getbuffer}} - {\nohyphens\type{0:inline}} - {\mathoption nocharitalic 0\relax \mathematics{\displaymath\getbuffer}} - {\nohyphens\type{0:display}} - {\mathoption nocharitalic 1\relax \mathematics{\getbuffer}} - {\nohyphens\type{1:inline}} - {\mathoption nocharitalic 1\relax \mathematics{\displaymath\getbuffer}} - {\nohyphens\type{1:display}} - \stopcombination -\stoplinecorrection - -\subsubsection {\type {\mathoption useoldfractionscaling}} +% % obsolete: +% +% \subsubsection {\type {\mathoption noitaliccompensation}} +% +% This option compensates placement for characters with a built|-|in italic +% correction. +% +% \startbuffer +% {\showboxes\int}\quad +% {\showboxes\int_{|}^{|}}\quad +% {\showboxes\int\limits_{|}^{|}} +% \stopbuffer +% +% \typebuffer +% +% Gives (with computer modern that has such italics): +% +% \startlinecorrection[blank] +% \switchtobodyfont[modern] +% \startcombination[nx=2,ny=2,distance=5em] +% {\mathoption noitaliccompensation 0\relax \mathematics{\getbuffer}} +% {\nohyphens\type{0:inline}} +% {\mathoption noitaliccompensation 0\relax \mathematics{\displaymath\getbuffer}} +% {\nohyphens\type{0:display}} +% {\mathoption noitaliccompensation 1\relax \mathematics{\getbuffer}} +% {\nohyphens\type{1:inline}} +% {\mathoption noitaliccompensation 1\relax \mathematics{\displaymath\getbuffer}} +% {\nohyphens\type{1:display}} +% \stopcombination +% \stoplinecorrection + +% % obsolete: +% +% \subsubsection {\type {\mathoption nocharitalic}} +% +% When two characters follow each other italic correction can interfere. 
The +% following example shows what this option does: +% +% \startbuffer +% \catcode"1D443=11 +% \catcode"1D444=11 +% \catcode"1D445=11 +% P( PP PQR +% \stopbuffer +% +% \typebuffer +% +% Gives (with computer modern that has such italics): +% +% \startlinecorrection[blank] +% \switchtobodyfont[modern] +% \startcombination[nx=2,ny=2,distance=5em] +% {\mathoption nocharitalic 0\relax \mathematics{\getbuffer}} +% {\nohyphens\type{0:inline}} +% {\mathoption nocharitalic 0\relax \mathematics{\displaymath\getbuffer}} +% {\nohyphens\type{0:display}} +% {\mathoption nocharitalic 1\relax \mathematics{\getbuffer}} +% {\nohyphens\type{1:inline}} +% {\mathoption nocharitalic 1\relax \mathematics{\displaymath\getbuffer}} +% {\nohyphens\type{1:display}} +% \stopcombination +% \stoplinecorrection + +% % obsolete: +% +% \subsubsection {\type {\mathoption useoldfractionscaling}} +% +% This option has been introduced as solution for tracker item 604 for fuzzy cases +% around either or not present fraction related settings for new fonts. -This option has been introduced as solution for tracker item 604 for fuzzy cases -around either or not present fraction related settings for new fonts. +\stopsection \stopchapter diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-modifications.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-modifications.tex index b5d8f2750c0..50d23fb1b78 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-modifications.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-modifications.tex @@ -1,7 +1,6 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-modifications @@ -11,6 +10,9 @@ \startsubsection[title=The need for change] +\topicindex {engines} +\topicindex {history} + The first version of \LUATEX\ only had a few extra primitives and it was largely the same as \PDFTEX. Then we merged substantial parts of \ALEPH\ into the code and got more primitives. When we got more stable the decision was made to clean @@ -22,13 +24,15 @@ most of the adapted ones. Besides the expected changes caused by new functionality, there are a number of not|-|so|-|expected changes. These are sometimes a side|-|effect of a new -(conflicting) feature, or, more often than not, a change neccessary to clean up +(conflicting) feature, or, more often than not, a change necessary to clean up the internal interfaces. These will also be mentioned. \stopsubsection \startsubsection[title=Changes from \TEX\ 3.1415926] +\topicindex {\TEX} + Of course it all starts with traditional \TEX. Even if we started with \PDFTEX, most still comes from the original. But we divert a bit. @@ -38,7 +42,7 @@ most still comes from the original. But we divert a bit. The current code base is written in \CCODE, not \PASCAL. We use \CWEB\ when possible. As a consequence instead of one large file plus change files, we now have multiple files organized in categories like \type {tex}, \type - {pdf}, \type {lang}, \type {font}, \type {lua}, etc. There are some artefacts + {pdf}, \type {lang}, \type {font}, \type {lua}, etc. There are some artifacts of the conversion to \CCODE, but in due time we will clean up the source code and make sure that the documentation is done right. Many files are in the \CWEB\ format, but others, like those interfacing to \LUA, are \CCODE\ files. @@ -63,21 +67,21 @@ most still comes from the original. But we divert a bit. 
\stopitem \startitem - The upper limit to \type {\endlinechar} and \type {\newlinechar} is 127. + The upper limit to \prm {endlinechar} and \prm {newlinechar} is 127. \stopitem \startitem - Magnification (\type {\mag}) is only supported in \DVI\ output mode. You can + Magnification (\prm {mag}) is only supported in \DVI\ output mode. You can set this parameter and it even works with \type {true} units till you switch to \PDF\ output mode. When you use \PDF\ output you can best not touch the - \type {\mag} variable. This fuzzy behaviour is not much different from using + \prm {mag} variable. This fuzzy behaviour is not much different from using \PDF\ backend related functionality while eventually \DVI\ output is required. After the output mode has been frozen (normally that happens when the first page is shipped out) or when \PDF\ output is enabled, the \type {true} specification is ignored. When you preload a plain format adapted to - \LUATEX\ it can be that the \type {\mag} parameter already has been set. + \LUATEX\ it can be that the \prm {mag} parameter already has been set. \stopitem \stopitemize @@ -86,6 +90,8 @@ most still comes from the original. But we divert a bit. \startsubsection[title=Changes from \ETEX\ 2.2] +\topicindex {\ETEX} + Being the de factor standard extension of course we provide the \ETEX\ functionality, but with a few small adaptations. @@ -99,18 +105,19 @@ functionality, but with a few small adaptations. \startitem The \TEXXET\ extension is not present, so the primitives \type {\TeXXeTstate}, \type {\beginR}, \type {\beginL}, \type {\endR} and \type - {\endL} are missing. Instead we use the \OMEGA\ approach to directionality. + {\endL} are missing. Instead we used the \OMEGA/\ALEPH\ approach to + directionality as starting point. \stopitem \startitem - Some of the tracing information that is output by \ETEX's \type - {\tracingassigns} and \type {\tracingrestores} is not there. + Some of the tracing information that is output by \ETEX's \prm + {tracingassigns} and \prm {tracingrestores} is not there. \stopitem \startitem - Register management in \LUATEX\ uses the \ALEPH\ model, so the maximum value - is 65535 and the implementation uses a flat array instead of the mixed - flat|\&|sparse model from \ETEX. + Register management in \LUATEX\ uses the \OMEGA/\ALEPH\ model, so the maximum + value is 65535 and the implementation uses a flat array instead of the mixed + flat & sparse model from \ETEX. \stopitem \startitem @@ -127,6 +134,8 @@ functionality, but with a few small adaptations. \startsubsection[title=Changes from \PDFTEX\ 1.40] +\topicindex {\PDFTEX} + Because we want to produce \PDF\ the most natural starting point was the popular \PDFTEX\ program. We inherit the stable features, dropped most of the experimental code and promoted some functionality to core \LUATEX\ functionality @@ -134,76 +143,73 @@ which in turn triggered renaming primitives. For compatibility reasons we still refer to \type {\pdf...} commands but \LUATEX\ has a different backend interface. Instead of these primitives there are three -interfacing primitives: \type {\pdfextension}, \type {\pdfvariable} and -\type {\pdffeedback} that take keywords and optional further arguments. This way -we can extend the features when needed but don't need to adapt the core engine. -The front- and backend are decoupled as much as possible. 
+interfacing primitives: \lpr {pdfextension}, \lpr {pdfvariable} and \lpr +{pdffeedback} that take keywords and optional further arguments (below we will +still use the \tex {pdf} prefix names as reference). This way we can extend the +features when needed but don't need to adapt the core engine. The front- and +backend are decoupled as much as possible. \startitemize \startitem The (experimental) support for snap nodes has been removed, because it is much more natural to build this functionality on top of node processing and - attributes. The associated primitives that are now gone are: \type - {\pdfsnaprefpoint}, \type {\pdfsnapy}, and \type {\pdfsnapycomp}. + attributes. The associated primitives that are gone are: \orm + {pdfsnaprefpoint}, \orm {pdfsnapy}, and \orm {pdfsnapycomp}. \stopitem \startitem The (experimental) support for specialized spacing around nodes has also been - removed. The associated primitives that are now gone are: \type - {\pdfadjustinterwordglue}, \type {\pdfprependkern}, and \type - {\pdfappendkern}, as well as the five supporting primitives \type - {\knbscode}, \type {\stbscode}, \type {\shbscode}, \type {\knbccode}, and - \type {\knaccode}. + removed. The associated primitives that are gone are: \orm + {pdfadjustinterwordglue}, \orm {pdfprependkern}, and \orm {pdfappendkern}, as + well as the five supporting primitives \orm {knbscode}, \orm {stbscode}, \orm + {shbscode}, \orm {knbccode}, and \orm {knaccode}. \stopitem \startitem A number of \quote {\PDFTEX\ primitives} have been removed as they can be - implemented using \LUA: - - \start \raggedright - \type {\pdfelapsedtime}, \type {\pdfescapehex}, \type {\pdfescapename}, \type - {\pdfescapestring}, \type {\pdffiledump}, \type {\pdffilemoddate}, \type - {\pdffilesize}, \type {\pdfforcepagebox}, \type {\pdflastmatch}, \type - {\pdfmatch}, \type {\pdfmdfivesum}, \type {\pdfmovechars}, \type - {\pdfoptionalwaysusepdfpagebox}, \type {\pdfoptionpdfinclusionerrorlevel}, - \type {\pdfresettimer}, \type {\pdfshellescape}, \type {\pdfstrcmp} and \type - {\pdfunescapehex} - \par \stop + implemented using \LUA: \orm {pdfelapsedtime}, \orm {pdfescapehex}, \orm + {pdfescapename}, \orm {pdfescapestring}, \orm {pdffiledump}, \orm + {pdffilemoddate}, \orm {pdffilesize}, \orm {pdfforcepagebox}, \orm + {pdflastmatch}, \orm {pdfmatch}, \orm {pdfmdfivesum}, \orm {pdfmovechars}, + \orm {pdfoptionalwaysusepdfpagebox}, \orm {pdfoptionpdfinclusionerrorlevel}, + \orm {pdfresettimer}, \orm {pdfshellescape}, \orm {pdfstrcmp} and \orm + {pdfunescapehex}. \stopitem \startitem - The version related primitives \type {\pdftexbanner}, \type {\pdftexversion} - and \type {\pdftexrevision} are no longer present as there is no longer a + The version related primitives \orm {pdftexbanner}, \orm {pdftexversion} + and \orm {pdftexrevision} are no longer present as there is no longer a relationship with \PDFTEX\ development. \stopitem \startitem The experimental snapper mechanism has been removed and therefore also the - primitives: - - \start \raggedright - \type {\pdfignoreddimen}, \type {\pdffirstlineheight}, \type - {\pdfeachlineheight}, \type {\pdfeachlinedepth} and \type - {\pdflastlinedepth} - \par \stop + primitives \orm {pdfignoreddimen}, \orm {pdffirstlineheight}, \orm + {pdfeachlineheight}, \orm {pdfeachlinedepth} and \orm {pdflastlinedepth}. \stopitem \startitem - The experimental primitives \type {\primitive}, \type {\ifprimitive}, \type - {\ifabsnum} and \type {\ifabsdim} are promoted to core primitives. 
The \type + The experimental primitives \lpr {primitive}, \lpr {ifprimitive}, \lpr + {ifabsnum} and \lpr {ifabsdim} are promoted to core primitives. The \type {\pdf*} prefixed originals are not available. \stopitem \startitem - The \PNG\ transparency fix from 1.40.6 is not applied as high|-|level support - is pending. Because \LUATEX\ has a different subsystem for managing images, - more diversion from its ancestor happened in the meantime. + Because \LUATEX\ has a different subsystem for managing images, more + diversion from its ancestor happened in the meantime. We don't adapt to + changes in \PDFTEX. +\stopitem + +\startitem + Two extra token lists are provided, \orm {pdfxformresources} and \orm + {pdfxformattr}, as an alternative to \orm {pdfxform} keywords. \stopitem \startitem - Two extra token lists are provides, \type {\pdfxformresources} and \type - {\pdfxformattr}, as an alternative to \type {\pdfxform} keywords. + Image specifications also support \type {visiblefilename}, \type + {userpassword} and \type {ownerpassword}. The password options are only + relevant for encrypted \PDF\ files. \stopitem \startitem @@ -214,27 +220,27 @@ The front- and backend are decoupled as much as possible. \stopitem \startitem - The primitives \type {\pdfpagewidth} and \type {\pdfpageheight} have been removed - because \type {\pagewidth} and \type {\pageheight} have that purpose. + The primitives \orm {pdfpagewidth} and \orm {pdfpageheight} have been removed + because \lpr {pagewidth} and \lpr {pageheight} have that purpose. \stopitem \startitem - The primitives \type {\pdfnormaldeviate}, \type {\pdfuniformdeviate}, \type - {\pdfsetrandomseed} and \type {\pdfrandomseed} have been promoted to core + The primitives \orm {pdfnormaldeviate}, \orm {pdfuniformdeviate}, \orm + {pdfsetrandomseed} and \orm {pdfrandomseed} have been promoted to core primitives without \type {pdf} prefix so the original commands are no longer recognized. \stopitem \startitem - The primitives \type {\ifincsname}, \type {\expanded} and \type {\quitvmode} + The primitives \lpr {ifincsname}, \lpr {expanded} and \lpr {quitvmode} are now core primitives. \stopitem \startitem As the hz and protrusion mechanism are part of the core the related - primitives \type {\lpcode}, \type {\rpcode}, \type {\efcode}, \type - {\leftmarginkern}, \type {\rightmarginkern} are promoted to core primitives. The - two commands \type {\protrudechars} and \type {\adjustspacing} replace their + primitives \lpr {lpcode}, \lpr {rpcode}, \lpr {efcode}, \lpr + {leftmarginkern}, \lpr {rightmarginkern} are promoted to core primitives. The + two commands \lpr {protrudechars} and \lpr {adjustspacing} replace their prefixed with \type {\pdf} originals. \stopitem @@ -245,36 +251,36 @@ The front- and backend are decoupled as much as possible. \stopitem \startitem - When \type {\adjustspacing} has value~2, hz optimization will be applied to + When \lpr {adjustspacing} has value~2, hz optimization will be applied to glyphs and kerns. When the value is~3, only glyphs will be treated. A value smaller than~2 disables this feature. \stopitem \startitem - The \type {\tagcode} primitive is promoted to core primitive. + The \lpr {tagcode} primitive is promoted to core primitive. \stopitem \startitem - The \type {\letterspacefont} feature is now part of the core but will not be + The \lpr {letterspacefont} feature is now part of the core but will not be changed (improved). We just provide it for legacy use. 
\stopitem \startitem - The \type {\pdfnoligatures} primitive is now \type {\ignoreligaturesinfont}. + The \orm {pdfnoligatures} primitive is now \lpr {ignoreligaturesinfont}. \stopitem \startitem - The \type {\pdfcopyfont} primitive is now \type {\copyfont}. + The \orm {pdfcopyfont} primitive is now \lpr {copyfont}. \stopitem \startitem - The \type {\pdffontexpand} primitive is now \type {\expandglyphsinfont}. + The \orm {pdffontexpand} primitive is now \lpr {expandglyphsinfont}. \stopitem \startitem - Because position tracking is also available in \DVI\ mode the \type - {\savepos}, \type {\lastxpos} and \type {\lastypos} commands now replace - their \type {pdf} prefixed originals. + Because position tracking is also available in \DVI\ mode the \lpr {savepos}, + \lpr {lastxpos} and \lpr {lastypos} commands now replace their \type {pdf} + prefixed originals. \stopitem \startitem @@ -284,27 +290,31 @@ The front- and backend are decoupled as much as possible. \stopitem \startitem - The initializers \type {\pdfoutput} has been replaced by \type {\outputmode} and - \type {\pdfdraftmode} is now \type {\draftmode}. + The initializers \orm {pdfoutput} has been replaced by \lpr {outputmode} and + \orm {pdfdraftmode} is now \lpr {draftmode}. \stopitem \startitem - The pixel multiplier dimension \type {\pdfpxdimen} lots its prefix and is now calles - \type {\pxdimen}. + The pixel multiplier dimension \orm {pdfpxdimen} lost its prefix and is now + called \lpr {pxdimen}. \stopitem \startitem - An extra \type {\pdfimageaddfilename} option has been added that can be used to block - writing the filename to the \PDF\ file. + An extra \orm {pdfimageaddfilename} option has been added that can be used to + block writing the filename to the \PDF\ file. \stopitem \startitem - The primitive \type {\pdftracingfonts} is now \type {\tracingfonts} as it + The primitive \orm {pdftracingfonts} is now \lpr {tracingfonts} as it doesn't relate to the backend. \stopitem \startitem - The experimental primitive \type {\pdfinsertht} is kept as \type {\insertht}. + The experimental primitive \orm {pdfinsertht} is kept as \lpr {insertht}. +\stopitem + +\startitem + There is some more control over what metadata goes into the \PDF\ file. \stopitem \startitem @@ -318,25 +328,28 @@ The front- and backend are decoupled as much as possible. One change involves the so called xforms and ximages. In \PDFTEX\ these are implemented as so called whatsits. But contrary to other whatsits they have dimensions that need to be taken into account when for instance calculating -optimal line breaks. In \LUATEX\ these are now promoted to normal nodes, which -simplifies code that needs those dimensions. +optimal line breaks. In \LUATEX\ these are now promoted to a special type of rule +nodes, which simplifies code that needs those dimensions. Another reason for promotion is that these are useful concepts. Backends can -provide the ability to use content that has been rendered in several places, -and images are also common. For that reason we also changed the names: +provide the ability to use content that has been rendered in several places, and +images are also common. 
As already mentioned in \in {section} +[sec:imagedandforms], we now have: \starttabulate[|l|l|] -\BC new name \BC old name \NC \NR -\NC \type {\saveboxresource} \NC \type {\pdfxform} \NC \NR -\NC \type {\saveimageresource} \NC \type {\pdfximage} \NC \NR -\NC \type {\useboxresource} \NC \type {\pdfrefxform} \NC \NR -\NC \type {\useimageresource} \NC \type {\pdfrefximage} \NC \NR -\NC \type {\lastsavedboxresourceindex} \NC \type {\pdflastxform} \NC \NR -\NC \type {\lastsavedimageresourceindex} \NC \type {\pdflastximage} \NC \NR -\NC \type {\lastsavedimageresourcepages} \NC \type {\pdflastximagepages} \NC \NR +\DB \LUATEX \BC \PDFTEX \NC \NR +\TB +\NC \lpr {saveboxresource} \NC \orm {pdfxform} \NC \NR +\NC \lpr {saveimageresource} \NC \orm {pdfximage} \NC \NR +\NC \lpr {useboxresource} \NC \orm {pdfrefxform} \NC \NR +\NC \lpr {useimageresource} \NC \orm {pdfrefximage} \NC \NR +\NC \lpr {lastsavedboxresourceindex} \NC \orm {pdflastxform} \NC \NR +\NC \lpr {lastsavedimageresourceindex} \NC \orm {pdflastximage} \NC \NR +\NC \lpr {lastsavedimageresourcepages} \NC \orm {pdflastximagepages} \NC \NR +\LL \stoptabulate -There are a few \type {\pdffeedback} features that relate to this but these are +There are a few \lpr {pdffeedback} features that relate to this but these are typical backend specific ones. The index that gets returned is to be considered as \quote {just a number} and although it still has the same meaning (object related) as before, you should not depend on that. @@ -344,7 +357,7 @@ related) as before, you should not depend on that. The protrusion detection mechanism is enhanced a bit to enable a bit more complex situations. When protrusion characters are identified some nodes are skipped: -\startitemize[packed] +\startitemize[packed,columns,two] \startitem zero glue \stopitem \startitem penalties \stopitem \startitem empty discretionaries \stopitem @@ -374,6 +387,8 @@ instance content moved into the margin: \startsubsection[title=Changes from \ALEPH\ RC4] +\topicindex {\ALEPH} + Because we wanted proper directional typesetting the \ALEPH\ mechanisms looked most attractive. These are rather close to the ones provided by \OMEGA, so what we say next applies to both these programs. @@ -381,46 +396,40 @@ we say next applies to both these programs. \startitemize \startitem - The extended 16-bit math primitives (\type {\omathcode} etc.) have been + The extended 16-bit math primitives (\orm {omathcode} etc.) have been removed. \stopitem \startitem The \OCP\ processing has been removed completely and as a consequence, the - following primitives have been removed: - - \start \raggedright - \type {\ocp}, \type {\externalocp}, \type {\ocplist}, \type {\pushocplist}, - \type {\popocplist}, \type {\clearocplists}, \type {\addbeforeocplist}, \type - {\addafterocplist}, \type {\removebeforeocplist}, \type {\removeafterocplist} - and \type {\ocptracelevel} - \par \stop + following primitives have been removed: \orm {ocp}, \orm {externalocp}, \orm + {ocplist}, \orm {pushocplist}, \orm {popocplist}, \orm {clearocplists}, \orm + {addbeforeocplist}, \orm {addafterocplist}, \orm {removebeforeocplist}, \orm + {removeafterocplist} and \orm {ocptracelevel}. \stopitem \startitem \LUATEX\ only understands 4~of the 16~direction specifiers of \ALEPH: \type - {TLT} (latin), \type {TRT} (arabic), \type {RTT} (cjk), \type {LTL} - (mongolian). All other direction specifiers generate an error. + {TLT} (latin), \type {TRT} (arabic), \type {RTT} (cjk), \type {LTL} (mongolian). 
+ All other direction specifiers generate an error. In addition to a keyword + driven model we also provide an integer driven one. \stopitem \startitem The input translations from \ALEPH\ are not implemented, the related - primitives are not available: - - \start \raggedright - \type {\DefaultInputMode}, \type {\noDefaultInputMode}, \type {\noInputMode}, - \type {\InputMode}, \type {\DefaultOutputMode}, \type {\noDefaultOutputMode}, - \type {\noOutputMode}, \type {\OutputMode}, \type {\DefaultInputTranslation}, - \type {\noDefaultInputTranslation}, \type {\noInputTranslation}, \type - {\InputTranslation}, \type {\DefaultOutputTranslation}, \type - {\noDefaultOutputTranslation}, \type {\noOutputTranslation} and \type - {\OutputTranslation} - \par \stop + primitives are not available: \orm {DefaultInputMode}, \orm + {noDefaultInputMode}, \orm {noInputMode}, \orm {InputMode}, \orm + {DefaultOutputMode}, \orm {noDefaultOutputMode}, \orm {noOutputMode}, \orm + {OutputMode}, \orm {DefaultInputTranslation}, \orm + {noDefaultInputTranslation}, \orm {noInputTranslation}, \orm + {InputTranslation}, \orm {DefaultOutputTranslation}, \orm + {noDefaultOutputTranslation}, \orm {noOutputTranslation} and \orm + {OutputTranslation}. \stopitem \startitem - Several bugs have been fixed an confusing implementation details have been sorted - out. + Several bugs have been fixed and confusing implementation details have been + sorted out. \stopitem \startitem @@ -429,13 +438,13 @@ we say next applies to both these programs. \stopitem \startitem - The \type {^^} notation has been extended: after \type {^^^^} four hexadecimal - characters are expected and after \type {^^^^^^} six hexadecimal characters - have to be given. The original \TEX\ interpretation is still valid for the - \type {^^} case but the four and six variants do no backtracking, i.e.\ when - they are not followed by the right number of hexadecimal digits they issue an - error message. Because \type{^^^} is a normal \TEX\ case, we don't support the - odd number of \type {^^^^^} either. + The \type {^^} notation has been extended: after \type {^^^^} four + hexadecimal characters are expected and after \type {^^^^^^} six hexadecimal + characters have to be given. The original \TEX\ interpretation is still valid + for the \type {^^} case but the four and six variants do no backtracking, + i.e.\ when they are not followed by the right number of hexadecimal digits + they issue an error message. Because \type{^^^} is a normal \TEX\ case, we + don't support the odd number of \type {^^^^^} either. \stopitem \startitem @@ -449,9 +458,9 @@ we say next applies to both these programs. \stopitem \startitem - The page dimension related primitives \type {\pagewidth} and \type - {\pageheight} have been promoted to core primitives. The \type {\hoffset} and - \type {\voffset} primitives have been fixed. + The page dimension related primitives \lpr {pagewidth} and \lpr {pageheight} + have been promoted to core primitives. The \prm {hoffset} and \prm {voffset} + primitives have been fixed. \stopitem \startitem @@ -461,30 +470,35 @@ we say next applies to both these programs. \stopitem \startitem - The two dimension registers \type {\pagerightoffset} and \type - {\pagebottomoffset} are now core primitives. + The two dimension registers \lpr {pagerightoffset} and \lpr + {pagebottomoffset} are now core primitives. 
\stopitem \startitem - The direction related primitives \type {\pagedir}, \type {\bodydir}, \type - {\pardir}, \type {\textdir}, \type {\mathdir} and \type {\boxdir} are now - core primitives. + The direction related primitives \lpr {pagedir}, \lpr {bodydir}, \lpr + {pardir}, \lpr {textdir}, \lpr {mathdir} and \lpr {boxdir} are now core + primitives. \stopitem \startitem - The promotion of primitives to core primitives as well as the removed of all - others means that the initialization namespace \type {aleph} is gone. + The promotion of primitives to core primitives as well as removing of all + others means that the initialization namespace \type {aleph} that early + versions of \LUATEX\ provided is gone. \stopitem \stopitemize The above let's itself summarize as: we took the 32 bit aspects and much of the -directional mechanisms. +directional mechanisms and merged it into the \PDFTEX\ code base as starting +point for further development. Then we simplified directionality, fixed it and +opened it up. \stopsubsection \startsubsection[title=Changes from standard \WEBC] +\topicindex {\WEBC} + The compilation framework is \WEBC\ and we keep using that but without the \PASCAL\ to \CCODE\ step. This framework also provides some common features that deal with reading bytes from files and locating files in \TDS. This is what we do @@ -507,7 +521,7 @@ different: \stopitem \startitem - The \type {\openout} whatsits are not written to the log file. + The \prm {openout} whatsits are not written to the log file. \stopitem \startitem @@ -515,7 +529,8 @@ different: mode because \type {texmf.cnf} is not read: \type {shell-escape} is off (but that is not a problem because of \LUA's \type {os.execute}), and the paranoia checks on \type {openin} and \type {openout} do not happen. However, it is - easy for a \LUA\ script to do this itself by overloading \type {io.open}. + easy for a \LUA\ script to do this itself by overloading \type {io.open} and + alike. \stopitem \startitem @@ -528,17 +543,22 @@ different: \stopsection -\startsection[reference=backendprimitives,title=The backend primitives \type {\pdf*}] +\startsection[reference=backendprimitives,title=The backend primitives] + +\startsubsection[title={Less primitives}] + +\topicindex {backend} +\topicindex {\PDF+backend} In a previous section we mentioned that some \PDFTEX\ primitives were removed and others promoted to core \LUATEX\ primitives. That is only part of the story. In order to separate the backend specific primitives in de code these commands are now replaced by only a few. In traditional \TEX\ we only had the \DVI\ backend but now we have two: \DVI\ and \PDF. Additional functionality is implemented as -\quote {extensions} in \TEX speak. By separating more strickly we are able to -keep the core (fontend) clean and stable. If for some reason an extra backend -option is needed, it can be implemented without touching the core. The three -\PDF\ backend related primitives are +\quote {extensions} in \TEX\ speak. By separating more strickly we are able to +keep the core (frontend) clean and stable and isolate these extensions. If for +some reason an extra backend option is needed, it can be implemented without +touching the core. The three \PDF\ backend related primitives are: \starttyping \pdfextension command [specification] @@ -550,8 +570,12 @@ An extension triggers further parsing, depending on the command given. 
A variabl a (kind of) register and can be read and written, while a feedback is reporting something (as it comes from the backend it's normally a sequence of tokens). +\stopsubsection + +\startsubsection[title={\lpr{pdfextension}, \lpr {pdfvariable} and \lpr {pdffeedback}},reference=sec:pdfextensions] + In order for \LUATEX\ to be more than just \TEX\ you need to enable primitives. That -has already be the case right from the start. If you want the traditional \PDFTEX\ +has already been the case right from the start. If you want the traditional \PDFTEX\ primitives (for as far their functionality is still around) you now can do this: \starttyping @@ -604,6 +628,7 @@ The configuration related registers have become: \starttyping \edef\pdfcompresslevel {\pdfvariable compresslevel} \edef\pdfobjcompresslevel {\pdfvariable objcompresslevel} +\edef\pdfrecompress {\pdfvariable recompress} \edef\pdfdecimaldigits {\pdfvariable decimaldigits} \edef\pdfgamma {\pdfvariable gamma} \edef\pdfimageresolution {\pdfvariable imageresolution} @@ -618,6 +643,7 @@ The configuration related registers have become: \edef\pdfignoreunknownimages {\pdfvariable ignoreunknownimages} \edef\pdfgentounicode {\pdfvariable gentounicode} \edef\pdfomitcidset {\pdfvariable omitcidset} +\edef\pdfomitcharset {\pdfvariable omitcharset} \edef\pdfpagebox {\pdfvariable pagebox} \edef\pdfminorversion {\pdfvariable minorversion} \edef\pdfuniqueresname {\pdfvariable uniqueresname} @@ -657,100 +683,16 @@ macro:->[internal backend integer] macro:->[internal backend tokenlist] \stoptyping -The \type {\edef} can also be an \type {\def} but it's a bit more efficient -to expand the lookup related register beforehand. After that you can adapt -the defaults; these are: - -\starttyping -\pdfcompresslevel 9 -\pdfobjcompresslevel 1 % used: (0,9) -\pdfdecimaldigits 4 % used: (3,6) -\pdfgamma 1000 -\pdfimageresolution 71 -\pdfimageapplygamma 0 -\pdfimagegamma 2200 -\pdfimagehicolor 1 -\pdfimageaddfilename 1 -\pdfpkresolution 72 -\pdfpkfixeddpi 0 -\pdfinclusioncopyfonts 0 -\pdfinclusionerrorlevel 0 -\pdfignoreunknownimages 0 -\pdfgentounicode 0 -\pdfomitcidset 0 -\pdfpagebox 0 -\pdfminorversion 4 -\pdfuniqueresname 0 - -\pdfhorigin 1in -\pdfvorigin 1in -\pdflinkmargin 0pt -\pdfdestmargin 0pt -\pdfthreadmargin 0pt -\pdfxformmargin 0pt -\stoptyping - -If you also want some backward compatibility, you can add: - -\starttyping -\let\pdfpagewidth \pagewidth -\let\pdfpageheight \pageheight - -\let\pdfadjustspacing \adjustspacing -\let\pdfprotrudechars \protrudechars -\let\pdfnoligatures \ignoreligaturesinfont -\let\pdffontexpand \expandglyphsinfont -\let\pdfcopyfont \copyfont - -\let\pdfxform \saveboxresource -\let\pdflastxform \lastsavedboxresourceindex -\let\pdfrefxform \useboxresource - -\let\pdfximage \saveimageresource -\let\pdflastximage \lastsavedimageresourceindex -\let\pdflastximagepages\lastsavedimageresourcepages -\let\pdfrefximage \useimageresource - -\let\pdfsavepos \savepos -\let\pdflastxpos \lastxpos -\let\pdflastypos \lastypos - -\let\pdfoutput \outputmode -\let\pdfdraftmode \draftmode - -\let\pdfpxdimen \pxdimen - -\let\pdfinsertht \insertht - -\let\pdfnormaldeviate \normaldeviate -\let\pdfuniformdeviate \uniformdeviate -\let\pdfsetrandomseed \setrandomseed -\let\pdfrandomseed \randomseed - -\let\pdfprimitive \primitive -\let\ifpdfprimitive \ifprimitive - -\let\ifpdfabsnum \ifabsnum -\let\ifpdfabsdim \ifabsdim -\stoptyping - -And even: - -\starttyping -\newdimen\pdfeachlineheight -\newdimen\pdfeachlinedepth -\newdimen\pdflastlinedepth 
-\newdimen\pdffirstlineheight -\newdimen\pdfignoreddimen -\stoptyping +The \prm {edef} can also be a \prm {def} but it's a bit more efficient to expand +the lookup related register beforehand. The backend is derived from \PDFTEX\ so the same syntax applies. However, the \type {outline} command accepts a \type {objnum} followed by a number. No checking takes place so when this is used it had better be a valid (flushed) object. -In order to be (more or less) compatible with \PDFTEX\ we also support the -option to suppress some info: +In order to be (more or less) compatible with \PDFTEX\ we also support the option +to suppress some info but we do so via a bitset: \starttyping \pdfvariable suppressoptionalinfo \numexpr @@ -770,7 +712,7 @@ option to suppress some info: In addition you can overload the trailer id, but we don't do any checking on validity, so you have to pass a valid array. The following is like the ones -normally generated by the engine: +normally generated by the engine. You even need to include the brackets here! \starttyping \pdfvariable trailerid {[ @@ -779,8 +721,6 @@ normally generated by the engine: ]} \stoptyping -So, you even need to include the brackets! - Although we started from a merge of \PDFTEX\ and \ALEPH, by now the code base as well as functionality has diverted from those parents. Here we show the options that can be passed to the extensions. @@ -928,10 +868,113 @@ that can be passed to the extensions. {tokens} \stoptexsyntax +\stopsubsection + +\startsubsection[title={Defaults}] + +The engine sets the following defaults. + +\starttyping +\pdfcompresslevel 9 +\pdfobjcompresslevel 1 % used: (0,9) +\pdfrecompress 0 % mostly for debugging +\pdfdecimaldigits 4 % used: (3,6) +\pdfgamma 1000 +\pdfimageresolution 71 +\pdfimageapplygamma 0 +\pdfimagegamma 2200 +\pdfimagehicolor 1 +\pdfimageaddfilename 1 +\pdfpkresolution 72 +\pdfpkfixeddpi 0 +\pdfinclusioncopyfonts 0 +\pdfinclusionerrorlevel 0 +\pdfignoreunknownimages 0 +\pdfgentounicode 0 +\pdfomitcidset 0 +\pdfomitcharset 0 +\pdfpagebox 0 +\pdfminorversion 4 +\pdfuniqueresname 0 + +\pdfhorigin 1in +\pdfvorigin 1in +\pdflinkmargin 0pt +\pdfdestmargin 0pt +\pdfthreadmargin 0pt +\pdfxformmargin 0pt +\stoptyping + +\stopsubsection + +\startsubsection[title={Backward compatibility}] + +If you also want some backward compatibility, you can add: + +\starttyping +\let\pdfpagewidth \pagewidth +\let\pdfpageheight \pageheight + +\let\pdfadjustspacing \adjustspacing +\let\pdfprotrudechars \protrudechars +\let\pdfnoligatures \ignoreligaturesinfont +\let\pdffontexpand \expandglyphsinfont +\let\pdfcopyfont \copyfont + +\let\pdfxform \saveboxresource +\let\pdflastxform \lastsavedboxresourceindex +\let\pdfrefxform \useboxresource + +\let\pdfximage \saveimageresource +\let\pdflastximage \lastsavedimageresourceindex +\let\pdflastximagepages\lastsavedimageresourcepages +\let\pdfrefximage \useimageresource + +\let\pdfsavepos \savepos +\let\pdflastxpos \lastxpos +\let\pdflastypos \lastypos + +\let\pdfoutput \outputmode +\let\pdfdraftmode \draftmode + +\let\pdfpxdimen \pxdimen + +\let\pdfinsertht \insertht + +\let\pdfnormaldeviate \normaldeviate +\let\pdfuniformdeviate \uniformdeviate +\let\pdfsetrandomseed \setrandomseed +\let\pdfrandomseed \randomseed + +\let\pdfprimitive \primitive +\let\ifpdfprimitive \ifprimitive + +\let\ifpdfabsnum \ifabsnum +\let\ifpdfabsdim \ifabsdim +\stoptyping + +And even: + +\starttyping +\newdimen\pdfeachlineheight +\newdimen\pdfeachlinedepth +\newdimen\pdflastlinedepth +\newdimen\pdffirstlineheight 
+\newdimen\pdfignoreddimen +\stoptyping + +\stopsubsection + \stopsection \startsection[title=Directions] +\topicindex {\OMEGA} +\topicindex {\ALEPH} +\topicindex {directions} + +\startsubsection[title={Four directions}] + The directional model in \LUATEX\ is inherited from \OMEGA|/|\ALEPH\ but we tried to improve it a bit. At some point we played with recovery of modes but that was disabled later on when we found that it interfered with nested directions. That @@ -939,14 +982,23 @@ itself had as side effect that the node list was no longer balanced with respect to directional nodes which in turn can give side effects when a series of dir changes happens without grouping. -The current (0.97 onward) approach is that we again make the list balanced but -try to avoid some side effects. What happens is quite intuitive if we forget -about spaces (turned into glue) but even there what happens makes sense if you -look at it in detail. However that logic makes in|-|group switching kind of -useless when no proper nested grouping is used: switching from right to left -several times nested, results in spacing ending up after each other due to nested -mirroring. Of course a sane macro package will manage this for the user but here -we are discussing the low level dir injection. +When extending the \PDF\ backend to support directions some inconsistencies were +found and as a result we decided to support only the four models that make sense +\type {TLT} (latin), \type {TRT} (arabic), \type {RTT} (cjk) and \type {LTL} +(mongolian). + +\stopsubsection + +\startsubsection[title={How it works}] + +The approach is that we again make the list balanced but try to avoid some side +effects. What happens is quite intuitive if we forget about spaces (turned into +glue) but even there what happens makes sense if you look at it in detail. +However that logic makes in|-|group switching kind of useless when no proper +nested grouping is used: switching from right to left several times nested, +results in spacing ending up after each other due to nested mirroring. Of course +a sane macro package will manage this for the user but here we are discussing the +low level dir injection. This is what happens: @@ -1031,7 +1083,7 @@ It gets typeset as: We could define the two helpers to look back, pick up a skip, remove it and inject it after the dir node. But that way we loose the subtype information that for some applications can be handy to be kept as|-|is. This is why we now have a -variant of \type {\textdir} which injects the balanced node before the skip. +variant of \lpr {textdir} which injects the balanced node before the skip. Instead of the previous definition we can use: \startbuffer[def] @@ -1060,64 +1112,64 @@ comes out as a properly spaced: Anything more complex that this, like combination of skips and penalties, or kerns, should be handled in the input or macro package because there is no way we -can predict the expected behaviour. In fact, the \type {\linedir} is just a +can predict the expected behaviour. In fact, the \lpr {linedir} is just a convenience extra which could also have been implemented using node list parsing. +\stopsubsection + +\startsubsection[title={Controlling glue with \lpr {breakafterdirmode}}] + Glue after a dir node is ignored in the linebreak decision but you can bypass that -by setting \type {\breakafterdirmode} to~\type {1}. The following table shows the +by setting \lpr {breakafterdirmode} to~\type {1}. The following table shows the difference. Watch your spaces. 
\def\ShowSome#1{% - \BC - \type{#1} - \NC - \breakafterdirmode = 0 - \hsize 0pt - #1 + \BC \type{#1} + \NC \breakafterdirmode\zerocount\hsize\zeropoint#1 \NC + \NC \breakafterdirmode\plusone\hsize\zeropoint#1 \NC - \breakafterdirmode = 1 - \hsize 0pt - #1 - \NC - \NC \NR \HL + \NC \NR } -\starttabulate[|l|Tp(0pt)|w(5em)|Tp(0pt)|p|] - \HL - \BC \type{\breakafterdirmode} +\starttabulate[|l|Tp(1pt)|w(5em)|Tp(1pt)|w(5em)|] + \DB \BC \type{0} \NC \BC \type{1} \NC \NC \NR - \HL + \TB \ShowSome{pre {\textdir TLT xxx} post} \ShowSome{pre {\textdir TLT xxx }post} \ShowSome{pre{ \textdir TLT xxx} post} \ShowSome{pre{ \textdir TLT xxx }post} \ShowSome{pre { \textdir TLT xxx } post} \ShowSome{pre {\textdir TLT\relax\space xxx} post} + \LL \stoptabulate +\stopsubsection + +\startsubsection[title={Controling parshapes with \lpr {shapemode}}] Another adaptation to the \ALEPH\ directional model is control over shapes driven -by \type {\hangindent} and \type {\parshape}. This is controlled by a new parameter -\type {\shapemode}: +by \prm {hangindent} and \prm {parshape}. This is controlled by a new parameter +\lpr {shapemode}: -\starttabulate[|c|c|c|] -\BC \BC \type {\hangindent} \BC \type {\parshape} \NC \NR +\starttabulate[|c|l|l|] +\DB value \BC \prm {hangindent} \BC \prm {parshape} \NC \NR +\TB \BC \type{0} \NC normal \NC normal \NC \NR \BC \type{1} \NC mirrored \NC normal \NC \NR \BC \type{2} \NC normal \NC mirrored \NC \NR \BC \type{3} \NC mirrored \NC mirrored \NC \NR +\LL \stoptabulate -The value is reset to zero (like \type {\hangindent} and \type {\parshape}) +The value is reset to zero (like \prm {hangindent} and \prm {parshape}) after the paragraph is done with. You can use negative values to prevent -this. - -In \in {figure} [fig:shapemode] a few examples are given. +this. In \in {figure} [fig:shapemode] a few examples are given. \startplacefigure[reference=fig:shapemode,title={The effect of \type {shapemode}.}] \startcombination[2*3] @@ -1162,12 +1214,46 @@ In \in {figure} [fig:shapemode] a few examples are given. \stopcombination \stopplacefigure +\stopsubsection + +\startsubsection[title={Symbols or numbers}] + +Internally the implementation is different from \ALEPH. First of all we use no +whatsits but dedicated nodes, but also we have only 4 directions that are mapped +onto 4 numbers. A text direction node can mark the start or end of a sequence of +nodes, and therefore has two states. At the \TEX\ end we don't see these states +because \TEX\ itself will add proper end state nodes if needed. + +The symbolic names \type {TLT}, \type {TRT}, etc.\ originate in \OMEGA. In +\LUATEX\ we also have a number based model which sometimes makes more sense. + +\starttabulate[|c|l|l|] +\DB value \BC equivalent \NC \NR +\TB +\BC \type {0} \NC TLT \NC \NR +\BC \type {1} \NC TRT \NC \NR +\BC \type {2} \NC LTL \NC \NR +\BC \type {3} \NC RTT \NC \NR +\LL +\stoptabulate + +We support the \OMEGA\ primitives \orm {textdir}, \orm {pardir}, \orm {pagedir}, +\orm {pardir} and \orm {mathdir}. These accept three character keywords. The +primitives that set the direction by number are: \lpr {textdirection}, \lpr +{pardirection}, \lpr {pagedirection} and \lpr {bodydirection} and \lpr +{mathdirection}. When specifying a direction for a box you can use \type {bdir} +instead of \type {dir}. 
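As a minimal sketch (not part of the committed manual text) the keyword driven and
the number driven interfaces described above can be put side by side; the material
in the braces is just a placeholder:

\starttyping
% keyword driven, inherited from Omega/Aleph
{\textdir TRT some right to left material}
\hbox dir TRT {some right to left material}

% number driven, 1 is the equivalent of TRT; boxes take bdir instead of dir
{\textdirection 1 some right to left material}
\hbox bdir 1 {some right to left material}
\stoptyping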
+ +\stopsubsection + \stopsection \startsection[title=Implementation notes] \startsubsection[title=Memory allocation] +\topicindex {memory} + The single internal memory heap that traditional \TEX\ used for tokens and nodes is split into two separate arrays. Each of these will grow dynamically when needed. @@ -1189,14 +1275,9 @@ structures, some of the macros have been duplicated. For instance, there are now {token_info}. All access to the variable memory array is now hidden behind a macro called \type {vmem}. We mention this because using the \TEX book as reference is still quite valid but not for memory related details. Another -significate detail is that we have double linked node lists and that some nodes +significant detail is that we have double linked node lists and that most nodes carry more data. -The implementation of the growth of two arrays (via reallocation) introduces a -potential pitfall: the memory arrays should never be used as the left hand side -of a statement that can modify the array in question. Details like this are -of no concern to users. - The input line buffer and pool size are now also reallocated when needed, and the \type {texmf.cnf} settings \type {buf_size} and \type {pool_size} are silently ignored. @@ -1205,25 +1286,31 @@ ignored. \startsubsection[title=Sparse arrays] -The \type {\mathcode}, \type {\delcode}, \type {\catcode}, \type {\sfcode}, \type -{\lccode} and \type {\uccode} (and the new \type {\hjcode}) tables are now sparse -arrays that are implemented in~\CCODE. They are no longer part of the \TEX\ -\quote {equivalence table} and because each had 1.1 million entries with a few -memory words each, this makes a major difference in memory usage. +The \prm {mathcode}, \prm {delcode}, \prm {catcode}, \prm {sfcode}, \prm {lccode} +and \prm {uccode} (and the new \lpr {hjcode}) tables are now sparse arrays that +are implemented in~\CCODE. They are no longer part of the \TEX\ \quote +{equivalence table} and because each had 1.1 million entries with a few memory +words each, this makes a major difference in memory usage. Performance is not +really hurt by this. -The \type {\catcode}, \type {\sfcode}, \type {\lccode}, \type {\uccode} and \type -{\hjcode} assignments do not yet show up when using the \ETEX\ tracing routines -\type {\tracingassigns} and \type {\tracingrestores}. +The \prm {catcode}, \prm {sfcode}, \prm {lccode}, \prm {uccode} and \lpr {hjcode} +assignments don't show up when using the \ETEX\ tracing routines \prm +{tracingassigns} and \prm {tracingrestores} but we don't see that as a real +limitation. -A side|-|effect of the current implementation is that \type {\global} is now more -expensive in terms of processing than non|-|global assignments. +A side|-|effect of the current implementation is that \prm {global} is now more +expensive in terms of processing than non|-|global assignments but not many users +will notice that. The glyph ids within a font are also managed by means of a sparse array as glyph -ids can go up to index $2^{21}-1$. +ids can go up to index $2^{21}-1$ but these are never accessed directly so again +users will not notice this. \stopsubsection -\startsubsection[title=Simple single-character csnames] +\startsubsection[title=Simple single|-|character csnames] + +\topicindex {csnames} Single|-|character commands are no longer treated specially in the internals, they are stored in the hash just like the multiletter csnames. @@ -1236,16 +1323,22 @@ control sequences that uses a prefix that is otherwise impossible to obtain. 
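A trivial illustration, again not part of the committed text: at the macro level
nothing changes, a single character control sequence is looked up in the hash just
like any multiletter name, so both lines below define the very same command:

\starttyping
\def\!{a bang}
\expandafter\def\csname !\endcsname{a bang}
\stoptyping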
\stopsubsection -\startsubsection[title=Compressed format] +\startsubsection[title=The compressed format file] + +\topicindex {format} The format is passed through \type {zlib}, allowing it to shrink to roughly half of the size it would have had in uncompressed form. This takes a bit more \CPU\ -cycles but much less disk \IO, so it should still be faster. +cycles but much less disk \IO, so it should still be faster. We use a level~3 +compression which we found to be the optimal trade|-|off between filesize and +decompression speed. \stopsubsection \startsubsection[title=Binary file reading] +\topicindex {files+binary} + All of the internal code is changed in such a way that if one of the \type {read_xxx_file} callbacks is not set, then the file is read by a \CCODE\ function using basically the same convention as the callback: a single read into a buffer @@ -1257,6 +1350,9 @@ previous code (that mostly used \type {getc} calls), it can be quite a bit faste \startsubsection[title=Tabs and spaces] +\topicindex {space} +\topicindex {newline} + We conform to the way other \TEX\ engines handle trailing tabs and spaces. For decades trailing tabs and spaces (before a newline) were removed from the input but this behaviour was changed in September 2017 to only handle spaces. We are @@ -1273,7 +1369,7 @@ to be kept. We are aware of the fact that this contradicts some of our other choices but consistency with other engines and the fact that in \KPSE\ mode a common file \IO\ layer is used can have a side effect of breaking compatibility. We still stick to our view that at the log level we can (and might be) more -incompatible. +incompatible. We already expose some more details. \stopsubsection diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-nodes.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-nodes.tex index 517d9b6c897..34a2aebe801 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-nodes.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-nodes.tex @@ -1,16 +1,21 @@ % language=uk \environment luatex-style -\environment luatex-logos \startcomponent luatex-nodes \startchapter[reference=nodes,title={Nodes}] -\section{\LUA\ node representation} +\startsection[title={\LUA\ node representation}][library=node] -\TEX's nodes are represented in \LUA\ as userdata object with a variable set of -fields. In the following syntax tables, such the type of such a userdata object +\topicindex {nodes} + +\libindex {fields} +\libindex {subtypes} +\libindex {values} + +\TEX's nodes are represented in \LUA\ as userdata objects with a variable set of +fields. In the following syntax tables, such as the type of such a userdata object is represented as \syntax {<node>}. The current return value of \type {node.types()} is: @@ -24,76 +29,43 @@ The current return value of \type {node.types()} is: \stopluacode . % period -The \type {\lastnodetype} primitive is \ETEX\ compliant. The valid range is still +The \prm {lastnodetype} primitive is \ETEX\ compliant. The valid range is still $[-1,15]$ and glyph nodes (formerly known as char nodes) have number~0 while ligature nodes are mapped to~7. That way macro packages can use the same symbolic names as in traditional \ETEX. Keep in mind that these \ETEX\ node numbers are different from the real internal ones and that there are more \ETEX\ node types than~15. 
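Not part of the committed text, but a minimal sketch of inspecting this mapping at
the \LUA\ end (run it with \type {\directlua}): \type {node.types} returns the
number to name table shown above, and the standard helper \type {node.id}, not
shown in this excerpt, maps a type name back to its number.

\starttyping
-- print the internal node type numbers with their names
for id, name in pairs(node.types()) do
    texio.write_nl(id .. " : " .. name)
end
-- map a name back to its number
texio.write_nl("glyph = " .. node.id("glyph"))
\stoptyping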
-You can ask for a list of fields with the \type {node.fields} (which takes an id) -and for valid subtypes with \type {node.subtypes} (which takes a string because -eventually we might support more used enumerations). - -The \type {node.values} function reports some used values. Valid arguments are -\type {dir}, \type {direction}, \type {glue}, \type {pdf_literal}, \type -{pdf_action}, \type {pdf_window} and \type {color_stack}. Keep in mind that the -setters normally expect a number, but this helper gives you a list of what -numbers matter. For practical reason the \type {pagestate} values are also -reported with this helper. - -\subsection{Attributes} - -The newly introduced attribute registers are non|-|trivial, because the value -that is attached to a node is essentially a sparse array of key|-|value pairs. It -is generally easiest to deal with attribute lists and attributes by using the -dedicated functions in the \type {node} library, but for completeness, here is -the low|-|level interface. - -\subsubsection{attribute_list nodes} - -An \type {attribute_list} item is used as a head pointer for a list of attribute -items. It has only one user-visible field: - -\starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\NC \type{next} \NC node \NC pointer to the first attribute \NC \NR -\stoptabulate - -\subsubsection{attribute nodes} - -A normal node's attribute field will point to an item of type \type -{attribute_list}, and the \type {next} field in that item will point to the first -defined \quote {attribute} item, whose \type {next} will point to the second -\quote {attribute} item, etc. +You can ask for a list of fields with \type {node.fields} and for valid subtypes +with \type {node.subtypes}. The \type {node.values} function reports some used +values. Valid arguments are \nod {dir}, \type {direction}, \nod {glue}, \whs +{pdf_literal}, \whs {pdf_action}, \whs {pdf_window} and \whs {color_stack}. Keep +in mind that the setters normally expect a number, but this helper gives you a +list of what numbers matter. For practical reason the \type {pagestate} values +are also reported with this helper. -\starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\NC \type{next} \NC node \NC pointer to the next attribute \NC \NR -\NC \type{number} \NC number \NC the attribute type id \NC \NR -\NC \type{value} \NC number \NC the attribute value \NC \NR -\stoptabulate +\stopsection -As mentioned it's better to use the official helpers rather than edit these -fields directly. For instance the \type {prev} field is used for other purposes -and there is no double linked list. +\startsection[title={Main text nodes}] -\subsection{Main text nodes} +\topicindex {nodes+text} These are the nodes that comprise actual typesetting commands. A few fields are present in all nodes regardless of their type, these are: \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{next} \NC node \NC the next node in a list, or nil \NC \NR \NC \type{id} \NC number \NC the node's type (\type {id}) number \NC \NR \NC \type{subtype} \NC number \NC the node \type {subtype} identifier \NC \NR +\LL \stoptabulate -The \type {subtype} is sometimes just a stub entry. Not all nodes actually use -the \type {subtype}, but this way you can be sure that all nodes accept it as a -valid field name, and that is often handy in node list traversal. In the -following tables \type {next} and \type {id} are not explicitly mentioned. 
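A minimal sketch (not part of the committed text) of how these common fields show
up when walking a list with the standard \type {node.traverse} and \type
{node.type} helpers; it assumes that box register~0 already holds some typeset
material:

\starttyping
-- e.g. after \setbox0\hbox{some text}; run via \directlua
for n in node.traverse(tex.box[0].list) do
    texio.write_nl(node.type(n.id) .. " " .. n.subtype)
end
\stoptyping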
+The \type {subtype} is sometimes just a dummy entry because not all nodes +actually use the \type {subtype}, but this way you can be sure that all nodes +accept it as a valid field name, and that is often handy in node list traversal. +In the following tables \type {next} and \type {id} are not explicitly mentioned. Besides these three fields, almost all nodes also have an \type {attr} field, and there is a also a field called \type {prev}. That last field is always present, @@ -101,25 +73,33 @@ but only initialized on explicit request: when the function \type {node.slide()} is called, it will set up the \type {prev} fields to be a backwards pointer in the argument node list. By now most of \TEX's node processing makes sure that the \type {prev} nodes are valid but there can be exceptions, especially when the -internal magic uses a leading \type {temp} nodes to temporarily store a state. +internal magic uses a leading \nod {temp} nodes to temporarily store a state. -\subsubsection{hlist nodes} +\subsection{\nod {hlist} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{list} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC the width of the box \NC \NR \NC \type{height} \NC number \NC the height of the box \NC \NR \NC \type{depth} \NC number \NC the depth of the box \NC \NR -\NC \type{shift} \NC number \NC a displacement perpendicular to the character progression direction \NC \NR -\NC \type{glue_order} \NC number \NC a number in the range $[0,4]$, indicating the glue order \NC \NR +\NC \type{shift} \NC number \NC a displacement perpendicular to the character + progression direction \NC \NR +\NC \type{glue_order} \NC number \NC a number in the range $[0,4]$, indicating the + glue order \NC \NR \NC \type{glue_set} \NC number \NC the calculated glue ratio \NC \NR -\NC \type{glue_sign} \NC number \NC 0 = \type {normal}, 1 = \type {stretching}, 2 = \type {shrinking} \NC \NR +\NC \type{glue_sign} \NC number \NC 0 = \type {normal}, 1 = \type {stretching}, 2 = + \type {shrinking} \NC \NR \NC \type{head/list} \NC node \NC the first node of the body of this list \NC \NR -\NC \type{dir} \NC string \NC the direction of this box, see~\in[dirnodes] \NC \NR +\NC \type{dir} \NC string \NC the direction of this box, see~\in [dirnodes] \NC \NR +\LL \stoptabulate +\topicindex {nodes+lists} +\topicindex {lists} + A warning: never assign a node list to the \type {head} field unless you are sure its internal link structure is correct, otherwise an error may result. @@ -127,39 +107,72 @@ Note: the field name \type {head} and \type {list} are both valid. Sometimes it makes more sense to refer to a list by \type {head}, sometimes \type {list} makes more sense. -\subsubsection{vlist nodes} +\subsection{\nod {vlist} nodes} + +\topicindex {nodes+lists} +\topicindex {lists} -This node is similar to \type {hlist}, except that \quote {shift} is a displacement +This node is similar to \nod {hlist}, except that \quote {shift} is a displacement perpendicular to the line progression direction, and \quote {subtype} only has the values 0, 4, and~5. -\subsubsection{rule nodes} +\subsection{\nod {rule} nodes} -Contrary to traditional \TEX, \LUATEX\ has more subtypes because we also use -rules to store reuseable objects and images. User nodes are invisible and can be -intercepted by a callback. 
+\topicindex {nodes+rules} +\topicindex {rules} + +Contrary to traditional \TEX, \LUATEX\ has more \prm {rule} subtypes because we +also use rules to store reuseable objects and images. User nodes are invisible +and can be intercepted by a callback. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\NC \type{subtype} \NC number \NC \showsubtypes{rule} \NC \NR -\NC \type{attr} \NC node \NC list of attributes \NC \NR -\NC \type{width} \NC number \NC the width of the rule where the special value $-1073741824$ is used for \quote {running} glue dimensions \NC \NR -\NC \type{height} \NC number \NC the height of the rule (can be negative) \NC \NR -\NC \type{depth} \NC number \NC the depth of the rule (can be negative) \NC \NR -\NC \type{dir} \NC string \NC the direction of this rule, see~\in[dirnodes] \NC \NR -\NC \type{index} \NC number \NC an optional index that can be referred to \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB +\NC \type{subtype} \NC number \NC \showsubtypes {rule} \NC \NR +\NC \type{attr} \NC node \NC list of attributes \NC \NR +\NC \type{width} \NC number \NC the width of the rule where the special value + $-1073741824$ is used for \quote {running} glue dimensions \NC \NR +\NC \type{height} \NC number \NC the height of the rule (can be negative) \NC \NR +\NC \type{depth} \NC number \NC the depth of the rule (can be negative) \NC \NR +\NC \type{left} \NC number \NC shift at the left end (also subtracted from width) \NC \NR +\NC \type{right} \NC number \NC (subtracted from width) \NC \NR +\NC \type{dir} \NC string \NC the direction of this rule, see~\in[dirnodes] \NC \NR +\NC \type{index} \NC number \NC an optional index that can be referred to \NC \NR +\NC \type{transform} \NC number \NC an private variable (also used to specify outline width) \NC \NR +\LL \stoptabulate -\subsubsection{ins nodes} +The \type {left} and type {right} keys are somewhat special (and experimental). +When rules are auto adapting to the surrounding box width you can enforce a shift +to the right by setting \type {left}. The value is also subtracted from the width +which can be a value set by the engine itself and is not entirely under user +control. The \type {right} is also subtracted from the width. It all happens in +the backend so these are not affecting the calculations in the frontend (actually +the auto settings also happen in the backend). For a vertical rule \type {left} +affects the height and \type {right} affects the depth. There is no matching +interface at the \TEX\ end (although we can have more keywords for rules it would +complicate matters and introduce a speed penalty.) However, you can just +construct a rule node with \LUA\ and write it to the \TEX\ input. The \type +{outline} subtype is just a convenient variant and the \type {transform} field +specifies the width of the outline. + +\subsection{\nod {ins} nodes} + +\topicindex {nodes+insertions} +\topicindex {insertions} + +This node relates to the \prm {insert} primitive. 
\starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC the insertion class \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{cost} \NC number \NC the penalty associated with this insert \NC \NR \NC \type{height} \NC number \NC height of the insert \NC \NR \NC \type{depth} \NC number \NC depth of the insert \NC \NR \NC \type{head/list} \NC node \NC the first node of the body of this insert \NC \NR +\LL \stoptabulate There is a set of extra fields that concern the associated glue: \type {width}, @@ -167,53 +180,76 @@ There is a set of extra fields that concern the associated glue: \type {width}, These are all numbers. A warning: never assign a node list to the \type {head} field unless you are sure -its internal link structure is correct, otherwise an error may be result. You can use -\type {list} instead (often in functions you want to use local variable swith similar +its internal link structure is correct, otherwise an error may result. You can use +\type {list} instead (often in functions you want to use local variable with similar names and both names are equally sensible). -\subsubsection{mark nodes} +\subsection{\nod {mark} nodes} + +\topicindex {nodes+marks} +\topicindex {marks} + +This one relates to the \prm {mark} primitive. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC unused \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{class} \NC number \NC the mark class \NC \NR \NC \type{mark} \NC table \NC a table representing a token list \NC \NR +\LL \stoptabulate -\subsubsection{adjust nodes} +\subsection{\nod {adjust} nodes} + +\topicindex {nodes+adjust} +\topicindex {adjust} + +This node comes from \prm {vadjust} primitive. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{adjust} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{head/list} \NC node \NC adjusted material \NC \NR +\LL \stoptabulate A warning: never assign a node list to the \type {head} field unless you are sure -its internal link structure is correct, otherwise an error may be result. +its internal link structure is correct, otherwise an error may be the result. + +\subsection{\nod {disc} nodes} + +\topicindex {nodes+discretionaries} +\topicindex {discretionaries} -\subsubsection{disc nodes} +The \prm {discretionary} and \prm {-}, the \type {-} character but also the +hyphenation mechanism produces these nodes. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{disc} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{pre} \NC node \NC pointer to the pre|-|break text \NC \NR \NC \type{post} \NC node \NC pointer to the post|-|break text \NC \NR \NC \type{replace} \NC node \NC pointer to the no|-|break text \NC \NR -\NC \type{penalty} \NC number \NC the penalty associated with the break, normally \type {\hyphenpenalty} or \type {\exhyphenpenalty} \NC \NR +\NC \type{penalty} \NC number \NC the penalty associated with the break, normally + \prm {hyphenpenalty} or \prm {exhyphenpenalty} \NC \NR +\LL \stoptabulate The subtype numbers~4 and~5 belong to the \quote {of-f-ice} explanation given -elsewhere. 
+elsewhere. These disc nodes are kind of special as at some point they also keep +information about breakpoints and nested ligatures. -These disc nodes are kind of special as at some point they also keep information -about breakpoints and nested ligatures. The \type {pre}, \type {post} and \type -{replace} fields at the \LUA\ end are in fact indirectly accessed and have a -\type {prev} pointer that is not \type {nil}. This means that when you mess -around with the head of these (three) lists, you also need to reassign them -because that will restore the proper \type {prev} pointer, so: +The \type {pre}, \type {post} and \type {replace} fields at the \LUA\ end are in +fact indirectly accessed and have a \type {prev} pointer that is not \type {nil}. +This means that when you mess around with the head of these (three) lists, you +also need to reassign them because that will restore the proper \type {prev} +pointer, so: \starttyping pre = d.pre @@ -225,48 +261,64 @@ Otherwise you can end up with an invalid internal perception of reality and \LUATEX\ might even decide to crash on you. It also means that running forward over for instance \type {pre} is ok but backward you need to stop at \type {pre}. And you definitely must not mess with the node that \type {prev} points to, if -only because it is not really an node but part of the disc data structure (so +only because it is not really a node but part of the disc data structure (so freeing it again might crash \LUATEX). -\subsubsection{math nodes} +\subsection{\nod {math} nodes} + +\topicindex {nodes+math} +\topicindex {math+nodes} + +Math nodes represent the boundaries of a math formula, normally wrapped into +\type {$} signs. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{math} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR -\NC \type{surround} \NC number \NC width of the \type {\mathsurround} kern \NC \NR +\NC \type{surround} \NC number \NC width of the \prm {mathsurround} kern \NC \NR +\LL \stoptabulate There is a set of extra fields that concern the associated glue: \type {width}, \type {stretch}, \type {stretch_order}, \type {shrink} and \type {shrink_order}. These are all numbers. -\subsubsection{glue nodes} +\subsection{\nod {glue} nodes} + +\topicindex {nodes+glue} +\topicindex {glue} Skips are about the only type of data objects in traditional \TEX\ that are not a -simple value. The structure that represents the glue components of a skip is -called a \type {glue_spec}, and it has the following accessible fields: +simple value. They are inserted when \TEX\ sees a space in the text flow but also +by \prm {hskip} and \prm {vskip}. 
The structure that represents the glue +components of a skip is called a \nod {glue_spec}, and it has the following +accessible fields: \starttabulate[|l|l|p|] -\BC key \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{width} \NC number \NC the horizontal or vertical displacement \NC \NR \NC \type{stretch} \NC number \NC extra (positive) displacement or stretch amount \NC \NR \NC \type{stretch_order} \NC number \NC factor applied to stretch amount \NC \NR \NC \type{shrink} \NC number \NC extra (negative) displacement or shrink amount\NC \NR \NC \type{shrink_order} \NC number \NC factor applied to shrink amount \NC \NR +\LL \stoptabulate The effective width of some glue subtypes depends on the stretch or shrink needed to make the encapsulating box fit its dimensions. For instance, in a paragraph -lines normally have glue representing spaces and these stretch of shrink to make +lines normally have glue representing spaces and these stretch or shrink to make the content fit in the available space. The \type {effective_glue} function that takes a glue node and a parent (hlist or vlist) returns the effective width of -that glue item. +that glue item. When you pass \type {true} as third argument the value will be +rounded. -A gluespec node is a special kind of node that is used for storing a set of glue -values in registers. Originally they were also used to store properties of glue -nodes (using a system of reference counts) but we now keep these properties in -the glue nodes themselves, which gives a cleaner interface to \LUA. +A \nod {glue_spec} node is a special kind of node that is used for storing a set +of glue values in registers. Originally they were also used to store properties +of glue nodes (using a system of reference counts) but we now keep these +properties in the glue nodes themselves, which gives a cleaner interface to \LUA. The indirect spec approach was in fact an optimization in the original \TEX\ code. First of all it can save quite some memory because all these spaces that @@ -281,10 +333,12 @@ not that high (and nowadays memory is less an issue, also given that a glue node is only a few memory words larger than a spec). \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{glue} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{leader} \NC node \NC pointer to a box or rule for leaders \NC \NR +\LL \stoptabulate In addition there are the \type {width}, \type {stretch} \type {stretch_order}, @@ -295,64 +349,90 @@ so we decided to stick to that naming. A regular word space also results in a \type {spaceskip} subtype (this used to be a \type {userskip} with subtype zero). -\subsubsection{kern nodes} +\subsection{\nod {kern} nodes} + +\topicindex {nodes+kerns} +\topicindex {kerns} + +The \prm {kern} command creates such nodes but for instance the font and math +machinery can also add them. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{kern} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{kern} \NC number \NC fixed horizontal or vertical advance \NC \NR +\LL \stoptabulate -\subsubsection{penalty nodes} +\subsection{\nod {penalty} nodes} + +\topicindex {nodes+penalty} +\topicindex {penalty} + +The \prm {penalty} command is one that generates these nodes. 
\starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{penalty} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{penalty} \NC number \NC the penalty value \NC \NR +\LL \stoptabulate The subtypes are just informative and \TEX\ itself doesn't use them. When you run into an \type {linebreakpenalty} you need to keep in mind that it's a accumulation of \type {club}, \type{widow} and other relevant penalties. -\subsubsection[glyphnodes]{glyph nodes} +\subsection[glyphnodes]{\nod {glyph} nodes} + +\topicindex {nodes+glyph} +\topicindex {glyphs} + +These are probably the mostly used nodes and although you can push them in the +current list with for instance \prm {char} \TEX\ will normally do it for you when +it considers some input to be text. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\NC \type{subtype} \NC number \NC bitfield \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB +\NC \type{subtype} \NC number \NC bit field \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR -\NC \type{char} \NC number \NC the chatacter index in the font \NC \NR +\NC \type{char} \NC number \NC the character index in the font \NC \NR \NC \type{font} \NC number \NC the font identifier \NC \NR \NC \type{lang} \NC number \NC the language identifier \NC \NR \NC \type{left} \NC number \NC the frozen \type {\lefthyphenmnin} value \NC \NR \NC \type{right} \NC number \NC the frozen \type {\righthyphenmnin} value \NC \NR -\NC \type{uchyph} \NC boolean \NC the frozen \type {\uchyph} value \NC \NR +\NC \type{uchyph} \NC boolean \NC the frozen \prm {uchyph} value \NC \NR \NC \type{components} \NC node \NC pointer to ligature components \NC \NR \NC \type{xoffset} \NC number \NC a virtual displacement in horizontal direction \NC \NR \NC \type{yoffset} \NC number \NC a virtual displacement in vertical direction \NC \NR -%NC \type{xadvance} \NC number \NC an additional advance after the glyph (experimental) \NC \NR \NC \type{width} \NC number \NC the (original) width of the character \NC \NR \NC \type{height} \NC number \NC the (original) height of the character\NC \NR \NC \type{depth} \NC number \NC the (original) depth of the character\NC \NR \NC \type{expansion_factor} \NC number \NC the to be applied expansion_factor \NC \NR +\NC \type{data} \NC number \NC a general purpose field for users (we had room for it) \NC \NR +\LL \stoptabulate The \type {width}, \type {height} and \type {depth} values are read|-|only. The -\type {expansion_factor} is assigned in the parbuilder and used in the backend. +\type {expansion_factor} is assigned in the par builder and used in the backend. A warning: never assign a node list to the components field unless you are sure its internal link structure is correct, otherwise an error may be result. Valid bits for the \type {subtype} field are: \starttabulate[|c|l|] -\BC bit \BC meaning \NC \NR +\DB bit \BC meaning \NC \NR +\TB \NC 0 \NC character \NC \NR \NC 1 \NC ligature \NC \NR \NC 2 \NC ghost \NC \NR \NC 3 \NC left \NC \NR \NC 4 \NC right \NC \NR +\LL \stoptabulate See \in {section} [charsandglyphs] for a detailed description of the \type @@ -378,87 +458,117 @@ helpers are not always faster than separate calls but they sometimes permit making more readable tests. The \type {uses_font} helpers takes a node and font id and returns true when a glyph or disc node references that font. 
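
As a small, hedged illustration (the function name and its arguments are ours, not
part of the interface): the following sketch walks a list through the \type {next}
fields and counts the glyphs that reference a given font; the list head and the
font id are assumed to come from elsewhere, for instance from a callback.

\starttyping
local glyph_id = node.id("glyph")

local function count_font_glyphs(head,fnt)
    -- head: first node of a node list, fnt: a font id (both assumed given)
    local count = 0
    while head do
        if head.id == glyph_id and node.uses_font(head,fnt) then
            count = count + 1
        end
        head = head.next
    end
    return count
end
\stoptyping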
-\subsubsection{boundary nodes} +\subsection{\nod {boundary} nodes} + +\topicindex {nodes+boundary} +\topicindex {boundary} + +This node relates to the \prm {noboundary}, \prm {boundary}, \prm +{protrusionboundary} and \prm {wordboundary} primitives. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{boundary} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{value} \NC number \NC values 0--255 are reserved \NC \NR +\LL \stoptabulate -This node relates to the \type {\noboundary}, \type {\boundary}, \type -{\protrusionboundary} and \type {\wordboundary} primitives. +\subsection{\nod {local_par} nodes} + +\topicindex {nodes+paragraphs} +\topicindex {paragraphs} -\subsubsection{local_par nodes} +This node is inserted at the start of a paragraph. You should not mess +too much with this one. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR -\NC \type{pen_inter} \NC number \NC local interline penalty (from \type {\localinterlinepenalty}) \NC \NR -\NC \type{pen_broken} \NC number \NC local broken penalty (from \type {\localbrokenpenalty}) \NC \NR +\NC \type{pen_inter} \NC number \NC local interline penalty (from \lpr {localinterlinepenalty}) \NC \NR +\NC \type{pen_broken} \NC number \NC local broken penalty (from \lpr {localbrokenpenalty}) \NC \NR \NC \type{dir} \NC string \NC the direction of this par. see~\in [dirnodes] \NC \NR -\NC \type{box_left} \NC node \NC the \type {\localleftbox} \NC \NR -\NC \type{box_left_width} \NC number \NC width of the \type {\localleftbox} \NC \NR -\NC \type{box_right} \NC node \NC the \type {\localrightbox} \NC \NR -\NC \type{box_right_width} \NC number \NC width of the \type {\localrightbox} \NC \NR +\NC \type{box_left} \NC node \NC the \lpr {localleftbox} \NC \NR +\NC \type{box_left_width} \NC number \NC width of the \lpr {localleftbox} \NC \NR +\NC \type{box_right} \NC node \NC the \lpr {localrightbox} \NC \NR +\NC \type{box_right_width} \NC number \NC width of the \lpr {localrightbox} \NC \NR +\LL \stoptabulate A warning: never assign a node list to the \type {box_left} or \type {box_right} field unless you are sure its internal link structure is correct, otherwise an -error may be result. +error may result. + +\subsection[dirnodes]{\nod {dir} nodes} -\subsubsection[dirnodes]{dir nodes} +\topicindex {nodes+direction} +\topicindex {directions} + +Direction nodes mark parts of the running text that need a change of direction and \ +the \prm {textdir} command generates them. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{dir} \NC string \NC the direction (but see below) \NC \NR \NC \type{level} \NC number \NC nesting level of this direction whatsit \NC \NR +\LL \stoptabulate -A note on \type {dir} strings. Direction specifiers are three|-|letter -combinations of \type {T}, \type {B}, \type {R}, and \type {L}. - -These are built up out of three separate items: +Direction specifiers are three|-|letter combinations of \type {T}, \type {B}, +\type {R}, and \type {L}. These are built up out of three separate items: \startitemize[packed] \startitem - the first is the direction of the \quote{top} of paragraphs. 
+ the first is the direction of the \quote{top} of paragraphs \stopitem \startitem - the second is the direction of the \quote{start} of lines. + the second is the direction of the \quote{start} of lines \stopitem \startitem - the third is the direction of the \quote{top} of glyphs. + the third is the direction of the \quote{top} of glyphs \stopitem \stopitemize However, only four combinations are accepted: \type {TLT}, \type {TRT}, \type -{RTT}, and \type {LTL}. +{RTT}, and \type {LTL}. Inside actual \nod {dir} nodes, the representation of +\nod {dir} is not a three|-|letter but a combination of numbers. When printed the +direction is indicated by a \type {+} or \type {-}, indicating whether the value +is pushed or popped from the direction stack. -Inside actual \type {dir} whatsit nodes, the representation of \type {dir} is not -a three-letter but a four|-|letter combination. The first character in this case -is always either \type {+} or \type {-}, indicating whether the value is pushed -or popped from the direction stack. +\subsection{\nod {marginkern} nodes} -\subsubsection{margin_kern nodes} +\topicindex {nodes+paragraphs} +\topicindex {paragraphs} +\topicindex {protrusion} + +Margin kerns result from protrusion. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\NC \type{subtype} \NC number \NC \showsubtypes{margin_kern} \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB +\NC \type{subtype} \NC number \NC \showsubtypes{marginkern} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC the advance of the kern \NC \NR \NC \type{glyph} \NC node \NC the glyph to be used \NC \NR +\LL \stoptabulate -\subsection{Math nodes} +\stopsection + +\startsection[title={Math noads}] + +\topicindex {nodes+math} +\topicindex {math+nodes} These are the so||called \quote {noad}s and the nodes that are specifically associated with math processing. Most of these nodes contain subnodes so that the list of possible fields is actually quite small. First, the subnodes: -\subsubsection{Math kernel subnodes} +\subsection{Math kernel subnodes} Many object fields in math mode are either simple characters in a specific family or math lists or node lists. There are four associated subnodes that represent @@ -467,58 +577,62 @@ these cases (in the following node descriptions these are indicated by the word The \type {next} and \type {prev} fields for these subnodes are unused. -\subsubsubsection{math_char and math_text_char subnodes} +\subsection{\nod {math_char} and \nod {math_text_char} subnodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{char} \NC number \NC the character index \NC \NR \NC \type{fam} \NC number \NC the family number \NC \NR +\LL \stoptabulate -The \type {math_char} is the simplest subnode field, it contains the character -and family for a single glyph object. The \type {math_text_char} is a special +The \nod {math_char} is the simplest subnode field, it contains the character +and family for a single glyph object. The \nod {math_text_char} is a special case that you will not normally encounter, it arises temporarily during math list conversion (its sole function is to suppress a following italic correction). 
-\subsubsubsection{sub_box and sub_mlist subnodes} +\subsection{\nod {sub_box} and \nod {sub_mlist} subnodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{head/list} \NC node \NC list of nodes \NC \NR +\LL \stoptabulate -These two subnode types are used for subsidiary list items. For \type {sub_box}, -the \type {head} points to a \quote {normal} vbox or hbox. For \type {sub_mlist}, +These two subnode types are used for subsidiary list items. For \nod {sub_box}, +the \type {head} points to a \quote {normal} vbox or hbox. For \nod {sub_mlist}, the \type {head} points to a math list that is yet to be converted. A warning: never assign a node list to the \type {head} field unless you are sure -its internal link structure is correct, otherwise an error may be result. +its internal link structure is correct, otherwise an error is triggered. -\subsubsection{Math delimiter subnode} +\subsection{\nod {delim} subnodes} There is a fifth subnode type that is used exclusively for delimiter fields. As before, the \type {next} and \type {prev} fields are unused. -\subsubsubsection{delim subnodes} - \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{small_char} \NC number \NC character index of base character \NC \NR \NC \type{small_fam} \NC number \NC family number of base character \NC \NR \NC \type{large_char} \NC number \NC character index of next larger character \NC \NR \NC \type{large_fam} \NC number \NC family number of next larger character \NC \NR +\LL \stoptabulate The fields \type {large_char} and \type {large_fam} can be zero, in that case the -font that is sed for the \type {small_fam} is expected to provide the large +font that is set for the \type {small_fam} is expected to provide the large version as an extension to the \type {small_char}. -\subsubsection{Math core nodes} +\subsection{Math core nodes} -First, there are the objects (the \TEX book calls then \quote {atoms}) that are +First, there are the objects (the \TEX book calls them \quote {atoms}) that are associated with the simple math objects: ord, op, bin, rel, open, close, punct, inner, over, under, vcent. These all have the same fields, and they are combined into a single node type with separate subtypes for differentiation. @@ -526,6 +640,8 @@ into a single node type with separate subtypes for differentiation. Some noads have an option field. The values in this bitset are common: \starttabulate[|l|r|] +\DB meaning \BC bits \NC \NR +\TB \NC set \NC \type{0x08} \NC \NR \NC internal \NC \type{0x00} + \type{0x08} \NC \NR \NC internal \NC \type{0x01} + \type{0x08} \NC \NR @@ -538,24 +654,28 @@ Some noads have an option field. 
The values in this bitset are common: \NC no sub script \NC \type{0x21} + \type{0x08} \NC \NR \NC no super script \NC \type{0x22} + \type{0x08} \NC \NR \NC no script \NC \type{0x23} + \type{0x08} \NC \NR +\LL \stoptabulate -\subsubsubsection{simple nodes} +\subsection{simple \nod {noad} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{noad} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{nucleus} \NC kernel node \NC base \NC \NR \NC \type{sub} \NC kernel node \NC subscript \NC \NR \NC \type{sup} \NC kernel node \NC superscript \NC \NR \NC \type{options} \NC number \NC bitset of rendering options \NC \NR +\LL \stoptabulate -\subsubsubsection{accent nodes} +\subsection{\nod {accent} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{accent} \NC \NR \NC \type{nucleus} \NC kernel node \NC base \NC \NR \NC \type{sub} \NC kernel node \NC subscript \NC \NR @@ -563,57 +683,65 @@ Some noads have an option field. The values in this bitset are common: \NC \type{accent} \NC kernel node \NC top accent \NC \NR \NC \type{bot_accent} \NC kernel node \NC bottom accent \NC \NR \NC \type{fraction} \NC number \NC larger step criterium (divided by 1000) \NC \NR +\LL \stoptabulate -\subsubsubsection{style nodes} +\subsection{\nod {style} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{style} \NC string \NC contains the style \NC \NR +\LL \stoptabulate There are eight possibilities for the string value: one of \type {display}, \type {text}, \type {script}, or \type {scriptscript}. Each of these can have be prefixed by \type {cramped}. -\subsubsubsection{choice nodes} +\subsection{\nod {choice} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{display} \NC node \NC list of display size alternatives \NC \NR \NC \type{text} \NC node \NC list of text size alternatives \NC \NR \NC \type{script} \NC node \NC list of scriptsize alternatives \NC \NR \NC \type{scriptscript} \NC node \NC list of scriptscriptsize alternatives \NC \NR +\LL \stoptabulate Warning: never assign a node list to the \type {display}, \type {text}, \type {script}, or \type {scriptscript} field unless you are sure its internal link -structure is correct, otherwise an error may be result. +structure is correct, otherwise an error can occur. 
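
As a hedged sketch of how these pieces fit together (the family number, character
code and the \type {ord} subtype name are just example values), a simple noad with
a \nod {math_char} nucleus can be put together at the \LUA\ end:

\starttyping
local mc = node.new("math_char")
mc.fam   = 0                        -- example family number
mc.char  = 0x41                     -- example character index
local a  = node.new("noad","ord")   -- assuming the subtype can be given by name
a.nucleus = mc
\stoptyping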
-\subsubsubsection{radical nodes} +\subsection{\nod {radical} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{radical} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{nucleus} \NC kernel node \NC base \NC \NR \NC \type{sub} \NC kernel node \NC subscript \NC \NR \NC \type{sup} \NC kernel node \NC superscript \NC \NR \NC \type{left} \NC delimiter node \NC \NC \NR -\NC \type{degree} \NC kernel node \NC only set by \type {\Uroot} \NC \NR +\NC \type{degree} \NC kernel node \NC only set by \lpr {Uroot} \NC \NR \NC \type{width} \NC number \NC required width \NC \NR \NC \type{options} \NC number \NC bitset of rendering options \NC \NR +\LL \stoptabulate Warning: never assign a node list to the \type {nucleus}, \type {sub}, \type {sup}, \type {left}, or \type {degree} field unless you are sure its internal -link structure is correct, otherwise an error may be result. +link structure is correct, otherwise an error can be triggered. -\subsubsubsection{fraction nodes} +\subsection{\nod {fraction} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC (optional) width of the fraction \NC \NR \NC \type{num} \NC kernel node \NC numerator \NC \NR @@ -622,16 +750,18 @@ link structure is correct, otherwise an error may be result. \NC \type{right} \NC delimiter node \NC right side symbol \NC \NR \NC \type{middle} \NC delimiter node \NC middle symbol \NC \NR \NC \type{options} \NC number \NC bitset of rendering options \NC \NR +\LL \stoptabulate Warning: never assign a node list to the \type {num}, or \type {denom} field unless you are sure its internal link structure is correct, otherwise an error -may be result. +can result. -\subsubsubsection{fence nodes} +\subsection{\nod {fence} nodes} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{subtype} \NC number \NC \showsubtypes{fence} \NC \NR \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{delim} \NC delimiter node \NC delimiter specification \NC \NR @@ -640,15 +770,18 @@ may be result. \NC \type{depth} \NC number \NC required depth \NC \NR \NC \type{options} \NC number \NC bitset of rendering options \NC \NR \NC \type{class} \NC number \NC spacing related class \NC \NR +\LL \stoptabulate Warning: some of these fields are used by the renderer and might get adapted in the process. -\subsection{whatsit nodes} +\stopsection + +\startsection[title={Front|-|end whatsits}] -Whatsit nodes come in many subtypes that you can ask for by running -\type {node.whatsits()}: +Whatsit nodes come in many subtypes that you can ask for them by running +\type {node.whatsits}: \startluacode for id, name in table.sortedpairs(node.whatsits()) do context.type(name) @@ -659,44 +792,53 @@ Whatsit nodes come in many subtypes that you can ask for by running \stopluacode . % period -\subsubsection{front|-|end whatsits} +Some of them are generic and independent of the output mode and others are +specific to the chosen backend: \DVI\ or \PDF. Here we discuss the generic +font|-|end nodes nodes. 
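
As a hedged side note: the table returned by \type {node.whatsits} maps subtype
numbers onto names, so a reverse lookup is easily built; the \type {pdf_literal}
name used in the comment is just an example.

\starttyping
local numbers = { }
for n, name in pairs(node.whatsits()) do
    numbers[name] = n            -- name -> whatsit subtype number
end
-- numbers["pdf_literal"] should agree with node.subtype("pdf_literal")
\stoptyping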
-\subsubsubsection{open whatsits} +\subsection{\whs {open}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{stream} \NC number \NC \TEX's stream id number \NC \NR \NC \type{name} \NC string \NC file name \NC \NR \NC \type{ext} \NC string \NC file extension \NC \NR \NC \type{area} \NC string \NC file area (this may become obsolete) \NC \NR +\LL \stoptabulate -\subsubsubsection{write whatsits} +\subsection{\whs {write}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{stream} \NC number \NC \TEX's stream id number \NC \NR \NC \type{data} \NC table \NC a table representing the token list to be written \NC \NR +\LL \stoptabulate -\subsubsubsection{close whatsits} +\subsection{\whs {close}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{stream} \NC number \NC \TEX's stream id number \NC \NR +\LL \stoptabulate -\subsubsubsection{user_defined whatsits} +\subsection{\whs {user_defined}} User|-|defined whatsit nodes can only be created and handled from \LUA\ code. In effect, they are an extension to the extension mechanism. The \LUATEX\ engine will simply step over such whatsits without ever looking at the contents. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{user_id} \NC number \NC id number \NC \NR \NC \type{type} \NC number \NC type of the value \NC \NR @@ -704,103 +846,129 @@ will simply step over such whatsits without ever looking at the contents. \NC \NC node \NC a node list \NC \NR \NC \NC string \NC a \LUA\ string \NC \NR \NC \NC table \NC a \LUA\ table \NC \NR +\LL \stoptabulate The \type {type} can have one of six distinct values. The number is the \ASCII\ -value if the first character if the type name (so you can use string.byte("l") +value if the first character of the type name (so you can use string.byte("l") instead of \type {108}). 
\starttabulate[|r|c|p|] -\BC value \BC meaning \BC explanation \NC \NR +\DB value \BC meaning \BC explanation \NC \NR +\TB \NC 97 \NC a \NC list of attributes (a node list) \NC \NR \NC 100 \NC d \NC a \LUA\ number \NC \NR \NC 108 \NC l \NC a \LUA\ value (table, number, boolean, etc) \NC \NR \NC 110 \NC n \NC a node list \NC \NR \NC 115 \NC s \NC a \LUA\ string \NC \NR \NC 116 \NC t \NC a \LUA\ token list in \LUA\ table form (a list of triplets) \NC \NR +\LL \stoptabulate -\subsubsubsection{save_pos whatsits} +\subsection{\whs {save_pos}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR +\LL \stoptabulate -\subsubsubsection{late_lua whatsits} +\subsection{\whs {late_lua}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\NC \type{attr} \NC node \NC list of attributes \NC \NR -\NC \type{data} \NC string \NC data to execute \NC \NR -\NC \type{string} \NC string \NC data to execute \NC \NR -\NC \type{name} \NC string \NC the name to use for \LUA\ error reporting \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB +\NC \type{attr} \NC node \NC list of attributes \NC \NR +\NC \type{data} \NC string or function \NC the to be written information stored as \LUA\ value \NC \NR +\NC \type{token} \NC string \NC the to be written information stored as token list \NC \NR +\NC \type{name} \NC string \NC the name to use for \LUA\ error reporting \NC \NR +\LL \stoptabulate The difference between \type {data} and \type {string} is that on assignment, the -\type {data} field is converted to a token list, cf. use as \type {\latelua}. The +\type {data} field is converted to a token list, cf.\ use as \lpr {latelua}. The \type {string} version is treated as a literal string. -\subsubsection{\DVI\ backend whatsits} +\stopsection -\subsubsection{special whatsits} +\startsection[title={\DVI\ backend whatsits}] + +\subsection{\whs {special}} + +There is only one \DVI\ backend whatsit, and it just flushes its content to the +output file. 
\starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR -\NC \type{data} \NC string \NC the \type {\special} information \NC \NR +\NC \type{data} \NC string \NC the \prm {special} information \NC \NR +\LL \stoptabulate -\subsubsection{\PDF\ backend whatsits} +\stopsection + +\startsection[title={\PDF\ backend whatsits}] -\subsubsubsection{pdf_literal whatsits} +\subsection{\whs {pdf_literal}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\NC \type{attr} \NC node \NC list of attributes \NC \NR -\NC \type{mode} \NC number \NC the \quote {mode} setting of this literal \NC \NR -\NC \type{data} \NC string \NC the \type {\pdfliteral} information \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB +\NC \type{attr} \NC node \NC list of attributes \NC \NR +\NC \type{mode} \NC number \NC the \quote {mode} setting of this literal \NC \NR +\NC \type{data} \NC string \NC the to be written information stored as \LUA\ string \NC \NR +\NC \type{token} \NC string \NC the to be written information stored as token list \NC \NR +\LL \stoptabulate Possible mode values are: -\starttabulate[|l|p|] -\BC value \BC keyword \NC \NR +\starttabulate[|c|p|] +\DB value \BC keyword \NC \NR +\TB \NC 0 \NC \type{origin} \NC \NR \NC 1 \NC \type{page} \NC \NR \NC 2 \NC \type{direct} \NC \NR \NC 3 \NC \type{raw} \NC \NR \NC 4 \NC \type{text} \NC \NR +\LL \stoptabulate -The higher the number, the less checking and the more you can run into troubles. +The higher the number, the less checking and the more you can run into trouble. Especially the \type {raw} variant can produce bad \PDF\ so you can best check what you generate. -\subsubsubsection{pdf_refobj whatsits} +\subsection{\whs {pdf_refobj}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{objnum} \NC number \NC the referenced \PDF\ object number \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_annot whatsits} +\subsection{\whs {pdf_annot}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC the width (not used in calculations) \NC \NR \NC \type{height} \NC number \NC the height (not used in calculations) \NC \NR \NC \type{depth} \NC number \NC the depth (not used in calculations) \NC \NR \NC \type{objnum} \NC number \NC the referenced \PDF\ object number \NC \NR \NC \type{data} \NC string \NC the annotation data \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_start_link whatsits} +\subsection{\whs {pdf_start_link}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC the width (not used in calculations) \NC \NR \NC \type{height} \NC number \NC the height (not used in calculations) \NC \NR @@ -808,19 +976,23 @@ what you generate. 
\NC \type{objnum} \NC number \NC the referenced \PDF\ object number \NC \NR \NC \type{link_attr} \NC table \NC the link attribute token list \NC \NR \NC \type{action} \NC node \NC the action to perform \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_end_link whatsits} +\subsection{\whs {pdf_end_link}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_dest whatsits} +\subsection{\whs {pdf_dest}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC the width (not used in calculations) \NC \NR \NC \type{height} \NC number \NC the height (not used in calculations) \NC \NR @@ -831,45 +1003,54 @@ what you generate. \NC \type{dest_type} \NC number \NC type of destination \NC \NR \NC \type{xyz_zoom} \NC number \NC the zoom factor (times 1000) \NC \NR \NC \type{objnum} \NC number \NC the \PDF\ object number \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_action whatsits} +\subsection{\whs {pdf_action}} -These are a special kind of item that only appears inside \PDF\ start link +These are a special kind of items that only appear inside \PDF\ start link objects. \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{action_type} \NC number \NC the kind of action involved \NC \NR \NC \type{action_id} \NC number or string \NC token list reference or string \NC \NR \NC \type{named_id} \NC number \NC the index of the destination \NC \NR \NC \type{file} \NC string \NC the target filename \NC \NR \NC \type{new_window} \NC number \NC the window state of the target \NC \NR \NC \type{data} \NC string \NC the name of the destination \NC \NR +\LL \stoptabulate Valid action types are: \starttabulate[|l|l|] -\NC 0 \NC \type{page} \NC \NR -\NC 1 \NC \type{goto} \NC \NR -\NC 2 \NC \type{thread} \NC \NR -\NC 3 \NC \type{user} \NC \NR +\DB value \BC meaning \NC \NR +\TB +\NC 0 \NC \type{page} \NC \NR +\NC 1 \NC \type{goto} \NC \NR +\NC 2 \NC \type{thread} \NC \NR +\NC 3 \NC \type{user} \NC \NR +\LL \stoptabulate Valid window types are: \starttabulate[|l|l|] -\NC 0 \NC \type{notset} \NC \NR -\NC 1 \NC \type{new} \NC \NR -\NC 2 \NC \type{nonew} \NC \NR +\DB value \BC meaning \NC \NR +\TB +\NC 0 \NC \type{notset} \NC \NR +\NC 1 \NC \type{new} \NC \NR +\NC 2 \NC \type{nonew} \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_thread whatsits} +\subsection{\whs {pdf_thread}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC the width (not used in calculations) \NC \NR \NC \type{height} \NC number \NC the height (not used in calculations) \NC \NR @@ -878,12 +1059,14 @@ Valid window types are: \NC \type{tread_id} \NC number \NC the thread id \NC \NR \NC \NC string \NC the thread name \NC \NR \NC \type{thread_attr} \NC number \NC extra thread information \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_start_thread whatsits} +\subsection{\whs {pdf_start_thread}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{width} \NC number \NC 
the width (not used in calculations) \NC \NR \NC \type{height} \NC number \NC the height (not used in calculations) \NC \NR @@ -892,48 +1075,63 @@ Valid window types are: \NC \type{tread_id} \NC number \NC the thread id \NC \NR \NC \NC string \NC the thread name \NC \NR \NC \type{thread_attr} \NC number \NC extra thread information \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_end_thread whatsits} +\subsection{\whs {pdf_end_thread}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_colorstack whatsits} +\subsection{\whs {pdf_colorstack}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{stack} \NC number \NC colorstack id number \NC \NR \NC \type{command} \NC number \NC command to execute \NC \NR \NC \type{data} \NC string \NC data \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_setmatrix whatsits} +\subsection{\whs {pdf_setmatrix}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR \NC \type{data} \NC string \NC data \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_save whatsits} +\subsection{\whs {pdf_save}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR +\LL \stoptabulate -\subsubsubsection{pdf_restore whatsits} +\subsection{\whs {pdf_restore}} \starttabulate[|l|l|p|] -\BC field \BC type \BC explanation \NC \NR +\DB field \BC type \BC explanation \NC \NR +\TB \NC \type{attr} \NC node \NC list of attributes \NC \NR +\LL \stoptabulate -\section{The \type {node} library} +\stopsection + +\startsection[title={The \type {node} library}][library=node] + +\subsection {Introduction} The \type {node} library contains functions that facilitate dealing with (lists of) nodes and their values. They allow you to create, alter, copy, delete, and @@ -943,8 +1141,7 @@ insert \LUATEX\ node objects, the core objects within the typesetter. \type {luatex.node}. The various parts within a node can be accessed using named fields. -Each node has at least the three fields \type {next}, \type {id}, and \type -{subtype}: +Each node has at least the three fields \type {next}, \type {id}, and \type {subtype}: \startitemize[intro] @@ -969,10 +1166,9 @@ Each node has at least the three fields \type {next}, \type {id}, and \type \stopitemize The other available fields depend on the \type {id} (and for \quote {whatsits}, -the \type {subtype}) of the node. Further details on the various fields and their -meanings are given in~\in{chapter}[nodes]. +the \type {subtype}) of the node. -Support for \type {unset} (alignment) nodes is partial: they can be queried and +Support for \nod {unset} (alignment) nodes is partial: they can be queried and modified from \LUA\ code, but not created. Nodes can be compared to each other, but: you are actually comparing indices into @@ -987,76 +1183,90 @@ call the node freeing functions yourself when you are no longer in need of a nod (list). 
Nodes form linked lists without reference counting, so you have to be careful that when control returns back to \LUATEX\ itself, you have not deleted nodes that are still referenced from a \type {next} pointer elsewhere, and that -you did not create nodes that are referenced more than once. +you did not create nodes that are referenced more than once. Normally the setters +and getters handle this for you. There are statistics available with regards to the allocated node memory, which can be handy for tracing. -\subsection{Node handling functions} +\subsection{\type {is_node}} -\subsubsection{\type {node.is_node}} +\topicindex {nodes+functions} + +\libindex {is_node} \startfunctioncall -<boolean> t = +<boolean|integer> t = node.is_node(<any> item) \stopfunctioncall -This function returns true if the argument is a userdata object of -type \type {<node>}. +This function returns a number (the internal index of the node) if the argument +is a userdata object of type \type {<node>} and false when no node is passed. -\subsubsection{\type {node.types}} +\subsection{\type {types} and \type {whatsits}} + +\libindex {types} +\libindex {whatsits} + +This function returns an array that maps node id numbers to node type strings, +providing an overview of the possible top|-|level \type {id} types. \startfunctioncall <table> t = node.types() \stopfunctioncall -This function returns an array that maps node id numbers to node type strings, -providing an overview of the possible top|-|level \type {id} types. - -\subsubsection{\type {node.whatsits}} +\TEX's \quote {whatsits} all have the same \type {id}. The various subtypes are +defined by their \type {subtype} fields. The function is much like \type {types}, +except that it provides an array of \type {subtype} mappings. \startfunctioncall <table> t = node.whatsits() \stopfunctioncall -\TEX's \quote{whatsits} all have the same \type {id}. The various subtypes are -defined by their \type {subtype} fields. The function is much like \type -{node.types}, except that it provides an array of \type {subtype} mappings. +\subsection{\type {id}} -\subsubsection{\type {node.id}} +\libindex{id} + +This converts a single type name to its internal numeric representation. \startfunctioncall <number> id = node.id(<string> type) \stopfunctioncall -This converts a single type name to its internal numeric representation. +\subsection{\type {type} and \type {subtype}} -\subsubsection{\type {node.subtype}} +\libindex {type} +\libindex {subtype} + +In the argument is a number, then the next function converts an internal numeric +representation to an external string representation. Otherwise, it will return +the string \type {node} if the object represents a node, and \type {nil} +otherwise. \startfunctioncall -<number> subtype = - node.subtype(<string> type) +<string> type = + node.type(<any> n) \stopfunctioncall -This converts a single whatsit name to its internal numeric representation (\type -{subtype}). - -\subsubsection{\type {node.type}} +This next one converts a single whatsit name to its internal numeric +representation (\type {subtype}). \startfunctioncall -<string> type = - node.type(<any> n) +<number> subtype = + node.subtype(<string> type) \stopfunctioncall -In the argument is a number, then this function converts an internal numeric -representation to an external string representation. Otherwise, it will return -the string \type {node} if the object represents a node, and \type {nil} -otherwise. 
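
A few hedged examples of these conversions, using node type and whatsit names that
occur elsewhere in this chapter:

\starttyping
local g = node.id("glyph")            -- the numeric id of glyph nodes
print(node.type(g))                   -- prints: glyph
print(node.subtype("user_defined"))   -- the whatsit subtype number
\stoptyping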
+\subsection{\type {fields}} + +\libindex {fields} -\subsubsection{\type {node.fields}} +This function returns an array of valid field names for a particular type of +node. If you want to get the valid fields for a \quote {whatsit}, you have to +supply the second argument also. In other cases, any given second argument will +be silently ignored. \startfunctioncall <table> t = @@ -1065,24 +1275,29 @@ otherwise. node.fields(<number> id, <number> subtype) \stopfunctioncall -This function returns an array of valid field names for a particular type of -node. If you want to get the valid fields for a \quote {whatsit}, you have to -supply the second argument also. In other cases, any given second argument will -be silently ignored. +The function accepts string \type {id} and \type {subtype} values as well. -This function accepts string \type {id} and \type {subtype} values as well. +\subsection{\type {has_field}} -\subsubsection{\type {node.has_field}} +\libindex {has_field} + +This function returns a boolean that is only true if \type {n} is +actually a node, and it has the field. \startfunctioncall <boolean> t = node.has_field(<node> n, <string> field) \stopfunctioncall -This function returns a boolean that is only true if \type {n} is -actually a node, and it has the field. +\subsection{\type {new}} -\subsubsection{\type {node.new}} +\libindex{new} + +The \type {new} function creates a new node. All its fields are initialized to +either zero or \type {nil} except for \type {id} and \type {subtype}. Instead of +numbers you can also use strings (names). If you create a new \nod {whatsit} node +the second argument is required. As with all node functions, this function +creates a node at the \TEX\ level. \startfunctioncall <node> n = @@ -1091,15 +1306,16 @@ actually a node, and it has the field. node.new(<number> id, <number> subtype) \stopfunctioncall -Creates a new node. All of the new node's fields are initialized to either zero -or \type {nil} except for \type {id} and \type {subtype} (if supplied). If you -want to create a new whatsit, then the second argument is required, otherwise it -need not be present. As with all node functions, this function creates a node on -the \TEX\ level. +\subsection{\type {free}, \type {flush_node} and \type {flush_list}} -This function accepts string \type {id} and \type {subtype} values as well. +\libindex{free} +\libindex{flush_node} +\libindex{flush_list} -\subsubsection{\type {node.free} and \type {node.flush_node}} +The next one the node \type {n} from \TEX's memory. Be careful: no checks are +done on whether this node is still pointed to from a register or some \type +{next} field: it is up to you to make sure that the internal data structures +remain correct. \startfunctioncall <node> next = @@ -1107,35 +1323,33 @@ This function accepts string \type {id} and \type {subtype} values as well. flush_node(<node> n) \stopfunctioncall -Removes the node \type {n} from \TEX's memory. Be careful: no checks are done on -whether this node is still pointed to from a register or some \type {next} field: -it is up to you to make sure that the internal data structures remain correct. - The \type {free} function returns the next field of the freed node, while the \type {flush_node} alternative returns nothing. -\subsubsection{\type {node.flush_list}} +A list starting with node \type {n} can be flushed from \TEX's memory too. 
Be +careful: no checks are done on whether any of these nodes is still pointed to +from a register or some \type {next} field: it is up to you to make sure that the +internal data structures remain correct. \startfunctioncall node.flush_list(<node> n) \stopfunctioncall -Removes the node list \type {n} and the complete node list following \type {n} -from \TEX's memory. Be careful: no checks are done on whether any of these nodes -is still pointed to from a register or some \type {next} field: it is up to you -to make sure that the internal data structures remain correct. +\subsection{\type {copy} and \type {copy_list}} -\subsubsection{\type {node.copy}} +\libindex{copy} +\libindex{copy_list} + +This creates a deep copy of node \type {n}, including all nested lists as in the case +of a hlist or vlist node. Only the \type {next} field is not copied. \startfunctioncall <node> m = node.copy(<node> n) \stopfunctioncall -Creates a deep copy of node \type {n}, including all nested lists as in the case -of a hlist or vlist node. Only the \type {next} field is not copied. - -\subsubsection{\type {node.copy_list}} +A deep copy of the node list that starts at \type {n} can be created too. If +\type {m} is also given, the copy stops just before node \type {m}. \startfunctioncall <node> m = @@ -1144,42 +1358,37 @@ of a hlist or vlist node. Only the \type {next} field is not copied. node.copy_list(<node> n, <node> m) \stopfunctioncall -Creates a deep copy of the node list that starts at \type {n}. If \type {m} is -also given, the copy stops just before node \type {m}. +Note that you cannot copy attribute lists this way. However, there is normally no +need to copy attribute lists as when you do assignments to the \type {attr} field +or make changes to specific attributes, the needed copying and freeing takes +place automatically. + +\subsection{\type {prev} and \type{next}} -Note that you cannot copy attribute lists this way, specialized functions for -dealing with attribute lists will be provided later but are not there yet. -However, there is normally no need to copy attribute lists as when you do -assignments to the \type {attr} field or make changes to specific attributes, the -needed copying and freeing takes place automatically. +\libindex{prev} +\libindex{next} -\subsubsection{\type {node.next}} +These returns the node preceding or following the given node, or \type {nil} if +there is no such node. \startfunctioncall <node> m = node.next(<node> n) -\stopfunctioncall - -Returns the node following this node, or \type {nil} if there is no such node. - -\subsubsection{\type {node.prev}} - -\startfunctioncall <node> m = node.prev(<node> n) \stopfunctioncall -Returns the node preceding this node, or \type {nil} if there is no such node. +\subsection{\type {current_attr}} + +\libindex{current_attr} -\subsubsection{\type {node.current_attr}} +This returns the currently active list of attributes, if there is one. \startfunctioncall <node> m = node.current_attr() \stopfunctioncall -Returns the currently active list of attributes, if there is one. - The intended usage of \type {current_attr} is as follows: \starttyping @@ -1209,7 +1418,16 @@ attribute list, not a copy thereof. Therefore, changing any of the attributes in the list will change these values for all nodes that have the current attribute list assigned to them. 
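
The following sketch ties these allocation helpers together; the use of \type
{font.current()} and the character \type {x} are only examples:

\starttyping
local g = node.new("glyph")       -- a fresh glyph node
g.font  = font.current()          -- example: the current font id
g.char  = string.byte("x")
g.attr  = node.current_attr()     -- inherit the active attribute list

local c = node.copy(g)            -- a deep copy, next is not copied

node.flush_node(c)                -- free again when the nodes are not
node.flush_node(g)                -- written out or stored elsewhere
\stoptyping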
-\subsubsection{\type {node.hpack}} +\subsection{\type {hpack}} + +\libindex {hpack} + +This function creates a new hlist by packaging the list that begins at node \type +{n} into a horizontal box. With only a single argument, this box is created using +the natural width of its components. In the three argument form, \type {info} +must be either \type {additional} or \type {exactly}, and \type {w} is the +additional (\type {\hbox spread}) or exact (\type {\hbox to}) width to be used. +The second return value is the badness of the generated box. \startfunctioncall <node> h, <number> b = @@ -1220,21 +1438,22 @@ list assigned to them. node.hpack(<node> n, <number> w, <string> info, <string> dir) \stopfunctioncall -This function creates a new hlist by packaging the list that begins at node \type -{n} into a horizontal box. With only a single argument, this box is created using -the natural width of its components. In the three argument form, \type {info} -must be either \type {additional} or \type {exactly}, and \type {w} is the -additional (\type {\hbox spread}) or exact (\type {\hbox to}) width to be used. The -second return value is the badness of the generated box. +Caveat: there can be unexpected side|-|effects to this function, like updating +some of the \prm {marks} and \type {\inserts}. Also note that the content of +\type {h} is the original node list \type {n}: if you call \type {node.free(h)} +you will also free the node list itself, unless you explicitly set the \type +{list} field to \type {nil} beforehand. And in a similar way, calling \type +{node.free(n)} will invalidate \type {h} as well! -Caveat: at this moment, there can be unexpected side|-|effects to this function, -like updating some of the \type {\marks} and \type {\inserts}. Also note that the -content of \type {h} is the original node list \type {n}: if you call \type -{node.free(h)} you will also free the node list itself, unless you explicitly set -the \type {list} field to \type {nil} beforehand. And in a similar way, calling -\type {node.free(n)} will invalidate \type {h} as well! +\subsection{\type {vpack}} -\subsubsection{\type {node.vpack}} +\libindex {vpack} + +This function creates a new vlist by packaging the list that begins at node \type +{n} into a vertical box. With only a single argument, this box is created using +the natural height of its components. In the three argument form, \type {info} +must be either \type {additional} or \type {exactly}, and \type {w} is the +additional (\type {\vbox spread}) or exact (\type {\vbox to}) height to be used. \startfunctioncall <node> h, <number> b = @@ -1245,17 +1464,26 @@ the \type {list} field to \type {nil} beforehand. And in a similar way, calling node.vpack(<node> n, <number> w, <string> info, <string> dir) \stopfunctioncall -This function creates a new vlist by packaging the list that begins at node \type -{n} into a vertical box. With only a single argument, this box is created using -the natural height of its components. In the three argument form, \type {info} -must be either \type {additional} or \type {exactly}, and \type {w} is the -additional (\type {\vbox spread}) or exact (\type {\vbox to}) height to be used. +The second return value is the badness of the generated box. See the description +of \type {hpack} for a few memory allocation caveats. -The second return value is the badness of the generated box. +\subsection{\type {prepend_prevdepth}} -See the description of \type {node.hpack()} for a few memory allocation caveats. 
+\libindex {prepend_prevdepth} -\subsubsection{\type {node.dimensions}, \type {node.rangedimensions}} +This function is somewhat special in the sense that it is an experimental helper +that adds the interlinespace to a line keeping the baselineskip and lineskip into +account. + +\startfunctioncall +<node> n, <number> delta = + node.prepend_prevdepth(<node> n,<number> prevdepth) +\stopfunctioncall + +\subsection{\type {dimensions} and \type {rangedimensions}} + +\libindex{dimensions} +\libindex{rangedimensions} \startfunctioncall <number> w, <number> h, <number> d = @@ -1320,7 +1548,9 @@ cases: node.rangedimensions(<node> parent, <node> first, <node> last) \stopfunctioncall -\subsubsection{\type {node.mlist_to_hlist}} +\subsection{\type {mlist_to_hlist}} + +\libindex {mlist_to_hlist} \startfunctioncall <node> h = @@ -1329,9 +1559,9 @@ cases: This runs the internal mlist to hlist conversion, converting the math list in \type {n} into the horizontal list \type {h}. The interface is exactly the same -as for the callback \type {mlist_to_hlist}. +as for the callback \cbk {mlist_to_hlist}. -\subsubsection{\type {node.slide}} +\subsection{\type {slide}} \startfunctioncall <node> m = @@ -1342,7 +1572,9 @@ Returns the last node of the node list that starts at \type {n}. As a side|-|effect, it also creates a reverse chain of \type {prev} pointers between nodes. -\subsubsection{\type {node.tail}} +\subsection{\type {tail}} + +\libindex {tail} \startfunctioncall <node> m = @@ -1351,7 +1583,10 @@ nodes. Returns the last node of the node list that starts at \type {n}. -\subsubsection{\type {node.length}} +\subsection{\type {length} and type {count}} + +\libindex {length} +\libindex {count} \startfunctioncall <number> i = @@ -1364,8 +1599,6 @@ Returns the number of nodes contained in the node list that starts at \type {n}. If \type {m} is also supplied it stops at \type {m} instead of at the end of the list. The node \type {m} is not counted. -\subsubsection{\type {node.count}} - \startfunctioncall <number> i = node.count(<number> id, <node> n) @@ -1376,14 +1609,29 @@ list. The node \type {m} is not counted. Returns the number of nodes contained in the node list that starts at \type {n} that have a matching \type {id} field. If \type {m} is also supplied, counting stops at \type {m} instead of at the end of the list. The node \type {m} is not -counted. +counted. This function also accept string \type {id}'s. + +\subsection{\type {is_char} and \type {is_glyph}} -This function also accept string \type {id}'s. +\libindex {is_char} +\libindex {is_glyph} -\subsubsection{\type {node.traverse}} +The subtype of a glyph node signals if the glyph is already turned into a character reference +or not. \startfunctioncall -<node> t = +<boolean> b = + node.is_char(<node> n) +<boolean> b = + node.is_glyph(<node> n) +\stopfunctioncall + +\subsection{\type {traverse}} + +\libindex {traverse} + +\startfunctioncall +<node> t, id, subtype = node.traverse(<node> n) \stopfunctioncall @@ -1426,10 +1674,12 @@ pointers remain valid. If the above is unclear to you, see the section \quote {For Statement} in the \LUA\ Reference Manual. -\subsubsection{\type {node.traverse_id}} +\subsection{\type {traverse_id}} + +\libindex {traverse_id} \startfunctioncall -<node> t = +<node> t, subtype = node.traverse_id(<number> id, <node> n) \stopfunctioncall @@ -1454,17 +1704,45 @@ See the previous section for details. 
The change is in the local function \type end \stoptyping -\subsubsection{\type {node.traverse_char}} +\subsection{\type {traverse_char} and \type {traverse_glyph}} + +\libindex {traverse_char} +\libindex {traverse_glyph} -This iterators loops over the glyph nodes in a list. Only nodes with a subtype -less than 256 are seen. +The \type{traverse_char} iterator loops over the \nod {glyph} nodes in a list. +Only nodes with a subtype less than 256 are seen. \startfunctioncall -<node> n = +<node> n, font, char = node.traverse_char(<node> n) \stopfunctioncall -\subsubsection{\type {node.has_glyph}} +The \type{traverse_glyph} iterator loops over a list and returns the list and +filters all glyphs: + +\startfunctioncall +<node> n, font, char = + node.traverse_glyph(<node> n) +\stopfunctioncall + +\subsection{\type {traverse_list}} + +\libindex {traverse_list} + +This iterator loops over the \nod {hlist} and \nod {vlist} nodes in a list. + +\startfunctioncall +<node> n, id, subtype, list = + node.traverse_list(<node> n) +\stopfunctioncall + +The four return values can save some time compared to fetching these fields but +in practice you seldom need them all. So consider it a (side effect of +experimental) convenience. + +\subsection{\type {has_glyph}} + +\libindex {has_glyph} This function returns the first glyph or disc node in the given list: @@ -1473,7 +1751,9 @@ This function returns the first glyph or disc node in the given list: node.has_glyph(<node> n) \stopfunctioncall -\subsubsection{\type {node.end_of_math}} +\subsection{\type {end_of_math}} + +\libindex {end_of_math} \startfunctioncall <node> t = @@ -1481,11 +1761,13 @@ This function returns the first glyph or disc node in the given list: \stopfunctioncall Looks for and returns the next \type {math_node} following the \type {start}. If -the given node is a math endnode this helper return that node, else it follows -the list and return the next math endnote. If no such node is found nil is +the given node is a math end node this helper returns that node, else it follows +the list and returns the next math endnote. If no such node is found nil is returned. -\subsubsection{\type {node.remove}} +\subsection{\type {remove}} + +\libindex {remove} \startfunctioncall <node> head, current = @@ -1501,7 +1783,9 @@ no such node). The returned \type {head} is more important, because if the function is called with \type {current} equal to \type {head}, it will be changed. -\subsubsection{\type {node.insert_before}} +\subsection{\type {insert_before}} + +\libindex {insert_before} \startfunctioncall <node> head, new = @@ -1515,7 +1799,9 @@ mutated) \type {head} and the node \type {new}, set up to be part of the list (with correct \type {next} field). If \type {head} is initially \type {nil}, it will become \type {new}. -\subsubsection{\type {node.insert_after}} +\subsection{\type {insert_after}} + +\libindex {insert_after} \startfunctioncall <node> head, new = @@ -1528,7 +1814,9 @@ following \type {head}. It is your responsibility to make sure that \type the node \type {new}, set up to be part of the list (with correct \type {next} field). If \type {head} is initially \type {nil}, it will become \type {new}. -\subsubsection{\type {node.first_glyph}} +\subsection{\type {first_glyph}} + +\libindex {first_glyph} \startfunctioncall <node> n = @@ -1542,7 +1830,9 @@ with a subtype indicating it is a glyph, or \type {nil}. If \type {m} is given, processing stops at (but including) that node, otherwise processing stops at the end of the list. 
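Here is a contrived sketch of how these helpers can work together; the
character, the kern amount and the function name are arbitrary choices. It puts
a small user kern after every \quote {a} in a list:

\starttyping
\directlua {
    local function spread_a(head)
        for n, font, char in node.traverse_char(head) do
            if char == string.byte("a") then
                local k = node.new("kern", 1) -- subtype 1: user kern
                k.kern = 65536                -- 1pt
                head = node.insert_after(head, n, k)
            end
        end
        return head
    end
}
\stoptyping

A real application would hook such a function into a callback and, as shown
here, return the (possibly new) head of the list.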
-\subsubsection{\type {node.ligaturing}} +\subsection{\type {ligaturing}} + +\libindex {ligaturing} \startfunctioncall <node> h, <node> t, <boolean> success = @@ -1555,7 +1845,9 @@ Apply \TEX-style ligaturing to the specified nodelist. The tail node \type {m} i optional. The two returned nodes \type {h} and \type {t} are the new head and tail (both \type {n} and \type {m} can change into a new ligature). -\subsubsection{\type {node.kerning}} +\subsection{\type {kerning}} + +\libindex {kerning} \startfunctioncall <node> h, <node> t, <boolean> success = @@ -1569,7 +1861,10 @@ optional. The two returned nodes \type {h} and \type {t} are the head and tail (either one of these can be an inserted kern node, because special kernings with word boundaries are possible). -\subsubsection{\type {node.unprotect_glyphs} and \type {node.unprotect_glyph}} +\subsection{\type {unprotect_glyph[s]}} + +\libindex {unprotect_glyphs} +\libindex {unprotect_glyph} \startfunctioncall node.unprotect_glyph(<node> n) @@ -1578,9 +1873,12 @@ node.unprotect_glyphs(<node> n,[<node> n]) Subtracts 256 from all glyph node subtypes. This and the next function are helpers to convert from \type {characters} to \type {glyphs} during node -processing. The second argument is option and indicates the end of a range. +processing. The second argument is optional and indicates the end of a range. -\subsubsection{\type {node.protect_glyphs} and \type {node.protect_glyph}} +\subsection{\type {protect_glyph[s]}} + +\libindex {protect_glyphs} +\libindex {protect_glyph} \startfunctioncall node.protect_glyph(<node> n) @@ -1591,9 +1889,11 @@ Adds 256 to all glyph node subtypes in the node list starting at \type {n}, except that if the value is 1, it adds only 255. The special handling of 1 means that \type {characters} will become \type {glyphs} after subtraction of 256. A single character can be marked by the singular call. The second argument is -option and indicates the end of a range. +optional and indicates the end of a range. + +\subsection{\type {last_node}} -\subsubsection{\type {node.last_node}} +\libindex {last_node} \startfunctioncall <node> n = @@ -1603,17 +1903,21 @@ option and indicates the end of a range. This function pops the last node from \TEX's \quote{current list}. It returns that node, or \type {nil} if the current list is empty. -\subsubsection{\type {node.write}} +\subsection{\type {write}} + +\libindex {write} \startfunctioncall node.write(<node> n) \stopfunctioncall -This is an experimental function that will append a node list to \TEX's \quote -{current list} The node list is not deep|-|copied! There is no error checking -either! +This function that will append a node list to \TEX's \quote {current list}. The +node list is not deep|-|copied! There is no error checking either! You mignt need +to enforce horizontal mode in order for this to work as expected. + +\subsection{\type {protrusion_skippable}} -\subsubsection{\type {node.protrusion_skippable}} +\libindex {protrusion_skippable} \startfunctioncall <boolean> skippable = @@ -1623,9 +1927,13 @@ either! Returns \type {true} if, for the purpose of line boundary discovery when character protrusion is active, this node can be skipped. -\subsection{Glue handling} +\stopsection + +\startsection[title={Glue handling}][library=node] -\subsubsection{\type {node.setglue}} +\subsection{\type {setglue}} + +\libindex {setglue} You can set the properties of a glue in one go. If you pass no values, the glue will become a zero glue. 
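The following sketch shows how the list processing calls described above can be
combined when you handle a list yourself; keep in mind that this bypasses
whatever a macro package does in its font handler, so it is an illustration
only:

\starttyping
\directlua {
    local function process(head)
        local h = node.ligaturing(head) -- also returns tail and success
        h = node.kerning(h)
        node.protect_glyphs(h)          -- characters become final glyphs
        return h
    end
}
\stoptyping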
@@ -1645,9 +1953,11 @@ will only adapt the width and shrink. When a list node is passed, you set the glue, order and sign instead. -\subsubsection{\type {node.getglue}} +\subsection{\type {getglue}} + +\libindex {getglue} -The next call will return 5 values (or northing when no glue is passed). +The next call will return 5 values or nothing when no glue is passed. \startfunctioncall <integer> width, <integer> stretch, <integer> shrink, <integer> stretch_order, @@ -1660,7 +1970,9 @@ with \type {tex.get}). When a list node is passed, you get back the glue that is set, the order of that glue and the sign. -\subsubsection{\type {node.is_zero_glue}} +\subsection{\type {is_zero_glue}} + +\libindex {is_zero_glue} This function returns \type {true} when the width, stretch and shrink properties are zero. @@ -1670,13 +1982,61 @@ are zero. node.is_zero_glue(<node> n) \stopfunctioncall -\subsection{Attribute handling} +\stopsection + +\startsection[title={Attribute handling}][library=node] + +\subsection{Attributes} + +\topicindex {attributes} + +The newly introduced attribute registers are non|-|trivial, because the value +that is attached to a node is essentially a sparse array of key|-|value pairs. It +is generally easiest to deal with attribute lists and attributes by using the +dedicated functions in the \type {node} library, but for completeness, here is +the low|-|level interface. Attributes appear as linked list of userdata objects in the \type {attr} field of individual nodes. They can be handled individually, but it is much safer and more efficient to use the dedicated functions associated with them. -\subsubsection{\type {node.has_attribute}} +\subsection{\nod {attribute_list} nodes} + +\topicindex {nodes+attributes} + +An \nod {attribute_list} item is used as a head pointer for a list of attribute +items. It has only one user-visible field: + +\starttabulate[|l|l|p|] +\DB field \BC type \BC explanation \NC \NR +\TB +\NC \type{next} \NC node \NC pointer to the first attribute \NC \NR +\LL +\stoptabulate + +\subsection{\nod {attr} nodes} + +A normal node's attribute field will point to an item of type \nod +{attribute_list}, and the \type {next} field in that item will point to the first +defined \quote {attribute} item, whose \type {next} will point to the second +\quote {attribute} item, etc. + +\starttabulate[|l|l|p|] +\DB field \BC type \BC explanation \NC \NR +\TB +\NC \type{next} \NC node \NC pointer to the next attribute \NC \NR +\NC \type{number} \NC number \NC the attribute type id \NC \NR +\NC \type{value} \NC number \NC the attribute value \NC \NR +\LL +\stoptabulate + +As mentioned it's better to use the official helpers rather than edit these +fields directly. For instance the \type {prev} field is used for other purposes +and there is no double linked list. + +\subsection{\type {has_attribute}} + +\libindex {has_attribute} \startfunctioncall <number> v = @@ -1689,7 +2049,9 @@ Tests if a node has the attribute with number \type {id} set. If \type {val} is also supplied, also tests if the value matches \type {val}. It returns the value, or, if no match is found, \type {nil}. -\subsubsection{\type {node.get_attribute}} +\subsection{\type {get_attribute}} + +\libindex {get_attribute} \startfunctioncall <number> v = @@ -1697,9 +2059,12 @@ or, if no match is found, \type {nil}. \stopfunctioncall Tests if a node has an attribute with number \type {id} set. It returns the -value, or, if no match is found, \type {nil}. +value, or, if no match is found, \type {nil}. 
If no \type {id} is given then the +zero attributes is assumed. + +\subsection{\type {find_attribute}} -\subsubsection{\type {node.find_attribute}} +\libindex {find_attribute} \startfunctioncall <number> v, <node> n = @@ -1709,7 +2074,9 @@ value, or, if no match is found, \type {nil}. Finds the first node that has attribute with number \type {id} set. It returns the value and the node if there is a match and otherwise nothing. -\subsubsection{\type {node.set_attribute}} +\subsection{\type {set_attribute}} + +\libindex {set_attribute} \startfunctioncall node.set_attribute(<node> n, <number> id, <number> val) @@ -1718,7 +2085,9 @@ node.set_attribute(<node> n, <number> id, <number> val) Sets the attribute with number \type {id} to the value \type {val}. Duplicate assignments are ignored. -\subsubsection{\type {node.unset_attribute}} +\subsection{\type {unset_attribute}} + +\libindex {unset_attribute} \startfunctioncall <number> v = @@ -1734,7 +2103,9 @@ attributes or attribute|-|value pairs are ignored. If the attribute was actually deleted, returns its old value. Otherwise, returns \type {nil}. -\subsubsection{\type {node.slide}} +\subsection{\type {slide}} + +\libindex {slide} This helper makes sure that the node lists is double linked and returns the found tail node. @@ -1751,13 +2122,16 @@ pointers but your other callbacks might expect proper \type {prev} pointers too. Future versions of \LUATEX\ can add more checking but this will not influence usage. -\subsubsection{\type {node.check_discretionary} and \type {node.check_discretionaries}} +\subsection{\type {check_discretionary}, \type {check_discretionaries}} + +\libindex{check_discretionary} +\libindex{check_discretionaries} When you fool around with disc nodes you need to be aware of the fact that they have a special internal data structure. As long as you reassign the fields when you have extended the lists it's ok because then the tail pointers get updated, -but when you add to list without reassigning you might end up in troubles when -the linebreak routien kicks in. You can call this function to check the list for +but when you add to list without reassigning you might end up in trouble when +the linebreak routine kicks in. You can call this function to check the list for issues with disc nodes. \startfunctioncall @@ -1768,7 +2142,9 @@ node.check_discretionaries(<node> head) The plural variant runs over all disc nodes in a list, the singular variant checks one node only (it also checks if the node is a disc node). -\subsubsection{\type {node.flatten_discretionaries}} +\subsection{\type {flatten_discretionaries}} + +\libindex {flatten_discretionaries} This function will remove the discretionaries in the list and inject the replace field when set. @@ -1777,45 +2153,51 @@ field when set. <node> head, count = node.flatten_discretionaries(<node> n) \stopfunctioncall -\subsubsection{\type {node.family_font}} +\subsection{\type {family_font}} + +\libindex {family_font} -When you pass it a proper family identifier the next helper will return the font -currently associated with it. You can normally also access the font with the normal -font field or getter because it will resolve the family automatically for noads. +When you pass a proper family identifier the next helper will return the font +currently associated with it. You can normally also access the font with the +normal font field or getter because it will resolve the family automatically for +noads. 
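To round off the attribute helpers discussed above, here is a small made|-|up
example; the attribute number \type {9999} is an arbitrary choice and in
practice a macro package allocates such numbers for you:

\starttyping
\directlua {
    local myattribute = 9999 -- arbitrary register number for this sketch

    local function tag_glyphs(head)
        for g in node.traverse_id(node.id("glyph"), head) do
            node.set_attribute(g, myattribute, 1)
        end
        -- locate the first node carrying the attribute (value and node)
        local value, where = node.find_attribute(head, myattribute)
        if where then
            node.unset_attribute(where, myattribute)
        end
        return head
    end
}
\stoptyping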
\startfunctioncall <integer> id = node.family_font(<integer> fam) \stopfunctioncall -\subsubsection{\type {node.set_synctex_fields} and \type {node.get_synctex_fields}} - -You can set and query the synctex fields, a file number aka tag and a line -number, for a glue, kern, hlist, vlist, rule and math nodes as well as glyph -nodes (although this last one are not used in native synctex). +\stopsection -\startfunctioncall -node.set_synctex_fields(<integer> f, <integer> l) -<integer> f, <integer> l = - node.get_synctex_fields(<node> n) -\stopfunctioncall +\startsection[title={Two access models}][library=node] -Of course you need to know what you're doing as no checking on sane values takes -place. Also, the synctex interpreter used in editors is rather peculiar and has -some assumptions (heuristics). +\topicindex{nodes+direct} +\topicindex{direct nodes} -\section{Two access models} +\libindex {todirect} +\libindex {tonode} +\libindex {tostring} -Deep down in \TEX\ a node has a number which is an numeric entry in a memory +Deep down in \TEX\ a node has a number which is a numeric entry in a memory table. In fact, this model, where \TEX\ manages memory is real fast and one of the reasons why plugging in callbacks that operate on nodes is quite fast too. Each node gets a number that is in fact an index in the memory table and that -number often gets reported when you print node related information. +number often is reported when you print node related information. You go from +userdata nodes and there numeric references and back with: + +\startfunctioncall +<integer> d = node.todirect(<node> n)) +<node> n = node.tonode(<integer> d)) +\stopfunctioncall + +The userdata model is rather robust as it is a virtual interface with some +additional checking while the more direct access which uses the node numbers +directly. However, even with userdata you can get into troubles when you free +nodes that are no longer allocated or mess up lists. if you apply \type +{tostring} to a node you see its internal (direct) number and id. -There are two access models, a robust one using a so called user data object that -provides a virtual interface to the internal nodes, and a more direct access which -uses the node numbers directly. The first model provide key based access while -the second always accesses fields via functions: +The first model provides key based access while the second always accesses fields +via functions: \starttyping nodeobject.char @@ -1823,19 +2205,19 @@ getfield(nodenumber,"char") \stoptyping If you use the direct model, even if you know that you deal with numbers, you -should not depend on that property but treat it an abstraction just like +should not depend on that property but treat it as an abstraction just like traditional nodes. In fact, the fact that we use a simple basic datatype has the penalty that less checking can be done, but less checking is also the reason why it's somewhat faster. An important aspect is that one cannot mix both methods, but you can cast both models. So, multiplying a node number makes no sense. So our advice is: use the indexed (table) approach when possible and investigate -the direct one when speed might be an real issue. For that reason we also provide -the \type {get*} and \type {set*} functions in the top level node namespace. -There is a limited set of getters. 
When implementing this direct approach the -regular index by key variant was also optimized, so direct access only makes -sense when we're accessing nodes millions of times (which happens in some font -processing for instance). +the direct one when speed might be a real issue. For that reason \LUATEX\ also +provide the \type {get*} and \type {set*} functions in the top level node +namespace. There is a limited set of getters. When implementing this direct +approach the regular index by key variant was also optimized, so direct access +only makes sense when nodes are accessed millions of times (which happens in some +font processing for instance). We're talking mostly of getters because setters are less important. Documents have not that many content related nodes and setting many thousands of properties @@ -1873,8 +2255,10 @@ end Some accessors are used frequently and for these we provide more efficient helpers: \starttabulate[|l|p|] +\DB function \BC explanation \NC \NR +\TB \NC \type{getnext} \NC parsing nodelist always involves this one \NC \NR -\NC \type{getprev} \NC used less but is logical companion to \type {getnext} \NC \NR +\NC \type{getprev} \NC used less but a logical companion to \type {getnext} \NC \NR \NC \type{getboth} \NC returns the next and prev pointer of a node \NC \NR \NC \type{getid} \NC consulted a lot \NC \NR \NC \type{getsubtype} \NC consulted less but also a topper \NC \NR @@ -1883,13 +2267,16 @@ Some accessors are used frequently and for these we provide more efficient helpe \NC \type{getwhd} \NC returns the \type {width}, \type {height} and \type {depth} of a list, rule or (unexpanded) glyph as well as glue (its spec is looked at) and unset nodes\NC \NR \NC \type{getdisc} \NC returns the \type {pre}, \type {post} and \type {replace} fields and - optionally when true is passed also the tail fields. \NC \NR + optionally when true is passed also the tail fields \NC \NR \NC \type{getlist} \NC we often parse nested lists so this is a convenient one too \NC \NR \NC \type{getleader} \NC comparable to list, seldom used in \TEX\ (but needs frequent consulting like lists; leaders could have been made a dedicated node type) \NC \NR \NC \type{getfield} \NC generic getter, sufficient for the rest (other field names are often shared so a specific getter makes no sense then) \NC \NR \NC \type{getbox} \NC gets the given box (a list node) \NC \NR +\NC \type{getoffsets} \NC gets the \type {xoffset} and \type {yoffset} of a glyph or + \type {left} and \type {right} values of a rule \NC \NR +\LL \stoptabulate In the direct namespace there are more such helpers and most of them are @@ -1910,145 +2297,157 @@ directmode setter \type {setlink} takes a list of nodes and will link them, thereby ignoring \type {nil} entries. The first valid node is returned (beware: for good reason it assumes single nodes). For rarely used fields no helpers are provided and there are a few that probably are used seldom too but were added for -consistency. You can of course always define additional accessor using \type -{getfield} and \type {setfield} with little overhead. - -% \startcolumns[balance=yes] +consistency. You can of course always define additional accessors using \type +{getfield} and \type {setfield} with little overhead. When the second argument of +\type {setattributelist} is \type {true} the current attribute list is assumed. 
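As a sketch of the direct model in action, the loop below only uses getters
mentioned above; the helper name is invented and the conversion at the start is
only needed when you begin with a userdata node:

\starttyping
\directlua {
    local D        = node.direct
    local todirect = D.todirect
    local getnext  = D.getnext
    local getid    = D.getid
    local getwhd   = D.getwhd
    local glyph_id = node.id("glyph")

    -- total width of the glyphs in a (userdata) node list
    local function glyphwidths(head)
        local total = 0
        local n = todirect(head)
        while n do
            if getid(n) == glyph_id then
                local w, h, d = getwhd(n)
                total = total + w
            end
            n = getnext(n)
        end
        return total
    end
}
\stoptyping

The same loop written with the userdata model and the \type {.next}, \type
{.id} and \type {.width} fields gives identical results; the direct variant
only pays off when it runs very often.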
\def\yes{$+$} \def\nop{$-$} +\def\supported#1#2#3% + {\NC \type{#1} + \NC \ifx#2\yes\libidx{node} {#1}\fi #2 + \NC \ifx#3\yes\libidx{node.direct}{#1}\fi #3 \NC + \NR} + \starttabulate[|l|c|c|] -\HL -\BC function \BC node \BC direct \NC \NR -\HL -%NC \type {do_ligature_n} \NC \yes \NC \yes \NC \NR % was never documented and experimental -\NC \type {check_discretionaries}\NC \yes \NC \yes \NC \NR -\NC \type {copy_list} \NC \yes \NC \yes \NC \NR -\NC \type {copy} \NC \yes \NC \yes \NC \NR -\NC \type {count} \NC \yes \NC \yes \NC \NR -\NC \type {current_attr} \NC \yes \NC \yes \NC \NR -\NC \type {dimensions} \NC \yes \NC \yes \NC \NR -\NC \type {effective_glue} \NC \yes \NC \yes \NC \NR -\NC \type {end_of_math} \NC \yes \NC \yes \NC \NR -\NC \type {family_font} \NC \yes \NC \nop \NC \NR -\NC \type {fields} \NC \yes \NC \nop \NC \NR -\NC \type {find_attribute} \NC \yes \NC \yes \NC \NR -\NC \type {first_glyph} \NC \yes \NC \yes \NC \NR -\NC \type {flush_list} \NC \yes \NC \yes \NC \NR -\NC \type {flush_node} \NC \yes \NC \yes \NC \NR -\NC \type {free} \NC \yes \NC \yes \NC \NR -\NC \type {get_attribute} \NC \yes \NC \yes \NC \NR -\NC \type {getattributelist} \NC \nop \NC \yes \NC \NR -\NC \type {getboth} \NC \yes \NC \yes \NC \NR -\NC \type {getbox} \NC \nop \NC \yes \NC \NR -\NC \type {getchar} \NC \yes \NC \yes \NC \NR -\NC \type {getcomponents} \NC \nop \NC \yes \NC \NR -\NC \type {getdepth} \NC \nop \NC \yes \NC \NR -\NC \type {getdir} \NC \nop \NC \yes \NC \NR -\NC \type {getdisc} \NC \yes \NC \yes \NC \NR -\NC \type {getfam} \NC \nop \NC \yes \NC \NR -\NC \type {getfield} \NC \yes \NC \yes \NC \NR -\NC \type {getfont} \NC \yes \NC \yes \NC \NR -\NC \type {getglue} \NC \yes \NC \yes \NC \NR -\NC \type {getheight} \NC \nop \NC \yes \NC \NR -\NC \type {getid} \NC \yes \NC \yes \NC \NR -\NC \type {getkern} \NC \nop \NC \yes \NC \NR -\NC \type {getlang} \NC \nop \NC \yes \NC \NR -\NC \type {getleader} \NC \yes \NC \yes \NC \NR -\NC \type {getlist} \NC \yes \NC \yes \NC \NR -\NC \type {getnext} \NC \yes \NC \yes \NC \NR -\NC \type {getnucleus} \NC \nop \NC \yes \NC \NR -\NC \type {getoffsets} \NC \nop \NC \yes \NC \NR -\NC \type {getpenalty} \NC \nop \NC \yes \NC \NR -\NC \type {getprev} \NC \yes \NC \yes \NC \NR -\NC \type {getproperty} \NC \yes \NC \yes \NC \NR -\NC \type {getshift} \NC \nop \NC \yes \NC \NR -\NC \type {getwidth} \NC \nop \NC \yes \NC \NR -\NC \type {getwhd} \NC \nop \NC \yes \NC \NR -\NC \type {getsub} \NC \nop \NC \yes \NC \NR -\NC \type {getsubtype} \NC \yes \NC \yes \NC \NR -\NC \type {getsup} \NC \nop \NC \yes \NC \NR -\NC \type {has_attribute} \NC \yes \NC \yes \NC \NR -\NC \type {has_field} \NC \yes \NC \yes \NC \NR -\NC \type {has_glyph} \NC \yes \NC \yes \NC \NR -\NC \type {hpack} \NC \yes \NC \yes \NC \NR -\NC \type {id} \NC \yes \NC \nop \NC \NR -\NC \type {insert_after} \NC \yes \NC \yes \NC \NR -\NC \type {insert_before} \NC \yes \NC \yes \NC \NR -\NC \type {is_char} \NC \yes \NC \yes \NC \NR -\NC \type {is_direct} \NC \nop \NC \yes \NC \NR -\NC \type {is_glue_zero} \NC \yes \NC \yes \NC \NR -\NC \type {is_glyph} \NC \yes \NC \yes \NC \NR -\NC \type {is_node} \NC \yes \NC \yes \NC \NR -\NC \type {kerning} \NC \yes \NC \yes \NC \NR -\NC \type {last_node} \NC \yes \NC \yes \NC \NR -\NC \type {length} \NC \yes \NC \yes \NC \NR -\NC \type {ligaturing} \NC \yes \NC \yes \NC \NR -\NC \type {mlist_to_hlist} \NC \yes \NC \nop \NC \NR -\NC \type {new} \NC \yes \NC \yes \NC \NR -\NC \type {next} \NC \yes \NC \nop \NC \NR -\NC \type {prev} \NC \yes \NC \nop \NC \NR -\NC 
\type {protect_glyphs} \NC \yes \NC \yes \NC \NR -\NC \type {protect_glyph} \NC \yes \NC \yes \NC \NR -\NC \type {protrusion_skippable} \NC \yes \NC \yes \NC \NR -\NC \type {rangedimensions} \NC \yes \NC \yes \NC \NR -\NC \type {remove} \NC \yes \NC \yes \NC \NR -\NC \type {set_attribute} \NC \nop \NC \yes \NC \NR -\NC \type {setattributelist} \NC \nop \NC \yes \NC \NR -\NC \type {setboth} \NC \nop \NC \yes \NC \NR -\NC \type {setbox} \NC \nop \NC \yes \NC \NR -\NC \type {setchar} \NC \nop \NC \yes \NC \NR -\NC \type {setcomponents} \NC \nop \NC \yes \NC \NR -\NC \type {setdepth} \NC \nop \NC \yes \NC \NR -\NC \type {setdir} \NC \nop \NC \yes \NC \NR -\NC \type {setdisc} \NC \nop \NC \yes \NC \NR -\NC \type {setfield} \NC \yes \NC \yes \NC \NR -\NC \type {setfont} \NC \nop \NC \yes \NC \NR -\NC \type {setglue} \NC \yes \NC \yes \NC \NR -\NC \type {setheight} \NC \nop \NC \yes \NC \NR -\NC \type {setid} \NC \nop \NC \yes \NC \NR -\NC \type {setkern} \NC \nop \NC \yes \NC \NR -\NC \type {setlang} \NC \nop \NC \yes \NC \NR -\NC \type {setleader} \NC \nop \NC \yes \NC \NR -\NC \type {setlist} \NC \nop \NC \yes \NC \NR -\NC \type {setnext} \NC \nop \NC \yes \NC \NR -\NC \type {setnucleus} \NC \nop \NC \yes \NC \NR -\NC \type {setoffsets} \NC \nop \NC \yes \NC \NR -\NC \type {setpenalty} \NC \nop \NC \yes \NC \NR -\NC \type {setprev} \NC \nop \NC \yes \NC \NR -\NC \type {setproperty} \NC \nop \NC \yes \NC \NR -\NC \type {setshift} \NC \nop \NC \yes \NC \NR -\NC \type {setwidth} \NC \nop \NC \yes \NC \NR -\NC \type {setwhd} \NC \nop \NC \yes \NC \NR -\NC \type {setsub} \NC \nop \NC \yes \NC \NR -\NC \type {setsubtype} \NC \nop \NC \yes \NC \NR -\NC \type {setsup} \NC \nop \NC \yes \NC \NR -\NC \type {slide} \NC \yes \NC \yes \NC \NR -\NC \type {subtypes} \NC \yes \NC \nop \NC \NR -\NC \type {subtype} \NC \yes \NC \nop \NC \NR -\NC \type {tail} \NC \yes \NC \yes \NC \NR -\NC \type {todirect} \NC \yes \NC \yes \NC \NR -\NC \type {tonode} \NC \yes \NC \yes \NC \NR -\NC \type {tostring} \NC \yes \NC \yes \NC \NR -\NC \type {traverse_char} \NC \yes \NC \yes \NC \NR -\NC \type {traverse_id} \NC \yes \NC \yes \NC \NR -\NC \type {traverse} \NC \yes \NC \yes \NC \NR -\NC \type {types} \NC \yes \NC \nop \NC \NR -\NC \type {type} \NC \yes \NC \nop \NC \NR -\NC \type {unprotect_glyphs} \NC \yes \NC \yes \NC \NR -\NC \type {unset_attribute} \NC \yes \NC \yes \NC \NR -\NC \type {usedlist} \NC \yes \NC \yes \NC \NR -\NC \type {uses_font} \NC \yes \NC \yes \NC \NR -\NC \type {vpack} \NC \yes \NC \yes \NC \NR -\NC \type {whatsitsubtypes} \NC \yes \NC \nop \NC \NR -\NC \type {whatsits} \NC \yes \NC \nop \NC \NR -\NC \type {write} \NC \yes \NC \yes \NC \NR -\NC \type {set_synctex_fields} \NC \yes \NC \yes \NC \NR -\NC \type {get_synctex_fields} \NC \yes \NC \yes \NC \NR +\DB function \BC node \BC direct \NC \NR +\TB +\supported {check_discretionaries} \yes \yes +\supported {check_discretionary} \yes \yes +\supported {copy_list} \yes \yes +\supported {copy} \yes \yes +\supported {count} \yes \yes +\supported {current_attr} \yes \yes +\supported {dimensions} \yes \yes +\supported {effective_glue} \yes \yes +\supported {end_of_math} \yes \yes +\supported {family_font} \yes \nop +\supported {fields} \yes \nop +\supported {find_attribute} \yes \yes +\supported {first_glyph} \yes \yes +\supported {flatten_discretionaries} \yes \yes +\supported {flush_list} \yes \yes +\supported {flush_node} \yes \yes +\supported {free} \yes \yes +\supported {get_attribute} \yes \yes +\supported {get_synctex_fields} \nop \yes +\supported 
{getattributelist} \nop \yes +\supported {getboth} \yes \yes +\supported {getbox} \nop \yes +\supported {getchar} \yes \yes +\supported {getcomponents} \nop \yes +\supported {getdepth} \nop \yes +\supported {getdirection} \nop \yes +\supported {getdir} \nop \yes +\supported {getdisc} \yes \yes +\supported {getfam} \nop \yes +\supported {getfield} \yes \yes +\supported {getfont} \yes \yes +\supported {getglue} \yes \yes +\supported {getheight} \nop \yes +\supported {getid} \yes \yes +\supported {getkern} \nop \yes +\supported {getlang} \nop \yes +\supported {getleader} \yes \yes +\supported {getlist} \yes \yes +\supported {getnext} \yes \yes +\supported {getnucleus} \nop \yes +\supported {getoffsets} \nop \yes +\supported {getpenalty} \nop \yes +\supported {getprev} \yes \yes +\supported {getproperty} \yes \yes +\supported {getshift} \nop \yes +\supported {getsubtype} \yes \yes +\supported {getsub} \nop \yes +\supported {getsup} \nop \yes +\supported {getdata} \nop \yes +\supported {getwhd} \yes \yes +\supported {getwidth} \nop \yes +\supported {has_attribute} \yes \yes +\supported {has_field} \yes \yes +\supported {has_glyph} \yes \yes +\supported {hpack} \yes \yes +\supported {id} \yes \nop +\supported {insert_after} \yes \yes +\supported {insert_before} \yes \yes +\supported {is_char} \yes \yes +\supported {is_direct} \nop \yes +\supported {is_glyph} \yes \yes +\supported {is_node} \yes \yes +\supported {is_zero_glue} \yes \yes +\supported {kerning} \yes \yes +\supported {last_node} \yes \yes +\supported {length} \yes \yes +\supported {ligaturing} \yes \yes +\supported {mlist_to_hlist} \yes \nop +\supported {new} \yes \yes +\supported {next} \yes \nop +\supported {prepend_prevdepth} \nop \yes +\supported {prev} \yes \nop +\supported {protect_glyphs} \yes \yes +\supported {protect_glyph} \yes \yes +\supported {protrusion_skippable} \yes \yes +\supported {rangedimensions} \yes \yes +\supported {remove} \yes \yes +\supported {set_attribute} \yes \yes +\supported {set_synctex_fields} \nop \yes +\supported {setattributelist} \nop \yes +\supported {setboth} \nop \yes +\supported {setbox} \nop \yes +\supported {setchar} \nop \yes +\supported {setcomponents} \nop \yes +\supported {setdepth} \nop \yes +\supported {setdirection} \nop \yes +\supported {setdir} \nop \yes +\supported {setdisc} \nop \yes +\supported {setfam} \nop \yes +\supported {setfield} \yes \yes +\supported {setfont} \nop \yes +\supported {setexpansion} \nop \yes +\supported {setglue} \yes \yes +\supported {setheight} \nop \yes +\supported {setkern} \nop \yes +\supported {setlang} \nop \yes +\supported {setleader} \nop \yes +\supported {setlink} \nop \yes +\supported {setlist} \nop \yes +\supported {setnext} \nop \yes +\supported {setnucleus} \nop \yes +\supported {setoffsets} \nop \yes +\supported {setpenalty} \nop \yes +\supported {setprev} \nop \yes +\supported {setproperty} \yes \yes +\supported {setshift} \nop \yes +\supported {setsplit} \nop \yes +\supported {setsubtype} \nop \yes +\supported {setsub} \nop \yes +\supported {setsup} \nop \yes +\supported {setwhd} \nop \yes +\supported {setwidth} \nop \yes +\supported {slide} \yes \yes +\supported {subtypes} \yes \nop +\supported {subtype} \yes \nop +\supported {tail} \yes \yes +\supported {todirect} \yes \yes +\supported {tonode} \yes \yes +\supported {tostring} \yes \yes +\supported {traverse_char} \yes \yes +\supported {traverse_glyph} \yes \yes +\supported {traverse_id} \yes \yes +\supported {traverse} \yes \yes +\supported {types} \yes \nop +\supported {type} \yes \nop 
+\supported {unprotect_glyphs} \yes \yes +\supported {unprotect_glyph} \yes \yes +\supported {unset_attribute} \yes \yes +\supported {usedlist} \yes \yes +\supported {uses_font} \yes \yes +\supported {vpack} \yes \yes +\supported {whatsits} \yes \nop +\supported {write} \yes \yes +\LL \stoptabulate -% \stopcolumns - The \type {node.next} and \type {node.prev} functions will stay but for consistency there are variants called \type {getnext} and \type {getprev}. We had to use \type {get} because \type {node.id} and \type {node.subtype} are already @@ -2056,6 +2455,13 @@ taken for providing meta information about nodes. Note: The getters do only basi checking for valid keys. You should just stick to the keys mentioned in the sections that describe node properties. +Some of the getters and setters handle multiple node types, given that the field +is relevant. In that case, some field names are considered similar (like \type +{kern} and \type {width}, or \type {data} and \type {value}. In retrospect we +could have normalized field names better but we decided to stick to the original +(internal) names as much as possible. After all, at the \LUA\ end one can easily +create synonyms. + Some nodes have indirect references. For instance a math character refers to a family instead of a font. In that case we provide a virtual font field as accessor. So, \type {getfont} and \type {.font} can be used on them. The same is @@ -2063,6 +2469,196 @@ true for the \type {width}, \type {height} and \type {depth} of glue nodes. Thes actually access the spec node properties, and here we can set as well as get the values. +In some places \LUATEX\ can do a bit of extra checking for valid node lists and +you can enable that with: + +\startfunctioncall +node.fix_node_lists(<boolean> b) +\stopfunctioncall + +You can set and query the \SYNCTEX\ fields, a file number aka tag and a line +number, for a glue, kern, hlist, vlist, rule and math nodes as well as glyph +nodes (although this last one is not used in native \SYNCTEX). + +\startfunctioncall +node.set_synctex_fields(<integer> f, <integer> l) +<integer> f, <integer> l = + node.get_synctex_fields(<node> n) +\stopfunctioncall + +Of course you need to know what you're doing as no checking on sane values takes +place. Also, the synctex interpreter used in editors is rather peculiar and has +some assumptions (heuristics). + +\stopsection + +\startsection[title={Properties}][library=node] + +\topicindex {nodes+properties} +\topicindex {properties} + +\libindex{flush_properties_table} +\libindex{get_properties_table} +\libindex{set_properties_mode} + +Attributes are a convenient way to relate extra information to a node. You can +assign them at the \TEX\ end as well as at the \LUA\ end and and consult them at +the \LUA\ end. One big advantage is that they obey grouping. They are linked +lists and normally checking for them is pretty efficient, even if you use a lot +of them. A macro package has to provide some way to manage these attributes at the +\TEX\ end because otherwise clashes in their usage can occur. + +Each node also can have a properties table and you can assign values to this +table using the \type {setproperty} function and get properties using the \type +{getproperty} function. Managing properties is way more demanding than managing +attributes. 
+ +Take the following example: + +\starttyping +\directlua { + local n = node.new("glyph") + + node.setproperty(n,"foo") + print(node.getproperty(n)) + + node.setproperty(n,"bar") + print(node.getproperty(n)) + + node.free(n) +} +\stoptyping + +This will print \type {foo} and \type {bar} which in itself is not that useful +when multiple mechanisms want to use this feature. A variant is: + +\starttyping +\directlua { + local n = node.new("glyph") + + node.setproperty(n,{ one = "foo", two = "bar" }) + print(node.getproperty(n).one) + print(node.getproperty(n).two) + + node.free(n) +} +\stoptyping + +This time we store two properties with the node. It really makes sense to have a +table as property because that way we can store more. But in order for that to +work well you need to do it this way: + +\starttyping +\directlua { + local n = node.new("glyph") + + local t = node.getproperty(n) + + if not t then + t = { } + node.setproperty(n,t) + end + t.one = "foo" + t.two = "bar" + + print(node.getproperty(n).one) + print(node.getproperty(n).two) + + node.free(n) +} +\stoptyping + +Here our own properties will not overwrite other users properties unless of +course they use the same keys. So, eventually you will end up with something: + +\starttyping +\directlua { + local n = node.new("glyph") + + local t = node.getproperty(n) + + if not t then + t = { } + node.setproperty(n,t) + end + t.myself = { one = "foo", two = "bar" } + + print(node.getproperty(n).myself.one) + print(node.getproperty(n).myself.two) + + node.free(n) +} +\stoptyping + +This assumes that only you use \type {myself} as subtable. The possibilities are +endless but care is needed. For instance, the generic font handler that ships +with \CONTEXT\ uses the \type {injections} subtable and you should not mess with +that one! + +There are a few helper functions that you normally should not touch as user: \typ +{flush_properties_table} will wipe the table (normally a bad idea), \typ +{get_properties_table} and will give the table that stores properties (using +direct entries) and you can best not mess too much with that one either because +\LUATEX\ itself will make sure that entries related to nodes will get wiped when +nodes get freed, so that the \LUA\ garbage collector can do its job. In fact, the +main reason why we have this mechanism is that it saves the user (or macro +package) some work. One can easily write a property mechanism in \LUA\ where +after a shipout properties gets cleaned up but it's not entirely trivial to make +sure that with each freed node also its properties get freed, due to the fact +that there can be nodes left over for a next page. And having a callback bound to +the node deallocator would add way to much overhead. + +Managing properties in the node (de)allocator functions is disabled by default +and is enabled by: + +\starttyping +node.set_properties_mode(true) +\stoptyping + +When we copy a node list that has a table as property, there are several possibilities: we do the same as +a new node, we copy the entry to the table in properties (a reference), we do +a deep copy of a table in the properties, we create a new table and give it +the original one as a metatable. After some experiments (that also included +timing) with these scenarios we decided that a deep copy made no sense, nor +did nilling. In the end both the shallow copy and the metatable variant were +both ok, although the second one is slower. 
The most important aspect to keep +in mind is that references to other nodes in properties no longer can be +valid for that copy. We could use two tables (one unique and one shared) or +metatables but that only complicates matters. + +When defining a new node, we could already allocate a table but it is rather easy +to do that at the lua end e.g.\ using a metatable \type {__index} method. That +way it is under macro package control. When deleting a node, we could keep the +slot (e.g. setting it to false) but it could make memory consumption raise +unneeded when we have temporary large node lists and after that only small lists. +Both are not done. + +So in the end this is what happens now: when a node is copied, and it has a table +as property, the new node will share that table. If the second argument of \typ +{set_properties_mode} is \type {true} then a metatable approach is chosen: the +copy gets its own table with the original table as metatable. If you use the +generic font loader the mode is enabled that way. + +A few more xperiments were done. For instance: copy attributes to the properties +so that we have fast access at the \LUA\ end. In the end the overhead is not +compensated by speed and convenience, in fact, attributes are not that slow when +it comes to accessing them. So this was rejected. + +Another experiment concerned a bitset in the node but again the gain compared to +attributes was neglectable and given the small amount of available bits it also +demands a pretty strong agreement over what bit represents what, and this is +unlikely to succeed in the \TEX\ community. It doesn't pay off. + +Just in case one wonders why properties make sense: it is not so much speed that +we gain, but more convenience: storing all kind of (temporary) data in attributes +is no fun and this mechanism makes sure that properties are cleaned up when a +node is freed. Also, the advantage of a more or less global properties table is +that we stay at the \LUA\ end. An alternative is to store a reference in the node +itself but that is complicated by the fact that the register has some limitations +(no numeric keys) and we also don't want to mess with it too much. + +\stopsection + \stopchapter \stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-preamble.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-preamble.tex new file mode 100644 index 00000000000..8293179776d --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-preamble.tex @@ -0,0 +1,110 @@ +% language=uk + +\environment luatex-style + +\startcomponent luatex-preamble + +\startchapter[reference=preamble,title={Preamble}] + +\topicindex{nodes} +\topicindex{boxes} +\topicindex{\LUA} + +This is a reference manual, not a tutorial. This means that we discuss changes +relative to traditonal \TEX\ and also present new functionality. As a consequence +we will refer to concepts that we assume to be known or that might be explained +later. + +The average user doesn't need to know much about what is in this manual. For +instance fonts and languages are normally dealt with in the macro package that +you use. Messing around with node lists is also often not really needed at the +user level. If you do mess around, you'd better know what you're dealing with. +Reading \quotation {The \TEX\ Book} by Donald Knuth is a good investment of time +then also because it's good to know where it all started. 
A more summarizing +overview is given by \quotation {\TEX\ by Topic} by Victor Eijkhout. You might +want to peek in \quotation {The \ETEX\ manual} and documentation about \PDFTEX. + +But \unknown\ if you're here because of \LUA, then all you need to know is that +you can call it from within a run. The macro package that you use probably will +provide a few wrapper mechanisms but the basic \lpr {directlua} command that +does the job is: + +\starttyping +\directlua{tex.print("Hi there")} +\stoptyping + +You can put code between curly braces but if it's a lot you can also put it in a +file and load that file with the usual \LUA\ commands. + +If you still decide to read on, then it's good to know what nodes are, so we do a +quick introduction here. If you input this text: + +\starttyping +Hi There +\stoptyping + +eventually we will get a linked lists of nodes, which in \ASCII\ art looks like: + +\starttyping +H <=> i <=> [glue] <=> T <=> h <=> e <=> r <=> e +\stoptyping + +When we have a paragraph, we actually get something: + +\starttyping +[localpar] <=> H <=> i <=> [glue] <=> T <=> h <=> e <=> r <=> e <=> [glue] +\stoptyping + +Each character becomes a so called glyph node, a record with properties like the +current font, the character code and the current language. Spaces become glue +nodes. There are many node types that we will discuss later. Each node points +back to a previous node or next node, given that these exist. + +It's also good to know beforehand that \TEX\ is basically centered around +creating paragraphs and pages. The par builder takes a list and breaks it into +lines. We turn horizontal material into vertical. Lines are so called boxes and +can be separated by glue, penalties and more. The page builder accumulates lines +and when feasible triggers an output routine that will take the list so far. +Constructing the actual page is not part of \TEX\ but done using primitives that +permit manipulation of boxes. The result is handled back to \TEX\ and flushed to +a (often \PDF) file. + +The \LUATEX\ engine provides hooks for \LUA\ code at nearly every reasonable +point in the process: collecting content, hyphenating, applying font features, +breaking into lines, etc. This means that you can overload \TEX's natural +behaviour, which still is the benchmark. When we refer to \quote {callbacks} we +means these hooks. + +Where plain \TEX\ is basically a basic framework for writing a specific style, +macro packages like \CONTEXT\ and \LATEX\ provide the user a whole lot of +additional tools to make documents look good. They hide the dirty details of font +management, language demands, turning structure into typeset results, wrapping +pages, including images, and so on. You should be aware of the fact that when you +hook in your own code to manipulate lists, this can interfere with the macro +package that you use. + +When you read about nodes in the following chapters it's good to keep in mind their +commands that relate to then. 
Here are a few: + +\starttabulate[|l|l|p|] +\DB command \BC node \BC explanation \NC \NR +\TB +\NC \prm {hbox} \NC \nod {hlist} \NC horizontal box \NC \NR +\NC \prm {vbox} \NC \nod {vlist} \NC vertical box with the baseline at the bottom \NC \NR +\NC \prm {vtop} \NC \nod {vlist} \NC vertical box with the baseline at the top \NC \NR +\NC \prm {hskip} \NC \nod {glue} \NC horizontal skip with optional stretch and shrink \NC \NR +\NC \prm {vskip} \NC \nod {glue} \NC vertical skip with optional stretch and shrink \NC \NR +\NC \prm {kern} \NC \nod {kern} \NC horizontal or vertical fixed skip \NC \NR +\NC \prm {discretionary} \NC \nod {disc} \NC hyphenation point (pre, post, replace) \NC \NR +\NC \prm {char} \NC \nod {glyph} \NC a character \NC \NR +\NC \prm {hrule} \NC \nod {rule} \NC a horizontal rule \NC \NR +\NC \prm {vrule} \NC \nod {rule} \NC a vertical rule \NC \NR +\NC \prm {textdir(ection)} \NC \nod {dir} \NC a change in text direction \NC \NR +\LL +\stoptabulate + +For now this should be enough to enable you to understand the next chapters. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-registers.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-registers.tex new file mode 100644 index 00000000000..36b1ec0513d --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-registers.tex @@ -0,0 +1,47 @@ +\environment luatex-style + +\startcomponent luatex-registers + +\startchapter[title=Topics] + + \placeregister[topicindex] + +\stopchapter + +\startchapter[title=Primitives] + + This register contains the primitives that are mentioned in the manual. There + are of course many more primitives. The \LUATEX\ primitives are typeset in + bold. The primitives from \PDFTEX\ are not supported that way but mentioned + anyway. + + \placeregister[primitiveindex][indicator=no] + +\stopchapter + +\startchapter[title=Callbacks] + + \placeregister[callbackindex] + +\stopchapter + +\startchapter[title=Nodes] + + This register contains the nodes that are known to \LUATEX. The primary nodes + are in bold, whatsits that are determined by their subtype are normal. The + names prefixed by \type {pdf_} are backend specific. + + \placeregister[nodeindex] + +\stopchapter + +\startchapter[title=Libraries] + + This register contains the functions available in libraries. Not all functions + are documented, for instance because they can be experimental or obsolete. 
+ + \placeregister[libraryindex] + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-statistics.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-statistics.tex new file mode 100644 index 00000000000..efd7f1c75c4 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-statistics.tex @@ -0,0 +1,17 @@ +% language=uk + +\environment luatex-style + +\startcomponent luatex-statistics + +\startchapter[title={Statistics}] + + \topicindex{fonts+used} + + The following fonts are used in this document: + + \showfontusage + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-style.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-style.tex index a277a117836..708f83ed6d0 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-style.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-style.tex @@ -2,7 +2,9 @@ % todo: use \useMPlibrary[lua] -\usemodule[abr-02] +\enabletrackers[fonts.usage] + +\usemodule[fonts-statistics] \setuplayout [height=middle, @@ -31,9 +33,31 @@ [each] [packed] +\definesymbol[1][\Uchar"2023] +\definesymbol[2][\endash] +\definesymbol[3][\wait] % we want to catch it + +\setupitemize + [each] + [headcolor=maincolor, + symbolcolor=maincolor, + color=maincolor] + \setupwhitespace [medium] +\setuptabulate + [blank={small,samepage}, + headstyle=bold, + rulecolor=maincolor, + rulethickness=1pt, + foregroundcolor=white, + foregroundstyle=\ss\bfx\WORD, + backgroundcolor=maincolor] + +\setupcaptions + [headcolor=darkblue] + \startluacode local skipped = table.tohash { 'id', 'subtype', 'next', 'prev' } @@ -89,51 +113,13 @@ \definecolor[keptcolor] [b=.5] \definecolor[othercolor][r=.5,g=.5] -\usebodyfont[lucidaot] -\usebodyfont[pagella] -\usebodyfont[cambria] -%usebodyfont[dejavu] -\usebodyfont[modern] % we need this in examples so we predefine - -% \doifmodeelse {atpragma} { -% -% % \setupbodyfont -% % [lucidaot,10pt] -% -% \setupbodyfont -% [dejavu,10pt] -% -% \setuphead [chapter] [style=\bfd] -% \setuphead [section] [style=\bfb] -% \setuphead [subsection] [style=\bfa] -% \setuphead [subsubsection][style=\bf] -% -% } { -% -% \definetypeface[mainfacenormal] [ss][sans] [iwona] [default] -% \definetypeface[mainfacenormal] [rm][serif][palatino] [default] -% \definetypeface[mainfacenormal] [tt][mono] [modern] [default][rscale=1.1] -% \definetypeface[mainfacenormal] [mm][math] [iwona] [default] -% -% \definetypeface[mainfacemedium] [ss][sans] [iwona-medium][default] -% \definetypeface[mainfacemedium] [rm][serif][palatino] [default] -% \definetypeface[mainfacemedium] [tt][mono] [modern] [default][rscale=1.1] -% \definetypeface[mainfacemedium] [mm][math] [iwona-medium][default] -% -% \setupbodyfont -% [mainfacenormal,10pt] -% -% \setuphead [chapter] [style=\mainfacemedium\bfd] -% \setuphead [section] [style=\mainfacemedium\bfb] -% \setuphead [subsection] [style=\mainfacemedium\bfa] -% \setuphead [subsubsection][style=\mainfacemedium\bf] -% -% } - -\writestatus{luatex manual}{we assume that dejavu math is available} - -\setupbodyfont % assumes dejavu-math - [dejavu,10pt] +\writestatus{luatex manual}{} +\writestatus{luatex manual}{defining lucodaot} \usebodyfont [lucidaot] +\writestatus{luatex manual}{defining pagella} \usebodyfont [pagella] +\writestatus{luatex manual}{defining cambria} \usebodyfont [cambria] +\writestatus{luatex manual}{defining 
modern} \usebodyfont [modern] +\writestatus{luatex manual}{defining dejavu} \setupbodyfont[dejavu,10pt] +\writestatus{luatex manual}{} \setuphead [chapter] [align={flushleft,broad},style=\bfd] \setuphead [section] [align={flushleft,broad},style=\bfb] @@ -145,6 +131,9 @@ \setuphead [subsection] [color=maincolor] \setuphead [subsubsection][color=maincolor] +\setupfloats + [ntop=4] + \definehead [remark] [subsubsubject] @@ -152,6 +141,10 @@ \setupheadertexts [] +% \setuplayout +% [style=bold, +% color=maincolor] + \definemixedcolumns [twocolumns] [n=2, @@ -207,7 +200,7 @@ fill fullcircle scaled r shifted (d-1/8,d-1/8) withcolor luaholecolor ; luaorbitfactor := .25 ; - ) enddef ; + ) enddef ; \stopMPdefinitions @@ -217,8 +210,15 @@ fill Page withcolor \MPcolor{othercolor} ; luaorbitfactor := 1 ; - picture p ; p := lualogo xsized (3PaperWidth/5) ; - draw p shifted center Page shifted (0,-.85ypart center ulcorner p) ; + + picture p ; p := lualogo ysized (5*\measure{paperheight}/10) ; + draw p + shifted - center p + shifted ( + \measure{spreadwidth} - .5*\measure{paperwidth} + \measure{spinewidth}, + .375*\measure{paperheight} + ) + ; StopPage ; \stopuseMPgraphic @@ -232,7 +232,7 @@ \startuseMPgraphic{luanumber} % luaextraangle := \luaextraangle; - luaextraangle := if (LastPageNumber == 0) : 0 else : (RealPageNumber / LastPageNumber) * 360 fi; + luaextraangle := if (LastPageNumber < 10) : 10 else : (RealPageNumber / LastPageNumber) * 360 fi; luaorbitfactor := 0.25 ; picture p ; p := lualogo ; setbounds p to boundingbox fullcircle ; @@ -252,40 +252,46 @@ [rightpage] [background=page] +\definemeasure[banneroffset][\bottomspace-\footerheight-\footerdistance+2cm] + \startsetups pagenumber:right \setlayerframed [page] - [preset=rightbottom,offset=1cm] + [preset=rightbottom,x=1.0cm,y=\measure{banneroffset}] [frame=off,height=1cm,offset=overlay] - {\useMPgraphic{luanumber}} + {\strut\useMPgraphic{luanumber}} \setlayerframed [page] - [preset=rightbottom,offset=1cm,x=1.5cm] - [frame=off,height=1cm,width=1cm,offset=overlay] - {\pagenumber} + [preset=rightbottom,x=2.5cm,y=\measure{banneroffset}] + [frame=off,height=1cm,width=1cm,offset=overlay, + foregroundstyle=bold,foregroundcolor=maincolor] + {\strut\pagenumber} \setlayerframed [page] - [preset=rightbottom,offset=1cm,x=2.5cm] - [frame=off,height=1cm,offset=overlay] - {\getmarking[chapter]} + [preset=rightbottom,x=3.5cm,y=\measure{banneroffset}] + [frame=off,height=1cm,offset=overlay, + foregroundstyle=bold,foregroundcolor=maincolor] + {\strut\getmarking[chapter]} \stopsetups \startsetups pagenumber:left \setlayerframed [page] - [preset=leftbottom,offset=1cm,x=2.5cm] - [frame=off,height=1cm,offset=overlay] - {\getmarking[chapter]} + [preset=leftbottom,x=3.5cm,y=\measure{banneroffset}] + [frame=off,height=1cm,offset=overlay, + foregroundstyle=bold,foregroundcolor=maincolor] + {\strut\getmarking[chapter]} \setlayerframed [page] - [preset=leftbottom,offset=1cm,x=1.5cm] - [frame=off,height=1cm,width=1cm,offset=overlay] - {\pagenumber} + [preset=leftbottom,x=2.5cm,y=\measure{banneroffset}] + [frame=off,height=1cm,width=1cm,offset=overlay, + foregroundstyle=bold,foregroundcolor=maincolor] + {\strut\pagenumber} \setlayerframed [page] - [preset=leftbottom,offset=1cm] + [preset=leftbottom,x=1.0cm,y=\measure{banneroffset}] [frame=off,height=1cm,offset=overlay] - {\useMPgraphic{luanumber}} + {\strut\useMPgraphic{luanumber}} \stopsetups \unexpanded\def\nonterminal#1>{\mathematics{\langle\hbox{\rm #1}\rangle}} @@ -311,13 +317,15 @@ {\par \tt \startnarrower - 
\maincolor #1 + % \maincolor + #1 \stopnarrower \par} \unexpanded\def\syntaxbody#1% {\begingroup - \maincolor \tt #1% + % \maincolor + \tt #1% \endgroup} \bgroup \catcodetable\syntaxcodetable @@ -329,7 +337,7 @@ \definetyping [texsyntax] - [color=maincolor] +% [color=maincolor] % end of wave @@ -351,9 +359,14 @@ \setuplist [chapter] [style=bold, + before={\testpage[4]\blank}, color=keptcolor] \setuplist + [section] + [before={\testpage[3]}] + +\setuplist [subsection,subsubsection] [margin=3em, width=5em] @@ -373,4 +386,57 @@ % \setupinteractionscreen % [option=bookmark] +\startbuffer[stylecalculations] + + \normalexpanded{\definemeasure[spinewidth] [0pt]} + \normalexpanded{\definemeasure[paperwidth] [\the\paperwidth ]} + \normalexpanded{\definemeasure[paperheight][\the\paperheight]} + \normalexpanded{\definemeasure[spreadwidth][\measure{paperwidth}]} + +\stopbuffer + +\getbuffer[stylecalculations] + +\dontcomplain + +\environment luatex-logos + +\defineregister[topicindex] +\defineregister[primitiveindex] +\defineregister[callbackindex] +\defineregister[nodeindex] +\defineregister[libraryindex] + +\unexpanded\def\lpr#1{\doifmode{*bodypart}{\primitiveindex[#1]{\bf\tex {#1}}}\tex {#1}} +\unexpanded\def\prm#1{\doifmode{*bodypart}{\primitiveindex[#1]{\tex {#1}}}\tex {#1}} +\unexpanded\def\orm#1{\doifmode{*bodypart}{\primitiveindex[#1]{\tex {#1}}}\tex {#1}} +\unexpanded\def\cbk#1{\doifmode{*bodypart}{\callbackindex [#1]{\type {#1}}}\type{#1}} +\unexpanded\def\nod#1{\doifmode{*bodypart}{\nodeindex [#1]{\bf\type{#1}}}\type{#1}} +\unexpanded\def\whs#1{\doifmode{*bodypart}{\nodeindex [#1]{\type {#1}}}\type{#1}} +\unexpanded\def\noa#1{\doifmode{*bodypart}{\nodeindex [#1]{\type {#1}}}\type{#1}} + +\hyphenation{sub-nodes} + +\def\currentlibraryindex{\namedstructureuservariable{section}{library}} + +\setupregister + [libraryindex] + [indicator=no,before=] + +\setupregister + [libraryindex] + [1] + [textstyle=\ttbf] + +\setupregister + [libraryindex] + [2] + [textstyle=\tttf] + +\unexpanded\def\lib #1{\doifmode{*bodypart}{\expanded{\libraryindex{\currentlibraryindex+#1}}\type{\currentlibraryindex.#1}}} +\unexpanded\def\libindex#1{\doifmode{*bodypart}{\expanded{\libraryindex{\currentlibraryindex+#1}}}} +\unexpanded\def\libidx#1#2{\doifmode{*bodypart}{\expanded{\libraryindex{#1+#2}}\type{#1.#2}}} + +% \setstructurepageregister[][keys:1=,entries:1=] + \stopenvironment diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-titlepage.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-titlepage.tex index 307741ee115..d9ca4b3f933 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-titlepage.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex-titlepage.tex @@ -1,5 +1,4 @@ \environment luatex-style -\environment luatex-logos \startcomponent luatex-titlepage @@ -8,7 +7,7 @@ \switchtobodyfont [mainfacemedium] - \definedfont[Bold*default at \the\dimexpr.08\paperheight\relax] \setupinterlinespace + \definedfont[Bold*default at \the\dimexpr.06\paperheight\relax] \setupinterlinespace \setlayer [page] @@ -16,55 +15,39 @@ \setlayerframed [page] - [preset=middletop, - voffset=.05\paperheight] + [preset=righttop, + location=middletop, + hoffset=.500\measured{paperwidth}, + voffset=.175\measured{paperheight}] [align=middle, - foregroundcolor=blue, + foregroundcolor=white, frame=off] - {Lua\TeX\\Reference Manual} + {\documentvariable{manual}\crlf Reference\crlf Manual} - \definedfont[Bold*default at 18pt] 
\setupinterlinespace + \definedfont[Bold*default at 14pt] \setupinterlinespace \setlayerframed [page] [preset=rightbottom, - offset=.01\paperheight] + offset=.025\measured{paperheight}] [align=flushright, - foregroundcolor=blue, + foregroundcolor=white, frame=off] {\doifsomething{\documentvariable{status}}{\documentvariable{status}\par} \currentdate[month,space,year]\par Version \documentvariable{version}} -\stopstandardmakeup - -\startstandardmakeup - - \start - \raggedleft - \definedfont[Bold*default at 48pt] - \setupinterlinespace - \blue Lua\TeX \endgraf Reference \endgraf Manual \endgraf - \stop - - \vfill - - \definedfont[Bold*default at 12pt] - - \starttabulate[|l|l|] - \NC copyright \EQ Lua\TeX\ development team \NC \NR - \NC more info \EQ www.luatex.org \NC \NR - \NC version \EQ \currentdate \doifsomething{\documentvariable{snapshot}}{(snapshot \documentvariable{snapshot})} \NC \NR - \stoptabulate + \setlayerframed + [page] + [preset=middle, + hoffset=-.5\dimexpr\measured{paperwidth}-\measured{spinewidth}\relax] + [width=.7\measured{paperwidth}, + align=normal, + foregroundstyle=\bf, + foregroundcolor=white, + frame=off] + {\getbuffer[backpage]} \stopstandardmakeup -\setupbackgrounds - [leftpage] - [setups=pagenumber:left] - -\setupbackgrounds - [rightpage] - [setups=pagenumber:right] - \stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex.tex b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex.tex index 3025788bfe5..1486abd496d 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/luatex/luatex.tex @@ -1,11 +1,12 @@ % macros=mkvi +% \nopdfcompression + % \disabledirectives[vspacing.synchronizepage] % wsl /data/context/runcontext.sh --global --path=/mnt/c/data/develop/context/manuals/mkiv/external/luatex luatex.tex % % same runtime as regular context or linux - % author : Hans Hagen with Taco Hoekwater, Luigi Scarso & Hartmut Henkel % copyright : PRAGMA ADE & ConTeXt Development Team % license : Creative Commons Attribution ShareAlike 4.0 International @@ -22,10 +23,6 @@ % comment : Some (parts of) chapters might have been published in TugBoat, the NTG Maps, the % ConTeXt Group journal or otherwise. Thanks to the editors for corrections. Also % thanks to users for testing, feedback and corrections. -% -% 238 pages : 2017-07-06 -% luatex 9.5 sec / luajittex 7.0 sec -% Dell 7600 / i7 3840QM / passmark 1.922 / Windows 10 64 bit % \tex vs \type vs \syntax vs. 
\luatex % \em \it \/ @@ -33,12 +30,28 @@ % "context --nodates --nocompression luatex" can be used for comparison % runs, not that we do it +% todo: all (sub)section to start/stop + % \enabledirectives[hyphenator.flatten] % \setupsynctex[state=start,method=max] % adds 5 pct overhead +% compoundhyphenmode : looks okay +% endlocalcontrol : very experimental +% fixupboxesmode : very experimental +% mathflattenmode : looks okay +% mathrulethicknessmode : looks okay + +% after adding primitives: context mult-prm.mkiv + \environment luatex-style + +\startmode[book] + \environment luatex-style-book +\stopmode + \environment luatex-logos +\environment luatex-private \startmode[export] @@ -47,14 +60,14 @@ \stopmode -\dontcomplain - \startdocument - [status=experimental, - version=1.08.0] + [manual=Lua\TeX, + status=experimental, + version=1.10] \startnotmode[*export] \component luatex-titlepage + \component luatex-firstpage \stopnotmode \startmode[*export] @@ -67,6 +80,7 @@ \stopfrontmatter \startbodymatter + \component luatex-preamble \component luatex-enhancements \component luatex-modifications \component luatex-lua @@ -81,4 +95,9 @@ \component luatex-backend \stopbodymatter +\startbackmatter + \component luatex-registers + \component luatex-statistics +\stopbackmatter + \stopdocument diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/math/math-input.tex b/Master/texmf-dist/doc/context/sources/general/manuals/math/math-input.tex index d395e1865cb..9b14057d0c1 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/math/math-input.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/math/math-input.tex @@ -95,6 +95,36 @@ which gives: \stopsection +\startsection [title=Scripts] + +With \UNICODE\ providing math symbols and a limited set of super- and subscripts, +it made sense to add yet another feature. The scripts were already supported for +a long time, but at some point on the mailing list sequential scripts were +mentioned. So here is an example of both (some fonts, like the one used for +verbatim, don't have all symbols but you get the idea anyway): + +\startbuffer +\startformula + 𝑷₂₀(0), ∀²𝑥⁰⁺²₂₀: 𝑷₂₀(𝑥⁰⁺²₂₀) ⇒ 𝑷₂₀(s(𝑥⁰⁺²₂₀)) ⊢ ∀¹𝑦⁰⁺¹₂₀ 𝑷₂₀(𝑦⁰⁺¹₂₀) +\stopformula + +\startformula + \unstackscripts + 𝑷₂₀(0), ∀²𝑥⁰⁺²₂₀: 𝑷₂₀(𝑥⁰⁺²₂₀) ⇒ 𝑷₂₀(s(𝑥⁰⁺²₂₀)) ⊢ ∀¹𝑦⁰⁺¹₂₀ 𝑷₂₀(𝑦⁰⁺¹₂₀) +\stopformula +\stopbuffer + +\typebuffer + +which renders the clueless formulas: + +\getbuffer + +The \type {\unstackscripts} macro triggers the unstacking of super and +subscripts. + +\stopsection + \stopchapter \stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/metafun/metafun-lua.tex b/Master/texmf-dist/doc/context/sources/general/manuals/metafun/metafun-lua.tex index da35cccdebf..0794fc87919 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/metafun/metafun-lua.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/metafun/metafun-lua.tex @@ -263,7 +263,7 @@ There are several ways to deal with this table. I will show clumsy as well as better looking ways. 
\startbuffer -lua("MP = { } MP.data = table.load('demo-data.lua')") ; +lua("MP.data = table.load('demo-data.lua')") ; numeric n ; lua("mp.print('n := ',\#MP.data)") ; for i=1 upto n : @@ -524,17 +524,23 @@ The \type {mp} namespace provides the following helpers: \starttabulate[|l|l|] \HL -\NC \type {print(...)} \NC returns one or more values \NC \NR -\NC \type {pair(x,y)} - \type {pair(t)} \NC returns a proper pair \NC \NR -\NC \type {triplet(x,y,z)} - \type {triplet(t)} \NC returns an \RGB\ color \NC \NR -\NC \type {quadruple(w,x,y,z)} - \type {quadruple(t)} \NC returns an \CMYK\ color \NC \NR -\NC \type {format(fmt,...)} \NC returns a formatted string \NC \NR +\NC \type {print(...)} \NC returns one or more values \NC \NR +\NC \type {fprint(fmt,...)} \NC returns a formatted result \NC \NR +\NC \type {boolean(b)} \NC returns \type {true} or \type {false} \NC \NR +\NC \type {numeric(f)} \NC returns a floating point number \NC \NR +\NC \type {integer(i)} \NC returns a whole number \NC \NR +\NC \type {points(i)} \NC returns a floating point with unit \type {pt} \NC \NR +\NC \type {pair(x,y)|(t)} \NC returns a proper pair \NC \NR +\NC \type {pairpoints(x,y)|(t)} \NC returns a proper pair with unit \type {pt} \NC \NR +\NC \type {triplet(x,y,z)|(t)} \NC returns a \RGB\ color \NC \NR +\NC \type {tripletpoints(x,y,z)|(t)} \NC returns a \RGB\ color but with unit \type {pt} \NC \NR +\NC \type {quadruple(w,x,y,z)|(t)} \NC returns a \CMYK\ color \NC \NR +\NC \type {quadruplepoints(w,x,y,z)|(t)} \NC returns a \CMYK\ color but with unit \type {pt} \NC \NR +\NC \type {format(fmt,...)} \NC returns a formatted string \NC \NR \NC \type {quoted(fmt,...)} - \type {quoted(s)} \NC returns a (formatted) quoted string \NC \NR -\NC \type {path(t[,connect][,close])} \NC returns a connected (closed) path \NC \NR + \type {quoted(s)} \NC returns a (formatted) quoted string \NC \NR +\NC \type {path(t[,connect][,close])} \NC returns a connected (closed) path \NC \NR +\NC \type {pathpoints(t[,connect][,close])} \NC returns a connected (closed) path with units \type {pt} \NC \NR \HL \stoptabulate @@ -780,15 +786,15 @@ We load the data in datasets: \startbuffer \startMPcode - lua.mp.datasets.load("foo","foo.tmp") ; - lua.mp.datasets.load("bar","bar.tmp") ; + lua.mp.datasets("load","foo","foo.tmp") ; + lua.mp.datasets("load","bar","bar.tmp") ; fill area - lua.mp.datasets.foo.Line() + lua.mp.datasets("foo","line") xysized (HSize/2-EmWidth-.25ExHeight,10ExHeight) withpen pencircle scaled .25ExHeight withcolor darkyellow ; fill area - lua.mp.datasets.bar.Line() + lua.mp.datasets("bar","line") xysized (HSize/2-EmWidth-.25ExHeight,10ExHeight) shifted (HSize/2+EmWidth,0) withpen pencircle scaled .25ExHeight @@ -798,7 +804,7 @@ We load the data in datasets: \typebuffer -Because the datasets are stores by name we can use them without worrying about +Because the datasets are stored by name we can use them without worrying about them being forgotten: \startlinecorrection[blank] \getbuffer \stoplinecorrection @@ -808,8 +814,8 @@ valid: \starttyping \startMPcode - lua.mp.datasets.load("foo.tmp") ; - lua.mp.datasets.load("bar.tmp") ; + lua.mp.datasets("load","foo.tmp") ; + lua.mp.datasets("load","bar.tmp") ; \stopMPcode \stoptyping @@ -819,9 +825,9 @@ The following methods are defined for a dataset: \HL \NC \type {method} \NC usage \NC \NR \HL -\NC \type {Size} \NC the number of subsets in a dataset \NC \NR -\NC \type {Line} \NC the joined pairs in a dataset making a non|-|closed path \NC \NR -\NC \type {Data} \NC the table containing 
the data (in subsets, so there is always at least one subset) \NC \NR
+\NC \type {size} \NC the number of subsets in a dataset \NC \NR
+\NC \type {line} \NC the joined pairs in a dataset making a non|-|closed path \NC \NR
+\NC \type {data} \NC the table containing the data (in subsets, so there is always at least one subset) \NC \NR
 \HL
 \stoptabulate
@@ -847,7 +853,7 @@ passvariable("triplet",(1/1,1/2,1/3)) ;
 passvariable("quad",(1.1,2.2,3.3,4.4)) ;
 passvariable("boolean",false) ;
 passvariable("path",fullcircle scaled 1cm) ;
-path p[] ; p[1] := fullcircle ; p[2] := fullsquare ;
+save p ; path p[] ; p[1] := fullcircle ; p[2] := fullsquare ;
 passarrayvariable("list",p,1,2,1) ; % first last step
 \stopMPcalculation
 \stopbuffer
@@ -943,6 +949,533 @@ serialization.
 \stopsection
+
+\startsection[title={Interference}]
+
+In this section we will discuss a potential conflict with other mechanisms,
+especially primitives and macros. A simple example of using the interface is:
+
+\startbuffer
+\startluacode
+function MP.AnExample(str)
+    mp.aux.quoted(string.reverse(str))
+end
+\stopluacode
+
+\startMPcode
+draw textext(lua.MP.AnExample("Hi there!"))
+    rotated 45
+    ysized 2cm ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+\startlinecorrection
+\getbuffer
+\stoplinecorrection
+
+The \type {mp} namespace is reserved for functionality provided by \CONTEXT\
+itself so you should not pollute it with your own code. Instead use the \type {MP}
+namespace and mix in some uppercase characters.
+
+Here you see a subnamespace \type {aux} which is where officially the helpers are
+organized but they are also accessible directly (as shown in previous sections).
+The reason for the \type {aux} namespace is, apart from protection against
+redefinition, also that we have \type {get} and \type {set} namespaces and there
+might be more in the future. At the \LUA\ end it is best to use these namespaces
+because they are less likely to be accidentally overwritten by user code.
+
+As mentioned, there can still be conflicts. For instance the following will not
+work:
+
+\startbuffer
+\startluacode
+function MP.reverse(str)
+    mp.aux.quoted(string.reverse(str))
+end
+\stopluacode
+
+\startMPcode
+draw textext(lua.MP.reverse("Hi there!"))
+    rotated -45
+    ysized 2cm ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+% \startlinecorrection
+% \getbuffer
+% \stoplinecorrection
+
+The reason is that \type {reverse} gets expanded as part of parsing the macro
+name and this command expects an expression. A way out of this is the following:
+
+\startbuffer
+\startluacode
+function MP.reverse(str)
+    mp.aux.quoted(string.reverse(str))
+end
+\stopluacode
+
+\startMPcode
+draw textext(lua.MP("reverse","Hi there!"))
+    rotated -45
+    ysized 2cm ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+\startlinecorrection
+\getbuffer
+\stoplinecorrection
+
+You can add a little bit of protection for your own code by using a prefix in the
+name, like:
+
+\startbuffer
+\startluacode
+MP["mynamespace.reverse"] = function(str)
+    mp.aux.quoted(string.reverse(str))
+end
+\stopluacode
+
+\startMPcode
+draw textext(lua.MP("mynamespace.reverse","Hi there!"))
+    rotated -90
+    ysized 2cm ;
+\stopMPcode
+\stopbuffer
+
+\typebuffer
+
+\startlinecorrection
+\getbuffer
+\stoplinecorrection
+
+\stopsection
+
+\startsection[title=Predefined properties]
+
+As mentioned, the \type {mp} namespace is reserved for commands that come
+with the \CONTEXT|-|\METAFUN\ combination.
For instance, a lot of layout
+related calls are there:
+
+\startcolumns[n=3]
+\starttyping
+BackSpace
+BaseLineSkip
+BodyFontSize
+BottomDistance
+BottomHeight
+BottomSpace
+CurrentColumn
+CurrentHeight
+CurrentWidth
+CutSpace
+EmWidth
+ExHeight
+FooterDistance
+FooterHeight
+HeaderDistance
+HeaderHeight
+InnerEdgeDistance
+InnerEdgeWidth
+InnerMarginDistance
+InnerMarginWidth
+LastPageNumber
+LayoutColumnDistance
+LayoutColumns
+LayoutColumnWidth
+LeftEdgeDistance
+LeftEdgeWidth
+LeftMarginDistance
+LeftMarginWidth
+LineHeight
+MakeupHeight
+MakeupWidth
+NOfColumns
+NOfPages
+NOfPages
+NOfSubPages
+OuterEdgeDistance
+OuterEdgeWidth
+OuterMarginDistance
+OuterMarginWidth
+PageDepth
+PageFraction
+PageNumber
+PageNumber
+PageOffset
+PaperBleed
+PaperHeight
+PaperWidth
+PrintPaperHeight
+PrintPaperWidth
+RealPageNumber
+RealPageNumber
+RightEdgeDistance
+RightEdgeWidth
+RightMarginDistance
+RightMarginWidth
+SpineWidth
+StrutDepth
+StrutHeight
+SubPageNumber
+TextHeight
+TextWidth
+TopDistance
+TopHeight
+TopSkip
+TopSpace
+\stoptyping
+\stopcolumns
+
+They all return dimensions, contrary to the next few that return a boolean:
+
+\startcolumns[n=3]
+\starttyping
+OnRightPage
+OnOddPage
+InPageBody
+\stoptyping
+\stopcolumns
+
+There are also calls related to backgrounds:
+
+\startcolumns[n=3]
+\starttyping
+OverlayWidth
+OverlayHeight
+OverlayDepth
+OverlayLineWidth
+OverlayOffset
+\stoptyping
+\stopcolumns
+
+And one related to color:
+
+\startcolumns[n=3]
+\starttyping
+NamedColor
+\stoptyping
+\stopcolumns
+
+In most cases such \type {lua.mp.command()} calls have a \METAPOST\ macro with
+the same name defined.
+
+\stopsection
+
+\startsection[title=Large paths]
+
+The plugins (like those dealing with text) also use calls in the \type {mp}
+namespace but they have sort of protected names, starting with \type {mf_}. These
+are visible but not meant to be used by users. Not only can their name change,
+their functionality can as well.
+
+The following are actually private as they have related macros but also have a
+public alias:
+
+\startlines
+lua.mp.pathlength(name)
+lua.mp.pathpoint(i)
+lua.mp.pathleft(i)
+lua.mp.pathright(i)
+lua.mp.pathreset()
+\stoplines
+
+They are meant for special high|-|performance path access, for example:
+
+\startlinecorrection
+\startMPcode
+    save p, q, r;
+    path p ; p := for i=1 upto 1000 :
+        (i,0) -- (i,4 + uniformdeviate 4) --
+    endfor cycle ;
+    fill p xysized (TextWidth,20mm) withcolor red ;
+
+    path q ; q := for i=1 upto lua.mp.pathlength("p") :
+        if (i mod 4) == 0 : lua.mp.pathpoint(i) -- fi
+    endfor cycle ;
+    fill q xysized (TextWidth,2cm) shifted (0,-45mm) withcolor green ;
+
+    path r ; r := for i inpath p :
+        if not odd (i) : pointof i -- fi
+    endfor cycle ;
+    fill r xysized (TextWidth,2cm) shifted (0,-70mm) withcolor blue ;
+\stopMPcode
+\stoplinecorrection
+
+Because a lookup of a point in \METAPOST\ is a linear lookup over a linked list,
+for very large paths the gain is significant when using \LUA, because there the
+points are stored in an indexed table. The left and right points can be used
+verbose, like
+
+\starttyping
+lua.mp.pathpoint(i) controls lua.mp.pathleft(i) and lua.mp.pathright(i)
+\stoptyping
+
+or more terse:
+
+\starttyping
+pointof i controls leftof i and rightof i
+\stoptyping
+
+Beware: this kind of trickery is {\em only} needed when you have very large paths
+that are to be manipulated. Otherwise it's overkill.
+
+\stopsection
+
+\startsection[title={Interfacing to \TEX}]
+
+The next bunch of calls is for accessing \TEX\ registers. You can set their
+values and get them as well.
+
+\starttyping
+lua.mp.getmacro(k)
+lua.mp.getdimen(k)
+lua.mp.getcount(k)
+lua.mp.gettoks (k)
+
+lua.mp.setmacro(k,v)
+lua.mp.setdimen(k,v)
+lua.mp.setcount(k,v)
+lua.mp.settoks (k,v)
+\stoptyping
+
+When you mess around with variables and run into issues it might help to
+report values on the console. The \type {report} function does this:
+
+\starttyping
+lua.mp.report(a,b)
+\stoptyping
+
+\startlinecorrection
+\startMPcode
+lua.mp.report("status","a circle") ;
+fill fullcircle xyscaled (2cm,1cm) withcolor red ;
+lua.mp.report("status","its boundingbox [@N,@N,@N,@N]",
+    xpart llcorner currentpicture, ypart llcorner currentpicture,
+    xpart urcorner currentpicture, ypart urcorner currentpicture
+) ;
+draw boundingbox currentpicture withcolor blue ;
+report("status","the size: @Nbp x @Nbp",
+    bbwidth(currentpicture), bbheight(currentpicture)
+) ;
+message("done") ;
+\stopMPcode
+\stoplinecorrection
+
+The console shows:
+
+\starttyping
+metapost > status : a circle
+metapost > status : its boundingbox [-28.34645,-14.17323,28.34645,14.17323]
+metapost > status : the size: 57.1929bp x 28.84647bp
+metapost > message : done
+\stoptyping
+
+There are two more getters. These can be used to access the specific graphic
+related variables set at the \TEX\ end.
+
+\starttyping
+mp.texvar(name)
+mp.texstr(name)
+\stoptyping
+
+If you have adaptive styles you might want to test for modes.
+
+\starttyping
+if lua.mp.mode("screen") : % or processingmode
+    % some action only needed for screen documents
+fi ;
+if lua.mp.systemmode("first") :
+    % some action for the first run
+fi ;
+\stoptyping
+
+For convenience these are wrapped into macros:
+
+\starttyping
+if texmode("screen") :
+    % some action only needed for screen documents
+fi ;
+if systemmode("first") :
+    % some action for the first run
+fi ;
+\stoptyping
+
+When you implement your own helpers you can fall back on some auxiliary functions
+in the \type {mp} namespace. Actually these are collected in \type {mp.aux} and
+thereby protected from being overwritten by mistake. Here they are:
+
+% mp.flush()
+% mp.size(t)
+
+\stopsection
+
+\startsection[title={Interfacing to \METAPOST}]
+
+There is also experimental access to some of the \METAPOST\ internals. In order
+to deal with (large) paths a few more iterator related helpers are provided too.
+
+% mp.set (numeric string path boolean)
+
+\starttyping
+n = mp.getnumeric(name)
+s = mp.getstring(name)
+b = mp.getboolean(name)
+p = mp.getpath(name)
+\stoptyping
+
+A path is a table of tables that each have six values: the coordinates and the
+pre- and postcontrol points. You can manipulate this table and feed it back into
+\METAPOST.
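As a small illustration, here is a sketch of how fetching a path at the \LUA\ end
could look. The helper name \type {MP.PathSize} is made up for this example; it
only relies on the \type {getpath} and \type {print} helpers mentioned before and
on the \type {lua.MP()} calling convention from the section on interference:

\starttyping
\startluacode
-- hypothetical helper, not part of MetaFun itself: fetch the path with the
-- given name and push the number of entries back into the MetaPost run
function MP.PathSize(name)
    local p = mp.getpath(name) -- a table of tables, as described above
    mp.print(p and #p or 0)    -- inject a numeric result
end
\stopluacode

\startMPcode
    path p ; p := fullcircle scaled 1cm ;
    numeric n ; n := lua.MP("PathSize","p") ;
    draw textext(decimal n) ysized 5mm ;
\stopMPcode
\stoptyping

In a real application one would of course loop over the entries and build
something new from them instead of just counting them.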
+ +\startlinecorrection +\startMPcode +numeric n ; n := lua.mp.newhash() ; + +for i=1 upto 3 : + lua.mp.tohash(n,i) ; +endfor ; + +fill fullcircle scaled 10mm withcolor + if lua.mp.inhash(n,3) : green else : red fi ; + +fill fullcircle scaled 5mm withcolor + if lua.mp.inhash(n,4) : green else : red fi ; + +lua.mp.disposehash(n) +\stopMPcode +\stoplinecorrection + +You can also store values with keys and access them later: + +\startlinecorrection +\startMPcode +numeric n ; n := lua.mp.newhash() ; + +for i=1 upto 3 : + lua.mp.tohash(n,i,decimal sqrt(i)) ; +endfor ; + +draw textext(lua.mp.fromhash(n,3)) + ysized 1cm + withcolor blue ; + +lua.mp.disposehash(n) +\stopMPcode +\stoplinecorrection + +You can also name your own hash: + +\startlinecorrection +\startMPcode +lua.mp.newhash("foo") ; + +for i=1 upto 3 : + lua.mp.tohash("foo",i,decimal sqrt(i)) ; +endfor ; + +draw textext(lua.mp.fromhash("foo",2)) + ysized 1cm + withcolor green ; + +lua.mp.disposehash("foo") +\stopMPcode +\stoplinecorrection + +Although it might look like \METAPOST\ supports arrays the reality is that it +doesn't really. There is a concept of suffixes but internally these are just a +way to compose macros. The following test is one that is used in one of the +\METAFUN\ modules written by Alan Braslau. + +\startlinecorrection +\startMPcode +path p, q[] ; + +if lua.mp.isarray(str q[1]) : + fill fullcircle scaled 1cm withcolor red ; + draw textext(lua.mp.prefix(str q[1])) withcolor white; +else : + fill fullsquare scaled 1cm withcolor blue ; +fi ; + +currentpicture := currentpicture shifted (-2cm,0) ; + +if lua.mp.isarray(str p) : + fill fullcircle scaled 1cm withcolor red ; +else : + fill fullsquare scaled 1cm withcolor blue ; +fi ; +\stopMPcode +\stoplinecorrection + +Another helper relates to extensions that \METAFUN\ adds to \METAPOST. When you +iterate over a picture you can recognize these as objects. The next code shows +the \LUA\ call as well as the more convenient macro call. So, \type {textext} +clearly is a foreign object. + +\startlinecorrection +\startMPcode +picture p ; p := image ( + fill fullcircle scaled 1cm withcolor red ; + draw textext("ok") withcolor white ; +) ; + +for i within p : + if not picture i : + draw i ysized 4cm ; + elseif isobject i : + draw i xsized 3cm ; + else : + draw i ysized 4cm ; + fi ; +endfor ; + +currentpicture := currentpicture shifted (-6cm,0) ; + +for i within p : + if isobject(i) : + draw i xsized 5cm ; + else : + draw i ysized 3cm withcolor blue; + fi ; +endfor ; +\stopMPcode +\stoplinecorrection + +% not yet, still experimental: +% +% mp.dataset(str) +% mp.n(t) + +% not for users: +% +% mp.defaultcolormodel() + +% rather specialized: +% +% mp.positionpath(name) +% mp.positioncurve(name) +% mp.positionbox(name) +% mp.positionxy(name) +% mp.positionpage(name) +% mp.positionregion(name) +% mp.positionwhd(name) +% mp.positionpxy(name) +% mp.positionanchor() + + +% mp.cleaned +% mp.format(fmt,str) +% mp.formatted(fmt,...) +% mp.graphformat(fmt,num) + +\stopsection + \stopchapter % \startMPcode{doublefun} @@ -1055,6 +1588,4 @@ serialization. 
% % \ctxcommand{mprunvar("x")} -\stoptext - - +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-children.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-children.tex new file mode 100644 index 00000000000..b814675bbd4 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-children.tex @@ -0,0 +1,1253 @@ +% language=uk + +% naming-nature.jog + +\startcomponent musings-children + +\environment musings-style + +\definedescription + [presomething] + [headstyle=\bold, + alternative=hanging, + width=fit, + hang=1] + +\startchapter[title={Children of \TEX}] + +\startsection[title={The theme}] + +Nearly always \TEX\ conferences carry a theme. As there have been many +conferences the organizers have run out of themes involving fonts, macros and +typesetting and are now cooking up more fuzzy ones. Take the Bacho\TUG\ 2017 +theme: + +\startnarrower[left,8*right] \startpacked +\startpresomething {Premises} + The starting point, what we have, what do we use, what has been achieved? +\stoppresomething +\startpresomething {Predilections} + How do we act now, how do we want to act, what is important to us and what do + we miss? +\stoppresomething +\startpresomething {Predictions} + What is the future of \TEX, what we'd like to achieve and can we influence + it? +\stoppresomething +\stoppacked \stopnarrower + +My first impression with these three P words was: what do they mean? Followed by +the thought: this is no longer a place to take kids to. But the Internet gives +access to the Cambridge Dictionary, so instead of running to the dusty meter of +dictionaries somewhere else in my place, I made sure that I googled the most +recent definitions: + +\startnarrower[left] \startpacked +\startpresomething {premise} + an idea or theory on which a statement or action is based +\stoppresomething +\startpresomething {predilection} + if someone has a predilection for something, they like it a lot +\stoppresomething +\startpresomething {prediction} + a statement about what you think will happen in the future +\stoppresomething +\stoppacked \stopnarrower + +I won't try to relate these two sets of definitions but several words stand out +in the second set: idea, theory, action, like, statement and future. Now, as a +preparation for the usual sobering thoughts that Jerzy, Volker and I have when we +staring into a Bacho\TEX\ campfire I decided to wrap up some ideas around these +themes and words. The books that I will mention are just a selection of what you +can find distributed around my place. This is not some systematic research but +just the result of a few weeks making a couple of notes while pondering about +this conference. + +\stopsection + +\startsection[title=Introduction] + +One cannot write the amount of \TEX\ macros that I've written without also liking +books. If you look at my bookshelves the topics are somewhat spread over the +possible spectrum of topics: history, biology, astronomy, paleontology, general +science but surprisingly little math. There are a bunch of typography|-|related +books but only some have been read: it's the visuals that matter most and as +there are no real developments I haven't bought new ones in over a decade, +although I do buy books that look nice for our office display but the content +should be interesting too. Of course I do have a couple of books about computer +(related) science and technology but only a few are worth a second look. 
+Sometimes I bought computer books expecting to use them (in some project) but I +must admit that most have not been read and many will soon end up in the paper +bin (some already went that way). I'll make an exception for Knuth, Wirth and a +few other fundamental ones that I (want to) read. And, I need to catch up on deep +learning, so that might need a book. + +My colleagues and I have many discussions, especially about what we read, and +after a few decades one starts seeing patterns. Therefore the last few years it +was a pleasant surprise for me to run into books and lectures that nicely +summarize what one has noticed and discussed in a consistent way. My memory is +not that good, but good enough to let some bells ring. + +\startplacefigure[location=top] + \startcombination[nx=4,ny=1,width=\textwidth,distance=0pt] + {\externalfigure[covers/sapiens.jpg] [height=5cm]} {history} + {\externalfigure[covers/homo-deus.jpg] [height=5cm]} {futurology} + {\externalfigure[covers/children-of-time.jpg] [height=5cm]} {science fiction} + {\externalfigure[covers/superintelligence.jpg][height=5cm]} {informatics} + \stopcombination +\stopplacefigure + +The first book that gave me this \quotation {finally a perfect summary of +historic developments} feeling is \quotation{Sapiens} by Yuval Noah Harari. The +author summarizes human history from a broad perspective where modern views on +psychology, anthropology and technical developments are integrated. It's a follow +up on a history writing trend started by Jared Diamond. The follow up \quotation +{Homo Deus} looks ahead and is just as well written. It also integrates ideas +from other fields, for instance those related to development of artificial +intelligence (Dennett, Bostrom, etc.). + +Another inspiration for this talk and article is the 50 hour lecture series on +behavioral biology by Robert Sapolsky of Stanford University, brought to my +attention by my nephew Bram who visited a few \TEX\ conferences with me and who +is now also forced to use \TEX\ for assignments and reports. (How come +self|-|published books used at universities often look so bad?) + +The title of this talk is inspired by the book \quotation {Children of Time} by +Adrian Tchaikovsky that I read recently. There are science fiction writers who +focus on long term science and technology, such as some of Alastair Reynolds, +while others follow up on recent development in all kind of sciences. One can +recognize aspects of \quotation {Superintelligence} by Bostrom in Neal Asher's +books, insights in psychology in the older Greg Bear books, while in the +mentioned \quotation {Children of Time} (socio)biological insights dominate. The +main thread in that book is the development of intelligence, social behaviour, +language, script and cooperation in a species quite different from us: spiders. +It definitely avoids the anthropocentric focus that we normally have. + +So how does this relate to the themes of the Bacho\TEX\ conference? I will pick +out some ways to approach them using ideas from the kind of resources mentioned +above. I could probably go on and on for pages because once you start relating +what you read and hear to this \TEX\ ecosystem and community, there is no end. +So, consider this a snapshot, that somehow relates to the themes: + +\startnarrower[left,8*right] \startpacked +\startpresomething {premise} + Let's look at what the live sciences have to say about \TEX\ and friends and + let's hope that I don't offend the reader and the field. 
+\stoppresomething +\startpresomething {predilection} + Let's figure out what brings us here to this place deeply hidden in the woods, + a secret gathering of the \TEX\ sect. +\stoppresomething +\startpresomething {prediction} + Let's see if the brains present here can predict the future because after + all, according to Dennett, that is what brains are for. +\stoppresomething +\stoppacked \stopnarrower + +At school I was already intrigued by patterns in history: a cyclic, spiral and +sinusoid social evolution instead of a pure linear sequence of events. It became +my first typeset|-|by|-|typewriter document: Is history an exact science? Next I +will use and abuse patterns and ideas to describe the \TEX\ world, not wearing a +layman's mathematical glasses, but more from the perspective of live sciences, +where chaos dominates. + +\stopsection + +\startsection[title={The larger picture}] + +History of mankind can be roughly summarized as follows. For a really long time +we were hunters but at some point (10K years ago) became farmers. As a result we +could live in larger groups and still feed them. The growing complexity of +society triggered rules and religion as instruments for stability and +organization (I use the term religion in its broadest sense here). For quite a +while cultures came and went, and climate changes are among the reasons. + +After the industrial revolution new religions were invented (social, economic and +national liberalism) and we're now getting dataism (search for Harari on youtube +for a better summary). Some pretty great minds seem to agree that we're heading +to a time when humans as we are will be outdated. Massive automation, interaction +between the self and computer driven ecosystems, lack of jobs and purpose, +messing around with our genome. Some countries and cultures still have to catch +up on the industrial revolution, if they manage at all, and maybe we ourselves +will be just as behind reality soon. Just ask yourself: did you manage to catch +up? Is \TEX\ a stone age tool or a revolutionary turning point? + +A few decades ago a trip to Bacho\TEX\ took more than a day. Now you drive there +in just over half a day. There was a time that it took weeks: preparation, +changing horses, avoiding bad roads. Not only your own man|-|hours were involved. +It became easier later (my first trip took only 24 hours) and recently it turned +into a piece of cake: you don't pick up maps but start your device; you don't +need a travel agent but use the Internet; there are no border patrols, you can +just drive on. (Okay, maybe some day soon border patrols at the Polish border +show up again, just like road tax police in Germany, but that might be a +temporary glitch.) + +Life gets easier and jobs get lost. Taxi and truck drivers, travel agents, and +cashiers become as obsolete as agricultural workers before. Next in line are +doctors, lawyers, typesetters, printers, and all those who think they're safe. +Well, how many people were needed 400 years ago to produce the proceedings of a +conference like this in a few days' time span? Why read the introduction of a +book or a review when you can just listen to the author's summary on the web? How +many conferences still make proceedings (or go for videos instead), will we +actually need editors and typesetters in the future? How much easier has it +become to design a font, including variants? What stories can designers tell in +the future when programs do the lot? 
The narrower your speciality is, the worse
+are your chances; hopefully the people present at this conference operate on a
+broader spectrum. It's a snapshot. I will show some book covers as reference but
+am aware that years ago or ahead the selection could have been different.
+
+\stopsection
+
+\startsection[title=Words]
+
+Words (whatever they represent) found a perfect spot to survive: our minds. Then
+they made it from speech (and imagination) into writing: carved in stone, wood,
+lead. At some point they managed to travel over wires but no matter what
+happened, they are still around. Typesetting as visualization is also still
+surrounding us so that might give us a starting point for ensuring a future for
+\TEX\ to work on, because \TEX\ is all about words. There is a lot we don't see;
+imagine if our eyes had microscopic qualities. What if we could hear beyond
+20KHz. Imagine we could see infrared. How is that with words. What tools, similar
+in impact to \TEX, can evolve once we figure that out. What if we get access to
+the areas of our brain that hold information? We went from print to screen and
+\TEX\ could cope with that. Can it cope with what comes next?
+
+The first printing press replaced literal copying by hand. Later we got these
+linotype|-|like machines but apart from a few left, these are already thrown out
+of windows (as we saw in a movie a few Bacho\TeX's ago). Photo|-|typesetting has
+been replaced too and because a traditional centuries old printing press is a
+nice to see item, these probably ring more bells than those gray metal closed box
+typesetters. Organizers of \TEX\ conferences love to bring the audience to old
+printing workshops and museums. At some point computers got used for typesetting
+and in that arena \TEX\ found its place. These gray closed boxes are way less
+interesting than something mechanical that at least invites us to touch it. How
+excited can one be about a stack of \TEX\,Live \DVD{}s?
+
+\stopsection
+
+\startsection[title=Remembering]
+
+Two times I visited the part of the science museum in London with young family
+members: distracted by constantly swiping their small powerful devices, they
+didn't have the least interest in the exhibited computer related items, let alone
+the fact that the couch they were sitting on was a Cray mainframe. Later on,
+climbing on some old monument or an old cannon seemed more fun. So, in a few
+decades folks will still look at wooden printing presses but quickly walk through
+the part of an exhibition where the tools that we use are shown. We need to find
+ways to look interesting. But don't think we're unique: how many kids find
+graphical trend|-|setting games like Myst and Riven still interesting? On the
+other hand a couple of months ago a bunch of nieces and nephews had a lot of fun
+with an old Atari console running low|-|res bitmap games. Maybe there is hope for
+good old \TEX.
+
+If indeed we're heading to a radically different society one can argue if this
+whole discussion makes sense. When the steam engine showed up, the metaphor for
+what went on in our heads was that technology. It's a popular example for speakers
+on this topic: \quotation {venting off steam}. When electricity and radio came
+around metaphors like \quotation {being on the same wavelength} showed up. A few
+decades ago the computer replaced that model although in the meantime the model
+is more neurobiological: we're a hormone and neurotransmitter driven computer. We
+don't have memory the way computers do.
+
+How relevant will page breaks, paragraph and line breaks be in the future? Just
+like \quotation {venting off steam} may make no sense to the youth, asking a
+typesetter to \quotation {give me a break} might not make much sense soon.
+However, when discussing automated typesetting the question \quotation {are we on
+the same page} still has relevance.
+
+Typesetting with a computer might seem like the ultimate solution but it's
+actually rather dumb when we consider truly intelligent systems. On the large
+scale of history and developments what we do might get quite unnoticed. Say that
+mankind survives the next few hundred years one way or the other. Science fiction
+novels by Jack McDevitt have an interesting perspective of rather normal humans
+millennia ahead of us who look back on these times in the same way as we look
+back now. Nothing fundamental changed in the way we run society. Nearly nothing
+from the past is left over and apart from being ruled by \AI{}s people still do
+sort of what they do now. \TEX ? What is that? Well, there once was this great
+computer scientist Knuth (in the remembered row of names like Aristotle |<|I just
+started reading \quotation {The Lagoon} by Armand Leroi|>| Newton, Einstein, his
+name will show up) who had a group of followers that used a program that he seems
+to have written. And even that is unlikely to be remembered, unless maybe user
+groups manage to organize an archive and pass that on. Maybe the fact that \TEX\
+was one of the first large scale open source programs, of which someone can study
+the history, makes it a survivor. The first program that was properly documented
+in detail! But then we need to make sure that it gets known and persists.
+
+\stopsection
+
+\startsection[title=Automation]
+
+In a recent interview Daniel Dennett explains that his view of the mind as a big
+neural network, one that can be simulated in software on silicon, is a bit too
+simplistic. He wonders if we shouldn't more tend to think of a network of
+(selfish) neurons that group together in tasks and then compete with each other,
+if only because they want to have something to do.
+
+Maybe attempts to catch the creative mindset and working of a typesetter in
+algorithms are futile. What actually is great typography or good typesetting?
+Recently I took a look at my bookshelf wondering what to get rid of \emdash\
+better do that now than when I'm too old to carry the crap down (crap being
+defined as uninteresting content or bad looking). I was surprised about the
+on|-|the|-|average bad quality of the typesetting and print. It's also not really
+getting better. One just gets accustomed to what is the norm at a certain point.
+Whenever they change the layout and look and feel of the newspaper I read the
+arguments are readability and ease of access. Well, I never had such a hard time
+reading my paper as today (with my old eyes).
+
+Are we, like Dennett, willing to discard old views on our tools and models? My
+first computer was a \RCA\ 1802 based kit that had 256 bytes of memory. My
+current laptop (from 2013) is a Dell Precision workstation with an extreme quad
+core processor and 16 GB of memory and ssd storage. Before I arrived there I
+worked with \DECTEN, \VAX\ and the whole range of Intel \CPU{}s. So if you really
+want to compare a brain with a computer, take your choice.
+
+I started with \TEX\ on a 4 MHz desktop with 640 KB memory and a 10 MB hard
+disk.
Running \CONTEXT\ \MKIV\ with \LUATEX\ on such a machine is no option at +all, but I still carry the burden of trying to write efficient code (which is +still somewhat reflected in the code that makes up \CONTEXT). In the decades that +we have been using \TEX\ we had to adapt! Demands changed, possibilities changed, +technologies changed. And they keep changing. How many successive changes can a +\TEX\ user handle? Sometimes, when I look and listen I wonder. + +\startplacefigure[location=top] + \startcombination[nx=4,ny=1,width=\textwidth,distance=0pt] + {\externalfigure[covers/the-mind-in-the-cave.jpg] [height=5cm]} {paleontology} + {\externalfigure[covers/the-ancestors-tale.jpg] [height=5cm]} {evolutionary biology} + {\externalfigure[covers/the-good-book-of-human-nature.jpg][height=5cm]} {anthropology} + {\externalfigure[covers/chaos-and-harmony.jpg] [height=5cm]} {physics} + \stopcombination +\stopplacefigure + +If you look back, that is, if you read about the tens of thousands of years that +it took humans to evolve (\quotation {The mind in the cave} by Lewis|-|Williams +is a good exercise) you realize even more in what a fast|-|paced time we live and +that we're witnessing transitions of another magnitude. + +In the evolution of species some tools were invented multiple times, like eyes. +You see the same in our \TEX\ world: multiple (sub)macro packages, different font +technologies, the same solutions but with an alternative approach. Some +disappear, some stay around. Just like different circumstances demand different +solutions in nature, so do different situations in typesetting, for instance +different table rendering solutions. Sometime I get the feeling that we focus too +much on getting rid of all but one solution while more natural would be to accept +diversity, like bio|-|diversity is accepted. Transitions nowadays happen faster +but the question is if, like aeons before, we (have to) let them fade away. When +evolution is discussed the terms \quote {random}, \quote {selection}, \quote +{fit}, and so on are used. This probably also applies to typography: at some +point a font can be used a lot, but in the end the best readable and most +attractive one will survive. Newspapers are printed in many copies, but rare +beautiful books hold value. Of course, just like in nature some developments +force the further path of development, we don't suddenly grow more legs or digits +on our hands. The same happens with \TEX\ on a smaller timescale: successors +still have the same core technology, also because if we'd drop it, it would be +something different and then give a reason to reconsider using such technology +(which likely would result in going by another path). + +\stopsection + +\startsection[title=Quality] + +Richard Dawkins \quotation {The Ancestor's Tale} is a non|-|stop read. In a +discussion with Jared Diamond about religion and evolution they ponder this +thread: you holding the hand of your mother who is handing her mother's hand and +so on till at some point fish get into the picture. The question then is, when do +we start calling something human? And a related question is, when does what we +call morality creeps in? Is 50\% neanderthaler human or not? + +So, in the history of putting thoughts on paper: where does \TEX\ fit in? When do +we start calling something automated typesetting? When do we decide that we have +quality? Is \TEX\ so much different from its predecessors? 
And when we see +aspects of \TEX\ (or related font technology) in more modern programs, do we see +points where we cross qualitative or other boundaries? Is a program doing a +better job than a human? Where do we stand? There are fields where there is no +doubt that machines outperform humans. It's probably a bit more difficult in +aesthetic fields except perhaps when we lower the conditions and expectations +(something that happens a lot). + +For sure \TEX\ will become obsolete, maybe even faster that we think, but so will +other typesetting technologies. Just look back and have no illusions. Till then +we can have our fun and eventually, when we have more free time than we need, we +might use it out of hobbyism. Maybe \TEX\ will be remembered by probably its most +important side effect: the first large scale open source, the time when users met +over programs, Knuth's disciples gathered in user groups, etc. The tools that we +use are just a step in an evolution. And, as with evolution, most branches are +pruned. So, when in the far future one looks back, will they even notice \TEX ? +The ancestor's tail turns the tree upside down: at the end of the successful +branch one doesn't see the dead ends. + +Just a thought: \CD{}s and media servers are recently being replaced (or at least +accompanied) by Long Play records. In the shop where I buy my \CD{}s the space +allocated to records grows at the cost of more modern media. So, maybe at some +point retro|-|typesetting will pop up. Of course it might skip \TEX\ and end up +at woodcutting or printing with lead. + +\stopsection + +\startsection[title=What mission] + +We rely on search engines instead of asking around or browsing libraries. Do +students really still read books and manuals or do they just search and listen to +lectures. Harari claims that instead of teaching kids facts in school we should +just take for granted that they can get all the data they want and that we should +learn them how to deal with data and adapt to what is coming. We take for granted +that small devices with human voices show us the route to drive to Bacho\TEX, for +instance, although by now I can drive it without help. In fact, kids can surprise +you by asking if we're driving in Germany when we are already in Poland. + +We accept that computer programs help physicians in analyzing pictures. Some wear +watches that warn them about health issues, and I know a few people who monitor +their sugar levels electronically instead of relying on their own measurements. +We seem to believe and trust the programs. And indeed, we also believe that \TEX\ +does the job in the best way possible. How many people really understand the way +\TEX\ works? + +We still have mailing lists where we help each other. There are also wikis and +forums like stack exchange. But who says that even a moderate bit of artificial +intelligence doesn't answer questions better. Of course there needs to be input +(manuals, previous answers, etc.) but just like we need fewer people as workforce +soon, the number of experts needed also can be smaller. And we're still talking +about a traditional system like \TEX. Maybe the social experience that we have on +these media will survive somehow, although: how many people are members of +societies, participate in demonstrations, meet weekly in places where ideas get +exchanged, compared to a few decades ago? That being said, I love to watch posts +with beautiful \CONTEXT\ solutions or listen to talks by enthusiastic users who +do things I hadn't expected. 
I really hope that this property survives, just like +I hope that we will be able to see the difference between a real user's response +and one from an intelligent machine (an unrealistic hope I fear). Satisfaction +wins and just like our neurological subsystems at some point permanently adapt to +thresholds (given that you trigger things often enough), we get accustomed to +what \TEX\ provides and so we stick to it. + +\stopsection + +\startsection[title={Intelligence versus consciousness}] + +Much of what we do is automated. You don't need to think of which leg to move and +what foot to put down when you walk. Reacting to danger also to a large extent is +automated. It doesn't help much to start thinking about how dangerous a lion can +be when it's coming after you, you'd better move fast. Our limbic system is +responsible for such automated behaviour, for instance driven by emotions. The +more difficult tasks and thoughts about them happen in the frontal cortex (sort +of). + +\startplacefigure[location=top] + \startcombination[nx=4,ny=1,width=\textwidth,distance=0pt] + {\externalfigure[covers/death-by-black-hole.jpg] [height=5cm]} {astronomy} + {\externalfigure[covers/the-formula.jpg] [height=5cm]} {informatics} + {\externalfigure[covers/hals-legacy.jpg] [height=5cm]} {future science} + {\externalfigure[covers/lucky-planet.jpg] [height=5cm]} {earth science} + \stopcombination +\stopplacefigure + +For most users \TEX\ is like the limbic system: there is not much thinking +involved, and the easy solutions are the ones used. Just like hitting a nerve +triggers a chain of reactions, hitting a key eventually produces a typeset +document. Often this is best because the job needs to get done and no one really +cares how it looks; just copy a preamble, key in the text and assume that it +works out well (enough). It is tempting to compare \TEX's penalties, badness and +other parameters with levels of hormones and neurotransmitters. Their function +depends on where they get used and the impact can be accumulated, blocked or +absent. It's all magic, especially when things interact. + +Existing \TEX\ users, developers and user groups of course prefer to think +otherwise, that it is a positive choice by free will. That new users have looked +around and arrived at \TEX\ for good reason: their frontal cortex steering a +deliberate choice. Well, it might have played a role but the decision to use +\TEX\ might in the end be due to survival skills: I want to pass this exam and +therefore I will use that weird system called \TEX. + +All animals, us included, have some level of intelligence but also have this hard +to describe property that we think makes us what we are. Intelligence and +consciousness are not the same (at least we know a bit about the first but nearly +nothing about the second). We can argue about how well composed some music is but +why we like it is a different matter. + +We can make a well thought out choice for using \TEX\ for certain tasks but can +we say why we started liking it (or not)? Why it gives us pleasure or maybe +grief? Has it become a drug that we got addicted to? So, one can make an +intelligent decision about using \TEX\ but getting a grip on why we like it can +be hard. Do we enjoy the first time struggle? Probably not. Do we like the folks +involved? Yes, Don Knuth is a special and very nice person. Can we find help and +run into a friendly community? 
Yes, and a unique one too, annoying at times, +often stimulating and on the average friendly for all the odd cases running +around. + +Artificial intelligence is pretty ambitious, so speaking of machine intelligence +is probably better. Is \TEX\ an intelligent program? There is definitely some +intelligence built in and the designer of that program is for sure very +intelligent. The designer is also a conscious entity: he likes what he did and +finds pleasure in using it. The program on the other hand is just doing its job: +it doesn't care how it's done and how long it takes: a mindless entity. So here +is a question: do we really want a more intelligent program doing the job for us, +or do those who attend conferences like Bacho\TEX\ enjoy \TEX ing so much that +they happily stay with what they have now? Compared to rockets tumbling down +and|/|or exploding or Mars landers thrashing themselves due to programming errors +of interactions, \TEX\ is surprisingly stable and bug free. + +\stopsection + +\startsection[title={Individual versus group evolution}] + +After listening for hours to Sapolsky you start getting accustomed to remarks +about (unconscious) behaviour driven by genes, expression and environment, aimed +at \quotation {spreading many copies of your genes}. In most cases that is an +individual's driving force. However, cooperation between individuals plays a role +in this. A possible view is that we have now reached a state where survival is +more dependent on a group than on an individual. This makes sense when we +consider that developments (around us) can go way faster than regular evolution +(adaptation) can handle. We take control over evolution, a mechanism that needs +time to adapt and time is something we don't give it anymore. + +Why does \TEX\ stay around? It started with an individual but eventually it's the +groups that keeps it going. A too|-|small group won't work but too|-|large groups +won't work either. It's a known fact that one can only handle some 150 social +contacts: we evolved in small bands that split when they became too large. Larger +groups demanded abstract beliefs and systems to deal with the numbers: housing, +food production, protection. The \TEX\ user groups also provide some +organization: they organize meetings, somehow keep development going and provide +infrastructure and distributions. They are organized around languages. According +to Diamond new languages are still discovered but many go extinct too. So the +potential for language related user groups is not really growing. + +Some of the problems that we face in this world have become too large to be dealt +with by individuals and nations. In spite of what anti|-|globalists want we +cannot deal with our energy hunger, environmental issues, lack of natural +resources, upcoming technologies without global cooperation. We currently see a +regression in cooperation by nationalistic movements, protectionism and the usual +going back to presumed better times, but that won't work. + +Local user groups are important but the number of members is not growing. There +is some cooperation between groups but eventually we might need to combine the +groups into one which might succeed unless one wants to come first. Of course we +will get the same sentiments and arguments as in regular politics but on the +other hand, we already have the advantage of \TEX\ systems being multi|-|lingual +and users sharing interest in the diversity of usage and users. 
The biggest +challenge is to pass on what we have achieved. We're just a momentary highlight +and let's not try to embrace some \quotation {\TEX\ first} madness. + +\stopsection + +\startplacefigure[location=top] + \startcombination[nx=4,ny=1,width=\textwidth,distance=0pt] + {\externalfigure[covers/3-16.jpg] [height=5cm]} {art} + % {\externalfigure[covers/dirt.jpg] [height=5cm]} {history} + {\externalfigure[covers/the-winds-of-change.jpg] [height=5cm]} {history} + {\externalfigure[covers/pale-blue-dot.jpg] [height=5cm]} {astronomy} + {\externalfigure[covers/the-third-chimpanzee.jpg][height=5cm]} {history} + \stopcombination +\stopplacefigure + +\startsection[title=Sexes] + +Most species have two sexes but it is actually a continuum controlled by hormones +and genetic expression: we just have to accept it. Although the situation has +improved there are plenty of places where some gender relationships are +considered bad even to the extent that one's life can be in danger. Actually +having strong ideas about these issues is typically human. But in the end one has +to accept the continuum. + +In a similar way we just have to accept that \TEX\ usage, application of \TEX\ +engines, etc.\ is a continuum and not a batch versus \WYSIWYG\ battle any more. +It's disturbing to read strong recommendations not to use this or that. Of the +many macro packages that showed up only a few were able to survive. How do users +of outlines look at bitmaps, how do \DVI\ lovers look at \PDF. But, as +typesetting relates to esthetics, strong opinions come with the game. + +Sapolsky reports about a group of baboons where due to the fact that they get the +first choice of food the alpha males of pack got poisoned, so that the remaining +suppressed males who treated the females well became dominant. In fact they can +then make sure that no new alpha male from outside joins the pack without +behaving like they do. A sort of social selection. In a similar fashion, until +now the gatherings of \TEX ies managed to keep its social properties and has not +been dominated by for instance commerce. + +% So, maybe should focus on acceptance and tolerance and then make sure that that +% we keep what we have and let it not be influenced too much by sectarianism. It +% makes a nice topic for a meeting of the context (sub)group, that actually has a +% women as driving force. How can we preserve what we have but still proceed is a +% legitimate question. Where do we stand in the landscape. + +In the animal world often sexes relate to appearance. The word sexy made it to +other domains as well. Is \TEX\ sexy? For some it is. We often don't see the real +colors of birds. What looks gray to us looks vivid to a bird which sees in a +different spectrum. The same is true for \TEX. Some users see a command line +(shell) and think: this is great! Others just see characters and keystrokes and +are more attracted to an interactive program. When I see a graphic made by +\METAPOST, I always note how exact it is. Others don't care if their interactive +effort doesn't connect the dots well. Some people (also present here) think that +we should make \TEX\ attractive but keep in mind that like and dislike are not +fixed human properties. Some mindsets might as well be the result from our +makeup, others can be driven by culture. 
+ +\stopsection + +\startsection[title=Religion] + +One of Sapolsky's lectures is about religion and it comes in the sequence of +mental variations including depression and schizophrenia, because all these +relate to mental states, emotions, thresholds and such (all things human). That +makes it a tricky topic which is why it has not been taped. As I was raised in a +moderate Protestant tradition I can imagine that it's an uncomfortable topic +instead. But there are actually a few years older videos around and they are +interesting to watch and not as threatening as some might expect. Here I just +stick to some common characteristics. + +If you separate the functions that religions play into for instance explanation +of the yet unknown, social interactions, control of power and regulation of +morals, then it's clear why at \TEX\ user group meetings the religious aspect of +\TEX\ has been discussed in talks. Those who see programs as infallible and +always right and don't understand the inner working can see it as an almighty +entity. In the Netherlands church-going diminishes but it looks like alternative +meetings are replacing it (and I'm not talking of football matches). So what are +our \TEX\ meetings? What do we believe in? The reason that I bring up this aspect +is that in the \TEX\ community we can find aspects of the more extremist aspects +of religions: if you don't use the macro package that I use, you're wrong. If you +don't use the same operating system as I do, you're evil. You will be punished if +you use the wrong editor for \TEX ? Why don't you use this library (which, by the +way, just replaced that other one)? We create angels and daemons. Even for quite +convinced atheists (it's not hard to run into them on youtube) a religion only +survives when it has benefits, something that puzzles them. So when we're +religious about \TEX\ and friends we have to make sure that it's at least +beneficial. Also, maybe we fall in Dennett's category of \quotation {believers +who want to believe}: it helps us to do our job if we just believe that we have +the perfect tool. Religion has inspired visual and aural art and keeps doing +that. (Don Knuth's current musical composition project is a good example of +this.) + +Scientists can be religious, in flexible ways too, which is demonstrated by Don +Knuth. In fact, I'm pretty sure \TEX\ would not be in the position it is in now +if it weren't for his knowledgeable, inspirational, humorous, humble, and always +positive presence. And for sure he's not at all religious about the open source +software that he sent viral. + +I'm halfway through reading \quotation {The Good Book of Human Nature} (An +Evolutionary Reading of the Bible) a book about the evolution of the bible and +monotheism which is quite interesting. It discusses for instance how transitions +from a hunter to a farmer society demanded a change of rules and introduced +stories that made sense in that changing paradigm. Staying in one place means +that possessions became more important and therefore inheritance. Often when +religion is discussed by behavioral biologists, historians and anthropologists +they stress this cultural narrative aspect. Also mentioned is that such societies +were willing to support (in food and shelter) the ones that didn't normally fit +it but added to the spiritual character of religions. 
The social and welcoming +aspect is definitely present in for instance Bacho\TEX\ conferences although a +bystander can wonder what these folks are doing in the middle of the night around +a campfire, singing, drinking, frying sausages, spitting fire, and discussing the +meaning of life. + +Those who wrap up the state of religious affairs, do predictions and advocate the +message, are sometimes called evangelists. I remember a \TEX\ conference in the +\USA\ where the gospel of \XML\ was preached (by someone from outside the \TEX\ +community). We were all invited to believe it. I was sitting in the back of the +crowded (!)\ room and that speaker was not at all interested in who spoke before +and after. Well, I do my share of \XML\ processing with \CONTEXT, but believe me: +much of the \XML\ that we see is not according to any gospel. It's probably +blessed the same way as those state officials get blessed when they ask and pray +for it in public. + +It can get worse at \TEX\ conferences. Some present here at Bacho\TEX\ might +remember the \PDF\ evangelists that we had show up at \TEX\ conferences. You see +this qualification occasionally and I have become quite allergic to +qualifications like architect, innovator, visionary, inspirator and evangelist, +even worse when they look young but qualify as senior. I have no problem with +religion at all but let's stay away from becoming one. And yes, typography also +falls into that trap, so we have to be doubly careful. + +\stopsection + +\startplacefigure[location=top] + \startcombination[nx=4,ny=1,width=\textwidth,distance=0pt] + {\externalfigure[covers/from-bacteria-to-bach-and-back.jpg][height=5cm]} {philosophy} + {\externalfigure[covers/the-lagoon.jpg] [height=5cm]} {science history} + {\externalfigure[covers/chaos.jpg] [height=5cm]} {science} + {\externalfigure[covers/why-zebras-dont-get-ulcers.jpg] [height=5cm]} {behavioral biology} + \stopcombination +\stopplacefigure + +\startsection[title=Chaotic solutions] + +The lectures on \quotation {chaos and reductionism} and \quotation {emergence and +complexity} were the highlights in Sapolsky's lectures. I'm not a good narrator +so I will not summarize them but it sort of boils down to the fact that certain +classes of problems cannot be split up in smaller tasks that we understand well, +after which we can reassemble the solutions to deal with the complex task. +Emerging systems can however cook up working solutions from random events. +Examples are colonies of ants and bees. + +The \TEX\ community is like a colony: we cook up solutions, often by trial and +error. We dream of the perfect solutions but deep down know that esthetics cannot +be programmed in detail. This is a good thing because it doesn't render us +obsolete. At last year's Bacho\TEX, my nephew Teun and I challenged the anthill +outside the canteen to typeset the \TEX\ logo with sticks but it didn't persist. +So we don't need to worry about competition from that end. How do you program a +hive mind anyway? + +When chaos theory evolved in the second half of the previous century not every +scientist felt happy about it. Instead of converging to more perfect predictions +and control in some fields a persistent uncertainty became reality. + +After about a decade of using \TEX\ and writing macros to solve recurring +situations I came to the conclusion that striving for a perfect \TEX\ (the +engine) that can do everything and anything makes no sense. 
Don Knuth not only
+stopped adding code when he could do what he needed for his books, he also stuck
+to what to me seem reasonable endpoints. Every hard|-|coded solution beyond that
+is just that: a hard|-|coded solution that is not able to deal with the
+exceptions that make up most of the more complex documents. Of course we can
+theorize and discuss at length the perfect never|-|reachable solutions but
+sometimes it makes more sense to admit that an able user of a desktop publishing
+system can do that job in minutes, just by looking at the result and moving
+around an image or piece of text a bit.
+
+There are some hard|-|coded solutions and presets in the programs but with
+\LUATEX\ and \MPLIB\ we try to open those up. And that's about it. Thinking that
+for instance adding features like protrusion or expansion (or whatever else)
+always leads to better results is just a dream. Just as a butterfly flapping its
+wings on one side of the world can have an effect on the other side, so can
+adding a single syllable to your source completely confuse an otherwise clever
+column or page break algorithm. So, we settle for not adding more to the engine,
+and provide just a flexible framework.
+
+A curious observation is that when Edward Lorenz ran into chaotic models it was
+partially due to a restart of a simulation midway, using printed floating point
+numbers that were then represented in the computer with a different accuracy
+than the printed ones. Aware of floating point numbers being represented
+differently across architectures, Don Knuth made sure that \TEX\ was insensitive
+to this so that its outcome was predictable, if you knew how it worked
+internally. Maybe \LUATEX\ introduces a bit of chaos because the \LUA\ we use
+has only floats. In fact, a few months ago we did uncover a bug in the backend
+where the same phenomenon gave a chaotic crash.
+
+In chaos theory there is the concept of an attractor. When visualized this can be
+the area (seemingly random) covered by a trajectory. Or it can be a single point
+where for instance a pendulum comes to rest. So what is our attractor? We have a
+few actually. First there is the engine, the stable core of primitives always
+present. You often see programs grow more complex every update and for sure that
+happened with \ETEX, \PDFTEX, \XETEX\ and \LUATEX. However there is always the
+core that is supposed to be stable. After some time the new kid arrives at a
+stable state not much different from the parent. The same is true for \METAPOST.
+Fonts are somewhat different because the technology changes but in the end the
+shapes and their interactions become stable as well. Yet another example is \TEX\
+Live: during a year it might diverge from its route but eventually it settles
+down and enters the area where we expect it to end up. The \TEX\ world is at
+times chaotic, but stable in the long run.
+
+So, how about the existence, the reason for it still being around? One can
+speculate about its future trajectory but one thing is sure: as long as we break
+a text into paragraphs and pages \TEX\ is hard to beat. But what if we don't need
+that any more? What if the concept of a page is no longer relevant? What if
+justified texts no longer matter (often designers don't care anyway)? What if
+students are no longer challenged to come up with a nice looking thesis? Do these
+collaborative tools with remote \TEX\ processing really bring new long term users
+or is \TEX\ then just one of the come|-|and|-|go tools?
+
+\stopsection
+
+\startsection[title=Looking ahead]
+
+In an interview (\quotation {World of ideas}) Asimov explains that science
+fiction evolved rapidly when people lived long enough to see that there was a
+future (even for their offspring) that is different from today. It is (at least
+for me) mind boggling to think of an evolution of hundreds of thousands of years
+to achieve something like language. Waiting for the physical being to arrive at a
+spot where you can make sounds, where the brain is suitable for linguistic
+patterns, etc. A few hundred years ago the speed of developments (and of science)
+stepped up.
+
+\TEX\ is getting near 40 years old. Now, for software that {\bf is} old! In that
+period we have seen computers evolve: thousands of times faster processing, even
+more increase in memory and storage. If we read about spaceships that travel at a
+reasonable fraction of the speed of light, and think that will not happen soon,
+just think back to the terminals that were sitting in computer labs when \TEX\
+was developed: 300 baud was normal. I actually spent quite some time on
+optimizing time|-|critical components of \CONTEXT\ but on this timescale that is
+really a waste of time. But even temporary bottlenecks can be annoying (and
+costly) enough to trigger such an effort. (Okay, I admit that it can be a
+challenge, a kind of game, too.)
+
+Neil Tyson, in the video \quotation {Storytelling of science}, says that when
+science made it possible to make photos it also made possible a transition in
+painting to impressionism. Other technology could make the exact snapshot so
+there was new room for inner feelings and impressions. When the Internet showed
+up we went through a similar transition, but \TEX\ actually dates from before the
+Internet. Did we also have a shift in typesetting? To some extent yes, browsers
+and real time rendering are different from rendering pages on paper. In what
+space and time are \TEX ies rooted?
+
+We get older than previous generations. Quoting Sapolsky \quotation{\unknown\ we
+are now living well enough and long enough to slowly fall apart.} The opposite is
+happening with our tools, especially software: its useful lifetime becomes
+shorter and it changes faster each year. Just look at the version numbers of
+operating systems. Don Knuth expected \TEX\ to last for a long time and compared
+to other software its core concept and implementation is doing surprisingly well.
+We use a tool that suits our lifespan! Let's not stress ourselves out too much
+with complex themes. (It helps to read \quotation {Why zebras don't get ulcers}.)
+
+\stopsection
+
+\startsection[title=Memes]
+
+If you repeat a message often enough, even if it's something not true, it can
+become a meme that gets itself transferred across generations. Conferences like
+this are where they can evolve. We tell ourselves and the audience how good \TEX\
+is and because we spend so many hours, days, weeks, months using it, it actually
+must be good, or otherwise we would not come here and talk about it. We're not so
+stupid as to spend time on something not good, are we? We're always surprised
+when we run into a (potential) customer who seems to know \TEX. It rings a bell,
+and it being around must mean something. Somehow the \TEX\ meme has anchored
+itself when someone attended university. Even if experiences might have been bad
+or usage was minimal. The meme that \TEX\ is the best in math typesetting is a
+strong survivor.
+ +There's a certain kind of person who tries to get away with their own deeds and +decisions by pointing to \quotation {fake news} and accusations of \quotation +{mainstream media} cheating on them. But to what extent are our stories true +about how easy \TEX\ macro packages are to use and how good their result? We have +to make sure we spread the right memes. And the user groups are the guardians. + +Maybe macro packages are like memes too. In the beginning there was a bunch but +only some survived. It's about adaptation and evolution. Maybe competition was +too fierce in the beginning. Like ecosystems, organisms and cellular processes in +biology we can see the \TEX\ ecosystem, users and usage, as a chaotic system. +Solutions pop up, succeed, survive, lead to new ones. Some look similar and +slightly different input can give hugely different outcomes. You cannot really +look too far ahead and you cannot deduce the past from the present. Whenever +something kicks it off its stable course, like the arrival of color, graphics, +font technologies, \PDF, \XML, ebooks, the \TEX\ ecosystem has to adapt and find +its stable state again. The core technology has proven to be quite fit for the +kind of adaptation needed. But still, do it wrong and you get amplified out of +existence, don't do anything and the external factors also make you extinct. +There is no denial that (in the computer domain) \TEX\ is surprisingly stable and +adaptive. It's also hard not to see how conservatism can lead to extinction. + +\startplacefigure[location=top] + \startcombination[nx=4,ny=1,width=\textwidth,distance=0pt] + {\externalfigure[covers/the-epigenetics-revolution.jpg] [height=5cm]} {genetics} + {\externalfigure[covers/dark-matter-and-the-dinosaurs.jpg][height=5cm]} {physics} + {\externalfigure[covers/the-world-without-us.jpg] [height=5cm]} {history} + {\externalfigure[covers/what-we-cannot-know.jpg] [height=5cm]} {science} + \stopcombination +\stopplacefigure + +\stopsection + +\startsection[title=Inspiration] + +I just took some ideas from different fields. I could have mentioned quantum +biology, which tries to explain some unexplainable phenomena in living creatures. +For instance how do birds navigate without visible and measurable clues. How do +people arrive at \TEX\ while we don't really advertise? Or I could mention +epigenetics and explorations in junk \DNA. It's not the bit of the genome that we +thought that matters, but also the expression of the genes driven by other +factors. Offspring not only gets genetic material passed but it can get presets. +How can the \TEX\ community pass on Knuth's legacy? Do we need to hide the +message in subtle ways? Or how about the quest for dark matter? Does it really +exist or do we want (need) it to exist? Does \TEX\ really have that many users, +or do we cheat by adding the users that are enforced during college but don't +like it at all? There's enough inspiration for topics at \TEX\ conferences, we +just have to look around us. + +\stopsection + +\startsection[title=Stability] + +I didn't go into technical aspects of \TEX\ yet. I must admit that after decades +of writing macros I've reached a point where I can safely say that there will +never be perfect automated solutions for really complex documents. When books +about neural networks show up I wondered if it could be applied (but I couldn't). +When I ran into genetic algorithms I tried to understand its possible impact (but +I never did). 
So I stuck to writing solutions for problems using visualization:
+the trial and error way. Of course, speaking of \CONTEXT, I will adapt what is
+needed, and others can do that as well. Is there a new font technology? Fine,
+let's support it as it's no big deal, just a boring programming task. Does a user
+want a new mechanism? No problem, as solving a reduced subset of problems can be
+fun. But to think of \TEX\ in a reductionist way, i.e.\ solving the small
+puzzles, and to expect the whole to work in tandem to solve a complex task is not
+trivial and maybe even impossible. It's a good thing actually, as it keeps us on
+edge. Also, \CONTEXT\ was designed to help you with your own solutions: be
+creative.
+
+I mentioned my nephew Bram. He has seen part of this crowd a few times, just like
+his brother and sister do now. He's into artificial intelligence now. In a few
+years I'll ask him how he sees the current state of \TEX\ affairs. I might learn
+a few tricks in the process.
+
+In \quotation {The world without us} Weisman explores how fast the world would be
+void of traces of humankind. A mere 10.000 years can be more than enough. Looking
+back, that's about the time hunters became farmers. So here's a challenge: say
+that we want an ant culture that evolves to the level of having archaeologists
+who can figure out that we were here at Bacho\TEX\ \unknown\ what would we leave
+behind?
+
+Sapolsky ends his series by stressing that we should accept and embrace
+individual differences. The person sitting next to you can have the same makeup
+but be just a bit more sensitive to depression or be the few percent with genes
+controlling schizophrenic behaviour. He stresses that knowing how things work or
+where things go wrong doesn't mean that we should fix everything. So look at this
+room full of \TEX ies: we don't need to be all the same, use all the same, we
+don't need some dominance, we just need to accept and especially we need to
+understand that we can never fully understand (and solve) everything forever.
+
+Predictions, one of the themes, can be hard. It's not true that science has the
+answer to everything. There will always be room for speculation and maybe we will
+always need metaphysics too. I just started to read \quotation {What we cannot
+know} by du Sautoy. For sure those present here cannot predict how \TEX\ will go
+on and|/|or be remembered.
+
+\stopsection
+
+\startsection[title=Children of \TEX]
+
+I mentioned \quotation {Children of time}. The author lets you see a spidery
+world through spider eyes and physiology. They have different possibilities
+(eyesight, smell) than we do and also different mental capabilities. They evolve
+rapidly and have to cope conceptually with signals from a human surveillance
+satellite up in the sky. Eventually they need to deal with a bunch of (of course)
+quarrelling humans who want their place on the planet. We humans have some
+pre|-|occupation with spiders and other creatures. In a competitive world it is
+sometimes better to be suspicious (and avoid and flee) than to take a risk of
+being eaten. A frequently used example is that a rustle in a bush can be the wind
+or a lion, so it is best to run.
+
+We are not that well adapted to our current environment. We evolved at a very
+slow pace so there was no need to look ahead more than a year. And so we still
+don't look too far ahead (and choose politicians accordingly).
We can also not
+deal that well with statistics (Dawkins's \quotation {Climbing Mount Improbable}
+is a good read) so we make false assumptions, or just forget.
+
+Does our typeset text really look that good in the long run, or do we cheat with
+statistics? It's not too hard to find a bad example of something not made by
+\TEX\ and extrapolate that to the whole body of typeset documents. Just like we
+can take a nice example of something done by \TEX\ and assume that what we do
+ourselves is equally okay. I still remember the tests we did with \PDFTEX\ and
+hz. When \THANH\ and I discussed that with Hermann Zapf he was not surprised at
+all that no one saw a difference between the samples and that people instead
+focused on aspects that \TEX ies are told to look at, like two hyphens in a row.
+
+A tool like \TEX\ has a learning curve. If you don't like that just don't use it.
+If you think that someone doesn't like that, don't enforce this tool on that
+someone. And don't use (or lie with) statistics. Much better arguments are that
+it's a long|-|lived stable tool with a large user base and support. That it's not
+a waste of time. Watching a designer like Hermann Zapf draw shapes is more fun
+than watching click and point in heavily automated tools. It's probably also less
+fun to watch a \TEX ie converge towards a solution.
+
+Spiders are resilient. Ants maybe even more. Ants will survive a nuclear blast
+(mutations might even bring them benefits), they can handle the impact of a
+meteorite, a change in climate won't harm them much. Their biggest enemy is
+probably us, when we try to wipe them out with poison. But, as long as they keep
+a low profile they're okay. \TEX\ doesn't fit into the economic model as there is
+no turnover involved, no paid development, it is often not seen at all, it's
+just a hit in a search engine and even then you might miss it (if only because no
+one pays for it being shown at the top).
+
+We can learn from that. Keeping a low profile doesn't trigger the competition to
+wipe you out. Many (open source) software projects fade away: some big company
+buys out the developer and stalls the project or wraps what they bought in their
+own stuff, other projects go professional and enterprise and alienate the
+original users. Yet others abort because the authors lose interest. Just like the
+ideals of socialism don't automatically mean that every attempt to implement it
+is a success, so not all open source and free software is good (natured) by
+principle either. The fact that communism failed doesn't mean that capitalism is
+better and a long term winner. The same applies to programs, whether successful
+or not.
+
+Maybe we should be like the sheep. Dennett presents these animals as a clever
+species. They found a way to survive by letting themselves (unconsciously) be
+domesticated. The shepherd guarantees food, shelter and protection. He makes sure
+they don't get ill. Speaking biologically: they definitely made sure that many
+copies of their genes survived. Cows did the same and surprisingly many of them
+are related due to the fact that they share the same father (something that is
+now being reverted). All \TEX\ spin|-|offs relate to the same parent, and those
+that survived are those that were herded by user groups. We see bits and pieces
+of \TEX\ end up in other applications. Hyphenation is one of them. Maybe we
+should settle for that small victory in a future hall of fame.
+ +When I sit on my balcony and look at the fruit trees in my garden, some simple +math can be applied. Say that one of the apple trees has 100 apples per year and +say that this tree survives for 25 years (it's one of those small manipulated +trees). That makes 2.500 apples. Without human intervention only a few of these +apples make it into new trees, otherwise the whole world would be dominated by +apple trees. Of course that tree now only survives because we permit it to +survive, and for that it has to be humble (something that is very hard for modern +Apples). Anyway, the apple tree doesn't look too unhappy. + +A similar calculation can be done for birds that nest in the trees and under my +roof. Given that the number of birds stays the same, most of energy spent on +raising offspring is wasted. Nevertheless they seem to enjoy life. Maybe we +should be content if we get one enthusiastic new user when we demonstrate \TEX\ +to thousands of potential users. + +Maybe, coming back to the themes of the conference, we should not come up with +these kinds of themes. We seem to be quite happy here. Talking about the things +that we like, meeting people. We just have to make sure that we survive. Why not +stay low under the radar? That way nothing will see us as a danger. Let's be like +the ants and spiders, the invisible hive mind that carries our message, whatever +that is. + +When Dennett discusses language he mentions (coined) words that survive in +language. He also mentions that children pick up language no matter what. Their +minds are made for it. Other animals don't do that: they listen but don't start +talking back. Maybe \TEX\ is just made for certain minds. Some like it and pick +it up, while for others it's just noise. There's nothing wrong with that. +Predilection can be a user property. + +\stopsection + +\startsection[title={The unexpected}] + +In a discussion with Dawkins the well|-|spoken astrophysicist Neil deGrasse Tyson +brings up the following. We differ only a few percent in \DNA\ from a chimp but +quite a lot in brain power, so how would it be if an alien that differs a few +percent (or more) passes by earth. Just like we don't talk to ants or chimps or +whatever expecting an intelligent answer, whatever passes earth won't bother +wasting time on us. Our rambling about the quality of typesetting probably sounds +alien to many people who just want to read and who happily reflow a text on an +ebook device, not bothered by a lack of quality. + +\startplacefigure[location=top] + \startcombination[nx=4,ny=1,width=\textwidth,distance=0pt] + {\externalfigure[covers/live-as-we-do-not-know-it.jpg][height=5cm]} {astrobiology} + {\externalfigure[covers/life-on-the-edge.jpg] [height=5cm]} {quantumbiology} + {\externalfigure[covers/rare-earth.jpg] [height=5cm]} {astrophysics} + {\externalfigure[covers/austerity.jpg] [height=5cm]} {economics} + \stopcombination +\stopplacefigure + +We tend to take ourselves as reference. In \quotation {Rare Earth} Ward and +Brownlee extrapolate the possibility of life elsewhere in the universe. They are +not alone in thinking that while on one hand applying statistics to these +formulas of possible life on planets there might also be a chance that we're the +only intelligent species ever evolved. In a follow up, \quotation {Life as we do +not know it} paleontologist and astrobiologist Ward (one of my favourite authors) +discusses the possibility of life not based on carbon, which is not natural for a +carbon based species. 
Carl Sagan once pointed out that an alien species looking
+down on earth can easily conclude that cars are the dominant species on earth and
+that the thingies crawling in and out of them are some kind of parasites. So,
+when we look at the things that somehow end up on paper (as words, sentences,
+ornaments, etc.), what is dominant there? And is what we consider dominant really
+that dominant in the long run? You can look at a nice page as a whole and not
+see the details of the content. Maybe beauty hides nonsense.
+
+When \TEX ies look around they look at similar technologies. Commands in shells
+and solutions done by scripting and programming. This makes sense from the
+perspective of survival. However, if you want to ponder alternatives, maybe not
+for usage but just for fun, a completely different perspective might be needed.
+You must be willing to accept that communicating with a user of a \WYSIWYG\
+program might be impossible. If mutual puzzlement is a fact, then they can either
+be too smart and you can be too dumb or the reverse. Or both approaches can be
+just too alien, based on different technologies and assumptions. Just try to
+explain \TEX\ to a kid 40 years younger or to an 80 year old grandparent for that
+matter. Today you can be very clever in one area and very stupid in another.
+
+In another debate, Neil deGrasse Tyson asks Dawkins the question why in science
+fiction movies the aliens look so human and when they don't, why they look so
+strange, for instance like cumbersome sluggish snails. The response to that is
+one of puzzlement: the opponent has no frame of reference for such movies. In
+discussions old \TEX ies like to suggest that we should convert young users. They
+often don't understand that kids live in a different universe.
+
+How often does that happen to us? In a world of many billions \TEX\ has its place
+and can happily coexist with other typesetting technologies. Users of other
+technologies can be unaware of us and even create wrong images. In fact, this
+also happens in the community itself: (false) assumptions turned into
+conclusions. Solutions that look alien, weird and wrong to users of the same
+community. Maybe something that I present as hip and modern and high|-|\TEX\ and
+promising might be the opposite: backward, old|-|fashioned and of no use to
+others. Or maybe it is, but the audience is in a different mindset. Does it
+matter? Let's just celebrate that diversity. (So maybe, instead of discussing the
+conference theme, I should have talked about how I abuse \LUATEX\ in controlling
+lights in my home as part of some IoT experiments.)
+
+\stopsection
+
+\startsection[title=What drives us]
+
+I'm no fan of economics and big money talk makes me suspicious. I cannot imagine
+working in a large company where money is the drive. It also means that I have
+not much imagination in that area. We get those calls at the office from people
+in far away countries who are hired to convince us by phone of investments.
+Unfortunately mentioning that you're not at all interested in investments or that
+multiplying money is irrelevant to you does not silence the line. You have to
+actively kill such calls. This is also why I probably don't understand today's
+publishing world where money also dominates. Recently I ran into talks by Mark
+Blyth about the crisis (what crisis?) and I wish I could argue like he does when
+it comes to typesetting and workflows. He discusses quite well that most
+politicians have no clue what the crisis is about.
+
+I think that the same applies to the management of publishers: many have no clue
+what typesetting is about. So they just throw lots of money into the wrong
+activities, just like the central banks seem to do. It doesn't matter if we \TEX
+ies demonstrate cheap and efficient solutions.
+
+Of course there are exceptions. We're lucky to have some customers that do
+understand the issues at hand. Those are also the customers where authors may use
+the tools themselves. Educating publishers, and explaining that authors can do a
+lot, might be a premise, predilection and prediction in one go! Forget about
+those who don't get it: they will lose eventually, unfortunately not before they
+have reaped and wasted the landscape.
+
+Google, Facebook, Amazon, Microsoft and others invest a lot in artificial
+intelligence (or, having all that virtual cash, just buy other companies that
+do). They already have such entities in place to analyze whatever you do. It is
+predicted that at some point they know more about you than you know yourself.
+Reading Luke Dormehl's \quotation {The Formula} is revealing. So what will that
+do to our so|-|called (disputed by some) free will? Can we choose our own
+tools? What if a potential user is told that all his or her friends use
+WhateverOffice so they'd better do that too? Will subtle pressure lead them or
+even us users away from \TEX ? We already see arguments among \TEX ies, like
+\quotation {It doesn't look updated in 3 years, is it still good?} Why update
+something that is still valid? Will the community be forced to update everything,
+with sort of fake updates? Who sets out the rules? Do I really need to update (or
+re|-|run) manuals every five years?
+
+Occasionally I visit the Festo website. This is a (family owned) company that
+does research at the level that used to be common in large companies decades ago.
+If I had to choose a job, that would be the place to go to. Just google for
+\quotation {festo bionic learning network} and you will understand why. We lack
+this kind of research in the field we talk about today: research not driven by
+commerce, short term profit or long term control, but done because it is
+fundamental fun.
+
+Last year Alan Braslau and I spent some time on \BIBTEX. Apart from dealing with
+all the weird aspects of the \APA\ standard, dealing with the inconsistently
+constructed author fields is a real pain. There have been numerous talks about
+that aspect here at Bacho\TEX\ by Jean|-|Michel Hufflen. We're trying to deal
+with a more than 30|-|year|-|old flawed architecture. Just look back over a curve
+that backtracks 30 years of exponential development in software and databases and
+you realize that it's a real waste of time and a lost battle. It's fine to have a
+text based database, and stable formats are great, but the lack of structure is
+appalling and hard to explain to young programmers. Compare that to the Festo
+projects and you realize that there can be more challenging projects. Of course,
+dealing with the old data can be a challenge, a necessity and eventually even be
+fun, but don't even think that it can be presented as something hip and modern.
+We should be willing to admit flaws. No wonder that Jean|-|Michel decided to
+switch to talking about music instead. Way more fun.
+
+Our brains are massively parallel bio|-|machinery. Groups of neurons cooperate
+and compete for attention. Coming up with solutions that match what comes out of
+our minds demands a different approach.
Here we still think in traditional
+programming solutions. Will new ideas about presenting information, the follow up
+on books, come from this community? Are we the innovative Festo or are we an old
+dinosaur that just follows the fashion?
+
+\stopsection
+
+\startsection[title=User experience]
+
+Here is a nice one. Harari spends many pages explaining that research shows that
+when an unpleasant experience has less unpleasantness at the end of the period
+involved, the overall experience is valued according to the last experience. Now,
+this is something we can apply to working with \TEX: often, the more you reach
+the final state of typesetting, the more it feels as if all hurdles are in the
+beginning: initial coding, setting up a layout, figuring things out, etc.
+
+It can only get worse if you have a few left|-|over typesetting disasters but
+there, adapting the text can help out. Of course seeing it in a cheap bad print
+can make the whole experience bad again. It happens. There is a catch here: one
+can find lots of bad|-|looking documents typeset by \TEX. Maybe frustration
+(or indifference) prevails there.
+
+I sometimes get to see what kind of documents people make with \CONTEXT\ and it's
+nice to see a good looking thesis with diverse topics: science, philosophy,
+music, etc. Here \TEX\ is just instrumental, as what it is used for is way more
+interesting (and often also more complex) than the tool used to get it on paper.
+We have conferences but they're not about rocket science or particle
+accelerators. Proceedings of such conferences can still scream \TEX, but it's the
+content that matters. Here somehow \TEX\ still sells itself, being silently
+present in rendering and presentations. It's like a rootkit: not really
+appreciated and hard to get rid of. Does one discuss the future of rootkits other
+than in the perspective of extinction? So, even as an invisible rootkit, hidden
+in the workings of other programs, \TEX's future is not safe. Sometimes, when you
+install a Linux system, you automatically get this large \TEX\ installation,
+either because of dependencies or because it is seen as a similar toolkit as for
+instance Open (or is it Libre) Office. If a user doesn't need it, that user might
+as well start seeing it as a (friendly) virus.
+
+\stopsection
+
+\startsection[title=Conclusion]
+
+At some point those who introduced computers in typesetting had no problem
+throwing printing presses out of the window. So don't pity yourself if at some
+point in the near future you figure out that professional typesetting is no
+longer needed. Maybe once we let machines rule the world (even more) we will be
+left alone and can make beautiful documents (or whatever) just for the joy, not
+bothering if we use outdated tools. After all, we play modern music on old
+instruments (and the older rock musicians get, the more they seem to like
+acoustic).
+
+There are now computer generated compositions that experienced listeners cannot
+distinguish from old school. We already had copies of paintings that could only
+be determined to be forgeries by looking at chemical properties. Both of these
+(artificial) arts can be admired and bring joy. So, the same applies to fully
+automatically typeset novels (or runtime rendered ebooks). How bad is that
+really? You don't dig canals by hand. You don't calculate logarithmic tables
+manually any longer.
+
+However, one of the benefits of the Internet is watching and listening to great
+minds.
Another is seeing musicians perform, which is way more fun than watching a
+computer (although googling for \quotation {animusic} brings nice visuals).
+Recently I ran into a wooden musical computer made by \quotation {Wintergatan}
+which reminded me of the \quotation {Paige Compositor} that we use in a \LUATEX\
+cartoon. Watching something like that nicely compensates for a day of rather
+boring programming. Watching how the marble machine x (mmx) evolves is yet
+another nice distraction.
+
+Now, the average age of the audience here is pretty high even if we consider that
+we get older. When I see solutions of \CONTEXT\ users (or experts) posted by
+(young) users on the mailing list or stack exchange I often have to smile because
+my answer would have been worse. A programmable system invokes creative
+solutions. My criterion is always that it has to look nice in code and have some
+elegance. Many posted solutions fit. Do we really want more automation? It's more
+fun to admire the art of solutions and I'm amazed how well users use the
+possibilities (even ones that I had already forgotten).
+
+One of my favourite artists on my weekly \quotation {check youtube} list is Jacob
+Collier. Right from when I ran into him I realized that a new era in music had
+begun. Just google for his name and \quotation {music theory interview} and you
+will probably understand what I mean. When Dennett comments on the next
+generation (say up to 25) he wonders how they will evolve as they grow up in a
+completely different environment of connectivity. I can see that when I watch
+family members. Already long ago Greg Bear wrote the novel \quotation {Darwin's
+Children}. It sets you thinking, and when looking around you even wonder if there
+is some truth in it.
+
+There are folks here at Bacho\TEX\ who make music. Now imagine that this is a
+conference about music and that the theme includes the word \quotation {future}.
+Then, imagine watching that video. You see some young musicians, one of them
+probably one of the musical masterminds of this century, others instrumental to
+his success, for instance by wrapping up his work. While listening you realize
+that this next generation knows perfectly well what previous generations did and
+achieved and how they influenced the current one. You see the future there. Just
+look at how old musicians reflect on such videos. (There are lots of examples
+around of youngsters evolving into prominent musicians and I love watching them.)
+There is no need to discuss the future, in fact, we might make a fool of
+ourselves doing so. Now back to this conference. Do we really want to discuss the
+future? What we think is the future? Our future? Why not just hope that in the
+flow of getting words on a medium we play our humble role and are not forgotten
+but remembered as an inspiration?
+
+One more word about predicting the future. When Arthur Clarke's \quotation {2001:
+A Space Odyssey} was turned into a movie in 1968, a lot of effort went into
+making sure that the not so far ahead future would look right. In 1996 scientists
+were asked to reflect on these predictions in \quotation {Hal's Legacy}. It
+turned out that most predictions were plain wrong. For instance computers got way
+smaller (and even smaller in the next 20 years) while (self|-|aware) artificial
+intelligence had not arrived either. So, let's be careful in what we predict (and
+wish for).
+
+\stopsection
+
+\startsection[title=No more themes]
+
+We're having fun here, that's why we come to Bacho\TEX\ (predilection).
That +should be our focus. Making sure that \TEX's future is not so much in the cutting +edge but in providing fun to its users (prediction). So we just have to make sure +it stays around (premise). That's how it started out. Just watch at Don Knuth's +3:16 poster: via \TEX\ and \METAFONT\ he got in contact with designers and I +wouldn't be surprised if that sub|-|project was among the most satisfying parts. +So, maybe instead of ambitious themes the only theme that matters is: show what +you did and how you did it. + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-contents.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-contents.tex new file mode 100644 index 00000000000..45b21ec60c9 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-contents.tex @@ -0,0 +1,7 @@ +\startcomponent musings-contents + +\starttitle[title=Content] + \placelist[chapter][criterium=text,width=2em] +\stoptitle + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-introduction.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-introduction.tex new file mode 100644 index 00000000000..d8dadd74344 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-introduction.tex @@ -0,0 +1,31 @@ +% language=uk + +\startcomponent musings-introduction + +\environment musings-style + +\startchapter[title={Introduction}] + +This is a collection of articles and wrap|-|ups that don't suit in other manuals +or collections. Some are published, some meant as draft for a presentation. + +The \quotation {Children of \TEX} article is the framework for a presentation at +Bacho\TEX\ 2017 in Poland, and covers the main theme of the conference. In the +aftermath of that conference I wrote \quotation {Advertising \TEX} and later +\quotation {Why use \TEX?}. The 2018 Bacho\TEX\ conference theme is explored in +\quotation {What’s to stay, what’s to go}. After a short discussion on the +\CONTEXT\ mailing list about stability (at the moment that \MKII\ had been frozen +for more than a decade but is still used without problems) I wrote \quotation +{Stability}. + +Many of the thoughts in these articles are influenced by discussions with my +colleagues Ton Otten and Kees van Marle. Operating in a similar arena, they +provide me the reflection needed to sort out my thoughts on these matters. + +\startlines +Hans Hagen +Hasselt NL +2017\endash 1028 +\stoplines + +\stopchapter diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-perception.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-perception.tex new file mode 100644 index 00000000000..9936044737b --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-perception.tex @@ -0,0 +1,180 @@ +% language=uk + +\definefontfeature[ligatures][liga=yes,mode=node] + +\startcomponent musings-perception + +\environment musings-style + +\startchapter[title=Advertising \TEX] + +I can get upset when I hear \TEX ies boast about the virtues of \TEX\ compared to +for instance Microsoft Word. Not that I feel responsible for defending a program +that I never use(d) but attacking something for no good reason makes not much +sense to me. It is especially annoying when the attack is accompanied by a +presentation that looks pretty bad in design and typography. 
The best +advertisements for \TEX\ should of course come from outside the \TEX\ community, +by people impressed by its capabilities. How many \TEX ies can really claim that +Word is bad when they never tried to make something in it with a similar learning +curve as they had in \TEX\ or the same amount of energy spent in editing and +perfecting a word|-|processor|-|made document. + +In movies where computer technology plays a role one can encounter weird +assumptions about what computers and programs can do. Run into a server room, +pull one disk out of a \RAID-5 array and get all information from it. Connect +some magic device to a usb port of a phone and copy all data from it in seconds. +Run a high speed picture or fingerprint scan on a computer (probably on a remote +machine) and show all pictures flying by. Okay, it's not so far from other +unrealistic aspects in movies, like talking animals, so maybe it is just a +metaphor for complexity and speed. When zapping channels on my television I saw +\in{figure}[fig:tex-in-movie] and as the media box permits replay I could make a +picture. I have no clue what the movie was about or what movie it was so a +reference is lacking here. Anyway it's interesting that seeing a lot of \TEX\ +code flying by can impress someone: the viewer, even if no \TEX ie will ever see +that on the console unless in some error or tracing message and even then it's +hard to get that amount. So, the viewer will never realize that what is seen is +definitely not what a \TEX ie wants to see. + +\startplacefigure[title={\TEX\ in a movie},reference=fig:tex-in-movie] + \externalfigure[tex-in-movie.jpg][height=8cm] +\stopplacefigure + +So, as that kind of free advertisement doesn't promote \TEX\ well, what of an +occasional mentioning of \TEX\ in highly|-|regarded literature? When reading +\quotation {From bacteria to Bach and back, the evolution of minds} by Daniel +Dennett I ran into the following: + +\startquotation +In Microsoft Word, for instance, there are the typographical operations of +superscript and subscript, as illustrated by + +\startnarrower +base\high{power} +\stopnarrower + +and + +\startnarrower +human\low{female} +\stopnarrower + +But try to add another superscript to base\high{power}\emdash it {\em should} +work, but it doesn't! In mathematics, you can raise powers to powers to powers +forever, but you can't get Microsoft Word to display these (there are other +text|-|editing systems, such as TeX, that can). Now, are we sure that human +languages make use of true recursion, or might some or all of them be more like +Microsoft Word? Might our interpretation of grammars as recursive be rather an +elegant mathematical idealization of the actual \quotation {moving parts} of a +grammar? +\stopquotation + +Now, that book is a wonderfully interesting read and the author often refers to +other sources. When one reads some reference (with a quote) then one assumes that +what one reads is correct, and I have no reason to doubt Dennett in this. But +this remark about \TEX\ has some curious inaccuracies. \footnote {Of course one +can wonder in general that when one encounters such an inaccuracy, how valid +other examples and conclusions are. However, consistency in arguments and +confirmation by other sources can help to counter this.} + +First of all a textual raise or lower is normally not meant to be recursive. +Nesting would have interesting consequences for the interline space so one will +avoid it whenever possible. 
There are fonts that have superscript and subscript +glyphs and even \UNICODE\ has slots for a bunch of characters. I'm not sure what +Word does: take the special glyph or use a scaled down copy? + +Then there is the reference to \TEX\ where we can accept that the \quotation {E} +is not lowered but just kept as a regular \quotation {e}. Actually the mentioning +of nested scripts refers to typesetting math and that's what the superscripts and +subscripts are for in \TEX. In math mode however, one will normally raise or +lower symbols and numbers, not words: that happens in text mode. + +While Word will use the regular text font when scripting in text mode, a \TEX\ +user will either have to use a macro to make sure that the right size (and font) +is used, or one can revert to math mode. But how to explain that one has to enter +math and then explicitly choose the right font? Think of this: + +\startbuffer +efficient\high{efficient} or +efficient$^{\text{efficient}}$ or \par +{\bf efficient\high{efficient} or +efficient$^{\text{efficient}}$} +\stopbuffer + +\typebuffer + +Which gives (in Cambria) + +\getbuffer + +Now this, + +\startbuffer +efficient\high{efficient\high{efficient}} or +efficient$^{\text{efficient$^{\text{efficient}}$}}$ or \par +{\bf efficient\high{efficient\high{efficient}} or +efficient$^{\text{efficient$^{\text{efficient}}$}}$} +\stopbuffer + +\typebuffer + +will work okay but the math variant is probably quite frightening at a glance for +an average Word user (or beginner in \TEX) and I can understand why someone would +rather stick to click and point. + +\getbuffer + +Oh, and it's tempting to try the following: + +\startbuffer +efficient{\addff{f:superiors}efficient} +\stopbuffer + +\typebuffer + +but that only works with fonts that have such a feature, like Cambria: + +\blank {\switchtobodyfont[cambria]\getbuffer} \blank + +To come back to Dennett's remark: when typesetting math in Word, one just has to +switch to the math editing mode and one can have nested scripts! And, when using +\TEX\ one should not use math mode for text scripts. So in the end in both +systems one has to know what one is doing, and both systems are equally capable. + +The recursion example is needed in order to explain how (following recent ideas +from Chomsky) for modern humans some recursive mechanism is needed in our +wetware. Now, I won't go into details about that (as I can only mess up an +excellent explanation) but if you want to refer to \TEX\ in some way, then +expansion \footnote{Expanding macros actually works well with tail recursion.} of +(either combined or not) snippets of knowledge might be a more interesting model +than recursion, because much of what \TEX\ is capable of relates to expansion. +But I leave that to others to explore. \footnote {One quickly starts thinking of +how \cs {expandafter}, \type {noexpand}, \type {unexpanded}, \type {protected} +and other primitives can be applied to language, understanding and also +misunderstanding.} + +Now, comparing \TEX\ to Word is always kind of tricky: Word is a text editor with +typesetting capabilities and \TEX\ is a typesetting engine with programming +capabilities. Recursion is not really that relevant in this perspective. Endless +recursion in scripts makes little sense and even \TEX\ has its limits there: the +\TEX\ math engine only distinguishes three levels (text, script and scriptscript) +and sometimes I'd like to have a level more. Deeper nesting is just more of +scriptscript unless one explicitly enforces some style. 
So, it's recursive in the +sense that there can be many levels, but it also sort of freezes at level three. + +\startplacefigure[title={Nicer than \TEX},reference=fig:nicer-than-tex] + \externalfigure[mathematics.png][width=\textwidth] +\stopplacefigure + +I love \TEX\ and I like what you can do with it and it keeps surprising me. And +although mathematics is part of that, I seldom have to typeset math myself. So, I +can't help that \in {figure} [fig:nicer-than-tex] impresses me more. It even has +the so|-|familiar|-|to|-|\TEX ies dollar symbols in it: the poem \quotation +{Poetry versus Orchestra} written by Hollie McNish, music composed by Jules +Buckley and artwork by Martin Pyper (I have the \DVD\ but you can also find it on +\YOUTUBE). It reminds me of Don Knuth's talk at a \TUG\ meeting. In \TUGBOAT\ +31:2 (2010) you can read Don's announcement of his new typesetting engine i\TEX: +\quotation {Output can be automatically formatted for lasercutters, embroidery +machines, \THREED\ printers, milling machines, and other \CNC\ devices \unknown}. +Now that is something that Word can't do! + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-roadmap.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-roadmap.tex new file mode 100644 index 00000000000..f8771ba426b --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-roadmap.tex @@ -0,0 +1,372 @@ +% language=uk + +% \showfontkerns + +\startcomponent musings-roadmap + +\environment musings-style + +\startchapter[title={\METATEX, a roadmap}] + +% \startlines \setupalign[flushright] +% Hans Hagen +% Hasselt NL +% September 2018 +% \stoplines + +\startsection[title={Introduction}] + +Here I will shortly wrap up the state of \LUATEX\ and \CONTEXT\ in fall 2018. I +made the first draft of this article as preparation for the \CONTEXT\ meeting +where we also discussed the future. I updated the text afterwards to match the +decisions made there. It's also a personal summary of thoughts and discussions +with team members about where to move next. + +\stopsection + +\startsection[title={The state of affairs}] + +After a dozen years the development of \LUATEX\ has reached a state where adding +more functionality and|/|or opening up more of the internals makes not much +sense. Apart from fixes and maybe some minor extensions, version 1.10 is what you +get. Users can do enough in \LUA\ and there is not much to gain in convenience +and performance. Of course some of the code can and will be cleaned up, as we +still see the effects of going from \PASCAL\ to \CWEB\ to \CCODE. In the process +consistency is on the radar so we might occasionally add a helper. But we also +don't want to move too far away from the original code, which is for instance why +we keep names, keys and other properties found in original \TEX, which in turn +leads to some inconsistencies with extensions added over time. We have to accept +that. + +Because \LUATEX\ development is closely related to \CONTEXT\ development, +especially \MKIV, we've also reached the moment that we can get rid of some older +code and assume the latest \LUATEX\ to be used. Because we do so much in \LUA\ +the question is always to what extent the benefits outweigh the drawbacks. Just +in case you wonder why we use \LUA\ extensively, the main reason is that it is +easier and more efficient to manage data in this language and modern typesetting +needs much data. 
It also permits us to extend regular \TEX\ functionality. But, +one should not overrate the impact: we still let \TEX\ do what \TEX\ is best at! + +Performance is quite important. It doesn't make sense to create a powerful +typesetting system where processing a page takes a second. We have discussed +performance before since one of the complaints about \LUATEX\ is that it is slow. +A simple, basic test is this: + +\starttyping +\starttext + \dorecurse{1000}{\input tufte \par} +\stoptext +\stoptyping + +This involves 1000 times loading a file (and reporting that on the console, which +can influence runtime), typesetting paragraphs, splitting of a page and of course +loading fonts and saving to the \PDF\ file. When I run this on a modest machine, +I get these (relative) timings for the (about) 225 pages: + +\starttabulate[|l|c|c|c|c|] +\BC \TEX\ engine used \BC \PDFTEX \BC \LUATEX \BC \LUAJITTEX \BC \XETEX \NC \NR +\BC runtime in seconds \NC 2.0 \NC 3.9 \NC 3.0 \NC 8.4 \NC \NR +\stoptabulate + +Now, as expected the 8 bit \PDFTEX\ is the winner here but \LUATEX\ is not doing +that bad. I don't know why \XETEX\ is so much slower, maybe because its 64 bit +binary is less optimal. I once noticed that a 64 bit \PDFTEX\ performed worse on +such a test than \LUATEX, for which I always use 64 bit binaries. + +If you consider that often much more is done than in this example, you can take +my word that \LUATEX\ quickly outpaces \PDFTEX\ on more complex tasks. In that +sense it is now our benchmark. It must be said that the \MKIV\ code is probably a +bit more efficient than the \MKII\ code but that doesn't matter much in this +simple test because hardly any macro magic happens here; it mostly tests basic +font processing, paragraph building and page construction. I don't think that I +can squeeze out more pages per second, at least not without users telling me +where they encounter bottlenecks that don't result from their style coding. It's +no problem to write inefficient macros (or styles) so normally a user should +first carefully check her|/|his own work. Using a more modern \CPU\ with proper +caching and an \SSD\ helps too. + +So, to summarize, we can say that with version 1.10 \LUATEX\ is sort of finished. +Our mission is now to make \LUATEX\ robust and stable. Things can be added and +improved, but these are small and mostly consistency related. + +\stopsection + +\startsection[title={More in \LUA}] + +Till now I always managed to add functionality to \CONTEXT\ without hampering +performance too much. Of course the biggest challenge is always in handling fonts +and common features like color because that all happens in \LUA. So, the question +is, what if we delegate more of the core functionality to \LUA ? I will discuss a +few options because the \CONTEXT\ developers and users need to agree on the path +to follow. One question there is, are the possible performance hits (which can be +an inconvenience) compensated by better and easier typesetting. + +Fonts, colors, special typesetting features like spaced kerning, protrusion, +expansion, but also dropped caps, line numbering, marginal notes, tables, +structure related things, floats and spacing are not open for much discussion. +All the things that happen in \LUA\ combined with macros is there and will stay. +But how about hyphenation, paragraph building and page building? And how about a +leaner and meaner, future safe engine? + +Hyphenation is handled in the \TEX\ core. 
But in \CONTEXT\ already for years one
+can also use a \LUA\ based variant. There is room for extensions and improvements
+there. Interesting is that performance is more or less the same, so this is an
+area where we might switch to the \LUA\ method eventually. It compares to fonts,
+where node mode is more or less the standard and base mode the old way.
+
+Building the paragraphs in \LUA\ is also available in \MKIV, although it needs an
+update. Again performance is not that bad, so when we add features not possible
+(or hard to do) in regular \TEX, it might actually pay off to default to the
+par builder written in \LUA.
+
+The page builder is also doable in \LUA\ but so far I only played a bit with a
+\LUA\ based variant. I might pick up that thread. However, if we were to switch
+to \LUA\ there, it might have a bit of a penalty, unless we combine it with some
+other mechanisms which is not entirely trivial, as it would mean a diversion from
+the way \TEX\ does it normally.
+
+How about math? We could at some point do math rendering in \LUA\ but because the
+core mechanism is the standard, it doesn't really make much sense. It would also
+touch the soul of \TEX. But, I might give it a try, just for fun, so that I can
+play with it a bit. It's typically something for cold and rainy days with some
+music in the background.
+
+We already use \LUA\ in the frontend: locating and reading files in \TEX,
+\XML, \LUA\ and whatever input format. Normalization and manipulation is all
+active and available. The backend also depends on \LUA, for instance for
+special \PDF\ features and exporting to \XML. The engine still handles the page
+stream conversion, font inclusion and object management.
+
+The inclusion of images is also handled by the engine, although in \CONTEXT\ we
+can delegate \PDF\ inclusion to \LUA. Interesting is that this has no performance
+hit.
+
+With some juggling the page stream conversion can also be done in \LUA, and I
+might move that code into the \CONTEXT\ distribution. Here we do have a
+performance hit: about one second more runtime on the 14 seconds needed for the
+300 page \LUATEX\ manual and just over half a second on an 11 second
+\LUAJITTEX\ run. The manual has lots of tables, verbatim, indices and uses color
+as well as a more than average number of fonts and much time is spent in \LUA. So
+there is a price to pay there. I tried to speed that up but there is not much to
+gain there.
+
+So, say that we default to \LUA\ based hyphenation, which enables some new
+functionality, \LUA\ based par building, which permits some heuristics for corner
+cases, and \LUA\ based page building, which might result in more control over
+tricky cases. A total performance hit of some 5\% is probably acceptable,
+especially because by that time I might have replaced my laptop and won't notice
+the degradation. This still fits in the normal progress and doesn't really demand
+a roadmap or wider acceptance. And of course we would still use the same
+strategies as implemented in traditional \TEX\ as default anyway.
+
+\stopsection
+
+\startsection[title={A more drastic move}]
+
+More fundamental is the question whether we delegate more backend activity to
+\LUA\ code. If we decide to handle the page stream in \LUA, then the next
+question is, why not also delegate object management and font inclusion to
+\LUA? Now, keep in mind that this is all very \CONTEXT\ specific!
Already for +more than a decade we delegate a lot to \LUA, and also we have a rather tight +control over this core functionality. This would mean that \CONTEXT\ doesn't +really need the backend code in the engine. \footnote {For generic packages like +TikZ we (can) provide some primitive emulators, which is rather trivial to +implement.} + +That situation is actually not unique. For instance, already for a while we don't +need the \LUATEX\ font loader either, as loading the \OPENTYPE\ files is done in +\LUA. So, we could also get rid of the font loader code. Currently some code is +shared with the font inclusion in the backend but that can be isolated. + +You can see a \TEX\ engine as being made from several parts, but the core really +concerns only two processes: reading, storing and expanding macros on the one +hand, and converting a stream of characters into lines, paragraphs, pages etc.\ on the other. +Fonts are mostly an abstraction: they are visible in so called glyph nodes as +font identifier (a number) and character code (also a number) properties. The +result, nowadays being \PDF, is also an abstraction: at some point the engine +converts the to be shipped out box into \PDF\ instructions, and in our case, +relatively simple ones. The backend registers which characters and fonts are used +and also includes the right resources. But, the backend is not part of the core +as such! It has been introduced in \PDFTEX\ and is a so called extension. + +So, what does that all mean for a future version of \CONTEXT\ and \LUATEX ? It +means that we can decide to follow up with a \CONTEXT\ that does more in \LUA, +which means not hard coded in a binary, on the one hand, but that we can also +decide to strip the engine of non|-|core code. But, given that \LUATEX\ is also +used in other macro packages, this would mean a different engine. We cannot say +that \LUATEX\ is stable when we also experiment with core components. + +We've seen folks picking up experimental versions assuming that they are precursors +to official code. So, in order to move on we need to avoid confusion: we need to +use another name. Choosing a name is always tricky but as Taco already registered +the \METATEX\ domain, and because in the \CONTEXT\ distribution you will find +references to \METATEX, we will use that name for the future engine. Adding \LUA\ +to that name makes sense but then the name would become too long. + +The main difference between \METATEX\ and \LUATEX\ would be that the former has +no file lookup library, no hardcoded font loader, and no backend generator (but +possibly some helpers, and these need time to evolve). We're basically back where +\TEX\ started but instead of coding these extensions in \PASCAL\ or \CCODE\ we +use \LUA. We're also kind of back to when we first started experimenting with +\LUATEX\ in \CONTEXT\ where test, write and rewrite were going in parallel. But, +as said, we cannot impose that on a wide audience. + +If we go for such a lean and mean follow up, then we can also do a more drastic +cleanup of obsolete code in \CONTEXT\ (dating from \ETEX, \PDFTEX, \ALEPH, etc.). +We then are sort of back to where it all started: we go back to the basics. This +might mean dropping some primitives (one can define them as dummy). Of course we +could generalize some of the \CONTEXT\ code to provide the kicked out +functionality but would that pay off? Probably not.
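As an aside to the glyph node abstraction mentioned a few paragraphs back, a minimal \LUA\ sketch can make it concrete. The node library calls used here (\type {node.id}, \type {node.traverse_id} and the \type {font} and \type {char} fields of glyph nodes) are standard \LUATEX; the little function itself is only an illustration and not code taken from \CONTEXT.

\starttyping
local glyph_id = node.id("glyph")

-- report the font and character properties of the glyphs in a node list
local function showglyphs(head)
    for n in node.traverse_id(glyph_id, head) do
        -- n.font : the font identifier (a number)
        -- n.char : the character code  (a number)
        texio.write_nl(string.format("font %3i, char U+%05X", n.font, n.char))
    end
    return true
end

-- in a bare luatex format one could hook this into a callback, for instance
-- callback.register("pre_linebreak_filter", showglyphs); in context the
-- callbacks are managed by the core, so there one would use the provided
-- helper mechanisms instead
\stoptyping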
+ +Just for the record: moving the handling of macros, registers, grouping, etc.\ +to \LUA\ is not really an option as the performance hit would make a large system +like \CONTEXT\ sort of unusable: it's no option and not even considered (although +I must admit that I have some experimental \LUA\ based \TEX\ parser code around). + +It is quite likely that building \METATEX\ from source for the moment will be an +option in the build script. But we can also decide to simplify that process, +which is possible because we only need one binary. But in general we can assume +that one can generate \METATEX\ and \LUATEX\ from the same source. A first step +probably is a further isolation of the backend code. The fontloader and file +handling code already can be made optional. + +Given that we only need one binary (it being \LUATEX\ or \METATEX) and nowadays +only use \OPENTYPE\ fonts, one can even start thinking of a mini distribution, +possibly with a zipped resource tree, something we experimented with in the early +days of \LUATEX. + +Another thought I have been playing with is a better separation between low level +and high level \CONTEXT\ commands, and whether the low level layer should be more +generic in nature (so that one can run specific packages on top of it instead of +the whole of \CONTEXT) but that might not be worth the trouble. + +\stopsection + +\startsection[title={Interlude}] + +If we look at the future, it's good to also look at the past. Opening up \TEX\ +the way we did has many advantages but also potential drawbacks. It works quite +well in \CONTEXT\ because we ship an integrated package. I don't think that there +are many users who kick in their own callbacks. It is possible but completely up +to the user to make sure things work out well. Performance hits, interference, +crashes: those who interfere with the internals can sort that out themselves. I'm +not sure how well that works out in other macro packages but it is a time bomb if +users start doing that. Of course the documented interfaces to use \LUA\ in +\CONTEXT\ are supported. So far I think we're not yet bitten in the tail. We keep +this aspect out of the discussion. + +Another important aspect is stability of the engine. Sometimes we get suggestions +for changes or patches that work for a specific case but for sure will have side +effects on \CONTEXT. Just as we don't test \LATEX\ side effects, \LATEX\ users +don't check \CONTEXT. And we're not even talking of users who expect their code +to keep working. A tight control over the source is important but cannot be taken +for granted: we will not be around for ever. This means that at some point \LUATEX\ should not be +changed any more, even when we observe side effects we want to get rid of, +because these side effects can be in use. This is another argument for a stripped +down engine. The less there is to mess with, the less the mess. + +\stopsection + +\startsection[title={Audience}] + +So how about \CONTEXT\ itself? Of course we can make it better. We can add more +examples and more documentation. We can try to improve support. The main question +for us (as developers) is who actually is our audience. From the mails coming to +the \CONTEXT\ support list it looks like a rather diverse group of users. + +At \TEX\ meetings there are often discussions about promoting \TEX. I can agree +on the fact that even for simple documents it makes a lot of sense to use \TEX, +but who will take the first hurdles? How many people really produce a lot of +documents?
And how many need \TEX\ after maybe a short period of (enforced) usage +at the university? + +It's not trivial to recognize the possibilities and power of the +\LUATEX|-|\CONTEXT\ combination. We never got any serious requests for support +from large organizations. In fact, we do use this combination in a few projects +for educational publishers, but there it's actually the authors and editors doing +the work. It's seldom company policy to use tools that efficiently automate +typesetting. I dare to say that publishers are not really an audience at all: +they normally delegate the task. They might accept \TEX\ documents but have them +rekeyed or adapted far|-|far|-|away and as cheaply as possible. Thinking of it, the +main reason for Don Knuth to write \TEX\ in the first place was the ability to +control the look and feel and quality. It was developments at typesetters and +publishers that triggered the development of \TEX . It was user demand. And the +success of \TEX\ was largely due to the unique personality and competence of the +author. + +System integrators qualify as audience but I fear that \TEX\ is not considered +hip and modern. It doesn't seem to matter if you can demonstrate that it can do a +wonderful job efficiently and relatively cheaply. Also the fact that an +installation can be very stable in the long run is of no importance. Maybe that +audience (market place) is all about \quotation {The more we have to program and +update regularly, the merrier.}. Marketing \TEX\ is difficult. + +Those who render multiple products, maintain manuals, have to render many +documents automatically qualify as audience. But often company policies, +preferred suppliers, so called standard tools etc.\ are used as arguments against +\TEX. It's a missed opportunity. + +One needs a certain mindset to recognize the potential and the question is how +we reach that audience. Drawing a roadmap for that is not easy but worth +discussing. We're open to suggestions. + +% \footnote {It's kind of interesting that recently the \TEX\ User Group announced +% its presence on Facebook and Twitter. Apart from wondering how that gets updated, +% one can also wonder how many potential (or even current) users go there, given +% that these platforms are subjected to rise and fall. I'm on neither of them and +% don't plan to. Kids (our future users) that I know already said goodbye to them. +% We'll see how that works out.} + +\stopsection + +\startsection[title={Conclusion}] + +At the \CONTEXT\ user meeting those present agreed that moving forward this way +makes sense. This means that we will explore a lean and mean \METATEX\ alongside +\LUATEX. There is no rush and it's all volunteer work so we will take our time +for this. It boils down to some reshuffling of code so that we can remove the +built|-|in font loader, file handling, and probably also \SYNCTEX\ because we can +emulate that. Then the backend with its font inclusion code will be cleaned up a +bit (we even discussed only supporting modern wide fonts). It's no big deal to +adapt \CONTEXT\ to this (so it can and will support both \LUATEX\ and \METATEX). +Eventually the backend might go away but now we're talking years ahead. By then +we can also explore the option to make \METATEX\ start out as a \LUA\ function +call (the main control loop) and become reentrant.
There will probably not be +many changes to the opened up \TEX\ kernel, but we might extend the \METAPOST\ +part a bit (some of that was discussed at the meeting) especially because it is a +nice tool to visualize big data. + +As with \LUATEX\ development we will go in small steps so that we keep a working +system. Of course \LUATEX\ is always there as stable fallback. The experiments +will mostly happen in the experimental branch and binaries will be generated +using the compile farm on the \CONTEXT\ garden, just as happens now. This also +limits testing and exploring to the \CONTEXT\ community so that there are no side +effects for mainstream \LUATEX\ usage. + +Nowadays, instead of roadmaps, we tend to use navigational gadgets that adapt +themselves to the situation. On the road by car this can mean a detour and when +walking around it can be going to suggested points of interest. During the +excursion at the meeting, we noticed that after the drivers (navigators) +synchronized their gadget with Jano, the routes that were followed differed a +bit. We saw cars in front of us going in a different direction and cars behind us +arriving from a different direction. So, even when we talk about roadmaps, our +route can be adapted to the situation. + +Now here is something to think about. If you look at the \TEX\ community you will +notice that it's an aging community. User groups seem to lose members, although +the \CONTEXT\ group is currently still growing. Fortunately we see a new +generation taking interest and the \CONTEXT\ users are a pleasant mix and it +makes me stay around. I see it as an \quote {old timer's} responsibility to have +\TEX\ and its environment in a healthy state by the time I retire from it +(although I have no plans in that direction). In parallel to the upcoming +development I think we will also see a change in \TEX\ use and usage. This aspect +was also discussed at the meeting and for sure will get a follow up on the +mailing lists and future meetings. It might as well influence the decisions we +make in the upcoming years. So far \TEX\ has never failed us in its flexibility and +capacity to adapt, so let's end on that positive note. + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-stability.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-stability.tex new file mode 100644 index 00000000000..7dc35c6be54 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-stability.tex @@ -0,0 +1,388 @@ +% language=uk + +\environment musings-style + +\startcomponent musings-stability + +\startchapter[title={Stability}] + +\startsubject[title=Introduction] + +How stable is \CONTEXT ? This question is hard to answer. For instance \MKII\ +hasn't changed for years and seems to work quite well: no changes equals +stability. Those who use it can do with what it offers. The potentially sensitive +dependencies on for instance fonts are probably absent because there is not much +development in the 8 bit fonts arena. As long as these are available we're okay, +in fact, \OPENTYPE\ fonts are more a moving target and therefore less stable. + +What do we mean by stable? The fundamental differences between an 8 bit engine +(and fonts) and a \UNICODE\ aware engine able to handle \OPENTYPE\ fonts are +substantial which is why we dropped some functionality and added some relevant +new functionality.
One can consider that a problem but in practice using fonts has become +easier so no one is hurt by it. Here we need to keep in mind that \PDFTEX\ is +really stable: it uses fonts and technology that doesn't change. On the other +hand \XETEX\ and \LUATEX\ follow new trends. Thereby \XETEX\ uses libraries, +which introduces a dependency and instability, while \LUATEX\ assumes solutions +in \LUA\ which means that users and macro writers can tweak and thereby also +introduce instability (but at least one can adapt that code). + +Due to the way the user interface is set up, it is unlikely that \CONTEXT\ will +change. But the fact that we now have \LUA\ available means that many commands +have been touched. Most behave compatibly, some have more functionality, and of +course we have a \LUA\ interface. We include a lot of support code which also +lessens dependencies. + +The user input is normally \TEX\ but when you use \XML\ the move to \MKIV\ meant +that we dropped the \MKII\ way of dealing with it in favour of a completely new +mechanism. I get the impression that those using \XML\ don't regret that change. +Talking of stability the \MKIV\ \XML\ interface is typically a mechanism that is +stable and might change little. We can add new trickery but the old stays as it +is. + +If we look at the output, there is \DVI\ and \PDF. In \MKII\ the \DVI\ could +become \POSTSCRIPT. As there are different \DVI\ post|-|processors the backend +code was using a plug|-|in model. Contrary to other macro packages there was only +one so called format that could adapt itself to the required (engine specific) +output. A \CONTEXT\ run has always been managed by a wrapper so users were not +bothered much by what \TEX\ engine they used and|/|or what backend was triggered. +This changed with \MKIV\ where we use just \LUATEX, always produce \PDF\ and +optionally can export \XML. But again the run is managed by a wrapper, which +incidentally is written in \LUA\ and thereby avoids dependencies on for instance +\PERL, \RUBY\ or \PYTHON, which are moving targets, use libraries and additional +user code, and thereby are potentially unstable too. + +The \PDF\ code that is produced is a mix of what the engine spits out and what +the macro package injects. The code is normally rather simple. This means that +it's no big deal to support the so called standards. It also means that we can +support advanced interactivity and other features but these also depend on the +viewers used. So, stability here is more fluid, for instance because the \PDF\ +standard evolves and|/|or we need to adapt to viewers. Special demands like +tagged \PDF\ have been supported right from the start but how that evolves +depends mostly on input from users who need it. Again, that is less important +(and crucial) for stability than the rendering capabilities. + +The fact that we use \LUA\ creates a dependency on that language but the reason +that we use it is {\em because} it is so stable. We follow the updates and so far +that worked out well. Now, say that we had a frozen version of \CONTEXT\ 2010 and +\LUATEX\ 1.09 that uses \LUA\ 5.3, would that work? First of all, in 2010 +\LUATEX\ itself was evolving so the answer is probably \quotation {no}, unless +one adds a few compatibility patches. I'm not going to try it. The change from +5.1 to 5.2 to 5.3 was not really a problem I think and the few issues could be +dealt with easily. If you want long term stability and use a lot of \LUA\ code +you can take it into account when coding.
Avoiding external libraries is a good +start. + +Fonts are, more than before, moving targets. So, if you want stability there you +should save them with your document source. The processing of them has evolved +and has been improved over time. By now it's rather stable. More recent code can +catch more issues and fixes are relatively easy. But it's an area that you always +need to check when you update an old distribution. The same is true for language +related hyphenation patterns and script specific support. The community is no +longer leading in the math department either (\OPENTYPE\ math is a \MICROSOFT\ +invention). But, the good news is that the \TEX\ ecosystem is always fast to +adapt and can also often provide more functionality. + +Vertical spacing, in fact spacing in general, is an area that can always be +improved, so that is where you can expect changes. The same is true for side +floats or mechanisms where content is somehow attached to other moving content, +for instance marginal notes. + +But code dealing with fonts, color, scripts, structure, and specific features +that, once written, don't need more work, will not change that much. As mentioned for +fonts, like any resource, we also depend on third parties. Colors can relate to +standards, but their main properties are unchanged. Support for specific scripts +can (and will) be improved due to user input and demands so there the users also +influence stability. Structure doesn't really influence the overall rendering, +but the way you set it up does, but that's user styling. Of course during the +transition from \MKII\ to \MKIV\ and the evolution of \LUATEX\ things could be +broken, but fixing something structural seldom relates to rendering. If for +instance we improve the interpretation of \BIBTEX\ input, which can be really +messy, that involves data processing, not rendering. When we improve support for +the \APA\ standard, which is complex, it might involve rendering but then that's +asked for and expected. One cannot do better than the input permits. + +\stopsubject + +\startsubject[title=Publishers] + +When discussing stability and especially stability as a requirement we need to look +at the way \CONTEXT\ is used. So let's look at a few scenarios. Say that a +publisher gets a camera ready book from an author in \PDF\ format. In that +case the author can do all tweaks needed. Now say that the publisher also wants +the source code in a format that makes reuse possible. + +But let's face reality. Will that publisher really reformat the document in \PDF\ +again? It's very unlikely. First of all the original \PDF\ can be kept, and +second, a reformat only makes sense after updating the content or going for a +completely different layout. It's basically a new book then. In that case literal +similarity of output is irrelevant. It is a cheap demand without much substance. + +When the source is used for a different purpose the tool used to make the \PDF\ +is irrelevant. In that case the coding of the source can matter. If it is in some +dialect of \TEX, fine, one has to convert it anyway (to suit the other usage). If +there is an \XML\ export available, fine too as it can be transformed, given that +the structure is rich enough, something that is unlikely to have been checked +when the original was archived. Then there could have been the demand for a +document in some other format and who can guarantee stability of the tools used +there? Just look at how \MICROSOFT\ Word evolved, or for that matter, its +competitors.
On the average \TEX\ is more stable as one can snapshot a \TEX\ tree +and run binaries for years, if needed, in a virtual machine. + +So, I don't think that a publisher is of any relevance in the discussion about +stability. Even if we can clearly define what a publisher is, I doubt if +publishers themselves can be considered long term stable organizations. Not +today. I'm not sure if (especially the large) publishers really deserve a place +in the discussion about stability but I'm willing to discuss that when I run into +one. + +The main problem that an author can face when being confronted with the stability +issue this way is that the times are long gone that publishers have a clue about +what \TEX\ is, how it evolved and how it always had to and did adapt to changing +requirements. If you're lucky you will run into someone who does know all this. +They're normally a bit older and have seen the organization from many angles and +therefore are fun to work with. + +But even then, rendering issues are often not high on their agenda. Outsourcing +often has become the modus operandi which basically brings us to the second group +involved in this discussion: suppliers. + +\stopsubject + +\startsubject[title=Suppliers] + +I don't know many suppliers other than the ones we ran into over a few decades. +At least where I live the departments that are responsible for outsourcing +typesetting like to deal with only a few large suppliers, interestingly because +they assume that they are stable. However, in my experience hardly any of those +seem to have survived. (Of course one can wonder if long term commitment really +is that important in a world where companies change so fast.) This is somewhat +obscured by the fact that publishers themselves merge, reorganize, move people +around, etc. so who can check on the stability of suppliers? It is definitely a +fact that at least recently hardly any of them played a role of any relevance in +the development of stable tools. In the past the membership of \TEX\ user groups +contained people working at publishers and suppliers but that has changed. + +Let's focus on the suppliers that somehow use \TEX\ and let's consider two kinds +of suppliers: small ones, where only a few people work, and large ones. The +small ones depend on stable \TEX\ distributions, like \TEX Live where they can +get the resources from: styles, fonts, patterns, binaries. If they get the +author's \TEX\ files they need to have that access. They have to rework that input +into what the customer demands and that likely involves tweaks. So, maybe they +have developed their own additional code. For that code, stability is their own +responsibility. Did they tweak core code of a macro package? Fine, but you might +have it coming when you update. You cannot expect the evolving free meal world to +stick to your commercial needs. A supplier can play it safe and somehow involve the +developers of macro packages or consult them occasionally, but does that really +happen often? Interesting is that a few times that I was asked for input it was +also wrapped in obscurity, as if some holy grail of styling was involved, while +it's quite likely that the developer of a macro package can write such a style +(or extra code) easily and probably also better. There really is not that much +unique code around. + +Small suppliers can be on mailing lists where they can contribute, get feedback, +provide testing, etc. They are part of a process and as such have some influence +on stability.
If they charge by the page, then a change in their tools can be +reflected in what they charge. Basically redoing a book (or so) after a decade is +doing a new job. And adapting to some new options in a package, as part of a +typesetting job is probably no big deal. Is commercial really more stable than +open source free software? Probably not, except for open source software +developers whose real objective is to eventually sell their stuff to some company +(and cash) and even accept it being ditched. Small suppliers are more flexible. + +The large suppliers are a different group. They often guard their secrets and +stay in the dark. They probably seldom share (fundamental) code and information. +If they are present in a community it can be for marketing reasons. If at some +point a large supplier would demand stability, then my first response would be: +sure I can make you a stable setup and maybe even provide intermediate patches +but put your money where your mouth is. But that never happened and I've come to +the conclusion that we can safely ignore that group. The \TEX\ user groups create +distributions and have for instance funded font development and it is the common +users who paid for that, not the large scale ones. To some extent this is actually good +because large (software related) organizations often have special agendas that +can contradict what we aim at in the long term. + +From the author's perspective there is a dilemma here. When you submit to a +publisher who outsources, it can be a demand to deliver in a specific \TEX\ +format. Often a \PDF\ comes with the source then, so that the intended rendering +is known. Then that source goes to a supplier who then (quite likely) redoes a +lot of the coding in some stable subset, maybe even in a very old version of the +macro package. If I were such an author I'd render the document in \quote {as +stupid as possible mode} because you gain nothing by spending time on the looks. +So, stability within the package that you use is easy and translation from one to +another probably also. It's best to check beforehand what will happen with your +source and let stability, if mentioned, be their problem. After all they get paid +for it. + +Suppliers seldom know \CONTEXT. An interesting question is if they really know +the alternatives well, apart from the bit they use. A well structured \CONTEXT\ +source (or probably any source) is often easy to convert to another format. You +can assume that a supplier has tools for that (although we're often surprised +about the poor quality of tools used). Often the strict demand for some kind of +format is an excuse for lack of knowledge. Unfortunately you need a large author +base to change that attitude. + +\stopsubject + +\startsubject[title=Authors] + +Before we move to some variants of the above, first I will look at stability from +the author's perspective. When a book is being written the typesetting more or +less happens as part of the process. The way it looks can influence the way you +write and vice versa. Once the book is done it can go into print. Normally you will +try to work in a stable setup, unless you were using beta versions of \CONTEXT\ +and updated frequently. Of course when a user asks for additional features +while working on a project, he or she should also accept other beta features +and side effects. + +After a few years an author might decide to update the book. The worst that can +happen is that the code doesn't run with the latest \CONTEXT.
This is not so +likely because commands are upward compatible. However, the text might come out a +bit different, for instance because different fonts or patterns are used. But on +the average paragraphs will come out the same in \TEX. You can encounter +differences in the vertical spacing and page breaks, because that is where +improvements are still possible. If you use conceptually and implementation wise +complex mechanisms like side floats, you can also run into compatibility issues. +But all these don't really matter much because the text will be updated anyway +and fine|-|tuning of page breaks (if at all) happens at the end. The more you try +to compete with desk top publishing, and the more tweaks you apply, the greater +is the risk that you introduce instability. It is okay for a one|-|time job, but +when you come back to it after a decade, be prepared for surprises. + +Even if you stick to the original coding, it makes sense to sacrifice some of that +stability if new mechanisms have become available. For instance, if you use +\METAPOST, better ways to solve your problem might have become available. Or if +your document is 15 years old, a move from \MKII\ to \MKIV\ is a valid option, +in which case you might also consider using the latest fonts. + +Of course, when you made a style where you patched core code, you can expect +problems, because anything not explicitly mentioned in the interface definition +files is subject to change. But you probably see that coming anyway. + +So, is an author (or stand alone user) really dependent on stability? Probably +less than thought. In fact, the operating system, internet and browsers, +additional tools: all change over time and one adapts. It's something one can +live with. Just see how people adapt to phones, tablets, social media, electric +cars, etc. As long as the document processes and reasonable output is generated +it's fine. And that is always what we aim at! After all we need to be able to use +it ourselves, don't we? + +\stopsubject + +\startsubject[title=Projects] + +Although it is often overlooked as a valid alternative for rendering in large scale +projects, \CONTEXT\ is perfect as a component in a larger whole. Something goes +in, something comes out. In a long term project one can just install a minimal +distribution, write styles, and run it for ages. Use a virtual machine and +we're talking decades without any change. And, when one updates, it's easy to check +if all still works. Often the demands and styles are simple and predictable. It's +way more likely that a hard coded solution in some large programming environment has +stability issues than that the \CONTEXT\ bit has. + +If \CONTEXT\ is used in for instance documentation of (say) software, again there +is no real issue. Such documents are simple, evolve and therefore have no stable +page flow, and updating \CONTEXT\ is not needed if the once decided upon coding +is stable. You don't need the latest features. We've written styles and setups for +such tasks and indeed they run for ages. + +It can make me smile to see how much effort sometimes goes into low quality +rendering where \CONTEXT\ could do a way better job with far less investment in +time and money but where some presumed stable toolkit is used instead, one +that comes with expensive licensing, from companies that come and go but shine in +marketing. (A valid question is to what extent the quality of and care for +documentation reflects the core products that a company produces, at least under +the hood.)
+ +The biggest hurdle in setting up a decent efficient workflow is that it has to be +seen as a project: proper analysis, proper planning, prototyping and testing, +etc. You invest first and gain later. When dealing with paper many publishers +still think in price per page and have problems seeing that a stable mostly +automated flow in the end can result in a ridiculously low price per page, +especially in typesetting on demand. + +\stopsubject + +\startsubject[title=Hybrids] + +Lastly I will mention a setup that we are sometimes involved in. An author writes +books and uses \TEX. The publisher is okay with that and adds some quality +assurance but in the end the product comes from the author. Maybe images are +outsourced (not always for the better) but these can be handled easily. It can be that +a copy|-|editor is involved and that person also then has to use \TEX\ of course, +or give feedback to the author. + +Publishers, and this really depends on knowledgeable persons, who as said can +be fun to work with, can look beyond paper and also decide on additional +materials, for instance web pages, interactive exercises, etc. In that case +either \CONTEXT\ input has to be available as \XML\ (an export) or (often better) +\XML\ is the starting point for multiple output. Contrary to what is believed, +there are authors out there who have no problem coding in \XML\ directly. They +think in structured content and reuse! The fact that they can hit a button in the +editor and see the result in \PDF\ helps a lot. It just works. + +Here stability is mostly achieved by simply not updating during a project. There +are however cases where an update is needed, for instance because demands +changed. An example is a project where \ASCIIMATH\ is used which is a moving +target. Of course one can update just that module, and often that works, but not +when a module uses some new improved core helpers. Another example is additional +proofing options. + +The budget of such projects seldom permits patching an existing distribution, so +we then just update to the latest, but not without first checking that the used style works +okay. There is no author involvement in this. Depending on the workflow, it can +even be that the final rendering which involves fine tuning (side) float placement +or page breaks (often educational documents have special demands) is done by us +using special directives. + +Such hybrid workflows are quite convenient for all parties. The publisher works +with the author who likes using these tools, the author can do her or his thing +in the preferred way, and we do what we're best at: supporting this. And it +scales up pretty well too if needed, without much cost for the publishers. + +\stopsubject + +\startsubject[title=Conclusion] + +So what can we conclude with respect to the demand for stability? First of course +that it's important that our files keep running well. So, functionality should be +stable. Freezing a distribution will make sure that during a project you don't run +into issues. Many \CONTEXT\ users update frequently in order to benefit from the +latest additions. Most will not be harmed by this, but when something really +breaks it's users like those on the \CONTEXT\ support list (who often also +contribute in helping out other users) that are listened to first. Publishers' +demands play no role in this, if only because they also play no role in +typesetting, and if they want to they should also contribute. The same is true +for large suppliers.
We're talking of free software often written without any +compensation so these parties have no say in the matter unless they pay for it. +It's small suppliers, authors and general users that matter most. If \CONTEXT\ is +part of a workflow that we support, of course stability is guaranteed quite well, +and those paying for that never have an issue with better solutions popping up. +In fact, \CONTEXT\ is often just a tool then, one that does the job and questions +about stability don't matter much in practice, as long as it does the job well. + +The main engine we use, \LUATEX, will be quite stable from version 1.10 and we'll +try to make sure that newer versions are capable of running an older \CONTEXT, +which is easier when no fundamental changes happen in the engine. Maybe a stripped +down version of \LUATEX\ for \CONTEXT\ can facilitate that objective even more. + +Users themselves can try to stick to standard \CONTEXT\ features. The more tricks +you apply, the less stable your future might be. Most mechanisms are not evolving +but some, like those that deal with columns, might become better over time. But +typesetting in columns is often a one|-|shot adventure anyway (and who needs +columns in the future?). + +Of one thing users can be sure. There will never be a \CONTEXT\ professional or +\CONTEXT\ enterprise. There is only one variant. All users get the same +functionality and policies don't change suddenly. There will be no lock in to some +cloud or web based service either. Of course one can hire us for support of any +kind but that's independent of the distributed package. There is support by users +for users on mailing lists and other media. That itself can also guard stability. + +But, always keep in mind that stability and progress, whether or not driven by the +environment that we operate in, can be in conflict. + +\stopsubject + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-staygo.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-staygo.tex new file mode 100644 index 00000000000..4be647e47b2 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-staygo.tex @@ -0,0 +1,461 @@ +% language=uk + +% Written with on repeat: +% +% Rai Thistlethwayte: Betty Page (the keyscape version) + +% \usemodule[art-01,abr-04] +% +% \setupbodyfont[12pt] +% +% \startdocument +% [title={What’s to stay, what’s to go}, +% subtitle={The 2018 Bacho\TeX\ theme}, +% author={Hans Hagen}] + +\definedescription + [theme] + [before=\startnarrower, + after=\stopnarrower, + title=yes, + alternative=serried, + width=fit, + distance=.5\emwidth, + text={\documentvariable{title}:}] + +% \starttitle[title=\documentvariable{title}\\\txx\documentvariable{subtitle}] + +\startcomponent musings-staygo + +\environment musings-style + +\startchapter[title={What’s to stay, what’s to go}] + +\startsection[title=Introduction] + +The following text was written as preparation for a 2018 talk at Bacho\TEX, which +has this theme. It's mostly a collection of thoughts. It was also more meant as a +wrapup for the presentation (possibly with some discussions) than an article. + +\stopsection + +\startsection[title=Attraction] + +There are those movies where some whiz-kid sits down behind a computer, keys in a +few commands, and miracles happen. Ten fingers are used to generate programs that +work immediately. It's no problem to bypass firewalls. There is no lag over +network connections.
Checking massive databases is no big deal and there's even +processing power left for real time visualization or long logs to the terminal. + +How boring and old fashioned must a regular edit||run||preview cycle look +compared to this. If we take this 2018 movie reality as reference, in a time when +one can suck a phone empty with a simple connection, pull a hard drive from a +raid five array and still get all data immediately available, when we can follow +in real time whoever we want using cameras spread over the country, it's pretty clear +that this relatively slow page production engine \TEX\ has no chance to survive, +unless we want to impress computer illiterate friends with a log flying by on the +console (which in fact is used in movies to impress as well). + +On YouTube you can find these (a few hours) sessions where Jacob Collier +harmonizes live in one of these Digital Audio Workstation programs. A while later +on another channel June Lee will transcribe these masterpieces into complex +sheets of music by ear. Or you can watch the weekly Wintergatan episodes on +building the Marble Machine from wood using drilling, milling, drawing programs +etc. There are impressive videos of multi|-|dimensional LED arrays made by hand +and controlled by small computers and robots that solve Rubik's Cubes. You can be +impressed by these Animusic videos, musicians show their craftsmanship and +interesting informative movies are all over the place. I simply cannot imagine +millions of kids watching a \TEX\ style being written in a few hours. It's a real +challenge for an attention span. I hope to be proven wrong but I fear that for +the upcoming generation it's probably already too late because the \quote {whow} +factor of \TEX\ is low at first encounter. Although: picking up one of Don Knuth's +books can have that effect: a nice mixture of code, typesetting and subtle +graphics, combined with great care, only possible with a system like \TEX. + +\starttheme + Biology teaches us that \quote {cool} is not a recipe for \quote {survival}. + Not all designs by nature look cool, and it's only efficiency and + functionality that matters. Beauty sometimes matters too but many functional + mechanisms can do without. So far \TEX\ and its friends were quite capable of + surviving so there must be something in it that prevents it from being discarded. + But survival is hard to explain. So far \TEX\ just stayed around but lack of + visual attraction is a missing competitive trait. +\stoptheme + +\stopsection + +\startsection[title=Satisfaction] + +Biology also teaches us that chemistry can overload reason. When we go for +short|-|term pleasure instead of long|-|term satisfaction (Google for Simon Sinek +on this topic), addiction kicks in (for instance driven by crossing the dopamine +thresholds too often, Google for Robert Sapolsky). Cool might relate more to +pleasure while satisfaction relates to an effort. Using \TEX\ is not that cool +and often takes an effort. But the results can be very satisfying. Where \quote +{cool} is rewarding in the short term, \quote {satisfaction} is more a long term +effect. So, you probably get the best (experience) out of \TEX\ by using it a +lifetime. That's why we see so many old \TEX ies here: many like the rewards. + +If we want to draw new users we run into the problem that humans are not that +good at long term visions. This means that we cannot rely on showing cool (and +easy) features but must make sure that the long term reward is clear.
We can try +to be \quote {cool} to draw in new users, but it will not be the reason they +stay. Instant success is important for kids who have to make a report for school, +and a few days \quotation {getting acquainted with a program} doesn't fit in. +It's hard to make kids addicted to \TEX\ (which could be a dubious objective). + +\starttheme + As long as the narrative of satisfaction can be told we will see new users. + Meetings like Bacho\TEX\ are where the narrative gets told. What will happen + when we no longer meet? +\stoptheme + +\stopsection + +\startsection[title=Survival] + +Survival relates to improvements, stability and discarding of weak aspects. +Unfortunately that does not work out well in practice. Fully automated +multi||column typesetting with all other elements done well too (we just mention +images) is hard and close to impossible for arbitrary cases, so nature would have +gotten rid of it. Ligatures can be a pain especially when the language is not +tagged and some kind of intelligence is needed to selectively disable them. They +are the tail of the peacock: not that handy but meant to be impressive. Somehow +they stayed around in automated typesetting; in biology this would be called a freak +of nature: probably a goodbye in wildlife. And how about page breaks on an +electronic device: getting rid of them would make the floating figures go away +and remove boundary conditions often imposed. It would also make widows and clubs +less of a problem. One can even wonder if with page breaks the widows and clubs +are the biggest problems, and if one can simply live with them. After all, we can +live with our own bodily limitations too. After all, (depending on what country +you live in) you can also live with bad roads, bad weather, pollution, taxes, lack +of healthcare for many, too much sugar in food, and more. + + +\starttheme + Animals or plants that can adapt to live on a specific island might not + survive elsewhere. Animals or plants introduced in an isolated environment + might quickly dominate and wipe out the locals. What are the equivalents in + our \TEX\ ecosystem? +\stoptheme + +\stopsection + +\startsection[title=Niches] + +But arguments will not help us determine if \TEX\ is the fittest for survival. +It's not a rational thing. Humans are bad at applying statistics in their lives, +and looking far ahead is not a trait needed to survive. Often nature acts in +retrospect. (Climbing Mount Improbable by Richard Dawkins). So, it doesn't +matter if we save time in the future if it complicates the current job. If +governments and companies cannot look ahead and act accordingly, how can we +extrapolate software (usage) or more specifically typesetting demands? Just look +at the political developments in the country that hosts this conference. Could we +have predicted the diminishing popularity of the \EU\ (and disturbing retrograde +political mess in some countries) of 2018 when we celebrated the moment of Poland +joining the \EU\ at a Bacho\TEX\ campfire? + +Extrapolating the future quality of versions of \TEX\ or macro packages also doesn't +matter much. With machine learning and artificial intelligence around the corner and +with unavoidable new interfaces that hook into our brains, who knows what systems +we need in the future. A generic flexible typesetting system is probably not the +most important tool then.
When we discuss quality and design it gets personal so +a learning system that renders neutrally coded content into a form that suits +an individual, demands a different kind of tool than we have now. + +In the short term (our life span) it makes more sense to look around and see how +other software (ecosystems) fare. Maybe we can predict \TEX's future from that. +Maybe we can learn from others' mistakes. In the meantime we should not flatter +ourselves with the idea that a near perfect typesetting system will draw attention +and be used by a large audience. Factors external to the community play too +important a role in this. + +\starttheme + It all depends on how well it fits into a niche. Sometimes survival is only + possible by staying low on the radar. But just as we destroy nature and kill + animals competing for space, programs get driven out of the software world. + On a positive note: in a project that provides open (free) math for schools + students expressed a preference for a printed book over \WEB|-|only (one curious + argument for \WEB\ was that it permits easier listening to music at the same + time). +\stoptheme + +\stopsection + +\startsection[title=Dominance] + +Last year I installed a somewhat clever (evohome) heating control system. It's +probably the only \quotation {working out of the box} system that supports 12 +zones but at the same time it has a rather closed interface like any other. One can +tweak a bit via a web interface but that one works via a proxy outside so there is +a lock in. Such a system is a gamble because it's closed and we're talking of a +20 year investment. I was able to add a layer of control (abusing \LUATEX\ as +\LUA\ engine and \CONTEXT\ as library) so let's see. When I updated the boiler I +also reconfigured some components (like valves) and was surprised how poorly +upgrading was supported. One ends up with lost settings and weird interference +and it's because I know a bit of programming that I kept going and managed to add +more control. Of course, after a few weeks I had to check a few things in the +manuals, like how to enter the right menu. + +So, as the original manuals are stored somewhere, one picks up the smart phone +and looks for the manual on the web. I have no problem with proper \PDF\ as a +manual but why not provide a simple standard format document alongside the fancy +folded A3 one? Is it because it's hard to produce different instances from one +source? Is it because it takes effort? We're talking of a product that doesn't +change for years. + +\starttheme + The availability of flexible tools for producing manuals doesn't mean that + they are used as such. They don't support the survival of tools. Bad examples + are a threat. Dominant species win. +\stoptheme + +\stopsection + +\startsection[title=Extinction] + +When I was writing this I happened to visit a bookshop where I always check the +SciFi section for new publications. I picked out a pocket and wondered if I had +the wrong glasses on. The text was wobbling and looked kind of weird. On close +inspection indeed the characters were kind of randomly dancing on the baseline +and looked like some 150 \DPI\ (at most) scan. (By the way, I checked this the +next time I was there by showing the book to a nephew.) I get the idea that quite +some books get published first in the (more expensive) larger formats, so +normally I wait till a pocket size shows up (which can take a year) so maybe here +I was dealing with a scan of a larger print scaled down.
First of all that the publisher doesn't care about the +reader: this book is just unreadable. Second, it demonstrates that the printer +didn't ask for the original \PDF\ file and then scaled down the outline copy. It +really doesn't matter in this case if you use some high quality typesetting +program then. It's also a waste of time to talk to such publishers about quality +typesetting. The printer probably didn't bother to ask for a \PDF\ file that +could be scaled down. + +\starttheme + In the end most of the publishing industry will die and this is just one of + the symptoms. Typesetting as we know it might fade away. +\stoptheme + +\stopsection + +\startsection[title=Desinterest] + +The newspaper that I read has a good reputation for design. But why do they need +to drastically change the layout and font setup every few years? Maybe like an +animal marking his or her territory a new department head also has to put a mark +on the layout. Who knows. For me the paper became pretty hard to read: a too +light font that suits none of the several glasses that I have. So yes, I spend +less time reading the paper. In a recent commentary about the 75 year history of +the paper there was a remark about the introduction of a modern look a few +decades ago by using a sans serif font. I'm not sure why sans is considered +modern (most handwriting is sans) and to me some of these sans fonts look pretty +old fashioned compared to a modern elegant serif (or mix). + +\starttheme + If marketing and fashion of the day dominate then a wrong decision can result + in dying pretty fast. +\stoptheme + +\stopsection + +\startsection[title=Persistence] + +Around the turn of the century I had to replace my \CD\ player and realized that it +made more sense to invest in ripping the \CD's to \FLAC\ files and use a decent +\DAC\ to render the sound. This is a generic approach similar to processing +documents with \TEX\ and it looks as future proof as well. So, I installed a +virtual machine running SlimServer and bought a few SlimDevices, although by that +time they were already called SqueezeBoxes. + +What started as an independent supplier of hardware and an open source program +had gone the (nowadays rather predictable) route of a buy out by a larger company +(Logitech). That company later ditched the system, even if it had a decent share +of users. This \quotation {start something interesting and rely on dedicated +users}, then \quotation {sell yourself (to the highest bidder)} and a bit later +\quotation {accept that the product gets abandoned} is where open source can fail +in many aspects: loyal users are ignored and offended with the original author +basically not caring about it. The only good thing is that because the software +is open source there can be a follow up, but of course that requires that there +are users able to program. + +I have 5 small boxes and a larger transporter so my setup is for now safe from +extinction. And I can run the server on any (old) \LINUX\ or \MSWINDOWS\ +distribution. For the record, when I recently connected the 20 year old Cambridge +CD2 I was surprised how well it sounded on my current headphones. The only +drawback was that it needs 10 minutes for the transport to warm up and get +working. + +In a similar fashion I can still use \TEX, even when we originally started using +it with the only viable quality \DVI\ to \POSTSCRIPT\ backend at that time +(\DVIPSONE). But I'm not so sure what I'd done if I had not been involved in the +development of \PDFTEX\ and later \LUATEX . 
As an average user I might just have +dropped out. As with the \CD\ player, maybe someone will dust off an old \TEX\ +some day and maybe the only hurdle is to get it running on a virtual retro +machine. Although \unknown\ recently I ran into an issue with a virtual machine +that didn't provide a console after a \KVM\ host update, so I'm also getting +pessimistic about that escape for older programs. (Not seldom when a library +update is forced into the \LUATEX\ repository we face some issue and it's not +something the average user wants to (or is able to) cope with.) + +\starttheme + Sometimes it's hard to go extinct, even when commerce interfered at some + point. But it does happen that users successfully take (back) control. +\stoptheme + +\stopsection + +\startsection[title=Freedom] + +If you buy a book originating in academia written and typeset by the author, +there is a chance that it is produced by some flavour of \TEX\ and looks quite +okay. This is because the author could iterate to the product she or he likes. +Unfortunately the web is also a source of bad looking documents produced by \TEX. +Even worse is that many authors don't even bother to set up a document layout +properly, think about structure and choose a font setup that matches well. One +can argue that only content matters. Fine, but then also one shouldn't claim +quality simply because \TEX\ has been used. + +I've seen examples of material meant for bachelor students that made me pretend +that I am not familiar with \TEX\ and cannot be held responsible. Letter based +layouts on A4 paper, or worse, meant for display (or e|-|book devices) without +bothering to remove the excessive margins. Then these students are forced to use +some collaborative \TEX\ environment, which makes them dependent on the quality +standards of fellow students. No wonder that one then sees dozens of packages +being loaded, abundant copy and paste and replace of already entered formulas and +interesting mixtures of inline and display math, skips, kerns and whatever can +help to make the result look horrible. + +\starttheme + Don't expect enthusiastic new users when you impose \TEX\ but take away freedom + and force folks to cooperate with those with lesser standards. It will not + help quality \TEX\ to stay around. You cannot enforce survival, it just + happens or not, probably better with no competition or with a competition so + powerful that it doesn't bother with the niches. In fact, keeping a low + profile might be best! The number of users is no indication of quality, + although one can abuse that statistic selectively. +\stoptheme + +\stopsection + +\startsection[title=Diversity] + +Diversity in nature is enormous. There are of course niches, but in general there +are multiple variants of the same. When humans started breeding stock or +companion animals diversity also was a property. No one is forcing the same dog +upon everyone or the same cow. However, when industrialization kicks in things +become worse. Many cows in our country share the same dad. And when we look at +for instance corn, tomatoes or whatever dominance is not dictated by what nature +figures out best, but by what commercially makes most sense, even if that means +that something can't reproduce by itself any longer. + +In a similar way the diversity of methods and devices to communicate (on paper) +at some point turns into commercial uniformity. The diversity is simply very +small, also in typesetting.
And even worse, a user has to defend +her|/|himself for a choice of system (even in the \TEX\ community). It's just +against nature. + +\starttheme + Normally something stays around till it no longer can survive. However, we + humans have a tendency to destroy and commerce is lending a hand here. In + that respect it's a surprise that \TEX\ is still around. On the other hand, + humans also have a tendency to keep things artificially alive and even + revive them. Can we revive \TEX\ in a few hundred years given the complex code + base and Make infrastructure? +\stoptheme + +\stopsection + +\startsection[title=Publishing] + +What will happen with publishing? In the production notes of some of my recently +bought books the author mentions that the first prints were self|-|published +(whether or not sponsored). This means that when a publisher \quotation {takes +over} (which still happens when one scales up) not much work has to be done. +Basically the only thing an author needs is a distribution network. My personal +experience with for instance \CD's produced by a group of musicians is that it is +often hard to get them from abroad (if at all) simply because one needs a payment +channel and mail costs are also relatively high. + +But both demonstrate that, given good facilitating options, publishers as we have +now don't have much chance of survival. Add to the argument +that while in Gutenberg's time a publisher also was involved in the technology, +today nothing innovative comes from publishers: the internet, ebook devices, +programs, etc.\ all come from elsewhere. And I get the impression that even in +picking up on technology publishers lag behind and mostly just react. Even +arguments like added value in terms of peer review are disappearing with the +internet where peer groups can take over that task. Huge amounts of money are +wasted on short|-|term modern media. (I bet similar amounts were never spent on +typesetting.) + +\starttheme + Publishers, publishing, publications and their public: as they are now they + might not stay around. Lack of long term vision and ideas and decoupling of + technology can make sure of that. Publishing will stay but anyone can + publish; we only need the infrastructure. Creativity can win over greed and + exploitation, small can win over big. And tools like \TEX\ can thrive in + there, as they already do on a small scale. +\stoptheme + +\stopsection + +\startsection[title=Understanding] + +\quotation {Why do you use \TEX?} If we limit this question to typesetting, you +can think of \quotation {Why don't you use \MSWORD ?} \quotation {Why don't you use +Indesign?}, \quotation {Why don't you use that macro package?}, \quotation {Why +don't you use this \TEX\ engine?} and the like. I'm sure that most of the readers +had to answer questions like this, questions that sort of assume that you're not +happy with what you use now, or maybe even suggest that you must be stupid not to +use \unknown + +It's not that easy to explain why I use \TEX\ and|/|or why \TEX\ is good at the +job. If you are in one|-|to|-|one (or few) sessions you can demonstrate its +virtues but \quote {selling} it to for instance a publisher is close to +impossible because this kind of technology is rather unknown and far from the +click|-|and|-|point paradigm. It's even harder when students get accustomed to +these interactive books wherein they can even run code snippets although one +can wonder how individual these are when a student has the web as a source of +solutions.
Only after a long exposure to similar and maybe imperfect alternatives +books will get appreciated. + +For instance speaking of \quotation {automated typesetting} assumes that one +knows what typesetting is and also is aware that automated has some benefits. A +simple \quotation {it's an \XML\ to \PDF\ converter} might work better but that +assumes \XML\ being used which for instance not always makes sense. And while +hyphenation, fancy font support and proper justification might impress a \TEX\ +user it often is less of an argument than one thinks. + +The \quotation {Why don't you} also can be heard in the \TEX\ community. In the +worst case it's accompanied by a \quotation {\unknown\ because everybody uses +\unknown} which of course makes no sense because you can bet that the same user +will not fall for that argument when it comes to using an operating system or so. +Also from outside the community there is pressure to use something else: one can +find defense of minimal markup over \TEX\ markup or even \HTML\ markup as better +alternative for dissemination than for instance \PDF\ or \TEX\ sources. The +problem here is that old||timers can reflect on how relatively wonderful a +current technique really is, given changes over time, but who wants to listen to +an old|-|timer. Progress is needed and stimulating (which doesn't mean that all +old technology is obsolete). When I watched Endre eNerd's \quotation {The Time +Capsule} blu|-|ray I noticed an Ensoniq Fizmo keyboard and looked up what it was. +I ended up in interesting reads where the bottom line was \quotation {Either you +get it or you don't}. Reading the threads rang a bell. As with \TEX, you cannot +decide after a quick test or even a few hours if you (get the concept and) like +it or not: you need days, weeks, or maybe even months, and some actually never +really get it after years. + +\starttheme + It is good to wonder why you use some program but what gets used by others + depends on understanding. If we can't explain the benefits there is no + future for \TEX. Or more exact: if it no longer provide benefits, it will + just disappear. Just walk around a gallery in a science museum that deals + with computers: it can be a bit pathetic experience. 
+\stoptheme + +\stopsection + +{\bf Who knows \unknown} + +\stoptitle + +\stopdocument diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-style.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-style.tex new file mode 100644 index 00000000000..5ed934f2fb2 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-style.tex @@ -0,0 +1,92 @@ +\startenvironment musings-style + +\usemodule[abr-04] + +\setupbodyfont + [pagella] + +\setuplayout + [topspace=2cm, + header=0pt, + footer=1.5cm, + bottomspace=1cm, + width=middle, + height=middle] + +% \definecolor[maincolor] [darkyellow] +% \definecolor[extracolor][darkblue] + +\definecolor[maincolor] [middleorange] +\definecolor[extracolor][middleblue] + +\setuptype + [color=maincolor] + +\setuptyping + [color=maincolor] + +\setuphead + [color=maincolor] + +\setuphead + [chapter] + [style=\bfd] + +\setuphead + [chapter] + [after={\blank[3*line]}, + align=flushright, + command=\ChapterCommand] + +\starttexdefinition unexpanded ChapterCommand#1#2 + \hbox to \textwidth { + \hss + % title + #2 + \doifmode {*sectionnumber} { + % distance + \hskip10mm + % number + \struttedbox{\offset[x=-1mm,y=2.5mm]{\scale[height=2cm]{#1}}} + } + } +\stoptexdefinition + +\setuphead + [section] + [style=\bfb] + +\setuphead + [subsection] + [style=\bf, + before=\blank, + after=\blank] + +\setuppagenumbering + [alternative=doublesided] + +\setupfootertexts + [][{\getmarking[chapter]\hbox to 2em{\hss\pagenumber}}] + [{\hbox to 2em{\pagenumber\hss}\getmarking[chapter]}][] + +\setupwhitespace + [big] + +\setuplist + [chapter] + [width=3em, + before={\testpage[3]\blank}, + after={\blank[samepage]}, + color=maincolor, + style=bold] + +\setuplist + [section] + [width=3em, + before={\blank[nowhite]}, + after={\blank[nowhite]}] + +% \logo [MATHTYPE] {MathType} +% \logo [SYNCTEX] {Sync\TeX} + +\stopenvironment diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-titlepage.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-titlepage.tex new file mode 100644 index 00000000000..33eb44d95a4 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-titlepage.tex @@ -0,0 +1,46 @@ +\environment musings-style + +\startcomponent musings-titlepage + +\startMPpage + + fill Page withcolor "maincolor" ; + + draw image ( + draw textext.bot("m") xysized (20mm, 85mm) shifted (point .8 along (topboundary Page)) shifted (0,-10mm) ; + draw textext.bot("u") xysized (20mm, 60mm) shifted (point .7 along (topboundary Page)) shifted (0,-25mm) ; + draw textext.bot("s") xysized (20mm, 50mm) shifted (point .6 along (topboundary Page)) shifted (0,-45mm) ; + draw textext.bot("i") xysized (20mm,100mm) shifted (point .5 along (topboundary Page)) shifted (0, 00mm) ; + draw textext.bot("n") xysized (20mm, 55mm) shifted (point .4 along (topboundary Page)) shifted (0,-20mm) ; + draw textext.bot("g") xysized (20mm, 70mm) shifted (point .3 along (topboundary Page)) shifted (0,-20mm) ; + draw textext.bot("s") xysized (20mm, 40mm) shifted (point .2 along (topboundary Page)) shifted (0,-15mm) ; + ) shifted (0,-120mm) withcolor "white" ; + + draw image ( + draw textext.bot("c") xysized (20mm, 85mm) shifted (point .8 along (topboundary Page)) shifted (0,-10mm) ; + draw textext.bot("o") xysized (20mm, 60mm) shifted (point .7 along (topboundary Page)) shifted (0,-25mm) ; + draw textext.bot("n") xysized (20mm, 50mm) shifted (point .6 along (topboundary 
Page)) shifted (0,-45mm) ; + draw textext.bot("t") xysized (20mm,100mm) shifted (point .5 along (topboundary Page)) shifted (0, 00mm) ; + draw textext.bot("e") xysized (20mm, 55mm) shifted (point .4 along (topboundary Page)) shifted (0,-20mm) ; + draw textext.bot("x") xysized (20mm, 70mm) shifted (point .3 along (topboundary Page)) shifted (0,-20mm) ; + draw textext.bot("t") xysized (20mm, 40mm) shifted (point .2 along (topboundary Page)) shifted (0,-15mm) ; + ) shifted (0,-10mm) withcolor "white" ; + + draw image ( + draw textext.bot("h") xysized (10mm, 20.0mm) shifted (point .75 along (topboundary Page)) shifted (0,-10.0mm) ; + draw textext.bot("a") xysized (10mm, 22.5mm) shifted (point .70 along (topboundary Page)) shifted (0,-20.0mm) ; + draw textext.bot("n") xysized (10mm, 27.5mm) shifted (point .65 along (topboundary Page)) shifted (0,-12.5mm) ; + draw textext.bot("s") xysized (10mm, 30.0mm) shifted (point .60 along (topboundary Page)) shifted (0, 0mm) ; + + draw textext.bot("h") xysized (10mm, 25mm) shifted (point .45 along (topboundary Page)) shifted (0,-05.0mm) ; + draw textext.bot("a") xysized (10mm, 30mm) shifted (point .40 along (topboundary Page)) shifted (0,-15.0mm) ; + draw textext.bot("g") xysized (10mm, 35mm) shifted (point .35 along (topboundary Page)) shifted (0,-17.5mm) ; + draw textext.bot("e") xysized (10mm, 20mm) shifted (point .30 along (topboundary Page)) shifted (0,-20.0mm) ; + draw textext.bot("n") xysized (10mm, 25mm) shifted (point .25 along (topboundary Page)) shifted (0,-10.0mm) ; + ) shifted (0,-235mm) withcolor "white" ; + +\stopMPpage + +\page[empty] + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-whytex.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-whytex.tex new file mode 100644 index 00000000000..8f9b7de9b15 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings-whytex.tex @@ -0,0 +1,326 @@ +% language=uk + +\startcomponent musings-whytex + +\environment musings-style + +\startchapter[title={Why use \TEX ?}] + +\startsection[title={Introduction}] + +Let's assume that you know what \TEX\ is: a program that interprets a language +with the same name that makes it possible to convert (tagged) input into for +instance \PDF. For many of its users it is a black box: you key in some text, hit +a button and get some typeset result in return. After a while you start tweaking +this black box, meet other users (on the web), become more fluent and stick to it +forever. + +But now let's assume that you don't know \TEX\ and are in search of a system +that helps you create beautiful documents in an efficient way. When your +documents have a complex structure you are probably willing to spend some time on +figuring out what the best tool is. Even if a search lets you end up with +something called \TEX, a three letter word with a dropped E, you still don't +know what it is. Advertisement for \TEX\ is often pretty weak. It's rather easy +to point to the numerous documents that can be found on the web. But what exactly +does \TEX\ do and what are its benefits? In order to answer this we need to know +who you are: an author, editor, an organization that deals with documents or needs +to generate readable output, like publishers do. + +\stopsection + +\startsection[title={Authors}] + +We start with authors. Students of sciences that use mathematics don't have much +of a choice. 
But most of these documents hardly communicate the message that +\quotation {Everyone should use \TEX.} or that \quotation {All documents produced +by \TEX\ look great.}, but they do advocate that for rendering math it is a pretty +good system. The source code of these documents often looks rather messy and +unattractive and for a non|-|math user it can be intimidating. Choosing some +lightweight click|-|and|-|point alternative looks attractive. + +Making \TEX\ popular is not going to happen by convincing those who have to write +an occasional letter or report. They should just use whatever suits them. On the +other hand if you love consistency, long term support, need math, are dealing +with a rare language or script, like to reuse content, prefer different styling +from one source, use one source for multiple documents, or maybe love open source +tools, then you are a candidate. Of course there is a learning curve but normally +you can master \TEX\ rather fast and once you get the hang of it there's often no +way back. But you always need to invest a bit beforehand. + +So what authors are candidates for \TEX ? It could be that \TEX\ is the only tool +that does the job. If so, you probably learned that from someone who saw you +struggle or had the same experience and wrote or talked about it somewhere. In +that case using \TEX\ for creating just one document (like a thesis) makes sense. +Otherwise, you should really wonder if you want to invest time in a tool that you +probably have to ditch later on as most organizations stick to standard +(commercial) word processing tools. + +Talking to customers we are often surprised that people have heard about \TEX, or +even used it for a few documents in college. Some universities just prescribe the +use of \TEX\ for reporting, so not much of a choice there. Memories are normally +rather positive in the sense that they know that it can do the job and that it's +flexible. + +User group journals, presentations at \TEX\ meetings, journals, books and manuals +that come with \TEX\ macro packages can all be used to determine if this tool +suits an author. Actually, I started using \TEX\ because the original \TEX book +had some magic, and reading it was just that: reading it, as I had no running +implementation. A few years later, when I had to write (evolving) reports, I +picked it up again. But I'm not a typical user. + +\stopsection + +\startsection[title=Programmers] + +When you are a programmer who has to generate reports, for instance in \PDF, or +write manuals, then \TEX\ can really be beneficial. Of course \TEX\ is not always +an obvious choice, but if you are able to use it a bit, it's hard to beat in +quality, flexibility and efficiency. I'm often surprised that companies are +willing to pay a fortune for functionality that basically comes for free. +Programmers are accustomed to running commands and working in a code editor with +syntax highlighting so that helps too. They too recognize when something can be +done more efficiently. + +When you need to go from some kind of input (document source, database, +generated) to some rendered output there currently are a few endpoints: a +(dynamic) \HTML\ page, a \PDF\ document, something useable in a word processor, +or a representation using the desktop user interface. It's the second category +where \TEX\ is hard to beat but even using \TEX\ and \METAPOST\ for creating a +chart can make sense.
+ +There are of course special cases where \TEX\ fits in nicely. Say that you have +to combine \PDF\ documents. There are numerous tools to do that and \TEX\ is one. +The advantage of \TEX\ over other tools is that it's trivial to add additional +text, number pages, provide headers and footers. And it will work forever. Why? +Because \TEX\ has been around for decades and will be around for decades to come. +It's an independent component. The problem with choosing for \TEX\ is that the +starting point is important. The question is not \quotation {What tool should I +use?} but \quotation {What problem do I need to solve?}. An open discussion about +the objectives and possibilities is needed, not some checklist based on +assumptions. If you don't know \TEX\ and have never worked with a programmable +typesetting environment, you probably don't see the possibilities. In fact, you +might even choose for \TEX\ for the wrong reasons. + +The problem with this category of users is that they seldom have the freedom to +choose their tools. There are not that many jobs where the management is able to +recognize the clever programmer who can determine that \TEX\ is suitable for a +lot of jobs and can save money and time. Even the long term availability and +support is not an argument since not only most tools (or even apis) changes every +few years but also organizations themselves change ownership, objectives, and +personnel on a whim. The concept of \quote {long term} is hard to grasp for most +people (just look at politics) and it's only in retrospect that one can say +\quote {We used that toolkit for over a decade.} + +\stopsection + +\startsection[title={Organizations}] + +Authors (often) have the advantage that they can choose themselves: they can use +what they like. In practice any decent programmer is able to find the suitable +tools but convincing the management to use one of them can be a challenge. Here +we're also talking of \quote {comfort zones}: you have to like a tool(chain). +Organizations normally don't look for \TEX. Special departments are responsible +for choosing and negotiating whatever is used in a company. Unfortunately +companies don't always start from the open question \quotation {We have this +problem, we want to go there, what should we do?} and then discuss options with +for instance those who know \TEX. Instead requirements are formulated and matches +are found. The question then is \quotation {Are these requirements cut in stone?} +and if not (read: we just omit some requirements when most alternatives don't +meet them), were other requirements forgotten? Therefore organizations can end up +with the wrong choice (using \TEX\ in a situation where it makes no sense) or +don't see opportunities (not using \TEX\ while it makes most sense). It doesn't +help that a hybrid solution (use a mix of \TEX\ and other tools) is often not an +option. Where an author can just stop using a tool after a few days of +disappointment, and where a programmer can play around a bit before making a +choice, an organization probably best can start small with a proof of concept. + +Let's take a use case. A publisher wants to automatically convert \XML\ files +into \PDF. One product can come from multiple sources (we have cases where +thousands of small \XML\ files combine into one final product). Say that we have +three different layouts: a theory book, a teachers manual and an answer book. In +addition special proofing documents have to be rendered. 
The products might be +produced on demand with different topics in any combination. There is at least +one image and table per page, but there can be more. There are color and +backgrounds used, tables of contents generated, there is extensive cross +referencing and an index. Of course there is math. + +Now let's assume an initial setup costs 20K Euro and, what happens often when the +real products show up, a revision after one year takes the same amount. We also +assume 10K for the following eight years for support. So, we end up with 120K +over 10 years. If one goes cheap we can consider half of that, or we can be +pessimistic and double the amount. + +The first year 10K pages are produced, the second year 20K and after that 30K per +year. So, we're talking of 270K pages. If we include customer specific documents +and proofing we might as well end up with a multiple of that. + +So, we have 120K Euro divided by 270K pages or about half an Euro per page. But +likely we have more pages so it costs less. If we double the costs then we can +assume that some major changes took place which means more pages. In fact we had +projects where the layout changed, all documents were regenerated and the costs +were included in the revision, so far from double. We also see many more pages +being generated so in practice the price per page drops below half an Euro. The +more we process the cheaper it gets and one server can produce a lot of pages! + +Now, the interesting bit of such a calculation is that the costs only concern the +hours spent on a solution. A \TEX\ based system comes for free and there are no +license costs. Whatever alternative is taken, even if it is as flexible, it will +involve additional costs. From the perspective of costs it's very hard to beat +\TEX. Add to that the possibility for custom extensions, long term usage and the +fact that one can adapt the system. The main question of course is: does it do +the job. The only way to find out is to either experiment (which is free), +consult an expert (not free, but then needed anyway for any solution) or ask an +expert to make a proof of concept (also not free but relatively cheap and +definitely cheaper than a failure). In fact, before making decisions about what +solution is best it might be a good idea to check with an expert anyway, because +more or less than one thinks might be possible. Also, take into account that the +\TEX\ ecosystem is often one of the first to support new technologies, and +normally does that within its existing interface. And there is plenty of free +support and knowledge available once you know how to find it. Instead of wasting +time and money on advertisement and fancy websites, effort goes into support and +development. Even if you doubt that the current provider is around in the decade +to come, you can be sure that there will be others, simply because \TEX\ attracts +people. Okay, it doesn't help that large companies like to out source to +far||far||away and expect support around the corner, so in the end they might +kill their support chain. + +When talking of \TEX\ used in organizations we tend to think of publishers. But +this is only a small subset of organizations where information gets transformed +into something presentable. For small organizations the choice for \TEX\ can be +easy: costs, long term stability, knowing some experts are driving forces. For +large organizations these factors seem (at least to us) hardly relevant. 
We've +(had) projects where actually the choice for using a \TEX\ based solution was (in +retrospect) a negative one: there was no other tool than this relatively unknown +thing called \TEX. Or, because the normal tools could not be used, one ended up +with a solution where (behind the scenes) \TEX\ is used, without the organization +knowing it. Or, it happened that the problem at hand was mostly one that demands +in|-|depth knowledge of manipulating content, cleaning up messy data, combining +resources (images or \PDF\ documents), all things that happen to be available in +the perspective of \TEX. If you can solve a hard to solve problem for them then +an organization doesn't care what tool you use. What does matter is that the +solution runs forever, that costs are controllable and above all, that it +\quotation {Just works.} And if you can make it work fast, that helps too. We +can safely claim that when \TEX\ is evaluated as being a good option, that in the +end it always works out quite well. + +Among arguments that (large) organizations like to use against a choice for \TEX\ +(or something comparable) are the size of the company that they buy their +solution from, the expected availability for support, and the wide|-|spread usage +of the tool at hand. One can wonder if it also matters that many vendors change +ownership, change products every few years, change license conditions when they +like, charge a lot for support or just abort a tool chain. Unfortunately when that +happens those responsible for choosing such a system can have moved on to another +job, so this is seldom part of an evaluation. For the supplier the other side of +the table is just as much of a gamble. In that respect, an organization that +wants to use an open source (and|/|or free) solution should realize that getting +a return on investment on such a development is pretty hard to achieve. So, who +really takes the risk for writing open source? + +For us, the reason to develop \CONTEXT\ and make it open is that it fits in our +philosophy and we like the community. It is actually not really giving us an +advantage commercially: it costs way more to develop, support and keep +up|-|to|-|date than it will ever return. We can come up with better, faster and +easier solutions and in the end we pay the price because it takes less time to +cook up styles. So there is some backslash involved because commercially a +difficult solution leads to more billable hours. Luckily we tend to avoid wasting +time so we improve when possible and then it ends up in the distributed code. +And, once the solution is there, anyone can use it. Basically also for us it's +just a tool, like the operating system, editor and viewer are. So, what keep +development going is mostly the interaction with the community. This also means +that a customer can't really demand functionality for free: either wait for it to +show up or pay for it (which seldom happens). Open source is not equivalent with +\quotation {You get immediately what you want because someone out there writes +the code.}. There has to be a valid reason and often it's just users and meetings +or just some challenge that drives it. + +This being said, it is hard to convince a company to use \TEX. It has to come +from users in the organization. Or, what we sometimes see with publishers, it +comes with an author team or acquired product line where it's the only option. +Even then we seldom see transfer to other branches in the organizations. 
No one +seems to wonder \quotation {How on earth can that \XML\ to \PDF\ project produce +whatever output in large quantities in a short period of time} while other (past) +projects failed. It probably relates to the abstraction of the process. Even +among \TEX\ users it can be that you demonstrate something with a click on a +button and that many years afterwards someone present at that moment tells you +that they just discovered that this or that can be done by hitting a button. I'm +not claiming that \TEX\ is the magic wand for everything but in some areas it's +pretty much ahead of the pack. Go to a \TEX\ user meeting and you will be surprised +about the accumulated diverse knowledge present in the room. It's user demand that +drives \CONTEXT\ development, not commerce. + +\stopsection + +\startsection[title={Choosing}] + +So, where can one find information about \TEX\ and friends? On the web +one has to use the right search keys, so adding \type {tex} helps: \typ {context +tex} or \typ {xml tex pdf} and so on. Can one make a fancy hip website, sure, but +it being a life|-|long, already old and mature environment, and given that it +comes for free, or is used low|-|budget, not much effort and money can be spent +on advertising it. A benefit is that no false promises and hypes are made either. +If you want to know more, just ask the right folks. + +For all kind of topics one can find interesting videos and blogs. One can +subscribe to channels on YouTube or join forums. Unfortunately not that many +bloggers or vloggers or podcasters come up with original material every time, and +often one starts to recognize patterns and will get boring by repetition of wisdom +and arguments. The same is true for manuals. Is a ten year old manual really +obsolete? Should we just recompile it to fake an update while in fact there has +been no need for it? Should we post twenty similar presentations while one can +do? (If one already wants to present the same topic twenty times in the first +place?) Maybe one should compare \TEX\ with cars: they became better over time +and can last for decades. And no new user manual is needed. + +As with blogs and vlogs advertising \TEX\ carries the danger for triggering +political discussions and drawing people into discussions that are not pleasant: +\TEX\ versus some word processor, open versus closed source, free versus paid +software, this versus that operating system, editor such or editor so. + +To summarize, it's not that trivial to come up with interesting information about +\TEX, unless one goes into details that are beyond the average user. And those +who are involved are often involved for a long time so it gets more complex over +time. User group journals that started with tutorials later on became expert +platforms. This is a side effect of being an old and long|-|term toolkit. If +you run into it, and wonder if it can serve your purpose, just ask an expert. + +Most \TEX\ solutions are open source and come for free as well. Of course if you +want a specific solution or want support beyond what is offered on mailing lists +and forums you should be willing to pay for the hours spent. For a professional +publisher (of whatever kind) this is not a problem, if only because any other +solution also will cost something. It is hard to come up with a general estimate. +A popular measure of typesetting costs is the price per page, which can range +from a couple of euro's per page to two digit numbers. We've heard of cases where +initial setup costs were charged. 
If not much manual intervention is needed a +\TEX\ solution mostly concerns initial costs. + +Let's return to the main question \quotation {Why use \TEX ?} in which you can +replace \TEX\ by one of the macro packages build on top of it, for instance +\CONTEXT. If an (somewhat older) organization considers using \TEX\ it should +also ask itself, why it wasn't considered long ago already? For sure there have +been developments in \TEX\ engines (in \CONTEXT\ we use \LUATEX) as well as +possibilities of macro packages but if you look at the documents produced with +them, there is not that much difference with decades ago. Processing has become +faster, some things have become easier, but new technologies have always been +supported as soon at they showed up. Advertising is often just repeating an old +message. + +The \TEX\ ecosystem was among the first in supporting for instance \OPENTYPE, and +the community even made sure that there were free fonts available. A format like +\PDF\ was supported as soon as it shows up and \TEX\ was the first to demonstrate +what advanced features were there and how way it was to adapt to changes. +Processing \XML\ using \TEX\ has never been a big deal and if that is a reason to +look at this already old and mature technology, then an organization can wonder +if years and opportunities (for instance for publishing on demand or easy +updating of manuals) have been lost. Of course there are (and have been) +alternative tools but the arguments for using \TEX\ or not are not much different +now. It can be bad marketing of open and free software. It can be that \TEX\ has +been around too long. It can also be that its message was not understood yet. On +the other hand, in software development it's quite common to reinvent wheels and +present old as new. It's never to late to catch on. + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings.tex b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings.tex new file mode 100644 index 00000000000..e2787dc9923 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/musings/musings.tex @@ -0,0 +1,21 @@ +\environment musings-style + +\startproduct musings + +\component musings-titlepage + +\startfrontmatter + \component musings-contents + \component musings-introduction +\stopfrontmatter + +\startbodymatter + \component musings-children + \component musings-perception + \component musings-whytex + \component musings-staygo + \component musings-stability + \component musings-roadmap +\stopbodymatter + +\stopproduct diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/nodes/nodes.tex b/Master/texmf-dist/doc/context/sources/general/manuals/nodes/nodes.tex index e9857f37ed9..2d97e676a4a 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/nodes/nodes.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/nodes/nodes.tex @@ -24,17 +24,6 @@ % % comment : The cover images are from the NASA website. - -% Alan : There is no need for % before \startfootnote .. the less such crap the better -% (as it might give disappearing content if one wraps). -% -% Alan : ḻṯ What is this? An editor glitch? -% -% Alan : $≠$ -> just use ≠ (also, soon the gyre fonts will have more symbols in text -% that match text. -% -% Alan : You can use \typ {x = 0} for something that breaks accross lines. 
- %\enabletrackers[metapost.showlog] \definemeasure [layout:margin] [\paperheight/20] @@ -89,6 +78,7 @@ % \definesymbol [1] [$\cdot$] % Dot rather than bullet ... Alan hates bullets! Hans hates too small dots. +% So, a dash is OK? ;-) \setupitemize [symbol=2] % dash rather than bullet, I hate bullets! @@ -160,12 +150,13 @@ \startbuffer [bib] @ARTICLE{Krebs1946, - author = {Krebs, H. A.}, - title = {Cyclic processes in living matter}, - journal = {Enzymologia}, - year = {1946}, - volume = {12}, - pages = {88--100} + author = {Krebs, H. A.}, + title = {Cyclic processes in living matter}, + journal = {Enzymologia}, + year = {1946}, + volume = {12}, + pages = {88--100} + language = {english}, } @ARTICLE{Bethe1939a, @@ -180,6 +171,7 @@ issue = {1}, publisher = {American Physical Society}, XXurl = {http://link.aps.org/doi/10.1103/PhysRev.55.103} + language = {english}, } @ARTICLE{Bethe1939b, @@ -194,6 +186,7 @@ issue = {5}, publisher = {American Physical Society}, XXurl = {http://link.aps.org/doi/10.1103/PhysRev.55.434} + language = {english}, } @BOOK{Lawvere2009, @@ -204,6 +197,20 @@ publisher = {Cambridge University Press}, address = {Cambridge, UK}, year = {2009} + language = {english}, + } + + @ARTICLE{Braslau2018, + title = {ConTeXt nodes}, + subtitle = {commutative diagrams and related graphics}, + author = {Braslau, Alan and Hamid, Idris Samawi and Hagen, Hans}, + year = {2018}, + journal = {TUGboat}, + volume = {39}, + number = {1}, + ISSN = {0896-3207}, + url = {https://www.tug.org/TUGboat/Contents/contents39-1.html}, + language = {english}, } \stopbuffer @@ -356,6 +363,11 @@ \startsubject [title=Introduction] +A draft version of this manual was published in \cite[journal] [Braslau2018]. +\cite [Braslau2018] + +\blank + The graphical representation of textual diagrams is a very useful tool in the communication of ideas. In category and topos theory, for example, many key concepts, formulas, and theorems are expressed by means of \emph {commutative @@ -683,7 +695,7 @@ The \CONTEXT\ syntax for the current example looks like this: \stopnodes \stopbuffer -\startplacefigure [location=right, +\startplacefigure [location=right,reference=fig:ConTeXt, title={Drawn using \CONTEXT\ interface}] \getbuffer \stopplacefigure @@ -692,10 +704,9 @@ The \CONTEXT\ syntax for the current example looks like this: \startplacefigure [reference=fig:indices, location=right, - title={Coordinates and indices.\footnote{For variety, a rectangular oblique - lattice is drawn in \in{Figure} [fig:indices].} - }] -\stopfootnote + title={Coordinates and indices.\footnote {For variety, a rectangular oblique + lattice is drawn in \in{Figure} [fig:indices].}} + ] \startframed [frame=off,width=.33\textwidth,align=flushright] \startnodes [dx=3cm,dy=2cm,rotation=60] \placenode [0,0] {\node{(0,0)}} @@ -716,14 +727,47 @@ The \CONTEXT\ syntax for the current example looks like this: This follows the more classic (and limited) approach of placing nodes on the coordinates of a regular lattice, here defined as a 3~cm square network. -\footnote {The lattice can be square (\type {dx} $=$ \type {dy}), rectangular -(\type {dx} $≠$ \type {dy}), or oblique (through \type {rotation} $≠$ 90).} The -arguments are then $(x,y)$ coordinates of this lattice and the nodes are indexed +\startfootnote + The lattice can be square (\type {dx} $=$ \type {dy}), rectangular + (\type {dx} ≠ \type {dy}), or oblique (through \type {rotation} ≠ 90). 
+\stopfootnote +The arguments are then $(x,y)$ coordinates of this lattice and the nodes are indexed 0, 1, 2, … in the order that they are drawn. These are used as reference indices in the commands \type {\connectnodes} (rather than requiring two \emph {pairs} of coordinates); see \in {Figure} [fig:indices]. This might seem a bit confusing at first view, but it simplifies things in the end, really! +To avoid such confusion, in particular when drawing complicated diagrams +containing many nodes, the \CONTEXT\ interface allows the use of a \type +{reference} or tag, assigning a symbolic name to a numbered node. The example of +\in{Figure} [fig:ConTeXt] can be redrawn a little more verbosely as: + +\startbuffer + \startnodes [dx=3cm,dy=3cm] + \placenode [0,0] [reference=GX] {\node{$G(X)$}} + \placenode [1,0] [reference=GY] {\node{$G(Y)$}} + \placenode [1,1] [reference=FY] {\node{$F(Y)$}} + \placenode [0,1] [reference=FX] {\node{$F(X)$}} + \connectnodes [GX,GY] [alternative=arrow, + label={\nodeSmall{$G(f)$}},position=bottom] + \connectnodes [FX,FY] [alternative=arrow, + label={\nodeSmall{$F(f)$}},position=top] + \connectnodes [FY,GY] [alternative=arrow, + label={\nodeSmall{$η_Y$}}, position=right] + \connectnodes [FX,GX] [alternative=arrow, + label={\nodeSmall{$η_X$}}, position=left] + \stopnodes +\stopbuffer + +\typebuffer [option=TEX] + +Notice that, like for all \CONTEXT\ macros, one never mixes a +comma|-|separated list of arguments (\type {[0,0]}) with a key=value list +(i.e. \type {[reference=GX]}). Symbolic references are very useful for longer, +more complicated diagrams; additionally, this easily allows modification such as +the addition of nodes without having to keep track of counting the order in +which they were drawn. + An identity map, as shown in \in {Figure} [fig:ID], earlier, and, below, in \in {Figure} [fig:Me] is achieved by connecting a node to itself: @@ -752,8 +796,6 @@ The scale (diameter) of the circular loop|-|back is set by the keyword \type straight|-|line segment between them), and the \type {position=} keyword sets its orientation. -\page [yes] - \startbuffer \startMPcode clearnodepath ; @@ -840,8 +882,6 @@ exterior for the output. The code is as follows: -\page [yes] - \startbuffer \startMPcode save p ; path p[] ; @@ -1145,7 +1185,7 @@ path: save p ; % path p ; \stopTEX -The \type {save p ;} assures that the path is undefined. This path will later ḻṯ +The \type {save p ;} assures that the path is undefined. This path will later be defined based on the contents of the nodes and a desired relative placement. In fact, it is not even necessary to declare that the suffix will be a path, as the path will be declared and automatically built once the positions of all the nodes @@ -1490,8 +1530,6 @@ coordinate taken to be \type {right}, both in the page of the paper. The third coordinate \type {X} is an oblique projection in a right|-|hand coordinate system. 
-\page [yes] - \startbuffer \startMPcode{three} save nodepath ; diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/notnow/notnow-sidefloats.tex b/Master/texmf-dist/doc/context/sources/general/manuals/notnow/notnow-sidefloats.tex index 4a8a1ba3b85..c7de6c2c57f 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/notnow/notnow-sidefloats.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/notnow/notnow-sidefloats.tex @@ -1,7 +1,5 @@ \usemodule[art-01] -\starttext - \definefloat [figure-column] diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-110.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-110.tex new file mode 100644 index 00000000000..e8b005f2405 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-110.tex @@ -0,0 +1,137 @@ +% language=uk + +% After watching \quotation {What Makes This Song Great 30: Alanis Morisette} by +% Rick Beato, it's tempting to poder \quotation {What makes \TEX\ great}. + +\startcomponent onandon-110 + +\environment onandon-environment + +\startchapter[title={Getting there, version 1.10}] + +When we decided to turn experiments with a \LUA\ extensions to \PDFTEX\ into +developing \LUATEX\ as alternative engine we had, in addition to opening up some +of \TEX's internals, some extensions in mind. Around version 1.00 most was +already achieved and with version 1.10 we're pretty close to where we want to be. +The question is, when are we ready? In order to answer that I will look at four +aspects: + +\startitemize[packed] +\startitem objectives \stopitem +\startitem functionality \stopitem +\startitem performance \stopitem +\startitem stability \stopitem +\stopitemize + +The main {\em objective} was to open up \TEX\ in a way that permit extensions +without the need to patch the engine. Although it might suit us, we don't want to +change too much the internals, first of all because \TEX\ is \TEX, the documented +program with a large legacy. \footnote {This is reflected in the keywords that +exposed mechanisms use: they reflect internal variable names and constants and as +a consequence there is inconsistency there.} Discussions about how to extend +\TEX\ are not easy and seldom lead to an agreement so better is to provide a way +to do what you like without bothering other users and|/|or interfering with macro +packages. I think that this objective is met quite well now. Other objectives, +like embedding basic graphic capabilities using \METAPOST\ have already been met +long ago. There is more control over the backend and modern fonts can be dealt +with. + +The {\em functionality} in terms of primitives has been extended but within +reasonable bounds: we only added things that make coding a bit more natural but +we realize that this is very subjective. So, here again we can say that we met +our goals. A lot can be achieved via \LUA\ code and users and developers need to +get accustomed to that if they want to move on with \LUATEX. We will not +introduce features that get added to or are part of other engines. + +We wanted to keeping {\em performance} acceptable. The core \TEX\ engine is +already pretty fast and it's often the implementation of macros (in macro +packages) that creates a performance hit. Going \UTF\ has a price as do modern +fonts. At the time of this writing processing the 270 page \LUATEX\ manual takes +about 12 seconds (one run), which boils down to over 27 pages per second. 
+ +\starttabulate[||c|c|] +\NC \BC runtime \BC overhead \NC \NR +\BC \LUATEX \NC $12.0$ \NC $+0.6$ \NC \NR +\BC \LUAJITTEX \NC $ 9.7$ \NC $+0.5$ \NC \NR +\stoptabulate + +Is this fast or slow? One can do tests with specific improvements (using new +primitives) but in practice it's very hard to improve performance significantly. +This is because a test with millions of calls that shows a .05 second improvement +disappears when one only has a few thousand calls. Many small improvements can +add up, but less than one thinks, especially when macros are already quite +optimal. Also this runtime includes time normally used for running additional +programs (e.g.\ for getting bibliographies right). + +It must be said that performance is not completely under our control. For +instance, we have patched the \LUAJIT\ hash function because it favours \URL's +and therefore favours hashing the middle of the string which is bad for our use +as we are more interested in the (often unique) start of strings. We also +compress the format which speeds up loading but not on the native windows 64~bit +binary. At the time of this writing the extra overhead is 2~seconds due to some +suboptimal gzip handling; the cross compiled 64~bit mingw binaries that I use +don't suffer from this. When I was testing the 32~bit binaries on the machine of +a colleague, I was surprised to measure the following differences on a complex +document with hundreds of \XML\ files, many images and a lot of manipulations. + +\starttabulate[||c|c|] +\NC \BC 1.08 with \LUA\ 5.2 \BC 1.09 with \LUA\ 5.3 \NC \NR +\BC \LUATEX \NC $21.5$ \NC $15.2$ \NC \NR +\BC \LUAJITTEX \NC $10.7$ \NC $10.3$ \NC \NR +\stoptabulate + +Now, these are just rough numbers but they demonstrate that the gap between +\LUATEX\ and \LUAJITTEX\ is becoming less which is good because at this moment it +looks like \LUAJIT\ will not catch up with \LUA\ 5.3 so at some point we might +drop it. It will be interesting to see what \LUA\ 5.4 will bring as it offers an +alternative garbage collector. And imagine that the regular \LUA\ virtual +machine gets more optimized. + +You also have to take into account that having a browser open in the background +of a \TEX\ run has way more impact than a few tenths of a second in \LUATEX\ +performance. The same is true for memory usage: why bother about \LUATEX\ taking +tens of megabytes for fonts while a few tabs in a browser can bump memory +consumption to gigabytes. Also, using a large \TEX\ tree (say the +whole of \TEXLIVE) can have a bit of a performance hit! Or what about inefficient +callbacks, using inefficient \LUA\ code or badly designed solutions? What we +could gain here we lose there, so I think we can safely say that the current +implementation of \LUATEX\ is as good as you can (and will) get. Why should we +introduce obscure optimizations where on workstations \TEX\ is just one of the +many processes? Why should we bother too much to speed up on servers that have +start|-|up or job management overhead or are connected to relatively slow remote +file systems? Why squeeze out a few more milliseconds when badly written macros or +styles can have a way more impact on performance? So, for now we're satisfied +with performance.
Just for the record, the ratio between \CONTEXT\ \MKII\ +running other engines and \LUATEX\ with \MKIV\ for the next snippet of code: + +\starttyping +\dorecurse{250}{\input tufte\par} +\stoptyping + +is 2.8 seconds for \XETEX, 1.5 seconds for \LUATEX, 1.2 seconds for \LUAJITTEX, +and 0.9 seconds for \PDFTEX. Of course this is not really a practical test but it +demonstrates the baseline performance on just text. The 64 bit version of \PDFTEX\ +is actually quite a bit slower on my machine. Anyway, \LUATEX\ (1.09) with \MKIV\ +is doing okey here. + +That brings us to {\em stability}. In order to achieve that we will not introduce +many more extensions. That way users get accustomed to what is there (read: there +is no need to search for what else is possible). Also, it makes that existing +functionality can become bug free because no new features can interfere. So, at +some point we have to decide that this is it. If we can do what we want now, +there are no strong arguments for more. in that perspective version 1.10 can be +considered very close to what we want to achieve. + +Of course development will continue. For instance, the \PDF\ inclusion code will +be replaced by more lightweight and independent code. Names of functions and +symbolic constants might be normalized (as mentioned, currently they are often +related to or derived from internals). More documentation will be added. We will +quite probably keep up with \LUA\ versions. Also the \FFI\ interface will become +more stable. And for sure bugs will be fixed. We might add a few more options to +control behaviour of for instance of math rendering. Some tricky internals (like +alignments) might get better attribute support if possible. But currently we +think that most fundamental issues have been dealt with. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-53.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-53.tex new file mode 100644 index 00000000000..0d5dc1b9cc0 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-53.tex @@ -0,0 +1,288 @@ +% language=uk + +\startcomponent onandon-53 + +\environment onandon-environment + +\startchapter[title={From \LUA\ 5.2 to 5.3}] + +When we started with \LUATEX\ we used \LUA\ 5.1 and moved to 5.2 when that became +available. We didn't run into issues then because there were no fundamental +changes that could not be dealt with. However, when \LUA\ 5.3 was announced in +2015 we were not sure if we should make the move. The main reason was that we'd +chosen \LUA\ because of its clean design which meant that we had only one number +type: double. In 5.3 on the other hand, deep down a number can be either an +integer or a floating point quantity. + +Internally \TEX\ is mostly (up to) 32-bit integers and when we go from \LUA\ to +\TEX\ we round numbers. Nonetheless one can expect some benefits in using +integers. Performance|-|wise we didn't expect much, and memory consumption would +be the same too. So, the main question then was: can we get the same output and +not run into trouble due to possible differences in serializing numbers; after +all \TEX\ is about stability. The serialization aspect is for instance important +when we compare quantities and|/|or use numbers in hashes. + +Apart from this change in number model, which comes with a few extra helpers, +another extension in 5.3 was that bit|-|wise operations are now part of the +language. 
The lpeg library is still not part of stock +\LUA. There is some minimal +\UTF8 support, but less than we provide in \LUATEX\ already. So, looking at these +changes, we were not in a hurry to update. Also, it made sense to wait till this +important number|-|related change was stable. + +But, a few years later, we still had it on our agenda to test, and after the +\CONTEXT\ 2017 meeting we decided to give it a try; here are some observations. A +quick test was just dropping in the new \LUA\ code and seeing if we could make a +\CONTEXT\ format. Indeed that was no big deal but a test run failed because at +some point a (for instance) \type {1} became a \type {1.0}. It turned out that +serializing has some side effects. And with some ad hoc prints for tracing (in +the \LUATEX\ source) I could figure out what went on. How numbers are seen can +(to some extent) be deduced from the \type {string.format} function, which is in +\LUA\ a combination of parsing, splitting and concatenation combined with piping +to the \CCODE\ \type {sprintf} function. \footnote {Actually, at some point I +decided to write my own formatter on top of \type {format} and I ended up with +splitting as well. It's only now that I realize why this is working out so well +(in terms of performance): simple formats (single items) are passed more or less +directly to \type {sprintf} and as \LUA\ itself is fast, due to some caching, the +overhead is small compared to the built|-|in splitter method. And the \CONTEXT\ +formatter has many more options and is extensible.} + +\starttyping +local a = 2 * (1/2) print(string.format("%s", a),math.type(a)) +local b = 2 * (1/2) print(string.format("%d", b),math.type(b)) +local c = 2 print(string.format("%d", c),math.type(c)) +local d = -2 print(string.format("%d", d),math.type(d)) +local e = 2 * (1/2) print(string.format("%i", e),math.type(e)) +local f = 2.1 print(string.format("%.0f",f),math.type(f)) +local g = 2.0 print(string.format("%.0f",g),math.type(g)) +local h = 2.1 print(string.format("%G", h),math.type(h)) +local i = 2.0 print(string.format("%G", i),math.type(i)) +local j = 2 print(string.format("%.0f",j),math.type(j)) +local k = -2 print(string.format("%.0f",k),math.type(k)) +\stoptyping + +This gives the following results: + +\starttabulate[|cBT|c|T|c|cT|] +\BC a \NC 2 * (1/2)\NC s \NC 1.0 \NC float \NC \NR +\BC b \NC 2 * (1/2)\NC d \NC 1 \NC float \NC \NR +\BC c \NC 2 \NC d \NC 2 \NC integer \NC \NR +\BC d \NC -2 \NC d \NC -2 \NC integer \NC \NR +\BC e \NC 2 * (1/2)\NC i \NC 1 \NC float \NC \NR +\BC f \NC 2.1 \NC .0f \NC 2 \NC float \NC \NR +\BC g \NC 2.0 \NC .0f \NC 2 \NC float \NC \NR +\BC h \NC 2.1 \NC G \NC 2.1 \NC float \NC \NR +\BC i \NC 2.0 \NC G \NC 2 \NC float \NC \NR +\BC j \NC 2 \NC .0f \NC 2 \NC integer \NC \NR +\BC k \NC -2 \NC .0f \NC -2 \NC integer \NC \NR +\stoptabulate + +This demonstrates that we have to be careful when we need these numbers +represented as strings. In \CONTEXT\ the number of places where we had to check +for that was not that large; in fact, only some hashing related to font sizes had +to be done using explicit rounding. + +Another surprising side effect is the following. Instead of: + +\starttyping +local n = 2^6 +\stoptyping + +we now need to use: + +\starttyping +local n = 0x40 +\stoptyping + +or just: + +\starttyping +local n = 64 +\stoptyping + +because we don't want this to be serialized to \type {64.0} which is due to the +fact that a power results in a float. One can wonder if this makes sense when we +apply it to an integer.
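+
+As an aside, and just as a little illustration (this snippet comes from neither
+the engine nor the \CONTEXT\ code base), the helpers that 5.3 adds for dealing
+with the two number subtypes can be used to normalize values before they end up
+in a hash or in serialized output; \type {math.type}, \type {math.tointeger} and
+the floor division operator \type {//} are all part of stock \LUA\ 5.3:
+
+\starttyping
+local a = 2 * (1/2) -- float 1.0 (a division always produces a float)
+local b = 2^6       -- float 64.0 (a power does too)
+local c = 129 // 2  -- integer 64 (floor division of two integers)
+
+print(math.type(a), math.type(b), math.type(c))
+-- float   float   integer
+
+-- a float with an integral value can be turned into an integer,
+-- otherwise we get nil and can decide to round ourselves:
+print(math.tointeger(b))   -- 64
+print(math.tointeger(2.5)) -- nil
+print(string.format("%d", math.tointeger(b) or math.floor(b)))
+\stoptyping
+
+Of course in practice one will wrap such checks in a small helper instead of
+sprinkling them all over the code.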
+ +At any rate, once we could process a file, two documents were chosen for a +performance test. Some experiments with loops and casts had demonstrated that we +could expect a small performance hit and indeed, this was the case. Processing +the \LUATEX\ manual takes 10.7 seconds with 5.2 on my 5-year-old laptop and 11.6 +seconds with 5.3. If we consider that \CONTEXT\ spends 50\% of its time in \LUA, +then we see a 20\% performance penalty. Processing the \METAFUN\ manual (which +has lots of \METAPOST\ images) went from less than 20 seconds (\LUAJITTEX\ does +it in 16 seconds) up to more than 27 seconds. So there we lose more than 50\% on +the \LUA\ end. When we observed these kinds of differences, Luigi and I +immediately got into debugging mode, partly out of curiosity, but also because +consistent performance is important to~us. + +Because these numbers made no sense, we traced different sub-mechanisms and +eventually it became clear that the reason for the speed penalty was that the +core \typ {string.format} function was behaving quite badly in the \type {mingw} +cross-compiled binary, as seen by this test: + +\starttyping +local t = os.clock() +for i=1,1000*1000 do + -- local a = string.format("%.3f",1.23) + -- local b = string.format("%i",123) + local c = string.format("%s",123) +end +print(os.clock()-t) +\stoptyping + +\starttabulate[|c|c|c|c|c|] +\BC \BC lua 5.3 \BC lua 5.2 \BC texlua 5.3 \BC texlua 5.2 \BC \NR +\BC a \NC 0.43 \NC 0.54 \NC 3.71 (0.47) \NC 0.53 \NC \NR +\BC b \NC 0.18 \NC 0.24 \NC 3.78 (0.17) \NC 0.22 \NC \NR +\BC c \NC 0.26 \NC 0.68 \NC 3.67 (0.29) \NC 0.66 \NC \NR +\stoptabulate + +The 5.2 binaries perform the same but the 5.3 Lua binary greatly outperforms +\LUATEX, and so we had to figure out why. After all, all this integer +optimization could bring some gain! It took us a while to figure this out. The +numbers in parentheses are the results after fixing this. + +Because font internals are specified in integers one would expect a gain +in running: + +\starttyping +mtxrun --script font --reload force +\stoptyping + +and indeed that is the case. On my machine a scan results in 2561 registered +fonts from 4906 read files and with 5.2 that takes 9.1 seconds while 5.3 needs a +bit less: 8.6 seconds (with the bad format performance) and even less once that +was fixed. For a test: + +\starttyping +\setupbodyfont[modern] \tf \bf \it \bs +\setupbodyfont[pagella] \tf \bf \it \bs +\setupbodyfont[dejavu] \tf \bf \it \bs +\setupbodyfont[termes] \tf \bf \it \bs +\setupbodyfont[cambria] \tf \bf \it \bs +\starttext \stoptext +\stoptyping + +This code needs 30\% more runtime so the question is: how often do we call \type +{string.format} there? A first run (when we wipe the font cache) needs some +715,000 calls while successive runs need 115,000 calls so that slow down +definitely comes from the bad handling of \type {string.format}. When we drop in +a \LUA\ update or whatever other dependency we don't want this kind of impact. In +fact, when one uses external libraries that are or can be compiled under the +\TEX\ Live infrastructure and the impact would be such, it's bad advertising, +especially when one considers the occasional complaint about \LUATEX\ being +slower than other engines. + +The good news is that eventually Luigi was able to nail down this issue and we +got a binary that performed well. It looks like \LUA\ 5.3.4 (cross|)|compiles +badly with \GCC\ 5.3.0 and 6.3.0. 
+ +So in the end caching the fonts takes: + +\starttabulate[||c|c|] +\BC \BC caching \BC running \NC \NR +\BC 5.2 stock \NC 8.3 \NC 1.2 \NC \NR +\BC 5.3 bugged \NC 12.6 \NC 2.1 \NC \NR +\BC 5.3 fixed \NC 6.3 \NC 1.0 \NC \NR +\stoptabulate + +So indeed it looks like 5.3 is able to speed up \LUATEX\ a bit, given that one +integrates it in the right way! Using a recent compiler is needed too, although +one can wonder when a bad case will show up again. One can also wonder why such a +slowdown can mostly go unnoticed, because for sure \LUATEX\ is not the only +compiled program. + +The next examples are some edge cases that show you need to be aware +\startitemize[n,text,nostopper] + \startitem that an integer has its limits, \stopitem + \startitem that hexadecimal numbers are integers and \stopitem + \startitem that \LUA\ and \LUAJIT\ can be different in details. \stopitem +\stopitemize + +\starttabulate[||T|T|] +\NC \NC \tx print(0xFFFFFFFFFFFFFFFF) \NC \tx print(0x7FFFFFFFFFFFFFFF) \NC \NR +\HL +\BC lua 52 \NC 1.844674407371e+019 \NC 9.2233720368548e+018 \NC \NR +\BC luajit \NC 1.844674407371e+19 \NC 9.2233720368548e+18 \NC \NR +\BC lua 53 \NC -1 \NC 9223372036854775807 \NC \NR +\stoptabulate + +So, to summarize the process. A quick test was relatively easy: move 5.3 into the +code base, adapt a little bit of internals (there were some \LUATEX\ interfacing +bits where explicit rounding was needed), run tests and eventually fix some +issues related to the Makefile (compatibility) and \CCODE\ obscurities (the slow +\type {sprintf}). Adapting \CONTEXT\ was also not much work, and the test suite +uncovered some nasty side effects. For instance, the valid 5.2 solution: + +\starttyping +local s = string.format("%02X",u/1024) +local s = string.char (u/1024) +\stoptyping + +now has to become (both 5.2 and 5.3): + +\starttyping +local s = string.format("%02X",math.floor(u/1024)) +local s = string.char (math.floor(u/1024)) +\stoptyping + +or (both 5.2 and (emulated or real) 5.3): + +\starttyping +local s = string.format("%02X",bit32.rshift(u,10)) +local s = string.char (bit32.rshift(u,10)) +\stoptyping + +or (only 5.3): + +\starttyping +local s = string.format("%02X",u >> 10) +local s = string.char (u >> 10) +\stoptyping + +or (only 5.3): + +\starttyping +local s = string.format("%02X",u//1024) +local s = string.char (u//1024) +\stoptyping + +A conditional section like: + +\starttyping +if LUAVERSION >= 5.3 then + local s = string.format("%02X",u >> 10) + local s = string.char (u >> 10) +else + local s = string.format("%02X",bit32.rshift(u,10)) + local s = string.char (bit32.rshift(u,10)) +end +\stoptyping + +will fail because (of course) the 5.2 parser doesn't like that. In \CONTEXT\ we +have some experimental solutions for that but that is beyond this summary. + +In the process a few \UTF\ helpers were added to the string library so that we +have a common set for \LUAJIT\ and \LUA\ (the \type {utf8} library that was added +to 5.3 is not that important for \LUATEX). For now we keep the \type {bit32} +library on board. Of course we'll not mention all the details here. + +When we consider a gain in speed of 5-10\% with 5.3 that also means that the gain +of \LUAJITTEX\ compared to 5.2 becomes less. For instance in font processing both +engines now perform closer to the same. + +As I write this, we've just entered 2018 and after a few months of testing +\LUATEX\ with \LUA\ 5.3 we're confident that we can move the code to the +experimental branch.
This means that we will use this version in the \CONTEXT\ +distribution and likely will ship this version as 1.10 in 2019, where it becomes +the default. The 2018 version of \TEX~Live will have 1.07 with \LUA\ 5.2 while +intermediate versions of the \LUA\ 5.3 binary will end up on the \CONTEXT\ +garden, probably with number 1.08 and 1.09 (who knows what else we will add or +change in the meantime). + +\stopchapter + +\stopcomponent + +% collectgarbage("count") -- two return values in 2 diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-emoji.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-emoji.tex index 1f67cc52883..0a89727a19b 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-emoji.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-emoji.tex @@ -75,8 +75,8 @@ mentions twice that amount. Currently in \CONTEXT\ we resolve such combinations when requested.} so imagine what will happen in the future. But, instead of making a picture for each variant, a different solution has been chosen. For coloring this seguiemj font uses the (very flexible) stacking technology: a color -shape is an overlay of colored symbols. The colors are organized in pallets and -it's no big deal to add additional pallets if needed. Instead of adding +shape is an overlay of colored symbols. The colors are organized in palettes and +it's no big deal to add additional palettes if needed. Instead of adding pre|-|composed shapes (as is needed with bitmaps and \SVG) snippets are used to build alternative glyphs and these can be combined into new shapes by substitution and positioning (for that kerns, mark anchoring and distance @@ -186,11 +186,11 @@ account: scale too. \stopitem \startitem - How efficient is a shape constructed. In that respect a bitmap or \SVG\ image + How efficiently is a shape constructed? In that respect a bitmap or \SVG\ image is just one entity. \stopitem \startitem - How well can (semi) arbitrary combinations of emoji be provided. Here the + How well can a (semi)arbitrary combinations of emoji be provided? Here the glyph approach wins. \stopitem \startitem @@ -203,17 +203,17 @@ account: social political reasons. \stopitem \startitem - Are black and white shapes provided alongside color shapes. + Are black and white shapes provided alongside color shapes? \stopitem \stopitemize Maybe an \SVG\ or bitmap image can have a lot of detail compared to a stacked -glyph but, when we're just using pictographic representations, the later is the +glyph but, when we're just using pictographic representations, the latter is the best choice. When I was playing a bit with the skin tone variants and other combinations that should result in some composed shape, I used the \UNICODE\ test files but I got -the impression that there are some errors in the test suite, for instance with +the impression that there were some errors in the test suite, for instance with respect to modifiers. Maybe the fonts are just doing the wrong thing or maybe some implement these sequences a bit inconsistent. This will probably improve over time but the question is if we should intercept issues. 
I'm not in favour of @@ -409,7 +409,7 @@ In case you wonder how some of the details above were typeset, there is a module \NC \type {\ShowEmojiSnippets} \NC show the snippets of a given emoji \NC \NR \NC \type {\ShowEmojiSnippetsOverlay} \NC show the overlayed snippets of a given emoji \NC \NR \NC \type {\ShowEmojiGlyphs} \NC show the snippets of a typeset emoji \NC \NR -\NC \type {\ShowEmojiPalettes} \NC show the color pallets in the current font \NC \NR +\NC \type {\ShowEmojiPalettes} \NC show the color palletes in the current font \NC \NR \stoptabulate Examples of usage are: diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-execute.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-execute.tex new file mode 100644 index 00000000000..abb3b4d8a15 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-execute.tex @@ -0,0 +1,396 @@ +% language=uk + +\startcomponent onandon-execute + +\environment onandon-environment + +\startchapter[title={Executing \TEX}] + +Much of the \LUA\ code in \CONTEXT\ originates from experiments. When it survives +in the source code it is probably used, waiting to be used or kept for +educational purposes. The functionality that we describe here has already been +present for a while in \CONTEXT, but improved a little starting with \LUATEX\ +1.08 due to an extra helper. The code shown here is generic and not used in +\CONTEXT\ as such. + +Say that we have this code: + +\startbuffer +for i=1,10000 do + tex.sprint("1") + tex.sprint("2") + for i=1,3 do + tex.sprint("3") + tex.sprint("4") + tex.sprint("5") + end + tex.sprint("\\space") +end +\stopbuffer + +\typebuffer + +% \ctxluabuffer + +When we call \type {\directlua} with this snippet we get some 30 pages of \type +{12345345345}. The printed text is saved till the end of the \LUA\ call, so +basically we pipe some 170.000 characters to \TEX\ that get interpreted as one +paragraph. + +Now imagine this: + +\startbuffer +\setbox0\hbox{xxxxxxxxxxx} \number\wd0 +\stopbuffer + +\typebuffer + +which gives \getbuffer. If we check the box in \LUA, with: + +\startbuffer +tex.sprint(tex.box[0].width) +tex.sprint("\\enspace") +tex.sprint("\\setbox0\\hbox{!}") +tex.sprint(tex.box[0].width) +\stopbuffer + +\typebuffer + +the result is {\tttf \ctxluabuffer}, which is not what you would expect at first +sight. However, if you consider that we just pipe to a \TEX\ buffer that gets +parsed after the \LUA\ call, it will be clear that the reported width is the +width that we started with. It will work all right if we say: + +\startbuffer +tex.sprint(tex.box[0].width) +tex.sprint("\\enspace") +tex.sprint("\\setbox0\\hbox{!}") +tex.sprint("\\directlua{tex.sprint(tex.box[0].width)}") +\stopbuffer + +\typebuffer + +because now we get: {\tttf\ctxluabuffer}. It's not that complex to write some +support code that makes this more convenient. This can work out quite well but +there is a drawback. If we use this code: + +\startbuffer +print(status.input_ptr) +tex.sprint(tex.box[0].width) +tex.sprint("\\enspace") +tex.sprint("\\setbox0\\hbox{!}") +tex.sprint("\\directlua{print(status.input_ptr)\ + tex.sprint(tex.box[0].width)}") +\stopbuffer + +\typebuffer + +Here we get \type {6} and \type {7} reported. You can imagine that when a lot of +nested \type {\directlua} calls happen, we can get an overflow of the input level +or (depending on what we do) the input stack size. 
Ideally we want to do a \LUA\ +call, temporarily go to \TEX, return to \LUA, etc.\ without needing to worry +about nesting and possible crashes due to \LUA\ itself running into problems. One +charming solution is to use so|-|called coroutines: independent \LUA\ threads +that one can switch between --- you jump out from the current routine to another +and from there back to the current one. However, when we use \type {\directlua} +for that, we still have this nesting issue and what is worse, we keep nesting +function calls too. This can be compared to: + +\starttyping +\def\whatever{\ifdone\whatever\fi} +\stoptyping + +where at some point \type {\ifdone} is false so we quit. But we keep nesting when +the condition is met, so eventually we can end up with some nesting related +overflow. The following: + +\starttyping +\def\whatever{\ifdone\expandafter\whatever\fi} +\stoptyping + +is less likely to overflow because there we have tail recursion which basically +boils down to not nesting but continuing. Do we have something similar in +\LUATEX\ for \LUA ? Yes, we do. We can register a function, for instance: + +\starttyping +lua.get_functions_table()[1] = function() print("Hi there!") end +\stoptyping + +and call that one with: + +\starttyping +\luafunction 1 +\stoptyping + +This is a bit faster than calling a function like: + +\starttyping +\directlua{HiThere()} +\stoptyping + +which can also be achieved by + +\starttyping +\directlua{print("Hi there!")} +\stoptyping + +which sometimes can be more convenient. Anyway, a function call is what we can +use for our purpose as it doesn't involve interpretation and effectively behaves +like a tail call. The following snippet shows what we have in mind: + +\startbuffer[code] +local stepper = nil +local stack = { } +local fid = 0xFFFFFF +local goback = "\\luafunction" .. fid .. "\\relax" + +function tex.resume() + if coroutine.status(stepper) == "dead" then + stepper = table.remove(stack) + end + if stepper then + coroutine.resume(stepper) + end +end + +lua.get_functions_table()[fid] = tex.resume + +function tex.yield() + tex.sprint(goback) + coroutine.yield() + texio.closeinput() +end + +function tex.routine(f) + table.insert(stack,stepper) + stepper = coroutine.create(f) + tex.sprint(goback) +end +\stopbuffer + +\ctxluabuffer[code] + +\startbuffer[demo] +tex.routine(function() + tex.sprint(tex.box[0].width) + tex.sprint("\\enspace") + tex.sprint("\\setbox0\\hbox{!}") + tex.yield() + tex.sprint(tex.box[0].width) +end) +\stopbuffer + +\typebuffer[demo] +We start a routine, jump out to \TEX\ in the middle, come back when we're done +and continue. This gives us: \ctxluabuffer [demo], which is what we expect. + +\setbox0\hbox{xxxxxxxxxxx} + +\ctxluabuffer[demo] + +This mechanism permits efficient (nested) loops like: + +\startbuffer[demo] +tex.routine(function() + for i=1,10000 do + tex.sprint("1") + tex.yield() + tex.sprint("2") + tex.routine(function() + for i=1,3 do + tex.sprint("3") + tex.yield() + tex.sprint("4") + tex.yield() + tex.sprint("5") + end + end) + tex.sprint("\\space") + tex.yield() + end +end) +\stopbuffer + +\typebuffer[demo] + +We do create coroutines, go back and forwards between \LUA\ and \TEX, but avoid +memory being filled up with printed content. If we flush paragraphs (instead of +e.g.\ the space) then the main difference is that instead of a small delay due to +the loop unfolding in a large set of prints and accumulated content, we now get a +steady flushing and processing. 
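+
+For readers less familiar with coroutines, a minimal plain \LUA\ sketch (no \TEX\
+involved) of the switching that happens here: a coroutine runs until it yields,
+the caller then does its thing, and a resume continues the coroutine where it
+left off:
+
+\starttyping
+local worker = coroutine.create(function()
+    print("step one")   -- some work
+    coroutine.yield()   -- hand control back to the caller
+    print("step two")   -- continue where we left off
+end)
+
+coroutine.resume(worker)  -- prints: step one
+print("the caller does something in between")
+coroutine.resume(worker)  -- prints: step two
+\stoptyping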
+ +However, we can still have an overflow of input buffers because we still nest +them: the limitation at the \TEX\ end has moved to a limitation at the \LUA\ end. +How come? Here is the code that we use: + +\typebuffer[code] + +The \type {routine} creates a coroutine, and \type {yield} gives control to \TEX. +The \type {resume} is done at the \TEX\ end when we're finished there. In +practice this works fine and when you permit enough nesting and levels in \TEX\ +then you will not easily overflow. + +When I picked up this side project and wondered how to get around it, it suddenly +struck me that if we could just quit the current input level then nesting would +not be a problem. Adding a simple helper to the engine made that possible (of +course figuring it out took a while): + +\startbuffer[code] +local stepper = nil +local stack = { } +local fid = 0xFFFFFF +local goback = "\\luafunction" .. fid .. "\\relax" + +function tex.resume() + if coroutine.status(stepper) == "dead" then + stepper = table.remove(stack) + end + if stepper then + coroutine.resume(stepper) + end +end + +lua.get_functions_table()[fid] = tex.resume + +if texio.closeinput then + function tex.yield() + tex.sprint(goback) + coroutine.yield() + texio.closeinput() + end +else + function tex.yield() + tex.sprint(goback) + coroutine.yield() + end +end + +function tex.routine(f) + table.insert(stack,stepper) + stepper = coroutine.create(f) + tex.sprint(goback) +end +\stopbuffer + +\ctxluabuffer[code] + +\typebuffer[code] + +The trick is in \type {texio.closeinput}, a recent helper and one that should be +used with care. We assume that the user knows what she or he is doing. On an old +laptop with a i7-3840 processor running \WINDOWS\ 10 the following snippet takes +less than 0.35 seconds with \LUATEX\ and 0.26 seconds with \LUAJITTEX. + +\startbuffer[code] +tex.routine(function() + for i=1,10000 do + tex.sprint("\\setbox0\\hpack{x}") + tex.yield() + tex.sprint(tex.box[0].width) + tex.routine(function() + for i=1,3 do + tex.sprint("\\setbox0\\hpack{xx}") + tex.yield() + tex.sprint(tex.box[0].width) + end + end) + end +end) +\stopbuffer + +\typebuffer[code] + +% \testfeatureonce {1} {\setbox0\hpack{\ctxluabuffer[code]}} \elapsedtime + +Say that we run the bad snippet: + +\startbuffer[code] +for i=1,10000 do + tex.sprint("\\setbox0\\hpack{x}") + tex.sprint(tex.box[0].width) + for i=1,3 do + tex.sprint("\\setbox0\\hpack{xx}") + tex.sprint(tex.box[0].width) + end +end +\stopbuffer + +\typebuffer[code] + +% \testfeatureonce {1} {\setbox0\hpack{\ctxluabuffer[code]}} \elapsedtime + +This time we need 0.12 seconds in both engines. So what if we run this: + +\startbuffer[code] +\dorecurse{10000}{% + \setbox0\hpack{x} + \number\wd0 + \dorecurse{3}{% + \setbox0\hpack{xx} + \number\wd0 + }% +} +\stopbuffer + +\typebuffer[code] + +% \testfeatureonce {1} {\setbox0\hpack{\getbuffer[code]}} \elapsedtime + +Pure \TEX\ needs 0.30 seconds for both engines but there we lose 0.13 seconds on +the loop code. In the \LUA\ example where we yield, the loop code takes hardly +any time. As we need only 0.05 seconds more it demonstrates that when we use the +power of \LUA\ the performance hit of the switch is quite small: we yield 40.000 +times! In general, such differences are far exceeded by the overhead: the time +needed to typeset the content (which \type {\hpack} doesn't do), breaking +paragraphs into lines, constructing pages and other overhead involved in the run. 
+In \CONTEXT\ we use a slightly different variant which has 0.30 seconds more +overhead, but that is probably true for all \LUA\ usage in \CONTEXT, but again, +it disappears in other runtime. + +Here is another example: + +\startbuffer[code] +\def\TestWord#1% + {\directlua{ + tex.routine(function() + tex.sprint("\\setbox0\\hbox{\\tttf #1}") + tex.yield() + tex.sprint(math.round(100 * tex.box[0].width/tex.hsize)) + tex.sprint(" percent of the hsize: ") + tex.sprint("\\box0") + end) + }} +\stopbuffer + +\typebuffer[code] \getbuffer[code] + +\startbuffer +The width of next word is \TestWord {inline}! +\stopbuffer + +\typebuffer \getbuffer + +Now, in order to stay realistic, this macro can also be defined as: + +\startbuffer[code] +\def\TestWord#1% + {\setbox0\hbox{\tttf #1}% + \directlua{ + tex.sprint(math.round(100 * tex.box[0].width/tex.hsize)) + } % + percent of the hsize: \box0\relax} +\stopbuffer + +\typebuffer[code] + +We get the same result: \quotation {\getbuffer}. + +We have been using a \LUA|-|\TEX\ mix for over a decade now in \CONTEXT, and have +never really needed this mixed model. There are a few places where we could +(have) benefitted from it and we might use it in a few places, but so far we have +done fine without it. In fact, in most cases typesetting can be done fine at the +\TEX\ end. It's all a matter of imagination. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-expansion.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-expansion.tex new file mode 100644 index 00000000000..73a0b495374 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-expansion.tex @@ -0,0 +1,307 @@ +% language=uk + +\startcomponent onandon-expansion + +\environment onandon-environment + +\startchapter[title={More (new) expansion trickery}] + +Contrary to what one might expect when looking at macro definitions, \TEX\ is +pretty efficient. Occasionally I wonder if some extra built in functionality +could help me write better code but when you program with a bit care there is +often not much to gain in terms of tokens and performance. \footnote {The long +trip to the yearly Bacho\TeX\ meeting is always a good opportunity to ponder +\TEX\ and its features. The new functionality discussed here is a side effect of +the most recent trip.} Also, some possible extensions probably only would be +applied a few times which makes them low priority. When you look at the +extensions brought by \ETEX\ the number is not that large, and \LUATEX\ only +added a few that deal with the language, for instance \tex {expanded} which is +like an \tex {edef} without the defining a macro and acts on a token list wrapped +in (normally) curly braces. Just as reference we mention some of the expansion +related helpers. + +\starttabulate[|l|l|p|] +\BC command \BC argument \BC + comment +\NC \NR +\HL +\NC \tex {expandafter} \NC \type {token} \NC + The token after the next token gets expanded (one level only). In tricky + \TEX\ code you can often see multiple such commands in sequence which makes a + nice puzzle. +\NC \NR +\NC \tex {noexpand} \NC \type {token} \NC + The token after this command is not expanded in the context of expansion. +\NC \NR +\NC \tex {expanded} \NC \type {{tokens}} \NC + The given token list is expanded. This command showed up early in \LUATEX\ + development and was taken from \ETEX\ follow|-|ups. 
I have mails from 2011 + mentioning its presence in \PDFTEX\ 1.50 (which was targeted in 2008) but + somehow it never ended up in a production version at that time (and we're + still not at that version). In \CONTEXT\ we already had a command with that + name so there we use \tex {normalexpanded}. Users normally can just use the + \CONTEXT\ variant of \type {\expanded}. +\NC \NR +\NC \tex {unexpanded} \NC \type {{tokens}} \NC + The given token list is hidden from expansion. Again, in \CONTEXT\ we already + had a command serving as prefix for definitions so instead we use \tex + {normalunexpanded}. In the core of \CONTEXT\ this new \ETEX\ command is hardly + used. +\NC \NR +\NC \tex {detokenize} \NC \type {{tokens}} \NC + The given tokenlist becomes (basically) verbatim \TEX\ code. We had something + like that in \CONTEXT\ but have no nameclash. It is used in a few places. It's + also an \ETEX\ command. +\NC \NR +\NC \tex {scantokens} \NC \type {{tokens}} \NC + This primitive interprets its argument as a pseudo file. We don't really use it. +\NC \NR % +\NC \tex {scantextokens} \NC \type {{tokens}} \NC + This \LUATEX\ primitive does the same but has no end|-|of|-|file side + effects. This one is also not really used in \CONTEXT. +\NC \NR +\NC \tex {protected} \NC \type {\.def} \NC + The definition following this prefix, introduced in \ETEX, is unexpandable in + the context of expansion. We already used such a command in \CONTEXT\ but + with a completely different meaning so use \tex {normalprotected} as prefix + or \tex {unexpanded} which is an alias. +\NC \NR +\stoptabulate + +Here I will present two other extensions in \LUATEX\ that can come in handy, and +they are there simply because their effect can hardly be realized otherwise +(never say never in \TEX). One has to do with immediately applying a definition, +the other with user defined conditions. The first one relates directly to +expansion, the second one concerns conditions and relates more to parsing +branches which on purpose avoids expansion. + +For the first one I use some silly examples. I must admit that although I can +envision useful application, I really need to go over the large amount of +\CONTEXT\ source code to really find a place where it is making things better. +Take the following definitions: + +\startbuffer +\newcount\NumberOfCalls + +\def\TestMe{\advance\NumberOfCalls1 } + +\edef\Tested{\TestMe foo:\the\NumberOfCalls} +\edef\Tested{\TestMe foo:\the\NumberOfCalls} +\edef\Tested{\TestMe foo:\the\NumberOfCalls} + +\meaning\Tested +\stopbuffer + +\typebuffer + +The result is a macro \tex {Tested} that not only has the unexpanded incrementing +code in its body but also hasn't done any advancing: + +\getbuffer + +Of course when you're typesetting something, this kind of expansion normally is +not needed. Instead of the above definition we can define \tex {TestMe} in a way +that expands the assignment immediately. You need of course to be aware of +preventing look ahead interference by using a space or \tex {relax} (often an +expression works better as it doesn't leave an \tex {relax}). 
+ +\startbuffer +\def\TestMe{\immediateassignment\advance\NumberOfCalls1 } + +\edef\Tested{\TestMe bar:\the\NumberOfCalls} +\edef\Tested{\TestMe bar:\the\NumberOfCalls} +\edef\Tested{\TestMe bar:\the\NumberOfCalls} + +\meaning\Tested +\stopbuffer + +\typebuffer + +This time the counter gets updated and we don't see interference in the resulting +\tex {Tested} macro: + +\getbuffer + +Here is a somewhat silly example of an expanded comparison of two \quote +{strings}: + +\startbuffer +\def\expandeddoifelse#1#2#3#4% + {\immediateassignment\edef\tempa{#1}% + \immediateassignment\edef\tempb{#2}% + \ifx\tempa\tempb + \immediateassignment\def\next{#3}% + \else + \immediateassignment\def\next{#4}% + \fi + \next} + +\edef\Tested + {(\expandeddoifelse{abc}{def}{yes}{nop}/% + \expandeddoifelse{abc}{abc}{yes}{nop})} + +\meaning\Tested +\stopbuffer + +\typebuffer + +I don't remember many cases where I needed such an expanded comparison. We have a +variant in \CONTEXT\ that uses \LUA\ but that one is not really used in the core. +Anyway, the above code gives: + +\getbuffer + +You can do the same assignments as in preambles of \tex {halign} and after \tex +{accent} which means that assignments to box registers are blocked (boxing +involves grouping and delayed assignments and so). The error you will get when +you use a non||assignment command refers to a prefix, because internally such +commands are called prefixed commands. Leading spaces and \tex {relax} are +ignored. + +In addition to this one|-|time immediate assignment a pseudo token list variant +is provided, so the above could be rewritten to: + +\starttyping +\def\expandeddoifelse#1#2#3#4% + {\immediateassigned { + \edef\tempa{#1} + \edef\tempb{#2} + }% + \ifx\tempa\tempb + \immediateassignment\def\next{#3}% + \else + \immediateassignment\def\next{#4}% + \fi + \next} +\stoptyping + +While \tex {expanded} first builds a token lists that then gets used, the \tex +{immediateassigned} primitive just walls over the list delimited by curly braces. + +A next extension concerns conditions. If you have done a bit of extensive \TEX\ +programming you know that nested conditions need to be properly constructed in +for instance macro bodies. This is because (for good reason) \TEX\ goes into a +fast scanning mode when there is a match and it has to skip the \tex {else} upto +\tex {fi} branch. In order to do that properly a nested \tex {if} in there needs +to have a matching \tex {fi}. + +In practice this is no real problem and careful coding will never give a problem +here: you can either hide nested code in a macro or somehow jump over nested +conditions if really needed. Actually you only need to care when you pickup a +token inside the branch because likely you don't want to pick up for instance a +\tex {fi} but something that comes after it. Say that we have a sane conditional +setup like this: + +\starttyping +\newif\iffoo \foofalse +\newif\ifbar \bartrue + +\ifoo + \ifbar \else \fi +\else + \ifbar \else \fi +\fi +\stoptyping + +Here the \tex {iffoo} and \tex {ifbar} need to be equivalent to \tex {iftrue} or +\tex {iffalse} in order to succeed well and that is what for instance \tex +{footrue} and \tex {foofalse} will do: change the meaning of \tex {iffoo}. + +But imagine that you want something more complex. You want for instance to let +\tex {ifbar} do some calculations. In that case you want it to behave a bit like +what a so called \type {vardef} in \METAPOST\ does: the end result is what +matters. 
Now, because \TEX\ macros often are a complex mix of expandable and +non|-|expandable this is not that trivial. One solution is a dedicated definer, +say \tex {cdef} for defining a macro with conditional properties. I actually +implemented such a definer a few years ago but left it so long in a folder with +ideas that I only found it back after I had come up with another solution. It was +probably proof that it was not that good an idea. + +The solution implemented in \LUATEX\ is just a special case of a test: \tex +{ifcondition}. When looking at the next example, keep in mind that from the +perspective of \TEX's scanner it only needs to know if something is a token that +does some test and has a matching \tex {fi}. For that purpose you can consider +\tex {ifcondition} to be \tex {iftrue}. When \TEX\ actually wants to do a test, +which is the case in the true branch, then it will simply ignore this \tex +{ifcondition} primitive and expands what comes after it (which is \TEX's natural +behaviour). Effectively \tex {ifcondition} has no meaning except from when it has +to be skipped, in which case it's a token flagged as \tex {if} kind of command. + +\starttyping +\unexpanded\def\something#1#2% + {\edef\tempa{#1}% + \edef\tempb{#2} + \ifx\tempa\tempb} + +\ifcondition\something{a}{b}% + \ifcondition\something{a}{a}% + true 1 + \else + false 1 + \fi +\else + \ifcondition\something{a}{a}% + true 2 + \else + false 2 + \fi +\fi +\stoptyping + +Wrapped in a macro you can actually make this fully expandable when you use the +previously mentioned immediate assignment. Here is another example: + +\starttyping +\unexpanded\def\onoddpage + {\ifodd\count0 } + +\ifcondition\onoddpage odd \else even \fi page +\stoptyping + +The previously defined comparison macro can now be rewritten as: + +\starttyping +\def\equaltokens#1#2% + {\immediateassignment\edef\tempa{#1}% + \immediateassignment\edef\tempb{#2}% + \ifx\tempa\tempb} + +\def\expandeddoifelse#1#2#3#4% + {\ifcondition\equaltokens{#1}{#2}% + \immediateassignment\def\next{#3}% + \else + \immediateassignment\def\next{#4}% + \fi + \next} +\stoptyping + +When used this way it will of course also work without the \tex {ifcondition} but +when used nested it can be like this. This last example also demonstrates that +this feature probably only makes sense in more complicated cases where more work +is done in the \tex {onoddpage} or \tex {equaltokens} macro. And again, I am not +sure if for instance in \CONTEXT\ I have a real use for it because there are only +a few cases where nesting like this could benefit. I did some tests with a low +level macro where it made the code look nicer. It was actually a bit faster but +most core macros are not called that often. Although the overhead of this feature +can be neglected, performance should not be the reason for using it: in \CONTEXT\ +for instance one can often only measure such possible speed|-|ups on macros that +are called tens or hundreds of thousands of times and that seldom happens in a +real run end even then a change from say 0.827 seconds to 0.815 seconds for 10K +calls of a complex case is just noise as the opposite can also happen. + +Although not strictly necessary these extensions might make some code look better +so that is why they officially will be available in the 1.09 release of \LUATEX\ +in fall 2018. It might eventually inspire me to go over some code and see where I +can improve the look and feel. 
+ +The last few years I have implemented some more ideas as local experiments, for +instance \tex {futurelet} variant or a simple (one level) \tex {expand}, but in +the end rejected them because there is no real benefit in them (no better looking +code, no gain in performance, hard to document, possible side effects, etc.), so +it is very unlikely that we will have more extensions like this. After all, we +could do more than 40 years without them. Although \unknown\ who knows what we +will provide in \LUATEX\ version~2. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-fences.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-fences.tex new file mode 100644 index 00000000000..133b9bfeb32 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-fences.tex @@ -0,0 +1,499 @@ +% language=uk + +\startcomponent onandon-fences + +\environment onandon-environment + +% avoid context defaults: +% +% \mathitalicsmode \plusone % default in context +% \mathdelimitersmode\plusseven % optional in context + +\def\UseMode#1{\appendtoks\mathdelimitersmode#1\to\everymathematics} + +\startchapter[title={Tricky fences}] + +Occasionally one of my colleagues notices some suboptimal rendering and asks me +to have a look at it. Now, one can argue about \quotation {what is right} and +indeed there is not always a best answer to it. Such questions can even be a +nuisance; let's think of the following scenario. You have a project where \TEX\ +is practically the only solution. Let it be an \XML\ rendering project, which +means that there are some boundary conditions. Speaking in 2017 we find that in +most cases a project starts out with the assumption that everything is possible. + +Often such a project starts with a folio in mind and therefore by decent tagging +to match the educational and esthetic design. When rendering is mostly automatic +and concerns too many (variants) to check all rendering, some safeguards are used +(an example will be given below). Then different authors, editors and designers +come into play and their expectations, also about what is best, often conflict. +Add to that rendering for the web, and devices and additional limitations show +up: features get dropped and even more cases need to be compensated (the quality +rules for paper are often much higher). But, all that defeats the earlier +attempts to do well because suddenly it has to match the lesser format. This in +turn makes investing in improving rendering very inefficient (read: a bottomless +pit because it never gets paid and there is no way to gain back the investment). +Quite often it is spacing that triggers discussions and questions what rendering +is best. And inconsistency dominates these questions. + +So, in case you wonder why I bother with subtle aspects of rendering as discussed +below, the answer is that it is not so much professional demand but users (like +my colleagues or those on the mailing lists) that make me look into it and often +something that looks trivial takes days to sort out (even for someone who knows +his way around the macro language, fonts and the inner working of the engine). +And one can be sure that more cases will pop up. + +All this being said, let's move on to a recent example. In \CONTEXT\ we support +\MATHML\ although in practice we're forced to a mix of that standard and +\ASCIIMATH. When we're lucky, we even get a mix with good old \TEX-encoded math. 
+One problem with an automated flow and processing (other than raw \TEX) is that +one can get anything and therefore we need to play safe. This means for instance +that you can get input like this: + +\starttyping +f(x) + f(1/x) +\stoptyping + +or in more structured \TEX\ speak: + +\startbuffer +$f(x) + f(\frac{1}{x})$ +\stopbuffer + +\typebuffer + +Using \TeX\ Gyre Pagella, this renders as: {\UseMode\zerocount\inlinebuffer}, and +when seeing this a \TEX\ user will revert to: + +\startbuffer +$f(x) + f\left(\frac{1}{x}\right)$ +\stopbuffer + +\typebuffer + +which gives: {\UseMode\zerocount \inlinebuffer}. So, in order to be robust we can +always use the \type {\left} and \type {\right} commands, can't we? + +\startbuffer +$f(x) + f\left(x\right)$ +\stopbuffer + +\typebuffer + +which gives {\UseMode\zerocount \inlinebuffer}, but let's blow up this result a +bit showing some additional tracing from left to right, now in Latin Modern: + +\startbuffer[blownup] +\startcombination[nx=3,ny=2,after=\vskip3mm] + {\scale[scale=4000]{\hbox{$f(x)$}}} + {just characters} + {\scale[scale=4000]{\ruledhbox{\showglyphs \showfontkerns \showfontitalics$f(x)$}}} + {just characters} + {\scale[scale=4000]{\ruledhbox{\showglyphs \showfontkerns \showfontitalics \showmakeup$f(x)$}}} + {just characters} + {\scale[scale=4000]{\hbox{$f\left(x\right)$}}} + {using delimiters} + {\scale[scale=4000]{\ruledhbox{\showglyphs \showfontkerns \showfontitalics$f\left(x\right)$}}} + {using delimiters} + {\scale[scale=4000]{\ruledhbox{\showglyphs \showfontkerns \showfontitalics \showmakeup$f\left(x\right)$}}} + {using delimiters} +\stopcombination +\stopbuffer + +\startlinecorrection +\UseMode\zerocount +\switchtobodyfont[modern]\getbuffer[blownup] +\stoplinecorrection + +When we visualize the glyphs and kerns we see that there's a space instead of a +kern when we use delimiters. This is because the delimited sequence is processed +as a subformula and injected as a so|-|called inner object and as such gets +spaced according to the ordinal (for the $f$) and inner (\quotation {fenced} with +delimiters $x$) spacing rules. Such a difference normally will go unnoticed but +as we mentioned authors, editors and designers being involved, there's a good +chance that at some point one will magnify a \PDF\ preview and suddenly notice +that the difference between the $f$ and $($ is a bit on the large side for simple +unstacked cases, something that in print is likely to go unnoticed. So, even when +we don't know how to solve this, we do need to have an answer ready. + +When I was confronted by this example of rendering I started wondering if there +was a way out. It makes no sense to hard code a negative space before a fenced +subformula because sometimes you don't want that, especially not when there's +nothing before it. So, after some messing around I decided to have a look at the +engine instead. I wondered if we could just give the non|-|scaled fence case the +same treatment as the character sequence. + +Unfortunately here we run into the somewhat complex way the rendering takes +place. Keep in mind that it is quite natural from the perspective of \TEX\ +because normally a user will explicitly use \type {\left} and \type {\right} as +needed, while in our case the fact that we automate and therefore want a generic +solution interferes (as usual in such cases). 
+ +Once read in the sequence \type {f(x)} can be represented as a list: + +\starttyping +list = { + { + id = "noad", subtype = "ord", nucleus = { + { + id = "mathchar", fam = 0, char = "U+00066", + }, + }, + }, + { + id = "noad", subtype = "open", nucleus = { + { + id = "mathchar", fam = 0, char = "U+00028", + }, + }, + }, + { + id = "noad", subtype = "ord", nucleus = { + { + id = "mathchar", fam = 0, char = "U+00078", + }, + }, + }, + { + id = "noad", subtype = "close", nucleus = { + { + id = "mathchar", fam = 0, char = "U+00029", + }, + }, + }, +} +\stoptyping + +The sequence \type {f \left( x \right)} is also a list but now it is a tree (we +leave out some unset keys): + +\starttyping +list = { + { + id = "noad", subtype = "ord", nucleus = { + { + id = "mathchar", fam = 0, char = "U+00066", + }, + }, + }, + { + id = "noad", subtype = "inner", nucleus = { + { + id = "submlist", head = { + { + id = "fence", subtype = "left", delim = { + { + id = "delim", small_fam = 0, small_char = "U+00028", + }, + }, + }, + { + id = "noad", subtype = "ord", nucleus = { + { + id = "mathchar", fam = 0, char = "U+00078", + }, + }, + }, + { + id = "fence", subtype = "right", delim = { + { + id = "delim", small_fam = 0, small_char = "U+00029", + }, + }, + }, + }, + }, + }, + }, +} +\stoptyping + +So, the formula \type {f(x)} is just four characters and stays that way, but with +some inter|-|character spacing applied according to the rules of \TEX\ math. The +sequence \typ {f \left( x \right)} however becomes two components: the \type {f} +is an ordinal noad,\footnote {Noads are the mathematical building blocks. +Eventually they become nodes, the building blocks of paragraphs and boxed +material.} and \typ {\left( x \right)} becomes an inner noad with a list as a +nucleus, which gets processed independently. The way the code is written this is +what (roughly) happens: + +\startitemize +\startitem + A formula starts; normally this is triggered by one or two dollar signs. +\stopitem +\startitem + The \type {f} becomes an ordinal noad and \TEX\ goes~on. +\stopitem +\startitem + A fence is seen with a left delimiter and an inner noad is injected. +\stopitem +\startitem + That noad has a sub|-|math list that takes the left delimiter up to a + matching right one. +\stopitem +\startitem + When all is scanned a routine is called that turns a list of math noads into + a list of nodes. +\stopitem +\startitem + So, we start at the beginning, the ordinal \type {f}. +\stopitem +\startitem + Before moving on a check happens if this character needs to be kerned with + another (but here we have an ordinal|-|inner combination). +\stopitem +\startitem + Then we encounter the subformula (including fences) which triggers a nested + call to the math typesetter. +\stopitem +\startitem + The result eventually gets packaged into a hlist and we're back one level up + (here after the ordinal \type {f}). +\stopitem +\startitem + Processing a list happens in two passes and, to cut it short, it's the second + pass that deals with choosing fences and spacing. +\stopitem +\startitem + Each time when a (sub)list is processed a second pass over that list + happens. +\stopitem +\startitem + So, now \TEX\ will inject the right spaces between pairs of noads. +\stopitem +\startitem + In our case that is between an ordinal and an inner noad, which is quite + different from a sequence of ordinals. +\stopitem +\stopitemize + +It's these fences that demand a two-pass approach because we need to know the +height and depth of the subformula. 
Anyway, do you see the complication? In our +inner formula the fences are not scaled, but this is not communicated back in the +sense that the inner noad can become an ordinal one, as in the simple \type {f(} +pair. The information is not only lost, it is not even considered useful and the +only way to somehow bubble it up in the processing so that it can be used in the +spacing requires an extension. And even then we have a problem: the kerning that +we see between \type {f(} is also lost. It must be noted that this kerning is +optional and triggered by setting \type {\mathitalicsmode=1}. One reason for this +is that fonts approach italic correction differently, and cheat with the +combination of natural width and italic correction. + +Now, because such a workaround is definitely conflicting with the inner workings +of \TEX, our experimenting demands another variable be created: \type +{\mathdelimitersmode}. It might be a prelude to more manipulations but for now we +stick to this one case. How messy it really is can be demonstrated when we render +our example with Cambria. + +\startlinecorrection +\UseMode\zerocount +\switchtobodyfont[cambria]\getbuffer[blownup] +\stoplinecorrection + +If you look closely you will notice that the parenthesis are moved up a bit. Also +notice the more accurate bounding boxes. Just to be sure we also show Pagella: + +\startlinecorrection +\UseMode\zerocount +\switchtobodyfont[pagella]\getbuffer[blownup] +\stoplinecorrection + +When we really want the unscaled variant to be somewhat compatible with the +fenced one we now need to take into account: + +\startitemize[packed] +\startitem + the optional axis|-|and|-|height|/|depth related shift of the fence (bit 1) +\stopitem +\startitem + the optional kern between characters (bit 2) +\stopitem +\startitem + the optional space between math objects (bit 4) +\stopitem +\stopitemize + +Each option can be set (which is handy for testing) but here we will set them +all, so, when \type {\mathdelimitersmode=7}, we want cambria to come out as +follows: + +\startlinecorrection +\UseMode\plusseven +\switchtobodyfont[cambria]\getbuffer[blownup] +\stoplinecorrection + +When this mode is set the following happens: + +\startitemize +\startitem + We keep track of the scaling and when we use the normal size this is + registered in the noad (we had space in the data structure for that). +\stopitem +\startitem + This information is picked up by the caller of the routine that does the + subformula and stored in the (parent) inner noad (again, we had space for + that). +\stopitem +\startitem + Kerns between a character (ordinal) and subformula (inner) are kept, + which can be bad for other cases but probably less than what we try + to solve here. +\stopitem +\startitem + When the fences are unscaled the inner property temporarily becomes + an ordinal one when we apply the inter|-|noad spacing. +\stopitem +\stopitemize + +Hopefully this is good enough but anything more fancy would demand drastic +changes in one of the most sensitive mechanisms of \TEX. It might not always work +out right, so for now I consider it an experiment, which means that it can be +kept around, rejected or improved. + +In case one wonders if such an extension is truly needed, one should also take +into account that automated typesetting (also of math) is probably one of the +areas where \TEX\ can shine for a while. 
And while we can deal with much by using +\LUA, this is one of the cases where the interwoven and integrated parsing, +converting and rendering of the math machinery makes it hard. It also fits into a +further opening up of the inner working by modes. + +\startbuffer[simple] +\dontleavehmode +\scale + [scale=3000] + {\ruledhbox + {\showglyphs + \showfontkerns + \showfontitalics + $f(x)$}} +\stopbuffer + +\startbuffer[fenced] +\dontleavehmode +\scale + [scale=3000] + {\ruledhbox + {\showglyphs + \showfontkerns + \showfontitalics + $f\left(x\right)$}} +\stopbuffer + +\def\TestMe#1% + {\bTR + \bTD[width=35mm,align=middle,toffset=3mm] \switchtobodyfont[#1]\UseMode\zerocount\getbuffer[simple] \eTD + \bTD[width=35mm,align=middle,toffset=3mm] \switchtobodyfont[#1]\UseMode\zerocount\getbuffer[fenced] \eTD + \bTD[width=35mm,align=middle,toffset=3mm] \switchtobodyfont[#1]\UseMode\plusseven\getbuffer[simple] \eTD + \bTD[width=35mm,align=middle,toffset=3mm] \switchtobodyfont[#1]\UseMode\plusseven\getbuffer[fenced] \eTD + \eTR + \bTR + \bTD[align=middle,nx=2] \type{\mathdelimitersmode=0} \eTD + \bTD[align=middle,nx=2] \type{\mathdelimitersmode=7} \eTD + \eTR + \bTR + \bTD[align=middle,nx=4] \switchtobodyfont[#1]\bf #1 \eTD + \eTR} + +\startbuffer +\bTABLE[frame=off] + \TestMe{modern} + \TestMe{cambria} + \TestMe{pagella} +\eTABLE +\stopbuffer + +Another objection to such a solution can be that we should not alter the engine +too much. However, fences already are an exception and treated specially (tests +and jumps in the program) so adding this fits reasonably well into that part of +the design. + +In the following examples we demonstrate the results for Latin Modern, Cambria +and Pagella when \type {\mathdelimitersmode} is set to zero or one. First we show +the case where \type {\mathitalicsmode} is disabled: + +\startlinecorrection + \mathitalicsmode\zerocount\getbuffer +\stoplinecorrection + +When we enable \type {\mathitalicsmode} we get: + +\startlinecorrection + \mathitalicsmode\plusone \getbuffer +\stoplinecorrection + +So is this all worth the effort? I don't know, but at least I got the picture and +hopefully now you have too. It might also lead to some more modes in future +versions of \LUATEX. + +\startbuffer[simple] +\dontleavehmode +\scale + [scale=2000] + {\ruledhbox + {\showglyphs + \showfontkerns + \showfontitalics + $f(x)$}} +\stopbuffer + +\startbuffer[fenced] +\dontleavehmode +\scale + [scale=2000] + {\ruledhbox + {\showglyphs + \showfontkerns + \showfontitalics + $f\left(x\right)$}} +\stopbuffer + +\def\TestMe#1% + {\bTR + \dostepwiserecurse{0}{7}{1}{ + \bTD[align=middle,toffset=3mm] \switchtobodyfont[#1]\UseMode##1\getbuffer[simple] \eTD + } + \eTR + \bTR + \dostepwiserecurse{0}{7}{1}{ + \bTD[align=middle,toffset=3mm] \switchtobodyfont[#1]\UseMode##1\getbuffer[fenced] \eTD + } + \eTR + \bTR + \dostepwiserecurse{0}{7}{1}{ + \bTD[align=middle] + \tttf + \ifcase##1\relax + \or ns % 1 + \or it % 2 + \or ns it % 3 + \or or % 4 + \or ns or % 5 + \or it or % 6 + \or ns it or % 7 + \fi + \eTD + } + \eTR + \bTR + \bTD[align=middle,nx=8] \switchtobodyfont[#1]\bf #1 \eTD + \eTR} + +\startbuffer +\bTABLE[frame=off,distance=2mm] + \TestMe{modern} + \TestMe{cambria} + \TestMe{pagella} +\eTABLE +\stopbuffer + +\startlinecorrection +\getbuffer +\stoplinecorrection + +In \CONTEXT, a regular document can specify \type {\setupmathfences +[method=auto]}, but in \MATHML\ or \ASCIIMATH\ this feature is enabled by default +(so that we can test it). 
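+
+For those who want to peek at such noad lists from the \LUA\ end, here is a
+minimal sketch (for plain \LUATEX; in \CONTEXT\ the callbacks are already
+managed, so there one would hook into the existing handlers instead) that
+reports the top|-|level noads the math typesetter gets to see before they are
+converted:
+
+\starttyping
+callback.register("mlist_to_hlist", function(head, style, penalties)
+    for n in node.traverse(head) do
+        -- the node type (noad, fence, ...) and its numeric subtype
+        print(node.type(n.id), n.subtype)
+    end
+    -- hand the list over to the built-in math typesetter
+    return node.mlist_to_hlist(head, style, penalties)
+end)
+\stoptyping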
+ +We end with a summary of all the modes (assuming italics mode is enabled) in the +table below. + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-media.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-media.tex new file mode 100644 index 00000000000..f44c3bb1984 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-media.tex @@ -0,0 +1,220 @@ +% language=uk + +\startcomponent onandon-media + +\environment onandon-environment + +\startchapter[title={The state of \PDF}] + +\startsection[title={Introduction}] + +Below I will spend some words on the state of \PDF\ in \CONTEXT\ mid 2018. These +are just some reflections, not an in|-|depth discussion of the state of affairs. I +sometimes feel the need to wrap up. + +\stopsection + +\startsection[title={Media}] + +For over two decades \CONTEXT\ has supported fancy \PDF\ features like movies and +sound. In fact, as happens more, the flexibility of \TEX\ made it possible to +support such features right after they became available, often even before other +applications supported them. + +The first approach to support such media clips was relatively easy. In \PDF\ one +has the text flow, resulting from the typesetting process, either or not enhanced +with images that are referred to from the flow. In that respect images are an +integral part of \PDF. On a separate layer there can be annotations. There are +many kinds and they are originally a sort of extension mechanism that permits +plugins to add features to a document. Examples of this are hyperlinks and the +already mentioned media clips. Video was supported by the quicktime movie plugin. +As far as I know in the meantime that plugin has been dropped as official part of +Acrobat but one can still plug it in. + +Later an extra mechanism was introduced, tagged renditions. It separates the +views from the media and was more complex. When I first played with it, quite +some media were possible, and I made a demo that could handle mov, mp3, smi and +swf files. But last time I checked none of these really worked, apart from the +swf file. One gets pop|-|ups for missing viewers and a look at the reader +preferences makes one pessimistic about future support anyway. But one should be +able to set up a list of useable players with this mechanism (although only an +Adobe one seems to be okay so we're back to where we started). + +At some point support for u3d was added. Interesting is that there is quite some +infrastructure described in the \PDF\ standard. Also something called rich media +was introduced and that should replace the former video and audio annotations +(definitely in \PDF\ version 2) and probably some day the renditions will no +longer be supported either. Open source \PDF\ viewers just stuck to supporting +text and static images. + +Now, do these rich media work well? Hardly. The standard leaves it to the viewer +and provides ways to define viewers (although it's unclear to me how that works +out in practice.) Basically in \PDF\ version 2 there is no native support for +simple straightforward video. One has to construct a complex set of related +annotations. + +One can give arguments (like security risks) for not supporting all these fancy +features but then why make rich media part of the specification at all? 
Browsers +beat \PDF\ viewers in showing media and as browsers can operate in kiosk mode I +suppose that it's not that hard to delegate showing whatever you want in an +embedded window in the \PDF\ viewer. Or why not simply support videolan out of +the box. All we need is the ability to view movies and control them (play, pause, +stop, rewind, etc). Where \HTML\ evolved towards easier media support, \PDF\ +evolved to more obscurity. + +So, how bad is it really? There are \PDF\ files around that have video! Indeed, +but the way they're supposed to do this is as follows: currently one actually has +to embed a shockwave video player (a user interface around something built|-|in) +and let that player show for instance an mp4 movie. However, support for +shockwave (flash) will be dropped in 2020 and that renders documents that use it +obsolete. This even makes one wonder about \JAVASCRIPT\ and widgets like form +fields, also a rather moving and somewhat unstable target. (I must have a +document being a calculator somewhere made in the previous century, in the early +days of \PDF.) + +I think that the plugin model failed already rather early in the \PDF\ history if +only because it made no sense to develop them when in a next version of Acrobat +the functionality was copied in the core. In a similar fashion \JAVASCRIPT\ +support seems to have stalled. + +Unfortunately the open source viewers never catched on with media, forms and +\JAVASCRIPT\ and therefore there has been no momentum created to keep things +supported. It all makes efforts spent on supporting this kind of \PDF\ features a +waste of time. It also makes one careful in using them: it only works on the +short term. + +Get me right, I'm not talking of complex media like 3d or animations but of +straightforward video support. I understand that the rich media framework tries +to cover complex cases but it's simple cases that carry the format. On the other +hand, one can wonder why the \PDF\ format makes it possible to specify behaviour +that in practice depends on \JAVASCRIPT\ and therefore could as well have been +delegated to \JAVASCRIPT\ as well. It would probably have been much cleaner. +\footnote {It looks like mu\PDF\ in 2018 got some support related to widgets aka +fields but alas not for layers which would be quite useful.} + +The \PDF\ version 2 specification mentions \type {3D}, \type {Video} and \type +{Audio} as primary content types so maybe future viewers will support video out +of the box. Who knows. We try to keep up in \CONTEXT\ because it's often not that +complex to support \PDF\ features but with hardly any possibility to test them, +they have a low priority. And with Acrobat moving to the cloud and thereby +creating a more of less lifelong dependency on remote resources it doesn't become +much interesting to explore those routes either. + +\stopsection + +\startsection[title={Accessibility}] + +A popular \PDF\ related topic is accessibility. One aspect of that is tagged +\PDF. This substandard is in my opinion not something that deserves a price for +beauty. I know that there are \CONTEXT\ users who need to be compliant but I +always wonder what a publisher really does with such a file. It's a bit like +requiring \XML\ as source but at the same time sacrificing really rich encoded +and sources for tweaks that suite the current limitations of for instance browsers, +tool|-|chains and competence. We've seen it happen. 
+ +Support for tagged \PDF\ has been available in \CONTEXT\ already for a while but +as far as I know only Acrobat professional can do something with it. The reason +for tagging is that a document is then useable for (for instance) visually +impaired users, but aren't they better served with a proper complete and very +structured source in some format that tools suitable for it can use? How many +publishers distribute \PDF\ files while they can still make money on prints? How +many are really interested in distributing enriched content that then can be +reused somehow? And how many are willing to invest in tools instead of waiting +for it to happen for free? It's a bit cheap trick to just expect authors (and +their in the case of \TEX\ free tools) to suit a publishers needs. Anyway, just +as with advanced interactive documents or forms, I wonder if it will catch on. At +least no publisher ever asked us and by the time they might do the competition of +web based dissemination could have driven \PDF\ to the background. But, in +\CONTEXT\ we will keep supporting such features anyway, if only because it's +quite doable. But \unknown\ it's user demand that drives development, not the +market, which means that the motivation for implementing such features depends on +user input as well as challenging aspects that make it somewhat fun to spend time +on them. + +\stopsection + +\startsection[title={Quality assurance}] + +Another aspect popping up occasionally is validation. I'm not entirely sure what +drives that but delegating a problem can be one reason. Often we see publishers +and printers use old versions of \PDF\ related tools. Also, some workflows are +kind of ancient anyway and are more driven by \POSTSCRIPT\ history than \PDF\ +possibilities. I sometimes get the impression that it takes at least a decade for +these things to catch on, and by that time it doesn't matter any more that \TEX\ +and friends were at the front: their users are harassed by what the market +demands by then. + +Support for several standards related to validation is already part of \CONTEXT\ +for quite a while. For instance the bump from \PDF\ 1.7 to 2.0 was hardly worth +noticing, simply because there are not that many fundamental changes. Adapting +\LUATEX\ was trivial (and actually not really needed), and macro packages can +provide what is needed without much problems. So, yes, we can support it without +much hassle. Personally I never ran into a case where validation was really +needed. The danger of validation is that it can give a false impression of +quality. And as with everything quality control created a market. As with other +features it is users who drive the availability of support for this. After all, +they are the ones testing it and figuring out the often fuzzy specifications. +These are things that one can always look at in retrospect (like: it has to be +done this or that way) while in practice in order to be an early adopter one has +to gamble a bit and see where it fails or succeeds. Fortunately it's relatively +easy to adapt macro packages and \CONTEXT\ users are willing to update so it's +not really an issue. + +Putting a stamp of approval on a \PDF\ cannot hide the inconsistencies between +for instance vector graphics produced by a third party. They also don't expose +inconsistent use of color and fonts. The page streams produced by \LUATEX\ are +simple and clean enough to not give problems with validation. The problem lays +more with resources coming from elsewhere. 
When you're phoned by a printing house +about an issue with \RGB\ images in a file where there is no sign of \RGB\ being +used but where a validator reports an issue, you're lucky when an experienced +printer dating back decades then replies that he already had that impression and +will contact the origin. There is no easy way out of this but educating users +(authors) is an option. However, they are often dependent on the publishers and +departments that deal with these and those tend to come with directives that the +authors cannot really argue with (or about). + +\stopsection + +\startsection[title={Interactivity}] + +This is an area where \TEX\ (an therefore also \CONTEXT) always had an edge, +There is a lot possible and in principle all that \PDF\ provides can be +supported. But the more fancy one goes, the more one depends on Acrobat. +Interactivity in \PDF\ evolved stepwise and is mostly market driven. As a result +it is (or was) not always consistent. This is partly due to the fact that we have +a chicken|-|egg issue: you need typesetting machinery, viewer as well as a +standard. + +The regular hyperlinks, page or named driven are normally supported by viewers. +Some redefined named destinations (like going to a next page, or going back in a +chain of followed links) not always. Launching applications, as it also relates +to security, can be qualified as an unreliable mechanism. More advanced linking, +for instance using \JAVASCRIPT\ is hardly supported. In that respect \PDF\ +viewers lag way behind \HTML\ browsers. I understand that there can be security +risks involved. It's interesting to see that in Acrobat one can mess with +internals of files which makes the \API\ large and complex, but if we stick to +the useful core, the amount of interfacing needed is quite small. Lack of support +in open source viewers (we're talking of about two decades now) made me loose +interest in these features but they are and will be supported in \CONTEXT. We'll +see if and when viewers catch up. + +Comments and attachments are also part of interactivity and of course we +supported them right from the start. Some free viewers also support them by now. +Personally I never use comments but they can be handy for popping up information +or embedding snippets or (structured) sources (like \MATHML\ or bibliographic +data). In \CONTEXT\ we can even support \PDF\ inclusion with (a reasonable) +subset of these so called annotations. As the \PDF\ standard no longer evolves +much we can expect all these features to become stable. + +\stopsection + +\startsection[title={Summary}] + +We have always supported the fancy \PDF\ features and we will continue doing so +in \CONTEXT . However, many of them depends on what viewers support, and after +decades of \PDF\ that is still kind of disappointing, which is not that +motivating. We'll see what happens. 
+
+\stopsection
+
+\stopchapter
diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-modern.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-modern.tex
new file mode 100644
index 00000000000..65b5d0490f5
--- /dev/null
+++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-modern.tex
@@ -0,0 +1,1284 @@
+% language=uk
+
+% 284 instances, 234 shared in backend, 126 common vectors, 108 common hashes, load time 1.343 seconds
+
+%setupversion[alternative=concept,text={not corrected yet}]
+\setupversion[alternative=file,text={not corrected yet}]
+
+\definebodyfontenvironment[24pt]
+
+\usemodule[fonts-effects]
+
+\startcomponent onandon-modern
+
+\environment onandon-environment
+
+\startchapter[title={Modern Latin}]
+
+\startsection[title={Introduction}]
+
+In \CONTEXT, already in \MKII, we have a feature tagged \quote {effects} that can
+be used to render a font in outline or bolder versions. It uses some low level
+\PDF\ directives to accomplish this and it works quite well. When a user on the
+\CONTEXT\ list asked if we could also provide it as a font feature in the
+repertoire of additional features in \CONTEXT, I was a bit reluctant to provide
+that because it operates at another level than the glyph stream. Also, such a
+feature can be abused and result in a bad looking document. However, by adding a
+few simple options to the \LUATEX\ engine such a feature could actually be
+achieved rather easily: it was trivial to implement given that we can influence
+font handling at the \LUA\ end. In retrospect, extended and pseudo slanted fonts
+could have been done this way too, but there we have some historic ballast. Also,
+the backend now handles such transformations very efficiently because they are
+combined with font scaling. Anyway, by adding this feature in spite of possible
+objections, I could do some more advanced experiments.
+
+In the following pages I will demonstrate how we support effects as a feature in
+\CONTEXT. Instead of simply applying some magic \PDF\ text operators in the
+backend, a more integrated approach is used. The difference with the normal
+effect mechanism is that the one described here is bound to a font instance,
+while the normal mechanism operates on the glyph stream.
+
+\stopsection
+
+\startsection[title={The basics}]
+
+\definefontsynonym[DemoSerif][file:lmroman10-regular]
+
+Let's start with a basic boldening example. First we demonstrate a regular Latin
+Modern sample (using \type {ward.tex}):
+
+\startnarrower
+    \definedfont[DemoSerif*default]
+    \samplefile{ward}
+\stopnarrower
+
+This font looks rather thin (light). Next we define an effect of \type {0.2} and
+typeset the same sample:
+
+\startbuffer
+\definefontfeature
+  [effect-1]
+  [effect=.2]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\startnarrower
+    \definedfont[DemoSerif*default,effect-1]
+    \samplefile{ward}
+\stopnarrower
+
+This simple call gives reasonable default results. But you can have more control
+than this.
The previous examples use the following properties: + +{\definedfont[DemoSerif*default,effect-1] \showfonteffect} + +\startbuffer +\definefontfeature + [effect-2] + [effect={width=.3}] +\stopbuffer + +\typebuffer \getbuffer + +\startnarrower + \definedfont[DemoSerif*default,effect-2] + \samplefile{ward} +\stopnarrower + +This time we use: + +{\definedfont[DemoSerif*default,effect-2] \showfonteffect} + +\startbuffer +\definefontfeature + [effect-3] + [effect={width=.3,delta=0.4}] +\stopbuffer + +\typebuffer \getbuffer + +\startnarrower + \showfontkerns + \definedfont[DemoSerif*default,effect-3] + \samplefile{ward} +\stopnarrower + +We have now tweaked one more property and show the fontkerns in order to see what +happens with them: + +{\definedfont[DemoSerif*default,effect-3] \showfonteffect} + +\startbuffer +\definefontfeature + [effect-4] + [effect={width=.3,delta=0.4,factor=0.3}] +\stopbuffer + +\typebuffer \getbuffer + +\startnarrower + \showfontkerns + \definedfont[DemoSerif*default,effect-4] + \samplefile{ward} +\stopnarrower + +An additional parameter \type {factor} will influence the way (for instance) +kerns get affected: + +{\definedfont[DemoSerif*effect-4] \showfonteffect} + +\stopsection + +\startsection[title=Outlines] + +There are four effects. Normally a font is rendered with effect \type {inner}. +The \type {outer} effect just draws the outlines while \type {both} gives a +rather fat result. The \type {hidden} effect hides the text. + +\startbuffer +\definefontfeature + [effect-5] + [effect={width=0.2,delta=0.4,factor=0.3,effect=inner}] +\stopbuffer + +\typebuffer \getbuffer + +\startnarrower + \showfontkerns + \definedfont[DemoSerif*default,effect-5] + \samplefile{ward} +\stopnarrower + +An inner effect is rather useless unless you want to use the other properties of +this mechanism. + +\startbuffer +\definefontfeature + [effect-6] + [effect={width=.2,delta=0.4,factor=0.3,effect=outer}] +\stopbuffer + +\typebuffer \getbuffer + +\startnarrower + \showfontkerns + \definedfont[DemoSerif*default,effect-6] + \samplefile{ward} +\stopnarrower + +\startbuffer +\definefontfeature + [effect-7] + [effect={width=.2,delta=0.4,factor=0.3,effect=both}] +\stopbuffer + +\typebuffer \getbuffer + +\startnarrower + \showfontkerns + \definedfont[DemoSerif*default,effect-7] + \samplefile{ward} +\stopnarrower + +\startbuffer +\definefontfeature + [effect-8] + [effect={width=.2,delta=0.4,factor=0.3,effect=hidden}, + boundingbox=yes] % to show something +\stopbuffer + +\typebuffer \getbuffer + +We also show the boundingboxes of the glyphs here so that you can see what you're +missing. Actually this text is still there and you can select it in the viewer. + +\startnarrower + \showfontkerns + \showglyphs + \definedfont[DemoSerif*default,effect-8] + \samplefile{ward} +\stopnarrower + +\stopsection + +\startsection[title=The logic] + +In order to support this I had to make some choices. The calculations involved +are best explained in terms of \CONTEXT\ font machinery. 
+
+\startformula
+    \Delta _{\text{wd}} = \text{effect} _{\text{wdelta}}
+        \times \text{parameters}_{\text{hfactor}}
+        \times \text{effect} _{\text{width}}
+        \times 100
+\stopformula
+
+\startformula
+    \Delta _{\text{ht}} = \text{effect} _{\text{hdelta}}
+        \times \text{parameters}_{\text{vfactor}}
+        \times \text{effect} _{\text{width}}
+        \times 100
+\stopformula
+
+\startformula
+    \Delta _{\text{dp}} = \text{effect} _{\text{ddelta}}
+        \times \text{parameters}_{\text{vfactor}}
+        \times \text{effect} _{\text{width}}
+        \times 100
+\stopformula
+
+The factors in the parameter namespace are adapted according to:
+
+\startformula
+    \Delta _{\text{factor}} = \text{effect} _{\text{factor}}
+        \times \text{parameters}_{\text{factor}}
+\stopformula
+
+\startformula
+    \Delta _{\text{hfactor}} = \text{effect} _{\text{hfactor}}
+        \times \text{parameters}_{\text{hfactor}}
+\stopformula
+
+\startformula
+    \Delta _{\text{vfactor}} = \text{effect} _{\text{vfactor}}
+        \times \text{parameters}_{\text{vfactor}}
+\stopformula
+
+The horizontal and vertical scaling factors default to the normal factor, which
+itself defaults to zero, so by default we have no additional scaling of, for
+instance, kerns. The width (wd), height (ht) and depth (dp) of a glyph are
+adapted in relation to the line width. A glyph is shifted in its bounding box by
+half the width correction. The delta defaults to one.
+
+\stopsection
+
+\startsection[title=About features]
+
+This kind of boldening has limitations, especially because some fonts use
+positioning features that closely relate to the visual font properties. Let's
+give some examples. The most common positioning is kerning. Take for instance
+these shapes:
+
+\startlinecorrection
+\startMPcode
+    def SampleShapes(expr dx, offset, pw, k) =
+        picture p ; p := image (
+            draw fullcircle scaled 1cm ;
+            draw fullsquare scaled 1cm shifted (dx+k,0) ;
+            draw point 8 of (fullcircle scaled 1cm) withcolor white ;
+            draw point 3.5 of (fullsquare scaled 1cm) shifted (dx+k,0) withcolor white ;
+        ) shifted (offset,0) ;
+        draw p withpen pencircle scaled pw ;
+        draw boundingbox p withcolor white ;
+    enddef ;
+    SampleShapes(15mm,  0mm,1mm,0mm) ;
+    SampleShapes(15mm, 40mm,2mm,0mm) ;
+    SampleShapes(17mm, 80mm,2mm,0mm) ;
+\stopMPcode
+\stoplinecorrection
+
+The first pair is what we start with. The circle and square have a line width of
+one unit and a distance (kern) of five units. The second pair has a line width of
+two units and the same distance, while the third pair has a distance of seven
+units. So, in the last case we have just increased the kern by a value relative
+to the increase of line width.
+
+\startlinecorrection
+\startMPcode
+    SampleShapes(15mm,  0mm,1mm,0mm) ;
+    SampleShapes(15mm, 40mm,2mm,2mm) ;
+    SampleShapes(17mm, 80mm,2mm,2mm) ;
+\stopMPcode
+\stoplinecorrection
+
+In this example we have done the same, but we started with a distance of zero.
+You can consider this a kind of anchoring. This happens in for instance cursive
+scripts where entry and exit points are used to connect shapes. In a latin script
+you can think of a poor|-|man's attachment of a cedilla or ogonek. But what to do
+with for instance an accent on top of a character? In that case we could do the
+same as with kerning. However, when we mix styles we would like to have a
+consistent height, so maybe scaling is not a good idea there. This is why we can
+set the factors and deltas explicitly for vertical and horizontal movements.
+However, this will only work well when a font is consistent in how it applies
+these movements.
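+
+As an aside (just a sketch, a variation on the definitions that show up later in
+this chapter), one could for instance keep the accent heights untouched while
+still widening the shapes, simply by zeroing the vertical deltas:
+
+\starttyping
+\definefontfeature
+  [bolden-keep-heights]
+  [effect={width=0.3,hdelta=0,ddelta=0,factor=0.1}]
+\stoptyping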
In this case, if we could recognize cursive anchoring (the last
+pair in the example above) we could compensate for it.
+
+\startMPinclusions
+    def SampleShapes(expr dx, offset, pw, k) =
+        picture p ; p := image (
+            draw fullcircle scaled 1cm ;
+            draw fullsquare scaled 1cm shifted (dx+k,0) ;
+            draw point 8 of (fullcircle scaled 1cm) withcolor white ;
+            draw point 3.5 of (fullsquare scaled 1cm) shifted (dx+k,0) withcolor white ;
+        ) shifted (offset,0) ;
+        draw p withpen pencircle scaled pw ;
+        draw boundingbox p withcolor white ;
+    enddef ;
+\stopMPinclusions
+
+\startlinecorrection
+\startMPcode
+    SampleShapes(10mm,  0mm,1mm,0mm) ;
+    SampleShapes(10mm, 40mm,1mm,1mm) ;
+    SampleShapes(10mm, 80mm,2mm,0mm) ;
+    SampleShapes(10mm,120mm,2mm,2mm) ;
+\stopMPcode
+\stoplinecorrection
+
+So, an interesting extension to the positioning part of the font handler could be
+to influence all the scaling factors: anchors, cursives, single and pairwise
+positioning in both directions (so eight independent factors). Technically this
+is no big deal, so I might give it a go when I have a need for it.
+
+\stopsection
+
+\startsection[title=Some (extreme) examples]
+
+In the last decade, buying a font has become a bit of a nightmare, simply because
+you have to choose the weights that you need. It's the business model not to
+stick to four shapes in a few weights but to offer a whole range, and each of
+course costs money.
+
+Latin Modern is based on Computer Modern and is meant for high resolution
+rendering. The design of the font is such that you can create instances, but in
+practice that isn't done. One property that lets the font stand out is its bold,
+which runs rather wide. However, how about cooking up a variant? For this we will
+use a series of definitions:
+
+\startbuffer
+\definefontfeature[effect-2-0-0]
+  [effect={width=0.2,delta=0}]
+\definefontfeature[effect-2-3-0]
+  [effect={width=0.2,delta=0.3}]
+\definefontfeature[effect-2-6-0]
+  [effect={width=0.2,delta=0.6}]
+\definefontfeature[effect-4-0-0]
+  [effect={width=0.4,delta=0}]
+\definefontfeature[effect-4-3-0]
+  [effect={width=0.4,delta=0.3}]
+\definefontfeature[effect-4-6-0]
+  [effect={width=0.4,delta=0.6}]
+\definefontfeature[effect-8-0-0]
+  [effect={width=0.8,delta=0}]
+\definefontfeature[effect-8-3-0]
+  [effect={width=0.8,delta=0.3}]
+\definefontfeature[effect-8-6-0]
+  [effect={width=0.8,delta=0.6}]
+\definefontfeature[effect-8-6-2]
+  [effect={width=0.8,delta=0.6,factor=0.2}]
+\definefontfeature[effect-8-6-4]
+  [effect={width=0.8,delta=0.6,factor=0.4}]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+And a helper macro:
+
+\startbuffer
+\starttexdefinition ShowOneSample #1#2#3#4
+   %\testpage[5]
+   %\startsubsubsubject[title=\type{#1}]
+       \start
+            \definedfont[#2*#3 @ 10pt]
+            \setupinterlinespace
+            \startlinecorrection
+                \showglyphs \showfontkerns
+                \scale[sx=#4,sy=#4]{effective n\"ots}
+            \stoplinecorrection
+            \blank[samepage]
+            \dontcomplain
+            \showfontkerns
+            \margintext{\tt\txx\maincolor#1}
+            \samplefile{ward}
+            \par
+        \stop
+   %\stopsubsubsubject
+\stoptexdefinition
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\starttexdefinition ShowSamples #1
+    \startsubsubject[title=#1]
+        \start
+            \ShowOneSample{no effect}                       {#1}{default}             {5}
+            \ShowOneSample{width=0.2\\delta=0}              {#1}{default,effect-2-0-0}{5}
+            \ShowOneSample{width=0.2\\delta=0.3}            {#1}{default,effect-2-3-0}{5}
+            \ShowOneSample{width=0.2\\delta=0.6}            {#1}{default,effect-2-6-0}{5}
+            \ShowOneSample{width=0.4\\delta=0}              {#1}{default,effect-4-0-0}{5}
+            \ShowOneSample{width=0.4\\delta=0.3}            {#1}{default,effect-4-3-0}{5}
+            
\ShowOneSample{width=0.4\\delta=0.6}            {#1}{default,effect-4-6-0}{5}
+            \ShowOneSample{width=0.8\\delta=0}              {#1}{default,effect-8-0-0}{5}
+            \ShowOneSample{width=0.8\\delta=0.3}            {#1}{default,effect-8-3-0}{5}
+            \ShowOneSample{width=0.8\\delta=0.6}            {#1}{default,effect-8-6-0}{5}
+            \ShowOneSample{width=0.8\\delta=0.6\\factor=0.2}{#1}{default,effect-8-6-2}{5}
+            \ShowOneSample{width=0.8\\delta=0.6\\factor=0.4}{#1}{default,effect-8-6-4}{5}
+        \stop
+    \stopsubsubject
+\stoptexdefinition
+
+We show some extremes, using the font used in this document, so don't complain
+about beauty here.
+
+\texdefinition{ShowSamples}{Serif}
+\texdefinition{ShowSamples}{SerifBold}
+\texdefinition{ShowSamples}{SerifItalic}
+\texdefinition{ShowSamples}{SerifBoldItalic}
+\texdefinition{ShowSamples}{Sans}
+
+\start
+    \setupalign[flushleft,broad,nothyphenated,verytolerant]
+    \texdefinition{ShowSamples}{Mono}
+\stop
+
+\stopsection
+
+\startsection[title=Pitfall]
+
+The quality of the result depends on how the font is made. For instance,
+ligatures can be whole shapes, replaced glyphs and|/|or repositioned
+glyphs, or whatever the designer thinks reasonable. In \in {figure}
+[fig:ligature-effects-mess] this is demonstrated. We use the following
+feature sets:
+
+\startbuffer
+\definefontfeature
+  [demo-1]
+  [default]
+  [hlig=yes]
+
+\definefontfeature
+  [demo-2]
+  [demo-1]
+  [effect=0.5]
+\stopbuffer
+
+\typebuffer \getbuffer
+
+\startplacefigure[title={The effects on ligatures.},reference=fig:ligature-effects-mess]
+    \startcombination[1*3]
+        { \scale [scale=5000] {
+            \definedfont[texgyrepagellaregular*demo-1]fist effe
+            \par
+            \definedfont[texgyrepagellaregular*demo-2]fist effe
+        } } {
+            texgyre pagella regular
+        } { \scale [scale=5000] {
+            \definedfont[cambria*demo-1]fist effe
+            \par
+            \definedfont[cambria*demo-2]fist effe
+        } } {
+            cambria
+        } { \scale [scale=5000] {
+            \definedfont[ebgaramond12regular*demo-1]fist effe
+            \par
+            \definedfont[ebgaramond12regular*demo-2]fist effe
+        } } {
+            ebgaramond 12 regular
+        }
+    \stopcombination
+\stopplacefigure
+
+Normally the artifacts (as in the fi ligature in ebgaramond as of 2018) will go
+unnoticed at small sizes. Also, when the user has a low res display or printer,
+or when the publisher is one of those who print a scanned \PDF, the reader might
+not notice it at all. Most readers don't even know what to look at.
+
+\stopsection
+
+\startsection[title=A modern Modern]
+
+So how can we make an effective set of Latin Modern that fits in today's look and
+feel? Of course this is a very subjective experiment, but we've seen experiments
+with these fonts before (like the cm super collections). 
Here is an example of +a typescript definition: + +\starttyping +\starttypescriptcollection[modernlatin] + + \definefontfeature[lm-rm-regular][effect={width=0.15,delta=1.00}] + \definefontfeature[lm-rm-bold] [effect={width=0.30,delta=1.00}] + \definefontfeature[lm-ss-regular][effect={width=0.10,delta=1.00}] + \definefontfeature[lm-ss-bold] [effect={width=0.20,delta=1.00}] + \definefontfeature[lm-tt-regular][effect={width=0.15,delta=1.00}] + \definefontfeature[lm-tt-bold] [effect={width=0.30,delta=1.00}] + \definefontfeature[lm-mm-regular][effect={width=0.15,delta=1.00}] + \definefontfeature[lm-mm-bold] [effect={width=0.30,delta=1.00}] + + \starttypescript [serif] [modern-latin] + \definefontsynonym + [Serif] [file:lmroman10-regular] + [features={default,lm-rm-regular}] + \definefontsynonym + [SerifItalic] [file:lmroman10-italic] + [features={default,lm-rm-regular}] + \definefontsynonym + [SerifSlanted] [file:lmromanslant10-regular] + [features={default,lm-rm-regular}] + \definefontsynonym + [SerifBold] [file:lmroman10-regular] + [features={default,lm-rm-bold}] + \definefontsynonym + [SerifBoldItalic] [file:lmroman10-italic] + [features={default,lm-rm-bold}] + \definefontsynonym + [SerifBoldSlanted] [file:lmromanslant10-regular] + [features={default,lm-rm-bold}] + \stoptypescript + + \starttypescript [sans] [modern-latin] + \definefontsynonym + [Sans] [file:lmsans10-regular] + [features={default,lm-ss-regular}] + \definefontsynonym + [SansItalic] [file:lmsans10-oblique] + [features={default,lm-ss-regular}] + \definefontsynonym + [SansSlanted] [file:lmsans10-oblique] + [features={default,lm-ss-regular}] + \definefontsynonym + [SansBold] [file:lmsans10-regular] + [features={default,lm-ss-bold}] + \definefontsynonym + [SansBoldItalic] [file:lmsans10-oblique] + [features={default,lm-ss-bold}] + \definefontsynonym + [SansBoldSlanted] [file:lmsans10-oblique] + [features={default,lm-ss-bold}] + \stoptypescript + + \starttypescript [mono] [modern-latin] + \definefontsynonym + [Mono] [file:lmmono10-regular] + [features={default,lm-tt-regular}] + \definefontsynonym + [MonoItalic] [file:lmmono10-italic] + [features={default,lm-tt-regular}] + \definefontsynonym + [MonoSlanted] [file:lmmonoslant10-regular] + [features={default,lm-tt-regular}] + \definefontsynonym + [MonoBold] [file:lmmono10-regular] + [features={default,lm-tt-bold}] + \definefontsynonym + [MonoBoldItalic] [file:lmmono10-italic] + [features={default,lm-tt-bold}] + \definefontsynonym + [MonoBoldSlanted] [file:lmmonoslant10-regular] + [features={default,lm-tt-bold}] + \stoptypescript + + \starttypescript [math] [modern-latin] + \loadfontgoodies[lm] + \definefontsynonym + [MathRoman] [file:latinmodern-math-regular.otf] + [features={math\mathsizesuffix,lm-mm-regular,mathextra}, + goodies=lm] + \definefontsynonym + [MathRomanBold] [file:latinmodern-math-regular.otf] + [features={math\mathsizesuffix,lm-mm-bold,mathextra}, + goodies=lm] + \stoptypescript + + \starttypescript [modern-latin] + \definetypeface [\typescriptone] + [rm] [serif] [modern-latin] [default] + \definetypeface [\typescriptone] + [ss] [sans] [modern-latin] [default] + \definetypeface [\typescriptone] + [tt] [mono] [modern-latin] [default] + \definetypeface [\typescriptone] + [mm] [math] [modern-latin] [default] + \quittypescriptscanning + \stoptypescript + +\stoptypescriptcollection +\stoptyping + +We show some more samples now for which we use\type {zapf.tex}. 
+ +\startbuffer + {\tf\samplefile{zapf}}\blank {\bf\samplefile{zapf}}\blank + {\it\samplefile{zapf}}\blank {\bi\samplefile{zapf}}\blank + {\sl\samplefile{zapf}}\blank {\bs\samplefile{zapf}}\blank +\stopbuffer + +\startsubsubsubject[title={\type{\switchtobodyfont[modern-latin,rm,10pt]}}] + \start + \switchtobodyfont[modern-latin,rm,10pt] + \getbuffer + \stop +\stopsubsubsubject + +\startsubsubsubject[title={\type{\switchtobodyfont[modern-latin,ss,10pt]}}] + \start + \switchtobodyfont[modern-latin,ss,10pt] + \getbuffer + \stop +\stopsubsubsubject + +\startsubsubsubject[title={\type{\switchtobodyfont[modern-latin,tt,10pt]}}] + \start + \switchtobodyfont[modern-latin,tt,10pt] + \setupalign[flushleft,broad,nothyphenated,verytolerant] + \getbuffer + \stop +\stopsubsubsubject + +\stopsection + +\startsection[title=Finetuning] + +In practice we only need to compensate the width but can leave the height +and depth untouched. In the following examples we see the normal bold next +to the regular as well as the boldened version. For this we will use a couple +of definitions: + +\startbuffer +\definefontfeature[lm-bald][effect={width=0.25,effect=both}] +\definefontfeature[pg-bald][effect={width=0.25,effect=both}] +\definefontfeature[dj-bald][effect={width=0.35,effect=both}] + +\definefontfeature + [lm-bold] + [effect={width=0.25,hdelta=0,ddelta=0,effect=both}, + extend=1.10] + +\definefontfeature + [pg-bold] + [effect={width=0.25,hdelta=0,ddelta=0,effect=both}, + extend=1.00] + +\definefontfeature + [dj-bold] + [effect={width=0.35,hdelta=0,ddelta=0,effect=both}, + extend=1.05] + +\definefont[lmbald][Serif*default,lm-bald sa d] +\definefont[pgbald][Serif*default,pg-bald sa d] +\definefont[djbald][Serif*default,dj-bald sa d] + +\definefont[lmbold][Serif*default,lm-bold sa d] +\definefont[pgbold][Serif*default,pg-bold sa d] +\definefont[djbold][Serif*default,dj-bold sa d] +\stopbuffer + +\typebuffer \getbuffer + +We can combine the extend and effect features to get a bold running as wide as a +normal bold. We limit the height and depth so that we can use regular and bold in +the same sentence. It's all a matter of taste, but some control is there. + +\starttabulate[|l|l|l|l|] +\NC + \BC + \tt modern \BC + \tt pagella \BC + \tt dejavu \NC +\NR +\NC + \type{\tfd} \NC + \switchtobodyfont [modern,24pt]\strut\ruledhbox{\tfd ABC}\NC + \switchtobodyfont[pagella,24pt]\strut\ruledhbox{\tfd ABC}\NC + \switchtobodyfont [dejavu,24pt]\strut\ruledhbox{\tfd ABC}\NC +\NR +\NC + \type{\..bald} \NC + \switchtobodyfont [modern,24pt]\strut\ruledhbox{\lmbald ABC}\NC + \switchtobodyfont[pagella,24pt]\strut\ruledhbox{\pgbald ABC}\NC + \switchtobodyfont [dejavu,24pt]\strut\ruledhbox{\djbald ABC}\NC +\NR +\NC + \type{\bfd} \NC + \switchtobodyfont [modern,24pt]\strut\ruledhbox{\bfd ABC}\NC + \switchtobodyfont[pagella,24pt]\strut\ruledhbox{\bfd ABC}\NC + \switchtobodyfont [dejavu,24pt]\strut\ruledhbox{\bfd ABC}\NC +\NR +\NC + \type{\..bold} \NC + \switchtobodyfont [modern,24pt]\strut\ruledhbox{\lmbold ABC}\NC + \switchtobodyfont[pagella,24pt]\strut\ruledhbox{\pgbold ABC}\NC + \switchtobodyfont [dejavu,24pt]\strut\ruledhbox{\djbold ABC}\NC +\NR +\stoptabulate + +Let's take another go at Pagella. 
We define a few features, colors +and fonts first: + +\startbuffer +\definefontfeature + [pg-fake-1] + [effect={width=0.25,effect=both}] + +\definefontfeature + [pg-fake-2] + [effect={width=0.25,hdelta=0,ddelta=0,effect=both}] + +\definefont[pgregular] [Serif*default] +\definefont[pgbold] [SerifBold*default] +\definefont[pgfakebolda][Serif*default,pg-fake-1] +\definefont[pgfakeboldb][Serif*default,pg-fake-2] + +\definecolor[color-pgregular] [t=.5,a=1,r=.6] +\definecolor[color-pgbold] [t=.5,a=1,g=.6] +\definecolor[color-pgfakebolda][t=.5,a=1,b=.6] +\definecolor[color-pgfakeboldb][t=.5,a=1,r=.6,g=.6] +\stopbuffer + +\typebuffer \getbuffer + +When we apply these we get the results of \in {figure} [fig:pagella-compared] +while we show the same overlayed in \in {figure} [fig:pagella-overlayed]. As you +can see, the difference in real bold and fake bold is subtle: the inner shape of +the \quote {o} differs. Also note that the position of the accents doesn't change +in the vertical direction but moves along with the width. + +\def\SampleWord{\^o\"ep\c s} + +\startplacefigure[title={Four pagella style variants compared.},reference=fig:pagella-compared] + \startcombination[2*2] + { + \scale [scale=7500] { + \ruledhbox{\showglyphs\pgregular \SampleWord} + } + } { + regular (red) + } { + \scale [scale=7500] { + \ruledhbox{\showglyphs\pgbold \SampleWord} + } + } { + bold (green) + } { + \scale [scale=7500] { + \ruledhbox{\showglyphs\pgfakebolda \SampleWord} + } + } { + fakebolda (blue) + } { + \scale [scale=7500] { + \ruledhbox{\showglyphs\pgfakeboldb \SampleWord} + } + } { + fakeboldb (yellow) + } + \stopcombination +\stopplacefigure + +\startplacefigure[title={Four pagella style variants overlayed.},reference=fig:pagella-overlayed] + \startcombination[2*3] + { + \scale [scale=7500] { + \startoverlay + {\color[color-pgregular] {\pgregular \SampleWord}} + {\color[color-pgbold] {\pgbold \SampleWord}} + \stopoverlay + } + } { + bold over regular + } { + \scale [scale=7500] { + \startoverlay + {\color[color-pgregular] {\pgregular \SampleWord}} + {\color[color-pgfakeboldb]{\pgfakeboldb \SampleWord}} + \stopoverlay + } + } { + fakebolda over regular + } { + \scale [scale=7500] { + \startoverlay + {\color[color-pgregular] {\pgregular \SampleWord}} + {\color[color-pgfakebolda]{\pgfakeboldb \SampleWord}} + \stopoverlay + } + } { + fakeboldb over regular + } { + \scale [scale=7500] { + \startoverlay + {\color[color-pgbold] {\pgbold \SampleWord}} + {\color[color-pgfakeboldb]{\pgfakeboldb \SampleWord}} + \stopoverlay + } + } { + fakeboldb over bold + } { + \scale [scale=7500] { + \startoverlay + {\color[color-pgfakebolda]{\pgfakebolda \SampleWord}} + {\color[color-pgfakeboldb]{\pgfakeboldb \SampleWord}} + \stopoverlay + } + } { + fakeboldb over fakebolda + } { + \scale [scale=7500] { + \startoverlay + {\color[color-pgregular] {\pgregular \SampleWord}} + {\color[color-pgbold] {\pgbold \SampleWord}} + {\color[color-pgfakebolda]{\pgfakebolda \SampleWord}} + {\color[color-pgfakeboldb]{\pgfakeboldb \SampleWord}} + \stopoverlay + } + } { + all four overlayed + } + \stopcombination +\stopplacefigure + +\stopsection + +\startsection[title=The code] + +The amount of code involved is not that large and is a nice illustration of what +\LUATEX\ provides (I have omitted a few lines of tracing and error reporting). +The only thing added to the font scaler elsewhere is that we pass the \type +{mode} and \type {width} parameters to \TEX\ so that they get used in the backend +to inject the few operators needed. 
+ +\starttyping +local effects = { + inner = 0, + outer = 1, + both = 2, + hidden = 3, +} + +local function initialize(tfmdata,value) + local spec + if type(value) == "number" then + spec = { width = value } + else + spec = utilities.parsers.settings_to_hash(value) + end + local effect = spec.effect or "both" + local width = tonumber(spec.width) or 0 + local mode = effects[effect] + if mode then + local factor = tonumber(spec.factor) or 0 + local hfactor = tonumber(spec.vfactor) or factor + local vfactor = tonumber(spec.hfactor) or factor + local delta = tonumber(spec.delta) or 1 + local wdelta = tonumber(spec.wdelta) or delta + local hdelta = tonumber(spec.hdelta) or delta + local ddelta = tonumber(spec.ddelta) or hdelta + tfmdata.parameters.mode = mode + tfmdata.parameters.width = width * 1000 + tfmdata.properties.effect = { + effect = effect, width = width, + wdelta = wdelta, factor = factor, + hdelta = hdelta, hfactor = hfactor, + ddelta = ddelta, vfactor = vfactor, + } + end +end + +local function manipulate(tfmdata) + local effect = tfmdata.properties.effect + if effect then + local characters = tfmdata.characters + local parameters = tfmdata.parameters + local multiplier = effect.width * 100 + local wdelta = effect.wdelta * parameters.hfactor * multiplier + local hdelta = effect.hdelta * parameters.vfactor * multiplier + local ddelta = effect.ddelta * parameters.vfactor * multiplier + local hshift = wdelta / 2 + local factor = (1 + effect.factor) * parameters.factor + local hfactor = (1 + effect.hfactor) * parameters.hfactor + local vfactor = (1 + effect.vfactor) * parameters.vfactor + for unicode, char in next, characters do + local oldwidth = char.width + local oldheight = char.height + local olddepth = char.depth + if oldwidth and oldwidth > 0 then + char.width = oldwidth + wdelta + char.commands = { + { "right", hshift }, + { "char", unicode }, + } + end + if oldheight and oldheight > 0 then + char.height = oldheight + hdelta + end + if olddepth and olddepth > 0 then + char.depth = olddepth + ddelta + end + end + parameters.factor = factor + parameters.hfactor = hfactor + parameters.vfactor = vfactor + end +end + +local specification = { + name = "effect", + description = "apply effects to glyphs", + initializers = { + base = initialize, + node = initialize, + }, + manipulators = { + base = manipulate, + node = manipulate, + }, +} + +fonts.handlers.otf.features.register(specification) +fonts.handlers.afm.features.register(specification) +\stoptyping + +The real code is slightly more complex because we want to stack virtual features +properly but the principle is the same. + +\stopsection + +\startsection[title=Arabic] + +It is tempting to test effects with arabic but we need to keep in mind that for +that we should add some more support in the \CONTEXT\ font handler. Let's define +some features. 
+ +\startbuffer +\definefontfeature + [bolden-arabic-1] + [effect={width=0.4}] + +\definefontfeature + [bolden-arabic-2] + [effect={width=0.4,effect=outer}] + +\definefontfeature + [bolden-arabic-3] + [effect={width=0.5,wdelta=0.5,ddelta=.2,hdelta=.2,factor=.1}] +\stopbuffer + +\typebuffer \getbuffer + +\startbuffer + +\setupalign + [righttoleft] + +\setupinterlinespace + [1.5] + +\start + \definedfont[arabictest*arabic,bolden-arabic-1 @ 30pt] + \samplefile{khatt-ar}\par + \definedfont[arabictest*arabic,bolden-arabic-2 @ 30pt] + \samplefile{khatt-ar}\par + \definedfont[arabictest*arabic,bolden-arabic-3 @ 30pt] + \samplefile{khatt-ar}\par +\stop +\stopbuffer + +With \MICROSOFT\ Arabtype the \type {khatt-ar.tex} looks as follows: + +\typebuffer \start \definefontsynonym[arabictest][arabtype] \getbuffer\stop + +And with Idris' Husayni we get: + +\typebuffer \start \definefontsynonym[arabictest][husayni] \getbuffer\stop + +Actually, quite okay are the following. We don't over do bold here and to get +a distinction we make the original thinner. + +\startbuffer +\definefontfeature[effect-ar-thin] [effect={width=0.01,effect=inner}] +\definefontfeature[effect-ar-thick][effect={width=0.20,extend=1.05}] +\stopbuffer + +\typebuffer \getbuffer + +\start + \setupalign + [righttoleft] + + \setupinterlinespace + [1.5] + + \definedfont[husayni*arabic,effect-ar-thin @ 30pt] + \samplefile{khatt-ar}\par + \definedfont[husayni*arabic,effect-ar-thick @ 30pt] + \samplefile{khatt-ar}\par +\stop + +The results are acceptable at small sizes but at larger sizes you will start to +see kerning, anchoring and cursive artifacts. The outline examples show that the +amount of overlap differs per font and the more overlap we have the better +boldening will work. + +\startMPinclusions + def DrawShapes(expr how) = + def SampleShapes(expr offset, pw, xc, xs, xt, yc, ys, yt, txt, more) = + numeric l ; l := pw * mm ; + picture p ; p := image ( + draw fullcircle scaled 10 ; + draw fullcircle scaled 3 shifted (-3+xc ,8+yc) withcolor "darkred" ; + draw fullsquare scaled 3 shifted ( 6+xs ,7+ys) withcolor "darkblue"; + draw fulltriangle scaled 4 shifted ( 6+xt+5,6+yt) withcolor "darkgreen"; + ) shifted (offset,0) scaled mm ; + draw p + withpen pencircle + if how = 2 : + xscaled l yscaled (l/2) rotated 30 ; + else : + scaled l ; + fi ; + draw boundingbox p + withcolor "darkyellow" ; + draw textext(txt) + shifted (xpart center p, -8mm) ; + draw textext(more) + shifted (xpart center p, -11mm) ; + enddef ; + SampleShapes( 0,1, 0,0,0, 0, 0, 0, "\tinyfont \setstrut \strut original", "\tinyfont \setstrut \strut ") ; + SampleShapes( 25,2, 0,0,0, 0, 0, 0, "\tinyfont \setstrut \strut instance", "\tinyfont \setstrut \strut ") ; + SampleShapes( 50,2,-1,1,0, 0, 0, 0, "\tinyfont \setstrut \strut mark", "\tinyfont \setstrut \strut x only") ; + SampleShapes( 75,2,-1,1,1, 0, 0, 0, "\tinyfont \setstrut \strut mark + mkmk","\tinyfont \setstrut \strut x only") ; + SampleShapes(100,2,-1,1,1, 1, 1, 1, "\tinyfont \setstrut \strut mark + mkmk","\tinyfont \setstrut \strut x and y") ; + SampleShapes(125,2,-1,2,2,-1/2,-1/2,-1/2,"\tinyfont \setstrut \strut mark + mkmk","\tinyfont \setstrut \strut x and -y") ; + enddef ; +\stopMPinclusions + +In arabic (and sometimes latin) fonts the marks (or accents in latin) are +attached to base shapes and normally one will use the \type {mark} to anchor a +mark to a base character or specific component of a ligature. The \type {mkmk} +feature is then used to anchor marks to other marks. Consider the following +example. 
+
+\startlinecorrection
+\scale
+  [width=\textwidth]
+  {\startMPcode DrawShapes(1) ; \stopMPcode}
+\stoplinecorrection
+
+We start with \type {original}: a base shape with three marks: the red circle and
+blue square anchor to the base and the green triangle anchors to the blue square.
+When we bolden, the shapes will start touching. In the case of latin scripts,
+it's normal to keep the accents at the same height, so this is why the third
+picture only shifts in the horizontal direction. The fourth picture demonstrates
+that we need to compensate the two bound marks. One can decide to move the lot up
+as in the fifth picture, but that is no option here.
+
+Matters can be even more complex when a non|-|circular pen is introduced. In that
+case a transformation from one font to another using the transformed \OPENTYPE\
+positioning logic (values) is even more tricky, and unless one knows the
+properties (and usage) of a mark it makes no sense at all. Actually the sixth
+variant is probably nicer here, but there we actually move the marks down!
+
+\startlinecorrection
+\scale
+  [width=\textwidth]
+  {\startMPcode DrawShapes(2) ; \stopMPcode}
+\stoplinecorrection
+
+For effects this means that when it gets applied to such a font, only small
+values work out well.
+
+\stopsection
+
+\startsection[title=Math]
+
+Math is dubious as all kinds of positioning are involved. Future versions
+might deal with this, although bolder math (math itself has bold, so actually
+we're talking of bold with some heavy) is needed for titling. If we keep that
+in mind we can actually just bolden math and probably most will come out
+reasonably well. One of the potential troublemakers is the radical (root) sign
+that can be bound to a rule. Bumping the rules is no big deal and patching the
+relevant radical properties isn't either, so indeed we can do:
+
+\startbuffer[mathblob]
+2\times\sqrt{\frac{\sqrt{\frac{\sqrt{2}}{\sqrt{2}}}}
+                  {\sqrt{\frac{\sqrt{2}}{\sqrt{2}}}}}
+\stopbuffer
+
+\startbuffer
+\switchtobodyfont [modernlatin,17.3pt]
+$
+    \mr \darkblue  \getbuffer[mathblob] \quad
+    \mb \darkgreen \getbuffer[mathblob]
+$
+\stopbuffer
+
+\typebuffer \blank \start \getbuffer \stop \blank
+
+Where the \type {mathblob} buffer is:
+
+\typebuffer[mathblob]
+
+Here you also see a fraction rule that has been bumped. In display mode we
+get:
+
+\startbuffer
+\switchtobodyfont[modernlatin,17.3pt]
+\startformula
+    \mr \darkblue  \getbuffer[mathblob] \quad
+    \mb \darkgreen \getbuffer[mathblob]
+\stopformula
+\stopbuffer
+
+\typebuffer \blank \start \getbuffer \stop \blank
+
+Extensibles behave well too:
+
+\startbuffer
+\switchtobodyfont [modernlatin,17.3pt]
+\dostepwiserecurse {1} {30} {5} {
+    $
+    \mr \sqrt{\blackrule[width=2mm,height=#1mm,color=darkblue]}
+    \quad
+    \mb \sqrt{\blackrule[width=2mm,height=#1mm,color=darkgreen]}
+    $
+}
+\stopbuffer
+
+\typebuffer \blank \start \getbuffer \stop \blank
+
+\definecolor[colormr] [t=.5,a=1,b=.6]
+\definecolor[colormb] [t=.5,a=1,g=.6]
+
+In \in {figure} [fig:regular-over-bold] we overlay regular and bold. The result
+doesn't look that bad after all, does it? It took, however, a bit of experimenting
+and a fix in \LUATEX: pick up the value from the font instead of the currently
+used (but frozen) math parameter.
+ +\startplacefigure[title={Modern Latin regular over bold.},reference=fig:regular-over-bold] +\switchtobodyfont[modernlatin,17.3pt] +\scale[width=.25\textwidth]{\startoverlay + {\color[colormb]{$\mb\sqrt{\frac{1}{x}}$}} + {\color[colormr]{$ \sqrt{\frac{1}{x}}$}} +\stopoverlay} +\stopplacefigure + +In case you wonder how currently normal Latin Modern bold looks, here we go: + +\startbuffer +\switchtobodyfont[latinmodern,17.3pt] +\startformula + \mr \darkblue \getbuffer[mathblob] \quad + \mb \darkgreen \getbuffer[mathblob] +\stopformula +\stopbuffer + +\typebuffer \blank \start \getbuffer \stop \blank + +\unexpanded\def\ShowMathSample#1% + {\switchtobodyfont[#1,14.4pt]% + \mathematics{% + \mr \darkblue \getbuffer[mathblob] \quad + \mb \darkgreen \getbuffer[mathblob] + }} + +\unexpanded\def\ShowMathCaption#1% + {\switchtobodyfont[#1]% + #1: + $ + {\mr2\enspace \scriptstyle2\enspace \scriptscriptstyle2} + \enspace + {\mb2\enspace \scriptstyle2\enspace \scriptscriptstyle2} + $} + +\startcombination[3*2] + {\ShowMathSample {dejavu}} {\ShowMathCaption{dejavu}} + {\ShowMathSample{pagella}} {\ShowMathCaption{pagella}} + {\ShowMathSample {termes}} {\ShowMathCaption{termes}} + {\ShowMathSample {bonum}} {\ShowMathCaption{bonum}} + {\ShowMathSample {schola}} {\ShowMathCaption{schola}} + {\ShowMathSample{cambria}} {\ShowMathCaption{cambria}} +\stopcombination + +I must admit that I cheat a bit. In order to get a better looking pseudo math +we need to extend the shapes horizontally as well as squeeze them a bit vertically. +So, the real effect definitions more look like this: + +\starttyping +\definefontfeature + [boldened-30] + [effect={width=0.3,extend=1.15,squeeze=0.985,% + delta=1,hdelta=0.225,ddelta=0.225,vshift=0.225}] +\stoptyping + +and because we can calculate the funny values sort of automatically, this gets +simplified to: + +\starttyping +\definefontfeature + [boldened-30] + [effect={width=0.30,auto=yes}] +\stoptyping + +We leave it to your imagination to figure out what happens behind the screens. +Just think of some virtual font magic combined with the engine supported \type +{extend} and \type {squeeze} function. And because we already support bold math +in \CONTEXT, you will get it when you are doing bold titling. + +\startbuffer +\def\MathSample + {\overbrace{2 + + \sqrt{\frac{\sqrt{\frac{\sqrt{2}}{\sqrt{2}}}} + {\sqrt{\frac{\sqrt{\underbar{2}}}{\sqrt{\overbar{2}}}}}}}} + +\definehead + [mysubject] + [subject] + +\setuphead + [mysubject] + [style=\tfc, + color=darkblue, + before=\blank, + after=\blank] + +\mysubject{Regular\quad$\MathSample\quad\mb\MathSample$} + +\setuphead + [mysubject] + [style=\bfc, + color=darkred] + +\mysubject{Bold \quad$\MathSample\quad\mb\MathSample$} +\stopbuffer + +\typebuffer + +\getbuffer + +Of course one can argue about the right values for boldening and compensation if +dimensions so don't expect the current predefined related features to be frozen +yet. + +For sure this mechanism will create more fonts than normal but fortunately it +can use the low level optimizations for sharing instances so in the end the +overhead is not that large. This chapter uses 36 different fonts, creates 270 +font instances (different scaling and properties) of which 220 are shared in the +backend. The load time is 5 seconds in \LUATEX\ and 1.2 seconds in \LUAJITTEX\ on +a somewhat old laptop with a i7-3840QM processor running 64 bit \MSWINDOWS. 
Of
+course we load a lot of bodyfonts at different sizes, so in a normal run the
+extra loading is limited to just a couple of extra instances for math (normally
+3, one for each math size).
+
+\stopsection
+
+\startsection[title=Conclusion]
+
+So what can we conclude? When we started with \LUATEX, right from the start
+\CONTEXT\ supported true \UNICODE\ math by using virtual \UNICODE\ math fonts.
+One of the objectives of the \TEX Gyre project is to come up with a robust,
+complete set of math fonts, text fonts with a bunch of useful symbols, and
+finally a subset bold math font for titling. Now we have real \OPENTYPE\ math
+fonts, although they are still somewhat experimental. Because we're impatient, we
+now provide bold math by using effects, but the future will tell to what extent
+the real bold math fonts will differ and be more pleasant to look at. After all,
+what we describe here is just an experiment that got a bit out of hand.
+
+% And if you wonder if this kind of messing with fonts is okay? Well, you don't
+% know what specs we sometimes get (and then ignore).
+
+\stopsection
+
+\stopchapter
+
+\stopcomponent
diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-performance.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-performance.tex
index 279383a8cc1..b1b34443d41 100644
--- a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-performance.tex
+++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-performance.tex
@@ -28,8 +28,8 @@ So what exactly does performance refer to? If you use \CONTEXT\ there are
 probably only two things that matter:
 
 \startitemize[packed]
-\startitem How long does one run take. \stopitem
-\startitem How many runs do I need. \stopitem
+\startitem How long does one run take? \stopitem
+\startitem How many runs do I need? \stopitem
 \stopitemize
 
 Processing speed is reported at the end of a run in terms of seconds spent on the
@@ -50,72 +50,74 @@ i7-3840QM as reference. A simple
 
 \stoptext
 \stoptyping
 
-document reports 0.4 seconds but as we wrap the run in an \type {mtxrun}
-management run we have an additional 0.3 overhead (auxiliary file handling, \PDF\
-viewer management, etc). This includes loading the Latin Modern font. With
-\LUAJITTEX\ these times are below 0.3 and 0.2 seconds. It might look like much
-overhead but in an edit|-|preview runs it feels snappy. One can try this:
+document reports 0.4 seconds but, as we wrap the run in an \type {mtxrun}
+management run, we have an additional 0.3 overhead (auxiliary file handling,
+\PDF\ viewer management, etc). This includes loading the Latin Modern font. With
+\LUAJITTEX, these times are below 0.3 and 0.2 seconds. It might look like a lot
+of overhead, but in an edit|-|preview run it feels snappy. One can try this:
 
 \starttyping
 \stoptext
 \stoptyping
 
-which bring down the time to about 0.2 seconds for both engines but as it doesn't
-do anything useful that is is no practice.
+which brings down the time to about 0.2 seconds for both engines, but it doesn't
+do anything useful in practice.
 
-Finishing a document is not that demanding because most gets flushed as we go.
-The more (large) fonts we use, the longer it takes to finish a document but on
+Finishing a document is not that demanding, because most gets flushed as we go.
+The more (large) fonts we use, the longer it takes to finish a document, but, on
 the average that time is not worth noticing. The main runtime contribution comes
 from processing the pages.
Okay, this is not always true. For instance, if we process a 400 page book from
 2500 small \XML\ files with multiple graphics per page, there is a little
-overhead in loading the files and constructing the \XML\ tree as well as in
-inserting the graphics but in such cases one expects a few seconds more runtime. The
-\METAFUN\ manual has some 450 pages with over 2500 runtime generated \METAPOST\
-graphics. It has color, uses quite some fonts, has lots of font switches
-(verbatim too) but still one run takes only 18 seconds in stock \LUATEX\ and less
-that 15 seconds with \LUAJITTEX. Keep these numbers in mind if a non|-|\CONTEXT\
-users barks against the performance tree that his few page mediocre document
-takes 10 seconds to compile: the content, styling, quality of macros and whatever
-one can come up with all plays a role. Personally I find any rate between 10 and
-30 pages per second acceptable, and if I get the lower rate then I normally know
-pretty well that the job is demanding in all kind of aspects.
-
-Over time the \CONTEXT||\LUATEX\ combination, in spite of the fact that more
+overhead in loading the files and in constructing the \XML\ tree as well as in
+inserting the graphics, but in such cases one expects a few seconds longer
+runtime. The \METAFUN\ manual has some 450 pages with over 2500 runtime|-|generated
+\METAPOST\ graphics. It has color, uses quite some fonts, has lots of font
+switches (verbatim, too), but, still, one run takes only 18 seconds in stock
+\LUATEX\ and less than 15 seconds with \LUAJITTEX. Keep these numbers in
+mind if a non|-|\CONTEXT\ user barks against the performance tree that his few
+page mediocre document takes 10 seconds to compile: the content, styling, quality
+of macros and whatever one can come up with all play a role. Personally I find
+any rate between 10 and 30 pages per second acceptable, and, if I get the lower
+rate, then I normally know pretty well that the job is demanding in all kinds of
+aspects.
+
+Over time, the \CONTEXT||\LUATEX\ combination, in spite of the fact that more
 functionality has been added, has not become slower. In fact, some subsystems
-have been sped up. For instance font handling is very sensitive for adding
+have been sped up. For instance, font handling is very sensitive to adding
 functionality. However, each version so far performed a bit better. Whenever some
 neat new trickery was added, at the same time improvements were made thanks to
-more insight in the matter. In practice we're not talking of changes in speed by
+more insight in the matter. In practice, we're not talking of changes in speed by
 large factors but more by small percentages. I'm pretty sure that most \CONTEXT\
-users never noticed. Recently a 15\endash30\% speed up (in font handling) was
-realized (for more complex fonts) but only when you use such complex fonts and
-pages full of text you will see a positive impact on the whole run.
+users never noticed. Recently, a 15\endash30\% speed up (in font handling) was
+realized (for more complex fonts), but only when you use such complex fonts and
+pages full of text will you see a positive impact on the whole run.
 
 There is one important factor I didn't mention yet: the efficiency of the
 console. You can best check that by making a format (\typ {context --make en}).
 When that is done by piping the messages to a file, it takes 3.2 seconds on my
 laptop and about the same when done from the editor (\SCITE), maybe because the
 \LUATEX\ run and the log pane run on a different thread.
When I use the standard
console, it takes 3.8 seconds in Windows 10 Creators Update (in older versions it
took 4.3 and slightly less when using a console wrapper). The powershell takes
-3.2 seconds which is the same as piping to a file. Interesting is that in Bash on
-Windows it takes 2.8 seconds and 2.6 seconds when piped to a file. Normal runs
-are somewhat slower, but it looks like the 64 bit Linux binary is somewhat faster
-than the 64 bit mingw version. \footnote {Long ago we found that \LUATEX\ is very
-sensitive to for instance the \CPU\ cache so maybe there are some differences due
-to optimization flags and|/|or the fact that bash runs in one thread and all file
-\IO\ in the main windows instance. Who knows.} Anyway, it demonstrates that when
-someone yells a number you need to ask what the conditions where.
-
-At a \CONTEXT\ meeting there has been a presentation about possible speed|-|up of
-a run for instance by using a separate syntax checker to prevent a useless run.
-However, the use case concerned a document that took a minute on the machine
-used, while the same document took a few seconds on mine. At the same meeting we
-also did a comparison of speed for a \LATEX\ run using \PDFTEX\ and the same
-document migrated to \CONTEXT\ \MKIV\ using \LUATEX\ (Harald K\"onigs \XML\
-torture and compatibility test). Contrary to what one might expect, the
+3.2 seconds, which is the same as piping to a file. Interesting is that in Bash
+on Windows, it takes 2.8 seconds and 2.6 seconds when piped to a file. Normal
+runs are somewhat slower, but it looks like the 64 bit Linux binary is somewhat
+faster than the 64 bit mingw version. \footnote {Long ago, we found that \LUATEX\
+is very sensitive to for instance the \CPU\ cache, so maybe there are some
+differences due to optimization flags and|/|or the fact that bash runs in one
+thread, and all file \IO\ takes place in the main Windows instance. Who knows.}
+Anyway, it demonstrates that when someone yells a number you need to ask what the
+conditions were.
+
+At a \CONTEXT\ meeting, there has been a presentation about possible speed|-|up
+of a run by using, for instance, a separate syntax checker to prevent a
+useless run. However, the use case concerned a document that took a minute on the
+machine used, while the same document took a few seconds on mine. At the same
+meeting, we also did a comparison of speed for a \LATEX\ run using \PDFTEX\ and
+the same document migrated to \CONTEXT\ \MKIV\ using \LUATEX\ (Harald K\"onigs
+\XML\ torture and compatibility test). Contrary to what one might expect, the
 \CONTEXT\ run was significantly faster; the resulting document was a few
 gigabytes in size.
Because I only use \CONTEXT\ and occasionally test with the plain \TEX\ version that we provide, I will not explore the potential -impact of using truckloads of packages, styles and such, which I'm sure of plays -a role, but one neglected in the discussion. +impact of using truckloads of packages, styles, and such, which I'm sure of plays +a role, but one neglected in my discussion. \startsubsubject[title=Fonts] -According to the principles of \LUATEX\ we process (\OPENTYPE) fonts using \LUA. -That way we have complete control over any aspect of font handling, and can, as +According to the principles of \LUATEX, we process (\OPENTYPE) fonts using \LUA. +That way, we have complete control over any aspect of font handling, and can, as to be expected in \TEX\ systems, provide users what they need, now and in the -future. In fact, if we didn't had that freedom in \CONTEXT\ I'd probably already +future. In fact, if we didn't had that freedom in \CONTEXT, I'd probably already quit using \TEX\ a decade ago and found myself some other (programming) niche. -After a font is loaded, part of the data gets passed to the \TEX\ engine so that -it can do its work. For instance, in order to be able to typeset a paragraph, -\TEX\ needs to know the dimensions of glyphs. Once a font has been loaded -(that is, the binary blob) the next time it's fetched from a cache. Initial -loading (and preparation) takes some time, depending on the complexity or size of -the font. Loading from cache is close to instantaneous. After loading the -dimensions are passed to \TEX\ but all data remains accessible for any desired -usage. The \OPENTYPE\ feature processor for instance uses that data and \CONTEXT\ -for sure needs that data (fast accessible) for different purposes too. - -When a font is used in so called base mode, we let \TEX\ do the ligaturing and +After a font has been loaded, part of the data gets passed to the \TEX\ engine, +so that it can do its work. For instance, in order to be able to typeset a +paragraph, \TEX\ needs to know the dimensions of glyphs. Once a font has been +loaded (that is, the binary blob) it's fetched from a cache the next time. +Initial loading (and preparation) takes some time, depending on the complexity +and the size of the font. Loading from cache is close to instantaneous. After +loading, the dimensions are passed to \TEX\ but all data remains accessible for +any desired usage. The \OPENTYPE\ feature processor, for instance, uses that data +and \CONTEXT, for sure, needs that data (quickly accessible) for different +purposes, too. + +When a font is used in so|-|called base mode, we let \TEX\ do the ligaturing and kerning. This is possible with simple fonts and features. If you have a critical -workflow you might enable base mode, which can be done per font instance. -Processing in node mode takes some time but how much depends on the font and -script. Normally there is no difference between \CONTEXT\ and generic usage. In -\CONTEXT\ we also have dynamic features, and the impact on performance depends on -usage. In addition to base and node we also have plug mode but that is only used +workflow, you might enable base mode, which can be done per font instance. +Processing in node mode takes some time, but how much depends on the font and +script. Normally, there is no difference between \CONTEXT\ and generic usage. In +\CONTEXT, we also have dynamic features, and the impact on performance depends on +usage. 
In addition to base and node, we also have plug mode, but that is only used for testing and therefore not advertised. Every \type {\hbox} and every paragraph goes through the font handler. Because we support mixed modes, some analysis takes place, and because we do more in -\CONTEXT, the generic analyzer is more light weight, which again can mean that a +\CONTEXT, the generic analyzer is more lightweight, which again can mean that a generic run is not slower than a similar \CONTEXT\ one. Interesting is that added functionality for variable and|/|or color fonts had no -impact on performance. Runtime added user features can have some impact but when -defined well it can be neglected. I bet that when you add additional node list -handling yourself, its impact on performance is larger. But in the end what -counts is that the job gets done and the more you demand the higher the price you -pay. +impact on performance. Runtime|-|added user features can have some impact, but, +when defined well, it can be neglected. I bet that when you add additional node +list handling yourself, its impact on performance will be larger. But in the end +what counts is that the job gets done and the more you demand the higher the +price you pay. \stopsubsubject \startsubsubject[title=\LUA] The second possible bottleneck when using \LUATEX\ can be in using \LUA\ code. -However, using that as argument for slow runs is laughable. For instance -\CONTEXT\ \MKIV\ can easily spend half its time in \LUA\ and that is not making +However, using that is laughable as an argument for slow runs. For instance, +\CONTEXT\ \MKIV\ can easily spend half its time in \LUA, and that is not making it any slower than \MKII\ using \PDFTEX\ doing equally complex things. For -instance the embedded \METAPOST\ library makes \MKIV\ way faster than \MKII, and +instance, the embedded \METAPOST\ library makes \MKIV\ way faster than \MKII, and the built|-|in \XML\ processing capabilities in \MKIV\ can easily beat \MKII\ \XML\ handling, apart from the fact that it can do more, like filtering by path and expression. In fact, files that take, say, half a minute in \MKIV, could as well have taken 15 minutes or more in \MKII\ (and imagine multiple runs then). -So, for \CONTEXT\ using \LUA\ to achieve its objectives is mandate. The +So, for \CONTEXT, using \LUA\ to achieve its objectives is mandatory. The combination of \TEX, \METAPOST\ and \LUA\ is pretty powerful! Each of these components is really fast. If \TEX\ is your bottleneck, review your macros! When \LUA\ seems to be the bad, go over your code and make it better. Much of the -\LUA\ code I see flying around doesn't look that efficient, which is okay because +\LUA\ code I see flying around doesn't look that efficient, which is okay, because the interpreter is really fast, but don't blame \LUA\ beforehand, blame your coding (style) first. When \METAPOST\ is the bottleneck, well, sometimes not much -can be done about it, but when you know that language well enough you can often +can be done about it, but when you know that language well enough, you can often make it perform better. For the record: every additional mechanism that kicks in, like character spacing @@ -210,26 +213,26 @@ gets pretty well obscured by other things happening, just that you know. \startsection[title=Some timing] -Next I will show some timings related to fonts. For this I use stock \LUATEX\ -(second column) as well as \LUAJITTEX\ (last column) which of course performs -much better. 
The timings are given in 3 decimals but often (within a set of runs) -and as the system load is normally consistent in a set of test runs the last two -decimals only matter in relative comparison. So, for comparing runs over time -round to the first decimal. Let's start with loading a bodyfont. This happens -once per document and normally one has only one bodyfont active. Loading involves -definitions as well as setting up math so a couple of fonts are actually loaded, -even if they're not used later on. A setup normally involves a serif, sans, mono, -and math setup (in \CONTEXT). \footnote {The timing for Latin Modern is so low +Next, I will show some timings related to fonts. For this, I use stock \LUATEX\ +(second column) as well as \LUAJITTEX\ (last column), which, of course, performs +much better. The timings are rounded to three decimal places, but, as the system +load is usually only consistent in a set of test runs, the last two decimals only +matter in relative comparison. So, for comparing runs over time, round to the +first decimal. Let's start with loading a bodyfont. This happens once per +document, and one usually only has one bodyfont active. Loading involves +definitions as well as setting up math, so a couple of fonts are actually loaded +even if they're not used later on. A setup normally involves a serif, sans, mono +and math setup (in \CONTEXT). \footnote {The timing for Latin Modern is so low, because that font is loaded already.} \environment onandon-speed-000 \ShowSample{onandon-speed-000} % bodyfont -There is a bit difference between the font sets but a safe average is 150 milli -seconds and this is rather constant over runs. +There is a bit of a difference between the font sets, but a safe average is 150 +milliseconds, and this is rather constant over runs. -An actual font switch can result in loading a font but this is a one time overhead. +An actual font switch can result in loading a font, but this is a one|-|time overhead. Loading four variants (regular, bold, italic and bold italic) roughly takes the following time: @@ -239,34 +242,32 @@ Using them again later on takes no time: \ShowSample{onandon-speed-002} % four variants -Before we start timing the font handler, first a few baseline benchmarks are -shown. When no font is applied and nothing else is done with the node list we -get: +Before we start timing the font handler, a few baseline benchmarks are shown. +When no font is applied and nothing else is done with the node list, we get: \ShowSample{onandon-speed-009} -A simple monospaced, no features applied, run takes a bit more: +A simple monospaced, no|-|features|-|applied, run takes a bit more: \ShowSample{onandon-speed-010} -Now we show a one font typesetting run. As the two benchmarks before, we just -typeset a text in a \type {\hbox}, so no par builder interference happens. We use -the \type {sapolsky} sample text and typeset it 100 times 4 (either of not with -font switches). +Now, we show a one|-|font typesetting run. As with the two benchmarks before, we +just typeset a text in a \type {\hbox}, so no par builder interference happens. +We use the \type {sapolsky} sample text and typeset it 100 times 4, first without +font switches. \ShowSample{onandon-speed-003} -Much more runtime is needed when we typeset with four font switches. The garamond -is most demanding. Actually we're not doing 4 fonts there because it has no bold, -so the numbers are a bit lower than expected for this example. 
One reason for it -being demanding is that it has lots of (contextual) lookups. The only comment I -can make about that is that it also depends on the strategies of the font -designer. Combining lookups saves space and time so complexity of a font is not -always a good predictor for performance hits. +Much more runtime is needed when we typeset with four font switches. Ebgaramond +is the most demanding. Actually, we're not doing 4 fonts there because ebgaramond +has no bold, so the numbers are a bit lower than expected for this example. One +reason for it being demanding is that it has lots of (contextual) lookups. +Combining lookups saves space and time, so complexity of a font is not always a +good predictor for performance hits. % \ShowSample{onandon-speed-004} -If we typeset paragraphs we get this: +If we typeset paragraphs, we get the following: \ShowSample{onandon-speed-005} @@ -274,11 +275,11 @@ We're talking of some 275 pages here. \ShowSample{onandon-speed-006} -There is of course overhead in handling paragraphs and pages: +There is, of course overhead in handling paragraphs and pages: \ShowSample{onandon-speed-011} -Before I discuss these numbers in more details two more benchmarks are +Before I discuss these numbers in more detail, two more benchmarks are shown. The next table concerns a paragraph with only a few (bold) words. \ShowSample{onandon-speed-007} @@ -290,11 +291,11 @@ typeset using \type{\type}. When a node list (hbox or paragraph) is processed, each glyph is looked at. One important property of \LUATEX\ (compared to \PDFTEX) is that it hyphenates the -whole text, not only the most feasible spots. For the \type {sapolsky} snippet -this results in 200 potential breakpoints, registered in an equal number of -discretionary nodes. The snippet has 688 characters grouped into 125 words and -because it's an English quote we're not hampered with composed characters or -complex script handling. And, when we mention 100 runs then we actually mean +whole text, not only the most feasible spots. For the \type {sapolsky} snippet, +this results in 200 potential breakpoints registered in an equal number of +discretionary nodes. The snippet has 688 characters grouped into 125 words and, +because it's an English quote, we're not hampered with composed characters or +complex script handling. And, when we mention 100 runs, then we actually mean 400 ones when font switching and bodyfonts are compared \startnarrower @@ -302,7 +303,7 @@ complex script handling. And, when we mention 100 runs then we actually mean \input sapolsky \wordright{Robert M. Sapolsky} \stopnarrower -In order to get substitutions and positioning right we need not only to consult +In order to get substitutions and positioning right, we need not only to consult streams of glyphs but also combinations with preceding pre or replace, or trailing post and replace texts. When a font has a bit more complex substitutions, as ebgaramond has, multiple (sometimes hundreds of) passes over the list are made. @@ -312,15 +313,15 @@ Another factor, one you could easily deduce from the benchmarks, is intermediate font switches. Even a few such switches (in the last benchmarks) already result in a runtime penalty. The four switch benchmarks show an impressive increase of runtime, but it's good to know that such a situation seldom happens. It's also -important not to confuse for instance a verbatim snippet with a bold one. The +important not to confuse, for instance, a verbatim snippet with a bold one. 
The bold one is indeed leading to a pass over the list, but verbatim is normally -skipped because it uses a font that needs no processing. That verbatim or bold +skipped, because it uses a font that needs no processing. That verbatim or bold have the same penalty is mainly due to the fact that verbatim itself is costly: the text is picked up using a different catcode regime and travels through \TEX\ and \LUA\ before it finally gets typeset. This relates to special treatments of -spacing and syntax highlighting and such. +spacing, syntax highlighting, and such. -Also keep in mind that the page examples are quite unreal. We use a layout with +Also, keep in mind that the page examples are quite unreal. We use a layout with no margins, just text from edge to edge. \placefigure @@ -343,19 +344,19 @@ no margins, just text from edge to edge. {\SampleTitle{onandon-speed-011}} {\externalfigure[onandon-speed-011][frame=on,orientation=90,width=.45\textheight]} -So what is a realistic example? That is hard to say. Unfortunately no one ever -asked us to typeset novels. They are rather brain dead products for a machinery -so they process fast. On the mentioned laptop 350 word pages in Dejavu fonts can -be processed at a rate of 75 pages per second with \LUATEX\ and over 100 pages -per second with \LUAJITTEX . On a more modern laptop or professional server -performance is of course better. And for automated flows batch mode is your -friend. The rate is not much worse for a document in a language with a bit more -complex character handling, take accents or ligatures. Of course \PDFTEX\ is -faster on such a dumb document but kick in some more functionality and the -advantage quickly disappears. So, if someone complains that \LUATEX\ needs 10 or -more seconds for a simple few page document \unknown\ you can bet that when the -fonts are seen as reason, that the setup is pretty bad. Personally I'd not waste -time on such a complaint. +So, what is a realistic example? That is hard to say. Unfortunately, no one has +ever asked us to typeset novels. They are rather brain dead-products for a +machinery, so they process fast. On the mentioned laptop, 350 word pages in +Dejavu fonts can be processed at a rate of 75 pages per second with \LUATEX\ and +over 100 pages per second with \LUAJITTEX . On a more modern laptop or a +professional server, the performance is of course better. And, for automated +flows, batch mode is your friend. The rate is not much worse for a document in a +language with a bit more complex character handling, take accents or ligatures. +Of course, \PDFTEX\ is faster on such a dumb document, but kick in some more +functionality, and the advantage quickly disappears. So, if someone complains +that \LUATEX\ needs 10 or more seconds for a simple few page document \unknown\ +you can bet that when the fonts are seen as reason, then the setup is pretty bad. +Personally I would not waste time on such a complaint. \stopsection @@ -366,74 +367,75 @@ about the slowness of \LUATEX: \startsubsubject[title={What engines do you compare?}] -If you come from \PDFTEX\ you come from an 8~bit world: input and font handling -are based on bytes and hyphenation is integrated into the par builder. If you use -\UTF-8\ in \PDFTEX, the input is decoded by \TEX\ macros which carries a speed +If you come from \PDFTEX, you come from an 8-bit world: input and font handling +are based on bytes, and hyphenation is integrated into the par builder. 
If you use +\UTF-8\ in \PDFTEX, the input is decoded by \TEX\ macros, which carries a speed penalty. Because in the wide engines macro names can also be \UTF\ sequences, construction of macro names is less efficient too. -When you try to use wide fonts, again there is a penalty. Now, if you use \XETEX\ -or \LUATEX\ your input is \UTF-8 which becomes something 32 bit internally. Fonts -are wide so more resources are needed, apart from these fonts being larger and in -need of more processing due to feature handling. Where \XETEX\ uses a library, -\LUATEX\ uses its own handler. Does that have a consequence for performance? Yes -and no. First of all it depends on how much time is spent on fonts at all, but -even then the difference is not that large. Sometimes \XETEX\ wins, sometimes -\LUATEX. One thing is clear: \LUATEX\ is more flexible as we can roll out our own -solutions and therefore do more advanced font magic. For \CONTEXT\ it doesn't -matter as we use \LUATEX\ exclusively and rely on the flexible font handler, also -for future extensions. If really needed you can kick in a library based handler -but it's (currently) not distributed as we loose other functionality which in -turn would result in complaints about that fact (apart from conflicting with the -strive for independence). - -There is no doubt that \PDFTEX\ is faster but for \CONTEXT\ it's an obsolete -engine. The hard coded solutions engine \XETEX\ is also not feasible for -\CONTEXT\ either. So, in practice \CONTEXT\ users have no choice: \LUATEX\ is -used, but users of other macro packages can use the alternatives if they are not -satisfied with performance. The fact that \CONTEXT\ users don't complain about -speed is a clear signal that this is no issue. And, if you want more speed you -can use \LUAJITTEX. \footnote {In plug mode we can actually test a library and -experiments have shown that performance on the average is much worse but it can +When you try to use wide fonts, there is, again, a penalty. Now, if you use +\XETEX\ or \LUATEX, your input is \UTF-8, which becomes something 32-bit +internally. Fonts are wide, so more resources are needed, apart from these fonts +being larger and in need of more processing due to feature handling. Where +\XETEX\ uses a library, \LUATEX\ uses its own handler. Does that have a +consequence for performance? Yes and no. First of all, it depends on how much +time is spent on fonts at all, but even then, the difference is not that large. +Sometimes \XETEX\ wins, sometimes it's \LUATEX. One thing is clear: \LUATEX\ is +more flexible as we can roll out our own solutions and therefore do more advanced +font magic. For \CONTEXT, it doesn't matter as we use \LUATEX\ exclusively, and +we rely on the flexible font handler, also for future extensions. If really +needed, you can kick in a library-based handler but it's (currently) not +distributed as we lose other functionality, which would, in turn, result in +complaints about that fact (apart from conflicting with the strive for +independence). + +There is no doubt that \PDFTEX\ is faster, but, for \CONTEXT, it's an obsolete +engine. The hard-coded-solutions engine \XETEX\ is not feasible for \CONTEXT\ +either. So, in practice, \CONTEXT\ users have no choice: \LUATEX\ is used, but +users of other macro packages can use the alternatives if they are not satisfied +with performance. The fact that \CONTEXT\ users don't complain about speed is a +clear signal that this is a no|-|issue. And, if you want more speed, you can always +use \LUAJITTEX. 
\footnote {In plug mode, we can actually test a library and +experiments have shown that performance on the average is much worse, but it can be a bit better for complex scripts, although a gain gets unnoticed in normal documents. So, one can decide to use a library but at the cost of much other -functionality that \CONTEXT\ offers, so we don't support it.} In the last section +functionality that \CONTEXT\ offers, so we don't support it.} In the last section, the different engines will be compared in more detail. -Just that you know, when we do the four switches example in plain \TEX\ on my -laptop I get a rate of 40 pages per second, and for one font 180 pages per -second. There is of course a bit more going on in \CONTEXT\ in page building and -so, but the difference between plain and \CONTEXT\ is not that large. +Just that you know, when we do the four|-|switches example in plain \TEX\ on my +laptop, I get a rate of 40 pages per second, and, for one font, 180 pages per +second. There is, of course, a bit more going on in \CONTEXT\ in page building +and so, but the difference between plain and \CONTEXT\ is not that large. \stopsubsubject \startsubsubject[title={What macro package is used?}] -If the answer is that when plain \TEX\ is used, a follow up question is: what -variant? The \CONTEXT\ distribution ships with \type {luatex-plain} and that is -our benchmark. If there really is a bottleneck it is worth exploring. But keep in -mind that in order to be plain, not that much can be done. The \LUATEX\ part is -just an example of an implementation. We already discussed \CONTEXT, and for -\LATEX\ I don't want to speculate where performance hits might come from. When -we're talking fonts, \CONTEXT\ can actually a bit slower than the generic (or -\LATEX) variant because we can kick in more functionality. Also, when you compare -macro packages, keep in mind that when node list processing code is added in that -package the impact depends on interaction with other functionality and depends on -the efficiency of the code. You can't compare mechanisms or draw general -conclusions when you don't know what else is done! +When plain \TEX\ is used, a follow up question is: what variant? The \CONTEXT\ +distribution ships with \type {luatex-plain}, and that is our benchmark. If there +really is a bottleneck, it is worth exploring, but keep in mind that, in order to +be plain, not that much can be done. The \LUATEX\ part is just an example of an +implementation. We already discussed \CONTEXT, and for \LATEX, I don't want to +speculate where performance hits might come from. When we're talking fonts, +\CONTEXT\ can actually be a bit slower than the generic (or \LATEX) variant, because +we can kick in more functionality. Also, when you compare macro packages, keep in +mind that, when node list processing code is added in that package, the impact +depends on interaction with other functionality and depends on the efficiency of +the code. You can't compare mechanisms or draw general conclusions when you don't +know what else is done! \stopsubsubject \startsubsubject[title={What do you load?}] -Most \CONTEXT\ modules are small and load fast. Of course there can be exceptions -when we rely on third party code; for instance loading tikz takes a a bit of -time. It makes no sense to look for ways to speed that system up because it is -maintained elsewhere. There can probably be gained a bit but again, no user -complained so far. +Most \CONTEXT\ modules are small and load fast. 
Of course, there can be exceptions +when we rely on third party code; for instance, loading tikz takes a bit of +time. It makes no sense to look for ways to speed that system up, because it is +maintained elsewhere. There can probably be gained a bit, but, again, no user +has complained so far. -If \CONTEXT\ is not used, one probably also uses a large \TEX\ installations. -File lookup in \CONTEXT\ is done differently and can can be faster. Even loading +If \CONTEXT\ is not used, one probably also uses a large \TEX\ installation. +File lookup in \CONTEXT\ is done differently, and can be faster. Even loading can be more efficient in \CONTEXT, but it's hard to generalize that conclusion. If one complains about loading fonts being an issue, just try to measure how much time is spent on loading other code. @@ -443,36 +445,36 @@ time is spent on loading other code. \startsubsubject[title={Did you patch macros?}] Not everyone is a \TEX pert. So, coming up with macros that are expanded many -times and|/|or have inefficient user interfacing can have some impact. If someone -complains about one subsystem being slow, then honestly demands to complain about +times and|/|or have inefficient user interfacing, can have some impact. If someone +complains about one subsystem being slow, then honesty demands to complain about other subsystems as well. You get what you ask for. \stopsubsubject \startsubsubject[title={How efficient is the code that you use?}] -Writing super efficient code only makes sense when it's used frequently. In -\CONTEXT\ most code is reasonable efficient. It can be that in one document fonts -are responsible for most runtime, but in another document table construction can +Writing super|-|efficient code only makes sense when it's used frequently. In +\CONTEXT, most code is reasonable efficient. It can be that in one document fonts +are responsible for most runtime, but in another document, table construction can be more demanding while yet another document puts some stress on interactive -features. When hz or protrusion is enabled then you run substantially slower -anyway so when you are willing to sacrifice 10\% or more runtime don't complain -about other components. The same is true for enabling \SYNCTEX: if you are -willing to add more than 10\% runtime for that, don't wither about the same -amount for font handling. \footnote {In \CONTEXT\ we use a \SYNCTEX\ alternative -that is somewhat faster but it remains a fact that enabling more and more -functionality will make the penalty of for instance font processing relatively -small.} +features. When hz or protrusion is enabled, then you run substantially slower +anyway, so when you are willing to sacrifice 10 \% or more of runtime, don't +complain about other components. The same is true for enabling \SYNCTEX: if you +are willing to add more than 10 \% of runtime for that, don't wither about the +same amount for font handling. \footnote {In \CONTEXT, we use a \SYNCTEX\ +alternative that is somewhat faster, but it remains a fact that enabling more and +more functionality will make the penalty of, for instance, font processing +relatively small.} \stopsubsubject \startsubsubject[title={How efficient is the styling that you use?}] -Probably the most easily overseen optimization is in switching fonts and color. -Although in \CONTEXT\ font switching is fast, I have no clue about it in other -macro packages. 
But in a style you can decide to use inefficient (massive) font +Probably the most easily overlooked optimization is in switching fonts and colors. +Although in \CONTEXT, font switching is fast, I have no clue about it in other +macro packages. But in a style, you can decide to use inefficient (massive) font switches. The effects can easily be tested by commenting bit and pieces. For -instance sometimes you need to do a full bodyfont switch when changing a style, +instance, sometimes you need to do a full bodyfont switch when changing a style, like assigning \type {\small\bf} to the \type {style} key in \type {\setuphead}, but often using e.g.\ \type {\tfd} is much more efficient and works quite as well. Just try it. @@ -481,24 +483,24 @@ well. Just try it. \startsubsubject[title={Are fonts really the bottleneck?}] -We already mentioned that one can look in the wrong direction. Maybe once someone +We already mentioned that one can look in the wrong direction. Maybe, once someone is convinced that fonts are the culprit, it gets hard to look at the real issue. -If a similar job in different macro packages has a significant different runtime +If a similar job in different macro packages has a significantly different runtime, one can wonder what happens indeed. It is good to keep in mind that the amount of text is often not as large as you -think. It's easy to do a test with hundreds of paragraphs of text but in practice +think. It's easy to do a test with hundreds of paragraphs of text, but, in practice, we have whitespace, section titles, half empty pages, floats, itemize and similar -constructs, etc. Often we don't mix many fonts in the running text either. So, in -the end a real document is the best test. +constructs, etc. Often, we don't mix many fonts in the running text either. So, +in the end, a real document is your best test. \stopsubsubject \startsubsubject[title={If you use \LUA, is that code any good?}] You can gain from the faster virtual machine of \LUAJITTEX. Don't expect wonders -from the jitting as that only pays of for long runs with the same code used over -and over again. If the gain is high you can even wonder how well written your +from the jitting as that only pays off in long runs with the same code used over +and over again. If the gain is high, you can even wonder how well-written your \LUA\ code is anyway. \stopsubsubject @@ -506,18 +508,18 @@ and over again. If the gain is high you can even wonder how well written your \startsubsubject[title={What if they don't believe you?}] So, say that someone finds \LUATEX\ slow, what can be done about it? Just advice -him or her to stick to tool used previously. Then, if arguments come that one +them to stick to their previously|-|used tool. Then, if arguments come that one also wants to use \UTF-8, \OPENTYPE\ fonts, a bit of \METAPOST, and is looking forward to using \LUA\ runtime, the only answer is: take it or leave it. You pay -a price for progress, but if you do your job well, the price is not that large. -Tell them to spend time on learning and maybe adapting and bark against their own +a price for progress, but, if you do your job well, the price is not that high. +Tell them to spend time on learning and maybe adapting and to bark against their own tree before barking against those who took that step a decade ago. Most \CONTEXT\ users took that step and someone still using \LUATEX\ after a decade can't be that stupid. 
It's always best to first wonder what one actually asks from \LUATEX, and if the benefit of having \LUA\ on board has an advantage. If not, one can just use another engine. -Also think of this. When a job is slow, for me it's no problem to identify where +Also think of this: when a job is slow, for me it's no problem to identify where the problem is. The question then is: can something be done about it? Well, I happily keep the answer for myself. After all, some people always need room to complain, maybe if only to hide their ignorance or incompetence. Who knows. @@ -529,13 +531,13 @@ complain, maybe if only to hide their ignorance or incompetence. Who knows. \startsection[title={Comparing engines}] The next comparison is to be taken with a grain of salt and concerns the state of -affairs mid 2017. First of all, you cannot really compare \MKII\ with \MKIV: the -later has more functionality (or a more advanced implementation of -functionality). And as mentioned you can also not really compare \PDFTEX\ and the -wide engines. Anyway, here are some (useless) tests. First a bunch of loads. Keep +affairs mid-2017. First of all, you cannot really compare \MKII\ with \MKIV: the +latter has more functionality (or a more advanced implementation of +functionality). And, as mentioned, you can also not really compare \PDFTEX\ and the +wide engines. Anyway, here are some (useless) tests. First, a bunch of loads. Keep in mind that different engines also deal differently with reading files. For -instance \MKIV\ uses \LUATEX\ callbacks to normalize the input and has its own -readers. There is a bit more overhead in starting up a \LUATEX\ run and some +instance, \MKIV\ uses \LUATEX\ callbacks to normalize the input and has its own +readers. There is a bit more overhead in starting up a \LUATEX\ run, and some functionality is enabled that is not present in \MKII. The format is also larger, if only because we preload a lot of useful font, character and script related data. @@ -549,7 +551,7 @@ data. \stoptext \stoptyping -When looking at the numbers one should realize that the times include startup and +When looking at the numbers, one should realize that the times include startup and job management by the runner scripts. We also run in batchmode to avoid logging to influence runtime. The average is calculated from 5 runs. @@ -593,8 +595,7 @@ The second example does a few switches in a paragraph: \HL \stoptabulate -The third examples does a few more, resulting in multiple subranges -per style: +The third example does more, resulting in multiple subranges per style: \starttyping \starttext @@ -623,7 +624,7 @@ per style: The last example adds some color. Enabling more functionality can have an impact on performance. In fact, as \MKIV\ uses a lot of \LUA\ and is also more advanced -that \MKII, one can expect a performance hit but in practice the opposite +that \MKII, one can expect a performance hit, but, in practice, the opposite happens, which can also be due to some fundamental differences deep down at the macro level. @@ -654,20 +655,20 @@ macro level. \HL \stoptabulate -In these measurements the accuracy is a few decimals but a pattern is visible. As -expected \PDFTEX\ wins on simple documents but starts loosing when things get -more complex. For these tests I used 64 bit binaries. A 32 bit \XETEX\ with -\MKII\ performs the same as \LUAJITTEX\ with \MKIV, but a 64 bit \XETEX\ is -actually quite a bit slower. In that case the mingw cross compiled \LUATEX\ -version does pretty well. 
A 64 bit \PDFTEX\ is also slower (it looks) that a 32 -bit version. So in the end, there are more factors that play a role. Choosing -between \LUATEX\ and \LUAJITTEX\ depends on how well the memory limited +In these measurements, the accuracy is a few decimals, but a pattern is visible. +As expected, \PDFTEX\ wins on simple documents but starts losing when things get +more complex. For these tests, I used 64 bit binaries. A 32-bit \XETEX\ with +\MKII\ performs the same as \LUAJITTEX\ with \MKIV, but a 64-bit \XETEX\ is +actually quite a bit slower. In that case, the mingw cross|-|compiled \LUATEX\ +version does pretty well. A 64-bit \PDFTEX\ is also slower (it looks) than a +32-bit version. So, in the end, there are more factors that play a role. Choosing +between \LUATEX\ and \LUAJITTEX\ depends on how well the memory|-|limited \LUAJITTEX\ variant can handle your documents and fonts. Because in most of our recent styles we use \OPENTYPE\ fonts and (structural) -features as well as recent \METAFUN\ extensions only present in \MKIV\ we cannot +features as well as recent \METAFUN\ extensions only present in \MKIV, we cannot compare engines using such documents. The mentioned performance of \LUATEX\ (or -\LUAJITTEX) and \MKIV\ on the \METAFUN\ manual illustrate that in most cases this +\LUAJITTEX) and \MKIV\ on the \METAFUN\ manual illustrate that, in most cases, this combination is a clear winner. \starttyping @@ -703,8 +704,8 @@ That leaves the zero run: \stoptext \stoptyping -This gives the following numbers. In longer runs the difference in overhead is -neglectable. +This gives the following numbers. In longer runs, the difference in overhead is +negligible. % sample 6, number of runs: 5 @@ -719,31 +720,31 @@ neglectable. \HL \stoptabulate -It will be clear that when we use different fonts the numbers will also be -different. And if you use a lot of runtime \METAPOST\ graphics (for instance for -backgrounds), the \MKIV\ runs end up at the top. And when we process \XML\ it +It will be clear that when we use different fonts, the numbers will also be +different. And, if you use a lot of runtime \METAPOST\ graphics (for instance for +backgrounds), the \MKIV\ runs end up at the top. And, when we process \XML, it will be clear that going back to \MKII\ is no longer a realistic option. It must -be noted that I occasionally manage to improve performance but we've now reached +be noted that I occasionally manage to improve performance, but we've now reached a state where there is not that much to gain. Some functionality is hard to -compare. For instance in \CONTEXT\ we don't use much of the \PDF\ backend -features because we implement them all in \LUA. In fact, even in \MKII\ already a -done in \TEX, so in the end the speed difference there is not large and often in +compare. For instance, in \CONTEXT, we don't use much of the \PDF\ backend +features because we implement them all in \LUA. In fact, even in \MKII, already +done in \TEX, so in the end, the speed difference there is not large and often in favour of \MKIV. -For the record I mention that shipping out the about 1250 pages has some overhead -too: about 2 seconds. Here \LUAJITTEX\ is 20\% more efficient which is an +For the record, I mention that shipping out the about 1250 pages has some overhead +too: about 2 seconds. Here, \LUAJITTEX\ is 20\% more efficient, which is an indication of quite some \LUA\ involvement. Loading the input files has an -overhead of about half a second. 
Starting up \LUATEX\ takes more time that +overhead of about half a second. Starting up \LUATEX\ takes more time than \PDFTEX\ and \XETEX, but that disadvantage disappears with more pages. So, in the -end there are quite some factors that blur the measurements. In practice what -matters is convenience: does the runtime feel reasonable and in most cases it +end, there are quite some factors that blur the measurements. In practice, what +matters is convenience: does the runtime feel reasonable and, in most cases, it does. -If I would replace my laptop with a reasonable comparable alternative that one +If I would replace my laptop with a reasonable comparable alternative, that one would be some 35\% faster (single threads on processors don't gain much per year). -I guess that this is about the same increase in performance that \CONTEXT\ -\MKIV\ got in that period. I don't expect such a gain in the coming years so -at some point we're stuck with what we have. +I guess that this is about the same increase in performance than \CONTEXT\ +\MKIV\ got in that period. I don't expect such a gain in the upcoming years, so, +at some point, we're stuck with what we have. \stopsection @@ -754,29 +755,29 @@ go back in time to when the first wide engines showed up, \OMEGA\ was considered to be slow, although I never tested that myself. Then, when \XETEX\ showed up, there was not much talk about speed, just about the fact that we could use \OPENTYPE\ fonts and native \UTF\ input. If you look at the numbers, for sure you -can say that it was much slower than \PDFTEX. So how come that some people +can say that it was much slower than \PDFTEX. So, how come that some people complain about \LUATEX\ being so slow, especially when we take into account that -it's not that much slower than \XETEX, and that \LUAJITTEX\ is often faster that -\XETEX. Also, computers have become faster. With the wide engines you get more +it's not that much slower than \XETEX, and that \LUAJITTEX\ is often faster than +\XETEX. Also, computers have become faster. With the wide engines, you get more functionality and that comes at a price. This was accepted for \XETEX\ and is also acceptable for \LUATEX. But the price is nto that high if you take into account that hardware performs better: you just need to compare \LUATEX\ (and \XETEX) runtime with \PDFTEX\ runtime 15 years ago. As a comparison, look at games and video. Resolution became much higher as did -color depth. Higher frame rates were in demand. Therefore the hardware had to -become faster and it did, and as a result the user experience kept up. No user +color depth. Higher frame rates were in demand. Therefore, the hardware had to +become faster, and it did, and, as a result, the user experience kept up. No user will say that a modern game is slower than an old one, because the old one does 500 frames per second compared to some 50 for the new game on the modern hardware. In a similar fashion, the demands for typesetting became higher: \UNICODE, \OPENTYPE, graphics, \XML, advanced \PDF, more complex (niche) typesetting, etc. This happened more or less in parallel with computers becoming more powerful. So, as with games, the user experience didn't degrade with -demands. Comparing \LUATEX\ with \PDFTEX\ is like comparing a low res, low frame -rate, low color game with a modern one. You need to have up to date hardware and -even then, the writer of such programs need to make sure it runs efficient, -simply because hardware no longer scales like it did decades ago. 
You need to -look at the larger picture. +demands. Comparing \LUATEX\ with \PDFTEX\ is like comparing a low|-|res, +low|-|framerate, low|-|color game with a modern one. You need to have +up|-|to|-|date hardware and even then, the writer of such programs needs to make +sure that they run efficiently, simply because hardware no longer scales like it +did decades ago. You need to look at the bigger picture. \stopsection diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-runtoks.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-runtoks.tex new file mode 100644 index 00000000000..b3adeb4a5db --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-runtoks.tex @@ -0,0 +1,531 @@ +% language=uk + +\startcomponent onandon-amputating + +\environment onandon-environment + +\startchapter[title={Amputating code}] + +\startsection[title={Introduction}] + +Because \CONTEXT\ is already rather old in terms of software life and because it +evolves over time, code can get replaced by better code. Reasons for this can be: + +\startitemize[packed] +\startitem a better understanding of the way \TEX\ and \METAPOST\ work \stopitem +\startitem demand for more advanced options \stopitem +\startitem a brainwave resulting in a better solution \stopitem +\startitem new functionality provided in \TEX\ engine used \stopitem +\startitem the necessity to speed up a core process \stopitem +\stopitemize + +Replacing code that in itself does a good job but is no longer the best to be +used comes with sentiments. It can be rather satisfying to cook up a +(conceptually as well as codewise) good solution and therefore removing code from +a file can result in a somewhat bad feeling and even a feeling of losing +something. Hence the title of this chapter. + +Here I will discuss one of the more complex subsystems: the one dealing with +typeset text in \METAPOST\ graphics. I will stick to the principles and not +present (much) code as that can be found in archives. This is not a tutorial, +but more a sort of wrap|-|up for myself. It anyhow show the thinking behind +this mechanism. I'll also introduce a new \LUATEX\ feature here: subruns. + +\stopsection + +\startsection[title={The problem}] + +\METAPOST\ is meant for drawing graphics and adding text to them is not really +part of the concept. Its a bit like how \TEX\ sees images: the dimensions matter, +the content doesn't. This means that in \METAPOST\ a blob of text is an +abstraction. The native way to create a typeset text picture is: + +\starttyping +picture p ; p := btex some text etex ; +\stoptyping + +In traditional \METAPOST\ this will create a temporary \TEX\ file with the words +\type {some text} wrapped in a box that when typeset is just shipped out. The +result is a \DVI\ file that with an auxiliary program will be transformed into a +\METAPOST\ picture. That picture itself is made from multiple pictures, because +each sequences of characters becomes a picture and kerns become shifts. + +There is also a primitive \type {infont} that takes a text and just converts it +into a low level text object but no typesetting is done there: so no ligatures +and no kerns are found there. In \CONTEXT\ this operator is redefined to do the +right thing. + +In both cases, what ends up in the \POSTSCRIPT\ file is references to fonts and +characters and the original idea is that \DVIPS\ understands what +fonts to embed. 
Details are communicated via specials (comments) that \DVIPS\ is +supposed to intercept and understand. This all happens in an 8~bit (font) universe. + +When we moved on to \PDF, a converter from \METAPOST's rather predictable and +simple \POSTSCRIPT\ code to \PDF\ was written in \TEX. The graphic operators +became \PDF\ operators and the text was retypeset using the font information and +snippets of strings and injected at the right spot. The only complication was +that a non circular pen actually produced two path of which one has to be +transformed. + +At that moment it already had become clear that a more tight integration in +\CONTEXT\ would happen and not only would that demand a more sophisticated +handling of text, but it would also require more features not present in +\METAPOST, like dealing with \CMYK\ colors, special color spaces, transparency, +images, shading, and more. All this was implemented. In the next sections we will +only discuss texts. + +\stopsection + +\startsection[title={Using the traditional method}] + +The \type {btex} approach was not that flexible because what happens is that +\type {btex} triggers the parser to just grabbing everything upto the \type +{etex} and pass that to an external program. It's special scanner mode and +because because of that using macros for typesetting texts is a pain. So, instead +of using this method in \CONTEXT\ we used \type {textext}. Before a run the +\METAPOST\ file was scanned and for each \type {textext} the argument was copied +to a file. The \type {btex} calls were scanned to and replaced by \type {textext} +calls. + +For each processed snippet the dimensions were stored in order to be loaded at +the start of the \METAPOST\ run. In fact, each text was just a rectangle with +certain dimensions. The \PDF\ converter would use the real snippet (by +typesetting it). + +Of course there had to be some housekeeping in order to make sure that the right +snippets were used, because the order of definition (as picture) can be different +from them being used. This mechanism evolved into reasonable robust text handling +but of course was limited by the fact that the file was scanned for snippets. So, +the string had to be string and not assembled one. This disadvantage was +compensated by the fact that we could communicate relevant bits of the +environment and apply all the usual context trickery in texts in a way that was +consistent with the rest of the document. + +A later implementation could communicate the text via specials which is more +flexible. Although we talk of this method in the past sense it is still used in +\MKII. + +\stopsection + +\startsection[title={Using the library}] + +When the \MPLIB\ library showed up in \LUATEX, the same approach was used but +soon we moved on to a different approach. We already used specials to communicate +extensions to the backend, using special colors and fake objects as signals. But +at that time paths got pre- and postscripts fields and those could be used to +really carry information with objects because unlike specials, they were bound to +that object. So, all extensions using specials as well as texts were rewritten to +use these scripts. + +The \type {textext} macro changed its behaviour a bit too. Remember that a +text effectively was just a rectangle with some transformation applied. However +this time the postscript field carried the text and the prescript field some +specifics, like the fact that that we are dealing with text. 
Using the script made +it possible to carry some more inforation around, like special color demands. + +\starttyping +draw textext("foo") ; +\stoptyping + +Among the prescripts are \typ {tx_index=trial} and \typ {tx_state=trial} +(multiple prescripts are prepended) and the postscript is \type {foo}. In a +second run the prescript is \type {tx_index=trial} and \typ {tx_state=final}. +After the first run we analyze all objects, collect the texts (those with a \type +{tx_} variables set) and typeset them. As part of the second run we pass the +dimensions of each indexed text snippet. Internally before the first run we +\quote {reset} states, then after the first run we \quote {analyze}, and after +the second run we \quote {process} as part of the conversion of output to \PDF. + +\stopsection + +\startsection[title={Using \type {runscript}}] + +When the \type {runscript} feature was introduced in the library we no longer +needed to pass the dimensions via subscripted variables. Instead we could just +run a \LUA\ snippets and ask for the dimensions of a text with some index. This +is conceptually not much different but it saves us creating \METAPOST\ code that +stored the dimensions, at the cost of potentially a bit more runtime due to the +\type {runscript} calls. But the code definitely looks a bit cleaner this way. Of +course we had to keep the dimensions at the \LUA\ end but we already did that +because we stored the preprocessed snippets for final usage. + +\stopsection + +\startsection[title={Using a sub \TEX\ run}] + +We now come the current (post \LUATEX\ 1.08) solution. For reasons I will +mention later a two pass approach is not optimal, but we can live with that, +especially because \CONTEXT\ with \METAFUN\ (which is what we're talking about +here) is quit efficient. More important is that it's kind of ugly to do all the +not that special work twice. In addition to text we also have outlines, graphics +and more mechanisms that needed two passes and all these became one pass +features. + +A \TEX\ run is special in many ways. At some point after starting up \TEX\ +enters the main loop and begins reading text and expanding macros. Normally you +start with a file but soon a macro is seen, and a next level of input is entered, +because as part of the expansion more text can be met, files can be opened, +other macros be expanded. When a macro expands a token register, another level is +entered and the same happens when a \LUA\ call is triggered. Such a call can +print back something to \TEX\ and that has to be scanned as if it came from a +file. + +When token lists (and macros) get expanded, some commands result in direct +actions, others result in expansion only and processing later as one of more +tokens can end up in the input stack. The internals of the engine operate in +miraculous ways. All commands trigger a function call, but some have their own +while others share one with a switch statement (in \CCODE\ speak) because they +belong to a category of similar actions. Some are expanded directly, some get +delayed. + +Does it sound complicated? Well, it is. It's even more so when you consider that +\TEX\ uses nesting, which means pushing and popping local assignments, knows +modes, like horizontal, vertical and math mode, keeps track of interrupts and at +the same type triggers typesetting, par building, page construction and flushing +to the output file. 
+ +It is for this reason plus the fact that users can and will do a lot to influence +that behaviour that there is just one main loop and in many aspects global state. +There are some exceptions, for instance when the output routine is called, which +creates a sort of closure: it interrupts the process and for that reason gets +grouping enforced so that it doesn't influence the main run. But even then the +main loop does the job. + +Starting with version 1.10 \LUATEX\ provides a way to do a local run. There are +two ways provided: expanding a token register and calling a \LUA\ function. It +took a bit of experimenting to reach an implementation that works out reasonable +and many variants were tried. In the appendix we give an example of usage. + +The current variant is reasonable robust and does the job but care is needed. +First of all, as soon as you start piping something to \TEX\ that gets typeset +you'd better in a valid mode. If not, then for instance glyphs can end up in a +vertical list and \LUATEX\ will abort. In case you wonder why we don't intercept +this: we can't because we don't know the users intentions. We cannot enforce a +mode for instance as this can have side effects, think of expanding \type +{\everypar} or injecting an indentation box. Also, as soon as you start juggling +nodes there is no way that \TEX\ can foresee what needs to be copied to +discarded. Normally it works out okay but because in \LUATEX\ you can cheat in +numerous ways with \LUA, you can get into trouble. + +So, what has this to do with \METAPOST ? Well, first of all we could now use a +one pass approach. The \type {textext} macro calls \LUA, which then let \TEX\ do +some typesetting, and then gives back the dimensions to \METAPOST. The \quote +{analyze} phase is now integrated in the run. For a regular text this works quite +well because we just box some text and that's it. However, in the next section we +will see where things get complicated. + +Let's summarize the one pass approach: the \type {textext} macro creates +rectangle with the right dimensions and for doing passes the string to \LUA\ +using \type {runscript}. We store the argument of \type {textext} in a variable, +then call \type {runtoks}, which expands the given token list, where we typeset a +box with the stored text (that we fetch with a \LUA\ call), and the \type +{runscript} passes back the three dimensions as fake \RGB\ color to \METAPOST\ +which applies a \type {scantokens} to the result. So, in principle there is no +real conceptual difference except that we now analyze in|-|place instead of +between runs. I will not show the code here because in \CONTEXT\ we use a wrapper +around \type {runscript} so low level examples won't run well. + +\stopsection + +\startsection[title={Some aspects}] + +An important aspect of the text handling is that the whole text can be +transformed. Normally this is only some scaling but rotation is also quite valid. +In the first approach, the original \METAPOST\ one, we have pictures constructed +of snippets and pictures transform well as long as the backend is not too +confused, something that can happen when for instance very small or large font +scales are used. There were some limitations with respect to the number of fonts +and efficient inclusion when for instance randomization was used (I remember +cases with thousands of font instances). The \PDF\ backend could handle most +cases well, by just using one size and scaling at the \PDF\ level. 
All the \type +{textext} approaches use rectangles as stubs which is very efficient and permits +all transforms. + +How about color? Think of this situation: + +\starttyping +\startMPcode + draw textext("some \color[red]{text}") + withcolor green ; +\stopMPcode +\stoptyping + +And what about the document color? We suffice by saying that this is all well +supported. Of course using transparency, spot colors etc.\ also needs extensions. +These are however not directly related to texts although we need to take it into +account when dealing with the inclusion. + +\starttyping +\startMPcode + draw textext("some \color[red]{text}") + withcolor "blue" + withtransparency (1,0.5) ; +\stopMPcode +\stoptyping + +What if you have a graphic with many small snippets of which many have the same +content? These are by default shared, but if needed you can disable it. This makes +sense if you have a case like this: + +\starttyping +\useMPlibrary[dum] + +\startMPcode + draw textext("\externalfigure[unknown]") notcached ; + draw textext("\externalfigure[unknown]") notcached ; +\stopMPcode +\stoptyping + +Normally each unknown image gets a nice placeholder with some random properties. +So, do we want these two to have the same or not? At least you can control it. + +When I said that things can get complicated with the one pass approach the +previous code snippet is a good example. The dummy figure is generated by +\METAPOST. So, as we have one pass, and jump temporarily back to \TEX, +we have two problems: we reenter the \MPLIB\ instance again in the middle of +a run, and we might pipe back something to and|/|or from \TEX\ nested. + +The first problem could be solved by starting a new \MPLIB\ session. This +normally is not a problem as both runs are independent of each other. In +\CONTEXT\ we can have \METAPOST\ runs in many places and some produce some more +of less stand alone graphic in the text while other calls produce \PDF\ code in +the backend that is used in a different way (for instance in a font). In the +first case the result gets nicely wrapped in a box, while in the second case it +might directly end up in the page stream. And, as \TEX\ has no knowledge of what +is needed, it's here that we can get the complications that can lead to aborting +a run when you are careless. But in any case, if you abort, then you can be sure +you're doing the wrong thing. So, the second problem can only be solved by +careful programming. + +When I ran the test suite on the new code, some older modules had to be fixed. +They were doing the right thing from the perspective of intermediate runs and +therefore independent box handling, putting a text in a box and collecting +dimensions, but interwoven they demanded a bit more defensive programming. For +instance, the multi|-|pass approach always made copies snippets while the one +pass approach does that only when needed. And that confused some old code in a +module, which incidentally is never used today because we have better +functionality built|-|in (the \METAFUN\ \type {followtext} mechanism). + +The two pass approach has special code for cases where a text is not used. +Imagine this: + +\starttyping +picture p ; p := textext("foo") ; + +draw boundingbox p; +\stoptyping + +Here the \quote {analyze} stage will never see the text because we don't flush p. +However because \type {textext} is called it can also make sure we still know the +dimensions. In the next case we do use the text but in two different ways. 
These +subtle aspects are dealt with properly and could be made a it simpler in the +single pass approach. + +\starttyping +picture p ; p := textext("foo") ; + +draw p rotated 90 withcolor red ; +draw p withcolor green ; +\stoptyping + +\stopsection + +\startsection[title=One or two runs] + +So are we better off now? One problem with two passes is that if you use the +equation solver you need to make sure that you don't run into the redundant +equation issue. So, you need to manage your variables well. In fact you need to +do that anyway because you can call out to \METAPOST\ many times in a run so old +variables can interfere anyway. So yes, we're better off here. + +Are we worse off now? The two runs with in between the text processing is very +robust. There is no interference of nested runs and no interference of nested +local \TEX\ calls. So, maybe we're also bit worse off. You need to anyhow keep +this in mind when you write your own low level \TEX|-|\METAPOST\ interaction +trickery, but fortunately now many users do that. And if you did write your own +plugins, you now need to make them single pass. + +The new code is conceptually cleaner but also still not trivial because due to +the mentioned complications. It's definitely less code but somehow amputating the +old code does hurt a bit. Maybe I should keep it around as reference of how text +handling evolved over a few decades. + +\stopsection + +\startsection[title=Appendix] + +Because the single pass approach made me finally look into a (although somewhat +limited) local \TEX\ run, I will show a simple example. For the sake of +generality I will use \type {\directlua}. Say that you need the dimensions of a +box while in \LUA: + +\startbuffer +\directlua { + tex.sprint("result 1: <") + + tex.sprint("\\setbox0\\hbox{one}") + tex.sprint("\\number\\wd0") + + tex.sprint("\\setbox0\\hbox{\\directlua{tex.print{'first'}}}") + tex.sprint(",") + tex.sprint("\\number\\wd0") + + tex.sprint(">") +} +\stopbuffer + +\typebuffer \getbuffer + +This looks ok, but only because all printed text is collected and pushed into a +new input level once the \LUA\ call is done. So take this then: + +\startbuffer +\directlua { + tex.sprint("result 2: <") + + tex.sprint("\\setbox0\\hbox{one}") + tex.sprint(tex.getbox(0).width) + + tex.sprint("\\setbox0\\hbox{\\directlua{tex.print{'first'}}}") + tex.sprint(",") + tex.sprint(tex.getbox(0).width) + + tex.sprint(">") +} +\stopbuffer + +\typebuffer \getbuffer + +This time we get the widths of the box known at the moment that we are in \LUA, +but we haven't typeset the content yet, so we get the wrong dimensions. 
This +however will work okay: + +\startbuffer +\toks0{\setbox0\hbox{one}} +\toks2{\setbox0\hbox{first}} +\directlua { + tex.forcehmode(true) + + tex.sprint("<") + + tex.runtoks(0) + tex.sprint(tex.getbox(0).width) + + tex.runtoks(2) + tex.sprint(",") + tex.sprint(tex.getbox(0).width) + + tex.sprint(">") +} +\stopbuffer + +\typebuffer \getbuffer + +as does this: + +\startbuffer +\toks0{\setbox0\hbox{\directlua{tex.sprint(MyGlobalText)}}} +\directlua { + tex.forcehmode(true) + + tex.sprint("result 3: <") + + MyGlobalText = "one" + tex.runtoks(0) + tex.sprint(tex.getbox(0).width) + + MyGlobalText = "first" + tex.runtoks(0) + tex.sprint(",") + tex.sprint(tex.getbox(0).width) + + tex.sprint(">") +} +\stopbuffer + +\typebuffer \getbuffer + +Here is a variant that uses functions: + +\startbuffer +\directlua { + tex.forcehmode(true) + + tex.sprint("result 4: <") + + tex.runtoks(function() + tex.sprint("\\setbox0\\hbox{one}") + end) + tex.sprint(tex.getbox(0).width) + + tex.runtoks(function() + tex.sprint("\\setbox0\\hbox{\\directlua{tex.print{'first'}}}") + end) + tex.sprint(",") + tex.sprint(tex.getbox(0).width) + + tex.sprint(">") +} +\stopbuffer + +\typebuffer \getbuffer + +The \type {forcemode} is needed when you do this in vertical mode. Otherwise the +run aborts. Of course you can also force horizontal mode before the call. I'm +sure that users will be surprised by side effects when they really use this +feature but that is to be expected: you really need to be aware of the subtle +interference of input levels and mix of input media (files, token lists, macros +or \LUA) as well as the fact that \TEX\ often looks one token ahead, and often, +when forced to typeset something, also can trigger builders. You're warned. + +\stopsection + +\stopchapter + +\stopcomponent + +% \starttext + +% \toks0{\hbox{test}} [\ctxlua{tex.runtoks(0)}]\par + +% \toks0{\relax\relax\hbox{test}\relax\relax}[\ctxlua{tex.runtoks(0)}]\par + +% \toks0{xxxxxxx} [\ctxlua{tex.runtoks(0)}]\par + +% \toks0{\hbox{(\ctxlua{context("test")})}} [\ctxlua{tex.runtoks(0)}]\par + +% \toks0{\global\setbox1\hbox{(\ctxlua{context("test")})}} [\ctxlua{tex.runtoks(0)}\box1]\par + +% \startluacode +% local s = "[\\ctxlua{tex.runtoks(0)}\\box1]" +% context("<") +% context( function() context(s) end) +% context( function() context(s) end) +% context(">") +% \stopluacode\par + +% \toks10000{\hbox{\red test1}} +% \toks10002{\green\hbox{test2}} +% \toks10004{\hbox{\global\setbox1\hbox to 1000sp{\directlua{context("!4!")}}}} +% \toks10006{\hbox{\global\setbox3\hbox to 2000sp{\directlua{context("?6?")}}}} +% \hbox{x\startluacode +% local s0 = "(\\hbox{\\ctxlua{tex.runtoks(10000)}})" +% local s2 = "[\\hbox{\\ctxlua{tex.runtoks(10002)}}]" +% context("<!") +% -- context( function() context(s0) end) +% -- context( function() context(s0) end) +% -- context( function() context(s2) end) +% context(s0) +% context(s0) +% context(s2) +% context("<") +% tex.runtoks(10004) +% context("X") +% tex.runtoks(10006) +% context(tex.box[1].width) +% context("/") +% context(tex.box[3].width) +% context("!>") +% \stopluacode x}\par + + diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-variable.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-variable.tex index c73196cef62..c308864e68e 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-variable.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon-variable.tex @@ -12,7 +12,7 @@ 
\startsubject[title=Introduction] -History shows the tendency to recycle ideas. Often quite some effort is made by +History shows the tendency to recycle ideas. Often, quite some effort is made by historians to figure out what really happened, not just long ago, when nothing was written down and we have to do with stories or pictures at most, but also in recent times. Descriptions can be conflicting, puzzling, incomplete, partially @@ -20,64 +20,64 @@ lost, biased, \unknown Just as language was invented (or evolved) several times, so were scripts. The same might be true for rendering scripts on a medium. Semaphores came and went -within decades and how many people know now that they existed and that encryption +within decades, and how many people know now that they existed and that encryption was involved? Are the old printing presses truly the old ones, or are older examples simply gone? One of the nice aspects of the internet is that one can now -more easily discover similar solutions for the same problem, but with a different +more easily discover similar solutions to the same problem but with a different (and independent) origin. -So, how about this \quotation {new big thing} in font technology: variable fonts. -In this case, history shows that it's not that new. For most \TEX\ users the -names \METAFONT\ and \METAPOST\ will ring bells. They have a very well documented -history so there is not much left to speculation. There are articles, books, +So, how about this \quotation {next big thing} in font technology: variable fonts? +In this case, history shows that it's not that new. For most \TEX\ users, the +names \METAFONT\ and \METAPOST\ will ring bells. They have a very well-documented +history, so there is not much left to speculation. There are articles, books, pictures, examples, sources, and more around for decades. So, the ability to change the appearance of a glyph in a font depending on some parameters is not new. What probably {\em is} new is that creating variable fonts is done in the -natural environment where fonts are designed: an interactive program. The -\METAFONT\ toolkit demands quite some insight in programming shapes in such a way +natural environment, where fonts are designed: an interactive program. The +\METAFONT\ toolkit demands quite some insight into programming shapes in such a way that one can change look and feel depending on parameters. There are not that -many meta fonts made and one reason is that making them requires a certain mind- -and skill set. On the other hand, faster computers, interactive programs, -evolving web technologies, where rea|l|-time rendering and therefore more or less +many metafonts made, and one reason is that making them requires a certain mind- +and skillset. On the other hand, faster computers, interactive programs, +evolving web technologies, where real-time rendering and therefore more or less real-time tweaking of fonts is a realistic option, all play a role in acceptance. -But do interactive font design programs make this easier? You still need to be -able to translate ideas into usable beautiful fonts. Taking the common shapes of -glyphs, defining extremes and letting a program calculate some interpolations -will not always bring good results. It's like morphing a picture of your baby's -face into yours of old age (or that of your grandparent): not all intermediate -results will look great. It's good to notice that variable fonts are a revival of -existing techniques and ideas used in, for instance, multiple master fonts. 
The -details might matter even more as they can now be exaggerated when some -transformation is applied. - -There is currently (March 2017) not much information about these fonts so what I +But do interactive font design programs make this easier? You still need to +translate ideas into usable beautiful fonts. Taking the common shapes of glyphs, +defining extremes and letting a program calculate some interpolations will not +always give good results. It's like morphing a picture of your baby's face into +yours of old age or that of your grandparent: not all intermediate results will +look great. It's good to notice that variable fonts are a revival of existing +techniques and ideas used in, for instance, multiple master fonts. The details +might matter even more as they can now be exaggerated when transformations are +applied. + +There is currently (March 2017) not much information about these fonts, so what I say next may be partially wrong or at least different from what is intended. The -perspective will be one from a \TEX\ user and coder. Whatever you think of them, -these fonts will be out there and for sure there will be nice examples -circulating soon. And so, when I ran into a few experimental fonts, with +perspective will be one of a \TEX\ user and coder. Whatever you think of them, +these fonts will be out there, and, for sure, there will be nice examples +circulating soon. And so, when I ran into a few experimental fonts with \POSTSCRIPT\ and \TRUETYPE\ outlines, I decided to have a look at what is inside. -After all, because it's visual, it's also fun to play with. Let's stress that at -the moment of this writing I only have a few simple fonts available, fonts that -are designed for testing and not usage. Some recommended tables were missing and -no complex \OPENTYPE\ features are used in these fonts. +After all, because it's visual, it's also fun to play with. Let's stress that, at +the moment of this writing, I only have a few simple fonts available, fonts that +are designed for testing and not for usage. Some recommended tables were missing +and no complex \OPENTYPE\ features were used in these fonts. \stopsubject \startsubject[title=The specification] I'm not that good at reading specifications, first of all because I quickly fall -asleep with such documents, but most of all because I prefer reading other stuff +asleep with such documents, but mostly because I prefer reading other stuff (I do have lots of books waiting to be read). I'm also someone who has to play with something in order to understand it: trial and error is my modus operandi. -Eventually it's my intended usage that drives the interface and that is when +Eventually, it's my intended usage that drives the interface and that is when everything comes together. Exploring this technology comes down to: locate a font, get the \OPENTYPE\ 1.8 specification from the \MICROSOFT\ website, and try to figure out what is in the -font. When I had a rough idea the next step was to get to the shapes and see if I -could manipulate them. Of course it helped that in \CONTEXT\ we already can load -fonts and play with shapes (using \METAPOST). I didn't have to install and learn +font. When I had a rough idea, the next step was to get to the shapes and see if +I could manipulate them. Of course, it helped that we can already load fonts and +play with shapes in \CONTEXT using \METAPOST. I didn't have to install and learn other programs. 
Once I could render them, in this case by creating a virtual font with inline \PDF\ literals, a next step was to apply variation. Then came the first experiments with a possible user interface. Seeing more variation then @@ -88,18 +88,18 @@ The main extension to the data packaged in a font file concerns the (to be discussed) axis along which variable fonts operate and deltas to be applied to coordinates. The \type {gdef} table has been extended and contains information that is used in \type {gpos} features. There are new \type {hvar}, \type {vvar} -and \type {mvar} tables that influence the horizontal, vertical and general font +and \type {mvar} tables that influence the horizontal, vertical, and general font dimensions. The \type {gvar} table is used for \TRUETYPE\ variants, while the \type {cff2} table replaces the \type {cff} table for \OPENTYPE\ \POSTSCRIPT\ outlines. The \type {avar} and \type {stat} tables contain some -meta|-|information about the axes of variations. +metainformation about the axes of variations. -It must be said that because this is new technology the information in the +It must be said that, because this is a new technology, the information in the standard is not always easy to understand. The fact that we have two rendering techniques, \POSTSCRIPT\ \type {cff} and \TRUETYPE\ \type {ttf}, also means that we have different information and perspectives. But this situation is not much -different from \OPENTYPE\ standards a few years ago: it takes time but in the end -I will get there. And, after all, users also complain about the lack of +different from \OPENTYPE\ standards a few years ago: it takes time, but, in the +end, I will get there. And, after all, users also complain about the lack of documentation for \CONTEXT, so who am I to complain? In fact, it will be those \CONTEXT\ users who will provide feedback and make the implementation better in the~end. @@ -110,41 +110,41 @@ the~end. Before we discuss some details, it will be useful to summarize what the font loader does when a user requests a font at a certain size and with specific -features enabled. When a font is used the first time, its binary format is -converted into a form that makes it suitable for use within \CONTEXT\ and -therefore \LUATEX. This conversion involves collecting properties of the font as -a whole (official names, general dimensions like x-height and em-width, etc.), of +features enabled. When a font is used for the first time, its binary format is +converted into a form that makes it suitable for use in \CONTEXT\ and therefore +in \LUATEX. This conversion involves collecting the properties of the font as a +whole (official names, general dimensions like x-height and em-width, etc.), of glyphs (dimensions, \UNICODE\ properties, optional math properties), and all -kinds of information that relates to (contextual) replacements of glyphs (small +kinds of information that relate to (contextual) replacements of glyphs (small caps, oldstyle, scripts like Arabic) and positioning (kerning, anchoring marks, -etc.). In the \CONTEXT\ font loader this conversion is done in \LUA. +etc.). In the \CONTEXT\ font loader, this conversion is done in \LUA. -The result is stored in a condensed format in a cache and the next time the font -is needed it loads in an instant. In the cached version the dimensions are -untouched, so a font at different sizes has just one copy in the cache. 
Often a -font is needed at several sizes and for each size we create a copy with scaled +The result is stored in a condensed format in a cache, and, the next time the font +is needed, it loads in an instant. In the cached version, the dimensions are +untouched, so a font at different sizes has just one copy in the cache. Often, a +font is needed at several sizes, and for each size, we create a copy with scaled glyph dimensions. The feature-related dimensions (kerning, anchoring, etc.)\ are shared and scaled when needed. This happens when sequences of characters in the node list get converted into sequences of glyphs. We could do the same with glyph -dimensions but one reason for having a scaled copy is that this copy can also -contain virtual glyphs and these have to be scaled beforehand. In practice there +dimensions, but one reason for having a scaled copy is that this copy can also +contain virtual glyphs, and these have to be scaled beforehand. In practice, there are several layers of caching in order to keep the memory footprint within -reasonable bounds. \footnote {In retrospect one can wonder if that makes sense; +reasonable bounds. \footnote {In retrospect, one can wonder if that makes sense; just look at how much memory a browser uses when it has been open for some time. -In the beginning of \LUATEX\ users wondered about caching fonts, but again, just +In the beginning of \LUATEX, users wondered about caching fonts, but again, just look at what amounts browsers cache: it gets pretty close to the average amount of writes that a \SSD\ can handle per day within its guarantee.} When the font is actually used, interaction between characters is resolved using -the feature|-|related information. When for instance two characters need to be +the feature|-|related information. When, for instance, two characters need to be kerned, a lookup results in the injection of a kern, scaled from general dimensions to the current size of the font. -When the outlines of glyphs are needed in \METAFUN\ the font is also converted +When the outlines of glyphs are needed in \METAFUN, the font is also converted from its binary form to something in \LUA, but this time we filter the shapes. -For a \type {cff} this comes down to interpreting the \type {charstrings} and -reducing the complexity to \type {moveto}, \type {lineto} and \type {curveto} -operators. In the process subroutines are inlined. The result is something that +For a \type {cff}, this comes down to interpreting the \type {charstrings} and +reducing the complexity to \type {moveto}, \type {lineto}, and \type {curveto} +operators. In the process, subroutines are inlined. The result is something that \METAPOST\ is happy with but that also can be turned into a piece of a \PDF. We now come to what a variable font actually is: a basic design which is @@ -170,7 +170,7 @@ endfor ; \stopMPcode \stoplinecorrection -Here we have a linear scaling but glyphs are not normally done that way. There +Here we have linear scaling, but glyphs are not normally done that way. There are font collections out there with lots of intermediate variants (say from light to heavy) and it's more profitable to sell each variant independently. However, there is often some logic behind it, probably supported by programs that @@ -178,68 +178,69 @@ designers use, so why not build that logic into the font and have one file that represents many intermediate forms. 
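To get a feel for what such a single file replaces, here is a minimal sketch (the font file name and the axis values are made up; the actual \CONTEXT\ interface shows up later in this chapter): one variable file plus a small feature set per instance takes the place of a separate font file per weight.

\starttyping
\definefontfeature [myregular] [default] [axis={weight:400}]
\definefontfeature [mybold]    [default] [axis={weight:700}]

\definefont [MyRegular] [file:somevariablefont*myregular]
\definefont [MyBold]    [file:somevariablefont*mybold]
\stoptyping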
In fact, once we have multiple axes, even when the designer has clear ideas of the intended usage, nothing will prevent users from tinkering with the axis properties in ways that will fulfil their -demands but hurt the designers eyes. We will not discuss that dilemma here. +demands (and hurt the designer's eyes). I will not discuss that dilemma here. When a variable font follows the route described above, we face a problem. When -you load a \TRUETYPE\ font it will just work. The glyphs are packaged in the same -format as static fonts. However, a variable font has axes and on each axis a -value can be set. Each axis has a minimum, maximum and default. It can be that -the default instance also assumes some transformations are applied. The standard -recommends adding tables to describe these things but the fonts that I played -with each lacked such tables. So that leaves some guesswork. But still, just -loading a \TRUETYPE\ font gives some sort of outcome, although the dimensions -(widths) might be weird due to lack of a (default) axis being applied. +you load a \TRUETYPE\ font, it will just work. The glyphs are packaged in the +same format as static fonts. However, a variable font has axes, and, on each +axis, a value can be set. Each axis has a minimum, a maximum, and a default. It +can be that the default instance also assumes some transformations are applied. +The standard recommends adding tables to describe these things, but the fonts +that I played with each lacked such tables. So that leaves some guesswork. But +still, just loading a \TRUETYPE\ font gives some sort of outcome, although the +dimensions (widths) might be weird due to the lack of a (default) axis being +applied. An \OPENTYPE\ font with \POSTSCRIPT\ outlines is different: the internal \type -{cff} format has been upgraded to \type {cff2} which on the one hand is less -complicated but on the other hand has a few new operators \emdash\ which results -in programs that have not been adapted complaining or simply quitting on them. +{cff} format has been upgraded to \type {cff2}, which on the one hand is less +complicated, but on the other hand has a few new operators, which results +in programs that have not been adapted complaining or simply crashing on them. One could argue that a font is just a resource and that one only has to pass it -along but that's not what works well in practice. Take \LUATEX. We can of course -load the font and apply axis vales so that we can process the document as we -normally do. But at some point we have to create a \PDF. We can simply embed the -\TRUETYPE\ files but no axis values are applied. This is because, even if we add -the relevant information, there is no way in current \PDF\ formats to deal with -it. For that, we should be able to pass all relevant axis|-|related information -as well as specify what values to use along these axes. And for \TRUETYPE\ fonts -this information is not part of the shape description so then we in fact need to -filter and pass more. An \OPENTYPE\ \POSTSCRIPT\ font is much cleaner because -there we have the information needed to transform the shape mostly in the glyph -description. There we only need to carry some extra information on how to apply -these so|-|called blend values. The region|/|axis model used there only demands -passing a relatively simple table (stripped down to what we need). But, as said -above, \type {cff2} is not backward-compatible so a viewer will (currently) -simply not show anything. 
- -Recalling how we load fonts, how does that translate with variable changes? If we +along, but that's not what works well in practice. Take \LUATEX. We can, of +course, load the font and apply axis vales, so that we can process the document +as we normally do. But, at some point, we have to create a \PDF\ file. We can +simply embed the \TRUETYPE\ files, but no axis values are applied. This is +because, even if we add the relevant information, there is no way in the current +\PDF\ formats to deal with it. For that, we should be able to pass all relevant +axis|-|related information as well as specify what values to use along these +axes. And, for \TRUETYPE\ fonts, this information is not part of the shape +description so then we need to filter and pass more. An \OPENTYPE\ \POSTSCRIPT\ +font is much cleaner, because there we have the information needed to transform +the shape mostly in the glyph description. There, we only need to carry some +extra information on how to apply these so|-|called blend values. The +region|/|axis model used there only demands passing a relatively simple table +(stripped down to what we need). But, as said above, \type {cff2} is not +backward-compatible, so a viewer will (currently) simply not show anything. + +Recalling how we load fonts, how does that change with variable changes? If we have two characters with glyphs that get transformed and that have a kern between them, the kern may or may not transform. So, when we choose values on an axis, -then not only glyph properties change but also relations. We no longer can share -positional information and scale afterwards because each instance can have +then not only glyph properties change but also relations. We can no longer share +positional information and scale afterwards, because each instance can have different values to start with. We could carry all that information around and -apply it at runtime but because we're typesetting documents with a static design +apply it at runtime, but, because we're typesetting documents with a static design, it's more convenient to just apply it once and create an instance. We can use the -same caching as mentioned before but each chosen instance (provided by the font +same caching as mentioned before, but each chosen instance (provided by the font or made up by user specifications) is kept in the cache. As a consequence, using a variable font has no overhead, apart from initial caching. So, having dealt with that, how do we proceed? Processing a font is not different from what we already had. However, I would not be surprised if users are not -always satisfied with, for instance, kerning, because in such fonts a lot of care -has to be given to this by the designer. Of course I can imagine that programs +always satisfied with, for instance, kerning, because in such fonts, a lot of care +has to be given to this by the designer. Of course, I can imagine that programs used to create fonts deal with this, but even then, there is a visual aspect to -it too. The good news is that in \CONTEXT\ we can manipulate features so in -theory one can create a so|-|called font goodie file for a specific instance. +it, too. The good news is that in \CONTEXT\ we can manipulate features, so, in +theory, one can create a so|-|called font goodie file for a specific instance. \stopsubject \startsubject[title=Shapes] -For \OPENTYPE\ \POSTSCRIPT\ shapes we always have to do a dummy rendering in -order to get the right bounding box information. 
For \TRUETYPE\ this information +For \OPENTYPE\ \POSTSCRIPT\ shapes, we always have to do a dummy rendering in +order to get the right bounding box information. For \TRUETYPE, this information is already present but not when we use a variable instance, so I had to do a bit -of coding for that. Here we face a problem. For \TEX\ we need the width, height +of coding for that. Here we face a problem. For \TEX, we need the width, height and depth of a glyph. Consider the following case: \startlinecorrection @@ -259,16 +260,16 @@ draw boundingbox currentpicture The shape has a bounding box that fits the shape. However, its left corner is not at the origin. So, when we calculate a tight bounding box, we cannot use it for actually positioning the glyph. We do use it (for horizontal scripts) to get the -height and depth but for the width we depend on an explicit value. In \OPENTYPE\ -\POSTSCRIPT\ we have the width available and how the shape is positioned relative -to the origin doesn't much matter. In a \TRUETYPE\ shape a bounding box is part -of the specification, as is the width, but for a variable font one has to use -so-called phantom points to recalculate the width and the test fonts I had were +height and depth, but for the width, we depend on an explicit value. In \OPENTYPE\ +\POSTSCRIPT, we have the width available, and how the shape is positioned relative +to the origin doesn't much matter. In a \TRUETYPE\ shape, a bounding box is part +of the specification, as is the width, but for a variable font, one has to use +so|-|called phantom points to recalculate the width, and the test fonts I had were not suitable for investigating this. At any rate, once I could generate documents with typeset text using variable -fonts it became time to start thinking about a user interface. A variable font -can have predefined instances but of course a user also wants to mess with axis +fonts, it was time to start thinking about a user interface. A variable font +can have predefined instances, but, of course, a user also wants to mess with axis values. Take one of the test fonts: Adobe Variable Font Prototype. It has several instances: @@ -310,7 +311,7 @@ The Avenir Next variable demo font (currently) provides: \SampleFont {avenirnextvariable} {heavy condensed} {heavycondensed} \stoptabulate -Before we continue I will show a few examples of variable shapes. Here we use some +Before we continue, I will show a few examples of variable shapes. Here, we use some \METAFUN\ magic. Just take these definitions for granted. \startbuffer[a] @@ -357,15 +358,15 @@ Before we continue I will show a few examples of variable shapes. Here we use so \typebuffer[a,b,c,d] The results are shown in \in {figure} [fig:whatever:1]. What we see here is that -as long as we fill the shape everything will look as expected but using an -outline only won't. The crucial (control) points are moved to different locations -and as a result they can end up inside the shape. Giving up outlines is the price -we evidently need to pay. Of course this is not unique for variable fonts -although in practice static fonts behave better. To some extent we're back to +as long as we fill the shape everything will look as expected, but only using an +outline won't. The crucial (control) points are moved to different locations +and, as a result, they can end up inside the shape. Giving up outlines is the price +we evidently need to pay. Of course this is not unique for variable fonts, +although, in practice, static fonts behave better. 
To some extent, we're back to where we were with \METAFONT\ and (for instance) Computer Modern: because these originate in bitmaps (and probably use similar design logic) we also can have overlap and bits and pieces pasted together and no one will notice that. The -first outline variants of Computer Modern also had such artifacts while in the +first outline variants of Computer Modern also had such artifacts, while in the static Latin Modern successors, outlines were cleaned up. \startplacefigure[title=Four variants,reference=fig:whatever:1] @@ -377,9 +378,10 @@ static Latin Modern successors, outlines were cleaned up. \stopcombination \stopplacefigure -The fact that we need to preprocess an instance but only know how to do that when -we have gotten the information about axis values from the font means that the -font handler has to be adapted to keep caching correct. Another definition is: +The fact that we need to preprocess an instance, but that we only know how to do +that after we have retrieved information about axis values from the font means +that the font handler has to be adapted to keep caching correct. Another +definition is: \starttyping \definefontfeature @@ -392,9 +394,9 @@ font handler has to be adapted to keep caching correct. Another definition is: [name:adobevariablefontprototype*lightdefault] \stoptyping -Here the complication is that where normally features are dealt with after +Here, the complication is that where normally features are dealt with after loading, the axis feature is part of the preparation (and caching). If you want -the virtual font solution you can do this: +the virtual font solution, you can do this: \starttyping \definefontfeature @@ -408,12 +410,12 @@ the virtual font solution you can do this: [name:adobevariablefontprototype*inlinelightdefault] \stoptyping -When playing with these fonts it was hard to see if loading was done right. For -instance not all values make sense. It is beyond the scope of this article, but -axes like weight, width, contrast and italic values get applied differently to -so|-|called regions (subspaces). So say that we have an $x$ coordinate with value -$50$. This value can be adapted in, for instance, four subspaces (regions), so we -actually get: +When playing with these fonts, it was hard to see if loading was done right. For +instance, not all values make sense. It is beyond the scope of this article, but +axes like weight, width, contrast, and italic values get applied differently to +so|-|called regions (subspaces). So, say that we have an $x$ coordinate with the +value $50$. This value can be adapted in, for instance, four subspaces (regions), +so we actually get: \startformula x^\prime = x @@ -423,11 +425,11 @@ actually get: + s_4 \times x_4 \stopformula -The (here) four scale factors $s_n$ are determined by the axis value. Each axis +The (here four) scale factors $s_n$ are determined by the axis value. Each axis has some rules about how to map the values $230$ for weight and $50$ for contrast -to such a factor. And each region has its own translation from axis values to -these factors. The deltas $x_1,\dots,x_4$ are provided by the font. For a -\POSTSCRIPT|-|based font we find sequences like: +to such a factor. Each region has its own translation from axis values to these +factors. The deltas $x_1,\dots,x_4$ are provided by the font. In a +\POSTSCRIPT|-|based font, we find sequences like: \starttyping 1 <setvstore> @@ -437,10 +439,10 @@ these factors. The deltas $x_1,\dots,x_4$ are provided by the font. 
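As a quick sanity check with made|-|up numbers (they come from no real font): say the chosen axis values lead to the factors $s_1,\dots,s_4 = 0.5, 0.25, 0, 1$ and the font stores the deltas $x_1,\dots,x_4 = 4, -1, 2, 0$ for the $x$ coordinate of $50$ mentioned above. Then

\startformula
    x^\prime = 50 + 0.5 \times 4 + 0.25 \times (-1) + 0 \times 2 + 1 \times 0 = 51.75
\stopformula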
For a A store refers to a region specification. From there the factors are calculated using the chosen values on the axis. The deltas are part of the glyphs -specification. Officially there can be multiple region specifications, but how +specification. Officially, there can be multiple region specifications, but how likely it is that they will be used in real fonts is an open question. -For \TRUETYPE\ fonts the deltas are not in the glyph specification but in a +In \TRUETYPE\ fonts, the deltas are not in the glyph specification but in a dedicated \type {gvar} table. \starttyping @@ -448,7 +450,7 @@ apply x deltas [10 -30 40 -60] to x 120 apply y deltas [30 -10 -30 20] to y 100 \stoptyping -Here the deltas come from tables outside the glyph specification and their +Here, the deltas come from tables outside the glyph specification and their application is triggered by a combination of axis values and regions. The following two examples use Avenir Next Variable and demonstrate that kerning @@ -488,19 +490,19 @@ is adapted to the variant. \startsubject[title=Embedding] -Once we're done typesetting and a \PDF\ file has to be created there are three +Once we're done typesetting and a \PDF\ file has to be created, there are three possible routes: \startitemize \startitem We can embed the shapes as \PDF\ images (inline literal) using virtual - font technology. We cannot use so|-|called xforms here because we want to + font technology. We cannot use so|-|called xforms here, because we want to support color selectively in text. \stopitem \startitem - We can wait till the \PDF\ format supports such fonts, which might happen - but even then we might be stuck for years with viewers getting there. Also - documents need to get printed, and when printer support might + We can wait till the \PDF\ format supports such fonts, which might + happen, but even then we might be stuck for years with viewers getting + there. Also, documents need to be printed, and when printer support might arrive is another unknown. \stopitem \startitem @@ -512,43 +514,43 @@ possible routes: Once I could interpret the right information in the font, the first route was the way to go. A side effect of having a converter for both outline types meant that it was trivial to create a virtual font at runtime. This option will stay in -\CONTEXT\ as pseudo|-|feature \type {variableshapes}. +\CONTEXT\ as a pseudo|-|feature \type {variableshapes}. -When trying to support variable fonts I tried to limit the impact on the backend +When trying to support variable fonts, I tried to limit the impact on the backend code. Also, processing features and such was not touched. The inclusion of the right shapes is done via a callback that requests the blob to be injected in the -\type {cff} or \type {glyf} table. When implementing this I actually found out -that the \LUATEX\ backend also does some juggling of charstrings, to serve the -purpose of inlining subroutines. In retrospect I could have learned a few tricks -faster by looking at that code but I never realized that it was there. Looking at -the code again, it strikes me that the whole inclusion could be done with \LUA\ -code and some day I will give that a try. +\type {cff} or \type {glyf} table. When implementing this, I actually found out +that the \LUATEX\ backend also does some juggling of charstrings to inline +subroutines. In retrospect, I could have learned a few tricks faster by looking +at that code, but I never realized that it was there. 
Looking at the code again, +it strikes me that the whole inclusion could be done with \LUA\ code, and, some +day, I will give that a try. \stopsubject \startsubject[title=Conclusion] -When I first heard about variable fonts I was confident that when they showed up -they could be supported. Of course a specimen was needed to prove this. A first -implementation demonstrates that indeed it's no big deal to let \CONTEXT\ with -\LUATEX\ handle such fonts. Of course we need to fill in some gaps which can be -done once we have complete fonts. And then of course users will demand more -control. In the meantime the helper script that deals with identifying fonts by -name has been extended and the relevant code has been added to the distribution. -At some point the \CONTEXT\ Garden will provide the \LUATEX\ binary that has the -callback. - -I end with a warning. On the one hand this technology looks promising but on the -other hand one can easily get lost. Probably most such fonts operate over a -well|-|defined domain of values but even then one should be aware of complex +When I first heard about variable fonts, I was confident that when they showed +up, they could be supported. Of course, a specimen was needed to prove this. A +first implementation demonstrates that, indeed, it's no big deal to let \CONTEXT\ +with \LUATEX\ handle such fonts. Of course, we need to fill in some gaps, which +can be done once we have complete fonts. And then, of course, users will demand +more control. In the meantime, the helper script that deals with identifying +fonts by name has been extended, and the relevant code has been added to the +distribution. At some point, the \CONTEXT\ Garden will provide the \LUATEX\ +binary that has the callback. + +I end on a warning note. On the one hand, this technology looks promising, but on +the other hand, one can easily get lost. Most such fonts probably operate over a +well|-|defined domain of values, but, even then, one should be aware of complex interactions with features like positioning or replacements. Not all combinations -can be tested. It's probably best to stick to fonts that have all the relevant +can be tested. It's probably best to stick with fonts that have all the relevant tables and don't depend on properties of a specific rendering technology. -Although support is now present in the core of \CONTEXT\ the official release -will happen at the \CONTEXT\ meeting in 2017. By then I hope to have tested more -fonts. Maybe the interface has also been extended by then because after all, -\TEX\ is about control. +Although support is now present in the core of \CONTEXT, the official release +will happen at the \CONTEXT\ meeting in 2017. By then, I hope to have tested more +fonts. Maybe the interface will also have been extended by then, because, after +all, \TEX\ is about control. \stopsubject diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon.tex b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon.tex index 60b626a5e9e..00b01f9aebd 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/onandon/onandon.tex @@ -35,22 +35,18 @@ \startbodymatter \component onandon-decade \component onandon-ffi - % \startchapter[title=Variable fonts] First published in user group magazines. 
\stopchapter - \component onandon-variable + \component onandon-variable % first published in user group magazines \component onandon-emoji - \startchapter[title={Children of \TEX}] First published in user group magazines. \stopchapter - % \component onandon-children \component onandon-performance \component onandon-editing - \startchapter[title={Advertising \TEX}] First published in user group magazines. \stopchapter - % \component onandon-perception - \startchapter[title={Tricky fences}] First published in user group magazines. \stopchapter - % \component onandon-fences - % \component onandon-media - \startchapter[title={From 5.2 to 5.3}] Maybe first published in user group magazines. \stopchapter - % \component onandon-53 - \startchapter[title={Executing \TEX}] Maybe first published in user group magazines. \stopchapter - % \component onandon-execute + \component onandon-fences % first published in user group magazines + \component onandon-media + \component onandon-53 % first published in user group magazines + \component onandon-execute % first published in user group magazines + \component onandon-modern + \component onandon-expansion + \component onandon-runtoks + \component onandon-110 \stopbodymatter \stopproduct diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/rules/rules-mkiv.tex b/Master/texmf-dist/doc/context/sources/general/manuals/rules/rules-mkiv.tex index 00ab49c25c6..e1d8acfaac6 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/rules/rules-mkiv.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/rules/rules-mkiv.tex @@ -257,8 +257,10 @@ A running box is seen as text. As you (probably) expect, a nested ornamental rule is supported as well: \startbuffer -\underbars{We see this \high{\tfxx\underdot{®}} symbol \runninghbox to 1cm{\hss} often.} -\underbar {We see this \high{\tfxx\underdot{®}} symbol \runninghbox to 1cm{\hss} often.} +\underbars + {We see this \high{\tfxx\underdot{®}} symbol \runninghbox to 1cm{\hss} often.} +\underbar + {We see this \high{\tfxx\underdot{®}} symbol \runninghbox to 1cm{\hss} often.} \stopbuffer \typebuffer @@ -269,6 +271,30 @@ This time we get (you might need a magnifier to see it): \getbuffer \stoplines +We end this section with an extreme example: + +\startbuffer +\definebar + [xbarone] + [text=\lower\exheight\hbox{\darkred \infofont +}, + repeat=yes] +\definebar + [xbartwo] + [text=\lower\exheight\hbox{\darkblue\infofont +}, + repeat=yes, + continue=yes] +\stopbuffer + +\typebuffer \getbuffer + +\startbuffer +Klein : \xbarone{\samplefile {klein}\removeunwantedspaces}\par +Sapolsky : \xbartwo{\samplefile{sapolsky}\removeunwantedspaces}\par +\stopbuffer + +\typebuffer \getbuffer + + \stopsubject \startsubject[title=Shifting] @@ -307,7 +333,7 @@ or: \inlinebuffer\ Here we just shift words but you can shift more than that although I haven't yet seen a useful example of that: \startbuffer -We can \shiftup {\input{tufte}} whole paragraphs if we really want. +We can \shiftup {\samplefile{tufte}} whole paragraphs if we really want. \stopbuffer \typebuffer @@ -352,7 +378,7 @@ an educational feature. color=darkred] \startlinefiller[filler-1] - \input ward + \samplefile{ward} \stoplinefiller \stopbuffer @@ -366,7 +392,7 @@ paragraph. 
\startbuffer \startalign[flushleft,broad] \startlinefiller[filler-1] - \input ward + \samplefile{ward} \stoplinefiller \stopalign \stopbuffer @@ -386,7 +412,7 @@ a \type {middle} alignment: \startbuffer \startalign[middle] \startlinefiller[filler-1] - \input ward + \samplefile{ward} \stoplinefiller \stopalign \stopbuffer @@ -397,7 +423,7 @@ a \type {middle} alignment: \startalign[middle] \startnarrower \startlinefiller[filler-1] - \input ward + \samplefile{ward} \stoplinefiller \stopnarrower \stopalign @@ -418,7 +444,7 @@ to the margins if you like: \startalign[middle] \startnarrower \startlinefiller[filler-1][scope=global] - \input ward + \samplefile{ward} \stoplinefiller \stopnarrower \stopalign @@ -436,7 +462,7 @@ You can also use a \type {left} or \type {right} scope, as in: \startalign[middle] \startnarrower \startlinefiller[filler-1][scope=right] - \input ward + \samplefile{ward} \stoplinefiller \stopnarrower \stopalign @@ -452,7 +478,7 @@ Only the right rules extend into the margins. \startalign[middle] \startnarrower \startlinefiller[filler-1][scope=right,location=right] - \input ward + \samplefile{ward} \stoplinefiller \stopnarrower \stopalign @@ -498,7 +524,7 @@ The previous example now looks like: \startalign[middle,r2l] \startnarrower[4*middle] \startlinefiller[filler-1] [scope=global] - \input ward + \samplefile{ward} \stoplinefiller \stopnarrower \stopalign @@ -534,7 +560,7 @@ use a latin script example for convenience. % \startlinefiller[filler-1] [scope=global,align=middle] % \parindent 100pt % \parfillskip 100pt -% \input ward +% \samplefile{ward} % \stoplinefiller % \stopnarrower % \stopbuffer @@ -544,7 +570,7 @@ use a latin script example for convenience. \startlinefiller[filler-1] [scope=global,align={middle,r2l}] \parindent 100pt \parfillskip 100pt - \input ward + \samplefile{ward} \stoplinefiller \stopnarrower \stopbuffer @@ -623,9 +649,11 @@ primitive here, so we follow the \TEX\ conventions of keywords. 
\startbuffer \ruledhbox\bgroup - \darkgray \frule width 100mm height 10mm depth 8mm radius 2mm line 2pt type fill\relax + \darkgray + \frule width 100mm height 10mm depth 8mm radius 2mm line 2pt type fill\relax \hskip-100mm - \darkred \frule width 100mm height 10mm depth 8mm radius 2mm line 2pt\relax + \darkred + \frule width 100mm height 10mm depth 8mm radius 2mm line 2pt\relax \hskip-100mm \hbox to 100mm{\white \bold \hfill some handy word with frames\hfill}% \egroup @@ -784,11 +812,11 @@ command: \stoptyping \startbuffer -\input ward \hiddenbar {\color[red]{invisible}} -\input ward \hiddenbar {\quad\color[red]{invisible}\quad} -\input ward \hiddenbar{\quad\quad\quad\color[red]{invisible}\quad\quad\quad} -\input ward \hiddenbar {\color[red]{invisible}\quad\quad\quad\quad\quad\quad} -\input ward +\samplefile{ward}\hiddenbar {\color[red]{invisible}} +\samplefile{ward}\hiddenbar {\quad\color[red]{invisible}\quad} +\samplefile{ward}\hiddenbar{\quad\quad\quad\color[red]{invisible}\quad\quad\quad} +\samplefile{ward}\hiddenbar {\color[red]{invisible}\quad\quad\quad\quad\quad\quad} +\samplefile{ward} \stopbuffer \getbuffer @@ -807,8 +835,8 @@ the spacing is kept at a linebreak: [left={\quads[3]}, right={\quads[3]}] -\widehiddenbar{invisible} \input weisman -\widehiddenbar{invisible} \input weisman +\widehiddenbar{invisible} \samplefile{weisman} +\widehiddenbar{invisible} \samplefile{weisman} \widehiddenbar{invisible} \stopbuffer @@ -818,4 +846,164 @@ the spacing is kept at a linebreak: \stopsubject +\startsubject[title=Tabulate] + +The previously discussed mechanism is also available in the tabulate mechanism. +We start with simple backgrounds: + +\startbuffer +\starttabulate + \NL[darkred] foo \NC bar \NC \NR + \NL[darkgreen] foo \NC bar \NC \NR + \NL[darkblue] foo \NC \samplefile{tufte} \NC \NR + \NL[darkgray] foo \NC bar \NC \NR + \NL[darkyellow] foo \NC bar \NC \NR + \LL +\stoptabulate +\stopbuffer + +\typebuffer + +This comes out as: + +{\startcolor[white]\getbuffer\stopcolor} + +There are several two character commands that deal with this: + +\starttabulate[|c|c|l|] +\BC command \BC related to \BC effect \NC +\NC \type{\NL} \NC \type{\NC} \NC Normal with Line \NC \NR +\NC \type{\ND} \NC \type{\NC} \NC Normal with Default Line \NC \NR +\NC \type{\LB} \NC \type{\BC} \NC Bold with Line \NC \NR +\NC \type{\DB} \NC \type{\BC} \NC Bold with Default Line \NC \NR +\NC \type{\NF} \NC \type{\NC} \NC Normal with Filler \NC \NR +\NC \type{\NP} \NC \type{\NC} \NC Normal with Predefined Filler \NC \NR +\NC \type{\FB} \NC \type{\BC} \NC Bold with Filler \NC \NR +\NC \type{\NA} \NC \type{\NC} \NC Normal with Auto Toggled Line \NC \NR +\NC \type{\BA} \NC \type{\BC} \NC Bold with Auto Toggled Line \NC \NR +\stoptabulate + +Before we show more, we set up tabulate: + +\startbuffer[setup] +\setuptabulate + [blank={small,samepage}, + headstyle=bold, + rulecolor=darkred, + rulethickness=1pt, + background=foo, + backgroundcolor=darkred, + foregroundcolor=white] +\stopbuffer + +\typebuffer[setup] + +This time we don't set colors in the table itself: + +\startbuffer +\starttabulate[|l|l|] + \DB foos \BC bars \BC \NR + \TB + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \LL +\stoptabulate +\stopbuffer + +\typebuffer {\getbuffer[setup]\getbuffer} + +Instead of coming up with a separate mechanism for hooking in \METAPOST\ we +use the linefiller mechanism. 
We use these graphics: + +\startbuffer +\startuseMPgraphic{foo} + fill unitsquare + xyscaled (RuleWidth,RuleHeight+RuleDepth) enlarged (ExHeight/4,ExHeight/8) + shifted (-ExHeight/8,ExHeight/16) + withcolor RuleColor ; +\stopuseMPgraphic + +\startuseMPgraphic{bar} + fill unitsquare + xyscaled (RuleWidth,RuleHeight+RuleDepth) enlarged (ExHeight/4,ExHeight/8) + shifted (-ExHeight/8,ExHeight/16) + randomized ExHeight + withcolor RuleColor ; +\stopuseMPgraphic +\stopbuffer + +\typebuffer \getbuffer + +With these fillers: + +\startbuffer +\definelinefiller[foo][mp=foo,color=darkgreen] +\definelinefiller[bar][mp=bar,color=darkred] +\stopbuffer + +\typebuffer \getbuffer + +An example of usage is: + +\startbuffer +\linefillerhbox[foo] to 12cm{\hss\strut\white\bf FOO\hss} \blank +\linefillerhbox[bar] to 10cm{\hss\strut\white\bf BAR\hss} \blank +\linefillerhbox[foo] to 9cm{\hss\strut\white\bf FOO\hss} \blank +\linefillerhbox[bar] to 14cm{\hss\strut\white\bf BAR\hss} +\stopbuffer + +\typebuffer \blank \getbuffer \blank + +We can rely on the default: + +\startbuffer +\starttabulate[|||] + \PB foo \BC bars \BC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR +\stoptabulate +\stopbuffer + +\typebuffer \start\getbuffer[setup]\getbuffer\stop + +or be explicit: + +\startbuffer +\starttabulate[|||] + \FB[bar] foos \BC bars \BC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR + \NC foo foo foo \NC bar bar bar bar \NC \NR +\stoptabulate +\stopbuffer + +\typebuffer \start\getbuffer[setup]\getbuffer\stop + +The auto variants will switch between colors: + +\startbuffer +\setuptabulate[backgroundcolor:1=darkred] +\setuptabulate[backgroundcolor:2=darkgreen] +\setuptabulate[backgroundcolor:3=darkblue] + +\starttabulate[|||] + \BA foo foo foo \BC bar bar bar bar \NC \NR + \BA foo foo foo \BC bar bar bar bar \NC \NR + \BA foo foo foo \BC bar bar bar bar \NC \NR + \BA foo foo foo \BC bar bar bar bar \NC \NR + \BA foo foo foo \BC bar bar bar bar \NC \NR + \BA foo foo foo \BC bar bar bar bar \NC \NR +\stoptabulate +\stopbuffer + +\typebuffer \start\getbuffer[setup]\getbuffer\stop + + +\stopsubject + \stopdocument diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-conditions.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-conditions.tex new file mode 100644 index 00000000000..83fb0a7bcb5 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-conditions.tex @@ -0,0 +1,108 @@ +\environment texit-style + +\startcomponent texit-conditions + +\startchapter[title={Conditions}] + +In case you wonder why we have modes in \CONTEXT, here is an example that might +convince you. The \TEX\ language has conditionals and they are in fact quite +efficient, take for instance: + +\startTEX +\ifnum\scratchcounter>10 + \ifdim\scratchdimen>10pt + one + \else + two + \fi +\else + three +\fi +\stopTEX + +When the first test fails, \TEX\ will do a fast scan over the following tokens +and expand the \type {three} branch. In order to do such a fast scan, the nested +condition needs to be properly balanced: the \type {\else} is optional but the +nested \type {\fi} definitely isn't. 
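A tiny made|-|up fragment (deliberately broken, not taken from the original) shows why the nested \type {\fi} is not optional:

\startTEX
% wrong: the inner \fi is missing; when the outer test fails, the fast
% skip pairs the only \fi with the skipped \ifdim, so the outer \ifnum
% never finds its matching \else and \fi
\ifnum\scratchcounter>10
  \ifdim\scratchdimen>10pt
    one
  \else
    two
\else
  three
\fi
\stopTEX

With the extra \type {\fi} put back we are where the example above started.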
Now imagine that you use a few pseudo +booleans, like: + +\startTEX +\newif\ifalpha \alphatrue +\newif\ifbeta \betatrue +\stopTEX + +And you need it in: + +\startTEX +\ifalpha + \ifbeta + YES + \else + NOP + \fi +\else + NOP +\fi +\stopTEX + +This happens occasionally in real applications and one can either repeat the +\type {NOP} or wrap it in a macro in order to save tokens. However, way more +natural would be something like this: + +\startTEX +\ifalphaorbeta + YES +\else + NOP +\fi +\stopTEX + +This basically would introduce a new kind concept: an expandable macro flagged as +\type {\if} kind of token. I actually experimented with that in \LUATEX\ but +rejected it eventually. Instead \type {\ifcondition} was introduced. This is +basically equivalent to \type {\iffalse} when \TEX\ is in fast \type {\if*} +skipping mode, but when a real test happens the next argument is expanded. That +macro is expected to end up as something equivalent to \type {\iftrue} or \type +{\iffalse} so that other the nexct branch or the \type {\else} is entered. Here +is an example: + +\startTEX +\ifcondition\alphaorbeta + YES +\else + NOP +\fi +\stopTEX + +There are several ways to define \type {\alphaorbeta} now and we show a few here. +It's up to you to figure out which ons is the most efficient. + +\startTEX +\def\alphaorbeta{\ifcase0\ifalpha \else\ifbeta \else1\fi\fi\relax} +\def\alphaorbeta{\ifcase \ifalpha0\else\ifbeta0\else1\fi\fi\relax} +\def\alphaorbeta{\ifnum1=\ifalpha1\else\ifbeta1\else0\fi\fi\relax} +\def\alphaorbeta{\ifnum 0\ifalpha1\fi \ifbeta1\fi >1\relax} +\stopTEX + +Now, do we expect users to come up with such constructs? Of course not. Even in +\CONTEXT\ we don't really need them, although there are a few places where they +can be used. In \CONTEXT\ you would just do this: + +\startTEX +\enablemode[alpha] +\enablemode[beta] + +\doifelsemode {alpha,beta} { + YES +} { + NOP +} +\stopTEX + +Of course such a verbose macro is less efficient but even if you use this test +10.000 times in a run it will not take more than 0.06 seconds on a decent 2013 +laptop. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-contents.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-contents.tex new file mode 100644 index 00000000000..f22db1c3835 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-contents.tex @@ -0,0 +1,9 @@ +\environment texit-style + +\startcomponent texit-contents + +\starttitle[title={Contents}] + \placelist[chapter] +\stoptitle + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-introduction.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-introduction.tex new file mode 100644 index 00000000000..cfe87c25a39 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-introduction.tex @@ -0,0 +1,24 @@ +\environment texit-style + +\startcomponent texit-introduction + +\startchapter[title={Introduction}] + +I needed a place to collect examples of \TEX\ coding and this is it. The examples +presented here are an unorganized bunch. Some originate in questions asked on the +mailing list. Others are byproducts of tests made when playing with some (new) +functionality. When you plan to use \TEX\ for a long time, it doesn't hurt to see +a bit of \TEX\ coding but when possible I will also show the \CONTEXT\ way. + +I hope that this document is useful. 
You can of course always try to challenge me +for more examples. Hopefully I will not forget about this document and extend it +occasionaly. + +\startlines +Hans Hagen +Hasselt NL +\stoplines + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-leaders.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-leaders.tex new file mode 100644 index 00000000000..ed224da5ce0 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-leaders.tex @@ -0,0 +1,247 @@ +\environment texit-style + +\startcomponent texit-leaders + +\startchapter[title={Leaders}] + +The following example comes from a question on the \CONTEXT\ list. It +exhibits a few low level tricks. For the purpose of this example we +use \type {\ruledhbox} instead of \type {\hbox}. We start with a simple +command that puts something at the end of a line: + +\startbuffer +\starttexdefinition MyFill #1 + \removeunwantedspaces + \hfill + \ruledhbox{#1} +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +We use this in: + +\startbuffer[sample] +\startitemize[packed,joinedup][rightmargin=3em] + \startitem + \samplefile{ward}\MyFill{DW} + \stopitem +\stopitemize +\stopbuffer + +\typebuffer[sample][option=TEX] + +and get: + +\getbuffer[sample] + +But, the requirement was that we move the number towards the right margin, so +instead we need something: + +\startbuffer +\starttexdefinition MyFill #1 + \removeunwantedspaces + \hfill + \rlap{\ruledhbox to \rightskip{\hss#1}} +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +This already looks more like it: + +\getbuffer[sample] + +But also part of the requirements was that there should be dots between the end +of the last sentence and the number. In low level \TEX\ speak that means using +leaders: repeated boxed content where the repitition is driven by a glue +specification. Let's naively use leaders now: + +\startbuffer +\starttexdefinition MyFill #1 + \leaders + \ruledhbox to 1em{\hss.\hss} + \hfill + \ruledhbox{#1} +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +Let's see what we get: + +\getbuffer[sample] + +Again we need to move the number to the right. This time we need a different +solution because we need to fill the space in between. When \TEX\ ends a +paragraph it adds \type {\parfillskip} so we will now manipulate that parameter. + +\startbuffer +\starttexdefinition MyFill #1 + \parfillskip-1\rightskip plus 1fil\relax + \leaders + \ruledhbox to 1em{\hss.\hss} + \hfill + \ruledhbox{#1} +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +Does it look better? + +\getbuffer[sample] + +Indeed it does, but watch this: + +\startbuffer[sample] +\startitemize[packed,joinedup][rightmargin=8.5em] + \startitem + \samplefile{ward}\MyFill{DW}\par + \samplefile{ward}\par + \samplefile{ward}\MyFill{DW} + \stopitem +\stopitemize +\stopbuffer + +\typebuffer[sample][option=TEX] + +The first \type {\MyFill} will set the \type {\parfillskip} to a value that will +also be used later on. + +\getbuffer[sample] + +The way out is the following + +\startbuffer +\starttexdefinition MyFill #1 + \begingroup + \parfillskip-1\rightskip plus 1fil\relax + \leaders + \ruledhbox to 1em{\hss.\hss} + \hfill + \ruledhbox{#1} + \par + \endgroup +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +This looks more or less okay. 
The \type {\par} keeps the adaption local but for +it to work well, the \type {\par} must be inside the group. + +\getbuffer[sample] + +Now it's time to go for perfection! First of all, we get rid of any leading +spacing. If we need some we should inject it after a cleanup. We also use a +different leader command. Instead of \type {to} we use a \type {spread} so that +we get half the emwidth and not something slightly less due to the width of the +period. + +\startbuffer +\starttexdefinition MyFill #1 + \removeunwantedspaces + \begingroup + \parfillskip-1\rightskip plus 1fil\relax + \cleaders + \ruledhbox spread 1em{\hss.\hss} + \hfill + \ruledhbox{#1} + \par + \endgroup +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +So, we end up here: + +\startbuffer[sample] +\startitemize[packed,joinedup][rightmargin=5em] + \startitem + \samplefile{sapolsky}\MyFill{RS}\par + \stopitem +\stopitemize +\stopbuffer + +\getbuffer[sample] + +For which we used this: + +\typebuffer[sample][option=TEX] + +Finally we get rid of the tracing: + +\startbuffer +\starttexdefinition unexpanded MyFill #1 + \begingroup + \parfillskip-1\rightskip plus 1fil\relax + \leaders + \hbox to \emwidth{\hss.\hss} + \hfill + \hbox{#1} + \par + \endgroup +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +Watch a few more details. It brings us to: + +\getbuffer[sample] + +\page + +\startbuffer +\definefiller + [MyFiller] + [offset=.25\emwidth, + method=middle] + +\starttexdefinition unexpanded MyFill #1 + \begingroup + \parfillskip-1\rightskip plus 1fil\relax + \filler[MyFiller]% + \hbox{#1} + \par + \endgroup +\stoptexdefinition +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +\getbuffer[sample] + +When writing these examples I realized that it's rather trivial to add this +option to the already existing filler mechanism. The definition of such a filler +looks like this: + +\startbuffer +\definefiller + [MyFiller] + [offset=.25\emwidth, + rightmargindistance=-\rightskip, + method=middle] +\stopbuffer + +\typebuffer[option=TEX] \getbuffer + +\startbuffer[sample] +\startitemize[packed,joinedup][rightmargin=5em] + \startitem + \samplefile{sapolsky}\fillupto[MyFiller]{RS} + \stopitem +\stopitemize +\stopbuffer + +The sample code now becomes: + +\typebuffer[sample][option=TEX] + +Ans as expected renders as: + +\getbuffer[sample] + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-lookahead.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-lookahead.tex new file mode 100644 index 00000000000..03eaecabe4b --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-lookahead.tex @@ -0,0 +1,387 @@ +\environment texit-style + +\startcomponent texit-lookahead + +\startchapter[title={Lookahead}] + +When you look at the \TEX\ source of a macro package, your can often see +constructs like this: + +\startTEX +\def\foo#1% + {We do something with "#1".} +\stopTEX + +or maybe: + +\startTEX +\def\foo#1{% + We do something with "#1".% +} +\stopTEX + +Normally the percentage symbol is used to indicate a comment, but here +are no comments. In these cases, it makes the definition effectively + +\startTEX +\def\foo#1{do something with "#1"!} +\stopTEX + +which is different from when we would not have that percent sign there: + +\startTEX +\def\foo#1 {We do something with "#1"!} +\stopTEX + +That variant is valid \TEX\ code but expects a space as delimiter of the +argument to \type {\foo}. 
This means that you can say: + +\startTEX +\foo{1} \foo 2 \foo {34} and \foo 56 . +\stopTEX + +while this can trigger an error message (when no space is seen at some point) or +at least give unexpected results. + +\startTEX +\foo{1}\foo 2\foo {34}and\foo 56. +\stopTEX + +A different use of the percent is seen in cases like this: + +\startTEX +\def\foo#1% + {We do something % + with "#1".} +\stopTEX + +This time we want to preserve the space after \type {something} because an +end|-|of|-|line would either or not collapse it with \type {with} depending on +how the endofline character is set up. Normally + +\startTEX +\def\foo#1% + {We do something + with "#1".} +\stopTEX + +will also add a space after something but when \TEX\ is set up to ignore lines +you get a collapse. So the explicit space is a robust way out. Both cases of +using or omitting the comment symbol are easy to spot as they trigger an error +or result in weird typeset results. + +\startbuffer[defs] +\def\fooA#1% + {\ifnum#1>100 + yes\else nop% + \fi} + +\def\fooB#1{\ifnum#1>100 yes\else nop \fi} + +\def\fooC#1% + {\ifnum#1>100% + yes\else nop% + \fi} +\stopbuffer + +\typebuffer[defs][option=TEX] \getbuffer[defs] + +We test this with: + +\startbuffer[demo] +\fooA{100} \fooB{100} \fooC{100} +\fooA{101} \fooB{101} \fooC{101} +\stopbuffer + +\typebuffer[demo][option=TEX] + +And the result is probably what you expect: + +\startlines +\getbuffer[demo] +\stoplines + +\startbuffer[defs] +\def\fooA#1% + {\ifnum#1>100 + 1\else 0% + \fi} + +\def\fooB#1{\ifnum#1>100 1\else 0\fi} + +\def\fooC#1% + {\ifnum#1>100% + 1\else 0% + \fi} +\stopbuffer + +However, when we have the following macro body: + +\typebuffer[defs][option=TEX] \getbuffer[defs] + +We get this output. Do you see the issue? + +\startlines +\getbuffer[demo] +\stoplines + +A preferred way to catch this is the following as a \type {\relax} ends scanning +for a number: + +\startbuffer[defs] +\def\foo#1% + {\ifnum#1>100\relax + 1\else 0% + \fi} +\stopbuffer + +\typebuffer[defs][option=TEX] \getbuffer[defs] + +However, watch what happens here: + +\startbuffer[demo] +\edef\result{\foo{123}} +\stopbuffer + +\typebuffer[demo][option=TEX] \getbuffer[demo] + +The \type {\result} macro has the following body: + +\expanded{\setbuffer[result]\meaning\result\endbuffer} + +\typebuffer[result][option=TEX] + +A neat trick out of this is the following: + +\startbuffer[defs] +\def\foo#1% + {\ifnum#1>\numexpr100\relax + 1\else 0% + \fi} +\stopbuffer + +\typebuffer[defs][option=TEX] \getbuffer[defs] + +\getbuffer[demo] + +Now the body of \type {\result} looks like this: + +\expanded{\setbuffer[result]\meaning\result\endbuffer} + +\typebuffer[result][option=TEX] + +Of course this also works: + +\startTEX +\def\foo#1% + {\ifnum#1>100 % + 1\else 0% + \fi} +\stopTEX + +as a space also delimits scanning the number. But that method can actually introduce +that space in the output. Think of this definition: + +\startbuffer[defs] +\def\foo#1#2% + {\ifnum#1>#2 % + 1\else 0% + \fi} +\stopbuffer + +\typebuffer[defs][option=TEX] \getbuffer[defs] + +What if \type {#2} has a trailing space? What if it is a verbose number? What if +it is a counter variable? 
+ +\startbuffer[demo] +\scratchcounter=100 + [\foo{101}{100}] [\foo{101}{100 }] [\foo{101}\scratchcounter] +\scratchcounter=101 + [\foo{100}{101}] [\foo{100}{101 }] [\foo{100}\scratchcounter] +\stopbuffer + +\typebuffer[demo][option=TEX] + +\startlines +\getbuffer[demo] +\stoplines + +If you really want to introduce an unpredictable situation, use a coding style like +this: + +\startTEX +\def\foo#1#2#3#4{\if#1=#2#3\else#4\fi} +\stopTEX + +This is not that imaginary as you often see users play safe and do things like this: + +\startTEX +\ifnum\scratchcounterone=\scratchcountertwo% + ... +\else + ... +\fi +\stopTEX + +Here the percent sign is useless as the number scanner already got the number, +just try: + +\startTEX +\scratchcounterone=1 +\scratchcountertwo=1 + +\ifnum\scratchcounterone=\scratchcountertwo + yes +\else + nop +\fi +\stopTEX + +A previous one liner formatted like this really is not better! + +\startTEX +\def\foo#1#2#3#4% + {\ifnum#1=#2% + #3% + \else + #4% + \fi} +\stopTEX + +When you define macros more often than not you don't want unexpected spaces (aka spurious spaces) +which is why in \CONTEXT\ for instance setups ignores lines: + +\startbuffer[defs] +\startsetups foo + here + we ignore + spaces at the end + of a line +\stopsetups +\stopbuffer + +\typebuffer[defs][option=TEX] \getbuffer[defs] + +so we get: \quotation {\directsetup{foo}} which means that the normally few times +that we {\em do} want spaces we need to be explicit: + +\startbuffer[defs] +\startsetups foo + here\space + we ignore\space + spaces at the end\space + of a line\space +\stopsetups +\stopbuffer + +\typebuffer[defs][option=TEX] \getbuffer[defs] + +Now we're okay: \quotation {\directsetup{foo}}. The same is true for: + +\startTEX +\starttexdefinition foo + here\space + we ignore\space + spaces at the end\space + of a line\space +\stoptexdefinition +\stopTEX + +There are more cases where \TEX\ will look further. Take for example skip (glue) +scanning. A glue specification can have \type {plus} and \type {minus} fields. + +\startbuffer[defs] +\scratchdimenone=10pt +\scratchskipone =10pt plus 10pt minus 10pt +\scratchskiptwo =0pt +\stopbuffer + +\typebuffer[defs][option=TEX] + +Now take the following test: + +\startbuffer[demo] +{1 \scratchskiptwo 10pt plus 10pt \relax\the\scratchskiptwo} +{2 \scratchskiptwo \scratchdimenone plus 10pt \relax\the\scratchskiptwo} +{3 \scratchskiptwo 1\scratchdimenone plus 10pt \relax\the\scratchskiptwo} +{4 \scratchskiptwo \scratchskipone plus 10pt \relax\the\scratchskiptwo} +{5 \scratchskiptwo 1\scratchskipone plus 10pt \relax\the\scratchskiptwo} +\stopbuffer + +\typebuffer[demo][option=TEX] + +\startlines +\inlinebuffer[defs]\getbuffer[demo] +\stoplines + +If you wonder what the second \type {\relax} does, here is a variant: + +\startlines +{1 \scratchskiptwo 10pt plus 10pt \the\scratchskiptwo} +{2 \scratchskiptwo \scratchdimenone plus 10pt \the\scratchskiptwo} +{3 \scratchskiptwo 1\scratchdimenone plus 10pt \the\scratchskiptwo} +{4 \scratchskiptwo \scratchskipone plus 10pt \the\scratchskiptwo} +{5 \scratchskiptwo 1\scratchskipone plus 10pt \the\scratchskiptwo} +\stoplines + +\typebuffer[demo][option=TEX] + +\startlines +\inlinebuffer[defs]\getbuffer[demo] +\stoplines + +In this second variant \TEX\ happily keep looking for a glue specification when +it sees the \type {\the} so it serializes \type {\scratchskiptwo}. But as it sees +\type {0pt} then, it stops scanning the glue spec. What we get typeset is the old +value, not the new one! 
If you want to prevent this you need to \type {\relax}. + +Another case where \TEX\ keeps scanning is the following: + +\startbuffer[demo] +\vrule width 40pt height 2pt depth 5pt \quad +\vrule width 40pt height 20pt depth 5pt height 10pt \quad +\vrule width 40pt height 10pt height 20pt \quad +\vrule width 40pt height 20pt depth 5pt height 10pt width 80pt +\stopbuffer + +\typebuffer[demo][option=TEX] + +This gives the rules: + +\startlinecorrection \darkgray +\getbuffer[demo] +\stoplinecorrection + +So you can overload dimensions. The space before the \type {quad} is gobbled as +part of the look ahead for more keywords. + +Often rules (just like glue assignments) are wrapped in macro definitions where the +macro writer used \type {\relax} to look ahead. That way you prevent an error message +in cases like: + +\startTEX +\def\foo{\vrule width 40pt height 2pt} + +The \foo depth of this thought is amazing. +\stopTEX + +because \type {of} definitely is not a valid dimension. Even more subtle is: + +\startTEX +\def\foo{\hskip 10pt plus 1fil} + +The \foo fine points of typesetting can actually become a nightmare. +\stopTEX + +As \TEX\ will now see the \type {f} of \type {fine} as further specification and +think that you want \type {1fill}. + +So, the most important lesson of this chapter is that you need to be aware of the way +\TEX\ scans for quantities and specifications. In most cases the users can safely use +a \type {\relax} to prevent a lookahead. And try to avoid adding percent signs all +over the place. + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-style.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-style.tex new file mode 100644 index 00000000000..bee526d7b14 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-style.tex @@ -0,0 +1,52 @@ +\startenvironment texit-style + +\setupbodyfont + [dejavu,11pt] + +\setuphead + [chapter] + [style=\bfc, + header=empty, + color=darkgray] + +\setuplist + [chapter] + [alternative=c, + width=1.5em] + +\setuplayout + [width=middle, + height=middle, + topspace=15mm, + backspace=15mm, + header=15mm, + footer=0mm] + +\setupwhitespace + [big] + +\setupheadertexts + [] + +\setupheadertexts + [] + [{\getmarking[chapter]\quad\pagenumber}] + [{\pagenumber\quad\getmarking[chapter]}] + [] + +\setupheader + [style=\bf, + color=darkgray] + +% \setuptype +% [color=darkred] + +% \setuptyping +% [color=darkred] + +\setuppagenumbering + [alternative=doublesided] + +\usemodule[scite] + +\stopenvironment diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-titlepage.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-titlepage.tex new file mode 100644 index 00000000000..1b7cabe8441 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit-titlepage.tex @@ -0,0 +1,40 @@ +\environment texit-style + +\startcomponent texit-titlepage + +\startMPpage + +fill Page + withcolor "darkyellow" + withtransparency (1,.75) ; + +draw image ( + for i=1 upto 500 : + draw (((1,-2) ... origin ... (-1,2)) randomized 0.2) + scaled 1cm + rotated (0 randomized 15) + shifted (origin randomized (PaperWidth,PaperHeight)) + withpen pencircle yscaled 5mm xscaled 2mm rotated 45 + withcolor "darkmagenta" + withtransparency (1,.5) + ; + endfor ; +) shifted center Page ; + +clip currentpicture to Page ; + +draw textext.ulft ("\TeX it") ysized 5cm % indeed, no space after \TeX ! 
+ shifted lrcorner Page shifted (-1cm,1cm) + withcolor white ; + +draw textext.llft ("Hans Hagen") ysized 1cm + rotated 90 + shifted urcorner Page shifted (-15mm,-1cm) + withcolor white ; + +\stopMPpage + +\startstandardmakeup[doublesided=no,page=no] +\stopstandardmakeup + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit.tex b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit.tex new file mode 100644 index 00000000000..b8b70c00d55 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/texit/texit.tex @@ -0,0 +1,19 @@ +\environment texit-style + +\startdocument + +\component texit-titlepage + +\startfrontmatter + \component texit-contents + \component texit-introduction +\stopfrontmatter + +\startbodymatter + \component texit-lookahead + \component texit-conditions + \component texit-leaders + % \component texit-efficiency +\stopbodymatter + +\stopdocument diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/tools/tools-mkiv.tex b/Master/texmf-dist/doc/context/sources/general/manuals/tools/tools-mkiv.tex index 48142675627..949a878cad0 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/tools/tools-mkiv.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/tools/tools-mkiv.tex @@ -37,11 +37,17 @@ \setuptyping [color=DocumentColor] -\usetypescript - [iwona] +\setupalign + [tolerant,stretch] + +% \usetypescript +% [iwona] +% +% \setupbodyfont +% [iwona] \setupbodyfont - [iwona] + [ibmplex,rm] \setuphead [chapter] @@ -53,6 +59,11 @@ [style=\bfb, color=DocumentColor] +\setuphead + [sub section] + [style=\bfa, + color=DocumentColor] + \setupinteraction [hidden] @@ -130,7 +141,7 @@ This manual is work in progress. Feel free to submit additions or corrections. Before you start reading, it is good to know that in order to get starting with \CONTEXT, the easiest way to do that is to download the standalone distribution from \type {contextgarden.net}. After that you only need to make sure that \type -{luatex} is in your path. The main command you use is then \type {context} and +{luatex} is in your path. The main command you use is then \sCONTEXT\ and normally it does all the magic it needs itself. \stopsection @@ -488,9 +499,8 @@ You can copy \type {mtxrun.exe} to for instance \type {context.exe} and it will still use \sMTXRUN\ for locating the right script. It also takes care of mapping \sTEXMFSTART\ to \sMTXRUN. So we've removed the intermediate \type {cmd} step, can not run the script as any program, and most of all, we're as efficient as can -be. - -Of course this program is only meaningful for the \CONTEXT\ approach to tools. +be. Of course this program is only meaningful for the \CONTEXT\ approach to +tools. It may all sound more complex than it is but once it works users will not notice those details. Als, in practice not that much has changed in running the tools @@ -498,12 +508,909 @@ between \MKII\ and \MKIV\ as we've seen no reason to change the methods. \stopsection +\startsection[title={A detailed look at \sMTXRUN}] + +This section is derived from Taco Hoekwaters presentation and article for the +2018 \CONTEXT\ meeting. You might want to read this is you want to benefit from +even the most obscure features. There is a bit of repetition with the previous +sections but so be it. 
+ +% macros specific to this section + +\def\highlighted#1#2#3% + {\testpage[4] + \dontleavehmode + \backgroundline[gray]{\tt\strut #1} + \nowhitespace + \startnarrower[left] + \veryraggedright + #2\par + \doifsomething{#3}{{\it #3}} + \stopnarrower} + +\definestartstop + [options] + [before=\blank, + after=\blank] + +\def\option#1#2% + {\highlighted{--#1}{#2}{}} + +\definestartstop + [mtxrunscripts] + [before=\blank,after=\blank] + +\def\mtxrunscriptv#1#2#3#4% + {\highlighted{#1}{#3}{#4}} + +\def\mtxrunscript#1#2#3% + {\highlighted{#1}{#3}{}} + +\definestartstop + [mtxrunenvironment] + [before=\blank, + after=\blank] + +\def\mtxrunenv#1#2% + {\highlighted{#1}{#2}{}} + +\startsubsection[title=Common flags] + +Much of the code inside \MKIV\ can alter its behaviour based on either \quote +{trackers} (which add debugging information to the terminal and log output) or +\quote {directives} or \quote {experiments} (for getting code to perform some +alternate behaviour). Since this also affects the \LUA\ code within \sMTXRUN\ +itself, it makes sense to list these options first. + +Getting information on trackers, directives and experiments. Trackers enable more +extensive status messages on the console or in \CONTEXT\ additional visual clues. +Directives change behaviour that are not part of the official interface and have +no corresponding commands. Experiments are like directives but not official +(yet). + +\startoptions + \option + {trackers} + {show (known) trackers} + \option + {directives} + {show (known) directives} + \option + {experiments} + {show (known) experiments} +\stopoptions + +Enabling directives, trackers and experiments: + +\startoptions + \option + {trackers=list} + {enable given trackers} + \option + {directives=list} + {enable given directives} + \option + {experiments=list} + {enable given experiments} +\stopoptions + +The next tree (hidden) options are converted into \quote {directives} entries, +that are then enabled. These are just syntactic sugar for the relevant directive. + +\startoptions + \option + {silent[=...]} + {sets \type {logs.blocked={\%s}}} + \option + {errors[=...]} + {sets \type {logs.errors={\%s}}} + \option + {noconsole} + {sets \type {logs.target=file}} +\stopoptions + +As you can see here, various directives (and even some trackers) have optional +arguments, which can make specifying such directives on the command line a bit of +a challenge. Explaining the details of all the directives is outside of the scope +of this article, but you can look them up in the \CONTEXT\ source by searching +for \typ {directives.register} and \typ {trackers.register}. + +In verbose mode, \sMTXRUN\ itself gives more messages, and it also \typ +{resolvers.locating}, which is a tracker itself: + +\startoptions + \option + {verbose} + {give a bit more info} +\stopoptions + +The \type {--timedlog} (hidden) option starts the \sMTXRUN\ output with a +timestamp line: + +\startoptions + \option + {timedlog} + {prepend output with a timestamp} +\stopoptions + +\stopsubsection + +\startsubsection[title=Setup for finding files and configurations] + +The next block of options deals with the setup of \sMTXRUN\ itself. You do not +need to deal with these options unless you are messing with the \CONTEXT\ +distribution yourself instead of relying on a prepackaged solution, or you need +to use kpathsea for some reason (typically in a \MKII\ environment). In +particular, \type {--progname} and \type {--tree} are often needed as well when +using the \type {kpse} options. 
+ +\startoptions + \option + {configurations} + {show configuration order, alias \type {--show-configurations}} + \option + {resolve} + {resolve prefixed arguments, see \type {--prefixes}, below} +\stopoptions + +and: + +\startoptions + \option + {usekpse} + {use kpse as fallback (when no \MKIV\ and cache installed, often slower)} + \option + {forcekpse} + {force using kpse (handy when no \MKIV\ and cache installed but less + functionality)} + \option + {progname=str} + {format or backend} + \option + {tree=pathtotree} + {use given texmf tree (default file: \type {setuptex.tmf})} +\stopoptions + +\stopsubsection + +\startsubsection[title=Options for finding files and reporting configurations] + +Once the configuration setup is done, it makes sense to have a bunch or options +to use and|/|or query the configuration. + +\startoptions + \option + {locate} + {locate given filename in database (default) or system (uses the + sub||options \type {--first}, \type {--all} and \type {--detail})} + \option + {autogenerate} + {regenerate databases if needed (handy when used to run context in an + editor)} + \option + {generate} + {generate file database} + \option + {prefixes} + {show supported prefixes for file searches} + \option + {variables} + {show configuration variables (uses the sub||option \type {--pattern}, + and an alias is \type {--show-variables})} + \option + {expansions} + {show configuration variable expansion (uses the sub||options + \type{--pattern}, alias \type {--show-expansions})} + \option + {expand-braces} + {expand complex variable} + \option + {resolve-path} + {expand variable (completely resolve paths)} + \option + {expand-path} + {expand variable (resolve paths)} + \option + {expand-var} + {expand variable (resolves references inside variables, alias + \type{--expand-variable})} + \option + {show-path} + {show path expansion of \type {...} (alias \type {--path-value})} + \option + {var-value} + {report value of variable (alias \type {--show-value})} + \option + {find-file} + {report file location; it uses the sub||options \type {--all}, \type + {--pattern}, and \type {--format}} + \option + {find-path} + {report path of file} +\stopoptions + +Hidden option: + +\startoptions + \option + {format-path} + {report format path} +\stopoptions + +\stopsubsection + +\startsubsection[title=Running code] + +Here we come to the core functionality of \sMTXRUN: running scripts. First +there are few options that trigger how the running takes place: + +\startoptions + \option + {path=runpath} + {go to given path before execution} + \option + {ifchanged=filename} + {only execute when given file has changed (this loads and saves an md5 + checksum from \type {filename.md5})} + \option + {iftouched=old,new} + {only execute when given file has changed (time stamp)} + \option + {timedrun} + {run a script or program and time its run (external)} +\stopoptions + +Specifying both \type {--iftouched} and \type {--ifchanged} means both are +tested, and when either one is false, nothing will happen. These options have to +come before one of the next options: + +\startoptions + \option + {script} + {run an mtx script (where \LUA\ is the preferred method); it has the + sub||options \typ {--nofiledatabase}, \typ {--autogenerate}, \type + {--load}, and \type {--save}. 
The latter two are currently no|-|ops} + \option + {execute} + {run a script or program externally (\type {texmfstart} method); it has + sub||option \type {--noquotes}} + \option + {internal} + {run a script using built|-|in libraries (alias is \type {--ctxlua})} + \option + {direct} + {run an external program; it has the sub||option \type {--noquotes}} +\stopoptions + +Since scripts potentially have their own options, any options intended for +\sMTXRUN\ itself have to come {\it before} the option that specifies the script +to run, and options for the script itself have to come {\it after} the option +that gives the script name. This is especially true when using \type {--script}, +so it is important to check the order of your options. + +Of the four above options, \type {--script} is the most important one, since that +is the one that finds and executes the \LUA\ \sMTXRUN\ scripts provided by the +distribution. With \typ {--nofiledatabase}, it will not attempt to resolve any +file names (which means you need either a local script or a full path name). On +the opposite side, when you also provide \typ {--autogenerate}, it will not only +attempt to resolve the file name, it will also regenerate the database if it +cannot find the script on the first try. In a future version of \CONTEXT, the +\type {--load} and \type {--save} will allow you to save|/|restore the current +command line in a file for reuse. + +The shell return value of \sMTXRUN\ indicates whether the script was found. When +you specify something like \type {--script base}, \sMTXRUN\ actually searches +for \type {mtx-base.lua}, \type {mtx-bases.lua}, \type {mtx-t-base.lua}, \type +{mtx-t-bases.lua}, and \type {base.lua}, in that order. The +distribution||supplied scripts normally use \type {mtx-<name>.lua} as template. +The template names with \type {mtx-t-} prefix is reserved for third||party +scripts, and \type {<name>.lua} is just a last-ditch effort if nothing else +works. Scripts are looked for in the local path, and in whatever directories the +configuration variable \type {LUAINPUTS} points to. + +The \type {--execute} options exists mostly for the non||\LUA\ {\MKII} scripts +that still exist in the distribution. It will try to find a matching interpreter +for non||\LUA\ scripts, and it is aware of a number of distribution||supplied +scripts so that if you specify \type {--execute texexec}, it knows that what you +really want to execute is \type {ruby texexec.rb}. Support is present for \RUBY\ +(\type {.rb}, \LUA\ (\type {.lua}), python (\type {.py}) and \PERL\ (\type {.pl}) +scripts (tested in that order). File resolving uses \type {TEXMFSCRIPTS} from the +configuration. The shell return value of \sMTXRUN\ indicates whether the script +was found and executed. + +The \type {--internal} option uses the file search method of \type {--execute}, +but then assumes this is a \LUA\ script and executes it internally like \type +{--script}. This is useful if you have a \LUA\ script in an odd location. + +The last of the four options, \type {--direct}, directly executes an external +program. You need to give the full path for binaries not in the current shell +\type {PATH}, because no searching is done at all. The shell return value of +\sMTXRUN\ in this case is a boolean based on the return value of +\type {os.exec()}. 
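
To make the ordering concrete, here are a few sample invocations (the file names are made up; the options are the ones described above, plus the \type {--reload} option of the \type {fonts} script):

\starttyping
mtxrun --verbose --script context myfile.tex
mtxrun --ifchanged=mydata.xml --script context myproduct.tex
mtxrun --script fonts --reload
\stoptyping

In the first two lines the options belong to \sMTXRUN\ itself and therefore come before \type {--script}; in the last line \type {--reload} comes after the script name and is passed on to the \type {fonts} script.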
+ +It is also possible to execute bare \LUA\ code directly: + +\startoptions + \option + {evaluate} + {run code passed on the command-line (between quotes)} +\stopoptions + +\stopsubsection + +\startsubsection[title=Options for maintenance of \sMTXRUN\ itself] + +None of these are advertised. Normally developers should be the only ones needing +them, but if you made a change to one of the distributed libraries (maybe because +of a beta bug), you may need to run \type {--selfmerge} and \type {--selfupdate}. + +\startoptions + \option + {selfclean} + {remove embedded libraries} + \option + {selfmerge} + {update embedded libraries in \type {mtxrun.lua}} + \option + {selfupdate} + {copy \type {mtxlua.lua} to the executable directory, renamed \type + {mtxrun}} +\stopoptions + +\stopsubsection + +\startsubsection[title=Creating stubs] + +Stubs are little shortcuts that live in some binaries directory. For example, the +content of the \UNIX||style \sCONTEXT\ shell command is: + +\starttyping +#!/bin/sh +mtxrun --script context "$@" +\stoptyping + +Apart from the \sCONTEXT\ command itself (which is provided by the distribution), +use of stubs is discouraged. Still, the \sMTXRUN\ options are there because +sometimes existing workflows depend on executable tool names like \type +{ctxtools}. + +\startoptions + \option + {makestubs} + {create stubs for (context related) scripts} + \option + {removestubs} + {remove stubs (context related) scripts} + \option + {stubpath=binpath} + {paths where stubs will be written} + \option + {windows} + {create windows (mswin) stubs (alias \type {--mswin})} + \option + {unix} + {create unix (linux) stubs (alias \type {--linux})} +\stopoptions + +\stopsubsection + +\startsubsection[title=Remaining options] + +The remaining options are hard to group into a subcategory. These are the +advertised options: + +\startoptions + \option + {systeminfo} + {show current operating system, processor, et cetera} + \option + {edit} + {launch editor with found file; the editor is taken from the environment + variable \type {MTXRUN_EDITOR}, or \type {TEXMFSTART_EDITOR}, or + \type {EDITOR}, or as a last resort: \type {gvim}} + \option + {launch} + {launch files like manuals, assumes os support (uses the sub||options + \type {--all}, \type {--pattern} and \type {--list})} +\stopoptions + +While these are sort of hidden options: + +\startoptions + \option + {ansi} + {colorize output to terminal using \ANSI\ escapes} + \option + {associate} + {launch files like manuals, assumes os support. 
this function does not do + any file searching, so you have to use either a local file or a full + path name} + \option + {exporthelp} + {output the \sMTXRUN\ \XML\ help blob (useful for creating man and \HTML\ + help pages)} + \option + {fmt} + {shortcut for \type {--script base --fmt}} + \option + {gethelp} + {attempt to look up remote \sCONTEXT\ command help (uses the sub||options + \type{--command} and \type {--url})} + \option + {help} + {print the \sMTXRUN\ help screen} + \option + {locale} + {force setup of locale; unless you are certain you need this option, stay + away from it, because it can interfere massively with \CONTEXT's \LUA\ + code} + \option + {make} + {(re)create format files (aliases are \type {--ini} and \type {--compile})} + \option + {platform} + {(alias is \type {--show-platform})} + \option + {run} + {shortcut for \type {--script base --run}} + \option + {version} + {print \sMTXRUN\ version} +\stopoptions + +\stopsubsection + +\startsubsection[title=Known scripts] + +When you run \type {mtxrun --scripts}, it will output a list of \quote {known} +scripts. The definition of \quote {known} is important here: the list comprises +the scripts that are present in the same directory as \type {mtx-context.lua} +that do not have an extra hyphen in the name (like \type {mtx-t-...}scripts would +have). In a normal installation, this means it \quote {knows} almost all the +scripts that are distributed with \CONTEXT. Note: it skips over any files from +the distribution that do have an extra hyphen, like the \type {mtx-server} +support scripts. + +Since this section is about \sMTXRUN, I'll just present the list of the scripts +that are \quote {known} in the current \CONTEXT\ beta as output by \sMTXRUN\ +itself, and not get into detail about all of the script functionality (they all +have \type {--help} options if you want to find out more). Where we still felt the +need to explain something, there is an extra bit of text in italics. 
+ +\startmtxrunscripts +\mtxrunscript + {babel} + {1.20} + {Babel Input To UTF Conversion} +\mtxrunscript + {base} + {1.35} + {ConTeXt TDS Management Tool (aka luatools)} +\mtxrunscript + {bibtex} + {} + {bibtex helpers (obsolete)} +\mtxrunscript + {cache} + {0.10} + {ConTeXt & MetaTeX Cache Management} +\mtxrunscript + {chars} + {0.10} + {MkII Character Table Generators} +\mtxrunscriptv + {check} + {0.10} + {Basic ConTeXt Syntax Checking} + {Occasionally useful on big projects, but be warned that it does not actually + run any \TEX\ engine, so it is not 100\% reliable.} +\mtxrunscriptv + {colors} + {0.10} + {ConTeXt Color Management} + {This displays icc color tables by name} +\mtxrunscriptv + {convert} + {0.10} + {ConTeXT Graphic Conversion Helpers} + {A wrapper around ghostscript and imagemagick that offers some extra (batch + processing) functionality.} +\mtxrunscript + {dvi} + {0.10} + {ConTeXt DVI Helpers} +\mtxrunscriptv + {epub} + {1.10} + {ConTeXt EPUB Helpers} + {The EPUB manual (\type {epub-mkiv.pdf}) explains how to use this script.} +\mtxrunscriptv + {evohome} + {1.00} + {Evohome Fetcher} + {Evohome is a domotica system that controls your central heating} +\mtxrunscript + {fcd} + {1.00} + {Fast Directory Change} +\mtxrunscriptv + {flac} + {0.10} + {ConTeXt Flac Helpers} + {Extracts information from \type{.flac} audio files into an \XML\ index.} +\mtxrunscript + {fonts} + {0.21} + {ConTeXt Font Database Management} +\mtxrunscript + {grep} + {0.10} + {Simple Grepper} +\mtxrunscript + {interface} + {0.13} + {ConTeXt Interface Related Goodies} +\mtxrunscript + {metapost} + {0.10} + {MetaPost to PDF processor} +\mtxrunscript + {metatex} + {0.10} + {MetaTeX Process Management (obsolete)} +\mtxrunscript + {modules} + {1.00} + {ConTeXt Module Documentation Generators} +\mtxrunscriptv + {package} + {0.10} + {Distribution Related Goodies} + {This script is used to create the generic \CONTEXT\ code used in + \LUA\LATEX~c.s.} +\mtxrunscriptv + {patterns} + {0.20} + {ConTeXt Pattern File Management} + {Hyphenation patterns, that is \unknown} +\mtxrunscript + {pdf} + {0.10} + {ConTeXt PDF Helpers} +\mtxrunscript + {plain} + {1.00} + {Plain TeX Runner} +\mtxrunscript + {profile} + {1.00} + {ConTeXt MkIV LuaTeX Profiler} +\mtxrunscript + {rsync} + {0.10} + {Rsync Helpers} +\mtxrunscript + {scite} + {1.00} + {Scite Helper Script} +\mtxrunscriptv + {server} + {0.10} + {Simple Webserver For Helpers} + {There are some subscripts associated with this.} +\mtxrunscript + {synctex} + {1.00} + {ConTeXt SyncTeX Checker} +\mtxrunscript + {texworks} + {1.00} + {TeXworks Startup Script} +\mtxrunscript + {timing} + {0.10} + {ConTeXt Timing Tools} +\mtxrunscript + {tools} + {1.01} + {Some File Related Goodies} +\mtxrunscript + {unicode} + {1.02} + {Checker for \type{char-def.lua}} +\mtxrunscript + {unzip} + {0.10} + {Simple Unzipper} +\mtxrunscript + {update} + {1.03} + {ConTeXt Minimals Updater} +\mtxrunscript + {watch} + {1.00} + {ConTeXt Request Watchdog} +\mtxrunscriptv + {youless} + {1.10} + {YouLess Fetcher} + {YouLess is a domotica system that tracks your home energy use.} +\stopmtxrunscripts + +\stopsubsection + +\startsubsection[title=Writing your own] + +A well|-|written script has some required internal structure. It should start with +a module definition block. This gives some information about the module, but more +importantly, it prevents double|-|loading. 
+
+Here is an example:
+
+\starttyping
+if not modules then modules = { } end
+
+modules ['mtx-envtest'] = {
+    version   = 0.100,
+    comment   = "companion to mtxrun.lua",
+    author    = "Taco Hoekwater",
+    copyright = "Taco Hoekwater",
+    license   = "bsd"
+}
+\stoptyping
+
+Next up is a variable containing the help information. The help information is
+actually a bit of \XML\ stored in a \LUA\ string. In the full example listing at
+the end of this article, you can see what the internal structure is supposed to
+be like.
+
+\starttyping
+local helpinfo = [[
+<?xml version="1.0"?>
+<application>
+    ....
+</application>
+]]
+\stoptyping
+
+And this help information is then used to create an instance of an \type
+{application} table.
+
+\starttyping
+local application = logs.application {
+    name     = "envtest",
+    banner   = "Mtxrun environment demo",
+    helpinfo = helpinfo,
+}
+\stoptyping
+
+After this call, the \type {application} table contains (amongst some other
+things) three functions that are very useful:
+
+\startmtxrunenvironment
+\mtxrunenv
+    {identify()}
+    {Prints out a banner identifying the current script to the user.}
+\mtxrunenv
+    {report(str)}
+    {For printing information to the terminal with the script name as prefix.}
+\mtxrunenv
+    {export()}
+    {Prints the \type {helpinfo} to the terminal, so it can easily be used for
+     documentation purposes.}
+\stopmtxrunenvironment
+
+Next up, it is good to define your scripts' functionality in functions in a
+private table. This prevents namespace pollution, and looks like this:
+
+\starttyping
+scripts         = scripts         or { }
+scripts.envtest = scripts.envtest or { }
+
+function scripts.envtest.runtest()
+    application.report("script name is " .. environment.ownname)
+end
+\stoptyping
+
+And finally, identify the current script, followed by handling the provided
+options (usually with an \type {if}||\type {else} statement).
+
+\starttyping
+if environment.argument("exporthelp") then
+    application.export()
+elseif environment.argument('test') then
+    scripts.envtest.runtest()
+else
+    application.help()
+end
+\stoptyping
+
+\stopsubsection
+
+\startsubsection[title=Script environment]
+
+\sMTXRUN\ includes lots of the internal \LUA\ helper libraries from \CONTEXT. We
+actually maintain a version of the script without all those libraries included,
+and before every (beta) \CONTEXT\ release, an amalgamated version of \sMTXRUN\ is
+added to the distribution. In the merging process, almost all comments are
+stripped from the embedded libraries, so if you want to know details, it is
+better to look in the original \LUA\ source file.
+
+Inside \sMTXRUN, the full list of embedded libraries can be queried via the array
+\type {own.libs}:
+
+\startalign[flushleft]
+l-lua.lua l-macro.lua l-sandbox.lua l-package.lua l-lpeg.lua
+l-function.lua l-string.lua l-table.lua l-io.lua l-number.lua l-set.lua l-os.lua
+l-file.lua l-gzip.lua l-md5.lua l-url.lua l-dir.lua l-boolean.lua l-unicode.lua
+l-math.lua util-str.lua util-tab.lua util-fil.lua util-sac.lua util-sto.lua
+util-prs.lua util-fmt.lua trac-set.lua trac-log.lua trac-inf.lua trac-pro.lua
+util-lua.lua util-deb.lua util-tpl.lua util-sbx.lua util-mrg.lua util-env.lua
+luat-env.lua lxml-tab.lua lxml-lpt.lua lxml-mis.lua lxml-aux.lua lxml-xml.lua
+trac-xml.lua data-ini.lua data-exp.lua data-env.lua data-tmp.lua data-met.lua
+data-res.lua data-pre.lua data-inp.lua data-out.lua data-fil.lua data-con.lua
+data-use.lua data-zip.lua data-tre.lua data-sch.lua data-lua.lua data-aux.lua
+data-tmf.lua data-lst.lua util-lib.lua luat-sta.lua luat-fmt.lua
+\stopalign
+
+In fact, the \LUA\ table \type {own} contains some other useful stuff like the
+script's actual disk name and location (\type {own.name} and \type {own.path})
+and some internal variables like a list of all the locations it searches for its
+embedded libraries (\type {own.list}), which is used by the \type {--selfmerge}
+option and also allows the non||amalgamated version to run (since otherwise \type
+{--selfmerge} could not be bootstrapped).
+
+\sMTXRUN\ offers a programming environment that makes it easy to write \LUA\
+scripts. The most important element of that environment is a \LUA\ table that is
+conveniently called \type {environment} (\type {util-env} does the actual work of
+setting that up).
+
+The bulk of \type {environment} consists of functions and variables that deal
+with the command||line given by the user, as \sMTXRUN\ does quite a bit of work
+on the given command||line. The goal is to safely tuck all the given options into
+the \type {arguments} and \type {files} tables. This work is done by two
+functions called \type {initializearguments()} and \type {splitarguments()}.
+These functions are part of the \type{environment} table, but you should not need
+them as they have already been called once control is passed on to your script.
+
+\startmtxrunenvironment
+\mtxrunenv
+    {arguments}
+    {These are the processed options to the current script. The keys are option
+     names (without the leading dashes) and the value is either \type {true} or a
+     string with one level of shell quotes removed.}
+\mtxrunenv
+    {files}
+    {This array holds all the non||option arguments to the current script.
+     Typically, those are supposed to be files, but they could be any text,
+     really.}
+\mtxrunenv
+    {getargument(name,partial)}
+    {Queries the \type {arguments} table using a function. Its main reason for
+     existence is the \type {partial} argument, which allows scripts to accept
+     shortened command||line options (alias: \type {argument()}).}
+\mtxrunenv
+    {setargument(name,value)}
+    {Sets a value in the \type {arguments} table.
This can be useful in + complicated scripts with default options.} +\stopmtxrunenvironment + +In case you need access to the full command||line, there are some possibilities: + +\startmtxrunenvironment +\mtxrunenv + {arguments_after} + {These are the unquoted but otherwise unprocessed arguments to your script as + an array.} +\mtxrunenv + {arguments_before} + {These are the unquoted but otherwise unprocessed arguments to \sMTXRUN\ + before your scripts' name (so the last entry is usually \type {--script}).} +\mtxrunenv + {rawarguments} + {This is the whole unprocessed command||line as an array.} +\mtxrunenv + {originalarguments} + {Like \type{rawarguments}, but with some top||level quotes removed.} +\mtxrunenv + {reconstructcommandline(arg,noquote)} + {Tries to reconstruct a command||line from its arguments. It uses \type + {originalarguments} if no \type {arg} is given. Take care: due to the + vagaries of shell command||line processing, this may or may not work when + quoting is involved.} +\stopmtxrunenvironment + +\type{environment} also stores various bits of information you may find useful: + +\startmtxrunenvironment +\mtxrunenv + {validengines} + {This table contains keys for \type {luatex} and \type {luajittex}. This is + only relevant when \sMTXRUN\ itself is called via \LUATEX's \type {luaonly} + option.} +\mtxrunenv + {basicengines} + {This table maps executable names to \type{validengines} entries.} +\mtxrunenv + {default_texmfcnf} + {This is the \type {texmfcnf} value from \type {kpathsea}, processed for use + with \MKIV\ in the unlikely event this is needed.} +\mtxrunenv + {homedir} + {The user's home directory.} +\mtxrunenv + {ownbin} + {The name of the binary used to call \sMTXRUN.} +\mtxrunenv + {ownmain} + {The mapped version of \type {ownbin}.} +\mtxrunenv + {ownname} + {Full name of this instance of \sMTXRUN.} +\mtxrunenv + {ownpath} + {The path this instance of \sMTXRUN\ resides in.} +\mtxrunenv + {texmfos} + {Operating system root directory path.} +\mtxrunenv + {texos} + {Operating system root directory name.} +\mtxrunenv + {texroot} + {\CONTEXT\ root directory path.} +\stopmtxrunenvironment + +As well as some functions: + +\startmtxrunenvironment +\mtxrunenv + {texfile(filename)} + {Locates a {\TEX} file.} +\mtxrunenv + {luafile(filename)} + {Locates a \LUA\ file.} +\mtxrunenv + {loadluafile(filename,version)} + {Locates, compiles and loads a \LUA\ file, possibly in compressed \type {.luc} + format. In the compressed case, it uses the \type {version} to make sure the + compressed form is up||to||date.} +\mtxrunenv + {luafilechunk(filename,silent,macros)} + {Locates and compiles a \LUA\ file, returning its contents as data.} +\mtxrunenv + {make_format(name,arguments)} + {Creates a format file and stores in in the \CONTEXT\ cache, used by \type + {mtxrun --make}.} +\mtxrunenv + {relativepath(path,root)} + {Returns a modified version of \type {root} based on the relative path in + \type {path}.} +\mtxrunenv + {run_format(name,data,more)} + {Run a \TEX\ format file.} +\stopmtxrunenvironment + +\stopsubsection + +\startsubsection[title=Shell return values] + +As explained earlier, the shell return value of \sMTXRUN\ normally indicates +whether the script was found. If you are running a \CONTEXT\ release newer than +September 2018 and want to modify the shell return value from within your script, +you can use \type {os.exitcode}. Whatever value you assign to that variable will +be the shell return value of your script. 
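
A minimal sketch of how that looks in practice (the test itself is made up; \type {environment.files} and \type {application.report} were introduced earlier in this section):

\starttyping
-- somewhere in your mtx-... script: signal a failure to the shell
if not environment.files[1] then
    application.report("no input file given")
    os.exitcode = 1
end
\stoptyping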
+ +\stopsubsection + +\stopsection + \startsubject[title={Colofon}] \starttabulate[|B|p|] \NC author \NC \documentvariable{author}, \documentvariable{affiliation}, \documentvariable{location} \NC \NR + \NC \NC Taco Hoekwater, extra \sMTXRUN\ section \NC \NR \NC version \NC \currentdate \NC \NR \NC website \NC \documentvariable{website} \endash\ \documentvariable{support} \NC \NR diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-commands.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-commands.tex new file mode 100644 index 00000000000..ab998989542 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-commands.tex @@ -0,0 +1,893 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-commands + +\startchapter[title={Commands}] + +\startsection[title={nodes and lpaths}] + +The amount of commands available for manipulating the \XML\ file is rather large. +Many of the commands cooperate with the already discussed setups, a fancy name +for a collection of macro calls either or not mixed with text. + +Most of the commands are just shortcuts to \LUA\ calls, which means that the real +work is done by \LUA. In fact, what happens is that we have a continuous transfer +of control from \TEX\ to \LUA, where \LUA\ prints back either data (like element +content or attribute values) or just invokes a setup whereby it passes a +reference to the node resolved conform the path expression. The invoked setup +itself might return control to \LUA\ again, etc. + +This sounds complicated but examples will show what we mean here. First we +present the whole repertoire of commands. Because users can read the source code, +they might uncover more commands, but only the ones discussed here are official. +The commands are grouped in categories. + +In the following sections \cmdinternal {cd:node} means a reference to a node: +this can be the identifier of the root (the loaded xml tree) or a reference to a +node in that tree (often the result of some lookup. A \cmdinternal {cd:lpath} is +a fancy name for a path expression (as with \XSLT) but resolved by \LUA. + +\stopsection + +\startsection[title={commands}] + +There are a lot of commands available but you probably can ignore most of them. +We try to be complete which means that there is for instance \type {\xmlfirst} as +well as \type {\xmllast} but you probably never need the last one. There are also +commands that were used when testing this interface and we see no reason to +remove them. Some obscure ones are used in modules and after a while even I often +forget that they exist. To give you an idea of what commands are important we +show their use in generating the \CONTEXT\ command definitions (\type +{x-set-11.mkiv}) per January 2016: + +\startcolumns[n=2,balance=yes] +\starttabulate[|l|r|] +\NC \type {\xmlall} \NC 1 \NC \NR +\NC \type {\xmlatt} \NC 23 \NC \NR +\NC \type {\xmlattribute} \NC 1 \NC \NR +\NC \type {\xmlcount} \NC 1 \NC \NR +\NC \type {\xmldoif} \NC 2 \NC \NR +\NC \type {\xmldoifelse} \NC 1 \NC \NR +\NC \type {\xmlfilterlist} \NC 4 \NC \NR +\NC \type {\xmlflush} \NC 5 \NC \NR +\NC \type {\xmlinclude} \NC 1 \NC \NR +\NC \type {\xmlloadonly} \NC 1 \NC \NR +\NC \type {\xmlregisterdocumentsetup} \NC 1 \NC \NR +\NC \type {\xmlsetsetup} \NC 1 \NC \NR +\NC \type {\xmlsetup} \NC 4 \NC \NR +\stoptabulate +\stopcolumns + +As you can see filtering, flushing and accessing attributes score high. 
Below we show +the statistics of a quite complex rendering (5 variants of schoolbooks: basic book, +answers, teachers guide, worksheets, full blown version with extensive tracing). + +\startcolumns[n=2,balance=yes] +\starttabulate[|l|r|] +\NC \type {\xmladdindex} \NC 3 \NC \NR +\NC \type {\xmlall} \NC 5 \NC \NR +\NC \type {\xmlappendsetup} \NC 1 \NC \NR +\NC \type {\xmlapplyselectors} \NC 1 \NC \NR +\NC \type {\xmlatt} \NC 40 \NC \NR +\NC \type {\xmlattdef} \NC 9 \NC \NR +\NC \type {\xmlattribute} \NC 10 \NC \NR +\NC \type {\xmlbadinclusions} \NC 3 \NC \NR +\NC \type {\xmlconcat} \NC 3 \NC \NR +\NC \type {\xmlcount} \NC 1 \NC \NR +\NC \type {\xmldelete} \NC 11 \NC \NR +\NC \type {\xmldoif} \NC 39 \NC \NR +\NC \type {\xmldoifelse} \NC 28 \NC \NR +\NC \type {\xmldoifelsetext} \NC 13 \NC \NR +\NC \type {\xmldoifnot} \NC 2 \NC \NR +\NC \type {\xmldoifnotselfempty} \NC 1 \NC \NR +\NC \type {\xmlfilter} \NC 100 \NC \NR +\NC \type {\xmlfirst} \NC 51 \NC \NR +\NC \type {\xmlflush} \NC 69 \NC \NR +\NC \type {\xmlflushcontext} \NC 2 \NC \NR +\NC \type {\xmlinclude} \NC 1 \NC \NR +\NC \type {\xmlincludeoptions} \NC 5 \NC \NR +\NC \type {\xmlinclusion} \NC 16 \NC \NR +\NC \type {\xmlinjector} \NC 1 \NC \NR +\NC \type {\xmlloaddirectives} \NC 1 \NC \NR +\NC \type {\xmlmapvalue} \NC 4 \NC \NR +\NC \type {\xmlmatch} \NC 1 \NC \NR +\NC \type {\xmlprependsetup} \NC 5 \NC \NR +\NC \type {\xmlregisterdocumentsetup} \NC 2 \NC \NR +\NC \type {\xmlregistersetup} \NC 1 \NC \NR +\NC \type {\xmlremapnamespace} \NC 1 \NC \NR +\NC \type {\xmlsetfunction} \NC 2 \NC \NR +\NC \type {\xmlsetinjectors} \NC 2 \NC \NR +\NC \type {\xmlsetsetup} \NC 11 \NC \NR +\NC \type {\xmlsetup} \NC 76 \NC \NR +\NC \type {\xmlstrip} \NC 1 \NC \NR +\NC \type {\xmlstripanywhere} \NC 1 \NC \NR +\NC \type {\xmltag} \NC 1 \NC \NR +\NC \type {\xmltext} \NC 53 \NC \NR +\NC \type {\xmlvalue} \NC 2 \NC \NR +\stoptabulate +\stopcolumns + +Here many more are used but this is an exceptional case. The top is again +dominated by filtering, flushing and attribute consulting. The list can actually +be smaller. For instance, the \type {\xmlcount} can just as well be \type +{\xmlfilter} with a \type {count} finalizer. There are also some special ones, +like the injectors, that are needed for finetuning the final result. 
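
As an illustration of that overlap (the lpath is just an example), the following two calls should produce the same number:

\starttyping
\xmlcount  {#1} {/chapter/section}
\xmlfilter {#1} {/chapter/section/count()}
\stoptyping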
+ +\stopsection + +\startsection[title={loading}] + +\startxmlcmd {\cmdbasicsetup{xmlloadfile}} + loads the file \cmdinternal {cd:file} and registers it under \cmdinternal + {cd:name} and applies either given or standard \cmdinternal + {cd:xmlsetup} (alias: \type {\xmlload}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlloadbuffer}} + loads the buffer \cmdinternal {cd:buffer} and registers it under + \cmdinternal {cd:name} and applies either given or standard + \cmdinternal {cd:xmlsetup} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlloaddata}} + loads \cmdinternal {cd:text} and registers it under \cmdinternal + {cd:name} and applies either given or standard \cmdinternal + {cd:xmlsetup} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlloadonly}} + loads \cmdinternal {cd:text} and registers it under \cmdinternal + {cd:name} and applies either given or standard \cmdinternal + {cd:xmlsetup} but doesn't flush the content +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlinclude}} + includes the file specified by attribute \cmdinternal {cd:name} of the + element located by \cmdinternal {cd:lpath} at node \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlprocessfile}} + registers file \cmdinternal {cd:file} as \cmdinternal {cd:name} and + process the tree starting with \cmdinternal {cd:xmlsetup} (alias: + \type {\xmlprocess}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlprocessbuffer}} + registers buffer \cmdinternal {cd:name} as \cmdinternal {cd:name} and process + the tree starting with \cmdinternal {cd:xmlsetup} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlprocessdata}} + registers \cmdinternal {cd:text} as \cmdinternal {cd:name} and process + the tree starting with \cmdinternal {cd:xmlsetup} +\stopxmlcmd + +The initial setup defaults to \type {xml:process} that is defined +as follows: + +\starttyping +\startsetups xml:process + \xmlregistereddocumentsetups\xmldocument + \xmlmain\xmldocument +\stopsetups +\stoptyping + +First we apply the setups associated with the document (including common setups) +and then we flush the whole document. The macro \type {\xmldocument} expands to +the current document id. There is also \type {\xmlself} which expands to the +current node number (\type {#1} in setups). + +\startxmlcmd {\cmdbasicsetup{xmlmain}} + returns the whole document +\stopxmlcmd + +Normally such a flush will trigger a chain reaction of setups associated with the +child elements. + +\stopsection + +\startsection[title={saving}] + +\startxmlcmd {\cmdbasicsetup{xmlsave}} + saves the given node \cmdinternal {cd:node} in the file \cmdinternal {cd:file} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmltofile}} + saves the match of \cmdinternal {cd:lpath} in the file \cmdinternal {cd:file} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmltobuffer}} + saves the match of \cmdinternal {cd:lpath} in the buffer \cmdinternal {cd:buffer} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmltobufferverbose}} + saves the match of \cmdinternal {cd:lpath} verbatim in the buffer \cmdinternal + {cd:buffer} +\stopxmlcmd + +% \startxmlcmd {\cmdbasicsetup{xmltoparameters}} +% converts the match of \cmdinternal {cd:lpath} to key|/|values (for tracing) +% \stopxmlcmd + +The next command is only needed when you have messed with the tree using +\LUA\ code. + +\startxmlcmd {\cmdbasicsetup{xmladdindex}} + (re)indexes a tree +\stopxmlcmd + +The following macros are only used in special situations and are not really meant +for users. 
+ +\startxmlcmd {\cmdbasicsetup{xmlraw}} + flush the content if \cmdinternal {cd:node} with original entities +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{startxmlraw}} + flush the wrapped content with original entities +\stopxmlcmd + +\stopsection + +\startsection[title={flushing data}] + +When we flush an element, the associated \XML\ setups are expanded. The most +straightforward way to flush an element is the following. Keep in mind that the +returned values itself can trigger setups and therefore flushes. + +\startxmlcmd {\cmdbasicsetup{xmlflush}} + returns all nodes under \cmdinternal {cd:node} +\stopxmlcmd + +You can restrict flushing by using commands that accept a specification. + +\startxmlcmd {\cmdbasicsetup{xmltext}} + returns the text of the matching \cmdinternal {cd:lpath} under \cmdinternal + {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlpure}} + returns the text of the matching \cmdinternal {cd:lpath} under \cmdinternal + {cd:node} without \type {\Ux} escaped special \TEX\ characters +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlflushtext}} + returns the text of the \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlflushpure}} + returns the text of the \cmdinternal {cd:node} without \type {\Ux} escaped + special \TEX\ characters +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlnonspace}} + returns the text of the matching \cmdinternal {cd:lpath} under \cmdinternal + {cd:node} without embedded spaces +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlall}} + returns all nodes under \cmdinternal {cd:node} that matches \cmdinternal + {cd:lpath} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmllastmatch}} + returns all nodes found in the last match +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlfirst}} + returns the first node under \cmdinternal {cd:node} that matches \cmdinternal + {cd:lpath} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmllast}} + returns the last node under \cmdinternal {cd:node} that matches \cmdinternal + {cd:lpath} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlfilter}} + at a match of \cmdinternal {cd:lpath} a given filter \type {filter} is applied + and the result is returned +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlsnippet}} + returns the \cmdinternal {cd:number}\high{th} element under \cmdinternal + {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlposition}} + returns the \cmdinternal {cd:number}\high{th} match of \cmdinternal + {cd:lpath} at node \cmdinternal {cd:node}; a negative number starts at the + end (alias: \type {\xmlindex}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlelement}} + returns the \cmdinternal {cd:number}\high{th} child of node \cmdinternal {cd:node}; + a negative number starts at the end +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlpos}} + returns the index (position) in the parent node of \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlconcat}} + returns the sequence of nodes that match \cmdinternal {cd:lpath} at + \cmdinternal {cd:node} whereby \cmdinternal {cd:text} is put between each + match +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlconcatrange}} + returns the \cmdinternal {cd:first}\high {th} upto \cmdinternal + {cd:last}\high {th} of nodes that match \cmdinternal {cd:lpath} at + \cmdinternal {cd:node} whereby \cmdinternal {cd:text} is put between each + match +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlcommand}} + apply the given \cmdinternal {cd:xmlsetup} to each match of \cmdinternal + {cd:lpath} at node \cmdinternal {cd:node} 
+\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlstrip}} + remove leading and trailing spaces from nodes under \cmdinternal {cd:node} + that match \cmdinternal {cd:lpath} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlstripped}} + remove leading and trailing spaces from nodes under \cmdinternal {cd:node} + that match \cmdinternal {cd:lpath} and return the content afterwards +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlstripnolines}} + remove leading and trailing spaces as well as collapse embedded spaces + from nodes under \cmdinternal {cd:node} that match \cmdinternal {cd:lpath} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlstrippednolines}} + remove leading and trailing spaces as well as collapse embedded spaces from + nodes under \cmdinternal {cd:node} that match \cmdinternal {cd:lpath} and + return the content afterwards +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlverbatim}} + flushes the content verbatim code (without any wrapping, i.e. no fonts + are selected and such) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlinlineverbatim}} + return the content of the node as inline verbatim code; no further + interpretation (expansion) takes place and spaces are honoured; it uses the + following wrapper +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{startxmlinlineverbatim}} + wraps inline verbatim mode using the environment specified (a prefix \type + {xml:} is added to the environment name) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldisplayverbatim}} + return the content of the node as display verbatim code; no further + interpretation (expansion) takes place and leading and trailing spaces and + newlines are treated special; it uses the following wrapper +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{startxmldisplayverbatim}} + wraps the content in display verbatim using the environment specified (a prefix + \type {xml:} is added to the environment name) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlprettyprint}} + pretty print (with colors) the node \cmdinternal {cd:node}; use the \CONTEXT\ + \SCITE\ lexers when available (\type {\usemodule [scite]}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlflushspacewise}} + flush node \cmdinternal {cd:node} obeying spaces and newlines +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlflushlinewise}} + flush node \cmdinternal {cd:node} obeying newlines +\stopxmlcmd + +\stopsection + +\startsection[title={information}] + +The following commands return strings. Normally these are used in tests. 
+ +\startxmlcmd {\cmdbasicsetup{xmlname}} + returns the complete name (including namespace prefix) of the + given \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlnamespace}} + returns the namespace of the given \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmltag}} + returns the tag of the element, without namespace prefix +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlcount}} + returns the number of matches of \cmdinternal {cd:lpath} at node \cmdinternal + {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlatt}} + returns the value of attribute \cmdinternal {cd:name} or empty if no such + attribute exists +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlattdef}} + returns the value of attribute \cmdinternal {cd:name} or \cmdinternal + {cd:string} if no such attribute exists +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlrefatt}} + returns the value of attribute \cmdinternal {cd:name} or empty if no such + attribute exists; a leading \type {#} is removed (nicer for tex) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlchainatt}} + returns the value of attribute \cmdinternal {cd:name} or empty if no such + attribute exists; backtracks till a match is found +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlchainattdef}} + returns the value of attribute \cmdinternal {cd:name} or \cmdinternal + {cd:string} if no such attribute exists; backtracks till a match is found +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlattribute}} + finds a first match for \cmdinternal {cd:lpath} at \cmdinternal {cd:node} and + returns the value of attribute \cmdinternal {cd:name} or empty if no such + attribute exists +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlattributedef}} + finds a first match for \cmdinternal {cd:lpath} at \cmdinternal {cd:node} and + returns the value of attribute \cmdinternal {cd:name} or \cmdinternal + {cd:text} if no such attribute exists +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmllastatt}} + returns the last attribute found (this avoids a lookup) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlsetatt}} + set the value of attribute \cmdinternal {cd:name} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlsetattribute}} + set the value of attribute \cmdinternal {cd:name} for each match of \cmdinternal + {cd:lpath} +\stopxmlcmd + +\stopsection + +\startsection[title={manipulation}] + +You can use \LUA\ code to manipulate the tree and it makes no sense to duplicate +this in \TEX. In the future we might provide an interface to some of this +functionality. Keep in mind that manipuating the tree might have side effects as +we maintain several indices into the tree that also needs to be updated then. + +\stopsection + +\startsection[title={integration}] + +If you write a module that deals with \XML, for instance processing cals tables, +then you need ways to control specific behaviour. For instance, you might want to +add a background to the table. Such directives are collected in \XML\ files and +can be loaded on demand. 
+ +\startxmlcmd {\cmdbasicsetup{xmlloaddirectives}} + loads \CONTEXT\ directives from \cmdinternal {cd:file} that will get + interpreted when processing documents +\stopxmlcmd + +A directives definition file looks as follows: + +\starttyping +<?xml version="1.0" standalone="yes"?> + +<directives> + <directive attribute='id' value="100" + setup="cdx:100"/> + <directive attribute='id' value="101" + setup="cdx:101"/> + <directive attribute='cdx' value="colors" element="cals:table" + setup="cdx:cals:table:colors"/> + <directive attribute='cdx' value="vertical" element="cals:table" + setup="cdx:cals:table:vertical"/> + <directive attribute='cdx' value="noframe" element="cals:table" + setup="cdx:cals:table:noframe"/> + <directive attribute='cdx' value="*" element="cals:table" + setup="cdx:cals:table:*"/> +</directives> +\stoptyping + +Examples of usage can be found in \type {x-cals.mkiv}. The directive is triggered +by an attribute. Instead of a setup you can specify a setup to be applied before +and after the node gets flushed. + +\startxmlcmd {\cmdbasicsetup{xmldirectives}} + apply the setups directive associated with the node +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldirectivesbefore}} + apply the before directives associated with the node +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldirectivesafter}} + apply the after directives associated with the node +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlinstalldirective}} + defines a directive that hooks into a handler +\stopxmlcmd + +Normally a directive will be put in the \XML\ file, for instance as: + +\starttyping +<?context-mathml-directive minus reduction yes ?> +\stoptyping + +Here the \type {mathml} is the general class of directives and \type {minus} a +subclass, in our case a specific element. + +\stopsection + +\startsection[title={setups}] + +The basic building blocks of \XML\ processing are setups. These are just +collections of macros that are expanded. These setups get one argument passed +(\type {#1}): + +\starttyping +\startxmlsetups somedoc:somesetup + \xmlflush{#1} +\stopxmlsetups +\stoptyping + +This argument is normally a number that internally refers to a specific node in +the \XML\ tree. The user should see it as an abstract reference and not depend on +its numeric property. Just think of it as \quote {the current node}. You can (and +probably will) call such setups using: + +\startxmlcmd {\cmdbasicsetup{xmlsetup}} + expands setup \cmdinternal {cd:setup} and pass \cmdinternal {cd:node} as + argument +\stopxmlcmd + +However, in most cases the setups are associated to specific elements, +something that users of \XSLT\ might recognize as templates. 
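
A typical association (with made-up element and setup names) looks like this; the \type {xml:document} and \type {xml:section} setups would be defined in the same way as \type {xml:p}:

\starttyping
\startxmlsetups xml:mysetups
    \xmlsetsetup{#1}{document|section|p}{xml:*}
\stopxmlsetups

\xmlregisterdocumentsetup{mydocument}{xml:mysetups}

\startxmlsetups xml:p
    \xmlflush{#1}\par
\stopxmlsetups
\stoptyping

Here each \type {document}, \type {section} and \type {p} element gets mapped onto a setup with the same name in the \type {xml:} namespace, so flushing a \type {p} element triggers the \type {xml:p} setup.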
+ +\startxmlcmd {\cmdbasicsetup{xmlsetfunction}} + associates function \cmdinternal {cd:luafunction} to the elements in + namespace \cmdinternal {cd:name} that match \cmdinternal {cd:lpath} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlsetsetup}} + associates setups \cmdinternal {cd:setup} (\TEX\ code) with the matching + nodes of \cmdinternal {cd:lpath} or root \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlprependsetup}} + pushes \cmdinternal {cd:setup} to the front of global list of setups +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlappendsetup}} + adds \cmdinternal {cd:setup} to the global list of setups to be applied + (alias: \type{\xmlregistersetup}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlbeforesetup}} + pushes \cmdinternal {cd:setup} into the global list of setups; the + last setup is the position +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlaftersetup}} + adds \cmdinternal {cd:setup} to the global list of setups; the last setup + is the position +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlremovesetup}} + removes \cmdinternal {cd:setup} from the global list of setups +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlprependdocumentsetup}} + pushes \cmdinternal {cd:setup} to the front of list of setups to be applied + to \cmdinternal {cd:name} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlappenddocumentsetup}} + adds \cmdinternal {cd:setup} to the list of setups to be applied to + \cmdinternal {cd:name} (you can also use the alias: \type + {\xmlregisterdocumentsetup}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlbeforedocumentsetup}} + pushes \cmdinternal {cd:setup} into the setups to be applied to \cmdinternal + {cd:name}; the last setup is the position +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlafterdocumentsetup}} + adds \cmdinternal {cd:setup} to the setups to be applied to \cmdinternal + {cd:name}; the last setup is the position +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlremovedocumentsetup}} + removes \cmdinternal {cd:setup} from the global list of setups to be applied + to \cmdinternal {cd:name} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlresetsetups}} + removes all global setups +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlresetdocumentsetups}} + removes all setups from the \cmdinternal {cd:name} specific list of setups to + be applied +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlflushdocumentsetups}{setup}} + applies \cmdinternal {cd:setup} (can be a list) to \cmdinternal {cd:name} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlregisteredsetups}} + applies all global setups to the current document +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlregistereddocumentsetups}} + applies all document specific \cmdinternal {cd:setup} to document + \cmdinternal {cd:name} +\stopxmlcmd + +\stopsection + +\startsection[title={testing}] + +The following test macros all take a \cmdinternal {cd:node} as first argument +and an \cmdinternal {cd:lpath} as second: + +\startxmlcmd {\cmdbasicsetup{xmldoif}} + expands to \cmdinternal {cd:true} when \cmdinternal {cd:lpath} matches at + node \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifnot}} + expands to \cmdinternal {cd:true} when \cmdinternal {cd:lpath} does not match + at node \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifelse}} + expands to \cmdinternal {cd:true} when \cmdinternal {cd:lpath} matches at + node \cmdinternal {cd:node} and to \cmdinternal {cd:false} otherwise +\stopxmlcmd + +\startxmlcmd 
{\cmdbasicsetup{xmldoiftext}} + expands to \cmdinternal {cd:true} when the node matching \cmdinternal + {cd:lpath} at node \cmdinternal {cd:node} has some content +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifnottext}} + expands to \cmdinternal {cd:true} when the node matching \cmdinternal + {cd:lpath} at node \cmdinternal {cd:node} has no content +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifelsetext}} + expands to \cmdinternal {cd:true} when the node matching \cmdinternal + {cd:lpath} at node \cmdinternal {cd:node} has content and to \cmdinternal + {cd:false} otherwise +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifatt}} + expands to \cmdinternal {cd:true} when the attribute matching \cmdinternal + {cd:node} and the name given as second argument matches the third argument +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifnotatt}} + expands to \cmdinternal {cd:true} when the attribute matching \cmdinternal + {cd:node} and the name given as second argument differs from the third + argument +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifelseatt}} + expands to \cmdinternal {cd:true} when the attribute matching \cmdinternal + {cd:node} and the name given as second argument matches the third argument + and to \cmdinternal {cd:false} otherwise +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifelseempty}} + expands to \cmdinternal {cd:true} when the node matching \cmdinternal + {cd:lpath} at node \cmdinternal {cd:node} is empty and to \cmdinternal + {cd:false} otherwise +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifelseselfempty}} + expands to \cmdinternal {cd:true} when the node is empty and to \cmdinternal + {cd:false} otherwise +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifselfempty}} + expands to \cmdinternal {cd:true} when \cmdinternal {cd:node} is empty +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifnotselfempty}} + expands to \cmdinternal {cd:true} when \cmdinternal {cd:node} is not empty +\stopxmlcmd + +\stopsection + +\startsection[title={initialization}] + +The general setup command (not to be confused with setups) that deals with the +\MKIV\ tree handler is \type {\setupxml}. There are currently only a few options. + +\cmdfullsetup{setupxml} + +When you set \type {default} to \cmdinternal {cd:text} elements with no setup +assigned will end up as text. When set to \type {hidden} such elements will be +hidden. You can apply the default yourself using: + +\startxmlcmd {\cmdbasicsetup{xmldefaulttotext}} + presets the tree with root \cmdinternal {cd:node} to the handlers set up with + \type {\setupxml} option \cmdinternal{default} +\stopxmlcmd + +You can set \type {compress} to \type {yes} in which case comment is stripped +from the tree when the file is read. + +\startxmlcmd {\cmdbasicsetup{xmlregisterns}} + associates an internal namespace (like \type {mml}) with one given in the + document as \URL\ (like mathml) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlremapname}} + changes the namespace and tag of the matching elements +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlremapnamespace}} + replaces all references to the given namespace to a new one (applied + recursively) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlchecknamespace}} + sets the namespace of the matching elements unless a namespace is already set +\stopxmlcmd + +\stopsection + +\startsection[title={helpers}] + +Often an attribute will determine the rendering and this may result in many +tests. 
Especially when we have multiple attributes that control the output such +tests can become rather extensive and redundant because one gets $n\times m$ or +more such tests. + +Therefore we have a convenient way to map attributes onto for instance strings or +commands. + +\startxmlcmd {\cmdbasicsetup{xmlmapvalue}} + associate a \cmdinternal {cd:text} with a \cmdinternal {cd:category} and + \cmdinternal {cd:name} (alias: \type{\xmlmapval}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlvalue}} + expand the value associated with a \cmdinternal {cd:category} and + \cmdinternal {cd:name} and if not resolved, expand to the \cmdinternal + {cd:text} (alias: \type{\xmlval}) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmldoifelsevalue}} + associate a \cmdinternal {cd:text} with a \cmdinternal {cd:category} and + \cmdinternal {cd:name} +\stopxmlcmd + +This is used as follows. We define a couple of mappings in the same category: + +\starttyping +\xmlmapvalue{emph}{bold} {\bf} +\xmlmapvalue{emph}{italic}{\it} +\stoptyping + +Assuming that we have associated the following setup with the \type {emph} +element, we can say (with \type {#1} being the current element): + +\starttyping +\startxmlsetups demo:emph + \begingroup + \xmlvalue{emph}{\xmlatt{#1}{type}}{} + \endgroup +\stopxmlsetups +\stoptyping + +In this case we have no default. The \type {type} attribute triggers the actions, +as in: + +\starttyping +normal <emph type='bold'>bold</emph> normal +\stoptyping + +This mechanism is not really bound to elements and attributes so you can use this +mechanism for other purposes as well. + +\stopsection + +\startsection[title={Parameters}] + +\startbuffer[test] +<something whatever="alpha"> + <what> + beta + </what> +</something> +\stopbuffer + +\startbuffer +\startxmlsetups xml:mysetups + \xmlsetsetup{\xmldocument}{*}{xml:*} +\stopxmlsetups + +\xmlregistersetup{xml:mysetups} + +\startxmlsetups xml:something + parameter : \xmlpar {#1}{whatever}\par + attribute : \xmlatt {#1}{whatever}\par + text : \xmlfirst {#1}{what} \par + \xmlsetpar{#1}{whatever}{gamma} + parameter : \xmlpar {#1}{whatever}\par + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:what + what: \xmlflush{#1}\par + parameter : \xmlparam{#1}{..}{whatever}\par +\stopxmlsetups + +\xmlprocessbuffer{main}{test}{} +\stopbuffer + +Say that we have this \XML\ blob: + +\typebuffer[test] + +With: + +\typebuffer + +we get: + +\getbuffer + +Parameters are stored with a node. 
+ +\startxmlcmd {\cmdbasicsetup{xmlpar}} + returns the value of parameter \cmdinternal {cd:name} or empty if no such + parameter exists +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlparam}} + finds a first match for \cmdinternal {cd:lpath} at \cmdinternal {cd:node} and + returns the value of parameter \cmdinternal {cd:name} or empty if no such + parameter exists +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmllastpar}} + returns the last parameter found (this avoids a lookup) +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlsetpar}} + set the value of parameter \cmdinternal {cd:name} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlsetparam}} + set the value of parameter \cmdinternal {cd:name} for each match of \cmdinternal + {cd:lpath} +\stopxmlcmd + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-contents.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-contents.tex new file mode 100644 index 00000000000..e0787ec5fe9 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-contents.tex @@ -0,0 +1,12 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-contents + +\starttitle[title=Contents] + +\placelist + [chapter,section] + +\stoptitle + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-converter.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-converter.tex new file mode 100644 index 00000000000..a457f962be8 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-converter.tex @@ -0,0 +1,300 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-converter + +\startchapter[title={Setting up a converter}] + +\startsection[title={from structure to setup}] + +We use a very simple document structure for demonstrating how a converter is +defined. In practice a mapping will be more complex, especially when we have a +style with complex chapter openings using data coming from all kind of places, +different styling of sections with the same name, selectively (out of order) +flushed content, special formatting, etc. + +\typefile{manual-demo-1.xml} + +Say that this document is stored in the file \type {demo.xml}, then the following +code can be used as starting point: + +\starttyping +\startxmlsetups xml:demo:base + \xmlsetsetup{#1}{document|section|p}{xml:demo:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{demo}{xml:demo:base} + +\startxmlsetups xml:demo:document + \starttitle[title={Contents}] + \placelist[chapter] + \stoptitle + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:demo:section + \startchapter[title=\xmlfirst{#1}{/title}] + \xmlfirst{#1}{/content} + \stopchapter +\stopxmlsetups + +\startxmlsetups xml:demo:p + \xmlflush{#1}\endgraf +\stopxmlsetups + +\xmlprocessfile{demo}{demo.xml}{} +\stoptyping + +Watch out! These are not just setups, but specific \XML\ setups which get an +argument passed (the \type {#1}). If for some reason your \XML\ processing fails, +it might be that you mistakenly have used a normal setup definition. The argument +\type {#1} represents the current node (element) and is a unique identifier. For +instance a \type {<p>..</p>} can have an identifier {demo::5}. 
So, we can get +something: + +\starttyping +\xmlflush{demo::5}\endgraf +\stoptyping + +but as well: + +\starttyping +\xmlflush{demo::6}\endgraf +\stoptyping + +Keep in mind that the references tor the actual nodes (elements) are +abstractions, you never see those \type {<id>::<number>}'s, because we will use +either the abstract \type {#1} (any node) or an explicit reference like \type +{demo}. The previous setup when issued will be like: + +\starttyping +\startchapter[title=\xmlfirst{demo::3}{/title}] + \xmlfirst{demo::4}{/content} +\stopchapter +\stoptyping + +Here the \type {title} is used to typeset the chapter title but also for an entry +in the table of contents. At the moment the title is typeset the \XML\ node gets +looked up and expanded in real text. However, for the list it gets stored for +later use. One can argue that this is not needed for \XML, because one can just +filter all the titles and use page references, but then one also looses the +control one normally has over such titles. For instance it can be that some +titles are rendered differently and for that we need to keep track of usage. +Doing that with transformations or filtering is often more complex than leaving +that to \TEX. As soon as the list gets typeset, the reference (\type {demo::#3}) +is used for the lookup. This is because by default the title is stored as given. +So, as long as we make sure the \XML\ source is loaded before the table of +contents is typeset we're ok. Later we will look into this in more detail, for +now it's enough to know that in most cases the abstract \type {#1} reference will +work out ok. + +Contrary to the style definitions this interface looks rather low level (with no +optional arguments) and the main reason for this is that we want processing to be +fast. So, the basic framework is: + +\starttyping +\startxmlsetups xml:demo:base + % associate setups with elements +\stopxmlsetups + +\xmlregisterdocumentsetup{demo}{xml:demo:base} + +% define setups for matches + +\xmlprocessfile{demo}{demo.xml}{} +\stoptyping + +In this example we mostly just flush the content of an element and in the case of +a section we flush explicit child elements. The \type {#1} in the example code +represents the current element. The line: + +\starttyping +\xmlsetsetup{demo}{*}{-} +\stoptyping + +sets the default for each element to \quote {just ignore it}. A \type {+} would +make the default to always flush the content. This means that at this point we +only handle: + +\starttyping +<section> + <title>Some title</title> + <content> + <p>a paragraph of text</p> + </content> +</section> +\stoptyping + +In the next section we will deal with the slightly more complex itemize and +figure placement. At first sight all these setups may look overkill but keep in +mind that normally the number of elements is rather limited. The complexity is +often in the style and having access to each snippet of content is actually +quite handy for that. 
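For quick experiments you don't even need an external file: the same converter can be fed from a buffer. The following self-contained sketch (it inlines a stripped-down variant of the demo document) is just an illustration of that:

\starttyping
\startbuffer[demo]
<section>
    <title>Some title</title>
    <content>
        <p>a paragraph of text</p>
    </content>
</section>
\stopbuffer

\startxmlsetups xml:demo:base
    \xmlsetsetup{#1}{section|p}{xml:demo:*}
\stopxmlsetups

\xmlregisterdocumentsetup{demo}{xml:demo:base}

\startxmlsetups xml:demo:section
    \startchapter[title=\xmlfirst{#1}{/title}]
        \xmlfirst{#1}{/content}
    \stopchapter
\stopxmlsetups

\startxmlsetups xml:demo:p
    \xmlflush{#1}\endgraf
\stopxmlsetups

\xmlprocessbuffer{demo}{demo}{}
\stoptyping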
+ +\stopsection + +\startsection[title={alternative solutions}] + +Dealing with an itemize is rather simple (as long as we forget about +attributes that control the behaviour): + +\starttyping +<itemize> + <item>first</item> + <item>second</item> +</itemize> +\stoptyping + +First we need to add \type {itemize} to the setup assignment (unless we've used +the wildcard \type {*}): + +\starttyping +\xmlsetsetup{demo}{document|section|p|itemize}{xml:demo:*} +\stoptyping + +The setup can look like: + +\starttyping +\startxmlsetups xml:demo:itemize + \startitemize + \xmlfilter{#1}{/item/command(xml:demo:itemize:item)} + \stopitemize +\stopxmlsetups + +\startxmlsetups xml:demo:itemize:item + \startitem + \xmlflush{#1} + \stopitem +\stopxmlsetups +\stoptyping + +An alternative is to map item directly: + +\starttyping +\xmlsetsetup{demo}{document|section|p|itemize|item}{xml:demo:*} +\stoptyping + +and use: + +\starttyping +\startxmlsetups xml:demo:itemize + \startitemize + \xmlflush{#1} + \stopitemize +\stopxmlsetups + +\startxmlsetups xml:demo:item + \startitem + \xmlflush{#1} + \stopitem +\stopxmlsetups +\stoptyping + +Sometimes, a more local solution using filters and \type {/command(...)} makes more +sense, especially when the \type {item} tag is used for other purposes as well. + +Explicit flushing with \type {command} is definitely the way to go when you have +complex products. In one of our projects we compose math school books from many +thousands of small \XML\ files, and from one source set several products are +typeset. Within a book sections get done differently, content gets used, ignored +or interpreted differently depending on the kind of content, so there is a +constant checking of attributes that drive the rendering. In that a generic setup +for a title element makes less sense than explicit ones for each case. (We're +talking of huge amounts of files here, including multiple images on each rendered +page.) + +When using \type {command} you can pass two arguments, the first is the setup for +the match, the second one for the miss, as in: + +\starttyping +\xmlfilter{#1}{/element/command(xml:true,xml:false)} +\stoptyping + +Back to the example, this leaves us with dealing with the resources, like +figures: + +\starttyping +<resource type='figure'> + <caption>A picture of a cow.</caption> + <content><external file="cow.pdf"/></content> +</resource> +\stoptyping + +Here we can use a more restricted match: + +\starttyping +\xmlsetsetup{demo}{resource[@type='figure']}{xml:demo:figure} +\xmlsetsetup{demo}{external}{xml:demo:*} +\stoptyping + +and the definitions: + +\starttyping +\startxmlsetups xml:demo:figure + \placefigure + {\xmlfirst{#1}{/caption}} + {\xmlfirst{#1}{/content}} +\stopxmlsetups + +\startxmlsetups xml:demo:external + \externalfigure[\xmlatt{#1}{file}] +\stopxmlsetups +\stoptyping + +At this point it is good to notice that \type {\xmlatt{#1}{file}} is passed as it +is: a macro call. This means that when a macro like \type {\externalfigure} uses +the first argument frequently without first storing its value, the lookup is done +several times. A solution for this is: + +\starttyping +\startxmlsetups xml:demo:external + \expanded{\externalfigure[\xmlatt{#1}{file}]} +\stopxmlsetups +\stoptyping + +Because the lookup is rather fast, normally there is no need to bother about this +too much because internally \CONTEXT\ already makes sure such expansion happens +only once. 
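When an attribute is needed more than once, another option is \type {\xmllastatt}, which returns the last attribute found without doing a new lookup. A possible (untested) sketch, not part of the demo itself:

\starttyping
\startxmlsetups xml:demo:external
    % the test expands \xmlatt once; the second branch reuses the
    % last found value via \xmllastatt instead of a second lookup
    \doifelsenothing {\xmlatt{#1}{file}} {
        % no file attribute given, so nothing to typeset
    } {
        \externalfigure[\xmllastatt]
    }
\stopxmlsetups
\stoptyping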
+ +An alternative definition for placement is the following: + +\starttyping +\xmlsetsetup{demo}{resource}{xml:demo:resource} +\stoptyping + +with: + +\starttyping +\startxmlsetups xml:demo:resource + \placefloat + [\xmlatt{#1}{type}] + {\xmlfirst{#1}{/caption}} + {\xmlfirst{#1}{/content}} +\stopxmlsetups +\stoptyping + +This way you can specify \type {table} as type too. Because you can define your +own float types, more complex variants are also possible. In that case it makes +sense to provide some default behaviour too: + +\starttyping +\definefloat[figure-here][figure][default=here] +\definefloat[figure-left][figure][default=left] +\definefloat[table-here] [table] [default=here] +\definefloat[table-left] [table] [default=left] + +\startxmlsetups xml:demo:resource + \placefloat + [\xmlattdef{#1}{type}{figure}-\xmlattdef{#1}{location}{here}] + {\xmlfirst{#1}{/caption}} + {\xmlfirst{#1}{/content}} +\stopxmlsetups +\stoptyping + +In this example we support two types and two locations. We default to a figure +placed (when possible) at the current location. + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-examples.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-examples.tex new file mode 100644 index 00000000000..064510d6dd1 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-examples.tex @@ -0,0 +1,948 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-examples + +\startchapter[title=Examples] + +\startsection[title=attribute chains] + +In \CSS, when an attribute is not present, the parent element is checked, and when +not found again, the lookup follows the chain till a match is found or the root is +reached. The following example demonstrates how such a chain lookup works. + +\startbuffer[test] +<something mine="1" test="one" more="alpha"> + <whatever mine="2" test="two"> + <whocares mine="3"> + <!-- this is a test --> + </whocares> + </whatever> +</something> +\stopbuffer + +\typebuffer[test] + +We apply the following setups to this tree: + +\startbuffer[setups] +\startxmlsetups xml:common + [ + \xmlchainatt{#1}{mine}, + \xmlchainatt{#1}{test}, + \xmlchainatt{#1}{more}, + \xmlchainatt{#1}{none} + ]\par +\stopxmlsetups + +\startxmlsetups xml:something + something: \xmlsetup{#1}{xml:common} + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:whatever + whatever: \xmlsetup{#1}{xml:common} + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:whocares + whocares: \xmlsetup{#1}{xml:common} + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:mysetups + \xmlsetsetup{#1}{something|whatever|whocares}{xml:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-1}{xml:mysetups} + +\xmlprocessbuffer{example-1}{test}{} +\stopbuffer + +\typebuffer[setups] + +This gives: + +\start + \getbuffer[setups] +\stop + +\stopsection + +\startsection[title=conditional setups] + +Say that we have this code: + +\starttyping +\xmldoifelse {#1} {/what[@a='1']} { + \xmlfilter {#1} {/what/command('xml:yes')} +} { + \xmlfilter {#1} {/what/command('xml:nop')} +} +\stoptyping + +Here we first determine if there is a child \type {what} with attribute \type {a} +set to \type {1}. Depending on the outcome again we check the child nodes for +being named \type {what}. 
A faster solution which also takes less code is this: + +\starttyping +\xmlfilter {#1} {/what[@a='1']/command('xml:yes','xml:nop')} +\stoptyping + +\stopsection + +\startsection[title=manipulating] + +Assume that we have the following \XML\ data: + +\startbuffer[test] +<A> + <B>right</B> + <B>wrong</B> +</A> +\stopbuffer + +\typebuffer[test] + +But, instead of \type {right} we want to see \type {okay}. We can do that with a +finalizer: + +\startbuffer +\startluacode +local rehash = { + ["right"] = "okay", +} + +function xml.finalizers.tex.Okayed(collected,what) + for i=1,#collected do + if what == "all" then + local str = xml.text(collected[i]) + context(rehash[str] or str) + else + context(str) + end + end +end +\stopluacode +\stopbuffer + +\typebuffer \getbuffer + +\startbuffer +\startxmlsetups xml:A + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:B + (It's \xmlfilter{#1}{./Okayed("all")}) +\stopxmlsetups + +\startxmlsetups xml:testsetups + \xmlsetsetup{#1}{A|B}{xml:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-2}{xml:testsetups} +\xmlprocessbuffer{example-2}{test}{} +\stopbuffer + +\typebuffer + +The result is: \start \inlinebuffer \stop + +\stopsection + +\startsection[title=cross referencing] + +A rather common way to add cross references to \XML\ files is to borrow the +asymmetrical id's from \HTML. This means that one cannot simply use a value +of (say) \type {href} to locate an \type {id}. The next example came up on +the \CONTEXT\ mailing list. + +\startbuffer[test] +<doc> + <p>Text + <a href="#fn1" class="footnoteref" id="fnref1"><sup>1</sup></a> and + <a href="#fn2" class="footnoteref" id="fnref2"><sup>2</sup></a> + </p> + <div class="footnotes"> + <hr /> + <ol> + <li id="fn1"><p>A footnote.<a href="#fnref1">↩</a></p></li> + <li id="fn2"><p>A second footnote.<a href="#fnref2">↩</a></p></li> + </ol> + </div> +</doc> +\stopbuffer + +\typebuffer[test] + +We give two variants for dealing with such references. The first solution does +lookups and depending on the size of the file can be somewhat inefficient. + +\startbuffer +\startxmlsetups xml:doc + \blank + \xmlflush{#1} + \blank +\stopxmlsetups + +\startxmlsetups xml:p + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:footnote + (variant 1)\footnote + {\xmlfirst + {example-3-1} + {div[@class='footnotes']/ol/li[@id='\xmlrefatt{#1}{href}']}} +\stopxmlsetups + +\startxmlsetups xml:initialize + \xmlsetsetup{#1}{p|doc}{xml:*} + \xmlsetsetup{#1}{a[@class='footnoteref']}{xml:footnote} + \xmlsetsetup{#1}{div[@class='footnotes']}{xml:nothing} +\stopxmlsetups + +\xmlresetdocumentsetups{*} +\xmlregisterdocumentsetup{example-3-1}{xml:initialize} + +\xmlprocessbuffer{example-3-1}{test}{} +\stopbuffer + +\typebuffer + +This will typeset two footnotes. + +\getbuffer + +The second variant collects the references so that the time spend on lookups is +less. 
+ +\startbuffer +\startxmlsetups xml:doc + \blank + \xmlflush{#1} + \blank +\stopxmlsetups + +\startxmlsetups xml:p + \xmlflush{#1} +\stopxmlsetups + +\startluacode + userdata.notes = {} +\stopluacode + +\startxmlsetups xml:collectnotes + \ctxlua{userdata.notes['\xmlrefatt{#1}{id}'] = '#1'} +\stopxmlsetups + +\startxmlsetups xml:footnote + (variant 2)\footnote + {\xmlflush + {\cldcontext{userdata.notes['\xmlrefatt{#1}{href}']}}} +\stopxmlsetups + +\startxmlsetups xml:initialize + \xmlsetsetup{#1}{p|doc}{xml:*} + \xmlsetsetup{#1}{a[@class='footnoteref']}{xml:footnote} + \xmlfilter{#1}{div[@class='footnotes']/ol/li/command(xml:collectnotes)} + \xmlsetsetup{#1}{div[@class='footnotes']}{} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-3-2}{xml:initialize} + +\xmlprocessbuffer{example-3-2}{test}{} +\stopbuffer + +\typebuffer + +This will again typeset two footnotes: + +\getbuffer + +\stopsection + +\startsection[title=mapping values] + +One way to process options \type {frame} in the example below is to map the +values to values known by \CONTEXT. + +\startbuffer[test] +<a> + <nattable frame="on"> + <tr><td>#1</td><td>#2</td><td>#3</td><td>#4</td></tr> + <tr><td>#5</td><td>#6</td><td>#7</td><td>#8</td></tr> + </nattable> + <nattable frame="off"> + <tr><td>#1</td><td>#2</td><td>#3</td><td>#4</td></tr> + <tr><td>#5</td><td>#6</td><td>#7</td><td>#8</td></tr> + </nattable> + <nattable frame="no"> + <tr><td>#1</td><td>#2</td><td>#3</td><td>#4</td></tr> + <tr><td>#5</td><td>#6</td><td>#7</td><td>#8</td></tr> + </nattable> +</a> +\stopbuffer + +\typebuffer[test] + +\startbuffer +\startxmlsetups xml:a + \xmlflush{#1} +\stopxmlsetups + +\xmlmapvalue {nattable:frame} {on} {on} +\xmlmapvalue {nattable:frame} {yes} {on} +\xmlmapvalue {nattable:frame} {off} {off} +\xmlmapvalue {nattable:frame} {no} {off} + +\startxmlsetups xml:nattable + \startplacetable[title=#1] + \setupTABLE[frame=\xmlval{nattable:frame}{\xmlatt{#1}{frame}}{on}]% + \bTABLE + \xmlflush{#1} + \eTABLE + \stopplacetable +\stopxmlsetups + +\startxmlsetups xml:tr + \bTR + \xmlflush{#1} + \eTR +\stopxmlsetups + +\startxmlsetups xml:td + \bTD + \xmlflush{#1} + \eTD +\stopxmlsetups + +\startxmlsetups xml:testsetups + \xmlsetsetup{example-4}{a|nattable|tr|td|}{xml:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-4}{xml:testsetups} + +\xmlprocessbuffer{example-4}{test}{} +\stopbuffer + +The \type {\xmlmapvalue} mechanism is rather efficient and involves a minimum +of testing. + +\typebuffer + +We get: + +\getbuffer + +\stopsection + +\startsection[title=using \LUA] + +In this example we demonstrate how you can delegate rendering to \LUA. We +will construct a so called extreme table. The input is: + +\startbuffer[demo] +<?xml version="1.0" encoding="utf-8"?> + +<a> + <b> <c>1</c> <d>Text</d> </b> + <b> <c>2</c> <d>More text</d> </b> + <b> <c>2</c> <d>Even more text</d> </b> + <b> <c>2</c> <d>And more</d> </b> + <b> <c>3</c> <d>And even more</d> </b> + <b> <c>2</c> <d>The last text</d> </b> +</a> +\stopbuffer + +\typebuffer[demo] + +The processor code is: + +\startbuffer[process] +\startxmlsetups xml:test_setups + \xmlsetsetup{#1}{a|b|c|d}{xml:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-5}{xml:test_setups} + +\xmlprocessbuffer{example-5}{demo}{} +\stopbuffer + +\typebuffer + +We color a sequence of the same titles (numbers here) differently. 
The first +solution remembers the last title: + +\startbuffer +\startxmlsetups xml:a + \startembeddedxtable + \xmlflush{#1} + \stopembeddedxtable +\stopxmlsetups + +\startxmlsetups xml:b + \xmlfunction{#1}{test_ba} +\stopxmlsetups + +\startluacode +local lasttitle = nil + +function xml.functions.test_ba(t) + local title = xml.text(t, "/c") + local content = xml.text(t, "/d") + context.startxrow() + context.startxcell { + background = "color", + backgroundcolor = lasttitle == title and "colorone" or "colortwo", + foregroundstyle = "bold", + foregroundcolor = "white", + } + context(title) + lasttitle = title + context.stopxcell() + context.startxcell() + context(content) + context.stopxcell() + context.stopxrow() +end +\stopluacode +\stopbuffer + +\typebuffer \getbuffer + +The \type {embeddedxtable} environment is needed because the table is picked up +as argument. + +\startlinecorrection \getbuffer[process] \stoplinecorrection + +The second implemetation remembers what titles are already processed so here we +can color the last one too. + +\startbuffer +\startxmlsetups xml:a + \ctxlua{xml.functions.reset_bb()} + \startembeddedxtable + \xmlflush{#1} + \stopembeddedxtable +\stopxmlsetups + +\startxmlsetups xml:b + \xmlfunction{#1}{test_bb} +\stopxmlsetups + +\startluacode +local titles + +function xml.functions.reset_bb(t) + titles = { } +end + +function xml.functions.test_bb(t) + local title = xml.text(t, "/c") + local content = xml.text(t, "/d") + context.startxrow() + context.startxcell { + background = "color", + backgroundcolor = titles[title] and "colorone" or "colortwo", + foregroundstyle = "bold", + foregroundcolor = "white", + } + context(title) + titles[title] = true + context.stopxcell() + context.startxcell() + context(content) + context.stopxcell() + context.stopxrow() +end +\stopluacode +\stopbuffer + +\typebuffer \getbuffer + +\startlinecorrection \getbuffer[process] \stoplinecorrection + +A solution without any state variable is given below. + +\startbuffer +\startxmlsetups xml:a + \startembeddedxtable + \xmlflush{#1} + \stopembeddedxtable +\stopxmlsetups + +\startxmlsetups xml:b + \xmlfunction{#1}{test_bc} +\stopxmlsetups + +\startluacode +function xml.functions.test_bc(t) + local title = xml.text(t, "/c") + local content = xml.text(t, "/d") + context.startxrow() + local okay = xml.text(t,"./preceding-sibling::/[-1]") == title + context.startxcell { + background = "color", + backgroundcolor = okay and "colorone" or "colortwo", + foregroundstyle = "bold", + foregroundcolor = "white", + } + context(title) + context.stopxcell() + context.startxcell() + context(content) + context.stopxcell() + context.stopxrow() +end +\stopluacode +\stopbuffer + +\typebuffer \getbuffer + +\startlinecorrection \getbuffer[process] \stoplinecorrection + +Here is a solution that delegates even more to \LUA. The previous variants were +actually not that safe with repect to special characters and didn't handle +nested elements either but the next one does. + +\startbuffer[demo] +<?xml version="1.0" encoding="utf-8"?> + +<a> + <b> <c>#1</c> <d>Text</d> </b> + <b> <c>#2</c> <d>More text</d> </b> + <b> <c>#2</c> <d>Even more text</d> </b> + <b> <c>#2</c> <d>And more</d> </b> + <b> <c>#3</c> <d>And even more</d> </b> + <b> <c>#2</c> <d>Something <i>nested</i> </d> </b> +</a> +\stopbuffer + +\typebuffer[demo] + +We also need to map the \type {i} element. 
+ +\startbuffer +\startxmlsetups xml:a + \starttexcode + \xmlfunction{#1}{test_a} + \stoptexcode +\stopxmlsetups + +\startxmlsetups xml:c + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:d + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:i + {\em\xmlflush{#1}} +\stopxmlsetups + +\startluacode +function xml.functions.test_a(t) + context.startxtable() + local previous = false + for b in xml.collected(lxml.getid(t),"/b") do + context.startxrow() + local current = xml.text(b,"/c") + context.startxcell { + background = "color", + backgroundcolor = (previous == current) and "colorone" or "colortwo", + foregroundstyle = "bold", + foregroundcolor = "white", + } + lxml.first(b,"/c") + context.stopxcell() + context.startxcell() + lxml.first(b,"/d") + context.stopxcell() + previous = current + context.stopxrow() + end + context.stopxtable() +end +\stopluacode + +\startxmlsetups xml:test_setups + \xmlsetsetup{#1}{a|b|c|d|i}{xml:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-5}{xml:test_setups} + +\xmlprocessbuffer{example-5}{demo}{} +\stopbuffer + +\typebuffer + +\startlinecorrection \getbuffer \stoplinecorrection + +The question is, do we really need \LUA ? Often we don't, apart maybe from an +occasional special finalizer. A pure \TEX\ solution is given next: + +\startbuffer +\startxmlsetups xml:a + \glet\MyPreviousTitle\empty + \glet\MyCurrentTitle \empty + \startembeddedxtable + \xmlflush{#1} + \stopembeddedxtable +\stopxmlsetups + +\startxmlsetups xml:b + \startxrow + \xmlflush{#1} + \stopxrow +\stopxmlsetups + +\startxmlsetups xml:c + \xdef\MyCurrentTitle{\xmltext{#1}{.}} + \doifelse {\MyPreviousTitle} {\MyCurrentTitle} { + \startxcell + [background=color, + backgroundcolor=colorone, + foregroundstyle=bold, + foregroundcolor=white] + } { + \glet\MyPreviousTitle\MyCurrentTitle + \startxcell + [background=color, + backgroundcolor=colortwo, + foregroundstyle=bold, + foregroundcolor=white] + } + \xmlflush{#1} + \stopxcell +\stopxmlsetups + +\startxmlsetups xml:d + \startxcell + \xmlflush{#1} + \stopxcell +\stopxmlsetups + +\startxmlsetups xml:i + {\em\xmlflush{#1}} +\stopxmlsetups + +\startxmlsetups xml:test_setups + \xmlsetsetup{#1}{*}{xml:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-5}{xml:test_setups} + +\xmlprocessbuffer{example-5}{demo}{} +\stopbuffer + +\typebuffer + +\startlinecorrection \getbuffer \stoplinecorrection + +You can even save a few lines of code: + +\starttyping +\startxmlsetups xml:c + \xdef\MyCurrentTitle{\xmltext{#1}{.}} + \startxcell + [background=color, + backgroundcolor=color\ifx\MyPreviousTitle\MyCurrentTitle one\else two\fi, + foregroundstyle=bold, + foregroundcolor=white] + \xmlflush{#1} + \stopxcell + \glet\MyPreviousTitle\MyCurrentTitle +\stopxmlsetups +\stoptyping + +Or if you prefer: + +\starttyping +\startxmlsetups xml:c + \xdef\MyCurrentTitle{\xmltext{#1}{.}} + \doifelse {\MyPreviousTitle} {\MyCurrentTitle} { + \xmlsetup{#1}{xml:c:one} + } { + \xmlsetup{#1}{xml:c:two} + } +\stopxmlsetups + +\startxmlsetups xml:c:one + \startxcell + [background=color, + backgroundcolor=colorone, + foregroundstyle=bold, + foregroundcolor=white] + \xmlflush{#1} + \stopxcell +\stopxmlsetups + +\startxmlsetups xml:c:two + \startxcell + [background=color, + backgroundcolor=colortwo, + foregroundstyle=bold, + foregroundcolor=white] + \xmlflush{#1} + \stopxcell + \global\let\MyPreviousTitle\MyCurrentTitle +\stopxmlsetups +\stoptyping + +These examples demonstrate that it doesn't hurt to know a little bit of \TEX\ +programming: defining macros and basic 
comparisons can come in handy. There are +examples in the test suite, you can peek in the source code, you can consult +the wiki or you can just ask on the list. + +\stopsection + +\startsection[title=last match] + +For the next example we use the following \XML\ input: + +\startbuffer[demo] +<?xml version "1.0"?> +<document> + <section id="1"> + <content> + <p>first</p> + <p>second</p> + </content> + </section> + <section id="2"> + <content> + <p>third</p> + <p>fourth</p> + </content> + </section> +</document> +\stopbuffer + +\typebuffer[demo] + +If you check if some element is present and then act accordingly, you can +end up with doing the same lookup twice. Although it might sound inefficient, +in practice it's often not measureable. + +\startbuffer +\startxmlsetups xml:demo:document + \type{\xmlall{#1}{/section[@id='2']/content/p}}\par + \xmldoif{#1}{/section[@id='2']/content/p} { + \xmlall{#1}{/section[@id='2']/content/p} + } + \type{\xmllastmatch}\par + \xmldoif{#1}{/section[@id='2']/content/p} { + \xmllastmatch + } + \type{\xmlall{#1}{last-match::}}\par + \xmldoif{#1}{/section[@id='2']/content/p} { + \xmlall{#1}{last-match::} + } + \type{\xmlfilter{#1}{last-match::/command(xml:demo:p)}}\par + \xmldoif{#1}{/section[@id='2']/content/p} { + \xmlfilter{#1}{last-match::/command(xml:demo:p)} + } +\stopxmlsetups + +\startxmlsetups xml:demo:p + \quad\xmlflush{#1}\endgraf +\stopxmlsetups + +\startxmlsetups xml:demo:base + \xmlsetsetup{#1}{document|p}{xml:demo:*} +\stopxmlsetups + +\xmlregisterdocumentsetup{example-6}{xml:demo:base} + +\xmlprocessbuffer{example-6}{demo}{} +\stopbuffer + +\typebuffer + +In the second check we just flush the last match, so effective we do an \type +{\xmlall} here. The third and fourth alternatives demonstrate how we can use +\type {last-match} as axis. The gain is 10\% or more on the lookup but of course +typesetting often takes relatively more time than the lookup. + +\startpacked +\getbuffer +\stoppacked + +\stopsection + +\startsection[title=Finalizers] + +The \XML\ parser is also available outside \TEX. Here is an example of its usage. +We pipe the result to \TEX\ but you can do with \type {t} whatever you like. + +\startbuffer +local x = xml.load("manual-demo-1.xml") +local t = { } + +for c in xml.collected(x,"//*") do + if not c.special and not t[c.tg] then + t[c.tg] = true + end +end + +context.tocontext(table.sortedkeys(t)) +\stopbuffer + +% \typebuffer + +This returns: + +\ctxluabuffer + +We can wrap this in a finalizer: + +\startbuffer +xml.finalizers.taglist = function(collected) + local t = { } + for i=1,#collected do + local c = collected[i] + if not c.special then + local tg = c.tg + if tg and not t[tg] then + t[tg] = true + end + end + end + return table.sortedkeys(t) +end +\stopbuffer + +\typebuffer + +Or in a more extensive one: + +\startbuffer +xml.finalizers.taglist = function(collected,parenttoo) + local t = { } + for i=1,#collected do + local c = collected[i] + if not c.special then + local tg = c.tg + if tg and not t[tg] then + t[tg] = true + end + if parenttoo then + local p = c.__p__ + if p and not p.special then + local tg = p.tg .. ":" .. 
tg + if tg and not t[tg] then + t[tg] = true + end + end + end + end + end + return table.sortedkeys(t) +end +\stopbuffer + +\typebuffer \ctxluabuffer + +Usage is as follows: + +\startbuffer +local x = xml.load("manual-demo-1.xml") +local t = xml.applylpath(x,"//*/taglist()") + +context.tocontext(t) +\stopbuffer + +\typebuffer + +And indeed we get: + +\ctxluabuffer + +But we can also say: + +\startbuffer +local x = xml.load("manual-demo-1.xml") +local t = xml.applylpath(x,"//*/taglist(true)") + +context.tocontext(t) +\stopbuffer + +\typebuffer + +Now we get: + +\ctxluabuffer + +\stopsection + +\startsection[title=Pure xml] + +One might wonder how a \TEX\ macro package would look like when backslashes, +dollars and percent signs would have no special meaning. In fact, it would be +rather useless as interpreting commands are triggered by such characters. Any +formatting or coding system needs such characters. Take \XML: angle brackets and +ampersands are really special. So, no matter what system we use, we do have to +deal with the (common) case where these characters need to be seen as they are. +Normally escaping is the solution. + +The \CONTEXT\ interface for \XML\ suffers from this as well. You really don't +want to know how many tricks are used for dealing with special characters and +entities: there are several ways these travel through the system and it is +possible to adapt and cheat. Especially roundtripped data (via tuc file) puts +some demands on the system because when ts \XML\ can become \TEX\ and vise versa. +The next example (derived from a mail on the list) demonstrates this: + +\starttyping +\startbuffer[demo] +<doc> + <pre><code>\ConTeXt\ is great</code></pre> + + <pre><code>but you need to know some tricks</code></pre> +</doc> +\stopbuffer + +\startxmlsetups xml:initialize + \xmlsetsetup{#1}{doc|p|code}{xml:*} + \xmlsetsetup{#1}{pre/code}{xml:pre:code} +\stopxmlsetups + +\xmlregistersetup{xml:initialize} + +\startxmlsetups xml:doc + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:pre:code + no solution + \comment[symbol=Key, location=inmargin,color=yellow]{\xmlflush{#1}} + \par + solution one \begingroup + \expandUx + \comment[symbol=Key, location=inmargin,color=yellow]{\xmlflush{#1}} + \endgroup + \par + solution two + \comment[symbol=Key, location=inmargin,color=yellow]{\xmlpure{#1}} + \par + \xmlprettyprint{#1}{tex} +\stopxmlsetups + +\xmlprocessbuffer{main}{demo}{} +\stoptyping + +The first comment (an interactive feature of \PDF\ comes out as: + +\starttyping +\Ux {5C}ConTeXt\Ux {5C} is great +\stoptyping + +The second and third comment are okay. It's one of the reasons why we have \type +{\xmlpure}. + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-expressions.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-expressions.tex new file mode 100644 index 00000000000..0c126f2f89d --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-expressions.tex @@ -0,0 +1,645 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-expressions + +\startchapter[title={Expressions and filters}] + +\startsection[title={path expressions}] + +In the previous chapters we used \cmdinternal {cd:lpath} expressions, which are a variant +on \type {xpath} expressions as in \XSLT\ but in this case more geared towards +usage in \TEX. This mechanisms will be extended when demands are there. + +A path is a sequence of matches. 
A simple path expression is: + +\starttyping +a/b/c/d +\stoptyping + +Here each \type {/} goes one level deeper. We can go backwards in a lookup with +\type {..}: + +\starttyping +a/b/../d +\stoptyping + +We can also combine lookups, as in: + +\starttyping +a/(b|c)/d +\stoptyping + +A negated lookup is preceded by a \type {!}: + +\starttyping +a/(b|c)/!d +\stoptyping + +A wildcard is specified with a \type {*}: + +\starttyping +a/(b|c)/!d/e/*/f +\stoptyping + +In addition to these tag based lookups we can use attributes: + +\starttyping +a/(b|c)/!d/e/*/f[@type=whatever] +\stoptyping + +An \type {@} as first character means that we are dealing with an attribute. +Within the square brackets there can be boolean expressions: + +\starttyping +a/(b|c)/!d/e/*/f[@type=whatever and @id>100] +\stoptyping + +You can use functions as in: + +\starttyping +a/(b|c)/!d/e/*/f[something(text()) == "oeps"] +\stoptyping + +There are a couple of predefined functions: + +\starttabulate[|l|l|p|] +\NC \type{rootposition} \type{order} \NC number \NC the index of the matched root element (kind of special) \NC \NR +\NC \type{position} \NC number \NC the current index of the matched element in the match list \NC \NR +\NC \type{match} \NC number \NC the current index of the matched element sub list with the same parent \NC \NR +\NC \type{first} \NC number \NC \NC \NR +\NC \type{last} \NC number \NC \NC \NR +\NC \type{index} \NC number \NC the current index of the matched element in its parent list \NC \NR +\NC \type{firstindex} \NC number \NC \NC \NR +\NC \type{lastindex} \NC number \NC \NC \NR +\NC \type{element} \NC number \NC the element's index \NC \NR +\NC \type{firstelement} \NC number \NC \NC \NR +\NC \type{lastelement} \NC number \NC \NC \NR +\NC \type{text} \NC string \NC the textual representation of the matched element \NC \NR +\NC \type{content} \NC table \NC the node of the matched element \NC \NR +\NC \type{name} \NC string \NC the full name of the matched element: namespace and tag \NC \NR +\NC \type{namespace} \type{ns} \NC string \NC the namespace of the matched element \NC \NR +\NC \type{tag} \NC string \NC the tag of the matched element \NC \NR +\NC \type{attribute} \NC string \NC the value of the attribute with the given name of the matched element \NC \NR +\stoptabulate + +There are fundamental differences between \type {position}, \type {match} and +\type {index}. Each step results in a new list of matches. The \type {position} +is the index in this new (possibly intermediate) list. The \type {match} is also +an index in this list but related to the specific match of element names. The +\type {index} refers to the location in the parent element. + +Say that we have: + +\starttyping +<collection> + <resources> + <manual> + <screen>.1.</screen> + <paper>.1.</paper> + </manual> + <manual> + <paper>.2.</paper> + <screen>.2.</screen> + </manual> + <resources> + <resources> + <manual> + <screen>.3.</screen> + <paper>.3.</paper> + </manual> + <resources> +<collection> +\stoptyping + +The following then applies: + +\starttabulate[|l|l|] +\NC \type {collection/resources/manual[position()==1]/paper} \NC \type{.1.} \NC \NR +\NC \type {collection/resources/manual[match()==1]/paper} \NC \type{.1.} \type{.3.} \NC \NR +\NC \type {collection/resources/manual/paper[index()==1]} \NC \type{.2.} \NC \NR +\stoptabulate + +In most cases the \type {position} test is more restrictive than the \type +{match} test. + +You can pass your own functions too. 
Such functions are defined in the \type +{xml.expressions} namespace. We have defined a few shortcuts: + +\starttabulate[|l|l|] +\NC \type {find(str,pattern)} \NC \type{string.find} \NC \NR +\NC \type {contains(str)} \NC \type{string.find} \NC \NR +\NC \type {oneof(str,...)} \NC is \type{str} in list \NC \NR +\NC \type {upper(str)} \NC \type{characters.upper} \NC \NR +\NC \type {lower(str)} \NC \type{characters.lower} \NC \NR +\NC \type {number(str)} \NC \type{tonumber} \NC \NR +\NC \type {boolean(str)} \NC \type{toboolean} \NC \NR +\NC \type {idstring(str)} \NC removes leading hash \NC \NR +\NC \type {name(index)} \NC full tag name \NC \NR +\NC \type {tag(index)} \NC tag name \NC \NR +\NC \type {namespace(index)} \NC namespace of tag \NC \NR +\NC \type {text(index)} \NC content \NC \NR +\NC \type {error(str)} \NC quit and show error \NC \NR +\NC \type {quit()} \NC quit \NC \NR +\NC \type {print()} \NC print message \NC \NR +\NC \type {count(pattern)} \NC number of matches \NC \NR +\NC \type {child(pattern)} \NC take child that matches \NC \NR +\stoptabulate + + +You can also use normal \LUA\ functions as long as you make sure that you pass +the right arguments. There are a few predefined variables available inside such +functions. + +\starttabulate[|Tl|l|p|] +\NC \type{list} \NC table \NC the list of matches \NC \NR +\NC \type{l} \NC number \NC the current index in the list of matches \NC \NR +\NC \type{ll} \NC element \NC the current element that matched \NC \NR +\NC \type{order} \NC number \NC the position of the root of the path \NC \NR +\stoptabulate + +The given expression between \type {[]} is converted to a \LUA\ expression so you +can use the usual operators: + +\starttyping +== ~= <= >= < > not and or () +\stoptyping + +In addition, \type {=} equals \type {==} and \type {!=} is the same as \type +{~=}. If you mess up the expression, you quite likely get a \LUA\ error message. + +\stopsection + +\startsection[title={css selectors}] + +\startbuffer[selector-001] +<?xml version="1.0" ?> + +<a> + <b class="one">b.one</b> + <b class="two">b.two</b> + <b class="one two">b.one.two</b> + <b class="three">b.three</b> + <b id="first">b#first</b> + <c>c</c> + <d>d e</d> + <e>d e</e> + <e>d e e</e> + <d>d f</d> + <f foo="bar">@foo = bar</f> + <f bar="foo">@bar = foo</f> + <f bar="foo1">@bar = foo1</f> + <f bar="foo2">@bar = foo2</f> + <f bar="foo3">@bar = foo3</f> + <f bar="foo+4">@bar = foo+4</f> + <g>g</g> + <g><gg><d>g gg d</d></gg></g> + <g><gg><f>g gg f</f></gg></g> + <g><gg><f class="one">g gg f.one</f></gg></g> + <g>g</g> + <g><gg><f class="two">g gg f.two</f></gg></g> + <g><gg><f class="three">g gg f.three</f></gg></g> + <g><f class="one">g f.one</f></g> + <g><f class="three">g f.three</f></g> + <h whatever="four five six">@whatever = four five six</h> +</a> +\stopbuffer + +\xmlloadbuffer{selector-001}{selector-001} + +\startxmlsetups xml:selector:demo + \advance\scratchcounter\plusone + \inleftmargin{\the\scratchcounter}\ignorespaces\xmlverbatim{#1}\par +\stopxmlsetups + +\unexpanded\def\showCSSdemo#1#2% + {\blank + \textrule{\tttf#2} + \startlines + \dontcomplain + \tttf \obeyspaces + \scratchcounter\zerocount + \xmlcommand{#1}{#2}{xml:selector:demo} + \stoplines + \blank} + +The \CSS\ approach to filtering is a bit different from the path based one and is +supported too. In fact, you can combine both methods. Depending on what you +select, the \CSS\ one can be a little bit faster too. 
It has the advantage that +one can select more in one go but at the same time looks a bit less attractive. +This method was added just to show that it can be done but might be useful too. A +selector is given between curly braces (after all \CSS\ uses them and they have no +function yet in the parser. + +\starttyping +\xmlall{#1}{{foo bar .whatever, bar foo .whatever}} +\stoptyping + +The following methods are supported: + +\starttabulate[|T||] +\NC element \NC all tags element \NC \NR +\NC element-1 > element-2 \NC all tags element-2 with parent tag element-1 \NC \NR +\NC element-1 + element-2 \NC all tags element-2 preceded by tag element-1 \NC \NR +\NC element-1 ~ element-2 \NC all tags element-2 preceded by tag element-1 \NC \NR +\NC element-1 element-2 \NC all tags element-2 inside tag element-1 \NC \NR +\NC [attribute] \NC has attribute \NC \NR +\NC [attribute=value] \NC attribute equals value\NC \NR +\NC [attribute\lettertilde =value] \NC attribute contains value (space is separator) \NC \NR +\NC [attribute\letterhat ="value"] \NC attribute starts with value \NC \NR +\NC [attribute\letterdollar="value"] \NC attribute ends with value \NC \NR +\NC [attribute*="value"] \NC attribute contains value \NC \NR +\NC .class \NC has class \NC \NR +\NC \letterhash id \NC has id \NC \NR +\NC :nth-child(n) \NC the child at index n \NC \NR +\NC :nth-last-child(n) \NC the child at index n from the end \NC \NR +\NC :first-child \NC the first child \NC \NR +\NC :last-child \NC the last child \NC \NR +\NC :nth-of-type(n) \NC the match at index n \NC \NR +\NC :nth-last-of-type(n) \NC the match at index n from the end \NC \NR +\NC :first-of-type \NC the first match \NC \NR +\NC :last-of-type \NC the last match \NC \NR +\NC :only-of-type \NC the only match or nothing \NC \NR +\NC :only-child \NC the only child or nothing \NC \NR +\NC :empty \NC only when empty \NC \NR +\NC :root \NC the whole tree \NC \NR +\stoptabulate + +The next pages show some examples. For that we use the demo file: + +\typebuffer[selector-001] + +The class and id selectors often only make sense in \HTML\ like documents but they +are supported nevertheless. They are after all just shortcuts for filtering by +attribute. The class filtering is special in the sense that it checks for a class +in a list of classes given in an attribute. + +\showCSSdemo{selector-001}{{.one}} +\showCSSdemo{selector-001}{{.one, .two}} +\showCSSdemo{selector-001}{{.one, .two, \letterhash first}} + +Attributes can be filtered by presence, value, partial value and such. Quotes are +optional but we advice to use them. 
+ +\showCSSdemo{selector-001}{{[foo], [bar=foo]}} +\showCSSdemo{selector-001}{{[bar\lettertilde=foo]}} +\showCSSdemo{selector-001}{{[bar\letterhat="foo"]}} +\showCSSdemo{selector-001}{{[whatever\lettertilde="five"]}} + +You can of course combine the methods as in: + +\showCSSdemo{selector-001}{{g f .one, g f .three}} +\showCSSdemo{selector-001}{{g > f .one, g > f .three}} +\showCSSdemo{selector-001}{{d + e}} +\showCSSdemo{selector-001}{{d ~ e}} +\showCSSdemo{selector-001}{{d ~ e, g f .one, g f .three}} + +You can also negate the result by using \type {:not} on a simple expression: + +\showCSSdemo{selector-001}{{:not([whatever\lettertilde="five"])}} +\showCSSdemo{selector-001}{{:not(d)}} + +The child and match selectors are also supported: + +\showCSSdemo{selector-001}{{a:nth-child(3)}} +\showCSSdemo{selector-001}{{a:nth-last-child(3)}} +\showCSSdemo{selector-001}{{g:nth-of-type(3)}} +\showCSSdemo{selector-001}{{g:nth-last-of-type(3)}} +\showCSSdemo{selector-001}{{a:first-child}} +\showCSSdemo{selector-001}{{a:last-child}} +\showCSSdemo{selector-001}{{e:first-of-type}} +\showCSSdemo{selector-001}{{gg d:only-of-type}} + +Instead of numbers you can also give the \type {an} and \type {an+b} formulas +as well as the \type {odd} and \type {even} keywords: + +\showCSSdemo{selector-001}{{a:nth-child(even)}} +\showCSSdemo{selector-001}{{a:nth-child(odd)}} +\showCSSdemo{selector-001}{{a:nth-child(3n+1)}} +\showCSSdemo{selector-001}{{a:nth-child(2n+3)}} + +There are a few special cases: + +\showCSSdemo{selector-001}{{g:empty}} +\showCSSdemo{selector-001}{{g:root}} +\showCSSdemo{selector-001}{{*}} + +Combining the \CSS\ methods with the regular ones is possible: + +\showCSSdemo{selector-001}{{g gg f .one}} +\showCSSdemo{selector-001}{g/gg/f[@class='one']} +\showCSSdemo{selector-001}{g/{gg f .one}} + +\startbuffer[selector-002] +<?xml version="1.0" ?> + +<document> + <title class="one" >title 1</title> + <title class="two" >title 2</title> + <title class="one" >title 3</title> + <title class="three">title 4</title> +</document> +\stopbuffer + +The next examples we use this file: + +\typebuffer[selector-002] + +\xmlloadbuffer{selector-002}{selector-002} + +When we filter from this (not too well structured) tree we can use both +methods to achieve the same: + +\showCSSdemo{selector-002}{{document title .one, document title .three}} + +\showCSSdemo{selector-002}{/document/title[(@class='one') or (@class='three')]} + +However, imagine this file: + +\startbuffer[selector-003] +<?xml version="1.0" ?> + +<document> + <title class="one">title 1</title> + <subtitle class="sub">title 1.1</subtitle> + <title class="two">title 2</title> + <subtitle class="sub">title 2.1</subtitle> + <title class="one">title 3</title> + <subtitle class="sub">title 3.1</subtitle> + <title class="two">title 4</title> + <subtitle class="sub">title 4.1</subtitle> +</document> +\stopbuffer + +\typebuffer[selector-003] + +\xmlloadbuffer{selector-003}{selector-003} + +The next filter in easier with the \CSS\ selector methods because these accumulate +independent (simple) expressions: + +\showCSSdemo{selector-003}{{document title .one + subtitle, document title .two + subtitle}} + +Watch how we get an output in the document order. Because we render a sequential document +a combined filter will trigger a sorting pass. + +\stopsection + +\startsection[title={functions as filters}] + +At the \LUA\ end a whole \cmdinternal {cd:lpath} expression results in a (set of) node(s) +with its environment, but that is hardly usable in \TEX. 
Think of code like: + +\starttyping +for e in xml.collected(xml.load('text.xml'),"title") do + -- e = the element that matched +end +\stoptyping + +The older variant is still supported but you can best use the previous variant. + +\starttyping +for r, d, k in xml.elements(xml.load('text.xml'),"title") do + -- r = root of the title element + -- d = data table + -- k = index in data table +end +\stoptyping + +Here \type {d[k]} points to the \type {title} element and in this case all titles +in the tree pass by. In practice this kind of code is encapsulated in function +calls, like those returning elements one by one, or returning the first or last +match. The result is then fed back into \TEX, possibly after being altered by an +associated setup. We've seen the wrappers to such functions already in a previous +chapter. + +In addition to the previously discussed expressions, one can add so called +filters to the expression, for instance: + +\starttyping +a/(b|c)/!d/e/text() +\stoptyping + +In a filter, the last part of the \cmdinternal {cd:lpath} expression is a +function call. The previous example returns the text of each element \type {e} +that results from matching the expression. When running \TEX\ the following +functions are available. Some are also available when using pure \LUA. In \TEX\ +you can often use one of the macros like \type {\xmlfirst} instead of a \type +{\xmlfilter} with finalizer \type {first()}. The filter can be somewhat faster +but that is hardly noticeable. + +\starttabulate[|l|l|p|] +\NC \type {context()} \NC string \NC the serialized text with \TEX\ catcode regime \NC \NR +%NC \type {ctxtext()} \NC string \NC \NC \NR +\NC \type {function()} \NC string \NC depends on the function \NC \NR +% +\NC \type {name()} \NC string \NC the (remapped) namespace \NC \NR +\NC \type {tag()} \NC string \NC the name of the element \NC \NR +\NC \type {tags()} \NC list \NC the names of the element \NC \NR +% +\NC \type {text()} \NC string \NC the serialized text \NC \NR +\NC \type {upper()} \NC string \NC the serialized text uppercased \NC \NR +\NC \type {lower()} \NC string \NC the serialized text lowercased \NC \NR +\NC \type {stripped()} \NC string \NC the serialized text stripped \NC \NR +\NC \type {lettered()} \NC string \NC the serialized text only letters (cf. 
\UNICODE) \NC \NR
+%
+\NC \type {count()} \NC number \NC the number of matches \NC \NR
+\NC \type {index()} \NC number \NC the matched index in the current path \NC \NR
+\NC \type {match()} \NC number \NC the matched index in the preceding path \NC \NR
+%
+%NC \type {lowerall()} \NC string \NC \NC \NR
+%NC \type {upperall()} \NC string \NC \NC \NR
+%
+\NC \type {attribute(name)} \NC content \NC returns the attribute with the given name \NC \NR
+\NC \type {chainattribute(name)} \NC content \NC idem, but backtracks till one is found \NC \NR
+\NC \type {command(name)} \NC content \NC expands the setup with the given name for each found element \NC \NR
+\NC \type {position(n)} \NC content \NC processes the \type {n}\high{th} instance of the found element \NC \NR
+\NC \type {all()} \NC content \NC processes all instances of the found element \NC \NR
+%NC \type {default} \NC content \NC all \NC \NR
+\NC \type {reverse()} \NC content \NC idem in reverse order \NC \NR
+\NC \type {first()} \NC content \NC processes the first instance of the found element \NC \NR
+\NC \type {last()} \NC content \NC processes the last instance of the found element \NC \NR
+\NC \type {concat(...)} \NC content \NC concatenates the match \NC \NR
+\NC \type {concatrange(from,to,...)} \NC content \NC concatenates a range of matches \NC \NR
+\stoptabulate
+
+The extra arguments of the concatenators are: \type {separator} (string), \type
+{lastseparator} (string) and \type {textonly} (a boolean).
+
+These filters are in fact \LUA\ functions which means that if needed more of them
+can be added. Indeed this happens in some of the \XML\ related \MKIV\ modules,
+for instance in the \MATHML\ processor.
+
+\stopsection
+
+\startsection[title={example}]
+
+The number of commands is rather large and if you want to avoid them this is
+often possible. Take for instance:
+
+\starttyping
+\xmlall{#1}{/a/b[position()>3]}
+\stoptyping
+
+Alternatively you can use:
+
+\starttyping
+\xmlfilter{#1}{/a/b[position()>3]/all()}
+\stoptyping
+
+and actually this is also faster as internally it avoids a function call. Of
+course in practice this is hardly measurable.
+
+In previous examples we've already seen quite some expressions, and it might be
+good to point out that the syntax is modelled after \XSLT\ but is not quite the
+same. The reason is that we started with a rather minimal system and already
+have styles in use that depend on compatibility.
+
+\starttyping
+namespace:// axis node(set) [expr 1]..[expr n] / ... / filter
+\stoptyping
+
+When we are inside a \CONTEXT\ run, the namespace is \type {tex}. However, if
+you do not want to print back to \TEX\ you need to be more explicit.
Say that we +typeset examns and have a (not that logical) structure like: + +\starttyping +<question> + <text>...</text> + <answer> + <item>one</item> + <item>two</item> + <item>three</item> + </answer> + <alternative> + <condition>true</condition> + <score>1</score> + </alternative> + <alternative> + <condition>false</condition> + <score>0</score> + </alternative> + <alternative> + <condition>true</condition> + <score>2</score> + </alternative> +</question> +\stoptyping + +Say that we typeset the questions with: + +\starttyping +\startxmlsetups question + \blank + score: \xmlfunction{#1}{totalscore} + \blank + \xmlfirst{#1}{text} + \startitemize + \xmlfilter{#1}{/answer/item/command(answer:item)} + \stopitemize + \endgraf + \blank +\stopxmlsetups +\stoptyping + +Each item in the answer results in a call to: + +\starttyping +\startxmlsetups answer:item + \startitem + \xmlflush{#1} + \endgraf + \xmlfilter{#1}{../../alternative[position()=rootposition()]/ + condition/command(answer:condition)} + \stopitem +\stopxmlsetups +\stoptyping + +\starttyping +\startxmlsetups answer:condition + \endgraf + condition: \xmlflush{#1} + \endgraf +\stopxmlsetups +\stoptyping + +Now, there are two rather special filters here. The first one involves +calculating the total score. As we look forward we use a function to deal with +this. + +\starttyping +\startluacode +function xml.functions.totalscore(root) + local score = 0 + for e in xml.collected(root,"/alternative") do + score = score + xml.filter(e,"xml:///score/number()") or 0 + end + tex.write(score) +end +\stopluacode +\stoptyping + +Watch how we use the namespace to keep the results at the \LUA\ end. + +The second special trick shown here is to limit a match using the current +position of the root (\type {#}) match. + +As you can see, a path expression can be more than just filtering a few nodes. At +the end of this manual you will find a bunch of examples. + +\stopsection + +\startsection[title={tables}] + +If you want to know how the internal \XML\ tables look you can print such a +table: + +\starttyping +print(table.serialize(e)) +\stoptyping + +This produces for instance: + +% s = xml.convert("<document><demo label='whatever'>some text</demo></document>") +% print(table.serialize(xml.filter(s,"demo")[1])) + +\starttyping +t={ + ["at"]={ + ["label"]="whatever", + }, + ["dt"]={ "some text" }, + ["ns"]="", + ["rn"]="", + ["tg"]="demo", +} +\stoptyping + +The \type {rn} entry is the renamed namespace (when renaming is applied). If you +see tags like \type {@pi@} this means that we don't have an element, but (in this +case) a processing instruction. + +\starttabulate[|l|p|] +\NC \type {@rt@} \NC the root element \NC \NR +\NC \type {@dd@} \NC document definition \NC \NR +\NC \type {@cm@} \NC comment, like \type {<!-- whatever -->} \NC \NR +\NC \type {@cd@} \NC so called \type {CDATA} \NC \NR +\NC \type {@pi@} \NC processing instruction, like \type {<?whatever we want ?>} \NC \NR +\stoptabulate + +There are many ways to deal with the content, but in the perspective of \TEX\ +only a few matter. + +\starttabulate[|l|p|] +\NC \type {xml.sprint(e)} \NC print the content to \TEX\ and apply setups if needed \NC \NR +\NC \type {xml.tprint(e)} \NC print the content to \TEX\ (serialize elements verbose) \NC \NR +\NC \type {xml.cprint(e)} \NC print the content to \TEX\ (used for special content) \NC \NR +\stoptabulate + +Keep in mind that anything low level that you uncover is not part of the official +interface unless mentioned in this manual. 
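+
+That said, for debugging it can be handy to look at such a table from the \LUA\
+end too. The following is only a sketch (it reuses the made up \type {demo}
+element that produced the table shown above) of how the \type {tg}, \type {at}
+and \type {dt} fields can be accessed and printed back to \TEX:
+
+\starttyping
+\startluacode
+    -- just a sketch: the <demo> element and its label attribute are made up
+    local x = xml.convert(
+        "<document><demo label='whatever'>some text</demo></document>"
+    )
+    local e = xml.filter(x,"demo")[1]
+    if e then
+        context.type(e.tg)       -- the tag        : demo
+        context.par()
+        context.type(e.at.label) -- an attribute   : whatever
+        context.par()
+        context(e.dt[1])         -- the data entry : some text
+    end
+\stopluacode
+\stoptyping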
+ +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-filtering.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-filtering.tex new file mode 100644 index 00000000000..5bb5a35dee9 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-filtering.tex @@ -0,0 +1,262 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-filtering + +\startchapter[title={Filtering content}] + +\startsection[title={\TEX\ versus \LUA}] + +It will not come as a surprise that we can access \XML\ files from \TEX\ as well +as from \LUA. In fact there are two methods to deal with \XML\ in \LUA. First +there are the low level \XML\ functions in the \type {xml} namespace. On top of +those functions there is a set of functions in the \type {lxml} namespace that +deals with \XML\ in a more \TEX ie way. Most of these have similar commands at +the \TEX\ end. + +\startbuffer +\startxmlsetups first:demo:one + \xmlfilter {#1} {artist/name[text()='Randy Newman']/.. + /albums/album[position()=3]/command(first:demo:two)} +\stopxmlsetups + +\startxmlsetups first:demo:two + \blank \start \tt + \xmldisplayverbatim{#1} + \stop \blank +\stopxmlsetups + +\xmlprocessfile{demo}{music-collection.xml}{first:demo:one} +\stopbuffer + +\typebuffer + +This gives the following snippet of verbatim \XML\ code. The indentation is +conform the indentation in the whole \XML\ file. \footnote {The (probably +outdated) \XML\ file contains the collection stores on my slimserver instance. +You can use the \type {mtxrun --script flac} to generate such files.} + +\doifmodeelse {atpragma} { + \getbuffer +} { + \typefile{xml-mkiv-01.xml} +} + +An alternative written in \LUA\ looks as follows: + +\startbuffer +\blank \start \tt \startluacode + local m = lxml.load("mine","music-collection.xml") -- m == lxml.id("mine") + local p = "artist/name[text()='Randy Newman']/../albums/album[position()=4]" + local l = lxml.filter(m,p) -- returns a list (with one entry) + lxml.displayverbatim(l[1]) +\stopluacode \stop \blank +\stopbuffer + +\typebuffer + +This produces: + +\doifmodeelse {atpragma} { + \getbuffer +} { + \typefile{xml-mkiv-02.xml} +} + +You can use both methods mixed but in practice we will use the \TEX\ commands in +regular styles and the mixture in modules, for instance in those dealing with +\MATHML\ and cals tables. For complex matters you can write your own finalizers +(the last action to be taken in a match) in \LUA\ and use them at the \TEX\ end. + +\stopsection + +\startsection[title={a few details}] + +In \CONTEXT\ setups are a rather common variant on macros (\TEX\ commands) but +with their own namespace. An example of a setup is: + +\starttyping +\startsetup doc:print + \setuppapersize[A4][A4] +\stopsetup + +\startsetup doc:screen + \setuppapersize[S6][S4] +\stopsetup +\stoptyping + +Given the previous definitions, later on we can say something like: + +\starttyping +\doifmodeelse {paper} { + \setup[doc:print] +} { + \setup[doc:screen] +} +\stoptyping + +Another example is: + +\starttyping +\startsetup[doc:header] + \marking[chapter] + \space + -- + \space + \pagenumber +\stopsetup +\stoptyping + +in combination with: + +\starttyping +\setupheadertexts[\setup{doc:header}] +\stoptyping + +Here the advantage is that instead of ending up with an unreadable header +definitions, we use a nicely formatted setup. 
An important property of setups and
+the reason why they were introduced long ago is that spaces and newlines are
+ignored in the definition. This means that we don't have to worry about so called
+spurious spaces but it also means that when we do want a space, we have to use
+the \type {\space} command.
+
+The only difference between setups and \XML\ setups is that the following ones
+get an argument (\type {#1}) that reflects the current node in the \XML\ tree.
+
+\stopsection
+
+\startsection[title={CDATA}]
+
+What to do with \type {CDATA}? There are a few methods at the \LUA\ end for
+dealing with it but here we just mention how you can influence the rendering.
+There are four macros that play a role here:
+
+\starttyping
+\unexpanded\def\xmlcdataobeyedline {\obeyedline}
+\unexpanded\def\xmlcdataobeyedspace{\strut\obeyedspace}
+\unexpanded\def\xmlcdatabefore     {\begingroup\tt}
+\unexpanded\def\xmlcdataafter      {\endgroup}
+\stoptyping
+
+Technically you can overload them but beware of side effects. Normally you won't
+see much \type {CDATA} and whenever we do, it involves special data that needs
+very special treatment anyway.
+
+\stopsection
+
+\startsection[title={Entities}]
+
+As usual with any way of encoding documents you need escapes in order to encode
+the characters that are used in tagging the content, embedding comments, escaping
+special characters in strings (in programming languages), etc. In \XML\ this
+means that in order to encode characters like \type {<} you need an escape like
+\type {&lt;} and in order then to encode an \type {&} you need \type {&amp;}.
+
+In a typesetting workflow using a programming language like \TEX, another problem
+shows up. There we have different special characters, like \type {$ $} for triggering
+math, but also the backslash, braces etc. Even one such special character is already
+enough to have yet another escaping mechanism at work.
+
+Ideally a user should not worry about these issues but it helps figuring out issues
+when you know what happens under the hood. Also it is good to know that in the
+code there are several ways to deal with these issues. Take the following document:
+
+\starttyping
+<text>
+    Here we have a bit of a &lt;&mess&gt;:
+
+    &#35; #
+    &#37; %
+    &#92; \
+    &#123; {
+    &#124; |
+    &#125; }
+    &#126; ~
+</text>
+\stoptyping
+
+When the file is read the \type {&lt;} entity will be replaced by \type {<} and
+the \type {&gt;} by \type {>}. The numeric entities will be replaced by the
+characters they refer to. The \type {&mess} is kind of special. We do preload
+a huge list of more or less standardized entities but \type {mess} is not in
+there. However, it is possible to have it defined in the document preamble, like:
+
+\starttyping
+<!DOCTYPE dummy SYSTEM "dummy.dtd" [
+    <!ENTITY mess "what a mess" >
+]>
+\stoptyping
+
+or even this:
+
+\starttyping
+<!DOCTYPE dummy SYSTEM "dummy.dtd" [
+    <!ENTITY mess "<p>what a mess</p>" >
+]>
+\stoptyping
+
+You can also define it in your document style using one of:
+
+\startxmlcmd {\cmdbasicsetup{xmlsetentity}}
+    replaces entity with name \cmdinternal {cd:name} by \cmdinternal {cd:text}
+\stopxmlcmd
+
+\startxmlcmd {\cmdbasicsetup{xmltexentity}}
+    replaces entity with name \cmdinternal {cd:name} by \cmdinternal {cd:text}
+    typeset under a \TEX\ regime
+\stopxmlcmd
+
+Such a definition will always have a higher priority than the one defined
+in the document. Anyway, when the document is read in all entities are
+resolved and those that need a special treatment because they map to some
+text are stored in such a way that we can roundtrip them.
As a consequence, +as soon as the content gets pushed into \TEX, we need not only to intercept +special characters but also have to make sure that the following works: + +\starttyping +\xmltexentity {tex} {\TEX} +\stoptyping + +Here the backslash starts a control sequence while in regular content a +backslash is just that: a backslash. + +Special characters are really special when we have to move text around +in a \TEX\ ecosystem. + +\starttyping +<text> + <title>About #3</title> +</text> +\stoptyping + +If we map and define title as follows: + +\starttyping +\startxmlsetup xml:title + \title{\xmlflush{#1}} +\stopxmlsetup +\stoptyping + +normally something \type {\xmlflush {id::123}} will be written to the +auxiliary file and in most cases that is quite okay, but if we have this: + +\starttyping +\setuphead[title][expansion=yes] +\stoptyping + +then we don't want the \type {#} to end up as hash because later on \TEX\ +can get very confused about it because it sees some argument then in a +probably unexpected way. This is solved by escaping the hash like this: + +\starttyping +About \Ux{23}3 +\stoptyping + +The \type {\Ux} command will convert its hexadecimal argument into a +character. Of course one then needs to typeset such a text under a \TEX\ +character regime but that is normally the case anyway. + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-introduction.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-introduction.tex new file mode 100644 index 00000000000..e7f0124daab --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-introduction.tex @@ -0,0 +1,42 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-introduction + +\startchapter[title={Introduction}] + +This manual presents the \MKIV\ way of dealing with \XML. Although the +traditional \MKII\ streaming parser has a charming simplicity in its control, for +complex documents the tree based \MKIV\ method is more convenient. It is for this +reason that the old method has been removed from \MKIV. If you are familiar with +\XML\ processing in \MKII, then you will have noticed that the \MKII\ commands +have \type {XML} in their name. The \MKIV\ commands have a lowercase \type {xml} +in their names. That way there is no danger for confusion or a mixup. + +You may wonder why we do these manipulations in \TEX\ and not use \XSLT\ (or +other transformation methods) instead. The advantage of an integrated approach is +that it simplifies usage. Think of not only processing the document, but also +using \XML\ for managing resources in the same run. An \XSLT\ approach is just as +verbose (after all, you still need to produce \TEX\ code) and probably less +readable. In the case of \MKIV\ the integrated approach is also faster and gives +us the option to manipulate content at runtime using \LUA. It has the additional +advantage that to some extend we can handle a mix of \TEX\ and \XML\ because we +know when we're doing one or the other. + +This manual is dedicated to Taco Hoekwater, one of the first \CONTEXT\ users, and +also the first to use it for processing \XML. Who could have thought at that time +that we would have a more convenient way of dealing with those angle brackets. +The second version for this manual is dedicated to Thomas Schmitz, a power user +who occasionally became victim of the evolving mechanisms. 
+ +\blank + +\startlines +Hans Hagen +\PRAGMA +Hasselt NL +2008\endash2016 +\stoplines + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-lookups.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-lookups.tex new file mode 100644 index 00000000000..e6afaa9488b --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-lookups.tex @@ -0,0 +1,314 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-lookups + +\startchapter[title={Lookups using lpaths}] + +\startsection[title={introduction}] + +There is not that much system in the following examples. They resulted from tests +with different documents. The current implementation evolved out of the +experimental code. For instance, I decided to add the multiple expressions in row +handling after a few email exchanges with Jean|-|Michel Huffen. + +One of the main differences between the way \XSLT\ resolves a path and our way is +the anchor. Take: + +\starttyping +/something +something +\stoptyping + +The first one anchors in the current (!) element so it will only consider direct +children. The second one does a deep lookup and looks at the descendants as well. +Furthermore we have a few extra shortcuts like \type {**} in \type {a/**/b} which +represents all descendants. + +The expressions (between square brackets) has to be valid \LUA\ and some +preprocessing is done to resolve the built in functions. So, you might use code +like: + +\starttyping +my_lpeg_expression:match(text()) == "whatever" +\stoptyping + +given that \type {my_lpeg_expression} is known. In the examples below we use the +visualizer to show the steps. Some are shown more than once as part of a set. + +\stopsection + +\startsection[title={special cases}] + +\xmllshow{} +\xmllshow{*} +\xmllshow{.} +\xmllshow{/} + +\stopsection + +\startsection[title={wildcards}] + +\xmllshow{*} +\xmllshow{*:*} +\xmllshow{/*} +\xmllshow{/*:*} +\xmllshow{*/*} +\xmllshow{*:*/*:*} + +\xmllshow{a/*} +\xmllshow{a/*:*} +\xmllshow{/a/*} +\xmllshow{/a/*:*} + +\xmllshow{/*} +\xmllshow{/**} +\xmllshow{/***} + +\stopsection + +\startsection[title={multiple steps}] + +\xmllshow{answer} +\xmllshow{answer/test/*} +\xmllshow{answer/test/child::} +\xmllshow{answer/*} +\xmllshow{answer/*[tag()='p' and position()=1 and text()!='']} + +\stopsection + +\startsection[title={pitfals}] + +\xmllshow{[oneof(lower(@encoding),'tex','context','ctx')]} +\xmllshow{.[oneof(lower(@encoding),'tex','context','ctx')]} + +\stopsection + +\startsection[title={more special cases}] + +\xmllshow{**} +\xmllshow{*} +\xmllshow{..} +\xmllshow{.} +\xmllshow{//} +\xmllshow{/} + +\xmllshow{**/} +\xmllshow{**/*} +\xmllshow{**/.} +\xmllshow{**//} + +\xmllshow{*/} +\xmllshow{*/*} +\xmllshow{*/.} +\xmllshow{*//} + +\xmllshow{/**/} +\xmllshow{/**/*} +\xmllshow{/**/.} +\xmllshow{/**//} + +\xmllshow{/*/} +\xmllshow{/*/*} +\xmllshow{/*/.} +\xmllshow{/*//} + +\xmllshow{./} +\xmllshow{./*} +\xmllshow{./.} +\xmllshow{.//} + +\xmllshow{../} +\xmllshow{../*} +\xmllshow{../.} +\xmllshow{..//} + +\stopsection + +\startsection[title={more wildcards}] + +\xmllshow{one//two} +\xmllshow{one/*/two} +\xmllshow{one/**/two} +\xmllshow{one/***/two} +\xmllshow{one/x//two} +\xmllshow{one//x/two} +\xmllshow{//x/two} + +\stopsection + +\startsection[title={special axis}] + +\xmllshow{descendant::whocares/ancestor::whoknows} +\xmllshow{descendant::whocares/ancestor::whoknows/parent::} +\xmllshow{descendant::whocares/ancestor::} 
+\xmllshow{child::something/child::whatever/child::whocares} +\xmllshow{child::something/child::whatever/child::whocares|whoknows} +\xmllshow{child::something/child::whatever/child::(whocares|whoknows)} +\xmllshow{child::something/child::whatever/child::!(whocares|whoknows)} +\xmllshow{child::something/child::whatever/child::(whocares)} +\xmllshow{child::something/child::whatever/child::(whocares)[position()>2]} +\xmllshow{child::something/child::whatever[position()>2][position()=1]} +\xmllshow{child::something/child::whatever[whocares][whocaresnot]} +\xmllshow{child::something/child::whatever[whocares][not(whocaresnot)]} +\xmllshow{child::something/child::whatever/self::whatever} + +There is also \type {last-match::} that starts with the last found set of nodes. +This can save some run time when you do lots of tests combined with a same check +afterwards. There is however one pitfall: you never know what is done with that +last match in the setup that gets called nested. Take the following example: + +\starttyping +\startbuffer[test] +<something> + <crap> <crapa> <crapb> <crapc> <crapd> + <crape> + done 1 + </crape> + </crapd> </crapc> </crapb> </crapa> + <crap> <crapa> <crapb> <crapc> <crapd> + <crape> + done 2 + </crape> + </crapd> </crapc> </crapb> </crapa> + <crap> <crapa> <crapb> <crapc> <crapd> + <crape> + done 3 + </crape> + </crapd> </crapc> </crapb> </crapa> +</something> +\stopbuffer +\stoptyping + +One way to filter the content is this: + +\starttyping +\xmldoif {#1} {/crap/crapa/crapb/crapc/crapd/crape} { + some action +} +\stoptyping + +It is not unlikely that you will do something like this: + +\starttyping +\xmlfirst {#1} {/crap/crapa/crapb/crapc/crapd/crape} { + \xmlfirst{#1}{/crap/crapa/crapb/crapc/crapd/crape} +} +\stoptyping + +This means that the path is resolved twice but that can be avoided as +follows: + +\starttyping +\xmldoif{#1}{/crap/crapa/crapb/crapc/crapd/crape}{ + \xmlfirst{#1}{last-match::} +} +\stoptyping + +But the next is now guaranteed to work: + +\starttyping +\xmldoif{#1}{/crap/crapa/crapb/crapc/crapd/crape}{ + \xmlfirst{#1}{last-match::} + \xmllast{#1}{last-match::} +} +\stoptyping + +Because the first one can have done some lookup the last match can be replaced +and the second call will give unexpected results. You can overcome this with: + +\starttyping +\xmldoif{#1}{/crap/crapa/crapb/crapc/crapd/crape}{ + \xmlpushmatch + \xmlfirst{#1}{last-match::} + \xmlpopmatch +} +\stoptyping + +Does it pay off? Here are some timings of a 10.000 times text and lookup +like the previous (on a decent January 2016 laptop): + +\starttabulate[|r|l|] +\NC 0.239 \NC \type {\xmldoif {...} {...}} \NC \NR +\NC 0.292 \NC \type {\xmlfirst {...} {...}} \NC \NR +\NC 0.538 \NC \type {\xmldoif {...} {...} + \xmlfirst {...} {...}} \NC \NR +\NC 0.338 \NC \type {\xmldoif {...} {...} + \xmlfirst {...} {last-match::}} \NC \NR +\NC 0.349 \NC \type {+ \xmldoif {...} {...} + \xmlfirst {...} {last-match::}-} \NC \NR +\stoptabulate + +So, pushing and popping (the last row) is a bit slower than not doing that but it +is still much faster than not using \type {last-match::} at all. 
As a shortcut +you can use \type {=}, as in: + +\starttyping +\xmlfirst{#1}{=} +\stoptyping + +You can even do this: + +\starttyping +\xmlall{#1}{last-match::/text()} +\stoptyping + +or + +\starttyping +\xmlall{#1}{=/text()} +\stoptyping + + +\stopsection + +\startsection[title={some more examples}] + +\xmllshow{/something/whatever} +\xmllshow{something/whatever} +\xmllshow{/**/whocares} +\xmllshow{whoknows/whocares} +\xmllshow{whoknows} +\xmllshow{whocares[contains(text(),'f') or contains(text(),'g')]} +\xmllshow{whocares/first()} +\xmllshow{whocares/last()} +\xmllshow{whatever/all()} +\xmllshow{whocares/position(2)} +\xmllshow{whocares/position(-2)} +\xmllshow{whocares[1]} +\xmllshow{whocares[-1]} +\xmllshow{whocares[2]} +\xmllshow{whocares[-2]} +\xmllshow{whatever[3]/attribute(id)} +\xmllshow{whatever[2]/attribute('id')} +\xmllshow{whatever[3]/text()} +\xmllshow{/whocares/first()} +\xmllshow{/whocares/last()} + +\xmllshow{xml://whatever/all()} +\xmllshow{whatever/all()} +\xmllshow{//whocares} +\xmllshow{..[2]} +\xmllshow{../*[2]} + +\xmllshow{/(whocares|whocaresnot)} +\xmllshow{/!(whocares|whocaresnot)} +\xmllshow{/!whocares} + +\xmllshow{/interface/command/command(xml:setups:register)} +\xmllshow{/interface/command[@name='xxx']/command(xml:setups:typeset)} +\xmllshow{/arguments/*} +\xmllshow{/sequence/first()} +\xmllshow{/arguments/text()} +\xmllshow{/sequence/variable/first()} +\xmllshow{/interface/define[@name='xxx']/first()} +\xmllshow{/parameter/command(xml:setups:parameter:measure)} + +\xmllshow{/(*:library|figurelibrary)/*:figure/*:label} +\xmllshow{/(*:library|figurelibrary)/figure/*:label} +\xmllshow{/(*:library|figurelibrary)/figure/label} +\xmllshow{/(*:library|figurelibrary)/figure:*/label} + +\xmlshow {whatever//br[tag(1)='br']} + +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-lpath.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-lpath.tex new file mode 100644 index 00000000000..9c8b853c8f3 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-lpath.tex @@ -0,0 +1,207 @@ +\input lxml-ctx.mkiv + +\ctxlua{dofile("t:/sources/lxml-lpt.lua")} + +\startbuffer[xmltest] +<?xml version='1.0'?> + +<!-- this is a test file --> + +<something id='1'> + <x:whatever id='1.1'> + <whocares id='1.1.1'> + test a + </whocares> + <whocaresnot id='1.1.2'> + test b + </whocaresnot> + </x:whatever> + <whatever id='2'> + <whocares id='2.1'> + test c + </whocares> + <whocaresnot id='2.2'> + test d + </whocaresnot> + </whatever> + <whatever id='3'> + test e + </whatever> + <whatever id='4' test="xxx"> + <whocares id='4.1'> + test f + </whocares> + <whocares id='4.2'> + test g + </whocares> + </whatever> + <whatever id='5' test="xxx"> + <whoknows id='5.1'> + <whocares id='5.1.1'> + test h + </whocares> + </whoknows> + <whoknows id='5.2'> + <whocaresnot id='5.2.1'> + test i + </whocaresnot> + </whoknows> + <whoknows id='5.3'> + <whocares id='5.3.1'> + test j + </whocares> + </whoknows> + </whatever> +</something> +\stopbuffer + +% \enabletrackers[xml.lparse] + +\setuplayout[width=middle,height=middle,header=1cm,footer=1cm,topspace=2cm,backspace=2cm] +\setupbodyfont[10pt] + +\setfalse\xmllshowbuffer + +\starttext + +\xmllshow{/(*:library|figurelibrary)/*:figure/*:label} +\xmllshow{/(*:library|figurelibrary)/figure/*:label} +\xmllshow{/(*:library|figurelibrary)/figure/label} +\xmllshow{/(*:library|figurelibrary)/figure:*/label} + +% 
\xmllshow{collection[@version='all']/resources/manual[match()==1]/paper/command(xml:overview)} +% \xmllshow{collection/resources/manual[match()=1]/paper/command(xml:overview)} + +% \xmllshow{answer//oeps} +% \xmllshow{answer/*/oeps} +% \xmllshow{answer/**/oeps} +% \xmllshow{answer/***/oeps} +% \xmllshow{answer/x//oeps} +% \xmllshow{answer//x/oeps} +% \xmllshow{//x/oeps} +% \xmllshow{answer/test/*} +% \xmllshow{answer/test/child::} +% \xmllshow{answer/*} +% \xmllshow{ oeps / answer / .. / * [tag()='p' and position()=1 and text()!=''] / oeps()} + +% \xmllshow{ artist / name [text()='Randy Newman'] / .. / albums / album [position()=3] / command(first:demo:two)} +% \xmllshow{/exa:selectors/exa:selector/exa:list/component[count()>1]} + +\stoptext + +\xmllshow{/*} +\xmllshow{child::} +\xmllshow{child::test} +\xmllshow{/test/test} +\xmllshow{../theory/sections/section/exercises} +\xmllshow{../training/practicalassignments} +\xmllshow{../../Outcome[position()=rootposition()]/Condition/command(xml:answer:mc:condition)} + +% \stoptext + +% \typebuffer[xmltest] \page + +\xmllshowbuffer{xmltest}{**}{id} +\xmllshowbuffer{xmltest}{*}{id} +\xmllshowbuffer{xmltest}{..}{id} +\xmllshowbuffer{xmltest}{.}{id} +\xmllshowbuffer{xmltest}{//}{id} +\xmllshowbuffer{xmltest}{/}{id} + +\xmllshowbuffer{xmltest}{**/}{id} +\xmllshowbuffer{xmltest}{**/*}{id} +\xmllshowbuffer{xmltest}{**/.}{id} +\xmllshowbuffer{xmltest}{**//}{id} + +\xmllshowbuffer{xmltest}{*/}{id} +\xmllshowbuffer{xmltest}{*/*}{id} +\xmllshowbuffer{xmltest}{*/.}{id} +\xmllshowbuffer{xmltest}{*//}{id} + +\xmllshowbuffer{xmltest}{/**/}{id} +\xmllshowbuffer{xmltest}{/**/*}{id} +\xmllshowbuffer{xmltest}{/**/.}{id} +\xmllshowbuffer{xmltest}{/**//}{id} + +\xmllshowbuffer{xmltest}{/*/}{id} +\xmllshowbuffer{xmltest}{/*/*}{id} +\xmllshowbuffer{xmltest}{/*/.}{id} +\xmllshowbuffer{xmltest}{/*//}{id} + +\xmllshowbuffer{xmltest}{./}{id} +\xmllshowbuffer{xmltest}{./*}{id} +\xmllshowbuffer{xmltest}{./.}{id} +\xmllshowbuffer{xmltest}{.//}{id} + +\xmllshowbuffer{xmltest}{../}{id} +\xmllshowbuffer{xmltest}{../*}{id} +\xmllshowbuffer{xmltest}{../.}{id} +\xmllshowbuffer{xmltest}{..//}{id} + +\xmllshowbuffer{xmltest}{descendant::whocares/ancestor::whoknows}{id} +\xmllshowbuffer{xmltest}{descendant::whocares/ancestor::whoknows/parent::}{id} +\xmllshowbuffer{xmltest}{descendant::whocares/ancestor::}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever/child::whocares}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever/child::whocares|whoknows}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever/child::(whocares|whoknows)}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever/child::!(whocares|whoknows)}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever/child::(whocares)}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever/child::(whocares)[position()>2]}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever[position()>2][position()=1]}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever[whocares][whocaresnot]}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever[whocares][not(whocaresnot)]}{id} +\xmllshowbuffer{xmltest}{child::something/child::whatever/self::whatever}{id} +\xmllshowbuffer{xmltest}{/something/whatever}{id} +\xmllshowbuffer{xmltest}{something/whatever}{id} +\xmllshowbuffer{xmltest}{/**/whocares}{id} +\xmllshowbuffer{xmltest}{whoknows/whocares}{id} +\xmllshowbuffer{xmltest}{whoknows}{id} +\xmllshowbuffer{xmltest}{whocares[contains(text(),'f') or 
contains(text(),'g')]}{id} +\xmllshowbuffer{xmltest}{whocares/first()}{id} +\xmllshowbuffer{xmltest}{whocares/last()}{id} +\xmllshowbuffer{xmltest}{whatever/all()}{id} +\xmllshowbuffer{xmltest}{whocares/position(2)}{id} +\xmllshowbuffer{xmltest}{whocares/position(-2)}{id} +\xmllshowbuffer{xmltest}{whocares[1]}{id} +\xmllshowbuffer{xmltest}{whocares[-1]}{id} +\xmllshowbuffer{xmltest}{whocares[2]}{id} +\xmllshowbuffer{xmltest}{whocares[-2]}{id} +\xmllshowbuffer{xmltest}{whatever[3]/attribute(id)}{id} +\xmllshowbuffer{xmltest}{whatever[2]/attribute('id')}{id} +\xmllshowbuffer{xmltest}{whatever[3]/text()}{id} +\xmllshowbuffer{xmltest}{/whocares/first()}{id} +\xmllshowbuffer{xmltest}{/whocares/last()}{id} + +\xmllshowbuffer{xmltest}{xml://whatever/all()}{id} +\xmllshowbuffer{xmltest}{whatever/all()}{id} +\xmllshowbuffer{xmltest}{//whocares}{id} +\xmllshowbuffer{xmltest}{..[2]}{id} +\xmllshowbuffer{xmltest}{../*[2]}{id} + +\xmllshowbuffer{xmltest}{/(whocares|whocaresnot)}{id} +\xmllshowbuffer{xmltest}{/!(whocares|whocaresnot)}{id} +\xmllshowbuffer{xmltest}{/!whocares}{id} + +% \page + +% \xmllshow{/interface/command/command(xml:setups:register)} +% \xmllshow{/interface/command[@name='xxx']/command(xml:setups:typeset)} +% \xmllshow{/arguments/*} +% \xmllshow{/sequence/first()} +% \xmllshow{/arguments/text()} +% \xmllshow{/sequence/variable/first()} +% \xmllshow{/interface/define[@name='xxx']/first()} +% \xmllshow{/parameter/command(xml:setups:parameter:measure)} + +% \page + +% \xmllshow{interface/command/command(xml:setups:register)} +% \xmllshow{interface/command[@name='xxx']/command(xml:setups:typeset)} +% \xmllshow{arguments/*} +% \xmllshow{sequence/first()} +% \xmllshow{arguments/text()} +% \xmllshow{sequence/variable/first()} +% \xmllshow{interface/define[@name='xxx']/first()} +% \xmllshow{parameter/command(xml:setups:parameter:measure)} + +\stoptext diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-style.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-style.tex new file mode 100644 index 00000000000..8bcd7408649 --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-style.tex @@ -0,0 +1,155 @@ +\startenvironment xml-mkiv-style + +\input lxml-ctx.mkiv + +\settrue \xmllshowtitle +\setfalse\xmllshowwarning + +\usemodule[set-11] + +\loadsetups[i-context] + +% \definehspace[squad][1em plus .25em minus .25em] + +\usemodule[abr-02] + +\setuplayout + [location=middle, + marking=on, + backspace=20mm, + cutspace=20mm, + topspace=15mm, + header=15mm, + footer=15mm, + height=middle, + width=middle] + +\setuppagenumbering + [alternative=doublesided, + location=] + +\setupfootertexts + [][pagenumber] + +\setupheadertexts + [][chapter] + +\setupheader + [color=colortwo, + style=bold] + +\setupfooter + [color=colortwo, + style=bold] + +\setuphead + [chapter] + [page={yes,header,right}, + header=empty, + style=\bfc] + +\setupsectionblock + [page={yes,header,right}] + +\starttexdefinition unexpanded section:chapter:number #1 + \doifmode{*sectionnumber} { + \bf + \llap{<\enspace}#1\enspace> + } +\stoptexdefinition + +\starttexdefinition unexpanded section:section:number #1 + \doifmode{*sectionnumber} { + \bf + \llap{<<\enspace}#1\enspace>> + } +\stoptexdefinition + +\starttexdefinition unexpanded section:subsection:number #1 + \doifmode{*sectionnumber} { + \bf + \llap{<<<\enspace}#1\enspace>>> + } +\stoptexdefinition + +\setuphead[chapter] [numbercolor=black,numbercommand=\texdefinition{section:chapter:number}] 
+\setuphead[section] [numbercolor=black,numbercommand=\texdefinition{section:section:number}] +\setuphead[subsection] [numbercolor=black,numbercommand=\texdefinition{section:subsection:number}] +\setuphead[subsubsection][numbercolor=,numbercommand=,before=\blank,after=\blank] + +\setuphead + [section] + [style=\bfa] + +\setuplist + [chapter] + [style=bold] + +\setupinteractionscreen + [option=doublesided] + +\setupalign + [tolerant,stretch] + +\setupwhitespace + [big] + +\setuptolerance + [tolerant] + +\doifelsemode {atpragma} { + \setupbodyfont[lucidaot,10pt] +} { + \setupbodyfont[dejavu,10pt] +} + +\definecolor[colorone] [b=.5] +\definecolor[colortwo] [s=.3] +\definecolor[colorthree][y=.5] + +\setuptype + [color=colorone] + +\setuptyping + [color=colorone] + +\setuphead + [lshowtitle] + [style=\tt, + color=colorone] + +\setuphead + [chapter,section] + [numbercolor=colortwo, + color=colorone] + +\definedescription + [xmlcmd] + [alternative=hanging, + width=line, + distance=1em, + margin=2em, + headstyle=monobold, + headcolor=colorone] + +\setupframedtext + [setuptext] + [framecolor=colorone, + rulethickness=1pt, + corner=round] + +\usemodule[punk] + +\usetypescript[punk] + +\definelayer + [page] + [width=\paperwidth, + height=\paperheight] + +\definestartstop + [smallexample] + [before={\blank\bgroup\ss\small\setupwhitespace[medium]\setupblank[medium]}, + after={\par\egroup\blank}] + +\stopenvironment diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-titlepage.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-titlepage.tex new file mode 100644 index 00000000000..4275572140e --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-titlepage.tex @@ -0,0 +1,47 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-titlepage + +\setuplayout[page] + +\startstandardmakeup + \startfontclass[none] % nil the current fontclass since it may append its features + \EnableRandomPunk + \setlayerframed + [page] + [width=\paperwidth,height=\paperheight, + background=color,backgroundcolor=colorone,backgroundoffset=1ex,frame=off] + {} + \definedfont[demo@punk at 18pt] + \setbox\scratchbox\vbox { + \hsize\dimexpr\paperwidth+2ex\relax + \setupinterlinespace + \baselineskip 1\baselineskip plus 1pt minus 1pt + \raggedcenter + \color[colortwo]{\dorecurse{1000}{XML }} + } + \setlayer + [page] + [preset=middle] + {\vsplit\scratchbox to \dimexpr\paperheight+2ex\relax} + \definedfont[demo@punk at 90pt] + \setstrut + \setlayerframed + [page] + [preset=rightbottom,offset=10mm] + [foregroundcolor=colorthree,align=flushright,offset=overlay,frame=off] + {Dealing\\with XML in\\Con\TeX t MkIV} + \definedfont[demo@punk at 18pt] + \setstrut + \setlayerframed + [page] + [preset=righttop,offset=10mm,x=3mm,rotation=90] + [foregroundcolor=colorthree,align=flushright,offset=overlay,frame=off] + {Hans Hagen, Pragma ADE, \currentdate} + \tightlayer[page] + \stopfontclass +\stopstandardmakeup + +\setuplayout + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-tricks.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-tricks.tex new file mode 100644 index 00000000000..f8c65ecc93d --- /dev/null +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv-tricks.tex @@ -0,0 +1,814 @@ +\environment xml-mkiv-style + +\startcomponent xml-mkiv-tricks + +\startchapter[title={Tips and tricks}] + +\startsection[title={tracing}] + +It can be hard to debug code as much happens 
kind of behind the screens. +Therefore we have a couple of tracing options. Of course you can typeset some +status information, using for instance: + +\startxmlcmd {\cmdbasicsetup{xmlshow}} + typeset the tree given by \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlinfo}} + typeset the name in the element given by \cmdinternal {cd:node} +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlpath}} + returns the complete path (including namespace prefix and index) of the + given \cmdinternal {cd:node} +\stopxmlcmd + +\startbuffer[demo] +<?xml version "1.0"?> +<document> + <section> + <content> + <p>first</p> + <p><b>second</b></p> + </content> + </section> + <section> + <content> + <p><b>third</b></p> + <p>fourth</p> + </content> + </section> +</document> +\stopbuffer + +Say that we have the following \XML: + +\typebuffer[demo] + +and the next definitions: + +\startbuffer +\startxmlsetups xml:demo:base + \xmlsetsetup{#1}{p|b}{xml:demo:*} +\stopxmlsetups + +\startxmlsetups xml:demo:p + \xmlflush{#1} + \par +\stopxmlsetups + +\startxmlsetups xml:demo:b + \par + \xmlpath{#1} : \xmlflush{#1} + \par +\stopxmlsetups + +\xmlregisterdocumentsetup{example-10}{xml:demo:base} + +\xmlprocessbuffer{example-10}{demo}{} +\stopbuffer + +\typebuffer + +This will give us: + +\blank \startpacked \getbuffer \stoppacked \blank + +If you use \type {\xmlshow} you will get a complete subtree which can +be handy for tracing but can also lead to large documents. + +We also have a bunch of trackers that can be enabled, like: + +\starttyping +\enabletrackers[xml.show,xml.parse] +\stoptyping + +The full list (currently) is: + +\starttabulate[|lT|p|] +\NC xml.entities \NC show what entities are seen and replaced \NC \NR +\NC xml.path \NC show the result of parsing an lpath expression \NC \NR +\NC xml.parse \NC show stepwise resolving of expressions \NC \NR +\NC xml.profile \NC report all parsed lpath expressions (in the log) \NC \NR +\NC xml.remap \NC show what namespaces are remapped \NC \NR +\NC lxml.access \NC report errors with respect to resolving (symbolic) nodes \NC \NR +\NC lxml.comments \NC show the comments that are encountered (if at all) \NC \NR +\NC lxml.loading \NC show what files are loaded and converted \NC \NR +\NC lxml.setups \NC show what setups are being associated to elements \NC \NR +\stoptabulate + +In one of our workflows we produce books from \XML\ where the (educational) +content is organized in many small files. Each book has about 5~chapters and each +chapter is made of sections that contain text, exercises, resources, etc.\ and so +the document is assembled from thousands of files (don't worry, runtime inclusion +is pretty fast). In order to see where in the sources content resides we can +trace the filename. + +\startxmlcmd {\cmdbasicsetup{xmlinclusion}} + returns the file where the node comes from +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlinclusions}} + returns the list of files where the node comes from +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlbadinclusions}} + returns a list of files that were not included due to some problem +\stopxmlcmd + +Of course you have to make sure that these names end up somewhere visible, for +instance in the margin. + +\stopsection + +\startsection[title={expansion}] + +For novice users the concept of expansion might sound frightening and to some +extend it is. However, it is important enough to spend some words on it here. + +It is good to realize that most setups are sort of immediate. 
When one setup is +issued, it can call another one and so on. Normally you won't notice that but +there are cases where that can be a problem. In \TEX\ you can define a macro, +take for instance: + +\starttyping +\startxmlsetups xml:foo + \def\foobar{\xmlfirst{#1}{/bar}} +\stopxmlsetups +\stoptyping + +you store the reference top node \type {bar} in \type {\foobar} maybe for later use. In +this case the content is not yet fetched, it will be done when \type {\foobar} is +called. + +\starttyping +\startxmlsetups xml:foo + \edef\foobar{\xmlfirst{#1}{/bar}} +\stopxmlsetups +\stoptyping + +Here the content of \type {bar} becomes the body of the macro. But what if +\type {bar} itself contains elements that also contain elements. When there +is a setup for \type {bar} it will be triggered and so on. + +When that setup looks like: + +\starttyping +\startxmlsetups xml:bar + \def\barfoo{\xmlflush{#1}} +\stopxmlsetups +\stoptyping + +Here we get something like: + +\starttyping +\foobar => {\def\barfoo{...}} +\stoptyping + +When \type {\barfoo} is not defined we get an error and when it is known and expands +to something weird we might also get an error. + +Especially when you don't know what content can show up, this can result in errors +when an expansion fails, for example because some macro being used is not defined. +To prevent this we can define a macro: + +\starttyping +\starttexdefinition unexpanded xml:bar:macro #1 + \def\barfoo{\xmlflush{#1}} +\stoptexdefinition + +\startxmlsetups xml:bar + \texdefinition{xml:bar:macro}{#1} +\stopxmlsetups +\stoptyping + +The setup \type {xml:bar} will still expand but the replacement text now is just the +call to the macro, think of: + +\starttyping +\foobar => {\texdefinition{xml:bar:macro}{#1}} +\stoptyping + +But this is often not needed, most \CONTEXT\ commands can handle the expansions +quite well but it's good to know that there is a way out. So, now to some +examples. Imagine that we have an \XML\ file that looks as follows: + +\starttyping +<?xml version='1.0' ?> +<demo> + <chapter> + <title>Some <em>short</em> title</title> + <content> + zeta + <index> + <key>zeta</key> + <content>zeta again</content> + </index> + alpha + <index> + <key>alpha</key> + <content>alpha <em>again</em></content> + </index> + gamma + <index> + <key>gamma</key> + <content>gamma</content> + </index> + beta + <index> + <key>beta</key> + <content>beta</content> + </index> + delta + <index> + <key>delta</key> + <content>delta</content> + </index> + done! + </content> + </chapter> +</demo> +\stoptyping + +There are a few structure related elements here: a chapter (with its list entry) +and some index entries. Both are multipass related and therefore travel around. +This means that when we let data end up in the auxiliary file, we need to make +sure that we end up with either expanded data (i.e.\ no references to the \XML\ +tree) or with robust forward and backward references to elements in the tree. + +Here we discuss three approaches (and more may show up later): pushing \XML\ into +the auxiliary file and using references to elements either or not with an +associated setup. We control the variants with a switch. 
+ +\starttyping +\newcount\TestMode + +\TestMode=0 % expansion=xml +\TestMode=1 % expansion=yes, index, setup +\TestMode=2 % expansion=yes +\stoptyping + +We apply a couple of setups: + +\starttyping +\startxmlsetups xml:mysetups + \xmlsetsetup{\xmldocument}{demo|index|content|chapter|title|em}{xml:*} +\stopxmlsetups + +\xmlregistersetup{xml:mysetups} +\stoptyping + +The main document is processed with: + +\starttyping +\startxmlsetups xml:demo + \xmlflush{#1} + \subject{contents} + \placelist[chapter][criterium=all] + \subject{index} + \placeregister[index][criterium=all] + \page % else buffer is forgotten when placing header +\stopxmlsetups +\stoptyping + +First we show three alternative ways to deal with the chapter. The first case +expands the \XML\ reference so that we have an \XML\ stream in the auxiliary +file. This stream is processed as a small independent subfile when needed. The +second case registers a reference to the current element (\type {#1}). This means +that we have access to all data of this element, like attributes, title and +content. What happens depends on the given setup. The third variant does the same +but here the setup is part of the reference. + +\starttyping +\startxmlsetups xml:chapter + \ifcase \TestMode + % xml code travels around + \setuphead[chapter][expansion=xml] + \startchapter[title=eh: \xmltext{#1}{title}] + \xmlfirst{#1}{content} + \stopchapter + \or + % index is used for access via setup + \setuphead[chapter][expansion=yes,xmlsetup=xml:title:flush] + \startchapter[title=\xmlgetindex{#1}] + \xmlfirst{#1}{content} + \stopchapter + \or + % tex call to xml using index is used + \setuphead[chapter][expansion=yes] + \startchapter[title=hm: \xmlreference{#1}{xml:title:flush}] + \xmlfirst{#1}{content} + \stopchapter + \fi +\stopxmlsetups + +\startxmlsetups xml:title:flush + \xmltext{#1}{title} +\stopxmlsetups +\stoptyping + +We need to deal with emphasis and the content of the chapter. + +\starttyping +\startxmlsetups xml:em + \begingroup\em\xmlflush{#1}\endgroup +\stopxmlsetups + +\startxmlsetups xml:content + \xmlflush{#1} +\stopxmlsetups +\stoptyping + +A similar approach is followed with the index entries. Watch how we use the +numbered entries variant (in this case we could also have used just \type +{entries} and \type {keys}). + +\starttyping +\startxmlsetups xml:index + \ifcase \TestMode + \setupregister[index][expansion=xml,xmlsetup=] + \setstructurepageregister + [index] + [entries:1=\xmlfirst{#1}{content}, + keys:1=\xmltext{#1}{key}] + \or + \setupregister[index][expansion=yes,xmlsetup=xml:index:flush] + \setstructurepageregister + [index] + [entries:1=\xmlgetindex{#1}, + keys:1=\xmltext{#1}{key}] + \or + \setupregister[index][expansion=yes,xmlsetup=] + \setstructurepageregister + [index] + [entries:1=\xmlreference{#1}{xml:index:flush}, + keys:1=\xmltext{#1}{key}] + \fi +\stopxmlsetups + +\startxmlsetups xml:index:flush + \xmlfirst{#1}{content} +\stopxmlsetups +\stoptyping + +Instead of this flush, you can use the predefined setup \type {xml:flush} +unless it is overloaded by you. + +The file is processed by: + +\starttyping +\starttext + \xmlprocessfile{main}{test.xml}{} +\stoptext +\stoptyping + +We don't show the result here. If you're curious what the output is, you can test +it yourself. In that case it also makes sense to peek into the \type {test.tuc} +file to see how the information travels around. The \type {metadata} fields carry +information about how to process the data. 
+ +The first case, the \XML\ expansion one, is somewhat special in the sense that +internally we use small pseudo files. You can control the rendering by tweaking +the following setups: + +\starttyping +\startxmlsetups xml:ctx:sectionentry + \xmlflush{#1} +\stopxmlsetups + +\startxmlsetups xml:ctx:registerentry + \xmlflush{#1} +\stopxmlsetups +\stoptyping + +{\em When these methods work out okay the other structural elements will be +dealt with in a similar way.} + +\stopsection + +\startsection[title={special cases}] + +Normally the content will be flushed under a special (so called) catcode regime. +This means that characters that have a special meaning in \TEX\ will have no such +meaning in an \XML\ file. If you want content to be treated as \TEX\ code, you can +use one of the following: + +\startxmlcmd {\cmdbasicsetup{xmlflushcontext}} + flush the given \cmdinternal {cd:node} using the \TEX\ character + interpretation scheme +\stopxmlcmd + +\startxmlcmd {\cmdbasicsetup{xmlcontext}} + flush the match of \cmdinternal {cd:lpath} for the given \cmdinternal + {cd:node} using the \TEX\ character interpretation scheme +\stopxmlcmd + +We use this in cases like: + +\starttyping +.... + \xmlsetsetup {#1} { + tm|texformula| + } {xml:*} +.... + +\startxmlsetups xml:tm + \mathematics{\xmlflushcontext{#1}} +\stopxmlsetups + +\startxmlsetups xml:texformula + \placeformula\startformula\xmlflushcontext{#1}\stopformula +\stopxmlsetups +\stoptyping + +\stopsection + +\startsection[title={collecting}] + +Say that your document has + +\starttyping +<table> + <tr> + <td>foo</td> + <td>bar<td> + </tr> +</table> +\stoptyping + +And that you need to convert that to \TEX\ speak like: + +\starttyping +\bTABLE + \bTR + \bTD foo \eTD + \bTD bar \eTD + \eTR +\eTABLE +\stoptyping + +A simple mapping is: + +\starttyping +\startxmlsetups xml:table + \bTABLE \xmlflush{#1} \eTABLE +\stopxmlsetups +\startxmlsetups xml:tr + \bTR \xmlflush{#1} \eTR +\stopxmlsetups +\startxmlsetups xml:td + \bTD \xmlflush{#1} \eTD +\stopxmlsetups +\stoptyping + +The \type {\bTD} command is a so called delimited command which means that it +picks up its argument by looking for an \type {\eTD}. For the simple case here +this works quite well because the flush is inside the pair. This is not the case +in the following variant: + +\starttyping +\startxmlsetups xml:td:start + \bTD +\stopxmlsetups +\startxmlsetups xml:td:stop + \eTD +\stopxmlsetups +\startxmlsetups xml:td + \xmlsetup{#1}{xml:td:start} + \xmlflush{#1} + \xmlsetup{#1}{xml:td:stop} +\stopxmlsetups +\stoptyping + +When for some reason \TEX\ gets confused you can revert to a mechanism that +collects content. + +\starttyping +\startxmlsetups xml:td:start + \startcollect + \bTD + \stopcollect +\stopxmlsetups +\startxmlsetups xml:td:stop + \startcollect + \eTD + \stopcollect +\stopxmlsetups +\startxmlsetups xml:td + \startcollecting + \xmlsetup{#1}{xml:td:start} + \xmlflush{#1} + \xmlsetup{#1}{xml:td:stop} + \stopcollecting +\stopxmlsetups +\stoptyping + +You can even implement solutions that effectively do this: + +\starttyping +\startcollecting + \startcollect \bTABLE \stopcollect + \startcollect \bTR \stopcollect + \startcollect \bTD \stopcollect + \startcollect foo\stopcollect + \startcollect \eTD \stopcollect + \startcollect \bTD \stopcollect + \startcollect bar\stopcollect + \startcollect \eTD \stopcollect + \startcollect \eTR \stopcollect + \startcollect \eTABLE \stopcollect +\stopcollecting +\stoptyping + +Of course you only need to go that complex when the situation demands it. 
Here is +another weird one: + +\starttyping +\startcollecting + \startcollect \setupsomething[\stopcollect + \startcollect foo=\stopcollect + \startcollect FOO,\stopcollect + \startcollect bar=\stopcollect + \startcollect BAR,\stopcollect + \startcollect ]\stopcollect +\stopcollecting +\stoptyping + +\stopsection + +\startsection[title={selectors and injectors}] + +This section describes a bit special feature, one that we needed for a project +where we could not touch the original content but could add specific sections for +our own purpose. Hopefully the example demonstrates its useability. + +\enabletrackers[lxml.selectors] + +\startbuffer[foo] +<?xml version="1.0" encoding="UTF-8"?> + +<?context-directive message info 1: this is a demo file ?> +<?context-message-directive info 2: this is a demo file ?> + +<one> + <two> + <?context-select begin t1 t2 t3 ?> + <three> + t1 t2 t3 + <?context-directive injector crlf t1 ?> + t1 t2 t3 + </three> + <?context-select end ?> + <?context-select begin t4 ?> + <four> + t4 + </four> + <?context-select end ?> + <?context-select begin t8 ?> + <four> + t8.0 + t8.0 + </four> + <?context-select end ?> + <?context-include begin t4 ?> + <!-- + <three> + t4.t3 + <?context-directive injector crlf t1 ?> + t4.t3 + </three> + --> + <three> + t3 + <?context-directive injector crlf t1 ?> + t3 + </three> + <?context-include end ?> + <?context-select begin t8 ?> + <four> + t8.1 + t8.1 + </four> + <?context-select end ?> + <?context-select begin t8 ?> + <four> + t8.2 + t8.2 + </four> + <?context-select end ?> + <?context-select begin t4 ?> + <four> + t4 + t4 + </four> + <?context-select end ?> + <?context-directive injector page t7 t8 ?> + foo + <?context-directive injector blank t1 ?> + bar + <?context-directive injector page t7 t8 ?> + bar + </two> +</one> +\stopbuffer + +\typebuffer[foo] + +First we show how to plug in a directive. Processing instructions like the +following are normally ignored by an \XML\ processor, unless they make sense +to it. + +\starttyping +<?context-directive message info 1: this is a demo file ?> +<?context-message-directive info 2: this is a demo file ?> +\stoptyping + +We can define a message handler as follows: + +\startbuffer +\def\MyMessage#1#2#3{\writestatus{#1}{#2 #3}} + +\xmlinstalldirective{message}{MyMessage} +\stopbuffer + +\typebuffer \getbuffer + +When this file is processed you will see this on the console: + +\starttyping +info > 1: this is a demo file +info > 2: this is a demo file +\stoptyping + +The file has some sections that can be used or ignored. The recipe for +obeying \type {t1} and \type {t4} is the following: + +\startbuffer +\xmlsetinjectors[t1] +\xmlsetinjectors[t4] + +\startxmlsetups xml:initialize + \xmlapplyselectors{#1} + \xmlsetsetup {#1} { + one|two|three|four + } {xml:*} +\stopxmlsetups + +\xmlregistersetup{xml:initialize} + +\startxmlsetups xml:one + [ONE \xmlflush{#1} ONE] +\stopxmlsetups + +\startxmlsetups xml:two + [TWO \xmlflush{#1} TWO] +\stopxmlsetups + +\startxmlsetups xml:three + [THREE \xmlflush{#1} THREE] +\stopxmlsetups + +\startxmlsetups xml:four + [FOUR \xmlflush{#1} FOUR] +\stopxmlsetups +\stopbuffer + +\typebuffer \getbuffer + +This typesets: + +\startnarrower +\xmlprocessbuffer{main}{foo}{} +\stopnarrower + +The include coding is kind of special: it permits adding content (in a comment) +and ignoring the rest so that we indeed can add something without interfering +with the original. 
Of course in a normal workflow such messy solutions are
+not needed, but alas, often workflows are not that clean, especially when one
+has no real control over the source.
+
+\startxmlcmd {\cmdbasicsetup{xmlsetinjectors}}
+    enables a list of injectors that will be used
+\stopxmlcmd
+
+\startxmlcmd {\cmdbasicsetup{xmlresetinjectors}}
+    resets the list of injectors
+\stopxmlcmd
+
+\startxmlcmd {\cmdbasicsetup{xmlinjector}}
+    expands an injection (command); normally this one is only used
+    (in some setup) or for testing
+\stopxmlcmd
+
+\startxmlcmd {\cmdbasicsetup{xmlapplyselectors}}
+    analyzes the tree \cmdinternal {cd:node} for marked sections that
+    will be injected
+\stopxmlcmd
+
+We have some injections predefined:
+
+\starttyping
+\startsetups xml:directive:injector:page
+    \page
+\stopsetups
+
+\startsetups xml:directive:injector:column
+    \column
+\stopsetups
+
+\startsetups xml:directive:injector:blank
+    \blank
+\stopsetups
+\stoptyping
+
+In the example we see:
+
+\starttyping
+<?context-directive injector page t7 t8 ?>
+\stoptyping
+
+When we set \type {\xmlsetinjector[t7]} a pagebreak will be injected in that spot.
+Tags like \type {t7}, \type {t8} etc.\ can represent versions.
+
+\stopsection
+
+\startsection[title=preprocessing]
+
+% local match    = lpeg.match
+% local replacer = lpeg.replacer("BAD TITLE:","<bold>BAD TITLE:</bold>")
+%
+% function lxml.preprocessor(data,settings)
+%     return match(replacer,data)
+% end
+
+\startbuffer[pre-code]
+\startluacode
+    function lxml.preprocessor(data,settings)
+        return string.find(data,"BAD TITLE:")
+           and string.gsub(data,"BAD TITLE:","<bold>BAD TITLE:</bold>")
+            or data
+    end
+\stopluacode
+\stopbuffer
+
+\startbuffer[pre-xml]
+\startxmlsetups pre:demo:initialize
+    \xmlsetsetup{#1}{*}{pre:demo:*}
+\stopxmlsetups
+
+\xmlregisterdocumentsetup{pre:demo}{pre:demo:initialize}
+
+\startxmlsetups pre:demo:root
+    \xmlflush{#1}
+\stopxmlsetups
+
+\startxmlsetups pre:demo:bold
+    \begingroup\bf\xmlflush{#1}\endgroup
+\stopxmlsetups
+
+\starttext
+    \xmlprocessbuffer{pre:demo}{demo}{}
+\stoptext
+\stopbuffer
+
+Say that you have the following \XML\ setup:
+
+\typebuffer[pre-xml]
+
+and that (such things happen) the input looks like this:
+
+\startbuffer[demo]
+<root>
+BAD TITLE: crap crap crap ...
+
+BAD TITLE: crap crap crap ...
+</root>
+\stopbuffer
+
+\typebuffer[demo]
+
+You can then clean up these \type {BAD TITLE}'s as follows:
+
+\typebuffer[pre-code]
+
+and get as result:
+
+\start \getbuffer[pre-code,pre-xml] \stop
+
+The preprocessor function gets as second argument the current settings, and
+the field \type {currentresource} can be used to limit the actions to
+specific resources, in our case it's \type {buffer: demo}. Afterwards you can
+reset the preprocessor with:
+
+\startluacode
+lxml.preprocessor = nil
+\stopluacode
+
+Future versions might give some more control over preprocessors. For now consider
+it to be a quick hack.
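+
+As mentioned, the \type {currentresource} field in the settings can be used to
+restrict such a cleanup to one resource. A minimal sketch (assuming the \type
+{demo} buffer from the example above, where that field has the value \type
+{buffer: demo}) could look like this:
+
+\starttyping
+\startluacode
+    -- a sketch: only clean up the "demo" buffer, pass other resources through
+    function lxml.preprocessor(data,settings)
+        if settings.currentresource == "buffer: demo" then
+            local cleaned = string.gsub(data,"BAD TITLE:","<bold>BAD TITLE:</bold>")
+            return cleaned
+        else
+            return data
+        end
+    end
+\stopluacode
+\stoptyping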
+ +\stopsection + +\stopchapter + +\stopcomponent diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv.tex index 8550badecb3..77054e79c53 100644 --- a/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv.tex +++ b/Master/texmf-dist/doc/context/sources/general/manuals/xml/xml-mkiv.tex @@ -31,4368 +31,25 @@ % % \xmldirect -\input lxml-ctx.mkiv - -\settrue \xmllshowtitle -\setfalse\xmllshowwarning - -\usemodule[set-11] - -\loadsetups[i-context] - -% \definehspace[squad][1em plus .25em minus .25em] - -\usemodule[abr-02] - -\setuplayout - [location=middle, - marking=on, - backspace=20mm, - cutspace=20mm, - topspace=15mm, - header=15mm, - footer=15mm, - height=middle, - width=middle] - -\setuppagenumbering - [alternative=doublesided, - location=] - -\setupfootertexts - [][pagenumber] - -\setupheadertexts - [][chapter] - -\setupheader - [color=colortwo, - style=bold] - -\setupfooter - [color=colortwo, - style=bold] - -\setuphead - [chapter] - [page={yes,header,right}, - header=empty, - style=\bfc] - -\setupsectionblock - [page={yes,header,right}] - -\starttexdefinition unexpanded section:chapter:number #1 - \doifmode{*sectionnumber} { - \bf - \llap{<\enspace}#1\enspace> - } -\stoptexdefinition - -\starttexdefinition unexpanded section:section:number #1 - \doifmode{*sectionnumber} { - \bf - \llap{<<\enspace}#1\enspace>> - } -\stoptexdefinition - -\starttexdefinition unexpanded section:subsection:number #1 - \doifmode{*sectionnumber} { - \bf - \llap{<<<\enspace}#1\enspace>>> - } -\stoptexdefinition - -\setuphead[chapter] [numbercolor=black,numbercommand=\texdefinition{section:chapter:number}] -\setuphead[section] [numbercolor=black,numbercommand=\texdefinition{section:section:number}] -\setuphead[subsection][numbercolor=black,numbercommand=\texdefinition{section:subsection:number}] - -\setuphead - [section] - [style=\bfa] - -\setuplist - [chapter] - [style=bold] - -\setupinteractionscreen - [option=doublesided] - -\setupalign - [tolerant,stretch] - -\setupwhitespace - [big] - -\setuptolerance - [tolerant] - -\doifelsemode {atpragma} { - \setupbodyfont[lucidaot,10pt] -} { - \setupbodyfont[dejavu,10pt] -} - -\definecolor[colorone] [b=.5] -\definecolor[colortwo] [s=.3] -\definecolor[colorthree][y=.5] - -\setuptype - [color=colorone] - -\setuptyping - [color=colorone] - -\setuphead - [lshowtitle] - [style=\tt, - color=colorone] - -\setuphead - [chapter,section] - [numbercolor=colortwo, - color=colorone] - -\definedescription - [xmlcmd] - [alternative=hanging, - width=line, - distance=1em, - margin=2em, - headstyle=monobold, - headcolor=colorone] - -\setupframedtext - [setuptext] - [framecolor=colorone, - rulethickness=1pt, - corner=round] - -\usemodule[punk] - -\usetypescript[punk] - -\definelayer - [page] - [width=\paperwidth, - height=\paperheight] +\environment xml-mkiv-style \starttext -\setuplayout[page] - -\startstandardmakeup - \startfontclass[none] % nil the current fontclass since it may append its features - \EnableRandomPunk - \setlayerframed - [page] - [width=\paperwidth,height=\paperheight, - background=color,backgroundcolor=colorone,backgroundoffset=1ex,frame=off] - {} - \definedfont[demo@punk at 18pt] - \setbox\scratchbox\vbox { - \hsize\dimexpr\paperwidth+2ex\relax - \setupinterlinespace - \baselineskip 1\baselineskip plus 1pt minus 1pt - \raggedcenter - \color[colortwo]{\dorecurse{1000}{XML }} - } - \setlayer - [page] - [preset=middle] - {\vsplit\scratchbox to 
\dimexpr\paperheight+2ex\relax} - \definedfont[demo@punk at 90pt] - \setstrut - \setlayerframed - [page] - [preset=rightbottom,offset=10mm] - [foregroundcolor=colorthree,align=flushright,offset=overlay,frame=off] - {Dealing\\with XML in\\Con\TeX t MkIV} - \definedfont[demo@punk at 18pt] - \setstrut - \setlayerframed - [page] - [preset=righttop,offset=10mm,x=3mm,rotation=90] - [foregroundcolor=colorthree,align=flushright,offset=overlay,frame=off] - {Hans Hagen, Pragma ADE, \currentdate} - \tightlayer[page] - \stopfontclass -\stopstandardmakeup - -\setuplayout +\component xml-mkiv-titlepage \startfrontmatter - -\starttitle[title=Contents] - -\placelist - [chapter,section] - -\stoptitle - -\startchapter[title={Introduction}] - -This manual presents the \MKIV\ way of dealing with \XML. Although the -traditional \MKII\ streaming parser has a charming simplicity in its control, for -complex documents the tree based \MKIV\ method is more convenient. It is for this -reason that the old method has been removed from \MKIV. If you are familiar with -\XML\ processing in \MKII, then you will have noticed that the \MKII\ commands -have \type {XML} in their name. The \MKIV\ commands have a lowercase \type {xml} -in their names. That way there is no danger for confusion or a mixup. - -You may wonder why we do these manipulations in \TEX\ and not use \XSLT\ (or -other transformation methods) instead. The advantage of an integrated approach is -that it simplifies usage. Think of not only processing the document, but also -using \XML\ for managing resources in the same run. An \XSLT\ approach is just as -verbose (after all, you still need to produce \TEX\ code) and probably less -readable. In the case of \MKIV\ the integrated approach is also faster and gives -us the option to manipulate content at runtime using \LUA. It has the additional -advantage that to some extend we can handle a mix of \TEX\ and \XML\ because we -know when we're doing one or the other. - -This manual is dedicated to Taco Hoekwater, one of the first \CONTEXT\ users, and -also the first to use it for processing \XML. Who could have thought at that time -that we would have a more convenient way of dealing with those angle brackets. -The second version for this manual is dedicated to Thomas Schmitz, a power user -who occasionally became victim of the evolving mechanisms. - -\blank - -\startlines -Hans Hagen -\PRAGMA -Hasselt NL -2008\endash2016 -\stoplines - -\stopchapter - + \component xml-mkiv-contents + \component xml-mkiv-introduction \stopfrontmatter \startbodymatter - -\startchapter[title={Setting up a converter}] - -\startsection[title={from structure to setup}] - -We use a very simple document structure for demonstrating how a converter is -defined. In practice a mapping will be more complex, especially when we have a -style with complex chapter openings using data coming from all kind of places, -different styling of sections with the same name, selectively (out of order) -flushed content, special formatting, etc. 
- -\typefile{manual-demo-1.xml} - -Say that this document is stored in the file \type {demo.xml}, then the following -code can be used as starting point: - -\starttyping -\startxmlsetups xml:demo:base - \xmlsetsetup{#1}{document|section|p}{xml:demo:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{demo}{xml:demo:base} - -\startxmlsetups xml:demo:document - \starttitle[title={Contents}] - \placelist[chapter] - \stoptitle - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:demo:section - \startchapter[title=\xmlfirst{#1}{/title}] - \xmlfirst{#1}{/content} - \stopchapter -\stopxmlsetups - -\startxmlsetups xml:demo:p - \xmlflush{#1}\endgraf -\stopxmlsetups - -\xmlprocessfile{demo}{demo.xml}{} -\stoptyping - -Watch out! These are not just setups, but specific \XML\ setups which get an -argument passed (the \type {#1}). If for some reason your \XML\ processing fails, -it might be that you mistakenly have used a normal setup definition. The argument -\type {#1} represents the current node (element) and is a unique identifier. For -instance a \type {<p>..</p>} can have an identifier {demo::5}. So, we can get -something: - -\starttyping -\xmlflush{demo::5}\endgraf -\stoptyping - -but as well: - -\starttyping -\xmlflush{demo::6}\endgraf -\stoptyping - -Keep in mind that the references tor the actual nodes (elements) are -abstractions, you never see those \type {<id>::<number>}'s, because we will use -either the abstract \type {#1} (any node) or an explicit reference like \type -{demo}. The previous setup when issued will be like: - -\starttyping -\startchapter[title=\xmlfirst{demo::3}{/title}] - \xmlfirst{demo::4}{/content} -\stopchapter -\stoptyping - -Here the \type {title} is used to typeset the chapter title but also for an entry -in the table of contents. At the moment the title is typeset the \XML\ node gets -looked up and expanded in real text. However, for the list it gets stored for -later use. One can argue that this is not needed for \XML, because one can just -filter all the titles and use page references, but then one also looses the -control one normally has over such titles. For instance it can be that some -titles are rendered differently and for that we need to keep track of usage. -Doing that with transformations or filtering is often more complex than leaving -that to \TEX. As soon as the list gets typeset, the reference (\type {demo::#3}) -is used for the lookup. This is because by default the title is stored as given. -So, as long as we make sure the \XML\ source is loaded before the table of -contents is typeset we're ok. Later we will look into this in more detail, for -now it's enough to know that in most cases the abstract \type {#1} reference will -work out ok. - -Contrary to the style definitions this interface looks rather low level (with no -optional arguments) and the main reason for this is that we want processing to be -fast. So, the basic framework is: - -\starttyping -\startxmlsetups xml:demo:base - % associate setups with elements -\stopxmlsetups - -\xmlregisterdocumentsetup{demo}{xml:demo:base} - -% define setups for matches - -\xmlprocessfile{demo}{demo.xml}{} -\stoptyping - -In this example we mostly just flush the content of an element and in the case of -a section we flush explicit child elements. The \type {#1} in the example code -represents the current element. The line: - -\starttyping -\xmlsetsetup{demo}{*}{-} -\stoptyping - -sets the default for each element to \quote {just ignore it}. A \type {+} would -make the default to always flush the content. 
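For completeness, a small sketch of the two defaults side by side (using the
same demo identifier as above):

\starttyping
\xmlsetsetup{demo}{*}{-}   % unhandled elements are ignored
\xmlsetsetup{demo}{*}{+}   % unhandled elements are flushed anyway
\stoptyping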
This means that at this point we -only handle: - -\starttyping -<section> - <title>Some title</title> - <content> - <p>a paragraph of text</p> - </content> -</section> -\stoptyping - -In the next section we will deal with the slightly more complex itemize and -figure placement. At first sight all these setups may look overkill but keep in -mind that normally the number of elements is rather limited. The complexity is -often in the style and having access to each snippet of content is actually -quite handy for that. - -\stopsection - -\startsection[title={alternative solutions}] - -Dealing with an itemize is rather simple (as long as we forget about -attributes that control the behaviour): - -\starttyping -<itemize> - <item>first</item> - <item>second</item> -</itemize> -\stoptyping - -First we need to add \type {itemize} to the setup assignment (unless we've used -the wildcard \type {*}): - -\starttyping -\xmlsetsetup{demo}{document|section|p|itemize}{xml:demo:*} -\stoptyping - -The setup can look like: - -\starttyping -\startxmlsetups xml:demo:itemize - \startitemize - \xmlfilter{#1}{/item/command(xml:demo:itemize:item)} - \stopitemize -\stopxmlsetups - -\startxmlsetups xml:demo:itemize:item - \startitem - \xmlflush{#1} - \stopitem -\stopxmlsetups -\stoptyping - -An alternative is to map item directly: - -\starttyping -\xmlsetsetup{demo}{document|section|p|itemize|item}{xml:demo:*} -\stoptyping - -and use: - -\starttyping -\startxmlsetups xml:demo:itemize - \startitemize - \xmlflush{#1} - \stopitemize -\stopxmlsetups - -\startxmlsetups xml:demo:item - \startitem - \xmlflush{#1} - \stopitem -\stopxmlsetups -\stoptyping - -Sometimes, a more local solution using filters and \type {/command(...)} makes more -sense, especially when the \type {item} tag is used for other purposes as well. - -Explicit flushing with \type {command} is definitely the way to go when you have -complex products. In one of our projects we compose math school books from many -thousands of small \XML\ files, and from one source set several products are -typeset. Within a book sections get done differently, content gets used, ignored -or interpreted differently depending on the kind of content, so there is a -constant checking of attributes that drive the rendering. In that a generic setup -for a title element makes less sense than explicit ones for each case. (We're -talking of huge amounts of files here, including multiple images on each rendered -page.) - -When using \type {command} you can pass two arguments, the first is the setup for -the match, the second one for the miss, as in: - -\starttyping -\xmlfilter{#1}{/element/command(xml:true,xml:false)} -\stoptyping - -Back to the example, this leaves us with dealing with the resources, like -figures: - -\starttyping -<resource type='figure'> - <caption>A picture of a cow.</caption> - <content><external file="cow.pdf"/></content> -</resource> -\stoptyping - -Here we can use a more restricted match: - -\starttyping -\xmlsetsetup{demo}{resource[@type='figure']}{xml:demo:figure} -\xmlsetsetup{demo}{external}{xml:demo:*} -\stoptyping - -and the definitions: - -\starttyping -\startxmlsetups xml:demo:figure - \placefigure - {\xmlfirst{#1}{/caption}} - {\xmlfirst{#1}{/content}} -\stopxmlsetups - -\startxmlsetups xml:demo:external - \externalfigure[\xmlatt{#1}{file}] -\stopxmlsetups -\stoptyping - -At this point it is good to notice that \type {\xmlatt{#1}{file}} is passed as it -is: a macro call. 
This means that when a macro like \type {\externalfigure} uses -the first argument frequently without first storing its value, the lookup is done -several times. A solution for this is: - -\starttyping -\startxmlsetups xml:demo:external - \expanded{\externalfigure[\xmlatt{#1}{file}]} -\stopxmlsetups -\stoptyping - -Because the lookup is rather fast, normally there is no need to bother about this -too much because internally \CONTEXT\ already makes sure such expansion happens -only once. - -An alternative definition for placement is the following: - -\starttyping -\xmlsetsetup{demo}{resource}{xml:demo:resource} -\stoptyping - -with: - -\starttyping -\startxmlsetups xml:demo:resource - \placefloat - [\xmlatt{#1}{type}] - {\xmlfirst{#1}{/caption}} - {\xmlfirst{#1}{/content}} -\stopxmlsetups -\stoptyping - -This way you can specify \type {table} as type too. Because you can define your -own float types, more complex variants are also possible. In that case it makes -sense to provide some default behaviour too: - -\starttyping -\definefloat[figure-here][figure][default=here] -\definefloat[figure-left][figure][default=left] -\definefloat[table-here] [table] [default=here] -\definefloat[table-left] [table] [default=left] - -\startxmlsetups xml:demo:resource - \placefloat - [\xmlattdef{#1}{type}{figure}-\xmlattdef{#1}{location}{here}] - {\xmlfirst{#1}{/caption}} - {\xmlfirst{#1}{/content}} -\stopxmlsetups -\stoptyping - -In this example we support two types and two locations. We default to a figure -placed (when possible) at the current location. - -\stopsection - -\stopchapter - -\startchapter[title={Filtering content}] - -\startsection[title={\TEX\ versus \LUA}] - -It will not come as a surprise that we can access \XML\ files from \TEX\ as well -as from \LUA. In fact there are two methods to deal with \XML\ in \LUA. First -there are the low level \XML\ functions in the \type {xml} namespace. On top of -those functions there is a set of functions in the \type {lxml} namespace that -deals with \XML\ in a more \TEX ie way. Most of these have similar commands at -the \TEX\ end. - -\startbuffer -\startxmlsetups first:demo:one - \xmlfilter {#1} {artist/name[text()='Randy Newman']/.. - /albums/album[position()=3]/command(first:demo:two)} -\stopxmlsetups - -\startxmlsetups first:demo:two - \blank \start \tt - \xmldisplayverbatim{#1} - \stop \blank -\stopxmlsetups - -\xmlprocessfile{demo}{music-collection.xml}{first:demo:one} -\stopbuffer - -\typebuffer - -This gives the following snippet of verbatim \XML\ code. The indentation is -conform the indentation in the whole \XML\ file. \footnote {The (probably -outdated) \XML\ file contains the collection stores on my slimserver instance. 
-You can use the \type {mtxrun --script flac} to generate such files.} - -\doifmodeelse {atpragma} { - \getbuffer -} { - \typefile{xml-mkiv-01.xml} -} - -An alternative written in \LUA\ looks as follows: - -\startbuffer -\blank \start \tt \startluacode - local m = lxml.load("mine","music-collection.xml") -- m == lxml.id("mine") - local p = "artist/name[text()='Randy Newman']/../albums/album[position()=4]" - local l = lxml.filter(m,p) -- returns a list (with one entry) - lxml.displayverbatim(l[1]) -\stopluacode \stop \blank -\stopbuffer - -\typebuffer - -This produces: - -\doifmodeelse {atpragma} { - \getbuffer -} { - \typefile{xml-mkiv-02.xml} -} - -You can use both methods mixed but in practice we will use the \TEX\ commands in -regular styles and the mixture in modules, for instance in those dealing with -\MATHML\ and cals tables. For complex matters you can write your own finalizers -(the last action to be taken in a match) in \LUA\ and use them at the \TEX\ end. - -\stopsection - -\startsection[title={a few details}] - -In \CONTEXT\ setups are a rather common variant on macros (\TEX\ commands) but -with their own namespace. An example of a setup is: - -\starttyping -\startsetup doc:print - \setuppapersize[A4][A4] -\stopsetup - -\startsetup doc:screen - \setuppapersize[S6][S4] -\stopsetup -\stoptyping - -Given the previous definitions, later on we can say something like: - -\starttyping -\doifmodeelse {paper} { - \setup[doc:print] -} { - \setup[doc:screen] -} -\stoptyping - -Another example is: - -\starttyping -\startsetup[doc:header] - \marking[chapter] - \space - -- - \space - \pagenumber -\stopsetup -\stoptyping - -in combination with: - -\starttyping -\setupheadertexts[\setup{doc:header}] -\stoptyping - -Here the advantage is that instead of ending up with an unreadable header -definitions, we use a nicely formatted setup. An important property of setups and -the reason why they were introduced long ago is that spaces and newlines are -ignored in the definition. This means that we don't have to worry about so called -spurious spaces but it also means that when we do want a space, we have to use -the \type {\space} command. - -The only difference between setups and \XML\ setups is that the following ones -get an argument (\type {#1}) that reflects the current node in the \XML\ tree. - -\stopsection - -\startsection[title={CDATA}] - -What to do with \type {CDATA}? There are a few methods at tle \LUA\ end for -dealing with it but here we just mention how you can influence the rendering. -There are four macros that play a role here: - -\starttyping -\unexpanded\def\xmlcdataobeyedline {\obeyedline} -\unexpanded\def\xmlcdataobeyedspace{\strut\obeyedspace} -\unexpanded\def\xmlcdatabefore {\begingroup\tt} -\unexpanded\def\xmlcdataafter {\endgroup} -\stoptyping - -Technically you can overload them but beware of side effects. Normally you won't -see much \type {CDATA} and whenever we do, it involves special data that needs -very special treatment anyway. - -\stopsection - -\startsection[title={Entities}] - -As usual with any way of encoding documents you need escapes in order to encode -the characters that are used in tagging the content, embedding comments, escaping -special characters in strings (in programming languages), etc. In \XML\ this -means that in order characters like \type {<} you need an escape like \type -{<} and in order then to encode an \type {&} you need \type {&}. - -In a typesetting workflow using a programming language like \TEX, another problem -shows up. 
There we have different special characters, like \type {$ $} for triggering -math, but also the backslash, braces etc. Even one such special character is already -enough to have yet another escaping mechanism at work. - -Ideally a user should not worry about these issues but it helps figuring out issues -when you know what happens under the hood. Also it is good to know that in the -code there are several ways to deal with these issues. Take the following document: - -\starttyping -<text> - Here we have a bit of a <&mess>: - - # # - % % - \ \ - { { - | | - } } - ~ ~ -</text> -\stoptyping - -When the file is read the \type {<} entity will be replaced by \type {<} and -the \type {>} by \type {>}. The numeric entities will be replaced by the -characters they refer to. The \type {&mess} is kind of special. We do preload -a huge list of more or less standardized entities but \type {mess} is not in -there. However, it is possible to have it defined in the document preamble, like: - -\starttyping -<!DOCTYPE dummy SYSTEM "dummy.dtd" [ - <!ENTITY mess "what a mess" > -]> -\stoptyping - -or even this: - -\starttyping -<!DOCTYPE dummy SYSTEM "dummy.dtd" [ - <!ENTITY mess "<p>what a mess</p>" > -]> -\stoptyping - -You can also define it in your document style using one of: - -\startxmlcmd {\cmdbasicsetup{xmlsetentity}} - replaces entity with name \cmdinternal {cd:name} by \cmdinternal {cd:text} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmltexentity}} - replaces entity with name \cmdinternal {cd:name} by \cmdinternal {cd:text} - typeset under a \TEX\ regime -\stopxmlcmd - -Such a definition will always have a higher priority than the one defined -in the document. Anyway, when the document is read in all entities are -resolved and those that need a special treatment because they map to some -text are stored in such a way that we can roundtrip them. As a consequence, -as soon as the content gets pushed into \TEX, we need not only to intercept -special characters but also have to make sure that the following works: - -\starttyping -\xmltexentity {tex} {\TEX} -\stoptyping - -Here the backslash starts a control sequence while in regular content a -backslash is just that: a backslash. - -Special characters are really special when we have to move text around -in a \TEX\ ecosystem. - -\starttyping -<text> - <title>About #3</title> -</text> -\stoptyping - -If we map and define title as follows: - -\starttyping -\startxmlsetup xml:title - \title{\xmlflush{#1}} -\stopxmlsetup -\stoptyping - -normally something \type {\xmlflush {id::123}} will be written to the -auxiliary file and in most cases that is quite okay, but if we have this: - -\starttyping -\setuphead[title][expansion=yes] -\stoptyping - -then we don't want the \type {#} to end up as hash because later on \TEX\ -can get very confused about it because it sees some argument then in a -probably unexpected way. This is solved by escaping the hash like this: - -\starttyping -About \Ux{23}3 -\stoptyping - -The \type {\Ux} command will convert its hexadecimal argument into a -character. Of course one then needs to typeset such a text under a \TEX\ -character regime but that is normally the case anyway. - -\stopsection - -\stopchapter - -\startchapter[title={Commands}] - -\startsection[title={nodes and lpaths}] - -The amount of commands available for manipulating the \XML\ file is rather large. -Many of the commands cooperate with the already discussed setups, a fancy name -for a collection of macro calls either or not mixed with text. 
- -Most of the commands are just shortcuts to \LUA\ calls, which means that the real -work is done by \LUA. In fact, what happens is that we have a continuous transfer -of control from \TEX\ to \LUA, where \LUA\ prints back either data (like element -content or attribute values) or just invokes a setup whereby it passes a -reference to the node resolved conform the path expression. The invoked setup -itself might return control to \LUA\ again, etc. - -This sounds complicated but examples will show what we mean here. First we -present the whole repertoire of commands. Because users can read the source code, -they might uncover more commands, but only the ones discussed here are official. -The commands are grouped in categories. - -In the following sections \cmdinternal {cd:node} means a reference to a node: -this can be the identifier of the root (the loaded xml tree) or a reference to a -node in that tree (often the result of some lookup. A \cmdinternal {cd:lpath} is -a fancy name for a path expression (as with \XSLT) but resolved by \LUA. - -\stopsection - -\startsection[title={commands}] - -There are a lot of commands available but you probably can ignore most of them. -We try to be complete which means that there is for instance \type {\xmlfirst} as -well as \type {\xmllast} but you probably never need the last one. There are also -commands that were used when testing this interface and we see no reason to -remove them. Some obscure ones are used in modules and after a while even I often -forget that they exist. To give you an idea of what commands are important we -show their use in generating the \CONTEXT\ command definitions (\type -{x-set-11.mkiv}) per Januari 2016: - -\startcolumns[n=2,balance=yes] -\starttabulate[|l|r|] -\NC \type {\xmlall} \NC 1 \NC \NR -\NC \type {\xmlatt} \NC 23 \NC \NR -\NC \type {\xmlattribute} \NC 1 \NC \NR -\NC \type {\xmlcount} \NC 1 \NC \NR -\NC \type {\xmldoif} \NC 2 \NC \NR -\NC \type {\xmldoifelse} \NC 1 \NC \NR -\NC \type {\xmlfilterlist} \NC 4 \NC \NR -\NC \type {\xmlflush} \NC 5 \NC \NR -\NC \type {\xmlinclude} \NC 1 \NC \NR -\NC \type {\xmlloadonly} \NC 1 \NC \NR -\NC \type {\xmlregisterdocumentsetup} \NC 1 \NC \NR -\NC \type {\xmlsetsetup} \NC 1 \NC \NR -\NC \type {\xmlsetup} \NC 4 \NC \NR -\stoptabulate -\stopcolumns - -As you can see filtering, flushing and accessing attributes score high. Below we show -the statistics of a quite complex rendering (5 variants of schoolbooks: basic book, -answers, teachers guide, worksheets, full blown version with extensive tracing). 
- -\startcolumns[n=2,balance=yes] -\starttabulate[|l|r|] -\NC \type {\xmladdindex} \NC 3 \NC \NR -\NC \type {\xmlall} \NC 5 \NC \NR -\NC \type {\xmlappendsetup} \NC 1 \NC \NR -\NC \type {\xmlapplyselectors} \NC 1 \NC \NR -\NC \type {\xmlatt} \NC 40 \NC \NR -\NC \type {\xmlattdef} \NC 9 \NC \NR -\NC \type {\xmlattribute} \NC 10 \NC \NR -\NC \type {\xmlbadinclusions} \NC 3 \NC \NR -\NC \type {\xmlconcat} \NC 3 \NC \NR -\NC \type {\xmlcount} \NC 1 \NC \NR -\NC \type {\xmldelete} \NC 11 \NC \NR -\NC \type {\xmldoif} \NC 39 \NC \NR -\NC \type {\xmldoifelse} \NC 28 \NC \NR -\NC \type {\xmldoifelsetext} \NC 13 \NC \NR -\NC \type {\xmldoifnot} \NC 2 \NC \NR -\NC \type {\xmldoifnotselfempty} \NC 1 \NC \NR -\NC \type {\xmlfilter} \NC 100 \NC \NR -\NC \type {\xmlfirst} \NC 51 \NC \NR -\NC \type {\xmlflush} \NC 69 \NC \NR -\NC \type {\xmlflushcontext} \NC 2 \NC \NR -\NC \type {\xmlinclude} \NC 1 \NC \NR -\NC \type {\xmlincludeoptions} \NC 5 \NC \NR -\NC \type {\xmlinclusion} \NC 16 \NC \NR -\NC \type {\xmlinjector} \NC 1 \NC \NR -\NC \type {\xmlloaddirectives} \NC 1 \NC \NR -\NC \type {\xmlmapvalue} \NC 4 \NC \NR -\NC \type {\xmlmatch} \NC 1 \NC \NR -\NC \type {\xmlprependsetup} \NC 5 \NC \NR -\NC \type {\xmlregisterdocumentsetup} \NC 2 \NC \NR -\NC \type {\xmlregistersetup} \NC 1 \NC \NR -\NC \type {\xmlremapnamespace} \NC 1 \NC \NR -\NC \type {\xmlsetfunction} \NC 2 \NC \NR -\NC \type {\xmlsetinjectors} \NC 2 \NC \NR -\NC \type {\xmlsetsetup} \NC 11 \NC \NR -\NC \type {\xmlsetup} \NC 76 \NC \NR -\NC \type {\xmlstrip} \NC 1 \NC \NR -\NC \type {\xmlstripanywhere} \NC 1 \NC \NR -\NC \type {\xmltag} \NC 1 \NC \NR -\NC \type {\xmltext} \NC 53 \NC \NR -\NC \type {\xmlvalue} \NC 2 \NC \NR -\stoptabulate -\stopcolumns - -Here many more are used but this is an exceptional case. The top is again -dominated by filtering, flushing and attribute consulting. The list can actually -be smaller. For instance, the \type {\xmlcount} can just as well be \type -{\xmlfilter} with a \type {count} finalizer. There are also some special ones, -like the injectors, that are needed for finetuning the final result. 
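To illustrate the remark about \type {\xmlcount} above: assuming the usual
\type {count()} finalizer, the following two calls should report the same
number of matches (the path is just an example):

\starttyping
\xmlcount {#1}{/section}
\xmlfilter{#1}{/section/count()}
\stoptyping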
- -\stopsection - -\startsection[title={loading}] - -\startxmlcmd {\cmdbasicsetup{xmlloadfile}} - loads the file \cmdinternal {cd:file} and registers it under \cmdinternal - {cd:name} and applies either given or standard \cmdinternal - {cd:xmlsetup} (alias: \type {\xmlload}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlloadbuffer}} - loads the buffer \cmdinternal {cd:buffer} and registers it under - \cmdinternal {cd:name} and applies either given or standard - \cmdinternal {cd:xmlsetup} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlloaddata}} - loads \cmdinternal {cd:text} and registers it under \cmdinternal - {cd:name} and applies either given or standard \cmdinternal - {cd:xmlsetup} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlloadonly}} - loads \cmdinternal {cd:text} and registers it under \cmdinternal - {cd:name} and applies either given or standard \cmdinternal - {cd:xmlsetup} but doesn't flush the content -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlinclude}} - includes the file specified by attribute \cmdinternal {cd:name} of the - element located by \cmdinternal {cd:lpath} at node \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlprocessfile}} - registers file \cmdinternal {cd:file} as \cmdinternal {cd:name} and - process the tree starting with \cmdinternal {cd:xmlsetup} (alias: - \type {\xmlprocess}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlprocessbuffer}} - registers buffer \cmdinternal {cd:name} as \cmdinternal {cd:name} and process - the tree starting with \cmdinternal {cd:xmlsetup} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlprocessdata}} - registers \cmdinternal {cd:text} as \cmdinternal {cd:name} and process - the tree starting with \cmdinternal {cd:xmlsetup} -\stopxmlcmd - -The initial setup defaults to \type {xml:process} that is defined -as follows: - -\starttyping -\startsetups xml:process - \xmlregistereddocumentsetups\xmldocument - \xmlmain\xmldocument -\stopsetups -\stoptyping - -First we apply the setups associated with the document (including common setups) -and then we flush the whole document. The macro \type {\xmldocument} expands to -the current document id. There is also \type {\xmlself} which expands to the -current node number (\type {#1} in setups). - -\startxmlcmd {\cmdbasicsetup{xmlmain}} - returns the whole document -\stopxmlcmd - -Normally such a flush will trigger a chain reaction of setups associated with the -child elements. - -\stopsection - -\startsection[title={saving}] - -\startxmlcmd {\cmdbasicsetup{xmlsave}} - saves the given node \cmdinternal {cd:node} in the file \cmdinternal {cd:file} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmltofile}} - saves the match of \cmdinternal {cd:lpath} in the file \cmdinternal {cd:file} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmltobuffer}} - saves the match of \cmdinternal {cd:lpath} in the buffer \cmdinternal {cd:buffer} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmltobufferverbose}} - saves the match of \cmdinternal {cd:lpath} verbatim in the buffer \cmdinternal - {cd:buffer} -\stopxmlcmd - -% \startxmlcmd {\cmdbasicsetup{xmltoparameters}} -% converts the match of \cmdinternal {cd:lpath} to key|/|values (for tracing) -% \stopxmlcmd - -The next command is only needed when you have messed with the tree using -\LUA\ code. - -\startxmlcmd {\cmdbasicsetup{xmladdindex}} - (re)indexes a tree -\stopxmlcmd - -The following macros are only used in special situations and are not really meant -for users. 
- -\startxmlcmd {\cmdbasicsetup{xmlraw}} - flush the content if \cmdinternal {cd:node} with original entities -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{startxmlraw}} - flush the wrapped content with original entities -\stopxmlcmd - -\stopsection - -\startsection[title={flushing data}] - -When we flush an element, the associated \XML\ setups are expanded. The most -straightforward way to flush an element is the following. Keep in mind that the -returned values itself can trigger setups and therefore flushes. - -\startxmlcmd {\cmdbasicsetup{xmlflush}} - returns all nodes under \cmdinternal {cd:node} -\stopxmlcmd - -You can restrict flushing by using commands that accept a specification. - -\startxmlcmd {\cmdbasicsetup{xmltext}} - returns the text of the matching \cmdinternal {cd:lpath} under \cmdinternal - {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlpure}} - returns the text of the matching \cmdinternal {cd:lpath} under \cmdinternal - {cd:node} without \type {\Ux} escaped special \TEX\ characters -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlflushtext}} - returns the text of the \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlflushpure}} - returns the text of the \cmdinternal {cd:node} without \type {\Ux} escaped - special \TEX\ characters -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlnonspace}} - returns the text of the matching \cmdinternal {cd:lpath} under \cmdinternal - {cd:node} without embedded spaces -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlall}} - returns all nodes under \cmdinternal {cd:node} that matches \cmdinternal - {cd:lpath} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmllastmatch}} - returns all nodes found in the last match -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlfirst}} - returns the first node under \cmdinternal {cd:node} that matches \cmdinternal - {cd:lpath} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmllast}} - returns the last node under \cmdinternal {cd:node} that matches \cmdinternal - {cd:lpath} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlfilter}} - at a match of \cmdinternal {cd:lpath} a given filter \type {filter} is applied - and the result is returned -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlsnippet}} - returns the \cmdinternal {cd:number}\high{th} element under \cmdinternal - {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlposition}} - returns the \cmdinternal {cd:number}\high{th} match of \cmdinternal - {cd:lpath} at node \cmdinternal {cd:node}; a negative number starts at the - end (alias: \type {\xmlindex}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlelement}} - returns the \cmdinternal {cd:number}\high{th} child of node \cmdinternal {cd:node}; - a negative number starts at the end -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlpos}} - returns the index (position) in the parent node of \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlconcat}} - returns the sequence of nodes that match \cmdinternal {cd:lpath} at - \cmdinternal {cd:node} whereby \cmdinternal {cd:text} is put between each - match -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlconcatrange}} - returns the \cmdinternal {cd:first}\high {th} upto \cmdinternal - {cd:last}\high {th} of nodes that match \cmdinternal {cd:lpath} at - \cmdinternal {cd:node} whereby \cmdinternal {cd:text} is put between each - match -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlcommand}} - apply the given \cmdinternal {cd:xmlsetup} to each match of \cmdinternal - {cd:lpath} at node \cmdinternal {cd:node} 
-\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlstrip}} - remove leading and trailing spaces from nodes under \cmdinternal {cd:node} - that match \cmdinternal {cd:lpath} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlstripped}} - remove leading and trailing spaces from nodes under \cmdinternal {cd:node} - that match \cmdinternal {cd:lpath} and return the content afterwards -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlstripnolines}} - remove leading and trailing spaces as well as collapse embedded spaces - from nodes under \cmdinternal {cd:node} that match \cmdinternal {cd:lpath} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlstrippednolines}} - remove leading and trailing spaces as well as collapse embedded spaces from - nodes under \cmdinternal {cd:node} that match \cmdinternal {cd:lpath} and - return the content afterwards -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlverbatim}} - flushes the content verbatim code (without any wrapping, i.e. no fonts - are selected and such) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlinlineverbatim}} - return the content of the node as inline verbatim code; no further - interpretation (expansion) takes place and spaces are honoured; it uses the - following wrapper -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{startxmlinlineverbatim}} - wraps inline verbatim mode using the environment specified (a prefix \type - {xml:} is added to the environment name) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldisplayverbatim}} - return the content of the node as display verbatim code; no further - interpretation (expansion) takes place and leading and trailing spaces and - newlines are treated special; it uses the following wrapper -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{startxmldisplayverbatim}} - wraps the content in display verbatim using the environment specified (a prefix - \type {xml:} is added to the environment name) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlprettyprint}} - pretty print (with colors) the node \cmdinternal {cd:node}; use the \CONTEXT\ - \SCITE\ lexers when available (\type {\usemodule [scite]}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlflushspacewise}} - flush node \cmdinternal {cd:node} obeying spaces and newlines -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlflushlinewise}} - flush node \cmdinternal {cd:node} obeying newlines -\stopxmlcmd - -\stopsection - -\startsection[title={information}] - -The following commands return strings. Normally these are used in tests. 
- -\startxmlcmd {\cmdbasicsetup{xmlname}} - returns the complete name (including namespace prefix) of the - given \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlnamespace}} - returns the namespace of the given \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmltag}} - returns the tag of the element, without namespace prefix -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlcount}} - returns the number of matches of \cmdinternal {cd:lpath} at node \cmdinternal - {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlatt}} - returns the value of attribute \cmdinternal {cd:name} or empty if no such - attribute exists -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlattdef}} - returns the value of attribute \cmdinternal {cd:name} or \cmdinternal - {cd:string} if no such attribute exists -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlrefatt}} - returns the value of attribute \cmdinternal {cd:name} or empty if no such - attribute exists; a leading \type {#} is removed (nicer for tex) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlchainatt}} - returns the value of attribute \cmdinternal {cd:name} or empty if no such - attribute exists; backtracks till a match is found -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlchainattdef}} - returns the value of attribute \cmdinternal {cd:name} or \cmdinternal - {cd:string} if no such attribute exists; backtracks till a match is found -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlattribute}} - finds a first match for \cmdinternal {cd:lpath} at \cmdinternal {cd:node} and - returns the value of attribute \cmdinternal {cd:name} or empty if no such - attribute exists -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlattributedef}} - finds a first match for \cmdinternal {cd:lpath} at \cmdinternal {cd:node} and - returns the value of attribute \cmdinternal {cd:name} or \cmdinternal - {cd:text} if no such attribute exists -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmllastatt}} - returns the last attribute found (this avoids a lookup) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlsetatt}} - set the value of attribute \cmdinternal {cd:name} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlsetattribute}} - set the value of attribute \cmdinternal {cd:name} for each match of \cmdinternal - {cd:lpath} -\stopxmlcmd - -\stopsection - -\startsection[title={manipulation}] - -You can use \LUA\ code to manipulate the tree and it makes no sense to duplicate -this in \TEX. In the future we might provide an interface to some of this -functionality. Keep in mind that manipuating the tree might have side effects as -we maintain several indices into the tree that also needs to be updated then. - -\stopsection - -\startsection[title={integration}] - -If you write a module that deals with \XML, for instance processing cals tables, -then you need ways to control specific behaviour. For instance, you might want to -add a background to the table. Such directives are collected in \XML\ files and -can be loaded on demand. 
- -\startxmlcmd {\cmdbasicsetup{xmlloaddirectives}} - loads \CONTEXT\ directives from \cmdinternal {cd:file} that will get - interpreted when processing documents -\stopxmlcmd - -A directives definition file looks as follows: - -\starttyping -<?xml version="1.0" standalone="yes"?> - -<directives> - <directive attribute='id' value="100" - setup="cdx:100"/> - <directive attribute='id' value="101" - setup="cdx:101"/> - <directive attribute='cdx' value="colors" element="cals:table" - setup="cdx:cals:table:colors"/> - <directive attribute='cdx' value="vertical" element="cals:table" - setup="cdx:cals:table:vertical"/> - <directive attribute='cdx' value="noframe" element="cals:table" - setup="cdx:cals:table:noframe"/> - <directive attribute='cdx' value="*" element="cals:table" - setup="cdx:cals:table:*"/> -</directives> -\stoptyping - -Examples of usage can be found in \type {x-cals.mkiv}. The directive is triggered -by an attribute. Instead of a setup you can specify a setup to be applied before -and after the node gets flushed. - -\startxmlcmd {\cmdbasicsetup{xmldirectives}} - apply the setups directive associated with the node -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldirectivesbefore}} - apply the before directives associated with the node -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldirectivesafter}} - apply the after directives associated with the node -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlinstalldirective}} - defines a directive that hooks into a handler -\stopxmlcmd - -Normally a directive will be put in the \XML\ file, for instance as: - -\starttyping -<?context-mathml-directive minus reduction yes ?> -\stoptyping - -Here the \type {mathml} is the general class of directives and \type {minus} a -subclass, in our case a specific element. - -\stopsection - -\startsection[title={setups}] - -The basic building blocks of \XML\ processing are setups. These are just -collections of macros that are expanded. These setups get one argument passed -(\type {#1}): - -\starttyping -\startxmlsetups somedoc:somesetup - \xmlflush{#1} -\stopxmlsetups -\stoptyping - -This argument is normally a number that internally refers to a specific node in -the \XML\ tree. The user should see it as an abstract reference and not depend on -its numeric property. Just think of it as \quote {the current node}. You can (and -probably will) call such setups using: - -\startxmlcmd {\cmdbasicsetup{xmlsetup}} - expands setup \cmdinternal {cd:setup} and pass \cmdinternal {cd:node} as - argument -\stopxmlcmd - -However, in most cases the setups are associated to specific elements, -something that users of \XSLT\ might recognize as templates. 
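As a small illustration (reusing names from the demo converter earlier in this
manual), an explicit call and an association look like this:

\starttyping
\xmlsetup{#1}{xml:demo:p}          % apply the setup to the current node now
\xmlsetsetup{demo}{p}{xml:demo:p}  % or associate it with every <p> element
\stoptyping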
- -\startxmlcmd {\cmdbasicsetup{xmlsetfunction}} - associates function \cmdinternal {cd:luafunction} to the elements in - namespace \cmdinternal {cd:name} that match \cmdinternal {cd:lpath} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlsetsetup}} - associates setups \cmdinternal {cd:setup} (\TEX\ code) with the matching - nodes of \cmdinternal {cd:lpath} or root \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlprependsetup}} - pushes \cmdinternal {cd:setup} to the front of global list of setups -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlappendsetup}} - adds \cmdinternal {cd:setup} to the global list of setups to be applied - (alias: \type{\xmlregistersetup}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlbeforesetup}} - pushes \cmdinternal {cd:setup} into the global list of setups; the - last setup is the position -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlaftersetup}} - adds \cmdinternal {cd:setup} to the global list of setups; the last setup - is the position -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlremovesetup}} - removes \cmdinternal {cd:setup} from the global list of setups -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlprependdocumentsetup}} - pushes \cmdinternal {cd:setup} to the front of list of setups to be applied - to \cmdinternal {cd:name} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlappenddocumentsetup}} - adds \cmdinternal {cd:setup} to the list of setups to be applied to - \cmdinternal {cd:name} (you can also use the alias: \type - {\xmlregisterdocumentsetup}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlbeforedocumentsetup}} - pushes \cmdinternal {cd:setup} into the setups to be applied to \cmdinternal - {cd:name}; the last setup is the position -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlafterdocumentsetup}} - adds \cmdinternal {cd:setup} to the setups to be applied to \cmdinternal - {cd:name}; the last setup is the position -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlremovedocumentsetup}} - removes \cmdinternal {cd:setup} from the global list of setups to be applied - to \cmdinternal {cd:name} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlresetsetups}} - removes all global setups -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlresetdocumentsetups}} - removes all setups from the \cmdinternal {cd:name} specific list of setups to - be applied -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlflushdocumentsetups}{setup}} - applies \cmdinternal {cd:setup} (can be a list) to \cmdinternal {cd:name} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlregisteredsetups}} - applies all global setups to the current document -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlregistereddocumentsetups}} - applies all document specific \cmdinternal {cd:setup} to document - \cmdinternal {cd:name} -\stopxmlcmd - -\stopsection - -\startsection[title={testing}] - -The following test macros all take a \cmdinternal {cd:node} as first argument -and an \cmdinternal {cd:lpath} as second: - -\startxmlcmd {\cmdbasicsetup{xmldoif}} - expands to \cmdinternal {cd:true} when \cmdinternal {cd:lpath} matches at - node \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifnot}} - expands to \cmdinternal {cd:true} when \cmdinternal {cd:lpath} does not match - at node \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifelse}} - expands to \cmdinternal {cd:true} when \cmdinternal {cd:lpath} matches at - node \cmdinternal {cd:node} and to \cmdinternal {cd:false} otherwise -\stopxmlcmd - -\startxmlcmd 
{\cmdbasicsetup{xmldoiftext}} - expands to \cmdinternal {cd:true} when the node matching \cmdinternal - {cd:lpath} at node \cmdinternal {cd:node} has some content -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifnottext}} - expands to \cmdinternal {cd:true} when the node matching \cmdinternal - {cd:lpath} at node \cmdinternal {cd:node} has no content -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifelsetext}} - expands to \cmdinternal {cd:true} when the node matching \cmdinternal - {cd:lpath} at node \cmdinternal {cd:node} has content and to \cmdinternal - {cd:false} otherwise -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifelseempty}} - expands to \cmdinternal {cd:true} when the node matching \cmdinternal - {cd:lpath} at node \cmdinternal {cd:node} is empty and to \cmdinternal - {cd:false} otherwise -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifelseselfempty}} - expands to \cmdinternal {cd:true} when the node is empty and to \cmdinternal - {cd:false} otherwise -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifselfempty}} - expands to \cmdinternal {cd:true} when \cmdinternal {cd:node} is empty -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifnotselfempty}} - expands to \cmdinternal {cd:true} when \cmdinternal {cd:node} is not empty -\stopxmlcmd - -\stopsection - -\startsection[title={initialization}] - -The general setup command (not to be confused with setups) that deals with the -\MKIV\ tree handler is \type {\setupxml}. There are currently only a few options. - -\cmdfullsetup{setupxml} - -When you set \type {default} to \cmdinternal {cd:text} elements with no setup -assigned will end up as text. When set to \type {hidden} such elements will be -hidden. You can apply the default yourself using: - -\startxmlcmd {\cmdbasicsetup{xmldefaulttotext}} - presets the tree with root \cmdinternal {cd:node} to the handlers set up with - \type {\setupxml} option \cmdinternal{default} -\stopxmlcmd - -You can set \type {compress} to \type {yes} in which case comment is stripped -from the tree when the file is read. - -\startxmlcmd {\cmdbasicsetup{xmlregisterns}} - associates an internal namespace (like \type {mml}) with one given in the - document as \URL\ (like mathml) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlremapname}} - changes the namespace and tag of the matching elements -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlremapnamespace}} - replaces all references to the given namespace to a new one (applied - recursively) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlchecknamespace}} - sets the namespace of the matching elements unless a namespace is already set -\stopxmlcmd - -\stopsection - -\startsection[title={helpers}] - -Often an attribute will determine the rendering and this may result in many -tests. Especially when we have multiple attributes that control the output such -tests can become rather extensive and redundant because one gets $n\times m$ or -more such tests. - -Therefore we have a convenient way to map attributes onto for instance strings or -commands. 
- -\startxmlcmd {\cmdbasicsetup{xmlmapvalue}} - associate a \cmdinternal {cd:text} with a \cmdinternal {cd:category} and - \cmdinternal {cd:name} (alias: \type{\xmlmapval}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlvalue}} - expand the value associated with a \cmdinternal {cd:category} and - \cmdinternal {cd:name} and if not resolved, expand to the \cmdinternal - {cd:text} (alias: \type{\xmlval}) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmldoifelsevalue}} - associate a \cmdinternal {cd:text} with a \cmdinternal {cd:category} and - \cmdinternal {cd:name} -\stopxmlcmd - -This is used as follows. We define a couple of mappings in the same category: - -\starttyping -\xmlmapvalue{emph}{bold} {\bf} -\xmlmapvalue{emph}{italic}{\it} -\stoptyping - -Assuming that we have associated the following setup with the \type {emph} -element, we can say (with \type {#1} being the current element): - -\starttyping -\startxmlsetups demo:emph - \begingroup - \xmlvalue{emph}{\xmlatt{#1}{type}}{} - \endgroup -\stopxmlsetups -\stoptyping - -In this case we have no default. The \type {type} attribute triggers the actions, -as in: - -\starttyping -normal <emph type='bold'>bold</emph> normal -\stoptyping - -This mechanism is not really bound to elements and attributes so you can use this -mechanism for other purposes as well. - -\stopsection - -\startsection[title={Parameters}] - -\startbuffer[test] -<something whatever="alpha"> - <what> - beta - </what> -</something> -\stopbuffer - -\startbuffer -\startxmlsetups xml:mysetups - \xmlsetsetup{\xmldocument}{*}{xml:*} -\stopxmlsetups - -\xmlregistersetup{xml:mysetups} - -\startxmlsetups xml:something - parameter : \xmlpar {#1}{whatever}\par - attribute : \xmlatt {#1}{whatever}\par - text : \xmlfirst {#1}{what} \par - \xmlsetpar{#1}{whatever}{gamma} - parameter : \xmlpar {#1}{whatever}\par - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:what - what: \xmlflush{#1}\par - parameter : \xmlparam{#1}{..}{whatever}\par -\stopxmlsetups - -\xmlprocessbuffer{main}{test}{} -\stopbuffer - -Say that we have this \XML\ blob: - -\typebuffer[test] - -With: - -\typebuffer - -we get: - -\getbuffer - -Parameters are stored with a node. - -\startxmlcmd {\cmdbasicsetup{xmlpar}} - returns the value of parameter \cmdinternal {cd:name} or empty if no such - parameter exists -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlparam}} - finds a first match for \cmdinternal {cd:lpath} at \cmdinternal {cd:node} and - returns the value of parameter \cmdinternal {cd:name} or empty if no such - parameter exists -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmllastpar}} - returns the last parameter found (this avoids a lookup) -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlsetpar}} - set the value of parameter \cmdinternal {cd:name} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlsetparam}} - set the value of parameter \cmdinternal {cd:name} for each match of \cmdinternal - {cd:lpath} -\stopxmlcmd - -\stopsection - -\stopchapter - -\startchapter[title={Expressions and filters}] - -\startsection[title={path expressions}] - -In the previous chapters we used \cmdinternal {cd:lpath} expressions, which are a variant -on \type {xpath} expressions as in \XSLT\ but in this case more geared towards -usage in \TEX. This mechanisms will be extended when demands are there. - -A path is a sequence of matches. A simple path expression is: - -\starttyping -a/b/c/d -\stoptyping - -Here each \type {/} goes one level deeper. 
We can go backwards in a lookup with -\type {..}: - -\starttyping -a/b/../d -\stoptyping - -We can also combine lookups, as in: - -\starttyping -a/(b|c)/d -\stoptyping - -A negated lookup is preceded by a \type {!}: - -\starttyping -a/(b|c)/!d -\stoptyping - -A wildcard is specified with a \type {*}: - -\starttyping -a/(b|c)/!d/e/*/f -\stoptyping - -In addition to these tag based lookups we can use attributes: - -\starttyping -a/(b|c)/!d/e/*/f[@type=whatever] -\stoptyping - -An \type {@} as first character means that we are dealing with an attribute. -Within the square brackets there can be boolean expressions: - -\starttyping -a/(b|c)/!d/e/*/f[@type=whatever and @id>100] -\stoptyping - -You can use functions as in: - -\starttyping -a/(b|c)/!d/e/*/f[something(text()) == "oeps"] -\stoptyping - -There are a couple of predefined functions: - -\starttabulate[|l|l|p|] -\NC \type{rootposition} \type{order} \NC number \NC the index of the matched root element (kind of special) \NC \NR -\NC \type{position} \NC number \NC the current index of the matched element in the match list \NC \NR -\NC \type{match} \NC number \NC the current index of the matched element sub list with the same parent \NC \NR -\NC \type{first} \NC number \NC \NC \NR -\NC \type{last} \NC number \NC \NC \NR -\NC \type{index} \NC number \NC the current index of the matched element in its parent list \NC \NR -\NC \type{firstindex} \NC number \NC \NC \NR -\NC \type{lastindex} \NC number \NC \NC \NR -\NC \type{element} \NC number \NC the element's index \NC \NR -\NC \type{firstelement} \NC number \NC \NC \NR -\NC \type{lastelement} \NC number \NC \NC \NR -\NC \type{text} \NC string \NC the textual representation of the matched element \NC \NR -\NC \type{content} \NC table \NC the node of the matched element \NC \NR -\NC \type{name} \NC string \NC the full name of the matched element: namespace and tag \NC \NR -\NC \type{namespace} \type{ns} \NC string \NC the namespace of the matched element \NC \NR -\NC \type{tag} \NC string \NC the tag of the matched element \NC \NR -\NC \type{attribute} \NC string \NC the value of the attribute with the given name of the matched element \NC \NR -\stoptabulate - -There are fundamental differences between \type {position}, \type {match} and -\type {index}. Each step results in a new list of matches. The \type {position} -is the index in this new (possibly intermediate) list. The \type {match} is also -an index in this list but related to the specific match of element names. The -\type {index} refers to the location in the parent element. - -Say that we have: - -\starttyping -<collection> - <resources> - <manual> - <screen>.1.</screen> - <paper>.1.</paper> - </manual> - <manual> - <paper>.2.</paper> - <screen>.2.</screen> - </manual> - <resources> - <resources> - <manual> - <screen>.3.</screen> - <paper>.3.</paper> - </manual> - <resources> -<collection> -\stoptyping - -The following then applies: - -\starttabulate[|l|l|] -\NC \type {collection/resources/manual[position()==1]/paper} \NC \type{.1.} \NC \NR -\NC \type {collection/resources/manual[match()==1]/paper} \NC \type{.1.} \type{.3.} \NC \NR -\NC \type {collection/resources/manual/paper[index()==1]} \NC \type{.2.} \NC \NR -\stoptabulate - -In most cases the \type {position} test is more restrictive than the \type -{match} test. - -You can pass your own functions too. Such functions are defined in the the \type -{xml.expressions} namespace. 
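A minimal sketch of such a function (the name \type {startswith} is made up for
this example) and a possible use in an expression, based on the collection
example above:

\startluacode
    -- hypothetical helper added to the xml.expressions namespace mentioned
    -- above: does a string value begin with the given snippet?
    function xml.expressions.startswith(str,snippet)
        return type(str) == "string"
           and string.find(str,snippet,1,true) == 1
    end
\stopluacode

\starttyping
\xmlall{#1}{collection/resources/manual/paper[startswith(text(),".1")]}
\stoptyping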
We have defined a few shortcuts: - -\starttabulate[|l|l|] -\NC \type {find(str,pattern)} \NC \type{string.find} \NC \NR -\NC \type {contains(str)} \NC \type{string.find} \NC \NR -\NC \type {oneof(str,...)} \NC is \type{str} in list \NC \NR -\NC \type {upper(str)} \NC \type{characters.upper} \NC \NR -\NC \type {lower(str)} \NC \type{characters.lower} \NC \NR -\NC \type {number(str)} \NC \type{tonumber} \NC \NR -\NC \type {boolean(str)} \NC \type{toboolean} \NC \NR -\NC \type {idstring(str)} \NC removes leading hash \NC \NR -\NC \type {name(index)} \NC full tag name \NC \NR -\NC \type {tag(index)} \NC tag name \NC \NR -\NC \type {namespace(index)} \NC namespace of tag \NC \NR -\NC \type {text(index)} \NC content \NC \NR -\NC \type {error(str)} \NC quit and show error \NC \NR -\NC \type {quit()} \NC quit \NC \NR -\NC \type {print()} \NC print message \NC \NR -\NC \type {count(pattern)} \NC number of matches \NC \NR -\NC \type {child(pattern)} \NC take child that matches \NC \NR -\stoptabulate - - -You can also use normal \LUA\ functions as long as you make sure that you pass -the right arguments. There are a few predefined variables available inside such -functions. - -\starttabulate[|Tl|l|p|] -\NC \type{list} \NC table \NC the list of matches \NC \NR -\NC \type{l} \NC number \NC the current index in the list of matches \NC \NR -\NC \type{ll} \NC element \NC the current element that matched \NC \NR -\NC \type{order} \NC number \NC the position of the root of the path \NC \NR -\stoptabulate - -The given expression between \type {[]} is converted to a \LUA\ expression so you -can use the usual operators: - -\starttyping -== ~= <= >= < > not and or () -\stoptyping - -In addition, \type {=} equals \type {==} and \type {!=} is the same as \type -{~=}. If you mess up the expression, you quite likely get a \LUA\ error message. - -\stopsection - -\startsection[title={css selectors}] - -\startbuffer[selector-001] -<?xml version="1.0" ?> - -<a> - <b class="one">b.one</b> - <b class="two">b.two</b> - <b class="one two">b.one.two</b> - <b class="three">b.three</b> - <b id="first">b#first</b> - <c>c</c> - <d>d e</d> - <e>d e</e> - <e>d e e</e> - <d>d f</d> - <f foo="bar">@foo = bar</f> - <f bar="foo">@bar = foo</f> - <f bar="foo1">@bar = foo1</f> - <f bar="foo2">@bar = foo2</f> - <f bar="foo3">@bar = foo3</f> - <f bar="foo+4">@bar = foo+4</f> - <g>g</g> - <g><gg><d>g gg d</d></gg></g> - <g><gg><f>g gg f</f></gg></g> - <g><gg><f class="one">g gg f.one</f></gg></g> - <g>g</g> - <g><gg><f class="two">g gg f.two</f></gg></g> - <g><gg><f class="three">g gg f.three</f></gg></g> - <g><f class="one">g f.one</f></g> - <g><f class="three">g f.three</f></g> - <h whatever="four five six">@whatever = four five six</h> -</a> -\stopbuffer - -\xmlloadbuffer{selector-001}{selector-001} - -\startxmlsetups xml:selector:demo - \advance\scratchcounter\plusone - \inleftmargin{\the\scratchcounter}\ignorespaces\xmlverbatim{#1}\par -\stopxmlsetups - -\unexpanded\def\showCSSdemo#1#2% - {\blank - \textrule{\tttf#2} - \startlines - \dontcomplain - \tttf \obeyspaces - \scratchcounter\zerocount - \xmlcommand{#1}{#2}{xml:selector:demo} - \stoplines - \blank} - -The \CSS\ approach to filtering is a bit different from the path based one and is -supported too. In fact, you can combine both methods. Depending on what you -select, the \CSS\ one can be a little bit faster too. It has the advantage that -one can select more in one go but at the same time looks a bit less attractive. 
-
-This method was added just to show that it can be done but might be useful too. A
-selector is given between curly braces (after all \CSS\ uses them and they have no
-function yet in the parser).
-
-\starttyping
-\xmlall{#1}{{foo bar .whatever, bar foo .whatever}}
-\stoptyping
-
-The following methods are supported:
-
-\starttabulate[|T||]
-\NC element \NC all tags element \NC \NR
-\NC element-1 > element-2 \NC all tags element-2 with parent tag element-1 \NC \NR
-\NC element-1 + element-2 \NC all tags element-2 directly preceded by tag element-1 \NC \NR
-\NC element-1 ~ element-2 \NC all tags element-2 preceded by tag element-1 \NC \NR
-\NC element-1 element-2 \NC all tags element-2 inside tag element-1 \NC \NR
-\NC [attribute] \NC has attribute \NC \NR
-\NC [attribute=value] \NC attribute equals value \NC \NR
-\NC [attribute\lettertilde =value] \NC attribute contains value (space is separator) \NC \NR
-\NC [attribute\letterhat ="value"] \NC attribute starts with value \NC \NR
-\NC [attribute\letterdollar="value"] \NC attribute ends with value \NC \NR
-\NC [attribute*="value"] \NC attribute contains value \NC \NR
-\NC .class \NC has class \NC \NR
-\NC \letterhash id \NC has id \NC \NR
-\NC :nth-child(n) \NC the child at index n \NC \NR
-\NC :nth-last-child(n) \NC the child at index n from the end \NC \NR
-\NC :first-child \NC the first child \NC \NR
-\NC :last-child \NC the last child \NC \NR
-\NC :nth-of-type(n) \NC the match at index n \NC \NR
-\NC :nth-last-of-type(n) \NC the match at index n from the end \NC \NR
-\NC :first-of-type \NC the first match \NC \NR
-\NC :last-of-type \NC the last match \NC \NR
-\NC :only-of-type \NC the only match or nothing \NC \NR
-\NC :only-child \NC the only child or nothing \NC \NR
-\NC :empty \NC only when empty \NC \NR
-\NC :root \NC the whole tree \NC \NR
-\stoptabulate
-
-The next pages show some examples. For that we use the demo file:
-
-\typebuffer[selector-001]
-
-The class and id selectors often only make sense in \HTML\ like documents but they
-are supported nevertheless. They are after all just shortcuts for filtering by
-attribute. The class filtering is special in the sense that it checks for a class
-in a list of classes given in an attribute.
-
-\showCSSdemo{selector-001}{{.one}}
-\showCSSdemo{selector-001}{{.one, .two}}
-\showCSSdemo{selector-001}{{.one, .two, \letterhash first}}
-
-Attributes can be filtered by presence, value, partial value and such. Quotes are
-optional but we advise using them.
- -\showCSSdemo{selector-001}{{[foo], [bar=foo]}} -\showCSSdemo{selector-001}{{[bar\lettertilde=foo]}} -\showCSSdemo{selector-001}{{[bar\letterhat="foo"]}} -\showCSSdemo{selector-001}{{[whatever\lettertilde="five"]}} - -You can of course combine the methods as in: - -\showCSSdemo{selector-001}{{g f .one, g f .three}} -\showCSSdemo{selector-001}{{g > f .one, g > f .three}} -\showCSSdemo{selector-001}{{d + e}} -\showCSSdemo{selector-001}{{d ~ e}} -\showCSSdemo{selector-001}{{d ~ e, g f .one, g f .three}} - -You can also negate the result by using \type {:not} on a simple expression: - -\showCSSdemo{selector-001}{{:not([whatever\lettertilde="five"])}} -\showCSSdemo{selector-001}{{:not(d)}} - -The child and match selectors are also supported: - -\showCSSdemo{selector-001}{{a:nth-child(3)}} -\showCSSdemo{selector-001}{{a:nth-last-child(3)}} -\showCSSdemo{selector-001}{{g:nth-of-type(3)}} -\showCSSdemo{selector-001}{{g:nth-last-of-type(3)}} -\showCSSdemo{selector-001}{{a:first-child}} -\showCSSdemo{selector-001}{{a:last-child}} -\showCSSdemo{selector-001}{{e:first-of-type}} -\showCSSdemo{selector-001}{{gg d:only-of-type}} - -Instead of numbers you can also give the \type {an} and \type {an+b} formulas -as well as the \type {odd} and \type {even} keywords: - -\showCSSdemo{selector-001}{{a:nth-child(even)}} -\showCSSdemo{selector-001}{{a:nth-child(odd)}} -\showCSSdemo{selector-001}{{a:nth-child(3n+1)}} -\showCSSdemo{selector-001}{{a:nth-child(2n+3)}} - -There are a few special cases: - -\showCSSdemo{selector-001}{{g:empty}} -\showCSSdemo{selector-001}{{g:root}} -\showCSSdemo{selector-001}{{*}} - -Combining the \CSS\ methods with the regular ones is possible: - -\showCSSdemo{selector-001}{{g gg f .one}} -\showCSSdemo{selector-001}{g/gg/f[@class='one']} -\showCSSdemo{selector-001}{g/{gg f .one}} - -\startbuffer[selector-002] -<?xml version="1.0" ?> - -<document> - <title class="one" >title 1</title> - <title class="two" >title 2</title> - <title class="one" >title 3</title> - <title class="three">title 4</title> -</document> -\stopbuffer - -The next examples we use this file: - -\typebuffer[selector-002] - -\xmlloadbuffer{selector-002}{selector-002} - -When we filter from this (not too well structured) tree we can use both -methods to achieve the same: - -\showCSSdemo{selector-002}{{document title .one, document title .three}} - -\showCSSdemo{selector-002}{/document/title[(@class='one') or (@class='three')]} - -However, imagine this file: - -\startbuffer[selector-003] -<?xml version="1.0" ?> - -<document> - <title class="one">title 1</title> - <subtitle class="sub">title 1.1</subtitle> - <title class="two">title 2</title> - <subtitle class="sub">title 2.1</subtitle> - <title class="one">title 3</title> - <subtitle class="sub">title 3.1</subtitle> - <title class="two">title 4</title> - <subtitle class="sub">title 4.1</subtitle> -</document> -\stopbuffer - -\typebuffer[selector-003] - -\xmlloadbuffer{selector-003}{selector-003} - -The next filter in easier with the \CSS\ selector methods because these accumulate -independent (simple) expressions: - -\showCSSdemo{selector-003}{{document title .one + subtitle, document title .two + subtitle}} - -Watch how we get an output in the document order. Because we render a sequential document -a combined filter will trigger a sorting pass. - -\stopsection - -\startsection[title={functions as filters}] - -At the \LUA\ end a whole \cmdinternal {cd:lpath} expression results in a (set of) node(s) -with its environment, but that is hardly usable in \TEX. 
Think of code like: - -\starttyping -for e in xml.collected(xml.load('text.xml'),"title") do - -- e = the element that matched -end -\stoptyping - -The older variant is still supported but you can best use the previous variant. - -\starttyping -for r, d, k in xml.elements(xml.load('text.xml'),"title") do - -- r = root of the title element - -- d = data table - -- k = index in data table -end -\stoptyping - -Here \type {d[k]} points to the \type {title} element and in this case all titles -in the tree pass by. In practice this kind of code is encapsulated in function -calls, like those returning elements one by one, or returning the first or last -match. The result is then fed back into \TEX, possibly after being altered by an -associated setup. We've seen the wrappers to such functions already in a previous -chapter. - -In addition to the previously discussed expressions, one can add so called -filters to the expression, for instance: - -\starttyping -a/(b|c)/!d/e/text() -\stoptyping - -In a filter, the last part of the \cmdinternal {cd:lpath} expression is a -function call. The previous example returns the text of each element \type {e} -that results from matching the expression. When running \TEX\ the following -functions are available. Some are also available when using pure \LUA. In \TEX\ -you can often use one of the macros like \type {\xmlfirst} instead of a \type -{\xmlfilter} with finalizer \type {first()}. The filter can be somewhat faster -but that is hardly noticeable. - -\starttabulate[|l|l|p|] -\NC \type {context()} \NC string \NC the serialized text with \TEX\ catcode regime \NC \NR -%NC \type {ctxtext()} \NC string \NC \NC \NR -\NC \type {function()} \NC string \NC depends on the function \NC \NR -% -\NC \type {name()} \NC string \NC the (remapped) namespace \NC \NR -\NC \type {tag()} \NC string \NC the name of the element \NC \NR -\NC \type {tags()} \NC list \NC the names of the element \NC \NR -% -\NC \type {text()} \NC string \NC the serialized text \NC \NR -\NC \type {upper()} \NC string \NC the serialized text uppercased \NC \NR -\NC \type {lower()} \NC string \NC the serialized text lowercased \NC \NR -\NC \type {stripped()} \NC string \NC the serialized text stripped \NC \NR -\NC \type {lettered()} \NC string \NC the serialized text only letters (cf. 
\UNICODE) \NC \NR
-%
-\NC \type {count()} \NC number \NC the number of matches \NC \NR
-\NC \type {index()} \NC number \NC the matched index in the current path \NC \NR
-\NC \type {match()} \NC number \NC the matched index in the preceding path \NC \NR
-%
-%NC \type {lowerall()} \NC string \NC \NC \NR
-%NC \type {upperall()} \NC string \NC \NC \NR
-%
-\NC \type {attribute(name)} \NC content \NC returns the attribute with the given name \NC \NR
-\NC \type {chainattribute(name)} \NC content \NC idem, but backtracks till one is found \NC \NR
-\NC \type {command(name)} \NC content \NC expands the setup with the given name for each found element \NC \NR
-\NC \type {position(n)} \NC content \NC processes the \type {n}\high{th} instance of the found element \NC \NR
-\NC \type {all()} \NC content \NC processes all instances of the found element \NC \NR
-%NC \type {default} \NC content \NC all \NC \NR
-\NC \type {reverse()} \NC content \NC idem in reverse order \NC \NR
-\NC \type {first()} \NC content \NC processes the first instance of the found element \NC \NR
-\NC \type {last()} \NC content \NC processes the last instance of the found element \NC \NR
-\NC \type {concat(...)} \NC content \NC concatenates the match \NC \NR
-\NC \type {concatrange(from,to,...)} \NC content \NC concatenates a range of matches \NC \NR
-\stoptabulate
-
-The extra arguments of the concatenators are: \type {separator} (string), \type
-{lastseparator} (string) and \type {textonly} (a boolean).
-
-These filters are in fact \LUA\ functions which means that if needed more of them
-can be added. Indeed this happens in some of the \XML\ related \MKIV\ modules,
-for instance in the \MATHML\ processor.
-
-\stopsection
-
-\startsection[title={example}]
-
-The number of commands is rather large and if you want to avoid them this is
-often possible. Take for instance:
-
-\starttyping
-\xmlall{#1}{/a/b[position()>3]}
-\stoptyping
-
-Alternatively you can use:
-
-\starttyping
-\xmlfilter{#1}{/a/b[position()>3]/all()}
-\stoptyping
-
-and actually this is also faster as internally it avoids a function call. Of
-course in practice this is hardly measurable.
-
-In previous examples we've already seen quite some expressions, and it might be
-good to point out that the syntax is modelled after \XSLT\ but is not quite the
-same. The reason is that we started with a rather minimal system and already have
-styles in use that depend on compatibility.
-
-\starttyping
-namespace:// axis node(set) [expr 1]..[expr n] / ... / filter
-\stoptyping
-
-When we are inside a \CONTEXT\ run, the namespace is \type {tex}. However, if
-you do not want to print back to \TEX\ you need to be more explicit.
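As a concrete (made up) instance of that anatomy, take:

\starttyping
xml://chapter/section[@level='2' and position()>1]/title/text()
\stoptyping

Here \type {xml://} is the namespace (so the result stays at the \LUA\ end instead
of being piped back to \TEX), \type {chapter} and \type {section} are steps, the
part between square brackets is an expression, and \type {text()} is the filter at
the end. The element names are of course just assumptions for this example.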
Say that we -typeset examns and have a (not that logical) structure like: - -\starttyping -<question> - <text>...</text> - <answer> - <item>one</item> - <item>two</item> - <item>three</item> - </answer> - <alternative> - <condition>true</condition> - <score>1</score> - </alternative> - <alternative> - <condition>false</condition> - <score>0</score> - </alternative> - <alternative> - <condition>true</condition> - <score>2</score> - </alternative> -</question> -\stoptyping - -Say that we typeset the questions with: - -\starttyping -\startxmlsetups question - \blank - score: \xmlfunction{#1}{totalscore} - \blank - \xmlfirst{#1}{text} - \startitemize - \xmlfilter{#1}{/answer/item/command(answer:item)} - \stopitemize - \endgraf - \blank -\stopxmlsetups -\stoptyping - -Each item in the answer results in a call to: - -\starttyping -\startxmlsetups answer:item - \startitem - \xmlflush{#1} - \endgraf - \xmlfilter{#1}{../../alternative[position()=rootposition()]/ - condition/command(answer:condition)} - \stopitem -\stopxmlsetups -\stoptyping - -\starttyping -\startxmlsetups answer:condition - \endgraf - condition: \xmlflush{#1} - \endgraf -\stopxmlsetups -\stoptyping - -Now, there are two rather special filters here. The first one involves -calculating the total score. As we look forward we use a function to deal with -this. - -\starttyping -\startluacode -function xml.functions.totalscore(root) - local score = 0 - for e in xml.collected(root,"/alternative") do - score = score + xml.filter(e,"xml:///score/number()") or 0 - end - tex.write(score) -end -\stopluacode -\stoptyping - -Watch how we use the namespace to keep the results at the \LUA\ end. - -The second special trick shown here is to limit a match using the current -position of the root (\type {#}) match. - -As you can see, a path expression can be more than just filtering a few nodes. At -the end of this manual you will find a bunch of examples. - -\stopsection - -\startsection[title={tables}] - -If you want to know how the internal \XML\ tables look you can print such a -table: - -\starttyping -print(table.serialize(e)) -\stoptyping - -This produces for instance: - -% s = xml.convert("<document><demo label='whatever'>some text</demo></document>") -% print(table.serialize(xml.filter(s,"demo")[1])) - -\starttyping -t={ - ["at"]={ - ["label"]="whatever", - }, - ["dt"]={ "some text" }, - ["ns"]="", - ["rn"]="", - ["tg"]="demo", -} -\stoptyping - -The \type {rn} entry is the renamed namespace (when renaming is applied). If you -see tags like \type {@pi@} this means that we don't have an element, but (in this -case) a processing instruction. - -\starttabulate[|l|p|] -\NC \type {@rt@} \NC the root element \NC \NR -\NC \type {@dd@} \NC document definition \NC \NR -\NC \type {@cm@} \NC comment, like \type {<!-- whatever -->} \NC \NR -\NC \type {@cd@} \NC so called \type {CDATA} \NC \NR -\NC \type {@pi@} \NC processing instruction, like \type {<?whatever we want ?>} \NC \NR -\stoptabulate - -There are many ways to deal with the content, but in the perspective of \TEX\ -only a few matter. - -\starttabulate[|l|p|] -\NC \type {xml.sprint(e)} \NC print the content to \TEX\ and apply setups if needed \NC \NR -\NC \type {xml.tprint(e)} \NC print the content to \TEX\ (serialize elements verbose) \NC \NR -\NC \type {xml.cprint(e)} \NC print the content to \TEX\ (used for special content) \NC \NR -\stoptabulate - -Keep in mind that anything low level that you uncover is not part of the official -interface unless mentioned in this manual. 
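For the record, the table shown above was generated with a couple of lines of
\LUA; wrapped in \type {\startluacode} they can be run from within a document (a
sketch, and as just said not part of the official interface):

\starttyping
\startluacode
-- convert a small snippet and serialize the first matched element to the log
local s = xml.convert("<document><demo label='whatever'>some text</demo></document>")
print(table.serialize(xml.filter(s,"demo")[1]))
\stopluacode
\stoptyping

The serialized table ends up on the console and in the log file.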
- -\stopsection - -\stopchapter - -\startchapter[title={Tips and tricks}] - -\startsection[title={tracing}] - -It can be hard to debug code as much happens kind of behind the screens. -Therefore we have a couple of tracing options. Of course you can typeset some -status information, using for instance: - -\startxmlcmd {\cmdbasicsetup{xmlshow}} - typeset the tree given by \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlinfo}} - typeset the name in the element given by \cmdinternal {cd:node} -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlpath}} - returns the complete path (including namespace prefix and index) of the - given \cmdinternal {cd:node} -\stopxmlcmd - -\startbuffer[demo] -<?xml version "1.0"?> -<document> - <section> - <content> - <p>first</p> - <p><b>second</b></p> - </content> - </section> - <section> - <content> - <p><b>third</b></p> - <p>fourth</p> - </content> - </section> -</document> -\stopbuffer - -Say that we have the following \XML: - -\typebuffer[demo] - -and the next definitions: - -\startbuffer -\startxmlsetups xml:demo:base - \xmlsetsetup{#1}{p|b}{xml:demo:*} -\stopxmlsetups - -\startxmlsetups xml:demo:p - \xmlflush{#1} - \par -\stopxmlsetups - -\startxmlsetups xml:demo:b - \par - \xmlpath{#1} : \xmlflush{#1} - \par -\stopxmlsetups - -\xmlregisterdocumentsetup{example-10}{xml:demo:base} - -\xmlprocessbuffer{example-10}{demo}{} -\stopbuffer - -\typebuffer - -This will give us: - -\blank \startpacked \getbuffer \stoppacked \blank - -If you use \type {\xmlshow} you will get a complete subtree which can -be handy for tracing but can also lead to large documents. - -We also have a bunch of trackers that can be enabled, like: - -\starttyping -\enabletrackers[xml.show,xml.parse] -\stoptyping - -The full list (currently) is: - -\starttabulate[|lT|p|] -\NC xml.entities \NC show what entities are seen and replaced \NC \NR -\NC xml.path \NC show the result of parsing an lpath expression \NC \NR -\NC xml.parse \NC show stepwise resolving of expressions \NC \NR -\NC xml.profile \NC report all parsed lpath expressions (in the log) \NC \NR -\NC xml.remap \NC show what namespaces are remapped \NC \NR -\NC lxml.access \NC report errors with respect to resolving (symbolic) nodes \NC \NR -\NC lxml.comments \NC show the comments that are encountered (if at all) \NC \NR -\NC lxml.loading \NC show what files are loaded and converted \NC \NR -\NC lxml.setups \NC show what setups are being associated to elements \NC \NR -\stoptabulate - -In one of our workflows we produce books from \XML\ where the (educational) -content is organized in many small files. Each book has about 5~chapters and each -chapter is made of sections that contain text, exercises, resources, etc.\ and so -the document is assembled from thousands of files (don't worry, runtime inclusion -is pretty fast). In order to see where in the sources content resides we can -trace the filename. - -\startxmlcmd {\cmdbasicsetup{xmlinclusion}} - returns the file where the node comes from -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlinclusions}} - returns the list of files where the node comes from -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlbadinclusions}} - returns a list of files that were not included due to some problem -\stopxmlcmd - -Of course you have to make sure that these names end up somewhere visible, for -instance in the margin. - -\stopsection - -\startsection[title={expansion}] - -For novice users the concept of expansion might sound frightening and to some -extend it is. 
However, it is important enough to spend some words on it here. - -It is good to realize that most setups are sort of immediate. When one setup is -issued, it can call another one and so on. Normally you won't notice that but -there are cases where that can be a problem. In \TEX\ you can define a macro, -take for instance: - -\starttyping -\startxmlsetups xml:foo - \def\foobar{\xmlfirst{#1}{/bar}} -\stopxmlsetups -\stoptyping - -you store the reference top node \type {bar} in \type {\foobar} maybe for later use. In -this case the content is not yet fetched, it will be done when \type {\foobar} is -called. - -\starttyping -\startxmlsetups xml:foo - \edef\foobar{\xmlfirst{#1}{/bar}} -\stopxmlsetups -\stoptyping - -Here the content of \type {bar} becomes the body of the macro. But what if -\type {bar} itself contains elements that also contain elements. When there -is a setup for \type {bar} it will be triggered and so on. - -When that setup looks like: - -\starttyping -\startxmlsetups xml:bar - \def\barfoo{\xmlflush{#1}} -\stopxmlsetups -\stoptyping - -Here we get something like: - -\starttyping -\foobar => {\def\barfoo{...}} -\stoptyping - -When \type {\barfoo} is not defined we get an error and when it is known and expands -to something weird we might also get an error. - -Especially when you don't know what content can show up, this can result in errors -when an expansion fails, for example because some macro being used is not defined. -To prevent this we can define a macro: - -\starttyping -\starttexdefinition unexpanded xml:bar:macro #1 - \def\barfoo{\xmlflush{#1}} -\stoptexdefinition - -\startxmlsetups xml:bar - \texdefinition{xml:bar:macro}{#1} -\stopxmlsetups -\stoptyping - -The setup \type {xml:bar} will still expand but the replacement text now is just the -call to the macro, think of: - -\starttyping -\foobar => {\texdefinition{xml:bar:macro}{#1}} -\stoptyping - -But this is often not needed, most \CONTEXT\ commands can handle the expansions -quite well but it's good to know that there is a way out. So, now to some -examples. Imagine that we have an \XML\ file that looks as follows: - -\starttyping -<?xml version='1.0' ?> -<demo> - <chapter> - <title>Some <em>short</em> title</title> - <content> - zeta - <index> - <key>zeta</key> - <content>zeta again</content> - </index> - alpha - <index> - <key>alpha</key> - <content>alpha <em>again</em></content> - </index> - gamma - <index> - <key>gamma</key> - <content>gamma</content> - </index> - beta - <index> - <key>beta</key> - <content>beta</content> - </index> - delta - <index> - <key>delta</key> - <content>delta</content> - </index> - done! - </content> - </chapter> -</demo> -\stoptyping - -There are a few structure related elements here: a chapter (with its list entry) -and some index entries. Both are multipass related and therefore travel around. -This means that when we let data end up in the auxiliary file, we need to make -sure that we end up with either expanded data (i.e.\ no references to the \XML\ -tree) or with robust forward and backward references to elements in the tree. - -Here we discuss three approaches (and more may show up later): pushing \XML\ into -the auxiliary file and using references to elements either or not with an -associated setup. We control the variants with a switch. 
- -\starttyping -\newcount\TestMode - -\TestMode=0 % expansion=xml -\TestMode=1 % expansion=yes, index, setup -\TestMode=2 % expansion=yes -\stoptyping - -We apply a couple of setups: - -\starttyping -\startxmlsetups xml:mysetups - \xmlsetsetup{\xmldocument}{demo|index|content|chapter|title|em}{xml:*} -\stopxmlsetups - -\xmlregistersetup{xml:mysetups} -\stoptyping - -The main document is processed with: - -\starttyping -\startxmlsetups xml:demo - \xmlflush{#1} - \subject{contents} - \placelist[chapter][criterium=all] - \subject{index} - \placeregister[index][criterium=all] - \page % else buffer is forgotten when placing header -\stopxmlsetups -\stoptyping - -First we show three alternative ways to deal with the chapter. The first case -expands the \XML\ reference so that we have an \XML\ stream in the auxiliary -file. This stream is processed as a small independent subfile when needed. The -second case registers a reference to the current element (\type {#1}). This means -that we have access to all data of this element, like attributes, title and -content. What happens depends on the given setup. The third variant does the same -but here the setup is part of the reference. - -\starttyping -\startxmlsetups xml:chapter - \ifcase \TestMode - % xml code travels around - \setuphead[chapter][expansion=xml] - \startchapter[title=eh: \xmltext{#1}{title}] - \xmlfirst{#1}{content} - \stopchapter - \or - % index is used for access via setup - \setuphead[chapter][expansion=yes,xmlsetup=xml:title:flush] - \startchapter[title=\xmlgetindex{#1}] - \xmlfirst{#1}{content} - \stopchapter - \or - % tex call to xml using index is used - \setuphead[chapter][expansion=yes] - \startchapter[title=hm: \xmlreference{#1}{xml:title:flush}] - \xmlfirst{#1}{content} - \stopchapter - \fi -\stopxmlsetups - -\startxmlsetups xml:title:flush - \xmltext{#1}{title} -\stopxmlsetups -\stoptyping - -We need to deal with emphasis and the content of the chapter. - -\starttyping -\startxmlsetups xml:em - \begingroup\em\xmlflush{#1}\endgroup -\stopxmlsetups - -\startxmlsetups xml:content - \xmlflush{#1} -\stopxmlsetups -\stoptyping - -A similar approach is followed with the index entries. Watch how we use the -numbered entries variant (in this case we could also have used just \type -{entries} and \type {keys}). - -\starttyping -\startxmlsetups xml:index - \ifcase \TestMode - \setupregister[index][expansion=xml,xmlsetup=] - \setstructurepageregister - [index] - [entries:1=\xmlfirst{#1}{content}, - keys:1=\xmltext{#1}{key}] - \or - \setupregister[index][expansion=yes,xmlsetup=xml:index:flush] - \setstructurepageregister - [index] - [entries:1=\xmlgetindex{#1}, - keys:1=\xmltext{#1}{key}] - \or - \setupregister[index][expansion=yes,xmlsetup=] - \setstructurepageregister - [index] - [entries:1=\xmlreference{#1}{xml:index:flush}, - keys:1=\xmltext{#1}{key}] - \fi -\stopxmlsetups - -\startxmlsetups xml:index:flush - \xmlfirst{#1}{content} -\stopxmlsetups -\stoptyping - -Instead of this flush, you can use the predefined setup \type {xml:flush} -unless it is overloaded by you. - -The file is processed by: - -\starttyping -\starttext - \xmlprocessfile{main}{test.xml}{} -\stoptext -\stoptyping - -We don't show the result here. If you're curious what the output is, you can test -it yourself. In that case it also makes sense to peek into the \type {test.tuc} -file to see how the information travels around. The \type {metadata} fields carry -information about how to process the data. 
- -The first case, the \XML\ expansion one, is somewhat special in the sense that -internally we use small pseudo files. You can control the rendering by tweaking -the following setups: - -\starttyping -\startxmlsetups xml:ctx:sectionentry - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:ctx:registerentry - \xmlflush{#1} -\stopxmlsetups -\stoptyping - -{\em When these methods work out okay the other structural elements will be -dealt with in a similar way.} - -\stopsection - -\startsection[title={special cases}] - -Normally the content will be flushed under a special (so called) catcode regime. -This means that characters that have a special meaning in \TEX\ will have no such -meaning in an \XML\ file. If you want content to be treated as \TEX\ code, you can -use one of the following: - -\startxmlcmd {\cmdbasicsetup{xmlflushcontext}} - flush the given \cmdinternal {cd:node} using the \TEX\ character - interpretation scheme -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlcontext}} - flush the match of \cmdinternal {cd:lpath} for the given \cmdinternal - {cd:node} using the \TEX\ character interpretation scheme -\stopxmlcmd - -We use this in cases like: - -\starttyping -.... - \xmlsetsetup {#1} { - tm|texformula| - } {xml:*} -.... - -\startxmlsetups xml:tm - \mathematics{\xmlflushcontext{#1}} -\stopxmlsetups - -\startxmlsetups xml:texformula - \placeformula\startformula\xmlflushcontext{#1}\stopformula -\stopxmlsetups -\stoptyping - -\stopsection - -\startsection[title={collecting}] - -Say that your document has - -\starttyping -<table> - <tr> - <td>foo</td> - <td>bar<td> - </tr> -</table> -\stoptyping - -And that you need to convert that to \TEX\ speak like: - -\starttyping -\bTABLE - \bTR - \bTD foo \eTD - \bTD bar \eTD - \eTR -\eTABLE -\stoptyping - -A simple mapping is: - -\starttyping -\startxmlsetups xml:table - \bTABLE \xmlflush{#1} \eTABLE -\stopxmlsetups -\startxmlsetups xml:tr - \bTR \xmlflush{#1} \eTR -\stopxmlsetups -\startxmlsetups xml:td - \bTD \xmlflush{#1} \eTD -\stopxmlsetups -\stoptyping - -The \type {\bTD} command is a so called delimited command which means that it -picks up its argument by looking for an \type {\eTD}. For the simple case here -this works quite well because the flush is inside the pair. This is not the case -in the following variant: - -\starttyping -\startxmlsetups xml:td:start - \bTD -\stopxmlsetups -\startxmlsetups xml:td:stop - \eTD -\stopxmlsetups -\startxmlsetups xml:td - \xmlsetup{#1}{xml:td:start} - \xmlflush{#1} - \xmlsetup{#1}{xml:td:stop} -\stopxmlsetups -\stoptyping - -When for some reason \TEX\ gets confused you can revert to a mechanism that -collects content. - -\starttyping -\startxmlsetups xml:td:start - \startcollect - \bTD - \stopcollect -\stopxmlsetups -\startxmlsetups xml:td:stop - \startcollect - \eTD - \stopcollect -\stopxmlsetups -\startxmlsetups xml:td - \startcollecting - \xmlsetup{#1}{xml:td:start} - \xmlflush{#1} - \xmlsetup{#1}{xml:td:stop} - \stopcollecting -\stopxmlsetups -\stoptyping - -You can even implement solutions that effectively do this: - -\starttyping -\startcollecting - \startcollect \bTABLE \stopcollect - \startcollect \bTR \stopcollect - \startcollect \bTD \stopcollect - \startcollect foo\stopcollect - \startcollect \eTD \stopcollect - \startcollect \bTD \stopcollect - \startcollect bar\stopcollect - \startcollect \eTD \stopcollect - \startcollect \eTR \stopcollect - \startcollect \eTABLE \stopcollect -\stopcollecting -\stoptyping - -Of course you only need to go that complex when the situation demands it. 
Here is -another weird one: - -\starttyping -\startcollecting - \startcollect \setupsomething[\stopcollect - \startcollect foo=\stopcollect - \startcollect FOO,\stopcollect - \startcollect bar=\stopcollect - \startcollect BAR,\stopcollect - \startcollect ]\stopcollect -\stopcollecting -\stoptyping - -\stopsection - -\startsection[title={selectors and injectors}] - -This section describes a bit special feature, one that we needed for a project -where we could not touch the original content but could add specific sections for -our own purpose. Hopefully the example demonstrates its useability. - -\enabletrackers[lxml.selectors] - -\startbuffer[foo] -<?xml version="1.0" encoding="UTF-8"?> - -<?context-directive message info 1: this is a demo file ?> -<?context-message-directive info 2: this is a demo file ?> - -<one> - <two> - <?context-select begin t1 t2 t3 ?> - <three> - t1 t2 t3 - <?context-directive injector crlf t1 ?> - t1 t2 t3 - </three> - <?context-select end ?> - <?context-select begin t4 ?> - <four> - t4 - </four> - <?context-select end ?> - <?context-select begin t8 ?> - <four> - t8.0 - t8.0 - </four> - <?context-select end ?> - <?context-include begin t4 ?> - <!-- - <three> - t4.t3 - <?context-directive injector crlf t1 ?> - t4.t3 - </three> - --> - <three> - t3 - <?context-directive injector crlf t1 ?> - t3 - </three> - <?context-include end ?> - <?context-select begin t8 ?> - <four> - t8.1 - t8.1 - </four> - <?context-select end ?> - <?context-select begin t8 ?> - <four> - t8.2 - t8.2 - </four> - <?context-select end ?> - <?context-select begin t4 ?> - <four> - t4 - t4 - </four> - <?context-select end ?> - <?context-directive injector page t7 t8 ?> - foo - <?context-directive injector blank t1 ?> - bar - <?context-directive injector page t7 t8 ?> - bar - </two> -</one> -\stopbuffer - -\typebuffer[foo] - -First we show how to plug in a directive. Processing instructions like the -following are normally ignored by an \XML\ processor, unless they make sense -to it. - -\starttyping -<?context-directive message info 1: this is a demo file ?> -<?context-message-directive info 2: this is a demo file ?> -\stoptyping - -We can define a message handler as follows: - -\startbuffer -\def\MyMessage#1#2#3{\writestatus{#1}{#2 #3}} - -\xmlinstalldirective{message}{MyMessage} -\stopbuffer - -\typebuffer \getbuffer - -When this file is processed you will see this on the console: - -\starttyping -info > 1: this is a demo file -info > 2: this is a demo file -\stoptyping - -The file has some sections that can be used or ignored. The recipe for -obeying \type {t1} and \type {t4} is the following: - -\startbuffer -\xmlsetinjectors[t1] -\xmlsetinjectors[t4] - -\startxmlsetups xml:initialize - \xmlapplyselectors{#1} - \xmlsetsetup {#1} { - one|two|three|four - } {xml:*} -\stopxmlsetups - -\xmlregistersetup{xml:initialize} - -\startxmlsetups xml:one - [ONE \xmlflush{#1} ONE] -\stopxmlsetups - -\startxmlsetups xml:two - [TWO \xmlflush{#1} TWO] -\stopxmlsetups - -\startxmlsetups xml:three - [THREE \xmlflush{#1} THREE] -\stopxmlsetups - -\startxmlsetups xml:four - [FOUR \xmlflush{#1} FOUR] -\stopxmlsetups -\stopbuffer - -\typebuffer \getbuffer - -This typesets: - -\startnarrower -\xmlprocessbuffer{main}{foo}{} -\stopnarrower - -The include coding is kind of special: it permits adding content (in a comment) -and ignoring the rest so that we indeed can add something without interfering -with the original. 
Of course in a normal workflow such messy solutions are -not needed, but alas, often workflows are not that clean, especially when one -has no real control over the source. - -\startxmlcmd {\cmdbasicsetup{xmlsetinjectors}} - enables a list of injectors that will be used -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlresetinjectors}} - resets the list of injectors -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlinjector}} - expands an injection (command); normally this one is only used - (in some setup) or for testing -\stopxmlcmd - -\startxmlcmd {\cmdbasicsetup{xmlapplyselectors}} - analyze the tree \cmdinternal {cd:node} for marked sections that - will be injected -\stopxmlcmd - -We have some injections predefined: - -\starttyping -\startsetups xml:directive:injector:page - \page -\stopsetups - -\startsetups xml:directive:injector:column - \column -\stopsetups - -\startsetups xml:directive:injector:blank - \blank -\stopsetups -\stoptyping - -In the example we see: - -\starttyping -<?context-directive injector page t7 t8 ?> -\stoptyping - -When we set \type {\xmlsetinjector[t7]} a pagebreak will injected in that spot. -Tags like \type {t7}, \type {t8} etc.\ can represent versions. - -\stopsection - -\startsection[title=preprocessing] - -% local match = lpeg.match -% local replacer = lpeg.replacer("BAD TITLE:","<bold>BAD TITLE:</bold>") -% -% function lxml.preprocessor(data,settings) -% return match(replacer,data) -% end - -\startbuffer[pre-code] -\startluacode - function lxml.preprocessor(data,settings) - return string.find(data,"BAD TITLE:") - and string.gsub(data,"BAD TITLE:","<bold>BAD TITLE:</bold>") - or data - end -\stopluacode -\stopbuffer - -\startbuffer[pre-xml] -\startxmlsetups pre:demo:initialize - \xmlsetsetup{#1}{*}{pre:demo:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{pre:demo}{pre:demo:initialize} - -\startxmlsetups pre:demo:root - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups pre:demo:bold - \begingroup\bf\xmlflush{#1}\endgroup -\stopxmlsetups - -\starttext - \xmlprocessbuffer{pre:demo}{demo}{} -\stoptext -\stopbuffer - -Say that you have the following \XML\ setup: - -\typebuffer[pre-xml] - -and that (such things happen) the input looks like this: - -\startbuffer[demo] -<root> -BAD TITLE: crap crap crap ... - -BAD TITLE: crap crap crap ... -</root> -\stopbuffer - -\typebuffer[demo] - -You can then clean up these \type {BAD TITLE}'s as follows: - -\typebuffer[pre-code] - -and get as result: - -\start \getbuffer[pre-code,pre-xml] \stop - -The preprocessor function gets as second argument the current settings, an d -the field \type {currentresource} can be used to limit the actions to -specific resources, in our case it's \type {buffer: demo}. Afterwards you can -reset the proprocessor with: - -\startluacode -lxml.preprocessor = nil -\stopluacode - -Future versions might give some more control over preprocessors. For now consider -it to be a quick hack. - -\stopsection - -\stopchapter - -\startchapter[title={Lookups using lpaths}] - -\startsection[title={introduction}] - -There is not that much system in the following examples. They resulted from tests -with different documents. The current implementation evolved out of the -experimental code. For instance, I decided to add the multiple expressions in row -handling after a few email exchanges with Jean|-|Michel Huffen. - -One of the main differences between the way \XSLT\ resolves a path and our way is -the anchor. Take: - -\starttyping -/something -something -\stoptyping - -The first one anchors in the current (!) 
element so it will only consider direct -children. The second one does a deep lookup and looks at the descendants as well. -Furthermore we have a few extra shortcuts like \type {**} in \type {a/**/b} which -represents all descendants. - -The expressions (between square brackets) has to be valid \LUA\ and some -preprocessing is done to resolve the built in functions. So, you might use code -like: - -\starttyping -my_lpeg_expression:match(text()) == "whatever" -\stoptyping - -given that \type {my_lpeg_expression} is known. In the examples below we use the -visualizer to show the steps. Some are shown more than once as part of a set. - -\stopsection - -\startsection[title={special cases}] - -\xmllshow{} -\xmllshow{*} -\xmllshow{.} -\xmllshow{/} - -\stopsection - -\startsection[title={wildcards}] - -\xmllshow{*} -\xmllshow{*:*} -\xmllshow{/*} -\xmllshow{/*:*} -\xmllshow{*/*} -\xmllshow{*:*/*:*} - -\xmllshow{a/*} -\xmllshow{a/*:*} -\xmllshow{/a/*} -\xmllshow{/a/*:*} - -\xmllshow{/*} -\xmllshow{/**} -\xmllshow{/***} - -\stopsection - -\startsection[title={multiple steps}] - -\xmllshow{answer} -\xmllshow{answer/test/*} -\xmllshow{answer/test/child::} -\xmllshow{answer/*} -\xmllshow{answer/*[tag()='p' and position()=1 and text()!='']} - -\stopsection - -\startsection[title={pitfals}] - -\xmllshow{[oneof(lower(@encoding),'tex','context','ctx')]} -\xmllshow{.[oneof(lower(@encoding),'tex','context','ctx')]} - -\stopsection - -\startsection[title={more special cases}] - -\xmllshow{**} -\xmllshow{*} -\xmllshow{..} -\xmllshow{.} -\xmllshow{//} -\xmllshow{/} - -\xmllshow{**/} -\xmllshow{**/*} -\xmllshow{**/.} -\xmllshow{**//} - -\xmllshow{*/} -\xmllshow{*/*} -\xmllshow{*/.} -\xmllshow{*//} - -\xmllshow{/**/} -\xmllshow{/**/*} -\xmllshow{/**/.} -\xmllshow{/**//} - -\xmllshow{/*/} -\xmllshow{/*/*} -\xmllshow{/*/.} -\xmllshow{/*//} - -\xmllshow{./} -\xmllshow{./*} -\xmllshow{./.} -\xmllshow{.//} - -\xmllshow{../} -\xmllshow{../*} -\xmllshow{../.} -\xmllshow{..//} - -\stopsection - -\startsection[title={more wildcards}] - -\xmllshow{one//two} -\xmllshow{one/*/two} -\xmllshow{one/**/two} -\xmllshow{one/***/two} -\xmllshow{one/x//two} -\xmllshow{one//x/two} -\xmllshow{//x/two} - -\stopsection - -\startsection[title={special axis}] - -\xmllshow{descendant::whocares/ancestor::whoknows} -\xmllshow{descendant::whocares/ancestor::whoknows/parent::} -\xmllshow{descendant::whocares/ancestor::} -\xmllshow{child::something/child::whatever/child::whocares} -\xmllshow{child::something/child::whatever/child::whocares|whoknows} -\xmllshow{child::something/child::whatever/child::(whocares|whoknows)} -\xmllshow{child::something/child::whatever/child::!(whocares|whoknows)} -\xmllshow{child::something/child::whatever/child::(whocares)} -\xmllshow{child::something/child::whatever/child::(whocares)[position()>2]} -\xmllshow{child::something/child::whatever[position()>2][position()=1]} -\xmllshow{child::something/child::whatever[whocares][whocaresnot]} -\xmllshow{child::something/child::whatever[whocares][not(whocaresnot)]} -\xmllshow{child::something/child::whatever/self::whatever} - -There is also \type {last-match::} that starts with the last found set of nodes. -This can save some run time when you do lots of tests combined with a same check -afterwards. There is however one pitfall: you never know what is done with that -last match in the setup that gets called nested. 
Take the following example: - -\starttyping -\startbuffer[test] -<something> - <crap> <crapa> <crapb> <crapc> <crapd> - <crape> - done 1 - </crape> - </crapd> </crapc> </crapb> </crapa> - <crap> <crapa> <crapb> <crapc> <crapd> - <crape> - done 2 - </crape> - </crapd> </crapc> </crapb> </crapa> - <crap> <crapa> <crapb> <crapc> <crapd> - <crape> - done 3 - </crape> - </crapd> </crapc> </crapb> </crapa> -</something> -\stopbuffer -\stoptyping - -One way to filter the content is this: - -\starttyping -\xmldoif {#1} {/crap/crapa/crapb/crapc/crapd/crape} { - some action -} -\stoptyping - -It is not unlikely that you will do something like this: - -\starttyping -\xmlfirst {#1} {/crap/crapa/crapb/crapc/crapd/crape} { - \xmlfirst{#1}{/crap/crapa/crapb/crapc/crapd/crape} -} -\stoptyping - -This means that the path is resolved twice but that can be avoided as -follows: - -\starttyping -\xmldoif{#1}{/crap/crapa/crapb/crapc/crapd/crape}{ - \xmlfirst{#1}{last-match::} -} -\stoptyping - -But the next is now guaranteed to work: - -\starttyping -\xmldoif{#1}{/crap/crapa/crapb/crapc/crapd/crape}{ - \xmlfirst{#1}{last-match::} - \xmllast{#1}{last-match::} -} -\stoptyping - -Because the first one can have done some lookup the last match can be replaced -and the second call will give unexpected results. You can overcome this with: - -\starttyping -\xmldoif{#1}{/crap/crapa/crapb/crapc/crapd/crape}{ - \xmlpushmatch - \xmlfirst{#1}{last-match::} - \xmlpopmatch -} -\stoptyping - -Does it pay off? Here are some timings of a 10.000 times text and lookup -like the previous (on a decent Januari 2016 laptop): - -\starttabulate[|r|l|] -\NC 0.239 \NC \type {\xmldoif {...} {...}} \NC \NR -\NC 0.292 \NC \type {\xmlfirst {...} {...}} \NC \NR -\NC 0.538 \NC \type {\xmldoif {...} {...} + \xmlfirst {...} {...}} \NC \NR -\NC 0.338 \NC \type {\xmldoif {...} {...} + \xmlfirst {...} {last-match::}} \NC \NR -\NC 0.349 \NC \type {+ \xmldoif {...} {...} + \xmlfirst {...} {last-match::}-} \NC \NR -\stoptabulate - -So, pushing and popping (the last row) is a bit slower than not doing that but it -is still much faster than not using \type {last-match::} at all. 
As a shortcut -you can use \type {=}, as in: - -\starttyping -\xmlfirst{#1}{=} -\stoptyping - -You can even do this: - -\starttyping -\xmlall{#1}{last-match::/text()} -\stoptyping - -or - -\starttyping -\xmlall{#1}{=/text()} -\stoptyping - - -\stopsection - -\startsection[title={some more examples}] - -\xmllshow{/something/whatever} -\xmllshow{something/whatever} -\xmllshow{/**/whocares} -\xmllshow{whoknows/whocares} -\xmllshow{whoknows} -\xmllshow{whocares[contains(text(),'f') or contains(text(),'g')]} -\xmllshow{whocares/first()} -\xmllshow{whocares/last()} -\xmllshow{whatever/all()} -\xmllshow{whocares/position(2)} -\xmllshow{whocares/position(-2)} -\xmllshow{whocares[1]} -\xmllshow{whocares[-1]} -\xmllshow{whocares[2]} -\xmllshow{whocares[-2]} -\xmllshow{whatever[3]/attribute(id)} -\xmllshow{whatever[2]/attribute('id')} -\xmllshow{whatever[3]/text()} -\xmllshow{/whocares/first()} -\xmllshow{/whocares/last()} - -\xmllshow{xml://whatever/all()} -\xmllshow{whatever/all()} -\xmllshow{//whocares} -\xmllshow{..[2]} -\xmllshow{../*[2]} - -\xmllshow{/(whocares|whocaresnot)} -\xmllshow{/!(whocares|whocaresnot)} -\xmllshow{/!whocares} - -\xmllshow{/interface/command/command(xml:setups:register)} -\xmllshow{/interface/command[@name='xxx']/command(xml:setups:typeset)} -\xmllshow{/arguments/*} -\xmllshow{/sequence/first()} -\xmllshow{/arguments/text()} -\xmllshow{/sequence/variable/first()} -\xmllshow{/interface/define[@name='xxx']/first()} -\xmllshow{/parameter/command(xml:setups:parameter:measure)} - -\xmllshow{/(*:library|figurelibrary)/*:figure/*:label} -\xmllshow{/(*:library|figurelibrary)/figure/*:label} -\xmllshow{/(*:library|figurelibrary)/figure/label} -\xmllshow{/(*:library|figurelibrary)/figure:*/label} - -\xmlshow {whatever//br[tag(1)='br']} - -\stopsection - -\stopchapter - -\startchapter[title=Examples] - -\startsection[title=attribute chains] - -In \CSS, when an attribute is not present, the parent element is checked, and when -not found again, the lookup follows the chain till a match is found or the root is -reached. The following example demonstrates how such a chain lookup works. - -\startbuffer[test] -<something mine="1" test="one" more="alpha"> - <whatever mine="2" test="two"> - <whocares mine="3"> - <!-- this is a test --> - </whocares> - </whatever> -</something> -\stopbuffer - -\typebuffer[test] - -We apply the following setups to this tree: - -\startbuffer[setups] -\startxmlsetups xml:common - [ - \xmlchainatt{#1}{mine}, - \xmlchainatt{#1}{test}, - \xmlchainatt{#1}{more}, - \xmlchainatt{#1}{none} - ]\par -\stopxmlsetups - -\startxmlsetups xml:something - something: \xmlsetup{#1}{xml:common} - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:whatever - whatever: \xmlsetup{#1}{xml:common} - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:whocares - whocares: \xmlsetup{#1}{xml:common} - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:mysetups - \xmlsetsetup{#1}{something|whatever|whocares}{xml:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{example-1}{xml:mysetups} - -\xmlprocessbuffer{example-1}{test}{} -\stopbuffer - -\typebuffer[setups] - -This gives: - -\start - \getbuffer[setups] -\stop - -\stopsection - -\startsection[title=conditional setups] - -Say that we have this code: - -\starttyping -\xmldoifelse {#1} {/what[@a='1']} { - \xmlfilter {#1} {/what/command('xml:yes')} -} { - \xmlfilter {#1} {/what/command('xml:nop')} -} -\stoptyping - -Here we first determine if there is a child \type {what} with attribute \type {a} -set to \type {1}. 
Depending on the outcome again we check the child nodes for -being named \type {what}. A faster solution which also takes less code is this: - -\starttyping -\xmlfilter {#1} {/what[@a='1']/command('xml:yes','xml:nop')} -\stoptyping - -\stopsection - -\startsection[title=manipulating] - -Assume that we have the following \XML\ data: - -\startbuffer[test] -<A> - <B>right</B> - <B>wrong</B> -</A> -\stopbuffer - -\typebuffer[test] - -But, instead of \type {right} we want to see \type {okay}. We can do that with a -finalizer: - -\startbuffer -\startluacode -local rehash = { - ["right"] = "okay", -} - -function xml.finalizers.tex.Okayed(collected,what) - for i=1,#collected do - if what == "all" then - local str = xml.text(collected[i]) - context(rehash[str] or str) - else - context(str) - end - end -end -\stopluacode -\stopbuffer - -\typebuffer \getbuffer - -\startbuffer -\startxmlsetups xml:A - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:B - (It's \xmlfilter{#1}{./Okayed("all")}) -\stopxmlsetups - -\startxmlsetups xml:testsetups - \xmlsetsetup{#1}{A|B}{xml:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{example-2}{xml:testsetups} -\xmlprocessbuffer{example-2}{test}{} -\stopbuffer - -\typebuffer - -The result is: \start \inlinebuffer \stop - -\stopsection - -\startsection[title=cross referencing] - -A rather common way to add cross references to \XML\ files is to borrow the -asymmetrical id's from \HTML. This means that one cannot simply use a value -of (say) \type {href} to locate an \type {id}. The next example came up on -the \CONTEXT\ mailing list. - -\startbuffer[test] -<doc> - <p>Text - <a href="#fn1" class="footnoteref" id="fnref1"><sup>1</sup></a> and - <a href="#fn2" class="footnoteref" id="fnref2"><sup>2</sup></a> - </p> - <div class="footnotes"> - <hr /> - <ol> - <li id="fn1"><p>A footnote.<a href="#fnref1">↩</a></p></li> - <li id="fn2"><p>A second footnote.<a href="#fnref2">↩</a></p></li> - </ol> - </div> -</doc> -\stopbuffer - -\typebuffer[test] - -We give two variants for dealing with such references. The first solution does -lookups and depending on the size of the file can be somewhat inefficient. - -\startbuffer -\startxmlsetups xml:doc - \blank - \xmlflush{#1} - \blank -\stopxmlsetups - -\startxmlsetups xml:p - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:footnote - (variant 1)\footnote - {\xmlfirst - {example-3-1} - {div[@class='footnotes']/ol/li[@id='\xmlrefatt{#1}{href}']}} -\stopxmlsetups - -\startxmlsetups xml:initialize - \xmlsetsetup{#1}{p|doc}{xml:*} - \xmlsetsetup{#1}{a[@class='footnoteref']}{xml:footnote} - \xmlsetsetup{#1}{div[@class='footnotes']}{xml:nothing} -\stopxmlsetups - -\xmlresetdocumentsetups{*} -\xmlregisterdocumentsetup{example-3-1}{xml:initialize} - -\xmlprocessbuffer{example-3-1}{test}{} -\stopbuffer - -\typebuffer - -This will typeset two footnotes. - -\getbuffer - -The second variant collects the references so that the time spend on lookups is -less. 
- -\startbuffer -\startxmlsetups xml:doc - \blank - \xmlflush{#1} - \blank -\stopxmlsetups - -\startxmlsetups xml:p - \xmlflush{#1} -\stopxmlsetups - -\startluacode - userdata.notes = {} -\stopluacode - -\startxmlsetups xml:collectnotes - \ctxlua{userdata.notes['\xmlrefatt{#1}{id}'] = '#1'} -\stopxmlsetups - -\startxmlsetups xml:footnote - (variant 2)\footnote - {\xmlflush - {\cldcontext{userdata.notes['\xmlrefatt{#1}{href}']}}} -\stopxmlsetups - -\startxmlsetups xml:initialize - \xmlsetsetup{#1}{p|doc}{xml:*} - \xmlsetsetup{#1}{a[@class='footnoteref']}{xml:footnote} - \xmlfilter{#1}{div[@class='footnotes']/ol/li/command(xml:collectnotes)} - \xmlsetsetup{#1}{div[@class='footnotes']}{} -\stopxmlsetups - -\xmlregisterdocumentsetup{example-3-2}{xml:initialize} - -\xmlprocessbuffer{example-3-2}{test}{} -\stopbuffer - -\typebuffer - -This will again typeset two footnotes: - -\getbuffer - -\stopsection - -\startsection[title=mapping values] - -One way to process options \type {frame} in the example below is to map the -values to values known by \CONTEXT. - -\startbuffer[test] -<a> - <nattable frame="on"> - <tr><td>#1</td><td>#2</td><td>#3</td><td>#4</td></tr> - <tr><td>#5</td><td>#6</td><td>#7</td><td>#8</td></tr> - </nattable> - <nattable frame="off"> - <tr><td>#1</td><td>#2</td><td>#3</td><td>#4</td></tr> - <tr><td>#5</td><td>#6</td><td>#7</td><td>#8</td></tr> - </nattable> - <nattable frame="no"> - <tr><td>#1</td><td>#2</td><td>#3</td><td>#4</td></tr> - <tr><td>#5</td><td>#6</td><td>#7</td><td>#8</td></tr> - </nattable> -</a> -\stopbuffer - -\typebuffer[test] - -\startbuffer -\startxmlsetups xml:a - \xmlflush{#1} -\stopxmlsetups - -\xmlmapvalue {nattable:frame} {on} {on} -\xmlmapvalue {nattable:frame} {yes} {on} -\xmlmapvalue {nattable:frame} {off} {off} -\xmlmapvalue {nattable:frame} {no} {off} - -\startxmlsetups xml:nattable - \startplacetable[title=#1] - \setupTABLE[frame=\xmlval{nattable:frame}{\xmlatt{#1}{frame}}{on}]% - \bTABLE - \xmlflush{#1} - \eTABLE - \stopplacetable -\stopxmlsetups - -\startxmlsetups xml:tr - \bTR - \xmlflush{#1} - \eTR -\stopxmlsetups - -\startxmlsetups xml:td - \bTD - \xmlflush{#1} - \eTD -\stopxmlsetups - -\startxmlsetups xml:testsetups - \xmlsetsetup{example-4}{a|nattable|tr|td|}{xml:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{example-4}{xml:testsetups} - -\xmlprocessbuffer{example-4}{test}{} -\stopbuffer - -The \type {\xmlmapvalue} mechanism is rather efficient and involves a minimum -of testing. - -\typebuffer - -We get: - -\getbuffer - -\stopsection - -\startsection[title=using \LUA] - -In this example we demonstrate how you can delegate rendering to \LUA. We -will construct a so called extreme table. The input is: - -\startbuffer[demo] -<?xml version="1.0" encoding="utf-8"?> - -<a> - <b> <c>1</c> <d>Text</d> </b> - <b> <c>2</c> <d>More text</d> </b> - <b> <c>2</c> <d>Even more text</d> </b> - <b> <c>2</c> <d>And more</d> </b> - <b> <c>3</c> <d>And even more</d> </b> - <b> <c>2</c> <d>The last text</d> </b> -</a> -\stopbuffer - -\typebuffer[demo] - -The processor code is: - -\startbuffer[process] -\startxmlsetups xml:test_setups - \xmlsetsetup{#1}{a|b|c|d}{xml:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{example-5}{xml:test_setups} - -\xmlprocessbuffer{example-5}{demo}{} -\stopbuffer - -\typebuffer - -We color a sequence of the same titles (numbers here) differently. 
The first -solution remembers the last title: - -\startbuffer -\startxmlsetups xml:a - \startembeddedxtable - \xmlflush{#1} - \stopembeddedxtable -\stopxmlsetups - -\startxmlsetups xml:b - \xmlfunction{#1}{test_ba} -\stopxmlsetups - -\startluacode -local lasttitle = nil - -function xml.functions.test_ba(t) - local title = xml.text(t, "/c") - local content = xml.text(t, "/d") - context.startxrow() - context.startxcell { - background = "color", - backgroundcolor = lasttitle == title and "colorone" or "colortwo", - foregroundstyle = "bold", - foregroundcolor = "white", - } - context(title) - lasttitle = title - context.stopxcell() - context.startxcell() - context(content) - context.stopxcell() - context.stopxrow() -end -\stopluacode -\stopbuffer - -\typebuffer \getbuffer - -The \type {embeddedxtable} environment is needed because the table is picked up -as argument. - -\startlinecorrection \getbuffer[process] \stoplinecorrection - -The second implemetation remembers what titles are already processed so here we -can color the last one too. - -\startbuffer -\startxmlsetups xml:a - \ctxlua{xml.functions.reset_bb()} - \startembeddedxtable - \xmlflush{#1} - \stopembeddedxtable -\stopxmlsetups - -\startxmlsetups xml:b - \xmlfunction{#1}{test_bb} -\stopxmlsetups - -\startluacode -local titles - -function xml.functions.reset_bb(t) - titles = { } -end - -function xml.functions.test_bb(t) - local title = xml.text(t, "/c") - local content = xml.text(t, "/d") - context.startxrow() - context.startxcell { - background = "color", - backgroundcolor = titles[title] and "colorone" or "colortwo", - foregroundstyle = "bold", - foregroundcolor = "white", - } - context(title) - titles[title] = true - context.stopxcell() - context.startxcell() - context(content) - context.stopxcell() - context.stopxrow() -end -\stopluacode -\stopbuffer - -\typebuffer \getbuffer - -\startlinecorrection \getbuffer[process] \stoplinecorrection - -A solution without any state variable is given below. - -\startbuffer -\startxmlsetups xml:a - \startembeddedxtable - \xmlflush{#1} - \stopembeddedxtable -\stopxmlsetups - -\startxmlsetups xml:b - \xmlfunction{#1}{test_bc} -\stopxmlsetups - -\startluacode -function xml.functions.test_bc(t) - local title = xml.text(t, "/c") - local content = xml.text(t, "/d") - context.startxrow() - local okay = xml.text(t,"./preceding-sibling::/[-1]") == title - context.startxcell { - background = "color", - backgroundcolor = okay and "colorone" or "colortwo", - foregroundstyle = "bold", - foregroundcolor = "white", - } - context(title) - context.stopxcell() - context.startxcell() - context(content) - context.stopxcell() - context.stopxrow() -end -\stopluacode -\stopbuffer - -\typebuffer \getbuffer - -\startlinecorrection \getbuffer[process] \stoplinecorrection - -Here is a solution that delegates even more to \LUA. The previous variants were -actually not that safe with repect to special characters and didn't handle -nested elements either but the next one does. - -\startbuffer[demo] -<?xml version="1.0" encoding="utf-8"?> - -<a> - <b> <c>#1</c> <d>Text</d> </b> - <b> <c>#2</c> <d>More text</d> </b> - <b> <c>#2</c> <d>Even more text</d> </b> - <b> <c>#2</c> <d>And more</d> </b> - <b> <c>#3</c> <d>And even more</d> </b> - <b> <c>#2</c> <d>Something <i>nested</i> </d> </b> -</a> -\stopbuffer - -\typebuffer[demo] - -We also need to map the \type {i} element. 
- -\startbuffer -\startxmlsetups xml:a - \starttexcode - \xmlfunction{#1}{test_a} - \stoptexcode -\stopxmlsetups - -\startxmlsetups xml:c - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:d - \xmlflush{#1} -\stopxmlsetups - -\startxmlsetups xml:i - {\em\xmlflush{#1}} -\stopxmlsetups - -\startluacode -function xml.functions.test_a(t) - context.startxtable() - local previous = false - for b in xml.collected(lxml.getid(t),"/b") do - context.startxrow() - local current = xml.text(b,"/c") - context.startxcell { - background = "color", - backgroundcolor = (previous == current) and "colorone" or "colortwo", - foregroundstyle = "bold", - foregroundcolor = "white", - } - lxml.first(b,"/c") - context.stopxcell() - context.startxcell() - lxml.first(b,"/d") - context.stopxcell() - previous = current - context.stopxrow() - end - context.stopxtable() -end -\stopluacode - -\startxmlsetups xml:test_setups - \xmlsetsetup{#1}{a|b|c|d|i}{xml:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{example-5}{xml:test_setups} - -\xmlprocessbuffer{example-5}{demo}{} -\stopbuffer - -\typebuffer - -\startlinecorrection \getbuffer \stoplinecorrection - -The question is, do we really need \LUA ? Often we don't, apart maybe from an -occasional special finalizer. A pure \TEX\ solution is given next: - -\startbuffer -\startxmlsetups xml:a - \glet\MyPreviousTitle\empty - \glet\MyCurrentTitle \empty - \startembeddedxtable - \xmlflush{#1} - \stopembeddedxtable -\stopxmlsetups - -\startxmlsetups xml:b - \startxrow - \xmlflush{#1} - \stopxrow -\stopxmlsetups - -\startxmlsetups xml:c - \xdef\MyCurrentTitle{\xmltext{#1}{.}} - \doifelse {\MyPreviousTitle} {\MyCurrentTitle} { - \startxcell - [background=color, - backgroundcolor=colorone, - foregroundstyle=bold, - foregroundcolor=white] - } { - \glet\MyPreviousTitle\MyCurrentTitle - \startxcell - [background=color, - backgroundcolor=colortwo, - foregroundstyle=bold, - foregroundcolor=white] - } - \xmlflush{#1} - \stopxcell -\stopxmlsetups - -\startxmlsetups xml:d - \startxcell - \xmlflush{#1} - \stopxcell -\stopxmlsetups - -\startxmlsetups xml:i - {\em\xmlflush{#1}} -\stopxmlsetups - -\startxmlsetups xml:test_setups - \xmlsetsetup{#1}{*}{xml:*} -\stopxmlsetups - -\xmlregisterdocumentsetup{example-5}{xml:test_setups} - -\xmlprocessbuffer{example-5}{demo}{} -\stopbuffer - -\typebuffer - -\startlinecorrection \getbuffer \stoplinecorrection - -You can even save a few lines of code: - -\starttyping -\startxmlsetups xml:c - \xdef\MyCurrentTitle{\xmltext{#1}{.}} - \startxcell - [background=color, - backgroundcolor=color\ifx\MyPreviousTitle\MyCurrentTitle one\else two\fi, - foregroundstyle=bold, - foregroundcolor=white] - \xmlflush{#1} - \stopxcell - \glet\MyPreviousTitle\MyCurrentTitle -\stopxmlsetups -\stoptyping - -Or if you prefer: - -\starttyping -\startxmlsetups xml:c - \xdef\MyCurrentTitle{\xmltext{#1}{.}} - \doifelse {\MyPreviousTitle} {\MyCurrentTitle} { - \xmlsetup{#1}{xml:c:one} - } { - \xmlsetup{#1}{xml:c:two} - } -\stopxmlsetups - -\startxmlsetups xml:c:one - \startxcell - [background=color, - backgroundcolor=colorone, - foregroundstyle=bold, - foregroundcolor=white] - \xmlflush{#1} - \stopxcell -\stopxmlsetups - -\startxmlsetups xml:c:two - \startxcell - [background=color, - backgroundcolor=colortwo, - foregroundstyle=bold, - foregroundcolor=white] - \xmlflush{#1} - \stopxcell - \global\let\MyPreviousTitle\MyCurrentTitle -\stopxmlsetups -\stoptyping - -These examples demonstrate that it doesn't hurt to know a little bit of \TEX\ -programming: defining macros and basic 
comparisons can come in handy. There are
-examples in the test suite, you can peek in the source code, you can consult
-the wiki, or you can just ask on the list.
-
-\stopsection
-
-\startsection[title=Last match]
-
-For the next example we use the following \XML\ input:
-
-\startbuffer[demo]
-<?xml version="1.0"?>
-<document>
-    <section id="1">
-        <content>
-            <p>first</p>
-            <p>second</p>
-        </content>
-    </section>
-    <section id="2">
-        <content>
-            <p>third</p>
-            <p>fourth</p>
-        </content>
-    </section>
-</document>
-\stopbuffer
-
-\typebuffer[demo]
-
-If you check whether some element is present and then act accordingly, you can
-end up doing the same lookup twice. Although that might sound inefficient, in
-practice the overhead is often not measurable.
-
-\startbuffer
-\startxmlsetups xml:demo:document
-    \type{\xmlall{#1}{/section[@id='2']/content/p}}\par
-    \xmldoif{#1}{/section[@id='2']/content/p} {
-        \xmlall{#1}{/section[@id='2']/content/p}
-    }
-    \type{\xmllastmatch}\par
-    \xmldoif{#1}{/section[@id='2']/content/p} {
-        \xmllastmatch
-    }
-    \type{\xmlall{#1}{last-match::}}\par
-    \xmldoif{#1}{/section[@id='2']/content/p} {
-        \xmlall{#1}{last-match::}
-    }
-    \type{\xmlfilter{#1}{last-match::/command(xml:demo:p)}}\par
-    \xmldoif{#1}{/section[@id='2']/content/p} {
-        \xmlfilter{#1}{last-match::/command(xml:demo:p)}
-    }
-\stopxmlsetups
-
-\startxmlsetups xml:demo:p
-    \quad\xmlflush{#1}\endgraf
-\stopxmlsetups
-
-\startxmlsetups xml:demo:base
-    \xmlsetsetup{#1}{document|p}{xml:demo:*}
-\stopxmlsetups
-
-\xmlregisterdocumentsetup{example-6}{xml:demo:base}
-
-\xmlprocessbuffer{example-6}{demo}{}
-\stopbuffer
-
-\typebuffer
-
-In the second check we just flush the last match, so effectively we do an \type
-{\xmlall} here. The third and fourth alternatives demonstrate how we can use
-\type {last-match} as an axis. The gain is 10\% or more on the lookup, but of
-course typesetting often takes more time than the lookup itself.
-
-\startpacked
-\getbuffer
-\stoppacked
-
-\stopsection
-
-\startsection[title=Finalizers]
-
-The \XML\ parser is also available outside \TEX. Here is an example of its
-usage: we pipe the result to \TEX, but you can do whatever you like with
-\type {t}.
-
-\startbuffer
-local x = xml.load("manual-demo-1.xml")
-local t = { }
-
-for c in xml.collected(x,"//*") do
-    if not c.special and not t[c.tg] then
-        t[c.tg] = true
-    end
-end
-
-context.tocontext(table.sortedkeys(t))
-\stopbuffer
-
-\typebuffer
-
-This returns:
-
-\ctxluabuffer
-
-We can wrap this in a finalizer:
-
-\startbuffer
-xml.finalizers.taglist = function(collected)
-    local t = { }
-    for i=1,#collected do
-        local c = collected[i]
-        if not c.special then
-            local tg = c.tg
-            if tg and not t[tg] then
-                t[tg] = true
-            end
-        end
-    end
-    return table.sortedkeys(t)
-end
-\stopbuffer
-
-\typebuffer
-
-Or in a more extensive one:
-
-\startbuffer
-xml.finalizers.taglist = function(collected,parenttoo)
-    local t = { }
-    for i=1,#collected do
-        local c = collected[i]
-        if not c.special then
-            local tg = c.tg
-            if tg and not t[tg] then
-                t[tg] = true
-            end
-            if parenttoo then
-                local p = c.__p__
-                if p and not p.special then
-                    local tg = p.tg .. ":" .. tg
-                    if tg and not t[tg] then
-                        t[tg] = true
-                    end
-                end
-            end
-        end
-    end
-    return table.sortedkeys(t)
-end
-\stopbuffer
-
-\typebuffer \ctxluabuffer
-
-Usage is as follows:
-
-\startbuffer
-local x = xml.load("manual-demo-1.xml")
-local t = xml.applylpath(x,"//*/taglist()")
-
-context.tocontext(t)
-\stopbuffer
-
-\typebuffer
-
-And indeed we get:
-
-\ctxluabuffer
-
-But we can also say:
-
-\startbuffer
-local x = xml.load("manual-demo-1.xml")
-local t = xml.applylpath(x,"//*/taglist(true)")
-
-context.tocontext(t)
-\stopbuffer
-
-\typebuffer
-
-Now we get:
-
-\ctxluabuffer
-
-\stopsection
-
-\startsection[title=Pure xml]
-
-One might wonder what a \TEX\ macro package would look like if backslashes,
-dollars and percent signs had no special meaning. In fact, it would be rather
-useless, as the interpretation of commands is triggered by such characters. Any
-formatting or coding system needs such characters. Take \XML: angle brackets
-and ampersands are really special. So, no matter what system we use, we do have
-to deal with the (common) case where these characters need to be seen as they
-are. Normally escaping is the solution.
-
-The \CONTEXT\ interface for \XML\ suffers from this as well. You really don't
-want to know how many tricks are used for dealing with special characters and
-entities: there are several ways these travel through the system and it is
-possible to adapt and cheat. Especially roundtripped data (via the tuc file)
-puts some demands on the system, because then \XML\ can become \TEX\ and vice
-versa. The next example (derived from a mail on the list) demonstrates this:
-
-\starttyping
-\startbuffer[demo]
-<doc>
-  <pre><code>\ConTeXt\ is great</code></pre>
-
-  <pre><code>but you need to know some tricks</code></pre>
-</doc>
-\stopbuffer
-
-\startxmlsetups xml:initialize
-    \xmlsetsetup{#1}{doc|p|code}{xml:*}
-    \xmlsetsetup{#1}{pre/code}{xml:pre:code}
-\stopxmlsetups
-
-\xmlregistersetup{xml:initialize}
-
-\startxmlsetups xml:doc
-    \xmlflush{#1}
-\stopxmlsetups
-
-\startxmlsetups xml:pre:code
-    no solution
-    \comment[symbol=Key, location=inmargin,color=yellow]{\xmlflush{#1}}
-    \par
-    solution one \begingroup
-        \expandUx
-        \comment[symbol=Key, location=inmargin,color=yellow]{\xmlflush{#1}}
-    \endgroup
-    \par
-    solution two
-    \comment[symbol=Key, location=inmargin,color=yellow]{\xmlpure{#1}}
-    \par
-    \xmlprettyprint{#1}{tex}
-\stopxmlsetups
-
-\xmlprocessbuffer{main}{demo}{}
-\stoptyping
-
-The first comment (an interactive feature of \PDF) comes out as:
-
-\starttyping
-\Ux {5C}ConTeXt\Ux {5C} is great
-\stoptyping
-
-The second and third comments are okay. It's one of the reasons why we have
-\type {\xmlpure}.
-
-\stopsection
-
-\stopchapter
-
+    \component xml-mkiv-converter
+    \component xml-mkiv-filtering
+    \component xml-mkiv-commands
+    \component xml-mkiv-expressions
+    \component xml-mkiv-tricks
+    \component xml-mkiv-lookups
+    \component xml-mkiv-examples
 \stopbodymatter
 \stoptext
diff --git a/Master/texmf-dist/doc/context/sources/general/manuals/xtables/xtables-mkiv.tex b/Master/texmf-dist/doc/context/sources/general/manuals/xtables/xtables-mkiv.tex
index 827ad3fcc16..1b878e5cbae 100644
--- a/Master/texmf-dist/doc/context/sources/general/manuals/xtables/xtables-mkiv.tex
+++ b/Master/texmf-dist/doc/context/sources/general/manuals/xtables/xtables-mkiv.tex
@@ -1250,6 +1250,101 @@ you're lucky.
 
 \stopsection
 
+\startsection[title={Swapping}]
+
+The next two examples demonstrate a feature added for Taco, who wanted to enter
+columns instead of rows.
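+In its barest form the idea is simply that the grouping between the table and
+the cells is a column instead of a row. The following skeleton strips the demos
+below down to that structure; it leaves out the optional column setups used
+there, assuming \type {\startxcolumn} is also happy without an argument:
+
+\starttyping
+\startxtable
+    \startxcolumn
+        \startxcell one   \stopxcell
+        \startxcell two   \stopxcell
+    \stopxcolumn
+    \startxcolumn
+        \startxcell three \stopxcell
+        \startxcell four  \stopxcell
+    \stopxcolumn
+\stopxtable
+\stoptyping
+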
It's a bit of a hack but it works okay for +simple cases. We assume these settings: + +\startbuffer[settings] +\setupxtable[one] [foregroundcolor=darkred] +\setupxtable[two] [foregroundcolor=darkgreen] +\setupxtable[three][foregroundcolor=darkblue] +\setupxtable[four] [foregroundcolor=darkorange] +\stopbuffer + +\startbuffer[demo-1] +\startxtable[frame=on] + \startxcolumn[one] + \startxcell A1 \stopxcell + \startxcell B1 \stopxcell + \startxcell C1 \stopxcell + \startxcell D1 \stopxcell + \startxcell E1 \stopxcell + \stopxcolumn + \startxcolumn[two] + \startxcell A2 \stopxcell + \startxcell B2 \stopxcell + \startxcell C2 \stopxcell + \startxcell D2 \stopxcell + \startxcell E2 \stopxcell + \stopxcolumn + \startxcolumn[three] + \startxcell A3 \stopxcell + \startxcell B3 \stopxcell + \startxcell C3 \stopxcell + \startxcell D3 \stopxcell + \startxcell E3 \stopxcell + \stopxcolumn + \startxcolumn[four] + \startxcell A4 \stopxcell + \startxcell B4 \stopxcell + \startxcell C4 \stopxcell + \startxcell D4 \stopxcell + \startxcell E4 \stopxcell + \stopxcolumn +\stopxtable +\stopbuffer + +\startbuffer[demo-2] +\startxtable[frame=on] + \startxcolumn[one] + \startxcell A \stopxcell + \startxcell B \stopxcell + \startxcell C \stopxcell + \startxcell D \stopxcell + \startxcell E \stopxcell + \stopxcolumn + \startxcolumn[two] + \startxcell A \stopxcell + \startxcell[ny=2] BC \stopxcell + \startxcell[ny=2] DE \stopxcell + \stopxcolumn + \startxcolumn[three] + \startxcell A \stopxcell + \startxcell[ny=4] BCDE \stopxcell + \stopxcolumn + \startxcolumn[four] + \startxcell A \stopxcell + \startxcell B \stopxcell + \startxcell[ny=2] CD \stopxcell + \startxcell E \stopxcell + \stopxcolumn +\stopxtable +\stopbuffer + +\typebuffer[settings] + +The first example has no spans: + +\typebuffer[demo-1] + +while the second one has: + +\typebuffer[demo-2] + +The results are shown in \in {figure} [fig:swapped]. + +\startplacefigure[title={Entering columns instead of rows.},reference=fig:swapped] + \getbuffer[settings] + \startcombination + {\getbuffer[demo-1]} {no spans} + {\getbuffer[demo-2]} {some spans} + \stopcombination +\stopplacefigure + +\stopsection + \startsection[title={Examples}] On the following pages we show some examples of (experimental) features. For this |