From f603a24fcd51ec179af642ad8599848e06b0ce27 Mon Sep 17 00:00:00 2001
From: Peter Breitenlohner
Date: Thu, 2 Apr 2009 09:28:59 +0000
Subject: reorganize linked scripts

git-svn-id: svn://tug.org/texlive/trunk@12598 c570f23f-e606-0410-a88d-b1316a301751
---
 .../source/texk/texlive/linked_scripts/TeXcount.pl | 1286 ---
 .../texk/texlive/linked_scripts/bengali/ebong.py | 346 +
 .../source/texk/texlive/linked_scripts/context.sh | 3 -
 .../linked_scripts/context/lua/luatools.lua | 6659 +++++++++++++++
 .../texlive/linked_scripts/context/lua/mtxrun.lua | 8552 ++++++++++++++++++++
 .../linked_scripts/context/ruby/texmfstart.rb | 2501 ++++++
 .../linked_scripts/context/stubs/unix/context | 3 +
 .../linked_scripts/context/stubs/unix/ctxtools | 2 +
 .../linked_scripts/context/stubs/unix/exatools | 2 +
 .../linked_scripts/context/stubs/unix/makempy | 2 +
 .../linked_scripts/context/stubs/unix/mpstools | 2 +
 .../linked_scripts/context/stubs/unix/mptopdf | 2 +
 .../linked_scripts/context/stubs/unix/mtxtools | 2 +
 .../linked_scripts/context/stubs/unix/pdftools | 2 +
 .../linked_scripts/context/stubs/unix/pdftrimwhite | 2 +
 .../linked_scripts/context/stubs/unix/pstopdf | 2 +
 .../linked_scripts/context/stubs/unix/rlxtools | 2 +
 .../linked_scripts/context/stubs/unix/runtools | 2 +
 .../linked_scripts/context/stubs/unix/texexec | 2 +
 .../linked_scripts/context/stubs/unix/texfind | 2 +
 .../linked_scripts/context/stubs/unix/texfont | 2 +
 .../linked_scripts/context/stubs/unix/texshow | 2 +
 .../linked_scripts/context/stubs/unix/textools | 2 +
 .../linked_scripts/context/stubs/unix/texutil | 2 +
 .../linked_scripts/context/stubs/unix/tmftools | 2 +
 .../linked_scripts/context/stubs/unix/xmltools | 2 +
 Build/source/texk/texlive/linked_scripts/ctxtools | 2 -
 Build/source/texk/texlive/linked_scripts/dviasm.py | 960 ---
 .../texk/texlive/linked_scripts/dviasm/dviasm.py | 960 +++
 Build/source/texk/texlive/linked_scripts/ebong.py | 346 -
 Build/source/texk/texlive/linked_scripts/epspdf | 3 -
 Build/source/texk/texlive/linked_scripts/epspdf.x | 3 +
 Build/source/texk/texlive/linked_scripts/exatools | 2 -
 .../texk/texlive/linked_scripts/fragmaster.pl | 259 -
 .../linked_scripts/fragmaster/fragmaster.pl | 259 +
 .../linked_scripts/glossaries/makeglossaries | 293 +
 Build/source/texk/texlive/linked_scripts/latex2man | 1817 ----
 .../source/texk/texlive/linked_scripts/latex2man.x | 1817 ++++
 .../source/texk/texlive/linked_scripts/latexmk.pl | 6031 --------------
 .../texk/texlive/linked_scripts/latexmk/latexmk.pl | 6031 ++++++++++++++
 .../texk/texlive/linked_scripts/luatools.lua | 6659 ---------------
 .../texk/texlive/linked_scripts/makeglossaries | 293 -
 Build/source/texk/texlive/linked_scripts/makempy | 2 -
 .../texk/texlive/linked_scripts/mkjobtexmf.pl | 810 --
 .../linked_scripts/mkjobtexmf/mkjobtexmf.pl | 810 ++
 Build/source/texk/texlive/linked_scripts/mpstools | 2 -
 Build/source/texk/texlive/linked_scripts/mptopdf | 2 -
 .../source/texk/texlive/linked_scripts/mtxrun.lua | 8552 --------------------
 Build/source/texk/texlive/linked_scripts/mtxtools | 2 -
 .../texlive/linked_scripts/oberdiek/pdfatfi.pl | 185 +
 .../linked_scripts/pax/pdfannotextractor.pl | 425 +
 .../texlive/linked_scripts/pdfannotextractor.pl | 425 -
 .../source/texk/texlive/linked_scripts/pdfatfi.pl | 185 -
 .../source/texk/texlive/linked_scripts/pdfcrop.pl | 466 --
 .../texk/texlive/linked_scripts/pdfcrop/pdfcrop.pl | 466 ++
 .../texk/texlive/linked_scripts/pdfthumb.texlua | 44 -
 Build/source/texk/texlive/linked_scripts/pdftools | 2 -
 .../texk/texlive/linked_scripts/pdftrimwhite | 2 -
 Build/source/texk/texlive/linked_scripts/perltex | 476 --
 Build/source/texk/texlive/linked_scripts/perltex.x | 476 ++
 .../texk/texlive/linked_scripts/pkfix-helper | 1219 ---
 .../texk/texlive/linked_scripts/pkfix-helper.x | 1219 +++
 .../texk/texlive/linked_scripts/ppower4.texlua | 43 -
 .../texlive/linked_scripts/ppower4/pdfthumb.texlua | 44 +
 .../texlive/linked_scripts/ppower4/ppower4.texlua | 43 +
 Build/source/texk/texlive/linked_scripts/ps4pdf | 134 -
 .../texk/texlive/linked_scripts/pst-pdf/ps4pdf | 134 +
 .../source/texk/texlive/linked_scripts/pst2pdf.pl | 377 -
 .../texk/texlive/linked_scripts/pst2pdf/pst2pdf.pl | 377 +
 Build/source/texk/texlive/linked_scripts/pstopdf | 2 -
 Build/source/texk/texlive/linked_scripts/purifyeps | 511 --
 .../source/texk/texlive/linked_scripts/purifyeps.x | 511 ++
 Build/source/texk/texlive/linked_scripts/rlxtools | 2 -
 Build/source/texk/texlive/linked_scripts/runtools | 2 -
 .../texk/texlive/linked_scripts/simpdftex.sh | 654 --
 .../texlive/linked_scripts/simpdftex/simpdftex | 654 ++
 .../texk/texlive/linked_scripts/splitindex.pl | 212 -
 .../linked_scripts/splitindex/perl/splitindex.pl | 212 +
 .../texk/texlive/linked_scripts/svn-multi.pl | 374 -
 .../texlive/linked_scripts/svn-multi/svn-multi.pl | 374 +
 .../texlive/linked_scripts/texcount/TeXcount.pl | 1286 +++
 Build/source/texk/texlive/linked_scripts/texexec | 2 -
 Build/source/texk/texlive/linked_scripts/texfind | 2 -
 Build/source/texk/texlive/linked_scripts/texfont | 2 -
 .../texk/texlive/linked_scripts/texmfstart.rb | 2501 ------
 Build/source/texk/texlive/linked_scripts/texshow | 2 -
 Build/source/texk/texlive/linked_scripts/textools | 2 -
 Build/source/texk/texlive/linked_scripts/texutil | 2 -
 .../source/texk/texlive/linked_scripts/thumbpdf.pl | 1565 ----
 .../texlive/linked_scripts/thumbpdf/thumbpdf.pl | 1565 ++++
 Build/source/texk/texlive/linked_scripts/tmftools | 2 -
 Build/source/texk/texlive/linked_scripts/vpe.pl | 421 -
 .../source/texk/texlive/linked_scripts/vpe/vpe.pl | 421 +
 Build/source/texk/texlive/linked_scripts/xmltools | 2 -
 94 files changed, 36664 insertions(+), 36664 deletions(-)
 delete mode 100755 Build/source/texk/texlive/linked_scripts/TeXcount.pl
 create mode 100755 Build/source/texk/texlive/linked_scripts/bengali/ebong.py
 delete mode 100755 Build/source/texk/texlive/linked_scripts/context.sh
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/lua/luatools.lua
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/lua/mtxrun.lua
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/ruby/texmfstart.rb
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/context
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/ctxtools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/exatools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/makempy
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/mpstools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/mptopdf
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/mtxtools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftrimwhite
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/pstopdf
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/rlxtools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/runtools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/texexec
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfind
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfont
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/texshow
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/textools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/texutil
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/tmftools
 create mode 100755 Build/source/texk/texlive/linked_scripts/context/stubs/unix/xmltools
 delete mode 100755 Build/source/texk/texlive/linked_scripts/ctxtools
 delete mode 100755 Build/source/texk/texlive/linked_scripts/dviasm.py
 create mode 100755 Build/source/texk/texlive/linked_scripts/dviasm/dviasm.py
 delete mode 100755 Build/source/texk/texlive/linked_scripts/ebong.py
 delete mode 100755 Build/source/texk/texlive/linked_scripts/epspdf
 create mode 100755 Build/source/texk/texlive/linked_scripts/epspdf.x
 delete mode 100755 Build/source/texk/texlive/linked_scripts/exatools
 delete mode 100755 Build/source/texk/texlive/linked_scripts/fragmaster.pl
 create mode 100755 Build/source/texk/texlive/linked_scripts/fragmaster/fragmaster.pl
 create mode 100755 Build/source/texk/texlive/linked_scripts/glossaries/makeglossaries
 delete mode 100755 Build/source/texk/texlive/linked_scripts/latex2man
 create mode 100755 Build/source/texk/texlive/linked_scripts/latex2man.x
 delete mode 100755 Build/source/texk/texlive/linked_scripts/latexmk.pl
 create mode 100755 Build/source/texk/texlive/linked_scripts/latexmk/latexmk.pl
 delete mode 100755 Build/source/texk/texlive/linked_scripts/luatools.lua
 delete mode 100755 Build/source/texk/texlive/linked_scripts/makeglossaries
 delete mode 100755 Build/source/texk/texlive/linked_scripts/makempy
 delete mode 100755 Build/source/texk/texlive/linked_scripts/mkjobtexmf.pl
 create mode 100755 Build/source/texk/texlive/linked_scripts/mkjobtexmf/mkjobtexmf.pl
 delete mode 100755 Build/source/texk/texlive/linked_scripts/mpstools
 delete mode 100755 Build/source/texk/texlive/linked_scripts/mptopdf
 delete mode 100755 Build/source/texk/texlive/linked_scripts/mtxrun.lua
 delete mode 100755 Build/source/texk/texlive/linked_scripts/mtxtools
 create mode 100755 Build/source/texk/texlive/linked_scripts/oberdiek/pdfatfi.pl
 create mode 100755 Build/source/texk/texlive/linked_scripts/pax/pdfannotextractor.pl
 delete mode 100755 Build/source/texk/texlive/linked_scripts/pdfannotextractor.pl
 delete mode 100755 Build/source/texk/texlive/linked_scripts/pdfatfi.pl
 delete mode 100755 Build/source/texk/texlive/linked_scripts/pdfcrop.pl
 create mode 100755 Build/source/texk/texlive/linked_scripts/pdfcrop/pdfcrop.pl
 delete mode 100755 Build/source/texk/texlive/linked_scripts/pdfthumb.texlua
 delete mode 100755 Build/source/texk/texlive/linked_scripts/pdftools
 delete mode 100755 Build/source/texk/texlive/linked_scripts/pdftrimwhite
 delete mode 100755 Build/source/texk/texlive/linked_scripts/perltex
 create mode 100755 Build/source/texk/texlive/linked_scripts/perltex.x
 delete mode 100755 Build/source/texk/texlive/linked_scripts/pkfix-helper
 create mode 100755 Build/source/texk/texlive/linked_scripts/pkfix-helper.x
 delete mode 100755 Build/source/texk/texlive/linked_scripts/ppower4.texlua
 create mode 100755
Build/source/texk/texlive/linked_scripts/ppower4/pdfthumb.texlua create mode 100755 Build/source/texk/texlive/linked_scripts/ppower4/ppower4.texlua delete mode 100755 Build/source/texk/texlive/linked_scripts/ps4pdf create mode 100755 Build/source/texk/texlive/linked_scripts/pst-pdf/ps4pdf delete mode 100755 Build/source/texk/texlive/linked_scripts/pst2pdf.pl create mode 100755 Build/source/texk/texlive/linked_scripts/pst2pdf/pst2pdf.pl delete mode 100755 Build/source/texk/texlive/linked_scripts/pstopdf delete mode 100755 Build/source/texk/texlive/linked_scripts/purifyeps create mode 100755 Build/source/texk/texlive/linked_scripts/purifyeps.x delete mode 100755 Build/source/texk/texlive/linked_scripts/rlxtools delete mode 100755 Build/source/texk/texlive/linked_scripts/runtools delete mode 100755 Build/source/texk/texlive/linked_scripts/simpdftex.sh create mode 100755 Build/source/texk/texlive/linked_scripts/simpdftex/simpdftex delete mode 100755 Build/source/texk/texlive/linked_scripts/splitindex.pl create mode 100755 Build/source/texk/texlive/linked_scripts/splitindex/perl/splitindex.pl delete mode 100755 Build/source/texk/texlive/linked_scripts/svn-multi.pl create mode 100755 Build/source/texk/texlive/linked_scripts/svn-multi/svn-multi.pl create mode 100755 Build/source/texk/texlive/linked_scripts/texcount/TeXcount.pl delete mode 100755 Build/source/texk/texlive/linked_scripts/texexec delete mode 100755 Build/source/texk/texlive/linked_scripts/texfind delete mode 100755 Build/source/texk/texlive/linked_scripts/texfont delete mode 100755 Build/source/texk/texlive/linked_scripts/texmfstart.rb delete mode 100755 Build/source/texk/texlive/linked_scripts/texshow delete mode 100755 Build/source/texk/texlive/linked_scripts/textools delete mode 100755 Build/source/texk/texlive/linked_scripts/texutil delete mode 100755 Build/source/texk/texlive/linked_scripts/thumbpdf.pl create mode 100755 Build/source/texk/texlive/linked_scripts/thumbpdf/thumbpdf.pl delete mode 100755 Build/source/texk/texlive/linked_scripts/tmftools delete mode 100755 Build/source/texk/texlive/linked_scripts/vpe.pl create mode 100755 Build/source/texk/texlive/linked_scripts/vpe/vpe.pl delete mode 100755 Build/source/texk/texlive/linked_scripts/xmltools (limited to 'Build') diff --git a/Build/source/texk/texlive/linked_scripts/TeXcount.pl b/Build/source/texk/texlive/linked_scripts/TeXcount.pl deleted file mode 100755 index 8b68e4984c0..00000000000 --- a/Build/source/texk/texlive/linked_scripts/TeXcount.pl +++ /dev/null @@ -1,1286 +0,0 @@ -#! /usr/bin/env perl -use strict; -use warnings; -use Term::ANSIColor; -use POSIX qw(locale_h); -use locale; -setlocale(LC_CTYPE,"no_NO"); - -my $versionnumber="2.1"; -my $versiondate="2008 Nov 02"; - -###### Set CMD specific settings and variables - -# Options and states -my $verbose=0; -my $showcodes=1; -my $showstates=0; -my $showsubcounts=0; -my $htmlstyle=0; -my $includeTeX=0; -my $briefsum=0; -my $totalflag=0; -my @sumweights; -my $globalworkdir=""; - -# Global variables -my $blankline=0; -my @filelist; -my $workdir; - -###### Set global settings and variables - -### Macros for headers -# Macros that identify headers: i.e. following token or -# {...} is counted as header. The =>[2] indicates transition to -# state 2 which is used within headers (although the value is -# actually never used). This is copied to %TeXmacro and the -# only role of defining it here is that the counter for the number -# of headers is incremented by one. 
-my %TeXheader=('\title'=>[2],'\part'=>[2],'\chapter'=>[2], - '\section'=>[2],'\subsection'=>[2],'\subsubsection'=>[2], - '\paragraph'=>[2],'\subparagraph'=>[2]); - -### How many tokens to gobble after macro -# Each macro is assumed to gobble up a given number of -# tokens (or {...} groups), as well as options [...] before, within -# and after. The %TeXmacro hash gives a link from a macro -# (or beginNAME for begin-end groups without the backslash) -# to either an integer giving the number of tokens to ignore -# or to an array (specified as [num,num,...]) of length N where -# N is the number of tokens to be read with the macro and the -# array values tell how each is to be interpreted (see the status -# values: 0=ignore, 1=count, etc.). Thus specifying a number N is -# equivalent to specifying an array [0,...,0] of N zeros. -# -# For macros not specified here, the default value is 0: i.e. -# no tokens are excluded, but [...] options are. Header macros -# specified in %TeXheader are automatically included here. -my %TeXmacro=(%TeXheader, - '\documentclass'=>1,'\documentstyle'=>1,'\usepackage'=>1, '\hyphenation'=>1, - '\pagestyle'=>1,'\thispagestyle'=>1, '\pagenumbering'=>1,'\markboth'=>1, '\markright'=>1, - '\newcommand'=>[-3,-3],'\renewcommand'=>[-3,-3], - '\newenvironment'=>[-3,-3,-3], 'renewenvironment'=>[-3,-3,-3], - '\newfont'=>2,'\newtheorem'=>2,'\bibliographystyle'=>1, '\bibliography'=>1, - '\parbox'=>1, '\marginpar'=>[3],'\makebox'=>0, '\raisebox'=>1, '\framebox'=>0, - '\newsavebox'=>1, '\sbox'=>1, '\savebox'=>2, '\usebox'=>1,'\rule'=>2, - '\footnote'=>[3],'\label'=>1, '\ref'=>1, '\pageref'=>1, '\bibitem'=>1, - '\cite'=>1, '\citep'=>1, '\citet'=>1, '\citeauthor'=>1, '\citealt'=>1, '\nocite'=>1, - '\eqlabel'=>1, '\eqref'=>1,'\hspace'=>1, '\vspace'=>1, '\addvspace'=>1, - '\input'=>1, '\include'=>1, '\includeonly'=>1,'\includegraphics'=>1, - '\newlength'=>1, '\setlength'=>2, '\addtolength'=>2,'\settodepth'=>2, - '\settoheight'=>2, '\settowidth'=>2,'\newcounter'=>1, '\setcounter'=>2, - '\addtocounter'=>2,'\stepcounter'=>1, '\refstepcounter'=>1, '\usecounter'=>1, - '\alph'=>1, '\arabic'=>1, '\fnsymbol'=>1, '\roman'=>1, '\value'=>1, - '\cline'=>1, '\multicolumn'=>3,'\typeout'=>1, '\typein'=>1, - 'beginlist'=>2, 'beginminipage'=>1, 'begintabular'=>1, - 'beginthebibliography'=>1,'beginlrbox'=>1, - '\begin'=>1,'\end'=>1,'\title'=>[2]); - -### Macros that should be counted as one or more words -# Macros that represent text may be declared here. The value gives -# the number of words the macro represents. -my %TeXmacroword=('\LaTeX'=>1,'\TeX'=>1); - -### Macros that are counted within the preamble -# The preamble is the text between \documentclass and \begin{document}. -# Text and macros in the preamble is ignored unless specified here. The -# value is the status (1=text, 2=header, etc.) they should be interpreted as. -# Note that only the first unit (token or {...} block) is counted. -my %TeXpreamble=('\title'=>[2], - '\newcommand'=>[-3,-3],'\renewcommand'=>[-3,-3], - '\newenvironment'=>[-3,-3,-3], 'renewenvironment'=>[-3,-3,-3], - ); - -### Begin-End groups -# Identified as begin-end groups, and define =>state. 
The -# states used corresponds to the elements of the count array, and -# are: -# 0: Not included -# 1: Text, word included in text count -# 2: Header, words included in header count -# 3: Float caption, words included in float caption count -# 6: Inline mathematics, words not counted -# 7: Displayed mathematics, words not counted -# -1: Float, not included, but looks for captions -# -# 4 and 5 are used to count number of headers and floats -# and are not used as states. -# -# Groups that are not defined will be counted as the surrounding text. -# -# Note that some environments may only exist within math-mode, and -# therefore need not be defined here: in fact, they should not as it -# is not clear if they will be in inlined or displayed math. -# -my %TeXgroup=('document'=>1,'letter'=>1,'titlepage'=>0, - 'center'=>1,'flushleft'=>1,'flushright'=>1, - 'abstract'=>1,'quote'=>1,'quotation'=>1,'verse'=>1,'minipage'=>1,'verbatim'=>1, - 'description'=>1,'enumerate'=>1,'itemize'=>1,'list'=>1, - 'theorem'=>1,'lemma'=>1,'definition'=>1,'corollary'=>1,'example'=>1, - 'math'=>6,'displaymath'=>7,'equation'=>7,'eqnarray'=>7, - 'figure'=>-1,'float'=>-1,'picture'=>-1,'table'=>-1, - 'tabbing'=>0,'tabular'=>0,'thebibliography'=>0,'lrbox'=>0); - -### In floats: include only specific macros -# Macros used to identify caption text within floats. -my %TeXfloatinc=('\caption'=>[3]); - -### Macros for including tex files -# Allows \macro{file} or \macro file. If the value is 0, the filename will -# be used as is; if it is 1, the filetype .tex will be added if the -# filename is without filetype; if it is 2, the filetype .tex will be added. -my %TeXfileinclude=('\input'=>1,'\include'=>2); - -### Count labels -# Labels used to describe the counts -my @countlabel=('Files','Words in text','Words in headers', - 'Words in float captions','Number of headers','Number of floats', - 'Number of math inlines','Number of math displayed'); - -### Break points -# Definition of macros that define break points that start a new subcount. -# The values given are used as labels. -my %BreakPointsOptions; -$BreakPointsOptions{'none'}={}; -$BreakPointsOptions{'part'}={%{$BreakPointsOptions{'none'}},'\part'=>'Part'}; -$BreakPointsOptions{'chapter'}={%{$BreakPointsOptions{'part'}},'\chapter'=>'Chapter'}; -$BreakPointsOptions{'section'}={%{$BreakPointsOptions{'chapter'}},'\section'=>'Section'}; -$BreakPointsOptions{'subsection'}={%{$BreakPointsOptions{'section'}},'\subsection'=>'Subsection'}; -$BreakPointsOptions{'default'}=$BreakPointsOptions{'subsection'}; -my %BreakPoints=%{$BreakPointsOptions{'none'}}; - -### Print styles -# Definition of different print styles: maps of class labels -# to ANSI codes. Class labels are as used by HTML styles. -my @STYLES=(); -my %STYLE; -$STYLES[0]={'error'=>'bold red'}; -$STYLES[1]={%{$STYLES[0]}, - 'word1'=>'blue','word2'=>'bold blue','word3'=>'blue', - 'grouping'=>'red','document'=>'red','mathgroup'=>'magenta', - 'state'=>'cyan underline','sumcount'=>'yellow'}; -$STYLES[2]={%{$STYLES[1]}, - 'command'=>'green','exclgroup'=>'yellow','exclmath'=>'yellow', - 'ignore'=>'cyan'}; -$STYLES[3]={%{$STYLES[2]}, - 'tc'=>'bold yellow','comment'=>'yellow','option'=>'yellow', - 'fileinclude'=>'bold green'}; -$STYLES[4]={%{$STYLES[3]}}; - -### Word regexp pattern list -# List of regexp patterns that should be analysed as words. 
-my $specialchars='\\\\(ae|AE|o|O|aa|AA)'; -my $modifiedchars='\\\\[\'\"\`\~\^\=](\w|\{\w\})'; -my @WordPatterns=('(\w+\.)+\w+\.?','\w+([\-\']\w+)*'); -my @WordPatternsRelaxed=('([\w\-\']|'.$modifiedchars.'|'.$specialchars.'(\{\})?|\{'.$specialchars.'\})+'); - -### Macro option regexp list -# List of regexp patterns to be gobbled as macro option in and after -# a macro. -my @MacroOptionPatterns=('\[(\w|[,\-\s\~\.\:\;\+\?\*\_\=])*\]'); -my @MacroOptionPatternsRelaxed=('\[[^\[\]\n]*\]'); - -###### Main script - -################################################### - -MAIN(@ARGV); - -################################################### - - -######### -######### Main routines -######### - -# MAIN ROUTINE: Handle arguments, then parse files -sub MAIN { - my @args=@_; - my @toplevelfiles=Parse_Arguments(@args); - Apply_Options(); - if (scalar(@toplevelfiles)==0) { - if ($showcodes>1) {print_help_style();} - else {print_error("No files specified.","p","error");} - } else { - conditional_print_help_style(); - my $totalcount=parse_file_list(@toplevelfiles); - conditional_print_total($totalcount); - } - Close_Output(); -} - -# Checks arguments, exits on exit condition -sub Check_Arguments { - my @args=@_; - if (!@args) { - print_version(); - print_syntax(); - print_reference(); - exit; - } elsif ($args[0]=~/^(\-?\-(h|\?|help)|\/(\?|h))$/) { - print_help(); - exit; - } elsif ($args[0]=~/^\-?\-(ver|version)$/) { - print_version(); - exit; - } elsif ($args[0]=~/^\-?\-(lic|license)$/) { - print_license(); - exit; - } - return 1; -} - -# Parses arguments, sets options (global) and returns file list -sub Parse_Arguments { - my @args=@_; - Check_Arguments(@args); - my @files; - foreach my $arg (@ARGV) { - if (Parse_Option($arg)) {next;} - if ($arg=~/^\-/) { - print 'Invalid opton '.$arg."\n"; - print_syntax(); - exit; - } - $arg=~s/\\/\//g; - push @files,$arg; - } - return @files; -} - -# Parse individual option parameters -sub Parse_Option { - my $arg=shift @_; - return parse_options_parsing($arg) - || parse_options_sums($arg) - || parse_options_output($arg) - || parse_options_format($arg) - ; -} - -sub parse_options_parsing { - my $arg=shift @_; - if ($arg eq '-inc') {$includeTeX=1;} - elsif ($arg eq '-noinc') {$includeTeX=0;} - elsif ($arg eq '-dir') {$globalworkdir=undef;} - elsif ($arg=~/^-dir=(.*)$/) {$globalworkdir=$1;} - elsif ($arg eq '-relaxed') { - @MacroOptionPatterns=@MacroOptionPatternsRelaxed; - @WordPatterns=@WordPatternsRelaxed; - } - else {return 0;} - return 1; -} - -sub parse_options_sums { - my $arg=shift @_; - if ($arg=~/^-sum(=(.+))?$/) {option_sum($2);} - elsif ($arg=~/^-(sub|subcounts?)(=(.+))?$/) {option_subcount($3);} - else {return 0;} - return 1; -} - -sub option_subcount { - my $arg=shift @_; - $showsubcounts=1; - if (!defined $arg) { - %BreakPoints=%{$BreakPointsOptions{'default'}}; - } elsif (my $option=$BreakPointsOptions{$arg}) { - %BreakPoints=%{$option}; - } else { - print STDERR "Warning: Option value ".$arg." not valid, using default instead.\n"; - %BreakPoints=%{$BreakPointsOptions{'default'}}; - } -} - -sub option_sum { - my $arg=shift @_; - if (!defined $arg) { - @sumweights=(1,1,1,0,0,1,1); - } elsif ($arg=~/^(\d+(,\d+){0,6})$/) { - @sumweights=split(',',$1); - } else { - print STDERR "Warning: Option value ".$arg." 
not valid, ignoring option.\n"; - } -} - -sub parse_options_format { - my $arg=shift @_; - if ($arg eq '-brief') {$briefsum=1; return 1;} - elsif ($arg eq '-total') {$totalflag=1; return 1;} - elsif ($arg eq '-1') {$briefsum=1;$totalflag=1;} - elsif ($arg eq "-html" ) {option_no_colours();$htmlstyle = 2;} - elsif ($arg eq "-htmlcore" ) {option_no_colours();$htmlstyle = 1;} - elsif ($arg=~/^\-(nocol|nc$)/) {option_no_colours();} - elsif ($arg eq '-codes') { - $showcodes=2; - if ($verbose==0) {$verbose=3;} - } - elsif ($arg eq '-nocodes') {$showcodes=0;} - else {return 0;} - return 1; -} - -sub parse_options_output { - my $arg=shift @_; - if ($arg eq "-v0") {$verbose=0;} - elsif ($arg eq "-v1") {$verbose=1;} - elsif ($arg eq '-vv' || $arg eq '-v2') {$verbose=2;} - elsif ($arg eq '-vvv' || $arg eq '-v3' || $arg eq '-v') {$verbose=3;} - elsif ($arg eq '-vvvv' || $arg eq '-v4') {$verbose=3; $showstates=1;} - elsif ($arg =~ /^\-showstates?$/ ){$showstates=1;} - else {return 0;} - return 1; -} - -# Parse file list and return total count -sub parse_file_list { - my @filelist=@_; - my $listtotalcount=new_count("TOTAL COUNT"); - for my $file (<@filelist>) { - my $filetotalcount=parse_file($file); - add_count($listtotalcount,$filetotalcount); - } - return $listtotalcount; -} - -# Parse file and included files, and return total count -sub parse_file { - my $file=shift @_; - $workdir=$globalworkdir; - if (!defined $workdir) { - $workdir=$file; - $workdir =~ s/^((.*[\\\/])?)[^\\\/]+$/$1/; - } - @filelist=($file); - if ($htmlstyle) {print "\n
\n";} - my $filetotalcount=new_count("SUM COUNT FOR ".$file); - foreach my $f (@filelist) { - my $tex=TeXfile($f); - my $fpath=$f; - $fpath=~s/^((.*[\\\/])?)[^\\\/]+$/$1/; - if (!defined $tex) { - #print_error("File not found or not readable: ".$f."\n"); - formatprint("File not found or not readable: ".$f."\n","p","error"); - } else { - parse($tex); - my $filecount=add_subcount($tex); - if (!$totalflag) { - print_count($filecount); - print "\n"; - } - add_count($filetotalcount,$filecount); - } - } - if ($htmlstyle) {print "
\n\n";} - return $filetotalcount; -} - -###### -###### Subroutines -###### - -###### Option handling - -# Apply options to set values -sub Apply_Options { - %STYLE=%{$STYLES[$verbose]}; - if ($htmlstyle>1) {html_head();} -} - -# Close the output, e.g. adding HTML tail -sub Close_Output { - if ($htmlstyle>1) { - html_tail(); - } -} - -sub option_no_colours { - $ENV{'ANSI_COLORS_DISABLED'} = 1; -} - -# Print count (total) if conditions are met -sub conditional_print_total { - my $sumcount=shift @_; - if ($totalflag || get_count($sumcount,0)>1) { - if ($totalflag && $briefsum && @sumweights) { - print total_count($sumcount); - } else { - if ($htmlstyle) { - formatprint("Total word count",'h2'); - } - print_count($sumcount); - } - } -} - -###### TeX File handle - -sub TeXfile { - my $filename=shift @_; - my $file=read_file($filename) || return undef; - return TeXcode($file,$filename); -} - -sub TeXcode { - my ($texcode,$filename,$title)=@_; - my %TeX=(); - $TeX{'filename'}=$filename; - if (!defined $filename) { - $TeX{'filepath'}=''; - } elsif ($filename=~/^(.*[\\\/])[^\\\/]+$/) { - $TeX{'filepath'}=$1; - } else { - $TeX{'filepath'}=''; - } - if (defined $title) {} - elsif (defined $filename) {$title="FILE: ".$filename;} - else {$title="Word count";} - $TeX{'line'}=$texcode; - $TeX{'next'}=undef; - $TeX{'type'}=undef; - $TeX{'style'}=undef; - $TeX{'printstate'}=undef; - $TeX{'eof'}=0; - my $countsum=new_count($title); - $TeX{'countsum'}=$countsum; - my $count=new_count("_top_"); - $TeX{'count'}=$count; - inc_count(\%TeX,0); - my @countlist=(); - $TeX{'countlist'}=\@countlist; - $countsum->{'subcounts'}=\@countlist; - return \%TeX; -} - -sub read_file { - my $filename=shift @_; - open(FH,"<".$filename."") || return undef; - if ($verbose) { - formatprint("File: ".$filename."\n",'h2'); - $blankline=0; - } - my @text=; - close(FH); - return join('',@text); -} - -###### Parsing routines - -sub parse { - my ($tex)=@_; - if ($htmlstyle && $verbose) {print "

\n";} - while (!($tex->{'eof'})) { - parse_unit($tex,1); - } - if ($htmlstyle && $verbose) {print "

\n";} -} - -sub parse_unit { - # Status: - # 0 = exclude from count - # 1 = text - # 2 = header text - # 3 = float text - # -1 = float (exclude) - # -2 = strong exclude, ignore begin-end groups - # -3 = stronger exclude, do not parse macro parameters - # -9 = preamble (between \documentclass and \begin{document}) - my ($tex,$status,$end)=@_; - if (!defined $status) { - print_error("CRITICAL ERROR: Undefined parser status!"); - exit; - } elsif (ref($status) eq 'ARRAY') { - print_error("CRITICAL ERROR: Invalid parser status!"); - exit; - } - my $substat; - if ($showstates) { - if (defined $end) { - $tex->{'printstate'}=':'.$status.':'.$end.':'; - } else { - $tex->{'printstate'}=':'.$status.':'; - } - flush_next($tex); - } - while (defined (my $next=next_token($tex))) { - # parse next token; or tokens until match with $end - set_style($tex,"ignore"); - if ((defined $end) && ($end eq $next)) { - # end of unit - return; - } elsif (!defined $next) { - print_error("ERROR: End of file while waiting for ".$end); - return; - } - if ($status==-9 && $next eq '\begin' && $tex->{'line'}=~/^\{\s*document\s*\}/) { - # \begin{document} - $status=1; - } - if ($next eq '\documentclass') { - # starts preamble - set_style($tex,'document'); - gobble_option($tex); - gobble_macro_parms($tex,1); - while (!($tex->{'eof'})) { - parse_unit($tex,-9); - } - } elsif ($tex->{'type'}==666) { - # parse TC instructions - parse_tc($tex); - } elsif ($tex->{'type'}==1) { - # word - if ($status>0) { - inc_count($tex,$status); - set_style($tex,'word'.$status); - } - } elsif ($next eq '{') { - # {...} - parse_unit($tex,$status,'}'); - } elsif ($tex->{'type'}==3 && $status==-3) { - set_style($tex,'ignore'); - } elsif ($tex->{'type'}==3) { - # macro call - if (my $label=$BreakPoints{$next}) { - if ($tex->{'line'}=~ /^[*]?(\s*\[.*?\])*\s*\{(.+?)\}/ ) { - $label=$label.': '.$2; - } - add_subcount($tex,$label); - } - set_style($tex,'command'); - if ($next eq '\begin' && $status!=-2) { - parse_begin_end($tex,$status); - } elsif (($status==-1) && ($substat=$TeXfloatinc{$next})) { - # text included from float - gobble_macro_parms($tex,$substat); - } elsif ($status==-9 && defined ($substat=$TeXpreamble{$next})) { - # parse preamble include macros - if (defined $TeXheader{$next}) {inc_count($tex,4);} - gobble_macro_parms($tex,$substat,1); - } elsif ($status<0) { - # ignore - gobble_option($tex); - } elsif ($next eq '\(') { - # math inline - parse_math($tex,$status,6,'\)'); - } elsif ($next eq '\[') { - # math display - parse_math($tex,$status,7,'\]'); - } elsif ($next eq '\def') { - # ignore \def... - $tex->{'line'} =~ s/^([^\{]*)\{/\{/; - flush_next($tex); - print_style($1.' 
','ignore'); - parse_unit($tex,-2); - } elsif (defined (my $addsuffix=$TeXfileinclude{$next})) { - # include file: queue up for parsing - parse_include_file($tex,$status,$addsuffix); - } elsif (defined ($substat=$TeXmacro{$next})) { - # macro: exclude options - if (defined $TeXheader{$next}) {inc_count($tex,4);} - gobble_macro_parms($tex,$substat,$status); - } elsif (defined ($substat=$TeXmacroword{$next})) { - # count macro as word (or a given number of words) - inc_count($tex,$status,$substat); - set_style($tex,'word'.$status); - } elsif ($next =~ /^\\[^\w\_]/) { - } else { - gobble_option($tex); - } - } elsif ($next eq '$') { - # math inline - parse_math($tex,$status,6,'$'); - } elsif ($next eq '$$') { - # math display (unless already in inlined math) - if (!(defined $end && $end eq '$')) { - parse_math($tex,$status,7,'$$'); - } - } - if (!defined $end) {return;} - } -} - -sub gobble_option { - my $tex=shift @_; - flush_next($tex); - foreach my $pattern (@MacroOptionPatterns) { - if ($tex->{'line'}=~s/^($pattern)//) { - print_style($1,'option'); - return $1; - } - } - return undef; -} - -sub parse_tc { - my ($tex)=@_; - my $next=$tex->{'next'}; - set_style($tex,'tc'); - flush_next($tex); - if (!($next=~s/^\%+TC:\s*(\w+)\s*// )) { - print_error('Warning: TC command should have format %TC:instruction [macro] [parameters]'); - return; - }; - my $instr=$1; - if ($instr=~/^(break)$/) { - if ($instr eq 'break') {add_subcount($tex,$next);} - } elsif ($next=~/^([\\]*\w+)\s+([^\s\n]+)(\s+([0-9]+))?/) { - # Format = TC:word macro - my $macro=$1; - my $param=$2; - my $option=$4; - if ($param=~/^\[([0-9,]+)\]$/) {$param=[split(',',$1)];} - if (($instr eq 'macro') || ($instr eq 'exclude')) {$TeXmacro{$macro}=$param;} - elsif ($instr eq 'header') {$TeXheader{$macro}=$param;$TeXmacro{$macro}=$param;} - elsif ($instr eq 'macroword') {$TeXmacroword{$macro}=$param;} - elsif ($instr eq 'preambleinclude') {$TeXpreamble{$macro}=$param;} - elsif ($instr eq 'group') {$TeXmacro{'begin'.$macro}=$param;$TeXgroup{$macro}=$option;} - elsif ($instr eq 'floatinclude') {$TeXfloatinc{$macro}=$param;} - elsif ($instr eq 'fileinclude') {$TeXfileinclude{$macro}=$param;} - elsif ($instr eq 'breakmacro') {$BreakPoints{$macro}=$param;} - else {print_error("Warning: Unknown TC command: ".$instr);} - } elsif ($instr eq 'ignore') { - tc_ignore_input($tex); - } else { - print_error("Warning: Invalid TC command format: ".$instr); - } -} - -sub tc_ignore_input { - my ($tex)=@_; - set_style($tex,'ignore'); - parse_unit($tex,-3,"%TC:endignore"); - set_style($tex,'tc'); - flush_next($tex); -} - -sub parse_math { - my ($tex,$status,$substat,$end)=@_; - my $localstyle=$status>0 ? 'mathgroup' : 'exclmath'; - if ($status>0) {inc_count($tex,$substat);} - set_style($tex,$localstyle); - parse_unit($tex,0,$end); - set_style($tex,$localstyle); -} - -sub parse_begin_end { - my ($tex,$status)=@_; - my $localstyle=$status>0 ? 
'grouping' : 'exclgroup'; - flush_style($tex,$localstyle); - gobble_option($tex); - my $groupname; - if ($tex->{'line'} =~ s/^\{\s*([^\{\}]+)\s*\*?\}[ \t\r\f]*//) { - # gobble group type - $groupname=$1; - print_style('{'.$1.'}',$localstyle); - my $next='begin'.$1; - if (defined (my $substat=$TeXmacro{$next})) { - gobble_macro_parms($tex,$substat); - } - } else { - print_error("Warning: BEGIN group without type."); - } - # find group status (or leave unchanged) - my $substat; - defined ($substat=$TeXgroup{$1}) || ($substat=$status); - if ($status<=0 && $status<$substat) {$substat=$status;} - if (($status>0) && ($substat==-1)) { - # Count float - inc_count($tex,5); - } - if ($status>0 and $substat>3) { - # count item, exclude contents - inc_count($tex,$substat); - $substat=0; - } - parse_unit($tex,$substat,'\end'); - if ($tex->{'line'} =~ s/^\{\s*([^\{\}]+)\s*\}[ \t\r\f]*//) { - # gobble group type - flush_style($tex,$localstyle); - print_style('{'.$1.'}',$localstyle); - } else { - print_error("Warning: END group without type while waiting to end ".$groupname."."); - } -} - -sub parse_include_file { - my ($tex,$status,$addsuffix)=@_; - $tex->{'line'} =~ s/^\{([^\{\}\s]+)\}// || - $tex->{'line'} =~ s/^\s*([^\{\}\%\\\s]+)// || - return; - flush_next($tex); - if ($status>0) { - print_style($&,'fileinclude'); - my $fname=$1; - if ($addsuffix==2) {$fname.='.tex';} - elsif ($addsuffix==1 && ($fname=~/^[^\.]+$/)) {$fname.='.tex';} - #if ($includeTeX) {push @filelist,$tex->{'filepath'}.$fname;} - if ($includeTeX) {push @filelist,$workdir.$fname;} - } else { - print_style($&,'ignored'); - } -} - -sub gobble_options { - while (gobble_option(@_)) {} -} - -sub gobble_macro_modifier { - my $tex=shift @_; - flush_next($tex); - if ($tex->{'line'} =~ s/^\*//) { - print_style($1,'option'); - return $1; - } - return undef; -} - -sub gobble_macro_parms { - my ($tex,$parm,$oldstat)=@_; - my $i; - if (ref($parm) eq 'ARRAY') { - $i=scalar @{$parm}; - } else { - $i=$parm; - $parm=[0,0,0,0,0,0,0,0,0]; - } - if ($i>0) {gobble_macro_modifier($tex);} - gobble_options($tex); - for (my $j=0;$j<$i;$j++) { - parse_unit($tex,new_status($parm->[$j],$oldstat)); - gobble_options($tex); - } -} - -sub new_status { - my ($substat,$old)=@_; - if (!defined $old) {return $substat;} - if ($old==-3 || $substat==-3) {return -3;} - if ($old==-2 || $substat==-2) {return -2;} - if ($old==0 || $substat==0) {return 0;} - if ($old==-9 || $substat==-9) {return -9;} - if ($old>$substat) {return $old;} - return $substat; -} - -sub next_token { - my $tex=shift @_; - my ($next,$type); - if (defined $tex->{'next'}) {print_style($tex->{'next'}.' ',$tex->{'style'});} - $tex->{'style'}=undef; - while (defined ($next=get_next_token($tex))) { - $type=$tex->{'type'}; - if ($type==0) { - print_style($next,'comment'); - } elsif ($type==9) { - if ($verbose) {line_return(1,$tex);} - } else { - return $next; - } - } - return $next; -} - - -sub get_next_token { - # Token (or token group) category: - # 0: comment - # 1: word (or other forms of text or text components) - # 2: symbol (not word, e.g. 
punctuation) - # 3: macro - # 4: curly braces {} - # 5: brackets [] - # 6: maths - # 9: line break in file - # 999: end of line or blank line - # 666: TeXcount instruction (%TC:instruction) - my $tex=shift @_; - my $next; - (defined ($next=get_token($tex,'\%+TC:[^\n]*',666))) && return $next; - (defined ($next=get_token($tex,'\%[^\n]*',0))) && return $next; - (defined ($next=get_token($tex,'\n',9))) && return $next; - (defined ($next=get_token($tex,'\\\\[\{\}]',2))) && return $next; - foreach my $pattern (@WordPatterns) { - (defined ($next=get_token($tex,$pattern,1))) && return $next; - } - (defined ($next=get_token($tex,'[\"\'\`:\.,\(\)\[\]!\+\-\*=/\^\_\@\<\>\~\#\&]',2))) && return $next; - (defined ($next=get_token($tex,'\\\\([a-zA-Z_]+|[^a-zA-Z_])',3))) && return $next; - (defined ($next=get_token($tex,'[\{\}]',4))) && return $next; - (defined ($next=get_token($tex,'[\[\]]',5))) && return $next; - (defined ($next=get_token($tex,'\$\$',6))) && return $next; - (defined ($next=get_token($tex,'\$',6))) && return $next; - (defined ($next=get_token($tex,'.',999))) && return $next; - (defined ($next=get_token($tex,'[^\s]+',999))) && return $next; - $tex->{'eof'}=1; - return undef; -} - -sub get_token { - my ($tex,$regexp,$type)=@_; - if (!defined $regexp) {print_error("ERROR in get_token: undefined regex.");} - if (!defined $tex->{'line'}) {print_error("ERROR in get_token: undefined tex-line. ".$tex->{'next'});} - if ( $tex->{'line'} =~ s/^($regexp)[ \t\r\f]*// ) { - $tex->{'next'}=$1; - $tex->{'type'}=$type; - return $1; - } - return undef; -} - -###### Count handling routines - -sub new_count { - my ($title)=@_; - my @cnt=(0,0,0,0,0,0,0,0); - my %count=('count'=>\@cnt,'title'=>$title); - # files, text words, header words, float words, - # headers, floats, math-inline, math-display; - return \%count; -} - -sub inc_count { - my ($tex,$type,$value)=@_; - my $count=$tex->{'count'}; - if (!defined $value) {$value=1;} - ${$count->{'count'}}[$type]+=$value; -} - -sub get_count { - my ($count,$type)=@_; - return ${$count->{'count'}}[$type]; -} - -sub total_count { - my ($count)=@_; - my $sum=0; - for (my $i=scalar(@sumweights);$i-->0;) { - $sum+=get_count($count,$i+1)*$sumweights[$i]; - } - return $sum; -} - -sub print_count { - my ($count,$header)=@_; - if ($briefsum && @sumweights) { - print_count_total($count,$header); - } elsif ($briefsum) { - if ($htmlstyle) {print "

";} - print_count_brief($count,$header); - if ($htmlstyle) {print "

\n";} - } else { - print_count_details($count,$header); - } -} - -sub print_count_with_header { - my ($count,$header)=@_; - if (!defined $header) {$header=$count->{'title'};} - if (!defined $header) {$header="";} - return $count,$header; -} - -sub print_count_total { - my ($count,$header)=print_count_with_header(@_); - if ($htmlstyle) {print "

".$header;} - print total_count($count); - if ($htmlstyle) {print "

\n";} - else {print ": ".$header;} -} - -sub print_count_brief { - my ($count,$header)=print_count_with_header(@_); - my $cnt=$count->{'count'}; - print ${$cnt}[1]."+".${$cnt}[2]."+".${$cnt}[3]. - " (".${$cnt}[4]."/".${$cnt}[5]."/".${$cnt}[6]."/".${$cnt}[7].") ". - $header; -} - -sub print_count_details { - my ($count,$header)=print_count_with_header(@_); - if ($htmlstyle) {print "
\n";} - if (defined $header) { - formatprint($header."\n",'dt','header'); - } - if (get_count($count,0)>1) { - formatprint($countlabel[0].': ','dt'); - formatprint(get_count($count,0)."\n",'dd'); - } - if (@sumweights) { - formatprint('Sum count: ','dt'); - formatprint(total_count($count)."\n",'dd'); - } - for (my $i=1;$i<8;$i++) { - formatprint($countlabel[$i].': ','dt'); - formatprint(get_count($count,$i)."\n",'dd'); - } - my $subcounts=$count->{'subcounts'}; - if ($showsubcounts && defined $subcounts && scalar(@{$subcounts})>1) { - formatprint("Subcounts: text+headers+captions (#headers/#floats/#inlines/#displayed)\n",'dt'); - foreach my $subcount (@{$subcounts}) { - if ($htmlstyle) {print "
";} - print_count_brief($subcount); - if ($htmlstyle) {print "
";} - print "\n"; - } - } - if ($htmlstyle) {print "
\n";} -} - -sub add_count { - my ($a,$b)=@_; - for (my $i=0;$i<8;$i++) { - ${$a->{'count'}}[$i]+=${$b->{'count'}}[$i]; - } -} - -sub add_subcount { - my ($tex,$title)=@_; - add_count($tex->{'countsum'},$tex->{'count'}); - push @{$tex->{'countlist'}},$tex->{'count'}; - $tex->{'count'}=new_count($title); - return $tex->{'countsum'}; -} - -###### Printing routines - -sub set_style { - my ($tex,$style)=@_; - if (!(($tex->{'style'}) && ($tex->{'style'} eq '-'))) {$tex->{'style'}=$style;} -} - -sub flush_style { - my ($tex,$style)=@_; - set_style($tex,$style); - flush_next($tex); -} - -sub line_return { - my ($blank,$tex)=@_; - if ($blank>$blankline) { - if ((defined $tex) && @sumweights) { - my $num=total_count($tex->{'count'}); - print_style(" [".$num."]","sumcount"); - } - linebreak(); - $blankline++; - } -} - -sub linebreak { - if ($htmlstyle) {print "
\n";} else {print "\n";} -} - -sub print_style { - my ($text,$style,$state)=@_; - (($verbose>=0) && (defined $text) && (defined $style)) || return 0; - my $colour; - ($colour=$STYLE{$style}) || return; - if (($colour) && !($colour eq '-')) { - if ($htmlstyle) { - print "".$text.""; - } else { - print Term::ANSIColor::colored($text,$colour); - } - if ($state) { - print_style($state,'state'); - } - $blankline=-1; - return 1; - } else { - return 0; - } -} - -sub print_error { - my $text=shift @_; - line_return(1); - print_style("### ".$text." ###",'error'); - line_return(1); -} - -sub formatprint { - my ($text,$tag,$class)=@_; - my $break=($text=~s/\n$//); - if ($htmlstyle && defined $tag) { - print '<'.$tag; - if ($class) {print " class='".$class."'";} - print '>'.$text.''; - } else { - print $text; - } - if ($break) {print "\n";} -} - -sub flush_next { - my $tex=shift @_; - if (defined $tex->{'next'}) { - print_style($tex->{'next'}.' ',$tex->{'style'},$tex->{'printstate'}); - } - $tex->{'printstate'}=undef; - $tex->{'style'}='-'; -} - -###### HTML routines - -sub html_head { - print ' - - - - - -

LaTeX word count

-'; -} - -sub html_tail { - print ''; -} - -###### Help routines - -sub print_version { - print "TeXcount version ".$versionnumber.", ".$versiondate.'.'; -} - -sub print_syntax { - print ' -Syntax: TeXcount.pl [options] files - -Options: - -relaxed Uses relaxed rules for word and option handling: - i.e. allows more general cases to be counted as - either words or macros. - -v Verbose (same as -v3) - -v0 Do not present parsing details - -v1 Verbose: print parsed words, mark formulae - -v2 More verbose: also print ignored text - -v3 Even more verbose: include comments and options - -v4 Same as -v3 -showstate - -showstate Show internal states (with verbose) - -brief Only prints a brief, one line summary of counts - -sum, -sum= Make sum of all word and equation counts. May also - use -sum=#[,#] with up to 7 numbers to indicate how - each of the counts (text words, header words, caption - words, #headers, #floats, #inlined formulae, - #displayed formulae) are summed. The default sum (if - only -sum is used) is the same as -sum=1,1,1,0,0,1,1. - -sub, -sub= Generate subcounts. Option values are none, part, - chapter, section or subsection. Default (-sub) is set - to subsection, whereas unset is none. (Alternative - option name is -subcount.) - -nc, -nocol No colours (colours require ANSI) - -html Output in HTML format - -htmlcore Only HTML body contents - -inc Include tex files included in the document - -noinc Do not include included tex files (default) - -total Do not give sums per file, only total sum. - -1 Same as -brief and -total. Ensures there is only one - line of output. If used in conjunction with -sum, the - output will only be the total number. (NB: Character - is the number one, not the letter L.) - -dir, -dir= Specify the working directory using -dir=path. - Remember that the path must end with \ or /. If only - -dir is used, the directory of the parent file is used. - -codes Display output style code overview and explanation. - This is on by default. - -nocodes Do not display output style code overview. - -h, -?, --help, /? Help - --version Print version number - --license License information -'; -} - -sub print_help { - print ' -*************************************************************** -* TeXcount.pl '.$versionnumber.', '.$versiondate.' -* - -Count words in TeX and LaTeX files, ignoring macros, tables, -formulae, etc. -'; - print_syntax(); - print ' -The script counts words as either words in the text, words in -headers/titles or words in floats (figure/table captions). -Macro options (i.e. \marco[...]) are ignored; macro parameters -(i.e. \macro{...}) are counted or ignored depending on the -macro, but by default counted. Begin-end groups are by default -ignored and treated as \'floats\', though some (e.g. center) are -counted. - -Unless -nocol (or -nc) has been specified, the output will be -colour coded. Counted text is coloured blue with headers are in -bold and in HTML output caption text is italicised. - -Mathematical formulae are not counted as words, but are instead -counted separately with separate counts for inlined formulae -and displayed formulae. Similarly, the number of headers and -the number of \'floats\' are counted. Note that \'float\' is used -here to describe anything defined in a begin-end group unless -explicitly recognized as text or mathematics. - -The verbose options (-v1, -v2, -v3, showstate) produces output -indicating how the text has been interpreted. 
Check this to -ensure that words in the text has been interpreted as such, -whereas mathematical formulae and text/non-text in begin-end -groups have been correctly interpreted. - -Parsing instructions may be passed to TeXcount using comments -in the LaTeX files on the format - %TC:instruction arguments -where valid instructions for setting parsing rules, typically -set at the start of the document (applies globally), are: - %TC:macro [macro] [param.states] - macro handling rule, no. of and rules for parameters - %TC:macroword [macro] [number] - macro counted as a given number of words - %TC:header [macro] [param.states] - header macro rule, as macro but counts as one header - %TC:breakmacro [macro] [label] - macro causing subcount break point - %TC:group [name] [parsing-state] - begin-end-group handling rule - %TC:floatinclude [macro] [param.states] - as macro, but also counted inside floats - %TC:preambleinclude [macro] [param.states] - as macro, but also counted inside the preamble - %TC:fileinclue [macro] [rule] - file include, add .tex if rule=2, not if rule=0 -The [param.states] is used to indicate the number of parameters -used by the macro and the rules of handling each of these: format -is [#,#,...,#] with one number for each parameter, and main rules -are 0 to ignore and 1 to count as text. Parsing instructions -which may be used anywhere are: - %TC:ignore start block to ignore - %TC:endignore end block to ignore - %TC:break [title] add subcount break point here -See the documentation for more details. - -Unix hint: Use \'less -r\' instead of just \'less\' to view output: -the \'-r\' option makes less treat text formating codes properly. - -Windows hint: If your Windows interprets ANSI colour codes, lucky -you! Otherwise, use the -nocol (or -nc) option with the verbose -options or the output will be riddled with colour codes. Instead, -you can use -html to produce HTML code, write this to file and -view with your favourite browser. -'; - print_help_style(); - print_reference(); -} - -sub print_reference { - print ' -The TeXcount script is copyright of Einar Andreas Rødland (2008) -and published under the LaTeX Project Public License. - -For more information about the script, e.g. news, updates, help, -usage tips, known issues and short-comings, go to - http://folk.uio.no/einarro/Comp/texwordcount.html -or go to - http://folk.uio.no/einarro/Services/texcount.html -to access the script as a web service. Feedback such as problems -or errors can be reported to einarro@ifi.uio.no. -'; -} - -sub print_license { - print 'TeXcount version '.$versionnumber.' - -Copyright 2008 Einar Andreas Rødland - -The TeXcount script is published under the LaTeX Project Public -License (LPPL) - http://www.latex-project.org/lppl.txt -which grants you, the user, the right to use, modify and distribute -the script. However, if the script is modified, you must change its -name or use other technical means to avoid confusion. - -The script has LPPL status "maintained" with Einar Andreas -Rødland being the current maintainer. -'; -} - -sub print_help_style { - if ($verbose<=0) {return;} - formatprint("Format/colour codes of verbose output:","h2"); - print "\n\n"; - if ($htmlstyle) {print "

";} - help_style_line('Text which is counted',"word1","counted as text words"); - help_style_line('Header and title text',"word2","counted as header words"); - help_style_line('Caption text and footnotes',"word3","counted as caption words"); - help_style_line("Ignored text or code","ignore","excluded or ignored"); - help_style_line('\documentclass',"document","document start, beginning of preamble"); - help_style_line('\macro',"command","macro not counted, but parameters may be"); - help_style_line("[Macro options]","option","not counted"); - help_style_line('\begin{group} \end{group}',"grouping","begin/end group"); - help_style_line('\begin{group} \end{group}',"exclgroup","begin/end group in excluded region"); - help_style_line('$ $',"mathgroup","counted as one equation"); - help_style_line('$ $',"exclmath","equation in excluded region"); - help_style_line('% Comments',"comment","not counted"); - help_style_line('%TC:TeXcount instructions',"tc","not counted"); - help_style_line("File to include","fileinclude","not counted but file may be counted later"); - if ($showstates) { - help_style_line('[state]',"state","internal TeXcount state"); - } - if (@sumweights) { - help_style_line('[sumcount]',"sumcount","cumulative sum count"); - } - help_style_line("ERROR","error","TeXcount error message"); - if ($htmlstyle) {print "

";} - print "\n\n"; -} - -sub help_style_line { - my ($text,$style,$comment)=@_; - if ($htmlstyle) { - $comment="  ....  ".$comment; - } else { - $comment=" .... ".$comment; - } - if (print_style($text,$style)) { - print $comment; - linebreak(); - } -} - -# Print output style codes if conditions are met -sub conditional_print_help_style { - if ($showcodes) {print_help_style();} - return $showcodes; -} - diff --git a/Build/source/texk/texlive/linked_scripts/bengali/ebong.py b/Build/source/texk/texlive/linked_scripts/bengali/ebong.py new file mode 100755 index 00000000000..c4162cf0ef6 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/bengali/ebong.py @@ -0,0 +1,346 @@ +#!/usr/bin/env python +# look in newbong +import sre +A='A' +B='B' +S='S' +s='s' +F='F' +X='X' + +NCLINE = 0 +global NCWORD,CWORD + +AKSAR={ + 'k' :[B,'k'], + 'kh' :[B,'kh'], + 'g' :[B,'g'], + 'gh' :[B,'gh'], + 'ng' :[B,'NG'], + + 'ch' :[B,'c'], + '^ch' :[B,'ch'], + 'j' :[B,'j'], + 'jh' :[B,'jh'], + '^y' :[B,'NJ'], + '_n' :[B,'NJ'], + + 't' :[B,'T'], + '^th' :[B,'Th'], + 'd' :[B,'D'], + 'dh' :[B,'Dh'], + '^n' :[B,'N'], + + '_t' :[B,'t'], + 'th' :[B,'th'], + '_d' :[B,'d'], + '_dh' :[B,'dh'], + 'n' :[B,'n'], + + 'p' :[B,'p'], + 'ph' :[B,'ph'], + 'f' :[B,'ph'], + 'b' :[B,'b'], + 'bh' :[B,'bh'], + 'v' :[B,'bh'], + 'm' :[B,'m'], + 'M' :[F,'M'], + + '^j' :[B,'J'], + 'J' :[B,'J'], + 'r' :[B,'r'], + 'R' :[F,'R'], + 'l' :[B,'l'], + 'L' :[F,'L'], + 'W' :[F,'W'], + 'V' :[F,'W'], + 'h' :[B,'H'], + 'kk' :[B,'kK'], + 'kkm' :[B,'kK/N'], + + 'sh' :[B,'sh'], + '^s' :[B,'Sh'], + '^sh' :[B,'Sh'], + 's' :[B,'s'], + + '^r' :[B,'rh'], + '^rh' :[B,'rhh'], + 'y' :[B,'y'], + 'Y' :[F,'Y'], + 'JY' :[F,'Y'], + '__t' :[B,'t//'], + '^ng' :[B,'NNG'], + ':h' :[B,'h'], + '^' :[F,'NN'], + '_' :[F,':/'], + + 'A' :[S,'A'], + 'AA' :[S,'Aa'], + 'I' :[S,'I'], + 'II' :[S,'II'], + 'U' :[S,'U'], + 'UU' :[S,'UU'], + 'RI' :[S,'RR'], + 'E' :[S,'E'], + 'OI' :[S,'OI'], + 'O' :[S,'O'], + 'OU' :[S,'OU'], + + 'a' :[X,'o',1], + 'aa' :[s,'a',1], + 'i' :[s,'i',-1], + 'ii' :[s,'ii',1], + 'u' :[s,'u',1], + 'uu' :[s,'uu',1], + 'RII' :[s,'rR',1], + 'e' :[s,'e',-1], + 'oi' :[s,'oi',-2], + 'oo' :[s,'oo',11], + 'o' :[X,'o',1], + 'ou' :[s,'ou',12], + + '.' :[F,'.'], + '..' :[F,'..'], + '...' :[F,'...'], + '|' :[F,'|'], + + '~' :[F,'~'], + '`' :[F,'`'], + '!' :[F,'!'], + '1' :[F,'1'], + '2' :[F,'2'], + 'at' :[F,'@'], + '#' :[F,'#'], + '3' :[F,'3'], + '$' :[F,'$'], + '4' :[F,'4'], + '%' :[F,'%'], + '5' :[F,'5'], + '6' :[F,'6'], + '&' :[F,'&'], + '7' :[F,'7'], + '*' :[F,'*'], + '8' :[F,'8'], + '(' :[F,'('], + '9' :[F,'9'], + ')' :[F,')'], + '0' :[F,'0'], + 'dash' :[F,'-'], + '+' :[F,'+'], + '=' :[F,'='], + '|' :[F,'|'], + '{' :[F,'{'], + '[' :[F,'['], + '}' :[F,'}'], + ']' :[F,']'], + ':' :[F,':'], + ';' :[F,';'], + '"' :[F,'"'], + "'" :[F,"'"], + '<' :[F,'<'], + ',' :[F,','], + '>' :[F,'>'], + '.' :[F,'.'], + '?' 
:[F,'?'], + '/' :[F,'/']} + +CATCODES = {'SS' :[S,'','','',1], + 'SB' :[B,'','','',1], + 'BS' :[S,'','','',1], + 'BB' :[B,'','/','',1], + 'BF' :[F,'','','',1], + 'Bs1' :[S,'','','',1], + 'Bs-1':[S,'\*','','*',1], + 'Bs-2':[S,'\*','','*{oi}',0], + 'Bs11':[S,'\*','','*ea',0], + 'Bs12':[S,'\*','','*eou',0], + 'Fs1' :[S,'','','',1], + 'Fs-1':[S,'\*','','*',1], + 'Fs-2':[S,'\*','','*{oi}',0], + 'Fs11':[S,'\*','','*ea',0], + 'Fs12':[S,'\*','','*eou',0], + 'FF' :[F,'','','',1], + 'AX' :[F,'','','',1]} + +def blocked(line): + #print '@ blocked', line , '->', + m = sre.findall('@[^@]+@',line) + outline = line + if not m : + #print outline + return(outline) + else: + for i in range(len(m)): + s=m[i][:-1].replace(' ','%X%') + outline = outline.replace(m[i],s,1) + #print outline + return(outline) + +def unblock(line): + #print '@unblock', line, '->', + m = sre.findall('@[^\s]+',line) + outline = line + if not m : + #print outline + return(outline) + else: + for i in range(len(m)): + s=m[i].replace('@','').replace('%X%',' ') + outline = outline.replace(m[i],s) + #print outline + return(outline) + +def printamp(line): + #print '@unblock', line, '->', + m = sre.findall('#AT',line) + outline = line + if not m : + #print outline + return(outline) + else: + for i in range(len(m)): + outline = outline.replace('#AT','@') + #print outline + return(outline) + +def readsyll(syll): + syllparts=[] + start = 0; end = len(syll) + while syll[start : end]: + slice = syll[start : end] + #print slice + if AKSAR.has_key(slice): + syllparts.append(AKSAR[slice]) + start = start + len(slice) + end = len(syll) + else : + end = end -1 + return(syllparts) + +def fuse(list1,list2): + global CCATCODE + #print list1,list2 + Type1 = list1[0] + Type2 = list2[0] + + if Type2 == s: + Type3 = str(list2[2]) + elif Type2 == X: + Type1=A + Type3='' + else: + Type3 ='' + + Type = Type1+Type2+Type3 + + #print 'Type:', Type + + try: + CATCODE = CATCODES[Type] + TARGET = CATCODE[0] + PREFIX = CATCODE[1] + MIDFIX = CATCODE[2] + POSTFIX = CATCODE[3] + FLAG = CATCODE[4] + + #print 'TGT:', TARGET, PREFIX,MIDFIX,POSTFIX,FLAG + #print 'RAWC', AKSAR[list1[1]][1],AKSAR[list2[1]][1] + + c1=list1[1] + c2=list2[1] + + if FLAG == 1 : + c = PREFIX + c1 + MIDFIX + POSTFIX + c2 + else : + c = PREFIX + c1 + MIDFIX + POSTFIX + + fused = [TARGET,c] + #print CATCODE + return(fused) + except KeyError: + print '\n ERROR AT LINE:', NCLINE, 'WORD:',NCWORD, '(',CWORD,')' + return(['ERROR','UNKNOWN CATCODE']) + +def fuseatoms(syll): + slist=readsyll(syll); + #print slist + lslist=len(slist); + l0=slist[0]; + for i in range(1,lslist): + nextitem = slist[i] + l0=fuse(l0,nextitem) + + return(l0[1]) + +def fuseword(wrd): + if wrd[0] == '@' : + return(wrd) + syllables = wrd.split('-') + w0='' + for eachsyll in syllables: + syll=eachsyll + thesyll = fuseatoms(syll) + w0 = w0 + thesyll + #print 'FUSED WORD',w0 + return(w0) + +def fuseline(line): + global NCWORD,CWORD + NCWORD = 0 + #line = blocked(line) + words = line.split() + l0='' + for eachword in words: + NCWORD=NCWORD+1 + word = eachword + CWORD=word + theword=fuseword(word) + #print 'XX',theword + l0=l0+' '+theword + #print 'FUSED LINE', l0 + return(l0) + +# The main program +import sys +OK=1 +finnam = sys.argv[1] +foutnam = finnam.split('.')[0] + '.' 
+ 'tex' + +fin = file(finnam,'rt') +fout = file(foutnam,'wt') + +textin = fin.readlines() +nlines = len(textin) + +textout = [] + +fin.close() + +for eachline in textin: + NCLINE = NCLINE+1 + if eachline[0] == '#' : + lineout = eachline[1:] + elif eachline[0] == '\\' : + lineout = eachline + elif eachline == '\n': + lineout = eachline + else : + line1 = eachline.strip() + line2 = blocked(line1) + lineout = fuseline(line2) + '\n' + lineout = lineout[1:] + #print ':::', lineout + if lineout.find('UNKNOWN CATCODE') == -1 : + lineout = unblock(lineout) + #print ':::', lineout + textout.append(printamp(lineout)) + else : + OK = 0 + fout.close() + +if OK == 1: + fout.writelines(textout) + fout.close() + print 'done' +else: + print 'Unknown CATCODE, Fix The errors and try again' diff --git a/Build/source/texk/texlive/linked_scripts/context.sh b/Build/source/texk/texlive/linked_scripts/context.sh deleted file mode 100755 index c7341904f10..00000000000 --- a/Build/source/texk/texlive/linked_scripts/context.sh +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh - -mtxrun --script context "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/lua/luatools.lua b/Build/source/texk/texlive/linked_scripts/context/lua/luatools.lua new file mode 100755 index 00000000000..35986137950 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/lua/luatools.lua @@ -0,0 +1,6659 @@ +#!/usr/bin/env texlua + +-- one can make a stub: +-- +-- #!/bin/sh +-- env LUATEXDIR=/....../texmf/scripts/context/lua luatex --luaonly=luatools.lua "$@" +-- filename : luatools.lua +-- comment : companion to context.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files +-- Although this script is part of the ConTeXt distribution it is +-- relatively indepent of ConTeXt. The same is true for some of +-- the luat files. We may may make them even less dependent in +-- the future. As long as Luatex is under development the +-- interfaces and names of functions may change. + +banner = "version 1.2.0 - 2006+ - PRAGMA ADE / CONTEXT" +texlua = true + +-- For the sake of independence we optionally can merge the library +-- code here. It's too much code, but that does not harm. Much of the +-- library code is used elsewhere. We don't want dependencies on +-- Lua library paths simply because these scripts are located in the +-- texmf tree and not in some Lua path. Normally this merge is not +-- needed when texmfstart is used, or when the proper stub is used or +-- when (windows) suffix binding is active. + +-- begin library merge +-- filename : l-string.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-string'] = 1.001 + +--~ function string.split(str, pat) -- taken from the lua wiki +--~ local t = {n = 0} -- so this table has a length field, traverse with ipairs then! 
+--~ local fpat = "(.-)"..pat +--~ local last_end = 1 +--~ local s, e, cap = string.find(str, fpat, 1) +--~ while s ~= nil do +--~ if s~=1 or cap~="" then +--~ table.insert(t,cap) +--~ end +--~ last_end = e+1 +--~ s, e, cap = string.find(str, fpat, last_end) +--~ end +--~ if last_end<=string.len(str) then +--~ table.insert(t,(string.sub(str,last_end))) +--~ end +--~ return t +--~ end + +--~ function string:split(pat) -- taken from the lua wiki but adapted +--~ local t = { } -- self and colon usage (faster) +--~ local fpat = "(.-)"..pat +--~ local last_end = 1 +--~ local s, e, cap = self:find(fpat, 1) +--~ while s ~= nil do +--~ if s~=1 or cap~="" then +--~ t[#t+1] = cap +--~ end +--~ last_end = e+1 +--~ s, e, cap = self:find(fpat, last_end) +--~ end +--~ if last_end <= #self then +--~ t[#t+1] = self:sub(last_end) +--~ end +--~ return t +--~ end + +--~ a piece of brilliant code by Rici Lake (posted on lua list) -- only names changed +--~ +--~ function string:splitter(pat) +--~ local st, g = 1, self:gmatch("()"..pat.."()") +--~ local function splitter(self) +--~ if st then +--~ local s, f = g() +--~ local rv = self:sub(st, (s or 0)-1) +--~ st = f +--~ return rv +--~ end +--~ end +--~ return splitter, self +--~ end + +function string:splitter(pat) + -- by Rici Lake (posted on lua list) -- only names changed + -- p 79 ref man: () returns position of match + local st, g = 1, self:gmatch("()("..pat..")") + local function strgetter(self, segs, seps, sep, cap1, ...) + st = sep and seps + #sep + return self:sub(segs, (seps or 0) - 1), cap1 or sep, ... + end + local function strsplitter(self) + if st then return strgetter(self, st, g()) end + end + return strsplitter, self +end + +function string:split(separator) + local t = {} + for k in self:splitter(separator) do t[#t+1] = k end + return t +end + +-- faster than a string:split: + +function string:splitchr(chr) + if #self > 0 then + local t = { } + for s in string.gmatch(self..chr,"(.-)"..chr) do + t[#t+1] = s + end + return t + else + return { } + end +end + +--~ function string.piecewise(str, pat, fnc) -- variant of split +--~ local fpat = "(.-)"..pat +--~ local last_end = 1 +--~ local s, e, cap = string.find(str, fpat, 1) +--~ while s ~= nil do +--~ if s~=1 or cap~="" then +--~ fnc(cap) +--~ end +--~ last_end = e+1 +--~ s, e, cap = string.find(str, fpat, last_end) +--~ end +--~ if last_end <= #str then +--~ fnc((string.sub(str,last_end))) +--~ end +--~ end + +function string.piecewise(str, pat, fnc) -- variant of split + for k in string.splitter(str,pat) do fnc(k) end +end + +--~ function string.piecewise(str, pat, fnc) -- variant of split +--~ for k in str:splitter(pat) do fnc(k) end +--~ end + +--~ do if lpeg then + +--~ -- this alternative is 30% faster esp when we cache them +--~ -- problem: no expressions + +--~ splitters = { } + +--~ function string:split(separator) +--~ if #self > 0 then +--~ local split = splitters[separator] +--~ if not split then +--~ -- based on code by Roberto +--~ local p = lpeg.P(separator) +--~ local c = lpeg.C((1-p)^0) +--~ split = lpeg.Ct(c*(p*c)^0) +--~ splitters[separator] = split +--~ end +--~ return split:match(self) +--~ else +--~ return { } +--~ end +--~ end + +--~ string.splitchr = string.split + +--~ function string:piecewise(separator,fnc) +--~ for _,v in pairs(self:split(separator)) do +--~ fnc(v) +--~ end +--~ end + +--~ end end + +string.chr_to_esc = { + ["%"] = "%%", + ["."] = "%.", + ["+"] = "%+", ["-"] = "%-", ["*"] = "%*", + ["^"] = "%^", ["$"] = "%$", + ["["] = "%[", ["]"] = "%]", + ["("] = 
"%(", [")"] = "%)", + ["{"] = "%{", ["}"] = "%}" +} + +function string:esc() -- variant 2 + return (self:gsub("(.)",string.chr_to_esc)) +end + +function string.unquote(str) + return (str:gsub("^([\"\'])(.*)%1$","%2")) +end + +function string.quote(str) + return '"' .. str:unquote() .. '"' +end + +function string:count(pattern) -- variant 3 + local n = 0 + for _ in self:gmatch(pattern) do + n = n + 1 + end + return n +end + +function string:limit(n,sentinel) + if #self > n then + sentinel = sentinel or " ..." + return self:sub(1,(n-#sentinel)) .. sentinel + else + return self + end +end + +function string:strip() + return (self:gsub("^%s*(.-)%s*$", "%1")) +end + +--~ function string.strip(str) -- slightly different +--~ return (string.gsub(string.gsub(str,"^%s*(.-)%s*$","%1"),"%s+"," ")) +--~ end + +function string:is_empty() + return not self:find("%S") +end + +function string:enhance(pattern,action) + local ok, n = true, 0 + while ok do + ok = false + self = self:gsub(pattern, function(...) + ok, n = true, n + 1 + return action(...) + end) + end + return self, n +end + +--~ function string:enhance(pattern,action) +--~ local ok, n = 0, 0 +--~ repeat +--~ self, ok = self:gsub(pattern, function(...) +--~ n = n + 1 +--~ return action(...) +--~ end) +--~ until ok == 0 +--~ return self, n +--~ end + +--~ function string:to_hex() +--~ if self then +--~ return (self:gsub("(.)",function(c) +--~ return string.format("%02X",c:byte()) +--~ end)) +--~ else +--~ return "" +--~ end +--~ end + +--~ function string:from_hex() +--~ if self then +--~ return (self:gsub("(..)",function(c) +--~ return string.char(tonumber(c,16)) +--~ end)) +--~ else +--~ return "" +--~ end +--~ end + +string.chr_to_hex = { } +string.hex_to_chr = { } + +for i=0,255 do + local c, h = string.char(i), string.format("%02X",i) + string.chr_to_hex[c], string.hex_to_chr[h] = h, c +end + +--~ function string:to_hex() +--~ if self then return (self:gsub("(.)",string.chr_to_hex)) else return "" end +--~ end + +--~ function string:from_hex() +--~ if self then return (self:gsub("(..)",string.hex_to_chr)) else return "" end +--~ end + +function string:to_hex() + return ((self or ""):gsub("(.)",string.chr_to_hex)) +end + +function string:from_hex() + return ((self or ""):gsub("(..)",string.hex_to_chr)) +end + +if not string.characters then + + local function nextchar(str, index) + index = index + 1 + return (index <= #str) and index or nil, str:sub(index,index) + end + function string:characters() + return nextchar, self, 0 + end + local function nextbyte(str, index) + index = index + 1 + return (index <= #str) and index or nil, string.byte(str:sub(index,index)) + end + function string:bytes() + return nextbyte, self, 0 + end + +end + +--~ function string:padd(n,chr) +--~ return self .. self.rep(chr or " ",n-#self) +--~ end + +function string:rpadd(n,chr) + local m = n-#self + if m > 0 then + return self .. self.rep(chr or " ",m) + else + return self + end +end + +function string:lpadd(n,chr) + local m = n-#self + if m > 0 then + return self.rep(chr or " ",m) .. 
self + else + return self + end +end + +string.padd = string.rpadd + +function is_number(str) + return str:find("^[%-%+]?[%d]-%.?[%d+]$") == 1 +end + +--~ print(is_number("1")) +--~ print(is_number("1.1")) +--~ print(is_number(".1")) +--~ print(is_number("-0.1")) +--~ print(is_number("+0.1")) +--~ print(is_number("-.1")) +--~ print(is_number("+.1")) + +function string:split_settings() -- no {} handling, see l-aux for lpeg variant + if self:find("=") then + local t = { } + for k,v in self:gmatch("(%a+)=([^%,]*)") do + t[k] = v + end + return t + else + return nil + end +end + +local patterns_escapes = { + ["-"] = "%-", + ["."] = "%.", + ["+"] = "%+", + ["*"] = "%*", + ["%"] = "%%", + ["("] = "%)", + [")"] = "%)", + ["["] = "%[", + ["]"] = "%]", +} + +function string:pattesc() + return (self:gsub(".",patterns_escapes)) +end + +function string:tohash() + local t = { } + for s in self:gmatch("([^, ]+)") do -- lpeg + t[s] = true + end + return t +end + + +-- filename : l-lpeg.lua +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-lpeg'] = 1.001 + +--~ l-lpeg.lua : + +--~ lpeg.digit = lpeg.R('09')^1 +--~ lpeg.sign = lpeg.S('+-')^1 +--~ lpeg.cardinal = lpeg.P(lpeg.sign^0 * lpeg.digit^1) +--~ lpeg.integer = lpeg.P(lpeg.sign^0 * lpeg.digit^1) +--~ lpeg.float = lpeg.P(lpeg.sign^0 * lpeg.digit^0 * lpeg.P('.') * lpeg.digit^1) +--~ lpeg.number = lpeg.float + lpeg.integer +--~ lpeg.oct = lpeg.P("0") * lpeg.R('07')^1 +--~ lpeg.hex = lpeg.P("0x") * (lpeg.R('09') + lpeg.R('AF'))^1 +--~ lpeg.uppercase = lpeg.P("AZ") +--~ lpeg.lowercase = lpeg.P("az") + +--~ lpeg.eol = lpeg.S('\r\n\f')^1 -- includes formfeed +--~ lpeg.space = lpeg.S(' ')^1 +--~ lpeg.nonspace = lpeg.P(1-lpeg.space)^1 +--~ lpeg.whitespace = lpeg.S(' \r\n\f\t')^1 +--~ lpeg.nonwhitespace = lpeg.P(1-lpeg.whitespace)^1 + +local hash = { } + +function lpeg.anywhere(pattern) --slightly adapted from website + return lpeg.P { lpeg.P(pattern) + 1 * lpeg.V(1) } +end + +function lpeg.startswith(pattern) --slightly adapted + return lpeg.P(pattern) +end + +--~ g = lpeg.splitter(" ",function(s) ... 
end) -- gmatch:lpeg = 3:2 + +function lpeg.splitter(pattern, action) + return (((1-lpeg.P(pattern))^1)/action+1)^0 +end + +local crlf = lpeg.P("\r\n") +local cr = lpeg.P("\r") +local lf = lpeg.P("\n") +local space = lpeg.S(" \t\f\v") +local newline = crlf + cr + lf +local spacing = space^0 * newline + +local empty = spacing * lpeg.Cc("") +local nonempty = lpeg.Cs((1-spacing)^1) * spacing^-1 +local content = (empty + nonempty)^1 + +local capture = lpeg.Ct(content^0) + +function string:splitlines() + return capture:match(self) +end + + +-- filename : l-table.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-table'] = 1.001 + +table.join = table.concat + +function table.strip(tab) + local lst = { } + for k, v in ipairs(tab) do + -- s = string.gsub(v, "^%s*(.-)%s*$", "%1") + s = v:gsub("^%s*(.-)%s*$", "%1") + if s == "" then + -- skip this one + else + lst[#lst+1] = s + end + end + return lst +end + +--~ function table.sortedkeys(tab) +--~ local srt = { } +--~ for key,_ in pairs(tab) do +--~ srt[#srt+1] = key +--~ end +--~ table.sort(srt) +--~ return srt +--~ end + +function table.sortedkeys(tab) + local srt, kind = { }, 0 -- 0=unknown 1=string, 2=number 3=mixed + for key,_ in pairs(tab) do + srt[#srt+1] = key + if kind == 3 then + -- no further check + else + local tkey = type(key) + if tkey == "string" then + -- if kind == 2 then kind = 3 else kind = 1 end + kind = (kind == 2 and 3) or 1 + elseif tkey == "number" then + -- if kind == 1 then kind = 3 else kind = 2 end + kind = (kind == 1 and 3) or 2 + else + kind = 3 + end + end + end + if kind == 0 or kind == 3 then + table.sort(srt,function(a,b) return (tostring(a) < tostring(b)) end) + else + table.sort(srt) + end + return srt +end + +function table.append(t, list) + for _,v in pairs(list) do + table.insert(t,v) + end +end + +function table.prepend(t, list) + for k,v in pairs(list) do + table.insert(t,k,v) + end +end + +function table.merge(t, ...) -- first one is target + t = t or {} + local lst = {...} + for i=1,#lst do + for k, v in pairs(lst[i]) do + t[k] = v + end + end + return t +end + +function table.merged(...) + local tmp, lst = { }, {...} + for i=1,#lst do + for k, v in pairs(lst[i]) do + tmp[k] = v + end + end + return tmp +end + +function table.imerge(t, ...) + local lst = {...} + for i=1,#lst do + local nst = lst[i] + for j=1,#nst do + t[#t+1] = nst[j] + end + end + return t +end + +function table.imerged(...) 
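+    -- a minimal sketch of the intended difference between the two variants
+    -- (both defined in this file): table.imerge above appends the indexed
+    -- entries of the extra tables onto its first argument and returns it,
+    -- whereas table.imerged below collects them into a fresh table, roughly
+    --   table.imerge ({1,2},{3,4}) --> same table, now {1,2,3,4}
+    --   table.imerged({1,2},{3,4}) --> new table {1,2,3,4}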
+ local tmp, lst = { }, {...} + for i=1,#lst do + local nst = lst[i] + for j=1,#nst do + tmp[#tmp+1] = nst[j] + end + end + return tmp +end + +if not table.fastcopy then do + + local type, pairs, getmetatable, setmetatable = type, pairs, getmetatable, setmetatable + + local function fastcopy(old) -- fast one + if old then + local new = { } + for k,v in pairs(old) do + if type(v) == "table" then + new[k] = fastcopy(v) -- was just table.copy + else + new[k] = v + end + end + local mt = getmetatable(old) + if mt then + setmetatable(new,mt) + end + return new + else + return { } + end + end + + table.fastcopy = fastcopy + +end end + +if not table.copy then do + + local type, pairs, getmetatable, setmetatable = type, pairs, getmetatable, setmetatable + + local function copy(t, tables) -- taken from lua wiki, slightly adapted + tables = tables or { } + local tcopy = {} + if not tables[t] then + tables[t] = tcopy + end + for i,v in pairs(t) do -- brrr, what happens with sparse indexed + if type(i) == "table" then + if tables[i] then + i = tables[i] + else + i = copy(i, tables) + end + end + if type(v) ~= "table" then + tcopy[i] = v + elseif tables[v] then + tcopy[i] = tables[v] + else + tcopy[i] = copy(v, tables) + end + end + local mt = getmetatable(t) + if mt then + setmetatable(tcopy,mt) + end + return tcopy + end + + table.copy = copy + +end end + +-- rougly: copy-loop : unpack : sub == 0.9 : 0.4 : 0.45 (so in critical apps, use unpack) + +function table.sub(t,i,j) + return { unpack(t,i,j) } +end + +function table.replace(a,b) + for k,v in pairs(b) do + a[k] = v + end +end + +-- slower than #t on indexed tables (#t only returns the size of the numerically indexed slice) + +function table.is_empty(t) + return not t or not next(t) +end + +function table.one_entry(t) + local n = next(t) + return n and not next(t,n) +end + +function table.starts_at(t) + return ipairs(t,1)(t,0) +end + +do + + -- one of my first exercises in lua ... 
+ + -- 34.055.092 32.403.326 arabtype.tma + -- 1.620.614 1.513.863 lmroman10-italic.tma + -- 1.325.585 1.233.044 lmroman10-regular.tma + -- 1.248.157 1.158.903 lmsans10-regular.tma + -- 194.646 153.120 lmtypewriter10-regular.tma + -- 1.771.678 1.658.461 palatinosanscom-bold.tma + -- 1.695.251 1.584.491 palatinosanscom-regular.tma + -- 13.736.534 13.409.446 zapfinoextraltpro.tma + + -- 13.679.038 11.774.106 arabtype.tmc + -- 886.248 754.944 lmroman10-italic.tmc + -- 729.828 466.864 lmroman10-regular.tmc + -- 688.482 441.962 lmsans10-regular.tmc + -- 128.685 95.853 lmtypewriter10-regular.tmc + -- 715.929 582.985 palatinosanscom-bold.tmc + -- 669.942 540.126 palatinosanscom-regular.tmc + -- 1.560.588 1.317.000 zapfinoextraltpro.tmc + + table.serialize_functions = true + table.serialize_compact = true + table.serialize_inline = true + + local function key(k) + if type(k) == "number" then -- or k:find("^%d+$") then + return "["..k.."]" + elseif noquotes and k:find("^%a[%a%d%_]*$") then + return k + else + return '["'..k..'"]' + end + end + + local function simple_table(t) + if #t > 0 then + local n = 0 + for _,v in pairs(t) do + n = n + 1 + end + if n == #t then + local tt = { } + for i=1,#t do + local v = t[i] + local tv = type(v) + if tv == "number" or tv == "boolean" then + tt[#tt+1] = tostring(v) + elseif tv == "string" then + tt[#tt+1] = ("%q"):format(v) + else + tt = nil + break + end + end + return tt + end + end + return nil + end + + local function serialize(root,name,handle,depth,level,reduce,noquotes,indexed) + handle = handle or print + reduce = reduce or false + if depth then + depth = depth .. " " + if indexed then + handle(("%s{"):format(depth)) + else + handle(("%s%s={"):format(depth,key(name))) + end + else + depth = "" + local tname = type(name) + if tname == "string" then + if name == "return" then + handle("return {") + else + handle(name .. "={") + end + elseif tname == "number" then + handle("[" .. name .. 
"]={") + elseif tname == "boolean" then + if name then + handle("return {") + else + handle("{") + end + else + handle("t={") + end + end + if root and next(root) then + local compact = table.serialize_compact + local inline = compact and table.serialize_inline + local first, last = nil, 0 -- #root cannot be trusted here + if compact then + for k,v in ipairs(root) do -- NOT: for k=1,#root do (why) + if not first then first = k end + last = last + 1 + end + end + for _,k in pairs(table.sortedkeys(root)) do + local v = root[k] + local t = type(v) + if compact and first and type(k) == "number" and k >= first and k <= last then + if t == "number" then + handle(("%s %s,"):format(depth,v)) + elseif t == "string" then + if reduce and (v:find("^[%-%+]?[%d]-%.?[%d+]$") == 1) then + handle(("%s %s,"):format(depth,v)) + else + handle(("%s %q,"):format(depth,v)) + end + elseif t == "table" then + if not next(v) then + handle(("%s {},"):format(depth)) + elseif inline then + local st = simple_table(v) + if st then + handle(("%s { %s },"):format(depth,table.concat(st,", "))) + else + serialize(v,k,handle,depth,level+1,reduce,noquotes,true) + end + else + serialize(v,k,handle,depth,level+1,reduce,noquotes,true) + end + elseif t == "boolean" then + handle(("%s %s,"):format(depth,tostring(v))) + elseif t == "function" then + if table.serialize_functions then + handle(('%s loadstring(%q),'):format(depth,string.dump(v))) + else + handle(('%s "function",'):format(depth)) + end + else + handle(("%s %q,"):format(depth,tostring(v))) + end + elseif k == "__p__" then -- parent + if false then + handle(("%s __p__=nil,"):format(depth)) + end + elseif t == "number" then + handle(("%s %s=%s,"):format(depth,key(k),v)) + elseif t == "string" then + if reduce and (v:find("^[%-%+]?[%d]-%.?[%d+]$") == 1) then + handle(("%s %s=%s,"):format(depth,key(k),v)) + else + handle(("%s %s=%q,"):format(depth,key(k),v)) + end + elseif t == "table" then + if not next(v) then + handle(("%s %s={},"):format(depth,key(k))) + elseif inline then + local st = simple_table(v) + if st then + handle(("%s %s={ %s },"):format(depth,key(k),table.concat(st,", "))) + else + serialize(v,k,handle,depth,level+1,reduce,noquotes) + end + else + serialize(v,k,handle,depth,level+1,reduce,noquotes) + end + elseif t == "boolean" then + handle(("%s %s=%s,"):format(depth,key(k),tostring(v))) + elseif t == "function" then + if table.serialize_functions then + handle(('%s %s=loadstring(%q),'):format(depth,key(k),string.dump(v))) + else + handle(('%s %s="function",'):format(depth,key(k))) + end + else + handle(("%s %s=%q,"):format(depth,key(k),tostring(v))) + -- handle(('%s %s=loadstring(%q),'):format(depth,key(k),string.dump(function() return v end))) + end + end + if level > 0 then + handle(("%s},"):format(depth)) + else + handle(("%s}"):format(depth)) + end + else + handle(("%s}"):format(depth)) + end + end + + --~ name: + --~ + --~ true : return { } + --~ false : { } + --~ nil : t = { } + --~ string : string = { } + --~ 'return' : return { } + --~ number : [number] = { } + + function table.serialize(root,name,reduce,noquotes) + local t = { } + local function flush(s) + t[#t+1] = s + end + serialize(root, name, flush, nil, 0, reduce, noquotes) + return table.concat(t,"\n") + end + + function table.tohandle(handle,root,name,reduce,noquotes) + serialize(root, name, handle, nil, 0, reduce, noquotes) + end + + -- sometimes tables are real use (zapfino extra pro is some 85M) in which + -- case a stepwise serialization is nice; actually, we could consider: + -- + -- 
for line in table.serializer(root,name,reduce,noquotes) do + -- ...(line) + -- end + -- + -- so this is on the todo list + + table.tofile_maxtab = 2*1024 + + function table.tofile(filename,root,name,reduce,noquotes) + local f = io.open(filename,'w') + if f then + local concat = table.concat + local maxtab = table.tofile_maxtab + if maxtab > 1 then + local t = { } + local function flush(s) + t[#t+1] = s + if #t > maxtab then + f:write(concat(t,"\n"),"\n") -- hm, write(sometable) should be nice + t = { } + end + end + serialize(root, name, flush, nil, 0, reduce, noquotes) + f:write(concat(t,"\n"),"\n") + else + local function flush(s) + f:write(s,"\n") + end + serialize(root, name, flush, nil, 0, reduce, noquotes) + end + f:close() + end + end + +end + +--~ t = { +--~ b = "123", +--~ a = "x", +--~ c = 1.23, +--~ d = "1.23", +--~ e = true, +--~ f = { +--~ d = "1.23", +--~ a = "x", +--~ b = "123", +--~ c = 1.23, +--~ e = true, +--~ f = { +--~ e = true, +--~ f = { +--~ e = true +--~ }, +--~ }, +--~ }, +--~ g = function() end +--~ } + +--~ print(table.serialize(t), "\n") +--~ print(table.serialize(t,"name"), "\n") +--~ print(table.serialize(t,false), "\n") +--~ print(table.serialize(t,true), "\n") +--~ print(table.serialize(t,"name",true), "\n") +--~ print(table.serialize(t,"name",true,true), "\n") + +do + + local function flatten(t,f,complete) + for i=1,#t do + local v = t[i] + if type(v) == "table" then + if complete or type(v[1]) == "table" then + flatten(v,f,complete) + else + f[#f+1] = v + end + else + f[#f+1] = v + end + end + end + + function table.flatten(t) + local f = { } + flatten(t,f,true) + return f + end + + function table.unnest(t) -- bad name + local f = { } + flatten(t,f,false) + return f + end + + table.flatten_one_level = table.unnest + +end + +function table.insert_before_value(t,value,str) + for i=1,#t do + if t[i] == value then + table.insert(t,i,str) + return + end + end + table.insert(t,1,str) +end + +function table.insert_after_value(t,value,str) + for i=1,#t do + if t[i] == value then + table.insert(t,i+1,str) + return + end + end + t[#t+1] = str +end + +function table.are_equal(a,b,n,m) + if #a == #b then + n = n or 1 + m = m or #a + for i=n,m do + local ai, bi = a[i], b[i] + if (ai==bi) or (type(ai)=="table" and type(bi)=="table" and table.are_equal(ai,bi)) then + -- continue + else + return false + end + end + return true + else + return false + end +end + +function table.compact(t) + if t then + for k,v in pairs(t) do + if not next(v) then + t[k] = nil + end + end + end +end + +function table.tohash(t) + local h = { } + for _, v in pairs(t) do -- no ipairs here + h[v] = true + end + return h +end + +function table.fromhash(t) + local h = { } + for k, v in pairs(t) do -- no ipairs here + if v then h[#h+1] = k end + end + return h +end + +function table.contains(t, v) + if t then + for i=1, #t do + if t[i] == v then + return true + end + end + end + return false +end + +function table.count(t) + local n, e = 0, next(t) + while e do + n, e = n + 1, next(t,e) + end + return n +end + +function table.swapped(t) + local s = { } + for k, v in pairs(t) do + s[v] = k + end + return s +end + +--~ function table.are_equal(a,b) +--~ return table.serialize(a) == table.serialize(b) +--~ end + +function table.clone(t,p) -- t is optional or nil or table + if not p then + t, p = { }, t or { } + elseif not t then + t = { } + end + setmetatable(t, { __index = function(_,key) return p[key] end }) + return t +end + + +function table.hexed(t,seperator) + local tt = { } + for i=1,#t do 
tt[i] = string.format("0x%04X",t[i]) end + return table.concat(tt,seperator or " ") +end + +function table.reverse_hash(h) + local r = { } + for k,v in pairs(h) do + r[v] = (k:gsub(" ","")):lower() + end + return r +end + +function table.reverse(t) + local tt = { } + if #t > 0 then + for i=#t,1,-1 do + tt[#tt+1] = t[i] + end + end + return tt +end + + +-- filename : l-io.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-io'] = 1.001 + +if string.find(os.getenv("PATH"),";") then + io.fileseparator, io.pathseparator = "\\", ";" +else + io.fileseparator, io.pathseparator = "/" , ":" +end + +function io.loaddata(filename) + local f = io.open(filename,'rb') + if f then + local data = f:read('*all') + f:close() + return data + else + return nil + end +end + +function io.savedata(filename,data,joiner) + local f = io.open(filename, "wb") + if f then + if type(data) == "table" then + f:write(table.join(data,joiner or "")) + elseif type(data) == "function" then + data(f) + else + f:write(data) + end + f:close() + end +end + +function io.exists(filename) + local f = io.open(filename) + if f == nil then + return false + else + assert(f:close()) + return true + end +end + +function io.size(filename) + local f = io.open(filename) + if f == nil then + return 0 + else + local s = f:seek("end") + assert(f:close()) + return s + end +end + +function io.noflines(f) + local n = 0 + for _ in f:lines() do + n = n + 1 + end + f:seek('set',0) + return n +end + +do + + local sb = string.byte + + local nextchar = { + [ 4] = function(f) + return f:read(1,1,1,1) + end, + [ 2] = function(f) + return f:read(1,1) + end, + [ 1] = function(f) + return f:read(1) + end, + [-2] = function(f) + local a, b = f:read(1,1) + return b, a + end, + [-4] = function(f) + local a, b, c, d = f:read(1,1,1,1) + return d, c, b, a + end + } + + function io.characters(f,n) + if f then + return nextchar[n or 1], f + else + return nil, nil + end + end + +end + +do + + local sb = string.byte + +--~ local nextbyte = { +--~ [4] = function(f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ local c = f:read(1) +--~ local d = f:read(1) +--~ if d then +--~ return sb(a), sb(b), sb(c), sb(d) +--~ else +--~ return nil, nil, nil, nil +--~ end +--~ end, +--~ [2] = function(f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ if b then +--~ return sb(a), sb(b) +--~ else +--~ return nil, nil +--~ end +--~ end, +--~ [1] = function (f) +--~ local a = f:read(1) +--~ if a then +--~ return sb(a) +--~ else +--~ return nil +--~ end +--~ end, +--~ [-2] = function (f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ if b then +--~ return sb(b), sb(a) +--~ else +--~ return nil, nil +--~ end +--~ end, +--~ [-4] = function(f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ local c = f:read(1) +--~ local d = f:read(1) +--~ if d then +--~ return sb(d), sb(c), sb(b), sb(a) +--~ else +--~ return nil, nil, nil, nil +--~ end +--~ end +--~ } + + local nextbyte = { + [4] = function(f) + local a, b, c, d = f:read(1,1,1,1) + if d then + return sb(a), sb(b), sb(c), sb(d) + else + return nil, nil, nil, nil + end + end, + [2] = function(f) + local a, b = f:read(1,1) + if b then + return sb(a), sb(b) + else + return nil, nil + end + end, + [1] = function (f) + local a = f:read(1) + if a then + return sb(a) + else + return nil + end + end, + [-2] = function (f) + local 
a, b = f:read(1,1) + if b then + return sb(b), sb(a) + else + return nil, nil + end + end, + [-4] = function(f) + local a, b, c, d = f:read(1,1,1,1) + if d then + return sb(d), sb(c), sb(b), sb(a) + else + return nil, nil, nil, nil + end + end + } + + function io.bytes(f,n) + if f then + return nextbyte[n or 1], f + else + return nil, nil + end + end + +end + +function io.ask(question,default,options) + while true do + io.write(question) + if options then + io.write(string.format(" [%s]",table.concat(options,"|"))) + end + if default then + io.write(string.format(" [%s]",default)) + end + io.write(string.format(" ")) + local answer = io.read() + answer = answer:gsub("^%s*(.*)%s*$","%1") + if answer == "" and default then + return default + elseif not options then + return answer + else + for _,v in pairs(options) do + if v == answer then + return answer + end + end + local pattern = "^" .. answer + for _,v in pairs(options) do + if v:find(pattern) then + return v + end + end + end + end +end + + +-- filename : l-number.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-number'] = 1.001 + +if not number then number = { } end + +-- a,b,c,d,e,f = number.toset(100101) + +function number.toset(n) + return (tostring(n)):match("(.?)(.?)(.?)(.?)(.?)(.?)(.?)(.?)") +end + +local format = string.format + +function number.toevenhex(n) + local s = format("%X",n) + if #s % 2 == 0 then + return s + else + return "0" .. s + end +end + +-- the lpeg way is slower on 8 digits, but faster on 4 digits, some 7.5% +-- on +-- +-- for i=1,1000000 do +-- local a,b,c,d,e,f,g,h = number.toset(12345678) +-- local a,b,c,d = number.toset(1234) +-- local a,b,c = number.toset(123) +-- end +-- +-- of course dedicated "(.)(.)(.)(.)" matches are even faster + +do + local one = lpeg.C(1-lpeg.S(''))^1 + + function number.toset(n) + return one:match(tostring(n)) + end +end + + + +-- filename : l-set.lua +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-set'] = 1.001 + +if not set then set = { } end + +do + + local nums = { } + local tabs = { } + local concat = table.concat + + set.create = table.tohash + + function set.tonumber(t) + if next(t) then + local s = "" + -- we could save mem by sorting, but it slows down + for k, v in pairs(t) do + if v then + -- why bother about the leading space + s = s .. " " .. 
k + end + end + if not nums[s] then + tabs[#tabs+1] = t + nums[s] = #tabs + end + return nums[s] + else + return 0 + end + end + + function set.totable(n) + if n == 0 then + return { } + else + return tabs[n] or { } + end + end + + function set.contains(n,s) + if type(n) == "table" then + return n[s] + elseif n == 0 then + return false + else + local t = tabs[n] + return t and t[s] + end + end + +end + +--~ local c = set.create{'aap','noot','mies'} +--~ local s = set.tonumber(c) +--~ local t = set.totable(s) +--~ print(t['aap']) +--~ local c = set.create{'zus','wim','jet'} +--~ local s = set.tonumber(c) +--~ local t = set.totable(s) +--~ print(t['aap']) +--~ print(t['jet']) +--~ print(set.contains(t,'jet')) +--~ print(set.contains(t,'aap')) + + + +-- filename : l-os.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-os'] = 1.001 + +function os.resultof(command) + return io.popen(command,"r"):read("*all") +end + +if not os.exec then os.exec = os.execute end +if not os.spawn then os.spawn = os.execute end + +--~ os.type : windows | unix (new, we already guessed os.platform) +--~ os.name : windows | msdos | linux | macosx | solaris | .. | generic (new) + +if not io.fileseparator then + if string.find(os.getenv("PATH"),";") then + io.fileseparator, io.pathseparator, os.platform = "\\", ";", os.type or "windows" + else + io.fileseparator, io.pathseparator, os.platform = "/" , ":", os.type or "unix" + end +end + +os.platform = os.platform or os.type or (io.pathseparator == ";" and "windows") or "unix" + +function os.launch(str) + if os.platform == "windows" then + os.execute("start " .. str) -- os.spawn ? + else + os.execute(str .. " &") -- os.spawn ? 
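+        -- usage sketch: os.launch("readme.html") becomes "start readme.html"
+        -- on windows and "readme.html &" elsewhere, so in both cases the
+        -- command is handed to the system and the call returns without waiting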
+ end +end + +if not os.setenv then + function os.setenv() return false end +end + +if not os.times then + -- utime = user time + -- stime = system time + -- cutime = children user time + -- cstime = children system time + function os.times() + return { + utime = os.gettimeofday(), -- user + stime = 0, -- system + cutime = 0, -- children user + cstime = 0, -- children system + } + end +end + +os.gettimeofday = os.gettimeofday or os.clock + +do + local startuptime = os.gettimeofday() + function os.runtime() + return os.gettimeofday() - startuptime + end +end + +--~ print(os.gettimeofday()-os.time()) +--~ os.sleep(1.234) +--~ print (">>",os.runtime()) +--~ print(os.date("%H:%M:%S",os.gettimeofday())) +--~ print(os.date("%H:%M:%S",os.time())) + + +-- filename : l-md5.lua +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-md5'] = 1.001 + +if md5 then do + + local function convert(str,fmt) + return (string.gsub(md5.sum(str),".",function(chr) return string.format(fmt,string.byte(chr)) end)) + end + + if not md5.HEX then function md5.HEX(str) return convert(str,"%02X") end end + if not md5.hex then function md5.hex(str) return convert(str,"%02x") end end + if not md5.dec then function md5.dec(str) return convert(str,"%03i") end end + +end end + + +-- filename : l-file.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-file'] = 1.001 + +if not file then file = { } end + +function file.removesuffix(filename) + return filename:gsub("%.[%a%d]+$", "") +end + +function file.addsuffix(filename, suffix) + if not filename:find("%.[%a%d]+$") then + return filename .. "." .. suffix + else + return filename + end +end + +function file.replacesuffix(filename, suffix) + if not filename:find("%.[%a%d]+$") then + return filename .. "." .. suffix + else + return (filename:gsub("%.[%a%d]+$","."..suffix)) + end +end + +function file.dirname(name) + return name:match("^(.+)[/\\].-$") or "" +end + +function file.basename(name) + return name:match("^.+[/\\](.-)$") or name +end + +function file.nameonly(name) + return ((name:match("^.+[/\\](.-)$") or name):gsub("%..*$","")) +end + +function file.extname(name) + return name:match("^.+%.([^/\\]-)$") or "" +end + +file.suffix = file.extname + +function file.stripsuffix(name) + return (name:gsub("%.[%a%d]+$","")) +end + +--~ function file.join(...) +--~ local t = { ... } +--~ for i=1,#t do +--~ t[i] = (t[i]:gsub("\\","/")):gsub("/+$","") +--~ end +--~ return table.concat(t,"/") +--~ end + +--~ print(file.join("x/","/y")) +--~ print(file.join("http://","/y")) +--~ print(file.join("http://a","/y")) +--~ print(file.join("http:///a","/y")) +--~ print(file.join("//nas-1","/y")) + +function file.join(...) + local pth = table.concat({...},"/") + pth = pth:gsub("\\","/") + local a, b = pth:match("^(.*://)(.*)$") + if a and b then + return a .. b:gsub("//+","/") + end + a, b = pth:match("^(//)(.*)$") + if a and b then + return a .. 
b:gsub("//+","/") + end + return (pth:gsub("//+","/")) +end + +function file.is_writable(name) + local f = io.open(name, 'w') + if f then + f:close() + return true + else + return false + end +end + +function file.is_readable(name) + local f = io.open(name,'r') + if f then + f:close() + return true + else + return false + end +end + +--~ function file.split_path(str) +--~ if str:find(';') then +--~ return str:splitchr(";") +--~ else +--~ return str:splitchr(io.pathseparator) +--~ end +--~ end + +-- todo: lpeg + +function file.split_path(str) + local t = { } + str = str:gsub("\\", "/") + str = str:gsub("(%a):([;/])", "%1\001%2") + for name in str:gmatch("([^;:]+)") do + if name ~= "" then + name = name:gsub("\001",":") + t[#t+1] = name + end + end + return t +end + +function file.join_path(tab) + return table.concat(tab,io.pathseparator) -- can have trailing // +end + +--~ print('test' .. " == " .. file.collapse_path("test")) +--~ print("test/test" .. " == " .. file.collapse_path("test/test")) +--~ print("test/test/test" .. " == " .. file.collapse_path("test/test/test")) +--~ print("test/test" .. " == " .. file.collapse_path("test/../test/test")) +--~ print("test" .. " == " .. file.collapse_path("test/../test")) +--~ print("../test" .. " == " .. file.collapse_path("../test")) +--~ print("../test/" .. " == " .. file.collapse_path("../test/")) +--~ print("a/a" .. " == " .. file.collapse_path("a/b/c/../../a")) + +--~ function file.collapse_path(str) +--~ local ok, n = false, 0 +--~ while not ok do +--~ ok = true +--~ str, n = str:gsub("[^%./]+/%.%./", function(s) +--~ ok = false +--~ return "" +--~ end) +--~ end +--~ return (str:gsub("/%./","/")) +--~ end + +function file.collapse_path(str) + local n = 1 + while n > 0 do + str, n = str:gsub("([^/%.]+/%.%./)","") + end + return (str:gsub("/%./","/")) +end + +function file.robustname(str) + return (str:gsub("[^%a%d%/%-%.\\]+","-")) +end + +file.readdata = io.loaddata +file.savedata = io.savedata + +function file.copy(oldname,newname) + file.savedata(newname,io.loaddata(oldname)) +end + + +-- filename : l-url.lua +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-url'] = 1.001 +if not url then url = { } end + +-- from the spec (on the web): +-- +-- foo://example.com:8042/over/there?name=ferret#nose +-- \_/ \______________/\_________/ \_________/ \__/ +-- | | | | | +-- scheme authority path query fragment +-- | _____________________|__ +-- / \ / \ +-- urn:example:animal:ferret:nose + +do + + local function tochar(s) + return string.char(tonumber(s,16)) + end + + local colon, qmark, hash, slash, percent, endofstring = lpeg.P(":"), lpeg.P("?"), lpeg.P("#"), lpeg.P("/"), lpeg.P("%"), lpeg.P(-1) + + local hexdigit = lpeg.R("09","AF","af") + local escaped = percent * lpeg.C(hexdigit * hexdigit) / tochar + + local scheme = lpeg.Cs((escaped+(1-colon-slash-qmark-hash))^0) * colon + lpeg.Cc("") + local authority = slash * slash * lpeg.Cs((escaped+(1- slash-qmark-hash))^0) + lpeg.Cc("") + local path = slash * lpeg.Cs((escaped+(1- qmark-hash))^0) + lpeg.Cc("") + local query = qmark * lpeg.Cs((escaped+(1- hash))^0) + lpeg.Cc("") + local fragment = hash * lpeg.Cs((escaped+(1- endofstring))^0) + lpeg.Cc("") + + local parser = lpeg.Ct(scheme * authority * path * query * fragment) + + function url.split(str) + return (type(str) == "string" and parser:match(str)) or str + end + +end + +function url.hashed(str) + 
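+    -- rough sketch of the result for an invented example url (field values
+    -- follow the lpeg captures in url.split above, so the "//" and "/"
+    -- separators themselves are not part of the captured strings):
+    --   url.hashed("http://www.pragma-ade.com/general/mk.pdf#intro") -->
+    --   { scheme="http", authority="www.pragma-ade.com",
+    --     path="general/mk.pdf", query="", fragment="intro",
+    --     original=<the given string> }
+    -- when no scheme is present, the scheme field falls back to "file"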
local s = url.split(str) + return { + scheme = (s[1] ~= "" and s[1]) or "file", + authority = s[2], + path = s[3], + query = s[4], + fragment = s[5], + original = str + } +end + +function url.filename(filename) + local t = url.hashed(filename) + return (t.scheme == "file" and t.path:gsub("^/([a-zA-Z])([:|])/)","%1:")) or filename +end + +function url.query(str) + if type(str) == "string" then + local t = { } + for k, v in str:gmatch("([^&=]*)=([^&=]*)") do + t[k] = v + end + return t + else + return str + end +end + +--~ print(url.filename("file:///c:/oeps.txt")) +--~ print(url.filename("c:/oeps.txt")) +--~ print(url.filename("file:///oeps.txt")) +--~ print(url.filename("file:///etc/test.txt")) +--~ print(url.filename("/oeps.txt")) + +-- from the spec on the web (sort of): +--~ +--~ function test(str) +--~ print(table.serialize(url.hashed(str))) +--~ end +---~ +--~ test("%56pass%20words") +--~ test("file:///c:/oeps.txt") +--~ test("file:///c|/oeps.txt") +--~ test("file:///etc/oeps.txt") +--~ test("file://./etc/oeps.txt") +--~ test("file:////etc/oeps.txt") +--~ test("ftp://ftp.is.co.za/rfc/rfc1808.txt") +--~ test("http://www.ietf.org/rfc/rfc2396.txt") +--~ test("ldap://[2001:db8::7]/c=GB?objectClass?one#what") +--~ test("mailto:John.Doe@example.com") +--~ test("news:comp.infosystems.www.servers.unix") +--~ test("tel:+1-816-555-1212") +--~ test("telnet://192.0.2.16:80/") +--~ test("urn:oasis:names:specification:docbook:dtd:xml:4.1.2") +--~ test("/etc/passwords") +--~ test("http://www.pragma-ade.com/spaced%20name") + +--~ test("zip:///oeps/oeps.zip#bla/bla.tex") +--~ test("zip:///oeps/oeps.zip?bla/bla.tex") + + +-- filename : l-dir.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-dir'] = 1.001 + +dir = { } + +-- optimizing for no string.find (*) does not save time + +if lfs then do + +--~ local attributes = lfs.attributes +--~ local walkdir = lfs.dir +--~ +--~ local function glob_pattern(path,patt,recurse,action) +--~ local ok, scanner = xpcall(function() return walkdir(path) end, function() end) -- kepler safe +--~ if ok and type(scanner) == "function" then +--~ if not path:find("/$") then path = path .. '/' end +--~ for name in scanner do +--~ local full = path .. name +--~ local mode = attributes(full,'mode') +--~ if mode == 'file' then +--~ if name:find(patt) then +--~ action(full) +--~ end +--~ elseif recurse and (mode == "directory") and (name ~= '.') and (name ~= "..") then +--~ glob_pattern(full,patt,recurse,action) +--~ end +--~ end +--~ end +--~ end +--~ +--~ dir.glob_pattern = glob_pattern +--~ +--~ local function glob(pattern, action) +--~ local t = { } +--~ local action = action or function(name) t[#t+1] = name end +--~ local path, patt = pattern:match("^(.*)/*%*%*/*(.-)$") +--~ local recurse = path and patt +--~ if not recurse then +--~ path, patt = pattern:match("^(.*)/(.-)$") +--~ if not (path and patt) then +--~ path, patt = '.', pattern +--~ end +--~ end +--~ patt = patt:gsub("([%.%-%+])", "%%%1") +--~ patt = patt:gsub("%*", ".*") +--~ patt = patt:gsub("%?", ".") +--~ patt = "^" .. patt .. "$" +--~ -- print('path: ' .. path .. ' | pattern: ' .. patt .. ' | recurse: ' .. 
tostring(recurse)) +--~ glob_pattern(path,patt,recurse,action) +--~ return t +--~ end +--~ +--~ dir.glob = glob + + local attributes = lfs.attributes + local walkdir = lfs.dir + + local function glob_pattern(path,patt,recurse,action) + local ok, scanner + if path == "/" then + ok, scanner = xpcall(function() return walkdir(path..".") end, function() end) -- kepler safe + else + ok, scanner = xpcall(function() return walkdir(path) end, function() end) -- kepler safe + end + if ok and type(scanner) == "function" then + if not path:find("/$") then path = path .. '/' end + for name in scanner do + local full = path .. name + local mode = attributes(full,'mode') + if mode == 'file' then + if full:find(patt) then + action(full) + end + elseif recurse and (mode == "directory") and (name ~= '.') and (name ~= "..") then + glob_pattern(full,patt,recurse,action) + end + end + end + end + + dir.glob_pattern = glob_pattern + + --~ local function glob(pattern, action) + --~ local t = { } + --~ local path, rest, patt, recurse + --~ local action = action or function(name) t[#t+1] = name end + --~ local pattern = pattern:gsub("^%*%*","./**") + --~ local pattern = pattern:gsub("/%*/","/**/") + --~ path, rest = pattern:match("^(/)(.-)$") + --~ if path then + --~ path = path + --~ else + --~ path, rest = pattern:match("^([^/]*)/(.-)$") + --~ end + --~ if rest then + --~ patt = rest:gsub("([%.%-%+])", "%%%1") + --~ end + --~ patt = patt:gsub("%*", "[^/]*") + --~ patt = patt:gsub("%?", "[^/]") + --~ patt = patt:gsub("%[%^/%]%*%[%^/%]%*", ".*") + --~ if path == "" then path = "." end + --~ recurse = patt:find("%.%*/") ~= nil + --~ glob_pattern(path,patt,recurse,action) + --~ return t + --~ end + + local P, S, R, C, Cc, Cs, Ct, Cv, V = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.Cc, lpeg.Cs, lpeg.Ct, lpeg.Cv, lpeg.V + + local pattern = Ct { + [1] = (C(P(".") + P("/")^1) + C(R("az","AZ") * P(":") * P("/")^0) + Cc("./")) * V(2) * V(3), + [2] = C(((1-S("*?/"))^0 * P("/"))^0), + [3] = C(P(1)^0) + } + + local filter = Cs ( ( + P("**") / ".*" + + P("*") / "[^/]*" + + P("?") / "[^/]" + + P(".") / "%%." + + P("+") / "%%+" + + P("-") / "%%-" + + P(1) + )^0 ) + + local function glob(str,t) + if type(str) == "table" then + local t = t or { } + for _, s in ipairs(str) do + glob(s,t) + end + return t + else + local split = pattern:match(str) + if split then + local t = t or { } + local action = action or function(name) t[#t+1] = name end + local root, path, base = split[1], split[2], split[3] + local recurse = base:find("**") + local start = root .. path + local result = filter:match(start .. base) + glob_pattern(start,result,recurse,action) + return t + else + return { } + end + end + end + + dir.glob = glob + + --~ list = dir.glob("**/*.tif") + --~ list = dir.glob("/**/*.tif") + --~ list = dir.glob("./**/*.tif") + --~ list = dir.glob("oeps/**/*.tif") + --~ list = dir.glob("/oeps/**/*.tif") + + local function globfiles(path,recurse,func,files) -- func == pattern or function + if type(func) == "string" then + local s = func -- alas, we need this indirect way + func = function(name) return name:find(s) end + end + files = files or { } + for name in walkdir(path) do + if name:find("^%.") then + --- skip + elseif attributes(name,'mode') == "directory" then + if recurse then + globfiles(path .. "/" .. name,recurse,func,files) + end + elseif func then + if func(name) then + files[#files+1] = path .. "/" .. name + end + else + files[#files+1] = path .. "/" .. 
name + end + end + return files + end + + dir.globfiles = globfiles + + -- t = dir.glob("c:/data/develop/context/sources/**/????-*.tex") + -- t = dir.glob("c:/data/develop/tex/texmf/**/*.tex") + -- t = dir.glob("c:/data/develop/context/texmf/**/*.tex") + -- t = dir.glob("f:/minimal/tex/**/*") + -- print(dir.ls("f:/minimal/tex/**/*")) + -- print(dir.ls("*.tex")) + + function dir.ls(pattern) + return table.concat(glob(pattern),"\n") + end + + --~ mkdirs("temp") + --~ mkdirs("a/b/c") + --~ mkdirs(".","/a/b/c") + --~ mkdirs("a","b","c") + + local make_indeed = true -- false + + if string.find(os.getenv("PATH"),";") then + + function dir.mkdirs(...) + local str, pth = "", "" + for _, s in ipairs({...}) do + if s ~= "" then + if str ~= "" then + str = str .. "/" .. s + else + str = s + end + end + end + local first, middle, last + local drive = false + first, middle, last = str:match("^(//)(//*)(.*)$") + if first then + -- empty network path == local path + else + first, last = str:match("^(//)/*(.-)$") + if first then + middle, last = str:match("([^/]+)/+(.-)$") + if middle then + pth = "//" .. middle + else + pth = "//" .. last + last = "" + end + else + first, middle, last = str:match("^([a-zA-Z]:)(/*)(.-)$") + if first then + pth, drive = first .. middle, true + else + middle, last = str:match("^(/*)(.-)$") + if not middle then + last = str + end + end + end + end + for s in last:gmatch("[^/]+") do + if pth == "" then + pth = s + elseif drive then + pth, drive = pth .. s, false + else + pth = pth .. "/" .. s + end + if make_indeed and not lfs.isdir(pth) then + lfs.mkdir(pth) + end + end + return pth, (lfs.isdir(pth) == true) + end + +--~ print(dir.mkdirs("","","a","c")) +--~ print(dir.mkdirs("a")) +--~ print(dir.mkdirs("a:")) +--~ print(dir.mkdirs("a:/b/c")) +--~ print(dir.mkdirs("a:b/c")) +--~ print(dir.mkdirs("a:/bbb/c")) +--~ print(dir.mkdirs("/a/b/c")) +--~ print(dir.mkdirs("/aaa/b/c")) +--~ print(dir.mkdirs("//a/b/c")) +--~ print(dir.mkdirs("///a/b/c")) +--~ print(dir.mkdirs("a/bbb//ccc/")) + + function dir.expand_name(str) + local first, nothing, last = str:match("^(//)(//*)(.*)$") + if first then + first = lfs.currentdir() .. "/" + first = first:gsub("\\","/") + end + if not first then + first, last = str:match("^(//)/*(.*)$") + end + if not first then + first, last = str:match("^([a-zA-Z]:)(.*)$") + if first and not last:find("^/") then + local d = lfs.currentdir() + if lfs.chdir(first) then + first = lfs.currentdir() + first = first:gsub("\\","/") + end + lfs.chdir(d) + end + end + if not first then + first, last = lfs.currentdir(), str + first = first:gsub("\\","/") + end + last = last:gsub("//","/") + last = last:gsub("/%./","/") + last = last:gsub("^/*","") + first = first:gsub("/*$","") + if last == "" then + return first + else + return first .. "/" .. last + end + end + + else + + function dir.mkdirs(...) + local str, pth = "", "" + for _, s in ipairs({...}) do + if s ~= "" then + if str ~= "" then + str = str .. "/" .. s + else + str = s + end + end + end + str = str:gsub("/+","/") + if str:find("^/") then + pth = "/" + for s in str:gmatch("[^/]+") do + local first = (pth == "/") + if first then + pth = pth .. s + else + pth = pth .. "/" .. s + end + if make_indeed and not first and not lfs.isdir(pth) then + lfs.mkdir(pth) + end + end + else + pth = "." + for s in str:gmatch("[^/]+") do + pth = pth .. "/" .. 
s + if make_indeed and not lfs.isdir(pth) then + lfs.mkdir(pth) + end + end + end + return pth, (lfs.isdir(pth) == true) + end + +--~ print(dir.mkdirs("","","a","c")) +--~ print(dir.mkdirs("a")) +--~ print(dir.mkdirs("/a/b/c")) +--~ print(dir.mkdirs("/aaa/b/c")) +--~ print(dir.mkdirs("//a/b/c")) +--~ print(dir.mkdirs("///a/b/c")) +--~ print(dir.mkdirs("a/bbb//ccc/")) + + function dir.expand_name(str) + if not str:find("^/") then + str = lfs.currentdir() .. "/" .. str + end + str = str:gsub("//","/") + str = str:gsub("/%./","/") + return str + end + + end + + dir.makedirs = dir.mkdirs + +end end + + +-- filename : l-boolean.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-boolean'] = 1.001 +if not boolean then boolean = { } end + +function boolean.tonumber(b) + if b then return 1 else return 0 end +end + +function toboolean(str,tolerant) + if tolerant then + local tstr = type(str) + if tstr == "string" then + return str == "true" or str == "yes" or str == "on" or str == "1" + elseif tstr == "number" then + return tonumber(str) ~= 0 + elseif tstr == "nil" then + return false + else + return str + end + elseif str == "true" then + return true + elseif str == "false" then + return false + else + return str + end +end + +function string.is_boolean(str) + if type(str) == "string" then + if str == "true" or str == "yes" or str == "on" then + return true + elseif str == "false" or str == "no" or str == "off" then + return false + end + end + return nil +end + +function boolean.alwaystrue() + return true +end + +function boolean.falsetrue() + return false +end + + +-- filename : l-unicode.lua +-- comment : split off from luat-inp +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-unicode'] = 1.001 +if not unicode then unicode = { } end + +if not garbagecollector then + garbagecollector = { + push = function() collectgarbage("stop") end, + pop = function() collectgarbage("restart") end, + } +end + +-- 0 EF BB BF UTF-8 +-- 1 FF FE UTF-16-little-endian +-- 2 FE FF UTF-16-big-endian +-- 3 FF FE 00 00 UTF-32-little-endian +-- 4 00 00 FE FF UTF-32-big-endian + +unicode.utfname = { + [0] = 'utf-8', + [1] = 'utf-16-le', + [2] = 'utf-16-be', + [3] = 'utf-32-le', + [4] = 'utf-32-be' +} + +function unicode.utftype(f) -- \000 fails ! 
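+    -- usage sketch: pass a file handle opened in binary mode; the function
+    -- inspects the first bytes, leaves the handle positioned just past any
+    -- byte order mark it recognises (or back at the start when none is
+    -- found), and returns an index into unicode.utfname above, e.g.
+    --   local f = io.open(name,'rb')
+    --   local kind = unicode.utfname[unicode.utftype(f)] -- 'utf-8' etc.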
+ local str = f:read(4) + if not str then + f:seek('set') + return 0 + elseif str:find("^%z%z\254\255") then + return 4 + elseif str:find("^\255\254%z%z") then + return 3 + elseif str:find("^\254\255") then + f:seek('set',2) + return 2 + elseif str:find("^\255\254") then + f:seek('set',2) + return 1 + elseif str:find("^\239\187\191") then + f:seek('set',3) + return 0 + else + f:seek('set') + return 0 + end +end + +function unicode.utf16_to_utf8(str, endian) + garbagecollector.push() + local result = { } + local tc, uc = table.concat, unicode.utf8.char + local tmp, n, m, p = { }, 0, 0, 0 + -- lf | cr | crlf / (cr:13, lf:10) + local function doit() + if n == 10 then + if p ~= 13 then + result[#result+1] = tc(tmp,"") + tmp = { } + p = 0 + end + elseif n == 13 then + result[#result+1] = tc(tmp,"") + tmp = { } + p = n + else + tmp[#tmp+1] = uc(n) + p = 0 + end + end + for l,r in str:bytepairs() do + if endian then + n = l*256 + r + else + n = r*256 + l + end + if m > 0 then + n = (m-0xD800)*0x400 + (n-0xDC00) + 0x10000 + m = 0 + doit() + elseif n >= 0xD800 and n <= 0xDBFF then + m = n + else + doit() + end + end + if #tmp > 0 then + result[#result+1] = tc(tmp,"") + end + garbagecollector.pop() + return result +end + +function unicode.utf32_to_utf8(str, endian) + garbagecollector.push() + local result = { } + local tc, uc = table.concat, unicode.utf8.char + local tmp, n, m, p = { }, 0, -1, 0 + -- lf | cr | crlf / (cr:13, lf:10) + local function doit() + if n == 10 then + if p ~= 13 then + result[#result+1] = tc(tmp,"") + tmp = { } + p = 0 + end + elseif n == 13 then + result[#result+1] = tc(tmp,"") + tmp = { } + p = n + else + tmp[#tmp+1] = uc(n) + p = 0 + end + end + for a,b in str:bytepairs() do + if a and b then + if m < 0 then + if endian then + m = a*256*256*256 + b*256*256 + else + m = b*256 + a + end + else + if endian then + n = m + a*256 + b + else + n = m + b*256*256*256 + a*256*256 + end + m = -1 + doit() + end + else + break + end + end + if #tmp > 0 then + result[#result+1] = tc(tmp,"") + end + garbagecollector.pop() + return result +end + + +-- filename : l-utils.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-utils'] = 1.001 + +if not utils then utils = { } end +if not utils.merger then utils.merger = { } end +if not utils.lua then utils.lua = { } end + +utils.merger.m_begin = "begin library merge" +utils.merger.m_end = "end library merge" +utils.merger.pattern = + "%c+" .. + "%-%-%s+" .. utils.merger.m_begin .. + "%c+(.-)%c+" .. + "%-%-%s+" .. utils.merger.m_end .. + "%c+" + +function utils.merger._self_fake_() + return + "-- " .. "created merged file" .. "\n\n" .. + "-- " .. utils.merger.m_begin .. "\n\n" .. + "-- " .. utils.merger.m_end .. "\n\n" +end + +function utils.report(...) + print(...) +end + +function utils.merger._self_load_(name) + local f, data = io.open(name), "" + if f then + data = f:read("*all") + f:close() + end + return data or "" +end + +function utils.merger._self_save_(name, data) + if data ~= "" then + local f = io.open(name,'w') + if f then + f:write(data) + f:close() + end + end +end + +function utils.merger._self_swap_(data,code) + if data ~= "" then + return (data:gsub(utils.merger.pattern, function(s) + return "\n\n" .. "-- "..utils.merger.m_begin .. "\n" .. code .. "\n" .. "-- "..utils.merger.m_end .. 
"\n\n" + end, 1)) + else + return "" + end +end + +function utils.merger._self_libs_(libs,list) + local result, f = { }, nil + if type(libs) == 'string' then libs = { libs } end + if type(list) == 'string' then list = { list } end + for _, lib in ipairs(libs) do + for _, pth in ipairs(list) do + local name = string.gsub(pth .. "/" .. lib,"\\","/") + f = io.open(name) + if f then + -- utils.report("merging library",name) + result[#result+1] = f:read("*all") + f:close() + list = { pth } -- speed up the search + break + else + -- utils.report("no library",name) + end + end + end + return table.concat(result, "\n\n") +end + +function utils.merger.selfcreate(libs,list,target) + if target then + utils.merger._self_save_( + target, + utils.merger._self_swap_( + utils.merger._self_fake_(), + utils.merger._self_libs_(libs,list) + ) + ) + end +end + +function utils.merger.selfmerge(name,libs,list,target) + utils.merger._self_save_( + target or name, + utils.merger._self_swap_( + utils.merger._self_load_(name), + utils.merger._self_libs_(libs,list) + ) + ) +end + +function utils.merger.selfclean(name) + utils.merger._self_save_( + name, + utils.merger._self_swap_( + utils.merger._self_load_(name), + "" + ) + ) +end + +utils.lua.compile_strip = true + +function utils.lua.compile(luafile, lucfile) + -- utils.report("compiling",luafile,"into",lucfile) + os.remove(lucfile) + local command = "-o " .. string.quote(lucfile) .. " " .. string.quote(luafile) + if utils.lua.compile_strip then + command = "-s " .. command + end + if os.spawn("texluac " .. command) == 0 then + return true + elseif os.spawn("luac " .. command) == 0 then + return true + else + return false + end +end + + + +-- filename : luat-lib.lua +-- comment : companion to luat-lib.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['luat-lib'] = 1.001 + +-- mostcode moved to the l-*.lua and other luat-*.lua files + +-- os / io + +os.setlocale(nil,nil) -- useless feature and even dangerous in luatex + +-- os.platform + +-- mswin|bccwin|mingw|cygwin windows +-- darwin|rhapsody|nextstep macosx +-- netbsd|unix unix +-- linux linux + +if not io.fileseparator then + if string.find(os.getenv("PATH"),";") then + io.fileseparator, io.pathseparator, os.platform = "\\", ";", os.type or "windows" + else + io.fileseparator, io.pathseparator, os.platform = "/" , ":", os.type or "unix" + end +end + +os.platform = os.platform or os.type or (io.pathseparator == ";" and "windows") or "unix" + +-- arg normalization +-- +-- for k,v in pairs(arg) do print(k,v) end + +-- environment + +if not environment then environment = { } end + +environment.ownbin = environment.ownbin or arg[-2] or arg[-1] or arg[0] or "luatex" + +local ownpath = nil -- we could use a metatable here + +function environment.ownpath() + if not ownpath then + for p in string.gmatch(os.getenv("PATH"),"[^"..io.pathseparator.."]+") do + local b = file.join(p,environment.ownbin) + if lfs.isfile(b..".exe") or lfs.isfile(b) then + ownpath = p + break + end + end + if not ownpath then ownpath = '.' 
end + end + return ownpath +end + +if arg and (arg[0] == 'luatex' or arg[0] == 'luatex.exe') and arg[1] == "--luaonly" then + arg[-1]=arg[0] arg[0]=arg[2] for k=3,#arg do arg[k-2]=arg[k] end arg[#arg]=nil arg[#arg]=nil +end + +environment.arguments = { } +environment.files = { } +environment.sorted_argument_keys = nil + +environment.platform = os.platform + +function environment.initialize_arguments(arg) + environment.arguments = { } + environment.files = { } + environment.sorted_argument_keys = nil + for index, argument in pairs(arg) do + if index > 0 then + local flag, value = argument:match("^%-+(.+)=(.-)$") + if flag then + environment.arguments[flag] = string.unquote(value or "") + else + flag = argument:match("^%-+(.+)") + if flag then + environment.arguments[flag] = true + else + environment.files[#environment.files+1] = argument + end + end + end + end + environment.ownname = environment.ownname or arg[0] or 'unknown.lua' +end + +function environment.showarguments() + for k,v in pairs(environment.arguments) do + print(k .. " : " .. tostring(v)) + end + if #environment.files > 0 then + print("files : " .. table.concat(environment.files, " ")) + end +end + +function environment.setargument(name,value) + environment.arguments[name] = value +end + +function environment.argument(name) + if environment.arguments[name] then + return environment.arguments[name] + else + if not environment.sorted_argument_keys then + environment.sorted_argument_keys = { } + for _,v in pairs(table.sortedkeys(environment.arguments)) do + table.insert(environment.sorted_argument_keys, "^" .. v) + end + end + for _,v in pairs(environment.sorted_argument_keys) do + if name:find(v) then + return environment.arguments[v:sub(2,#v)] + end + end + end + return nil +end + +function environment.split_arguments(separator) -- rather special, cut-off before separator + local done, before, after = false, { }, { } + for _,v in ipairs(environment.original_arguments) do + if not done and v == separator then + done = true + elseif done then + after[#after+1] = v + else + before[#before+1] = v + end + end + return before, after +end + +function environment.reconstruct_commandline(arg) + if not arg then arg = environment.original_arguments end + local result = { } + for _,a in ipairs(arg) do -- ipairs 1 .. #n + local kk, vv = a:match("^(%-+.-)=(.+)$") + if kk and vv then + if vv:find(" ") then + result[#result+1] = kk .. "=" .. string.quote(vv) + else + result[#result+1] = a + end + elseif a:find(" ") then + result[#result+1] = string.quote(a) + else + result[#result+1] = a + end + end + return table.join(result," ") +end + +if arg then + environment.initialize_arguments(arg) + environment.original_arguments = arg + arg = { } -- prevent duplicate handling +end + + +-- filename : luat-inp.lua +-- comment : companion to luat-lib.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +-- This lib is multi-purpose and can be loaded again later on so that +-- additional functionality becomes available. We will split this +-- module in components when we're done with prototyping. + +-- TODO: os.getenv -> os.env[] +-- TODO: instances.[hashes,cnffiles,configurations,522] -> ipairs (alles check, sneller) +-- TODO: check escaping in find etc, too much, too slow + +-- This is the first code I wrote for LuaTeX, so it needs some cleanup. 
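-- A minimal sketch (names and values made up) of the environment.* argument
-- helpers defined a little earlier: --key=value flags end up in
-- environment.arguments, bare --key flags become true, and remaining words
-- are collected in environment.files.

local function demo_arguments() -- hypothetical helper, never called
    environment.initialize_arguments { [0] = "luatools", "--verbose", "--min=5", "oeps.tex" }
    assert(environment.arguments.verbose == true)
    assert(environment.arguments.min == "5")
    assert(environment.files[1] == "oeps.tex")
    -- values containing spaces are requoted when the commandline is rebuilt
    return environment.reconstruct_commandline { "--pattern=some file.tex" }
    -- returns: --pattern="some file.tex"
end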
+ +-- To be considered: hash key lowercase, first entry in table filename +-- (any case), rest paths (so no need for optimization). Or maybe a +-- separate table that matches lowercase names to mixed case when +-- present. In that case the lower() cases can go away. I will do that +-- only when we run into problems with names ... well ... Iwona-Regular. + +-- Beware, loading and saving is overloaded in luat-tmp! + +if not versions then versions = { } end versions['luat-inp'] = 1.001 +if not environment then environment = { } end +if not file then file = { } end + +if environment.aleph_mode == nil then environment.aleph_mode = true end -- temp hack + +if not input then input = { } end +if not input.suffixes then input.suffixes = { } end +if not input.formats then input.formats = { } end +if not input.aux then input.aux = { } end + +if not input.suffixmap then input.suffixmap = { } end + +if not input.locators then input.locators = { } end -- locate databases +if not input.hashers then input.hashers = { } end -- load databases +if not input.generators then input.generators = { } end -- generate databases +if not input.filters then input.filters = { } end -- conversion filters + +local format = string.format + +input.locators.notfound = { nil } +input.hashers.notfound = { nil } +input.generators.notfound = { nil } + +input.cacheversion = '1.0.1' +input.banner = nil +input.verbose = false +input.debug = false +input.cnfname = 'texmf.cnf' +input.luaname = 'texmfcnf.lua' +input.lsrname = 'ls-R' +input.luasuffix = '.tma' +input.lucsuffix = '.tmc' + +-- we use a cleaned up list / format=any is a wildcard, as is *name + +input.formats['afm'] = 'AFMFONTS' input.suffixes['afm'] = { 'afm' } +input.formats['enc'] = 'ENCFONTS' input.suffixes['enc'] = { 'enc' } +input.formats['fmt'] = 'TEXFORMATS' input.suffixes['fmt'] = { 'fmt' } +input.formats['map'] = 'TEXFONTMAPS' input.suffixes['map'] = { 'map' } +input.formats['mp'] = 'MPINPUTS' input.suffixes['mp'] = { 'mp' } +input.formats['ocp'] = 'OCPINPUTS' input.suffixes['ocp'] = { 'ocp' } +input.formats['ofm'] = 'OFMFONTS' input.suffixes['ofm'] = { 'ofm', 'tfm' } +input.formats['otf'] = 'OPENTYPEFONTS' input.suffixes['otf'] = { 'otf' } -- 'ttf' +input.formats['opl'] = 'OPLFONTS' input.suffixes['opl'] = { 'opl' } +input.formats['otp'] = 'OTPINPUTS' input.suffixes['otp'] = { 'otp' } +input.formats['ovf'] = 'OVFFONTS' input.suffixes['ovf'] = { 'ovf', 'vf' } +input.formats['ovp'] = 'OVPFONTS' input.suffixes['ovp'] = { 'ovp' } +input.formats['tex'] = 'TEXINPUTS' input.suffixes['tex'] = { 'tex' } +input.formats['tfm'] = 'TFMFONTS' input.suffixes['tfm'] = { 'tfm' } +input.formats['ttf'] = 'TTFONTS' input.suffixes['ttf'] = { 'ttf', 'ttc' } +input.formats['pfb'] = 'T1FONTS' input.suffixes['pfb'] = { 'pfb', 'pfa' } +input.formats['vf'] = 'VFFONTS' input.suffixes['vf'] = { 'vf' } + +input.formats['fea'] = 'FONTFEATURES' input.suffixes['fea'] = { 'fea' } +input.formats['cid'] = 'FONTCIDMAPS' input.suffixes['cid'] = { 'cid', 'cidmap' } + +input.formats ['texmfscripts'] = 'TEXMFSCRIPTS' -- new +input.suffixes['texmfscripts'] = { 'rb', 'pl', 'py' } -- 'lua' + +input.formats ['lua'] = 'LUAINPUTS' -- new +input.suffixes['lua'] = { 'lua', 'luc', 'tma', 'tmc' } + +-- here we catch a few new thingies (todo: add these paths to context.tmf) +-- +-- FONTFEATURES = .;$TEXMF/fonts/fea// +-- FONTCIDMAPS = .;$TEXMF/fonts/cid// + +function input.checkconfigdata(instance) -- not yet ok, no time for debugging now + local function fix(varname,default) + local proname = varname .. "." .. 
instance.progname or "crap" + local p = instance.environment[proname] + local v = instance.environment[varname] + if not ((p and p ~= "") or (v and v ~= "")) then + instance.variables[varname] = default -- or environment? + end + end + fix("LUAINPUTS" , ".;$TEXINPUTS;$TEXMFSCRIPTS") + fix("FONTFEATURES", ".;$TEXMF/fonts/fea//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS") + fix("FONTCIDMAPS" , ".;$TEXMF/fonts/cid//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS") +end + +-- backward compatible ones + +input.alternatives = { } + +input.alternatives['map files'] = 'map' +input.alternatives['enc files'] = 'enc' +input.alternatives['cid files'] = 'cid' +input.alternatives['fea files'] = 'fea' +input.alternatives['opentype fonts'] = 'otf' +input.alternatives['truetype fonts'] = 'ttf' +input.alternatives['truetype collections'] = 'ttc' +input.alternatives['type1 fonts'] = 'pfb' + +-- obscure ones + +input.formats ['misc fonts'] = '' +input.suffixes['misc fonts'] = { } + +input.formats ['sfd'] = 'SFDFONTS' +input.suffixes ['sfd'] = { 'sfd' } +input.alternatives['subfont definition files'] = 'sfd' + +function input.reset() + + local instance = { } + + instance.rootpath = '' + instance.treepath = '' + instance.progname = environment.progname or 'context' + instance.engine = environment.engine or 'luatex' + instance.format = '' + instance.environment = { } + instance.variables = { } + instance.expansions = { } + instance.files = { } + instance.remap = { } + instance.configuration = { } + instance.setup = { } + instance.order = { } + instance.found = { } + instance.foundintrees = { } + instance.kpsevars = { } + instance.hashes = { } + instance.cnffiles = { } + instance.luafiles = { } + instance.lists = { } + instance.remember = true + instance.diskcache = true + instance.renewcache = false + instance.scandisk = true + instance.cachepath = nil + instance.loaderror = false + instance.smallcache = false + instance.savelists = true + instance.cleanuppaths = true + instance.allresults = false + instance.pattern = nil -- lists + instance.kpseonly = false -- lists + instance.cachefile = 'tmftools' + instance.loadtime = 0 + instance.starttime = 0 + instance.stoptime = 0 + instance.validfile = function(path,name) return true end + instance.data = { } -- only for loading + instance.force_suffixes = true + instance.dummy_path_expr = "^!*unset/*$" + instance.fakepaths = { } + instance.lsrmode = false + + if os.env then + -- store once, freeze and faster + for k,v in pairs(os.env) do + instance.environment[k] = input.bare_variable(v) + end + else + -- we will access os.env frequently + for k,v in pairs({'HOME','TEXMF','TEXMFCNF'}) do + local e = os.getenv(v) + if e then + -- input.report("setting",v,"to",input.bare_variable(e)) + instance.environment[v] = input.bare_variable(e) + end + end + end + + -- cross referencing + + for k, v in pairs(input.suffixes) do + for _, vv in pairs(v) do + if vv then + input.suffixmap[vv] = k + end + end + end + + return instance + +end + +function input.reset_hashes(instance) + instance.lists = { } + instance.found = { } +end + +function input.bare_variable(str) -- assumes str is a string + -- return string.gsub(string.gsub(string.gsub(str,"%s+$",""),'^"(.+)"$',"%1"),"^'(.+)'$","%1") + return (str:gsub("\s*([\"\']?)(.+)%1\s*", "%2")) +end + +if texio then + input.log = texio.write_nl +else + input.log = print +end + +function input.simple_logger(kind, name) + if name and name ~= "" then + if input.banner then + input.log(input.banner..kind..": "..name) + else + input.log("<<"..kind..": 
"..name..">>") + end + else + if input.banner then + input.log(input.banner..kind..": no name") + else + input.log("<<"..kind..": no name>>") + end + end +end + +function input.dummy_logger() +end + +function input.settrace(n) + input.trace = tonumber(n or 0) + if input.trace > 0 then + input.logger = input.simple_logger + input.verbose = true + else + input.logger = function() end + end +end + +function input.report(...) -- inefficient + if input.verbose then + if input.banner then + input.log(input.banner .. table.concat({...},' ')) + elseif input.logmode() == 'xml' then + input.log(""..table.concat({...},' ').."") + else + input.log("<<"..table.concat({...},' ')..">>") + end + end +end + +function input.reportlines(str) + if type(str) == "string" then + str = str:split("\n") + end + for _,v in pairs(str) do input.report(v) end +end + +input.settrace(tonumber(os.getenv("MTX.INPUT.TRACE") or os.getenv("MTX_INPUT_TRACE") or input.trace or 0)) + +-- These functions can be used to test the performance, especially +-- loading the database files. + +do + local clock = os.gettimeofday or os.clock + + function input.starttiming(instance) + if instance then + instance.starttime = clock() + if not instance.loadtime then + instance.loadtime = 0 + end + end + end + + function input.stoptiming(instance, report) + if instance then + local starttime = instance.starttime + if starttime then + local stoptime = clock() + local loadtime = stoptime - starttime + instance.stoptime = stoptime + instance.loadtime = instance.loadtime + loadtime + if report then + input.report('load time', format("%0.3f",loadtime)) + end + return loadtime + end + end + return 0 + end + +end + +function input.elapsedtime(instance) + return format("%0.3f",(instance and instance.loadtime) or 0) +end + +function input.report_loadtime(instance) + if instance then + input.report('total load time', input.elapsedtime(instance)) + end +end + +input.loadtime = input.elapsedtime + +function input.env(instance,key) + return instance.environment[key] or input.osenv(instance,key) +end + +function input.osenv(instance,key) + local ie = instance.environment + local value = ie[key] + if value == nil then + -- local e = os.getenv(key) + local e = os.env[key] + if e == nil then + -- value = "" -- false + else + value = input.bare_variable(e) + end + ie[key] = value + end + return value or "" +end + +-- we follow a rather traditional approach: +-- +-- (1) texmf.cnf given in TEXMFCNF +-- (2) texmf.cnf searched in TEXMF/web2c +-- +-- for the moment we don't expect a configuration file in a zip + +function input.identify_cnf(instance) + -- we no longer support treepath and rootpath (was handy for testing); + -- also we now follow the stupid route: if not set then just assume *one* + -- cnf file under texmf (i.e. distribution) + if #instance.cnffiles == 0 then + if input.env(instance,'TEXMFCNF') == "" then + local ownpath = environment.ownpath() or "." + if ownpath then + -- beware, this is tricky on my own system because at that location I do have + -- the raw tree that ends up in the zip; i.e. 
I cannot test this kind of mess + local function locate(filename,list) + local ownroot = input.normalize_name(file.join(ownpath,"../..")) + if not lfs.isdir(file.join(ownroot,"texmf")) then + ownroot = input.normalize_name(file.join(ownpath,"..")) + if not lfs.isdir(file.join(ownroot,"texmf")) then + input.verbose = true + input.report("error", "unable to identify cnf file") + return + end + end + local texmfcnf = file.join(ownroot,"texmf-local/web2c",filename) -- for minimals and myself + if not lfs.isfile(texmfcnf) then + texmfcnf = file.join(ownroot,"texmf/web2c",filename) + if not lfs.isfile(texmfcnf) then + input.verbose = true + input.report("error", "unable to locate",filename) + return + end + end + table.insert(list,texmfcnf) + local ie = instance.environment + if not ie['SELFAUTOPARENT'] then ie['SELFAUTOPARENT'] = ownroot end + if not ie['TEXMFCNF'] then ie['TEXMFCNF'] = file.dirname(texmfcnf) end + end + locate(input.luaname,instance.luafiles) + locate(input.cnfname,instance.cnffiles) + if #instance.luafiles == 0 and instance.cnffiles == 0 then + input.verbose = true + input.report("error", "unable to locate",filename) + os.exit() + end + -- here we also assume then TEXMF is set in the distribution, if this trickery is + -- used in the minimals, then users who don't use setuptex are on their own with + -- regards to extra trees + else + input.verbose = true + input.report("error", "unable to identify own path") + os.exit() + end + else + local t = input.split_path(input.env(instance,'TEXMFCNF')) + t = input.aux.expanded_path(instance,t) + input.aux.expand_vars(instance,t) + local function locate(filename,list) + for _,v in ipairs(t) do + local texmfcnf = input.normalize_name(file.join(v,filename)) + if lfs.isfile(texmfcnf) then + table.insert(list,texmfcnf) + end + end + end + locate(input.luaname,instance.luafiles) + locate(input.cnfname,instance.cnffiles) + end + end +end + +function input.load_cnf(instance) + local function loadoldconfigdata() + for _, fname in ipairs(instance.cnffiles) do + input.aux.load_cnf(instance,fname) + end + end + -- instance.cnffiles contain complete names now ! 
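    -- Sketch of the normalization below (example path made up): a cnf file
    -- such as /opt/texlive/texmf/web2c/texmf.cnf is stripped three times
    -- with file.dirname, so instance.rootpath and SELFAUTOPARENT end up as
    -- /opt/texlive; input.load_lua applies the same trick to the lua files.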
+ if #instance.cnffiles == 0 then + input.report("no cnf files found (TEXMFCNF may not be set/known)") + else + instance.rootpath = instance.cnffiles[1] + for k,fname in ipairs(instance.cnffiles) do + instance.cnffiles[k] = input.normalize_name(fname:gsub("\\",'/')) + end + for i=1,3 do + instance.rootpath = file.dirname(instance.rootpath) + end + instance.rootpath = input.normalize_name(instance.rootpath) + instance.environment['SELFAUTOPARENT'] = instance.rootpath -- just to be sure + if instance.lsrmode then + loadoldconfigdata() + elseif instance.diskcache and not instance.renewcache then + input.loadoldconfig(instance,instance.cnffiles) + if instance.loaderror then + loadoldconfigdata() + input.saveoldconfig(instance) + end + else + loadoldconfigdata() + if instance.renewcache then + input.saveoldconfig(instance) + end + end + input.aux.collapse_cnf_data(instance) + end + input.checkconfigdata(instance) +end + +function input.load_lua(instance) + if #instance.luafiles == 0 then + -- yet harmless + else + instance.rootpath = instance.luafiles[1] + for k,fname in ipairs(instance.luafiles) do + instance.luafiles[k] = input.normalize_name(fname:gsub("\\",'/')) + end + for i=1,3 do + instance.rootpath = file.dirname(instance.rootpath) + end + instance.rootpath = input.normalize_name(instance.rootpath) + instance.environment['SELFAUTOPARENT'] = instance.rootpath -- just to be sure + input.loadnewconfig(instance) + input.aux.collapse_cnf_data(instance) + end + input.checkconfigdata(instance) +end + +function input.aux.collapse_cnf_data(instance) -- potential optmization: pass start index (setup and configuration are shared) + for _,c in ipairs(instance.order) do + for k,v in pairs(c) do + if not instance.variables[k] then + if instance.environment[k] then + instance.variables[k] = instance.environment[k] + else + instance.kpsevars[k] = true + instance.variables[k] = input.bare_variable(v) + end + end + end + end +end + +function input.aux.load_cnf(instance,fname) + fname = input.clean_path(fname) + local lname = fname:gsub("%.%a+$",input.luasuffix) + local f = io.open(lname) + if f then -- this will go + f:close() + local dname = file.dirname(fname) + if not instance.configuration[dname] then + input.aux.load_configuration(instance,dname,lname) + instance.order[#instance.order+1] = instance.configuration[dname] + end + else + f = io.open(fname) + if f then + input.report("loading", fname) + local line, data, n, k, v + local dname = file.dirname(fname) + if not instance.configuration[dname] then + instance.configuration[dname] = { } + instance.order[#instance.order+1] = instance.configuration[dname] + end + local data = instance.configuration[dname] + while true do + local line, n = f:read(), 0 + if line then + while true do -- join lines + line, n = line:gsub("\\%s*$", "") + if n > 0 then + line = line .. 
f:read() + else + break + end + end + if not line:find("^[%%#]") then + local k, v = (line:gsub("%s*%%.*$","")):match("%s*(.-)%s*=%s*(.-)%s*$") + if k and v and not data[k] then + data[k] = (v:gsub("[%%#].*",'')):gsub("~", "$HOME") + instance.kpsevars[k] = true + end + end + else + break + end + end + f:close() + else + input.report("skipping", fname) + end + end +end + +-- database loading + +function input.load_hash(instance) + input.locatelists(instance) + if instance.lsrmode then + input.loadlists(instance) + elseif instance.diskcache and not instance.renewcache then + input.loadfiles(instance) + if instance.loaderror then + input.loadlists(instance) + input.savefiles(instance) + end + else + input.loadlists(instance) + if instance.renewcache then + input.savefiles(instance) + end + end +end + +function input.aux.append_hash(instance,type,tag,name) + input.logger("= hash append",tag) + table.insert(instance.hashes, { ['type']=type, ['tag']=tag, ['name']=name } ) +end + +function input.aux.prepend_hash(instance,type,tag,name) + input.logger("= hash prepend",tag) + table.insert(instance.hashes, 1, { ['type']=type, ['tag']=tag, ['name']=name } ) +end + +function input.aux.extend_texmf_var(instance,specification) -- crap + if instance.environment['TEXMF'] then + input.report("extending environment variable TEXMF with", specification) + instance.environment['TEXMF'] = instance.environment['TEXMF']:gsub("^%{", function() + return "{" .. specification .. "," + end) + elseif instance.variables['TEXMF'] then + input.report("extending configuration variable TEXMF with", specification) + instance.variables['TEXMF'] = instance.variables['TEXMF']:gsub("^%{", function() + return "{" .. specification .. "," + end) + else + input.report("setting configuration variable TEXMF to", specification) + instance.variables['TEXMF'] = "{" .. specification .. "}" + end + if instance.variables['TEXMF']:find("%,") and not instance.variables['TEXMF']:find("^%{") then + input.report("adding {} to complex TEXMF variable, best do that yourself") + instance.variables['TEXMF'] = "{" .. instance.variables['TEXMF'] .. "}" + end + input.expand_variables(instance) + input.reset_hashes(instance) +end + +-- locators + +function input.locatelists(instance) + for _, path in pairs(input.simplified_list(input.expansion(instance,'TEXMF'))) do + path = file.collapse_path(path) + input.report("locating list of",path) + input.locatedatabase(instance,input.normalize_name(path)) + end +end + +function input.locatedatabase(instance,specification) + return input.methodhandler('locators', instance, specification) +end + +function input.locators.tex(instance,specification) + if specification and specification ~= '' and lfs.isdir(specification) then + input.logger('! tex locator', specification..' found') + input.aux.append_hash(instance,'file',specification,filename) + else + input.logger('? tex locator', specification..' 
not found') + end +end + +-- hashers + +function input.hashdatabase(instance,tag,name) + return input.methodhandler('hashers',instance,tag,name) +end + +function input.loadfiles(instance) + instance.loaderror = false + instance.files = { } + if not instance.renewcache then + for _, hash in ipairs(instance.hashes) do + input.hashdatabase(instance,hash.tag,hash.name) + if instance.loaderror then break end + end + end +end + +function input.hashers.tex(instance,tag,name) + input.aux.load_files(instance,tag) +end + +-- generators: + +function input.loadlists(instance) + for _, hash in ipairs(instance.hashes) do + input.generatedatabase(instance,hash.tag) + end +end + +function input.generatedatabase(instance,specification) + return input.methodhandler('generators', instance, specification) +end + +do + + local weird = lpeg.anywhere(lpeg.S("~`!#$%^&*()={}[]:;\"\'||<>,?\n\r\t")) + + function input.generators.tex(instance,specification) + local tag = specification + if not instance.lsrmode and lfs and lfs.dir then + input.report("scanning path",specification) + instance.files[tag] = { } + local files = instance.files[tag] + local n, m, r = 0, 0, 0 + local spec = specification .. '/' + local attributes = lfs.attributes + local directory = lfs.dir + local small = instance.smallcache + local function action(path) + local mode, full + if path then + full = spec .. path .. '/' + else + full = spec + end + for name in directory(full) do + if name:find("^%.") then + -- skip + -- elseif name:find("[%~%`%!%#%$%%%^%&%*%(%)%=%{%}%[%]%:%;\"\'%|%<%>%,%?\n\r\t]") then -- too much escaped + elseif weird:match(name) then + -- texio.write_nl("skipping " .. name) + -- skip + else + mode = attributes(full..name,'mode') + if mode == "directory" then + m = m + 1 + if path then + action(path..'/'..name) + else + action(name) + end + elseif path and mode == 'file' then + n = n + 1 + local f = files[name] + if f then + if not small then + if type(f) == 'string' then + files[name] = { f, path } + else + f[#f+1] = path + end + end + else + files[name] = path + local lower = name:lower() + if name ~= lower then + files["remap:"..lower] = name + r = r + 1 + end + end + end + end + end + end + action() + input.report(format("%s files found on %s directories with %s uppercase remappings",n,m,r)) + else + local fullname = file.join(specification,input.lsrname) + local path = '.' + local f = io.open(fullname) + if f then + instance.files[tag] = { } + local files = instance.files[tag] + local small = instance.smallcache + input.report("loading lsr file",fullname) + -- for line in f:lines() do -- much slower then the next one + for line in (f:read("*a")):gmatch("(.-)\n") do + if line:find("^[%a%d]") then + local fl = files[line] + if fl then + if not small then + if type(fl) == 'string' then + files[line] = { fl, path } -- table + else + fl[#fl+1] = path + end + end + else + files[line] = path -- string + local lower = line:lower() + if line ~= lower then + files["remap:"..lower] = line + end + end + else + path = line:match("%.%/(.-)%:$") or path -- match could be nil due to empty line + end + end + f:close() + end + end + end + +end + +-- savers, todo + +function input.savefiles(instance) + input.aux.save_data(instance, 'files', function(k,v) + return instance.validfile(k,v) -- path, name + end) +end + +-- A config (optionally) has the paths split in tables. Internally +-- we join them and split them after the expansion has taken place. This +-- is more convenient. 
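-- A small sketch of that round trip (variable and value made up): an entry
-- like
--
--   TEXINPUTS = ".;$TEXMF/tex/{context,plain}//"
--
-- is saved with its paths split into the table
-- { ".", "$TEXMF/tex/{context,plain}//" } by input.splitconfig (using
-- file.split_path) and joined back into a single string in memory by
-- input.joinconfig (using file.join_path), so that variable expansion can
-- work on plain strings.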
+ +function input.splitconfig(instance) + for i,c in ipairs(instance) do + for k,v in pairs(c) do + if type(v) == 'string' then + local t = file.split_path(v) + if #t > 1 then + c[k] = t + end + end + end + end +end +function input.joinconfig(instance) + for i,c in ipairs(instance.order) do + for k,v in pairs(c) do + if type(v) == 'table' then + c[k] = file.join_path(v) + end + end + end +end +function input.split_path(str) + if type(str) == 'table' then + return str + else + return file.split_path(str) + end +end +function input.join_path(str) + if type(str) == 'table' then + return file.join_path(str) + else + return str + end +end + +function input.splitexpansions(instance) + for k,v in pairs(instance.expansions) do + local t, h = { }, { } + for _,vv in pairs(file.split_path(v)) do + if vv ~= "" and not h[vv] then + t[#t+1] = vv + h[vv] = true + end + end + if #t > 1 then + instance.expansions[k] = t + else + instance.expansions[k] = t[1] + end + end +end + +-- end of split/join code + +function input.saveoldconfig(instance) + input.splitconfig(instance) + input.aux.save_data(instance, 'configuration', nil) + input.joinconfig(instance) +end + +input.configbanner = [[ +-- This is a Luatex configuration file created by 'luatools.lua' or +-- 'luatex.exe' directly. For comment, suggestions and questions you can +-- contact the ConTeXt Development Team. This configuration file is +-- not copyrighted. [HH & TH] +]] + +function input.serialize(files) + -- This version is somewhat optimized for the kind of + -- tables that we deal with, so it's much faster than + -- the generic serializer. This makes sense because + -- luatools and mtxtools are called frequently. Okay, + -- we pay a small price for properly tabbed tables. + local t = { } + local concat = table.concat + local sorted = table.sortedkeys + local function dump(k,v,m) + if type(v) == 'string' then + return m .. "['" .. k .. "']='" .. v .. "'," + elseif #v == 1 then + return m .. "['" .. k .. "']='" .. v[1] .. "'," + else + return m .. "['" .. k .. "']={'" .. concat(v,"','").. "'}," + end + end + t[#t+1] = "return {" + if instance.sortdata then + for _, k in pairs(sorted(files)) do + local fk = files[k] + if type(fk) == 'table' then + t[#t+1] = "\t['" .. k .. "']={" + for _, kk in pairs(sorted(fk)) do + t[#t+1] = dump(kk,fk[kk],"\t\t") + end + t[#t+1] = "\t}," + else + t[#t+1] = dump(k,fk,"\t") + end + end + else + for k, v in pairs(files) do + if type(v) == 'table' then + t[#t+1] = "\t['" .. k .. "']={" + for kk,vv in pairs(v) do + t[#t+1] = dump(kk,vv,"\t\t") + end + t[#t+1] = "\t}," + else + t[#t+1] = dump(k,v,"\t") + end + end + end + t[#t+1] = "}" + return concat(t,"\n") +end + +if not texmf then texmf = {} end -- no longer needed, at least not here + +function input.aux.save_data(instance, dataname, check, makename) -- untested without cache overload + for cachename, files in pairs(instance[dataname]) do + local name = (makename or file.join)(cachename,dataname) + local luaname, lucname = name .. input.luasuffix, name .. input.lucsuffix + input.report("preparing " .. dataname .. " for", luaname) + for k, v in pairs(files) do + if not check or check(v,k) then -- path, name + if type(v) == "table" and #v == 1 then + files[k] = v[1] + end + else + files[k] = nil -- false + end + end + local data = { + type = dataname, + root = cachename, + version = input.cacheversion, + date = os.date("%Y-%m-%d"), + time = os.date("%H:%M:%S"), + content = files, + } + local f = io.open(luaname,'w') + if f then + input.report("saving " .. 
dataname .. " in", luaname) + f:write(input.serialize(data)) + f:close() + input.report("compiling " .. dataname .. " to", lucname) + if not utils.lua.compile(luaname,lucname) then + input.report("compiling failed for " .. dataname .. ", deleting file " .. lucname) + os.remove(lucname) + end + else + input.report("unable to save " .. dataname .. " in " .. name..input.luasuffix) + end + end +end + +function input.aux.load_data(instance,pathname,dataname,filename,makename) -- untested without cache overload + filename = ((not filename or (filename == "")) and dataname) or filename + filename = (makename and makename(dataname,filename)) or file.join(pathname,filename) + local blob = loadfile(filename .. input.lucsuffix) or loadfile(filename .. input.luasuffix) + if blob then + local data = blob() + if data and data.content and data.type == dataname and data.version == input.cacheversion then + input.report("loading",dataname,"for",pathname,"from",filename) + instance[dataname][pathname] = data.content + else + input.report("skipping",dataname,"for",pathname,"from",filename) + instance[dataname][pathname] = { } + instance.loaderror = true + end + else + input.report("skipping",dataname,"for",pathname,"from",filename) + end +end + +-- some day i'll use the nested approach, but not yet (actually we even drop +-- engine/progname support since we have only luatex now) +-- +-- first texmfcnf.lua files are located, next the cached texmf.cnf files +-- +-- return { +-- TEXMFBOGUS = 'effe checken of dit werkt', +-- } + +function input.aux.load_texmfcnf(instance,dataname,pathname) + local filename = file.join(pathname,input.luaname) + local blob = loadfile(filename) + if blob then + local data = blob() + if data then + input.report("loading","configuration file",filename) + if true then + -- flatten to variable.progname + local t = { } + for k, v in pairs(data) do -- v = progname + if type(v) == "string" then + t[k] = v + else + for kk, vv in pairs(v) do -- vv = variable + if type(vv) == "string" then + t[vv.."."..v] = kk + end + end + end + end + instance[dataname][pathname] = t + else + instance[dataname][pathname] = data + end + else + input.report("skipping","configuration file",filename) + instance[dataname][pathname] = { } + instance.loaderror = true + end + else + input.report("skipping","configuration file",filename) + end +end + +function input.aux.load_configuration(instance,dname,lname) + input.aux.load_data(instance,dname,'configuration',lname and file.basename(lname)) +end +function input.aux.load_files(instance,tag) + input.aux.load_data(instance,tag,'files') +end + +function input.resetconfig(instance) + instance.configuration, instance.setup, instance.order, instance.loaderror = { }, { }, { }, false +end + +function input.loadnewconfig(instance) + for _, cnf in ipairs(instance.luafiles) do + local dname = file.dirname(cnf) + input.aux.load_texmfcnf(instance,'setup',dname) + instance.order[#instance.order+1] = instance.setup[dname] + if instance.loaderror then break end + end +end + +function input.loadoldconfig(instance) + if not instance.renewcache then + for _, cnf in ipairs(instance.cnffiles) do + local dname = file.dirname(cnf) + input.aux.load_configuration(instance,dname) + instance.order[#instance.order+1] = instance.configuration[dname] + if instance.loaderror then break end + end + end + input.joinconfig(instance) +end + +function input.expand_variables(instance) + instance.expansions = { } +--~ instance.environment['SELFAUTOPARENT'] = instance.environment['SELFAUTOPARENT'] or 
instance.rootpath + if instance.engine ~= "" then instance.environment['engine'] = instance.engine end + if instance.progname ~= "" then instance.environment['progname'] = instance.progname end + for k,v in pairs(instance.environment) do + local a, b = k:match("^(%a+)%_(.*)%s*$") + if a and b then + instance.expansions[a..'.'..b] = v + else + instance.expansions[k] = v + end + end + for k,v in pairs(instance.environment) do -- move environment to expansions + if not instance.expansions[k] then instance.expansions[k] = v end + end + for k,v in pairs(instance.variables) do -- move variables to expansions + if not instance.expansions[k] then instance.expansions[k] = v end + end + while true do + local busy = false + for k,v in pairs(instance.expansions) do + local s, n = v:gsub("%$([%a%d%_%-]+)", function(a) + busy = true + return instance.expansions[a] or input.env(instance,a) + end) + local s, m = s:gsub("%$%{([%a%d%_%-]+)%}", function(a) + busy = true + return instance.expansions[a] or input.env(instance,a) + end) + if n > 0 or m > 0 then + instance.expansions[k]= s + end + end + if not busy then break end + end + local homedir = + instance.environment[(os.type == "windows" and 'USERPROFILE') or 'HOME'] or '~' + for k,v in pairs(instance.expansions) do + v = v:gsub("^~", homedir) + instance.expansions[k] = v:gsub("\\", '/') + end +end + +function input.aux.expand_vars(instance,lst) -- simple vars + for k,v in pairs(lst) do + lst[k] = v:gsub("%$([%a%d%_%-]+)", function(a) + return instance.variables[a] or input.env(instance,a) + end) + end +end + +function input.aux.expanded_var(instance,var) -- simple vars + return var:gsub("%$([%a%d%_%-]+)", function(a) + return instance.variables[a] or input.env(instance,a) + end) +end + +function input.aux.entry(instance,entries,name) + if name and (name ~= "") then + name = name:gsub('%$','') + local result = entries[name..'.'..instance.progname] or entries[name] + if result then + return result + else + result = input.env(instance,name) + if result then + instance.variables[name] = result + input.expand_variables(instance) + return instance.expansions[name] or "" + end + end + end + return "" +end +function input.variable(instance,name) + return input.aux.entry(instance,instance.variables,name) +end +function input.expansion(instance,name) + return input.aux.entry(instance,instance.expansions,name) +end + +function input.aux.is_entry(instance,entries,name) + if name and name ~= "" then + name = name:gsub('%$','') + return (entries[name..'.'..instance.progname] or entries[name]) ~= nil + else + return false + end +end + +function input.is_variable(instance,name) + return input.aux.is_entry(instance,instance.variables,name) +end +function input.is_expansion(instance,name) + return input.aux.is_entry(instance,instance.expansions,name) +end + +function input.simplified_list(str) + if type(str) == 'table' then + return str -- troubles ; ipv , in texmf + elseif str == '' then + return { } + else + local t = { } + for _,v in ipairs(string.splitchr(str:gsub("^\{(.+)\}$","%1"),",")) do + t[#t+1] = (v:gsub("^[%!]*(.+)[%/\\]*$","%1")) + end + return t + end +end + +function input.unexpanded_path_list(instance,str) + local pth = input.variable(instance,str) + local lst = input.split_path(pth) + return input.aux.expanded_path(instance,lst) +end +function input.unexpanded_path(instance,str) + return file.join_path(input.unexpanded_path_list(instance,str)) +end + +do + local done = { } + + function input.reset_extra_path(instance) + local ep = instance.extra_paths + 
if not ep then + ep, done = { }, { } + instance.extra_paths = ep + elseif #ep > 0 then + instance.lists, done = { }, { } + end + end + + function input.register_extra_path(instance,paths,subpaths) + local ep = instance.extra_paths or { } + local n = #ep + if paths and paths ~= "" then + if subpaths and subpaths ~= "" then + for p in paths:gmatch("[^,]+") do + -- we gmatch each step again, not that fast, but used seldom + for s in subpaths:gmatch("[^,]+") do + local ps = p .. "/" .. s + if not done[ps] then + ep[#ep+1] = input.clean_path(ps) + done[ps] = true + end + end + end + else + for p in paths:gmatch("[^,]+") do + if not done[p] then + ep[#ep+1] = input.clean_path(p) + done[p] = true + end + end + end + elseif subpaths and subpaths ~= "" then + for i=1,n do + -- we gmatch each step again, not that fast, but used seldom + for s in subpaths:gmatch("[^,]+") do + local ps = ep[i] .. "/" .. s + if not done[ps] then + ep[#ep+1] = input.clean_path(ps) + done[ps] = true + end + end + end + end + if #ep > 0 then + instance.extra_paths = ep -- register paths + end + if #ep > n then + instance.lists = { } -- erase the cache + end + end + +end + +function input.expanded_path_list(instance,str) + local function made_list(list) + local ep = instance.extra_paths + if not ep or #ep == 0 then + return list + else + local done, new = { }, { } + -- honour . .. ../.. but only when at the start + for k, v in ipairs(list) do + if not done[v] then + if v:find("^[%.%/]$") then + done[v] = true + new[#new+1] = v + else + break + end + end + end + -- first the extra paths + for k, v in ipairs(ep) do + if not done[v] then + done[v] = true + new[#new+1] = v + end + end + -- next the formal paths + for k, v in ipairs(list) do + if not done[v] then + done[v] = true + new[#new+1] = v + end + end + return new + end + end + if not str then + return ep or { } + elseif instance.savelists then + -- engine+progname hash + str = str:gsub("%$","") + if not instance.lists[str] then -- cached + local lst = made_list(input.split_path(input.expansion(instance,str))) + instance.lists[str] = input.aux.expanded_path(instance,lst) + end + return instance.lists[str] + else + local lst = input.split_path(input.expansion(instance,str)) + return made_list(input.aux.expanded_path(instance,lst)) + end +end + +function input.expand_path(instance,str) + return file.join_path(input.expanded_path_list(instance,str)) +end + +--~ function input.first_writable_path(instance,name) +--~ for _,v in pairs(input.expanded_path_list(instance,name)) do +--~ if file.is_writable(file.join(v,'luatex-cache.tmp')) then +--~ return v +--~ end +--~ end +--~ return "." 
+--~ end + +function input.expanded_path_list_from_var(instance,str) -- brrr + local tmp = input.var_of_format_or_suffix(str:gsub("%$","")) + if tmp ~= "" then + return input.expanded_path_list(instance,str) + else + return input.expanded_path_list(instance,tmp) + end +end +function input.expand_path_from_var(instance,str) + return file.join_path(input.expanded_path_list_from_var(instance,str)) +end + +function input.format_of_var(str) + return input.formats[str] or input.formats[input.alternatives[str]] or '' +end +function input.format_of_suffix(str) + return input.suffixmap[file.extname(str)] or 'tex' +end + +function input.variable_of_format(str) + return input.formats[str] or input.formats[input.alternatives[str]] or '' +end + +function input.var_of_format_or_suffix(str) + local v = input.formats[str] + if v then + return v + end + v = input.formats[input.alternatives[str]] + if v then + return v + end + v = input.suffixmap[file.extname(str)] + if v then + return input.formats[isf] + end + return '' +end + +function input.expand_braces(instance,str) -- output variable and brace expansion of STRING + local ori = input.variable(instance,str) + local pth = input.aux.expanded_path(instance,input.split_path(ori)) + return file.join_path(pth) +end + +-- {a,b,c,d} +-- a,b,c/{p,q,r},d +-- a,b,c/{p,q,r}/d/{x,y,z}// +-- a,b,c/{p,q/{x,y,z},r},d/{p,q,r} +-- a,b,c/{p,q/{x,y,z},r},d/{p,q,r} +-- a{b,c}{d,e}f +-- {a,b,c,d} +-- {a,b,c/{p,q,r},d} +-- {a,b,c/{p,q,r}/d/{x,y,z}//} +-- {a,b,c/{p,q/{x,y,z}},d/{p,q,r}} +-- {a,b,c/{p,q/{x,y,z},w}v,d/{p,q,r}} + +-- this one is better and faster, but it took me a while to realize +-- that this kind of replacement is cleaner than messy parsing and +-- fuzzy concatenating we can probably gain a bit with selectively +-- applying lpeg, but experiments with lpeg parsing this proved not to +-- work that well; the parsing is ok, but dealing with the resulting +-- table is a pain because we need to work inside-out recursively + +-- get rid of piecewise here, just a gmatch is ok + +function input.aux.splitpathexpr(str, t, validate) + -- no need for optimization, only called a few times, we can use lpeg for the sub + t = t or { } + local concat = table.concat + while true do + local done = false + while true do + local ok = false + str = str:gsub("([^{},]+){([^{}]-)}", function(a,b) + local t = { } + b:piecewise(",", function(s) t[#t+1] = a .. s end) + ok, done = true, true + return "{" .. concat(t,",") .. "}" + end) + if not ok then break end + end + while true do + local ok = false + str = str:gsub("{([^{}]-)}([^{},]+)", function(a,b) + local t = { } + a:piecewise(",", function(s) t[#t+1] = s .. b end) + ok, done = true, true + return "{" .. concat(t,",") .. "}" + end) + if not ok then break end + end + while true do + local ok = false + str = str:gsub("([,{]){([^{}]+)}([,}])", function(a,b,c) + ok, done = true, true + return a .. b .. c + end) + if not ok then break end + end + if not done then break end + end + while true do + local ok = false + str = str:gsub("{([^{}]-)}{([^{}]-)}", function(a,b) + local t = { } + a:piecewise(",", function(sa) + b:piecewise(",", function(sb) + t[#t+1] = sa .. sb + end) + end) + ok = true + return "{" .. concat(t,",") .. 
"}" + end) + if not ok then break end + end + while true do + local ok = false + str = str:gsub("{([^{}]-)}", function(a) + ok = true + return a + end) + if not ok then break end + end + if validate then + str:piecewise(",", function(s) + s = validate(s) + if s then t[#t+1] = s end + end) + else + str:piecewise(",", function(s) + t[#t+1] = s + end) + end + return t +end + +function input.aux.expanded_path(instance,pathlist) -- maybe not a list, just a path + -- a previous version fed back into pathlist + local newlist, ok = { }, false + for _,v in ipairs(pathlist) do + if v:find("[{}]") then + ok = true + break + end + end + if ok then + for _, v in ipairs(pathlist) do + input.aux.splitpathexpr(v, newlist, function(s) + s = file.collapse_path(s) + return s ~= "" and not s:find(instance.dummy_path_expr) and s + end) + end + else + for _,v in ipairs(pathlist) do + for vv in string.gmatch(v..',',"(.-),") do + vv = file.collapse_path(v) + if vv ~= "" then newlist[#newlist+1] = vv end + end + end + end + return newlist +end + +input.is_readable = { } + +function input.aux.is_readable(readable, name) + if input.trace > 2 then + if readable then + input.logger("+ readable", name) + else + input.logger("- readable", name) + end + end + return readable +end + +function input.is_readable.file(name) + -- return input.aux.is_readable(file.is_readable(name), name) + return input.aux.is_readable(input.aux.is_file(name), name) +end + +input.is_readable.tex = input.is_readable.file + +-- name +-- name/name + +function input.aux.collect_files(instance,names) + local filelist = { } + for _, fname in pairs(names) do + if fname then + if input.trace > 2 then + input.logger("? blobpath asked",fname) + end + local bname = file.basename(fname) + local dname = file.dirname(fname) + if dname == "" or dname:find("^%.") then + dname = false + else + dname = "/" .. dname .. "$" + end + for _, hash in ipairs(instance.hashes) do + local blobpath = hash.tag + local files = blobpath and instance.files[blobpath] + if files then + if input.trace > 2 then + input.logger('? blobpath do',blobpath .. " (" .. bname ..")") + end + local blobfile = files[bname] + if not blobfile then + local rname = "remap:"..bname + blobfile = files[rname] + if blobfile then + bname = files[rname] + blobfile = files[bname] + end + end + if blobfile then + if type(blobfile) == 'string' then + if not dname or blobfile:find(dname) then + filelist[#filelist+1] = { + hash.type, + file.join(blobpath,blobfile,bname), -- search + input.concatinators[hash.type](blobpath,blobfile,bname) -- result + } + end + else + for _, vv in pairs(blobfile) do + if not dname or vv:find(dname) then + filelist[#filelist+1] = { + hash.type, + file.join(blobpath,vv,bname), -- search + input.concatinators[hash.type](blobpath,vv,bname) -- result + } + end + end + end + end + elseif input.trace > 1 then + input.logger('! blobpath no',blobpath .. " (" .. 
bname ..")" ) + end + end + end + end + if #filelist > 0 then + return filelist + else + return nil + end +end + +function input.suffix_of_format(str) + if input.suffixes[str] then + return input.suffixes[str][1] + else + return "" + end +end + +function input.suffixes_of_format(str) + if input.suffixes[str] then + return input.suffixes[str] + else + return {} + end +end + +do + + -- called about 700 times for an empty doc (font initializations etc) + -- i need to weed the font files for redundant calls + + local letter = lpeg.R("az","AZ") + local separator = lpeg.P("://") + + local qualified = lpeg.P(".")^0 * lpeg.P("/") + letter*lpeg.P(":") + letter^1*separator + local rootbased = lpeg.P("/") + letter*lpeg.P(":") + + -- ./name ../name /name c: :// + function input.aux.qualified_path(filename) + return qualified:match(filename) + end + function input.aux.rootbased_path(filename) + return rootbased:match(filename) + end + + function input.normalize_name(original) + return original + end + + input.normalize_name = file.collapse_path + +end + +function input.aux.register_in_trees(instance,name) + if not name:find("^%.") then + instance.foundintrees[name] = (instance.foundintrees[name] or 0) + 1 -- maybe only one + end +end + +-- split the next one up, better for jit + +function input.aux.find_file(instance,filename) -- todo : plugin (scanners, checkers etc) + local result = { } + local stamp = nil + filename = input.normalize_name(filename) -- elsewhere + filename = file.collapse_path(filename:gsub("\\","/")) -- elsewhere + -- speed up / beware: format problem + if instance.remember then + stamp = filename .. "--" .. instance.engine .. "--" .. instance.progname .. "--" .. instance.format + if instance.found[stamp] then + input.logger('! remembered', filename) + return instance.found[stamp] + end + end + if filename:find('%*') then + input.logger('! wildcard', filename) + result = input.find_wildcard_files(instance,filename) + elseif input.aux.qualified_path(filename) then + if input.is_readable.file(filename) then + input.logger('! qualified', filename) + result = { filename } + else + local forcedname, ok = "", false + if file.extname(filename) == "" then + if instance.format == "" then + forcedname = filename .. ".tex" + if input.is_readable.file(forcedname) then + input.logger('! no suffix, forcing standard filetype tex') + result, ok = { forcedname }, true + end + else + for _, s in pairs(input.suffixes_of_format(instance.format)) do + forcedname = filename .. "." .. s + if input.is_readable.file(forcedname) then + input.logger('! no suffix, forcing format filetype', s) + result, ok = { forcedname }, true + break + end + end + end + end + if not ok then + input.logger('? qualified', filename) + end + end + else + -- search spec + local filetype, extra, done, wantedfiles, ext = '', nil, false, { }, file.extname(filename) + if ext == "" then + if not instance.force_suffixes then + wantedfiles[#wantedfiles+1] = filename + end + else + wantedfiles[#wantedfiles+1] = filename + end + if instance.format == "" then + if ext == "" then + local forcedname = filename .. '.tex' + wantedfiles[#wantedfiles+1] = forcedname + filetype = input.format_of_suffix(forcedname) + input.logger('! forcing filetype',filetype) + else + filetype = input.format_of_suffix(filename) + input.logger('! using suffix based filetype',filetype) + end + else + if ext == "" then + for _, s in pairs(input.suffixes_of_format(instance.format)) do + wantedfiles[#wantedfiles+1] = filename .. "." .. 
s + end + end + filetype = instance.format + input.logger('! using given filetype',filetype) + end + local typespec = input.variable_of_format(filetype) + local pathlist = input.expanded_path_list(instance,typespec) + if not pathlist or #pathlist == 0 then + -- no pathlist, access check only / todo == wildcard + if input.trace > 2 then + input.logger('? filename',filename) + input.logger('? filetype',filetype or '?') + input.logger('? wanted files',table.concat(wantedfiles," | ")) + end + for _, fname in pairs(wantedfiles) do + if fname and input.is_readable.file(fname) then + filename, done = fname, true + result[#result+1] = file.join('.',fname) + break + end + end + -- this is actually 'other text files' or 'any' or 'whatever' + local filelist = input.aux.collect_files(instance,wantedfiles) + local fl = filelist and filelist[1] + if fl then + filename = fl[3] + result[#result+1] = filename + done = true + end + else + -- list search + local filelist = input.aux.collect_files(instance,wantedfiles) + local doscan, recurse + if input.trace > 2 then + input.logger('? filename',filename) + -- if pathlist then input.logger('? path list',table.concat(pathlist," | ")) end + -- if filelist then input.logger('? file list',table.concat(filelist," | ")) end + end + -- a bit messy ... esp the doscan setting here + for _, path in pairs(pathlist) do + if path:find("^!!") then doscan = false else doscan = true end + if path:find("//$") then recurse = true else recurse = false end + local pathname = path:gsub("^!+", '') + done = false + -- using file list + if filelist and not (done and not instance.allresults) and recurse then + -- compare list entries with permitted pattern + pathname = pathname:gsub("([%-%.])","%%%1") -- this also influences + pathname = pathname:gsub("/+$", '/.*') -- later usage of pathname + pathname = pathname:gsub("//", '/.-/') -- not ok for /// but harmless + local expr = "^" .. pathname + -- input.debug('?',expr) + for _, fl in ipairs(filelist) do + local f = fl[2] + if f:find(expr) then + -- input.debug('T',' '..f) + if input.trace > 2 then + input.logger('= found in hash',f) + end + --- todo, test for readable + result[#result+1] = fl[3] + input.aux.register_in_trees(instance,f) -- for tracing used files + done = true + if not instance.allresults then break end + else + -- input.debug('F',' '..f) + end + end + end + if not done and doscan then + -- check if on disk / unchecked / does not work at all / also zips + if input.method_is_file(pathname) then -- ? 
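-- What follows is the disk scanning fallback; it is only reached when nothing
-- was found for this path via the file database (done is still false) and the
-- path was not prefixed with !! (doscan is true): the trailing ".*" wildcard
-- is stripped, the remaining path is checked with input.aux.can_be_dir, and
-- each name in wantedfiles is tried with input.is_readable.file.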
+ local pname = pathname:gsub("%.%*$",'') + if not pname:find("%*") then + local ppname = pname:gsub("/+$","") + if input.aux.can_be_dir(instance,ppname) then + for _, w in pairs(wantedfiles) do + local fname = file.join(ppname,w) + if input.is_readable.file(fname) then + if input.trace > 2 then + input.logger('= found by scanning',fname) + end + result[#result+1] = fname + done = true + if not instance.allresults then break end + end + end + else + -- no access needed for non existing path, speedup (esp in large tree with lots of fake) + end + end + end + end + if not done and doscan then + -- todo: slow path scanning + end + if done and not instance.allresults then break end + end + end + end + for k,v in pairs(result) do + result[k] = file.collapse_path(v) + end + if instance.remember then + instance.found[stamp] = result + end + return result +end + +input.aux._find_file_ = input.aux.find_file + +function input.aux.find_file(instance,filename) -- maybe make a lowres cache too + local result = input.aux._find_file_(instance,filename) + if #result == 0 then + local lowered = filename:lower() + if filename ~= lowered then + return input.aux._find_file_(instance,lowered) + end + end + return result +end + +if lfs and lfs.isfile then + input.aux.is_file = lfs.isfile -- to be done: use this +else + input.aux.is_file = file.is_readable +end + +if lfs and lfs.isdir then + function input.aux.can_be_dir(instance,name) + if not instance.fakepaths[name] then + if lfs.isdir(name) then + instance.fakepaths[name] = 1 -- directory + else + instance.fakepaths[name] = 2 -- no directory + end + end + return (instance.fakepaths[name] == 1) + end +else + function input.aux.can_be_dir() + return true + end +end + +if not input.concatinators then input.concatinators = { } end + +input.concatinators.tex = file.join +input.concatinators.file = input.concatinators.tex + +function input.find_files(instance,filename,filetype,mustexist) + if type(mustexist) == boolean then + -- all set + elseif type(filetype) == 'boolean' then + filetype, mustexist = nil, false + elseif type(filetype) ~= 'string' then + filetype, mustexist = nil, false + end + instance.format = filetype or '' + local t = input.aux.find_file(instance,filename,true) + instance.format = '' + return t +end + +function input.find_file(instance,filename,filetype,mustexist) + return (input.find_files(instance,filename,filetype,mustexist)[1] or "") +end + +function input.find_given_files(instance,filename) + local bname, result = file.basename(filename), { } + for k, hash in ipairs(instance.hashes) do + local files = instance.files[hash.tag] + local blist = files[bname] + if not blist then + local rname = "remap:"..bname + blist = files[rname] + if blist then + bname = files[rname] + blist = files[bname] + end + end + if blist then + if type(blist) == 'string' then + result[#result+1] = input.concatinators[hash.type](hash.tag,blist,bname) or "" + if not instance.allresults then break end + else + for kk,vv in pairs(blist) do + result[#result+1] = input.concatinators[hash.type](hash.tag,vv,bname) or "" + if not instance.allresults then break end + end + end + end + end + return result +end + +function input.find_given_file(instance,filename) + return (input.find_given_files(instance,filename)[1] or "") +end + +function input.find_wildcard_files(instance,filename) -- todo: remap: + local result = { } + local bname, dname = file.basename(filename), file.dirname(filename) + local path = dname:gsub("^*/","") + path = path:gsub("*",".*") + path = 
path:gsub("-","%%-") + if dname == "" then + path = ".*" + end + local name = bname + name = name:gsub("*",".*") + name = name:gsub("-","%%-") + path = path:lower() + name = name:lower() + local function doit(blist,bname,hash,allresults) + local done = false + if blist then + if type(blist) == 'string' then + -- make function and share code + if (blist:lower()):find(path) then + result[#result+1] = input.concatinators[hash.type](hash.tag,blist,bname) or "" + done = true + end + else + for kk,vv in pairs(blist) do + if (vv:lower()):find(path) then + result[#result+1] = input.concatinators[hash.type](hash.tag,vv,bname) or "" + done = true + if not allresults then break end + end + end + end + end + return done + end + local files, allresults, done = instance.files, instance.allresults, false + if name:find("%*") then + for k, hash in ipairs(instance.hashes) do + for kk, hh in pairs(files[hash.tag]) do + if not kk:find("^remap:") then + if (kk:lower()):find(name) then + if doit(hh,kk,hash,allresults) then done = true end + if done and not allresults then break end + end + end + end + end + else + for k, hash in ipairs(instance.hashes) do + if doit(files[hash.tag][bname],bname,hash,allresults) then done = true end + if done and not allresults then break end + end + end + return result +end + +function input.find_wildcard_file(instance,filename) + return (input.find_wildcard_files(instance,filename)[1] or "") +end + +-- main user functions + +function input.save_used_files_in_trees(instance, filename,jobname) + if not filename then filename = 'luatex.jlg' end + local f = io.open(filename,'w') + if f then + f:write("\n") + f:write("\n") + if jobname then + f:write("\t" .. jobname .. "\n") + end + f:write("\t\n") + for _,v in pairs(table.sortedkeys(instance.foundintrees)) do + f:write("\t\t" .. v .. "\n") + end + f:write("\t\n") + f:write("\n") + f:close() + end +end + +function input.automount(instance) + -- implemented later +end + +function input.load(instance) + input.starttiming(instance) + input.resetconfig(instance) + input.identify_cnf(instance) + input.load_lua(instance) + input.expand_variables(instance) + input.load_cnf(instance) + input.expand_variables(instance) + input.load_hash(instance) + input.automount(instance) + input.stoptiming(instance) +end + +function input.for_files(instance, command, files, filetype, mustexist) + if files and #files > 0 then + local function report(str) + if input.verbose then + input.report(str) -- has already verbose + else + print(str) + end + end + if input.verbose then + report('') + end + for _, file in pairs(files) do + local result = command(instance,file,filetype,mustexist) + if type(result) == 'string' then + report(result) + else + for _,v in pairs(result) do + report(v) + end + end + end + end +end + +-- strtab + +function input.var_value(instance,str) -- output the value of variable $STRING. + return input.variable(instance,str) +end +function input.expand_var(instance,str) -- output variable expansion of STRING. 
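-- Together with input.var_value above and input.show_path below this mirrors
-- the usual kpsewhich style queries: var_value returns the stored value,
-- expand_var the value after $VAR expansion, and show_path the expanded
-- search path for a file type, e.g. (illustrative calls)
--
--   input.var_value (instance,"TEXMF") -- raw value, may still contain $VARS
--   input.expand_var(instance,"TEXMF") -- value after expansion
--   input.show_path (instance,"tex")   -- joined, expanded TEXINPUTS paths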
+ return input.expansion(instance,str) +end +function input.show_path(instance,str) -- output search path for file type NAME + return file.join_path(input.expanded_path_list(instance,input.format_of_var(str))) +end + +-- input.find_file(filename) +-- input.find_file(filename, filetype, mustexist) +-- input.find_file(filename, mustexist) +-- input.find_file(filename, filetype) + +function input.aux.register_file(files, name, path) + if files[name] then + if type(files[name]) == 'string' then + files[name] = { files[name], path } + else + files[name] = path + end + else + files[name] = path + end +end + +if not input.finders then input.finders = { } end +if not input.openers then input.openers = { } end +if not input.loaders then input.loaders = { } end + +input.finders.notfound = { nil } +input.openers.notfound = { nil } +input.loaders.notfound = { false, nil, 0 } + +function input.splitmethod(filename) + if not filename then + return { } -- safeguard + elseif type(filename) == "table" then + return filename -- already split + elseif not filename:find("://") then + return { scheme="file", path = filename, original=filename } -- quick hack + else + return url.hashed(filename) + end +end + +function input.method_is_file(filename) + return input.splitmethod(filename).scheme == 'file' +end + +function table.sequenced(t,sep) -- temp here + local s = { } + for k, v in pairs(t) do + s[#s+1] = k .. "=" .. v + end + return table.concat(s, sep or " | ") +end + +function input.methodhandler(what, instance, filename, filetype) -- ... + local specification = (type(filename) == "string" and input.splitmethod(filename)) or filename -- no or { }, let it bomb + local scheme = specification.scheme + if input[what][scheme] then + input.logger('= handler',specification.original .." -> " .. what .. " -> " .. table.sequenced(specification)) + return input[what][scheme](instance,filename,filetype) -- todo: specification + else + return input[what].tex(instance,filename,filetype) -- todo: specification + end +end + +-- also inside next test? + +function input.findtexfile(instance, filename, filetype) + return input.methodhandler('finders',instance, input.normalize_name(filename), filetype) +end +function input.opentexfile(instance,filename) + return input.methodhandler('openers',instance, input.normalize_name(filename)) +end + +function input.findbinfile(instance, filename, filetype) + return input.methodhandler('finders',instance, input.normalize_name(filename), filetype) +end +function input.openbinfile(instance,filename) + return input.methodhandler('loaders',instance, input.normalize_name(filename)) +end + +function input.loadbinfile(instance, filename, filetype) + local fname = input.findbinfile(instance, input.normalize_name(filename), filetype) + if fname and fname ~= "" then + return input.openbinfile(instance,fname) + else + return unpack(input.loaders.notfound) + end +end + +function input.texdatablob(instance, filename, filetype) + local ok, data, size = input.loadbinfile(instance, filename, filetype) + return data or "" +end + +input.loadtexfile = input.texdatablob + +function input.openfile(filename) -- brrr texmf.instance here / todo ! ! ! ! ! 
+ local fullname = input.findtexfile(texmf.instance, filename) + if fullname and (fullname ~= "") then + return input.opentexfile(texmf.instance, fullname) + else + return nil + end +end + +function input.logmode() + return (os.getenv("MTX.LOG.MODE") or os.getenv("MTX_LOG_MODE") or "tex"):lower() +end + +-- this is a prelude to engine/progname specific configuration files +-- in which case we can omit files meant for other programs and +-- packages + +--- ctx + +-- maybe texinputs + font paths +-- maybe positive selection tex/context fonts/tfm|afm|vf|opentype|type1|map|enc + +input.validators = { } +input.validators.visibility = { } + +function input.validators.visibility.default(path, name) + return true +end + +function input.validators.visibility.context(path, name) + path = path[1] or path -- some day a loop + return not ( + path:find("latex") or +-- path:find("doc") or + path:find("tex4ht") or + path:find("source") or +-- path:find("config") or +-- path:find("metafont") or + path:find("lists$") or + name:find("%.tpm$") or + name:find("%.bak$") + ) +end + +-- todo: describe which functions are public (maybe input.private. ... ) + +-- beware: i need to check where we still need a / on windows: + +function input.clean_path(str) +--~ return (((str:gsub("\\","/")):gsub("^!+","")):gsub("//+","//")) + if str then + return ((str:gsub("\\","/")):gsub("^!+","")) + else + return nil + end +end + +function input.do_with_path(name,func) + for _, v in pairs(input.expanded_path_list(instance,name)) do + func("^"..input.clean_path(v)) + end +end + +function input.do_with_var(name,func) + func(input.aux.expanded_var(name)) +end + +function input.with_files(instance,pattern,handle) + for _, hash in ipairs(instance.hashes) do + local blobpath = hash.tag + local blobtype = hash.type + if blobpath then + local files = instance.files[blobpath] + if files then + for k,v in pairs(files) do + if k:find("^remap:") then + k = files[k] + v = files[k] -- chained + end + if k:find(pattern) then + if type(v) == "string" then + handle(blobtype,blobpath,v,k) + else + for _,vv in pairs(v) do + handle(blobtype,blobpath,vv,k) + end + end + end + end + end + end + end +end + +--~ function input.update_script(oldname,newname) -- oldname -> own.name, not per se a suffix +--~ newname = file.addsuffix(newname,"lua") +--~ local newscript = input.clean_path(input.find_file(instance, newname)) +--~ local oldscript = input.clean_path(oldname) +--~ input.report("old script", oldscript) +--~ input.report("new script", newscript) +--~ if oldscript ~= newscript and (oldscript:find(file.removesuffix(newname).."$") or oldscript:find(newname.."$")) then +--~ local newdata = io.loaddata(newscript) +--~ if newdata then +--~ input.report("old script content replaced by new content") +--~ io.savedata(oldscript,newdata) +--~ end +--~ end +--~ end + +function input.update_script(instance,oldname,newname) -- oldname -> own.name, not per se a suffix + local scriptpath = "scripts/context/lua" + newname = file.addsuffix(newname,"lua") + local oldscript = input.clean_path(oldname) + input.report("to be replaced old script", oldscript) + local newscripts = input.find_files(instance, newname) or { } + if #newscripts == 0 then + input.report("unable to locate new script") + else + for _, newscript in ipairs(newscripts) do + newscript = input.clean_path(newscript) + input.report("checking new script", newscript) + if oldscript == newscript then + input.report("old and new script are the same") + elseif not newscript:find(scriptpath) then + 
input.report("new script should come from",scriptpath) + elseif not (oldscript:find(file.removesuffix(newname).."$") or oldscript:find(newname.."$")) then + input.report("invalid new script name") + else + local newdata = io.loaddata(newscript) + if newdata then + input.report("old script content replaced by new content") + io.savedata(oldscript,newdata) + break + else + input.report("unable to load new script") + end + end + end + end +end + + +--~ print(table.serialize(input.aux.splitpathexpr("/usr/share/texmf-{texlive,tetex}", {}))) + +-- command line resolver: + +--~ print(input.resolve("abc env:tmp file:cont-en.tex path:cont-en.tex full:cont-en.tex rel:zapf/one/p-chars.tex")) + +do + + local resolvers = { } + + resolvers.environment = function(instance,str) + return input.clean_path(os.getenv(str) or os.getenv(str:upper()) or os.getenv(str:lower()) or "") + end + resolvers.relative = function(instance,str,n) + if io.exists(str) then + -- nothing + elseif io.exists("./" .. str) then + str = "./" .. str + else + local p = "../" + for i=1,n or 2 do + if io.exists(p .. str) then + str = p .. str + break + else + p = p .. "../" + end + end + end + return input.clean_path(str) + end + resolvers.locate = function(instance,str) + local fullname = input.find_given_file(instance,str) or "" + return input.clean_path((fullname ~= "" and fullname) or str) + end + resolvers.filename = function(instance,str) + local fullname = input.find_given_file(instance,str) or "" + return input.clean_path(file.basename((fullname ~= "" and fullname) or str)) + end + resolvers.pathname = function(instance,str) + local fullname = input.find_given_file(instance,str) or "" + return input.clean_path(file.dirname((fullname ~= "" and fullname) or str)) + end + + resolvers.env = resolvers.environment + resolvers.rel = resolvers.relative + resolvers.loc = resolvers.locate + resolvers.kpse = resolvers.locate + resolvers.full = resolvers.locate + resolvers.file = resolvers.filename + resolvers.path = resolvers.pathname + + local function resolve(instance,str) + if type(str) == "table" then + for k, v in pairs(str) do + str[k] = resolve(instance,v) or v + end + elseif str and str ~= "" then + str = str:gsub("([a-z]+):([^ ]+)", function(method,target) + if resolvers[method] then + return resolvers[method](instance,target) + else + return method .. ":" .. target + end + end) + end + return str + end + + input.resolve = resolve + +end + + +if not modules then modules = { } end modules ['luat-tmp'] = { + version = 1.001, + comment = "companion to luat-lib.tex", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +--[[ldx-- +

This module deals with caching data. It sets up the paths and +implements loaders and savers for tables. It is best to set the +following variable; when not set, the usual paths will be +checked. Personally I prefer the (user's) temporary path.

+ + +TEXMFCACHE=$TMP;$TEMP;$TMPDIR;$TEMPDIR;$HOME;$TEXMFVAR;$VARTEXMF;. + + +

Currently we do no locking when we write files. This is no real +problem because most caching involves fonts and the chance of them +being written at the same time is small. We also need to extend +luatools with a recache feature.
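A minimal sketch of how the cache path is typically obtained, assuming an
instance created with input.reset() and filled by input.load(); the variable
names below are illustrative only and not part of the module itself:

    local instance = input.reset()
    input.load(instance)
    -- caches.setpath falls back to $TMP, $HOME and friends when TEXMFCACHE is
    -- not set, and creates <cache>/luatex-cache/context/<treehash>/formats
    -- when it can
    local formatpath = caches.setpath(instance, "formats")
    print(formatpath)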

+--ldx]]-- + +caches = caches or { } +dir = dir or { } +texmf = texmf or { } + +caches.path = caches.path or nil +caches.base = caches.base or "luatex-cache" +caches.more = caches.more or "context" +caches.direct = false -- true is faster but may need huge amounts of memory +caches.trace = false +caches.tree = false +caches.paths = caches.paths or nil +caches.force = false + +input.usecache = not toboolean(os.getenv("TEXMFSHARECACHE") or "false",true) -- true + +function caches.temp(instance) + local function checkpath(cachepath) + if not cachepath or cachepath == "" then + return nil + elseif lfs.attributes(cachepath,"mode") == "directory" then -- lfs.isdir(cachepath) then + return cachepath + elseif caches.force or io.ask(string.format("Should I create the cache path %s?",cachepath), "no", { "yes", "no" }) == "yes" then + dir.mkdirs(cachepath) + return (lfs.attributes(cachepath,"mode") == "directory") and cachepath + else + return nil + end + end + local cachepath = input.expanded_path_list(instance,"TEXMFCACHE") + cachepath = cachepath and #cachepath > 0 and checkpath(cachepath[1]) + if not cachepath then + cachepath = os.getenv("TEXMFCACHE") or os.getenv("HOME") or os.getenv("HOMEPATH") or os.getenv("TMP") or os.getenv("TEMP") or os.getenv("TMPDIR") or nil + cachepath = checkpath(cachepath) + end + if not cachepath then + print("\nfatal error: there is no valid cache path defined\n") + os.exit() + elseif lfs.attributes(cachepath,"mode") ~= "directory" then + print(string.format("\nfatal error: cache path %s is not a directory\n",cachepath)) + os.exit() + end + function caches.temp(instance) + return cachepath + end + return cachepath +end + +function caches.configpath(instance) + return table.concat(instance.cnffiles,";") +end + +function caches.hashed(tree) + return md5.hex((tree:lower()):gsub("[\\\/]+","/")) +end + +function caches.treehash(instance) + local tree = caches.configpath(instance) + if not tree or tree == "" then + return false + else + return caches.hashed(tree) + end +end + +function caches.setpath(instance,...) + if not caches.path then + if not caches.path then + caches.path = caches.temp(instance) + end + caches.path = input.clean_path(caches.path) -- to be sure + if lfs then + caches.tree = caches.tree or caches.treehash(instance) + if caches.tree then + caches.path = dir.mkdirs(caches.path,caches.base,caches.more,caches.tree) + else + caches.path = dir.mkdirs(caches.path,caches.base,caches.more) + end + end + end + if not caches.path then + caches.path = '.' + end + caches.path = input.clean_path(caches.path) + if lfs and not table.is_empty({...}) then + local pth = dir.mkdirs(caches.path,...) + return pth + end + caches.path = dir.expand_name(caches.path) + return caches.path +end + +function caches.definepath(instance,category,subcategory) + return function() + return caches.setpath(instance,category,subcategory) + end +end + +function caches.setluanames(path,name) + return path .. "/" .. name .. ".tma", path .. "/" .. name .. 
".tmc" +end + +function caches.loaddata(path,name) + local tmaname, tmcname = caches.setluanames(path,name) + local loader = loadfile(tmcname) or loadfile(tmaname) + if loader then + return loader() + else + return false + end +end + +function caches.is_writable(filepath,filename) + local tmaname, tmcname = caches.setluanames(filepath,filename) + return file.is_writable(tmaname) +end + +function caches.savedata(filepath,filename,data,raw) -- raw needed for file cache + local tmaname, tmcname = caches.setluanames(filepath,filename) + local reduce, simplify = true, true + if raw then + reduce, simplify = false, false + end + if caches.direct then + file.savedata(tmaname, table.serialize(data,'return',true,true)) + else + table.tofile (tmaname, data,'return',true,true) -- maybe not the last true + end + utils.lua.compile(tmaname, tmcname) +end + +-- here we use the cache for format loading (texconfig.[formatname|jobname]) + +--~ if tex and texconfig and texconfig.formatname and texconfig.formatname == "" then +if tex and texconfig and (not texconfig.formatname or texconfig.formatname == "") and texmf.instance then + if not texconfig.luaname then texconfig.luaname = "cont-en.lua" end -- or luc + texconfig.formatname = caches.setpath(texmf.instance,"formats") .. "/" .. texconfig.luaname:gsub("%.lu.$",".fmt") +end + +--[[ldx-- +

Once we found ourselves defining similar cache constructs +several times, containers were introduced. Containers are used +to collect tables in memory and reuse them when possible based +on (unique) hashes (to be provided by the calling function).

+ +

Caching to disk is disabled by default. Version numbers are +stored in the saved table which makes it possible to change the +table structures without bothering about the disk cache.

+ +

Examples of usage can be found in the font related code.
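For instance, a minimal usage sketch of the intended call sequence, assuming
texmf.instance is available; the category, subcategory and key are made-up
examples:

    local define    = containers.define("fonts", "afm", 1.001, true) -- hypothetical category/subcategory
    local container = define()                              -- allocates or reuses the container
    local data      = containers.read(container, "somefont") -- nil on a cache miss
    if not data then
        data = { }                                           -- the expensive computation would go here
        containers.write(container, "somefont", data)
    end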

+--ldx]]-- + +containers = { } +containers.trace = false + +do -- local report + + local function report(container,tag,name) + if caches.trace or containers.trace or container.trace then + logs.report(string.format("%s cache",container.subcategory),string.format("%s: %s",tag,name or 'invalid')) + end + end + + local allocated = { } + + -- tracing + + function containers.define(category, subcategory, version, enabled) + return function() + if category and subcategory then + local c = allocated[category] + if not c then + c = { } + allocated[category] = c + end + local s = c[subcategory] + if not s then + s = { + category = category, + subcategory = subcategory, + storage = { }, + enabled = enabled, + version = version or 1.000, + trace = false, + path = caches.setpath(texmf.instance,category,subcategory), + } + c[subcategory] = s + end + return s + else + return nil + end + end + end + + function containers.is_usable(container, name) + return container.enabled and caches.is_writable(container.path, name) + end + + function containers.is_valid(container, name) + if name and name ~= "" then + local storage = container.storage[name] + return storage and not table.is_empty(storage) and storage.cache_version == container.version + else + return false + end + end + + function containers.read(container,name) + if container.enabled and not container.storage[name] then + container.storage[name] = caches.loaddata(container.path,name) + if containers.is_valid(container,name) then + report(container,"loaded",name) + else + container.storage[name] = nil + end + end + if container.storage[name] then + report(container,"reusing",name) + end + return container.storage[name] + end + + function containers.write(container, name, data) + if data then + data.cache_version = container.version + if container.enabled then + local unique, shared = data.unique, data.shared + data.unique, data.shared = nil, nil + caches.savedata(container.path, name, data) + report(container,"saved",name) + data.unique, data.shared = unique, shared + end + report(container,"stored",name) + container.storage[name] = data + end + return data + end + + function containers.content(container,name) + return container.storage[name] + end + +end + +-- since we want to use the cache instead of the tree, we will now +-- reimplement the saver. 
+ +local save_data = input.aux.save_data + +input.cachepath = nil + +function input.aux.save_data(instance, dataname, check) + input.cachepath = input.cachepath or caches.definepath(instance,"trees") + save_data(instance, dataname, check, function(cachename,dataname) + if input.usecache then + return file.join(input.cachepath(),caches.hashed(cachename)) + else + return file.join(cachename,dataname) + end + end) +end + +local load_data = input.aux.load_data + +function input.aux.load_data(instance,pathname,dataname,filename) + input.cachepath = input.cachepath or caches.definepath(instance,"trees") + load_data(instance,pathname,dataname,filename,function(dataname,filename) + if input.usecache then + return file.join(input.cachepath(),caches.hashed(pathname)) + else + if not filename or (filename == "") then + filename = dataname + end + return file.join(pathname,filename) + end + end) +end + +-- we will make a better format, maybe something xml or just text or lua + +input.automounted = input.automounted or { } + +function input.automount(instance,usecache) + local mountpaths = input.simplified_list(input.expansion(instance,'TEXMFMOUNT')) + if table.is_empty(mountpaths) and usecache then + mountpaths = { caches.setpath(instance,"mount") } + end + if not table.is_empty(mountpaths) then + input.starttiming(instance) + for k, root in pairs(mountpaths) do + local f = io.open(root.."/url.tmi") + if f then + for line in f:lines() do + if line then + if line:find("^[%%#%-]") then -- or %W + -- skip + elseif line:find("^zip://") then + input.report("mounting",line) + table.insert(input.automounted,line) + input.usezipfile(instance,line) + end + end + end + f:close() + end + end + input.stoptiming(instance) + end +end + +-- store info in format + +input.storage = { } +input.storage.data = { } +input.storage.min = 0 -- 500 +input.storage.max = input.storage.min - 1 +input.storage.trace = false -- true +input.storage.done = 0 +input.storage.evaluators = { } +-- (evaluate,message,names) + +function input.storage.register(...) + input.storage.data[#input.storage.data+1] = { ... } +end + +function input.storage.evaluate(name) + input.storage.evaluators[#input.storage.evaluators+1] = name +end + +function input.storage.finalize() -- we can prepend the string with "evaluate:" + for _, t in ipairs(input.storage.evaluators) do + for i, v in pairs(t) do + if type(v) == "string" then + t[i] = loadstring(v)() + elseif type(v) == "table" then + for _, vv in pairs(v) do + if type(vv) == "string" then + t[i] = loadstring(vv)() + end + end + end + end + end +end + +function input.storage.dump() + for name, data in ipairs(input.storage.data) do + local evaluate, message, original, target = data[1], data[2], data[3] ,data[4] + local name, initialize, finalize, code = nil, "", "", "" + for str in target:gmatch("([^%.]+)") do + if name then + name = name .. "." .. str + else + name = str + end + initialize = string.format("%s %s = %s or {} ", initialize, name, name) + end + if evaluate then + finalize = "input.storage.evaluate(" .. name .. ")" + end + input.storage.max = input.storage.max + 1 + if input.storage.trace then + logs.report('storage',string.format('saving %s in slot %s',message,input.storage.max)) + code = + initialize .. + string.format("logs.report('storage','restoring %s from slot %s') ",message,input.storage.max) .. + table.serialize(original,name) .. + finalize + else + code = initialize .. table.serialize(original,name) .. 
finalize + end + lua.bytecode[input.storage.max] = loadstring(code) + end +end + +if lua.bytecode then -- from 0 upwards + local i = input.storage.min + while lua.bytecode[i] do + lua.bytecode[i]() + lua.bytecode[i] = nil + i = i + 1 + end + input.storage.done = i +end + + +-- filename : luat-zip.lua +-- comment : companion to luat-lib.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['luat-zip'] = 1.001 + +if zip and input then + zip.supported = true +else + zip = { } + zip.supported = false +end + +if not zip.supported then + + if not input then input = { } end -- will go away + + function zip.openarchive (...) return nil end -- needed ? + function zip.closenarchive (...) end -- needed ? + function input.usezipfile (...) end -- needed ? + +else + + -- zip:///oeps.zip?name=bla/bla.tex + -- zip:///oeps.zip?tree=tex/texmf-local + + local function validzip(str) + if not str:find("^zip://") then + return "zip:///" .. str + else + return str + end + end + + zip.archives = { } + zip.registeredfiles = { } + + function zip.openarchive(instance,name) + if not name or name == "" then + return nil + else + local arch = zip.archives[name] + if arch then + return arch + else + local full = input.find_file(instance,name) or "" + local arch = (full ~= "" and zip.open(full)) or false + zip.archives[name] = arch + return arch + end + end + end + + function zip.closearchive(instance,name) + if not name or name == "" and zip.archives[name] then + zip.close(zip.archives[name]) + zip.archives[name] = nil + end + end + + -- zip:///texmf.zip?tree=/tex/texmf + -- zip:///texmf.zip?tree=/tex/texmf-local + -- zip:///texmf-mine.zip?tree=/tex/texmf-projects + + function input.locators.zip(instance,specification) -- where is this used? startup zips (untested) + specification = input.splitmethod(specification) + local zipfile = specification.path + local zfile = zip.openarchive(instance,name) -- tricky, could be in to be initialized tree + if zfile then + input.logger('! zip locator', specification.original ..' found') + else + input.logger('? zip locator', specification.original ..' not found') + end + end + + function input.hashers.zip(instance,tag,name) + input.report("loading zip file",name,"as",tag) + input.usezipfile(instance,tag .."?tree=" .. name) + end + + function input.concatinators.zip(tag,path,name) + if not path or path == "" then + return tag .. '?name=' .. name + else + return tag .. '?name=' .. path .. "/" .. name + end + end + + function input.is_readable.zip(name) + return true + end + + function input.finders.zip(instance,specification,filetype) + specification = input.splitmethod(specification) + if specification.path then + local q = url.query(specification.query) + if q.name then + local zfile = zip.openarchive(instance,specification.path) + if zfile then + input.logger('! zip finder',specification.path) + local dfile = zfile:open(q.name) + if dfile then + dfile = zfile:close() + input.logger('+ zip finder',q.name) + return specification.original + end + else + input.logger('? 
zip finder',specification.path) + end + end + end + input.logger('- zip finder',filename) + return unpack(input.finders.notfound) + end + + function input.openers.zip(instance,specification) + local zipspecification = input.splitmethod(specification) + if zipspecification.path then + local q = url.query(zipspecification.query) + if q.name then + local zfile = zip.openarchive(instance,zipspecification.path) + if zfile then + input.logger('+ zip starter',zipspecification.path) + local dfile = zfile:open(q.name) + if dfile then + input.show_open(specification) + return input.openers.text_opener(specification,dfile,'zip') + end + else + input.logger('- zip starter',zipspecification.path) + end + end + end + input.logger('- zip opener',filename) + return unpack(input.openers.notfound) + end + + function input.loaders.zip(instance,specification) + specification = input.splitmethod(specification) + if specification.path then + local q = url.query(specification.query) + if q.name then + local zfile = zip.openarchive(instance,specification.path) + if zfile then + input.logger('+ zip starter',specification.path) + local dfile = zfile:open(q.name) + if dfile then + input.show_load(filename) + input.logger('+ zip loader',filename) + local s = dfile:read("*all") + dfile:close() + return true, s, #s + end + else + input.logger('- zip starter',specification.path) + end + end + end + input.logger('- zip loader',filename) + return unpack(input.openers.notfound) + end + + -- zip:///somefile.zip + -- zip:///somefile.zip?tree=texmf-local -> mount + + function input.usezipfile(instance,zipname) + zipname = validzip(zipname) + input.logger('! zip use','file '..zipname) + local specification = input.splitmethod(zipname) + local zipfile = specification.path + if zipfile and not zip.registeredfiles[zipname] then + local tree = url.query(specification.query).tree or "" + input.logger('! zip register','file '..zipname) + local z = zip.openarchive(instance,zipfile) + if z then + input.logger("= zipfile","registering "..zipname) + input.starttiming(instance) + input.aux.prepend_hash(instance,'zip',zipname,zipfile) + input.aux.extend_texmf_var(instance,zipname) -- resets hashes too + zip.registeredfiles[zipname] = z + instance.files[zipname] = input.aux.register_zip_file(z,tree or "") + input.stoptiming(instance) + else + input.logger("? zipfile","unknown "..zipname) + end + else + input.logger('! 
zip register','no file '..zipname) + end + end + + function input.aux.register_zip_file(z,tree) + local files, filter = { }, "" + if tree == "" then + filter = "^(.+)/(.-)$" + else + filter = "^"..tree.."/(.+)/(.-)$" + end + input.logger('= zip filter',filter) + local register, n = input.aux.register_file, 0 + for i in z:files() do + local path, name = i.filename:match(filter) + if path then + if name and name ~= '' then + register(files, name, path) + n = n + 1 + else + -- directory + end + else + register(files, i.filename, '') + n = n + 1 + end + end + input.report('= zip entries',n) + return files + end + +end + + +-- filename : luat-zip.lua +-- comment : companion to luat-lib.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['luat-tex'] = 1.001 + +-- special functions that deal with io + +if texconfig and not texlua then + + input.level = input.level or 0 + + if input.logmode() == 'xml' then + function input.show_open(name) + input.level = input.level + 1 + texio.write_nl("") + end + function input.show_close(name) + texio.write(" ") + input.level = input.level - 1 + end + function input.show_load(name) + texio.write_nl("") -- level? + end + else + function input.show_open () end + function input.show_close() end + function input.show_load () end + end + + function input.finders.generic(instance,tag,filename,filetype) + local foundname = input.find_file(instance,filename,filetype) + if foundname and foundname ~= "" then + input.logger('+ ' .. tag .. ' finder',filename,'filetype') + return foundname + else + input.logger('- ' .. tag .. ' finder',filename,'filetype') + return unpack(input.finders.notfound) + end + end + + input.filters.dynamic_translator = nil + input.filters.frozen_translator = nil + input.filters.utf_translator = nil + + function input.openers.text_opener(filename,file_handle,tag) + local u = unicode.utftype(file_handle) + local t = { } + if u > 0 then + input.logger('+ ' .. tag .. ' opener (' .. unicode.utfname[u] .. ')',filename) + local l + if u > 2 then + l = unicode.utf32_to_utf8(file_handle:read("*a"),u==4) + else + l = unicode.utf16_to_utf8(file_handle:read("*a"),u==2) + end + file_handle:close() + t = { + utftype = u, -- may go away + lines = l, + current = 0, -- line number, not really needed + handle = nil, + noflines = #l, + close = function() + input.logger('= ' .. tag .. ' closer (' .. unicode.utfname[u] .. ')',filename) + input.show_close(filename) + end, +--~ getline = function(n) +--~ local line = t.lines[n] +--~ if not line or line == "" then +--~ return "" +--~ else +--~ local translator = input.filters.utf_translator +--~ return (translator and translator(line)) or line +--~ end +--~ end, + reader = function(self) + self = self or t + local current, lines = self.current, self.lines + if current >= #lines then + return nil + else + current = current + 1 + self.current = current + local line = lines[current] + if line == "" then + return "" + else + local translator = input.filters.utf_translator + -- return (translator and translator(line)) or line + if translator then + return translator(line) + else + return line + end + end + end + end + } + else + input.logger('+ ' .. tag .. 
' opener',filename) + -- todo: file;name -> freeze / eerste regel scannen -> freeze + local filters = input.filters + t = { + reader = function(self) + local line = file_handle:read() + if line == "" then + return "" + end + local translator = filters.utf_translator + if translator then + return translator(line) + end + translator = filters.dynamic_translator + if translator then + return translator(line) + end + return line + end, + close = function() + input.logger('= ' .. tag .. ' closer',filename) + input.show_close(filename) + file_handle:close() + end, + handle = function() + return file_handle + end, + noflines = function() + t.noflines = io.noflines(file_handle) + return t.noflines + end + } + end + return t + end + + function input.openers.generic(instance,tag,filename) + if filename and filename ~= "" then + local f = io.open(filename,"r") + if f then + input.show_open(filename) + return input.openers.text_opener(filename,f,tag) + end + end + input.logger('- ' .. tag .. ' opener',filename) + return unpack(input.openers.notfound) + end + + function input.loaders.generic(instance,tag,filename) + if filename and filename ~= "" then + local f = io.open(filename,"rb") + if f then + input.show_load(filename) + input.logger('+ ' .. tag .. ' loader',filename) + local s = f:read("*a") + f:close() + if s then + return true, s, #s + end + end + end + input.logger('- ' .. tag .. ' loader',filename) + return unpack(input.loaders.notfound) + end + + function input.finders.tex(instance,filename,filetype) + return input.finders.generic(instance,'tex',filename,filetype) + end + function input.openers.tex(instance,filename) + return input.openers.generic(instance,'tex',filename) + end + function input.loaders.tex(instance,filename) + return input.loaders.generic(instance,'tex',filename) + end + +end + +-- callback into the file io and related things; disabling kpse + + +if texconfig and not texlua then do + + -- this is not the right place, because we refer to quite some not yet defined tables, but who cares ... + + ctx = ctx or { } + + local ss = { } + + function ctx.writestatus(a,b) + local s = ss[a] + if not ss[a] then + s = a:rpadd(15) .. ": " + ss[a] = s + end + texio.write_nl(s .. b .. "\n") + end + + -- this will become: ctx.install_statistics(fnc() return ..,.. 
end) etc + + local statusinfo, n = { }, 0 + + function ctx.register_statistics(tag,pattern,fnc) + statusinfo[#statusinfo+1] = { tag, pattern, fnc } + if #tag > n then n = #tag end + end + + function ctx.show_statistics() -- todo: move calls + if caches then + ctx.register_statistics("used config path", "%s", function() return caches.configpath(texmf.instance) end) + ctx.register_statistics("used cache path", "%s", function() return caches.path end) + end + if status.luabytecodes > 0 and input.storage and input.storage.done then + ctx.register_statistics("modules/dumps/instances", "%s/%s/%s", function() return status.luabytecodes-500, input.storage.done, status.luastates end) + end + if texmf.instance then + ctx.register_statistics("input load time", "%s seconds", function() return input.loadtime(texmf.instance) end) + end + if fonts then + ctx.register_statistics("fonts load time","%s seconds", function() return input.loadtime(fonts) end) + end + if xml then + ctx.register_statistics("xml load time", "%s seconds, backreferences: %i, outer filtering time: %s", function() return input.loadtime(xml), #lxml.self, input.loadtime(lxml) end) + end + if mptopdf then + ctx.register_statistics("mps conversion time", "%s seconds", function() return input.loadtime(mptopdf) end) + end + if nodes then + ctx.register_statistics("node processing time", "%s seconds (including kernel)", function() return input.loadtime(nodes) end) + end + if kernel then + ctx.register_statistics("kernel processing time", "%s seconds", function() return input.loadtime(kernel) end) + end + if attributes then + ctx.register_statistics("attribute processing time", "%s seconds", function() return input.loadtime(attributes) end) + end + if languages then + ctx.register_statistics("language load time", "%s seconds, n=%s", function() return input.loadtime(languages), languages.hyphenation.n() end) + end + if figures then + ctx.register_statistics("graphics processing time", "%s seconds, n=%s (including tex)", function() return input.loadtime(figures), figures.n or "?" 
end) + end + if metapost then + ctx.register_statistics("metapost processing time", "%s seconds, loading: %s seconds, execution: %s seconds, n: %s", function() return input.loadtime(metapost), input.loadtime(mplib), input.loadtime(metapost.exectime), metapost.n end) + end + if status.luastate_bytes then + ctx.register_statistics("current memory usage", "%s bytes", function() return status.luastate_bytes end) + end + if nodes then + ctx.register_statistics("cleaned up reserved nodes", "%s nodes, %s lists of %s", function() return nodes.cleanup_reserved(tex.count[24]) end) -- \topofboxstack + end + if status.node_mem_usage then + ctx.register_statistics("node memory usage", "%s", function() return status.node_mem_usage end) + end + if languages then + ctx.register_statistics("loaded patterns", "%s", function() return languages.logger.report() end) + end + if fonts then + ctx.register_statistics("loaded fonts", "%s", function() return fonts.logger.report() end) + end + if xml then -- so we are in mkiv, we need a different check + ctx.register_statistics("runtime", "%s seconds, %i processed pages, %i shipped pages, %.3f pages/second", function() + input.stoptiming(texmf) + local runtime = input.loadtime(texmf) + local shipped = tex.count['nofshipouts'] + local pages = tex.count['realpageno'] - 1 + local persecond = shipped / runtime + return runtime, pages, shipped, persecond + end) + end + for _, t in ipairs(statusinfo) do + local tag, pattern, fnc = t[1], t[2], t[3] + ctx.writestatus("mkiv lua stats", string.format("%s - %s", tag:rpadd(n," "), pattern:format(fnc()))) + end + end + +end end + +if texconfig and not texlua then + + texconfig.kpse_init = false + texconfig.trace_file_names = input.logmode() == 'tex' + texconfig.max_print_line = 100000 + + -- if still present, we overload kpse (put it off-line so to say) + + if not texmf then texmf = { } end + + input.starttiming(texmf) + + if not texmf.instance then + + if not texmf.instance then -- prevent a second loading + + texmf.instance = input.reset() + texmf.instance.progname = environment.progname or 'context' + texmf.instance.engine = environment.engine or 'luatex' + texmf.instance.validfile = input.validctxfile + + input.load(texmf.instance) + + end + + if callback then + callback.register('find_read_file' , function(id,name) return input.findtexfile(texmf.instance,name) end) + callback.register('open_read_file' , function( name) return input.opentexfile(texmf.instance,name) end) + end + + if callback then + callback.register('find_data_file' , function(name) return input.findbinfile(texmf.instance,name,"tex") end) + callback.register('find_enc_file' , function(name) return input.findbinfile(texmf.instance,name,"enc") end) + callback.register('find_font_file' , function(name) return input.findbinfile(texmf.instance,name,"tfm") end) + callback.register('find_format_file' , function(name) return input.findbinfile(texmf.instance,name,"fmt") end) + callback.register('find_image_file' , function(name) return input.findbinfile(texmf.instance,name,"tex") end) + callback.register('find_map_file' , function(name) return input.findbinfile(texmf.instance,name,"map") end) + callback.register('find_ocp_file' , function(name) return input.findbinfile(texmf.instance,name,"ocp") end) + callback.register('find_opentype_file' , function(name) return input.findbinfile(texmf.instance,name,"otf") end) + callback.register('find_output_file' , function(name) return name end) + callback.register('find_pk_file' , function(name) return 
input.findbinfile(texmf.instance,name,"pk") end) + callback.register('find_sfd_file' , function(name) return input.findbinfile(texmf.instance,name,"sfd") end) + callback.register('find_truetype_file' , function(name) return input.findbinfile(texmf.instance,name,"ttf") end) + callback.register('find_type1_file' , function(name) return input.findbinfile(texmf.instance,name,"pfb") end) + callback.register('find_vf_file' , function(name) return input.findbinfile(texmf.instance,name,"vf") end) + + callback.register('read_data_file' , function(file) return input.loadbinfile(texmf.instance,file,"tex") end) + callback.register('read_enc_file' , function(file) return input.loadbinfile(texmf.instance,file,"enc") end) + callback.register('read_font_file' , function(file) return input.loadbinfile(texmf.instance,file,"tfm") end) + -- format + -- image + callback.register('read_map_file' , function(file) return input.loadbinfile(texmf.instance,file,"map") end) + callback.register('read_ocp_file' , function(file) return input.loadbinfile(texmf.instance,file,"ocp") end) + callback.register('read_opentype_file' , function(file) return input.loadbinfile(texmf.instance,file,"otf") end) + -- output + callback.register('read_pk_file' , function(file) return input.loadbinfile(texmf.instance,file,"pk") end) + callback.register('read_sfd_file' , function(file) return input.loadbinfile(texmf.instance,file,"sfd") end) + callback.register('read_truetype_file' , function(file) return input.loadbinfile(texmf.instance,file,"ttf") end) + callback.register('read_type1_file' , function(file) return input.loadbinfile(texmf.instance,file,"pfb") end) + callback.register('read_vf_file' , function(file) return input.loadbinfile(texmf.instance,file,"vf" ) end) + end + + if callback and environment.aleph_mode then + callback.register('find_font_file' , function(name) return input.findbinfile(texmf.instance,name,"ofm") end) + callback.register('read_font_file' , function(file) return input.loadbinfile(texmf.instance,file,"ofm") end) + callback.register('find_vf_file' , function(name) return input.findbinfile(texmf.instance,name,"ovf") end) + callback.register('read_vf_file' , function(file) return input.loadbinfile(texmf.instance,file,"ovf") end) + end + + if callback then + callback.register('find_write_file' , function(id,name) return name end) + end + + if callback and (not config or (#config == 0)) then + callback.register('find_format_file' , function(name) return name end) + end + + if callback and false then + for k, v in pairs(callback.list()) do + if not v then texio.write_nl("callback "..k.." is not set") end + end + end + + if callback then + + input.start_actions = { } + input.stop_actions = { } + + function input.register_start_actions(f) table.insert(input.start_actions, f) end + function input.register_stop_actions (f) table.insert(input.stop_actions, f) end + + --~ callback.register('start_run', function() for _, a in pairs(input.start_actions) do a() end end) + --~ callback.register('stop_run' , function() for _, a in pairs(input.stop_actions ) do a() end end) + + end + + if callback then + + if input.logmode() == 'xml' then + + function input.start_page_number() + texio.write_nl("

") + texio.write_nl("") + end + + callback.register('start_page_number' , input.start_page_number) + callback.register('stop_page_number' , input.stop_page_number ) + + function input.report_output_pages(p,b) + texio.write_nl(""..p.."") + texio.write_nl(""..b.."") + texio.write_nl("") + end + function input.report_output_log() + end + + callback.register('report_output_pages', input.report_output_pages) + callback.register('report_output_log' , input.report_output_log ) + + function input.start_run() + texio.write_nl("") + texio.write_nl("") + texio.write_nl("") + end + function input.stop_run() + texio.write_nl("") + end + function input.show_statistics() + for k,v in pairs(status.list()) do + texio.write_nl("log",""..tostring(v).."") + end + end + + table.insert(input.start_actions, input.start_run) + table.insert(input.stop_actions , input.show_statistics) + table.insert(input.stop_actions , input.stop_run) + + else + table.insert(input.stop_actions , input.show_statistics) + end + + callback.register('start_run', function() for _, a in pairs(input.start_actions) do a() end end) + callback.register('stop_run' , function() for _, a in pairs(input.stop_actions ) do a() end ctx.show_statistics() end) + + end + + end + + if kpse then + + function kpse.find_file(filename,filetype,mustexist) + return input.find_file(texmf.instance,filename,filetype,mustexist) + end + function kpse.expand_path(variable) + return input.expand_path(texmf.instance,variable) + end + function kpse.expand_var(variable) + return input.expand_var(texmf.instance,variable) + end + function kpse.expand_braces(variable) + return input.expand_braces(texmf.instance,variable) + end + + end + +end + +-- program specific configuration (memory settings and alike) + +if texconfig and not texlua then + + luatex = luatex or { } + + luatex.variablenames = { + 'main_memory', 'extra_mem_bot', 'extra_mem_top', + 'buf_size','expand_depth', + 'font_max', 'font_mem_size', + 'hash_extra', 'max_strings', 'pool_free', 'pool_size', 'string_vacancies', + 'obj_tab_size', 'pdf_mem_size', 'dest_names_size', + 'nest_size', 'param_size', 'save_size', 'stack_size', + 'trie_size', 'hyph_size', 'max_in_open', + 'ocp_stack_size', 'ocp_list_size', 'ocp_buf_size' + } + + function luatex.variables() + local t, x = { }, nil + for _,v in pairs(luatex.variablenames) do + x = input.var_value(texmf.instance,v) + if x and x:find("^%d+$") then + t[v] = tonumber(x) + end + end + return t + end + + function luatex.setvariables(tab) + for k,v in pairs(luatex.variables()) do + tab[k] = v + end + end + + if not luatex.variables_set then + luatex.setvariables(texconfig) + luatex.variables_set = true + end + + texconfig.max_print_line = 100000 + texconfig.max_in_open = 127 + +end + +-- some tex basics + +if not cs then cs = { } end + +function cs.def(k,v) + tex.sprint(tex.texcatcodes, "\\def\\" .. k .. "{" .. v .. "}") +end + +function cs.chardef(k,v) + tex.sprint(tex.texcatcodes, "\\chardef\\" .. k .. "=" .. v .. "\\relax") +end + +function cs.boolcase(b) + if b then tex.write(1) else tex.write(0) end +end + +function cs.testcase(b) + if b then + tex.sprint(tex.texcatcodes, "\\firstoftwoarguments") + else + tex.sprint(tex.texcatcodes, "\\secondoftwoarguments") + end +end + + +if not modules then modules = { } end modules ['luat-kps'] = { + version = 1.001, + comment = "companion to luatools.lua", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +--[[ldx-- +

This file is used when we want the input handlers to behave like +kpsewhich. What to do with the following:

+ + +{$SELFAUTOLOC,$SELFAUTODIR,$SELFAUTOPARENT}{,{/share,}/texmf{-local,}/web2c} +$SELFAUTOLOC : /usr/tex/bin/platform +$SELFAUTODIR : /usr/tex/bin +$SELFAUTOPARENT : /usr/tex + + +

How about just forgetting about them?
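The suffix and format tables that follow are kept, though; as a hedged
illustration of how such an entry is meant to be read (using one of the file
types registered below):

    local filetype = "texmfscripts"
    print(input.formats[filetype])                       -- the variable consulted: TEXMFSCRIPTS
    print(table.concat(input.suffixes[filetype], ","))   -- the suffixes tried: rb,lua,py,pl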

+--ldx]]-- + +input = input or { } +input.suffixes = input.suffixes or { } +input.formats = input.formats or { } + +input.suffixes['gf'] = { 'gf' } +input.suffixes['pk'] = { 'pk' } +input.suffixes['base'] = { 'base' } +input.suffixes['bib'] = { 'bib' } +input.suffixes['bst'] = { 'bst' } +input.suffixes['cnf'] = { 'cnf' } +input.suffixes['mem'] = { 'mem' } +input.suffixes['mf'] = { 'mf' } +input.suffixes['mfpool'] = { 'pool' } +input.suffixes['mft'] = { 'mft' } +input.suffixes['mppool'] = { 'pool' } +input.suffixes['graphic/figure'] = { 'eps', 'epsi' } +input.suffixes['texpool'] = { 'pool' } +input.suffixes['PostScript header'] = { 'pro' } +input.suffixes['ist'] = { 'ist' } +input.suffixes['web'] = { 'web', 'ch' } +input.suffixes['cweb'] = { 'w', 'web', 'ch' } +input.suffixes['cmap files'] = { 'cmap' } +input.suffixes['lig files'] = { 'lig' } +input.suffixes['bitmap font'] = { } +input.suffixes['MetaPost support'] = { } +input.suffixes['TeX system documentation'] = { } +input.suffixes['TeX system sources'] = { } +input.suffixes['dvips config'] = { } +input.suffixes['type42 fonts'] = { } +input.suffixes['web2c files'] = { } +input.suffixes['other text files'] = { } +input.suffixes['other binary files'] = { } +input.suffixes['opentype fonts'] = { 'otf' } + +input.suffixes['fmt'] = { 'fmt' } +input.suffixes['texmfscripts'] = { 'rb','lua','py','pl' } + +input.suffixes['pdftex config'] = { } +input.suffixes['Troff fonts'] = { } + +input.suffixes['ls-R'] = { } + +--[[ldx-- +

If you wondered about some of the previous mappings, how about +the next bunch:

+--ldx]]-- + +input.formats['bib'] = '' +input.formats['bst'] = '' +input.formats['mft'] = '' +input.formats['ist'] = '' +input.formats['web'] = '' +input.formats['cweb'] = '' +input.formats['MetaPost support'] = '' +input.formats['TeX system documentation'] = '' +input.formats['TeX system sources'] = '' +input.formats['Troff fonts'] = '' +input.formats['dvips config'] = '' +input.formats['graphic/figure'] = '' +input.formats['ls-R'] = '' +input.formats['other text files'] = '' +input.formats['other binary files'] = '' + +input.formats['gf'] = '' +input.formats['pk'] = '' +input.formats['base'] = 'MFBASES' +input.formats['cnf'] = '' +input.formats['mem'] = 'MPMEMS' +input.formats['mf'] = 'MFINPUTS' +input.formats['mfpool'] = 'MFPOOL' +input.formats['mppool'] = 'MPPOOL' +input.formats['texpool'] = 'TEXPOOL' +input.formats['PostScript header'] = 'TEXPSHEADERS' +input.formats['cmap files'] = 'CMAPFONTS' +input.formats['type42 fonts'] = 'T42FONTS' +input.formats['web2c files'] = 'WEB2C' +input.formats['pdftex config'] = 'PDFTEXCONFIG' +input.formats['texmfscripts'] = 'TEXMFSCRIPTS' +input.formats['bitmap font'] = '' +input.formats['lig files'] = 'LIGFONTS' + +-- end library merge + +-- We initialize some characteristics of this program. We need to +-- do this before we load the libraries, else own.name will not be +-- properly set (handy for selfcleaning the file). It's an ugly +-- looking piece of code. + +own = { } + +own.libs = { -- todo: check which ones are really needed + 'l-string.lua', + 'l-lpeg.lua', + 'l-table.lua', + 'l-io.lua', + 'l-number.lua', + 'l-set.lua', + 'l-os.lua', + 'l-md5.lua', + 'l-file.lua', + 'l-url.lua', + 'l-dir.lua', + 'l-boolean.lua', + 'l-unicode.lua', + 'l-utils.lua', + 'luat-lib.lua', + 'luat-inp.lua', + 'luat-tmp.lua', + 'luat-zip.lua', + 'luat-tex.lua', + 'luat-kps.lua', +} + +-- We need this hack till luatex is fixed. + +if arg and arg[0] == 'luatex' and arg[1] == "--luaonly" then + arg[-1]=arg[0] arg[0]=arg[2] for k=3,#arg do arg[k-2]=arg[k] end arg[#arg]=nil arg[#arg]=nil +end + +-- End of hack. + +own.name = (environment and environment.ownname) or arg[0] or 'luatools.lua' +own.path = string.match(own.name,"^(.+)[\\/].-$") or "." +own.list = { '.' } + +if own.path ~= '.' then + table.insert(own.list,own.path) +end + +table.insert(own.list,own.path.."/../../../tex/context/base") +table.insert(own.list,own.path.."/mtx") +table.insert(own.list,own.path.."/../sources") + +function locate_libs() + for _, lib in pairs(own.libs) do + for _, pth in pairs(own.list) do + local filename = string.gsub(pth .. "/" .. lib,"\\","/") + local codeblob = loadfile(filename) + if codeblob then + codeblob() + own.list = { pth } -- speed up te search + break + end + end + end +end + +if not input then + locate_libs() +end + +if not input then + print("") + print("Luatools is unable to start up due to lack of libraries. 
You may") + print("try to run 'lua luatools.lua --selfmerge' in the path where this") + print("script is located (normally under ..../scripts/context/lua) which") + print("will make luatools library independent.") + os.exit() +end + +instance = input.reset() +input.verbose = environment.arguments["verbose"] or false +input.banner = 'LuaTools | ' +utils.report = input.report + +input.defaultlibs = { -- not all are needed + 'l-string.lua', 'l-lpeg.lua', 'l-table.lua', 'l-boolean.lua', 'l-number.lua', 'l-set.lua', 'l-unicode.lua', + 'l-md5.lua', 'l-os.lua', 'l-io.lua', 'l-file.lua', 'l-url.lua', 'l-dir.lua', 'l-utils.lua', 'l-tex.lua', + 'luat-env.lua', 'luat-lib.lua', 'luat-inp.lua', 'luat-tmp.lua', 'luat-zip.lua', 'luat-tex.lua' +} + +-- todo: use environment.argument() instead of environment.arguments[] + +instance.engine = environment.arguments["engine"] or 'luatex' +instance.progname = environment.arguments["progname"] or 'context' +instance.luaname = environment.arguments["luafile"] or "" -- environment.ownname or "" +instance.lualibs = environment.arguments["lualibs"] or table.concat(input.defaultlibs,",") +instance.allresults = environment.arguments["all"] or false +instance.pattern = environment.arguments["pattern"] or nil +instance.sortdata = environment.arguments["sort"] or false +instance.kpseonly = not environment.arguments["all"] or false +instance.my_format = environment.arguments["format"] or instance.format +instance.lsrmode = environment.arguments["lsr"] or false + +if type(instance.pattern) == 'boolean' then + input.report("invalid pattern specification") -- toto, force verbose for one message + instance.pattern = nil +end + +if environment.arguments["trace"] then input.settrace(environment.arguments["trace"]) end + +if environment.arguments["minimize"] then + if input.validators.visibility[instance.progname] then + instance.validfile = input.validators.visibility[instance.progname] + end +end + +function input.my_prepare_a(instance) + input.resetconfig(instance) + input.identify_cnf(instance) + input.load_lua(instance) + input.expand_variables(instance) + input.load_cnf(instance) + input.expand_variables(instance) +end + +function input.my_prepare_b(instance) + input.my_prepare_a(instance) + input.load_hash(instance) + input.automount(instance) +end + +-- barename + +if not messages then messages = { } end + +messages.no_ini_file = [[ +There is no lua initialization file found. This file can be forced by the +"--progname" directive, or specified with "--luaname", or it is derived +automatically from the formatname (aka jobname). It may be that you have +to regenerate the file database using "luatools --generate". +]] + +messages.help = [[ +--generate generate file database +--variables show configuration variables +--expansions show expanded variables +--configurations show configuration order +--expand-braces expand complex variable +--expand-path expand variable (resolve paths) +--expand-var expand variable (resolve references) +--show-path show path expansion of ... +--var-value report value of variable +--find-file report file location +--find-path report path of file +--make or --ini make luatex format +--run or --fmt= run luatex format +--luafile=str lua inifile (default is .lua) +--lualibs=list libraries to assemble (optional when --compile) +--compile assemble and compile lua inifile +--mkii force context mkii mode (only for testing, not usable!) 
+--verbose give a bit more info +--minimize optimize lists for format +--all show all found files +--sort sort cached data +--engine=str target engine +--progname=str format or backend +--pattern=str filter variables +--lsr use lsr and cnf directly +]] + +function input.my_make_format(instance,texname) + if texname and texname ~= "" then + if input.usecache then + local path = file.join(caches.setpath(instance,"formats")) -- maybe platform + if path and lfs then + lfs.chdir(path) + end + end + local barename = texname:gsub("%.%a+$","") + if barename == texname then + texname = texname .. ".tex" + end + local fullname = input.find_files(instance,texname)[1] or "" + if fullname == "" then + input.report("no tex file with name",texname) + else + local luaname, lucname, luapath, lualibs = "", "", "", { } + -- the following is optional, since context.lua can also + -- handle this collect and compile business + if environment.arguments["compile"] then + if luaname == "" then luaname = barename end + input.report("creating initialization file " .. luaname) + luapath = file.dirname(luaname) + if luapath == "" then + luapath = file.dirname(texname) + end + if luapath == "" then + luapath = file.dirname(input.find_files(instance,texname)[1] or "") + end + lualibs = string.split(instance.lualibs,",") + luaname = file.basename(barename .. ".lua") + lucname = file.basename(barename .. ".luc") + -- todo: when this fails, we can just copy the merged libraries from + -- luatools since they are normally the same, at least for context + if lualibs[1] then + local firstlib = file.join(luapath,lualibs[1]) + if not lfs.isfile(firstlib) then + local foundname = input.find_files(instance,lualibs[1])[1] + if foundname then + input.report("located library path : " .. luapath) + luapath = file.dirname(foundname) + end + end + end + input.report("using library path : " .. luapath) + input.report("using lua libraries: " .. table.join(lualibs," ")) + utils.merger.selfcreate(lualibs,luapath,luaname) + if utils.lua.compile(luaname, lucname) and io.exists(lucname) then + luaname = lucname + input.report("using compiled initialization file " .. lucname) + else + input.report("using uncompiled initialization file " .. luaname) + end + else + for _, v in pairs({instance.luaname, instance.progname, barename}) do + v = string.gsub(v..".lua","%.lua%.lua$",".lua") + if v and (v ~= "") then + luaname = input.find_files(instance,v)[1] or "" + if luaname ~= "" then + break + end + end + end + end + if luaname == "" then + input.reportlines(messages.no_ini_file) + input.report("texname : " .. texname) + input.report("luaname : " .. instance.luaname) + input.report("progname : " .. instance.progname) + input.report("barename : " .. barename) + else + input.report("using lua initialization file " .. luaname) + local flags = { "--ini" } + if environment.arguments["mkii"] then + flags[#flags+1] = "--progname=" .. instance.progname + else + flags[#flags+1] = "--lua=" .. string.quote(luaname) + end + local bs = (environment.platform == "unix" and "\\\\") or "\\" -- todo: make a function + local command = "luatex ".. table.concat(flags," ") .. " " .. string.quote(fullname) .. " " .. bs .. "dump" + input.report("running command: " .. command .. 
"\n") + os.spawn(command) + end + end + else + input.report("no tex file given") + end +end + +function input.my_run_format(instance,name,data,more) + -- hm, rather old code here; we can now use the file.whatever functions + if name and (name ~= "") then + local barename = name:gsub("%.%a+$","") + local fmtname = "" + if input.usecache then + local path = file.join(caches.setpath(instance,"formats")) -- maybe platform + fmtname = file.join(path,barename..".fmt") or "" + end + if fmtname == "" then + fmtname = input.find_files(instance,barename..".fmt")[1] or "" + end + fmtname = input.clean_path(fmtname) + barename = fmtname:gsub("%.%a+$","") + if fmtname == "" then + input.report("no format with name",name) + else + local luaname = barename .. ".luc" + local f = io.open(luaname) + if not f then + luaname = barename .. ".lua" + f = io.open(luaname) + end + if f then + f:close() + local command = "luatex --fmt=" .. string.quote(barename) .. " --lua=" .. string.quote(luaname) .. " " .. string.quote(data) .. " " .. string.quote(more) + input.report("running command: " .. command) + os.spawn(command) + else + input.report("using format name",fmtname) + input.report("no luc/lua with name",barename) + end + end + end +end + +-- helpers for verbose lists + +input.listers = input.listers or { } + +local function tabstr(str) + if type(str) == 'table' then + return table.concat(str," | ") + else + return str + end +end + +local function list(instance,list) + local pat = string.upper(instance.pattern or "","") + for _,key in pairs(table.sortedkeys(list)) do + if instance.pattern == "" or string.find(key:upper(),pat) then + if instance.kpseonly then + if instance.kpsevars[key] then + print(format("%s=%s",key,tabstr(list[key]))) + end + else + print(format('%s %s=%s',(instance.kpsevars[key] and 'K') or 'E',key,tabstr(list[key]))) + end + end + end +end + +function input.listers.variables (instance) list(instance,instance.variables ) end +function input.listers.expansions(instance) list(instance,instance.expansions) end + +function input.listers.configurations(instance) + for _,key in pairs(table.sortedkeys(instance.kpsevars)) do + if not instance.pattern or (instance.pattern=="") or key:find(instance.pattern) then + print(key.."\n") + for i,c in ipairs(instance.order) do + local str = c[key] + if str then + print(format("\t%s\t\t%s",i,input.aux.tabstr(str))) + end + end + print() + end + end +end + +input.report(banner,"\n") + +local ok = true + +if environment.arguments["find-file"] then + input.my_prepare_b(instance) + instance.format = environment.arguments["format"] or instance.format + if instance.pattern then + instance.allresults = true + input.for_files(instance, input.find_files, { instance.pattern }, instance.my_format) + else + input.for_files(instance, input.find_files, environment.files, instance.my_format) + end +elseif environment.arguments["find-path"] then + input.my_prepare_b(instance) + local path = input.find_file(instance, environment.files[1], instance.my_format) + if input.verbose then + input.report(file.dirname(path)) + else + print(file.dirname(path)) + end +--~ elseif environment.arguments["first-writable-path"] then +--~ input.my_prepare_b(instance) +--~ input.report(input.first_writable_path(instance,environment.files[1] or ".")) +elseif environment.arguments["run"] then + input.my_prepare_a(instance) -- ! 
no need for loading databases + input.verbose = true + input.my_run_format(instance,environment.files[1] or "",environment.files[2] or "",environment.files[3] or "") +elseif environment.arguments["fmt"] then + input.my_prepare_a(instance) -- ! no need for loading databases + input.verbose = true + input.my_run_format(instance,environment.arguments["fmt"], environment.files[1] or "",environment.files[2] or "") +elseif environment.arguments["expand-braces"] then + input.my_prepare_a(instance) + input.for_files(instance, input.expand_braces, environment.files) +elseif environment.arguments["expand-path"] then + input.my_prepare_a(instance) + input.for_files(instance, input.expand_path, environment.files) +elseif environment.arguments["expand-var"] or environment.arguments["expand-variable"] then + input.my_prepare_a(instance) + input.for_files(instance, input.expand_var, environment.files) +elseif environment.arguments["show-path"] or environment.arguments["path-value"] then + input.my_prepare_a(instance) + input.for_files(instance, input.show_path, environment.files) +elseif environment.arguments["var-value"] or environment.arguments["show-value"] then + input.my_prepare_a(instance) + input.for_files(instance, input.var_value, environment.files) +elseif environment.arguments["format-path"] then + input.my_prepare_b(instance) + input.report(caches.setpath(instance,"format")) +elseif instance.pattern then -- brrr + input.my_prepare_b(instance) + instance.format = environment.arguments["format"] or instance.format + instance.allresults = true + input.for_files(instance, input.find_files, { instance.pattern }, instance.my_format) +elseif environment.arguments["generate"] then + instance.renewcache = true + input.verbose = true + input.my_prepare_b(instance) +elseif environment.arguments["make"] or environment.arguments["ini"] or environment.arguments["compile"] then + input.my_prepare_b(instance) + input.verbose = true + input.my_make_format(instance,environment.files[1] or "") +elseif environment.arguments["selfmerge"] then + utils.merger.selfmerge(own.name,own.libs,own.list) +elseif environment.arguments["selfclean"] then + utils.merger.selfclean(own.name) +elseif environment.arguments["selfupdate"] then + input.my_prepare_b(instance) + input.verbose = true + input.update_script(instance,own.name,"luatools") +elseif environment.arguments["variables"] or environment.arguments["show-variables"] then + input.my_prepare_a(instance) + input.listers.variables(instance) +elseif environment.arguments["expansions"] or environment.arguments["show-expansions"] then + input.my_prepare_a(instance) + input.listers.expansions(instance) +elseif environment.arguments["configurations"] or environment.arguments["show-configurations"] then + input.my_prepare_a(instance) + input.listers.configurations(instance) +elseif environment.arguments["help"] or (environment.files[1]=='help') or (#environment.files==0) then + if not input.verbose then + input.verbose = true + input.report(banner,"\n") + end + input.reportlines(messages.help) +else + input.my_prepare_b(instance) + input.for_files(instance, input.find_files, environment.files, instance.my_format) +end + +if input.verbose then + input.report("") + input.report(string.format("runtime: %0.3f seconds",os.runtime())) +end + +--~ if ok then +--~ input.report("exit code: 0") os.exit(0) +--~ else +--~ input.report("exit code: 1") os.exit(1) +--~ end + +if environment.platform == "unix" then + io.write("\n") +end diff --git 
a/Build/source/texk/texlive/linked_scripts/context/lua/mtxrun.lua b/Build/source/texk/texlive/linked_scripts/context/lua/mtxrun.lua new file mode 100755 index 00000000000..0fb45cc635b --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/lua/mtxrun.lua @@ -0,0 +1,8552 @@ +#!/usr/bin/env texlua + +if not modules then modules = { } end modules ['mtxrun'] = { + version = 1.001, + comment = "runner, lua replacement for texmfstart.rb", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +-- one can make a stub: +-- +-- #!/bin/sh +-- env LUATEXDIR=/....../texmf/scripts/context/lua luatex --luaonly mtxrun.lua "$@" + +-- filename : mtxrun.lua +-- comment : companion to context.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +-- This script is based on texmfstart.rb but does not use kpsewhich to +-- locate files. Although kpse is a library it never came to opening up +-- its interface to other programs (esp scripting languages) and so we +-- do it ourselves. The lua variant evolved out of an experimental ruby +-- one. Interesting is that using a scripting language instead of c does +-- not have a speed penalty. Actually the lua variant is more efficient, +-- especially when multiple calls to kpsewhich are involved. The lua +-- library also gives way more ocntrol. + +-- to be done / considered +-- +-- support for --exec or make it default +-- support for jar files (or maybe not, never used, too messy) +-- support for $RUBYINPUTS cum suis (if still needed) +-- remember for subruns: _CTX_K_V_#{original}_ +-- remember for subruns: _CTX_K_S_#{original}_ +-- remember for subruns: TEXMFSTART.#{original} [tex.rb texmfstart.rb] + +banner = "version 1.0.2 - 2007+ - PRAGMA ADE / CONTEXT" +texlua = true + +-- begin library merge +-- filename : l-string.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-string'] = 1.001 + +--~ function string.split(str, pat) -- taken from the lua wiki +--~ local t = {n = 0} -- so this table has a length field, traverse with ipairs then! 
+--~ local fpat = "(.-)"..pat +--~ local last_end = 1 +--~ local s, e, cap = string.find(str, fpat, 1) +--~ while s ~= nil do +--~ if s~=1 or cap~="" then +--~ table.insert(t,cap) +--~ end +--~ last_end = e+1 +--~ s, e, cap = string.find(str, fpat, last_end) +--~ end +--~ if last_end<=string.len(str) then +--~ table.insert(t,(string.sub(str,last_end))) +--~ end +--~ return t +--~ end + +--~ function string:split(pat) -- taken from the lua wiki but adapted +--~ local t = { } -- self and colon usage (faster) +--~ local fpat = "(.-)"..pat +--~ local last_end = 1 +--~ local s, e, cap = self:find(fpat, 1) +--~ while s ~= nil do +--~ if s~=1 or cap~="" then +--~ t[#t+1] = cap +--~ end +--~ last_end = e+1 +--~ s, e, cap = self:find(fpat, last_end) +--~ end +--~ if last_end <= #self then +--~ t[#t+1] = self:sub(last_end) +--~ end +--~ return t +--~ end + +--~ a piece of brilliant code by Rici Lake (posted on lua list) -- only names changed +--~ +--~ function string:splitter(pat) +--~ local st, g = 1, self:gmatch("()"..pat.."()") +--~ local function splitter(self) +--~ if st then +--~ local s, f = g() +--~ local rv = self:sub(st, (s or 0)-1) +--~ st = f +--~ return rv +--~ end +--~ end +--~ return splitter, self +--~ end + +function string:splitter(pat) + -- by Rici Lake (posted on lua list) -- only names changed + -- p 79 ref man: () returns position of match + local st, g = 1, self:gmatch("()("..pat..")") + local function strgetter(self, segs, seps, sep, cap1, ...) + st = sep and seps + #sep + return self:sub(segs, (seps or 0) - 1), cap1 or sep, ... + end + local function strsplitter(self) + if st then return strgetter(self, st, g()) end + end + return strsplitter, self +end + +function string:split(separator) + local t = {} + for k in self:splitter(separator) do t[#t+1] = k end + return t +end + +-- faster than a string:split: + +function string:splitchr(chr) + if #self > 0 then + local t = { } + for s in string.gmatch(self..chr,"(.-)"..chr) do + t[#t+1] = s + end + return t + else + return { } + end +end + +--~ function string.piecewise(str, pat, fnc) -- variant of split +--~ local fpat = "(.-)"..pat +--~ local last_end = 1 +--~ local s, e, cap = string.find(str, fpat, 1) +--~ while s ~= nil do +--~ if s~=1 or cap~="" then +--~ fnc(cap) +--~ end +--~ last_end = e+1 +--~ s, e, cap = string.find(str, fpat, last_end) +--~ end +--~ if last_end <= #str then +--~ fnc((string.sub(str,last_end))) +--~ end +--~ end + +function string.piecewise(str, pat, fnc) -- variant of split + for k in string.splitter(str,pat) do fnc(k) end +end + +--~ function string.piecewise(str, pat, fnc) -- variant of split +--~ for k in str:splitter(pat) do fnc(k) end +--~ end + +--~ do if lpeg then + +--~ -- this alternative is 30% faster esp when we cache them +--~ -- problem: no expressions + +--~ splitters = { } + +--~ function string:split(separator) +--~ if #self > 0 then +--~ local split = splitters[separator] +--~ if not split then +--~ -- based on code by Roberto +--~ local p = lpeg.P(separator) +--~ local c = lpeg.C((1-p)^0) +--~ split = lpeg.Ct(c*(p*c)^0) +--~ splitters[separator] = split +--~ end +--~ return split:match(self) +--~ else +--~ return { } +--~ end +--~ end + +--~ string.splitchr = string.split + +--~ function string:piecewise(separator,fnc) +--~ for _,v in pairs(self:split(separator)) do +--~ fnc(v) +--~ end +--~ end + +--~ end end + +string.chr_to_esc = { + ["%"] = "%%", + ["."] = "%.", + ["+"] = "%+", ["-"] = "%-", ["*"] = "%*", + ["^"] = "%^", ["$"] = "%$", + ["["] = "%[", ["]"] = "%]", + ["("] = 
"%(", [")"] = "%)", + ["{"] = "%{", ["}"] = "%}" +} + +function string:esc() -- variant 2 + return (self:gsub("(.)",string.chr_to_esc)) +end + +function string.unquote(str) + return (str:gsub("^([\"\'])(.*)%1$","%2")) +end + +function string.quote(str) + return '"' .. str:unquote() .. '"' +end + +function string:count(pattern) -- variant 3 + local n = 0 + for _ in self:gmatch(pattern) do + n = n + 1 + end + return n +end + +function string:limit(n,sentinel) + if #self > n then + sentinel = sentinel or " ..." + return self:sub(1,(n-#sentinel)) .. sentinel + else + return self + end +end + +function string:strip() + return (self:gsub("^%s*(.-)%s*$", "%1")) +end + +--~ function string.strip(str) -- slightly different +--~ return (string.gsub(string.gsub(str,"^%s*(.-)%s*$","%1"),"%s+"," ")) +--~ end + +function string:is_empty() + return not self:find("%S") +end + +function string:enhance(pattern,action) + local ok, n = true, 0 + while ok do + ok = false + self = self:gsub(pattern, function(...) + ok, n = true, n + 1 + return action(...) + end) + end + return self, n +end + +--~ function string:enhance(pattern,action) +--~ local ok, n = 0, 0 +--~ repeat +--~ self, ok = self:gsub(pattern, function(...) +--~ n = n + 1 +--~ return action(...) +--~ end) +--~ until ok == 0 +--~ return self, n +--~ end + +--~ function string:to_hex() +--~ if self then +--~ return (self:gsub("(.)",function(c) +--~ return string.format("%02X",c:byte()) +--~ end)) +--~ else +--~ return "" +--~ end +--~ end + +--~ function string:from_hex() +--~ if self then +--~ return (self:gsub("(..)",function(c) +--~ return string.char(tonumber(c,16)) +--~ end)) +--~ else +--~ return "" +--~ end +--~ end + +string.chr_to_hex = { } +string.hex_to_chr = { } + +for i=0,255 do + local c, h = string.char(i), string.format("%02X",i) + string.chr_to_hex[c], string.hex_to_chr[h] = h, c +end + +--~ function string:to_hex() +--~ if self then return (self:gsub("(.)",string.chr_to_hex)) else return "" end +--~ end + +--~ function string:from_hex() +--~ if self then return (self:gsub("(..)",string.hex_to_chr)) else return "" end +--~ end + +function string:to_hex() + return ((self or ""):gsub("(.)",string.chr_to_hex)) +end + +function string:from_hex() + return ((self or ""):gsub("(..)",string.hex_to_chr)) +end + +if not string.characters then + + local function nextchar(str, index) + index = index + 1 + return (index <= #str) and index or nil, str:sub(index,index) + end + function string:characters() + return nextchar, self, 0 + end + local function nextbyte(str, index) + index = index + 1 + return (index <= #str) and index or nil, string.byte(str:sub(index,index)) + end + function string:bytes() + return nextbyte, self, 0 + end + +end + +--~ function string:padd(n,chr) +--~ return self .. self.rep(chr or " ",n-#self) +--~ end + +function string:rpadd(n,chr) + local m = n-#self + if m > 0 then + return self .. self.rep(chr or " ",m) + else + return self + end +end + +function string:lpadd(n,chr) + local m = n-#self + if m > 0 then + return self.rep(chr or " ",m) .. 
self + else + return self + end +end + +string.padd = string.rpadd + +function is_number(str) + return str:find("^[%-%+]?[%d]-%.?[%d+]$") == 1 +end + +--~ print(is_number("1")) +--~ print(is_number("1.1")) +--~ print(is_number(".1")) +--~ print(is_number("-0.1")) +--~ print(is_number("+0.1")) +--~ print(is_number("-.1")) +--~ print(is_number("+.1")) + +function string:split_settings() -- no {} handling, see l-aux for lpeg variant + if self:find("=") then + local t = { } + for k,v in self:gmatch("(%a+)=([^%,]*)") do + t[k] = v + end + return t + else + return nil + end +end + +local patterns_escapes = { + ["-"] = "%-", + ["."] = "%.", + ["+"] = "%+", + ["*"] = "%*", + ["%"] = "%%", + ["("] = "%)", + [")"] = "%)", + ["["] = "%[", + ["]"] = "%]", +} + +function string:pattesc() + return (self:gsub(".",patterns_escapes)) +end + +function string:tohash() + local t = { } + for s in self:gmatch("([^, ]+)") do -- lpeg + t[s] = true + end + return t +end + + +-- filename : l-lpeg.lua +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-lpeg'] = 1.001 + +--~ l-lpeg.lua : + +--~ lpeg.digit = lpeg.R('09')^1 +--~ lpeg.sign = lpeg.S('+-')^1 +--~ lpeg.cardinal = lpeg.P(lpeg.sign^0 * lpeg.digit^1) +--~ lpeg.integer = lpeg.P(lpeg.sign^0 * lpeg.digit^1) +--~ lpeg.float = lpeg.P(lpeg.sign^0 * lpeg.digit^0 * lpeg.P('.') * lpeg.digit^1) +--~ lpeg.number = lpeg.float + lpeg.integer +--~ lpeg.oct = lpeg.P("0") * lpeg.R('07')^1 +--~ lpeg.hex = lpeg.P("0x") * (lpeg.R('09') + lpeg.R('AF'))^1 +--~ lpeg.uppercase = lpeg.P("AZ") +--~ lpeg.lowercase = lpeg.P("az") + +--~ lpeg.eol = lpeg.S('\r\n\f')^1 -- includes formfeed +--~ lpeg.space = lpeg.S(' ')^1 +--~ lpeg.nonspace = lpeg.P(1-lpeg.space)^1 +--~ lpeg.whitespace = lpeg.S(' \r\n\f\t')^1 +--~ lpeg.nonwhitespace = lpeg.P(1-lpeg.whitespace)^1 + +local hash = { } + +function lpeg.anywhere(pattern) --slightly adapted from website + return lpeg.P { lpeg.P(pattern) + 1 * lpeg.V(1) } +end + +function lpeg.startswith(pattern) --slightly adapted + return lpeg.P(pattern) +end + +--~ g = lpeg.splitter(" ",function(s) ... 
end) -- gmatch:lpeg = 3:2 + +function lpeg.splitter(pattern, action) + return (((1-lpeg.P(pattern))^1)/action+1)^0 +end + +local crlf = lpeg.P("\r\n") +local cr = lpeg.P("\r") +local lf = lpeg.P("\n") +local space = lpeg.S(" \t\f\v") +local newline = crlf + cr + lf +local spacing = space^0 * newline + +local empty = spacing * lpeg.Cc("") +local nonempty = lpeg.Cs((1-spacing)^1) * spacing^-1 +local content = (empty + nonempty)^1 + +local capture = lpeg.Ct(content^0) + +function string:splitlines() + return capture:match(self) +end + + +-- filename : l-table.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-table'] = 1.001 + +table.join = table.concat + +function table.strip(tab) + local lst = { } + for k, v in ipairs(tab) do + -- s = string.gsub(v, "^%s*(.-)%s*$", "%1") + s = v:gsub("^%s*(.-)%s*$", "%1") + if s == "" then + -- skip this one + else + lst[#lst+1] = s + end + end + return lst +end + +--~ function table.sortedkeys(tab) +--~ local srt = { } +--~ for key,_ in pairs(tab) do +--~ srt[#srt+1] = key +--~ end +--~ table.sort(srt) +--~ return srt +--~ end + +function table.sortedkeys(tab) + local srt, kind = { }, 0 -- 0=unknown 1=string, 2=number 3=mixed + for key,_ in pairs(tab) do + srt[#srt+1] = key + if kind == 3 then + -- no further check + else + local tkey = type(key) + if tkey == "string" then + -- if kind == 2 then kind = 3 else kind = 1 end + kind = (kind == 2 and 3) or 1 + elseif tkey == "number" then + -- if kind == 1 then kind = 3 else kind = 2 end + kind = (kind == 1 and 3) or 2 + else + kind = 3 + end + end + end + if kind == 0 or kind == 3 then + table.sort(srt,function(a,b) return (tostring(a) < tostring(b)) end) + else + table.sort(srt) + end + return srt +end + +function table.append(t, list) + for _,v in pairs(list) do + table.insert(t,v) + end +end + +function table.prepend(t, list) + for k,v in pairs(list) do + table.insert(t,k,v) + end +end + +function table.merge(t, ...) -- first one is target + t = t or {} + local lst = {...} + for i=1,#lst do + for k, v in pairs(lst[i]) do + t[k] = v + end + end + return t +end + +function table.merged(...) + local tmp, lst = { }, {...} + for i=1,#lst do + for k, v in pairs(lst[i]) do + tmp[k] = v + end + end + return tmp +end + +function table.imerge(t, ...) + local lst = {...} + for i=1,#lst do + local nst = lst[i] + for j=1,#nst do + t[#t+1] = nst[j] + end + end + return t +end + +function table.imerged(...) 
+ local tmp, lst = { }, {...} + for i=1,#lst do + local nst = lst[i] + for j=1,#nst do + tmp[#tmp+1] = nst[j] + end + end + return tmp +end + +if not table.fastcopy then do + + local type, pairs, getmetatable, setmetatable = type, pairs, getmetatable, setmetatable + + local function fastcopy(old) -- fast one + if old then + local new = { } + for k,v in pairs(old) do + if type(v) == "table" then + new[k] = fastcopy(v) -- was just table.copy + else + new[k] = v + end + end + local mt = getmetatable(old) + if mt then + setmetatable(new,mt) + end + return new + else + return { } + end + end + + table.fastcopy = fastcopy + +end end + +if not table.copy then do + + local type, pairs, getmetatable, setmetatable = type, pairs, getmetatable, setmetatable + + local function copy(t, tables) -- taken from lua wiki, slightly adapted + tables = tables or { } + local tcopy = {} + if not tables[t] then + tables[t] = tcopy + end + for i,v in pairs(t) do -- brrr, what happens with sparse indexed + if type(i) == "table" then + if tables[i] then + i = tables[i] + else + i = copy(i, tables) + end + end + if type(v) ~= "table" then + tcopy[i] = v + elseif tables[v] then + tcopy[i] = tables[v] + else + tcopy[i] = copy(v, tables) + end + end + local mt = getmetatable(t) + if mt then + setmetatable(tcopy,mt) + end + return tcopy + end + + table.copy = copy + +end end + +-- rougly: copy-loop : unpack : sub == 0.9 : 0.4 : 0.45 (so in critical apps, use unpack) + +function table.sub(t,i,j) + return { unpack(t,i,j) } +end + +function table.replace(a,b) + for k,v in pairs(b) do + a[k] = v + end +end + +-- slower than #t on indexed tables (#t only returns the size of the numerically indexed slice) + +function table.is_empty(t) + return not t or not next(t) +end + +function table.one_entry(t) + local n = next(t) + return n and not next(t,n) +end + +function table.starts_at(t) + return ipairs(t,1)(t,0) +end + +do + + -- one of my first exercises in lua ... 
+ + -- 34.055.092 32.403.326 arabtype.tma + -- 1.620.614 1.513.863 lmroman10-italic.tma + -- 1.325.585 1.233.044 lmroman10-regular.tma + -- 1.248.157 1.158.903 lmsans10-regular.tma + -- 194.646 153.120 lmtypewriter10-regular.tma + -- 1.771.678 1.658.461 palatinosanscom-bold.tma + -- 1.695.251 1.584.491 palatinosanscom-regular.tma + -- 13.736.534 13.409.446 zapfinoextraltpro.tma + + -- 13.679.038 11.774.106 arabtype.tmc + -- 886.248 754.944 lmroman10-italic.tmc + -- 729.828 466.864 lmroman10-regular.tmc + -- 688.482 441.962 lmsans10-regular.tmc + -- 128.685 95.853 lmtypewriter10-regular.tmc + -- 715.929 582.985 palatinosanscom-bold.tmc + -- 669.942 540.126 palatinosanscom-regular.tmc + -- 1.560.588 1.317.000 zapfinoextraltpro.tmc + + table.serialize_functions = true + table.serialize_compact = true + table.serialize_inline = true + + local function key(k) + if type(k) == "number" then -- or k:find("^%d+$") then + return "["..k.."]" + elseif noquotes and k:find("^%a[%a%d%_]*$") then + return k + else + return '["'..k..'"]' + end + end + + local function simple_table(t) + if #t > 0 then + local n = 0 + for _,v in pairs(t) do + n = n + 1 + end + if n == #t then + local tt = { } + for i=1,#t do + local v = t[i] + local tv = type(v) + if tv == "number" or tv == "boolean" then + tt[#tt+1] = tostring(v) + elseif tv == "string" then + tt[#tt+1] = ("%q"):format(v) + else + tt = nil + break + end + end + return tt + end + end + return nil + end + + local function serialize(root,name,handle,depth,level,reduce,noquotes,indexed) + handle = handle or print + reduce = reduce or false + if depth then + depth = depth .. " " + if indexed then + handle(("%s{"):format(depth)) + else + handle(("%s%s={"):format(depth,key(name))) + end + else + depth = "" + local tname = type(name) + if tname == "string" then + if name == "return" then + handle("return {") + else + handle(name .. "={") + end + elseif tname == "number" then + handle("[" .. name .. 
"]={") + elseif tname == "boolean" then + if name then + handle("return {") + else + handle("{") + end + else + handle("t={") + end + end + if root and next(root) then + local compact = table.serialize_compact + local inline = compact and table.serialize_inline + local first, last = nil, 0 -- #root cannot be trusted here + if compact then + for k,v in ipairs(root) do -- NOT: for k=1,#root do (why) + if not first then first = k end + last = last + 1 + end + end + for _,k in pairs(table.sortedkeys(root)) do + local v = root[k] + local t = type(v) + if compact and first and type(k) == "number" and k >= first and k <= last then + if t == "number" then + handle(("%s %s,"):format(depth,v)) + elseif t == "string" then + if reduce and (v:find("^[%-%+]?[%d]-%.?[%d+]$") == 1) then + handle(("%s %s,"):format(depth,v)) + else + handle(("%s %q,"):format(depth,v)) + end + elseif t == "table" then + if not next(v) then + handle(("%s {},"):format(depth)) + elseif inline then + local st = simple_table(v) + if st then + handle(("%s { %s },"):format(depth,table.concat(st,", "))) + else + serialize(v,k,handle,depth,level+1,reduce,noquotes,true) + end + else + serialize(v,k,handle,depth,level+1,reduce,noquotes,true) + end + elseif t == "boolean" then + handle(("%s %s,"):format(depth,tostring(v))) + elseif t == "function" then + if table.serialize_functions then + handle(('%s loadstring(%q),'):format(depth,string.dump(v))) + else + handle(('%s "function",'):format(depth)) + end + else + handle(("%s %q,"):format(depth,tostring(v))) + end + elseif k == "__p__" then -- parent + if false then + handle(("%s __p__=nil,"):format(depth)) + end + elseif t == "number" then + handle(("%s %s=%s,"):format(depth,key(k),v)) + elseif t == "string" then + if reduce and (v:find("^[%-%+]?[%d]-%.?[%d+]$") == 1) then + handle(("%s %s=%s,"):format(depth,key(k),v)) + else + handle(("%s %s=%q,"):format(depth,key(k),v)) + end + elseif t == "table" then + if not next(v) then + handle(("%s %s={},"):format(depth,key(k))) + elseif inline then + local st = simple_table(v) + if st then + handle(("%s %s={ %s },"):format(depth,key(k),table.concat(st,", "))) + else + serialize(v,k,handle,depth,level+1,reduce,noquotes) + end + else + serialize(v,k,handle,depth,level+1,reduce,noquotes) + end + elseif t == "boolean" then + handle(("%s %s=%s,"):format(depth,key(k),tostring(v))) + elseif t == "function" then + if table.serialize_functions then + handle(('%s %s=loadstring(%q),'):format(depth,key(k),string.dump(v))) + else + handle(('%s %s="function",'):format(depth,key(k))) + end + else + handle(("%s %s=%q,"):format(depth,key(k),tostring(v))) + -- handle(('%s %s=loadstring(%q),'):format(depth,key(k),string.dump(function() return v end))) + end + end + if level > 0 then + handle(("%s},"):format(depth)) + else + handle(("%s}"):format(depth)) + end + else + handle(("%s}"):format(depth)) + end + end + + --~ name: + --~ + --~ true : return { } + --~ false : { } + --~ nil : t = { } + --~ string : string = { } + --~ 'return' : return { } + --~ number : [number] = { } + + function table.serialize(root,name,reduce,noquotes) + local t = { } + local function flush(s) + t[#t+1] = s + end + serialize(root, name, flush, nil, 0, reduce, noquotes) + return table.concat(t,"\n") + end + + function table.tohandle(handle,root,name,reduce,noquotes) + serialize(root, name, handle, nil, 0, reduce, noquotes) + end + + -- sometimes tables are real use (zapfino extra pro is some 85M) in which + -- case a stepwise serialization is nice; actually, we could consider: + -- + -- 
for line in table.serializer(root,name,reduce,noquotes) do + -- ...(line) + -- end + -- + -- so this is on the todo list + + table.tofile_maxtab = 2*1024 + + function table.tofile(filename,root,name,reduce,noquotes) + local f = io.open(filename,'w') + if f then + local concat = table.concat + local maxtab = table.tofile_maxtab + if maxtab > 1 then + local t = { } + local function flush(s) + t[#t+1] = s + if #t > maxtab then + f:write(concat(t,"\n"),"\n") -- hm, write(sometable) should be nice + t = { } + end + end + serialize(root, name, flush, nil, 0, reduce, noquotes) + f:write(concat(t,"\n"),"\n") + else + local function flush(s) + f:write(s,"\n") + end + serialize(root, name, flush, nil, 0, reduce, noquotes) + end + f:close() + end + end + +end + +--~ t = { +--~ b = "123", +--~ a = "x", +--~ c = 1.23, +--~ d = "1.23", +--~ e = true, +--~ f = { +--~ d = "1.23", +--~ a = "x", +--~ b = "123", +--~ c = 1.23, +--~ e = true, +--~ f = { +--~ e = true, +--~ f = { +--~ e = true +--~ }, +--~ }, +--~ }, +--~ g = function() end +--~ } + +--~ print(table.serialize(t), "\n") +--~ print(table.serialize(t,"name"), "\n") +--~ print(table.serialize(t,false), "\n") +--~ print(table.serialize(t,true), "\n") +--~ print(table.serialize(t,"name",true), "\n") +--~ print(table.serialize(t,"name",true,true), "\n") + +do + + local function flatten(t,f,complete) + for i=1,#t do + local v = t[i] + if type(v) == "table" then + if complete or type(v[1]) == "table" then + flatten(v,f,complete) + else + f[#f+1] = v + end + else + f[#f+1] = v + end + end + end + + function table.flatten(t) + local f = { } + flatten(t,f,true) + return f + end + + function table.unnest(t) -- bad name + local f = { } + flatten(t,f,false) + return f + end + + table.flatten_one_level = table.unnest + +end + +function table.insert_before_value(t,value,str) + for i=1,#t do + if t[i] == value then + table.insert(t,i,str) + return + end + end + table.insert(t,1,str) +end + +function table.insert_after_value(t,value,str) + for i=1,#t do + if t[i] == value then + table.insert(t,i+1,str) + return + end + end + t[#t+1] = str +end + +function table.are_equal(a,b,n,m) + if #a == #b then + n = n or 1 + m = m or #a + for i=n,m do + local ai, bi = a[i], b[i] + if (ai==bi) or (type(ai)=="table" and type(bi)=="table" and table.are_equal(ai,bi)) then + -- continue + else + return false + end + end + return true + else + return false + end +end + +function table.compact(t) + if t then + for k,v in pairs(t) do + if not next(v) then + t[k] = nil + end + end + end +end + +function table.tohash(t) + local h = { } + for _, v in pairs(t) do -- no ipairs here + h[v] = true + end + return h +end + +function table.fromhash(t) + local h = { } + for k, v in pairs(t) do -- no ipairs here + if v then h[#h+1] = k end + end + return h +end + +function table.contains(t, v) + if t then + for i=1, #t do + if t[i] == v then + return true + end + end + end + return false +end + +function table.count(t) + local n, e = 0, next(t) + while e do + n, e = n + 1, next(t,e) + end + return n +end + +function table.swapped(t) + local s = { } + for k, v in pairs(t) do + s[v] = k + end + return s +end + +--~ function table.are_equal(a,b) +--~ return table.serialize(a) == table.serialize(b) +--~ end + +function table.clone(t,p) -- t is optional or nil or table + if not p then + t, p = { }, t or { } + elseif not t then + t = { } + end + setmetatable(t, { __index = function(_,key) return p[key] end }) + return t +end + + +function table.hexed(t,seperator) + local tt = { } + for i=1,#t do 
tt[i] = string.format("0x%04X",t[i]) end + return table.concat(tt,seperator or " ") +end + +function table.reverse_hash(h) + local r = { } + for k,v in pairs(h) do + r[v] = (k:gsub(" ","")):lower() + end + return r +end + +function table.reverse(t) + local tt = { } + if #t > 0 then + for i=#t,1,-1 do + tt[#tt+1] = t[i] + end + end + return tt +end + + +-- filename : l-io.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-io'] = 1.001 + +if string.find(os.getenv("PATH"),";") then + io.fileseparator, io.pathseparator = "\\", ";" +else + io.fileseparator, io.pathseparator = "/" , ":" +end + +function io.loaddata(filename) + local f = io.open(filename,'rb') + if f then + local data = f:read('*all') + f:close() + return data + else + return nil + end +end + +function io.savedata(filename,data,joiner) + local f = io.open(filename, "wb") + if f then + if type(data) == "table" then + f:write(table.join(data,joiner or "")) + elseif type(data) == "function" then + data(f) + else + f:write(data) + end + f:close() + end +end + +function io.exists(filename) + local f = io.open(filename) + if f == nil then + return false + else + assert(f:close()) + return true + end +end + +function io.size(filename) + local f = io.open(filename) + if f == nil then + return 0 + else + local s = f:seek("end") + assert(f:close()) + return s + end +end + +function io.noflines(f) + local n = 0 + for _ in f:lines() do + n = n + 1 + end + f:seek('set',0) + return n +end + +do + + local sb = string.byte + + local nextchar = { + [ 4] = function(f) + return f:read(1,1,1,1) + end, + [ 2] = function(f) + return f:read(1,1) + end, + [ 1] = function(f) + return f:read(1) + end, + [-2] = function(f) + local a, b = f:read(1,1) + return b, a + end, + [-4] = function(f) + local a, b, c, d = f:read(1,1,1,1) + return d, c, b, a + end + } + + function io.characters(f,n) + if f then + return nextchar[n or 1], f + else + return nil, nil + end + end + +end + +do + + local sb = string.byte + +--~ local nextbyte = { +--~ [4] = function(f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ local c = f:read(1) +--~ local d = f:read(1) +--~ if d then +--~ return sb(a), sb(b), sb(c), sb(d) +--~ else +--~ return nil, nil, nil, nil +--~ end +--~ end, +--~ [2] = function(f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ if b then +--~ return sb(a), sb(b) +--~ else +--~ return nil, nil +--~ end +--~ end, +--~ [1] = function (f) +--~ local a = f:read(1) +--~ if a then +--~ return sb(a) +--~ else +--~ return nil +--~ end +--~ end, +--~ [-2] = function (f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ if b then +--~ return sb(b), sb(a) +--~ else +--~ return nil, nil +--~ end +--~ end, +--~ [-4] = function(f) +--~ local a = f:read(1) +--~ local b = f:read(1) +--~ local c = f:read(1) +--~ local d = f:read(1) +--~ if d then +--~ return sb(d), sb(c), sb(b), sb(a) +--~ else +--~ return nil, nil, nil, nil +--~ end +--~ end +--~ } + + local nextbyte = { + [4] = function(f) + local a, b, c, d = f:read(1,1,1,1) + if d then + return sb(a), sb(b), sb(c), sb(d) + else + return nil, nil, nil, nil + end + end, + [2] = function(f) + local a, b = f:read(1,1) + if b then + return sb(a), sb(b) + else + return nil, nil + end + end, + [1] = function (f) + local a = f:read(1) + if a then + return sb(a) + else + return nil + end + end, + [-2] = function (f) + local 
a, b = f:read(1,1) + if b then + return sb(b), sb(a) + else + return nil, nil + end + end, + [-4] = function(f) + local a, b, c, d = f:read(1,1,1,1) + if d then + return sb(d), sb(c), sb(b), sb(a) + else + return nil, nil, nil, nil + end + end + } + + function io.bytes(f,n) + if f then + return nextbyte[n or 1], f + else + return nil, nil + end + end + +end + +function io.ask(question,default,options) + while true do + io.write(question) + if options then + io.write(string.format(" [%s]",table.concat(options,"|"))) + end + if default then + io.write(string.format(" [%s]",default)) + end + io.write(string.format(" ")) + local answer = io.read() + answer = answer:gsub("^%s*(.*)%s*$","%1") + if answer == "" and default then + return default + elseif not options then + return answer + else + for _,v in pairs(options) do + if v == answer then + return answer + end + end + local pattern = "^" .. answer + for _,v in pairs(options) do + if v:find(pattern) then + return v + end + end + end + end +end + + +-- filename : l-md5.lua +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-md5'] = 1.001 + +if md5 then do + + local function convert(str,fmt) + return (string.gsub(md5.sum(str),".",function(chr) return string.format(fmt,string.byte(chr)) end)) + end + + if not md5.HEX then function md5.HEX(str) return convert(str,"%02X") end end + if not md5.hex then function md5.hex(str) return convert(str,"%02x") end end + if not md5.dec then function md5.dec(str) return convert(str,"%03i") end end + +end end + + +-- filename : l-number.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-number'] = 1.001 + +if not number then number = { } end + +-- a,b,c,d,e,f = number.toset(100101) + +function number.toset(n) + return (tostring(n)):match("(.?)(.?)(.?)(.?)(.?)(.?)(.?)(.?)") +end + +local format = string.format + +function number.toevenhex(n) + local s = format("%X",n) + if #s % 2 == 0 then + return s + else + return "0" .. s + end +end + +-- the lpeg way is slower on 8 digits, but faster on 4 digits, some 7.5% +-- on +-- +-- for i=1,1000000 do +-- local a,b,c,d,e,f,g,h = number.toset(12345678) +-- local a,b,c,d = number.toset(1234) +-- local a,b,c = number.toset(123) +-- end +-- +-- of course dedicated "(.)(.)(.)(.)" matches are even faster + +do + local one = lpeg.C(1-lpeg.S(''))^1 + + function number.toset(n) + return one:match(tostring(n)) + end +end + + + +-- filename : l-set.lua +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-set'] = 1.001 + +if not set then set = { } end + +do + + local nums = { } + local tabs = { } + local concat = table.concat + + set.create = table.tohash + + function set.tonumber(t) + if next(t) then + local s = "" + -- we could save mem by sorting, but it slows down + for k, v in pairs(t) do + if v then + -- why bother about the leading space + s = s .. " " .. 
k + end + end + if not nums[s] then + tabs[#tabs+1] = t + nums[s] = #tabs + end + return nums[s] + else + return 0 + end + end + + function set.totable(n) + if n == 0 then + return { } + else + return tabs[n] or { } + end + end + + function set.contains(n,s) + if type(n) == "table" then + return n[s] + elseif n == 0 then + return false + else + local t = tabs[n] + return t and t[s] + end + end + +end + +--~ local c = set.create{'aap','noot','mies'} +--~ local s = set.tonumber(c) +--~ local t = set.totable(s) +--~ print(t['aap']) +--~ local c = set.create{'zus','wim','jet'} +--~ local s = set.tonumber(c) +--~ local t = set.totable(s) +--~ print(t['aap']) +--~ print(t['jet']) +--~ print(set.contains(t,'jet')) +--~ print(set.contains(t,'aap')) + + + +-- filename : l-os.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-os'] = 1.001 + +function os.resultof(command) + return io.popen(command,"r"):read("*all") +end + +if not os.exec then os.exec = os.execute end +if not os.spawn then os.spawn = os.execute end + +--~ os.type : windows | unix (new, we already guessed os.platform) +--~ os.name : windows | msdos | linux | macosx | solaris | .. | generic (new) + +if not io.fileseparator then + if string.find(os.getenv("PATH"),";") then + io.fileseparator, io.pathseparator, os.platform = "\\", ";", os.type or "windows" + else + io.fileseparator, io.pathseparator, os.platform = "/" , ":", os.type or "unix" + end +end + +os.platform = os.platform or os.type or (io.pathseparator == ";" and "windows") or "unix" + +function os.launch(str) + if os.platform == "windows" then + os.execute("start " .. str) -- os.spawn ? + else + os.execute(str .. " &") -- os.spawn ? + end +end + +if not os.setenv then + function os.setenv() return false end +end + +if not os.times then + -- utime = user time + -- stime = system time + -- cutime = children user time + -- cstime = children system time + function os.times() + return { + utime = os.gettimeofday(), -- user + stime = 0, -- system + cutime = 0, -- children user + cstime = 0, -- children system + } + end +end + +os.gettimeofday = os.gettimeofday or os.clock + +do + local startuptime = os.gettimeofday() + function os.runtime() + return os.gettimeofday() - startuptime + end +end + +--~ print(os.gettimeofday()-os.time()) +--~ os.sleep(1.234) +--~ print (">>",os.runtime()) +--~ print(os.date("%H:%M:%S",os.gettimeofday())) +--~ print(os.date("%H:%M:%S",os.time())) + + +-- filename : l-file.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-file'] = 1.001 + +if not file then file = { } end + +function file.removesuffix(filename) + return filename:gsub("%.[%a%d]+$", "") +end + +function file.addsuffix(filename, suffix) + if not filename:find("%.[%a%d]+$") then + return filename .. "." .. suffix + else + return filename + end +end + +function file.replacesuffix(filename, suffix) + if not filename:find("%.[%a%d]+$") then + return filename .. "." .. 
suffix + else + return (filename:gsub("%.[%a%d]+$","."..suffix)) + end +end + +function file.dirname(name) + return name:match("^(.+)[/\\].-$") or "" +end + +function file.basename(name) + return name:match("^.+[/\\](.-)$") or name +end + +function file.nameonly(name) + return ((name:match("^.+[/\\](.-)$") or name):gsub("%..*$","")) +end + +function file.extname(name) + return name:match("^.+%.([^/\\]-)$") or "" +end + +file.suffix = file.extname + +function file.stripsuffix(name) + return (name:gsub("%.[%a%d]+$","")) +end + +--~ function file.join(...) +--~ local t = { ... } +--~ for i=1,#t do +--~ t[i] = (t[i]:gsub("\\","/")):gsub("/+$","") +--~ end +--~ return table.concat(t,"/") +--~ end + +--~ print(file.join("x/","/y")) +--~ print(file.join("http://","/y")) +--~ print(file.join("http://a","/y")) +--~ print(file.join("http:///a","/y")) +--~ print(file.join("//nas-1","/y")) + +function file.join(...) + local pth = table.concat({...},"/") + pth = pth:gsub("\\","/") + local a, b = pth:match("^(.*://)(.*)$") + if a and b then + return a .. b:gsub("//+","/") + end + a, b = pth:match("^(//)(.*)$") + if a and b then + return a .. b:gsub("//+","/") + end + return (pth:gsub("//+","/")) +end + +function file.is_writable(name) + local f = io.open(name, 'w') + if f then + f:close() + return true + else + return false + end +end + +function file.is_readable(name) + local f = io.open(name,'r') + if f then + f:close() + return true + else + return false + end +end + +--~ function file.split_path(str) +--~ if str:find(';') then +--~ return str:splitchr(";") +--~ else +--~ return str:splitchr(io.pathseparator) +--~ end +--~ end + +-- todo: lpeg + +function file.split_path(str) + local t = { } + str = str:gsub("\\", "/") + str = str:gsub("(%a):([;/])", "%1\001%2") + for name in str:gmatch("([^;:]+)") do + if name ~= "" then + name = name:gsub("\001",":") + t[#t+1] = name + end + end + return t +end + +function file.join_path(tab) + return table.concat(tab,io.pathseparator) -- can have trailing // +end + +--~ print('test' .. " == " .. file.collapse_path("test")) +--~ print("test/test" .. " == " .. file.collapse_path("test/test")) +--~ print("test/test/test" .. " == " .. file.collapse_path("test/test/test")) +--~ print("test/test" .. " == " .. file.collapse_path("test/../test/test")) +--~ print("test" .. " == " .. file.collapse_path("test/../test")) +--~ print("../test" .. " == " .. file.collapse_path("../test")) +--~ print("../test/" .. " == " .. file.collapse_path("../test/")) +--~ print("a/a" .. " == " .. 
file.collapse_path("a/b/c/../../a")) + +--~ function file.collapse_path(str) +--~ local ok, n = false, 0 +--~ while not ok do +--~ ok = true +--~ str, n = str:gsub("[^%./]+/%.%./", function(s) +--~ ok = false +--~ return "" +--~ end) +--~ end +--~ return (str:gsub("/%./","/")) +--~ end + +function file.collapse_path(str) + local n = 1 + while n > 0 do + str, n = str:gsub("([^/%.]+/%.%./)","") + end + return (str:gsub("/%./","/")) +end + +function file.robustname(str) + return (str:gsub("[^%a%d%/%-%.\\]+","-")) +end + +file.readdata = io.loaddata +file.savedata = io.savedata + +function file.copy(oldname,newname) + file.savedata(newname,io.loaddata(oldname)) +end + + +-- filename : l-dir.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-dir'] = 1.001 + +dir = { } + +-- optimizing for no string.find (*) does not save time + +if lfs then do + +--~ local attributes = lfs.attributes +--~ local walkdir = lfs.dir +--~ +--~ local function glob_pattern(path,patt,recurse,action) +--~ local ok, scanner = xpcall(function() return walkdir(path) end, function() end) -- kepler safe +--~ if ok and type(scanner) == "function" then +--~ if not path:find("/$") then path = path .. '/' end +--~ for name in scanner do +--~ local full = path .. name +--~ local mode = attributes(full,'mode') +--~ if mode == 'file' then +--~ if name:find(patt) then +--~ action(full) +--~ end +--~ elseif recurse and (mode == "directory") and (name ~= '.') and (name ~= "..") then +--~ glob_pattern(full,patt,recurse,action) +--~ end +--~ end +--~ end +--~ end +--~ +--~ dir.glob_pattern = glob_pattern +--~ +--~ local function glob(pattern, action) +--~ local t = { } +--~ local action = action or function(name) t[#t+1] = name end +--~ local path, patt = pattern:match("^(.*)/*%*%*/*(.-)$") +--~ local recurse = path and patt +--~ if not recurse then +--~ path, patt = pattern:match("^(.*)/(.-)$") +--~ if not (path and patt) then +--~ path, patt = '.', pattern +--~ end +--~ end +--~ patt = patt:gsub("([%.%-%+])", "%%%1") +--~ patt = patt:gsub("%*", ".*") +--~ patt = patt:gsub("%?", ".") +--~ patt = "^" .. patt .. "$" +--~ -- print('path: ' .. path .. ' | pattern: ' .. patt .. ' | recurse: ' .. tostring(recurse)) +--~ glob_pattern(path,patt,recurse,action) +--~ return t +--~ end +--~ +--~ dir.glob = glob + + local attributes = lfs.attributes + local walkdir = lfs.dir + + local function glob_pattern(path,patt,recurse,action) + local ok, scanner + if path == "/" then + ok, scanner = xpcall(function() return walkdir(path..".") end, function() end) -- kepler safe + else + ok, scanner = xpcall(function() return walkdir(path) end, function() end) -- kepler safe + end + if ok and type(scanner) == "function" then + if not path:find("/$") then path = path .. '/' end + for name in scanner do + local full = path .. 
name + local mode = attributes(full,'mode') + if mode == 'file' then + if full:find(patt) then + action(full) + end + elseif recurse and (mode == "directory") and (name ~= '.') and (name ~= "..") then + glob_pattern(full,patt,recurse,action) + end + end + end + end + + dir.glob_pattern = glob_pattern + + --~ local function glob(pattern, action) + --~ local t = { } + --~ local path, rest, patt, recurse + --~ local action = action or function(name) t[#t+1] = name end + --~ local pattern = pattern:gsub("^%*%*","./**") + --~ local pattern = pattern:gsub("/%*/","/**/") + --~ path, rest = pattern:match("^(/)(.-)$") + --~ if path then + --~ path = path + --~ else + --~ path, rest = pattern:match("^([^/]*)/(.-)$") + --~ end + --~ if rest then + --~ patt = rest:gsub("([%.%-%+])", "%%%1") + --~ end + --~ patt = patt:gsub("%*", "[^/]*") + --~ patt = patt:gsub("%?", "[^/]") + --~ patt = patt:gsub("%[%^/%]%*%[%^/%]%*", ".*") + --~ if path == "" then path = "." end + --~ recurse = patt:find("%.%*/") ~= nil + --~ glob_pattern(path,patt,recurse,action) + --~ return t + --~ end + + local P, S, R, C, Cc, Cs, Ct, Cv, V = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.Cc, lpeg.Cs, lpeg.Ct, lpeg.Cv, lpeg.V + + local pattern = Ct { + [1] = (C(P(".") + P("/")^1) + C(R("az","AZ") * P(":") * P("/")^0) + Cc("./")) * V(2) * V(3), + [2] = C(((1-S("*?/"))^0 * P("/"))^0), + [3] = C(P(1)^0) + } + + local filter = Cs ( ( + P("**") / ".*" + + P("*") / "[^/]*" + + P("?") / "[^/]" + + P(".") / "%%." + + P("+") / "%%+" + + P("-") / "%%-" + + P(1) + )^0 ) + + local function glob(str,t) + if type(str) == "table" then + local t = t or { } + for _, s in ipairs(str) do + glob(s,t) + end + return t + else + local split = pattern:match(str) + if split then + local t = t or { } + local action = action or function(name) t[#t+1] = name end + local root, path, base = split[1], split[2], split[3] + local recurse = base:find("**") + local start = root .. path + local result = filter:match(start .. base) + glob_pattern(start,result,recurse,action) + return t + else + return { } + end + end + end + + dir.glob = glob + + --~ list = dir.glob("**/*.tif") + --~ list = dir.glob("/**/*.tif") + --~ list = dir.glob("./**/*.tif") + --~ list = dir.glob("oeps/**/*.tif") + --~ list = dir.glob("/oeps/**/*.tif") + + local function globfiles(path,recurse,func,files) -- func == pattern or function + if type(func) == "string" then + local s = func -- alas, we need this indirect way + func = function(name) return name:find(s) end + end + files = files or { } + for name in walkdir(path) do + if name:find("^%.") then + --- skip + elseif attributes(name,'mode') == "directory" then + if recurse then + globfiles(path .. "/" .. name,recurse,func,files) + end + elseif func then + if func(name) then + files[#files+1] = path .. "/" .. name + end + else + files[#files+1] = path .. "/" .. name + end + end + return files + end + + dir.globfiles = globfiles + + -- t = dir.glob("c:/data/develop/context/sources/**/????-*.tex") + -- t = dir.glob("c:/data/develop/tex/texmf/**/*.tex") + -- t = dir.glob("c:/data/develop/context/texmf/**/*.tex") + -- t = dir.glob("f:/minimal/tex/**/*") + -- print(dir.ls("f:/minimal/tex/**/*")) + -- print(dir.ls("*.tex")) + + function dir.ls(pattern) + return table.concat(glob(pattern),"\n") + end + + --~ mkdirs("temp") + --~ mkdirs("a/b/c") + --~ mkdirs(".","/a/b/c") + --~ mkdirs("a","b","c") + + local make_indeed = true -- false + + if string.find(os.getenv("PATH"),";") then + + function dir.mkdirs(...) 
+ local str, pth = "", "" + for _, s in ipairs({...}) do + if s ~= "" then + if str ~= "" then + str = str .. "/" .. s + else + str = s + end + end + end + local first, middle, last + local drive = false + first, middle, last = str:match("^(//)(//*)(.*)$") + if first then + -- empty network path == local path + else + first, last = str:match("^(//)/*(.-)$") + if first then + middle, last = str:match("([^/]+)/+(.-)$") + if middle then + pth = "//" .. middle + else + pth = "//" .. last + last = "" + end + else + first, middle, last = str:match("^([a-zA-Z]:)(/*)(.-)$") + if first then + pth, drive = first .. middle, true + else + middle, last = str:match("^(/*)(.-)$") + if not middle then + last = str + end + end + end + end + for s in last:gmatch("[^/]+") do + if pth == "" then + pth = s + elseif drive then + pth, drive = pth .. s, false + else + pth = pth .. "/" .. s + end + if make_indeed and not lfs.isdir(pth) then + lfs.mkdir(pth) + end + end + return pth, (lfs.isdir(pth) == true) + end + +--~ print(dir.mkdirs("","","a","c")) +--~ print(dir.mkdirs("a")) +--~ print(dir.mkdirs("a:")) +--~ print(dir.mkdirs("a:/b/c")) +--~ print(dir.mkdirs("a:b/c")) +--~ print(dir.mkdirs("a:/bbb/c")) +--~ print(dir.mkdirs("/a/b/c")) +--~ print(dir.mkdirs("/aaa/b/c")) +--~ print(dir.mkdirs("//a/b/c")) +--~ print(dir.mkdirs("///a/b/c")) +--~ print(dir.mkdirs("a/bbb//ccc/")) + + function dir.expand_name(str) + local first, nothing, last = str:match("^(//)(//*)(.*)$") + if first then + first = lfs.currentdir() .. "/" + first = first:gsub("\\","/") + end + if not first then + first, last = str:match("^(//)/*(.*)$") + end + if not first then + first, last = str:match("^([a-zA-Z]:)(.*)$") + if first and not last:find("^/") then + local d = lfs.currentdir() + if lfs.chdir(first) then + first = lfs.currentdir() + first = first:gsub("\\","/") + end + lfs.chdir(d) + end + end + if not first then + first, last = lfs.currentdir(), str + first = first:gsub("\\","/") + end + last = last:gsub("//","/") + last = last:gsub("/%./","/") + last = last:gsub("^/*","") + first = first:gsub("/*$","") + if last == "" then + return first + else + return first .. "/" .. last + end + end + + else + + function dir.mkdirs(...) + local str, pth = "", "" + for _, s in ipairs({...}) do + if s ~= "" then + if str ~= "" then + str = str .. "/" .. s + else + str = s + end + end + end + str = str:gsub("/+","/") + if str:find("^/") then + pth = "/" + for s in str:gmatch("[^/]+") do + local first = (pth == "/") + if first then + pth = pth .. s + else + pth = pth .. "/" .. s + end + if make_indeed and not first and not lfs.isdir(pth) then + lfs.mkdir(pth) + end + end + else + pth = "." + for s in str:gmatch("[^/]+") do + pth = pth .. "/" .. s + if make_indeed and not lfs.isdir(pth) then + lfs.mkdir(pth) + end + end + end + return pth, (lfs.isdir(pth) == true) + end + +--~ print(dir.mkdirs("","","a","c")) +--~ print(dir.mkdirs("a")) +--~ print(dir.mkdirs("/a/b/c")) +--~ print(dir.mkdirs("/aaa/b/c")) +--~ print(dir.mkdirs("//a/b/c")) +--~ print(dir.mkdirs("///a/b/c")) +--~ print(dir.mkdirs("a/bbb//ccc/")) + + function dir.expand_name(str) + if not str:find("^/") then + str = lfs.currentdir() .. "/" .. 
str + end + str = str:gsub("//","/") + str = str:gsub("/%./","/") + return str + end + + end + + dir.makedirs = dir.mkdirs + +end end + + +-- filename : l-boolean.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-boolean'] = 1.001 +if not boolean then boolean = { } end + +function boolean.tonumber(b) + if b then return 1 else return 0 end +end + +function toboolean(str,tolerant) + if tolerant then + local tstr = type(str) + if tstr == "string" then + return str == "true" or str == "yes" or str == "on" or str == "1" + elseif tstr == "number" then + return tonumber(str) ~= 0 + elseif tstr == "nil" then + return false + else + return str + end + elseif str == "true" then + return true + elseif str == "false" then + return false + else + return str + end +end + +function string.is_boolean(str) + if type(str) == "string" then + if str == "true" or str == "yes" or str == "on" then + return true + elseif str == "false" or str == "no" or str == "off" then + return false + end + end + return nil +end + +function boolean.alwaystrue() + return true +end + +function boolean.falsetrue() + return false +end + + +if not modules then modules = { } end modules ['l-xml'] = { + version = 1.001, + comment = "this module is the basis for the lxml-* ones", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +-- RJ: key=value ... lpeg.Ca(lpeg.Cc({}) * (pattern-producing-key-and-value / rawset)^0) + +-- some code may move to l-xmlext + +--[[ldx-- +

The parser used here is inspired by the variant discussed in the lua book, but +handles comment and processing instructions, has a different structure, provides +parent access; a first version used different trickery but was less optimized, so we +went this route. First we had a find based parser, now we have an lpeg based one. +The find based parser can be found in l-xml-edu.lua along with other older code.

+ +

Especially the lpath code is experimental, we will support some of xpath, but +only things that make sense for us; as compensation it is possible to hook in your +own functions. Apart from preprocessing content for ConTeXt we also need +this module for process management, like handling ctx and rlx +files.

+ + +a/b/c /*/c +a/b/c/first() a/b/c/last() a/b/c/index(n) a/b/c/index(-n) +a/b/c/text() a/b/c/text(1) a/b/c/text(-1) a/b/c/text(n) + + +

Beware, the interface may change. For instance at, ns, tg, dt may get more +verbose names. Once the code is stable we will also remove some tracing and +optimize the code.

+--ldx]]-- + +xml = xml or { } +tex = tex or { } + +xml.trace_lpath = false +xml.trace_print = false +xml.trace_remap = false + +local format, concat = string.format, table.concat + +--~ local pairs, next, type = pairs, next, type + +-- todo: some things per xml file, like namespace remapping + +--[[ldx-- +

First a hack to enable namespace resolving. A namespace is characterized by +a URL. The following function associates a namespace prefix with a +pattern. We use lpeg, which in this case is more than twice as fast as a +find based solution where we loop over an array of patterns. Less code and +much cleaner.

+--ldx]]-- + +xml.xmlns = xml.xmlns or { } + +do + + local check = lpeg.P(false) + local parse = check + + --[[ldx-- +

The next function associates a namespace prefix with a URL. This + normally happens independent of parsing.

+ + + xml.registerns("mml","mathml") + + --ldx]]-- + + function xml.registerns(namespace, pattern) -- pattern can be an lpeg + check = check + lpeg.C(lpeg.P(pattern:lower())) / namespace + parse = lpeg.P { lpeg.P(check) + 1 * lpeg.V(1) } + end + + --[[ldx-- +

The next function also registers a namespace, but this time we map a + given namespace prefix onto a registered one, using the given + URL. This is used for attributes like xmlns:m.

+ + + xml.checkns("m","http://www.w3.org/mathml") + + --ldx]]-- + + function xml.checkns(namespace,url) + local ns = parse:match(url:lower()) + if ns and namespace ~= ns then + xml.xmlns[namespace] = ns + end + end + + --[[ldx-- +

Next we provide a way to turn a URL into a registered + namespace. This is used for the xmlns attribute.

+ + + resolvedns = xml.resolvens("http://www.w3.org/mathml") + + + This returns mml. + --ldx]]-- + + function xml.resolvens(url) + return parse:match(url:lower()) or "" + end + + --[[ldx-- +

A namespace in an element can be remapped onto the registered + one efficiently by using the xml.xmlns table.
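+
+Taken together (reusing the url from the example above), a small sketch of the
+remapping effect:
+
+xml.registerns("mml","mathml")
+xml.checkns("m","http://www.w3.org/mathml")
+-- afterwards xml.xmlns["m"] equals "mml", so the "m" prefix maps onto "mml"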

+ --ldx]]-- + +end + +--[[ldx-- +

This version uses lpeg. We follow the same approach as before, stack and top and +such. This version is about twice as fast, which is mostly due to the fact that +we don't have to prepare the stream for cdata, doctype etc. This variant +is dedicated to Luigi Scarso, who challenged me with 40 megabyte files that +took 12.5 seconds to load (1.5 for file io and the rest for tree building). With +the lpeg implementation we got that down to less than 7.3 seconds. Loading the 14 +ConTeXt interface definition files (2.6 meg) went down from 1.05 seconds to 0.55.

+ +

Next comes the parser. The rather messy doctype definition comes in many +disguises, so it is no surprise that later on we have to dedicate quite some +lpeg code to it.


The code may look a bit complex but this is mostly due to the fact that we +resolve namespaces and attach metatables. There is only one public function:

+ + +local x = xml.convert(somestring) + + +

An optional second boolean argument tells this function not to create a root +element.
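+
+For instance, a small sketch of both call forms (somestring is any string holding
+xml data):
+
+local tree = xml.convert(somestring) -- content wrapped in a root element
+local frag = xml.convert(somestring,true) -- no extra root element is created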

+--ldx]]-- + +xml.strip_cm_and_dt = false -- an extra global flag, in case we have many includes + +do + + -- not just one big nested table capture (lpeg overflow) + + local remove, nsremap, resolvens = table.remove, xml.xmlns, xml.resolvens + + local stack, top, dt, at, xmlns, errorstr, entities = {}, {}, {}, {}, {}, nil, {} + + local mt = { __tostring = xml.text } + + function xml.check_error(top,toclose) + return "" + end + + local strip = false + local cleanup = false + + function xml.set_text_cleanup(fnc) + cleanup = fnc + end + + local function add_attribute(namespace,tag,value) + if tag == "xmlns" then + xmlns[#xmlns+1] = resolvens(value) + at[tag] = value + elseif namespace == "xmlns" then + xml.checkns(tag,value) + at["xmlns:" .. tag] = value + else + at[tag] = value + end + end + local function add_begin(spacing, namespace, tag) + if #spacing > 0 then + dt[#dt+1] = spacing + end + local resolved = (namespace == "" and xmlns[#xmlns]) or nsremap[namespace] or namespace + top = { ns=namespace or "", rn=resolved, tg=tag, at=at, dt={}, __p__ = stack[#stack] } + setmetatable(top, mt) + dt = top.dt + stack[#stack+1] = top + at = { } + end + local function add_end(spacing, namespace, tag) + if #spacing > 0 then + dt[#dt+1] = spacing + end + local toclose = remove(stack) + top = stack[#stack] + if #stack < 1 then + errorstr = format("nothing to close with %s %s", tag, xml.check_error(top,toclose) or "") + elseif toclose.tg ~= tag then -- no namespace check + errorstr = format("unable to close %s with %s %s", toclose.tg, tag, xml.check_error(top,toclose) or "") + end + dt = top.dt + dt[#dt+1] = toclose + if at.xmlns then + remove(xmlns) + end + end + local function add_empty(spacing, namespace, tag) + if #spacing > 0 then + dt[#dt+1] = spacing + end + local resolved = (namespace == "" and xmlns[#xmlns]) or nsremap[namespace] or namespace + top = stack[#stack] + dt = top.dt + local t = { ns=namespace or "", rn=resolved, tg=tag, at=at, dt={}, __p__ = top } + dt[#dt+1] = t + setmetatable(t, mt) + at = { } + if at.xmlns then + remove(xmlns) + end + end + local function add_text(text) + if cleanup and #text > 0 then + dt[#dt+1] = cleanup(text) + else + dt[#dt+1] = text + end + end + local function add_special(what, spacing, text) + if #spacing > 0 then + dt[#dt+1] = spacing + end + if strip and (what == "@cm@" or what == "@dt@") then + -- forget it + else + dt[#dt+1] = { special=true, ns="", tg=what, dt={text} } + end + end + local function set_message(txt) + errorstr = "garbage at the end of the file: " .. 
txt:gsub("([ \n\r\t]*)","") + end + + local P, S, R, C, V = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V + + local space = S(' \r\n\t') + local open = P('<') + local close = P('>') + local squote = S("'") + local dquote = S('"') + local equal = P('=') + local slash = P('/') + local colon = P(':') + local valid = R('az', 'AZ', '09') + S('_-.') + local name_yes = C(valid^1) * colon * C(valid^1) + local name_nop = C(P(true)) * C(valid^1) + local name = name_yes + name_nop + + local utfbom = P('\000\000\254\255') + P('\255\254\000\000') + + P('\255\254') + P('\254\255') + P('\239\187\191') -- no capture + + local spacing = C(space^0) + local justtext = C((1-open)^1) + local somespace = space^1 + local optionalspace = space^0 + + local value = (squote * C((1 - squote)^0) * squote) + (dquote * C((1 - dquote)^0) * dquote) + local attribute = (somespace * name * optionalspace * equal * optionalspace * value) / add_attribute + local attributes = attribute^0 + + local text = justtext / add_text + local balanced = P { "[" * ((1 - S"[]") + V(1))^0 * "]" } -- taken from lpeg manual, () example + + local emptyelement = (spacing * open * name * attributes * optionalspace * slash * close) / add_empty + local beginelement = (spacing * open * name * attributes * optionalspace * close) / add_begin + local endelement = (spacing * open * slash * name * optionalspace * close) / add_end + + local begincomment = open * P("!--") + local endcomment = P("--") * close + local begininstruction = open * P("?") + local endinstruction = P("?") * close + local begincdata = open * P("![CDATA[") + local endcdata = P("]]") * close + + local someinstruction = C((1 - endinstruction)^0) + local somecomment = C((1 - endcomment )^0) + local somecdata = C((1 - endcdata )^0) + + function entity(k,v) entities[k] = v end + + local begindoctype = open * P("!DOCTYPE") + local enddoctype = close + local beginset = P("[") + local endset = P("]") + local doctypename = C((1-somespace)^0) + local elementdoctype = optionalspace * P("Packaging data in an xml like table is done with the following + function. Maybe it will go away (when not used).

+ --ldx]]-- + + function xml.is_valid(root) + return root and root.dt and root.dt[1] and type(root.dt[1]) == "table" and not root.dt[1].er + end + + function xml.package(tag,attributes,data) + local ns, tg = tag:match("^(.-):?([^:]+)$") + local t = { ns = ns, tg = tg, dt = data or "", at = attributes or {} } + setmetatable(t, mt) + return t + end + + function xml.is_valid(root) + return root and not root.error + end + + xml.error_handler = (logs and logs.report) or (input and input.report) or print + +end + +--[[ldx-- +

We cannot feed the parser from a filehandle, so we need to load +the whole file first. The function accepts a string representing +a filename or a file handle.
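A usage sketch (the filename is hypothetical):

local root = xml.load("somefile.xml")          -- from a filename
local f = io.open("somefile.xml")
if f then
    local alt = xml.load(f)                    -- from an already opened filehandle
    f:close()
end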

+--ldx]]-- + +function xml.load(filename) + if type(filename) == "string" then + local f = io.open(filename,'r') + if f then + local root = xml.convert(f:read("*all")) + f:close() + return root + else + return xml.convert("") + end + elseif filename then -- filehandle + return xml.convert(filename:read("*all")) + else + return xml.convert("") + end +end + +--[[ldx-- +

When we inject new elements, we need to convert strings to +valid trees, which is what the next function does.
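A small sketch of the difference with a plain convert (the content is made up):

local e = xml.toxml("<item>new</item>")    -- a string is converted (without an extra root element)
local t = xml.toxml(e)                     -- a table is passed through unchanged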

+--ldx]]-- + +function xml.toxml(data) + if type(data) == "string" then + local root = { xml.convert(data,true) } + return (#root > 1 and root) or root[1] + else + return data + end +end + +--[[ldx-- +

For copying a tree we use a dedicated function instead of the +generic table copier. Since we know what we're dealing with we +can speed up things a bit. The second argument is not to be used!

+--ldx]]-- + +do + + function copy(old,tables) + if old then + tables = tables or { } + local new = { } + if not tables[old] then + tables[old] = new + end + for k,v in pairs(old) do + new[k] = (type(v) == "table" and (tables[v] or copy(v, tables))) or v + end + local mt = getmetatable(old) + if mt then + setmetatable(new,mt) + end + return new + else + return { } + end + end + + xml.copy = copy + +end + +--[[ldx-- +

Serializing the tree or parts of the tree is a major +activity, which is why the following function is pretty optimized, resulting +in a few more lines of code than strictly needed. The variant that uses the formatting +function for all components is about 15% slower than the concatenating +alternative.
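To give an idea of the interface described below (a sketch; root is assumed to come from xml.convert, and the collector handle and uppercasing text converter are made up):

local chunks = { }
xml.serialize(root, function(s) chunks[#chunks+1] = s end,
    function(txt) return txt:upper() end)          -- textconverter, applied to text nodes only
local result = table.concat(chunks)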

+--ldx]]-- + +do + + -- todo: add when not present + + local fallbackhandle = (tex and tex.sprint) or io.write + + local function serialize(e, handle, textconverter, attributeconverter, specialconverter, nocommands) + if not e then + return + elseif not nocommands then + local ec = e.command + if ec ~= nil then -- we can have all kind of types + +if e.special then -- todo test for true/false + local etg, edt = e.tg, e.dt + local spc = specialconverter and specialconverter[etg] + if spc then +--~ print("SPECIAL",etg,table.serialize(specialconverter), spc) + local result = spc(edt[1]) + if result then + handle(result) + return + else + -- no need to handle any further + end + end +end + + local xc = xml.command + if xc then + xc(e,ec) + return + end + end + end + handle = handle or fallbackhandle + local etg = e.tg + if etg then + if e.special then + local edt = e.dt + local spc = specialconverter and specialconverter[etg] + if spc then + local result = spc(edt[1]) + if result then + handle(result) + else + -- no need to handle any further + end + elseif etg == "@pi@" then + -- handle(format("",edt[1])) + handle("") + elseif etg == "@cm@" then + -- handle(format("",edt[1])) + handle("") + elseif etg == "@cd@" then + -- handle(format("",edt[1])) + handle("") + elseif etg == "@dt@" then + -- handle(format("",edt[1])) + handle("") + elseif etg == "@rt@" then + serialize(edt,handle,textconverter,attributeconverter,specialconverter,nocommands) + end + else + local ens, eat, edt, ern = e.ns, e.at, e.dt, e.rn + local ats = eat and next(eat) and { } -- type test maybe faster + if ats then + if attributeconverter then + for k,v in pairs(eat) do + ats[#ats+1] = format('%s=%q',k,attributeconverter(v)) + end + else + for k,v in pairs(eat) do + ats[#ats+1] = format('%s=%q',k,v) + end + end + end + if ern and xml.trace_remap and ern ~= ens then +--~ if ats then +--~ ats[#ats+1] = format("xmlns:remapped='%s'",ern) +--~ else +--~ ats = { format("xmlns:remapped='%s'",ern) } +--~ end +--~ if ats then +--~ ats[#ats+1] = format("remappedns='%s'",ens or '-') +--~ else +--~ ats = { format("remappedns='%s'",ens or '-') } +--~ end +ens = ern + end + if ens ~= "" then + if edt and #edt > 0 then + if ats then + -- handle(format("<%s:%s %s>",ens,etg,concat(ats," "))) + handle("<" .. ens .. ":" .. etg .. " " .. concat(ats," ") .. ">") + else + -- handle(format("<%s:%s>",ens,etg)) + handle("<" .. ens .. ":" .. etg .. ">") + end + for i=1,#edt do + local e = edt[i] + if type(e) == "string" then + if textconverter then + handle(textconverter(e)) + else + handle(e) + end + else + serialize(e,handle,textconverter,attributeconverter,specialconverter,nocommands) + end + end + -- handle(format("",ens,etg)) + handle("") + else + if ats then + -- handle(format("<%s:%s %s/>",ens,etg,concat(ats," "))) + handle("<" .. ens .. ":" .. etg .. " " .. concat(ats," ") .. "/>") + else + -- handle(format("<%s:%s/>",ens,etg)) + handle("<" .. ens .. ":" .. etg .. "/>") + end + end + else + if edt and #edt > 0 then + if ats then + -- handle(format("<%s %s>",etg,concat(ats," "))) + handle("<" .. etg .. " " .. concat(ats," ") .. ">") + else + -- handle(format("<%s>",etg)) + handle("<" .. etg .. ">") + end + for i=1,#edt do + serialize(edt[i],handle,textconverter,attributeconverter,specialconverter,nocommands) + end + -- handle(format("",etg)) + handle("") + else + if ats then + -- handle(format("<%s %s/>",etg,concat(ats," "))) + handle("<" .. etg .. " " .. concat(ats," ") .. "/>") + else + -- handle(format("<%s/>",etg)) + handle("<" .. etg .. 
"/>") + end + end + end + end + elseif type(e) == "string" then + if textconverter then + handle(textconverter(e)) + else + handle(e) + end + else + for i=1,#e do + serialize(e[i],handle,textconverter,attributeconverter,specialconverter,nocommands) + end + end + end + + xml.serialize = serialize + + function xml.checkbom(root) -- can be made faster + if root.ri then + local dt, found = root.dt, false + for k,v in ipairs(dt) do + if type(v) == "table" and v.special and v.tg == "@pi" and v.dt:find("xml.*version=") then + found = true + break + end + end + if not found then + table.insert(dt, 1, { special=true, ns="", tg="@pi@", dt = { "xml version='1.0' standalone='yes'"} } ) + table.insert(dt, 2, "\n" ) + end + end + end + + --[[ldx-- +

At the cost of some 25% runtime overhead you can first convert the tree to a string + and then handle the lot.
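For example (a sketch; root is assumed to come from xml.convert or xml.load):

local str = xml.tostring(root)     -- collects the serialized chunks and concatenates them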

+ --ldx]]-- + + function xml.tostring(root) -- 25% overhead due to collecting + if root then + if type(root) == 'string' then + return root + elseif next(root) then -- next is faster than type (and >0 test) + local result = { } + serialize(root,function(s) result[#result+1] = s end) + return concat(result,"") + end + end + return "" + end + +end + +--[[ldx-- +

The next function operates on the content only and needs a handle function +that accepts a string.

+--ldx]]-- + +function xml.string(e,handle) + if not handle or (e.special and e.tg ~= "@rt@") then + -- nothing + elseif e.tg then + local edt = e.dt + if edt then + for i=1,#edt do + xml.string(edt[i],handle) + end + end + else + handle(e) + end +end + +--[[ldx-- +

How you deal with saving data depends on your preferences. For a 40 MB database +file the timings on a 2.3 GHz Core Duo are as follows (time in seconds):

+ + +1.3 : load data from file to string +6.1 : convert string into tree +5.3 : saving in file using xmlsave +6.8 : converting to string using xml.tostring +3.6 : saving converted string in file + + +

The save function is given below.
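Usage is a one-liner (the filename is made up):

xml.save(root,"result.xml")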

+--ldx]]-- + +function xml.save(root,name) + local f = io.open(name,"w") + if f then + xml.serialize(root,function(s) f:write(s) end) + f:close() + end +end + +--[[ldx-- +

A few helpers:

+--ldx]]-- + +function xml.body(root) + return (root.ri and root.dt[root.ri]) or root +end + +function xml.text(root) + return (root and xml.tostring(root)) or "" +end + +function xml.content(root) -- bugged + return (root and root.dt and xml.tostring(root.dt)) or "" +end + +--[[ldx-- +

The next helper erases an element but keeps the table as it is, +and since empty strings are (effectively) not serialized it does +no harm. Copying the table would take more time. Usage:

+ + +dt[k] = xml.empty() or xml.empty(dt,k) + +--ldx]]-- + +function xml.empty(dt,k) + if dt and k then + dt[k] = "" + return dt[k] + else + return "" + end +end + +--[[ldx-- +

The next helper assigns a tree (or string). Usage:

+ + +dt[k] = xml.assign(root) or xml.assign(dt,k,root) + +--ldx]]-- + +function xml.assign(dt,k,root) + if dt and k then + dt[k] = (type(root) == "table" and xml.body(root)) or root + return dt[k] + else + return xml.body(root) + end +end + +--[[ldx-- +

We've now arrived at an interesting part: accessing the tree using a subset +of XPath, and since we're not compatible we call it lpath. We +will explain more about its usage in other documents.

+--ldx]]-- + +do + + xml.functions = xml.functions or { } + + local functions = xml.functions + + local actions = { + [10] = "stay", + [11] = "parent", + [12] = "subtree root", + [13] = "document root", + [14] = "any", + [15] = "many", + [16] = "initial", + [20] = "match", + [21] = "match one of", + [22] = "match and attribute eq", + [23] = "match and attribute ne", + [24] = "match one of and attribute eq", + [25] = "match one of and attribute ne", + [27] = "has attribute", + [28] = "has value", + [29] = "fast match", + [30] = "select", + [31] = "expression", + [40] = "processing instruction", + } + + --~ local function make_expression(str) --could also be an lpeg + --~ str = str:gsub("@([a-zA-Z%-_]+)", "(a['%1'] or '')") + --~ str = str:gsub("position%(%)", "i") + --~ str = str:gsub("text%(%)", "t") + --~ str = str:gsub("!=", "~=") + --~ str = str:gsub("([^=!~<>])=([^=!~<>])", "%1==%2") + --~ str = str:gsub("([a-zA-Z%-_]+)%(", "functions.%1(") + --~ return str, loadstring(format("return function(functions,i,a,t) return %s end", str))() + --~ end + + -- a rather dumb lpeg + + local P, S, R, C, V, Cc = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V, lpeg.Cc + + local lp_position = P("position()") / "id" + local lp_text = P("text()") / "tx" + local lp_name = P("name()") / "((rt.ns~='' and rt.ns..':'..rt.tg) or '')" + local lp_tag = P("tag()") / "(rt.tg or '')" + local lp_ns = P("ns()") / "(rt.ns or '')" + local lp_noequal = P("!=") / "~=" + P("<=") + P(">=") + P("==") + local lp_doequal = P("=") / "==" + local lp_attribute = P("@") / "" * Cc("(at['") * R("az","AZ","--","__")^1 * Cc("'] or '')") + + local lp_function = C(R("az","AZ","--","__")^1) * P("(") / function(t) + if functions[t] then + return "functions." .. t .. "(" + else + return "functions.error(" + end + end + + local lparent = lpeg.P("(") + local rparent = lpeg.P(")") + local noparent = 1 - (lparent+rparent) + local nested = lpeg.P{lparent * (noparent + lpeg.V(1))^0 * rparent} + local value = lpeg.P(lparent * lpeg.C((noparent + nested)^0) * rparent) + +--~ local value = P { "(" * C(((1 - S("()")) + V(1))^0) * ")" } + + local lp_special = (C(P("name")+P("text")+P("tag"))) * value / function(t,s) + if functions[t] then + if s then + return "functions." .. t .. "(rt,k," .. s ..")" + else + return "functions." .. t .. "(rt,k)" + end + else + return "functions.error(" .. t .. 
")" + end + end + + local converter = lpeg.Cs ( ( + lp_position + + lp_text + lp_name + -- fast one + lp_special + + lp_noequal + lp_doequal + + lp_attribute + + lp_function + + 1 )^1 ) + + local function make_expression(str) + str = converter:match(str) + return str, loadstring(format("return function(functions,id,at,tx,rt,k) return %s end", str))() + end + + local map = { } + + local space = S(' \r\n\t') + local squote = S("'") + local dquote = S('"') + local lparent = P('(') + local rparent = P(')') + local atsign = P('@') + local lbracket = P('[') + local rbracket = P(']') + local exclam = P('!') + local period = P('.') + local eq = P('==') + P('=') + local ne = P('<>') + P('!=') + local star = P('*') + local slash = P('/') + local colon = P(':') + local bar = P('|') + local hat = P('^') + local valid = R('az', 'AZ', '09') + S('_-') +--~ local name_yes = C(valid^1 + star) * colon * C(valid^1 + star) -- permits ns:* *:tg *:* +--~ local name_nop = C(P(true)) * C(valid^1) + local name_yes = C(valid^1 + star) * colon * C(valid^1 + star) -- permits ns:* *:tg *:* + local name_nop = Cc("*") * C(valid^1) + local name = name_yes + name_nop + local number = C((S('+-')^0 * R('09')^1)) / tonumber + local names = (bar^0 * name)^1 + local morenames = name * (bar^0 * name)^1 + local instructiontag = P('pi::') + local spacing = C(space^0) + local somespace = space^1 + local optionalspace = space^0 + local text = C(valid^0) + local value = (squote * C((1 - squote)^0) * squote) + (dquote * C((1 - dquote)^0) * dquote) + local empty = 1-slash + + local is_eq = lbracket * atsign * name * eq * value * rbracket + local is_ne = lbracket * atsign * name * ne * value * rbracket + local is_attribute = lbracket * atsign * name * rbracket + local is_value = lbracket * value * rbracket + local is_number = lbracket * number * rbracket + + local nobracket = 1-(lbracket+rbracket) -- must be improved + local is_expression = lbracket * C(((C(nobracket^1))/make_expression)) * rbracket + + local is_expression = lbracket * (C(nobracket^1))/make_expression * rbracket + + local is_one = name + local is_none = exclam * name + local is_one_of = ((lparent * names * rparent) + morenames) + local is_none_of = exclam * ((lparent * names * rparent) + morenames) + + local stay = (period ) + local parent = (period * period ) / function( ) map[#map+1] = { 11 } end + local subtreeroot = (slash + hat ) / function( ) map[#map+1] = { 12 } end + local documentroot = (hat * hat ) / function( ) map[#map+1] = { 13 } end + local any = (star ) / function( ) map[#map+1] = { 14 } end + local many = (star * star ) / function( ) map[#map+1] = { 15 } end + local initial = (hat * hat * hat ) / function( ) map[#map+1] = { 16 } end + + local match = (is_one ) / function(...) map[#map+1] = { 20, true , ... } end + local match_one_of = (is_one_of ) / function(...) map[#map+1] = { 21, true , ... } end + local dont_match = (is_none ) / function(...) map[#map+1] = { 20, false, ... } end + local dont_match_one_of = (is_none_of ) / function(...) map[#map+1] = { 21, false, ... } end + + local match_and_eq = (is_one * is_eq ) / function(...) map[#map+1] = { 22, true , ... } end + local match_and_ne = (is_one * is_ne ) / function(...) map[#map+1] = { 23, true , ... } end + local dont_match_and_eq = (is_none * is_eq ) / function(...) map[#map+1] = { 22, false, ... } end + local dont_match_and_ne = (is_none * is_ne ) / function(...) map[#map+1] = { 23, false, ... } end + + local match_one_of_and_eq = (is_one_of * is_eq ) / function(...) 
map[#map+1] = { 24, true , ... } end + local match_one_of_and_ne = (is_one_of * is_ne ) / function(...) map[#map+1] = { 25, true , ... } end + local dont_match_one_of_and_eq = (is_none_of * is_eq ) / function(...) map[#map+1] = { 24, false, ... } end + local dont_match_one_of_and_ne = (is_none_of * is_ne ) / function(...) map[#map+1] = { 25, false, ... } end + + local has_attribute = (is_one * is_attribute) / function(...) map[#map+1] = { 27, true , ... } end + local has_value = (is_one * is_value ) / function(...) map[#map+1] = { 28, true , ... } end + local dont_has_attribute = (is_none * is_attribute) / function(...) map[#map+1] = { 27, false, ... } end + local dont_has_value = (is_none * is_value ) / function(...) map[#map+1] = { 28, false, ... } end + local position = (is_one * is_number ) / function(...) map[#map+1] = { 30, true, ... } end + local dont_position = (is_none * is_number ) / function(...) map[#map+1] = { 30, false, ... } end + + local expression = (is_one * is_expression)/ function(...) map[#map+1] = { 31, true, ... } end + local dont_expression = (is_none * is_expression)/ function(...) map[#map+1] = { 31, false, ... } end + + local self_expression = ( is_expression) / function(...) if #map == 0 then map[#map+1] = { 11 } end + map[#map+1] = { 31, true, "*", "*", ... } end + local dont_self_expression = (exclam * is_expression) / function(...) if #map == 0 then map[#map+1] = { 11 } end + map[#map+1] = { 31, false, "*", "*", ... } end + + local instruction = (instructiontag * text ) / function(...) map[#map+1] = { 40, ... } end + local nothing = (empty ) / function( ) map[#map+1] = { 15 } end -- 15 ? + local crap = (1-slash)^1 + + -- a few ugly goodies: + + local docroottag = P('^^') / function( ) map[#map+1] = { 12 } end + local subroottag = P('^') / function( ) map[#map+1] = { 13 } end + local roottag = P('root::') / function( ) map[#map+1] = { 12 } end + local parenttag = P('parent::') / function( ) map[#map+1] = { 11 } end + local childtag = P('child::') + local selftag = P('self::') + + -- there will be more and order will be optimized + + local selector = ( + instruction + + many + any + + parent + stay + + dont_position + position + + dont_match_one_of_and_eq + dont_match_one_of_and_ne + + match_one_of_and_eq + match_one_of_and_ne + + dont_match_and_eq + dont_match_and_ne + + match_and_eq + match_and_ne + + dont_expression + expression + + dont_self_expression + self_expression + + has_attribute + has_value + + dont_match_one_of + match_one_of + + dont_match + match + + crap + empty + ) + + local grammar = P { "startup", + startup = (initial + documentroot + subtreeroot + roottag + docroottag + subroottag)^0 * V("followup"), + followup = ((slash + parenttag + childtag + selftag)^0 * selector)^1, + } + + local function compose(str) + if not str or str == "" then + -- wildcard + return true + elseif str == '/' then + -- root + return false + else + map = { } + grammar:match(str) + if #map == 0 then + return true + else + local m = map[1][1] + if #map == 1 then + if m == 14 or m == 15 then + -- wildcard + return true + elseif m == 12 then + -- root + return false + end + elseif #map == 2 and m == 12 and map[2][1] == 20 then + -- return { { 29, map[2][2], map[2][3], map[2][4], map[2][5] } } + map[2][1] = 29 + return { map[2] } + end + if m ~= 11 and m ~= 12 and m ~= 13 and m ~= 14 and m ~= 15 and m ~= 16 then + table.insert(map, 1, { 16 }) + end + -- print((table.serialize(map)):gsub("[ \n]+"," ")) + return map + end + end + end + + local cache = { } + + function 
xml.lpath(pattern,trace) + if type(pattern) == "string" then + local result = cache[pattern] + if not result then + result = compose(pattern) + cache[pattern] = result + end + if trace or xml.trace_lpath then + xml.lshow(result) + end + return result + else + return pattern + end + end + + local fallbackreport = (texio and texio.write) or io.write + + function xml.lshow(pattern,report) + report = report or fallbackreport + local lp = xml.lpath(pattern) + if lp == false then + report(" -: root\n") + elseif lp == true then + report(" -: wildcard\n") + else + if type(pattern) == "string" then + report(format("pattern: %s\n",pattern)) + end + for k,v in ipairs(lp) do + if #v > 1 then + local t = { } + for i=2,#v do + local vv = v[i] + if type(vv) == "string" then + t[#t+1] = (vv ~= "" and vv) or "#" + elseif type(vv) == "boolean" then + t[#t+1] = (vv and "==") or "<>" + end + end + report(format("%2i: %s %s -> %s\n", k,v[1],actions[v[1]],concat(t," "))) + else + report(format("%2i: %s %s\n", k,v[1],actions[v[1]])) + end + end + end + end + + function xml.xshow(e,...) -- also handy when report is given, use () to isolate first e + local t = { ... } + local report = (type(t[#t]) == "function" and t[#t]) or fallbackreport + if e == nil then + report("\n") + elseif type(e) ~= "table" then + report(tostring(e)) + elseif e.tg then + report(tostring(e) .. "\n") + else + for i=1,#e do + report(tostring(e[i]) .. "\n") + end + end + end + +end + +--[[ldx-- +

An lpath pattern is converted to a table with instructions for traversing the +tree. However, simple cases are signaled by booleans. Because we don't know in +advance what we want to do with the found element the handle gets three arguments:

+ + +r : the root element of the data table +d : the data table of the result +t : the index in the data table of the result + + +

Access to the root and data table makes it possible to construct insert and delete +functions.
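A sketch of such a handle (the pattern and data are made up); collecting first and removing afterwards is the same strategy as the delete function further on uses:

local hits = { }
xml.traverse(root, xml.lpath("entry/b"), function(r,d,k)
    hits[#hits+1] = { d, k }             -- d[k] is the matched element
end)
for i=#hits,1,-1 do                      -- remove in reverse order so indices stay valid
    table.remove(hits[i][1], hits[i][2])
end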

+--ldx]]-- + +do + + local functions = xml.functions + + functions.contains = string.find + functions.find = string.find + functions.upper = string.upper + functions.lower = string.lower + functions.number = tonumber + functions.boolean = toboolean + + functions.oneof = function(s,...) -- slow + local t = {...} for i=1,#t do if s == t[i] then return true end end return false + end + functions.error = function(str) + xml.error_handler("unknown function in lpath expression",str) + return false + end + functions.text = function(root,k,n) -- unchecked, maybe one deeper + local t = type(t) + if t == "string" then + return t + else -- todo n + local rdt = root.dt + return (rdt and rdt[k]) or root[k] or "" + end + end + functions.name = function(root,k,n) + -- way too fuzzy + local found + if not k or not n then + local ns, tg = root.rn or root.ns or "", root.tg + if not tg then + for i=1,#root do + local e = root[i] + if type(e) == "table" then + found = e + break + end + end + elseif ns ~= "" then + return ns .. ":" .. tg + else + return tg + end + elseif n == 0 then + local e = root[k] + if type(e) ~= "table" then + found = e + end + elseif n < 0 then + for i=k-1,1,-1 do + local e = root[i] + if type(e) == "table" then + if n == -1 then + found = e + break + else + n = n + 1 + end + end + end + else +--~ print(k,n,#root) + for i=k+1,#root,1 do + local e = root[i] + if type(e) == "table" then + if n == 1 then + found = e + break + else + n = n - 1 + end + end + end + end + if found then + local ns, tg = found.rn or found.ns or "", found.tg + if ns ~= "" then + return ns .. ":" .. tg + else + return tg + end + else + return "" + end + end + + local function traverse(root,pattern,handle,reverse,index,parent,wildcard) -- multiple only for tags, not for namespaces + if not root then -- error + return false + elseif pattern == false then -- root + handle(root,root.dt,root.ri) + return false + elseif pattern == true then -- wildcard + local rootdt = root.dt + if rootdt then + local start, stop, step = 1, #rootdt, 1 + if reverse then + start, stop, step = stop, start, -1 + end + for k=start,stop,step do + if handle(root,rootdt,root.ri or k) then return false end + if not traverse(rootdt[k],true,handle,reverse) then return false end + end + end + return false + elseif root.dt then + index = index or 1 + local action = pattern[index] + local command = action[1] + if command == 29 then -- fast case /oeps + local rootdt = root.dt + for k=1,#rootdt do + local e = rootdt[k] + local tg = e.tg + if e.tg then + local ns = e.rn or e.ns + local ns_a, tg_a = action[3], action[4] + local matched = (ns_a == "*" or ns == ns_a) and (tg_a == "*" or tg == tg_a) + if not action[2] then matched = not matched end + if matched then + if handle(root,rootdt,k) then return false end + end + end + end + elseif command == 11 then -- parent + local ep = root.__p__ or parent + if index < #pattern then + if not traverse(ep,pattern,handle,reverse,index+1,root) then return false end + elseif handle(root,rootdt,k) then + return false + end + else + if (command == 16 or command == 12) and index == 1 then -- initial + -- wildcard = true + wildcard = command == 16 -- ok? 
+ index = index + 1 + action = pattern[index] + command = action and action[1] or 0 -- something is wrong + end + if command == 11 then -- parent + local ep = root.__p__ or parent + if index < #pattern then + if not traverse(ep,pattern,handle,reverse,index+1,root) then return false end + elseif handle(root,rootdt,k) then + return false + end + else + local rootdt = root.dt + local start, stop, step, n, dn = 1, #rootdt, 1, 0, 1 + if command == 30 then + if action[5] < 0 then + start, stop, step = stop, start, -1 + dn = -1 + end + elseif reverse and index == #pattern then + start, stop, step = stop, start, -1 + end + local idx = 0 + for k=start,stop,step do -- we used to have functions for all but a case is faster + local e = rootdt[k] + local ns, tg = e.rn or e.ns, e.tg + if tg then + idx = idx + 1 + if command == 30 then + local ns_a, tg_a = action[3], action[4] + if tg == tg_a then + matched = ns_a == "*" or ns == ns_a + elseif tg_a == '*' then + matched, multiple = ns_a == "*" or ns == ns_a, true + else + matched = false + end + if not action[2] then matched = not matched end + if matched then + n = n + dn + if n == action[5] then + if index == #pattern then + if handle(root,rootdt,root.ri or k) then return false end + else + if not traverse(e,pattern,handle,reverse,index+1,root) then return false end + end + break + end + elseif wildcard then + if not traverse(e,pattern,handle,reverse,index,root,true) then return false end + end + else + local matched, multiple = false, false + if command == 20 then -- match + local ns_a, tg_a = action[3], action[4] + if tg == tg_a then + matched = ns_a == "*" or ns == ns_a + elseif tg_a == '*' then + matched, multiple = ns_a == "*" or ns == ns_a, true + else + matched = false + end + if not action[2] then matched = not matched end + elseif command == 21 then -- match one of + multiple = true + for i=3,#action,2 do + local ns_a, tg_a = action[i], action[i+1] + if (ns_a == "*" or ns == ns_a) and (tg == "*" or tg == tg_a) then + matched = true + break + end + end + if not action[2] then matched = not matched end + elseif command == 22 then -- eq + local ns_a, tg_a = action[3], action[4] + if tg == tg_a then + matched = ns_a == "*" or ns == ns_a + elseif tg_a == '*' then + matched, multiple = ns_a == "*" or ns == ns_a, true + else + matched = false + end + matched = matched and e.at[action[6]] == action[7] + elseif command == 23 then -- ne + local ns_a, tg_a = action[3], action[4] + if tg == tg_a then + matched = ns_a == "*" or ns == ns_a + elseif tg_a == '*' then + matched, multiple = ns_a == "*" or ns == ns_a, true + else + matched = false + end + if not action[2] then matched = not matched end + matched = mached and e.at[action[6]] ~= action[7] + elseif command == 24 then -- one of eq + multiple = true + for i=3,#action-2,2 do + local ns_a, tg_a = action[i], action[i+1] + if (ns_a == "*" or ns == ns_a) and (tg == "*" or tg == tg_a) then + matched = true + break + end + end + if not action[2] then matched = not matched end + matched = matched and e.at[action[#action-1]] == action[#action] + elseif command == 25 then -- one of ne + multiple = true + for i=3,#action-2,2 do + local ns_a, tg_a = action[i], action[i+1] + if (ns_a == "*" or ns == ns_a) and (tg == "*" or tg == tg_a) then + matched = true + break + end + end + if not action[2] then matched = not matched end + matched = matched and e.at[action[#action-1]] ~= action[#action] + elseif command == 27 then -- has attribute + local ns_a, tg_a = action[3], action[4] + if tg == tg_a then + matched = ns_a 
== "*" or ns == ns_a + elseif tg_a == '*' then + matched, multiple = ns_a == "*" or ns == ns_a, true + else + matched = false + end + if not action[2] then matched = not matched end + matched = matched and e.at[action[5]] + elseif command == 28 then -- has value + local edt, ns_a, tg_a = e.dt, action[3], action[4] + if tg == tg_a then + matched = ns_a == "*" or ns == ns_a + elseif tg_a == '*' then + matched, multiple = ns_a == "*" or ns == ns_a, true + else + matched = false + end + if not action[2] then matched = not matched end + matched = matched and edt and edt[1] == action[5] + elseif command == 31 then + local edt, ns_a, tg_a = e.dt, action[3], action[4] + if tg == tg_a then + matched = ns_a == "*" or ns == ns_a + elseif tg_a == '*' then + matched, multiple = ns_a == "*" or ns == ns_a, true + else + matched = false + end + if not action[2] then matched = not matched end + if matched then + matched = action[6](functions,idx,e.at or { },edt[1],rootdt,k) + end + end + if matched then -- combine tg test and at test + if index == #pattern then + if handle(root,rootdt,root.ri or k) then return false end + if wildcard then + if multiple then + if not traverse(e,pattern,handle,reverse,index,root,true) then return false end + else + -- maybe or multiple; anyhow, check on (section|title) vs just section and title in example in lxml + if not traverse(e,pattern,handle,reverse,index,root) then return false end + end + end + else + if not traverse(e,pattern,handle,reverse,index+1,root) then return false end + end + elseif command == 14 then -- any + if index == #pattern then + if handle(root,rootdt,root.ri or k) then return false end + else + if not traverse(e,pattern,handle,reverse,index+1,root) then return false end + end + elseif command == 15 then -- many + if index == #pattern then + if handle(root,rootdt,root.ri or k) then return false end + else + if not traverse(e,pattern,handle,reverse,index+1,root,true) then return false end + end + -- not here : 11 + elseif command == 11 then -- parent + local ep = e.__p__ or parent + if index < #pattern then + if not traverse(ep,pattern,handle,reverse,root,index+1) then return false end + elseif handle(root,rootdt,k) then + return false + end + elseif command == 40 and e.special and tg == "@pi@" then -- pi + local pi = action[2] + if pi ~= "" then + local pt = e.dt[1] + if pt and pt:find(pi) then + if handle(root,rootdt,k) then + return false + end + end + elseif handle(root,rootdt,k) then + return false + end + elseif wildcard then + if not traverse(e,pattern,handle,reverse,index,root,true) then return false end + end + end + else + -- not here : 11 + if command == 11 then -- parent + local ep = e.__p__ or parent + if index < #pattern then + if not traverse(ep,pattern,handle,reverse,index+1,root) then return false end + elseif handle(root,rootdt,k) then + return false + end + break -- else loop + end + end + end + end + end + end + return true + end + + xml.traverse = traverse + +end + +--[[ldx-- +

Next come all kinds of locators and manipulators. The most generic function here +is xml.filter(root,pattern). All registered functions in the filters namespace +can be part of a search path, as in:

+ + +local r, d, k = xml.filter(root,"/a/b/c/position(4)" + +--ldx]]-- + +do + + local traverse, lpath, convert = xml.traverse, xml.lpath, xml.convert + + xml.filters = { } + + function xml.filters.default(root,pattern) + local rt, dt, dk + traverse(root, lpath(pattern), function(r,d,k) rt,dt,dk = r,d,k return true end) + return dt and dt[dk], rt, dt, dk + end + function xml.filters.attributes(root,pattern,arguments) + local rt, dt, dk + traverse(root, lpath(pattern), function(r,d,k) rt, dt, dk = r, d, k return true end) + local ekat = (dt and dt[dk] and dt[dk].at) or (rt and rt.at) + if ekat then + if arguments then + return ekat[arguments] or "", rt, dt, dk + else + return ekat, rt, dt, dk + end + else + return { }, rt, dt, dk + end + end + function xml.filters.reverse(root,pattern) + local rt, dt, dk + traverse(root, lpath(pattern), function(r,d,k) rt,dt,dk = r,d,k return true end, 'reverse') + return dt and dt[dk], rt, dt, dk + end + function xml.filters.count(root,pattern,everything) + local n = 0 + traverse(root, lpath(pattern), function(r,d,t) + if everything or type(d[t]) == "table" then + n = n + 1 + end + end) + return n + end + function xml.filters.elements(root, pattern) -- == all + local t = { } + traverse(root, lpath(pattern), function(r,d,k) + local e = d[k] + if e then + t[#t+1] = e + end + end) + return t + end + function xml.filters.texts(root, pattern) + local t = { } + traverse(root, lpath(pattern), function(r,d,k) + local e = d[k] + if e and e.dt then + t[#t+1] = e.dt + end + end) + return t + end + function xml.filters.first(root,pattern) + local rt, dt, dk + traverse(root, lpath(pattern), function(r,d,k) rt,dt,dk = r,d,k return true end) + return dt and dt[dk], rt, dt, dk + end + function xml.filters.last(root,pattern) + local rt, dt, dk + traverse(root, lpath(pattern), function(r,d,k) rt,dt,dk = r,d,k return true end, 'reverse') + return dt and dt[dk], rt, dt, dk + end + function xml.filters.index(root,pattern,arguments) + local rt, dt, dk, reverse, i = nil, nil, nil, false, tonumber(arguments or '1') or 1 + if i and i ~= 0 then + if i < 0 then + reverse, i = true, -i + end + traverse(root, lpath(pattern), function(r,d,k) rt, dt, dk, i = r, d, k, i-1 return i == 0 end, reverse) + if i == 0 then + return dt and dt[dk], rt, dt, dk + end + end + return nil, nil, nil, nil + end + function xml.filters.attribute(root,pattern,arguments) + local rt, dt, dk + traverse(root, lpath(pattern), function(r,d,k) rt, dt, dk = r, d, k return true end) + local ekat = (dt and dt[dk] and dt[dk].at) or (rt and rt.at) + return (ekat and (ekat[arguments] or ekat[arguments:gsub("^([\"\'])(.*)%1$","%2")])) or "" + end + function xml.filters.text(root,pattern,arguments) -- ?? why index, tostring slow + local dtk, rt, dt, dk = xml.filters.index(root,pattern,arguments) + if dtk then -- n + local dtkdt = dtk.dt + if not dtkdt then + return "", rt, dt, dk + elseif #dtkdt == 1 and type(dtkdt[1]) == "string" then + return dtkdt[1], rt, dt, dk + else + return xml.tostring(dtkdt), rt, dt, dk + end + else + return "", rt, dt, dk + end + end + function xml.filters.tag(root,pattern,n) + local tag = "" + traverse(root, lpath(pattern), function(r,d,k) + tag = xml.functions.tag(d,k,n and tonumber(n)) + return true + end) + return tag + end + function xml.filters.name(root,pattern,n) + local tag = "" + traverse(root, lpath(pattern), function(r,d,k) + tag = xml.functions.name(d,k,n and tonumber(n)) + return true + end) + return tag + end + + --[[ldx-- +

For splitting the filter function from the path specification, we can + use string matching or lpeg matching. Here the difference in speed is + negligible but the lpeg variant is more robust.
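For instance (a sketch; the paths are made up):

local txt = xml.filter(root,"entry/b/text()")   -- path plus function: dispatched to xml.filters.text
local id  = xml.filter(root,"entry/b/@id")      -- path plus attribute: dispatched to the attribute filter
local hit = xml.filter(root,"entry/b")          -- plain path: handled by the default filter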

+ --ldx]]-- + + -- not faster but hipper ... although ... i can't get rid of the trailing / in the path + + local P, S, R, C, V, Cc = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V, lpeg.Cc + + local slash = P('/') + local name = (R("az","AZ","--","__"))^1 + local path = C(((1-slash)^0 * slash)^1) + local argument = P { "(" * C(((1 - S("()")) + V(1))^0) * ")" } + local action = Cc(1) * path * C(name) * argument + local attribute = Cc(2) * path * P('@') * C(name) + local direct = Cc(3) * Cc("../*") * slash^0 * C(name) * argument + + local parser = direct + action + attribute + + local filters = xml.filters + local attribute_filter = xml.filters.attributes + local default_filter = xml.filters.default + + -- todo: also hash, could be gc'd + + function xml.filter(root,pattern) + local kind, a, b, c = parser:match(pattern) +--~ if xml.trace_lpath then +--~ print(pattern,kind,a,b,c) +--~ end + if kind == 1 or kind == 3 then + return (filters[b] or default_filter)(root,a,c) + elseif kind == 2 then + return attribute_filter(root,a,b) + else + return default_filter(root,pattern) + end + end + + --~ slightly faster, but first we need a proper test file + --~ + --~ local hash = { } + --~ + --~ function xml.filter(root,pattern) + --~ local h = hash[pattern] + --~ if not h then + --~ local kind, a, b, c = parser:match(pattern) + --~ if kind == 1 then + --~ h = { kind, filters[b] or default_filter, a, b, c } + --~ elseif kind == 2 then + --~ h = { kind, attribute_filter, a, b, c } + --~ else + --~ h = { kind, default_filter, a, b, c } + --~ end + --~ hash[pattern] = h + --~ end + --~ local kind = h[1] + --~ if kind == 1 then + --~ return h[2](root,h[2],h[4]) + --~ elseif kind == 2 then + --~ return h[2](root,h[2],h[3]) + --~ else + --~ return h[2](root,pattern) + --~ end + --~ end + + --[[ldx-- +

The following functions collect elements and texts.
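A sketch (the pattern is made up):

local elements = xml.collect_elements(root,"entry/b")        -- the matched elements
local texts    = xml.collect_texts   (root,"entry/b",true)   -- their content, flattened to strings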

+ --ldx]]-- + + -- still somewhat bugged + + function xml.collect_elements(root, pattern, ignorespaces) + local rr, dd = { }, { } + traverse(root, lpath(pattern), function(r,d,k) + local dk = d and d[k] + if dk then + if ignorespaces and type(dk) == "string" and dk:find("[^%S]") then + -- ignore + else + local n = #rr+1 + rr[n], dd[n] = r, dk + end + end + end) + return dd, rr + end + + function xml.collect_texts(root, pattern, flatten) + local t = { } -- no r collector + traverse(root, lpath(pattern), function(r,d,k) + if d then + local ek = d[k] + local tx = ek and ek.dt + if flatten then + if tx then + t[#t+1] = xml.tostring(tx) or "" + else + t[#t+1] = "" + end + else + t[#t+1] = tx or "" + end + else + t[#t+1] = "" + end + end) + return t + end + + function xml.collect_tags(root, pattern, nonamespace) + local t = { } + xml.traverse(root, xml.lpath(pattern), function(r,d,k) + local dk = d and d[k] + if dk and type(dk) == "table" then + local ns, tg = e.ns, e.tg + if nonamespace then + t[#t+1] = tg -- if needed we can return an extra table + elseif ns == "" then + t[#t+1] = tg + else + t[#t+1] = ns .. ":" .. tg + end + end + end) + return #t > 0 and {} + end + + --[[ldx-- +

Often using an iterator looks nicer in the code than passing handler + functions. The Lua book describes how to use coroutines for that + purpose. This permits + code like:

+ + + for r, d, k in xml.elements(xml.load('text.xml'),"title") do + print(d[k]) + end + + +

This will print all the titles in the document. The iterator variant takes + 1.5 times the runtime of the function variant, which is due to the overhead of + creating the wrapper. So, instead of:

+ + + function xml.filters.first(root,pattern) + for rt,dt,dk in xml.elements(root,pattern) do + return dt and dt[dk], rt, dt, dk + end + return nil, nil, nil, nil + end + +

We use the function variants in the filters.

+ --ldx]]-- + + local wrap, yield = coroutine.wrap, coroutine.yield + + function xml.elements(root,pattern,reverse) + return wrap(function() traverse(root, lpath(pattern), yield, reverse) end) + end + + function xml.elements_only(root,pattern,reverse) + return wrap(function() traverse(root, lpath(pattern), function(r,d,k) yield(d[k]) end, reverse) end) + end + + function xml.each_element(root, pattern, handle, reverse) + local ok + traverse(root, lpath(pattern), function(r,d,k) ok = true handle(r,d,k) end, reverse) + return ok + end + + function xml.process_elements(root, pattern, handle) + traverse(root, lpath(pattern), function(r,d,k) + local dkdt = d[k].dt + if dkdt then + for i=1,#dkdt do + local v = dkdt[i] + if v.tg then handle(v) end + end + end + end) + end + + function xml.process_attributes(root, pattern, handle) + traverse(root, lpath(pattern), function(r,d,k) + local ek = d[k] + local a = ek.at or { } + handle(a) + if next(a) then -- next is faster than type (and >0 test) + ek.at = a + else + ek.at = nil + end + end) + end + + --[[ldx-- +

We've now arrived at the functions that manipulate the tree.
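A few usage sketches (patterns and content are made up):

xml.inject_element(root, "entry/b", "<note>added</note>")            -- append inside each match
xml.insert_element_before(root, "entry/b", "<sep/>")                 -- insert as a sibling before each match
local removed = xml.delete_element(root, "entry/b[@obsolete='yes']")
xml.replace_element(root, "entry/old", "<new>replacement</new>")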

+ --ldx]]-- + + function xml.inject_element(root, pattern, element, prepend) + if root and element then + local matches, collect = { }, nil + if type(element) == "string" then + element = convert(element,true) + end + if element then + collect = function(r,d,k) matches[#matches+1] = { r, d, k, element } end + traverse(root, lpath(pattern), collect) + for i=1,#matches do + local m = matches[i] + local r, d, k, element, edt = m[1], m[2], m[3], m[4], nil + if element.ri then + element = element.dt[element.ri].dt + else + element = element.dt + end + if r.ri then + edt = r.dt[r.ri].dt + else + edt = d and d[k] and d[k].dt + end + if edt then + local be, af + if prepend then + be, af = xml.copy(element), edt + else + be, af = edt, xml.copy(element) + end + for i=1,#af do + be[#be+1] = af[i] + end + if r.ri then + r.dt[r.ri].dt = be + else + d[k].dt = be + end + else + -- r.dt = element.dt -- todo + end + end + end + end + end + + -- todo: copy ! + + function xml.insert_element(root, pattern, element, before) -- todo: element als functie + if root and element then + if pattern == "/" then + xml.inject_element(root, pattern, element, before) + else + local matches, collect = { }, nil + if type(element) == "string" then + element = convert(element,true) + end + if element and element.ri then + element = element.dt[element.ri] + end + if element then + collect = function(r,d,k) matches[#matches+1] = { r, d, k, element } end + traverse(root, lpath(pattern), collect) + for i=#matches,1,-1 do + local m = matches[i] + local r, d, k, element = m[1], m[2], m[3], m[4] + if not before then k = k + 1 end + if element.tg then + table.insert(d,k,element) -- untested + elseif element.dt then + for _,v in ipairs(element.dt) do -- i added + table.insert(d,k,v) + k = k + 1 + end + end + end + end + end + end + end + + xml.insert_element_after = xml.insert_element + xml.insert_element_before = function(r,p,e) xml.insert_element(r,p,e,true) end + xml.inject_element_after = xml.inject_element + xml.inject_element_before = function(r,p,e) xml.inject_element(r,p,e,true) end + + function xml.delete_element(root, pattern) + local matches, deleted = { }, { } + local collect = function(r,d,k) matches[#matches+1] = { r, d, k } end + traverse(root, lpath(pattern), collect) + for i=#matches,1,-1 do + local m = matches[i] + deleted[#deleted+1] = table.remove(m[2],m[3]) + end + return deleted + end + + function xml.replace_element(root, pattern, element) + if type(element) == "string" then + element = convert(element,true) + end + if element and element.ri then + element = element.dt[element.ri] + end + if element then + traverse(root, lpath(pattern), function(rm, d, k) + d[k] = element.dt -- maybe not clever enough + end) + end + end + + local function load_data(name) -- == io.loaddata + local f, data = io.open(name), "" + if f then + data = f:read("*all",'b') -- 'b' ? 
+ f:close() + end + return data + end + + function xml.include(xmldata,pattern,attribute,recursive,loaddata) + -- parse="text" (default: xml), encoding="" (todo) + -- attribute = attribute or 'href' + pattern = pattern or 'include' + loaddata = loaddata or load_data + local function include(r,d,k) + local ek, name = d[k], nil + if not attribute or attribute == "" then + local ekdt = ek.dt + name = (type(ekdt) == "table" and ekdt[1]) or ekdt + end + if not name then + if ek.at then + for a in (attribute or "href"):gmatch("([^|]+)") do + name = ek.at[a] + if name then break end + end + end + end + local data = (name and name ~= "" and loaddata(name)) or "" + if data == "" then + xml.empty(d,k) + elseif ek.at["parse"] == "text" then -- for the moment hard coded + d[k] = xml.escaped(data) + else + local xi = xml.convert(data) + if not xi then + xml.empty(d,k) + else + if recursive then + xml.include(xi,pattern,attribute,recursive,loaddata) + end + xml.assign(d,k,xi) + end + end + end + xml.each_element(xmldata, pattern, include) + end + + function xml.strip_whitespace(root, pattern) + traverse(root, lpath(pattern), function(r,d,k) + local dkdt = d[k].dt + if dkdt then -- can be optimized + local t = { } + for i=1,#dkdt do + local str = dkdt[i] + if type(str) == "string" and str:find("^[ \n\r\t]*$") then + -- stripped + else + t[#t+1] = str + end + end + d[k].dt = t + end + end) + end + + function xml.rename_space(root, oldspace, newspace) -- fast variant + local ndt = #root.dt + local rename = xml.rename_space + for i=1,ndt or 0 do + local e = root[i] + if type(e) == "table" then + if e.ns == oldspace then + e.ns = newspace + if e.rn then + e.rn = newspace + end + end + local edt = e.dt + if edt then + rename(edt, oldspace, newspace) + end + end + end + end + + function xml.remap_tag(root, pattern, newtg) + traverse(root, lpath(pattern), function(r,d,k) + d[k].tg = newtg + end) + end + function xml.remap_namespace(root, pattern, newns) + traverse(root, lpath(pattern), function(r,d,k) + d[k].ns = newns + end) + end + function xml.check_namespace(root, pattern, newns) + traverse(root, lpath(pattern), function(r,d,k) + local dk = d[k] + if (not dk.rn or dk.rn == "") and dk.ns == "" then + dk.rn = newns + end + end) + end + function xml.remap_name(root, pattern, newtg, newns, newrn) + traverse(root, lpath(pattern), function(r,d,k) + local dk = d[k] + dk.tg = newtg + dk.ns = newns + dk.rn = newrn + end) + end + + function xml.filters.found(root,pattern,check_content) + local found = false + traverse(root, lpath(pattern), function(r,d,k) + if check_content then + local dk = d and d[k] + found = dk and dk.dt and next(dk.dt) and true + else + found = true + end + return true + end) + return found + end + +end + +--[[ldx-- +

Here are a few synonyms.

+--ldx]]-- + +xml.filters.position = xml.filters.index + +xml.count = xml.filters.count +xml.index = xml.filters.index +xml.position = xml.filters.index +xml.first = xml.filters.first +xml.last = xml.filters.last +xml.found = xml.filters.found + +xml.each = xml.each_element +xml.process = xml.process_elements +xml.strip = xml.strip_whitespace +xml.collect = xml.collect_elements +xml.all = xml.collect_elements + +xml.insert = xml.insert_element_after +xml.inject = xml.inject_element_after +xml.after = xml.insert_element_after +xml.before = xml.insert_element_before +xml.delete = xml.delete_element +xml.replace = xml.replace_element + +--[[ldx-- +

The following helper functions best belong to the lxml-ini +module. Some are here because we need them in the mk +document and other manuals, others came up when playing with +this module. Since this module is also used elsewhere we've +put them here instead of loading more modules than needed.

+--ldx]]-- + +function xml.gsub(t,old,new) + if t.dt then + for k,v in ipairs(t.dt) do + if type(v) == "string" then + t.dt[k] = v:gsub(old,new) + else + xml.gsub(v,old,new) + end + end + end +end + +function xml.strip_leading_spaces(dk,d,k) -- cosmetic, for manual + if d and k and d[k-1] and type(d[k-1]) == "string" then + local s = d[k-1]:match("\n(%s+)") + xml.gsub(dk,"\n"..string.rep(" ",#s),"\n") + end +end + +function xml.serialize_path(root,lpath,handle) + local dk, r, d, k = xml.first(root,lpath) + dk = xml.copy(dk) + xml.strip_leading_spaces(dk,d,k) + xml.serialize(dk,handle) +end + +--~ xml.escapes = { ['&'] = '&', ['<'] = '<', ['>'] = '>', ['"'] = '"' } +--~ xml.unescapes = { } for k,v in pairs(xml.escapes) do xml.unescapes[v] = k end + +--~ function xml.escaped (str) return str:gsub("(.)" , xml.escapes ) end +--~ function xml.unescaped(str) return str:gsub("(&.-;)", xml.unescapes) end +--~ function xml.cleansed (str) return str:gsub("<.->" , '' ) end -- "%b<>" + +do + + local P, S, R, C, V, Cc, Cs = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.V, lpeg.Cc, lpeg.Cs + + -- 100 * 2500 * "oeps< oeps> oeps&" : gsub:lpeg|lpeg|lpeg + -- + -- 1021:0335:0287:0247 + + -- 10 * 1000 * "oeps< oeps> oeps& asfjhalskfjh alskfjh alskfjh alskfjh ;al J;LSFDJ" + -- + -- 1559:0257:0288:0190 (last one suggested by roberto) + + -- escaped = Cs((S("<&>") / xml.escapes + 1)^0) + -- escaped = Cs((S("<")/"<" + S(">")/">" + S("&")/"&" + 1)^0) + local normal = (1 - S("<&>"))^0 + local special = P("<")/"<" + P(">")/">" + P("&")/"&" + local escaped = Cs(normal * (special * normal)^0) + + -- 100 * 1000 * "oeps< oeps> oeps&" : gsub:lpeg == 0153:0280:0151:0080 (last one by roberto) + + -- unescaped = Cs((S("<")/"<" + S(">")/">" + S("&")/"&" + 1)^0) + -- unescaped = Cs((((P("&")/"") * (P("lt")/"<" + P("gt")/">" + P("amp")/"&") * (P(";")/"")) + 1)^0) + local normal = (1 - S"&")^0 + local special = P("<")/"<" + P(">")/">" + P("&")/"&" + local unescaped = Cs(normal * (special * normal)^0) + + -- 100 * 5000 * "oeps oeps oeps " : gsub:lpeg == 623:501 msec (short tags, less difference) + + local cleansed = Cs(((P("<") * (1-P(">"))^0 * P(">"))/"" + 1)^0) + + function xml.escaped (str) return escaped :match(str) end + function xml.unescaped(str) return unescaped:match(str) end + function xml.cleansed (str) return cleansed :match(str) end + +end + +function xml.join(t,separator,lastseparator) + if #t > 0 then + local result = { } + for k,v in pairs(t) do + result[k] = xml.tostring(v) + end + if lastseparator then + return concat(result,separator or "",1,#result-1) .. (lastseparator or "") .. result[#result] + else + return concat(result,separator) + end + else + return "" + end +end + + +--[[ldx-- +

We provide (at least here) two entity handlers. The more extensive +resolver consults a hash first, tries to convert to UTF next, +and finally calls a handler when defined. When this all fails, the +original entity is returned.
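A sketch of how the pieces combine (the entity name is made up; this assumes the unicode library is available, as in luatex):

xml.entities.nbsp = " "                                         -- a document independent entity mapping
local s = xml.resolve_text_entities("a&nbsp;b and &#x00E9;")    -- hash lookup, utf conversion, handler fallback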

+--ldx]]-- + +do if unicode and unicode.utf8 then + + xml.entities = xml.entities or { } -- xml.entities.handler == function + + function xml.entities.handler(e) + return format("[%s]",e) + end + + local char = unicode.utf8.char + + local function toutf(s) + return char(tonumber(s,16)) + end + + local entities = xml.entities -- global entities + + function utfize(root) + local d = root.dt + for k=1,#d do + local dk = d[k] + if type(dk) == "string" then + -- test prevents copying if no match + if dk:find("&#x.-;") then + d[k] = dk:gsub("&#x(.-);",toutf) + end + else + utfize(dk) + end + end + end + + xml.utfize = utfize + + local function resolve(e) -- hex encoded always first, just to avoid mkii fallbacks + if e:find("#x") then + return char(tonumber(e:sub(3),16)) + else + local ee = entities[e] + if ee then + return ee + else + local h = xml.entities.handler + return (h and h(e)) or "&" .. e .. ";" + end + end + end + + local function resolve_entities(root) + if not root.special or root.tg == "@rt@" then + local d = root.dt + for k=1,#d do + local dk = d[k] + if type(dk) == "string" then + if dk:find("&.-;") then + d[k] = dk:gsub("&(.-);",resolve) + end + else + resolve_entities(dk) + end + end + end + end + + xml.resolve_entities = resolve_entities + + function xml.utfize_text(str) + if str:find("&#") then + return (str:gsub("&#x(.-);",toutf)) + else + return str + end + end + + function xml.resolve_text_entities(str) -- maybe an lpeg. maybe resolve inline + if str:find("&") then + return (str:gsub("&(.-);",resolve)) + else + return str + end + end + + function xml.show_text_entities(str) + if str:find("&") then + return (str:gsub("&(.-);","[%1]")) + else + return str + end + end + + -- experimental, this will be done differently + + function xml.merge_entities(root) + local documententities = root.entities + local allentities = xml.entities + if documententities then + for k, v in pairs(documententities) do + allentities[k] = v + end + end + end + +end end + +-- xml.set_text_cleanup(xml.show_text_entities) +-- xml.set_text_cleanup(xml.resolve_text_entities) + +--~ xml.lshow("/../../../a/(b|c)[@d='e']/f") +--~ xml.lshow("/../../../a/!(b|c)[@d='e']/f") +--~ xml.lshow("/../../../a/!b[@d!='e']/f") + +--~ x = xml.convert([[ +--~ +--~ 01 +--~ 02 +--~ 03 +--~ OK +--~ 05 +--~ 06 +--~ ALSO OK +--~ +--~ ]]) + +--~ xml.trace_lpath = true + +--~ xml.xshow(xml.first(x,"b[position() > 2 and position() < 5 and text() == 'ok']")) +--~ xml.xshow(xml.first(x,"b[position() > 2 and position() < 5 and text() == upper('ok')]")) +--~ xml.xshow(xml.first(x,"b[@n=='03' or @n=='08']")) +--~ xml.xshow(xml.all (x,"b[number(@n)>2 and number(@n)<6]")) +--~ xml.xshow(xml.first(x,"b[find(text(),'ALSO')]")) + +--~ str = [[ +--~ +--~ +--~ my secret +--~ +--~ ]] + +--~ x = xml.convert([[ +--~ 0102xx03OK +--~ ]]) +--~ xml.xshow(xml.first(x,"b[tag(2) == 'x']")) +--~ xml.xshow(xml.first(x,"b[tag(1) == 'x']")) +--~ xml.xshow(xml.first(x,"b[tag(-1) == 'x']")) +--~ xml.xshow(xml.first(x,"b[tag(-2) == 'x']")) + +--~ print(xml.filter(x,"b/tag(2)")) +--~ print(xml.filter(x,"b/tag(1)")) + + +-- filename : l-utils.lua +-- comment : split off from luat-lib +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['l-utils'] = 1.001 + +if not utils then utils = { } end +if not utils.merger then utils.merger = { } end +if not utils.lua then utils.lua = { } end + +utils.merger.m_begin = "begin 
library merge" +utils.merger.m_end = "end library merge" +utils.merger.pattern = + "%c+" .. + "%-%-%s+" .. utils.merger.m_begin .. + "%c+(.-)%c+" .. + "%-%-%s+" .. utils.merger.m_end .. + "%c+" + +function utils.merger._self_fake_() + return + "-- " .. "created merged file" .. "\n\n" .. + "-- " .. utils.merger.m_begin .. "\n\n" .. + "-- " .. utils.merger.m_end .. "\n\n" +end + +function utils.report(...) + print(...) +end + +function utils.merger._self_load_(name) + local f, data = io.open(name), "" + if f then + data = f:read("*all") + f:close() + end + return data or "" +end + +function utils.merger._self_save_(name, data) + if data ~= "" then + local f = io.open(name,'w') + if f then + f:write(data) + f:close() + end + end +end + +function utils.merger._self_swap_(data,code) + if data ~= "" then + return (data:gsub(utils.merger.pattern, function(s) + return "\n\n" .. "-- "..utils.merger.m_begin .. "\n" .. code .. "\n" .. "-- "..utils.merger.m_end .. "\n\n" + end, 1)) + else + return "" + end +end + +function utils.merger._self_libs_(libs,list) + local result, f = { }, nil + if type(libs) == 'string' then libs = { libs } end + if type(list) == 'string' then list = { list } end + for _, lib in ipairs(libs) do + for _, pth in ipairs(list) do + local name = string.gsub(pth .. "/" .. lib,"\\","/") + f = io.open(name) + if f then + -- utils.report("merging library",name) + result[#result+1] = f:read("*all") + f:close() + list = { pth } -- speed up the search + break + else + -- utils.report("no library",name) + end + end + end + return table.concat(result, "\n\n") +end + +function utils.merger.selfcreate(libs,list,target) + if target then + utils.merger._self_save_( + target, + utils.merger._self_swap_( + utils.merger._self_fake_(), + utils.merger._self_libs_(libs,list) + ) + ) + end +end + +function utils.merger.selfmerge(name,libs,list,target) + utils.merger._self_save_( + target or name, + utils.merger._self_swap_( + utils.merger._self_load_(name), + utils.merger._self_libs_(libs,list) + ) + ) +end + +function utils.merger.selfclean(name) + utils.merger._self_save_( + name, + utils.merger._self_swap_( + utils.merger._self_load_(name), + "" + ) + ) +end + +utils.lua.compile_strip = true + +function utils.lua.compile(luafile, lucfile) + -- utils.report("compiling",luafile,"into",lucfile) + os.remove(lucfile) + local command = "-o " .. string.quote(lucfile) .. " " .. string.quote(luafile) + if utils.lua.compile_strip then + command = "-s " .. command + end + if os.spawn("texluac " .. command) == 0 then + return true + elseif os.spawn("luac " .. 
command) == 0 then + return true + else + return false + end +end + + + +-- filename : luat-lib.lua +-- comment : companion to luat-lib.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +if not versions then versions = { } end versions['luat-lib'] = 1.001 + +-- mostcode moved to the l-*.lua and other luat-*.lua files + +-- os / io + +os.setlocale(nil,nil) -- useless feature and even dangerous in luatex + +-- os.platform + +-- mswin|bccwin|mingw|cygwin windows +-- darwin|rhapsody|nextstep macosx +-- netbsd|unix unix +-- linux linux + +if not io.fileseparator then + if string.find(os.getenv("PATH"),";") then + io.fileseparator, io.pathseparator, os.platform = "\\", ";", os.type or "windows" + else + io.fileseparator, io.pathseparator, os.platform = "/" , ":", os.type or "unix" + end +end + +os.platform = os.platform or os.type or (io.pathseparator == ";" and "windows") or "unix" + +-- arg normalization +-- +-- for k,v in pairs(arg) do print(k,v) end + +-- environment + +if not environment then environment = { } end + +environment.ownbin = environment.ownbin or arg[-2] or arg[-1] or arg[0] or "luatex" + +local ownpath = nil -- we could use a metatable here + +function environment.ownpath() + if not ownpath then + for p in string.gmatch(os.getenv("PATH"),"[^"..io.pathseparator.."]+") do + local b = file.join(p,environment.ownbin) + if lfs.isfile(b..".exe") or lfs.isfile(b) then + ownpath = p + break + end + end + if not ownpath then ownpath = '.' end + end + return ownpath +end + +if arg and (arg[0] == 'luatex' or arg[0] == 'luatex.exe') and arg[1] == "--luaonly" then + arg[-1]=arg[0] arg[0]=arg[2] for k=3,#arg do arg[k-2]=arg[k] end arg[#arg]=nil arg[#arg]=nil +end + +environment.arguments = { } +environment.files = { } +environment.sorted_argument_keys = nil + +environment.platform = os.platform + +function environment.initialize_arguments(arg) + environment.arguments = { } + environment.files = { } + environment.sorted_argument_keys = nil + for index, argument in pairs(arg) do + if index > 0 then + local flag, value = argument:match("^%-+(.+)=(.-)$") + if flag then + environment.arguments[flag] = string.unquote(value or "") + else + flag = argument:match("^%-+(.+)") + if flag then + environment.arguments[flag] = true + else + environment.files[#environment.files+1] = argument + end + end + end + end + environment.ownname = environment.ownname or arg[0] or 'unknown.lua' +end + +function environment.showarguments() + for k,v in pairs(environment.arguments) do + print(k .. " : " .. tostring(v)) + end + if #environment.files > 0 then + print("files : " .. table.concat(environment.files, " ")) + end +end + +function environment.setargument(name,value) + environment.arguments[name] = value +end + +function environment.argument(name) + if environment.arguments[name] then + return environment.arguments[name] + else + if not environment.sorted_argument_keys then + environment.sorted_argument_keys = { } + for _,v in pairs(table.sortedkeys(environment.arguments)) do + table.insert(environment.sorted_argument_keys, "^" .. 
v) + end + end + for _,v in pairs(environment.sorted_argument_keys) do + if name:find(v) then + return environment.arguments[v:sub(2,#v)] + end + end + end + return nil +end + +function environment.split_arguments(separator) -- rather special, cut-off before separator + local done, before, after = false, { }, { } + for _,v in ipairs(environment.original_arguments) do + if not done and v == separator then + done = true + elseif done then + after[#after+1] = v + else + before[#before+1] = v + end + end + return before, after +end + +function environment.reconstruct_commandline(arg) + if not arg then arg = environment.original_arguments end + local result = { } + for _,a in ipairs(arg) do -- ipairs 1 .. #n + local kk, vv = a:match("^(%-+.-)=(.+)$") + if kk and vv then + if vv:find(" ") then + result[#result+1] = kk .. "=" .. string.quote(vv) + else + result[#result+1] = a + end + elseif a:find(" ") then + result[#result+1] = string.quote(a) + else + result[#result+1] = a + end + end + return table.join(result," ") +end + +if arg then + environment.initialize_arguments(arg) + environment.original_arguments = arg + arg = { } -- prevent duplicate handling +end + + +-- filename : luat-inp.lua +-- comment : companion to luat-lib.tex +-- author : Hans Hagen, PRAGMA-ADE, Hasselt NL +-- copyright: PRAGMA ADE / ConTeXt Development Team +-- license : see context related readme files + +-- This lib is multi-purpose and can be loaded again later on so that +-- additional functionality becomes available. We will split this +-- module in components when we're done with prototyping. + +-- TODO: os.getenv -> os.env[] +-- TODO: instances.[hashes,cnffiles,configurations,522] -> ipairs (alles check, sneller) +-- TODO: check escaping in find etc, too much, too slow + +-- This is the first code I wrote for LuaTeX, so it needs some cleanup. + +-- To be considered: hash key lowercase, first entry in table filename +-- (any case), rest paths (so no need for optimization). Or maybe a +-- separate table that matches lowercase names to mixed case when +-- present. In that case the lower() cases can go away. I will do that +-- only when we run into problems with names ... well ... Iwona-Regular. + +-- Beware, loading and saving is overloaded in luat-tmp! 
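Editorial aside, not part of the original patch: the argument normalization above maps "--key=value" options into environment.arguments (unquoting the value), bare "--key" flags to true, and everything without a leading dash into environment.files. A minimal sketch of the expected behaviour, using a made-up command line and assuming the merged string helpers (string.unquote) from earlier in this file are loaded:

-- illustrative only; the sample values are invented
local fake_arg = { [0] = "luatools", "--verbose", "--pattern=map", "somefile.tex" }
environment.initialize_arguments(fake_arg)
print(environment.arguments["verbose"]) -- true
print(environment.arguments["pattern"]) -- map
print(environment.files[1])             -- somefile.tex
print(environment.argument("pattern"))  -- map (direct hit, no prefix scan needed)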
+ +if not versions then versions = { } end versions['luat-inp'] = 1.001 +if not environment then environment = { } end +if not file then file = { } end + +if environment.aleph_mode == nil then environment.aleph_mode = true end -- temp hack + +if not input then input = { } end +if not input.suffixes then input.suffixes = { } end +if not input.formats then input.formats = { } end +if not input.aux then input.aux = { } end + +if not input.suffixmap then input.suffixmap = { } end + +if not input.locators then input.locators = { } end -- locate databases +if not input.hashers then input.hashers = { } end -- load databases +if not input.generators then input.generators = { } end -- generate databases +if not input.filters then input.filters = { } end -- conversion filters + +local format = string.format + +input.locators.notfound = { nil } +input.hashers.notfound = { nil } +input.generators.notfound = { nil } + +input.cacheversion = '1.0.1' +input.banner = nil +input.verbose = false +input.debug = false +input.cnfname = 'texmf.cnf' +input.luaname = 'texmfcnf.lua' +input.lsrname = 'ls-R' +input.luasuffix = '.tma' +input.lucsuffix = '.tmc' + +-- we use a cleaned up list / format=any is a wildcard, as is *name + +input.formats['afm'] = 'AFMFONTS' input.suffixes['afm'] = { 'afm' } +input.formats['enc'] = 'ENCFONTS' input.suffixes['enc'] = { 'enc' } +input.formats['fmt'] = 'TEXFORMATS' input.suffixes['fmt'] = { 'fmt' } +input.formats['map'] = 'TEXFONTMAPS' input.suffixes['map'] = { 'map' } +input.formats['mp'] = 'MPINPUTS' input.suffixes['mp'] = { 'mp' } +input.formats['ocp'] = 'OCPINPUTS' input.suffixes['ocp'] = { 'ocp' } +input.formats['ofm'] = 'OFMFONTS' input.suffixes['ofm'] = { 'ofm', 'tfm' } +input.formats['otf'] = 'OPENTYPEFONTS' input.suffixes['otf'] = { 'otf' } -- 'ttf' +input.formats['opl'] = 'OPLFONTS' input.suffixes['opl'] = { 'opl' } +input.formats['otp'] = 'OTPINPUTS' input.suffixes['otp'] = { 'otp' } +input.formats['ovf'] = 'OVFFONTS' input.suffixes['ovf'] = { 'ovf', 'vf' } +input.formats['ovp'] = 'OVPFONTS' input.suffixes['ovp'] = { 'ovp' } +input.formats['tex'] = 'TEXINPUTS' input.suffixes['tex'] = { 'tex' } +input.formats['tfm'] = 'TFMFONTS' input.suffixes['tfm'] = { 'tfm' } +input.formats['ttf'] = 'TTFONTS' input.suffixes['ttf'] = { 'ttf', 'ttc' } +input.formats['pfb'] = 'T1FONTS' input.suffixes['pfb'] = { 'pfb', 'pfa' } +input.formats['vf'] = 'VFFONTS' input.suffixes['vf'] = { 'vf' } + +input.formats['fea'] = 'FONTFEATURES' input.suffixes['fea'] = { 'fea' } +input.formats['cid'] = 'FONTCIDMAPS' input.suffixes['cid'] = { 'cid', 'cidmap' } + +input.formats ['texmfscripts'] = 'TEXMFSCRIPTS' -- new +input.suffixes['texmfscripts'] = { 'rb', 'pl', 'py' } -- 'lua' + +input.formats ['lua'] = 'LUAINPUTS' -- new +input.suffixes['lua'] = { 'lua', 'luc', 'tma', 'tmc' } + +-- here we catch a few new thingies (todo: add these paths to context.tmf) +-- +-- FONTFEATURES = .;$TEXMF/fonts/fea// +-- FONTCIDMAPS = .;$TEXMF/fonts/cid// + +function input.checkconfigdata(instance) -- not yet ok, no time for debugging now + local function fix(varname,default) + local proname = varname .. "." .. instance.progname or "crap" + local p = instance.environment[proname] + local v = instance.environment[varname] + if not ((p and p ~= "") or (v and v ~= "")) then + instance.variables[varname] = default -- or environment? 
+ end + end + fix("LUAINPUTS" , ".;$TEXINPUTS;$TEXMFSCRIPTS") + fix("FONTFEATURES", ".;$TEXMF/fonts/fea//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS") + fix("FONTCIDMAPS" , ".;$TEXMF/fonts/cid//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS") +end + +-- backward compatible ones + +input.alternatives = { } + +input.alternatives['map files'] = 'map' +input.alternatives['enc files'] = 'enc' +input.alternatives['cid files'] = 'cid' +input.alternatives['fea files'] = 'fea' +input.alternatives['opentype fonts'] = 'otf' +input.alternatives['truetype fonts'] = 'ttf' +input.alternatives['truetype collections'] = 'ttc' +input.alternatives['type1 fonts'] = 'pfb' + +-- obscure ones + +input.formats ['misc fonts'] = '' +input.suffixes['misc fonts'] = { } + +input.formats ['sfd'] = 'SFDFONTS' +input.suffixes ['sfd'] = { 'sfd' } +input.alternatives['subfont definition files'] = 'sfd' + +function input.reset() + + local instance = { } + + instance.rootpath = '' + instance.treepath = '' + instance.progname = environment.progname or 'context' + instance.engine = environment.engine or 'luatex' + instance.format = '' + instance.environment = { } + instance.variables = { } + instance.expansions = { } + instance.files = { } + instance.remap = { } + instance.configuration = { } + instance.setup = { } + instance.order = { } + instance.found = { } + instance.foundintrees = { } + instance.kpsevars = { } + instance.hashes = { } + instance.cnffiles = { } + instance.luafiles = { } + instance.lists = { } + instance.remember = true + instance.diskcache = true + instance.renewcache = false + instance.scandisk = true + instance.cachepath = nil + instance.loaderror = false + instance.smallcache = false + instance.savelists = true + instance.cleanuppaths = true + instance.allresults = false + instance.pattern = nil -- lists + instance.kpseonly = false -- lists + instance.cachefile = 'tmftools' + instance.loadtime = 0 + instance.starttime = 0 + instance.stoptime = 0 + instance.validfile = function(path,name) return true end + instance.data = { } -- only for loading + instance.force_suffixes = true + instance.dummy_path_expr = "^!*unset/*$" + instance.fakepaths = { } + instance.lsrmode = false + + if os.env then + -- store once, freeze and faster + for k,v in pairs(os.env) do + instance.environment[k] = input.bare_variable(v) + end + else + -- we will access os.env frequently + for k,v in pairs({'HOME','TEXMF','TEXMFCNF'}) do + local e = os.getenv(v) + if e then + -- input.report("setting",v,"to",input.bare_variable(e)) + instance.environment[v] = input.bare_variable(e) + end + end + end + + -- cross referencing + + for k, v in pairs(input.suffixes) do + for _, vv in pairs(v) do + if vv then + input.suffixmap[vv] = k + end + end + end + + return instance + +end + +function input.reset_hashes(instance) + instance.lists = { } + instance.found = { } +end + +function input.bare_variable(str) -- assumes str is a string + -- return string.gsub(string.gsub(string.gsub(str,"%s+$",""),'^"(.+)"$',"%1"),"^'(.+)'$","%1") + return (str:gsub("\s*([\"\']?)(.+)%1\s*", "%2")) +end + +if texio then + input.log = texio.write_nl +else + input.log = print +end + +function input.simple_logger(kind, name) + if name and name ~= "" then + if input.banner then + input.log(input.banner..kind..": "..name) + else + input.log("<<"..kind..": "..name..">>") + end + else + if input.banner then + input.log(input.banner..kind..": no name") + else + input.log("<<"..kind..": no name>>") + end + end +end + +function input.dummy_logger() +end + +function 
input.settrace(n) + input.trace = tonumber(n or 0) + if input.trace > 0 then + input.logger = input.simple_logger + input.verbose = true + else + input.logger = function() end + end +end + +function input.report(...) -- inefficient + if input.verbose then + if input.banner then + input.log(input.banner .. table.concat({...},' ')) + elseif input.logmode() == 'xml' then + input.log(""..table.concat({...},' ').."") + else + input.log("<<"..table.concat({...},' ')..">>") + end + end +end + +function input.reportlines(str) + if type(str) == "string" then + str = str:split("\n") + end + for _,v in pairs(str) do input.report(v) end +end + +input.settrace(tonumber(os.getenv("MTX.INPUT.TRACE") or os.getenv("MTX_INPUT_TRACE") or input.trace or 0)) + +-- These functions can be used to test the performance, especially +-- loading the database files. + +do + local clock = os.gettimeofday or os.clock + + function input.starttiming(instance) + if instance then + instance.starttime = clock() + if not instance.loadtime then + instance.loadtime = 0 + end + end + end + + function input.stoptiming(instance, report) + if instance then + local starttime = instance.starttime + if starttime then + local stoptime = clock() + local loadtime = stoptime - starttime + instance.stoptime = stoptime + instance.loadtime = instance.loadtime + loadtime + if report then + input.report('load time', format("%0.3f",loadtime)) + end + return loadtime + end + end + return 0 + end + +end + +function input.elapsedtime(instance) + return format("%0.3f",(instance and instance.loadtime) or 0) +end + +function input.report_loadtime(instance) + if instance then + input.report('total load time', input.elapsedtime(instance)) + end +end + +input.loadtime = input.elapsedtime + +function input.env(instance,key) + return instance.environment[key] or input.osenv(instance,key) +end + +function input.osenv(instance,key) + local ie = instance.environment + local value = ie[key] + if value == nil then + -- local e = os.getenv(key) + local e = os.env[key] + if e == nil then + -- value = "" -- false + else + value = input.bare_variable(e) + end + ie[key] = value + end + return value or "" +end + +-- we follow a rather traditional approach: +-- +-- (1) texmf.cnf given in TEXMFCNF +-- (2) texmf.cnf searched in TEXMF/web2c +-- +-- for the moment we don't expect a configuration file in a zip + +function input.identify_cnf(instance) + -- we no longer support treepath and rootpath (was handy for testing); + -- also we now follow the stupid route: if not set then just assume *one* + -- cnf file under texmf (i.e. distribution) + if #instance.cnffiles == 0 then + if input.env(instance,'TEXMFCNF') == "" then + local ownpath = environment.ownpath() or "." + if ownpath then + -- beware, this is tricky on my own system because at that location I do have + -- the raw tree that ends up in the zip; i.e. 
I cannot test this kind of mess + local function locate(filename,list) + local ownroot = input.normalize_name(file.join(ownpath,"../..")) + if not lfs.isdir(file.join(ownroot,"texmf")) then + ownroot = input.normalize_name(file.join(ownpath,"..")) + if not lfs.isdir(file.join(ownroot,"texmf")) then + input.verbose = true + input.report("error", "unable to identify cnf file") + return + end + end + local texmfcnf = file.join(ownroot,"texmf-local/web2c",filename) -- for minimals and myself + if not lfs.isfile(texmfcnf) then + texmfcnf = file.join(ownroot,"texmf/web2c",filename) + if not lfs.isfile(texmfcnf) then + input.verbose = true + input.report("error", "unable to locate",filename) + return + end + end + table.insert(list,texmfcnf) + local ie = instance.environment + if not ie['SELFAUTOPARENT'] then ie['SELFAUTOPARENT'] = ownroot end + if not ie['TEXMFCNF'] then ie['TEXMFCNF'] = file.dirname(texmfcnf) end + end + locate(input.luaname,instance.luafiles) + locate(input.cnfname,instance.cnffiles) + if #instance.luafiles == 0 and instance.cnffiles == 0 then + input.verbose = true + input.report("error", "unable to locate",filename) + os.exit() + end + -- here we also assume then TEXMF is set in the distribution, if this trickery is + -- used in the minimals, then users who don't use setuptex are on their own with + -- regards to extra trees + else + input.verbose = true + input.report("error", "unable to identify own path") + os.exit() + end + else + local t = input.split_path(input.env(instance,'TEXMFCNF')) + t = input.aux.expanded_path(instance,t) + input.aux.expand_vars(instance,t) + local function locate(filename,list) + for _,v in ipairs(t) do + local texmfcnf = input.normalize_name(file.join(v,filename)) + if lfs.isfile(texmfcnf) then + table.insert(list,texmfcnf) + end + end + end + locate(input.luaname,instance.luafiles) + locate(input.cnfname,instance.cnffiles) + end + end +end + +function input.load_cnf(instance) + local function loadoldconfigdata() + for _, fname in ipairs(instance.cnffiles) do + input.aux.load_cnf(instance,fname) + end + end + -- instance.cnffiles contain complete names now ! 
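  -- editorial note, not part of the original patch: at this point identify_cnf has
  -- already filled instance.cnffiles with complete paths, e.g. with a hypothetical
  -- tree "/opt/tl/texmf/web2c/texmf.cnf"; the three file.dirname() steps below then
  -- reduce the first entry to the tree root "/opt/tl", which is stored as
  -- SELFAUTOPARENT before the cnf data itself is loaded (possibly from the cache).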
+ if #instance.cnffiles == 0 then + input.report("no cnf files found (TEXMFCNF may not be set/known)") + else + instance.rootpath = instance.cnffiles[1] + for k,fname in ipairs(instance.cnffiles) do + instance.cnffiles[k] = input.normalize_name(fname:gsub("\\",'/')) + end + for i=1,3 do + instance.rootpath = file.dirname(instance.rootpath) + end + instance.rootpath = input.normalize_name(instance.rootpath) + instance.environment['SELFAUTOPARENT'] = instance.rootpath -- just to be sure + if instance.lsrmode then + loadoldconfigdata() + elseif instance.diskcache and not instance.renewcache then + input.loadoldconfig(instance,instance.cnffiles) + if instance.loaderror then + loadoldconfigdata() + input.saveoldconfig(instance) + end + else + loadoldconfigdata() + if instance.renewcache then + input.saveoldconfig(instance) + end + end + input.aux.collapse_cnf_data(instance) + end + input.checkconfigdata(instance) +end + +function input.load_lua(instance) + if #instance.luafiles == 0 then + -- yet harmless + else + instance.rootpath = instance.luafiles[1] + for k,fname in ipairs(instance.luafiles) do + instance.luafiles[k] = input.normalize_name(fname:gsub("\\",'/')) + end + for i=1,3 do + instance.rootpath = file.dirname(instance.rootpath) + end + instance.rootpath = input.normalize_name(instance.rootpath) + instance.environment['SELFAUTOPARENT'] = instance.rootpath -- just to be sure + input.loadnewconfig(instance) + input.aux.collapse_cnf_data(instance) + end + input.checkconfigdata(instance) +end + +function input.aux.collapse_cnf_data(instance) -- potential optmization: pass start index (setup and configuration are shared) + for _,c in ipairs(instance.order) do + for k,v in pairs(c) do + if not instance.variables[k] then + if instance.environment[k] then + instance.variables[k] = instance.environment[k] + else + instance.kpsevars[k] = true + instance.variables[k] = input.bare_variable(v) + end + end + end + end +end + +function input.aux.load_cnf(instance,fname) + fname = input.clean_path(fname) + local lname = fname:gsub("%.%a+$",input.luasuffix) + local f = io.open(lname) + if f then -- this will go + f:close() + local dname = file.dirname(fname) + if not instance.configuration[dname] then + input.aux.load_configuration(instance,dname,lname) + instance.order[#instance.order+1] = instance.configuration[dname] + end + else + f = io.open(fname) + if f then + input.report("loading", fname) + local line, data, n, k, v + local dname = file.dirname(fname) + if not instance.configuration[dname] then + instance.configuration[dname] = { } + instance.order[#instance.order+1] = instance.configuration[dname] + end + local data = instance.configuration[dname] + while true do + local line, n = f:read(), 0 + if line then + while true do -- join lines + line, n = line:gsub("\\%s*$", "") + if n > 0 then + line = line .. 
f:read() + else + break + end + end + if not line:find("^[%%#]") then + local k, v = (line:gsub("%s*%%.*$","")):match("%s*(.-)%s*=%s*(.-)%s*$") + if k and v and not data[k] then + data[k] = (v:gsub("[%%#].*",'')):gsub("~", "$HOME") + instance.kpsevars[k] = true + end + end + else + break + end + end + f:close() + else + input.report("skipping", fname) + end + end +end + +-- database loading + +function input.load_hash(instance) + input.locatelists(instance) + if instance.lsrmode then + input.loadlists(instance) + elseif instance.diskcache and not instance.renewcache then + input.loadfiles(instance) + if instance.loaderror then + input.loadlists(instance) + input.savefiles(instance) + end + else + input.loadlists(instance) + if instance.renewcache then + input.savefiles(instance) + end + end +end + +function input.aux.append_hash(instance,type,tag,name) + input.logger("= hash append",tag) + table.insert(instance.hashes, { ['type']=type, ['tag']=tag, ['name']=name } ) +end + +function input.aux.prepend_hash(instance,type,tag,name) + input.logger("= hash prepend",tag) + table.insert(instance.hashes, 1, { ['type']=type, ['tag']=tag, ['name']=name } ) +end + +function input.aux.extend_texmf_var(instance,specification) -- crap + if instance.environment['TEXMF'] then + input.report("extending environment variable TEXMF with", specification) + instance.environment['TEXMF'] = instance.environment['TEXMF']:gsub("^%{", function() + return "{" .. specification .. "," + end) + elseif instance.variables['TEXMF'] then + input.report("extending configuration variable TEXMF with", specification) + instance.variables['TEXMF'] = instance.variables['TEXMF']:gsub("^%{", function() + return "{" .. specification .. "," + end) + else + input.report("setting configuration variable TEXMF to", specification) + instance.variables['TEXMF'] = "{" .. specification .. "}" + end + if instance.variables['TEXMF']:find("%,") and not instance.variables['TEXMF']:find("^%{") then + input.report("adding {} to complex TEXMF variable, best do that yourself") + instance.variables['TEXMF'] = "{" .. instance.variables['TEXMF'] .. "}" + end + input.expand_variables(instance) + input.reset_hashes(instance) +end + +-- locators + +function input.locatelists(instance) + for _, path in pairs(input.simplified_list(input.expansion(instance,'TEXMF'))) do + path = file.collapse_path(path) + input.report("locating list of",path) + input.locatedatabase(instance,input.normalize_name(path)) + end +end + +function input.locatedatabase(instance,specification) + return input.methodhandler('locators', instance, specification) +end + +function input.locators.tex(instance,specification) + if specification and specification ~= '' and lfs.isdir(specification) then + input.logger('! tex locator', specification..' found') + input.aux.append_hash(instance,'file',specification,filename) + else + input.logger('? tex locator', specification..' 
not found') + end +end + +-- hashers + +function input.hashdatabase(instance,tag,name) + return input.methodhandler('hashers',instance,tag,name) +end + +function input.loadfiles(instance) + instance.loaderror = false + instance.files = { } + if not instance.renewcache then + for _, hash in ipairs(instance.hashes) do + input.hashdatabase(instance,hash.tag,hash.name) + if instance.loaderror then break end + end + end +end + +function input.hashers.tex(instance,tag,name) + input.aux.load_files(instance,tag) +end + +-- generators: + +function input.loadlists(instance) + for _, hash in ipairs(instance.hashes) do + input.generatedatabase(instance,hash.tag) + end +end + +function input.generatedatabase(instance,specification) + return input.methodhandler('generators', instance, specification) +end + +do + + local weird = lpeg.anywhere(lpeg.S("~`!#$%^&*()={}[]:;\"\'||<>,?\n\r\t")) + + function input.generators.tex(instance,specification) + local tag = specification + if not instance.lsrmode and lfs and lfs.dir then + input.report("scanning path",specification) + instance.files[tag] = { } + local files = instance.files[tag] + local n, m, r = 0, 0, 0 + local spec = specification .. '/' + local attributes = lfs.attributes + local directory = lfs.dir + local small = instance.smallcache + local function action(path) + local mode, full + if path then + full = spec .. path .. '/' + else + full = spec + end + for name in directory(full) do + if name:find("^%.") then + -- skip + -- elseif name:find("[%~%`%!%#%$%%%^%&%*%(%)%=%{%}%[%]%:%;\"\'%|%<%>%,%?\n\r\t]") then -- too much escaped + elseif weird:match(name) then + -- texio.write_nl("skipping " .. name) + -- skip + else + mode = attributes(full..name,'mode') + if mode == "directory" then + m = m + 1 + if path then + action(path..'/'..name) + else + action(name) + end + elseif path and mode == 'file' then + n = n + 1 + local f = files[name] + if f then + if not small then + if type(f) == 'string' then + files[name] = { f, path } + else + f[#f+1] = path + end + end + else + files[name] = path + local lower = name:lower() + if name ~= lower then + files["remap:"..lower] = name + r = r + 1 + end + end + end + end + end + end + action() + input.report(format("%s files found on %s directories with %s uppercase remappings",n,m,r)) + else + local fullname = file.join(specification,input.lsrname) + local path = '.' + local f = io.open(fullname) + if f then + instance.files[tag] = { } + local files = instance.files[tag] + local small = instance.smallcache + input.report("loading lsr file",fullname) + -- for line in f:lines() do -- much slower then the next one + for line in (f:read("*a")):gmatch("(.-)\n") do + if line:find("^[%a%d]") then + local fl = files[line] + if fl then + if not small then + if type(fl) == 'string' then + files[line] = { fl, path } -- table + else + fl[#fl+1] = path + end + end + else + files[line] = path -- string + local lower = line:lower() + if line ~= lower then + files["remap:"..lower] = line + end + end + else + path = line:match("%.%/(.-)%:$") or path -- match could be nil due to empty line + end + end + f:close() + end + end + end + +end + +-- savers, todo + +function input.savefiles(instance) + input.aux.save_data(instance, 'files', function(k,v) + return instance.validfile(k,v) -- path, name + end) +end + +-- A config (optionally) has the paths split in tables. Internally +-- we join them and split them after the expansion has taken place. This +-- is more convenient. 
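Editorial aside, not part of the original patch: as the comment above says, a configuration chunk may carry a path list either as one string or as a table of strings; the split/join helpers below normalize between those two forms. A small sketch with invented values:

-- illustrative only; variable names and values are made up
local chunk = {
  TEXMF     = "{$TEXMFLOCAL,$TEXMFMAIN}",              -- plain string: left untouched
  TEXINPUTS = { ".", "$TEXMF/tex/{context,plain}//" }, -- table form: collapsed by joinconfig
}
-- input.joinconfig(instance) turns the table form into single strings after loading,
-- so that expansion always works on strings; input.splitconfig(instance) restores the
-- table form just before the configuration is saved again.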
+ +function input.splitconfig(instance) + for i,c in ipairs(instance) do + for k,v in pairs(c) do + if type(v) == 'string' then + local t = file.split_path(v) + if #t > 1 then + c[k] = t + end + end + end + end +end +function input.joinconfig(instance) + for i,c in ipairs(instance.order) do + for k,v in pairs(c) do + if type(v) == 'table' then + c[k] = file.join_path(v) + end + end + end +end +function input.split_path(str) + if type(str) == 'table' then + return str + else + return file.split_path(str) + end +end +function input.join_path(str) + if type(str) == 'table' then + return file.join_path(str) + else + return str + end +end + +function input.splitexpansions(instance) + for k,v in pairs(instance.expansions) do + local t, h = { }, { } + for _,vv in pairs(file.split_path(v)) do + if vv ~= "" and not h[vv] then + t[#t+1] = vv + h[vv] = true + end + end + if #t > 1 then + instance.expansions[k] = t + else + instance.expansions[k] = t[1] + end + end +end + +-- end of split/join code + +function input.saveoldconfig(instance) + input.splitconfig(instance) + input.aux.save_data(instance, 'configuration', nil) + input.joinconfig(instance) +end + +input.configbanner = [[ +-- This is a Luatex configuration file created by 'luatools.lua' or +-- 'luatex.exe' directly. For comment, suggestions and questions you can +-- contact the ConTeXt Development Team. This configuration file is +-- not copyrighted. [HH & TH] +]] + +function input.serialize(files) + -- This version is somewhat optimized for the kind of + -- tables that we deal with, so it's much faster than + -- the generic serializer. This makes sense because + -- luatools and mtxtools are called frequently. Okay, + -- we pay a small price for properly tabbed tables. + local t = { } + local concat = table.concat + local sorted = table.sortedkeys + local function dump(k,v,m) + if type(v) == 'string' then + return m .. "['" .. k .. "']='" .. v .. "'," + elseif #v == 1 then + return m .. "['" .. k .. "']='" .. v[1] .. "'," + else + return m .. "['" .. k .. "']={'" .. concat(v,"','").. "'}," + end + end + t[#t+1] = "return {" + if instance.sortdata then + for _, k in pairs(sorted(files)) do + local fk = files[k] + if type(fk) == 'table' then + t[#t+1] = "\t['" .. k .. "']={" + for _, kk in pairs(sorted(fk)) do + t[#t+1] = dump(kk,fk[kk],"\t\t") + end + t[#t+1] = "\t}," + else + t[#t+1] = dump(k,fk,"\t") + end + end + else + for k, v in pairs(files) do + if type(v) == 'table' then + t[#t+1] = "\t['" .. k .. "']={" + for kk,vv in pairs(v) do + t[#t+1] = dump(kk,vv,"\t\t") + end + t[#t+1] = "\t}," + else + t[#t+1] = dump(k,v,"\t") + end + end + end + t[#t+1] = "}" + return concat(t,"\n") +end + +if not texmf then texmf = {} end -- no longer needed, at least not here + +function input.aux.save_data(instance, dataname, check, makename) -- untested without cache overload + for cachename, files in pairs(instance[dataname]) do + local name = (makename or file.join)(cachename,dataname) + local luaname, lucname = name .. input.luasuffix, name .. input.lucsuffix + input.report("preparing " .. dataname .. " for", luaname) + for k, v in pairs(files) do + if not check or check(v,k) then -- path, name + if type(v) == "table" and #v == 1 then + files[k] = v[1] + end + else + files[k] = nil -- false + end + end + local data = { + type = dataname, + root = cachename, + version = input.cacheversion, + date = os.date("%Y-%m-%d"), + time = os.date("%H:%M:%S"), + content = files, + } + local f = io.open(luaname,'w') + if f then + input.report("saving " .. 
dataname .. " in", luaname) + f:write(input.serialize(data)) + f:close() + input.report("compiling " .. dataname .. " to", lucname) + if not utils.lua.compile(luaname,lucname) then + input.report("compiling failed for " .. dataname .. ", deleting file " .. lucname) + os.remove(lucname) + end + else + input.report("unable to save " .. dataname .. " in " .. name..input.luasuffix) + end + end +end + +function input.aux.load_data(instance,pathname,dataname,filename,makename) -- untested without cache overload + filename = ((not filename or (filename == "")) and dataname) or filename + filename = (makename and makename(dataname,filename)) or file.join(pathname,filename) + local blob = loadfile(filename .. input.lucsuffix) or loadfile(filename .. input.luasuffix) + if blob then + local data = blob() + if data and data.content and data.type == dataname and data.version == input.cacheversion then + input.report("loading",dataname,"for",pathname,"from",filename) + instance[dataname][pathname] = data.content + else + input.report("skipping",dataname,"for",pathname,"from",filename) + instance[dataname][pathname] = { } + instance.loaderror = true + end + else + input.report("skipping",dataname,"for",pathname,"from",filename) + end +end + +-- some day i'll use the nested approach, but not yet (actually we even drop +-- engine/progname support since we have only luatex now) +-- +-- first texmfcnf.lua files are located, next the cached texmf.cnf files +-- +-- return { +-- TEXMFBOGUS = 'effe checken of dit werkt', +-- } + +function input.aux.load_texmfcnf(instance,dataname,pathname) + local filename = file.join(pathname,input.luaname) + local blob = loadfile(filename) + if blob then + local data = blob() + if data then + input.report("loading","configuration file",filename) + if true then + -- flatten to variable.progname + local t = { } + for k, v in pairs(data) do -- v = progname + if type(v) == "string" then + t[k] = v + else + for kk, vv in pairs(v) do -- vv = variable + if type(vv) == "string" then + t[vv.."."..v] = kk + end + end + end + end + instance[dataname][pathname] = t + else + instance[dataname][pathname] = data + end + else + input.report("skipping","configuration file",filename) + instance[dataname][pathname] = { } + instance.loaderror = true + end + else + input.report("skipping","configuration file",filename) + end +end + +function input.aux.load_configuration(instance,dname,lname) + input.aux.load_data(instance,dname,'configuration',lname and file.basename(lname)) +end +function input.aux.load_files(instance,tag) + input.aux.load_data(instance,tag,'files') +end + +function input.resetconfig(instance) + instance.configuration, instance.setup, instance.order, instance.loaderror = { }, { }, { }, false +end + +function input.loadnewconfig(instance) + for _, cnf in ipairs(instance.luafiles) do + local dname = file.dirname(cnf) + input.aux.load_texmfcnf(instance,'setup',dname) + instance.order[#instance.order+1] = instance.setup[dname] + if instance.loaderror then break end + end +end + +function input.loadoldconfig(instance) + if not instance.renewcache then + for _, cnf in ipairs(instance.cnffiles) do + local dname = file.dirname(cnf) + input.aux.load_configuration(instance,dname) + instance.order[#instance.order+1] = instance.configuration[dname] + if instance.loaderror then break end + end + end + input.joinconfig(instance) +end + +function input.expand_variables(instance) + instance.expansions = { } +--~ instance.environment['SELFAUTOPARENT'] = instance.environment['SELFAUTOPARENT'] or 
instance.rootpath + if instance.engine ~= "" then instance.environment['engine'] = instance.engine end + if instance.progname ~= "" then instance.environment['progname'] = instance.progname end + for k,v in pairs(instance.environment) do + local a, b = k:match("^(%a+)%_(.*)%s*$") + if a and b then + instance.expansions[a..'.'..b] = v + else + instance.expansions[k] = v + end + end + for k,v in pairs(instance.environment) do -- move environment to expansions + if not instance.expansions[k] then instance.expansions[k] = v end + end + for k,v in pairs(instance.variables) do -- move variables to expansions + if not instance.expansions[k] then instance.expansions[k] = v end + end + while true do + local busy = false + for k,v in pairs(instance.expansions) do + local s, n = v:gsub("%$([%a%d%_%-]+)", function(a) + busy = true + return instance.expansions[a] or input.env(instance,a) + end) + local s, m = s:gsub("%$%{([%a%d%_%-]+)%}", function(a) + busy = true + return instance.expansions[a] or input.env(instance,a) + end) + if n > 0 or m > 0 then + instance.expansions[k]= s + end + end + if not busy then break end + end + local homedir = + instance.environment[(os.type == "windows" and 'USERPROFILE') or 'HOME'] or '~' + for k,v in pairs(instance.expansions) do + v = v:gsub("^~", homedir) + instance.expansions[k] = v:gsub("\\", '/') + end +end + +function input.aux.expand_vars(instance,lst) -- simple vars + for k,v in pairs(lst) do + lst[k] = v:gsub("%$([%a%d%_%-]+)", function(a) + return instance.variables[a] or input.env(instance,a) + end) + end +end + +function input.aux.expanded_var(instance,var) -- simple vars + return var:gsub("%$([%a%d%_%-]+)", function(a) + return instance.variables[a] or input.env(instance,a) + end) +end + +function input.aux.entry(instance,entries,name) + if name and (name ~= "") then + name = name:gsub('%$','') + local result = entries[name..'.'..instance.progname] or entries[name] + if result then + return result + else + result = input.env(instance,name) + if result then + instance.variables[name] = result + input.expand_variables(instance) + return instance.expansions[name] or "" + end + end + end + return "" +end +function input.variable(instance,name) + return input.aux.entry(instance,instance.variables,name) +end +function input.expansion(instance,name) + return input.aux.entry(instance,instance.expansions,name) +end + +function input.aux.is_entry(instance,entries,name) + if name and name ~= "" then + name = name:gsub('%$','') + return (entries[name..'.'..instance.progname] or entries[name]) ~= nil + else + return false + end +end + +function input.is_variable(instance,name) + return input.aux.is_entry(instance,instance.variables,name) +end +function input.is_expansion(instance,name) + return input.aux.is_entry(instance,instance.expansions,name) +end + +function input.simplified_list(str) + if type(str) == 'table' then + return str -- troubles ; ipv , in texmf + elseif str == '' then + return { } + else + local t = { } + for _,v in ipairs(string.splitchr(str:gsub("^\{(.+)\}$","%1"),",")) do + t[#t+1] = (v:gsub("^[%!]*(.+)[%/\\]*$","%1")) + end + return t + end +end + +function input.unexpanded_path_list(instance,str) + local pth = input.variable(instance,str) + local lst = input.split_path(pth) + return input.aux.expanded_path(instance,lst) +end +function input.unexpanded_path(instance,str) + return file.join_path(input.unexpanded_path_list(instance,str)) +end + +do + local done = { } + + function input.reset_extra_path(instance) + local ep = instance.extra_paths + 
if not ep then + ep, done = { }, { } + instance.extra_paths = ep + elseif #ep > 0 then + instance.lists, done = { }, { } + end + end + + function input.register_extra_path(instance,paths,subpaths) + local ep = instance.extra_paths or { } + local n = #ep + if paths and paths ~= "" then + if subpaths and subpaths ~= "" then + for p in paths:gmatch("[^,]+") do + -- we gmatch each step again, not that fast, but used seldom + for s in subpaths:gmatch("[^,]+") do + local ps = p .. "/" .. s + if not done[ps] then + ep[#ep+1] = input.clean_path(ps) + done[ps] = true + end + end + end + else + for p in paths:gmatch("[^,]+") do + if not done[p] then + ep[#ep+1] = input.clean_path(p) + done[p] = true + end + end + end + elseif subpaths and subpaths ~= "" then + for i=1,n do + -- we gmatch each step again, not that fast, but used seldom + for s in subpaths:gmatch("[^,]+") do + local ps = ep[i] .. "/" .. s + if not done[ps] then + ep[#ep+1] = input.clean_path(ps) + done[ps] = true + end + end + end + end + if #ep > 0 then + instance.extra_paths = ep -- register paths + end + if #ep > n then + instance.lists = { } -- erase the cache + end + end + +end + +function input.expanded_path_list(instance,str) + local function made_list(list) + local ep = instance.extra_paths + if not ep or #ep == 0 then + return list + else + local done, new = { }, { } + -- honour . .. ../.. but only when at the start + for k, v in ipairs(list) do + if not done[v] then + if v:find("^[%.%/]$") then + done[v] = true + new[#new+1] = v + else + break + end + end + end + -- first the extra paths + for k, v in ipairs(ep) do + if not done[v] then + done[v] = true + new[#new+1] = v + end + end + -- next the formal paths + for k, v in ipairs(list) do + if not done[v] then + done[v] = true + new[#new+1] = v + end + end + return new + end + end + if not str then + return ep or { } + elseif instance.savelists then + -- engine+progname hash + str = str:gsub("%$","") + if not instance.lists[str] then -- cached + local lst = made_list(input.split_path(input.expansion(instance,str))) + instance.lists[str] = input.aux.expanded_path(instance,lst) + end + return instance.lists[str] + else + local lst = input.split_path(input.expansion(instance,str)) + return made_list(input.aux.expanded_path(instance,lst)) + end +end + +function input.expand_path(instance,str) + return file.join_path(input.expanded_path_list(instance,str)) +end + +--~ function input.first_writable_path(instance,name) +--~ for _,v in pairs(input.expanded_path_list(instance,name)) do +--~ if file.is_writable(file.join(v,'luatex-cache.tmp')) then +--~ return v +--~ end +--~ end +--~ return "." 
+--~ end + +function input.expanded_path_list_from_var(instance,str) -- brrr + local tmp = input.var_of_format_or_suffix(str:gsub("%$","")) + if tmp ~= "" then + return input.expanded_path_list(instance,str) + else + return input.expanded_path_list(instance,tmp) + end +end +function input.expand_path_from_var(instance,str) + return file.join_path(input.expanded_path_list_from_var(instance,str)) +end + +function input.format_of_var(str) + return input.formats[str] or input.formats[input.alternatives[str]] or '' +end +function input.format_of_suffix(str) + return input.suffixmap[file.extname(str)] or 'tex' +end + +function input.variable_of_format(str) + return input.formats[str] or input.formats[input.alternatives[str]] or '' +end + +function input.var_of_format_or_suffix(str) + local v = input.formats[str] + if v then + return v + end + v = input.formats[input.alternatives[str]] + if v then + return v + end + v = input.suffixmap[file.extname(str)] + if v then + return input.formats[isf] + end + return '' +end + +function input.expand_braces(instance,str) -- output variable and brace expansion of STRING + local ori = input.variable(instance,str) + local pth = input.aux.expanded_path(instance,input.split_path(ori)) + return file.join_path(pth) +end + +-- {a,b,c,d} +-- a,b,c/{p,q,r},d +-- a,b,c/{p,q,r}/d/{x,y,z}// +-- a,b,c/{p,q/{x,y,z},r},d/{p,q,r} +-- a,b,c/{p,q/{x,y,z},r},d/{p,q,r} +-- a{b,c}{d,e}f +-- {a,b,c,d} +-- {a,b,c/{p,q,r},d} +-- {a,b,c/{p,q,r}/d/{x,y,z}//} +-- {a,b,c/{p,q/{x,y,z}},d/{p,q,r}} +-- {a,b,c/{p,q/{x,y,z},w}v,d/{p,q,r}} + +-- this one is better and faster, but it took me a while to realize +-- that this kind of replacement is cleaner than messy parsing and +-- fuzzy concatenating we can probably gain a bit with selectively +-- applying lpeg, but experiments with lpeg parsing this proved not to +-- work that well; the parsing is ok, but dealing with the resulting +-- table is a pain because we need to work inside-out recursively + +-- get rid of piecewise here, just a gmatch is ok + +function input.aux.splitpathexpr(str, t, validate) + -- no need for optimization, only called a few times, we can use lpeg for the sub + t = t or { } + local concat = table.concat + while true do + local done = false + while true do + local ok = false + str = str:gsub("([^{},]+){([^{}]-)}", function(a,b) + local t = { } + b:piecewise(",", function(s) t[#t+1] = a .. s end) + ok, done = true, true + return "{" .. concat(t,",") .. "}" + end) + if not ok then break end + end + while true do + local ok = false + str = str:gsub("{([^{}]-)}([^{},]+)", function(a,b) + local t = { } + a:piecewise(",", function(s) t[#t+1] = s .. b end) + ok, done = true, true + return "{" .. concat(t,",") .. "}" + end) + if not ok then break end + end + while true do + local ok = false + str = str:gsub("([,{]){([^{}]+)}([,}])", function(a,b,c) + ok, done = true, true + return a .. b .. c + end) + if not ok then break end + end + if not done then break end + end + while true do + local ok = false + str = str:gsub("{([^{}]-)}{([^{}]-)}", function(a,b) + local t = { } + a:piecewise(",", function(sa) + b:piecewise(",", function(sb) + t[#t+1] = sa .. sb + end) + end) + ok = true + return "{" .. concat(t,",") .. 
"}" + end) + if not ok then break end + end + while true do + local ok = false + str = str:gsub("{([^{}]-)}", function(a) + ok = true + return a + end) + if not ok then break end + end + if validate then + str:piecewise(",", function(s) + s = validate(s) + if s then t[#t+1] = s end + end) + else + str:piecewise(",", function(s) + t[#t+1] = s + end) + end + return t +end + +function input.aux.expanded_path(instance,pathlist) -- maybe not a list, just a path + -- a previous version fed back into pathlist + local newlist, ok = { }, false + for _,v in ipairs(pathlist) do + if v:find("[{}]") then + ok = true + break + end + end + if ok then + for _, v in ipairs(pathlist) do + input.aux.splitpathexpr(v, newlist, function(s) + s = file.collapse_path(s) + return s ~= "" and not s:find(instance.dummy_path_expr) and s + end) + end + else + for _,v in ipairs(pathlist) do + for vv in string.gmatch(v..',',"(.-),") do + vv = file.collapse_path(v) + if vv ~= "" then newlist[#newlist+1] = vv end + end + end + end + return newlist +end + +input.is_readable = { } + +function input.aux.is_readable(readable, name) + if input.trace > 2 then + if readable then + input.logger("+ readable", name) + else + input.logger("- readable", name) + end + end + return readable +end + +function input.is_readable.file(name) + -- return input.aux.is_readable(file.is_readable(name), name) + return input.aux.is_readable(input.aux.is_file(name), name) +end + +input.is_readable.tex = input.is_readable.file + +-- name +-- name/name + +function input.aux.collect_files(instance,names) + local filelist = { } + for _, fname in pairs(names) do + if fname then + if input.trace > 2 then + input.logger("? blobpath asked",fname) + end + local bname = file.basename(fname) + local dname = file.dirname(fname) + if dname == "" or dname:find("^%.") then + dname = false + else + dname = "/" .. dname .. "$" + end + for _, hash in ipairs(instance.hashes) do + local blobpath = hash.tag + local files = blobpath and instance.files[blobpath] + if files then + if input.trace > 2 then + input.logger('? blobpath do',blobpath .. " (" .. bname ..")") + end + local blobfile = files[bname] + if not blobfile then + local rname = "remap:"..bname + blobfile = files[rname] + if blobfile then + bname = files[rname] + blobfile = files[bname] + end + end + if blobfile then + if type(blobfile) == 'string' then + if not dname or blobfile:find(dname) then + filelist[#filelist+1] = { + hash.type, + file.join(blobpath,blobfile,bname), -- search + input.concatinators[hash.type](blobpath,blobfile,bname) -- result + } + end + else + for _, vv in pairs(blobfile) do + if not dname or vv:find(dname) then + filelist[#filelist+1] = { + hash.type, + file.join(blobpath,vv,bname), -- search + input.concatinators[hash.type](blobpath,vv,bname) -- result + } + end + end + end + end + elseif input.trace > 1 then + input.logger('! blobpath no',blobpath .. " (" .. 
bname ..")" ) + end + end + end + end + if #filelist > 0 then + return filelist + else + return nil + end +end + +function input.suffix_of_format(str) + if input.suffixes[str] then + return input.suffixes[str][1] + else + return "" + end +end + +function input.suffixes_of_format(str) + if input.suffixes[str] then + return input.suffixes[str] + else + return {} + end +end + +do + + -- called about 700 times for an empty doc (font initializations etc) + -- i need to weed the font files for redundant calls + + local letter = lpeg.R("az","AZ") + local separator = lpeg.P("://") + + local qualified = lpeg.P(".")^0 * lpeg.P("/") + letter*lpeg.P(":") + letter^1*separator + local rootbased = lpeg.P("/") + letter*lpeg.P(":") + + -- ./name ../name /name c: :// + function input.aux.qualified_path(filename) + return qualified:match(filename) + end + function input.aux.rootbased_path(filename) + return rootbased:match(filename) + end + + function input.normalize_name(original) + return original + end + + input.normalize_name = file.collapse_path + +end + +function input.aux.register_in_trees(instance,name) + if not name:find("^%.") then + instance.foundintrees[name] = (instance.foundintrees[name] or 0) + 1 -- maybe only one + end +end + +-- split the next one up, better for jit + +function input.aux.find_file(instance,filename) -- todo : plugin (scanners, checkers etc) + local result = { } + local stamp = nil + filename = input.normalize_name(filename) -- elsewhere + filename = file.collapse_path(filename:gsub("\\","/")) -- elsewhere + -- speed up / beware: format problem + if instance.remember then + stamp = filename .. "--" .. instance.engine .. "--" .. instance.progname .. "--" .. instance.format + if instance.found[stamp] then + input.logger('! remembered', filename) + return instance.found[stamp] + end + end + if filename:find('%*') then + input.logger('! wildcard', filename) + result = input.find_wildcard_files(instance,filename) + elseif input.aux.qualified_path(filename) then + if input.is_readable.file(filename) then + input.logger('! qualified', filename) + result = { filename } + else + local forcedname, ok = "", false + if file.extname(filename) == "" then + if instance.format == "" then + forcedname = filename .. ".tex" + if input.is_readable.file(forcedname) then + input.logger('! no suffix, forcing standard filetype tex') + result, ok = { forcedname }, true + end + else + for _, s in pairs(input.suffixes_of_format(instance.format)) do + forcedname = filename .. "." .. s + if input.is_readable.file(forcedname) then + input.logger('! no suffix, forcing format filetype', s) + result, ok = { forcedname }, true + break + end + end + end + end + if not ok then + input.logger('? qualified', filename) + end + end + else + -- search spec + local filetype, extra, done, wantedfiles, ext = '', nil, false, { }, file.extname(filename) + if ext == "" then + if not instance.force_suffixes then + wantedfiles[#wantedfiles+1] = filename + end + else + wantedfiles[#wantedfiles+1] = filename + end + if instance.format == "" then + if ext == "" then + local forcedname = filename .. '.tex' + wantedfiles[#wantedfiles+1] = forcedname + filetype = input.format_of_suffix(forcedname) + input.logger('! forcing filetype',filetype) + else + filetype = input.format_of_suffix(filename) + input.logger('! using suffix based filetype',filetype) + end + else + if ext == "" then + for _, s in pairs(input.suffixes_of_format(instance.format)) do + wantedfiles[#wantedfiles+1] = filename .. "." .. 
s + end + end + filetype = instance.format + input.logger('! using given filetype',filetype) + end + local typespec = input.variable_of_format(filetype) + local pathlist = input.expanded_path_list(instance,typespec) + if not pathlist or #pathlist == 0 then + -- no pathlist, access check only / todo == wildcard + if input.trace > 2 then + input.logger('? filename',filename) + input.logger('? filetype',filetype or '?') + input.logger('? wanted files',table.concat(wantedfiles," | ")) + end + for _, fname in pairs(wantedfiles) do + if fname and input.is_readable.file(fname) then + filename, done = fname, true + result[#result+1] = file.join('.',fname) + break + end + end + -- this is actually 'other text files' or 'any' or 'whatever' + local filelist = input.aux.collect_files(instance,wantedfiles) + local fl = filelist and filelist[1] + if fl then + filename = fl[3] + result[#result+1] = filename + done = true + end + else + -- list search + local filelist = input.aux.collect_files(instance,wantedfiles) + local doscan, recurse + if input.trace > 2 then + input.logger('? filename',filename) + -- if pathlist then input.logger('? path list',table.concat(pathlist," | ")) end + -- if filelist then input.logger('? file list',table.concat(filelist," | ")) end + end + -- a bit messy ... esp the doscan setting here + for _, path in pairs(pathlist) do + if path:find("^!!") then doscan = false else doscan = true end + if path:find("//$") then recurse = true else recurse = false end + local pathname = path:gsub("^!+", '') + done = false + -- using file list + if filelist and not (done and not instance.allresults) and recurse then + -- compare list entries with permitted pattern + pathname = pathname:gsub("([%-%.])","%%%1") -- this also influences + pathname = pathname:gsub("/+$", '/.*') -- later usage of pathname + pathname = pathname:gsub("//", '/.-/') -- not ok for /// but harmless + local expr = "^" .. pathname + -- input.debug('?',expr) + for _, fl in ipairs(filelist) do + local f = fl[2] + if f:find(expr) then + -- input.debug('T',' '..f) + if input.trace > 2 then + input.logger('= found in hash',f) + end + --- todo, test for readable + result[#result+1] = fl[3] + input.aux.register_in_trees(instance,f) -- for tracing used files + done = true + if not instance.allresults then break end + else + -- input.debug('F',' '..f) + end + end + end + if not done and doscan then + -- check if on disk / unchecked / does not work at all / also zips + if input.method_is_file(pathname) then -- ? 
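 -- editorial note, not part of the original patch: this branch runs for paths that
 -- were not prefixed with "!!" (so disk scanning is allowed); it strips a trailing
 -- ".*" wildcard and, when no other wildcard remains and the directory exists
 -- (can_be_dir), probes each candidate in wantedfiles directly on disk with
 -- is_readable.file instead of going through the hashed file database.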
+ local pname = pathname:gsub("%.%*$",'') + if not pname:find("%*") then + local ppname = pname:gsub("/+$","") + if input.aux.can_be_dir(instance,ppname) then + for _, w in pairs(wantedfiles) do + local fname = file.join(ppname,w) + if input.is_readable.file(fname) then + if input.trace > 2 then + input.logger('= found by scanning',fname) + end + result[#result+1] = fname + done = true + if not instance.allresults then break end + end + end + else + -- no access needed for non existing path, speedup (esp in large tree with lots of fake) + end + end + end + end + if not done and doscan then + -- todo: slow path scanning + end + if done and not instance.allresults then break end + end + end + end + for k,v in pairs(result) do + result[k] = file.collapse_path(v) + end + if instance.remember then + instance.found[stamp] = result + end + return result +end + +input.aux._find_file_ = input.aux.find_file + +function input.aux.find_file(instance,filename) -- maybe make a lowres cache too + local result = input.aux._find_file_(instance,filename) + if #result == 0 then + local lowered = filename:lower() + if filename ~= lowered then + return input.aux._find_file_(instance,lowered) + end + end + return result +end + +if lfs and lfs.isfile then + input.aux.is_file = lfs.isfile -- to be done: use this +else + input.aux.is_file = file.is_readable +end + +if lfs and lfs.isdir then + function input.aux.can_be_dir(instance,name) + if not instance.fakepaths[name] then + if lfs.isdir(name) then + instance.fakepaths[name] = 1 -- directory + else + instance.fakepaths[name] = 2 -- no directory + end + end + return (instance.fakepaths[name] == 1) + end +else + function input.aux.can_be_dir() + return true + end +end + +if not input.concatinators then input.concatinators = { } end + +input.concatinators.tex = file.join +input.concatinators.file = input.concatinators.tex + +function input.find_files(instance,filename,filetype,mustexist) + if type(mustexist) == boolean then + -- all set + elseif type(filetype) == 'boolean' then + filetype, mustexist = nil, false + elseif type(filetype) ~= 'string' then + filetype, mustexist = nil, false + end + instance.format = filetype or '' + local t = input.aux.find_file(instance,filename,true) + instance.format = '' + return t +end + +function input.find_file(instance,filename,filetype,mustexist) + return (input.find_files(instance,filename,filetype,mustexist)[1] or "") +end + +function input.find_given_files(instance,filename) + local bname, result = file.basename(filename), { } + for k, hash in ipairs(instance.hashes) do + local files = instance.files[hash.tag] + local blist = files[bname] + if not blist then + local rname = "remap:"..bname + blist = files[rname] + if blist then + bname = files[rname] + blist = files[bname] + end + end + if blist then + if type(blist) == 'string' then + result[#result+1] = input.concatinators[hash.type](hash.tag,blist,bname) or "" + if not instance.allresults then break end + else + for kk,vv in pairs(blist) do + result[#result+1] = input.concatinators[hash.type](hash.tag,vv,bname) or "" + if not instance.allresults then break end + end + end + end + end + return result +end + +function input.find_given_file(instance,filename) + return (input.find_given_files(instance,filename)[1] or "") +end + +function input.find_wildcard_files(instance,filename) -- todo: remap: + local result = { } + local bname, dname = file.basename(filename), file.dirname(filename) + local path = dname:gsub("^*/","") + path = path:gsub("*",".*") + path = 
path:gsub("-","%%-") + if dname == "" then + path = ".*" + end + local name = bname + name = name:gsub("*",".*") + name = name:gsub("-","%%-") + path = path:lower() + name = name:lower() + local function doit(blist,bname,hash,allresults) + local done = false + if blist then + if type(blist) == 'string' then + -- make function and share code + if (blist:lower()):find(path) then + result[#result+1] = input.concatinators[hash.type](hash.tag,blist,bname) or "" + done = true + end + else + for kk,vv in pairs(blist) do + if (vv:lower()):find(path) then + result[#result+1] = input.concatinators[hash.type](hash.tag,vv,bname) or "" + done = true + if not allresults then break end + end + end + end + end + return done + end + local files, allresults, done = instance.files, instance.allresults, false + if name:find("%*") then + for k, hash in ipairs(instance.hashes) do + for kk, hh in pairs(files[hash.tag]) do + if not kk:find("^remap:") then + if (kk:lower()):find(name) then + if doit(hh,kk,hash,allresults) then done = true end + if done and not allresults then break end + end + end + end + end + else + for k, hash in ipairs(instance.hashes) do + if doit(files[hash.tag][bname],bname,hash,allresults) then done = true end + if done and not allresults then break end + end + end + return result +end + +function input.find_wildcard_file(instance,filename) + return (input.find_wildcard_files(instance,filename)[1] or "") +end + +-- main user functions + +function input.save_used_files_in_trees(instance, filename,jobname) + if not filename then filename = 'luatex.jlg' end + local f = io.open(filename,'w') + if f then + f:write("\n") + f:write("\n") + if jobname then + f:write("\t" .. jobname .. "\n") + end + f:write("\t\n") + for _,v in pairs(table.sortedkeys(instance.foundintrees)) do + f:write("\t\t" .. v .. "\n") + end + f:write("\t\n") + f:write("\n") + f:close() + end +end + +function input.automount(instance) + -- implemented later +end + +function input.load(instance) + input.starttiming(instance) + input.resetconfig(instance) + input.identify_cnf(instance) + input.load_lua(instance) + input.expand_variables(instance) + input.load_cnf(instance) + input.expand_variables(instance) + input.load_hash(instance) + input.automount(instance) + input.stoptiming(instance) +end + +function input.for_files(instance, command, files, filetype, mustexist) + if files and #files > 0 then + local function report(str) + if input.verbose then + input.report(str) -- has already verbose + else + print(str) + end + end + if input.verbose then + report('') + end + for _, file in pairs(files) do + local result = command(instance,file,filetype,mustexist) + if type(result) == 'string' then + report(result) + else + for _,v in pairs(result) do + report(v) + end + end + end + end +end + +-- strtab + +function input.var_value(instance,str) -- output the value of variable $STRING. + return input.variable(instance,str) +end +function input.expand_var(instance,str) -- output variable expansion of STRING. 
+ return input.expansion(instance,str) +end +function input.show_path(instance,str) -- output search path for file type NAME + return file.join_path(input.expanded_path_list(instance,input.format_of_var(str))) +end + +-- input.find_file(filename) +-- input.find_file(filename, filetype, mustexist) +-- input.find_file(filename, mustexist) +-- input.find_file(filename, filetype) + +function input.aux.register_file(files, name, path) + if files[name] then + if type(files[name]) == 'string' then + files[name] = { files[name], path } + else + files[name] = path + end + else + files[name] = path + end +end + +if not input.finders then input.finders = { } end +if not input.openers then input.openers = { } end +if not input.loaders then input.loaders = { } end + +input.finders.notfound = { nil } +input.openers.notfound = { nil } +input.loaders.notfound = { false, nil, 0 } + +function input.splitmethod(filename) + if not filename then + return { } -- safeguard + elseif type(filename) == "table" then + return filename -- already split + elseif not filename:find("://") then + return { scheme="file", path = filename, original=filename } -- quick hack + else + return url.hashed(filename) + end +end + +function input.method_is_file(filename) + return input.splitmethod(filename).scheme == 'file' +end + +function table.sequenced(t,sep) -- temp here + local s = { } + for k, v in pairs(t) do + s[#s+1] = k .. "=" .. v + end + return table.concat(s, sep or " | ") +end + +function input.methodhandler(what, instance, filename, filetype) -- ... + local specification = (type(filename) == "string" and input.splitmethod(filename)) or filename -- no or { }, let it bomb + local scheme = specification.scheme + if input[what][scheme] then + input.logger('= handler',specification.original .." -> " .. what .. " -> " .. table.sequenced(specification)) + return input[what][scheme](instance,filename,filetype) -- todo: specification + else + return input[what].tex(instance,filename,filetype) -- todo: specification + end +end + +-- also inside next test? + +function input.findtexfile(instance, filename, filetype) + return input.methodhandler('finders',instance, input.normalize_name(filename), filetype) +end +function input.opentexfile(instance,filename) + return input.methodhandler('openers',instance, input.normalize_name(filename)) +end + +function input.findbinfile(instance, filename, filetype) + return input.methodhandler('finders',instance, input.normalize_name(filename), filetype) +end +function input.openbinfile(instance,filename) + return input.methodhandler('loaders',instance, input.normalize_name(filename)) +end + +function input.loadbinfile(instance, filename, filetype) + local fname = input.findbinfile(instance, input.normalize_name(filename), filetype) + if fname and fname ~= "" then + return input.openbinfile(instance,fname) + else + return unpack(input.loaders.notfound) + end +end + +function input.texdatablob(instance, filename, filetype) + local ok, data, size = input.loadbinfile(instance, filename, filetype) + return data or "" +end + +input.loadtexfile = input.texdatablob + +function input.openfile(filename) -- brrr texmf.instance here / todo ! ! ! ! ! 
+ local fullname = input.findtexfile(texmf.instance, filename) + if fullname and (fullname ~= "") then + return input.opentexfile(texmf.instance, fullname) + else + return nil + end +end + +function input.logmode() + return (os.getenv("MTX.LOG.MODE") or os.getenv("MTX_LOG_MODE") or "tex"):lower() +end + +-- this is a prelude to engine/progname specific configuration files +-- in which case we can omit files meant for other programs and +-- packages + +--- ctx + +-- maybe texinputs + font paths +-- maybe positive selection tex/context fonts/tfm|afm|vf|opentype|type1|map|enc + +input.validators = { } +input.validators.visibility = { } + +function input.validators.visibility.default(path, name) + return true +end + +function input.validators.visibility.context(path, name) + path = path[1] or path -- some day a loop + return not ( + path:find("latex") or +-- path:find("doc") or + path:find("tex4ht") or + path:find("source") or +-- path:find("config") or +-- path:find("metafont") or + path:find("lists$") or + name:find("%.tpm$") or + name:find("%.bak$") + ) +end + +-- todo: describe which functions are public (maybe input.private. ... ) + +-- beware: i need to check where we still need a / on windows: + +function input.clean_path(str) +--~ return (((str:gsub("\\","/")):gsub("^!+","")):gsub("//+","//")) + if str then + return ((str:gsub("\\","/")):gsub("^!+","")) + else + return nil + end +end + +function input.do_with_path(name,func) + for _, v in pairs(input.expanded_path_list(instance,name)) do + func("^"..input.clean_path(v)) + end +end + +function input.do_with_var(name,func) + func(input.aux.expanded_var(name)) +end + +function input.with_files(instance,pattern,handle) + for _, hash in ipairs(instance.hashes) do + local blobpath = hash.tag + local blobtype = hash.type + if blobpath then + local files = instance.files[blobpath] + if files then + for k,v in pairs(files) do + if k:find("^remap:") then + k = files[k] + v = files[k] -- chained + end + if k:find(pattern) then + if type(v) == "string" then + handle(blobtype,blobpath,v,k) + else + for _,vv in pairs(v) do + handle(blobtype,blobpath,vv,k) + end + end + end + end + end + end + end +end + +--~ function input.update_script(oldname,newname) -- oldname -> own.name, not per se a suffix +--~ newname = file.addsuffix(newname,"lua") +--~ local newscript = input.clean_path(input.find_file(instance, newname)) +--~ local oldscript = input.clean_path(oldname) +--~ input.report("old script", oldscript) +--~ input.report("new script", newscript) +--~ if oldscript ~= newscript and (oldscript:find(file.removesuffix(newname).."$") or oldscript:find(newname.."$")) then +--~ local newdata = io.loaddata(newscript) +--~ if newdata then +--~ input.report("old script content replaced by new content") +--~ io.savedata(oldscript,newdata) +--~ end +--~ end +--~ end + +function input.update_script(instance,oldname,newname) -- oldname -> own.name, not per se a suffix + local scriptpath = "scripts/context/lua" + newname = file.addsuffix(newname,"lua") + local oldscript = input.clean_path(oldname) + input.report("to be replaced old script", oldscript) + local newscripts = input.find_files(instance, newname) or { } + if #newscripts == 0 then + input.report("unable to locate new script") + else + for _, newscript in ipairs(newscripts) do + newscript = input.clean_path(newscript) + input.report("checking new script", newscript) + if oldscript == newscript then + input.report("old and new script are the same") + elseif not newscript:find(scriptpath) then + 
input.report("new script should come from",scriptpath) + elseif not (oldscript:find(file.removesuffix(newname).."$") or oldscript:find(newname.."$")) then + input.report("invalid new script name") + else + local newdata = io.loaddata(newscript) + if newdata then + input.report("old script content replaced by new content") + io.savedata(oldscript,newdata) + break + else + input.report("unable to load new script") + end + end + end + end +end + + +--~ print(table.serialize(input.aux.splitpathexpr("/usr/share/texmf-{texlive,tetex}", {}))) + +-- command line resolver: + +--~ print(input.resolve("abc env:tmp file:cont-en.tex path:cont-en.tex full:cont-en.tex rel:zapf/one/p-chars.tex")) + +do + + local resolvers = { } + + resolvers.environment = function(instance,str) + return input.clean_path(os.getenv(str) or os.getenv(str:upper()) or os.getenv(str:lower()) or "") + end + resolvers.relative = function(instance,str,n) + if io.exists(str) then + -- nothing + elseif io.exists("./" .. str) then + str = "./" .. str + else + local p = "../" + for i=1,n or 2 do + if io.exists(p .. str) then + str = p .. str + break + else + p = p .. "../" + end + end + end + return input.clean_path(str) + end + resolvers.locate = function(instance,str) + local fullname = input.find_given_file(instance,str) or "" + return input.clean_path((fullname ~= "" and fullname) or str) + end + resolvers.filename = function(instance,str) + local fullname = input.find_given_file(instance,str) or "" + return input.clean_path(file.basename((fullname ~= "" and fullname) or str)) + end + resolvers.pathname = function(instance,str) + local fullname = input.find_given_file(instance,str) or "" + return input.clean_path(file.dirname((fullname ~= "" and fullname) or str)) + end + + resolvers.env = resolvers.environment + resolvers.rel = resolvers.relative + resolvers.loc = resolvers.locate + resolvers.kpse = resolvers.locate + resolvers.full = resolvers.locate + resolvers.file = resolvers.filename + resolvers.path = resolvers.pathname + + local function resolve(instance,str) + if type(str) == "table" then + for k, v in pairs(str) do + str[k] = resolve(instance,v) or v + end + elseif str and str ~= "" then + str = str:gsub("([a-z]+):([^ ]+)", function(method,target) + if resolvers[method] then + return resolvers[method](instance,target) + else + return method .. ":" .. target + end + end) + end + return str + end + + input.resolve = resolve + +end + + +if not modules then modules = { } end modules ['luat-tmp'] = { + version = 1.001, + comment = "companion to luat-lib.tex", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +--[[ldx-- +

This module deals with caching data. It sets up the paths and implements loaders and savers for tables. It is best to set the following variable; when it is not set, the usual paths will be checked. Personally I prefer the (user's) temporary path.

    TEXMFCACHE=$TMP;$TEMP;$TMPDIR;$TEMPDIR;$HOME;$TEXMFVAR;$VARTEXMF;.

Currently we do no locking when we write files. This is not a real problem because most caching involves fonts, and the chance of the same file being written at the same time is small. We also need to extend luatools with a recache feature.
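As a rough sketch of the fallback part of the cache-root selection implemented below (the function name guess_cache_root is made up for illustration, and it assumes the lfs library that this file already relies on; the real code also expands TEXMFCACHE as a path list and can offer to create a missing directory), the idea is simply to try a handful of environment variables and take the first existing directory:

    local function guess_cache_root()
        -- try the usual suspects in order and return the first existing directory
        for _, name in ipairs { "TEXMFCACHE", "HOME", "HOMEPATH", "TMP", "TEMP", "TMPDIR" } do
            local path = os.getenv(name)
            if path and lfs.attributes(path,"mode") == "directory" then
                return path
            end
        end
        return nil -- caches.temp reports a fatal error in this situation
    end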

+--ldx]]-- + +caches = caches or { } +dir = dir or { } +texmf = texmf or { } + +caches.path = caches.path or nil +caches.base = caches.base or "luatex-cache" +caches.more = caches.more or "context" +caches.direct = false -- true is faster but may need huge amounts of memory +caches.trace = false +caches.tree = false +caches.paths = caches.paths or nil +caches.force = false + +input.usecache = not toboolean(os.getenv("TEXMFSHARECACHE") or "false",true) -- true + +function caches.temp(instance) + local function checkpath(cachepath) + if not cachepath or cachepath == "" then + return nil + elseif lfs.attributes(cachepath,"mode") == "directory" then -- lfs.isdir(cachepath) then + return cachepath + elseif caches.force or io.ask(string.format("Should I create the cache path %s?",cachepath), "no", { "yes", "no" }) == "yes" then + dir.mkdirs(cachepath) + return (lfs.attributes(cachepath,"mode") == "directory") and cachepath + else + return nil + end + end + local cachepath = input.expanded_path_list(instance,"TEXMFCACHE") + cachepath = cachepath and #cachepath > 0 and checkpath(cachepath[1]) + if not cachepath then + cachepath = os.getenv("TEXMFCACHE") or os.getenv("HOME") or os.getenv("HOMEPATH") or os.getenv("TMP") or os.getenv("TEMP") or os.getenv("TMPDIR") or nil + cachepath = checkpath(cachepath) + end + if not cachepath then + print("\nfatal error: there is no valid cache path defined\n") + os.exit() + elseif lfs.attributes(cachepath,"mode") ~= "directory" then + print(string.format("\nfatal error: cache path %s is not a directory\n",cachepath)) + os.exit() + end + function caches.temp(instance) + return cachepath + end + return cachepath +end + +function caches.configpath(instance) + return table.concat(instance.cnffiles,";") +end + +function caches.hashed(tree) + return md5.hex((tree:lower()):gsub("[\\\/]+","/")) +end + +function caches.treehash(instance) + local tree = caches.configpath(instance) + if not tree or tree == "" then + return false + else + return caches.hashed(tree) + end +end + +function caches.setpath(instance,...) + if not caches.path then + if not caches.path then + caches.path = caches.temp(instance) + end + caches.path = input.clean_path(caches.path) -- to be sure + if lfs then + caches.tree = caches.tree or caches.treehash(instance) + if caches.tree then + caches.path = dir.mkdirs(caches.path,caches.base,caches.more,caches.tree) + else + caches.path = dir.mkdirs(caches.path,caches.base,caches.more) + end + end + end + if not caches.path then + caches.path = '.' + end + caches.path = input.clean_path(caches.path) + if lfs and not table.is_empty({...}) then + local pth = dir.mkdirs(caches.path,...) + return pth + end + caches.path = dir.expand_name(caches.path) + return caches.path +end + +function caches.definepath(instance,category,subcategory) + return function() + return caches.setpath(instance,category,subcategory) + end +end + +function caches.setluanames(path,name) + return path .. "/" .. name .. ".tma", path .. "/" .. name .. 
".tmc" +end + +function caches.loaddata(path,name) + local tmaname, tmcname = caches.setluanames(path,name) + local loader = loadfile(tmcname) or loadfile(tmaname) + if loader then + return loader() + else + return false + end +end + +function caches.is_writable(filepath,filename) + local tmaname, tmcname = caches.setluanames(filepath,filename) + return file.is_writable(tmaname) +end + +function caches.savedata(filepath,filename,data,raw) -- raw needed for file cache + local tmaname, tmcname = caches.setluanames(filepath,filename) + local reduce, simplify = true, true + if raw then + reduce, simplify = false, false + end + if caches.direct then + file.savedata(tmaname, table.serialize(data,'return',true,true)) + else + table.tofile (tmaname, data,'return',true,true) -- maybe not the last true + end + utils.lua.compile(tmaname, tmcname) +end + +-- here we use the cache for format loading (texconfig.[formatname|jobname]) + +--~ if tex and texconfig and texconfig.formatname and texconfig.formatname == "" then +if tex and texconfig and (not texconfig.formatname or texconfig.formatname == "") and texmf.instance then + if not texconfig.luaname then texconfig.luaname = "cont-en.lua" end -- or luc + texconfig.formatname = caches.setpath(texmf.instance,"formats") .. "/" .. texconfig.luaname:gsub("%.lu.$",".fmt") +end + +--[[ldx-- +

Once we found ourselves defining similar cache constructs several times, containers were introduced. Containers are used to collect tables in memory and reuse them when possible based on (unique) hashes (to be provided by the calling function).

+ +

Caching to disk is disabled by default. Version numbers are stored in the saved table which makes it possible to change the table structures without bothering about the disk cache.
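For instance, a cached table as written by the savers below could look like this on disk (the glyphs field is an illustrative payload, not something this module prescribes; only the cache_version field is significant for the validity check):

    return {
        cache_version = 1.001, -- compared against the defining container's version
        glyphs        = { },   -- illustrative payload; any serializable table data
    }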

+ +

Examples of usage can be found in the font related code.
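A hypothetical usage pattern, matching the interface defined below (the category, subcategory and key names are invented for illustration):

    local define_cache = containers.define("fonts", "afm", 1.001, true)
    local cache        = define_cache()  -- allocates or reuses the container
    local data         = containers.read(cache, "somefont-regular")
    if not data then
        data = { metrics = { } }         -- the expensive work would happen here
        data = containers.write(cache, "somefont-regular", data)
    end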

+--ldx]]-- + +containers = { } +containers.trace = false + +do -- local report + + local function report(container,tag,name) + if caches.trace or containers.trace or container.trace then + logs.report(string.format("%s cache",container.subcategory),string.format("%s: %s",tag,name or 'invalid')) + end + end + + local allocated = { } + + -- tracing + + function containers.define(category, subcategory, version, enabled) + return function() + if category and subcategory then + local c = allocated[category] + if not c then + c = { } + allocated[category] = c + end + local s = c[subcategory] + if not s then + s = { + category = category, + subcategory = subcategory, + storage = { }, + enabled = enabled, + version = version or 1.000, + trace = false, + path = caches.setpath(texmf.instance,category,subcategory), + } + c[subcategory] = s + end + return s + else + return nil + end + end + end + + function containers.is_usable(container, name) + return container.enabled and caches.is_writable(container.path, name) + end + + function containers.is_valid(container, name) + if name and name ~= "" then + local storage = container.storage[name] + return storage and not table.is_empty(storage) and storage.cache_version == container.version + else + return false + end + end + + function containers.read(container,name) + if container.enabled and not container.storage[name] then + container.storage[name] = caches.loaddata(container.path,name) + if containers.is_valid(container,name) then + report(container,"loaded",name) + else + container.storage[name] = nil + end + end + if container.storage[name] then + report(container,"reusing",name) + end + return container.storage[name] + end + + function containers.write(container, name, data) + if data then + data.cache_version = container.version + if container.enabled then + local unique, shared = data.unique, data.shared + data.unique, data.shared = nil, nil + caches.savedata(container.path, name, data) + report(container,"saved",name) + data.unique, data.shared = unique, shared + end + report(container,"stored",name) + container.storage[name] = data + end + return data + end + + function containers.content(container,name) + return container.storage[name] + end + +end + +-- since we want to use the cache instead of the tree, we will now +-- reimplement the saver. 
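-- as an example of the per-tree naming used below (disabled, and the path is
-- made up for illustration):
--~ print(caches.hashed("/usr/local/texlive/2008/texmf-var/web2c"))
-- this prints the md5 hex of the lowercased, slash-normalized string; that
-- hash, rather than the tree path itself, becomes the file name in the cache.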
+ +local save_data = input.aux.save_data + +input.cachepath = nil + +function input.aux.save_data(instance, dataname, check) + input.cachepath = input.cachepath or caches.definepath(instance,"trees") + save_data(instance, dataname, check, function(cachename,dataname) + if input.usecache then + return file.join(input.cachepath(),caches.hashed(cachename)) + else + return file.join(cachename,dataname) + end + end) +end + +local load_data = input.aux.load_data + +function input.aux.load_data(instance,pathname,dataname,filename) + input.cachepath = input.cachepath or caches.definepath(instance,"trees") + load_data(instance,pathname,dataname,filename,function(dataname,filename) + if input.usecache then + return file.join(input.cachepath(),caches.hashed(pathname)) + else + if not filename or (filename == "") then + filename = dataname + end + return file.join(pathname,filename) + end + end) +end + +-- we will make a better format, maybe something xml or just text or lua + +input.automounted = input.automounted or { } + +function input.automount(instance,usecache) + local mountpaths = input.simplified_list(input.expansion(instance,'TEXMFMOUNT')) + if table.is_empty(mountpaths) and usecache then + mountpaths = { caches.setpath(instance,"mount") } + end + if not table.is_empty(mountpaths) then + input.starttiming(instance) + for k, root in pairs(mountpaths) do + local f = io.open(root.."/url.tmi") + if f then + for line in f:lines() do + if line then + if line:find("^[%%#%-]") then -- or %W + -- skip + elseif line:find("^zip://") then + input.report("mounting",line) + table.insert(input.automounted,line) + input.usezipfile(instance,line) + end + end + end + f:close() + end + end + input.stoptiming(instance) + end +end + +-- store info in format + +input.storage = { } +input.storage.data = { } +input.storage.min = 0 -- 500 +input.storage.max = input.storage.min - 1 +input.storage.trace = false -- true +input.storage.done = 0 +input.storage.evaluators = { } +-- (evaluate,message,names) + +function input.storage.register(...) + input.storage.data[#input.storage.data+1] = { ... } +end + +function input.storage.evaluate(name) + input.storage.evaluators[#input.storage.evaluators+1] = name +end + +function input.storage.finalize() -- we can prepend the string with "evaluate:" + for _, t in ipairs(input.storage.evaluators) do + for i, v in pairs(t) do + if type(v) == "string" then + t[i] = loadstring(v)() + elseif type(v) == "table" then + for _, vv in pairs(v) do + if type(vv) == "string" then + t[i] = loadstring(vv)() + end + end + end + end + end +end + +function input.storage.dump() + for name, data in ipairs(input.storage.data) do + local evaluate, message, original, target = data[1], data[2], data[3] ,data[4] + local name, initialize, finalize, code = nil, "", "", "" + for str in target:gmatch("([^%.]+)") do + if name then + name = name .. "." .. str + else + name = str + end + initialize = string.format("%s %s = %s or {} ", initialize, name, name) + end + if evaluate then + finalize = "input.storage.evaluate(" .. name .. ")" + end + input.storage.max = input.storage.max + 1 + if input.storage.trace then + logs.report('storage',string.format('saving %s in slot %s',message,input.storage.max)) + code = + initialize .. + string.format("logs.report('storage','restoring %s from slot %s') ",message,input.storage.max) .. + table.serialize(original,name) .. + finalize + else + code = initialize .. table.serialize(original,name) .. 
finalize + end + lua.bytecode[input.storage.max] = loadstring(code) + end +end + +if lua.bytecode then -- from 0 upwards + local i = input.storage.min + while lua.bytecode[i] do + lua.bytecode[i]() + lua.bytecode[i] = nil + i = i + 1 + end + input.storage.done = i +end + + +if not modules then modules = { } end modules ['luat-log'] = { + version = 1.001, + comment = "companion to luat-lib.tex", + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +--[[ldx-- +

This is a prelude to a more extensive logging module. For the sake of parsing log files, in addition to the standard logging we will provide an \XML\ structured file. Actually, any logging that is hooked into callbacks will be \XML\ by default.
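A small usage sketch of the interface defined after this comment (the category and message strings are invented):

    logs.set_method('xml')    -- or 'tex' for the traditional log format
    logs.set_level('warning') -- suppress info and debug messages
    logs.report ("resolvers", "scanning tree")          -- report is always written
    logs.warning("resolvers", "duplicate file ignored") -- written at this level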

+--ldx]]-- + +input = input or { } +logs = logs or { } + +--[[ldx-- +

This looks pretty ugly but we need to speed things up a bit.

+--ldx]]-- + +logs.levels = { + ['error'] = 1, + ['warning'] = 2, + ['info'] = 3, + ['debug'] = 4 +} + +logs.functions = { + 'error', 'warning', 'info', 'debug', 'report', + 'start', 'stop', 'push', 'pop' +} + +logs.callbacks = { + 'start_page_number', + 'stop_page_number', + 'report_output_pages', + 'report_output_log' +} + +logs.xml = logs.xml or { } +logs.tex = logs.tex or { } + +logs.level = 0 + +do + local write_nl, write, format = texio.write_nl or print, texio.write or io.write, string.format + + if texlua then + write_nl = print + write = io.write + end + + function logs.xml.debug(category,str) + if logs.level > 3 then write_nl(format("%s",category,str)) end + end + function logs.xml.info(category,str) + if logs.level > 2 then write_nl(format("%s",category,str)) end + end + function logs.xml.warning(category,str) + if logs.level > 1 then write_nl(format("%s",category,str)) end + end + function logs.xml.error(category,str) + if logs.level > 0 then write_nl(format("%s",category,str)) end + end + function logs.xml.report(category,str) + write_nl(format("%s",category,str)) + end + + function logs.xml.start() if logs.level > 0 then tw("<%s>" ) end end + function logs.xml.stop () if logs.level > 0 then tw("") end end + function logs.xml.push () if logs.level > 0 then tw("" ) end end + + function logs.tex.debug(category,str) + if logs.level > 3 then write_nl(format("debug >> %s: %s" ,category,str)) end + end + function logs.tex.info(category,str) + if logs.level > 2 then write_nl(format("info >> %s: %s" ,category,str)) end + end + function logs.tex.warning(category,str) + if logs.level > 1 then write_nl(format("warning >> %s: %s",category,str)) end + end + function logs.tex.error(category,str) + if logs.level > 0 then write_nl(format("error >> %s: %s" ,category,str)) end + end + function logs.tex.report(category,str) + write_nl(format("report >> %s: %s" ,category,str)) + end + + function logs.set_level(level) + logs.level = logs.levels[level] or level + end + + function logs.set_method(method) + for _, v in pairs(logs.functions) do + logs[v] = logs[method][v] or function() end + end + if callback and input[method] then + for _, cb in pairs(logs.callbacks) do + callback.register(cb, input[method][cb]) + end + end + end + + function logs.xml.start_page_number() + write_nl(format("

") + write_nl("") + end + + function logs.xml.report_output_pages(p,b) + write_nl(format("", p)) + write_nl(format("", b)) + write_nl("") + end + + function logs.xml.report_output_log() + end + +end + +logs.set_level('error') +logs.set_method('tex') + + +if not modules then modules = { } end modules ['luat-sta'] = { + version = 1.001, + author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", + copyright = "PRAGMA ADE / ConTeXt Development Team", + license = "see context related readme files" +} + +states = states or { } +states.data = states.data or { } +states.hash = states.hash or { } +states.tag = states.tag or "" +states.filename = states.filename or "" + +function states.save(filename,tag) + tag = tag or states.tag + filename = file.addsuffix(filename or states.filename,'lus') + io.savedata(filename, + "-- generator : luat-sta.lua\n" .. + "-- state tag : " .. tag .. "\n\n" .. + table.serialize(states.data[tag or states.tag] or {},true) + ) +end + +function states.load(filename,tag) + states.filename = filename + states.tag = tag or "whatever" + states.filename = file.addsuffix(states.filename,'lus') + states.data[states.tag], states.hash[states.tag] = (io.exists(filename) and dofile(filename)) or { }, { } +end + +function states.set_by_tag(tag,key,value,default,persistent) + local d, h = states.data[tag], states.hash[tag] + if d then + local dkey, hkey = key, key + local pre, post = key:match("(.+)%.([^%.]+)$") + if pre and post then + for k in pre:gmatch("[^%.]+") do + local dk = d[k] + if not dk then + dk = { } + d[k] = dk + end + d = dk + end + dkey, hkey = post, key + end + if type(value) == nil then + value = value or default + elseif persistent then + value = value or d[dkey] or default + else + value = value or default + end + d[dkey], h[hkey] = value, value + end +end + +function states.get_by_tag(tag,key,default) + local h = states.hash[tag] + if h and h[key] then + return h[key] + else + local d = states.data[tag] + if d then + for k in key:gmatch("[^%.]+") do + local dk = d[k] + if dk then + d = dk + else + return default + end + end + return d or default + end + end +end + +function states.set(key,value,default,persistent) + states.set_by_tag(states.tag,key,value,default,persistent) +end + +function states.get(key,default) + return states.get_by_tag(states.tag,key,default) +end + +--~ states.data.update = { +--~ ["version"] = { +--~ ["major"] = 0, +--~ ["minor"] = 1, +--~ }, +--~ ["rsync"] = { +--~ ["server"] = "contextgarden.net", +--~ ["module"] = "minimals", +--~ ["repository"] = "current", +--~ ["flags"] = "-rpztlv --stats", +--~ }, +--~ ["tasks"] = { +--~ ["update"] = true, +--~ ["make"] = true, +--~ ["delete"] = false, +--~ }, +--~ ["platform"] = { +--~ ["host"] = true, +--~ ["other"] = { +--~ ["mswin"] = false, +--~ ["linux"] = false, +--~ ["linux-64"] = false, +--~ ["osx-intel"] = false, +--~ ["osx-ppc"] = false, +--~ ["sun"] = false, +--~ }, +--~ }, +--~ ["context"] = { +--~ ["available"] = {"current", "beta", "alpha", "experimental"}, +--~ ["selected"] = "current", +--~ }, +--~ ["formats"] = { +--~ ["cont-en"] = true, +--~ ["cont-nl"] = true, +--~ ["cont-de"] = false, +--~ ["cont-cz"] = false, +--~ ["cont-fr"] = false, +--~ ["cont-ro"] = false, +--~ }, +--~ ["engine"] = { +--~ ["pdftex"] = { +--~ ["install"] = true, +--~ ["formats"] = { +--~ ["pdftex"] = true, +--~ }, +--~ }, +--~ ["luatex"] = { +--~ ["install"] = true, +--~ ["formats"] = { +--~ }, +--~ }, +--~ ["xetex"] = { +--~ ["install"] = true, +--~ ["formats"] = { +--~ ["xetex"] = false, +--~ }, +--~ }, +--~ 
["metapost"] = { +--~ ["install"] = true, +--~ ["formats"] = { +--~ ["mpost"] = true, +--~ ["metafun"] = true, +--~ }, +--~ }, +--~ }, +--~ ["fonts"] = { +--~ }, +--~ ["doc"] = { +--~ }, +--~ ["modules"] = { +--~ ["f-urwgaramond"] = false, +--~ ["f-urwgothic"] = false, +--~ ["t-bnf"] = false, +--~ ["t-chromato"] = false, +--~ ["t-cmscbf"] = false, +--~ ["t-cmttbf"] = false, +--~ ["t-construction-plan"] = false, +--~ ["t-degrade"] = false, +--~ ["t-french"] = false, +--~ ["t-lettrine"] = false, +--~ ["t-lilypond"] = false, +--~ ["t-mathsets"] = false, +--~ ["t-tikz"] = false, +--~ ["t-typearea"] = false, +--~ ["t-vim"] = false, +--~ }, +--~ } + + +--~ states.save("teststate", "update") +--~ states.load("teststate", "update") + +--~ print(states.get_by_tag("update","rsync.server","unknown")) +--~ states.set_by_tag("update","rsync.server","oeps") +--~ print(states.get_by_tag("update","rsync.server","unknown")) +--~ states.save("teststate", "update") +--~ states.load("teststate", "update") +--~ print(states.get_by_tag("update","rsync.server","unknown")) + +-- end library merge + +own = { } + +own.libs = { -- todo: check which ones are really needed + 'l-string.lua', + 'l-lpeg.lua', + 'l-table.lua', + 'l-io.lua', + 'l-md5.lua', + 'l-number.lua', + 'l-set.lua', + 'l-os.lua', + 'l-file.lua', + 'l-dir.lua', + 'l-boolean.lua', + 'l-xml.lua', +-- 'l-unicode.lua', + 'l-utils.lua', +-- 'l-tex.lua', + 'luat-lib.lua', + 'luat-inp.lua', +-- 'luat-zip.lua', +-- 'luat-tex.lua', +-- 'luat-kps.lua', + 'luat-tmp.lua', + 'luat-log.lua', + 'luat-sta.lua', +} + +-- We need this hack till luatex is fixed. +-- +-- for k,v in pairs(arg) do print(k,v) end + +if arg and (arg[0] == 'luatex' or arg[0] == 'luatex.exe') and arg[1] == "--luaonly" then + arg[-1]=arg[0] arg[0]=arg[2] for k=3,#arg do arg[k-2]=arg[k] end arg[#arg]=nil arg[#arg]=nil +end + +-- End of hack. + +own.name = (environment and environment.ownname) or arg[0] or 'luatools.lua' + +own.path = string.match(own.name,"^(.+)[\\/].-$") or "." +own.list = { '.' } +if own.path ~= '.' then + table.insert(own.list,own.path) +end +table.insert(own.list,own.path.."/../../../tex/context/base") +table.insert(own.list,own.path.."/mtx") +table.insert(own.list,own.path.."/../sources") + +function locate_libs() + for _, lib in pairs(own.libs) do + for _, pth in pairs(own.list) do + local filename = string.gsub(pth .. "/" .. lib,"\\","/") + local codeblob = loadfile(filename) + if codeblob then + codeblob() + own.list = { pth } -- speed up te search + break + end + end + end +end + +if not input then + locate_libs() +end + +if not input then + print("") + print("Mtxrun is unable to start up due to lack of libraries. 
You may") + print("try to run 'lua mtxrun.lua --selfmerge' in the path where this") + print("script is located (normally under ..../scripts/context/lua) which") + print("will make this script library independent.") + os.exit() +end + +instance = input.reset() +input.verbose = environment.argument("verbose") or false +input.banner = 'MtxRun | ' +utils.report = input.report + +instance.engine = environment.argument("engine") or 'luatex' +instance.progname = environment.argument("progname") or 'context' +instance.lsrmode = environment.argument("lsr") or false + +-- use os.env or environment when available + +--~ function input.check_environment(tree) +--~ input.report('') +--~ os.setenv('TMP', os.getenv('TMP') or os.getenv('TEMP') or os.getenv('TMPDIR') or os.getenv('HOME')) +--~ if os.platform == 'linux' then +--~ os.setenv('TEXOS', os.getenv('TEXOS') or 'texmf-linux') +--~ elseif os.platform == 'windows' then +--~ os.setenv('TEXOS', os.getenv('TEXOS') or 'texmf-windows') +--~ elseif os.platform == 'macosx' then +--~ os.setenv('TEXOS', os.getenv('TEXOS') or 'texmf-macosx') +--~ end +--~ os.setenv('TEXOS', string.gsub(string.gsub(os.getenv('TEXOS'),"^[\\\/]*", ''),"[\\\/]*$", '')) +--~ os.setenv('TEXPATH', string.gsub(tree,"\/+$",'')) +--~ os.setenv('TEXMFOS', os.getenv('TEXPATH') .. "/" .. os.getenv('TEXOS')) +--~ input.report('') +--~ input.report("preset : TEXPATH => " .. os.getenv('TEXPATH')) +--~ input.report("preset : TEXOS => " .. os.getenv('TEXOS')) +--~ input.report("preset : TEXMFOS => " .. os.getenv('TEXMFOS')) +--~ input.report("preset : TMP => " .. os.getenv('TMP')) +--~ input.report('') +--~ end + +function input.check_environment(tree) + input.report('') + os.setenv('TMP', os.getenv('TMP') or os.getenv('TEMP') or os.getenv('TMPDIR') or os.getenv('HOME')) + os.setenv('TEXOS', os.getenv('TEXOS') or ("texmf-" .. os.currentplatform())) + os.setenv('TEXPATH', (tree or "tex"):gsub("\/+$",'')) + os.setenv('TEXMFOS', os.getenv('TEXPATH') .. "/" .. os.getenv('TEXOS')) + input.report('') + input.report("preset : TEXPATH => " .. os.getenv('TEXPATH')) + input.report("preset : TEXOS => " .. os.getenv('TEXOS')) + input.report("preset : TEXMFOS => " .. os.getenv('TEXMFOS')) + input.report("preset : TMP => " .. os.getenv('TMP')) + input.report('') +end + +function input.load_environment(name) -- todo: key=value as well as lua + local f = io.open(name) + if f then + for line in f:lines() do + if line:find("^[%%%#]") then + -- skip comment + else + local key, how, value = line:match("^(.-)%s*([<=>%?]+)%s*(.*)%s*$") + if how then + value = value:gsub("%%(.-)%%", function(v) return os.getenv(v) or "" end) + if how == "=" or how == "<<" then + os.setenv(key,value) + elseif how == "?" or how == "??" then + os.setenv(key,os.getenv(key) or value) + elseif how == "<" or how == "+=" then + if os.getenv(key) then + os.setenv(key,os.getenv(key) .. io.fileseparator .. value) + else + os.setenv(key,value) + end + elseif how == ">" or how == "=+" then + if os.getenv(key) then + os.setenv(key,value .. io.pathseparator .. os.getenv(key)) + else + os.setenv(key,value) + end + end + end + end + end + f:close() + end +end + +function input.load_tree(tree) + if tree and tree ~= "" then + local setuptex = 'setuptex.tmf' + if lfs.attributes(tree, "mode") == "directory" then -- check if not nil + setuptex = tree .. "/" .. 
setuptex + else + setuptex = tree + end + if io.exists(setuptex) then + input.check_environment(tree) + input.load_environment(setuptex) + end + end +end + +-- md5 extensions + +-- maybe md.md5 md.md5hex md.md5HEX + +if not md5 then md5 = { } end + +if not md5.sum then + function md5.sum(k) + return string.rep("x",16) + end +end + +function md5.hexsum(k) + return (string.gsub(md5.sum(k), ".", function(c) return string.format("%02x", string.byte(c)) end)) +end + +function md5.HEXsum(k) + return (string.gsub(md5.sum(k), ".", function(c) return string.format("%02X", string.byte(c)) end)) +end + +-- file extensions + +file.needs_updating_threshold = 1 + +function file.needs_updating(oldname,newname) -- size modification access change + local oldtime = lfs.attributes(oldname, modification) + local newtime = lfs.attributes(newname, modification) + if newtime >= oldtime then + return false + elseif oldtime - newtime < file.needs_updating_threshold then + return false + else + return true + end +end + +function file.mdchecksum(name) + if md5 then + local data = io.loadall(name) + if data then + return md5.HEXsum(data) + end + end + return nil +end + +function file.loadchecksum(name) + if md then + local data = io.loadall(name .. ".md5") + if data then + return string.gsub(md5.HEXsum(data),"%s$","") + end + end + return nil +end + +function file.savechecksum(name, checksum) + if not checksum then checksum = file.mdchecksum(name) end + if checksum then + local f = io.open(name .. ".md5","w") + if f then + f:write(checksum) + f:close() + return checksum + end + end + return nil +end + +os.arch = os.arch or function() + return os.resultof("uname -m") or "linux" +end + +function os.currentplatform(name, default) + local name = os.name or os.platform or name -- os.name is built in, os.platform is mine + if name then + if name == "windows" or name == "mswin" or name == "win32" or name == "msdos" then + return "mswin" + elseif name == "linux" then + local architecture = os.arch() + if architecture:find("x86_64") then + return "linux-64" + else + return "linux" + end + elseif name == "macosx" then + local architecture = os.arch() + if architecture:find("i386") then + return "osx-intel" + else + return "osx-ppc" + end + elseif name == "freebsd" then + return "freebsd" + end + end + return default or name +end + +-- it starts here + +input.runners = { } +input.runners.applications = { } + +input.runners.applications.lua = "luatex --luaonly" +input.runners.applications.pl = "perl" +input.runners.applications.py = "python" +input.runners.applications.rb = "ruby" + +input.runners.suffixes = { + 'rb', 'lua', 'py', 'pl' +} + +input.runners.registered = { + texexec = { 'texexec.rb', true }, + texutil = { 'texutil.rb', true }, + texfont = { 'texfont.pl', true }, + texshow = { 'texshow.pl', false }, + + makempy = { 'makempy.pl', true }, + mptopdf = { 'mptopdf.pl', true }, + pstopdf = { 'pstopdf.rb', true }, + + examplex = { 'examplex.rb', false }, + concheck = { 'concheck.rb', false }, + + runtools = { 'runtools.rb', true }, + textools = { 'textools.rb', true }, + tmftools = { 'tmftools.rb', true }, + ctxtools = { 'ctxtools.rb', true }, + rlxtools = { 'rlxtools.rb', true }, + pdftools = { 'pdftools.rb', true }, + mpstools = { 'mpstools.rb', true }, + exatools = { 'exatools.rb', true }, + xmltools = { 'xmltools.rb', true }, + luatools = { 'luatools.lua', true }, + mtxtools = { 'mtxtools.rb', true }, + + pdftrimwhite = { 'pdftrimwhite.pl', false } +} + +if not messages then messages = { } end + +messages.help = [[ 
+--script run an mtx script +--execute run a script or program +--resolve resolve prefixed arguments +--ctxlua run internally (using preloaded libs) +--locate locate given filename + +--autotree use texmf tree cf. env 'texmfstart_tree' or 'texmfstarttree' +--tree=pathtotree use given texmf tree (default file: 'setuptex.tmf') +--environment=name use given (tmf) environment file +--path=runpath go to given path before execution +--ifchanged=filename only execute when given file has changed (md checksum) +--iftouched=old,new only execute when given file has changed (time stamp) + +--make create stubs for (context related) scripts +--remove remove stubs (context related) scripts +--stubpath=binpath paths where stubs wil be written +--windows create windows (mswin) stubs +--unix create unix (linux) stubs + +--verbose give a bit more info +--engine=str target engine +--progname=str format or backend + +--edit launch editor with found file +--launch (--all) launch files (assume os support) + +--intern run script using built in libraries +]] + +function input.runners.my_prepare_a(instance) + input.resetconfig(instance) + input.identify_cnf(instance) + input.load_lua(instance) + input.expand_variables(instance) + input.load_cnf(instance) + input.expand_variables(instance) +end + +function input.runners.my_prepare_b(instance) + input.runners.my_prepare_a(instance) + input.load_hash(instance) + input.automount(instance) +end + +function input.runners.prepare(instance) + local checkname = environment.argument("ifchanged") + if checkname and checkname ~= "" then + local oldchecksum = file.loadchecksum(checkname) + local newchecksum = file.checksum(checkname) + if oldchecksum == newchecksum then + report("file '" .. checkname .. "' is unchanged") + return "skip" + else + report("file '" .. checkname .. "' is changed, processing started") + end + file.savechecksum(checkname) + end + local oldname, newname = string.split(environment.argument("iftouched") or "", ",") + if oldname and newname and oldname ~= "" and newname ~= "" then + if not file.needs_updating(oldname,newname) then + report("file '" .. oldname .. "' and '" .. newname .. "'have same age") + return "skip" + else + report("file '" .. newname .. "' is older than '" .. oldname .. "'") + end + end + local tree = environment.argument('tree') or "" + if environment.argument('autotree') then + tree = os.getenv('TEXMFSTART_TREE') or os.getenv('TEXMFSTARTTREE') or tree + end + if tree and tree ~= "" then + input.load_tree(tree) + end + local env = environment.argument('environment') or "" + if env and env ~= "" then + for _,e in pairs(string.split(env)) do + -- maybe force suffix when not given + input.load_tree(e) + end + end + local runpath = environment.argument("path") + if runpath and not dir.chdir(runpath) then + input.report("unable to change to path '" .. runpath .. 
"'") + return "error" + end + return "run" +end + +function input.runners.execute_script(instance,fullname,internal) + if fullname and fullname ~= "" then + local state = input.runners.prepare(instance) + if state == 'error' then + return false + elseif state == 'skip' then + return true + elseif state == "run" then + instance.progname = environment.argument("progname") or instance.progname + instance.format = environment.argument("format") or instance.format + local path, name, suffix, result = file.dirname(fullname), file.basename(fullname), file.extname(fullname), "" + if path ~= "" then + result = fullname + elseif name then + name = name:gsub("^int[%a]*:",function() + internal = true + return "" + end ) + name = name:gsub("^script:","") + if suffix == "" and input.runners.registered[name] and input.runners.registered[name][1] then + name = input.runners.registered[name][1] + suffix = file.extname(name) + end + if suffix == "" then + -- loop over known suffixes + for _,s in pairs(input.runners.suffixes) do + result = input.find_file(instance, name .. "." .. s, 'texmfscripts') + if result ~= "" then + break + end + end + elseif input.runners.applications[suffix] then + result = input.find_file(instance, name, 'texmfscripts') + else + -- maybe look on path + result = input.find_file(instance, name, 'other text files') + end + end + if result and result ~= "" then + if internal then + local before, after = environment.split_arguments(fullname) + arg = { } for _,v in pairs(after) do arg[#arg+1] = v end + dofile(result) + else + local binary = input.runners.applications[file.extname(result)] + if binary and binary ~= "" then + result = binary .. " " .. result + end + local before, after = environment.split_arguments(fullname) + local command = result .. " " .. environment.reconstruct_commandline(after) + input.report("") + input.report("executing: " .. command) + input.report("\n \n") + io.flush() + local code = os.exec(command) -- maybe spawn + return code == 0 + end + end + end + end + return false +end + +function input.runners.execute_program(instance,fullname) + if fullname and fullname ~= "" then + local state = input.runners.prepare(instance) + if state == 'error' then + return false + elseif state == 'skip' then + return true + elseif state == "run" then + local before, after = environment.split_arguments(fullname) + environment.initialize_arguments(after) + fullname = fullname:gsub("^bin:","") + local command = fullname .. " " .. environment.reconstruct_commandline(after) + input.report("") + input.report("executing: " .. command) + input.report("\n \n") + io.flush() + local code = os.exec(command) -- (fullname,unpack(after)) does not work / maybe spawn + return code == 0 + end + end + return false +end + +function input.runners.handle_stubs(instance,create) + local stubpath = environment.argument('stubpath') or '.' -- 'auto' no longer supported + local windows = environment.argument('windows') or environment.argument('mswin') or false + local unix = environment.argument('unix') or environment.argument('linux') or false + if not windows and not unix then + if environment.platform == "unix" then + unix = true + else + windows = true + end + end + for _,v in pairs(input.runners.registered) do + local name, doit = v[1], v[2] + if doit then + local base = string.gsub(file.basename(name), "%.(.-)$", "") + if create then + -- direct local command = input.runners.applications[file.extname(name)] .. " " .. name + local command = "luatex --luaonly mtxrun.lua " .. 
name + if windows then + io.savedata(base..".bat", {"@echo off", command.." %*"}, "\013\010") + input.report("windows stub for '" .. base .. "' created") + end + if unix then + io.savedata(base, {"#!/bin/sh", command..' "$@"'}, "\010") + input.report("unix stub for '" .. base .. "' created") + end + else + if windows and (os.remove(base..'.bat') or os.remove(base..'.cmd')) then + input.report("windows stub for '" .. base .. "' removed") + end + if unix and (os.remove(base) or os.remove(base..'.sh')) then + input.report("unix stub for '" .. base .. "' removed") + end + end + end + end +end + +function input.runners.resolve_string(instance,filename) + if filename and filename ~= "" then + input.runners.report_location(instance,input.resolve(instance,filename)) + end +end + +function input.runners.locate_file(instance,filename) + if filename and filename ~= "" then + input.runners.report_location(instance,input.find_given_file(instance,filename)) + end +end + +function input.runners.locate_platform(instance) + input.runners.report_location(instance,os.currentplatform()) +end + +function input.runners.report_location(instance,result) + if input.verbose then + input.report("") + if result and result ~= "" then + input.report(result) + else + input.report("not found") + end + else + io.write(result) + end +end + +function input.runners.edit_script(instance,filename) + local editor = os.getenv("MTXRUN_EDITOR") or os.getenv("TEXMFSTART_EDITOR") or os.getenv("EDITOR") or 'scite' + local rest = input.resolve(instance,filename) + if rest ~= "" then + os.launch(editor .. " " .. rest) + end +end + +function input.runners.save_script_session(filename, list) + local t = { } + for _, key in ipairs(list) do + t[key] = environment.arguments[key] + end + io.savedata(filename,table.serialize(t,true)) +end + +function input.runners.load_script_session(filename) + if lfs.isfile(filename) then + local t = io.loaddata(filename) + if t then + t = loadstring(t) + if t then t = t() end + for key, value in pairs(t) do + environment.arguments[key] = value + end + end + end +end + +input.runners.launchers = { + windows = { }, + unix = { } +} + +function input.launch(str) + -- maybe we also need to test on mtxrun.launcher.suffix environment + -- variable or on windows consult the assoc and ftype vars and such + local launchers = input.runners.launchers[os.platform] if launchers then + local suffix = file.extname(str) if suffix then + local runner = launchers[suffix] if runner then + str = runner .. " " .. str + end + end + end + os.launch(str) +end + +function input.runners.launch_file(instance,filename) + instance.allresults = true + input.verbose = true + local pattern = environment.arguments["pattern"] + if not pattern or pattern == "" then + pattern = filename + end + if not pattern or pattern == "" then + input.report("provide name or --pattern=") + else + local t = input.find_files(instance,pattern) + -- local t = input.aux.find_file(instance,"*/" .. 
pattern,true) + if t and #t > 0 then + if environment.arguments["all"] then + for _, v in pairs(t) do + input.report("launching", v) + input.launch(v) + end + else + input.report("launching", t[1]) + input.launch(t[1]) + end + else + input.report("no match for", pattern) + end + end +end + +function input.runners.execute_ctx_script(instance,filename,arguments) + local function found(name) + local path = file.dirname(name) + if path and path ~= "" then + return false + else + local fullname = own and own.path and file.join(own.path,name) + return io.exists(fullname) and fullname + end + end + local suffix = "" + if not filename:find("%.lua$") then suffix = ".lua" end + local fullname = filename + -- just + fullname = filename .. suffix + fullname = input.find_file(instance,fullname) + -- mtx- + if not fullname or fullname == "" then + fullname = "mtx-" .. filename .. suffix + fullname = found(fullname) or input.find_file(instance,fullname) + end + -- mtx-s + if not fullname or fullname == "" then + fullname = "mtx-" .. filename .. "s" .. suffix + fullname = found(fullname) or input.find_file(instance,fullname) + end + -- mtx- + if not fullname or fullname == "" then + fullname = "mtx-" .. filename:gsub("s$","") .. suffix + fullname = found(fullname) or input.find_file(instance,fullname) + end + -- that should do it + if fullname and fullname ~= "" then + local state = input.runners.prepare(instance) + if state == 'error' then + return false + elseif state == 'skip' then + return true + elseif state == "run" then + -- load and save ... kind of undocumented + arg = { } for _,v in pairs(arguments) do arg[#arg+1] = v end + environment.initialize_arguments(arg) + local loadname = environment.arguments['load'] + if loadname then + if type(loadname) ~= "string" then loadname = file.basename(fullname) end + loadname = file.replacesuffix(loadname,"cfg") + input.runners.load_script_session(loadname) + end + filename = environment.files[1] + if input.verbose then + input.report("using script: " .. fullname) + end + dofile(fullname) + local savename = environment.arguments['save'] + if savename and input.runners.save_list and not table.is_empty(input.runners.save_list or { }) then + if type(savename) ~= "string" then savename = file.basename(fullname) end + savename = file.replacesuffix(savename,"cfg") + input.runners.save_script_session(savename, input.runners.save_list) + end + return true + end + else + input.verbose = true + input.report("unknown script: " .. filename) + return false + end +end + +input.report(banner,"\n") + +function input.help(banner,message) + if not input.verbose then + input.verbose = true + input.report(banner,"\n") + end + input.reportlines(message) +end + +-- this is a bit dirty ... first we store the first filename and next we +-- split the arguments so that we only see the ones meant for this script +-- ... 
later we will use the second half + +local filename = environment.files[1] or "" +local ok = true + +local before, after = environment.split_arguments(filename) + +input.runners.my_prepare_b(instance) +before = input.resolve(instance,before) -- experimental here +after = input.resolve(instance,after) -- experimental here + +environment.initialize_arguments(before) + +if environment.argument("selfmerge") then + -- embed used libraries + utils.merger.selfmerge(own.name,own.libs,own.list) +elseif environment.argument("selfclean") then + -- remove embedded libraries + utils.merger.selfclean(own.name) +elseif environment.argument("selfupdate") then + input.verbose = true + input.update_script(instance,own.name,"mtxrun") +elseif environment.argument("ctxlua") or environment.argument("internal") then + -- run a script by loading it (using libs) + ok = input.runners.execute_script(instance,filename,true) +elseif environment.argument("script") then + -- run a script by loading it (using libs), pass args + ok = input.runners.execute_ctx_script(instance,filename,after) +elseif environment.argument("execute") then + -- execute script + ok = input.runners.execute_script(instance,filename) +elseif environment.argument("direct") then + -- equals bin: + ok = input.runners.execute_program(instance,filename) +elseif environment.argument("edit") then + -- edit file + input.runners.edit_script(instance,filename) +elseif environment.argument("launch") then + input.runners.launch_file(instance,filename) +elseif environment.argument("make") then + -- make stubs + input.runners.handle_stubs(instance,true) +elseif environment.argument("remove") then + -- remove stub + input.runners.handle_stubs(instance,false) +elseif environment.argument("resolve") then + -- resolve string + input.runners.resolve_string(instance,filename) +elseif environment.argument("locate") then + -- locate file + input.runners.locate_file(instance,filename) +elseif environment.argument("platform")then + -- locate platform + input.runners.locate_platform(instance) +elseif environment.argument("help") or filename=='help' or filename == "" then + input.help(banner,messages.help) + -- execute script + if filename:find("^bin:") then + ok = input.runners.execute_program(instance,filename) + else + ok = input.runners.execute_script(instance,filename) + end +end + +--~ if input.verbose then +--~ input.report("") +--~ input.report(string.format("runtime: %0.3f seconds",os.runtime())) +--~ end + +--~ if ok then +--~ input.report("exit code: 0") os.exit(0) +--~ else +--~ input.report("exit code: 1") os.exit(1) +--~ end + +if environment.platform == "unix" then + io.write("\n") +end diff --git a/Build/source/texk/texlive/linked_scripts/context/ruby/texmfstart.rb b/Build/source/texk/texlive/linked_scripts/context/ruby/texmfstart.rb new file mode 100755 index 00000000000..e43929213f5 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/ruby/texmfstart.rb @@ -0,0 +1,2501 @@ +#!/usr/bin/env ruby + +# program : texmfstart +# copyright : PRAGMA Advanced Document Engineering +# version : 1.9.0 - 2003/2006 +# author : Hans Hagen +# +# project : ConTeXt / eXaMpLe +# info : j.hagen@xs4all.nl +# www : www.pragma-pod.com / www.pragma-ade.com + +# no special requirements, i.e. no exa modules/classes used + +# texmfstart [switches] filename [optional arguments] +# +# ruby2exe texmfstart --help -> avoids stub test +# +# Of couse I can make this into a nice class, which i'll undoubtely will +# do when I feel the need. 
In that case it will be part of a bigger game. + +# turning this into a service would be nice, so some day ... + +# --locate => provides location +# --exec => exec instead of system +# --iftouched=a,b => only if timestamp a<>b +# --ifchanged=a,b => only if checksum changed +# +# file: path: bin: + +# texmfstart --exec bin:scite *.tex + +# we don't depend on other libs + +$: << File.expand_path(File.dirname($0)) ; $: << File.join($:.last,'lib') ; $:.uniq! + +require "rbconfig" +require "md5" + +# funny, selfmergs was suddenly broken to case problems + +# kpse_merge_done: require 'base/kpseremote' +# kpse_merge_done: require 'base/kpsedirect' +# kpse_merge_done: require 'base/kpsefast' +# kpse_merge_done: require 'base/merge' + +# kpse_merge_start + +# kpse_merge_file: 't:/ruby/base/kpsefast.rb' + +# module : base/kpsefast +# copyright : PRAGMA Advanced Document Engineering +# version : 2005 +# author : Hans Hagen +# +# project : ConTeXt / eXaMpLe +# concept : Hans Hagen +# info : j.hagen@xs4all.nl + +# todo: multiple cnf files +# + +class String + + def split_path + if self =~ /\;/o || self =~ /^[a-z]\:/io then + self.split(";") + else + self.split(":") + end + end + +end + +class Array + + def join_path + self.join(File::PATH_SEPARATOR) + end + +end + +class File + + def File.locate_file(path,name) + begin + files = Dir.entries(path) + if files.include?(name) then + fullname = File.join(path,name) + return fullname if FileTest.file?(fullname) + end + files.each do |p| + fullname = File.join(path,p) + if p != '.' and p != '..' and FileTest.directory?(fullname) and result = locate_file(fullname,name) then + return result + end + end + rescue + # bad path + end + return nil + end + + def File.glob_file(pattern) + return Dir.glob(pattern).first + end + +end + +module KpseUtil + + # to be adapted, see loading cnf file + + @@texmftrees = ['texmf-local','texmf.local','../..','texmf'] # '../..' is for gwtex + @@texmfcnf = 'texmf.cnf' + + def KpseUtil::identify + # we mainly need to identify the local tex stuff and wse assume that + # the texmfcnf variable is set; otherwise we need to expand the + # TEXMF variable and that takes time since it may involve more + ownpath = File.expand_path($0) + if ownpath.gsub!(/texmf.*?$/o, '') then + ENV['SELFAUTOPARENT'] = ownpath + else + ENV['SELFAUTOPARENT'] = '.' # fall back + # may be too tricky: + # + # (ENV['PATH'] ||'').split_path.each do |p| + # if p.gsub!(/texmf.*?$/o, '') then + # ENV['SELFAUTOPARENT'] = p + # break + # end + # end + end + filenames = Array.new + if ENV['TEXMFCNF'] && ! ENV['TEXMFCNF'].empty? then + ENV['TEXMFCNF'].to_s.split_path.each do |path| + filenames << File.join(path,@@texmfcnf) + end + elsif ENV['SELFAUTOPARENT'] == '.' then + filenames << File.join('.',@@texmfcnf) + else + @@texmftrees.each do |tree| + filenames << File.join(ENV['SELFAUTOPARENT'],tree,'web2c',@@texmfcnf) + end + end + loop do + busy = false + filenames.collect! do |f| + f.gsub(/\$([a-zA-Z0-9\_\-]+)/o) do + if (! ENV[$1]) || (ENV[$1] == $1) then + "$#{$1}" + else + busy = true + ENV[$1] + end + end + end + break unless busy + end + filenames.delete_if do |f| + ! 
FileTest.file?(f) + end + return filenames + end + + def KpseUtil::environment + Hash.new.merge(ENV) + end + +end + +class KpseFast + + # formats are an incredible inconsistent mess + + @@suffixes = Hash.new + @@formats = Hash.new + @@suffixmap = Hash.new + + @@texmfcnf = 'texmf.cnf' + + @@suffixes['gf'] = ['.gf'] # todo + @@suffixes['pk'] = ['.pk'] # todo + @@suffixes['tfm'] = ['.tfm'] + @@suffixes['afm'] = ['.afm'] + @@suffixes['base'] = ['.base'] + @@suffixes['bib'] = ['.bib'] + @@suffixes['bst'] = ['.bst'] + @@suffixes['cnf'] = ['.cnf'] + @@suffixes['ls-R'] = ['ls-R', 'ls-r'] + @@suffixes['fmt'] = ['.fmt', '.efmt', '.efm', '.ofmt', '.ofm', '.oft', '.eofmt', '.eoft', '.eof', '.pfmt', '.pfm', '.epfmt', '.epf', '.xpfmt', '.xpf', '.afmt', '.afm'] + @@suffixes['map'] = ['.map'] + @@suffixes['mem'] = ['.mem'] + @@suffixes['mf'] = ['.mf'] + @@suffixes['mfpool'] = ['.pool'] + @@suffixes['mft'] = ['.mft'] + @@suffixes['mp'] = ['.mp'] + @@suffixes['mppool'] = ['.pool'] + @@suffixes['ocp'] = ['.ocp'] + @@suffixes['ofm'] = ['.ofm', '.tfm'] + @@suffixes['opl'] = ['.opl'] + @@suffixes['otp'] = ['.otp'] + @@suffixes['ovf'] = ['.ovf'] + @@suffixes['ovp'] = ['.ovp'] + @@suffixes['graphic/figure'] = ['.eps', '.epsi'] + @@suffixes['tex'] = ['.tex'] + @@suffixes['texpool'] = ['.pool'] + @@suffixes['PostScript header'] = ['.pro'] + @@suffixes['type1 fonts'] = ['.pfa', '.pfb'] + @@suffixes['vf'] = ['.vf'] + @@suffixes['ist'] = ['.ist'] + @@suffixes['truetype fonts'] = ['.ttf', '.ttc'] + @@suffixes['web'] = ['.web', '.ch'] + @@suffixes['cweb'] = ['.w', '.web', '.ch'] + @@suffixes['enc files'] = ['.enc'] + @@suffixes['cmap files'] = ['.cmap'] + @@suffixes['subfont definition files'] = ['.sfd'] + @@suffixes['lig files'] = ['.lig'] + @@suffixes['bitmap font'] = [] + @@suffixes['MetaPost support'] = [] + @@suffixes['TeX system documentation'] = [] + @@suffixes['TeX system sources'] = [] + @@suffixes['Troff fonts'] = [] + @@suffixes['dvips config'] = [] + @@suffixes['type42 fonts'] = [] + @@suffixes['web2c files'] = [] + @@suffixes['other text files'] = [] + @@suffixes['other binary files'] = [] + @@suffixes['misc fonts'] = [] + @@suffixes['opentype fonts'] = [] + @@suffixes['pdftex config'] = [] + @@suffixes['texmfscripts'] = [] + + # replacements + + @@suffixes['fmt'] = ['.fmt'] + @@suffixes['type1 fonts'] = ['.pfa', '.pfb', '.pfm'] + @@suffixes['tex'] = ['.tex', '.xml'] + @@suffixes['texmfscripts'] = ['rb','lua','py','pl'] + + @@suffixes.keys.each do |k| @@suffixes[k].each do |s| @@suffixmap[s] = k end end + + # TTF2TFMINPUTS + # MISCFONTS + # TEXCONFIG + # DVIPDFMINPUTS + # OTFFONTS + + @@formats['gf'] = '' + @@formats['pk'] = '' + @@formats['tfm'] = 'TFMFONTS' + @@formats['afm'] = 'AFMFONTS' + @@formats['base'] = 'MFBASES' + @@formats['bib'] = '' + @@formats['bst'] = '' + @@formats['cnf'] = '' + @@formats['ls-R'] = '' + @@formats['fmt'] = 'TEXFORMATS' + @@formats['map'] = 'TEXFONTMAPS' + @@formats['mem'] = 'MPMEMS' + @@formats['mf'] = 'MFINPUTS' + @@formats['mfpool'] = 'MFPOOL' + @@formats['mft'] = '' + @@formats['mp'] = 'MPINPUTS' + @@formats['mppool'] = 'MPPOOL' + @@formats['ocp'] = 'OCPINPUTS' + @@formats['ofm'] = 'OFMFONTS' + @@formats['opl'] = 'OPLFONTS' + @@formats['otp'] = 'OTPINPUTS' + @@formats['ovf'] = 'OVFFONTS' + @@formats['ovp'] = 'OVPFONTS' + @@formats['graphic/figure'] = '' + @@formats['tex'] = 'TEXINPUTS' + @@formats['texpool'] = 'TEXPOOL' + @@formats['PostScript header'] = 'TEXPSHEADERS' + @@formats['type1 fonts'] = 'T1FONTS' + @@formats['vf'] = 'VFFONTS' + @@formats['ist'] = '' + 
@@formats['truetype fonts'] = 'TTFONTS' + @@formats['web'] = '' + @@formats['cweb'] = '' + @@formats['enc files'] = 'ENCFONTS' + @@formats['cmap files'] = 'CMAPFONTS' + @@formats['subfont definition files'] = 'SFDFONTS' + @@formats['lig files'] = 'LIGFONTS' + @@formats['bitmap font'] = '' + @@formats['MetaPost support'] = '' + @@formats['TeX system documentation'] = '' + @@formats['TeX system sources'] = '' + @@formats['Troff fonts'] = '' + @@formats['dvips config'] = '' + @@formats['type42 fonts'] = 'T42FONTS' + @@formats['web2c files'] = 'WEB2C' + @@formats['other text files'] = '' + @@formats['other binary files'] = '' + @@formats['misc fonts'] = '' + @@formats['opentype fonts'] = 'OPENTYPEFONTS' + @@formats['pdftex config'] = 'PDFTEXCONFIG' + @@formats['texmfscripts'] = 'TEXMFSCRIPTS' + + attr_accessor :progname, :engine, :format, :rootpath, :treepath, + :verbose, :remember, :scandisk, :diskcache, :renewcache + + @@cacheversion = '1' + + def initialize + @rootpath = '' + @treepath = '' + @progname = 'kpsewhich' + @engine = 'pdftex' + @variables = Hash.new + @expansions = Hash.new + @files = Hash.new + @found = Hash.new + @kpsevars = Hash.new + @lsrfiles = Array.new + @cnffiles = Array.new + @verbose = true + @remember = true + @scandisk = true + @diskcache = true + @renewcache = false + @isolate = false + + @diskcache = false + @cachepath = nil + @cachefile = 'tmftools.log' + + @environment = ENV + end + + def set(key,value) + case key + when 'progname' then @progname = value + when 'engine' then @engine = value + when 'format' then @format = value + end + end + + def push_environment(env) + @environment = env + end + + # {$SELFAUTOLOC,$SELFAUTODIR,$SELFAUTOPARENT}{,{/share,}/texmf{-local,}/web2c} + # + # $SELFAUTOLOC : /usr/tex/bin/platform + # $SELFAUTODIR : /usr/tex/bin + # $SELFAUTOPARENT : /usr/tex + # + # since we live in scriptpath we need a slightly different method + + def load_cnf(filenames=nil) + unless filenames then + ownpath = File.expand_path($0) + if ownpath.gsub!(/texmf.*?$/o, '') then + @environment['SELFAUTOPARENT'] = ownpath + else + @environment['SELFAUTOPARENT'] = '.' + end + unless @treepath.empty? then + unless @rootpath.empty? then + @treepath = @treepath.split(',').collect do |p| File.join(@rootpath,p) end.join(',') + end + @environment['TEXMF'] = @treepath + # only the first one + @environment['TEXMFCNF'] = File.join(@treepath.split(',').first,'texmf/web2c') + end + unless @rootpath.empty? then + @environment['TEXMFCNF'] = File.join(@rootpath,'texmf/web2c') + @environment['SELFAUTOPARENT'] = @rootpath + @isolate = true + end + filenames = Array.new + if @environment['TEXMFCNF'] and not @environment['TEXMFCNF'].empty? then + @environment['TEXMFCNF'].to_s.split_path.each do |path| + filenames << File.join(path,@@texmfcnf) + end + elsif @environment['SELFAUTOPARENT'] == '.' then + filenames << File.join('.',@@texmfcnf) + else + ['texmf-local','texmf'].each do |tree| + filenames << File.join(@environment['SELFAUTOPARENT'],tree,'web2c',@@texmfcnf) + end + end + end + # /texmf/web2c/texmf.cnf + filenames = _expanded_path_(filenames) + @rootpath = filenames.first + 3.times do + @rootpath = File.dirname(@rootpath) + end + filenames.collect! 
do |f| + f.gsub("\\", '/') + end + filenames.each do |fname| + if FileTest.file?(fname) and f = File.open(fname) then + @cnffiles << fname + while line = f.gets do + loop do + # concatenate lines ending with \ + break unless line.sub!(/\\\s*$/o) do + f.gets || '' + end + end + case line + when /^[\%\#]/o then + # comment + when /^\s*(.*?)\s*\=\s*(.*?)\s*$/o then + key, value = $1, $2 + unless @variables.key?(key) then + value.sub!(/\%.*$/,'') + value.sub!(/\~/, "$HOME") + @variables[key] = value + end + @kpsevars[key] = true + end + end + f.close + end + end + end + + def load_lsr + @lsrfiles = [] + simplified_list(expansion('TEXMF')).each do |p| + ['ls-R','ls-r'].each do |f| + filename = File.join(p,f) + if FileTest.file?(filename) then + @lsrfiles << [filename,File.size(filename)] + break + end + end + end + @files = Hash.new + if @diskcache then + ['HOME','TEMP','TMP','TMPDIR'].each do |key| + if @environment[key] then + if FileTest.directory?(@environment[key]) then + @cachepath = @environment[key] + @cachefile = [@rootpath.gsub(/[^A-Z0-9]/io, '-').gsub(/\-+/,'-'),File.basename(@cachefile)].join('-') + break + end + end + end + if @cachepath and not @renewcache and FileTest.file?(File.join(@cachepath,@cachefile)) then + begin + if f = File.open(File.join(@cachepath,@cachefile)) then + cacheversion = Marshal.load(f) + if cacheversion == @@cacheversion then + lsrfiles = Marshal.load(f) + if lsrfiles == @lsrfiles then + @files = Marshal.load(f) + end + end + f.close + end + rescue + @files = Hash.new + end + end + end + return if @files.size > 0 + @lsrfiles.each do |filedata| + filename, filesize = filedata + filepath = File.dirname(filename) + begin + path = '.' + data = IO.readlines(filename) + if data[0].chomp =~ /% ls\-R \-\- filename database for kpathsea\; do not change this line\./io then + data.each do |line| + case line + when /^[a-zA-Z0-9]/o then + line.chomp! + if @files[line] then + @files[line] << path + else + @files[line] = [path] + end + when /^\.\/(.*?)\:$/o then + path = File.join(filepath,$1) + end + end + end + rescue + # sorry + end + end + if @diskcache and @cachepath and f = File.open(File.join(@cachepath,@cachefile),'wb') then + f << Marshal.dump(@@cacheversion) + f << Marshal.dump(@lsrfiles) + f << Marshal.dump(@files) + f.close + end + end + + def expand_variables + @expansions = Hash.new + if @isolate then + @variables['TEXMFCNF'] = @environment['TEXMFCNF'].dup + @variables['SELFAUTOPARENT'] = @environment['SELFAUTOPARENT'].dup + else + @environment.keys.each do |e| + if e =~ /^([a-zA-Z]+)\_(.*)\s*$/o then + @expansions["#{$1}.#{$2}"] = (@environment[e] ||'').dup + else + @expansions[e] = (@environment[e] ||'').dup + end + end + end + @variables.keys.each do |k| + @expansions[k] = @variables[k].dup unless @expansions[k] + end + loop do + busy = false + @expansions.keys.each do |k| + @expansions[k].gsub!(/\$([a-zA-Z0-9\_\-]*)/o) do + busy = true + @expansions[$1] || '' + end + @expansions[k].gsub!(/\$\{([a-zA-Z0-9\_\-]*)\}/o) do + busy = true + @expansions[$1] || '' + end + end + break unless busy + end + @expansions.keys.each do |k| + @expansions[k] = @expansions[k].gsub("\\", '/') + end + end + + def variable(name='') + (name and not name.empty? and @variables[name.sub('$','')]) or '' + end + + def expansion(name='') + (name and not name.empty? and @expansions[name.sub('$','')]) or '' + end + + def variable?(name='') + name and not name.empty? and @variables.key?(name.sub('$','')) + end + + def expansion?(name='') + name and not name.empty? 
and @expansions.key?(name.sub('$','')) + end + + def simplified_list(str) + lst = str.gsub(/^\{/o,'').gsub(/\}$/o,'').split(",") + lst.collect do |l| + l.sub(/^[\!]*/,'').sub(/[\/\\]*$/o,'') + end + end + + def original_variable(variable) + if variable?("#{@progname}.#{variable}") then + variable("#{@progname}.#{variable}") + elsif variable?(variable) then + variable(variable) + else + '' + end + end + + def expanded_variable(variable) + if expansion?("#{variable}.#{@progname}") then + expansion("#{variable}.#{@progname}") + elsif expansion?(variable) then + expansion(variable) + else + '' + end + end + + def original_path(filename='') + _expanded_path_(original_variable(var_of_format_or_suffix(filename)).split(";")) + end + + def expanded_path(filename='') + _expanded_path_(expanded_variable(var_of_format_or_suffix(filename)).split(";")) + end + + def _expanded_path_(pathlist) + i, n = 0, 0 + pathlist.collect! do |mainpath| + mainpath.gsub(/([\{\}])/o) do + if $1 == "{" then + i += 1 ; n = i if i > n ; "<#{i}>" + else + i -= 1 ; "" + end + end + end + n.times do |i| + loop do + more = false + newlist = [] + pathlist.each do |path| + unless path.sub!(/^(.*?)<(#{n-i})>(.*?)<\/\2>(.*?)$/) do + pre, mid, post = $1, $3, $4 + mid.gsub!(/\,$/,',.') + mid.split(',').each do |m| + more = true + if m == '.' then + newlist << "#{pre}#{post}" + else + newlist << "#{pre}#{m}#{post}" + end + end + end then + newlist << path + end + end + if more then + pathlist = [newlist].flatten # copy -) + else + break + end + end + end + pathlist = pathlist.uniq.collect do |path| + p = path + # p.gsub(/^\/+/o) do '' end + # p.gsub!(/(.)\/\/(.)/o) do "#{$1}/#{$2}" end + # p.gsub!(/\/\/+$/o) do '//' end + p.gsub!(/\/\/+/o) do '//' end + p + end + pathlist + end + + # todo: ignore case + + def var_of_format(str) + @@formats[str] || '' + end + + def var_of_suffix(str) # includes . + if @@suffixmap.key?(str) then @@formats[@@suffixmap[str]] else '' end + end + + def var_of_format_or_suffix(str) + if @@formats.key?(str) then + @@formats[str] + elsif @@suffixmap.key?(File.extname(str)) then # extname includes . + @@formats[@@suffixmap[File.extname(str)]] # extname includes . + else + '' + end + end + +end + +class KpseFast + + # test things + + def list_variables(kpseonly=true) + @variables.keys.sort.each do |k| + if kpseonly then + puts("#{k} = #{@variables[k]}") if @kpsevars[k] + else + puts("#{if @kpsevars[k] then 'K' else 'E' end} #{k} = #{@variables[k]}") + end + end + end + + def list_expansions(kpseonly=true) + @expansions.keys.sort.each do |k| + if kpseonly then + puts("#{k} = #{@expansions[k]}") if @kpsevars[k] + else + puts("#{if @kpsevars[k] then 'K' else 'E' end} #{k} = #{@expansions[k]}") + end + end + end + + def list_lsr + puts("files = #{@files.size}") + end + + def set_test_patterns + @variables["KPSE_TEST_PATTERN_A"] = "foo/{1,2}/bar//" + @variables["KPSE_TEST_PATTERN_B"] = "!!x{A,B{1,2}}y" + @variables["KPSE_TEST_PATTERN_C"] = "x{A,B//{1,2}}y" + @variables["KPSE_TEST_PATTERN_D"] = "x{A,B//{1,2,}}//y" + end + + def show_test_patterns + ['A','B','D'].each do |i| + puts "" + puts @variables ["KPSE_TEST_PATTERN_#{i}"] + puts "" + puts expand_path("KPSE_TEST_PATTERN_#{i}").split_path + puts "" + end + end + +end + +class KpseFast + + # kpse stuff + + def expand_braces(str) # output variable and brace expansion of STRING. + _expanded_path_(original_variable(str).split_path).join_path + end + + def expand_path(str) # output complete path expansion of STRING. 
+ _expanded_path_(expanded_variable(str).split_path).join_path + end + + def expand_var(str) # output variable expansion of STRING. + expanded_variable(str) + end + + def show_path(str) # output search path for file type NAME + expanded_path(str).join_path + end + + def var_value(str) # output the value of variable $STRING. + original_variable(str) + end + +end + +class KpseFast + + def _is_cnf_?(filename) + filename == File.basename((@cnffiles.first rescue @@texmfcnf) || @@texmfcnf) + end + + def find_file(filename) + if _is_cnf_?(filename) then + @cnffiles.first rescue '' + else + [find_files(filename,true)].flatten.first || '' + end + end + + def find_files(filename,first=false) + if _is_cnf_?(filename) then + result = @cnffiles.dup + else + if @remember then + # stamp = "#{filename}--#{@format}--#{@engine}--#{@progname}" + stamp = "#{filename}--#{@engine}--#{@progname}" + return @found[stamp] if @found.key?(stamp) + end + pathlist = expanded_path(filename) + result = [] + filelist = if @files.key?(filename) then @files[filename].uniq else nil end + done = false + if pathlist.size == 0 then + if FileTest.file?(filename) then + done = true + result << '.' + end + else + pathlist.each do |path| + doscan = if path =~ /^\!\!/o then false else true end + recurse = if path =~ /\/\/$/o then true else false end + pathname = path.dup + pathname.gsub!(/^\!+/o, '') + done = false + if not done and filelist then + # checking for exact match + if filelist.include?(pathname) then + result << pathname + done = true + end + if not done and recurse then + # checking for fuzzy // + pathname.gsub!(/\/+$/o, '/.*') + # pathname.gsub!(/\/\//o,'/[\/]*/') + pathname.gsub!(/\/\//o,'/.*?/') + re = /^#{pathname}/ + filelist.each do |f| + if re =~ f then + result << f # duplicates will be filtered later + done = true + end + break if done + end + end + end + if not done and doscan then + # checking for path itself + pname = pathname.sub(/\.\*$/,'') + if not pname =~ /\*/o and FileTest.file?(File.join(pname,filename)) then + result << pname + done = true + end + end + break if done and first + end + end + if not done and @scandisk then + pathlist.each do |path| + pathname = path.dup + unless pathname.gsub!(/^\!+/o, '') then # !! 
prevents scan + recurse = pathname.gsub!(/\/+$/o, '') + complex = pathname.gsub!(/\/\//o,'/*/') + if recurse then + if complex then + if ok = File.glob_file("#{pathname}/**/#{filename}") then + result << File.dirname(ok) + done = true + end + elsif ok = File.locate_file(pathname,filename) then + result << File.dirname(ok) + done = true + end + elsif complex then + if ok = File.glob_file("#{pathname}/#{filename}") then + result << File.dirname(ok) + done = true + end + elsif FileTest.file?(File.join(pathname,filename)) then + result << pathname + done = true + end + break if done and first + end + end + end + result = result.uniq.collect do |pathname| + File.join(pathname,filename) + end + @found[stamp] = result if @remember + end + return result # redundant + end + +end + +class KpseFast + + class FileData + attr_accessor :tag, :name, :size, :date + def initialize(tag=0,name=nil,size=nil,date=nil) + @tag, @name, @size, @date = tag, name, size, date + end + def FileData.sizes(a) + a.collect do |aa| + aa.size + end + end + def report + case @tag + when 1 then "deleted | #{@size.to_s.rjust(8)} | #{@date.strftime('%m/%d/%Y %I:%M')} | #{@name}" + when 2 then "present | #{@size.to_s.rjust(8)} | #{@date.strftime('%m/%d/%Y %I:%M')} | #{@name}" + when 3 then "obsolete | #{' '*8} | #{' '*16} | #{@name}" + end + end + end + + def analyze_files(filter='',strict=false,sort='',delete=false) + puts("command line = #{ARGV.join(' ')}") + puts("number of files = #{@files.size}") + puts("filter pattern = #{filter}") + puts("loaded cnf files = #{@cnffiles.join(' ')}") + puts('') + if filter.gsub!(/^not:/,'') then + def the_same(filter,filename) + not filter or filter.empty? or /#{filter}/ !~ filename + end + else + def the_same(filter,filename) + not filter or filter.empty? or /#{filter}/ =~ filename + end + end + @files.keys.each do |name| + if @files[name].size > 1 then + data = Array.new + @files[name].each do |path| + filename = File.join(path,name) + # if not filter or filter.empty? or /#{filter}/ =~ filename then + if the_same(filter,filename) then + if FileTest.file?(filename) then + if delete then + data << FileData.new(1,filename,File.size(filename),File.mtime(filename)) + begin + File.delete(filename) if delete + rescue + end + else + data << FileData.new(2,filename,File.size(filename),File.mtime(filename)) + end + else + # data << FileData.new(3,filename) + end + end + end + if data.length > 1 then + if strict then + # if data.collect do |d| d.size end.uniq! then + # data.sort! do |a,b| b.size <=> a.size end + # data.each do |d| puts d.report end + # puts '' + # end + data.sort! do |a,b| + if a.size and b.size then + b.size <=> a.size + else + 0 + end + end + bunch = Array.new + done = false + data.each do |d| + if bunch.size == 0 then + bunch << d + elsif bunch[0].size == d.size then + bunch << d + else + if bunch.size > 1 then + bunch.each do |b| + puts b.report + end + done = true + end + bunch = [d] + end + end + puts '' if done + else + case sort + when 'size' then data.sort! do |a,b| a.size <=> b.size end + when 'revsize' then data.sort! do |a,b| b.size <=> a.size end + when 'date' then data.sort! do |a,b| a.date <=> b.date end + when 'revdate' then data.sort! 
do |a,b| b.date <=> a.date end + end + data.each do |d| puts d.report end + puts '' + end + end + end + end + end + +end + + + # k = KpseFast.new # (root) + # k.set_test_patterns + # k.load_cnf + # k.expand_variables + # k.load_lsr + + # k.show_test_patterns + + # puts k.list_variables + # puts k.list_expansions + # k.list_lsr + # puts k.expansion("$TEXMF") + # puts k.expanded_path("TEXINPUTS","context") + + # k.progname, k.engine, k.format = 'context', 'pdftex', 'tfm' + # k.scandisk = false # == must_exist + # k.expand_variables + + # 10.times do |i| puts k.find_file('texnansi-lmr10.tfm') end + + # puts "expand braces $TEXMF" + # puts k.expand_braces("$TEXMF") + # puts "expand path $TEXMF" + # puts k.expand_path("$TEXMF") + # puts "expand var $TEXMF" + # puts k.expand_var("$TEXMF") + # puts "expand path $TEXMF" + # puts k.show_path('tfm') + # puts "expand value $TEXINPUTS" + # puts k.var_value("$TEXINPUTS") + # puts "expand value $TEXINPUTS.context" + # puts k.var_value("$TEXINPUTS.context") + + # exit + + + +# kpse_merge_file: 't:/ruby/base/kpse/trees.rb' + +require 'monitor' +# kpse_merge_done: require 'base/kpsefast' + +class KpseTrees < Monitor + + def initialize + @trees = Hash.new + end + + def pattern(filenames) + filenames.join('|').gsub(/\\+/o,'/').downcase + end + + def choose(filenames,environment) + current = pattern(filenames) + load(filenames,environment) unless @trees[current] + puts "enabling tree #{current}" + current + end + + def fetch(filenames,environment) # will send whole object ! + current = pattern(filenames) + load(filenames,environment) unless @trees[current] + puts "fetching tree #{current}" + @trees[current] + end + + def load(filenames,environment) + current = pattern(filenames) + puts "loading tree #{current}" + @trees[current] = KpseFast.new + @trees[current].push_environment(environment) + @trees[current].load_cnf(filenames) + @trees[current].expand_variables + @trees[current].load_lsr + end + + def set(tree,key,value) + case key + when 'progname' then @trees[tree].progname = value + when 'engine' then @trees[tree].engine = value + when 'format' then @trees[tree].format = value + end + end + def get(tree,key) + case key + when 'progname' then @trees[tree].progname + when 'engine' then @trees[tree].engine + when 'format' then @trees[tree].format + end + end + + def load_cnf(tree) + @trees[tree].load_cnf + end + def load_lsr(tree) + @trees[tree].load_lsr + end + def expand_variables(tree) + @trees[tree].expand_variables + end + def expand_braces(tree,str) + @trees[tree].expand_braces(str) + end + def expand_path(tree,str) + @trees[tree].expand_path(str) + end + def expand_var(tree,str) + @trees[tree].expand_var(str) + end + def show_path(tree,str) + @trees[tree].show_path(str) + end + def var_value(tree,str) + @trees[tree].var_value(str) + end + def find_file(tree,filename) + @trees[tree].find_file(filename) + end + def find_files(tree,filename,first) + @trees[tree].find_files(filename,first) + end + +end + + +# kpse_merge_file: 't:/ruby/base/kpse/drb.rb' + +require 'drb' +# kpse_merge_done: require 'base/kpse/trees' + +class KpseServer + + attr_accessor :port + + def initialize(port=7000) + @port = port + end + + def start + puts "starting drb service at port #{@port}" + DRb.start_service("druby://localhost:#{@port}", KpseTrees.new) + trap(:INT) do + DRb.stop_service + end + DRb.thread.join + end + + def stop + # todo + end + +end + +class KpseClient + + attr_accessor :port + + def initialize(port=7000) + @port = port + @kpse = nil + end + + def start + # 
only needed when callbacks are used / slow, due to Socket::getaddrinfo + # DRb.start_service + end + + def object + @kpse = DRbObject.new(nil,"druby://localhost:#{@port}") + end + +end + + +# SERVER_URI="druby://localhost:8787" +# +# # Start a local DRbServer to handle callbacks. +# # +# # Not necessary for this small example, but will be required +# # as soon as we pass a non-marshallable object as an argument +# # to a dRuby call. +# DRb.start_service +# + + +# kpse_merge_file: 't:/ruby/base/kpseremote.rb' + +# kpse_merge_done: require 'base/kpsefast' + +case ENV['KPSEMETHOD'] + when /soap/o then # kpse_merge_done: require 'base/kpse/soap' + when /drb/o then # kpse_merge_done: require 'base/kpse/drb' + else # kpse_merge_done: require 'base/kpse/drb' +end + +class KpseRemote + + @@port = ENV['KPSEPORT'] || 7000 + @@method = ENV['KPSEMETHOD'] || 'drb' + + def KpseRemote::available? + @@method && @@port + end + + def KpseRemote::start_server(port=nil) + kpse = KpseServer.new(port || @@port) + kpse.start + end + + def KpseRemote::start_client(port=nil) # keeps object in server + kpseclient = KpseClient.new(port || @@port) + kpseclient.start + kpse = kpseclient.object + tree = kpse.choose(KpseUtil::identify, KpseUtil::environment) + [kpse, tree] + end + + def KpseRemote::fetch(port=nil) # no need for defining methods but slower, send whole object + kpseclient = KpseClient.new(port || @@port) + kpseclient.start + kpseclient.object.fetch(KpseUtil::identify, KpseUtil::environment) rescue nil + end + + def initialize(port=nil) + if KpseRemote::available? then + begin + @kpse, @tree = KpseRemote::start_client(port) + rescue + @kpse, @tree = nil, nil + end + else + @kpse, @tree = nil, nil + end + end + + def progname=(value) + @kpse.set(@tree,'progname',value) + end + def format=(value) + @kpse.set(@tree,'format',value) + end + def engine=(value) + @kpse.set(@tree,'engine',value) + end + + def progname + @kpse.get(@tree,'progname') + end + def format + @kpse.get(@tree,'format') + end + def engine + @kpse.get(@tree,'engine') + end + + def load + @kpse.load(KpseUtil::identify, KpseUtil::environment) + end + def okay? + @kpse && @tree + end + def set(key,value) + @kpse.set(@tree,key,value) + end + def load_cnf + @kpse.load_cnf(@tree) + end + def load_lsr + @kpse.load_lsr(@tree) + end + def expand_variables + @kpse.expand_variables(@tree) + end + def expand_braces(str) + clean_name(@kpse.expand_braces(@tree,str)) + end + def expand_path(str) + clean_name(@kpse.expand_path(@tree,str)) + end + def expand_var(str) + clean_name(@kpse.expand_var(@tree,str)) + end + def show_path(str) + clean_name(@kpse.show_path(@tree,str)) + end + def var_value(str) + clean_name(@kpse.var_value(@tree,str)) + end + def find_file(filename) + clean_name(@kpse.find_file(@tree,filename)) + end + def find_files(filename,first=false) + # dodo: each filename + @kpse.find_files(@tree,filename,first) + end + + private + + def clean_name(str) + str.gsub(/\\/,'/') + end + +end + + +# kpse_merge_file: 't:/ruby/base/kpsedirect.rb' + +class KpseDirect + + attr_accessor :progname, :format, :engine + + def initialize + @progname, @format, @engine = '', '', '' + end + + def expand_path(str) + clean_name(`kpsewhich -expand-path=#{str}`.chomp) + end + + def expand_var(str) + clean_name(`kpsewhich -expand-var=#{str}`.chomp) + end + + def find_file(str) + clean_name(`kpsewhich #{_progname_} #{_format_} #{str}`.chomp) + end + + def _progname_ + if @progname.empty? then '' else "-progname=#{@progname}" end + end + def _format_ + if @format.empty? 
then '' else "-format=\"#{@format}\"" end + end + + private + + def clean_name(str) + str.gsub(/\\/,'/') + end + +end + + +# kpse_merge_stop + + + +$mswindows = Config::CONFIG['host_os'] =~ /mswin/ +$separator = File::PATH_SEPARATOR +$version = "2.0.3" +$ownpath = File.dirname($0) + +if $mswindows then + require "win32ole" + require "Win32API" +end + +# exit if defined?(REQUIRE2LIB) + +$stdout.sync = true +$stderr.sync = true + +$applications = Hash.new +$suffixinputs = Hash.new +$predefined = Hash.new +$runners = Hash.new + +$suffixinputs['pl'] = 'PERLINPUTS' +$suffixinputs['rb'] = 'RUBYINPUTS' +$suffixinputs['py'] = 'PYTHONINPUTS' +$suffixinputs['lua'] = 'LUAINPUTS' +$suffixinputs['jar'] = 'JAVAINPUTS' +$suffixinputs['pdf'] = 'PDFINPUTS' + +$predefined['texexec'] = 'texexec.rb' +$predefined['texutil'] = 'texutil.rb' +$predefined['texfont'] = 'texfont.pl' +$predefined['texshow'] = 'texshow.pl' + +$predefined['makempy'] = 'makempy.pl' +$predefined['mptopdf'] = 'mptopdf.pl' +$predefined['pstopdf'] = 'pstopdf.rb' + +$predefined['examplex'] = 'examplex.rb' +$predefined['concheck'] = 'concheck.rb' + +$predefined['runtools'] = 'runtools.rb' +$predefined['textools'] = 'textools.rb' +$predefined['tmftools'] = 'tmftools.rb' +$predefined['ctxtools'] = 'ctxtools.rb' +$predefined['rlxtools'] = 'rlxtools.rb' +$predefined['pdftools'] = 'pdftools.rb' +$predefined['mpstools'] = 'mpstools.rb' +$predefined['exatools'] = 'exatools.rb' +$predefined['xmltools'] = 'xmltools.rb' +# $predefined['luatools'] = 'luatools.lua' +# $predefined['mtxtools'] = 'mtxtools.rb' + +$predefined['newpstopdf'] = 'pstopdf.rb' +$predefined['newtexexec'] = 'texexec.rb' +$predefined['pdftrimwhite'] = 'pdftrimwhite.pl' + +$makelist = [ + # context + 'texexec', + 'texutil', + 'texfont', + # mp/ps + 'pstopdf', + 'mptopdf', + 'makempy', + # misc + 'ctxtools', + 'pdftools', + 'xmltools', + 'textools', + 'mpstools', + 'tmftools', + 'exatools', + 'runtools', + 'rlxtools', + 'pdftrimwhite', + 'texfind', + 'texshow' + # + # no 'luatools', + # no 'mtxtools', + # no, 'texmfstart' +] + +$scriptlist = 'rb|pl|py|lua|jar' +$documentlist = 'pdf|ps|eps|htm|html' + +$editor = ENV['TEXMFSTART_EDITOR'] || ENV['EDITOR'] || ENV['editor'] || 'scite' + +$crossover = true # to other tex tools, else only local +$kpse = nil + +def set_applications(page=1) + + $applications['unknown'] = '' + $applications['ruby'] = $applications['rb'] = 'ruby' + $applications['lua'] = $applications['lua'] = 'lua' + $applications['perl'] = $applications['pl'] = 'perl' + $applications['python'] = $applications['py'] = 'python' + $applications['java'] = $applications['jar'] = 'java' + + if $mswindows then + $applications['pdf'] = ['',"pdfopen --page #{page} --file",'acroread'] + $applications['html'] = ['','netscape','mozilla','opera','iexplore'] + $applications['ps'] = ['','gview32','gv','gswin32','gs'] + else + $applications['pdf'] = ["pdfopen --page #{page} --file",'acroread'] + $applications['html'] = ['netscape','mozilla','opera'] + $applications['ps'] = ['gview','gv','gs'] + end + + $applications['htm'] = $applications['html'] + $applications['eps'] = $applications['ps'] + + $runners['lua'] = "texlua" + +end + +set_applications() + +def check_kpse + if $kpse then + # already done + else + begin + if KpseRemote::available? then + $kpse = KpseRemote.new + if $kpse.okay? 
then + puts("kpse : remote") if $verbose + else + $kpse = KpseDirect.new + puts("kpse : direct (forced)") if $verbose + end + else + $kpse = KpseDirect.new + puts("kpse : direct") if $verbose + end + rescue + puts("kpse : direct (fallback)") if $verbose + end + end +end + +if $mswindows then + + GetShortPathName = Win32API.new('kernel32', 'GetShortPathName', ['P','P','N'], 'N') + GetLongPathName = Win32API.new('kernel32', 'GetLongPathName', ['P','P','N'], 'N') + + def dowith_pathname (filename,filemethod) + filename = filename.gsub(/\\/o,'/') # no gsub! because filename can be frozen + case filename + when /\;/o then + # could be a path spec + return filename + when /\s+/o then + # danger lurking + buffer = ' ' * 260 + length = filemethod.call(filename,buffer,buffer.size) + if length>0 then + return buffer.slice(0..length-1) + else + # when the path or file does not exist, nothing is returned + # so we try to handle the path separately from the basename + basename = File.basename(filename) + pathname = File.dirname(filename) + length = filemethod.call(pathname,buffer,260) + if length>0 then + return buffer.slice(0..length-1) + '/' + basename + else + return filename + end + end + else + # no danger + return filename + end + end + + def longpathname (filename) + dowith_pathname(filename,GetLongPathName) + end + + def shortpathname (filename) + dowith_pathname(filename,GetShortPathName) + end + +else + + def longpathname (filename) + filename + end + + def shortpathname (filename) + filename + end + +end + +class File + + # def File.needsupdate(oldname,newname) + # begin + # if $mswindows then + # return File.stat(oldname).mtime > File.stat(newname).mtime + # else + # return File.stat(oldname).mtime != File.stat(newname).mtime + # end + # rescue + # return true + # end + # end + + @@update_eps = 1 + + def File.needsupdate(oldname,newname) + begin + oldtime = File.stat(oldname).mtime.to_i + newtime = File.stat(newname).mtime.to_i + if newtime >= oldtime then + return false + elsif oldtime-newtime < @@update_eps then + return false + else + return true + end + rescue + return true + end + end + + def File.syncmtimes(oldname,newname) + return + begin + if $mswindows then + # does not work (yet) / gives future timestamp + # t = File.mtime(oldname) # i'm not sure if the time is frozen, so we do it here + # File.utime(0,t,oldname,newname) + else + t = File.mtime(oldname) # i'm not sure if the time is frozen, so we do it here + File.utime(0,t,oldname,newname) + end + rescue + end + end + + def File.timestamp(name) + begin + "#{File.stat(name).mtime}" + rescue + return 'unknown' + end + end + +end + +# def hashed (arr=[]) + # arg = if arr.class == String then arr.split(' ') else arr.dup end + # hsh = Hash.new + # if arg.length > 0 + # hsh['arguments'] = '' + # done = false + # arg.each do |s| + # if done then + # if s =~ / / then + # hsh['arguments'] += " \"#{s}\"" # maybe split on = + # else + # hsh['arguments'] += " #{s}" + # end + # else + # kvl = s.split('=') + # if kvl[0].sub!(/^\-+/,'') then + # hsh[kvl[0]] = if kvl.length > 1 then kvl[1] else true end + # else + # hsh['file'] = s + # done = true + # end + # end + # end + # end + # return hsh +# end + +def hashed (arr=[]) + arg = if arr.class == String then arr.split(' ') else arr.dup end + hsh = Hash.new + if arg.length > 0 + hsh['arguments'] = '' + done = false + arg.each do |s| + if done then + if s =~ /\s/ then + kvl = s.split('=') + if kvl[1] and kvl[1] !~ /^[\"\']/ then + hsh['arguments'] += ' ' + kvl[0] + "=" + '"' + kvl[1] + '"' + 
elsif s =~ /\s/ then + hsh['arguments'] += ' "' + s + '"' + else + hsh['arguments'] += ' ' + s + end + else + hsh['arguments'] += ' ' + s + end + else + kvl = s.split('=') + if kvl[0].sub!(/^\-+/,'') then + hsh[kvl[0]] = if kvl.length > 1 then kvl[1] else true end + else + hsh['file'] = s + done = true + end + end + end + end + return hsh +end + + +def launch(filename) + if $browser && $mswindows then + filename = filename.gsub(/\.[\/\\]/) do + Dir.getwd + '/' + end + report("launching #{filename}") + ie = WIN32OLE.new("InternetExplorer.Application") + ie.visible = true + ie.navigate(filename) + return true + else + return false + end +end + +# env|environment +# rel|relative +# loc|locate|kpse|path|file + +def quoted(str) + if str =~ /^\"/ then + return str + elsif str =~ / / then + return "\"#{str}\"" + else + return str + end +end + +def expanded(arg) # no "other text files", too restricted + arg.gsub(/(env|environment)\:([a-zA-Z\-\_\.0-9]+)/o) do + method, original, resolved = $1, $2, '' + if resolved = ENV[original] then + report("environment variable #{original} expands to #{resolved}") unless $report + quoted(resolved) + else + report("environment variable #{original} cannot be resolved") unless $report + quoted(original) + end + end . gsub(/(rel|relative)\:([a-zA-Z\-\_\.0-9]+)/o) do + method, original, resolved = $1, $2, '' + ['.','..','../..'].each do |r| + if FileTest.file?(File.join(r,original)) then + resolved = File.join(r,original) + break + end + end + if resolved.empty? then + quoted(original) + else + quoted(resolved) + end + end . gsub(/(kpse|loc|locate|file|path)\:([a-zA-Z\-\_\.0-9]+)/o) do + method, original, resolved = $1, $2, '' + if $program && ! $program.empty? then + # pstrings = ["-progname=#{$program}"] + pstrings = [$program] + else + # pstrings = ['','-progname=context'] + pstrings = ['','context'] + end + # auto suffix with texinputs as fall back + if ENV["_CTX_K_V_#{original}_"] then + resolved = ENV["_CTX_K_V_#{original}_"] + report("environment provides #{original} as #{resolved}") unless $report + quoted(resolved) + else + check_kpse + pstrings.each do |pstr| + if resolved.empty? then + # command = "kpsewhich #{pstr} #{original}" + # report("running #{command}") + report("locating '#{original}' in program space '#{pstr}'") + begin + # resolved = `#{command}`.chomp + $kpse.progname = pstr + $kpse.format = '' + resolved = $kpse.find_file(original).gsub(/\\/,'/') + rescue + resolved = '' + end + end + # elsewhere in the tree + if resolved.empty? then + # command = "kpsewhich #{pstr} -format=\"other text files\" #{original}" + # report("running #{command}") + report("locating '#{original}' in program space '#{pstr}' using format 'other text files'") + begin + # resolved = `#{command}`.chomp + $kpse.progname = pstr + $kpse.format = 'other text files' + resolved = $kpse.find_file(original).gsub(/\\/,'/') + rescue + resolved = '' + end + end + end + if resolved.empty? then + original = File.dirname(original) if method =~ /path/ + report("#{original} is not resolved") unless $report + ENV["_CTX_K_V_#{original}_"] = original if $crossover + quoted(original) + else + resolved = File.dirname(resolved) if method =~ /path/ + report("#{original} is resolved to #{resolved}") unless $report + ENV["_CTX_K_V_#{original}_"] = resolved if $crossover + quoted(resolved) + end + end + end +end + +def changeddir?(path) + if path.empty? then + return true + else + oldpath = File.expand_path(path) + begin + Dir.chdir(path) if not path.empty? 
+ rescue + report("unable to change to directory: #{path}") + else + report("changed to directory: #{path}") + end + newpath = File.expand_path(Dir.getwd) + return oldpath == newpath + end +end + +def runcommand(command) + if $locate then + command = command.split(' ').collect do |c| + if c =~ /\//o then + begin + cc = File.expand_path(c) + c = cc if FileTest.file?(cc) + rescue + end + end + c + end . join(' ') + print command # to stdout and no newline + elsif $execute then + report("using 'exec' instead of 'system' call: #{command}") + exec(command) if changeddir?($path) + else + report("using 'system' call: #{command}") + system(command) if changeddir?($path) + end +end + +def join_command(args) + args[0] = $runners[args[0]] || args[0] + [args].join(' ') +end + +def runoneof(application,fullname,browserpermitted) + if browserpermitted && launch(fullname) then + return true + else + fullname = quoted(fullname) # added because MM ran into problems + report("starting #{$filename}") unless $report + output("\n") if $report && $verbose + applications = $applications[application.downcase] + if ! applications then + output("problems with determining application type") + return true + elsif applications.class == Array then + if $report then + output(join_command([fullname,expanded($arguments)])) + return true + else + applications.each do |a| + return true if runcommand(join_command([a,fullname,expanded($arguments)])) + end + end + elsif applications.empty? then + if $report then + output(join_command([fullname,expanded($arguments)])) + return true + else + return runcommand(join_command([fullname,expanded($arguments)])) + end + else + if $report then + output(join_command([applications,fullname,expanded($arguments)])) + return true + else + return runcommand(join_command([applications,fullname,expanded($arguments)])) + end + end + return false + end +end + +def report(str) + $stdout.puts(str) if $verbose +end + +def output(str) + $stdout.puts(str) +end + +def usage + print "version : #{$version} - 2003/2006 - www.pragma-ade.com\n" + print("\n") + print("usage : texmfstart [switches] filename [optional arguments]\n") + print("\n") + print("switches : --verbose --report --browser --direct --execute --locate --iftouched --ifchanged\n") + print(" --program --file --page --arguments --batch --edit --report --clear\n") + print(" --make --lmake --wmake --path --stubpath --indirect --before --after\n") + print(" --tree --autotree --environment --showenv\n") + print("\n") + print("example : texmfstart pstopdf.rb cow.eps\n") + print(" texmfstart --locate examplex.rb\n") + print(" texmfstart --execute examplex.rb\n") + print(" texmfstart --browser examplap.pdf\n") + print(" texmfstart showcase.pdf\n") + print(" texmfstart --page=2 --file=showcase.pdf\n") + print(" texmfstart --program=yourtex yourscript.rb arg-1 arg-2\n") + print(" texmfstart --direct xsltproc kpse:somefile.xsl somefile.xml\n") + print(" texmfstart --direct ruby rel:wn-cleanup-1.rb oldfile.xml newfile.xml\n") + print(" texmfstart bin:xsltproc env:somepreset path:somefile.xsl somefile.xml\n") + print(" texmfstart --iftouched=normal,lowres downsample.rb normal lowres\n") + print(" texmfstart --ifchanged=somefile.dat --direct processit somefile.dat\n") + print(" texmfstart bin:scite kpse:texmf.cnf\n") + print(" texmfstart --exec bin:scite *.tex\n") + print(" texmfstart --edit texmf.cnf\n") + print(" texmfstart --edit kpse:texmf.cnf\n") + print(" texmfstart --serve\n") + print("\n") + print(" texmfstart --stubpath=/usr/local/bin [--make 
--remove] --verbose all\n") + print(" texmfstart --stubpath=auto [--make --remove] all\n") + print("\n") + check_kpse +end + +# somehow registration does not work out (at least not under windows) +# the . is also not accepted by unix as seperator + +def tag(name) + if $crossover then "_CTX_K_S_#{name}_" else "TEXMFSTART.#{name}" end +end + +def registered?(filename) + return ENV[tag(filename)] != nil +end + +def registered(filename) + return ENV[tag(filename)] || 'unknown' +end + +def register(filename,fullname) + if fullname && ! fullname.empty? then # && FileTest.file?(fullname) + ENV[tag(filename)] = fullname + report("registering '#{filename}' as '#{fullname}'") + return true + else + return false + end +end + +def find(filename,program) + begin + filename = filename.sub(/script:/o, '') # so we have bin: and script: and nothing + if $predefined.key?(filename) then + report("expanding '#{filename}' to '#{$predefined[filename]}'") + filename = $predefined[filename] + end + if registered?(filename) then + report("already located '#{filename}'") + return registered(filename) + end + # create suffix list + if filename =~ /^(.*)\.(.+)$/ then + filename = $1 + suffixlist = [$2] + else + suffixlist = [$scriptlist.split('|'),$documentlist.split('|')].flatten + end + # first we honor a given path + if filename =~ /[\\\/]/ then + report("trying to honor '#{filename}'") + suffixlist.each do |suffix| + fullname = filename+'.'+suffix + if FileTest.file?(fullname) && register(filename,fullname) + return shortpathname(fullname) + end + end + end + filename.sub!(/^.*[\\\/]/, '') + # next we look at the current path and the callerpath + pathlist = [ ] + progpath = $applications[suffixlist[0]] + threadok = registered("THREAD") !~ /unknown/ + pathlist << ['.','current'] + pathlist << [$ownpath,'caller'] if $ownpath != '.' + pathlist << ["#{$ownpath}/../#{progpath}",'caller'] if progpath + pathlist << [registered("THREAD"),'thread'] if threadok + pathlist << ["#{registered("THREAD")}/../#{progpath}",'thread'] if progpath && threadok + pathlist.each do |p| + if p && ! p.empty? && ! (p[0] == 'unknown') then + suffixlist.each do |suffix| + fname = "#{filename}.#{suffix}" + fullname = File.expand_path(File.join(p[0],fname)) + report("locating '#{fname}' in #{p[1]} path '#{p[0]}'") + if FileTest.file?(fullname) && register(filename,fullname) then + report("'#{fname}' located in #{p[1]} path") + return shortpathname(fullname) + end + end + end + end + # now we consult environment settings + fullname = nil + check_kpse + $kpse.progname = program + suffixlist.each do |suffix| + begin + break unless $suffixinputs[suffix] + environment = ENV[$suffixinputs[suffix]] || ENV[$suffixinputs[suffix]+".#{$program}"] + if ! environment || environment.empty? then + begin + # environment = `kpsewhich -expand-path=\$#{$suffixinputs[suffix]}`.chomp + environment = $kpse.expand_path("\$#{$suffixinputs[suffix]}") + rescue + environment = nil + else + if environment && ! environment.empty? then + report("using kpsewhich variable #{$suffixinputs[suffix]}") + end + end + elsif environment && ! environment.empty? then + report("using environment variable #{$suffixinputs[suffix]}") + end + if environment && ! environment.empty? then + environment.split($separator).each do |e| + e.strip! + e = '.' if e == '\.' # somehow . gets escaped + e += '/' unless e =~ /[\\\/]$/ + fullname = e + filename + '.' 
+ suffix + report("testing '#{fullname}'") + if FileTest.file?(fullname) then + break + else + fullname = nil + end + end + end + rescue + report("environment string '#{$suffixinputs[suffix]}' cannot be used to locate '#{filename}'") + fullname = nil + else + return shortpathname(fullname) if register(filename,fullname) + end + end + return shortpathname(fullname) if register(filename,fullname) + # then we fall back on kpsewhich + suffixlist.each do |suffix| + # TDS script scripts location as per 2004 + if suffix =~ /(#{$scriptlist})/ then + begin + report("using 'kpsewhich' to locate '#{filename}' in suffix space '#{suffix}' (1)") + # fullname = `kpsewhich -progname=#{program} -format=texmfscripts #{filename}.#{suffix}`.chomp + $kpse.format = 'texmfscripts' + fullname = $kpse.find_file("#{filename}.#{suffix}").gsub(/\\/,'/') + rescue + report("kpsewhich cannot locate '#{filename}' in suffix space '#{suffix}' (1)") + fullname = nil + else + return shortpathname(fullname) if register(filename,fullname) + end + end + # old TDS location: .../texmf/context/... + begin + report("using 'kpsewhich' to locate '#{filename}' in suffix space '#{suffix}' (2)") + # fullname = `kpsewhich -progname=#{program} -format="other text files" #{filename}.#{suffix}`.chomp + $kpse.format = 'other text files' + fullname = $kpse.find_file("#{filename}.#{suffix}").gsub(/\\/,'/') + rescue + report("kpsewhich cannot locate '#{filename}' in suffix space '#{suffix}' (2)") + fullname = nil + else + return shortpathname(fullname) if register(filename,fullname) + end + end + return shortpathname(fullname) if register(filename,fullname) + # let's take a look at the path + paths = ENV['PATH'].split($separator) + suffixlist.each do |s| + paths.each do |p| + suffixedname = "#{filename}.#{s}" + report("checking #{p} for #{filename}") + if FileTest.file?(File.join(p,suffixedname)) then + fullname = File.join(p,suffixedname) + return shortpathname(fullname) if register(filename,fullname) + end + end + end + # bad luck, we need to search the tree ourselves + if (suffixlist.length == 1) && (suffixlist.first =~ /(#{$documentlist})/) then + report("aggressively locating '#{filename}' in document trees") + begin + # texroot = `kpsewhich -expand-var=$SELFAUTOPARENT`.chomp + texroot = $kpse.expand_var("$SELFAUTOPARENT") + rescue + texroot = '' + else + texroot.sub!(/[\\\/][^\\\/]*?$/, '') + end + if not texroot.empty? then + sffxlst = suffixlist.join(',') + begin + report("locating '#{filename}' in document tree '#{texroot}/doc*'") + if (result = Dir.glob("#{texroot}/doc*/**/#{filename}.{#{sffxlst}}")) && result && result[0] && FileTest.file?(result[0]) then + fullname = result[0] + end + rescue + report("locating '#{filename}.#{suffixlist.join('|')}' in tree '#{texroot}' aborted") + end + end + return shortpathname(fullname) if register(filename,fullname) + end + report("aggressively locating '#{filename}' in tex trees") + begin + # textrees = `kpsewhich -expand-var=$TEXMF`.chomp + textrees = $kpse.expand_var("$TEXMF") + rescue + textrees = '' + end + if not textrees.empty? 
then + textrees.gsub!(/[\{\}\!]/, '') + textrees = textrees.split(',') + if (suffixlist.length == 1) && (suffixlist.first =~ /(#{$documentlist})/) then + speedup = ['doc**','**'] + else + speedup = ['**'] + end + sffxlst = suffixlist.join(',') + speedup.each do |speed| + textrees.each do |tt| + tt.gsub!(/[\\\/]$/, '') + if FileTest.directory?(tt) then + begin + report("locating '#{filename}' in tree '#{tt}/#{speed}/#{filename}.{#{sffxlst}}'") + if (result = Dir.glob("#{tt}/#{speed}/#{filename}.{#{sffxlst}}")) && result && result[0] && FileTest.file?(result[0]) then + fullname = result[0] + break + end + rescue + report("locating '#{filename}' in tree '#{tt}' aborted") + next + end + end + end + break if fullname && ! fullname.empty? + end + end + if register(filename,fullname) then + return shortpathname(fullname) + else + return '' + end + rescue + error, trace = $!, $@.join("\n") + report("fatal error: #{error}\n#{trace}") + # report("fatal error") + end +end + +def run(fullname) + if ! fullname || fullname.empty? then + output("the file '#{$filename}' is not found") + elsif FileTest.file?(fullname) then + begin + case fullname + when /\.(#{$scriptlist})$/i then + return runoneof($1,fullname,false) + when /\.(#{$documentlist})$/i then + return runoneof($1,fullname,true) + else + return runoneof('unknown',fullname,false) + end + rescue + report("starting '#{$filename}' in program space '#{$program}' fails (#{$!})") + end + else + report("the file '#{$filename}' in program space '#{$program}' is not accessible") + end + return false +end + +def direct(fullname) + begin + return runcommand([fullname.sub(/^(bin|binary)\:/, ''),expanded($arguments)].join(' ')) + rescue + return false + end +end + +def edit(filename) + begin + return runcommand([$editor,expanded(filename),expanded($arguments)].join(' ')) + rescue + return false + end +end + +def make(filename,windows=false,linux=false,remove=false) + basename = File.basename(filename).gsub(/\.[^.]+?$/, '') + if $stubpath == 'auto' then + basename = File.dirname($0) + '/' + basename + else + basename = $stubpath + '/' + basename unless $stubpath.empty? + end + if filename == 'texmfstart' then + program = 'ruby' + command = 'kpsewhich --format=texmfscripts --progname=context texmfstart.rb' + filename = `#{command}`.chomp.gsub(/\\/, '/') + if filename.empty? then + report("failure: #{command}") + return + elsif not remove then + if windows then + ['bat','exe'].each do |suffix| + if FileTest.file?("#{basename}.#{suffix}") then + report("windows stub '#{basename}.#{suffix}' skipped (already present)") + return + end + end + elsif linux && FileTest.file?(basename) then + report("unix stub '#{basename}' skipped (already present)") + return + end + end + else + program = nil + if filename =~ /[\\\/]/ && filename =~ /\.(#{$scriptlist})$/ then + program = $applications[$1] + end + filename = "\"#{filename}\"" if filename =~ /\s/ + program = 'texmfstart' if $indirect || ! program || program.empty? 
+ end + begin + callname = $predefined[filename.sub(/\.*?$/,'')] || filename + if remove then + if windows && (File.delete(basename+'.bat') rescue false) then + report("windows stub '#{basename}.bat' removed (calls #{callname})") + elsif linux && (File.delete(basename) rescue false) then + report("unix stub '#{basename}' removed (calls #{callname})") + end + else + if windows && f = open(basename+'.bat','w') then + f.binmode + f.write("@echo off\015\012") + f.write("#{program} #{callname} %*\015\012") + f.close + report("windows stub '#{basename}.bat' made (calls #{callname})") + elsif linux && f = open(basename,'w') then + f.binmode + f.write("#!/bin/sh\012") + f.write("#{program} #{callname} \"$@\"\012") + f.close + report("unix stub '#{basename}' made (calls #{callname})") + end + end + rescue + report("failed to make stub '#{basename}' #{$!}") + return false + else + return true + end +end + +def process(&block) + if $iftouched then + files = $directives['iftouched'].split(',') + oldname, newname = files[0], files[1] + if oldname && newname && File.needsupdate(oldname,newname) then + report("file #{oldname}: #{File.timestamp(oldname)}") + report("file #{newname}: #{File.timestamp(newname)}") + report("file is touched, processing started") + yield + File.syncmtimes(oldname,newname) + else + report("file #{oldname} is untouched") + end + elsif $ifchanged then + filename = $directives['ifchanged'] + checkname = filename + ".md5" + oldchecksum, newchecksum = "old", "new" + begin + newchecksum = MD5.new(IO.read(filename)).hexdigest.upcase + rescue + newchecksum = "new" + else + begin + oldchecksum = IO.read(checkname).chomp + rescue + oldchecksum = "old" + end + end + if $verbose then + report("old checksum #{filename}: #{oldchecksum}") + report("new checksum #{filename}: #{newchecksum}") + end + if oldchecksum != newchecksum then + report("file is changed, processing started") + begin + File.open(checkname,'w') do |f| + f << newchecksum + end + rescue + end + yield + else + report("file #{filename} is unchanged") + end + else + yield + end +end + +def checkenvironment(tree) + report('') + ENV['TMP'] = ENV['TMP'] || ENV['TEMP'] || ENV['TMPDIR'] || ENV['HOME'] + case RUBY_PLATFORM + when /(mswin|bccwin|mingw|cygwin)/i then ENV['TEXOS'] = ENV['TEXOS'] || 'texmf-mswin' + when /(linux)/i then ENV['TEXOS'] = ENV['TEXOS'] || 'texmf-linux' + when /(darwin|rhapsody|nextstep)/i then ENV['TEXOS'] = ENV['TEXOS'] || 'texmf-macosx' + # when /(netbsd|unix)/i then # todo + else # todo + end + ENV['TEXOS'] = "#{ENV['TEXOS'].sub(/^[\\\/]*/, '').sub(/[\\\/]*$/, '')}" + ENV['TEXPATH'] = tree.sub(/\/+$/,'') # + '/' + ENV['TEXMFOS'] = "#{ENV['TEXPATH']}/#{ENV['TEXOS']}" + report('') + report("preset : TEXPATH => #{ENV['TEXPATH']}") + report("preset : TEXOS => #{ENV['TEXOS']}") + report("preset : TEXMFOS => #{ENV['TEXMFOS']}") + report("preset : TMP => #{ENV['TMP']}") + report('') +end + +def loadfile(filename) + begin + IO.readlines(filename).each do |line| + case line.chomp + when /^[\#\%]/ then + # comment + when /^(.*?)\s*(\>|\=|\<)\s*(.*)\s*$/ then + # = assign | > prepend | < append + key, how, value = $1, $2, $3 + begin + # $SAFE = 0 + value.gsub!(/\%(.*?)\%/) do + ENV[$1] || '' + end + # value.gsub!(/\;/,$separator) if key =~ /PATH/i then + case how + when '=', '<<' then ENV[key] = value + when '?', '??' 
then ENV[key] = ENV[key] || value + when '<', '+=' then ENV[key] = (ENV[key] || '') + $separator + value + when '>', '=+' then ENV[key] = value + $separator + (ENV[key] ||'') + end + rescue + report("user set failed : #{key} (#{$!})") + else + report("user set : #{key} => #{ENV[key]}") + end + end + end + rescue + report("error in reading file '#{filename}'") + end +end + +def loadtree(tree) + begin + unless tree.empty? then + if File.directory?(tree) then + setuptex = File.join(tree,'setuptex.tmf') + else + setuptex = tree.dup + end + if FileTest.file?(setuptex) then + report("tex tree definition: #{setuptex}") + checkenvironment(File.dirname(setuptex)) + loadfile(setuptex) + else + report("no setup file '#{setuptex}'") + end + end + rescue + # maybe tree is empty or boolean (no arg given) + end +end + +def loadenvironment(environment) + begin + unless environment.empty? then + filename = if $path.empty? then environment else File.expand_path(File.join($path,environment)) end + if FileTest.file?(filename) then + report("environment : #{environment}") + loadfile(filename) + else + report("no environment file '#{environment}'") + end + end + rescue + report("problem while loading '#{environment}'") + end +end + +def show_environment + if $showenv then + keys = ENV.keys.sort + size = 0 + keys.each do |k| + size = k.size if k.size > size + end + report('') + keys.each do |k| + report("#{k.rjust(size)} => #{ENV[k]}") + end + report('') + end +end + +def execute(arguments) # br global + + arguments = arguments.split(/\s+/) if arguments.class == String + $directives = hashed(arguments) + + $help = $directives['help'] || false + $batch = $directives['batch'] || false + $filename = $directives['file'] || '' + $program = $directives['program'] || 'context' + $direct = $directives['direct'] || false + $edit = $directives['edit'] || false + $page = $directives['page'] || 1 + $browser = $directives['browser'] || false + $report = $directives['report'] || false + $verbose = $directives['verbose'] || false + $arguments = $directives['arguments'] || '' + $execute = $directives['execute'] || $directives['exec'] || false + $locate = $directives['locate'] || false + + $autotree = if $directives['autotree'] then (ENV['TEXMFSTART_TREE'] || ENV['TEXMFSTARTTREE'] || '') else '' end + + $path = $directives['path'] || '' + $tree = $directives['tree'] || $autotree || '' + $environment = $directives['environment'] || '' + + $make = $directives['make'] || false + $remove = $directives['remove'] || $directives['delete'] || false + $unix = $directives['unix'] || false + $windows = $directives['windows'] || $directives['mswin'] || false + $stubpath = $directives['stubpath'] || '' + $indirect = $directives['indirect'] || false + + $before = $directives['before'] || '' + $after = $directives['after'] || '' + + $iftouched = $directives['iftouched'] || false + $ifchanged = $directives['ifchanged'] || false + + $openoffice = $directives['oo'] || false + + $crossover = false if $directives['clear'] + + $showenv = $directives['showenv'] || false + $verbose = true if $showenv + + $serve = $directives['serve'] || false + + $verbose = true if (ENV['_CTX_VERBOSE_'] =~ /(y|yes|t|true|on)/io) && ! $locate && ! 
$report + + set_applications($page) + + # private: + + $selfmerge = $directives['selfmerge'] || false + $selfcleanup = $directives['selfclean'] || $directives['selfcleanup'] || false + + ENV['_CTX_VERBOSE_'] = 'yes' if $verbose + + if $openoffice then + if ENV['OOPATH'] then + if FileTest.directory?(ENV['OOPATH']) then + report("using open office python") + if $mswindows then + $applications['python'] = $applications['py'] = "\"#{File.join(ENV['OOPATH'],'program','python.bat')}\"" + else + $applications['python'] = $applications['py'] = File.join(ENV['OOPATH'],'python') + end + report("python path #{$applications['python']}") + else + report("environment variable 'OOPATH' does not exist") + end + else + report("environment variable 'OOPATH' is not set") + end + end + + if $selfmerge then + output("ruby libraries are cleaned up") if SelfMerge::cleanup + output("ruby libraries are merged") if SelfMerge::merge + return true + elsif $selfcleanup then + output("ruby libraries are cleaned up") if SelfMerge::cleanup + return true + elsif $serve then + if ENV['KPSEMETHOD'] && ENV['KPSEPORT'] then + # # kpse_merge_done: require 'base/kpseremote' + begin + KpseRemote::start_server + rescue + return false + else + return true + end + else + usage + puts("") + puts("message : set 'KPSEMETHOD' and 'KPSEPORT' variables") + return false + end + elsif $help || ! $filename || $filename.empty? then + usage + loadtree($tree) + loadenvironment($environment) + show_environment() + return true + elsif $batch && $filename && ! $filename.empty? then + # todo, take commands from file and avoid multiple starts and checks + return false + else + report("texmfstart version #{$version}") + loadtree($tree) + loadenvironment($environment) + show_environment() + if $make || $remove then + if $filename == 'all' then + makelist = $makelist + else + makelist = [$filename] + end + makelist.each do |filename| + if $windows then + make(filename,true,false,$remove) + elsif $unix then + make(filename,false,true,$remove) + else + make(filename,$mswindows,!$mswindows,$remove) + end + end + return true # guess + elsif $browser && $filename =~ /^http\:\/\// then + return launch($filename) + else + begin + process do + if $direct || $filename =~ /^bin\:/ then + return direct($filename) + elsif $edit && ! $editor.empty? 
then + return edit($filename) + else # script: or no prefix + command = find(shortpathname($filename),$program) + if command then + register("THREAD",File.dirname(File.expand_path(command))) + return run(command) + else + report('unable to locate program') + return false + end + end + end + rescue + report('fatal error in starting process') + return false + end + end + end + +end + +if execute(ARGV) then + report("\nexecution was successful") if $verbose + exit(0) +else + report("\nexecution failed") if $verbose + exit(1) +end diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/context b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/context new file mode 100755 index 00000000000..c7341904f10 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/context @@ -0,0 +1,3 @@ +#!/bin/sh + +mtxrun --script context "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/ctxtools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/ctxtools new file mode 100755 index 00000000000..84e47bbee3c --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/ctxtools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart ctxtools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/exatools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/exatools new file mode 100755 index 00000000000..50ff0f07e46 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/exatools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart exatools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/makempy b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/makempy new file mode 100755 index 00000000000..4bf7a1af230 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/makempy @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart makempy.pl "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mpstools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mpstools new file mode 100755 index 00000000000..b4c8f634548 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mpstools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart mpstools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mptopdf b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mptopdf new file mode 100755 index 00000000000..980a3123d1a --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mptopdf @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart mptopdf.pl "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mtxtools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mtxtools new file mode 100755 index 00000000000..2922e0b4674 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/mtxtools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart mtxtools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftools new file mode 100755 index 00000000000..92ee803a868 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart pdftools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftrimwhite b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftrimwhite new file mode 100755 index 00000000000..00b5f525aec --- /dev/null 
+++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pdftrimwhite @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart pdftrimwhite.pl "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pstopdf b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pstopdf new file mode 100755 index 00000000000..5b38ed426cb --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/pstopdf @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart pstopdf.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/rlxtools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/rlxtools new file mode 100755 index 00000000000..41cea40fc09 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/rlxtools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart rlxtools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/runtools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/runtools new file mode 100755 index 00000000000..ff9a333791b --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/runtools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart runtools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texexec b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texexec new file mode 100755 index 00000000000..2158172906a --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texexec @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart texexec.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfind b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfind new file mode 100755 index 00000000000..c054bdf5218 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfind @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart texfind "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfont b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfont new file mode 100755 index 00000000000..a91f786e3f8 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texfont @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart texfont.pl "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texshow b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texshow new file mode 100755 index 00000000000..afd62c339b3 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texshow @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart texshow.pl "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/textools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/textools new file mode 100755 index 00000000000..7445eac371c --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/textools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart textools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texutil b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texutil new file mode 100755 index 00000000000..607154af059 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/texutil @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart texutil.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/tmftools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/tmftools new file mode 100755 index 00000000000..7531a966390 --- /dev/null +++ 
b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/tmftools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart tmftools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/context/stubs/unix/xmltools b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/xmltools new file mode 100755 index 00000000000..03086d0436b --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/context/stubs/unix/xmltools @@ -0,0 +1,2 @@ +#!/bin/sh +texmfstart xmltools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/ctxtools b/Build/source/texk/texlive/linked_scripts/ctxtools deleted file mode 100755 index 84e47bbee3c..00000000000 --- a/Build/source/texk/texlive/linked_scripts/ctxtools +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -texmfstart ctxtools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/dviasm.py b/Build/source/texk/texlive/linked_scripts/dviasm.py deleted file mode 100755 index adfe77c1609..00000000000 --- a/Build/source/texk/texlive/linked_scripts/dviasm.py +++ /dev/null @@ -1,960 +0,0 @@ -#! /usr/bin/env python -# -*- coding: utf_8 -*- -# -# This is DVIasm, a DVI utility for editing DVI files directly. -# -# Copyright (C) 2007-2008 by Jin-Hwan Cho -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . - -import sys, os.path -from optparse import OptionParser - -# Global variables -is_ptex = False -is_subfont = False -cur_font = None -cur_dsize = 0 -cur_ssize = 0 -subfont_idx = 0 -subfont_list = ['cyberb', 'outbtm', 'outbtb', 'outgtm', 'outgtb'] - -# DVI opcodes -SET_CHAR_0 = 0; SET_CHAR_127 = 127; -SET1 = 128; SET2 = 129; SET3 = 130; SET4 = 131; -SET_RULE = 132; -PUT1 = 133; PUT2 = 134; PUT3 = 135; PUT4 = 136; -PUT_RULE = 137; -NOP = 138; -BOP = 139; EOP = 140; -PUSH = 141; POP = 142; -RIGHT1 = 143; RIGHT2 = 144; RIGHT3 = 145; RIGHT4 = 146; -W0 = 147; W1 = 148; W2 = 149; W3 = 150; W4 = 151; -X0 = 152; X1 = 153; X2 = 154; X3 = 155; X4 = 156; -DOWN1 = 157; DOWN2 = 158; DOWN3 = 159; DOWN4 = 160; -Y0 = 161; Y1 = 162; Y2 = 163; Y3 = 164; Y4 = 165; -Z0 = 166; Z1 = 167; Z2 = 168; Z3 = 169; Z4 = 170; -FNT_NUM_0 = 171; FNT_NUM_63 = 234; -FNT1 = 235; FNT2 = 236; FNT3 = 237; FNT4 = 238; -XXX1 = 239; XXX2 = 240; XXX3 = 241; XXX4 = 242; -FNT_DEF1 = 243; FNT_DEF2 = 244; FNT_DEF3 = 245; FNT_DEF4 = 246; -PRE = 247; POST = 248; POST_POST = 249; -# DVIV opcodes -DIR = 255; -# XDVI opcodes (not supported yet!) -NATIVE_FONT_DEF = 250; -PDF_FILE = 251; PIC_FILE = 252; -GLYPH_ARRAY = 253; GLYPH_STRING = 254; -# DVI identifications -DVI_ID = 2; DVIV_ID = 3; XDVI_ID = 5; - -def Warning(msg): - sys.stderr.write('%s\n' % msg) - -def BadDVI(msg): - raise AttributeError, 'Bad DVI file: %s!' 
% msg - -def GetByte(fp): # { returns the next byte, unsigned } - try: return ord(fp.read(1)) - except: return -1 - -def SignedByte(fp): # { returns the next byte, signed } - try: b = ord(fp.read(1)) - except: return -1 - if b < 128: return b - else: return b - 256 - -def Get2Bytes(fp): # { returns the next two bytes, unsigned } - try: a, b = map(ord, fp.read(2)) - except: BadDVI('Failed to Get2Bytes()') - return (a << 8) + b - -def SignedPair(fp): # {returns the next two bytes, signed } - try: a, b = map(ord, fp.read(2)) - except: BadDVI('Failed to SignedPair()') - if a < 128: return (a << 8) + b - else: return ((a - 256) << 8) + b - -def Get3Bytes(fp): # { returns the next three bytes, unsigned } - try: a, b, c = map(ord, fp.read(3)) - except: BadDVI('Failed to Get3Bytes()') - return (((a << 8) + b) << 8) + c - -def SignedTrio(fp): # { returns the next three bytes, signed } - try: a, b, c = map(ord, fp.read(3)) - except: BadDVI('Failed to SignedTrio()') - if a < 128: return (((a << 8) + b) << 8) + c - else: return ((((a - 256) << 8) + b) << 8) + c - -def SignedQuad(fp): # { returns the next four bytes, signed } - try: a, b, c, d = map(ord, fp.read(4)) - except: BadDVI('Failed to get SignedQuad()') - if a < 128: return (((((a << 8) + b) << 8) + c) << 8) + d - else: return ((((((a - 256) << 8) + b) << 8) + c) << 8) + d - -def PutByte(q): - return chr(q & 0xff) - -def Put2Bytes(q): - return PutByte(q>>8) + PutByte(q) - -def Put3Bytes(q): - return PutByte(q>>16) + PutByte(q>>8) + PutByte(q) - -def PutSignedQuad(q): - if q < 0: q += 0x100000000 - return PutByte(q>>24) + PutByte(q>>16) + PutByte(q>>8) + PutByte(q) - -def PutUnsigned(q): - if q >= 0x1000000: return (3, PutSignedQuad(q)) - if q >= 0x10000: return (2, Put3Bytes(q)) - if q >= 0x100: return (1, Put2Bytes(q)) - return (0, PutByte(q)) - -def PutSigned(q): - if 0 <= q < 0x800000: return PutUnsigned(q) - if q < -0x800000 or q >= 0x800000: return (3, PutSignedQuad(q)) - if q < -0x8000: q += 0x1000000; return (2, Put3Bytes(q)) - if q < -0x80: q += 0x10000; return (1, Put2Bytes(q)) - return (0, PutByte(q)) - -def GetInt(s): - try: return int(s) - except: return -1 - -def GetStrASCII(s): # used in Parse() - if len(s) > 1 and ((s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"')): return [ord(c) for c in s[1:-1].decode('unicode_escape')] - else: return '' - -def UCS2toJIS(c): - s = c.encode('iso2022-jp') - if len(s) == 1: return ord(s) - else: return (ord(s[3]) << 8) + ord(s[4]) - -def GetStrUTF8(s): # used in Parse() - if len(s) > 1 and ((s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"')): - t = s[1:-1].decode('string_escape').decode('utf8') - if is_ptex: return [UCS2toJIS(c) for c in t] - else: return [ord(c) for c in t] - else: return '' - -def PutStrASCII(t): # unsed in Dump() - s = '' - for o in t: - if o == 92: s += '\\\\' - elif 32 <= o < 127: s += chr(o) - elif o < 256: s += ('\\x%02x' % o) - elif o < 65536: s += ('\\u%04x' % o) - else: - Warning('Not support characters > 65535; may skip %d.\n' % o) - return "'%s'" % s - -def PutStrLatin1(t): # unsed in Dump() - s = '' - for o in t: - if o == 92: s += '\\\\' - elif 32 <= o < 127 or 161 <= o < 256: s += chr(o) - elif o < 256: s += ('\\x%02x' % o) - elif o < 65536: s += ('\\u%04x' % o) - else: - Warning('Not support characters > 65535; may skip %d.\n' % o) - return "'%s'" % s - -def PutStrUTF8(t): # unsed in Dump() - s = '' - if is_subfont: - for o in t: - s += unichr((subfont_idx << 8) + o).encode('utf8') - else: # not the case of subfont - for o in t: - if 
o == 92: s += '\\\\' - elif 32 <= o < 127: s += chr(o) - elif o < 128: s += ('\\x%02x' % o) - elif is_ptex: - s += ''.join(['\x1b$B', chr(o/256), chr(o%256)]).decode('iso2022-jp').encode('utf8') - else: s += unichr(o).encode('utf8') - return "'%s'" % s - -def PutStrSJIS(t): # unsed in Dump() - s = '' - for o in t: - if o == 92: s += '\\\\' - elif 32 <= o < 127: s += chr(o) - elif o < 128: s += ('\\x%02x' % o) - else: - s += ''.join(['\x1b$B', chr(o/256), chr(o%256)]).decode('iso2022-jp').encode('sjis') - return "'%s'" % s - -def IsFontChanged(f, z): - global cur_font, cur_ssize, subfont_idx, is_subfont - for n in subfont_list: - if n == f[:-2]: - is_subfont = True - subfont_idx = int(f[-2:], 16) - if cur_font == n and cur_ssize == z: - return False - else: - cur_font = n; cur_ssize = z - return True - else: - is_subfont = False - cur_font = f; cur_ssize = z - return True - -############################################################ -# DVI class -############################################################ -class DVI(object): - def __init__(self, unit='pt'): - if unit == 'sp': self.byconv = self.by_sp_conv - elif unit == 'bp': self.byconv = self.by_bp_conv - elif unit == 'mm': self.byconv = self.by_mm_conv - elif unit == 'cm': self.byconv = self.by_cm_conv - elif unit == 'in': self.byconv = self.by_in_conv - else: self.byconv = self.by_pt_conv - self.Initialize() - - ########################################################## - # Initialize: Required by __init__(), Load(), and Parse() - ########################################################## - def Initialize(self): - self.id = DVI_ID - self.numerator = 25400000 - self.denominator = 473628672 - self.mag = 1000 - self.ComputeConversionFactors() - self.comment = '' - self.font_def = {} - self.max_v = self.max_h = self.max_s = self.total_pages = 0 - self.pages = [] - - ########################################################## - # Load: DVI -> Internal Format - ########################################################## - def Load(self, fn): - fp = file(fn, 'rb') - self.LoadFromFile(fp) - fp.close() - - def LoadFromFile(self, fp): - self.Initialize() - fp.seek(0, 2) - if fp.tell() < 53: BadDVI('less than 53 bytes long') - self.ProcessPreamble(fp) - self.ProcessPostamble(fp) - loc = self.first_backpointer - while loc >= 0: - fp.seek(loc) - if GetByte(fp) != BOP: BadDVI('byte %d is not bop' % fp.tell()) - cnt = [SignedQuad(fp) for i in xrange(10)] - loc = SignedQuad(fp) - page = self.ProcessPage(fp) - self.pages.insert(0, {'count':cnt, 'content':page}) - - def ProcessPreamble(self, fp): - fp.seek(0) - if GetByte(fp) != PRE: BadDVI("First byte isn't start of preamble") - id = GetByte(fp) - if id != DVI_ID and id != DVIV_ID and id != XDVI_ID: - Warning("ID byte is %d; use the default %d!" % (id, DVI_ID)) - else: - self.id = id - numerator = SignedQuad(fp) - if numerator <= 0: - Warning('numerator is %d; use the default 25400000!' % numerator) - else: - self.numerator = numerator - denominator = SignedQuad(fp) - if denominator <= 0: - Warning('denominator is %d; use the default 473628672!' % denominator) - else: - self.denominator = denominator - mag = SignedQuad(fp) - if mag <= 0: - Warning('magnification is %d; use the default 1000!' 
% mag) - else: - self.mag = mag - self.comment = fp.read(GetByte(fp)) - self.ComputeConversionFactors() - - def ProcessPostamble(self, fp): - fp.seek(-5, 2) # at least four 223's - while True: - k = GetByte(fp) - if k < 0: BadDVI('all 223s; is it a DVI file?') # found EOF - elif k != 223: break - fp.seek(-2, 1) - if k != DVI_ID and k != DVIV_ID and k != XDVI_ID: - Warning('ID byte is %d' % k) - fp.seek(-5, 1) - q = SignedQuad(fp) - m = fp.tell() # id_byte - if q < 0 or q > m - 33: BadDVI('post pointer %d at byte %d' % (q, m - 4)) - fp.seek(q) # move to post - k = GetByte(fp) - if k != POST: BadDVI('byte %d is not post' % k) - self.post_loc = q - self.first_backpointer = SignedQuad(fp) - - if SignedQuad(fp) != self.numerator: - Warning("numerator doesn't match the preamble!") - if SignedQuad(fp) != self.denominator: - Warning("denominator doesn't match the preamble!") - if SignedQuad(fp) != self.mag: - Warning("magnification doesn't match the preamble!") - self.max_v = SignedQuad(fp) - self.max_h = SignedQuad(fp) - self.max_s = Get2Bytes(fp) - self.total_pages = Get2Bytes(fp) - while True: - k = GetByte(fp) - if k == FNT_DEF1: p = GetByte(fp) - elif k == FNT_DEF2: p = Get2Bytes(fp) - elif k == FNT_DEF3: p = Get3Bytes(fp) - elif k == FNT_DEF4: p = SignedQuad(fp) - elif k != NOP: break - self.DefineFont(p, fp) - if k != POST_POST: - Warning('byte %d is not postpost!' % (fp.tell() - 1)) - if SignedQuad(fp) != self.post_loc: - Warning('bad postamble pointer in byte %d!' % (fp.tell() - 4)) - m = GetByte(fp) - if m != DVI_ID and m != DVIV_ID and m != XDVI_ID: - Warning('identification in byte %d should be %d, %d, or %d!' % (fp.tell() - 1, DVI_ID, DVIV_ID, XDVI_ID)) - - def DefineFont(self, e, fp): - c = SignedQuad(fp) # font_check_sum - q = SignedQuad(fp) # font_scaled_size - d = SignedQuad(fp) # font_design_size - n = fp.read(GetByte(fp) + GetByte(fp)) - try: - f = self.font_def[e] - except KeyError: - self.font_def[e] = {'name':n, 'checksum':c, 'scaled_size':q, 'design_size':d} - if q <= 0 or q >= 01000000000: - Warning("%s---not loaded, bad scale (%d)!" % (n, q)) - elif d <= 0 or d >= 01000000000: - msssage("%s---not loaded, bad design size (%d)!" 
% (n, d)) - else: - if f['checksum'] != c: - Warning("\t---check sum doesn't match previous definition!") - if f['scaled_size'] != q: - Warning("\t---scaled size doesn't match previous definition!") - if f['design_size'] != d: - Warning("\t---design size doesn't match previous definition!") - if f['name'] != n: - Warning("\t---font name doesn't match previous definition!") - - def ProcessPage(self, fp): - s = [] - while True: - o = GetByte(fp) - p = self.Get1Arg(o, fp) - if o < SET_CHAR_0 + 128 or o in (SET1, SET2, SET3, SET4): - q = [p] - while True: - o = GetByte(fp) - p = self.Get1Arg(o, fp) - if o < SET_CHAR_0 + 128 or o in (SET1, SET2, SET3, SET4): - q.append(p) - else: - break - s.append([SET1, q]) - if o == SET_RULE: - s.append([SET_RULE, [p, SignedQuad(fp)]]) - elif o in (PUT1, PUT2, PUT3, PUT4): - s.append([PUT1, p]) - elif o == PUT_RULE: - s.append([PUT_RULE, [p, SignedQuad(fp)]]) - elif o == NOP: - continue - elif o == BOP: - Warning('bop occurred before eop!') - break - elif o == EOP: - break - elif o == PUSH: - s.append([PUSH]) - elif o == POP: - s.append([POP]) - elif o in (RIGHT1, RIGHT2, RIGHT3, RIGHT4): - s.append([RIGHT1, p]) - elif o == W0: - s.append([W0]) - elif o in (W1, W2, W3, W4): - s.append([W1, p]) - elif o == X0: - s.append([X0]) - elif o in (X1, X2, X3, X4): - s.append([X1, p]) - elif o in (DOWN1, DOWN2, DOWN3, DOWN4): - s.append([DOWN1, p]) - elif o == Y0: - s.append([Y0]) - elif o in (Y1, Y2, Y3, Y4): - s.append([Y1, p]) - elif o == Z0: - s.append([Z0]) - elif o in (Z1, Z2, Z3, Z4): - s.append([Z1, p]) - elif o < FNT_NUM_0 + 64 or o in (FNT1, FNT2, FNT3, FNT4): - s.append([FNT1, p]) - elif o in (XXX1, XXX2, XXX3, XXX4): - q = fp.read(p) - s.append([XXX1, q]) - elif o in (FNT_DEF1, FNT_DEF2, FNT_DEF3, FNT_DEF4): - self.DefineFont(p, fp) - elif o == DIR: - s.append([DIR, p]) - elif o == PRE: - Warning('preamble command within a page!') - break - elif o in (POST, POST_POST): - Warning('postamble command %d!' % o) - break - else: - Warning('undefined command %d!' 
% o) - break - return s - - def Get1Arg(self, o, fp): - if o < SET_CHAR_0 + 128: - return o - SET_CHAR_0 - if o in (SET1, PUT1, FNT1, XXX1, FNT_DEF1, DIR): - return GetByte(fp) - if o in (SET2, PUT2, FNT2, XXX2, FNT_DEF2): - return Get2Bytes(fp) - if o in (SET3, PUT3, FNT3, XXX3, FNT_DEF3): - return Get3Bytes(fp) - if o in (RIGHT1, W1, X1, DOWN1, Y1, Z1): - return SignedByte(fp) - if o in (RIGHT2, W2, X2, DOWN2, Y2, Z2): - return SignedPair(fp) - if o in (RIGHT3, W3, X3, DOWN3, Y3, Z3): - return SignedTrio(fp) - if o in (SET4, SET_RULE, PUT4, PUT_RULE, RIGHT4, W4, X4, DOWN4, Y4, Z4, FNT4, XXX4, FNT_DEF4): - return SignedQuad(fp) - if o in (NOP, BOP, EOP, PUSH, POP, PRE, POST, POST_POST) or o > POST_POST: - return 0 - if o in (W0, X0, Y0, Z0): - return 0 - if o < FNT_NUM_0 + 64: - return o - FNT_NUM_0 - - ########################################################## - # Save: Internal Format -> DVI - ########################################################## - def Save(self, fn): - fp = file(fn, 'wb') - self.SaveToFile(fp) - fp.close() - - def SaveToFile(self, fp): - # WritePreamble - fp.write(''.join([chr(PRE), PutByte(self.id), PutSignedQuad(self.numerator), PutSignedQuad(self.denominator), PutSignedQuad(self.mag), PutByte(len(self.comment)), self.comment])) - # WriteFontDefinitions - self.WriteFontDefinitions(fp) - # WritePages - stackdepth = 0; loc = -1 - for page in self.pages: - w = x = y = z = 0; stack = [] - s = [chr(BOP)] - s.extend([PutSignedQuad(c) for c in page['count']]) - s.append(PutSignedQuad(loc)) - for cmd in page['content']: - if cmd[0] == SET1: - for o in cmd[1]: - if o < 128: s.append(chr(SET_CHAR_0 + o)) - else: s.append(self.CmdPair([SET1, o])) - elif cmd[0] in (SET_RULE, PUT_RULE): - s.append(chr(cmd[0]) + PutSignedQuad(cmd[1][0]) + PutSignedQuad(cmd[1][1])) - elif cmd[0] == PUT1: - s.append(self.CmdPair([PUT1, cmd[1][0]])) - elif cmd[0] in (RIGHT1, DOWN1): - s.append(self.CmdPair(cmd)) - elif cmd[0] in (W0, X0, Y0, Z0): - s.append(chr(cmd[0])) - elif cmd[0] == PUSH: - s.append(chr(PUSH)) - stack.append((w, x, y, z)) - if len(stack) > stackdepth: stackdepth = len(stack) - elif cmd[0] == POP: - s.append(chr(POP)) - w, x, y, z = stack.pop() - elif cmd[0] == W1: - w = cmd[1]; s.append(self.CmdPair(cmd)) - elif cmd[0] == X1: - x = cmd[1]; s.append(self.CmdPair(cmd)) - elif cmd[0] == Y1: - y = cmd[1]; s.append(self.CmdPair(cmd)) - elif cmd[0] == Z1: - z = cmd[1]; s.append(self.CmdPair(cmd)) - elif cmd[0] == FNT1: - if cmd[1] < 64: s.append(chr(FNT_NUM_0 + cmd[1])) - else: s.append(self.CmdPair(cmd)) - elif cmd[0] == XXX1: - l = len(cmd[1]) - if l < 256: s.append(chr(XXX1) + chr(l) + cmd[1]) - else: s.append(chr(XXX4) + PutSignedQuad(l) + cmd[1]) - elif cmd[0] == DIR: - s.append(chr(DIR) + chr(cmd[1])) - else: - Warning('invalid command %s!' 
% cmd[0]) - s.append(chr(EOP)) - loc = fp.tell() - fp.write(''.join(s)) - # WritePostamble - post_loc = fp.tell() - fp.write(''.join([chr(POST), PutSignedQuad(loc), PutSignedQuad(self.numerator), PutSignedQuad(self.denominator), PutSignedQuad(self.mag), PutSignedQuad(self.max_v), PutSignedQuad(self.max_h), Put2Bytes(stackdepth+1), Put2Bytes(len(self.pages))])) - # WriteFontDefinitions - self.WriteFontDefinitions(fp) - # WritePostPostamble - fp.write(''.join([chr(POST_POST), PutSignedQuad(post_loc), PutByte(self.id), '\xdf\xdf\xdf\xdf'])) - loc = fp.tell() - while (loc % 4) != 0: - fp.write('\xdf'); loc += 1 - - def WriteFontDefinitions(self, fp): - s = [] - for e in sorted(self.font_def.keys()): - l, q = PutUnsigned(e) - s.append(PutByte(FNT_DEF1 + l)) - s.append(q) - s.append(PutSignedQuad(self.font_def[e]['checksum'])) - s.append(PutSignedQuad(self.font_def[e]['scaled_size'])) - s.append(PutSignedQuad(self.font_def[e]['design_size'])) - s.append('\x00') - s.append(PutByte(len(self.font_def[e]['name']))) - s.append(self.font_def[e]['name']) - fp.write(''.join(s)) - - def CmdPair(self, cmd): - l, q = PutSigned(cmd[1]) - return chr(cmd[0] + l) + q - - ########################################################## - # Parse: Text -> Internal Format - ########################################################## - def Parse(self, fn, encoding=''): - fp = file(fn, 'r') - s = fp.read() - fp.close() - self.ParseFromString(s, encoding=encoding) - - def ParseFromString(self, s, encoding=''): - global GetStr, cur_font, cur_dsize, cur_ssize, subfont_idx - if encoding == 'ascii': GetStr = GetStrASCII - else: GetStr = GetStrUTF8 - self.Initialize() - self.fnt_num = 0 - for l in s.split('\n'): - l = l.strip() - if not l or l[0] == '%': continue - try: - key, val = l.split(':', 1) - key = key.strip(); val = val.strip() - except: - if l[-1] == ']': v = l[:-1].split(' ') - else: v = l.split(' ') - if v[0] == "[page": - self.cur_page = [] - count = [GetInt(c) for c in v[1:]] - if len(count) < 10: count += ([0] * (10-len(count))) - self.pages.append({'count':count, 'content':self.cur_page}) - continue - # ParsePreamble - if key == "id": - self.id = GetInt(val) - if self.id != DVI_ID and self.id != DVIV_ID and self.id != XDVI_ID: - Warning("identification byte should be %d, %d, or %d!" % (DVI_ID, DVIV_ID, XDVI_ID)) - elif key == "numerator": - d = GetInt(val) - if d <= 0: - Warning('non-positive numerator %d!' % d) - else: - self.numerator = d - self.ComputeConversionFactors() - elif key == "denominator": - d = GetInt(val) - if d <= 0: - Warning('non-positive denominator %d!' % d) - else: - self.denominator = d - self.ComputeConversionFactors() - elif key == "magnification": - d = GetInt(val) - if d <= 0: - Warning('non-positive magnification %d!' 
% d) - else: - self.mag = d - elif key == "comment": - self.comment = val[1:-1] - # Parse Postamble - elif key == "maxv": - self.max_v = self.ConvLen(val) - elif key == "maxh": - self.max_h = self.ConvLen(val) - elif key == "maxs": - self.max_s = GetInt(val) - elif key == "pages": - self.total_pages = GetInt(val) - # Parse Font Definitions - elif key == "fntdef": - n, q, d = self.GetFntDef(val) - self.font_def[self.fnt_num] = {'name':n, 'design_size':d, 'scaled_size':q, 'checksum':0} - self.fnt_num += 1 - # Parse Pages - elif key == 'xxx': - self.cur_page.append([XXX1, eval(val)]) - elif key == 'set': - ol = GetStr(val) - if is_subfont: - subfont_idx = (ol[0] >> 8) - self.AppendFNT1() - nl = [ol[0] & 0xff] - for o in ol[1:]: - idx = (o >> 8) - if idx != subfont_idx: - self.cur_page.append([SET1, nl]) - subfont_idx = idx - self.AppendFNT1() - nl = [o & 0xff] - else: - nl.append(o & 0xff) - self.cur_page.append([SET1, nl]) - else: - self.cur_page.append([SET1, ol]) - elif key == 'put': - self.cur_page.append([PUT1, GetStr(val)]) - elif key == 'setrule': - v = val.split(' ') - if len(v) != 2: - Warning('two values are required for setrule!') - continue - self.cur_page.append([SET_RULE, [self.ConvLen(c) for c in v]]) - elif key == 'putrule': - v = val.split(' ') - if len(v) != 2: - Warning('two values are required for putrule!') - continue - self.cur_page.append([PUT_RULE, [self.ConvLen(c) for c in v]]) - elif key == 'fnt': - n, q, d = self.GetFntDef(val) - if n in subfont_list: - is_subfont = True - cur_font = n; cur_dsize = d; cur_ssize = q - else: - is_subfont = False - f = {'name':n, 'design_size':d, 'scaled_size':q, 'checksum':0} - try: - e = self.font_def.keys()[self.font_def.values().index(f)] - except: - e = self.fnt_num - self.font_def[self.fnt_num] = f - self.fnt_num += 1 - self.cur_page.append([FNT1, e]) - elif key == 'right': - self.cur_page.append([RIGHT1, self.ConvLen(val)]) - elif key == 'down': - self.cur_page.append([DOWN1, self.ConvLen(val)]) - elif key == 'w': - self.cur_page.append([W1, self.ConvLen(val)]) - elif key == 'x': - self.cur_page.append([X1, self.ConvLen(val)]) - elif key == 'y': - self.cur_page.append([Y1, self.ConvLen(val)]) - elif key == 'z': - self.cur_page.append([Z1, self.ConvLen(val)]) - elif key == 'push': - self.cur_page.append([PUSH]) - elif key == 'pop': - self.cur_page.append([POP]) - elif key == 'w0': - self.cur_page.append([W0]) - elif key == 'x0': - self.cur_page.append([X0]) - elif key == 'y0': - self.cur_page.append([Y0]) - elif key == 'z0': - self.cur_page.append([Z0]) - elif key == 'dir': - self.cur_page.append([DIR, GetInt(val)]) - else: - Warning('invalid command %s!' 
% key) - - def AppendFNT1(self): - f = {'name':cur_font+"%02x"%subfont_idx, 'design_size':cur_dsize, 'scaled_size':cur_ssize, 'checksum':0} - try: - e = self.font_def.keys()[self.font_def.values().index(f)] - except: - e = self.fnt_num - self.font_def[e] = f - self.fnt_num += 1 - self.cur_page.append([FNT1, e]) - - ########################################################## - # Dump: Internal Format -> Text - ########################################################## - def Dump(self, fn, tabsize=2, encoding=''): - fp = file(fn, 'w') - self.DumpToFile(fp, tabsize=tabsize, encoding=encoding) - fp.close() - - def DumpToFile(self, fp, tabsize=2, encoding=''): - global PutStr - if encoding == 'ascii': PutStr = PutStrASCII - elif encoding == 'latin1': PutStr = PutStrLatin1 - elif encoding == 'sjis': PutStr = PutStrSJIS - else: PutStr = PutStrUTF8 - # DumpPreamble - fp.write("[preamble]\n") - fp.write("id: %d\n" % self.id) - fp.write("numerator: %d\n" % self.numerator) - fp.write("denominator: %d\n" % self.denominator) - fp.write("magnification: %d\n" % self.mag) - fp.write("comment: %s\n" % repr(self.comment)) - # DumpPostamble - fp.write("\n[postamble]\n") - fp.write("maxv: %s\n" % self.byconv(self.max_v)) - fp.write("maxh: %s\n" % self.byconv(self.max_h)) - fp.write("maxs: %d\n" % self.max_s) - fp.write("pages: %d\n" % self.total_pages) - # DumpFontDefinitions - fp.write("\n[font definitions]\n") - for e in sorted(self.font_def.keys()): - fp.write("fntdef: %s " % self.font_def[e]['name']) - if self.font_def[e]['design_size'] != self.font_def[e]['scaled_size']: - fp.write("(%s) " % self.by_pt_conv(self.font_def[e]['design_size'])) - fp.write("at %s\n" % self.by_pt_conv(self.font_def[e]['scaled_size'])) - # DumpPages - for page in self.pages: - fp.write("\n[page" + (" %d"*10 % tuple(page['count'])) + "]\n") - indent = 0 - for cmd in page['content']: - if cmd[0] == POP: - indent -= tabsize - fp.write("%spop:\n" % (' ' * indent)) - continue - fp.write("%s" % (' ' * indent)) - if cmd[0] == PUSH: - fp.write("push:\n") - indent += tabsize - elif cmd[0] == XXX1: - fp.write("xxx: %s\n" % repr(cmd[1])) - elif cmd[0] == DIR: - fp.write("dir: %d\n" % cmd[1]) - elif cmd[0] == SET_RULE: - fp.write("setrule: %s %s\n" % (self.byconv(cmd[1][0]), self.byconv(cmd[1][1]))) - elif cmd[0] == PUT_RULE: - fp.write("putrule: %s %s\n" % (self.byconv(cmd[1][0]), self.byconv(cmd[1][1]))) - elif cmd[0] == SET1: - fp.write("set: %s\n" % PutStr(cmd[1])) - elif cmd[0] == PUT1: - fp.write("put: %s\n" % PutStr(cmd[1])) - elif cmd[0] == FNT1: - f = self.font_def[cmd[1]]['name'] - z = self.font_def[cmd[1]]['scaled_size'] - if IsFontChanged(f, z): - fp.write("fnt: %s " % cur_font) - if self.font_def[cmd[1]]['design_size'] != self.font_def[cmd[1]]['scaled_size']: - fp.write("(%s) " % self.by_pt_conv(self.font_def[cmd[1]]['design_size'])) - fp.write("at %s\n" % self.by_pt_conv(cur_ssize)) - elif cmd[0] == RIGHT1: - fp.write("right: %s\n" % self.byconv(cmd[1])) - elif cmd[0] == DOWN1: - fp.write("down: %s\n" % self.byconv(cmd[1])) - elif cmd[0] == W1: - fp.write("w: %s\n" % self.byconv(cmd[1])) - elif cmd[0] == X1: - fp.write("x: %s\n" % self.byconv(cmd[1])) - elif cmd[0] == Y1: - fp.write("y: %s\n" % self.byconv(cmd[1])) - elif cmd[0] == Z1: - fp.write("z: %s\n" % self.byconv(cmd[1])) - elif cmd[0] == W0: - fp.write("w0:\n") - elif cmd[0] == X0: - fp.write("x0:\n") - elif cmd[0] == Y0: - fp.write("y0:\n") - elif cmd[0] == Z0: - fp.write("z0:\n") - - ########################################################## - # Misc Functions - 
########################################################## - def ComputeConversionFactors(self): - self.sp_conv = (self.numerator / 25400000.) * (473628672. / self.denominator) - self.pt_conv = (self.numerator / 25400000.) * (7227. / self.denominator) - self.bp_conv = (self.numerator / 254000.) * (72. / self.denominator) - self.mm_conv = (self.numerator / 10000.) / self.denominator - self.cm_conv = (self.numerator / 100000.) / self.denominator - self.in_conv = (self.numerator / 254000.) * (1. / self.denominator) - - def ConvLen(self, s): - try: return int(s) - except: pass - try: f = float(s[:-2]) - except: return 0 - m = s[-2:] - if m == "pt": return int(round(f / self.pt_conv)) - elif m == "in": return int(round(f / self.in_conv)) - elif m == "mm": return int(round(f / self.mm_conv)) - elif m == "cm": return int(round(f / self.cm_conv)) - elif m == "bp": return int(round(f / self.bp_conv)) - elif m == "sp": return int(round(f / self.sp_conv)) - else: - try: return int(round(f / self.pt_conv)) - except: return 0 - - def GetFntDef(self, s): - try: - n, size = s.split('(', 1) - d, q = size.split(')', 1) - except: - n, q = s.split(' ', 1) - n = n.strip(); q = q.strip() - if q[:2] == "at": q = q[2:] - q = self.ConvLen(q.strip()) - try: d = self.ConvLen(d.strip()) - except: d = q - return n, q, d - - def by_sp_conv(self, a): - v = self.sp_conv * a - return "%dsp" % int(v) - - def by_pt_conv(self, a): - v = self.pt_conv * a - if v == int(v): return "%dpt" % int(v) - else: return "%fpt" % v - - def by_bp_conv(self, a): - v = self.bp_conv * a - if v == int(v): return "%dbp" % int(v) - else: return "%fbp" % v - - def by_mm_conv(self, a): - v = self.mm_conv * a - if v == int(v): return "%dmm" % int(v) - else: return "%fmm" % v - - def by_cm_conv(self, a): - v = self.cm_conv * a - if v == int(v): return "%dcm" % int(v) - else: return "%fcm" % v - - def by_in_conv(self, a): - v = self.in_conv * a - if v == int(v): return "%din" % int(v) - else: return "%fin" % v - -############################################################ -# Misc Functions for Main Routine -############################################################ -def ProcessOptions(): - usage = """%prog [options] dvi_file|dvi_dump_file - -DVIasm is a Python script to support changing or creating DVI files -via disassembling into text, editing, and then reassembling into -binary format. 
It is fully documented at - -http://tug.org/TUGboat/Articles/tb28-2/tb89cho.pdf -http://ajt.ktug.kr/assets/2008/5/1/0201cho.pdf""" - - version = """This is %prog-20080520 by Jin-Hwan Cho (Korean TeX Society) - -Copyright (C) 2007-2008 by Jin-Hwan Cho - -This is free software; you can redistribute it and/or modify -it under the terms of the GNU General Public License as published by -the Free Software Foundation; either version 2 of the License, or -(at your option) any later version.""" - - parser = OptionParser(usage=usage, version=version) - parser.add_option("-u", "--unit", - action="store", type="string", dest="unit", - metavar="STR", - help="unit (sp, pt, bp, mm, cm, in) [default=%default]") - parser.add_option("-o", "--output", - action="store", type="string", dest="output", - metavar="FILE", - help="filename for output instead of stdout") - parser.add_option("-e", "--encoding", - action="store", type="string", dest="encoding", - metavar="STR", - help="encoding for input/output [default=%default]") - parser.add_option("-t", "--tabsize", - action="store", type="int", dest="tabsize", - metavar="INT", - help="tab size for push/pop [default=%default]") - parser.add_option("-p", "--ptex", - action="store_true", dest="ptex", default=False, - help="extended DVI for Japanese pTeX") - parser.add_option("-s", "--subfont", - action="append", type="string", dest="subfont", - metavar="STR", - help="the list of fonts with UCS2 subfont scheme (comma separated); disable internal subfont list if STR is empty") - parser.set_defaults(unit='pt', encoding='utf8', tabsize=2) - (options, args) = parser.parse_args() - if not options.unit in ['sp', 'pt', 'bp', 'mm', 'cm', 'in']: - parser.error("invalid unit name '%s'!" % options.unit) - if options.tabsize < 0: - parser.error("negative tabsize!") - if not options.encoding in ['ascii', 'latin1', 'utf8', 'sjis']: - parser.error("invalid encoding '%s'!" % options.encoding) - if options.ptex: - global is_ptex - is_ptex = True - if not options.encoding in ['utf8', 'sjis']: - parser.error("invalid encoding '%s' for Japanese pTeX!" 
% options.encoding) - if options.subfont: - global subfont_list - if not options.subfont[0]: # disable subfont - subfont_list = [] - for l in options.subfont: - subfont_list.extend([f.strip() for f in l.split(',')]) - if len(args) != 1: - parser.error("try with the option --help!") - return (options, args) - -def IsDVI(fname): - from os.path import splitext - if splitext(fname)[1] != '.dvi': return False - try: - fp = file(fname, 'rb') - fp.seek(0) - if GetByte(fp) != PRE: return False - fp.seek(-4, 2) - if GetByte(fp) != 223: return False - fp.close() - except: - sys.stderr.write('Failed to read %s\n' % fname) - return False - return True - -############################################################ -# Main Routine -############################################################ -if __name__ == '__main__': - (options, args) = ProcessOptions() - aDVI = DVI(unit=options.unit) - if IsDVI(args[0]): # dvi -> dump - aDVI.Load(args[0]) - if options.output: aDVI.Dump(options.output, tabsize=options.tabsize, encoding=options.encoding) - else: aDVI.DumpToFile(sys.stdout, tabsize=options.tabsize, encoding=options.encoding) - else: # dump -> dvi - aDVI.Parse(args[0], encoding=options.encoding) - if options.output: aDVI.Save(options.output) - else: aDVI.SaveToFile(sys.stdout) diff --git a/Build/source/texk/texlive/linked_scripts/dviasm/dviasm.py b/Build/source/texk/texlive/linked_scripts/dviasm/dviasm.py new file mode 100755 index 00000000000..adfe77c1609 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/dviasm/dviasm.py @@ -0,0 +1,960 @@ +#! /usr/bin/env python +# -*- coding: utf_8 -*- +# +# This is DVIasm, a DVI utility for editing DVI files directly. +# +# Copyright (C) 2007-2008 by Jin-Hwan Cho +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +import sys, os.path +from optparse import OptionParser + +# Global variables +is_ptex = False +is_subfont = False +cur_font = None +cur_dsize = 0 +cur_ssize = 0 +subfont_idx = 0 +subfont_list = ['cyberb', 'outbtm', 'outbtb', 'outgtm', 'outgtb'] + +# DVI opcodes +SET_CHAR_0 = 0; SET_CHAR_127 = 127; +SET1 = 128; SET2 = 129; SET3 = 130; SET4 = 131; +SET_RULE = 132; +PUT1 = 133; PUT2 = 134; PUT3 = 135; PUT4 = 136; +PUT_RULE = 137; +NOP = 138; +BOP = 139; EOP = 140; +PUSH = 141; POP = 142; +RIGHT1 = 143; RIGHT2 = 144; RIGHT3 = 145; RIGHT4 = 146; +W0 = 147; W1 = 148; W2 = 149; W3 = 150; W4 = 151; +X0 = 152; X1 = 153; X2 = 154; X3 = 155; X4 = 156; +DOWN1 = 157; DOWN2 = 158; DOWN3 = 159; DOWN4 = 160; +Y0 = 161; Y1 = 162; Y2 = 163; Y3 = 164; Y4 = 165; +Z0 = 166; Z1 = 167; Z2 = 168; Z3 = 169; Z4 = 170; +FNT_NUM_0 = 171; FNT_NUM_63 = 234; +FNT1 = 235; FNT2 = 236; FNT3 = 237; FNT4 = 238; +XXX1 = 239; XXX2 = 240; XXX3 = 241; XXX4 = 242; +FNT_DEF1 = 243; FNT_DEF2 = 244; FNT_DEF3 = 245; FNT_DEF4 = 246; +PRE = 247; POST = 248; POST_POST = 249; +# DVIV opcodes +DIR = 255; +# XDVI opcodes (not supported yet!) 
+NATIVE_FONT_DEF = 250; +PDF_FILE = 251; PIC_FILE = 252; +GLYPH_ARRAY = 253; GLYPH_STRING = 254; +# DVI identifications +DVI_ID = 2; DVIV_ID = 3; XDVI_ID = 5; + +def Warning(msg): + sys.stderr.write('%s\n' % msg) + +def BadDVI(msg): + raise AttributeError, 'Bad DVI file: %s!' % msg + +def GetByte(fp): # { returns the next byte, unsigned } + try: return ord(fp.read(1)) + except: return -1 + +def SignedByte(fp): # { returns the next byte, signed } + try: b = ord(fp.read(1)) + except: return -1 + if b < 128: return b + else: return b - 256 + +def Get2Bytes(fp): # { returns the next two bytes, unsigned } + try: a, b = map(ord, fp.read(2)) + except: BadDVI('Failed to Get2Bytes()') + return (a << 8) + b + +def SignedPair(fp): # {returns the next two bytes, signed } + try: a, b = map(ord, fp.read(2)) + except: BadDVI('Failed to SignedPair()') + if a < 128: return (a << 8) + b + else: return ((a - 256) << 8) + b + +def Get3Bytes(fp): # { returns the next three bytes, unsigned } + try: a, b, c = map(ord, fp.read(3)) + except: BadDVI('Failed to Get3Bytes()') + return (((a << 8) + b) << 8) + c + +def SignedTrio(fp): # { returns the next three bytes, signed } + try: a, b, c = map(ord, fp.read(3)) + except: BadDVI('Failed to SignedTrio()') + if a < 128: return (((a << 8) + b) << 8) + c + else: return ((((a - 256) << 8) + b) << 8) + c + +def SignedQuad(fp): # { returns the next four bytes, signed } + try: a, b, c, d = map(ord, fp.read(4)) + except: BadDVI('Failed to get SignedQuad()') + if a < 128: return (((((a << 8) + b) << 8) + c) << 8) + d + else: return ((((((a - 256) << 8) + b) << 8) + c) << 8) + d + +def PutByte(q): + return chr(q & 0xff) + +def Put2Bytes(q): + return PutByte(q>>8) + PutByte(q) + +def Put3Bytes(q): + return PutByte(q>>16) + PutByte(q>>8) + PutByte(q) + +def PutSignedQuad(q): + if q < 0: q += 0x100000000 + return PutByte(q>>24) + PutByte(q>>16) + PutByte(q>>8) + PutByte(q) + +def PutUnsigned(q): + if q >= 0x1000000: return (3, PutSignedQuad(q)) + if q >= 0x10000: return (2, Put3Bytes(q)) + if q >= 0x100: return (1, Put2Bytes(q)) + return (0, PutByte(q)) + +def PutSigned(q): + if 0 <= q < 0x800000: return PutUnsigned(q) + if q < -0x800000 or q >= 0x800000: return (3, PutSignedQuad(q)) + if q < -0x8000: q += 0x1000000; return (2, Put3Bytes(q)) + if q < -0x80: q += 0x10000; return (1, Put2Bytes(q)) + return (0, PutByte(q)) + +def GetInt(s): + try: return int(s) + except: return -1 + +def GetStrASCII(s): # used in Parse() + if len(s) > 1 and ((s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"')): return [ord(c) for c in s[1:-1].decode('unicode_escape')] + else: return '' + +def UCS2toJIS(c): + s = c.encode('iso2022-jp') + if len(s) == 1: return ord(s) + else: return (ord(s[3]) << 8) + ord(s[4]) + +def GetStrUTF8(s): # used in Parse() + if len(s) > 1 and ((s[0] == "'" and s[-1] == "'") or (s[0] == '"' and s[-1] == '"')): + t = s[1:-1].decode('string_escape').decode('utf8') + if is_ptex: return [UCS2toJIS(c) for c in t] + else: return [ord(c) for c in t] + else: return '' + +def PutStrASCII(t): # unsed in Dump() + s = '' + for o in t: + if o == 92: s += '\\\\' + elif 32 <= o < 127: s += chr(o) + elif o < 256: s += ('\\x%02x' % o) + elif o < 65536: s += ('\\u%04x' % o) + else: + Warning('Not support characters > 65535; may skip %d.\n' % o) + return "'%s'" % s + +def PutStrLatin1(t): # unsed in Dump() + s = '' + for o in t: + if o == 92: s += '\\\\' + elif 32 <= o < 127 or 161 <= o < 256: s += chr(o) + elif o < 256: s += ('\\x%02x' % o) + elif o < 65536: s += ('\\u%04x' % o) + 
else: + Warning('Not support characters > 65535; may skip %d.\n' % o) + return "'%s'" % s + +def PutStrUTF8(t): # unsed in Dump() + s = '' + if is_subfont: + for o in t: + s += unichr((subfont_idx << 8) + o).encode('utf8') + else: # not the case of subfont + for o in t: + if o == 92: s += '\\\\' + elif 32 <= o < 127: s += chr(o) + elif o < 128: s += ('\\x%02x' % o) + elif is_ptex: + s += ''.join(['\x1b$B', chr(o/256), chr(o%256)]).decode('iso2022-jp').encode('utf8') + else: s += unichr(o).encode('utf8') + return "'%s'" % s + +def PutStrSJIS(t): # unsed in Dump() + s = '' + for o in t: + if o == 92: s += '\\\\' + elif 32 <= o < 127: s += chr(o) + elif o < 128: s += ('\\x%02x' % o) + else: + s += ''.join(['\x1b$B', chr(o/256), chr(o%256)]).decode('iso2022-jp').encode('sjis') + return "'%s'" % s + +def IsFontChanged(f, z): + global cur_font, cur_ssize, subfont_idx, is_subfont + for n in subfont_list: + if n == f[:-2]: + is_subfont = True + subfont_idx = int(f[-2:], 16) + if cur_font == n and cur_ssize == z: + return False + else: + cur_font = n; cur_ssize = z + return True + else: + is_subfont = False + cur_font = f; cur_ssize = z + return True + +############################################################ +# DVI class +############################################################ +class DVI(object): + def __init__(self, unit='pt'): + if unit == 'sp': self.byconv = self.by_sp_conv + elif unit == 'bp': self.byconv = self.by_bp_conv + elif unit == 'mm': self.byconv = self.by_mm_conv + elif unit == 'cm': self.byconv = self.by_cm_conv + elif unit == 'in': self.byconv = self.by_in_conv + else: self.byconv = self.by_pt_conv + self.Initialize() + + ########################################################## + # Initialize: Required by __init__(), Load(), and Parse() + ########################################################## + def Initialize(self): + self.id = DVI_ID + self.numerator = 25400000 + self.denominator = 473628672 + self.mag = 1000 + self.ComputeConversionFactors() + self.comment = '' + self.font_def = {} + self.max_v = self.max_h = self.max_s = self.total_pages = 0 + self.pages = [] + + ########################################################## + # Load: DVI -> Internal Format + ########################################################## + def Load(self, fn): + fp = file(fn, 'rb') + self.LoadFromFile(fp) + fp.close() + + def LoadFromFile(self, fp): + self.Initialize() + fp.seek(0, 2) + if fp.tell() < 53: BadDVI('less than 53 bytes long') + self.ProcessPreamble(fp) + self.ProcessPostamble(fp) + loc = self.first_backpointer + while loc >= 0: + fp.seek(loc) + if GetByte(fp) != BOP: BadDVI('byte %d is not bop' % fp.tell()) + cnt = [SignedQuad(fp) for i in xrange(10)] + loc = SignedQuad(fp) + page = self.ProcessPage(fp) + self.pages.insert(0, {'count':cnt, 'content':page}) + + def ProcessPreamble(self, fp): + fp.seek(0) + if GetByte(fp) != PRE: BadDVI("First byte isn't start of preamble") + id = GetByte(fp) + if id != DVI_ID and id != DVIV_ID and id != XDVI_ID: + Warning("ID byte is %d; use the default %d!" % (id, DVI_ID)) + else: + self.id = id + numerator = SignedQuad(fp) + if numerator <= 0: + Warning('numerator is %d; use the default 25400000!' % numerator) + else: + self.numerator = numerator + denominator = SignedQuad(fp) + if denominator <= 0: + Warning('denominator is %d; use the default 473628672!' % denominator) + else: + self.denominator = denominator + mag = SignedQuad(fp) + if mag <= 0: + Warning('magnification is %d; use the default 1000!' 
% mag) + else: + self.mag = mag + self.comment = fp.read(GetByte(fp)) + self.ComputeConversionFactors() + + def ProcessPostamble(self, fp): + fp.seek(-5, 2) # at least four 223's + while True: + k = GetByte(fp) + if k < 0: BadDVI('all 223s; is it a DVI file?') # found EOF + elif k != 223: break + fp.seek(-2, 1) + if k != DVI_ID and k != DVIV_ID and k != XDVI_ID: + Warning('ID byte is %d' % k) + fp.seek(-5, 1) + q = SignedQuad(fp) + m = fp.tell() # id_byte + if q < 0 or q > m - 33: BadDVI('post pointer %d at byte %d' % (q, m - 4)) + fp.seek(q) # move to post + k = GetByte(fp) + if k != POST: BadDVI('byte %d is not post' % k) + self.post_loc = q + self.first_backpointer = SignedQuad(fp) + + if SignedQuad(fp) != self.numerator: + Warning("numerator doesn't match the preamble!") + if SignedQuad(fp) != self.denominator: + Warning("denominator doesn't match the preamble!") + if SignedQuad(fp) != self.mag: + Warning("magnification doesn't match the preamble!") + self.max_v = SignedQuad(fp) + self.max_h = SignedQuad(fp) + self.max_s = Get2Bytes(fp) + self.total_pages = Get2Bytes(fp) + while True: + k = GetByte(fp) + if k == FNT_DEF1: p = GetByte(fp) + elif k == FNT_DEF2: p = Get2Bytes(fp) + elif k == FNT_DEF3: p = Get3Bytes(fp) + elif k == FNT_DEF4: p = SignedQuad(fp) + elif k != NOP: break + self.DefineFont(p, fp) + if k != POST_POST: + Warning('byte %d is not postpost!' % (fp.tell() - 1)) + if SignedQuad(fp) != self.post_loc: + Warning('bad postamble pointer in byte %d!' % (fp.tell() - 4)) + m = GetByte(fp) + if m != DVI_ID and m != DVIV_ID and m != XDVI_ID: + Warning('identification in byte %d should be %d, %d, or %d!' % (fp.tell() - 1, DVI_ID, DVIV_ID, XDVI_ID)) + + def DefineFont(self, e, fp): + c = SignedQuad(fp) # font_check_sum + q = SignedQuad(fp) # font_scaled_size + d = SignedQuad(fp) # font_design_size + n = fp.read(GetByte(fp) + GetByte(fp)) + try: + f = self.font_def[e] + except KeyError: + self.font_def[e] = {'name':n, 'checksum':c, 'scaled_size':q, 'design_size':d} + if q <= 0 or q >= 01000000000: + Warning("%s---not loaded, bad scale (%d)!" % (n, q)) + elif d <= 0 or d >= 01000000000: + msssage("%s---not loaded, bad design size (%d)!" 
% (n, d)) + else: + if f['checksum'] != c: + Warning("\t---check sum doesn't match previous definition!") + if f['scaled_size'] != q: + Warning("\t---scaled size doesn't match previous definition!") + if f['design_size'] != d: + Warning("\t---design size doesn't match previous definition!") + if f['name'] != n: + Warning("\t---font name doesn't match previous definition!") + + def ProcessPage(self, fp): + s = [] + while True: + o = GetByte(fp) + p = self.Get1Arg(o, fp) + if o < SET_CHAR_0 + 128 or o in (SET1, SET2, SET3, SET4): + q = [p] + while True: + o = GetByte(fp) + p = self.Get1Arg(o, fp) + if o < SET_CHAR_0 + 128 or o in (SET1, SET2, SET3, SET4): + q.append(p) + else: + break + s.append([SET1, q]) + if o == SET_RULE: + s.append([SET_RULE, [p, SignedQuad(fp)]]) + elif o in (PUT1, PUT2, PUT3, PUT4): + s.append([PUT1, p]) + elif o == PUT_RULE: + s.append([PUT_RULE, [p, SignedQuad(fp)]]) + elif o == NOP: + continue + elif o == BOP: + Warning('bop occurred before eop!') + break + elif o == EOP: + break + elif o == PUSH: + s.append([PUSH]) + elif o == POP: + s.append([POP]) + elif o in (RIGHT1, RIGHT2, RIGHT3, RIGHT4): + s.append([RIGHT1, p]) + elif o == W0: + s.append([W0]) + elif o in (W1, W2, W3, W4): + s.append([W1, p]) + elif o == X0: + s.append([X0]) + elif o in (X1, X2, X3, X4): + s.append([X1, p]) + elif o in (DOWN1, DOWN2, DOWN3, DOWN4): + s.append([DOWN1, p]) + elif o == Y0: + s.append([Y0]) + elif o in (Y1, Y2, Y3, Y4): + s.append([Y1, p]) + elif o == Z0: + s.append([Z0]) + elif o in (Z1, Z2, Z3, Z4): + s.append([Z1, p]) + elif o < FNT_NUM_0 + 64 or o in (FNT1, FNT2, FNT3, FNT4): + s.append([FNT1, p]) + elif o in (XXX1, XXX2, XXX3, XXX4): + q = fp.read(p) + s.append([XXX1, q]) + elif o in (FNT_DEF1, FNT_DEF2, FNT_DEF3, FNT_DEF4): + self.DefineFont(p, fp) + elif o == DIR: + s.append([DIR, p]) + elif o == PRE: + Warning('preamble command within a page!') + break + elif o in (POST, POST_POST): + Warning('postamble command %d!' % o) + break + else: + Warning('undefined command %d!' 
% o) + break + return s + + def Get1Arg(self, o, fp): + if o < SET_CHAR_0 + 128: + return o - SET_CHAR_0 + if o in (SET1, PUT1, FNT1, XXX1, FNT_DEF1, DIR): + return GetByte(fp) + if o in (SET2, PUT2, FNT2, XXX2, FNT_DEF2): + return Get2Bytes(fp) + if o in (SET3, PUT3, FNT3, XXX3, FNT_DEF3): + return Get3Bytes(fp) + if o in (RIGHT1, W1, X1, DOWN1, Y1, Z1): + return SignedByte(fp) + if o in (RIGHT2, W2, X2, DOWN2, Y2, Z2): + return SignedPair(fp) + if o in (RIGHT3, W3, X3, DOWN3, Y3, Z3): + return SignedTrio(fp) + if o in (SET4, SET_RULE, PUT4, PUT_RULE, RIGHT4, W4, X4, DOWN4, Y4, Z4, FNT4, XXX4, FNT_DEF4): + return SignedQuad(fp) + if o in (NOP, BOP, EOP, PUSH, POP, PRE, POST, POST_POST) or o > POST_POST: + return 0 + if o in (W0, X0, Y0, Z0): + return 0 + if o < FNT_NUM_0 + 64: + return o - FNT_NUM_0 + + ########################################################## + # Save: Internal Format -> DVI + ########################################################## + def Save(self, fn): + fp = file(fn, 'wb') + self.SaveToFile(fp) + fp.close() + + def SaveToFile(self, fp): + # WritePreamble + fp.write(''.join([chr(PRE), PutByte(self.id), PutSignedQuad(self.numerator), PutSignedQuad(self.denominator), PutSignedQuad(self.mag), PutByte(len(self.comment)), self.comment])) + # WriteFontDefinitions + self.WriteFontDefinitions(fp) + # WritePages + stackdepth = 0; loc = -1 + for page in self.pages: + w = x = y = z = 0; stack = [] + s = [chr(BOP)] + s.extend([PutSignedQuad(c) for c in page['count']]) + s.append(PutSignedQuad(loc)) + for cmd in page['content']: + if cmd[0] == SET1: + for o in cmd[1]: + if o < 128: s.append(chr(SET_CHAR_0 + o)) + else: s.append(self.CmdPair([SET1, o])) + elif cmd[0] in (SET_RULE, PUT_RULE): + s.append(chr(cmd[0]) + PutSignedQuad(cmd[1][0]) + PutSignedQuad(cmd[1][1])) + elif cmd[0] == PUT1: + s.append(self.CmdPair([PUT1, cmd[1][0]])) + elif cmd[0] in (RIGHT1, DOWN1): + s.append(self.CmdPair(cmd)) + elif cmd[0] in (W0, X0, Y0, Z0): + s.append(chr(cmd[0])) + elif cmd[0] == PUSH: + s.append(chr(PUSH)) + stack.append((w, x, y, z)) + if len(stack) > stackdepth: stackdepth = len(stack) + elif cmd[0] == POP: + s.append(chr(POP)) + w, x, y, z = stack.pop() + elif cmd[0] == W1: + w = cmd[1]; s.append(self.CmdPair(cmd)) + elif cmd[0] == X1: + x = cmd[1]; s.append(self.CmdPair(cmd)) + elif cmd[0] == Y1: + y = cmd[1]; s.append(self.CmdPair(cmd)) + elif cmd[0] == Z1: + z = cmd[1]; s.append(self.CmdPair(cmd)) + elif cmd[0] == FNT1: + if cmd[1] < 64: s.append(chr(FNT_NUM_0 + cmd[1])) + else: s.append(self.CmdPair(cmd)) + elif cmd[0] == XXX1: + l = len(cmd[1]) + if l < 256: s.append(chr(XXX1) + chr(l) + cmd[1]) + else: s.append(chr(XXX4) + PutSignedQuad(l) + cmd[1]) + elif cmd[0] == DIR: + s.append(chr(DIR) + chr(cmd[1])) + else: + Warning('invalid command %s!' 
% cmd[0]) + s.append(chr(EOP)) + loc = fp.tell() + fp.write(''.join(s)) + # WritePostamble + post_loc = fp.tell() + fp.write(''.join([chr(POST), PutSignedQuad(loc), PutSignedQuad(self.numerator), PutSignedQuad(self.denominator), PutSignedQuad(self.mag), PutSignedQuad(self.max_v), PutSignedQuad(self.max_h), Put2Bytes(stackdepth+1), Put2Bytes(len(self.pages))])) + # WriteFontDefinitions + self.WriteFontDefinitions(fp) + # WritePostPostamble + fp.write(''.join([chr(POST_POST), PutSignedQuad(post_loc), PutByte(self.id), '\xdf\xdf\xdf\xdf'])) + loc = fp.tell() + while (loc % 4) != 0: + fp.write('\xdf'); loc += 1 + + def WriteFontDefinitions(self, fp): + s = [] + for e in sorted(self.font_def.keys()): + l, q = PutUnsigned(e) + s.append(PutByte(FNT_DEF1 + l)) + s.append(q) + s.append(PutSignedQuad(self.font_def[e]['checksum'])) + s.append(PutSignedQuad(self.font_def[e]['scaled_size'])) + s.append(PutSignedQuad(self.font_def[e]['design_size'])) + s.append('\x00') + s.append(PutByte(len(self.font_def[e]['name']))) + s.append(self.font_def[e]['name']) + fp.write(''.join(s)) + + def CmdPair(self, cmd): + l, q = PutSigned(cmd[1]) + return chr(cmd[0] + l) + q + + ########################################################## + # Parse: Text -> Internal Format + ########################################################## + def Parse(self, fn, encoding=''): + fp = file(fn, 'r') + s = fp.read() + fp.close() + self.ParseFromString(s, encoding=encoding) + + def ParseFromString(self, s, encoding=''): + global GetStr, cur_font, cur_dsize, cur_ssize, subfont_idx + if encoding == 'ascii': GetStr = GetStrASCII + else: GetStr = GetStrUTF8 + self.Initialize() + self.fnt_num = 0 + for l in s.split('\n'): + l = l.strip() + if not l or l[0] == '%': continue + try: + key, val = l.split(':', 1) + key = key.strip(); val = val.strip() + except: + if l[-1] == ']': v = l[:-1].split(' ') + else: v = l.split(' ') + if v[0] == "[page": + self.cur_page = [] + count = [GetInt(c) for c in v[1:]] + if len(count) < 10: count += ([0] * (10-len(count))) + self.pages.append({'count':count, 'content':self.cur_page}) + continue + # ParsePreamble + if key == "id": + self.id = GetInt(val) + if self.id != DVI_ID and self.id != DVIV_ID and self.id != XDVI_ID: + Warning("identification byte should be %d, %d, or %d!" % (DVI_ID, DVIV_ID, XDVI_ID)) + elif key == "numerator": + d = GetInt(val) + if d <= 0: + Warning('non-positive numerator %d!' % d) + else: + self.numerator = d + self.ComputeConversionFactors() + elif key == "denominator": + d = GetInt(val) + if d <= 0: + Warning('non-positive denominator %d!' % d) + else: + self.denominator = d + self.ComputeConversionFactors() + elif key == "magnification": + d = GetInt(val) + if d <= 0: + Warning('non-positive magnification %d!' 
% d) + else: + self.mag = d + elif key == "comment": + self.comment = val[1:-1] + # Parse Postamble + elif key == "maxv": + self.max_v = self.ConvLen(val) + elif key == "maxh": + self.max_h = self.ConvLen(val) + elif key == "maxs": + self.max_s = GetInt(val) + elif key == "pages": + self.total_pages = GetInt(val) + # Parse Font Definitions + elif key == "fntdef": + n, q, d = self.GetFntDef(val) + self.font_def[self.fnt_num] = {'name':n, 'design_size':d, 'scaled_size':q, 'checksum':0} + self.fnt_num += 1 + # Parse Pages + elif key == 'xxx': + self.cur_page.append([XXX1, eval(val)]) + elif key == 'set': + ol = GetStr(val) + if is_subfont: + subfont_idx = (ol[0] >> 8) + self.AppendFNT1() + nl = [ol[0] & 0xff] + for o in ol[1:]: + idx = (o >> 8) + if idx != subfont_idx: + self.cur_page.append([SET1, nl]) + subfont_idx = idx + self.AppendFNT1() + nl = [o & 0xff] + else: + nl.append(o & 0xff) + self.cur_page.append([SET1, nl]) + else: + self.cur_page.append([SET1, ol]) + elif key == 'put': + self.cur_page.append([PUT1, GetStr(val)]) + elif key == 'setrule': + v = val.split(' ') + if len(v) != 2: + Warning('two values are required for setrule!') + continue + self.cur_page.append([SET_RULE, [self.ConvLen(c) for c in v]]) + elif key == 'putrule': + v = val.split(' ') + if len(v) != 2: + Warning('two values are required for putrule!') + continue + self.cur_page.append([PUT_RULE, [self.ConvLen(c) for c in v]]) + elif key == 'fnt': + n, q, d = self.GetFntDef(val) + if n in subfont_list: + is_subfont = True + cur_font = n; cur_dsize = d; cur_ssize = q + else: + is_subfont = False + f = {'name':n, 'design_size':d, 'scaled_size':q, 'checksum':0} + try: + e = self.font_def.keys()[self.font_def.values().index(f)] + except: + e = self.fnt_num + self.font_def[self.fnt_num] = f + self.fnt_num += 1 + self.cur_page.append([FNT1, e]) + elif key == 'right': + self.cur_page.append([RIGHT1, self.ConvLen(val)]) + elif key == 'down': + self.cur_page.append([DOWN1, self.ConvLen(val)]) + elif key == 'w': + self.cur_page.append([W1, self.ConvLen(val)]) + elif key == 'x': + self.cur_page.append([X1, self.ConvLen(val)]) + elif key == 'y': + self.cur_page.append([Y1, self.ConvLen(val)]) + elif key == 'z': + self.cur_page.append([Z1, self.ConvLen(val)]) + elif key == 'push': + self.cur_page.append([PUSH]) + elif key == 'pop': + self.cur_page.append([POP]) + elif key == 'w0': + self.cur_page.append([W0]) + elif key == 'x0': + self.cur_page.append([X0]) + elif key == 'y0': + self.cur_page.append([Y0]) + elif key == 'z0': + self.cur_page.append([Z0]) + elif key == 'dir': + self.cur_page.append([DIR, GetInt(val)]) + else: + Warning('invalid command %s!' 
% key) + + def AppendFNT1(self): + f = {'name':cur_font+"%02x"%subfont_idx, 'design_size':cur_dsize, 'scaled_size':cur_ssize, 'checksum':0} + try: + e = self.font_def.keys()[self.font_def.values().index(f)] + except: + e = self.fnt_num + self.font_def[e] = f + self.fnt_num += 1 + self.cur_page.append([FNT1, e]) + + ########################################################## + # Dump: Internal Format -> Text + ########################################################## + def Dump(self, fn, tabsize=2, encoding=''): + fp = file(fn, 'w') + self.DumpToFile(fp, tabsize=tabsize, encoding=encoding) + fp.close() + + def DumpToFile(self, fp, tabsize=2, encoding=''): + global PutStr + if encoding == 'ascii': PutStr = PutStrASCII + elif encoding == 'latin1': PutStr = PutStrLatin1 + elif encoding == 'sjis': PutStr = PutStrSJIS + else: PutStr = PutStrUTF8 + # DumpPreamble + fp.write("[preamble]\n") + fp.write("id: %d\n" % self.id) + fp.write("numerator: %d\n" % self.numerator) + fp.write("denominator: %d\n" % self.denominator) + fp.write("magnification: %d\n" % self.mag) + fp.write("comment: %s\n" % repr(self.comment)) + # DumpPostamble + fp.write("\n[postamble]\n") + fp.write("maxv: %s\n" % self.byconv(self.max_v)) + fp.write("maxh: %s\n" % self.byconv(self.max_h)) + fp.write("maxs: %d\n" % self.max_s) + fp.write("pages: %d\n" % self.total_pages) + # DumpFontDefinitions + fp.write("\n[font definitions]\n") + for e in sorted(self.font_def.keys()): + fp.write("fntdef: %s " % self.font_def[e]['name']) + if self.font_def[e]['design_size'] != self.font_def[e]['scaled_size']: + fp.write("(%s) " % self.by_pt_conv(self.font_def[e]['design_size'])) + fp.write("at %s\n" % self.by_pt_conv(self.font_def[e]['scaled_size'])) + # DumpPages + for page in self.pages: + fp.write("\n[page" + (" %d"*10 % tuple(page['count'])) + "]\n") + indent = 0 + for cmd in page['content']: + if cmd[0] == POP: + indent -= tabsize + fp.write("%spop:\n" % (' ' * indent)) + continue + fp.write("%s" % (' ' * indent)) + if cmd[0] == PUSH: + fp.write("push:\n") + indent += tabsize + elif cmd[0] == XXX1: + fp.write("xxx: %s\n" % repr(cmd[1])) + elif cmd[0] == DIR: + fp.write("dir: %d\n" % cmd[1]) + elif cmd[0] == SET_RULE: + fp.write("setrule: %s %s\n" % (self.byconv(cmd[1][0]), self.byconv(cmd[1][1]))) + elif cmd[0] == PUT_RULE: + fp.write("putrule: %s %s\n" % (self.byconv(cmd[1][0]), self.byconv(cmd[1][1]))) + elif cmd[0] == SET1: + fp.write("set: %s\n" % PutStr(cmd[1])) + elif cmd[0] == PUT1: + fp.write("put: %s\n" % PutStr(cmd[1])) + elif cmd[0] == FNT1: + f = self.font_def[cmd[1]]['name'] + z = self.font_def[cmd[1]]['scaled_size'] + if IsFontChanged(f, z): + fp.write("fnt: %s " % cur_font) + if self.font_def[cmd[1]]['design_size'] != self.font_def[cmd[1]]['scaled_size']: + fp.write("(%s) " % self.by_pt_conv(self.font_def[cmd[1]]['design_size'])) + fp.write("at %s\n" % self.by_pt_conv(cur_ssize)) + elif cmd[0] == RIGHT1: + fp.write("right: %s\n" % self.byconv(cmd[1])) + elif cmd[0] == DOWN1: + fp.write("down: %s\n" % self.byconv(cmd[1])) + elif cmd[0] == W1: + fp.write("w: %s\n" % self.byconv(cmd[1])) + elif cmd[0] == X1: + fp.write("x: %s\n" % self.byconv(cmd[1])) + elif cmd[0] == Y1: + fp.write("y: %s\n" % self.byconv(cmd[1])) + elif cmd[0] == Z1: + fp.write("z: %s\n" % self.byconv(cmd[1])) + elif cmd[0] == W0: + fp.write("w0:\n") + elif cmd[0] == X0: + fp.write("x0:\n") + elif cmd[0] == Y0: + fp.write("y0:\n") + elif cmd[0] == Z0: + fp.write("z0:\n") + + ########################################################## + # Misc Functions + 
########################################################## + def ComputeConversionFactors(self): + self.sp_conv = (self.numerator / 25400000.) * (473628672. / self.denominator) + self.pt_conv = (self.numerator / 25400000.) * (7227. / self.denominator) + self.bp_conv = (self.numerator / 254000.) * (72. / self.denominator) + self.mm_conv = (self.numerator / 10000.) / self.denominator + self.cm_conv = (self.numerator / 100000.) / self.denominator + self.in_conv = (self.numerator / 254000.) * (1. / self.denominator) + + def ConvLen(self, s): + try: return int(s) + except: pass + try: f = float(s[:-2]) + except: return 0 + m = s[-2:] + if m == "pt": return int(round(f / self.pt_conv)) + elif m == "in": return int(round(f / self.in_conv)) + elif m == "mm": return int(round(f / self.mm_conv)) + elif m == "cm": return int(round(f / self.cm_conv)) + elif m == "bp": return int(round(f / self.bp_conv)) + elif m == "sp": return int(round(f / self.sp_conv)) + else: + try: return int(round(f / self.pt_conv)) + except: return 0 + + def GetFntDef(self, s): + try: + n, size = s.split('(', 1) + d, q = size.split(')', 1) + except: + n, q = s.split(' ', 1) + n = n.strip(); q = q.strip() + if q[:2] == "at": q = q[2:] + q = self.ConvLen(q.strip()) + try: d = self.ConvLen(d.strip()) + except: d = q + return n, q, d + + def by_sp_conv(self, a): + v = self.sp_conv * a + return "%dsp" % int(v) + + def by_pt_conv(self, a): + v = self.pt_conv * a + if v == int(v): return "%dpt" % int(v) + else: return "%fpt" % v + + def by_bp_conv(self, a): + v = self.bp_conv * a + if v == int(v): return "%dbp" % int(v) + else: return "%fbp" % v + + def by_mm_conv(self, a): + v = self.mm_conv * a + if v == int(v): return "%dmm" % int(v) + else: return "%fmm" % v + + def by_cm_conv(self, a): + v = self.cm_conv * a + if v == int(v): return "%dcm" % int(v) + else: return "%fcm" % v + + def by_in_conv(self, a): + v = self.in_conv * a + if v == int(v): return "%din" % int(v) + else: return "%fin" % v + +############################################################ +# Misc Functions for Main Routine +############################################################ +def ProcessOptions(): + usage = """%prog [options] dvi_file|dvi_dump_file + +DVIasm is a Python script to support changing or creating DVI files +via disassembling into text, editing, and then reassembling into +binary format. 
It is fully documented at + +http://tug.org/TUGboat/Articles/tb28-2/tb89cho.pdf +http://ajt.ktug.kr/assets/2008/5/1/0201cho.pdf""" + + version = """This is %prog-20080520 by Jin-Hwan Cho (Korean TeX Society) + +Copyright (C) 2007-2008 by Jin-Hwan Cho + +This is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version.""" + + parser = OptionParser(usage=usage, version=version) + parser.add_option("-u", "--unit", + action="store", type="string", dest="unit", + metavar="STR", + help="unit (sp, pt, bp, mm, cm, in) [default=%default]") + parser.add_option("-o", "--output", + action="store", type="string", dest="output", + metavar="FILE", + help="filename for output instead of stdout") + parser.add_option("-e", "--encoding", + action="store", type="string", dest="encoding", + metavar="STR", + help="encoding for input/output [default=%default]") + parser.add_option("-t", "--tabsize", + action="store", type="int", dest="tabsize", + metavar="INT", + help="tab size for push/pop [default=%default]") + parser.add_option("-p", "--ptex", + action="store_true", dest="ptex", default=False, + help="extended DVI for Japanese pTeX") + parser.add_option("-s", "--subfont", + action="append", type="string", dest="subfont", + metavar="STR", + help="the list of fonts with UCS2 subfont scheme (comma separated); disable internal subfont list if STR is empty") + parser.set_defaults(unit='pt', encoding='utf8', tabsize=2) + (options, args) = parser.parse_args() + if not options.unit in ['sp', 'pt', 'bp', 'mm', 'cm', 'in']: + parser.error("invalid unit name '%s'!" % options.unit) + if options.tabsize < 0: + parser.error("negative tabsize!") + if not options.encoding in ['ascii', 'latin1', 'utf8', 'sjis']: + parser.error("invalid encoding '%s'!" % options.encoding) + if options.ptex: + global is_ptex + is_ptex = True + if not options.encoding in ['utf8', 'sjis']: + parser.error("invalid encoding '%s' for Japanese pTeX!" 
% options.encoding) + if options.subfont: + global subfont_list + if not options.subfont[0]: # disable subfont + subfont_list = [] + for l in options.subfont: + subfont_list.extend([f.strip() for f in l.split(',')]) + if len(args) != 1: + parser.error("try with the option --help!") + return (options, args) + +def IsDVI(fname): + from os.path import splitext + if splitext(fname)[1] != '.dvi': return False + try: + fp = file(fname, 'rb') + fp.seek(0) + if GetByte(fp) != PRE: return False + fp.seek(-4, 2) + if GetByte(fp) != 223: return False + fp.close() + except: + sys.stderr.write('Failed to read %s\n' % fname) + return False + return True + +############################################################ +# Main Routine +############################################################ +if __name__ == '__main__': + (options, args) = ProcessOptions() + aDVI = DVI(unit=options.unit) + if IsDVI(args[0]): # dvi -> dump + aDVI.Load(args[0]) + if options.output: aDVI.Dump(options.output, tabsize=options.tabsize, encoding=options.encoding) + else: aDVI.DumpToFile(sys.stdout, tabsize=options.tabsize, encoding=options.encoding) + else: # dump -> dvi + aDVI.Parse(args[0], encoding=options.encoding) + if options.output: aDVI.Save(options.output) + else: aDVI.SaveToFile(sys.stdout) diff --git a/Build/source/texk/texlive/linked_scripts/ebong.py b/Build/source/texk/texlive/linked_scripts/ebong.py deleted file mode 100755 index c4162cf0ef6..00000000000 --- a/Build/source/texk/texlive/linked_scripts/ebong.py +++ /dev/null @@ -1,346 +0,0 @@ -#!/usr/bin/env python -# look in newbong -import sre -A='A' -B='B' -S='S' -s='s' -F='F' -X='X' - -NCLINE = 0 -global NCWORD,CWORD - -AKSAR={ - 'k' :[B,'k'], - 'kh' :[B,'kh'], - 'g' :[B,'g'], - 'gh' :[B,'gh'], - 'ng' :[B,'NG'], - - 'ch' :[B,'c'], - '^ch' :[B,'ch'], - 'j' :[B,'j'], - 'jh' :[B,'jh'], - '^y' :[B,'NJ'], - '_n' :[B,'NJ'], - - 't' :[B,'T'], - '^th' :[B,'Th'], - 'd' :[B,'D'], - 'dh' :[B,'Dh'], - '^n' :[B,'N'], - - '_t' :[B,'t'], - 'th' :[B,'th'], - '_d' :[B,'d'], - '_dh' :[B,'dh'], - 'n' :[B,'n'], - - 'p' :[B,'p'], - 'ph' :[B,'ph'], - 'f' :[B,'ph'], - 'b' :[B,'b'], - 'bh' :[B,'bh'], - 'v' :[B,'bh'], - 'm' :[B,'m'], - 'M' :[F,'M'], - - '^j' :[B,'J'], - 'J' :[B,'J'], - 'r' :[B,'r'], - 'R' :[F,'R'], - 'l' :[B,'l'], - 'L' :[F,'L'], - 'W' :[F,'W'], - 'V' :[F,'W'], - 'h' :[B,'H'], - 'kk' :[B,'kK'], - 'kkm' :[B,'kK/N'], - - 'sh' :[B,'sh'], - '^s' :[B,'Sh'], - '^sh' :[B,'Sh'], - 's' :[B,'s'], - - '^r' :[B,'rh'], - '^rh' :[B,'rhh'], - 'y' :[B,'y'], - 'Y' :[F,'Y'], - 'JY' :[F,'Y'], - '__t' :[B,'t//'], - '^ng' :[B,'NNG'], - ':h' :[B,'h'], - '^' :[F,'NN'], - '_' :[F,':/'], - - 'A' :[S,'A'], - 'AA' :[S,'Aa'], - 'I' :[S,'I'], - 'II' :[S,'II'], - 'U' :[S,'U'], - 'UU' :[S,'UU'], - 'RI' :[S,'RR'], - 'E' :[S,'E'], - 'OI' :[S,'OI'], - 'O' :[S,'O'], - 'OU' :[S,'OU'], - - 'a' :[X,'o',1], - 'aa' :[s,'a',1], - 'i' :[s,'i',-1], - 'ii' :[s,'ii',1], - 'u' :[s,'u',1], - 'uu' :[s,'uu',1], - 'RII' :[s,'rR',1], - 'e' :[s,'e',-1], - 'oi' :[s,'oi',-2], - 'oo' :[s,'oo',11], - 'o' :[X,'o',1], - 'ou' :[s,'ou',12], - - '.' :[F,'.'], - '..' :[F,'..'], - '...' :[F,'...'], - '|' :[F,'|'], - - '~' :[F,'~'], - '`' :[F,'`'], - '!' 
:[F,'!'], - '1' :[F,'1'], - '2' :[F,'2'], - 'at' :[F,'@'], - '#' :[F,'#'], - '3' :[F,'3'], - '$' :[F,'$'], - '4' :[F,'4'], - '%' :[F,'%'], - '5' :[F,'5'], - '6' :[F,'6'], - '&' :[F,'&'], - '7' :[F,'7'], - '*' :[F,'*'], - '8' :[F,'8'], - '(' :[F,'('], - '9' :[F,'9'], - ')' :[F,')'], - '0' :[F,'0'], - 'dash' :[F,'-'], - '+' :[F,'+'], - '=' :[F,'='], - '|' :[F,'|'], - '{' :[F,'{'], - '[' :[F,'['], - '}' :[F,'}'], - ']' :[F,']'], - ':' :[F,':'], - ';' :[F,';'], - '"' :[F,'"'], - "'" :[F,"'"], - '<' :[F,'<'], - ',' :[F,','], - '>' :[F,'>'], - '.' :[F,'.'], - '?' :[F,'?'], - '/' :[F,'/']} - -CATCODES = {'SS' :[S,'','','',1], - 'SB' :[B,'','','',1], - 'BS' :[S,'','','',1], - 'BB' :[B,'','/','',1], - 'BF' :[F,'','','',1], - 'Bs1' :[S,'','','',1], - 'Bs-1':[S,'\*','','*',1], - 'Bs-2':[S,'\*','','*{oi}',0], - 'Bs11':[S,'\*','','*ea',0], - 'Bs12':[S,'\*','','*eou',0], - 'Fs1' :[S,'','','',1], - 'Fs-1':[S,'\*','','*',1], - 'Fs-2':[S,'\*','','*{oi}',0], - 'Fs11':[S,'\*','','*ea',0], - 'Fs12':[S,'\*','','*eou',0], - 'FF' :[F,'','','',1], - 'AX' :[F,'','','',1]} - -def blocked(line): - #print '@ blocked', line , '->', - m = sre.findall('@[^@]+@',line) - outline = line - if not m : - #print outline - return(outline) - else: - for i in range(len(m)): - s=m[i][:-1].replace(' ','%X%') - outline = outline.replace(m[i],s,1) - #print outline - return(outline) - -def unblock(line): - #print '@unblock', line, '->', - m = sre.findall('@[^\s]+',line) - outline = line - if not m : - #print outline - return(outline) - else: - for i in range(len(m)): - s=m[i].replace('@','').replace('%X%',' ') - outline = outline.replace(m[i],s) - #print outline - return(outline) - -def printamp(line): - #print '@unblock', line, '->', - m = sre.findall('#AT',line) - outline = line - if not m : - #print outline - return(outline) - else: - for i in range(len(m)): - outline = outline.replace('#AT','@') - #print outline - return(outline) - -def readsyll(syll): - syllparts=[] - start = 0; end = len(syll) - while syll[start : end]: - slice = syll[start : end] - #print slice - if AKSAR.has_key(slice): - syllparts.append(AKSAR[slice]) - start = start + len(slice) - end = len(syll) - else : - end = end -1 - return(syllparts) - -def fuse(list1,list2): - global CCATCODE - #print list1,list2 - Type1 = list1[0] - Type2 = list2[0] - - if Type2 == s: - Type3 = str(list2[2]) - elif Type2 == X: - Type1=A - Type3='' - else: - Type3 ='' - - Type = Type1+Type2+Type3 - - #print 'Type:', Type - - try: - CATCODE = CATCODES[Type] - TARGET = CATCODE[0] - PREFIX = CATCODE[1] - MIDFIX = CATCODE[2] - POSTFIX = CATCODE[3] - FLAG = CATCODE[4] - - #print 'TGT:', TARGET, PREFIX,MIDFIX,POSTFIX,FLAG - #print 'RAWC', AKSAR[list1[1]][1],AKSAR[list2[1]][1] - - c1=list1[1] - c2=list2[1] - - if FLAG == 1 : - c = PREFIX + c1 + MIDFIX + POSTFIX + c2 - else : - c = PREFIX + c1 + MIDFIX + POSTFIX - - fused = [TARGET,c] - #print CATCODE - return(fused) - except KeyError: - print '\n ERROR AT LINE:', NCLINE, 'WORD:',NCWORD, '(',CWORD,')' - return(['ERROR','UNKNOWN CATCODE']) - -def fuseatoms(syll): - slist=readsyll(syll); - #print slist - lslist=len(slist); - l0=slist[0]; - for i in range(1,lslist): - nextitem = slist[i] - l0=fuse(l0,nextitem) - - return(l0[1]) - -def fuseword(wrd): - if wrd[0] == '@' : - return(wrd) - syllables = wrd.split('-') - w0='' - for eachsyll in syllables: - syll=eachsyll - thesyll = fuseatoms(syll) - w0 = w0 + thesyll - #print 'FUSED WORD',w0 - return(w0) - -def fuseline(line): - global NCWORD,CWORD - NCWORD = 0 - #line = blocked(line) - words = 
line.split() - l0='' - for eachword in words: - NCWORD=NCWORD+1 - word = eachword - CWORD=word - theword=fuseword(word) - #print 'XX',theword - l0=l0+' '+theword - #print 'FUSED LINE', l0 - return(l0) - -# The main program -import sys -OK=1 -finnam = sys.argv[1] -foutnam = finnam.split('.')[0] + '.' + 'tex' - -fin = file(finnam,'rt') -fout = file(foutnam,'wt') - -textin = fin.readlines() -nlines = len(textin) - -textout = [] - -fin.close() - -for eachline in textin: - NCLINE = NCLINE+1 - if eachline[0] == '#' : - lineout = eachline[1:] - elif eachline[0] == '\\' : - lineout = eachline - elif eachline == '\n': - lineout = eachline - else : - line1 = eachline.strip() - line2 = blocked(line1) - lineout = fuseline(line2) + '\n' - lineout = lineout[1:] - #print ':::', lineout - if lineout.find('UNKNOWN CATCODE') == -1 : - lineout = unblock(lineout) - #print ':::', lineout - textout.append(printamp(lineout)) - else : - OK = 0 - fout.close() - -if OK == 1: - fout.writelines(textout) - fout.close() - print 'done' -else: - print 'Unknown CATCODE, Fix The errors and try again' diff --git a/Build/source/texk/texlive/linked_scripts/epspdf b/Build/source/texk/texlive/linked_scripts/epspdf deleted file mode 100755 index 4492608f398..00000000000 --- a/Build/source/texk/texlive/linked_scripts/epspdf +++ /dev/null @@ -1,3 +0,0 @@ -#!/bin/sh -script=`kpsewhich -format=texmfscripts epspdf.rb` -"$script" $* diff --git a/Build/source/texk/texlive/linked_scripts/epspdf.x b/Build/source/texk/texlive/linked_scripts/epspdf.x new file mode 100755 index 00000000000..4492608f398 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/epspdf.x @@ -0,0 +1,3 @@ +#!/bin/sh +script=`kpsewhich -format=texmfscripts epspdf.rb` +"$script" $* diff --git a/Build/source/texk/texlive/linked_scripts/exatools b/Build/source/texk/texlive/linked_scripts/exatools deleted file mode 100755 index 50ff0f07e46..00000000000 --- a/Build/source/texk/texlive/linked_scripts/exatools +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -texmfstart exatools.rb "$@" diff --git a/Build/source/texk/texlive/linked_scripts/fragmaster.pl b/Build/source/texk/texlive/linked_scripts/fragmaster.pl deleted file mode 100755 index efb4c9aa892..00000000000 --- a/Build/source/texk/texlive/linked_scripts/fragmaster.pl +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/perl -w - -###################################################################### -# $Id: fragmaster.pl,v 1.3 2006/09/26 08:59:30 tvogel Exp $ -# -# fragmaster.pl -# creates EPS and PDF graphics from source EPS and control files -# with \psfrag commands -# -# Copyright (C) 2004 Tilman Vogel (dot at dot) -# -# This program is free software; you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation; either version 2 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. 
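The epspdf stub above resolves its real implementation with kpsewhich, while exatools delegates to texmfstart; both are thin launchers around scripts installed elsewhere in the texmf tree. A rough Python illustration of the kpsewhich-based lookup, with the assumptions that kpsewhich is on PATH and that epspdf.rb (the target named in the stub) is the script being launched:

    # Illustration of the two-line shell stub pattern, not a replacement for it.
    import subprocess, sys

    def run_linked_script(script_name, args):
        # kpsewhich -format=texmfscripts prints the full path of the installed script.
        path = subprocess.check_output(
            ['kpsewhich', '-format=texmfscripts', script_name]).decode().strip()
        return subprocess.call([path] + list(args))

    if __name__ == '__main__':
        sys.exit(run_linked_script('epspdf.rb', sys.argv[1:]))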
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA -# -# IMPORTANT: ALLOW DVIPS TO MAKE _PORTRAIT_ PS WITH WIDTH > HEIGHT -# BY ADDING -# -# @ custom 0pt 0pt -# -# TO YOUR /usr/share/texmf/dvips/config/config.pdf -# IF THIS ENTRY IS MISSING, DVIPS WILL GUESS ORIENTATION FROM -# WIDTH / HEIGHT RATIO. THIS STILL CAN HAPPEN IN CASE YOUR INPUT EPS -# MATCHES A STANDARD PAPER SIZE! -# -# Source files: -# _fm.eps -# a source EPS file -# _fm -# a control file containing \psfrag commands and optionally -# special comments: -# % fmclass: -# use instead of "article" -# % fmclassopt: -# use as class options instead of "12pt" -# % head: -# % -# % end head -# causes to be put into the preamble -# % fmopt:

'; - $htmlMacro1b{'section'} = '

\n'; -$htmlMacro1a{'subsection'} = '\n

'; - $htmlMacro1b{'subsection'} = '

\n'; -$htmlMacro1a{'subsubsection'} = '\n
'; - $htmlMacro1b{'subsubsection'} = '
\n'; -$htmlMacro1a{'Email'} = '\n'; - $htmlMacro1b{'Email'} = ''; -$htmlMacro1a{'URL'} = '\n'; - $htmlMacro1b{'URL'} = ''; - -$htmlMacro1a{'Prog'} = ''; - $htmlMacro1b{'Prog'} = ''; -$htmlMacro1a{'File'} = ''; - $htmlMacro1b{'File'} = ''; -$htmlMacro1a{'Opt'} = ''; - $htmlMacro1b{'Opt'} = ''; -$htmlMacro1a{'oOpt'} = '['; - $htmlMacro1b{'oOpt'} = ']'; -$htmlMacro1a{'Arg'} = ''; - $htmlMacro1b{'Arg'} = ''; -$htmlMacro1a{'oArg'} = '['; - $htmlMacro1b{'oArg'} = ']'; - -# LaTeX macros with two arguments -$htmlMacro2a{'Cmd'} = ''; - $htmlMacro2b{'Cmd'} = '('; - $htmlMacro2c{'Cmd'} = ')'; -$htmlMacro2a{'OptArg'} = ''; - $htmlMacro2b{'OptArg'} = ''; - $htmlMacro2c{'OptArg'} = ''; -$htmlMacro2a{'OptoArg'} = ''; - $htmlMacro2b{'OptoArg'} = '['; - $htmlMacro2c{'OptoArg'} = ']'; -$htmlMacro2a{'oOptArg'} = '['; - $htmlMacro2b{'oOptArg'} = ''; - $htmlMacro2c{'oOptArg'} = ']'; -$htmlMacro2a{'oOptoArg'} = '['; - $htmlMacro2b{'oOptoArg'} = '['; - $htmlMacro2c{'oOptoArg'} = ']]'; -$htmlMacro2a{'setlength'} = ''; - $htmlMacro2b{'setlength'} = ''; - $htmlMacro2c{'setlength'} = ''; - -# we handle sections in HTML as having two arguments, 1. the number, 2. the name -$htmlMacro2a{'section'} = '\n

'; - $htmlMacro2c{'section'} = '

\n'; -$htmlMacro2a{'subsection'} = '\n

'; - $htmlMacro2c{'subsection'} = '

\n'; -$htmlMacro2a{'subsubsection'} = '\n
'; - $htmlMacro2c{'subsubsection'} = '
\n'; - -# we handle Email and URL special in HTML, the LaTeX argument is doubled. -$htmlMacro2a{'Email'} = ''; - $htmlMacro2c{'Email'} = ''; -$htmlMacro2a{'URL'} = ''; - $htmlMacro2c{'URL'} = ''; - -######################################################################## -# Translation for LaTeX macros for TexInfo - -# translation of special characters -$texiLetterCode{'ä'} = '@"a'; -$texiLetterCode{'ö'} = '@"o'; -$texiLetterCode{'ü'} = '@"u'; -$texiLetterCode{'Ä'} = '@"A'; -$texiLetterCode{'Ö'} = '@"O'; -$texiLetterCode{'Ü'} = '@"U'; -$texiLetterCode{'ß'} = '@ss{}'; - -# LaTeX macros without arguments -$texiMacro{'LaTeX'} = 'LaTeX'; -$texiMacro{'LATEX'} = 'LaTeX'; # needed, since \LaTeX is contained in a - # section name (which are transposed - # into uppercase -$texiMacro{'itemsep'} = ''; - -# some math -$texiMacro{'rightarrow'} = '-->'; -$texiMacro{'Rightarrow'} = '==>'; -$texiMacro{'leftarrow'} = '<--'; -$texiMacro{'Leftarrow'} = '<=='; -$texiMacro{'ge'} = '>='; -$texiMacro{'le'} = '<='; - -$texiMacro{'Dollar'} = '$'; -$texiMacro{'Bar'} = '|'; -$texiMacro{'Bs'} = '\\'; -$texiMacro{'Tilde'} = '~'; -$texiMacro{'hline'} = ''; -$texiMacro{'noindent'} = '\n@noindent\n'; -$texiMacro{'copyright'} = '@copyright{}'; -$texiMacro{'Dots'} = '...'; -$texiMacro{'Circum'} = '^'; -$texiMacro{'Lbr'} = '['; -$texiMacro{'Rbr'} = ']'; -$texiMacro{'LBr'} = '@{'; -$texiMacro{'RBr'} = '@}'; -$texiMacro{'Percent'} = '%'; -$texiMacro{'Bullet'} = '*'; -$texiMacro{'TEXbr'} = ''; -$texiMacro{'MANbr'} = ''; -$texiMacro{'TEXIbr'} = '@*\n'; -$texiMacro{'HTMLbr'} = ''; -$texiMacro{'medskip'} = '@sp 2\n'; -$texiMacro{'SP'} = '@ @ '; - -if ($opt_a) { - $texiMacro{'SPfirst'} = $opt_a . '@ '; - } else { - $texiMacro{'SPfirst'} = '.@ '; -} - -$texiMacro{'~'} = ' '; -$texiMacro{'|'} = '|'; -$texiMacro{'<'} = '<'; -$texiMacro{'>'} = '>'; -$texiMacro{'<='} = '<='; -$texiMacro{'>='} = '>='; -$texiMacro{'='} = '='; -$texiMacro{'<>'} = '<>'; -$texiMacro{'{'} = '@{'; -$texiMacro{'}'} = '@}'; -$texiMacro{'_'} = '_'; -$texiMacro{'$'} = '$'; -$texiMacro{'#'} = '#'; -$texiMacro{'&'} = '&'; -$texiMacro{'%'} = '%'; -$texiMacro{'-'} = '@-'; -$texiMacro{','} = ' '; - -$texiMacro{'\\'} = '@*\n'; # line break -$texiMacro{'\\Tab'} = '\n'; # end of column in a table environment - -# LaTeX macros with one argument -$texiMacro1a{'emph'} = '@emph{'; $texiMacro1b{'emph'} = '}'; -$texiMacro1a{'textbf'} = '@strong{'; $texiMacro1b{'textbf'} = '}'; -$texiMacro1a{'texttt'} = '@t{'; $texiMacro1b{'texttt'} = '}'; -$texiMacro1a{'verb'} = '@t{'; $texiMacro1b{'verb'} = '}'; -$texiMacro1a{'underline'} = ''; $texiMacro1b{'underline'} = ''; -$texiMacro1a{'section'} = '\n@section '; $texiMacro1b{'section'} = '\n'; -$texiMacro1a{'subsection'} = '\n@subsection '; $texiMacro1b{'subsection'} = '\n'; -$texiMacro1a{'subsubsection'} = '\n@subsubsection '; $texiMacro1b{'subsubsection'} = '\n'; - -$texiMacro1a{'Prog'} = ''; $texiMacro1b{'Prog'} = ''; -$texiMacro1a{'File'} = '@file{'; $texiMacro1b{'File'} = '}'; -$texiMacro1a{'Opt'} = ''; $texiMacro1b{'Opt'} = ''; -$texiMacro1a{'oOpt'} = '[ '; $texiMacro1b{'oOpt'} = ' ]'; -$texiMacro1a{'Arg'} = '@var{'; $texiMacro1b{'Arg'} = '}'; -$texiMacro1a{'oArg'} = '[ @var{'; $texiMacro1b{'oArg'} = '} ]'; -$texiMacro1a{'Email'} = '@email{'; $texiMacro1b{'Email'} = '}'; -$texiMacro1a{'URL'} = '@url{'; $texiMacro1b{'URL'} = '}'; - -# LaTeX macros with two arguments -$texiMacro2a{'Cmd'} = ''; - $texiMacro2b{'Cmd'} = '('; - $texiMacro2c{'Cmd'} = ')'; -$texiMacro2a{'OptArg'} = ''; - $texiMacro2b{'OptArg'} = '@var{'; - 
$texiMacro2c{'OptArg'} = '}'; -$texiMacro2a{'OptoArg'} = ''; - $texiMacro2b{'OptoArg'} = '[@var{'; - $texiMacro2c{'OptoArg'} = '}]'; -$texiMacro2a{'oOptArg'} = '[ '; - $texiMacro2b{'oOptArg'} = '@var{'; - $texiMacro2c{'oOptArg'} = '} ]'; -$texiMacro2a{'oOptoArg'} = '[ '; - $texiMacro2b{'oOptoArg'} = '[@var{'; - $texiMacro2c{'oOptoArg'} = '}] ]'; -$texiMacro2a{'setlength'} = ''; - $texiMacro2b{'setlength'} = ''; - $texiMacro2c{'setlength'} = ''; - -######################################################################## -# reading of translations for user macros - -if ($opt_t) { - do $opt_t; -} - -######################################################################## -# processing for MAN - -sub manStart -{ - printf DEST "\'\\\" t\n"; # process with tbl - printf DEST ".\\\" Manual page created with $CMD on $gen_date\n"; - printf DEST ".\\\" NOTE: This file is generated, DO NOT EDIT.\n"; - - # Definitionen von Verbatimbegin and Verbatimend - Print ".de Vb\n.ft CW\n.nf\n..\n.de Ve\n.ft R\n\n.fi\n..\n"; - - Print ".TH \"$Name\" \"$chapter\" \"". $date ."\" \""; - interpret_word "$tool"; - Print "\" \""; interpret_word "$tool"; Print "\""; NL; - # thanks to Andrew Anderson -} -sub manEnd -{ - NL; printf DEST ".\\\" NOTE: This file is generated, DO NOT EDIT.\n"; -} -sub manSection -{ - my ($cnt, $kind, $section) = @_; - if ($kind ne "subsubsection"){ - $section = uc $section; - } - interpret_line "\\$kind\{$section\}"; -} -sub manParagraph -{ - if (!$paragraph) { - if ($manRS == 0 && $list_nest > 1) { - Print '\n.RS'; - $manRS = 1; - } - Print '\n.PP\n'; - $paragraph = 1; - } -} -sub manVerb -{ - my $arg = $_[0]; - if ($arg =~ /^\./) { print DEST '\\&' }; - Print $arg -} -sub manItemWithArg -{ - my $arg = $_[0]; - if ($manRS == 1) { - Print '\n.RE\n'; - } - $manRS = 0; - Print '\n.TP\n'; - interpret_word $arg; - PrintM ' '; - NL; -} -sub manItem -{ - if ($manRS == 1) { - Print '\n.RE\n'; - } - $manRS = 0; - Print '\n.TP\n'; - if ($cur_list[$list_nest] eq 'item') { - Print '.B *'; - } elsif ($cur_list[$list_nest] eq 'enum') { - Print $item_nr[$list_nest] . 
'.'; - } - NL; -} -sub manDescriptionStart -{ - if ($list_nest > 1) { - Print '\n.RS\n'; - } -} -sub manDescriptionEnd -{ - if ($manRS) { - Print '\n.RE\n'; - $manRS == 0; - } - if ($list_nest > 1) { - Print '\n.RE\n'; - } - manParagraph; -} -sub manItemStart -{ - if ($list_nest > 1) { - Print '\n.RS\n'; - } -} -sub manItemEnd -{ - if ($manRS) { - Print '\n.RE\n'; - $manRS == 0; - } - if ($list_nest > 1) { - Print '\n.RE\n'; - } - manParagraph; -} -sub manEnumEnd -{ - if ($manRS) { - Print '\n.RE\n'; - $manRS == 0; - } - if ($list_nest > 1) { - Print '\n.RE\n'; - } - manParagraph; -} - -sub manEnumStart -{ - if ($list_nest > 1) { - Print '\n.RS\n'; - } -} -sub manCenterStart -{ - PrintM '\n.ce 100\n'; -} -sub manCenterEnd -{ - PrintM '\n.ce 0\n'; -} -sub manNameStart -{ - interpret_line "\\section\{NAME\}$rest"; -} -sub manNameEnd -{ - # nothing -} -sub manTableStart -{ - my $columns = $_[0]; - my $width = $_[1]; - my $i; - manParagraph; - Print '.TS\n'; - Print 'tab(&);\n'; - for ($i = 1; $i <= $columns; $i++) { - Print " l"; - } - Print "w($width)" if ($width); - Print '.\n'; -} -sub manTableSep -{ - Print '\nT}&T{\n'; -} -sub manTableEnd -{ - Print '\n.TE\n'; - manParagraph; -} - -sub manVerbatimStart -{ - Print '\n.Vb\n'; -} - -sub manVerbatimEnd -{ - Print '.Ve\n'; -} - -sub manVerbatimLine -{ - s/\\/\\\\/g; - s/-/\\-/g; - print DEST "$_"; -} - -########################################################################### -# processing for HTML - -sub htmlStart -{ - Print ""; NL; - Print ""; NL; - Print ""; NL; - Print "$Name"; NL; - Print ""; NL; - Print "

"; NL; - interpret_line $title; - Print "

"; NL; - Print "

"; interpret_word $author; Print "

"; NL; - Print "

$date

"; NL; - Print "

Version $version

"; NL; -} -sub htmlEnd -{ - Print ""; NL; - Print ""; NL; - Print ""; NL; -} -sub htmlSection -{ - my ($cnt, $kind, $section) = @_; - interpret_line "\\$kind\{$cnt\}\{$section\}"; -} -sub htmlCenterStart -{ - Print '\n
\n'; -} -sub htmlCenterEnd -{ - Print '\n
\n'; -} -sub htmlNameStart -{ - # nothing -} -sub htmlNameEnd -{ - Print '\n@@INSERTION-POINT@@-TOC@@\n'; -} -sub htmlParagraph -{ - if (!$paragraph) { - NL; Print "

"; NL; - $paragraph = 1; - } -} -sub htmlVerb -{ - $arg = $_[0]; - $arg =~ s/&/&/g; - $arg =~ s/>/>/g; - $arg =~ s/ 1) { - NL; Print ""; NL; - } - Print "

"; - interpret_word $arg; Print "
"; NL; - Print "
"; -} -sub htmlItem -{ - if ($item_nr[$list_nest] > 1) { - Print '\n'; - } - if ($cur_list[$list_nest] eq 'item') { - Print '
  • '; - } elsif ($cur_list[$list_nest] eq 'enum') { - Print '
  • '; - } -} -sub htmlDescriptionStart -{ - NL; Print "
    "; NL; -} -sub htmlDescriptionEnd -{ - NL; Print "
  • \n"; NL; -} -sub htmlItemStart -{ - NL; Print "
      "; NL; -} -sub htmlItemEnd -{ - NL; Print "\n
    "; NL; -} -sub htmlEnumStart -{ - NL; Print "
      "; NL; -} -sub htmlEnumEnd -{ - NL; Print "\n
    "; NL; -} -sub htmlTableStart -{ - my $columns = $_[0]; - my $width = $_[1]; - NL; Print ""; NL: -} -sub htmlTableSep -{ - if ($first_column == 0) { - Print '\n'; - } - Print '
    '; -} -sub htmlTableEnd -{ - NL; Print "
    "; NL; -} - -sub htmlVerbatimStart -{ - NL; Print '
    '; NL;
    -}
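For comparison with these HTML verbatim handlers, the man backend earlier in this hunk escapes every verbatim line (manVerbatimLine) before printing it between the .Vb/.Ve troff macros defined in manStart. A small Python rendering of that escaping, for illustration only:

    # What manVerbatimLine does to each line of a verbatim block (sketch).
    def man_verbatim_line(line):
        line = line.replace('\\', '\\\\')   # double backslashes for troff
        line = line.replace('-', '\\-')     # protect hyphens from troff
        return line

    print(man_verbatim_line('a\\b - c'))    # -> a\\b \- c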
    -
    -sub htmlVerbatimEnd
    -{
    -    Print '
    '; NL; -} - -sub htmlVerbatimLine -{ - s/&/&/g; - s//>/g; - print DEST "$_"; -} - -########################################################################### -# processing for TexInfo - -sub texiStart -{ - Print '\input texinfo @c -*-texinfo-*-'; NL; - Print '@c %**start of header'; NL; - Print '@setfilename ' . "$name.info"; NL; - Print '@settitle ' . "$name"; NL; - Print '@c %**end of header'; NL; - Print '@c Manual page created with' ." $CMD on $gen_date>"; NL; - Print '@c NOTE: This file is generated, DO NOT EDIT.'; NL; -} -sub texiEnd -{ - Print '@bye'; NL; - Print '@c NOTE: This file is generated, DO NOT EDIT.'; NL; -} -sub texiSection -{ - my ($cnt, $kind, $section) = @_; - if (uc $sections[$cnt-1] eq "SYNOPSIS") { - Print '\n@@INSERTION-POINT@@-TOC@@\n'; - $sections[$cnt-1] = "Top"; # The predecessor node is Top and not SYNOPSIS - } - if (uc $sections[$cnt] eq "SYNOPSIS") { - $cnt == 1 || - die "$CMD: The Synopsis section must be the first section after\n" . - "\t the Name environment\n"; - } else { - Print '\n@@INSERTION-POINT@@-TEXI-SEC@@' . " $kind $cnt" . '\n'; - } - interpret_line "\\$kind\{$section\}"; -} -sub texiNameStart -{ - my ($name, $chapter, $author, $tool) = @_; - $sections[0] = "Top"; - # Print '@dircategory ' .$tool; NL; - Print '@dircategory Man-pages'; NL; - Print '@direntry'; NL; - Print "* " . (ucfirst $name) . ": ($name). Its Man-Page "; NL; - Print '@end direntry'; NL; - Print '@titlepage'; NL; - Print '@title ' . "$name"; NL; - Print '@subtitle ' . "$tool"; NL; - Print '@author ' . "$author"; NL; - Print '@end titlepage'; NL; - Print '\n@@INSERTION-POINT@@-TEXI-TOP@@'; NL; - Print '@top ' . "$name"; NL; -} -sub texiNameEnd -{ - # nothing -} -sub texiParagraph -{ - if (!$paragraph) { - NL; print DEST "\n"; - $paragraph = 1; - } -} -sub texiVerb -{ - $arg = $_[0]; - $arg =~ s/({|})/\@$1/g; - Print $arg; -} -sub texiItemWithArg -{ - my $arg = $_[0]; - Print '\n@item '; - interpret_word $arg; - NL; -} -sub texiItem -{ - Print '\n@item\n'; -} -sub texiDescriptionStart -{ - Print '\n@table @samp\n'; -} -sub texiDescriptionEnd -{ - Print '\n@end table\n'; -} -sub texiItemStart -{ - Print '\n@itemize @bullet\n'; -} -sub texiItemEnd -{ - Print '\n@end itemize\n'; -} -sub texiCenterStart -{ - $texiCenterLine = 1; - $newline = 0; - $texiMacro{'\\'} = '@*'; # line break - $texiMacro{'TEXIbr'} = '@*'; - NL; -} -sub texiCenterEnd -{ - $texiCenterLine = 0; - $newline = 0; - $texiMacro{'\\'} = '@*\n'; # line break - $texiMacro{'TEXIbr'} = '@*\n'; - NL; -} - -sub texiEnumStart -{ - Print '\n@enumerate\n'; -} -sub texiEnumEnd -{ - Print '\n@end enumerate\n'; -} -sub texiTableStart -{ - my $columns = $_[0]; - my $width = $_[1]; - my $i; - Print '\n@multitable @columnfractions '; - for ($i = 1; $i <= $columns; $i++) { - Print " " .0.9/$columns ; - } - Print '\n'; -} -sub texiTableSep -{ - Print '@tab '; -} -sub texiTableEnd -{ - Print '\n@end multitable\n'; -} - -sub texiVerbatimStart -{ - NL; - Print '@*'; NL -} - -sub texiVerbatimEnd -{ - NL; -} - -sub texiVerbatimLine -{ - s/({|}|@| )/@\1/g; - chop; - print DEST ".$_\@*\n"; -} - -########################################################################### -########################################################################### -# general processing - -# emit an error message is the given macro does not exists. 
-sub check_Macro -{ - exists $Macro->{$_[0]} || - die "Error in line $.: no such macro: \\$_[0]\n"; -} -sub check_Macro1 -{ - (exists $Macro1a->{$_[0]} && exists $Macro1b->{$_[0]}) || - die "$CMD: Error in line $.: no such macro: \\$_[0]\n"; -} -sub check_Macro2 -{ - (exists $Macro2a->{$_[0]} && exists $Macro2b->{$_[0]} && exists $Macro2c->{$_[0]}) || - die "$CMD: Error in line $.: no such macro: \\$_[0]\n"; -} - -sub NL -{ - if (!$newline) { - printf DEST "\n"; - if ($texiCenterLine) { - print DEST "\@center "; - } - $newline = 1; - } -} - -sub interpret_word -{ - if (@_ <= 0) { - return; - } - $_ = join " ", @_; - my ($s,$m,$a1,$a2,$r); # start, match/macro, argument1, argument2 - my $add_blank = 1; # if true, add a blank after the word - if ($opt_D == 2) { - if ($nesting == 0) { - print "**** "; - } else { - print " "; - } - print "\`$_'\n"; - } - - if ($opt_H) { - # handling of HTML table rows - if ($inside_table == 1) { - if ($first_column == 1) { - if (/^$/) { - return; - } - if (/^\\hline/) { - Print '\n
    \n'; - } - Print '\n\n'; - } - $first_column = 0; - } - } elsif ($opt_M) { - # handling of troff table rows - if ($inside_table == 1) { - if ($first_column == 1) { - if (/^$/) { - return; - } - Print 'T{\n'; - } - $first_column = 0; - } - } elsif ($opt_T) { - # handling of TexInfo specific stuff - if ($nesting == 0) { - s'@'@@'g; - } - if ($inside_table == 1) { - if ($first_column == 1) { - Print '\n@item '; - } - $first_column = 0; - } - } - - $nesting ++; - - SWITCH: { - /^$/ && do {$add_blank = 0; - last SWITCH; - }; - /\\verb\+([^+]*)\+/ && do {$s=$`;$m=$1;$r=$'; - interpret_word $s; - PrintM $Macro1a->{'verb'}; - &{$Prefix . "Verb"} ($m); - PrintM $Macro1b->{'verb'}; - interpret_word $r; - last SWITCH; - }; - /\\(".|ss)/ && do {$s=$`;$m=$1;$r=$'; #" - interpret_word $s; - check_Macro $m; - PrintM $Macro->{$m}; - interpret_word $r; - last SWITCH; - }; - /\\item\s*\[([^]]*)\]/ && do {$s=$`;$m=$1;$r=$'; - interpret_word $s; - $item_nr[$list_nest] ++; - &{$Prefix . "ItemWithArg"} ($m); - interpret_word $r; - last SWITCH; - }; - /\\item\s*/ && do {$s=$`;$r=$'; - interpret_word $s; - $item_nr[$list_nest] ++; - &{$Prefix . "Item"}; - interpret_word $r; - last SWITCH; - }; - # LaTeX macros with two arguments - /\\([a-zA-Z]+){([^}]*)}{([^}]*)}/ - && do {$s=$`;$m=$1;$a1=$2;$a2=$3;$r=$'; - check_Macro2 $m; - interpret_word $s; - PrintM $Macro2a->{$m}; - interpret_word $a1; - PrintM $Macro2b->{$m}; - interpret_word $a2; - PrintM $Macro2c->{$m}; - interpret_word $r; - NL; - last SWITCH; - }; - # Special Handling of Email and URL LaTeX macros with one argument - /\\(URL|Email){([^}]*)}/ && ($opt_H) - && do {$s=$`;$m=$1;$a1=$2;$r=$'; - interpret_word $s; - PrintM $Macro2a->{$m}; - interpret_word $a1; - PrintM $Macro2b->{$m}; - interpret_word $a1; - PrintM $Macro2c->{$m}; - interpret_word $r; - NL; - last SWITCH; - }; - # LaTeX macros with one argument - /\\([a-zA-Z]+){([^}]*)}/ && do {$s=$`;$m=$1;$a1=$2;$r=$'; - check_Macro1 $m; - interpret_word $s; - PrintM $Macro1a->{$m}; - interpret_word $a1; - PrintM $Macro1b->{$m}; - interpret_word $r; - NL; - last SWITCH; - }; - # Special handling of some LaTeX macros without an argument - /\\SP\s*/ && do {$s=$`;$m=$1;$r=$'; - interpret_word $s; - if ($first_word) { - PrintM $Macro->{"SPfirst"}; - } else { - PrintM $Macro->{"SP"}; - } - interpret_word $r; - $add_blank = 0; - last SWITCH; - }; - /\\(MANbr|TEXIbr|HTMLbr)\s*/ && do {$s=$`;$m=$1;$r=$'; - # set $first_word to true - check_Macro $m; - interpret_word $s; - PrintM $Macro->{$m}; - $first_word = 1; - interpret_word $r; - $add_blank = 0; - last SWITCH; - }; - # LaTeX macros without an argument: - /\\([a-zA-Z]+)\s*/ && do {$s=$`;$m=$1;$r=$'; - check_Macro $m; - interpret_word $s; - PrintM $Macro->{$m}; - interpret_word $r; - $add_blank = 0; - last SWITCH; - }; - /\\({|}|\$|_|#|&|-|%|,|\.|;)/ && do {$s=$`;$m=$1;$r=$'; - interpret_word $s; - PrintM $Macro->{$m}; - interpret_word $r; - last SWITCH; - }; - # LaTeX Math - /\$(<|>|<=|>=|=|<>)\$/ && do {$s=$`;$m=$1;$r=$'; - interpret_word $s; - PrintM $Macro->{$m}; - interpret_word $r; - last SWITCH; - }; - /\$([^\$]*)\$/ && do {$s=$`;$m=$1;$r=$'; - interpret_word $s; - interpret_word $m; - interpret_word $r; - last SWITCH; - }; - /&/ && do {$s=$`;$r=$'; - interpret_word $s; - &{$Prefix . 
"TableSep"}; - $first_column = 0; - interpret_word $r; - last SWITCH; - }; - /~/ && do {$s=$`;$r=$'; - interpret_word $s; - PrintM $Macro->{'~'}; - interpret_word $r; - last SWITCH; - }; - /\\\\/ && do {$s=$`;$r=$'; - interpret_word $s; - if ($inside_table) { - PrintM $Macro->{'\\Tab'}; - $first_column = 1; - if (length ($r) > 0) { - interpret_word $r; - } - } else { - PrintM $Macro->{'\\'}; - $first_word = 1; - interpret_word $r; - } - $add_blank = 0; - last SWITCH; - }; - /\\$|\\ / && do {$s=$`;$r=$'; - # LaTeX explicit blank \ will be - # represented as a single \ at - # the end of the word - interpret_word $s; - Print " "; - interpret_word $r; - last SWITCH; - }; - /\\/ && do {$s=$`;$r=$'; - interpret_word $s; - interpret_word "\\$r"; - last SWITCH; - }; - ($opt_M == 1) && /((^\.|')+)/ && do {$s=$`;$m=$1;$r=$'; - interpret_word $s; - print DEST "\\&$m"; - $newline = 0; - interpret_word $r; - last SWITCH; - }; - Print "$_"; - }; - $nesting --; - Print " " if ($nesting == 0 && $add_blank); -} - -sub interpret_line -{ - my $line = $_[0]; chomp $line; - my @words = split(/\s+/,$line); - my $max = $#words; - my $i; - my $join = 0; # true, if words must be joined - my $word = ""; # the joined word - my $kind = 0; # 1: item[ .. ], 2: { .. }, 3: \verb+ .. + - if ($max < 0) { - # empty line marks a paragraph - &{$Prefix . "Paragraph"}; - $first_word = 1; - return; - } - for ($i = 0; $i <= $max; $i++) { - $_ = $words[$i]; - # printf "\`$words[$i]'"; - if (/^[\s]*$/) { # skip leading blanks - # nothing - } elsif ((!$join || ($kind != 3)) && /^%+/) { # skip comments - last; - } else { - # if blanks in a { .. }, \item[ .. ], \verb+ .. + then we have to joind words - if ($join) { - # check whether this is the last word to be joined - if ($kind == 1) { # item - $join = index ($_, "]") == -1; - } elsif ($kind == 2) { # braces - my @x = $_ =~ /[^\\]}/g; - $join = $#x == -1; - } elsif ($kind == 3) { # verb - $join = index ($_, "+") == -1; - } - $word .= " " . 
$words[$i]; - if (!$join) { - interpret_word $word; - $word = ""; - } - } else { - # check whether we have to join some words - if (/\\item/) { - my $cnt1 = tr/[/[/; - my $cnt2 = tr/]/]/; - $join = $cnt1 != $cnt2 | $cnt1 == 0; - $kind = 1; - } elsif (/\\verb/) { - my $cnt = tr/+/+/; - $join = $cnt % 2 != 0; - $kind = 3; - } else { - my @x = $_ =~ /[^\\]{/g; - my @y = $_ =~ /[^\\]}/g; - $join = $#x != $#y; - $kind = 2; - } - if ($join) { - $word = $words[$i]; - } else { - interpret_word $words[$i]; - } - } - } - } - if ($join) { - interpret_word $word; - } - NL; -} - -sub PrintM -# print only for Macro command text -{ - if (@_ <= 0 || length ($_[0]) == 0) { - return; - } - my $l = shift; - my $c; - $l =~ s/\\n/\n/g; - foreach $c (split ("", $l)) { - if ($c eq "\n") { - NL; - $newline = 0; - } else { - print DEST "$c"; - } - } -} - -sub Print -{ - - # printf "\`$_[0]'\n"; - if (@_ <= 0 || length ($_[0]) == 0) { - return; - } - my $x = $_[0]; - - unless ($x =~ /^\s*$/) { - # if other chars than blanks are printed: - $first_word = 0; - } - - if (!$inside_verb) { - # transform special characters - $x =~ s/([äöüÄÖÜß])/$LetterCode->{"$1"}/go; - } - - my @parts = split /\\n/, $x, -1; - # -1: trailing \n generates an empty list element - my $i; - for ($i = 0; $i <= $#parts; $i++) { - if ($newline) { - # skip leading blanks after a newline - $parts[$i] =~ s/^\s+//; - } - if (length($parts[$i]) > 0) { - if ($opt_M) { - $parts[$i] =~ s/\\/\\\\/g; - $parts[$i] =~ s/-/\\-/g; - } - printf DEST "%s", $parts[$i]; - $newline = 0; - } - if ($i < $#parts) { - NL; - } - } - $paragraph = 0; -} -######################################################################### - -sub date2str -{ - @EnglishMonthName = ('','January','February','March','April','May','June','July', - 'August','September','October','November','December'); - # split date - my ($Year,$Month,$Day) = split (/\//,$_[0]); - return $Day . " " . $EnglishMonthName[$Month] . " " . $Year; -} - -$rcs_date=`date '+%Y/%m/%d/'`; # date of the man-page, overwritten - # by \rcsInfo -$date = date2str ($rcs_date); -$Macro->{'today'} = $date; - -######################################################################### -my @skip; # stack of skip-flags, 1: skip, 0: don't skip -push @skip, 0; # synthetic "outer most" IF, don't skip -my $last_cond_clause = ""; - -sub handle_conditional_text -{ - $_ = $_[0]; -# printf "skip-stack: %s;\t top = %s\n", join (", ", @skip), $skip[-1]; - if (/^\s*%@%\s+IF\s+([^%]*)\s+%@%\s*$/) { - # produce a program, which evaluates the condition: - my $prog_cond = $1; - $prog_cond =~ s/(\w+)/\$$1/g; - my $prog = ""; - my $var; - foreach $var (keys %cond_name) { - if ($var ne "") { - $prog .= "my \$$var = 1; " # declare set names, - # undeclared ones get value 0 - } - } - $prog .= "return ($prog_cond) ? 0 : 1;"; -# print "\n**** [$prog]\n"; - my $skip = eval $prog; - ($@ eq "") || die "$CMD: error in line $.: wrong condition of `%@% IF.. ($@)\n"; -# print "**** skip=$skip\n"; - - $last_cond_clause = "IF"; - if ($skip[-1] == 1) { - # skip this text, since outer IF skips - push @skip, 1; - } else { - # outer IF is not skipped, hence consider this IF - push @skip, $skip; - } - } elsif (/^\s*%@%\s+ELSE\s+%@%\s*$/) { - ($last_cond_clause =~ /IF/ && ($#skip > 0)) || - die "$CMD: error in line $.: `%@% ELSE %@%' without an `%@% IF..'\n"; - $last_cond_clause = "ELSE"; - if ($skip[-2] == 0) { - $skip[-1] = $skip[-1]? 
0 : 1; - } - } elsif (/^\s*%@%\s+END-IF\s+%@%\s*$/) { - $last_cond_clause = "END-IF"; - ($#skip == 0) && - die "$CMD: error in line $.: `%@% END-IF %@%' without an `%@% IF..'\n"; - pop @skip; - } -# print "$last_cond_clause: skip = $skip[-1]\n"; -} - -############################################################################ -# handle LaTeX output -if ($opt_L) { - while () { - if ($opt_D == 1) { - my $line = $_; chop $line; - print "--- \`$line'\n"; - } - - if (/^\s*%@%\s/) { - my $skip = $skip[-1]; - handle_conditional_text ("$_"); - # write %@% directive - print DEST $_; - next; - } - next if ($skip[-1] == 1); - print DEST $_; - } - close (DEST); - exit (0); -} - -############################################################################ -# handle non-LaTeX output - -# read sections -# Variables: $section name of the section in uppercase letters -# $chapter chapter of the man page -# $name name of the man page -# $Name name of the man page in uppercase -# $author author of the man page -# $tool info about the tool set, $name is part of -# $date date -# $version version info -$started = 0; -while () { - if ($opt_D == 1) { - my $line = $_; chop $line; - print "--- \`$line'\n"; - } - - if ((/^\s*%@%\s/) && ($inside_verb==0)) { - my $skip = $skip[-1]; - handle_conditional_text ("$_"); - } - next if ($skip[-1] == 1); - - if ($inside_verb) { - if (/^\s*\\end{verbatim}/) { - if ($started == 1) { - &{$Prefix . "VerbatimEnd"}; - $inside_verb = 0; - } - } else { - &{$Prefix . "VerbatimLine"} ($_); - } - next; - } - - # remove {, } around Umlaute - s/{(\\".)}/\1/g; # " - s/{(\\ss)}/\1/g; - - # normalize special characters - s/\\"a/ä/g; - s/\\"o/ö/g; - s/\\"u/ü/g; - s/\\"A/Ä/g; - s/\\"O/Ö/g; - s/\\"U/Ü/g; - s/\\ss/ß/g; - - if (/^\s*\\rcsInfo \$(.*)\$/) { - my ($rcs_id,$rcs_file,$rcs_revision, - $rcs_date,$rcs_time,$rcs_owner,$rcs_status,$rcs_locker) = split(/\s/,$1); - $date = date2str ($rcs_date); - $Macro->{'today'} = $date; - } elsif (/^\s*\\setDate{\\rcsInfoLongDate}/) { - $Macro->{'Date'} = $date; - } elsif (/^\s*\\setDate{\\today}/) { - $Macro->{'Date'} = $date; - } elsif (/^\s*\\setDate{([^}]*)}/) { - $date = $1; - $date =~ s/~/$Macro->{'~'}/g; - $Macro->{'Date'} = $date; - } elsif (/^\s*\\setVersion{([^}]*)}/) { - $version = $1; - $versin =~ s/~/$Macro->{'~'}/g; - $Macro->{'Version'} = $version; - } elsif (/^\s*\\begin{Name}{([^}]*)}{([^}]*)}{([^}]*)}{([^}]*)}{([^}]*)}/) { - $section = "Name"; - $chapter = $1; - $name = $2; - $Name = uc $name; - $author = $3; - $tool = $4; - $title = $5; - $rest = $'; - $started = 1; - $sections[0] = $section; - $section_cnt = 0; - &{$Prefix . "Start"} ($name, $chapter, $author, $tool, $title); - &{$Prefix . "NameStart"} ($name, $chapter, $author, $tool, $title); - } elsif (/^\s*\\end{Name}/) { - &{$Prefix . "NameEnd"} ($name, $chapter, $author, $tool); - } elsif (/^\s*\\begin{Table}(\[([^]]*)\])?{([^}]*)}/) { - # \begin{Table}[width]{columns} - if ($started == 1) { - $columns = $3; - $column = $_[0]; - $inside_table = 1; - $first_column = 1; - &{$Prefix . "TableStart"} ($columns, $2); - } - } elsif (/^\s*\\end{Table}/) { - if ($started == 1) { - $inside_table = 0; - $first_column = 0; - &{$Prefix . "TableEnd"} ($columns); - } - } elsif (/^\s*\\begin{Description}(\[[^]]*\])?/) { - if ($started == 1) { - $list_nest++; - $cur_list[$list_nest] = 'descr'; - $item_nr[$list_nest] = 0; - &{$Prefix . "DescriptionStart"}; - } - } elsif (/^\s*\\end{Description}/) { - if ($started == 1) { - &{$Prefix . 
"DescriptionEnd"}; - $list_nest--; - } - } elsif (/^\s*\\begin{description}/) { - if ($started == 1) { - $list_nest++; - $cur_list[$list_nest] = 'descr'; - $item_nr[$list_nest] = 0; - &{$Prefix . "DescriptionStart"}; - } - } elsif (/^\s*\\end{description}/) { - if ($started == 1) { - &{$Prefix . "DescriptionEnd"}; - $list_nest--; - } - } elsif (/^\s*\\begin{center}/) { - if ($started == 1) { - &{$Prefix . "CenterStart"}; - } - } elsif (/^\s*\\end{center}/) { - if ($started == 1) { - &{$Prefix . "CenterEnd"}; - } - } elsif (/^\s*\\begin{enumerate}/) { - if ($started == 1) { - $list_nest++; - $cur_list[$list_nest] = 'enum'; - $item_nr[$list_nest] = 0; - &{$Prefix . "EnumStart"} ; - } - } elsif (/^\s*\\end{enumerate}/) { - if ($started == 1) { - &{$Prefix . "EnumEnd"} ; - $list_nest--; - } - } elsif (/^\s*\\begin{itemize}/) { - if ($started == 1) { - $list_nest++; - $cur_list[$list_nest] = 'item'; - $item_nr[$list_nest] = 0; - &{$Prefix . "ItemStart"} ; - } - } elsif (/^\s*\\end{itemize}/) { - if ($started == 1) { - &{$Prefix . "ItemEnd"} ; - $list_nest--; - } - } elsif (/^\s*\\begin{verbatim}/) { - if ($started == 1) { - &{$Prefix . "VerbatimStart"}; - $inside_verb = 1; - } - } elsif (/^\s*\\(subsubsection|subsection|section){([^}]*)}/) { - $kind = $1; - $section = $2; - $section_cnt ++; - $sections[$section_cnt] = $section; - $section_kind[$section_cnt] = $kind; - if ($started == 1) { - &{$Prefix . "Section"} ($section_cnt, $kind, $section); - } - } elsif (/^\s*\\LatexManEnd/) { - last; - } elsif (/^\s*((\\begin{Name|Table|Description})|(\\(sub)?section))/) { - die "$CMD: in line $.\n " . - "Arguments of $1 are not contained in a single " . - "line.\n " . - "Remember: all arguments of a macro must be on the same line.\n"; - } else { - if ($started == 1) { - interpret_line $_; - } - } -} -&{$Prefix . "End"}; - -close DEST; - -if ($opt_H || $opt_T) { - open (TMP, "<$tmp") || die "$CMD: Can't open file \`$tmp' for reading.\n"; - open (DEST, ">$DestFile") || die "$CMD: Can't open file \`$DestFile' for writing.\n"; - while () { - if (/^\@\@INSERTION-POINT\@\@-TOC\@\@$/) { - if ($opt_H) { - # Table of contents for HTML - my $nesting = 0; # nesting of section/subsection/subsubsection - Print '\n

    Table of Contents

    \n'; - for ($i = 1; $i <= $section_cnt; $i++) { - my $cur_nesting = 0; - if ($section_kind[$i] eq "subsubsection") { - $cur_nesting = 3; - } elsif ($section_kind[$i] eq "subsection") { - $cur_nesting = 2; - } elsif ($section_kind[$i] eq "section") { - $cur_nesting = 1; - } - if ($cur_nesting > $nesting) { - # open a new list - Print '\n
      \n' - } - if ($cur_nesting == $nesting) { - # same level, close list item - Print '\n'; - } - if ($cur_nesting < $nesting) { - # close list and list item - for my $i ($cur_nesting .. $nesting-1) { - Print '\n'; - Print '
    \n'; - } - } - # print item - Print "
  • "; - interpret_word $sections[$i]; - Print ""; - $nesting = $cur_nesting; - } - # close remaining lists - for my $i (1 .. $nesting) { - Print '
  • \n'; - Print '\n'; - } - } else { - # Menu of sections for texi - Print '@menu\n'; - for ($i = 2; $i <= $section_cnt; $i++) { - interpret_line "* " . $sections[$i] . "::"; - } - Print '@end menu\n'; - } - } elsif (/\@\@INSERTION-POINT\@\@-TEXI-TOP\@\@/) { - # Texi-top node - Print '@node Top, ' . $sections[2] . ', (dir), (dir)\n'; - } elsif (/\@\@INSERTION-POINT\@\@-TEXI-SEC\@\@ (\w+) (\d+)/) { - # print section header for texi - $kind = $1; - $cnt = $2; - $section = $sections[$cnt]; - Print '@node ' ; - interpret_word "$sections[$cnt], "; - interpret_word "$sections[$cnt+1], "; - interpret_word "$sections[$cnt-1], Top "; - NL; - } else { - print DEST $_; - } - } - close TMP; - close DEST; - unlink $tmp; -} - -######################################################################### - -## Emacs specific: -## Local Variables: *** -## mode: perl *** -## End: *** - diff --git a/Build/source/texk/texlive/linked_scripts/latex2man.x b/Build/source/texk/texlive/linked_scripts/latex2man.x new file mode 100755 index 00000000000..45da55fb578 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/latex2man.x @@ -0,0 +1,1817 @@ +#!/usr/bin/env perl +# Project: Documentation Tools +# Descr: Latex --> MAN-page (groff -man), HTML and TexInfo; +# Language: PERL (>= 5.0) +# Author: Dr. Jürgen Vollmer, Juergen.Vollmer@informatik-vollmer.de +# $Id: latex2man,v 1.151 2008/11/11 07:21:14 vollmer Exp $ +# +# Copyright (C) 1998 Dr. Juergen Vollmer +# Viktoriastrasse 15, D-76133 Karlsruhe, Germany +# Juergen.Vollmer@informatik-vollmer.de +# License: +# This program can be redistributed and/or modified under the terms +# of the LaTeX Project Public License Distributed from CTAN +# archives in directory macros/latex/base/lppl.txt; either +# version 1 of the License, or any later version. +# +# If you find this software useful, please send me a postcard. + +require 5.0004_03; + +use Getopt::Std; + +# use strict 'vars'; + +$CMD=`basename $0`; chop ($CMD); +$gen_date = `date`; chomp $gen_date; # date when the output was generated + +sub date2str; +$VERSION = "1.23"; +$DATE = date2str ('$Date: 2008/11/11 07:21:14 $' =~ m|(\d+/\d+/\d+)|); + +$tmp = "/tmp/$CMD.$$"; + +################################################################## +# check option and arguments +################################################################## + +getopts('o:t:VhMHTLC:D:a:'); # -D1: write each read line -D2: write each word + +sub usage +{ +print <<'END'; +usage: latex2man [-t transfile] [-HTML] [-C name] [-h] [-V] infile outfile. + A tool to translate UNIX manual pages written with LaTeX into a format + understood by the UNIX man(1)-command. + Reads infile, writes outfile. + + -t transfile: Translation for user defined LaTeX macros. + -M: Produce output suitable for the man(1) command (default). + -H: Instead of producing output suitable for the man(1) command, + HTML code is produced (despite of the name of the command). + -T: Instead of producing output suitable for the man(1) command, + TEXINFO code is produced (despite of the name of the command). + -L: Output the LaTeX source. Useful in conjunctin with the -C + option. + -C name: Enable conditional text \`name\'. + To enable more than one conditional name use quotes: + -C 'name1 name2 ...' + The following names are defined automatically: + -H defines HTML + -T defines TEXI + -M defines MAN + -L defines LATEX + -a char: Is used only in conjunction with -T. + Background: + TEXINFO ignores all blanks before the first word on a + new line. 
In order to produce some additional space before + that word (using \SP) some character has to be printed + before the additional space. By default this is a . (dot). + The \`char' specifies an alternative for that first character. + Giving a blank (-a" ") supresses the indentation of a line. + Note: only for the first \SP of a series that char is printed. + -h: Help. + -V: Version. + + Copyright (C) 1998 Dr. Jürgen Vollmer, Viktoriastr. 15, D-76133 Karlsruhe + email: Juergen.Vollmer@informatik-vollmer.de + License: + This program can be redistributed and/or modified under the terms + of the LaTeX Project Public License Distributed from CTAN + archives in directory macros/latex/base/lppl.txt; either + version 1 of the License, or any later version. + + If you find this software useful, please send me a postcard from the place + where you are living. +END + print " Version $VERSION, $DATE.\n"; + exit 1; +} + +($opt_h) && usage; +($opt_V) && print "Version: $VERSION, $DATE\n"; + +# check command line arguments +$opt_cnt = 0; +$opt_cnt++ if ($opt_H); +$opt_cnt++ if ($opt_T); +$opt_cnt++ if ($opt_M); +$opt_cnt++ if ($opt_L); +if ($opt_cnt == 0) { + # if no option -H, -T, -M, -L is given, -M is the default + $opt_M = 1; +} +die "$CMD: you may give only one of the -H -T -M -L options\n" if ($opt_cnt > 1); + +(@ARGV == 2) || die "$CMD: Expected two arguments: infile outfile. Try \`$CMD -h'.\n"; + +my %cond_name; +if ($opt_C) { + my $name; + foreach $name ((split (/\s/, $opt_C))) { + $cond_name{$name} = 1; + } +} +$cond_name{MAN} = 1 if ($opt_M); +$cond_name{HTML} = 1 if ($opt_H); +$cond_name{TEXI} = 1 if ($opt_T); +$cond_name{LATEX} = 1 if ($opt_L); + + +$SrcFile = $ARGV[0]; +$DestFile = $ARGV[1]; +open (SRC, "<$SrcFile") || die "$CMD: Can't open file \`$SrcFile' for reading.\n"; +if ($opt_H || $opt_T) { + # DestFile will be written in the postprocess + open (DEST, ">$tmp") || die "$CMD: Can't open file \`$tmp' for writing.\n"; +} else { + open (DEST, ">$DestFile") || die "$CMD: Can't open file \`$DestFile' for writing.\n"; +} + +######################################################################## + +# global variables + +# $Prefix is used to construct procedure and variable names +if ($opt_M) { + $Prefix = "man"; +} +if ($opt_H) { + $Prefix = "html"; +} +if ($opt_T) { + $Prefix = "texi"; +} + +$texiCenterLine = 0; # true, only in TEXI-mode if a line must be centered +$paragraph = 0; # true, if the last output was a paragraph marker +$newline = 0; # true, if the last output was a newline char + +$first_word = 1; # true, if the next word to be processed is the first + # of a new paragraph or after a line break. 
+ +# handling of itemize/enumerate/description environments: +$list_nest = 0; # counts nesting of itemize/enumerate/description envrionments +$cur_list[0] = "";# array, indexed with list_nest, indicates kind of list: + # values are: 'enum' / 'descr' / 'item' +$item_nr[0] = 0; # array, indexed with list_nest, counts the number of \item in the + # list +$manRS = 0; # true, if for Man a .RS was given after a \item + +$inside_verb = 0; # true, if inside a verbatim environment +$inside_table = 0; # true, if inside a table environment +$first_column = 0; # true, if this is the first column in a table row +$columns = 0; # nr of columns in the current table +$enum_nr = 0; # current number of an enumeration +$nesting = 0; # count recursive calls of interpret_word +$section_cnt = 0; # Index into $sections +#$sections[0] # Array of all sections +#$section_kind # Array of section kind (subsection/section) + +# translation of LaTeX macros without, with one and with two arguments +$Macro = \%{$Prefix . "Macro"}; +$Macro1a = \%{$Prefix . "Macro1a"}; +$Macro1b = \%{$Prefix . "Macro1b"}; +$Macro2a = \%{$Prefix . "Macro2a"}; +$Macro2b = \%{$Prefix . "Macro2b"}; +$Macro2c = \%{$Prefix . "Macro2c"}; + +# translations of special characters +$LetterCode = \%{$Prefix . "LetterCode"}; + +######################################################################## + +sub interpret_word; +sub interpret_line; +sub Print; +sub PrintM; +sub NL; + +######################################################################## +# Translation for LaTeX macros for MAN + +# translation of special characters +$manLetterCode{'ä'} = 'ä'; +$manLetterCode{'ö'} = 'ö'; +$manLetterCode{'ü'} = 'ü'; +$manLetterCode{'Ä'} = 'Ä'; +$manLetterCode{'Ö'} = 'Ö'; +$manLetterCode{'Ü'} = 'Ü'; +$manLetterCode{'ß'} = 'ß'; + +# LaTeX macros without arguments +$manMacro{'LaTeX'} = 'LaTeX'; +$manMacro{'LATEX'} = 'LaTeX'; # needed, since \LaTeX is contained in a + # section name (which are transposed + # into uppercase +$manMacro{'itemsep'} = ' '; + +# some math +$manMacro{'rightarrow'} = '-->'; +$manMacro{'Rightarrow'} = '==>'; +$manMacro{'leftarrow'} = '<--'; +$manMacro{'Leftarrow'} = '<=='; +$manMacro{'ge'} = '>='; +$manMacro{'le'} = '<='; + +$manMacro{'Dollar'} = '$'; +$manMacro{'Bar'} = '|'; +$manMacro{'Bs'} = '\\\\'; +$manMacro{'Tilde'} = '~'; +$manMacro{'hline'} = '\n_'; +$manMacro{'noindent'} = ''; +$manMacro{'copyright'} = '(C)'; +$manMacro{'Dots'} = '\&...\n'; +$manMacro{'Circum'} = '^'; +$manMacro{'Lbr'} = '['; +$manMacro{'Rbr'} = ']'; +$manMacro{'LBr'} = '{'; +$manMacro{'RBr'} = '}'; +$manMacro{'Percent'} = '%'; +$manMacro{'Bullet'} = '*'; +$manMacro{'TEXbr'} = ''; +$manMacro{'MANbr'} = '\n.br\n'; +$manMacro{'TEXIbr'} = ''; +$manMacro{'HTMLbr'} = ''; +$manMacro{'medskip'} = '\n'; +$manMacro{'SP'} = '\fB \fP'; # hack hack this works even on + # the beginning of a line +$manMacro{'SPfirst'} = $manMacro{'SP'}; + +$manMacro{'~'} = ' '; +$manMacro{'|'} = '|'; +$manMacro{'<'} = '<'; +$manMacro{'>'} = '>'; +$manMacro{'<='} = '<='; +$manMacro{'>='} = '>='; +$manMacro{'='} = '='; +$manMacro{'<>'} = '<>'; +$manMacro{'{'} = '{'; +$manMacro{'}'} = '}'; +$manMacro{'_'} = '_'; +$manMacro{'$'} = '$'; +$manMacro{'#'} = '#'; +$manMacro{'&'} = '&'; +$manMacro{'%'} = '%'; +$manMacro{'-'} = ''; +$manMacro{','} = ' '; + +$manMacro{'\\'} = '\n.br'; # line break +$manMacro{'\\Tab'} = '\nT}'; # end of column in a table environment + +# LaTeX macros with one argument +$manMacro1a{'emph'} = '\fI'; + $manMacro1b{'emph'} = '\fP'; +$manMacro1a{'textbf'} = '\fB'; + 
$manMacro1b{'textbf'} = '\fP'; +$manMacro1a{'texttt'} = ''; + $manMacro1b{'texttt'} = ''; +$manMacro1a{'verb'} = ''; + $manMacro1b{'verb'} = ''; +$manMacro1a{'underline'} = '\n.ul\n'; + $manMacro1b{'underline'}= '\n'; +$manMacro1a{'section'} = '\n.SH '; + $manMacro1b{'section'} = '\n'; +$manMacro1a{'subsection'} = '\n.SS '; + $manMacro1b{'subsection'} = ''; +$manMacro1a{'subsubsection'} = '\n.SS '; + $manMacro1b{'subsubsection'} = ''; + +$manMacro1a{'Prog'} = ''; + $manMacro1b{'Prog'} = ''; +$manMacro1a{'File'} = ''; + $manMacro1b{'File'} = ''; +$manMacro1a{'Opt'} = '\fB'; + $manMacro1b{'Opt'} = '\fP'; +$manMacro1a{'oOpt'} = '[\fB'; + $manMacro1b{'oOpt'} = '\fP]'; +$manMacro1a{'Arg'} = '\fI'; + $manMacro1b{'Arg'} = '\fP'; +$manMacro1a{'oArg'} = '[\fI'; + $manMacro1b{'oArg'} = '\fP]'; +$manMacro1a{'Email'} = '\fB'; + $manMacro1b{'Email'} = '\fP'; +$manMacro1a{'URL'} = '\fB'; + $manMacro1b{'URL'} = '\fP'; + +# LaTeX macros with two arguments +$manMacro2a{'Cmd'} = '\fI'; + $manMacro2b{'Cmd'} = '\fP('; + $manMacro2c{'Cmd'} = ')'; +$manMacro2a{'OptArg'} = '\fB'; + $manMacro2b{'OptArg'} = '\fP\fI'; + $manMacro2c{'OptArg'} = '\fP'; +$manMacro2a{'OptoArg'} = '\fB'; + $manMacro2b{'OptoArg'} = '\fP[\fI'; + $manMacro2c{'OptoArg'} = '\fP]'; +$manMacro2a{'oOptArg'} = '[\fB'; + $manMacro2b{'oOptArg'} = '\fP\fI'; + $manMacro2c{'oOptArg'} = '\fP]'; +$manMacro2a{'oOptoArg'} = '[\fB'; + $manMacro2b{'oOptoArg'} = '\fP[\fI'; + $manMacro2c{'oOptoArg'} = '\fP]]'; +$manMacro2a{'setlength'} = ''; + $manMacro2b{'setlength'}= ''; + $manMacro2c{'setlength'}= ''; + +######################################################################## +# Translation for LaTeX macros for HTML + +# translation of special characters +$htmlLetterCode{'ä'} = 'ä'; +$htmlLetterCode{'ö'} = 'ö'; +$htmlLetterCode{'ü'} = 'ü'; +$htmlLetterCode{'Ä'} = 'Ä'; +$htmlLetterCode{'Ö'} = 'Ö'; +$htmlLetterCode{'Ü'} = 'Ü'; +$htmlLetterCode{'ß'} = 'ß'; + +# LaTeX macros without arguments +$htmlMacro{'LaTeX'} = 'LaTeX'; +$htmlMacro{'LATEX'} = 'LaTeX'; # needed, since \LaTeX is contained in a + # section name (which are transposed + # into uppercase +$htmlMacro{'itemsep'} = ''; + +# some math +$htmlMacro{'rightarrow'} = '-->'; +$htmlMacro{'Rightarrow'} = '==>'; +$htmlMacro{'leftarrow'} = '<--'; +$htmlMacro{'Leftarrow'} = '<=='; +$htmlMacro{'ge'} = '>'; +$htmlMacro{'le'} = '<='; + +$htmlMacro{'Dollar'} = '$'; +$htmlMacro{'Bar'} = '|'; +$htmlMacro{'Bs'} = '\\'; +$htmlMacro{'Tilde'} = '~'; +$htmlMacro{'hline'} = ''; +$htmlMacro{'noindent'} = ''; +$htmlMacro{'copyright'} = '©'; +$htmlMacro{'Dots'} = '...'; +$htmlMacro{'Circum'} = '^'; +$htmlMacro{'Lbr'} = '['; +$htmlMacro{'Rbr'} = ']'; +$htmlMacro{'LBr'} = '{'; +$htmlMacro{'RBr'} = '}'; +$htmlMacro{'Percent'} = '%'; +$htmlMacro{'Bullet'} = '*'; +$htmlMacro{'TEXbr'} = ''; +$htmlMacro{'MANbr'} = ''; +$htmlMacro{'TEXIbr'} = ''; +$htmlMacro{'HTMLbr'} = '
    \n'; +$htmlMacro{'medskip'} = '
    \n'; +$htmlMacro{'SP'} = '  '; +$htmlMacro{'SPfirst'} = $htmlMacro{'SP'}; + +$htmlMacro{'~'} = ' '; +$htmlMacro{'|'} = '|'; +$htmlMacro{'<'} = '<'; +$htmlMacro{'>'} = '>'; +$htmlMacro{'<='} = '<='; +$htmlMacro{'>='} = '>='; +$htmlMacro{'='} = '='; +$htmlMacro{'<>'} = '<>'; +$htmlMacro{'{'} = '{'; +$htmlMacro{'}'} = '}'; +$htmlMacro{'_'} = '_'; +$htmlMacro{'$'} = '$'; +$htmlMacro{'#'} = '#'; +$htmlMacro{'&'} = '&'; +$htmlMacro{'%'} = '%'; +$htmlMacro{'-'} = ''; +$htmlMacro{','} = ' '; + +$htmlMacro{'\\'} = '
    \n'; # line break +$htmlMacro{'\\Tab'} = '\n\n'; # end of column in a table environment + +# LaTeX macros with one argument +$htmlMacro1a{'emph'} = ''; + $htmlMacro1b{'emph'} = ''; +$htmlMacro1a{'textbf'} = ''; + $htmlMacro1b{'textbf'} = ''; +$htmlMacro1a{'texttt'} = ''; + $htmlMacro1b{'texttt'} = ''; +$htmlMacro1a{'verb'} = ''; + $htmlMacro1b{'verb'} = ''; +$htmlMacro1a{'underline'} = ''; + $htmlMacro1b{'underline'} = ''; +$htmlMacro1a{'section'} = '\n

    '; + $htmlMacro1b{'section'} = '

    \n'; +$htmlMacro1a{'subsection'} = '\n

    '; + $htmlMacro1b{'subsection'} = '

    \n'; +$htmlMacro1a{'subsubsection'} = '\n
    '; + $htmlMacro1b{'subsubsection'} = '
    \n'; +$htmlMacro1a{'Email'} = '\n'; + $htmlMacro1b{'Email'} = ''; +$htmlMacro1a{'URL'} = '\n'; + $htmlMacro1b{'URL'} = ''; + +$htmlMacro1a{'Prog'} = ''; + $htmlMacro1b{'Prog'} = ''; +$htmlMacro1a{'File'} = ''; + $htmlMacro1b{'File'} = ''; +$htmlMacro1a{'Opt'} = ''; + $htmlMacro1b{'Opt'} = ''; +$htmlMacro1a{'oOpt'} = '['; + $htmlMacro1b{'oOpt'} = ']'; +$htmlMacro1a{'Arg'} = ''; + $htmlMacro1b{'Arg'} = ''; +$htmlMacro1a{'oArg'} = '['; + $htmlMacro1b{'oArg'} = ']'; + +# LaTeX macros with two arguments +$htmlMacro2a{'Cmd'} = ''; + $htmlMacro2b{'Cmd'} = '('; + $htmlMacro2c{'Cmd'} = ')'; +$htmlMacro2a{'OptArg'} = ''; + $htmlMacro2b{'OptArg'} = ''; + $htmlMacro2c{'OptArg'} = ''; +$htmlMacro2a{'OptoArg'} = ''; + $htmlMacro2b{'OptoArg'} = '['; + $htmlMacro2c{'OptoArg'} = ']'; +$htmlMacro2a{'oOptArg'} = '['; + $htmlMacro2b{'oOptArg'} = ''; + $htmlMacro2c{'oOptArg'} = ']'; +$htmlMacro2a{'oOptoArg'} = '['; + $htmlMacro2b{'oOptoArg'} = '['; + $htmlMacro2c{'oOptoArg'} = ']]'; +$htmlMacro2a{'setlength'} = ''; + $htmlMacro2b{'setlength'} = ''; + $htmlMacro2c{'setlength'} = ''; + +# we handle sections in HTML as having two arguments, 1. the number, 2. the name +$htmlMacro2a{'section'} = '\n

    '; + $htmlMacro2c{'section'} = '

    \n'; +$htmlMacro2a{'subsection'} = '\n

    '; + $htmlMacro2c{'subsection'} = '

    \n'; +$htmlMacro2a{'subsubsection'} = '\n
    '; + $htmlMacro2c{'subsubsection'} = '
    \n'; + +# we handle Email and URL special in HTML, the LaTeX argument is doubled. +$htmlMacro2a{'Email'} = ''; + $htmlMacro2c{'Email'} = ''; +$htmlMacro2a{'URL'} = ''; + $htmlMacro2c{'URL'} = ''; + +######################################################################## +# Translation for LaTeX macros for TexInfo + +# translation of special characters +$texiLetterCode{'ä'} = '@"a'; +$texiLetterCode{'ö'} = '@"o'; +$texiLetterCode{'ü'} = '@"u'; +$texiLetterCode{'Ä'} = '@"A'; +$texiLetterCode{'Ö'} = '@"O'; +$texiLetterCode{'Ü'} = '@"U'; +$texiLetterCode{'ß'} = '@ss{}'; + +# LaTeX macros without arguments +$texiMacro{'LaTeX'} = 'LaTeX'; +$texiMacro{'LATEX'} = 'LaTeX'; # needed, since \LaTeX is contained in a + # section name (which are transposed + # into uppercase +$texiMacro{'itemsep'} = ''; + +# some math +$texiMacro{'rightarrow'} = '-->'; +$texiMacro{'Rightarrow'} = '==>'; +$texiMacro{'leftarrow'} = '<--'; +$texiMacro{'Leftarrow'} = '<=='; +$texiMacro{'ge'} = '>='; +$texiMacro{'le'} = '<='; + +$texiMacro{'Dollar'} = '$'; +$texiMacro{'Bar'} = '|'; +$texiMacro{'Bs'} = '\\'; +$texiMacro{'Tilde'} = '~'; +$texiMacro{'hline'} = ''; +$texiMacro{'noindent'} = '\n@noindent\n'; +$texiMacro{'copyright'} = '@copyright{}'; +$texiMacro{'Dots'} = '...'; +$texiMacro{'Circum'} = '^'; +$texiMacro{'Lbr'} = '['; +$texiMacro{'Rbr'} = ']'; +$texiMacro{'LBr'} = '@{'; +$texiMacro{'RBr'} = '@}'; +$texiMacro{'Percent'} = '%'; +$texiMacro{'Bullet'} = '*'; +$texiMacro{'TEXbr'} = ''; +$texiMacro{'MANbr'} = ''; +$texiMacro{'TEXIbr'} = '@*\n'; +$texiMacro{'HTMLbr'} = ''; +$texiMacro{'medskip'} = '@sp 2\n'; +$texiMacro{'SP'} = '@ @ '; + +if ($opt_a) { + $texiMacro{'SPfirst'} = $opt_a . '@ '; + } else { + $texiMacro{'SPfirst'} = '.@ '; +} + +$texiMacro{'~'} = ' '; +$texiMacro{'|'} = '|'; +$texiMacro{'<'} = '<'; +$texiMacro{'>'} = '>'; +$texiMacro{'<='} = '<='; +$texiMacro{'>='} = '>='; +$texiMacro{'='} = '='; +$texiMacro{'<>'} = '<>'; +$texiMacro{'{'} = '@{'; +$texiMacro{'}'} = '@}'; +$texiMacro{'_'} = '_'; +$texiMacro{'$'} = '$'; +$texiMacro{'#'} = '#'; +$texiMacro{'&'} = '&'; +$texiMacro{'%'} = '%'; +$texiMacro{'-'} = '@-'; +$texiMacro{','} = ' '; + +$texiMacro{'\\'} = '@*\n'; # line break +$texiMacro{'\\Tab'} = '\n'; # end of column in a table environment + +# LaTeX macros with one argument +$texiMacro1a{'emph'} = '@emph{'; $texiMacro1b{'emph'} = '}'; +$texiMacro1a{'textbf'} = '@strong{'; $texiMacro1b{'textbf'} = '}'; +$texiMacro1a{'texttt'} = '@t{'; $texiMacro1b{'texttt'} = '}'; +$texiMacro1a{'verb'} = '@t{'; $texiMacro1b{'verb'} = '}'; +$texiMacro1a{'underline'} = ''; $texiMacro1b{'underline'} = ''; +$texiMacro1a{'section'} = '\n@section '; $texiMacro1b{'section'} = '\n'; +$texiMacro1a{'subsection'} = '\n@subsection '; $texiMacro1b{'subsection'} = '\n'; +$texiMacro1a{'subsubsection'} = '\n@subsubsection '; $texiMacro1b{'subsubsection'} = '\n'; + +$texiMacro1a{'Prog'} = ''; $texiMacro1b{'Prog'} = ''; +$texiMacro1a{'File'} = '@file{'; $texiMacro1b{'File'} = '}'; +$texiMacro1a{'Opt'} = ''; $texiMacro1b{'Opt'} = ''; +$texiMacro1a{'oOpt'} = '[ '; $texiMacro1b{'oOpt'} = ' ]'; +$texiMacro1a{'Arg'} = '@var{'; $texiMacro1b{'Arg'} = '}'; +$texiMacro1a{'oArg'} = '[ @var{'; $texiMacro1b{'oArg'} = '} ]'; +$texiMacro1a{'Email'} = '@email{'; $texiMacro1b{'Email'} = '}'; +$texiMacro1a{'URL'} = '@url{'; $texiMacro1b{'URL'} = '}'; + +# LaTeX macros with two arguments +$texiMacro2a{'Cmd'} = ''; + $texiMacro2b{'Cmd'} = '('; + $texiMacro2c{'Cmd'} = ')'; +$texiMacro2a{'OptArg'} = ''; + $texiMacro2b{'OptArg'} = '@var{'; + 
$texiMacro2c{'OptArg'} = '}'; +$texiMacro2a{'OptoArg'} = ''; + $texiMacro2b{'OptoArg'} = '[@var{'; + $texiMacro2c{'OptoArg'} = '}]'; +$texiMacro2a{'oOptArg'} = '[ '; + $texiMacro2b{'oOptArg'} = '@var{'; + $texiMacro2c{'oOptArg'} = '} ]'; +$texiMacro2a{'oOptoArg'} = '[ '; + $texiMacro2b{'oOptoArg'} = '[@var{'; + $texiMacro2c{'oOptoArg'} = '}] ]'; +$texiMacro2a{'setlength'} = ''; + $texiMacro2b{'setlength'} = ''; + $texiMacro2c{'setlength'} = ''; + +######################################################################## +# reading of translations for user macros + +if ($opt_t) { + do $opt_t; +} + +######################################################################## +# processing for MAN + +sub manStart +{ + printf DEST "\'\\\" t\n"; # process with tbl + printf DEST ".\\\" Manual page created with $CMD on $gen_date\n"; + printf DEST ".\\\" NOTE: This file is generated, DO NOT EDIT.\n"; + + # Definitions of Verbatimbegin and Verbatimend + Print ".de Vb\n.ft CW\n.nf\n..\n.de Ve\n.ft R\n\n.fi\n..\n"; + + Print ".TH \"$Name\" \"$chapter\" \"". $date ."\" \""; + interpret_word "$tool"; + Print "\" \""; interpret_word "$tool"; Print "\""; NL; + # thanks to Andrew Anderson +} +sub manEnd +{ + NL; printf DEST ".\\\" NOTE: This file is generated, DO NOT EDIT.\n"; +} +sub manSection +{ + my ($cnt, $kind, $section) = @_; + if ($kind ne "subsubsection"){ + $section = uc $section; + } + interpret_line "\\$kind\{$section\}"; +} +sub manParagraph +{ + if (!$paragraph) { + if ($manRS == 0 && $list_nest > 1) { + Print '\n.RS'; + $manRS = 1; + } + Print '\n.PP\n'; + $paragraph = 1; + } +} +sub manVerb +{ + my $arg = $_[0]; + if ($arg =~ /^\./) { print DEST '\\&' }; + Print $arg +} +sub manItemWithArg +{ + my $arg = $_[0]; + if ($manRS == 1) { + Print '\n.RE\n'; + } + $manRS = 0; + Print '\n.TP\n'; + interpret_word $arg; + PrintM ' '; + NL; +} +sub manItem +{ + if ($manRS == 1) { + Print '\n.RE\n'; + } + $manRS = 0; + Print '\n.TP\n'; + if ($cur_list[$list_nest] eq 'item') { + Print '.B *'; + } elsif ($cur_list[$list_nest] eq 'enum') { + Print $item_nr[$list_nest] . 
'.'; + } + NL; +} +sub manDescriptionStart +{ + if ($list_nest > 1) { + Print '\n.RS\n'; + } +} +sub manDescriptionEnd +{ + if ($manRS) { + Print '\n.RE\n'; + $manRS = 0; + } + if ($list_nest > 1) { + Print '\n.RE\n'; + } + manParagraph; +} +sub manItemStart +{ + if ($list_nest > 1) { + Print '\n.RS\n'; + } +} +sub manItemEnd +{ + if ($manRS) { + Print '\n.RE\n'; + $manRS = 0; + } + if ($list_nest > 1) { + Print '\n.RE\n'; + } + manParagraph; +} +sub manEnumEnd +{ + if ($manRS) { + Print '\n.RE\n'; + $manRS = 0; + } + if ($list_nest > 1) { + Print '\n.RE\n'; + } + manParagraph; +} + +sub manEnumStart +{ + if ($list_nest > 1) { + Print '\n.RS\n'; + } +} +sub manCenterStart +{ + PrintM '\n.ce 100\n'; +} +sub manCenterEnd +{ + PrintM '\n.ce 0\n'; +} +sub manNameStart +{ + interpret_line "\\section\{NAME\}$rest"; +} +sub manNameEnd +{ + # nothing +} +sub manTableStart +{ + my $columns = $_[0]; + my $width = $_[1]; + my $i; + manParagraph; + Print '.TS\n'; + Print 'tab(&);\n'; + for ($i = 1; $i <= $columns; $i++) { + Print " l"; + } + Print "w($width)" if ($width); + Print '.\n'; +} +sub manTableSep +{ + Print '\nT}&T{\n'; +} +sub manTableEnd +{ + Print '\n.TE\n'; + manParagraph; +} + +sub manVerbatimStart +{ + Print '\n.Vb\n'; +} + +sub manVerbatimEnd +{ + Print '.Ve\n'; +} + +sub manVerbatimLine +{ + s/\\/\\\\/g; + s/-/\\-/g; + print DEST "$_"; +} + +########################################################################### +# processing for HTML + +sub htmlStart +{ + Print ""; NL; + Print ""; NL; + Print ""; NL; + Print "$Name"; NL; + Print ""; NL; + Print "

    "; NL; + interpret_line $title; + Print "

    "; NL; + Print "

    "; interpret_word $author; Print "

    "; NL; + Print "

    $date

    "; NL; + Print "

    Version $version

    "; NL; +} +sub htmlEnd +{ + Print ""; NL; + Print ""; NL; + Print ""; NL; +} +sub htmlSection +{ + my ($cnt, $kind, $section) = @_; + interpret_line "\\$kind\{$cnt\}\{$section\}"; +} +sub htmlCenterStart +{ + Print '\n
    \n'; +} +sub htmlCenterEnd +{ + Print '\n
    \n'; +} +sub htmlNameStart +{ + # nothing +} +sub htmlNameEnd +{ + Print '\n@@INSERTION-POINT@@-TOC@@\n'; +} +sub htmlParagraph +{ + if (!$paragraph) { + NL; Print "

    "; NL; + $paragraph = 1; + } +} +sub htmlVerb +{ + $arg = $_[0]; + $arg =~ s/&/&/g; + $arg =~ s/>/>/g; + $arg =~ s/ 1) { + NL; Print ""; NL; + } + Print "

    "; + interpret_word $arg; Print "
    "; NL; + Print "
    "; +} +sub htmlItem +{ + if ($item_nr[$list_nest] > 1) { + Print '\n'; + } + if ($cur_list[$list_nest] eq 'item') { + Print '
  • '; + } elsif ($cur_list[$list_nest] eq 'enum') { + Print '
  • '; + } +} +sub htmlDescriptionStart +{ + NL; Print "
    "; NL; +} +sub htmlDescriptionEnd +{ + NL; Print "
  • \n"; NL; +} +sub htmlItemStart +{ + NL; Print "
      "; NL; +} +sub htmlItemEnd +{ + NL; Print "\n
    "; NL; +} +sub htmlEnumStart +{ + NL; Print "
      "; NL; +} +sub htmlEnumEnd +{ + NL; Print "\n
    "; NL; +} +sub htmlTableStart +{ + my $columns = $_[0]; + my $width = $_[1]; + NL; Print ""; NL: +} +sub htmlTableSep +{ + if ($first_column == 0) { + Print '\n'; + } + Print '
    '; +} +sub htmlTableEnd +{ + NL; Print "
    "; NL; +} + +sub htmlVerbatimStart +{ + NL; Print '
    '; NL;
    +}
    +
    +sub htmlVerbatimEnd
    +{
    +    Print '
    '; NL; +} + +sub htmlVerbatimLine +{ + s/&/&/g; + s//>/g; + print DEST "$_"; +} + +########################################################################### +# processing for TexInfo + +sub texiStart +{ + Print '\input texinfo @c -*-texinfo-*-'; NL; + Print '@c %**start of header'; NL; + Print '@setfilename ' . "$name.info"; NL; + Print '@settitle ' . "$name"; NL; + Print '@c %**end of header'; NL; + Print '@c Manual page created with' ." $CMD on $gen_date>"; NL; + Print '@c NOTE: This file is generated, DO NOT EDIT.'; NL; +} +sub texiEnd +{ + Print '@bye'; NL; + Print '@c NOTE: This file is generated, DO NOT EDIT.'; NL; +} +sub texiSection +{ + my ($cnt, $kind, $section) = @_; + if (uc $sections[$cnt-1] eq "SYNOPSIS") { + Print '\n@@INSERTION-POINT@@-TOC@@\n'; + $sections[$cnt-1] = "Top"; # The predecessor node is Top and not SYNOPSIS + } + if (uc $sections[$cnt] eq "SYNOPSIS") { + $cnt == 1 || + die "$CMD: The Synopsis section must be the first section after\n" . + "\t the Name environment\n"; + } else { + Print '\n@@INSERTION-POINT@@-TEXI-SEC@@' . " $kind $cnt" . '\n'; + } + interpret_line "\\$kind\{$section\}"; +} +sub texiNameStart +{ + my ($name, $chapter, $author, $tool) = @_; + $sections[0] = "Top"; + # Print '@dircategory ' .$tool; NL; + Print '@dircategory Man-pages'; NL; + Print '@direntry'; NL; + Print "* " . (ucfirst $name) . ": ($name). Its Man-Page "; NL; + Print '@end direntry'; NL; + Print '@titlepage'; NL; + Print '@title ' . "$name"; NL; + Print '@subtitle ' . "$tool"; NL; + Print '@author ' . "$author"; NL; + Print '@end titlepage'; NL; + Print '\n@@INSERTION-POINT@@-TEXI-TOP@@'; NL; + Print '@top ' . "$name"; NL; +} +sub texiNameEnd +{ + # nothing +} +sub texiParagraph +{ + if (!$paragraph) { + NL; print DEST "\n"; + $paragraph = 1; + } +} +sub texiVerb +{ + $arg = $_[0]; + $arg =~ s/({|})/\@$1/g; + Print $arg; +} +sub texiItemWithArg +{ + my $arg = $_[0]; + Print '\n@item '; + interpret_word $arg; + NL; +} +sub texiItem +{ + Print '\n@item\n'; +} +sub texiDescriptionStart +{ + Print '\n@table @samp\n'; +} +sub texiDescriptionEnd +{ + Print '\n@end table\n'; +} +sub texiItemStart +{ + Print '\n@itemize @bullet\n'; +} +sub texiItemEnd +{ + Print '\n@end itemize\n'; +} +sub texiCenterStart +{ + $texiCenterLine = 1; + $newline = 0; + $texiMacro{'\\'} = '@*'; # line break + $texiMacro{'TEXIbr'} = '@*'; + NL; +} +sub texiCenterEnd +{ + $texiCenterLine = 0; + $newline = 0; + $texiMacro{'\\'} = '@*\n'; # line break + $texiMacro{'TEXIbr'} = '@*\n'; + NL; +} + +sub texiEnumStart +{ + Print '\n@enumerate\n'; +} +sub texiEnumEnd +{ + Print '\n@end enumerate\n'; +} +sub texiTableStart +{ + my $columns = $_[0]; + my $width = $_[1]; + my $i; + Print '\n@multitable @columnfractions '; + for ($i = 1; $i <= $columns; $i++) { + Print " " .0.9/$columns ; + } + Print '\n'; +} +sub texiTableSep +{ + Print '@tab '; +} +sub texiTableEnd +{ + Print '\n@end multitable\n'; +} + +sub texiVerbatimStart +{ + NL; + Print '@*'; NL +} + +sub texiVerbatimEnd +{ + NL; +} + +sub texiVerbatimLine +{ + s/({|}|@| )/@\1/g; + chop; + print DEST ".$_\@*\n"; +} + +########################################################################### +########################################################################### +# general processing + +# emit an error message is the given macro does not exists. 
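The backend is chosen by pointing $Macro, $Macro1a, $Macro1b, ... at one of the man*/html*/texi* tables above through the $Prefix symbolic references, and the literal \n sequences stored in those tables are only turned into real newlines later by Print/PrintM. A minimal stand-alone sketch of what that wrapping produces for a one-argument macro, using values copied from the tables above (the argument 'foo' is arbitrary):

    # Sketch only: how a one-argument macro is wrapped by its 1a/1b table entries.
    my %manMacro1a  = ( textbf => '\fB' );        # start string, man backend
    my %manMacro1b  = ( textbf => '\fP' );        # end string, man backend
    my %texiMacro1a = ( textbf => '@strong{' );   # same macro, Texinfo backend
    my %texiMacro1b = ( textbf => '}' );
    my $arg = 'foo';                              # arbitrary macro argument
    print $manMacro1a{textbf}  . $arg . $manMacro1b{textbf},  "\n";   # prints \fBfoo\fP
    print $texiMacro1a{textbf} . $arg . $texiMacro1b{textbf}, "\n";   # prints @strong{foo}

This is the substitution that interpret_word below performs via PrintM $Macro1a->{$m}; interpret_word $a1; PrintM $Macro1b->{$m};.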
+sub check_Macro +{ + exists $Macro->{$_[0]} || + die "Error in line $.: no such macro: \\$_[0]\n"; +} +sub check_Macro1 +{ + (exists $Macro1a->{$_[0]} && exists $Macro1b->{$_[0]}) || + die "$CMD: Error in line $.: no such macro: \\$_[0]\n"; +} +sub check_Macro2 +{ + (exists $Macro2a->{$_[0]} && exists $Macro2b->{$_[0]} && exists $Macro2c->{$_[0]}) || + die "$CMD: Error in line $.: no such macro: \\$_[0]\n"; +} + +sub NL +{ + if (!$newline) { + printf DEST "\n"; + if ($texiCenterLine) { + print DEST "\@center "; + } + $newline = 1; + } +} + +sub interpret_word +{ + if (@_ <= 0) { + return; + } + $_ = join " ", @_; + my ($s,$m,$a1,$a2,$r); # start, match/macro, argument1, argument2 + my $add_blank = 1; # if true, add a blank after the word + if ($opt_D == 2) { + if ($nesting == 0) { + print "**** "; + } else { + print " "; + } + print "\`$_'\n"; + } + + if ($opt_H) { + # handling of HTML table rows + if ($inside_table == 1) { + if ($first_column == 1) { + if (/^$/) { + return; + } + if (/^\\hline/) { + Print '\n
    \n'; + } + Print '\n\n'; + } + $first_column = 0; + } + } elsif ($opt_M) { + # handling of troff table rows + if ($inside_table == 1) { + if ($first_column == 1) { + if (/^$/) { + return; + } + Print 'T{\n'; + } + $first_column = 0; + } + } elsif ($opt_T) { + # handling of TexInfo specific stuff + if ($nesting == 0) { + s'@'@@'g; + } + if ($inside_table == 1) { + if ($first_column == 1) { + Print '\n@item '; + } + $first_column = 0; + } + } + + $nesting ++; + + SWITCH: { + /^$/ && do {$add_blank = 0; + last SWITCH; + }; + /\\verb\+([^+]*)\+/ && do {$s=$`;$m=$1;$r=$'; + interpret_word $s; + PrintM $Macro1a->{'verb'}; + &{$Prefix . "Verb"} ($m); + PrintM $Macro1b->{'verb'}; + interpret_word $r; + last SWITCH; + }; + /\\(".|ss)/ && do {$s=$`;$m=$1;$r=$'; #" + interpret_word $s; + check_Macro $m; + PrintM $Macro->{$m}; + interpret_word $r; + last SWITCH; + }; + /\\item\s*\[([^]]*)\]/ && do {$s=$`;$m=$1;$r=$'; + interpret_word $s; + $item_nr[$list_nest] ++; + &{$Prefix . "ItemWithArg"} ($m); + interpret_word $r; + last SWITCH; + }; + /\\item\s*/ && do {$s=$`;$r=$'; + interpret_word $s; + $item_nr[$list_nest] ++; + &{$Prefix . "Item"}; + interpret_word $r; + last SWITCH; + }; + # LaTeX macros with two arguments + /\\([a-zA-Z]+){([^}]*)}{([^}]*)}/ + && do {$s=$`;$m=$1;$a1=$2;$a2=$3;$r=$'; + check_Macro2 $m; + interpret_word $s; + PrintM $Macro2a->{$m}; + interpret_word $a1; + PrintM $Macro2b->{$m}; + interpret_word $a2; + PrintM $Macro2c->{$m}; + interpret_word $r; + NL; + last SWITCH; + }; + # Special Handling of Email and URL LaTeX macros with one argument + /\\(URL|Email){([^}]*)}/ && ($opt_H) + && do {$s=$`;$m=$1;$a1=$2;$r=$'; + interpret_word $s; + PrintM $Macro2a->{$m}; + interpret_word $a1; + PrintM $Macro2b->{$m}; + interpret_word $a1; + PrintM $Macro2c->{$m}; + interpret_word $r; + NL; + last SWITCH; + }; + # LaTeX macros with one argument + /\\([a-zA-Z]+){([^}]*)}/ && do {$s=$`;$m=$1;$a1=$2;$r=$'; + check_Macro1 $m; + interpret_word $s; + PrintM $Macro1a->{$m}; + interpret_word $a1; + PrintM $Macro1b->{$m}; + interpret_word $r; + NL; + last SWITCH; + }; + # Special handling of some LaTeX macros without an argument + /\\SP\s*/ && do {$s=$`;$m=$1;$r=$'; + interpret_word $s; + if ($first_word) { + PrintM $Macro->{"SPfirst"}; + } else { + PrintM $Macro->{"SP"}; + } + interpret_word $r; + $add_blank = 0; + last SWITCH; + }; + /\\(MANbr|TEXIbr|HTMLbr)\s*/ && do {$s=$`;$m=$1;$r=$'; + # set $first_word to true + check_Macro $m; + interpret_word $s; + PrintM $Macro->{$m}; + $first_word = 1; + interpret_word $r; + $add_blank = 0; + last SWITCH; + }; + # LaTeX macros without an argument: + /\\([a-zA-Z]+)\s*/ && do {$s=$`;$m=$1;$r=$'; + check_Macro $m; + interpret_word $s; + PrintM $Macro->{$m}; + interpret_word $r; + $add_blank = 0; + last SWITCH; + }; + /\\({|}|\$|_|#|&|-|%|,|\.|;)/ && do {$s=$`;$m=$1;$r=$'; + interpret_word $s; + PrintM $Macro->{$m}; + interpret_word $r; + last SWITCH; + }; + # LaTeX Math + /\$(<|>|<=|>=|=|<>)\$/ && do {$s=$`;$m=$1;$r=$'; + interpret_word $s; + PrintM $Macro->{$m}; + interpret_word $r; + last SWITCH; + }; + /\$([^\$]*)\$/ && do {$s=$`;$m=$1;$r=$'; + interpret_word $s; + interpret_word $m; + interpret_word $r; + last SWITCH; + }; + /&/ && do {$s=$`;$r=$'; + interpret_word $s; + &{$Prefix . 
"TableSep"}; + $first_column = 0; + interpret_word $r; + last SWITCH; + }; + /~/ && do {$s=$`;$r=$'; + interpret_word $s; + PrintM $Macro->{'~'}; + interpret_word $r; + last SWITCH; + }; + /\\\\/ && do {$s=$`;$r=$'; + interpret_word $s; + if ($inside_table) { + PrintM $Macro->{'\\Tab'}; + $first_column = 1; + if (length ($r) > 0) { + interpret_word $r; + } + } else { + PrintM $Macro->{'\\'}; + $first_word = 1; + interpret_word $r; + } + $add_blank = 0; + last SWITCH; + }; + /\\$|\\ / && do {$s=$`;$r=$'; + # LaTeX explicit blank \ will be + # represented as a single \ at + # the end of the word + interpret_word $s; + Print " "; + interpret_word $r; + last SWITCH; + }; + /\\/ && do {$s=$`;$r=$'; + interpret_word $s; + interpret_word "\\$r"; + last SWITCH; + }; + ($opt_M == 1) && /((^\.|')+)/ && do {$s=$`;$m=$1;$r=$'; + interpret_word $s; + print DEST "\\&$m"; + $newline = 0; + interpret_word $r; + last SWITCH; + }; + Print "$_"; + }; + $nesting --; + Print " " if ($nesting == 0 && $add_blank); +} + +sub interpret_line +{ + my $line = $_[0]; chomp $line; + my @words = split(/\s+/,$line); + my $max = $#words; + my $i; + my $join = 0; # true, if words must be joined + my $word = ""; # the joined word + my $kind = 0; # 1: item[ .. ], 2: { .. }, 3: \verb+ .. + + if ($max < 0) { + # empty line marks a paragraph + &{$Prefix . "Paragraph"}; + $first_word = 1; + return; + } + for ($i = 0; $i <= $max; $i++) { + $_ = $words[$i]; + # printf "\`$words[$i]'"; + if (/^[\s]*$/) { # skip leading blanks + # nothing + } elsif ((!$join || ($kind != 3)) && /^%+/) { # skip comments + last; + } else { + # if blanks in a { .. }, \item[ .. ], \verb+ .. + then we have to joind words + if ($join) { + # check whether this is the last word to be joined + if ($kind == 1) { # item + $join = index ($_, "]") == -1; + } elsif ($kind == 2) { # braces + my @x = $_ =~ /[^\\]}/g; + $join = $#x == -1; + } elsif ($kind == 3) { # verb + $join = index ($_, "+") == -1; + } + $word .= " " . 
$words[$i]; + if (!$join) { + interpret_word $word; + $word = ""; + } + } else { + # check whether we have to join some words + if (/\\item/) { + my $cnt1 = tr/[/[/; + my $cnt2 = tr/]/]/; + $join = $cnt1 != $cnt2 | $cnt1 == 0; + $kind = 1; + } elsif (/\\verb/) { + my $cnt = tr/+/+/; + $join = $cnt % 2 != 0; + $kind = 3; + } else { + my @x = $_ =~ /[^\\]{/g; + my @y = $_ =~ /[^\\]}/g; + $join = $#x != $#y; + $kind = 2; + } + if ($join) { + $word = $words[$i]; + } else { + interpret_word $words[$i]; + } + } + } + } + if ($join) { + interpret_word $word; + } + NL; +} + +sub PrintM +# print only for Macro command text +{ + if (@_ <= 0 || length ($_[0]) == 0) { + return; + } + my $l = shift; + my $c; + $l =~ s/\\n/\n/g; + foreach $c (split ("", $l)) { + if ($c eq "\n") { + NL; + $newline = 0; + } else { + print DEST "$c"; + } + } +} + +sub Print +{ + + # printf "\`$_[0]'\n"; + if (@_ <= 0 || length ($_[0]) == 0) { + return; + } + my $x = $_[0]; + + unless ($x =~ /^\s*$/) { + # if other chars than blanks are printed: + $first_word = 0; + } + + if (!$inside_verb) { + # transform special characters + $x =~ s/([äöüÄÖÜß])/$LetterCode->{"$1"}/go; + } + + my @parts = split /\\n/, $x, -1; + # -1: trailing \n generates an empty list element + my $i; + for ($i = 0; $i <= $#parts; $i++) { + if ($newline) { + # skip leading blanks after a newline + $parts[$i] =~ s/^\s+//; + } + if (length($parts[$i]) > 0) { + if ($opt_M) { + $parts[$i] =~ s/\\/\\\\/g; + $parts[$i] =~ s/-/\\-/g; + } + printf DEST "%s", $parts[$i]; + $newline = 0; + } + if ($i < $#parts) { + NL; + } + } + $paragraph = 0; +} +######################################################################### + +sub date2str +{ + @EnglishMonthName = ('','January','February','March','April','May','June','July', + 'August','September','October','November','December'); + # split date + my ($Year,$Month,$Day) = split (/\//,$_[0]); + return $Day . " " . $EnglishMonthName[$Month] . " " . $Year; +} + +$rcs_date=`date '+%Y/%m/%d/'`; # date of the man-page, overwritten + # by \rcsInfo +$date = date2str ($rcs_date); +$Macro->{'today'} = $date; + +######################################################################### +my @skip; # stack of skip-flags, 1: skip, 0: don't skip +push @skip, 0; # synthetic "outer most" IF, don't skip +my $last_cond_clause = ""; + +sub handle_conditional_text +{ + $_ = $_[0]; +# printf "skip-stack: %s;\t top = %s\n", join (", ", @skip), $skip[-1]; + if (/^\s*%@%\s+IF\s+([^%]*)\s+%@%\s*$/) { + # produce a program, which evaluates the condition: + my $prog_cond = $1; + $prog_cond =~ s/(\w+)/\$$1/g; + my $prog = ""; + my $var; + foreach $var (keys %cond_name) { + if ($var ne "") { + $prog .= "my \$$var = 1; " # declare set names, + # undeclared ones get value 0 + } + } + $prog .= "return ($prog_cond) ? 0 : 1;"; +# print "\n**** [$prog]\n"; + my $skip = eval $prog; + ($@ eq "") || die "$CMD: error in line $.: wrong condition of `%@% IF.. ($@)\n"; +# print "**** skip=$skip\n"; + + $last_cond_clause = "IF"; + if ($skip[-1] == 1) { + # skip this text, since outer IF skips + push @skip, 1; + } else { + # outer IF is not skipped, hence consider this IF + push @skip, $skip; + } + } elsif (/^\s*%@%\s+ELSE\s+%@%\s*$/) { + ($last_cond_clause =~ /IF/ && ($#skip > 0)) || + die "$CMD: error in line $.: `%@% ELSE %@%' without an `%@% IF..'\n"; + $last_cond_clause = "ELSE"; + if ($skip[-2] == 0) { + $skip[-1] = $skip[-1]? 
0 : 1; + } + } elsif (/^\s*%@%\s+END-IF\s+%@%\s*$/) { + $last_cond_clause = "END-IF"; + ($#skip == 0) && + die "$CMD: error in line $.: `%@% END-IF %@%' without an `%@% IF..'\n"; + pop @skip; + } +# print "$last_cond_clause: skip = $skip[-1]\n"; +} + +############################################################################ +# handle LaTeX output +if ($opt_L) { + while () { + if ($opt_D == 1) { + my $line = $_; chop $line; + print "--- \`$line'\n"; + } + + if (/^\s*%@%\s/) { + my $skip = $skip[-1]; + handle_conditional_text ("$_"); + # write %@% directive + print DEST $_; + next; + } + next if ($skip[-1] == 1); + print DEST $_; + } + close (DEST); + exit (0); +} + +############################################################################ +# handle non-LaTeX output + +# read sections +# Variables: $section name of the section in uppercase letters +# $chapter chapter of the man page +# $name name of the man page +# $Name name of the man page in uppercase +# $author author of the man page +# $tool info about the tool set, $name is part of +# $date date +# $version version info +$started = 0; +while () { + if ($opt_D == 1) { + my $line = $_; chop $line; + print "--- \`$line'\n"; + } + + if ((/^\s*%@%\s/) && ($inside_verb==0)) { + my $skip = $skip[-1]; + handle_conditional_text ("$_"); + } + next if ($skip[-1] == 1); + + if ($inside_verb) { + if (/^\s*\\end{verbatim}/) { + if ($started == 1) { + &{$Prefix . "VerbatimEnd"}; + $inside_verb = 0; + } + } else { + &{$Prefix . "VerbatimLine"} ($_); + } + next; + } + + # remove {, } around Umlaute + s/{(\\".)}/\1/g; # " + s/{(\\ss)}/\1/g; + + # normalize special characters + s/\\"a/ä/g; + s/\\"o/ö/g; + s/\\"u/ü/g; + s/\\"A/Ä/g; + s/\\"O/Ö/g; + s/\\"U/Ü/g; + s/\\ss/ß/g; + + if (/^\s*\\rcsInfo \$(.*)\$/) { + my ($rcs_id,$rcs_file,$rcs_revision, + $rcs_date,$rcs_time,$rcs_owner,$rcs_status,$rcs_locker) = split(/\s/,$1); + $date = date2str ($rcs_date); + $Macro->{'today'} = $date; + } elsif (/^\s*\\setDate{\\rcsInfoLongDate}/) { + $Macro->{'Date'} = $date; + } elsif (/^\s*\\setDate{\\today}/) { + $Macro->{'Date'} = $date; + } elsif (/^\s*\\setDate{([^}]*)}/) { + $date = $1; + $date =~ s/~/$Macro->{'~'}/g; + $Macro->{'Date'} = $date; + } elsif (/^\s*\\setVersion{([^}]*)}/) { + $version = $1; + $versin =~ s/~/$Macro->{'~'}/g; + $Macro->{'Version'} = $version; + } elsif (/^\s*\\begin{Name}{([^}]*)}{([^}]*)}{([^}]*)}{([^}]*)}{([^}]*)}/) { + $section = "Name"; + $chapter = $1; + $name = $2; + $Name = uc $name; + $author = $3; + $tool = $4; + $title = $5; + $rest = $'; + $started = 1; + $sections[0] = $section; + $section_cnt = 0; + &{$Prefix . "Start"} ($name, $chapter, $author, $tool, $title); + &{$Prefix . "NameStart"} ($name, $chapter, $author, $tool, $title); + } elsif (/^\s*\\end{Name}/) { + &{$Prefix . "NameEnd"} ($name, $chapter, $author, $tool); + } elsif (/^\s*\\begin{Table}(\[([^]]*)\])?{([^}]*)}/) { + # \begin{Table}[width]{columns} + if ($started == 1) { + $columns = $3; + $column = $_[0]; + $inside_table = 1; + $first_column = 1; + &{$Prefix . "TableStart"} ($columns, $2); + } + } elsif (/^\s*\\end{Table}/) { + if ($started == 1) { + $inside_table = 0; + $first_column = 0; + &{$Prefix . "TableEnd"} ($columns); + } + } elsif (/^\s*\\begin{Description}(\[[^]]*\])?/) { + if ($started == 1) { + $list_nest++; + $cur_list[$list_nest] = 'descr'; + $item_nr[$list_nest] = 0; + &{$Prefix . "DescriptionStart"}; + } + } elsif (/^\s*\\end{Description}/) { + if ($started == 1) { + &{$Prefix . 
"DescriptionEnd"}; + $list_nest--; + } + } elsif (/^\s*\\begin{description}/) { + if ($started == 1) { + $list_nest++; + $cur_list[$list_nest] = 'descr'; + $item_nr[$list_nest] = 0; + &{$Prefix . "DescriptionStart"}; + } + } elsif (/^\s*\\end{description}/) { + if ($started == 1) { + &{$Prefix . "DescriptionEnd"}; + $list_nest--; + } + } elsif (/^\s*\\begin{center}/) { + if ($started == 1) { + &{$Prefix . "CenterStart"}; + } + } elsif (/^\s*\\end{center}/) { + if ($started == 1) { + &{$Prefix . "CenterEnd"}; + } + } elsif (/^\s*\\begin{enumerate}/) { + if ($started == 1) { + $list_nest++; + $cur_list[$list_nest] = 'enum'; + $item_nr[$list_nest] = 0; + &{$Prefix . "EnumStart"} ; + } + } elsif (/^\s*\\end{enumerate}/) { + if ($started == 1) { + &{$Prefix . "EnumEnd"} ; + $list_nest--; + } + } elsif (/^\s*\\begin{itemize}/) { + if ($started == 1) { + $list_nest++; + $cur_list[$list_nest] = 'item'; + $item_nr[$list_nest] = 0; + &{$Prefix . "ItemStart"} ; + } + } elsif (/^\s*\\end{itemize}/) { + if ($started == 1) { + &{$Prefix . "ItemEnd"} ; + $list_nest--; + } + } elsif (/^\s*\\begin{verbatim}/) { + if ($started == 1) { + &{$Prefix . "VerbatimStart"}; + $inside_verb = 1; + } + } elsif (/^\s*\\(subsubsection|subsection|section){([^}]*)}/) { + $kind = $1; + $section = $2; + $section_cnt ++; + $sections[$section_cnt] = $section; + $section_kind[$section_cnt] = $kind; + if ($started == 1) { + &{$Prefix . "Section"} ($section_cnt, $kind, $section); + } + } elsif (/^\s*\\LatexManEnd/) { + last; + } elsif (/^\s*((\\begin{Name|Table|Description})|(\\(sub)?section))/) { + die "$CMD: in line $.\n " . + "Arguments of $1 are not contained in a single " . + "line.\n " . + "Remember: all arguments of a macro must be on the same line.\n"; + } else { + if ($started == 1) { + interpret_line $_; + } + } +} +&{$Prefix . "End"}; + +close DEST; + +if ($opt_H || $opt_T) { + open (TMP, "<$tmp") || die "$CMD: Can't open file \`$tmp' for reading.\n"; + open (DEST, ">$DestFile") || die "$CMD: Can't open file \`$DestFile' for writing.\n"; + while () { + if (/^\@\@INSERTION-POINT\@\@-TOC\@\@$/) { + if ($opt_H) { + # Table of contents for HTML + my $nesting = 0; # nesting of section/subsection/subsubsection + Print '\n

    Table of Contents

    \n'; + for ($i = 1; $i <= $section_cnt; $i++) { + my $cur_nesting = 0; + if ($section_kind[$i] eq "subsubsection") { + $cur_nesting = 3; + } elsif ($section_kind[$i] eq "subsection") { + $cur_nesting = 2; + } elsif ($section_kind[$i] eq "section") { + $cur_nesting = 1; + } + if ($cur_nesting > $nesting) { + # open a new list + Print '\n
      \n' + } + if ($cur_nesting == $nesting) { + # same level, close list item + Print '\n'; + } + if ($cur_nesting < $nesting) { + # close list and list item + for my $i ($cur_nesting .. $nesting-1) { + Print '\n'; + Print '
    \n'; + } + } + # print item + Print "
  • "; + interpret_word $sections[$i]; + Print ""; + $nesting = $cur_nesting; + } + # close remaining lists + for my $i (1 .. $nesting) { + Print '
  • \n'; + Print '\n'; + } + } else { + # Menu of sections for texi + Print '@menu\n'; + for ($i = 2; $i <= $section_cnt; $i++) { + interpret_line "* " . $sections[$i] . "::"; + } + Print '@end menu\n'; + } + } elsif (/\@\@INSERTION-POINT\@\@-TEXI-TOP\@\@/) { + # Texi-top node + Print '@node Top, ' . $sections[2] . ', (dir), (dir)\n'; + } elsif (/\@\@INSERTION-POINT\@\@-TEXI-SEC\@\@ (\w+) (\d+)/) { + # print section header for texi + $kind = $1; + $cnt = $2; + $section = $sections[$cnt]; + Print '@node ' ; + interpret_word "$sections[$cnt], "; + interpret_word "$sections[$cnt+1], "; + interpret_word "$sections[$cnt-1], Top "; + NL; + } else { + print DEST $_; + } + } + close TMP; + close DEST; + unlink $tmp; +} + +######################################################################### + +## Emacs specific: +## Local Variables: *** +## mode: perl *** +## End: *** + diff --git a/Build/source/texk/texlive/linked_scripts/latexmk.pl b/Build/source/texk/texlive/linked_scripts/latexmk.pl deleted file mode 100755 index 4d124a67893..00000000000 --- a/Build/source/texk/texlive/linked_scripts/latexmk.pl +++ /dev/null @@ -1,6031 +0,0 @@ -eval '(exit $?0)' && eval 'exec perl -x -S "$0" ${1+"$@"}' && -eval 'exec perl -x -S "$0" $argv:q' -if 0; -#!/usr/bin/perl -w -#!/opt/local/bin/perl -w -#!/usr/local/bin/perl -w -# The above code allows this script to be run under UNIX/LINUX without -# the need to adjust the path to the perl program in a "shebang" line. -# (The location of perl changes between different installations, and -# may even be different when several computers running different -# flavors of UNIX/LINUX share a copy of latex or other scripts.) The -# script is started under the default command interpreter sh, and the -# evals in the first two lines restart the script under perl, and work -# under various flavors of sh. The -x switch tells perl to start the -# script at the first #! line containing "perl". The "if 0;" on the -# 3rd line converts the first two lines into a valid perl statement -# that does nothing. -# -# Source of the above: manpage for perlrun - -# Delete #??!! when working - -# See ?? <=============================== - -# Results of 8 Sep 2007: - -# Some improvements relative to the issues below. - -# ????????: -# Why is bibtex not always running right? Or running when it shouldn't -# I've put in rdb_make_links in a few places. -# and rdb_write -# Problem is that aux file is always out of date, until after a -# primary run. Ensure fdb and c. is updated enough etc. -# I may have it correct now: fdb_write in makeB -# See also routine rdb_update_files_for_rule, and who calls it - -# Apparently excess runs of latex after change in .tex file that entails -# change in bibliography. - -# Now I am missing diagnostics - - -## ???!!!!!!!!!!!!! Should I remove bibtex rule? NO -## ?? Need to set dependence of extra bibtex rules on .bib file -## ?? Put $pass as variable in rule. - -#======================================= - - -#?? Check all code for rdb stuff. -#?? Use of $update and $failure, etc -# Especially in pvc. Should I restore source file set up -# if there is a latex error?????????????????????? -#?? Force mode doesn't appear to do force (if error in latex file) -#??? Get banner back in. -#?? ==> Clean up of rdb. It accumulates files that aren't in use any more. -# Restrict to dependents (existent or not) discovered during -# parse of log file, and its consequences. -#?? CORRECT DIAGNOSTICS ON CHANGED FILES IF THEY DIDN'T EXIST BEFORE -#?? 
Further corrections to deal with disappeared source files for custom dependencies. -# Message repeatedly appears about remake when source file of cusdep doesn't exist. -#?? logfile w/o fdb file: don't set changed file, perhaps for generated exts. -# Reconsider -#?? Do proper run-stuff for bibtex, makeindex, cus-deps. OK I think -# Parse and correctly find bst and ist files -#?? Remove superfluous code when it's working. Mostly done. -#?? update_source_times in particular. I think it's done OK -#?? Add making of other files to rdb. Unify -#?? Ditto for printing and viewing? -#?? Update documentation - -# ATTEMPT TO ALLOW FILENAMES WITH SPACES: -# (as of 1 Apr 2006, and then 14 Sep. 2007) - -# Problems: -# A. Quoting filenames will not always work. -# a. Under UNIX, quotes are legal in filenames, so when PERL -# directly runs a binary, a quoted filename will be treated as -# as a filename containing a quote character. But when it calls -# a shell, the quotes are handled by the shell as quotes. -# b. Under MSWin32, quotes are illegal filename characters, and tend -# to be handled correctly. -# c. But under cygwin, results are not so clear (there are many -# combinations: native v. cygwin perl, native v cygwin programs -# NT v. unix scripts, which shell is called. -# B. TeX doesn't always handle filenames with spaces gracefully. -# a. UNIX/LINUX: The version on gluon2 Mar 31, 2006 to Sep. 2007) -# doesn't handle them at all. (TeX treats space as separator.) -# b. At least some later versions actually do (Brad Miller e-mail, -# Sep. 2007). -# c. fptex [[e-TeXk, Version 3.141592-2.1 (Web2c 7.5.2)] does, on -# my MSWin at home. In \input the filename must be in quotes. -# d. Bibtex [BibTeX (Web2c 7.5.2) 0.99c on my MSWin system at home, -# Sep. 2007] does not allow names of bibfiles to have spaces. -# C. =====> Using the shell for command lines is not safe, since special -# characters can cause lots of mayhem. -# It will therefore be a good idea to sanitize filenames. -# -# I've sanitized all calls out: -# a. system and exec use a single argument, which forces -# use of shell, under all circumstances -# Thus I can safely use quotes on filenames: They will be handled by -# the shell under UNIX, and simply passed on to the program under MSWin32. -# b. I reorganized Run, Run_Detached to use single command line -# c. All calls to Run and Run_Detached have quoted filenames. -# d. So if a space-free filename with wildcards is given on latexmk's -# command line, and it globs to space-containing filename(s), that -# works (fptex on home computer, native NT tex) -# e. ====> But globbing fails: the glob function takes space as filename -# separator. ==================== - -#================= TO DO ================ -# -# 1. See ?? ESPECIALLY $MSWin_fudge_break -# 2. Check fudged conditions in looping and make_files -# 3. Should not completely abort after a run that ends in failure from latex -# Missing input files (including via custom dependency) should be checked for -# a change in status -# If sources for missing files from custom dependency -# are available, then do a rerun -# If sources of any kind become available rerun (esp. for pvc) -# rerun -# Must parse log_file after unsuccessful run of latex: it may give -# information about missing files. -# 4. Check file of bug reports and requests -# 5. Rationalize bibtex warnings and errors. Two almost identical routines. -# Should 1. Use single routine -# 2. Convert errors to failure only in calling routine -# 3. Save first warning/error. 
- - -# To do: -# Rationalize again handling of include files. -# Now I use kpsewhich to do searches, if file not found -# (How do I avoid getting slowed down too much?) -# Better parsing of log file for includes. -# Document the assumptions at each stage of processing algorithm. -# Option to restart previewer automatically, if it dies under -pvc -# Test for already running previewer gets wrong answer if another -# process has the viewed file in its command line - -$my_name = 'latexmk'; -$My_name = 'Latexmk'; -$version_num = '4.01'; -$version_details = "$My_name, John Collins, 24 September 2008"; - - -use Config; -use File::Copy; -use File::Basename; -use FileHandle; -use File::Find; -use Cwd; # To be able to change cwd -use Cwd "chdir"; # Ensure $ENV{PWD} tracks cwd -use Digest; - -#use strict; - -# The following variables are assigned once and then used in symbolic -# references, so we need to avoid warnings 'name used only once': -use vars qw( $dvi_update_command $ps_update_command $pdf_update_command ); - -# Translation of signal names to numbers and vv: -%signo = (); -@signame = (); -if ( defined $Config{sig_name} ) { - $i = 0; - foreach $name (split(' ', $Config{sig_name})) { - $signo{$name} = $i; - $signame[$i] = $name; - $i++; - } -} -else { - warn "Something wrong with the perl configuration: No signals?\n"; -} - -## Copyright John Collins 1998-2008 -## (username collins at node phys.psu.edu) -## (and thanks to David Coppit (username david at node coppit.org) -## for suggestions) -## Copyright Evan McLean -## (modifications up to version 2) -## Copyright 1992 by David J. Musliner and The University of Michigan. -## (original version) -## -## This program is free software; you can redistribute it and/or modify -## it under the terms of the GNU General Public License as published by -## the Free Software Foundation; either version 2 of the License, or -## (at your option) any later version. -## -## This program is distributed in the hope that it will be useful, -## but WITHOUT ANY WARRANTY; without even the implied warranty of -## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -## GNU General Public License for more details. -## -## You should have received a copy of the GNU General Public License -## along with this program; if not, write to the Free Software -## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 -## -## -## -## NEW FEATURES, since v. 2.0: -## 1. Correct algorithm for deciding how many times to run latex: -## based on whether source file(s) change between runs -## 2. Continuous preview works, and can be of ps file or dvi file -## 3. pdf creation by pdflatex possible -## 4. Defaults for commands are OS dependent. -## 5. Parsing of log file instead of source file is used to -## obtain dependencies, by default. -## -## Modification log for 28 Mar 2007 onwards in detail -## -## 24 Sep 2008, John Collins Release version 4.01. -## -## 1998-2008, John Collins. Many improvements and fixes. -## -## Modified by Evan McLean (no longer available for support) -## Original script (RCS version 2.3) called "go" written by David J. Musliner -## -## 2.0 - Final release, no enhancements. LatexMk is no longer supported -## by the author. -## 1.9 - Fixed bug that was introduced in 1.8 with path name fix. -## - Fixed buglet in man page. -## 1.8 - Add not about announcement mailling list above. -## - Added texput.dvi and texput.aux to files deleted with -c and/or -## the -C options. -## - Added landscape mode (-l option and a bunch of RC variables). 
-## - Added sensing of "\epsfig{file=...}" forms in dependency generation. -## - Fixed path names when specified tex file is not in the current -## directory. -## - Fixed combined use of -pvc and -s options. -## - Fixed a bunch of speling errors in the source. :-) -## - Fixed bugs in xdvi patches in contrib directory. -## 1.7 - Fixed -pvc continuous viewing to reattach to pre-existing -## process correctly. -## - Added $pscmd to allow changing process grepping for different -## systems. -## 1.6 - Fixed buglet in help message -## - Fixed bugs in detection of input and include files. -## 1.5 - Removed test message I accidentally left in version 1.4 -## - Made dvips use -o option instead of stdout redirection as some -## people had problems with dvips not going to stdout by default. -## - Fixed bug in input and include file detection -## - Fixed dependency resolution process so it detects new .toc file -## and makeindex files properly. -## - Added dvi and postscript filtering options -dF and -pF. -## - Added -v version commmand. -## 1.4 - Fixed bug in -pvc option. -## - Made "-F" option include non-existant file in the dependency list. -## (RC variable: $force_include_mode) -## - Added .lot and .lof files to clean up list of extensions. -## - Added file "texput.log" to list of files to clean for -c. -## - LatexMk now handles file names in a similar fashion to latex. -## The ".tex" extension is no longer enforced. -## - Added $texfile_search RC variable to look for default files. -## - Fixed \input and \include so they add ".tex" extension if necessary. -## - Allow intermixing of file names and options. -## - Added "-d" and banner options (-bm, -bs, and -bi). -## (RC variables: $banner, $banner_message, $banner_scale, -## $banner_intensity, $tmpdir) -## - Fixed "-r" option to detect an command line syntax errors better. -## 1.3 - Added "-F" option, patch supplied by Patrick van der Smagt. -## 1.2 - Added "-C" option. -## - Added $clean_ext and $clean_full_ext variables for RC files. -## - Added custom dependency generation capabilities. -## - Added command line and variable to specify custom RC file. -## - Added reading of rc file in current directly. -## 1.1 - Fixed bug where Dependency file generation header is printed -## rependatively. -## - Fixed bug where TEXINPUTS path is searched for file that was -## specified with absolute an pathname. -## 1.0 - Ripped from script by David J. Musliner (RCS version 2.3) called "go" -## - Fixed a couple of file naming bugs -## e.g. when calling latex, left the ".tex" extension off the end -## of the file name which could do some interesting things -## with some file names. -## - Redirected output of dvips. My version of dvips was a filter. -## - Cleaned up the rc file mumbo jumbo and created a dependency file -## instead. Include dependencies are always searched for if a -## dependency file doesn't exist. The -i option regenerates the -## dependency file. -## Getting rid of the rc file stuff also gave the advantage of -## not being restricted to one tex file per directory. -## - Can specify multiple files on the command line or no files -## on the command line. -## - Removed lpr options stuff. I would guess that generally, -## you always use the same options in which case they can -## be set up from an rc file with the $lpr variable. -## - Removed the dviselect stuff. If I ever get time (or money :-) ) -## I might put it back in if I find myself needing it or people -## express interest in it. 
-## - Made it possible to view dvi or postscript file automatically -## depending on if -ps option selected. -## - Made specification of dvi file viewer seperate for -pv and -pvc -## options. -##----------------------------------------------------------------------- - - -## Explicit exit codes: -## 10 = bad command line arguments -## 11 = file specified on command line not found -## or other file not found -## 12 = failure in some part of making files -## 13 = error in initialization file -## 20 = probable bug -## or retcode from called program. - - -#Line length in log file that indicates wrapping. -# This number EXCLUDES line-end characters, and is one-based -$log_wrap = 79; - -######################################################################### -## Default parsing and file-handling settings - -## Array of reg-exps for patterns in log-file for file-not-found -## Each item is the string in a regexp, without the enclosing slashes. -## First parenthesized part is the filename. -## Note the need to quote slashes and single right quotes to make them -## appear in the regexp. -## Add items by push, e.g., -## push @file_not_found, '^No data file found `([^\\\']*)\\\''; -## will give match to line starting "No data file found `filename'" -@file_not_found = ( - '^No file\\s*(.*)\\.$', - '^\\! LaTeX Error: File `([^\\\']*)\\\' not found\\.', - '.*?:\\d*: LaTeX Error: File `([^\\\']*)\\\' not found\\.', - '^LaTeX Warning: File `([^\\\']*)\\\' not found', - '^Package .* file `([^\\\']*)\\\' not found', -); - -## Hash mapping file extension (w/o period, e.g., 'eps') to a single regexp, -# whose matching by a line in a file with that extension indicates that the -# line is to be ignored in the calculation of the hash number (md5 checksum) -# for the file. Typically used for ignoring datestamps in testing whether -# a file has changed. -# Add items e.g., by -# $hash_calc_ignore_pattern{'eps'} = '^%%CreationDate: '; -# This makes the hash calculation for an eps file ignore lines starting with -# '%%CreationDate: ' -# ?? Note that a file will be considered changed if -# (a) its size changes -# or (b) its hash changes -# So it is useful to ignore lines in the hash calculation only if they -# are of a fixed size (as with a date/time stamp). -%hash_calc_ignore_pattern =(); - -######################################################################### -## Default document processing programs, and related settings, -## These are mostly the same on all systems. -## Most of these variables represents the external command needed to -## perform a certain action. Some represent switches. 
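Both hooks documented above are intended to be extended from an initialization file rather than edited in place; the comments' own examples correspond to an rc-file snippet along these lines (the location ~/.latexmkrc is the usual per-user rc file, shown here only as an illustration):

    # Hypothetical ~/.latexmkrc snippet reusing the examples from the comments above.
    # Recognize one more "file not found" message in the log file:
    push @file_not_found, '^No data file found `([^\\\']*)\\\'';
    # When hashing .eps files, skip the timestamp line so that a regenerated
    # figure with unchanged content is not reported as changed:
    $hash_calc_ignore_pattern{'eps'} = '^%%CreationDate: ';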
- -## Commands to invoke latex, pdflatex -$latex = 'latex %O %S'; -$pdflatex = 'pdflatex %O %S'; -## Switch(es) to make them silent: -$latex_silent_switch = '-interaction=batchmode'; -$pdflatex_silent_switch = '-interaction=batchmode'; - -## Command to invoke bibtex -$bibtex = 'bibtex %O %B'; -# Switch(es) to make bibtex silent: -$bibtex_silent_switch = '-terse'; - -## Command to invoke makeindex -$makeindex = 'makeindex %O -o %D %S'; -# Switch(es) to make makeinex silent: -$makeindex_silent_switch = '-q'; - -## Command to convert dvi file to pdf file directly: -$dvipdf = 'dvipdf %O %S %D'; - -## Command to convert dvi file to ps file: -$dvips = 'dvips %O -o %D %S'; -## Command to convert dvi file to ps file in landscape format: -$dvips_landscape = 'dvips -tlandscape %O -o %D %S'; -# Switch(es) to get dvips to make ps file suitable for conversion to good pdf: -# (If this is not used, ps file and hence pdf file contains bitmap fonts -# (type 3), which look horrible under acroread. An appropriate switch -# ensures type 1 fonts are generated. You can put this switch in the -# dvips command if you prefer.) -$dvips_pdf_switch = '-P pdf'; -# Switch(es) to make dvips silent: -$dvips_silent_switch = '-q'; - -## Command to convert ps file to pdf file: -$ps2pdf = 'ps2pdf %O %S %D'; - -## Command to search for tex-related files -$kpsewhich = 'kpsewhich %S'; - - -##Printing: -$print_type = 'ps'; # When printing, print the postscript file. - # Possible values: 'dvi', 'ps', 'pdf', 'none' - -## Which treatment of default extensions and filenames with -## multiple extensions is used, for given filename on -## tex/latex's command line? See sub find_basename for the -## possibilities. -## Current tex's treat extensions like UNIX teTeX: -$extension_treatment = 'unix'; - -$dvi_update_signal = undef; -$ps_update_signal = undef; -$pdf_update_signal = undef; - -$dvi_update_command = undef; -$ps_update_command = undef; -$pdf_update_command = undef; - -$new_viewer_always = 0; # If 1, always open a new viewer in pvc mode. - # If 0, only open a new viewer if no previous - # viewer for the same file is detected. - -$quote_filenames = 1; # Quote filenames in external commands - -######################################################################### - -################################################################ -## Special variables for system-dependent fudges, etc. -$MSWin_fudge_break = 1; # Give special treatment to ctrl/C and ctrl/break - # in -pvc mode under MSWin - # Under MSWin32 (at least with perl 5.8 and WinXP) - # when latemk is running another program, and the - # user gives ctrl/C or ctrl/break, to stop the - # daughter program, not only does it reach - # the daughter, but also latexmk/perl, so - # latexmk is stopped also. In -pvc mode, - # this is not normally desired. So when the - # $MSWin_fudge_break variable is set, - # latexmk arranges to ignore ctrl/C and - # ctrl/break during processing of files; - # only the daughter programs receive them. - # This fudge is not applied in other - # situations, since then having latexmk also - # stopping because of the ctrl/C or - # ctrl/break signal is desirable. - # The fudge is not needed under UNIX (at least - # with Perl 5.005 on Solaris 8). Only the - # daughter programs receive the signal. In - # fact the inverse would be useful: In - # normal processing, as opposed to -pvc, if - # force mode (-f) is set, a ctrl/C is - # received by a daughter program does not - # also stop latexmk. 
Under tcsh, we get - # back to a command prompt, while latexmk - # keeps running in the background! - - -################################################################ - - -# System-dependent overrides: -if ( $^O eq "MSWin32" ) { -# Pure MSWindows configuration - ## Configuration parameters: - - ## Use first existing case for $tmpdir: - $tmpdir = $ENV{TMPDIR} || $ENV{TEMP} || '.'; - - ## List of possibilities for the system-wide initialization file. - ## The first one found (if any) is used. - @rc_system_files = ( 'C:/latexmk/LatexMk' ); - - $search_path_separator = ';'; # Separator of elements in search_path - - # For both fptex and miktex, the following makes error messages explicit: - $latex_silent_switch = '-interaction=batchmode -c-style-errors'; - $pdflatex_silent_switch = '-interaction=batchmode -c-style-errors'; - - # For a pdf-file, "start x.pdf" starts the pdf viewer associated with - # pdf files, so no program name is needed: - $pdf_previewer = 'start %O %S'; - $ps_previewer = 'start %O %S'; - $ps_previewer_landscape = $ps_previewer; - $dvi_previewer = 'start %O %S'; - $dvi_previewer_landscape = "$dvi_previewer"; - # Viewer update methods: - # 0 => auto update: viewer watches file (e.g., gv) - # 1 => manual update: user must do something: e.g., click on window. - # (e.g., ghostview, MSWIN previewers, acroread under UNIX) - # 2 => send signal. Number of signal in $dvi_update_signal, - # $ps_update_signal, $pdf_update_signal - # 3 => viewer can't update, because it locks the file and the file - # cannot be updated. (acroread under MSWIN) - # 4 => run a command to force the update. The commands are - # specified by the variables $dvi_update_command, - # $ps_update_command, $pdf_update_command - $dvi_update_method = 1; - $ps_update_method = 1; - $pdf_update_method = 3; # acroread locks the pdf file - # Use NONE as flag that I am not implementing some commands: - $lpr = - 'NONE $lpr variable is not configured to allow printing of ps files'; - $lpr_dvi = - 'NONE $lpr_dvi variable is not configured to allow printing of dvi files'; - $lpr_pdf = - 'NONE $lpr_pdf variable is not configured to allow printing of pdf files'; - # The $pscmd below holds a command to list running processes. It - # is used to find the process ID of the viewer looking at the - # current output file. The output of the command must include the - # process number and the command line of the processes, since the - # relevant process is identified by the name of file to be viewed. - # Its use is not essential. - $pscmd = - 'NONE $pscmd variable is not configured to detect running processes'; - $pid_position = -1; # offset of PID in output of pscmd. - # Negative means I cannot use ps -} -elsif ( $^O eq "cygwin" ) { - # The problem is a mixed MSWin32 and UNIX environment. - # Perl decides the OS is cygwin in two situations: - # 1. When latexmk is run from a cygwin shell under a cygwin - # environment. Perl behaves in a UNIX way. This is OK, since - # the user is presumably expecting UNIXy behavior. - # 2. When CYGWIN exectuables are in the path, but latexmk is run - # from a native NT shell. Presumably the user is expecting NT - # behavior. But perl behaves more UNIXy. This causes some - # clashes. - # The issues to handle are: - # 1. Perl sees both MSWin32 and cygwin filenames. This is - # normally only an advantage. - # 2. 
Perl uses a UNIX shell in the system command - # This is a nasty problem: under native NT, there is a - # start command that knows about NT file associations, so that - # we can do, e.g., (under native NT) system("start file.pdf"); - # But this won't work when perl has decided the OS is cygwin, - # even if it is invoked from a native NT command line. An - # NT command processor must be used to deal with this. - # 3. External executables can be native NT (which only know - # NT-style file names) or cygwin executables (which normally - # know both cygwin UNIX-style file names and NT file names, - # but not always; some do not know about drive names, for - # example). - # Cygwin executables for tex and latex may only know cygwin - # filenames. - # 4. The BIBINPUTS and TEXINPUTS environment variables may be - # UNIX-style or MSWin-style depending on whether native NT or - # cygwin executables are used. They are therefore parsed - # differently. Here is the clash: - # a. If a user is running under an NT shell, is using a - # native NT installation of tex (e.g., fptex or miktex), - # but has the cygwin executables in the path, then perl - # detects the OS as cygwin, but the user needs NT - # behavior from latexmk. - # b. If a user is running under an UNIX shell in a cygwin - # environment, and is using the cygwin installation of - # tex, then perl detects the OS as cygwin, and the user - # needs UNIX behavior from latexmk. - # Latexmk has no way of detecting the difference. The two - # situations may even arise for the same user on the same - # computer simply by changing the order of directories in the - # path environment variable - - - ## Configuration parameters: We'll assume native NT executables. - ## The user should override if they are not. - - # This may fail: perl converts MSWin temp directory name to cygwin - # format. Names containing this string cannot be handled by native - # NT executables. - $tmpdir = $ENV{TMPDIR} || $ENV{TEMP} || '.'; - - ## List of possibilities for the system-wide initialization file. - ## The first one found (if any) is used. - ## We can stay with MSWin files here, since perl understands them, - @rc_system_files = ( 'C:/latexmk/LatexMk' ); - - $search_path_separator = ';'; # Separator of elements in search_path - # This is tricky. The search_path_separator depends on the kind - # of executable: native NT v. cygwin. - # So the user will have to override this. - - # For both fptex and miktex, the following makes error messages explicit: - $latex_silent_switch = '-interaction=batchmode -c-style-errors'; - $pdflatex_silent_switch = '-interaction=batchmode -c-style-errors'; - - # We will assume that files can be viewed by native NT programs. - # Then we must fix the start command/directive, so that the - # NT-native start command of a cmd.exe is used. - # For a pdf-file, "start x.pdf" starts the pdf viewer associated with - # pdf files, so no program name is needed: - $start_NT = "cmd /c start"; - $pdf_previewer = "$start_NT %O %S"; - $ps_previewer = "$start_NT %O %S"; - $ps_previewer_landscape = $ps_previewer; - $dvi_previewer = "$start_NT %O %S"; - $dvi_previewer_landscape = $dvi_previewer; - # Viewer update methods: - # 0 => auto update: viewer watches file (e.g., gv) - # 1 => manual update: user must do something: e.g., click on window. - # (e.g., ghostview, MSWIN previewers, acroread under UNIX) - # 2 => send signal. 
Number of signal in $dvi_update_signal, - # $ps_update_signal, $pdf_update_signal - # 3 => viewer can't update, because it locks the file and the file - # cannot be updated. (acroread under MSWIN) - $dvi_update_method = 1; - $ps_update_method = 1; - $pdf_update_method = 3; # acroread locks the pdf file - # Use NONE as flag that I am not implementing some commands: - $lpr = - 'NONE $lpr variable is not configured to allow printing of ps files'; - $lpr_dvi = - 'NONE $lpr_dvi variable is not configured to allow printing of dvi files'; - $lpr_pdf = - 'NONE $lpr_pdf variable is not configured to allow printing of pdf files'; - # The $pscmd below holds a command to list running processes. It - # is used to find the process ID of the viewer looking at the - # current output file. The output of the command must include the - # process number and the command line of the processes, since the - # relevant process is identified by the name of file to be viewed. - # Its use is not essential. - # When the OS is detected as cygwin, there are two possibilities: - # a. Latexmk was run from an NT prompt, but cygwin is in the - # path. Then the cygwin ps command will not see commands - # started from latexmk. So we cannot use it. - # b. Latexmk was started within a cygwin environment. Then - # the ps command works as we need. - # Only the user, not latemk knows which, so we default to not - # using the ps command. The user can override this in a - # configuration file. - $pscmd = - 'NONE $pscmd variable is not configured to detect running processes'; - $pid_position = -1; # offset of PID in output of pscmd. - # Negative means I cannot use ps -} -else { - # Assume anything else is UNIX or clone - - ## Configuration parameters: - - - ## Use first existing case for $tmpdir: - $tmpdir = $ENV{TMPDIR} || '/tmp'; - - ## List of possibilities for the system-wide initialization file. - ## The first one found (if any) is used. - ## Normally on a UNIX it will be in a subdirectory of /opt/local/share or - ## /usr/local/share, depending on the local conventions. - ## /usr/local/lib/latexmk/LatexMk is put in the list for - ## compatibility with older versions of latexmk. - @rc_system_files = - ( '/opt/local/share/latexmk/LatexMk', - '/usr/local/share/latexmk/LatexMk', - '/usr/local/lib/latexmk/LatexMk' ); - - $search_path_separator = ':'; # Separator of elements in search_path - - $dvi_update_signal = $signo{USR1} - if ( defined $signo{USR1} ); # Suitable for xdvi - $ps_update_signal = $signo{HUP} - if ( defined $signo{HUP} ); # Suitable for gv - $pdf_update_signal = $signo{HUP} - if ( defined $signo{HUP} ); # Suitable for gv - ## default document processing programs. - # Viewer update methods: - # 0 => auto update: viewer watches file (e.g., gv) - # 1 => manual update: user must do something: e.g., click on window. - # (e.g., ghostview, MSWIN previewers, acroread under UNIX) - # 2 => send signal. Number of signal in $dvi_update_signal, - # $ps_update_signal, $pdf_update_signal - # 3 => viewer can't update, because it locks the file and the file - # cannot be updated. (acroread under MSWIN) - # 4 => Run command to update. Command in $dvi_update_command, - # $ps_update_command, $pdf_update_command. 
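  # As the notes above say, these are only defaults; a user rc file can
  # override them with ordinary Perl assignments.  A minimal sketch of a
  # ~/.latexmkrc doing so (the viewer names and the choice of update
  # method 4 are illustrative assumptions, not values set by this script):
  #
  #   $pdf_previewer      = 'start okular %O %S';
  #   $pdf_update_method  = 4;                        # update by running a command
  #   $pdf_update_command = 'okular --unique %S';
  #   $ps_previewer       = 'start gv --watch %O %S'; # GNU gv spelling of -watch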
- $dvi_previewer = 'start xdvi %O %S'; - $dvi_previewer_landscape = 'start xdvi -paper usr %O %S'; - if ( defined $dvi_update_signal ) { - $dvi_update_method = 2; # xdvi responds to signal to update - } else { - $dvi_update_method = 1; - } -# if ( defined $ps_update_signal ) { -# $ps_update_method = 2; # gv responds to signal to update -# $ps_previewer = 'start gv -nowatch'; -# $ps_previewer_landscape = 'start gv -swap -nowatch'; -# } else { -# $ps_update_method = 0; # gv -watch watches the ps file -# $ps_previewer = 'start gv -watch'; -# $ps_previewer_landscape = 'start gv -swap -watch'; -# } - # Turn off the fancy options for gv. Regular gv likes -watch etc - # GNU gv likes --watch etc. User must configure - $ps_update_method = 0; # gv -watch watches the ps file - $ps_previewer = 'start gv %O %S'; - $ps_previewer_landscape = 'start gv -swap %O %S'; - $pdf_previewer = 'start acroread %O %S'; - $pdf_update_method = 1; # acroread under unix needs manual update - $lpr = 'lpr %O %S'; # Assume lpr command prints postscript files correctly - $lpr_dvi = - 'NONE $lpr_dvi variable is not configured to allow printing of dvi files'; - $lpr_pdf = - 'NONE $lpr_pdf variable is not configured to allow printing of pdf files'; - # The $pscmd below holds a command to list running processes. It - # is used to find the process ID of the viewer looking at the - # current output file. The output of the command must include the - # process number and the command line of the processes, since the - # relevant process is identified by the name of file to be viewed. - # Uses: - # 1. In preview_continuous mode, to save running a previewer - # when one is already running on the relevant file. - # 2. With xdvi in preview_continuous mode, xdvi must be - # signalled to make it read a new dvi file. - # - # The following works on Solaris, LINUX, HP-UX, IRIX - # Use -f to get full listing, including command line arguments. - # Use -u $ENV{CMD} to get all processes started by current user (not just - # those associated with current terminal), but none of other users' - # processes. - $pscmd = "ps -f -u $ENV{USER}"; - $pid_position = 1; # offset of PID in output of pscmd; first item is 0. - if ( $^O eq "linux" ) { - # Ps on Redhat (at least v. 7.2) appears to truncate its output - # at 80 cols, so that a long command string is truncated. - # Fix this with the --width option. This option works under - # other versions of linux even if not necessary (at least - # for SUSE 7.2). - # However the option is not available under other UNIX-type - # systems, e.g., Solaris 8. - $pscmd = "ps --width 200 -f -u $ENV{USER}"; - } - elsif ( $^O eq "darwin" ) { - # OS-X on Macintosh - $lpr_pdf = 'lpr %O %S'; - $pscmd = "ps -ww -u $ENV{USER}"; - } -} - -## default parameters -$max_repeat = 5; # Maximum times I repeat latex. Normally - # 3 would be sufficient: 1st run generates aux file, - # 2nd run picks up aux file, and maybe toc, lof which - # contain out-of-date information, e.g., wrong page - # references in toc, lof and index, and unresolved - # references in the middle of lines. But the - # formatting is more-or-less correct. On the 3rd - # run, the page refs etc in toc, lof, etc are about - # correct, but some slight formatting changes may - # occur, which mess up page numbers in the toc and lof, - # Hence a 4th run is conceivably necessary. - # At least one document class (JHEP.cls) works - # in such a way that a 4th run is needed. - # We allow an extra run for safety for a - # maximum of 5. 
Needing further runs is - # usually an indication of a problem; further - # runs may not resolve the problem, and - # instead could cause an infinite loop. -$clean_ext = ""; # space separated extensions of files that are - # to be deleted when doing cleanup, beyond - # standard set -$clean_full_ext = ""; # space separated extensions of files that are - # to be deleted when doing cleanup_full, beyond - # standard set and those in $clean_ext -@cus_dep_list = (); # Custom dependency list -@default_files = ( '*.tex' ); # Array of LaTeX files to process when - # no files are specified on the command line. - # Wildcards allowed - # Best used for project specific files. -@default_excluded_files = ( ); - # Array of LaTeX files to exclude when using - # @default_files, i.e., when no files are specified - # on the command line. - # Wildcards allowed - # Best used for project specific files. -$texfile_search = ""; # Specification for extra files to search for - # when no files are specified on the command line - # and the @default_files variable is empty. - # Space separated, and wildcards allowed. - # These files are IN ADDITION to *.tex in current - # directory. - # This variable is obsolete, and only in here for - # backward compatibility. - -$fdb_ext = 'fdb_latexmk'; # Extension for the file for latexmk's - # file-database - # Make it long to avoid possible collisions. -$fdb_ver = 2; # Version number for kind of fdb_file. - -$jobname = ''; # Jobname: as with current tex, etc indicates - # basename of generated files. - # Defined so that --jobname=STRING on latexmk's - # command line has same effect as with current - # tex, etc. (If $jobname is non-empty, then - # the --jobname=... option is used on tex.) - - -## default flag settings. -$silent = 0; # silence latex's messages? -$landscape_mode = 0; # default to portrait mode - -# The following two arrays contain lists of extensions (without -# period) for files that are read in during a (pdf)LaTeX run but that -# are generated automatically from the previous run, as opposed to -# being user generated files (directly or indirectly from a custom -# dependency). These files get two kinds of special treatment: -# 1. In clean up, where depending on the kind of clean up, some -# or all of these generated files are deleted. -# (Note that special treatment is given to aux files.) -# 2. In analyzing the results of a run of (pdf)LaTeX, to -# determine if another run is needed. With an error free run, -# a rerun should be provoked by a change in any source file, -# whether a user file or a generated file. But with a run -# that ends in an error, only a change in a user file during -# the run (which might correct the error) should provoke a -# rerun, but a change in a generated file should not. -# These arrays can be user-configured. -@generated_exts = ( 'aux', 'bbl', 'idx', 'ind', 'lof', 'lot', 'out', 'toc' ); - # N.B. 'out' is generated by hyperref package - -# Which kinds of file do I have requests to make? -# If no requests at all are made, then I will make dvi file -# If particular requests are made then other files may also have to be -# made. 
E.g., ps file requires a dvi file -$dvi_mode = 0; # No dvi file requested -$postscript_mode = 0; # No postscript file requested -$pdf_mode = 0; # No pdf file requested to be made by pdflatex - # Possible values: - # 0 don't create pdf file - # 1 to create pdf file by pdflatex - # 2 to create pdf file by ps2pdf - # 3 to create pdf file by dvipdf -$view = 'default'; # Default preview is of highest of dvi, ps, pdf -$sleep_time = 2; # time to sleep b/w checks for file changes in -pvc mode -$banner = 0; # Non-zero if we have a banner to insert -$banner_scale = 220; # Original default scale -$banner_intensity = 0.95; # Darkness of the banner message -$banner_message = 'DRAFT'; # Original default message -$do_cd = 0; # Do not do cd to directory of source file. - # Thus behave like latex. -$dependents_list = 0; # Whether to display list(s) of dependencies -@dir_stack = (); # Stack of pushed directories. -$cleanup_mode = 0; # No cleanup of nonessential LaTex-related files. - # $cleanup_mode = 0: no cleanup - # $cleanup_mode = 1: full cleanup - # $cleanup_mode = 2: cleanup except for dvi, - # dviF, pdf, ps, & psF -$cleanup_fdb = 0; # No removal of file for latexmk's file-database -$cleanup_only = 0; # When doing cleanup, do not go-on to making files -$diagnostics = 0; -$dvi_filter = ''; # DVI filter command -$ps_filter = ''; # Postscript filter command - -$force_mode = 0; # =1 to force processing past errors -$force_include_mode = 0;# =1 to ignore non-existent files when testing - # for dependency. (I.e., don't treat them as error) -$go_mode = 0; # =1 to force processing regardless of time-stamps - # =2 full clean-up first -$preview_mode = 0; -$preview_continuous_mode = 0; -$printout_mode = 0; # Don't print the file - -# Do we make view file in temporary then move to final destination? -# (To avoid premature updating by viewer). -$always_view_file_via_temporary = 0; # Set to 1 if viewed file is always - # made through a temporary. -$pvc_view_file_via_temporary = 1; # Set to 1 if only in -pvc mode is viewed - # file made through a temporary. - -# State variables initialized here: - -$updated = 0; # Flags when something has been remade - # Used to allow convenient user message in -pvc mode -$waiting = 0; # Flags whether we are in loop waiting for an event - # Used to avoid unnecessary repeated o/p in wait loop - -# Used for some results of parsing log file: -$reference_changed = 0; -$bad_reference = 0; -$bad_citation = 0; - - -# Set search paths for includes. -# Set them early so that they can be overridden -$BIBINPUTS = $ENV{'BIBINPUTS'}; -if (!$BIBINPUTS) { $BIBINPUTS = '.'; } -#?? OBSOLETE -$TEXINPUTS = $ENV{'TEXINPUTS'}; -if (!$TEXINPUTS) { $TEXINPUTS = '.'; } - -# Convert search paths to arrays: -# If any of the paths end in '//' then recursively search the -# directory. After these operations, @BIBINPUTS should -# have all the directories that need to be searched - -@BIBINPUTS = find_dirs1 ($BIBINPUTS); - - -###################################################################### -###################################################################### -# -# ??? UPDATE THE FOLLOWING!! -# -# We will need to determine whether source files for runs of various -# programs are out of date. In a normal situation, this is done by -# asking whether the times of the source files are later than the -# destination files. But this won't work for us, since a common -# situation is that a file is written on one run of latex, for -# example, and read back in on the next run (e.g., an .aux file). 
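# A self-contained sketch of the fingerprint described here, i.e., the
# (time, size, md5) triple kept per source file.  The helper name
# fdb_fingerprint is an illustrative assumption, not a routine defined in
# this script; it only shows the idea.
use Digest::MD5;

sub fdb_fingerprint {
    my $file = shift;
    my ($size, $mtime) = ( stat $file )[7, 9];
    # A size of -1 flags a non-existent file, matching the fdb convention below:
    return (0, -1, '') unless defined $size;
    open( my $fh, '<', $file ) or return (0, -1, '');
    binmode $fh;
    my $md5 = Digest::MD5->new->addfile($fh)->hexdigest;
    close $fh;
    return ($mtime, $size, $md5);
}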
-# Some situations of this kind are standard in latex generally; others -# occur with particular macro packages or with particular -# postprocessors. -# -# The correct criterion for whether a source is out-of-date is -# therefore NOT that its modification time is later than the -# destination file, but whether the contents of the source file have -# changed since the last successful run. This also handles the case -# that the user undoes some changes to a source file by replacing the -# source file by reverting to an earlier version, which may well have -# an older time stamp. Since a direct comparison of old and new files -# would involve storage and access of a large number of backup files, -# we instead use the md5 signature of the files. (Previous versions -# of latexmk used the backup file method, but restricted to the case -# of .aux and .idx files, sufficient for most, but not all, -# situations.) -# -# We will have a database of (time, size, md5) for the relevant -# files. If the time and size of a file haven't changed, then the file -# is assumed not to have changed; this saves us from having to -# determine its md5 signature, which would involve reading the whole -# file, which is naturally time-consuming, especially if network file -# access to a server is needed, and many files are involved, when most -# of them don't change. It is of course possible to change a file -# without changing its size, but then to adjust its timestamp -# to what it was previously; this requires a certain amount of -# perversity. We can safely assume that if the user edits a file or -# changes its contents, then the file's timestamp changes. The -# interesting case is that the timestamp does change, because the file -# has actually been written to, but that the contents do not change; -# it is for this that we use the md5 signature. However, since -# computing the md5 signature involves reading the whole file, which -# may be large, we should avoid computing it more than necessary. -# -# So we get the following structure: -# -# 1. For each relevant run (latex, pdflatex, each instance of a -# custom dependency) we have a database of the state of the -# source files that were last used by the run. -# 2. On an initial startup, the database for a primary tex file -# is read that was created by a previous run of latex or -# pdflatex, if this exists. -# 3. If the file doesn't exist, then the criterion for -# out-of-dateness for an initial run is that it goes by file -# timestamps, as in previous versions of latexmk, with due -# (dis)regard to those files that are known to be generated by -# latex and re-read on the next run. -# 4. Immediately before a run, the database is updated to -# represent the current conditions of the run's source files. -# 5. After the run, it is determined whether any of the source -# files have changed. This covers both files written by the -# run, which are therefore in a dependency loop, and files that -# the user may have updated during the run. (The last often -# happens when latex takes a long time, for a big document, -# and the user makes edits before latex has finished. This is -# particularly prevalent when latexmk is used with -# preview-continuous mode.) -# 6. In the case of latex or pdflatex, the custom dependencies -# must also be checked and redone if out-of-date. -# 7. If any source files have changed, the run is redone, -# starting at step 1. -# 8. 
There is naturally a limit on the number of reruns, to avoid -# infinite loops from bugs and from pathological or unforeseen -# conditions. -# 9. After the run is done, the run's file database is updated. -# (By hypothesis, the sizes and md5s are correct, if the run -# is successful.) -# 10. To allow reuse of data from previous runs, the file database -# is written to a file after every complete set of passes -# through latex or pdflatex. (Note that there is separate -# information for latex and pdflatex; the necessary -# information won't coincide: Out-of-dateness for the files -# for each program concerns the properties of the files when -# the other program was run, and the set of source files could -# be different, e.g., for graphics files.) -# -# We therefore maintain the following data structures.: -# -# a. For each run (latex, pdflatex, each custom dependency) a -# database is maintained. This is a hash from filenames to a -# reference to an array: [time, size, md5]. The semantics of -# the database is that it represents the state of the source -# files used in the run. During a run it represents the state -# immediately before the run; after a run, with all reruns, it -# represents the state of the files used, modified by having -# the latest timestamps for generated files. -# b. There is a global database for all files, which represents -# the current state. This saves having to recompute the md5 -# signatures of a changed file used in more than one run -# (e.g., latex and pdflatex). -# c. Each of latex and pdflatex has a list of the relevant custom -# dependencies. -# -# In all the following a fdb-hash is a hash of the form: -# filename -> [time, size, md5] -# If a file is found to disappear, its entry is removed from the hash. -# In returns from fdb access routines, a size entry of -1 indicates a -# non-existent file. - - -# List of known rules. Rule types: primary, -# external (calls program), internal (calls routine), cusdep. - -%known_rules = ( 'latex' => 'primary', 'pdflatex' => 'primary', - ); -%primaries = (); # Hash of rules for primary part of make. Keys are - # currently 'latex', 'pdflatex' or both. Value is - # currently irrelevant. Use hash for ease of lookup - # Make remove this later, if use makeB - -# Hashes, whose keys give names of particular kinds of rule. We use -# hashes for ease of lookup. -%possible_one_time = ( 'view' => 1, 'print' => 1, 'update_view' => 1, ); -%requested_filerules = (); # Hash for rules corresponding to requested files. - # The keys are the rulenames and the value is - # currently irrelevant. -%one_time = (); # Hash for requested one-time-only rules, currently - # possible values 'print' and 'view'. - - -%rule_db = (); # Database of all rules: - # Hash: rulename -> [array of rule data] - # Rule data: - # 0: [ cmd_type, ext_cmd, int_cmd, out_of_date-crit, - # source, dest, base, out_of_date, - # out_of_date_user, time_of_last_run ] - # where - # cmd_type is 'primary', 'external' or 'cusdep', - # ext_cmd is string for associated external command - # with substitutions (%D for destination, %S - # for source, %B for base of current rule, - # %R for base of primary tex file, %T for - # texfile name, and %O for options. - # int_cmd specifies any internal command to be - # used to implement the application of the - # rule. If this is present, it overrides - # the external command, and it is the - # responsibility of the perl subroutine - # specified in intcmd to execute the - # external command if this is appropriate. 
- # This variable intcmd is a reference to an array, - # $$intcmd[0] = internal routine - # $$intcmd[1...] = its arguments (if any) - # out_of_date_crit specifies method of determining - # whether a file is out-of-date: - # 0 for never - # 1 for usual: whether there is a source - # file change - # 2 for dest earlier than source - # 3 for method 2 at first run, 1 thereafter - # (used when don't have file data from - # previous run). - # source = name of primary source file, if any - # dest = name of primary destination file, - # if any - # base = base name, if any, of files for - # this rule - # out_of_date = 1 if it has been detected that - # this rule needs to be run - # (typically because a source - # file has changed). - # 0 otherwise - # out_of_date_user is like out_of_date, except - # that the detection of out-of-dateness - # has been made from a change of a - # putative user file, i.e., one that is - # not a generated file (e.g., aux). This - # kind of out-of-dateness should provoke a - # rerun where or not there was an error - # during a run of (pdf)LaTeX. Normally, - # if there is an error, one should wait - # for the user to correct the error. But - # it is possible the error condition is - # already corrected during the run, e.g., - # by the user changing a source file in - # response to an error message. - # time_of_last_run = time that this rule was - # last applied. (In standard units - # from perl, to be directly compared - # with file modification times.) - # changed flags whether special changes have been made - # that require file-existence status to be ignored - # 1: {Hash sourcefile -> [source-file data] } - # Source-file data array: - # 0: time - # 1: size - # 2: md5 - # 3: name of rule to make this file - # 4: whether the file is of the kind made by epstopdf.sty - # during a primary run. It will have been read during - # the run, so that even though the file changes during - # a primary run, there is no need to trigger another - # run because of this. - -%fdb_current = (); # Fdb-hash for all files used. - - -#================================================== -## Read rc files: - -sub read_first_rc_file_in_list { - foreach my $rc_file ( @_ ) { - #print "===Testing for rc file \"$rc_file\" ...\n"; - if ( -e $rc_file ) { - #print "===Reading rc file \"$rc_file\" ...\n"; - process_rc_file( $rc_file ); - return; - } - } -} - -# Read system rc file: -read_first_rc_file_in_list( @rc_system_files ); -# Read user rc file. -read_first_rc_file_in_list( "$ENV{'HOME'}/.latexmkrc" ); -# Read rc file in current directory. -read_first_rc_file_in_list( "latexmkrc", ".latexmkrc" ); - -#================================================== - -#show_array ("BIBINPUTS", @BIBINPUTS); die; - -## Process command line args. 
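# For orientation, a typical invocation and the settings produced by the
# option loop below (values as assigned by that loop; the example file
# name is arbitrary):
#
#   latexmk -pdf -silent -jobname=draft thesis.tex
#     => $pdf_mode = 1; $silent = 1; $jobname = 'draft';
#        @command_line_file_list = ( 'thesis.tex' );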
-@command_line_file_list = (); -$bad_options = 0; - -#print "Command line arguments:\n"; for ($i = 0; $i <= $#ARGV; $i++ ) { print "$i: '$ARGV[$i]'\n"; } - -while ($_ = $ARGV[0]) -{ - # Make -- and - equivalent at beginning of option: - s/^--/-/; - shift; - if (/^-c$/) { $cleanup_mode = 2; $cleanup_only = 1; } - elsif (/^-C$/) { $cleanup_mode = 1; $cleanup_only = 1; } - elsif (/^-CA$/) { $cleanup_mode = 1; $cleanup_fdb = 1; $cleanup_only = 1;} - elsif (/^-CF$/) { $cleanup_fdb = 1; } - elsif (/^-cd$/) { $do_cd = 1; } - elsif (/^-cd-$/) { $do_cd = 0; } - elsif (/^-commands$/) { &print_commands; exit; } - elsif (/^-d$/) { $banner = 1; } - elsif (/^-dependents$/) { $dependents_list = 1; } - elsif (/^-nodependents$/ || /^-dependents-$/) { $dependents_list = 0; } - elsif (/^-dvi$/) { $dvi_mode = 1; } - elsif (/^-dvi-$/) { $dvi_mode = 0; } - elsif (/^-F$/) { $force_include_mode = 1; } - elsif (/^-F-$/) { $force_include_mode = 0; } - elsif (/^-f$/) { $force_mode = 1; } - elsif (/^-f-$/) { $force_mode = 0; } - elsif (/^-g$/) { $go_mode = 1; } - elsif (/^-g-$/) { $go_mode = 0; } - elsif (/^-gg$/) { - $go_mode = 2; $cleanup_mode = 1; $cleanup_fdb = 1; $cleanup_only = 0; - } - elsif ( /^-h$/ || /^-help$/ ) { &print_help; exit;} - elsif (/^-diagnostics/) { $diagnostics = 1; } - elsif (/^-jobname=(.*)$/) { - $jobname = $1; - } - elsif (/^-l$/) { $landscape_mode = 1; } - elsif (/^-new-viewer$/) { - $new_viewer_always = 1; - } - elsif (/^-new-viewer-$/) { - $new_viewer_always = 0; - } - elsif (/^-l-$/) { $landscape_mode = 0; } - elsif (/^-p$/) { $printout_mode = 1; - $preview_continuous_mode = 0; # to avoid conflicts - $preview_mode = 0; - } - elsif (/^-p-$/) { $printout_mode = 0; } - elsif (/^-pdfdvi$/){ $pdf_mode = 3; } - elsif (/^-pdfps$/) { $pdf_mode = 2; } - elsif (/^-pdf$/) { $pdf_mode = 1; } - elsif (/^-pdf-$/) { $pdf_mode = 0; } - elsif (/^-print=(.*)$/) { - $value = $1; - if ( $value =~ /^dvi$|^ps$|^pdf$/ ) { - $print_type = $value; - $printout_mode = 1; - } - else { - &exit_help("$My_name: unknown print type '$value' in option '$_'"); - } - } - elsif (/^-ps$/) { $postscript_mode = 1; } - elsif (/^-ps-$/) { $postscript_mode = 0; } - elsif (/^-pv$/) { $preview_mode = 1; - $preview_continuous_mode = 0; # to avoid conflicts - $printout_mode = 0; - } - elsif (/^-pv-$/) { $preview_mode = 0; } - elsif (/^-pvc$/) { $preview_continuous_mode = 1; - $force_mode = 0; # So that errors do not cause loops - $preview_mode = 0; # to avoid conflicts - $printout_mode = 0; - } - elsif (/^-pvc-$/) { $preview_continuous_mode = 0; } - elsif (/^-silent$/ || /^-quiet$/ ){ $silent = 1; } - elsif (/^-v$/ || /^-version$/) { - print "\n$version_details. Version $version_num\n"; - exit; - } - elsif (/^-verbose$/) { $silent = 0; } - elsif (/^-view=default$/) { $view = "default";} - elsif (/^-view=dvi$/) { $view = "dvi";} - elsif (/^-view=none$/) { $view = "none";} - elsif (/^-view=ps$/) { $view = "ps";} - elsif (/^-view=pdf$/) { $view = "pdf"; } - elsif (/^-e$/) { - if ( $ARGV[0] eq '' ) { - &exit_help( "No code to execute specified after -e switch"); - } - else { - execute_code_string( $ARGV[0] ); - } - shift; - } - elsif (/^-r$/) { - if ( $ARGV[0] eq '' ) { - &exit_help( "No RC file specified after -r switch"); - } - if ( -e $ARGV[0] ) { - process_rc_file( $ARGV[0] ); - } - else { - $! 
= 11; - die "$My_name: RC file [$ARGV[0]] does not exist\n"; - } - shift; - } - elsif (/^-bm$/) { - if ( $ARGV[0] eq '' ) { - &exit_help( "No message specified after -bm switch"); - } - $banner = 1; $banner_message = $ARGV[0]; - shift; - } - elsif (/^-bi$/) { - if ( $ARGV[0] eq '' ) { - &exit_help( "No intensity specified after -bi switch"); - } - $banner_intensity = $ARGV[0]; - shift; - } - elsif (/^-bs$/) { - if ( $ARGV[0] eq '' ) { - &exit_help( "No scale specified after -bs switch"); - } - $banner_scale = $ARGV[0]; - shift; - } - elsif (/^-dF$/) { - if ( $ARGV[0] eq '' ) { - &exit_help( "No dvi filter specified after -dF switch"); - } - $dvi_filter = $ARGV[0]; - shift; - } - elsif (/^-pF$/) { - if ( $ARGV[0] eq '' ) { - &exit_help( "No ps filter specified after -pF switch"); - } - $ps_filter = $ARGV[0]; - shift; - } - elsif (/^-/) { - warn "$My_name: $_ bad option\n"; - $bad_options++; - } - else { - push @command_line_file_list, $_ ; - } -} - -if ( $bad_options > 0 ) { - &exit_help( "Bad options specified" ); -} - -warn "$My_name: This is $version_details, version: $version_num.\n", - "**** Report bugs etc to John Collins . ****\n" - unless $silent; - -# For backward compatibility, convert $texfile_search to @default_files -# Since $texfile_search is initialized to "", a nonzero value indicates -# that an initialization file has set it. -if ( $texfile_search ne "" ) { - @default_files = split / /, "*.tex $texfile_search"; -} - -#printA "A: Command line file list:\n"; -#for ($i = 0; $i <= $#command_line_file_list; $i++ ) { print "$i: '$command_line_file_list[$i]'\n"; } - -#Glob the filenames command line if the script was not invoked under a -# UNIX-like environment. -# Cases: (1) MS/MSwin native Glob -# (OS detected as MSWin32) -# (2) MS/MSwin cygwin Glob [because we do not know whether -# the cmd interpreter is UNIXy (and does glob) or is -# native MS-Win (and does not glob).] -# (OS detected as cygwin) -# (3) UNIX Don't glob (cmd interpreter does it) -# (Currently, I assume this is everything else) -if ( ($^O eq "MSWin32") || ($^O eq "cygwin") ) { - # Preserve ordering of files - @file_list = glob_list1(@command_line_file_list); -#print "A1:File list:\n"; -#for ($i = 0; $i <= $#file_list; $i++ ) { print "$i: '$file_list[$i]'\n"; } -} -else { - @file_list = @command_line_file_list; -#print "A2:File list:\n"; -#for ($i = 0; $i <= $#file_list; $i++ ) { print "$i: '$file_list[$i]'\n"; } -} -@file_list = uniq1( @file_list ); - - -# Check we haven't selected mutually exclusive modes. -# Note that -c overides all other options, but doesn't cause -# an error if they are selected. -if (($printout_mode && ( $preview_mode || $preview_continuous_mode )) - || ( $preview_mode && $preview_continuous_mode )) -{ - # Each of the options -p, -pv, -pvc turns the other off. - # So the only reason to arrive here is an incorrect inititalization - # file, or a bug. - &exit_help( "Conflicting options (print, preview, preview_continuous) selected"); -} - -if ( @command_line_file_list ) { - # At least one file specified on command line (before possible globbing). - if ( !@file_list ) { - &exit_help( "Wildcards in file names didn't match any files"); - } -} -else { - # No files specified on command line, try and find some - # Evaluate in order specified. The user may have some special - # for wanting processing in a particular order, especially - # if there are no wild cards. 
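    # A sketch of the rc-file settings that drive this branch, for the case
    # where latexmk is run with no file arguments (file names illustrative):
    #
    #   @default_files          = ( 'main.tex', 'chapters/*.tex' );
    #   @default_excluded_files = ( 'chapters/notes.tex' );
    #
    # Every match of @default_files that is not also matched by
    # @default_excluded_files ends up in @file_list below.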
- # Preserve ordering of files - my @file_list1 = uniq1( glob_list1(@default_files) ); - my @excluded_file_list = uniq1( glob_list1(@default_excluded_files) ); - # Make hash of excluded files, for easy checking: - my %excl = (); - foreach my $file (@excluded_file_list) { - $excl{$file} = ''; - } - foreach my $file (@file_list1) { - push( @file_list, $file) unless ( exists $excl{$file} ); - } - if ( !@file_list ) { - &exit_help( "No file name specified, and I couldn't find any"); - } -} - -$num_files = $#file_list + 1; -$num_specified = $#command_line_file_list + 1; - -#print "Command line file list:\n"; -#for ($i = 0; $i <= $#command_line_file_list; $i++ ) { print "$i: '$command_line_file_list[$i]'\n"; } -#print "File list:\n"; -#for ($i = 0; $i <= $#file_list; $i++ ) { print "$i: '$file_list[$i]'\n"; } - - -# If selected a preview-continuous mode, make sure exactly one filename was specified -if ($preview_continuous_mode && ($num_files != 1) ) { - if ($num_specified > 1) { - &exit_help( - "Need to specify exactly one filename for ". - "preview-continuous mode\n". - " but $num_specified were specified" - ); - } - elsif ($num_specified == 1) { - &exit_help( - "Need to specify exactly one filename for ". - "preview-continuous mode\n". - " but wildcarding produced $num_files files" - ); - } - else { - &exit_help( - "Need to specify exactly one filename for ". - "preview-continuous mode.\n". - " Since none were specified on the command line, I looked for \n". - " files in '@default_files'.\n". - " But I found $num_files files, not 1." - ); - } -} - -# If selected jobname, can only apply that to one file: -if ( ($jobname ne '') && ($num_files > 1) ) { - &exit_help( - "Need to specify at most one filename if ". - "jobname specified, \n". - " but $num_files were found (after defaults and wildcarding)." - ); -} - - -# Normalize the commands, to have place-holders for source, dest etc: -&fix_cmds; - -# If landscape mode, change dvips processor, and the previewers: -if ( $landscape_mode ) -{ - $dvips = $dvips_landscape; - $dvi_previewer = $dvi_previewer_landscape; - $ps_previewer = $ps_previewer_landscape; -} - -if ( $silent ) { - add_option( \$latex, " $latex_silent_switch" ); - add_option( \$pdflatex, " $pdflatex_silent_switch" ); - add_option( \$bibtex, " $bibtex_silent_switch" ); - add_option( \$makeindex, " $makeindex_silent_switch" ); - add_option( \$dvips, " $dvips_silent_switch" ); -} - -if ( $jobname ne '' ) { - $jobstring = "--jobname=$jobname"; - add_option( \$latex, " $jobstring" ); - add_option( \$pdflatex, " $jobstring" ); -} - -# Which kind of file do we preview? -if ( $view eq "default" ) { - # If default viewer requested, use "highest" of dvi, ps and pdf - # that was requested by user. - # No explicit request means view dvi. - $view = "dvi"; - if ( $postscript_mode ) { $view = "ps"; } - if ( $pdf_mode ) { $view = "pdf"; } -} - -if ( ! 
( $dvi_mode || $pdf_mode || $postscript_mode || $printout_mode) ) { - print "No specific requests made, so default to dvi by latex\n"; - $dvi_mode = 1; -} - -# Set new-style requested rules: -if ( $dvi_mode ) { $requested_filerules{'latex'} = 1; } -if ( $pdf_mode == 1 ) { $requested_filerules{'pdflatex'} = 1; } -elsif ( $pdf_mode == 2 ) { $requested_filerules{'ps2pdf'} = 1; } -elsif ( $pdf_mode == 3 ) { $requested_filerules{'dvipdf'} = 1; } -if ( $postscript_mode ) { $requested_filerules{'dvips'} = 1; } -if ( $printout_mode ) { $one_time{'print'} = 1; } -if ( $preview_continuous_mode || $preview_mode ) { $one_time{'view'} = 1; } -if ( length($dvi_filter) != 0 ) { $requested_filerules{'dvi_filter'} = 1; } -if ( length($ps_filter) != 0 ) { $requested_filerules{'ps_filter'} = 1; } -if ( $banner ) { $requested_filerules{'dvips'} = 1; } - - -%possible_primaries = (); -foreach (&rdb_possible_primaries) { - $possible_primaries{$_} = 1; -} - -#print "POSSIBLE PRIMARIES: "; -#foreach (keys %possible_primaries ) {print "$_, ";} -#print "\n"; - - -if ( $pdf_mode == 2 ) { - # We generate pdf from ps. Make sure we have the correct kind of ps. - add_option( \$dvips, " $dvips_pdf_switch" ); -} - - -# Make convenient forms for lookup. -# Extensions always have period. - -# Convert @generated_exts to a hash for ease of look up, with exts -# preceeded by a '.' -# %generated_exts_all is used in analyzing file changes, to -# distinguish changes in user files from changes in generated files. -%generated_exts_all = (); -foreach (@generated_exts ) { - $generated_exts_all{".$_"} = 1; -} - -$quell_uptodate_msgs = $silent; - # Whether to quell informational messages when files are uptodate - # Will turn off in -pvc mode - -# Process for each file. -# The value of $bibtex_mode set in an initialization file may get -# overridden, during file processing, so save it: -#?? Unneeded now: $save_bibtex_mode = $bibtex_mode; - -$failure_count = 0; -$last_failed = 0; # Flag whether failed on making last file - # This is used for showing suitable error diagnostics -FILE: -foreach $filename ( @file_list ) -{ - # Global variables for making of current file: - $updated = 0; - $failure = 0; # Set nonzero to indicate failure at some point of - # a make. Use value as exit code if I exit. - $failure_msg = ''; # Indicate reason for failure -#?? Unneeded now: $bibtex_mode = $save_bibtex_mode; - - if ( $do_cd ) { - ($filename, $path) = fileparse( $filename ); - warn "$My_name: Changing directory to '$path'\n"; - pushd( $path ); - } - else { - $path = ''; - } - - - ## remove extension from filename if was given. 
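  # For example (following the rules in &find_basename further down, for the
  # "unix" $extension_treatment): given 'thesis' on the command line,
  # $texfile_name becomes 'thesis.tex' and $root_filename 'thesis'; and if
  # -jobname=draft was given, $root_filename is switched to 'draft' just
  # below, so the generated files are draft.aux, draft.log, draft.dvi, etc.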
- if ( &find_basename($filename, $root_filename, $texfile_name) ) - { - if ( $force_mode ) { - warn "$My_name: Could not find file [$texfile_name]\n"; - } - else { - &ifcd_popd; - &exit_msg1( "Could not find file [$texfile_name]", - 11); - } - } - if ($jobname ne '' ) { - $root_filename = $jobname; - } - - # Initialize basic dependency information: - - # For use under error conditions: - @default_includes = ($texfile_name, "$root_filename.aux"); - - $fdb_file = "$root_filename.$fdb_ext"; - - if ($cleanup_fdb) { unlink $fdb_file; } - if ( $cleanup_mode > 0 ) { - my @extra_generated = (); - my @aux_files = (); - rdb_read_generatedB( $fdb_file, \@extra_generated, \@aux_files ); - if ( ($go_mode == 2) && !$silent ) { - warn "$My_name: Removing all generated files\n" unless $silent; - } - if ($diagnostics) { - show_array( "For deletion:\n Extra_generated:", @extra_generated ); - show_array( " Aux files:", @aux_files ); - } - # Add to the generated files, some log file and some backup - # files used in previous versions of latexmk - &cleanup1( 'blg', 'ilg', 'log', 'aux.bak', 'idx.bak', - split(' ',$clean_ext), - @generated_exts - ); - unlink( 'texput.log', @extra_generated, "texput.aux", @aux_files ); - if ( $cleanup_mode == 1 ) { - &cleanup1( 'dvi', 'dviF', 'ps', 'psF', 'pdf', - split(' ', $clean_full_ext) - ); - } - } - if ($cleanup_only) { next FILE; } - - # Initialize file and rule databases. - %rule_list = (); - &rdb_make_rule_list; - &rdb_set_rules(\%rule_list); - - -#??? The following are not needed if use makeB. -# ?? They may be set too early? -# Arrays and hashes for picking out accessible rules. -# Distinguish rules for making files and others - @accessible_all = sort ( &rdb_accessible( keys %requested_filerules, keys %one_time )); - %accessible_filerules = (); - foreach (@accessible_all) { - unless ( /view/ || /print/ ) { $accessible_filerules{$_} = 1; } - } - @accessible_filerules = sort keys %accessible_filerules; - -# show_array ( "=======All rules used", @accessible_all ); -# show_array ( "=======Requested file rules", sort keys %requested_filerules ); -# show_array ( "=======Rules for files", @accessible_filerules ); - - if ( $diagnostics ) { - print "$My_name: Rules after start up for '$texfile_name'\n"; - rdb_show(); - } - - %primaries = (); - foreach (@accessible_all) { - if ( ($_ eq 'latex') || ($_ eq 'pdflatex') ) { $primaries{$_} = 1; } - } - - $have_fdb = 0; - if ( (! -e $fdb_file) && (! -e "$root_filename.aux") ) { - # No aux and no fdb file => set up trivial aux file - # and corresponding fdb_file. Arrange them to provoke one run - # as minimum, but no more if actual aux file is trivial. - # (Useful on big files without cross references.) - &set_trivial_aux_fdb; - } - - if ( -e $fdb_file ) { - $rdb_errors = rdb_read( $fdb_file ); - $have_fdb = ($rdb_errors == 0); - } - if (!$have_fdb) { - # We didn't get a valid set of data on files used in - # previous run. So use filetime criterion for make - # instead of change from previous run, until we have - # done our own make. - rdb_recurseA( [keys %possible_primaries], - sub{ if ( $$Ptest_kind == 1 ) { $$Ptest_kind = 3;} } - ); - if ( -e "$root_filename.log" ) { - rdb_for_some( [keys %possible_primaries], \&rdb_set_from_logB ); - } - } - if ($go_mode) { - # Force everything to be remade. 
- rdb_recurseA( [keys %requested_filerules], sub{$$Pout_of_date=1;} ); - } - - - if ( $diagnostics ) { - print "$My_name: Rules after initialization\n"; - rdb_show(); - } - - #************************************************************ - - if ( $preview_continuous_mode ) { - &make_preview_continuousB; - # Will probably exit by ctrl/C and never arrive here. - next FILE; - } - - -## Handling of failures: -## Variable $failure is set to indicate a failure, with information -## put in $failure_msg. -## These variables should be set to 0 and '' at any point at which it -## should be assumed that no failures have occurred. -## When after a routine is called it is found that $failure is set, then -## processing should normally be aborted, e.g., by return. -## Then there is a cascade of returns back to the outermost level whose -## responsibility is to handle the error. -## Exception: An outer level routine may reset $failure and $failure_msg -## after initial processing, when the error condition may get -## ameliorated later. - #Initialize failure flags now. - $failure = 0; - $failure_msg = ''; - $failure = rdb_makeB( keys %requested_filerules ); - if ($failure > 0) { next FILE;} - rdb_for_some( [keys %one_time], \&rdb_run1 ); -} # end FILE -continue { - if ($dependents_list) { rdb_list(); } - # Handle any errors - if ( $failure > 0 ) { - if ( $failure_msg ) { - #Remove trailing space - $failure_msg =~ s/\s*$//; - warn "$My_name: Did not finish processing file: $failure_msg\n"; - $failure = 1; - } - $failure_count ++; - $last_failed = 1; - } - else { - $last_failed = 0; - } - &ifcd_popd; -} -# If we get here without going through the continue section: -if ( $do_cd && ($#dir_stack > -1) ) { - # Just in case we did an abnormal exit from the loop - warn "$My_name: Potential bug: dir_stack not yet unwound, undoing all directory changes now\n"; - &finish_dir_stack; -} - -if ($failure_count > 0) { - if ( $last_failed <= 0 ) { - # Error occured, but not on last file, so - # user may not have seen error messages - warn "\n------------\n"; - warn "$My_name: Some operations failed.\n"; - } - if ( !$force_mode ) { - warn "$My_name: Use the -f option to force complete processing.\n"; - } - exit 12; -} - - - -# end MAIN PROGRAM -############################################################# - -sub fix_cmds { - # If commands do not have placeholders for %S etc, put them in - foreach ($latex, $pdflatex, $lpr, $lpr_dvi, $lpr_pdf, - $pdf_previewer, $ps_previewer, $ps_previewer_landscape, - $dvi_previewer, $dvi_previewer_landscape, - $kpsewhich - ) { - # Source only - if ( $_ && ! /%/ ) { $_ .= " %O %S"; } - } - foreach ($bibtex) { - # Base only - if ( $_ && ! /%/ ) { $_ .= " %O %B"; } - } - foreach ($dvipdf, $ps2pdf) { - # Source and dest without flag for destination - if ( $_ && ! /%/ ) { $_ .= " %O %S %D"; } - } - foreach ($dvips, $makeindex) { - # Source and dest with -o dest before source - if ( $_ && ! /%/ ) { $_ .= " %O -o %D %S"; } - } - foreach ($dvi_filter, $ps_filter) { - # Source and dest, but as filters - if ( $_ && ! /%/ ) { $_ .= " %O <%S >%D"; } - } -} #END fix_cmds - -############################################################# - -sub add_option { - # Call add_option( \$cmd, $opt ) - # Add option to command - if ( ${$_[0]} !~ /%/ ) { &fix_cmds; } - ${$_[0]} =~ s/%O/$_[1] %O/; -} #END add_option - -############################################################# - -sub rdb_make_rule_list { -# Substitutions: %S = source, %D = dest, %B = this rule's base -# %T = texfile, %R = root = base for latex. 
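    # The same placeholders appear in the user-configurable commands and in
    # custom dependencies; a sketch of typical rc-file usage (the tool names
    # and the fig2eps helper are illustrative, not part of this script):
    #
    #   $dvips = 'dvips %O -o %D %S';   # %S = source, %D = destination, %O = options
    #   add_cus_dep( 'fig', 'eps', 0, 'fig2eps' );
    #   sub fig2eps { return system( "fig2dev -Leps \"$_[0].fig\" \"$_[0].eps\"" ); }
    #
    # The cus-dep subroutine receives the base name, as &do_cusdep below passes it.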
- - # Defaults for dvi, ps, and pdf files - # Use local, not my, so these variables can be referenced - local $dvi_final = "%R.dvi"; - local $ps_final = "%R.ps"; - local $pdf_final = "%R.pdf"; - if ( length($dvi_filter) > 0) { - $dvi_final = "%R.dviF"; - } - if ( length($ps_filter) > 0) { - $ps_final = "%R.psF"; - } - - my $print_file = ''; - my $print_cmd = ''; - if ( $print_type eq 'dvi' ) { - $print_file = $dvi_final; - $print_cmd = $lpr_dvi; - } - elsif ( $print_type eq 'pdf' ) { - $print_file = $pdf_final; - $print_cmd = $lpr_pdf; - } - elsif ( $print_type eq 'ps' ) { - $print_file = $ps_final; - $print_cmd = $lpr; - } - - my $view_file = ''; - my $viewer = ''; - my $viewer_update_method = 0; - my $viewer_update_signal = undef; - my $viewer_update_command = undef; - - if ( ($view eq 'dvi') || ($view eq 'pdf') || ($view eq 'ps') ) { - $view_file = ${$view.'_final'}; - $viewer = ${$view.'_previewer'}; - $viewer_update_method = ${$view.'_update_method'}; - $viewer_update_signal = ${$view.'_update_signal'}; - if (defined ${$view.'_update_command'}) { - $viewer_update_command = ${$view.'_update_command'}; - } - } - # Specification of internal command for viewer update: - my $PA_update = ['do_update_view', $viewer_update_method, $viewer_update_signal, 0, 1]; - -# For test_kind: Use file contents for latex and friends, but file time for the others. -# This is because, especially for dvi file, the contents of the file may contain -# a pointer to a file to be included, not the contents of the file! - %rule_list = ( - 'latex' => [ 'primary', "$latex", '', "%T", "%B.dvi", "%R", 1 ], - 'pdflatex' => [ 'primary', "$pdflatex", '', "%T", "%B.pdf", "%R", 1 ], - 'dvipdf' => [ 'external', "$dvipdf", 'do_viewfile', $dvi_final, "%B.pdf", "%R", 2 ], - 'dvips' => [ 'external', "$dvips", 'do_viewfile', $dvi_final, "%B.ps", "%R", 2 ], - 'dvifilter'=> [ 'external', $dvi_filter, 'do_viewfile', "%B.dvi", "%B.dviF", "%R", 2 ], - 'ps2pdf' => [ 'external', "$ps2pdf", 'do_viewfile', $ps_final, "%B.pdf", "%R", 2 ], - 'psfilter' => [ 'external', $ps_filter, 'do_viewfile', "%B.ps", "%B.psF", "%R", 2 ], - 'print' => [ 'external', "$print_cmd", 'if_source', $print_file, "", "", 2 ], - 'update_view' => [ 'external', $viewer_update_command, $PA_update, - $view_file, "", "", 2 ], - 'view' => [ 'external', "$viewer", 'if_source', $view_file, "", "", 2 ], - ); - %source_list = (); - foreach my $rule (keys %rule_list) { - $source_list{$rule} = []; - my $PAsources = $source_list{$rule}; - my ( $cmd_type, $cmd, $source, $dest, $root ) = @{$rule_list{$rule}}; - if ($source) { - push @$PAsources, [ $rule, $source, '' ]; - } - } - -# Ensure we only have one way to make pdf file, and that it is appropriate: - if ($pdf_mode == 1) { delete $rule_list{'dvipdf'}; delete $rule_list{'ps2pdf'}; } - elsif ($pdf_mode == 2) { delete $rule_list{'dvipdf'}; delete $rule_list{'pdflatex'}; } - else { delete $rule_list{'pdflatex'}; delete $rule_list{'ps2pdf'}; } - -} # END rdb_make_rule_list - -#************************************************************ - -sub rdb_set_rules { - # Call rdb_set_rules( \%rule_list, ...) - # Set up rule database from definitions - - # Map of files to rules that MAKE them: - local %from_rules = (); - %rule_db = (); - - foreach my $Prule_list (@_) { - foreach my $rule ( sort keys %$Prule_list) { - my ( $cmd_type, $ext_cmd, $int_cmd, $source, $dest, $base, $test_kind ) = @{$$Prule_list{$rule}}; - my $needs_making = 0; - # Substitute in the filename variables, since we will use - # those for determining filenames. 
But delay expanding $cmd - # until run time, in case of changes. - foreach ($base, $source, $dest ) { - s/%R/$root_filename/; - } - foreach ($source, $dest ) { - s/%B/$base/; - s/%T/$texfile_name/; - } - # print "$rule: $cmd_type, EC='$ext_cmd', IC='$int_cmd', $test_kind,\n", - # " S='$source', D='$dest', B='$base' $needs_making\n"; - rdb_create_rule( $rule, $cmd_type, $ext_cmd, $int_cmd, $test_kind, - $source, $dest, $base, - $needs_making ); - if ($dest) { $from_rules{$dest} = $rule ; } - } - rdb_for_all( - 0, - sub{ - # my ($base, $path, $ext) = fileparse( $file, '\.[^\.]*' ); - # if ( exists $from_rules{$file} && ! exists $generated_exts_all{$ext} ) { - # # Show how to make this file. But don't worry about generated - # # files. - if ( exists $from_rules{$file} ) { - $$Pfrom_rule = $from_rules{$file}; - } - #?? print "$rule: $file, $$Pfrom_rule\n"; - } - ); - } # End arguments of subroutine - &rdb_make_links; -} # END rdb_set_rules - -#************************************************************ - -sub rdb_make_links { -# ?? Problem if there are multiple rules for getting a file. Notably pdf. -# Which one to choose? - # Create $from_rule if there's a suitable rule. - # Map files to rules: - local %from_rules = (); - rdb_for_all( sub{ if($$Pdest){$from_rules{$$Pdest} = $rule;} } ); -#?? foreach (sort keys %from_rules) {print "D='$_' F='$from_rules{$_}\n";} - rdb_for_all( - 0, - sub{ - if ( exists $from_rules{$file} ) { $$Pfrom_rule = $from_rules{$file}; } -#?? print "$rule: $file, $$Pfrom_rule\n"; - } - ); - rdb_for_all( - 0, - sub{ - if ( exists $from_rules{$file} ) { - $$Pfrom_rule = $from_rules{$file}; - } - if ( $$Pfrom_rule && (! rdb_rule_exists( $$Pfrom_rule ) ) ) { - $$Pfrom_rule = ''; - } -#?? print "$rule: $file, $$Pfrom_rule\n"; - } - ); -} # END rdb_make_links - -#************************************************************ - -sub set_trivial_aux_fdb { - # 1. Write aux file EXACTLY as would be written if the tex file - # had no cross references, etc. I.e., a minimal .aux file. - # 2. Write a corresponding fdb file - # 3. Provoke a run of (pdf)latex (actually of all primaries). - - local $aux_file = "$root_filename.aux"; - open( aux_file, '>', $aux_file ) - or die "Cannot write file '$aux_file'\n"; - print aux_file "\\relax \n"; - close(aux_file); - - foreach my $rule (keys %primaries ) { - rdb_ensure_file( $rule, $texfile_name ); - rdb_ensure_file( $rule, $aux_file ); - rdb_one_rule( $rule, - sub{ $$Pout_of_date = 1; } - ); - } - &rdb_write( $fdb_file ); -} #END set_trivial_aux_fdb - -#************************************************************ -#### Particular actions -#************************************************************ -#************************************************************ - -sub do_cusdep { - # Unconditional application of custom-dependency - # except that rule is not applied if the source file source - # does not exist, and an error is returned if the dest is not made. - # - # Assumes rule context for the custom-dependency, and that my first - # argument is the name of the subroutine to apply - my $func_name = $_[0]; - my $return = 0; - if ( !-e $$Psource ) { - # Source does not exist. Users of this rule will need to turn - # it off when custom dependencies are reset - if ( !$silent ) { -## ??? Was commented out. 1 Sep. 
2008 restored, for cusdep no-file-exists issue - warn "$My_name: In trying to apply custom-dependency rule\n", - " to make '$$Pdest' from '$$Psource'\n", - " the source file has disappeared since the last run\n"; - } - # Treat as successful - } - elsif ( !$func_name ) { - warn "$My_name: Possible misconfiguration or bug:\n", - " In trying to apply custom-dependency rule\n", - " to make '$$Pdest' from '$$Psource'\n", - " the function name is blank.\n"; - } - elsif ( ! defined &$func_name ) { - warn "$My_name: Misconfiguration or bug,", - " in trying to apply custom-dependency rule\n", - " to make '$$Pdest' from '$$Psource'\n", - " function name '$func_name' does not exists.\n"; - } - else { - my $cusdep_ret = &$func_name( $$Pbase ); - if ( defined $cusdep_ret && ($cusdep_ret != 0) ) { - $return = $cusdep_ret; - if ($return) { - warn "Rule '$rule', function '$func_name'\n", - " failed with return code = $return\n"; - } - } - elsif ( !-e $$Pdest ) { - # Destination non-existent, but routine failed to give an error - warn "$My_name: In running custom-dependency rule\n", - " to make '$$Pdest' from '$$Psource'\n", - " function '$func_name' did not make the destination.\n"; - $return = -1; - } - } - return $return; -} # END do_cusdep - -#************************************************************ - -sub do_viewfile { - # Unconditionally make file for viewing, going through temporary file if - # Assumes rule context - - my $return = 0; - my ($base, $path, $ext) = fileparseA( $$Pdest ); - if ( &view_file_via_temporary ) { - my $tmpfile = tempfile1( "${root_filename}_tmp", $ext ); - $return = &rdb_ext_cmd1( '', '', $tmpfile ); - move( $tmpfile, $$Pdest ); - } - else { - $return = &rdb_ext_cmd; - } - return $return; -} #END do_viewfile - -#************************************************************ - -sub do_update_view { - # Update viewer - # Assumes rule context - # Arguments: (method, signal, viewer_process) - - my $return = 0; - - # Although the process is passed as an argument, we'll need to update it. - # So (FUDGE??) bypass the standard interface for the process. - # We might as well do this for all the arguments. - my $viewer_update_method = ${$PAint_cmd}[1]; - my $viewer_update_signal = ${$PAint_cmd}[2]; - my $Pviewer_process = \${$PAint_cmd}[3]; - my $Pneed_to_get_viewer_process = \${$PAint_cmd}[4]; - - if ($viewer_update_method == 2) { - if ($$Pneed_to_get_viewer_process) { - $$Pviewer_process = &find_process_id( $$Psource ); - if ($$Pviewer_process != 0) { - $$Pneed_to_get_viewer_process = 0; - } - } - if ($$Pviewer_process == 0) { - print "$My_name: need to signal viewer for file '$$Psource', but didn't get \n", - " process ID for some reason, e.g., no viewer, bad configuration, bug\n" - if $diagnostics ; - } - elsif ( defined $viewer_update_signal) { - print "$My_name: signalling viewer, process ID $$Pviewer_process\n" - if $diagnostics ; - kill $viewer_update_signal, $$Pviewer_process; - } - else { - warn "$My_name: viewer is supposed to be sent a signal\n", - " but no signal is defined. Misconfiguration or bug?\n"; - $return = 1; - } - } - elsif ($viewer_update_method == 4) { - if (defined $$Pext_cmd) { - $return = &rdb_ext_cmd; - } - else { - warn "$My_name: viewer is supposed to be updated by running a command,\n", - " but no command is defined. Misconfiguration or bug?\n"; - } - } - return $return; -} #END do_update_view - -#************************************************************ - -sub if_source { - # Unconditionally apply rule if source file exists. 
- # Assumes rule context - if ( -e $$Psource ) { - return &rdb_ext_cmd; - } - else { - return -1; - } -} #END if_source - -#************************************************************ -#### Subroutines -#************************************************************ -#************************************************************ - -# Finds the basename of the root file -# Arguments: -# 1 - Filename to breakdown -# 2 - Where to place base file -# 3 - Where to place tex file -# Returns non-zero if tex file does not exist -# -# The rules for determining this depend on the implementation of TeX. -# The variable $extension_treatment determines which rules are used. - -sub find_basename -#?? Need to use kpsewhich, if possible -{ - local($given_name, $base_name, $ext, $path, $tex_name); - $given_name = $_[0]; - if ( "$extension_treatment" eq "miktex_old" ) { - # Miktex v. 1.20d: - # 1. If the filename has an extension, then use it. - # 2. Else append ".tex". - # 3. The basename is obtained from the filename by - # removing the path component, and the extension, if it - # exists. If a filename has a multiple extension, then - # all parts of the extension are removed. - # 4. The names of generated files (log, aux) are obtained by - # appending .log, .aux, etc to the basename. Note that - # these are all in the CURRENT directory, and the drive/path - # part of the originally given filename is ignored. - # - # Thus when the given filename is "\tmp\a.b.c", the tex - # filename is the same, and the basename is "a". - - ($base_name, $path, $ext) = fileparse( $given_name, '\..*' ); - if ( "$ext" eq "") { $tex_name = "$given_name.tex"; } - else { $tex_name = $given_name; } - $_[1] = $base_name; - $_[2] = $tex_name; - } - elsif ( "$extension_treatment" eq "unix" ) { - # unix (at least web2c 7.3.1) => - # 1. If filename.tex exists, use it, - # 2. else if filename exists, use it. - # 3. The base filename is obtained by deleting the path - # component and, if an extension exists, the last - # component of the extension, even if the extension is - # null. (A name ending in "." has a null extension.) - # 4. The names of generated files (log, aux) are obtained by - # appending .log, .aux, etc to the basename. Note that - # these are all in the CURRENT directory, and the drive/path - # part of the originally given filename is ignored. - # - # Thus when the given filename is "/tmp/a.b.c", there are two - # cases: - # a. /tmp/a.b.c.tex exists. Then this is the tex file, - # and the basename is "a.b.c". - # b. /tmp/a.b.c.tex does not exist. Then the tex file is - # "/tmp/a.b.c", and the basename is "a.b". - - if ( -e "$given_name.tex" ) { - $tex_name = "$given_name.tex"; - } - else { - $tex_name = "$given_name"; - } - ($base_name, $path, $ext) = fileparse( $tex_name, '\.[^\.]*' ); - $_[1] = $base_name; - $_[2] = $tex_name; - } - else { - die "$My_name: Incorrect configuration gives \$extension_treatment=", - "'$extension_treatment'\n"; - } - if ($diagnostics) { - print "Given='$given_name', tex='$tex_name', base='$base_name'\n"; - } - return ! -e $tex_name; -} #END find_basename - -#************************************************************ - -sub make_preview_continuousB { - # Version for use with makeB - local @changed = (); - local @disappeared = (); - local @no_dest = (); # Non-existent destination files - local @rules_to_apply = (); - local $failure = 0; - local $runs = 0; - local %rules_applied = (); - local $updated = 0; - - # What to make? 
- my @targets = keys %requested_filerules; - - $quell_uptodate_msgs = 1; - - local $view_file = ''; - rdb_one_rule( 'view', sub{ $view_file = $$Psource; } ); - - if ( ($view eq 'dvi') || ($view eq 'pdf') || ($view eq 'ps') ) { - warn "Viewing $view\n"; - } - elsif ( $view eq 'none' ) { - warn "Not using a previewer\n"; - $view_file = ''; - } - else { - warn "$My_name: BUG: Invalid preview method '$view'\n"; - exit 20; - } - - my $viewer_running = 0; # No viewer known to be running yet - # Get information from update_view rule - local $viewer_update_method = 0; - # Pointers so we can update the following: - local $Pviewer_process = undef; - local $Pneed_to_get_viewer_process = undef; - rdb_one_rule( 'update_view', - sub{ $viewer_update_method = $$PAint_cmd[1]; - $Pviewer_process = \$$PAint_cmd[3]; - $Pneed_to_get_viewer_process = \$$PAint_cmd[4]; - } - ); - # Note that we don't get the previewer process number from the program - # that starts it; that might only be a script to get things set up and the - # actual previewer could be (and sometimes IS) another process. - - if ( ($view_file ne '') && (-e $view_file) && !$new_viewer_always ) { - # Is a viewer already running? - # (We'll save starting up another viewer.) - $$Pviewer_process = &find_process_id( $view_file ); - if ( $$Pviewer_process ) { - warn "$My_name: Previewer is already running\n" - if !$silent; - $viewer_running = 1; - $$Pneed_to_get_viewer_process = 0; - } - } - - # Loop forever, rebuilding .dvi and .ps as necessary. - # Set $first_time to flag first run (to save unnecessary diagnostics) -CHANGE: - for (my $first_time = 1; 1; $first_time = 0 ) { - $updated = 0; - $failure = 0; - $failure_msg = ''; - if ( $MSWin_fudge_break && ($^O eq "MSWin32") ) { - # Fudge under MSWin32 ONLY, to stop perl/latexmk from - # catching ctrl/C and ctrl/break, and let it only reach - # downstream programs. See comments at first definition of - # $MSWin_fudge_break. - $SIG{BREAK} = $SIG{INT} = 'IGNORE'; - } - $failure = rdb_makeB( @targets ); - -## warn "=========Viewer PID = $$Pviewer_process; updated=$updated\n"; - - if ( $MSWin_fudge_break && ($^O eq "MSWin32") ) { - $SIG{BREAK} = $SIG{INT} = 'DEFAULT'; - } - if ( $failure > 0 ) { - if ( !$failure_msg ) { - $failure_msg = 'Failure to make the files correctly'; - } - # There will be files changed during the run that are irrelevant. - # We need to wait for the user to change the files. - # So set the GENERATED files as up-to-date - rdb_for_some( [keys %current_primaries], \&rdb_update_gen_files ); - - $failure_msg =~ s/\s*$//; #Remove trailing space - warn "$My_name: $failure_msg\n", - " ==> You will need to change a source file before I do another run <==\n"; - } - elsif ( ($view_file ne '') && (-e $view_file) && $updated && $viewer_running ) { - # A viewer is running. Explicitly get it to update screen if we have to do it: - rdb_one_rule( 'update_view', \&rdb_run1 ); - } - elsif ( ($view_file ne '') && (-e $view_file) && !$viewer_running ) { - # Start the viewer - if ( !$silent ) { - if ($new_viewer_always) { - warn "$My_name: starting previewer for '$view_file'\n", - "------------\n"; - } - else { - warn "$My_name: I have not found a previewer that ", - "is already running. 
\n", - " So I will start it for '$view_file'\n", - "------------\n"; - } - } - local $retcode = rdb_makeB ( 'view' ); - if ( $retcode != 0 ) { - if ($force_mode) { - warn "$My_name: I could not run previewer\n"; - } - else { - &exit_msg1( "I could not run previewer", $retcode); - } - } - else { - $viewer_running = 1; - $$Pneed_to_get_viewer_process = 1; - } # end analyze result of trying to run viewer - } # end start viewer - if ( $first_time || $updated || $failure ) { - print "\n=== Watching for updated files. Use ctrl/C to stop ...\n"; - } - $waiting = 1; if ($diagnostics) { warn "WAITING\n"; } - WAIT: while (1) { - sleep($sleep_time); - &rdb_clear_change_record; - rdb_recurseA( [@targets], \&rdb_flag_changes_here ); - if ( &rdb_count_changes > 0) { - &rdb_diagnose_changes - unless $silent; -#??? - warn "$My_name: File(s) changed or not used in previous run(s). Remake files.\n"; - last WAIT; - } - # Does this do this job???? - local $new_files = 0; - rdb_for_some( [keys %current_primaries], sub{ $new_files += &rdb_find_new_filesB } ); - if ($new_files > 0) { - warn "$My_name: New file(s) found.\n"; - last WAIT; - } - } # end WAIT: - $waiting = 0; if ($diagnostics) { warn "NOT WAITING\n"; } - } #end infinite_loop CHANGE: -} #END sub make_preview_continuousB - -#************************************************************ - -sub process_rc_file { - # Usage process_rc_file( filename ) - # Run rc_file whose name is given in first argument - # Exit with code 11 if file could not be read. - # (In general this is not QUITE the right error) - # Exit with code 13 if there is a syntax error or other problem. - # ???Should I leave the exiting to the caller (perhaps as an option)? - # But I can always catch it with an eval if necessary. - # That confuses ctrl/C and ctrl/break handling. - my $rc_file = $_[0]; - warn "$My_name: Executing PERL code in file '$rc_file'...\n" - if $diagnostics; - do( $rc_file ); - # The return value from the do is not useful, since it is the value of - # the last expression evaluated, which could be anything. - # The correct test of errors is on the values of $! and $@. - -# This is not entirely correct. On gluon2: -# rc_file does open of file, and $! has error, apparently innocuous -# See ~/proposal/06/latexmkrc-effect - - my $OK = 1; - if ( $! ) { - # Get both numeric error and its string, by forcing numeric and - # string contexts: - my $err_no = $!+0; - my $err_string = "$!"; - warn "$My_name: Initialization file '$rc_file' could not be read,\n", - " or it gave some other problem. Error code \$! = $err_no.\n", - " Error string = '$err_string'\n"; - $! = 256; - $OK = 0; - } - if ( $@ ) { - $! = 256; - # Indent the error message to make it easier to locate - my $indented = prefix( $@, " " ); - $@ = ""; - warn "$My_name: Initialization file '$rc_file' gave an error:\n", - "$indented"; - $OK = 0; - } - if ( ! $OK ) { - die "$My_name: Stopping because of problem with rc file\n"; - } -} #END process_rc_file - -#************************************************************ - -sub execute_code_string { - # Usage execute_code_string( string_of_code ) - # Run the PERL code contained in first argument - # Exit with code 13 if there is a syntax error or other problem. - # ???Should I leave the exiting to the caller (perhaps as an option)? - # But I can always catch it with an eval if necessary. - # That confuses ctrl/C and ctrl/break handling. 
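-    # For illustration only (hypothetical invocation): with a command line
-    # such as
-    #     latexmk -e '$pdf_mode = 1;' thesis.tex
-    # the string '$pdf_mode = 1;' arrives here as $_[0] and is run by the
-    # eval below; any compile-time or run-time error shows up in $@.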
- my $code = $_[0]; - warn "$My_name: Executing initialization code specified by -e:\n", - " '$code'...\n" - if $diagnostics; - eval $code; - # The return value from the eval is not useful, since it is the value of - # the last expression evaluated, which could be anything. - # The correct test of errors is on the values of $! and $@. - - if ( $@ ) { - $! = 256; - my $message = $@; - $@ = ""; - $message =~ s/\s*$//; - die "$My_name: ", - "Stopping because executing following code from command line\n", - " $code\n", - "gave an error:\n", - " $message\n"; - } -} #END execute_code_string - -#************************************************************ - -sub cleanup1 { - # Usage: cleanup1( exts_without_period, ... ) - foreach (@_) { unlink("$root_filename.$_"); } -} #END cleanup1 - -#************************************************************ -#************************************************************ -#************************************************************ - -# Error handling routines, warning routines, help - -#************************************************************ - -sub die_trace { - # Call: die_trace( message ); - &traceback; # argument(s) passed unchanged - die "\n"; -} #END die_trace - -#************************************************************ - -sub traceback { - # Call: &traceback - # or traceback( message, ) - my $msg = shift; - if ($msg) { warn "$msg\n"; } - warn "Traceback:\n"; - my $i=0; # Start with immediate caller - while ( my ($pack, $file, $line, $func) = caller($i++) ) { - if ($func eq 'die_trace') { next; } - warn " $func called from line $line\n"; - } -} #END traceback - -#************************************************************ - -sub exit_msg1 -{ - # exit_msg1( error_message, retcode [, action]) - # 1. display error message - # 2. if action set, then restore aux file - # 3. exit with retcode - warn "\n------------\n"; - warn "$My_name: $_[0].\n"; - warn "-- Use the -f option to force complete processing.\n"; - - my $retcode = $_[1]; - if ($retcode >= 256) { - # Retcode is the kind returned by system from an external command - # which is 256 * command's_retcode - $retcode /= 256; - } - exit $retcode; -} #END exit_msg1 - -#************************************************************ - -sub warn_running { - # Message about running program: - if ( $silent ) { - warn "$My_name: @_\n"; - } - else { - warn "------------\n@_\n------------\n"; - } -} #END warn_running - -#************************************************************ - -sub exit_help -# Exit giving diagnostic from arguments and how to get help. 
-{ - warn "\n$My_name: @_\n", - "Use\n", - " $my_name -help\nto get usage information\n"; - exit 10; -} #END exit_help - - -#************************************************************ - -sub print_help -{ - print - "$My_name $version_num: Automatic LaTeX document generation routine\n\n", - "Usage: $my_name [latexmk_options] [filename ...]\n\n", - " Latexmk_options:\n", - " -bm - Print message across the page when converting to postscript\n", - " -bi - Set contrast or intensity of banner\n", - " -bs - Set scale for banner\n", - " -commands - list commands used by $my_name for processing files\n", - " -c - clean up (remove) all nonessential files, except\n", - " dvi, ps and pdf files.\n", - " This and the other clean-ups are instead of a regular make.\n", - " -C - clean up (remove) all nonessential files\n", - " including aux, dep, dvi, postscript and pdf files\n", - " But exclude file of database of file information\n", - " -CA - clean up (remove) absolutely ALL nonessential files\n", - " including aux, dep, dvi, postscript and pdf files,\n", - " and file of database of file information\n", - " -CF - Remove file of database of file information before doing \n", - " other actions\n", - " -cd - Change to directory of source file when processing it\n", - " -cd- - Do NOT change to directory of source file when processing it\n", - " -dependents - Show list of dependent files after processing\n", - " -dependents- - Do not show list of dependent files after processing\n", - " -dF - Filter to apply to dvi file\n", - " -dvi - generate dvi\n", - " -dvi- - turn off required dvi\n", - " -e - Execute specified PERL code\n", - " -f - force continued processing past errors\n", - " -f- - turn off forced continuing processing past errors\n", - " -F - Ignore non-existent files when testing for dependencies\n", - " -F- - Turn off -F\n", - " -gg - Super go mode: clean out generated files (-CA), and then\n", - " process files regardless of file timestamps\n", - " -g - process regardless of file timestamps\n", - " -g- - Turn off -g\n", - " -h - print help\n", - " -help - print help\n", - " -jobname=STRING - set basename of output file(s) to STRING.\n", - " (Like --jobname=STRING on command line for many current\n", - " implementations of latex/pdflatex.)\n", - " -l - force landscape mode\n", - " -l- - turn off -l\n", - " -new-viewer - in -pvc mode, always start a new viewer\n", - " -new-viewer- - in -pvc mode, start a new viewer only if needed\n", - " -nodependents - Do not show list of dependent files after processing\n", - " -pdf - generate pdf by pdflatex\n", - " -pdfdvi - generate pdf by dvipdf\n", - " -pdfps - generate pdf by ps2pdf\n", - " -pdf- - turn off pdf\n", - " -ps - generate postscript\n", - " -ps- - turn off postscript\n", - " -pF - Filter to apply to postscript file\n", - " -p - print document after generating postscript.\n", - " (Can also .dvi or .pdf files -- see documentation)\n", - " -print=dvi - when file is to be printed, print the dvi file\n", - " -print=ps - when file is to be printed, print the ps file (default)\n", - " -print=pdf - when file is to be printed, print the pdf file\n", - " -pv - preview document. (Side effect turn off continuous preview)\n", - " -pv- - turn off preview mode\n", - " -pvc - preview document and continuously update. 
(This also turns\n", - " on force mode, so errors do not cause $my_name to stop.)\n", - " (Side effect: turn off ordinary preview mode.)\n", - " -pvc- - turn off -pvc\n", - " -r - Read custom RC file\n", - " -silent - silence progress messages from called programs\n", - " -v - display program version\n", - " -verbose - display usual progress messages from called programs\n", - " -version - display program version\n", - " -view=default - viewer is default (dvi, ps, pdf)\n", - " -view=dvi - viewer is for dvi\n", - " -view=none - no viewer is used\n", - " -view=ps - viewer is for ps\n", - " -view=pdf - viewer is for pdf\n", - " filename = the root filename of LaTeX document\n", - "\n", - "-p, -pv and -pvc are mutually exclusive\n", - "-h, -c and -C overides all other options.\n", - "-pv and -pvc require one and only one filename specified\n", - "All options can be introduced by '-' or '--'. (E.g., --help or -help.)\n", - "Contents of RC file specified by -r overrides options specified\n", - " before the -r option on the command line\n"; - -} #END print_help - -#************************************************************ -sub print_commands -{ - warn "Commands used by $my_name:\n", - " To run latex, I use \"$latex\"\n", - " To run pdflatex, I use \"$pdflatex\"\n", - " To run bibtex, I use \"$bibtex\"\n", - " To run makeindex, I use \"$makeindex\"\n", - " To make a ps file from a dvi file, I use \"$dvips\"\n", - " To make a ps file from a dvi file with landscape format, ", - "I use \"$dvips_landscape\"\n", - " To make a pdf file from a dvi file, I use \"$dvipdf\"\n", - " To make a pdf file from a ps file, I use \"$ps2pdf\"\n", - " To view a pdf file, I use \"$pdf_previewer\"\n", - " To view a ps file, I use \"$ps_previewer\"\n", - " To view a ps file in landscape format, ", - "I use \"$ps_previewer_landscape\"\n", - " To view a dvi file, I use \"$dvi_previewer\"\n", - " To view a dvi file in landscape format, ", - "I use \"$dvi_previewer_landscape\"\n", - " To print a ps file, I use \"$lpr\"\n", - " To print a dvi file, I use \"$lpr_dvi\"\n", - " To print a pdf file, I use \"$lpr_pdf\"\n", - " To find running processes, I use \"$pscmd\", \n", - " and the process number is at position $pid_position\n"; - warn "Notes:\n", - " Command starting with \"start\" is run detached\n", - " Command that is just \"start\" without any other command, is\n", - " used under MS-Windows to run the command the operating system\n", - " has associated with the relevant file.\n", - " Command starting with \"NONE\" is not used at all\n"; -} #END print_commands - -#************************************************************ - -sub view_file_via_temporary { - return $always_view_file_via_temporary - || ($pvc_view_file_via_temporary && $preview_continuous_mode); -} #END view_file_via_temporary - -#************************************************************ -#### Tex-related utilities - - -sub check_bibtex_log { - # Check for bibtex warnings: - # Usage: check_bibtex_log( base_of_bibtex_run ) - # return 0: OK, 1: bibtex warnings, 2: bibtex errors, - # 3: could not open .blg file. 
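-    # For illustration: the loop below simply greps the .blg file, so lines
-    # of the kind bibtex typically writes (shown here only as examples)
-    #     Warning--empty year in somekey
-    #     (There was 1 error message)
-    # set $have_warning and $have_error respectively, giving return values
-    # 1 and 2 to the caller.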
- - my $base = $_[0]; - my $log_name = "$base.blg"; - my $log_file = new FileHandle; - open( $log_file, "<$log_name" ) - or return 3; - my $have_warning = 0; - my $have_error = 0; - while (<$log_file>) { - if (/Warning--/) { - #print "Bibtex warning: $_"; - $have_warning = 1; - } - if (/error message/) { - #print "Bibtex error: $_"; - $have_error = 1; - } - } - close $log_file; - if ($have_error) {return 2;} - if ($have_warning) {return 1;} - return 0; -} #END check_bibtex_log - -#************************************************** - -sub clean_file_name{ - # Convert filename found in log file to true filename. - # Used normally only by parse_logB, below - # 1. For names of form - # `"string".ext', which arises e.g., from \jobname.bbl: - # when the base filename contains spaces, \jobname has quotes. - # and from \includegraphics with basename specified. - # 2. Or "string.ext" from \includegraphcs with basename and ext specified. - my $filename = $_[0]; - $filename =~ s/^\"([^\"]*)\"(.*)$/$1$2/; - return $filename; -} -# ------------------------------ - -sub parse_logB { -# Scan log file for: dependent files -# reference_changed, bad_reference, bad_citation -# Return value: 1 if success, 0 if no log file. -# Set global variables: -# %dependents: maps definite dependents to code: -# 0 = from missing-file line -# May have no extension -# May be missing path -# 1 = from 'File: ... Graphic file (type ...)' line -# no path. Should exist, but may need a search, by kpsewhich. -# 2 = from regular '(...' coding for input file, -# Has NO path, which it would do if LaTeX file -# Highly likely to be mis-parsed line -# 3 = ditto, but has a path character ('/'). -# Should be LaTeX file that exists. -# If it doesn't exist, we have probably a mis-parsed line. -# There's no need to do a search. -# 4 = definitive, which in this subroutine is only -# done for default dependents -# Treat the following specially, since they have special rules -# @bbl_files to list of .bbl files. -# %idx_files to map from .idx files to .ind files. -# Also set -# $reference_changed, $bad_reference, $bad_citation -# Trivial or default values if log file does not exist/cannot be opened - -# Give a quick way of looking up custom-dependency extensions - my %cusdep_from = (); - my %cusdep_to = (); - foreach ( @cus_dep_list ) { - my ($fromext, $toext) = split; - $cusdep_from{$fromext} = $cusdep_from{".$fromext"} = $_; - $cusdep_to{$toext} = $cusdep_to{".$toext"} = $_; - } -# print "==== Cusdep from-exts:"; foreach (keys %cusdep_from) {print " '$_'";} print "\n"; -# print "==== Cusdep to-exts:"; foreach (keys %cusdep_to) {print " '$_'";} print "\n"; - - # Returned info: - %dependents = (); - foreach (@default_includes) { $dependents{$_} = 4; } - @bbl_files = (); - %idx_files = (); # Maps idx_file to (ind_file, base) - - $reference_changed = 0; - $bad_reference = 0; - $bad_citation = 0; - - my $log_name = "$root_filename.log"; - my $log_file = new FileHandle; - if ( ! open( $log_file, "<$log_name" ) ) { - return 0; - } - -LINE: - while(<$log_file>) { - # Could use chomp here, but that fails if there is a mismatch - # between the end-of-line sequence used by latex and that - # used by perl. (Notably a problem with MSWin latex and - # cygwin perl!) - s/[\n\r]*$//; - if ( $. == 1 ){ - if ( /^This is / ) { - # First line OK - next LINE; - } else { - warn "$My_name: Error on first line of '$log_name'. ". 
- "This is apparently not a TeX log file.\n"; - close $log_file; - $failure = 1; - $failure_msg = "Log file '$log_name' appears to have wrong format."; - return 0; - } - } - # Handle wrapped lines: - # They are lines brutally broken at exactly $log_wrap chars - # excluding line-end. - my $len = length($_); - while ($len == $log_wrap) { - my $extra = <$log_file>; - $extra =~ s/[\n\r]*$//; - $len = length($extra); - $_ .= $extra; - } - # Check for changed references, bad references and bad citations: - if (/Rerun to get/) { - warn "$My_name: References changed.\n"; - $reference_changed = 1; - } - if (/LaTeX Warning: (Reference[^\001]*undefined)./) { - warn "$My_name: $1 \n"; - $bad_reference = 1; - } - if (/LaTeX Warning: (Citation[^\001]*undefined)./) { - warn "$My_name: $1 \n"; - $bad_citation = 1; - } - if ( /^Document Class: / ) { - # Class sign-on line - next LINE; - } - if ( /^\(Font\)/ ) { - # Font info line - next LINE; - } - if ( /^Output written on / ) { - # Latex message - next LINE; - } - if ( /^Overfull / - || /^Underfull / - || /^or enter new name\. \(Default extension: .*\)/ - || /^\*\*\* \(cannot \\read from terminal in nonstop modes\)/ - ) { - # Latex error/warning, etc. - next LINE; - } -# Test for writing of index file. The precise format of the message -# depends on which package (makeidx.sty , multind.sty or index.sty) and -# which version writes the message. - if ( /Writing index file (.*)$/ ) { - my $idx_file = ''; - if ( /^Writing index file (.*)$/ ) { - # From makeidx.sty or multind.sty - $idx_file = $1; - } - elsif ( /^index\.sty> Writing index file (.*)$/ ) { - # From old versions of index.sty - $idx_file = $1; - } - elsif ( /^Package \S* Info: Writing index file (.*) on input line/ ) { - # From new versions of index.sty - $idx_file = $1; - } - else { - warn "$My_name: Message indicates index file was written\n", - " ==> but I do not know how to understand it: <==\n", - " '$_'\n"; - next LINE; - } - # Typically, there is trailing space, not part of filename: - $idx_file =~ s/\s*$//; - $idx_file = clean_file_name($idx_file); - my ($idx_base, $idx_path, $idx_ext) = fileparseA( $idx_file ); - $idx_base = $idx_path.$idx_base; - $idx_file = $idx_base.$idx_ext; - if ( $idx_ext eq '.idx' ) { - warn "$My_name: Index file '$idx_file' was written\n" - unless $silent; - $idx_files{$idx_file} = [ "$idx_base.ind", $idx_base ]; - } - elsif ( exists $cusdep_from{$idx_ext} ) { - if ( !$silent ) { - warn "$My_name: Index file '$idx_file' was written\n"; - warn " Cusdep '$cusdep_from{$idx_ext}' should be used\n"; - } - # No action needed here - } - else { - warn "$My_name: Index file '$idx_file' written\n", - " ==> but it has an extension I do not know how to handle <==\n"; - } - - next LINE; - } - if ( /^No file (.*?\.bbl)./ ) { - # Notice that the - my $bbl_file = clean_file_name($1); - warn "$My_name: Non-existent bbl file '$bbl_file'\n $_\n"; - $dependents{$bbl_file} = 0; - push @bbl_files, $bbl_file; - next LINE; - } - foreach my $pattern (@file_not_found) { - if ( /$pattern/ ) { - my $file = clean_file_name($1); - warn "$My_name: Missing input file: '$file' from line\n '$_'\n" - unless $silent; - $dependents{$file} = 0; - next LINE; - } - } - if ( /^File: ([^\s\[]*) Graphic file \(type / ) { - # First line of message from includegraphics/x - $dependents{$1} = 1; - next LINE; - } - # Now test for generic lines to ignore, only after special cases! - if ( /^File: / ) { - # Package sign-on line. 
Includegraphics/x also produces a line - # with this signature, but I've already handled it. - next LINE; - } - if ( /^Package: / ) { - # Package sign-on line - next LINE; - } - if (/^\! LaTeX Error: / ) { - next LINE; - } - if (/^No pages of output\./) { - warn "$My_name: Log file says no output from latex\n" - unless $silent; - next LINE; - } - INCLUDE_CANDIDATE: - while ( /\((.*$)/ ) { - # Filename found by - # '(', then filename, then terminator. - # Terminators: obvious candidates: ')': end of reading file - # '(': beginning of next file - # ' ': space is an obvious separator - # ' [': start of page: latex - # and pdflatex put a - # space before the '[' - # '[': start of config file - # in pdflatex, after - # basefilename. - # '{': some kind of grouping - # Problem: - # All or almost all special characters are allowed in - # filenames under some OS, notably UNIX. Luckily most cases - # are rare, if only because the special characters need - # escaping. BUT 2 important cases are characters that are - # natural punctuation - # Under MSWin, spaces are common (e.g., "C:\Program Files") - # Under VAX/VMS, '[' delimits directory names. This is - # tricky to handle. But I think few users use this OS - # anymore. - # - # Solution: use ' [', but not '[' as first try at delimiter. - # Then if candidate filename is of form 'name1[name2]', then - # try splitting it. If 'name1' and/or 'name2' exists, put - # it/them in list, else just put 'name1[name2]' in list. - # So form of filename is now: - # '(', - # then any number of characters that are NOT ')', '(', or '{' - # (these form the filename); - # then ' [', or ' (', or ')', or end-of-string. - # That fails for pdflatex - # In log file: - # '(' => start of reading of file, followed by filename - # ')' => end of reading of file - # '[' => start of page (normally preceeded by space) - # Remember: - # filename (on VAX/VMS) may include '[' and ']' (directory - # separators) - # filenames (on MS-Win) commonly include space. - - # First step: replace $_ by whole of line after the '(' - # Thus $_ is putative filename followed by other stuff. - $_ = $1; - if ( /^([^\(^\)^\{]*?)\s\[/ ) { - # Terminator: space then '[' - # Use *? in condition: to pick up first ' [' as terminator - # 'file [' should give good filename. - } - elsif ( /^([^\(^\)^\{]*)\s(?=\()/ ) { - # Terminator is ' (', but '(' isn't in matched string, - # so we keep the '(' ready for the next match - } - elsif ( /^([^\(^\)^\{]*)(\))/ ) { - # Terminator is ')' - } - elsif ( /^([^\(^\)^\{]*?)\s*\{/ ) { - # Terminator: arbitrary space then '{' - # Use *? in condition: to pick up first ' [' as terminator - # 'file [' should give good filename. - } - else { - #Terminator is end-of-string - } - $_ = $'; # Put $_ equal to the unmatched tail of string ' - my $include_candidate = $1; - $include_candidate =~ s/\s*$//; # Remove trailing space. - if ( $include_candidate eq "[]" ) { - # Part of overfull hbox message - next INCLUDE_CANDIDATE; - } - if ( $include_candidate =~ /^\\/ ) { - # Part of font message - next INCLUDE_CANDIDATE; - } - # Make list of new include files; sometimes more than one. - my @new_includes = ($include_candidate); - if ( $include_candidate =~ /^(.+)\[([^\]]+)\]$/ ) { - # Construct of form 'file1[file2]', as produced by pdflatex - if ( -e $1 ) { - # If the first component exists, we probably have the - # pdflatex form - @new_includes = ($1, $2); - } - else { - # We have something else. 
- # So leave the original candidate in the list - } - } - INCLUDE_NAME: - foreach my $include_name (@new_includes) { - my ($base, $path, $ext) = fileparseB( $include_name ); - if ( ($path eq './') || ($path eq '.\\') ) { - $include_name = $base.$ext; - } - if ( $include_name !~ m'[/|\\]' ) { - # Filename does not include a path character - # High potential for misparsed line - $dependents{$include_name} = 2; - } else { - $dependents{$include_name} = 3; - } - if ( $ext eq '.bbl' ) { - warn "$My_name: Found input bbl file '$include_name'\n" - unless $silent; - push @bbl_files, $include_name; - } - } # INCLUDE_NAME - } # INCLUDE_CANDIDATE - } # LINE - close($log_file); - - # Default includes are always definitive: - foreach (@default_includes) { $dependents{$_} = 4; } - - ###print "New parse: \n"; - ###foreach (sort keys %dependents) { print " '$_': $dependents{$_}\n"; } - - my @misparsed = (); - my @missing = (); - my @not_found = (); -CANDIDATE: - foreach my $candidate (keys %dependents) { - my $code = $dependents{$candidate}; - if ( -e $candidate ) { - $dependents{$candidate} = 4; - } - elsif ($code == 1) { - # Graphics file that is supposed to have been read. - # Candidate name is as given in source file, not as path - # to actual file. - # We have already tested that file doesn't exist, as given. - # so use kpsewhich. - # If the file still is not found, assume non-existent; - my @kpse_result = kpsewhich( $candidate ); - if ($#kpse_result > -1) { - $dependents{$kpse_result[0]} = 4; - delete $dependents{$candidate}; - next CANDIDATE; - } - else { - push @not_found, $candidate; - } - } - elsif ($code == 2) { - # Candidate is from '(...' construct in log file, for input file - # which should include pathname if valid input file. - # Name does not have pathname-characteristic character (hence - # $code==2. - # Candidate file does not exist with given name - # Almost surely result of a misparsed line in log file. 
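-        # (Example of the kind of line that ends up here: log text such as
-        #  "(see the transcript file for additional information)" is picked
-        #  up by the '(...' scan above and yields a "filename" that has no
-        #  path separator and does not exist on disk.)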
- delete $dependents{$candidate}; - push @misparse, $candidate; - } - elsif ($code == 0) { - my ($base, $path, $ext) = fileparseA($candidate); - $ext =~ s/^\.//; - if ( ($ext eq '') && (-e "$path$base.tex") ) { - $dependents{"$path$base.tex"} = 4; - delete $dependents{$candidate}; - } - push @missing, $candidate; - } - } - - - if ( $diagnostics ) { - @misparse = uniqs( @misparse ); - @missing = uniqs( @missing ); - @not_found = uniqs( @not_found ); - my @dependents = sort( keys %dependents ); - - my $dependents = $#dependents + 1; - my $misparse = $#misparse + 1; - my $missing = $#missing + 1; - my $not_found = $#not_found + 1; - my $exist = $dependents - $not_found - $missing; - my $bbl = $#bbl_files + 1; - - print "$dependents dependent files detected, of which ", - "$exist exist, $not_found were not found,\n", - " and $missing appear not to exist.\n"; - print "Dependents:\n"; - foreach (@dependents) { print " $_\n"; } - if ($not_found > 0) { - print "Not found:\n"; - foreach (@not_found) { print " $_\n"; } - } - if ($missing > 0) { - print "Not existent:\n"; - foreach (@missing) { print " $_\n"; } - } - if ( $bbl > 0 ) { - print "Input bbl files:\n"; - foreach (@bbl_files) { print " $_\n"; } - } - - if ( $misparse > 0 ) { - print "$misparse\n"; - print "Apparent input files appearently from misunderstood lines in .log file:\n"; - foreach ( @misparse ) { print " $_\n"; } - } - } - - return 1; -} #END parse_logB - -#************************************************************ - -sub parse_aux { - #Usage: parse_aux( $aux_file, \@new_bib_files, \@new_aux_files ) - # Parse aux_file (recursively) for bib files. - # If can't open aux file, then - # Return 0 and leave @new_bib_files empty - # Else set @new_bib_files from information in the aux files - # And: - # Return 1 if no problems - # Return 2 with @new_bib_files empty if there are no \bibdata - # lines. - # Return 3 if I couldn't locate all the bib_files - # Set @new_aux_files to aux files parsed - - my $aux_file = $_[0]; - local $Pbib_files = $_[1]; - local $Paux_files = $_[2]; - - @$Pbib_files = (); - @$Paux_files = (); - - parse_aux1( $aux_file ); - if ($#{$Paux_files} < 0) { - return 0; - } - @$Pbib_files = uniqs( @$Pbib_files ); - - if ( $#{$Pbib_files} == -1 ) { - warn "$My_name: No .bib files listed in .aux file '$aux_file' \n", - return 2; - } - my $bibret = &find_file_list1( $Pbib_files, $Pbib_files, - '.bib', \@BIBINPUTS ); - @$Pbib_files = uniqs( @$Pbib_files ); - if ($bibret == 0) { - warn "$My_name: Found bibliography file(s) [@$Pbib_files]\n" - unless $silent; - } - else { - warn "$My_name: Failed to find one or more bibliography files ", - "in [@$Pbib_files]\n"; - if ($force_mode) { - warn "==== Force_mode is on, so I will continue. ", - "But there may be problems ===\n"; - } - else { - #$failure = -1; - #$failure_msg = 'Failed to find one or more bib files'; - #warn "$My_name: Failed to find one or more bib files\n"; - } - return 3; - } - return 1; -} #END parse_aux - -#************************************************************ - -sub parse_aux1 -# Parse single aux file for bib files. -# Usage: &parse_aux1( aux_file_name ) -# Append newly found bib_filenames in @$Pbib_files, already -# initialized/in use. -# Append aux_file_name to @$Paux_files if aux file opened -# Recursively check \@input aux files -# Return 1 if success in opening $aux_file_name and parsing it -# Return 0 if fail to open it -{ - my $aux_file = $_[0]; - my $aux_fh = new FileHandle; - if (! 
open($aux_fh, $aux_file) ) { - warn "$My_name: Couldn't find aux file '$aux_file'\n"; - return 0; - } - push @$Paux_files, $aux_file; -AUX_LINE: - while (<$aux_fh>) { - if ( /^\\bibdata\{(.*)\}/ ) { - # \\bibdata{comma_separated_list_of_bib_file_names} - # (Without the '.bib' extension) - push( @$Pbib_files, split /,/, $1 ); - } - elsif ( /^\\\@input\{(.*)\}/ ) { - # \\@input{next_aux_file_name} - &parse_aux1( $1 ); - } - } - close($aux_fh); - return 1; -} #END parse_aux1 - -#************************************************************ - -#************************************************************ -#************************************************************ -#************************************************************ - -# Manipulations of main file database: - -#************************************************************ - -sub fdb_get { - # Call: fdb_get(filename) - # Returns an array (time, size, md5) for the current state of the - # named file. - # For non-existent file, deletes entry in fdb_current, and returns (0,-1,0) - my $file = shift; - my ($new_time, $new_size) = get_time_size($file); - my @nofile = (0,-1,0); # What we use for initializing - # a new entry in fdb or flagging - # non-existent file - if ( $new_size < 0 ) { - delete $fdb_current{$file}; - return @nofile; - } - my $recalculate_md5 = 0; - if ( ! exists $fdb_current{$file} ) { - # Ensure we have a record. - $fdb_current{$file} = [@nofile]; - $recalculate_md5 = 1; - } - my $file_data = $fdb_current{$file}; - my ( $time, $size, $md5 ) = @$file_data; - if ( ($new_time != $time) || ($new_size != $size) ) { - # Only force recalculation of md5 if time or size changed - # Else we assume file is really unchanged. - $recalculate_md5 = 1; - } - if ($recalculate_md5) { - @$file_data = ( $new_time, $new_size, get_checksum_md5( $file ) ); - } - return @$file_data;; -} #END fdb_get - -#************************************************************ - -sub fdb_show { - # Displays contents of fdb - foreach my $file ( sort keys %fdb_current ) { - print "'$file': @{$fdb_current{$file}}\n"; - } -} #END fdb_show - -#************************************************************ -#************************************************************ -#************************************************************ - -# Routines for manipulating rule database - -#************************************************************ - -sub rdb_read { - # Call: rdb_read( $in_name ) - # Sets rule database from saved file, in format written by rdb_write. - # Returns -1 if file could not be read else number of errors. - # Thus return value on success is 0 - my $in_name = $_[0]; - - my $in_handle = new FileHandle; - $in_handle->open( $in_name, '<' ) - or return (); - my $errors = 0; - my $state = 0; # Outside a section - my $rule = ''; - my $run_time = 0; - my $source = ''; - my $dest = ''; - my $base = ''; - local %new_sources = (); # Hash: rule => { file=>[ time, size, md5, fromrule ] } - my $new_source = undef; # Reference to hash of sources for current rule -LINE: - while ( <$in_handle> ) { - # Remove leading and trailing white space. - s/^\s*//; - s/\s*$//; - # Ignore blank lines and comments - if ( /^$/ || /^#/ || /^%/ ) { next LINE;} - if ( /^\[\"([^\"]+)\"\]/ ) { - # Start of section - $rule = $1; -#?? 
print "--- Starting rule '$rule'\n"; - my $tail = $'; #' Single quote in comment tricks the parser in - # emacs from misparsing an isolated single quote - $run_time = 0; - $source = $dest = $base = ''; - if ( $tail =~ /^\s*(\S+)\s*$/ ) { - $run_time = $1; - } - elsif ( $tail =~ /^\s*(\S+)\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s*$/ ) { - $run_time = $1; - $source = $2; - $dest = $3; - $base = $4; - } - if ( rdb_rule_exists( $rule ) ) { - rdb_one_rule( $rule, - sub{ $$Ptest_kind = 1; - $$Prun_time = $run_time; - #??if ($source) { $$Psource = $source; } - #??if ($dest) { $$Pdest = $dest; } - #??if ($base) { $$Pbase = $base; } - } - ); - } - elsif ($rule =~ /^cusdep\s+(\S+)\s+(\S+)\s+(.+)$/ ) { - # Create custom dependency - my $fromext = $1; - my $toext = $2; - my $base = $3; - $source = "$base.$fromext"; - $dest = "$base.$toext"; - my $PAnew_cmd = ['do_cusdep', '']; - foreach my $dep ( @cus_dep_list ) { - my ($tryfromext,$trytoext,$must,$func_name) = split(' ',$dep); - if ( ($tryfromext eq $fromext) && ($trytoext eq $toext) ) { - $$PAnew_cmd[1] = $func_name; - } - } - rdb_create_rule( $rule, 'cusdep', '', $PAnew_cmd, 1, - $source, $dest, $base, 0, $run_time ); - } - elsif ( $rule =~ /^(makeindex|bibtex)\s*(.*)$/ ) { - my $rule_generic = $1; - if ( ! $source ) { - # If fdb_file was old-style (v. 1) - $source = $2; - my $path = ''; - my $ext = ''; - ($base, $path, $ext) = fileparseA( $source ); - $base = $path.$base; - if ($rule_generic eq 'makeindex') { - $dest = "$base.ind"; - } - elsif ($rule_generic eq 'bibtex') { - $dest = "$base.bbl"; - $source = "$base.aux"; - } - } - warn "$My_name: File-database '$in_name': setting rule '$rule'\n" - if $diagnostics; - my $cmd_type = 'external'; - my $ext_cmd = ${$rule_generic}; - warn " Rule kind = '$rule_generic'; ext_cmd = '$ext_cmd';\n", - " source = '$source'; dest = '$dest'; base = '$base';\n" - if $diagnostics; - rdb_create_rule( $rule, $cmd_type, $ext_cmd, '', 1, - $source, $dest, $base, 0, $run_time); - } - else { - warn "$My_name: In file-database '$in_name' rule '$rule'\n", - " is not in use in this session\n" - if $diagnostics; - $new_source = undef; - $state = 3; - next LINE; - } - $new_source = $new_sources{$rule} = {}; - $state = 1; #Reading a section - } - elsif ( /^\"([^\"]*)\"\s+(\S+)\s+(\S+)\s+(\S+)\s+\"([^\"]*)\"/ ) { - # Source file line - if ($state == 3) { - # The rule is not being currently used. - next LINE; - } - my $file = $1; - my $time = $2; - my $size = $3; - my $md5 = $4; - my $from_rule = $5; -#?? print " --- File '$file'\n"; - if ($state != 1) { - warn "$My_name: In file-database '$in_name' ", - "line $. is outside a section:\n '$_'\n"; - $errors++; - next LINE; - } - rdb_ensure_file( $rule, $file ); - rdb_set_file1( $rule, $file, $time, $size, $md5 ); - # Save the rest of the data, especially the from_fule until we know all - # the rules, otherwise the from_rule may not exist. - # Also we'll have a better chance of looping through files. - ${$new_source}{$file} = [ $time, $size, $md5, $from_rule ]; - } - elsif ($state == 0) { - # Outside a section. Nothing to do. - } - else { - warn "$My_name: In file-database '$in_name' ", - "line $. is of wrong format:\n '$_'\n"; - $errors++; - next LINE; - } - } - undef $in_handle; - # Set cus dependencies. - &rdb_set_dependentsA( keys %rule_db ); - -#?? Check from_rules exist. 
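-    # For reference, the layout parsed above looks like this (values shown
-    # are only illustrative placeholders):
-    #     # Fdb version <n>
-    #     ["pdflatex"] <run_time> "thesis.tex" "thesis.pdf" "thesis"
-    #       "thesis.tex" <time> <size> <md5> ""
-    #       "thesis.bbl" <time> <size> <md5> "bibtex thesis"
-    # i.e., a ["rule"] section line followed by one quoted source-file line
-    # per dependency, exactly as written out by rdb_write.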
- - return $errors; -} # END rdb_read - -#************************************************************ - -sub rdb_read_generatedB { - # Call: rdb_read_generatedB( $in_name, \@extra_generated, \@aux_files ) - # From rule database in saved file, in format written by rdb_write, - # finds the non-basic generated files that are to be deleted by a cleanup. - # Returns an array of these files, or an empty array if the file - # does not exist or cannot be opened. - my ($in_name, $Pgenerated, $Paux_files) = @_; - @$Pgenerated = (); - @$Paux_files = (); - - my $in_handle = new FileHandle; - $in_handle->open( $in_name, '<' ) - or return (); - my $rule = ''; - my $run_time = 0; - my $source = ''; - my $dest = ''; - my $base = ''; - my $ext = ''; - my $path = ''; - my $state = 0; # Outside a section -LINE: - while ( <$in_handle> ) { - # Remove leading and trailing white space. - s/^\s*//; - s/\s*$//; - # Ignore blank lines and comments - if ( /^$/ || /^#/ || /^%/ ) { next LINE;} - if ( /^\[\"([^\"]+)\"\]/ ) { - # Start of section - $rule = $1; - my $tail = $'; #' Single quote in comment tricks the parser in - # emacs from misparsing an isolated single quote - $run_time = 0; - $source = $dest = $base = ''; - if ( $tail =~ /^\s*(\S+)\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s*$/ ) { - $source = $2; - $dest = $3; - $base = $4; - } - else { next LINE; } - if ( $rule =~ /^makeindex/ ) { - push @$Pgenerated, $source, $dest, "$base.ilg"; - } - elsif ( $rule =~ /^bibtex/ ) { - push @$Pgenerated, $dest, "$base.blg"; - push @$Paux_files, $source; - } - $state = 1; #Reading a section - } - elsif ( /^\"([^\"]*)\"\s+(\S+)\s+(\S+)\s+(\S+)\s+\"([^\"]*)\"/ ) { - # Source file line - if ($state == 3) { - # The rule is not being currently used. - next LINE; - } - my $file = $1; - ($base, $path, $ext) = fileparseA( $file ); - if ( $ext eq '.aux' ) { push @$Paux_files, $file; } - } - elsif ($state == 0) { - # Outside a section. Nothing to do. - } - else { - warn "$My_name: In file-database '$in_name' ", - "line $. is of wrong format:\n '$_'\n"; - next LINE; - } - } # LINE - undef $in_handle; - -} # END rdb_read_generatedB - -#************************************************************ - -sub rdb_write { - # Call: rdb_write( $out_name ) - # Writes to the given file name the database of file and rule data - # accessible from the primary rules. - # Returns 1 on success, 0 if file couldn't be opened. - local $out_name = $_[0]; - local $out_handle = new FileHandle; - if ( ($out_name eq "") || ($out_name eq "-") ) { - # Open STDOUT - $out_handle->open( '>-' ); - } - else { - $out_handle->open( $out_name, '>' ); - } - if (!$out_handle) { return 0; } - - local %current_primaries = (); # Hash whose keys are primary rules - # needed, i.e., known latex-like rules which trigger - # circular dependencies - local @pre_primary = (); # Array of rules - local @post_primary = (); # Array of rules - local @one_time = (); # Array of rules - &rdb_classify_rules( \%possible_primaries, keys %requested_filerules ); - - print $out_handle "# Fdb version $fdb_ver\n"; - my @rules = sort( - rdb_accessible( - uniq1( keys %known_rules, keys %current_primaries ))); - rdb_for_some( - \@rules, - sub { print $out_handle "[\"$rule\"] $$Prun_time \"$$Psource\" \"$$Pdest\" \"$$Pbase\" \n"; }, - sub { print $out_handle " \"$file\" $$Ptime $$Psize $$Pmd5 \"$$Pfrom_rule\"\n"; }, - ); - undef $out_handle; - return 1; -} #END rdb_write - -#************************************************************ - -sub rdb_set_from_logB { - # Assume rule context. 
- # This is intended to be applied only for a primary (LaTeX-like) rule - # Starting from the log_file, set current details for the current rule. - - # Rules should only be primary - if ( $$Pcmd_type ne 'primary' ) { - warn "\n$My_name: ==========$My_name: Probable BUG======= \n ", - " rdb_set_from_logB called to set files ", - "for non-primary rule '$rule'\n\n"; - return; - } - - -#?? # We'll prune this by all files determined to be needed for source files. -#?? my %unneeded_source = %$PHsource; - - # Parse log file to find relevant filenames - # Result in the following variables: - local %dependents = (); # Maps files to status - local @bbl_files = (); - local %idx_files = (); # Maps idx_file to (ind_file, base) - - # The following are also returned, but are global, to be used by caller - # $reference_changed, $bad_reference $bad_citation - - &parse_logB; - - IDX_FILE: - foreach my $idx_file ( keys %idx_files ) { - my ($ind_file, $ind_base) = @{$idx_files{$idx_file}}; - my $from_rule = "makeindex $idx_file"; - if ( ! rdb_rule_exists( $from_rule ) ){ - print "!!!===Creating rule '$from_rule': '$ind_file' from '$idx_file'\n" - if ($diagnostics); - rdb_create_rule( $from_rule, 'external', $makeindex, '', 1, - $idx_file, $ind_file, $ind_base, 1, 0); - foreach my $primary ( keys %primaries ) { - print " ===Source file '$ind_file' for '$primary'\n" - if ($diagnostics > -1); - rdb_ensure_file( $primary, $ind_file, $from_rule ); - } - } - if ( ! -e $ind_file ) { - # Failure was non-existence of makable file - # Leave failure issue to other rules. - $failure = 0; - } - } - - BBL_FILE: - foreach my $bbl_file ( uniqs( @bbl_files ) ) { - my ($bbl_base, $bbl_path, $bbl_ext) = fileparseA( $bbl_file ); - $bbl_base = $bbl_path.$bbl_base; - my @new_bib_files; - my @new_aux_files; - &parse_aux( "$bbl_base.aux", \@new_bib_files, \@new_aux_files ); - my $from_rule = "bibtex $bbl_base"; - if ( ! rdb_rule_exists( $from_rule ) ){ - print "!!!===Creating rule '$from_rule'\n" - if ($diagnostics); - rdb_create_rule( $from_rule, 'external', $bibtex, '', 1, - "$bbl_base.aux", $bbl_file, $bbl_base, 1, 0); - foreach my $source ( @new_bib_files, @new_aux_files ) { - print " ===Source file '$source'\n" - if ($diagnostics); - rdb_ensure_file( $from_rule, $source ); - } - foreach my $primary ( keys %primaries ) { - print " ===Source file '$bbl_file' for '$primary'\n" - if ($diagnostics); - rdb_ensure_file( $primary, $bbl_file, $from_rule ); - if ( ! -e $bbl_file ) { - # Failure was non-existence of makable file - # Leave failure issue to other rules. - $failure = 0; - } - } - } - } - -NEW_SOURCE: - foreach my $new_source (keys %dependents) { - foreach my $primary ( keys %primaries ) { - rdb_ensure_file( $primary, $new_source ); - } - } - - my @more_sources = &rdb_set_dependentsA( $rule ); - my $num_new = $#more_sources + 1; - foreach (@more_sources) { - $dependents{$_} = 4; - if ( ! -e $_ ) { - # Failure was non-existence of makable file - # Leave failure issue to other rules. - $failure = 0; - $$Pchanged = 1; # New files can be made. Ignore error. - } - } - if ($diagnostics) { - if ($num_new > 0 ) { - print "$num_new new source files for rule '$rule':\n"; - foreach (@more_sources) { print " '$_'\n"; } - } - else { - print "No new source files for rule '$rule':\n"; - } - } - - my @files_not_needed = (); - foreach (keys %$PHsource) { - if ( ! 
exists $dependents{$_} ) { - print "Removing no-longer-needed dependent '$_' from rule '$rule'\n" - if $diagnostics>-1; - push @files_not_needed, $_; - } - } - rdb_remove_files( $rule, @files_not_needed ); - -} # END rdb_set_from_logB - -#************************************************************ - -sub rdb_find_new_filesB { - # Call: rdb_find_new_filesB - # Assumes rule context for primary rule. - # Deal with files which were missing and for which a method - # of finding them has become available: - # (a) A newly available source file for a custom dependency. - # (b) When there was no extension, a file with appropriate - # extension - # (c) When there was no extension, and a newly available source - # file for a custom dependency can make it. - - my %new_includes = (); - -MISSING_FILE: - foreach my $missing ( keys %$PHsource ) { - next if ( $$PHsource{$missing} != 0 ); - my ($base, $path, $ext) = fileparseA( $missing ); - $ext =~ s/^\.//; - if ( -e "$missing.tex" ) { - $new_includes{"$missing.tex"} = 1; - } - if ( -e $missing ) { - $new_includes{$missing} = 1; - } - if ( $ext ne "" ) { - foreach my $dep (@cus_dep_list){ - my ($fromext,$toext) = split(' ',$dep); - if ( ( "$ext" eq "$toext" ) - && ( -e "$path$base.$fromext" ) - ) { - # Source file for the missing file exists - # So we have a real include file, and it will be made - # next time by rdb_set_dependents - $new_includes{$missing} = 1; - } - else { - # no point testing the $toext if the file doesn't exist. - } - next MISSING_FILE; - } - } - else { - # $_ doesn't exist, $_.tex doesn't exist, - # and $_ doesn't have an extension - foreach my $dep (@cus_dep_list){ - my ($fromext,$toext) = split(' ',$dep); - if ( -e "$path$base.$fromext" ) { - # Source file for the missing file exists - # So we have a real include file, and it will be made - # next time by &rdb__dependents - $new_includes{"$path$base.$toext"} = 1; -# next MISSING_FILE; - } - if ( -e "$path$base.$toext" ) { - # We've found the extension for the missing file, - # and the file exists - $new_includes{"$path$base.$toext"} = 1; -# next MISSING_FILE; - } - } - } - } # end MISSING_FILES - - # Sometimes bad line-breaks in log file (etc) create the - # impression of a missing file e.g., ./file, but with an incorrect - # extension. The above tests find the file with an extension, - # e.g., ./file.tex, but it is already in the list. So now I will - # remove files in the new_include list that are already in the - # include list. Also handle aliasing of file.tex and ./file.tex. - # For example, I once found: -# (./qcdbook.aux (./to-do.aux) (./ideas.aux) (./intro.aux) (./why.aux) (./basics -#.aux) (./classics.aux) - - my $found = 0; - foreach my $file (keys %new_includes) { - my $stripped = $file; - $stripped =~ s{^\./}{}; - if ( exists $PHsource{$file} ) { - delete $new_includes{$file}; - } - else { - $found ++; - rdb_ensure_file( $rule, $file ); - } - } - -## ?? Is this correct? I used to use @includes -# rdb_update_files_for_rule( keys %PHsources ); - if ( $diagnostics && ( $found > 0 ) ) { - warn "$My_name: Detected previously missing files:\n"; - foreach ( sort keys %new_includes ) { - warn " '$_'\n"; - } - } - return $found; -} # END rdb_find_new_filesB - -#************************************************************ - -sub rdb_update_files_for_rule { -#=========== APPEARS NOT TO BE USED! ========================= -# Usage: rdb_update_files_for_rule( source_files ...) -# Assume rule context. 
-# Update list of source files for current rule, treating properly cases -# where file didn't exist before run, etc - foreach my $file ( @_ ) { - if ( ! rdb_file_exists( $rule, $file ) ) { - # File that didn't appear in the source files for the run - # before. Two cases: (a) it was created during the run; - # (b) it existed before the run. - # If case (a), then the file was non-existent before the - # run, so we must now label it as non-existent, and - # we trigger a new run -#?? print "?? Adding '$file' to '$rule'\n"; - rdb_ensure_file( $rule, $file ); - my $file_time = get_mtime0( $file ); - if ( ($$Ptest_kind == 2) || ($$Ptest_kind == 3) ) { - # Test wrt destination time, but exclude files - # which appear to be generated (according to extension) - # Assume generated files up-to-date after last run. - # I.e., last run was valid. - my $ext = ext( $file ); - - if ( (! exists $generated_exts_all{$ext} ) - && ($file_time >= $dest_mtime) - ) { - # Only changes since the mtime of the destination matter, - # and only non-generated files count. - # Non-existent destination etc gives $dest_mtime=0 - # so this will automatically give out-of-date condition - # Flag out-of-date for a file by treating it as non-existent - rdb_set_file1( $rule, $file, 0, -1, 0); - } - } - elsif ($file_time >= $$Prun_time ) { - # File generated during run. So treat as non-existent at beginning - rdb_set_file1( $rule, $file, 0, -1, 0); - $$Pout_of_date = 1; - } - # Else default of current state of file is correct. - } # END not previously existent file - } # END file -} # END rdb_update_files_for_rule - -#************************************************************ - -sub rdb_set_dependentsA { - # Call rdb_set_dependentsA( rules ...) - # Returns array (sorted), of new source files. - local @new_sources = (); - rdb_recurseA( [@_], 0, \&rdb_one_depA ); - &rdb_make_links; - return uniqs( @new_sources ); -} #END rdb_set_dependentsA - -#************************************************************ - -sub rdb_one_depA { - # Helper for finding dependencies. One case, $rule and $file given - # Assume file (and rule) context for DESTINATION file. - local $new_dest = $file; - my ($base_name, $path, $toext) = fileparseA( $new_dest ); - $base_name = $path.$base_name; - $toext =~ s/^\.//; -DEP: - foreach my $dep ( @cus_dep_list ) { - my ($fromext,$proptoext,$must,$func_name) = split(' ',$dep); - if ( $toext eq $proptoext ) { - my $source = "$base_name.$fromext"; - # Found match of rule - if ($diagnostics) { - print "Found cusdep: $source to make $rule:$new_dest ====\n"; - } - if ( -e $source ) { - $$Pfrom_rule = "cusdep $fromext $toext $base_name"; -#?? print "?? Ensuring rule for '$$Pfrom_rule'\n"; - local @PAnew_cmd = ( 'do_cusdep', $func_name ); - if ( !-e $new_dest ) { - push @new_sources, $new_dest; - } - if (! rdb_rule_exists( $$Pfrom_rule ) ) { - rdb_create_rule( $$Pfrom_rule, 'cusdep', '', \@PAnew_cmd, 3, - $source, $new_dest, $base_name, 0 ); - } - else { - rdb_one_rule( - $$Pfrom_rule, - sub{ @$PAint_cmd = @PAnew_cmd; $$Pdest = $new_dest;} - ); - } - return; - } - else { - # Source file does not exist - if ( !$force_mode && ( $must != 0 ) ) { - # But it is required that the source exist ($must !=0) - $failure = 1; - $failure_msg = "File '$base_name.$fromext' does not exist ". - "to build '$base_name.$toext'"; - return; - } - elsif ( $$Pfrom_rule =~ /^cusdep $fromext $toext / ) { - # Source file does not exist, destination has the rule set. 
- # So turn the from_rule off - $$Pfrom_rule = ''; - } - else { - } - } - } - elsif ( ($toext eq '') && (! -e $file ) ) { - # Empty extension and non-existent destination - # This normally results from \includegraphics{A} - # without graphics extension for file, when file does - # not exist. So we will try to find something to make it. - my $source = "$base_name.$fromext"; - if ( -e $source ) { - $new_dest = "$base_name.$proptoext"; - my $from_rule = "cusdep $fromext $toext $base_name"; - push @new_sources, $new_dest; - print "Ensuring rule for '$from_rule', to make '$new_dest'\n" - if $diagnostics > -1; - local @PAnew_cmd = ( 'do_cusdep', $func_name ); - if (! rdb_rule_exists( $from_rule ) ) { - rdb_create_rule( $from_rule, 'cusdep', '', \@PAnew_cmd, 3, - $source, $new_dest, $base_name, 0); - } - else { - rdb_one_rule( - $$Pfrom_rule, - sub{ @$PAint_cmd = @PAnew_cmd; $$Pdest = $new_dest;} - ); - } - rdb_ensure_file( $rule, $new_dest, $from_rule ); - return; - } - } # End of Rule found - } # End DEP -} #END rdb_one_depA - -#************************************************************ - -sub rdb_list { - # Call: rdb_list() - # List rules and their source files - print "===Rules:\n"; - local $count_rules = 0; - my @accessible_all = rdb_accessible( keys %requested_filerules ); - rdb_for_some( - \@accessible_all, - sub{ $count_rules++; - print "Rule '$rule' depends on:\n"; - }, - sub{ print " '$file'\n"; } - ); - if ($count_rules <= 0) { - print " ---No rules defined\n"; - } -} #END rdb_list - -#************************************************************ - -sub rdb_show { - # Call: rdb_show() - # Displays contents of rule data base. - # Side effect: Exercises access routines! - print "===Rules:\n"; - local $count_rules = 0; - rdb_for_all( - sub{ $count_rules++; - my @int_cmd = @$PAint_cmd; - foreach (@int_cmd) { - if ( !defined($_) ) { $_='undef';} - } - print " [$rule]: '$$Pcmd_type' '$$Pext_cmd' '@int_cmd' $$Ptest_kind ", - "'$$Psource' '$$Pdest' '$$Pbase' $$Pout_of_date $$Pout_of_date_user\n"; }, - sub{ print " '$file': $$Ptime $$Psize $$Pmd5 '$$Pfrom_rule'\n"; } - ); - if ($count_rules <= 0) { - print " ---No rules defined\n"; - } -} #END rdb_show - -#************************************************************ - -sub rdb_accessible { - # Call: rdb_accessible( rule, ...) - # Returns array of rules accessible from the given rules - local @accessible = (); - rdb_recurseA( [@_], sub{ push @accessible, $rule; } ); - return @accessible; -} #END rdb_accessible - -#************************************************************ - -sub rdb_possible_primaries { - # Returns array of possible primaries - my @rules = (); - foreach my $rule ( keys %known_rules ) { - if ( $known_rules{$rule} eq 'primary') { - push @rules, $rule; - } - } - return @rules; -} #END rdb_possible_primaries - -#************************************************************ -#************************************************************ -#************************************************************ - -# Routines for makes. NEW VERSIONS ?? - -#????????Debugging routines: -sub R1 {print "===START $rule\n"} -sub R2 {print "===END $rule\n"} -sub F1 {print " ---START $file\n"} -sub F2 {print " ---END $file\n"} -#************************************************************ - -sub rdb_makeB { - # Call: rdb_makeB( target, ... ) - # Makes the targets and prerequisites. - # Leaves one-time rules to last. - # Does appropriate repeated makes to resolve dependency loops - - # Returns 0 on success, nonzero on failure. 
- - # General method: Find all accessible rules, then repeatedly make - # them until all accessible rules are up-to-date and the source - # files are unchanged between runs. On termination, all - # accessible rules have stable source files. - # - # One-time rules are view and print rules that should not be - # repeated in an algorithm that repeats rules until the source - # files are stable. It is the calling routine's responsibility to - # arrange to call them, or to use them here with caution. - # - # Note that an update-viewer rule need not be considered - # one-time. It can be legitimately applied everytime the viewed - # file changes. - # - # Note also that the criterion of stability is to be applied to - # source files, not to output files. Repeated application of a - # rule to IDENTICALLY CONSTANT source files may produce different - # output files. This may be for a trivial reason (e.g., the - # output file contains a time stamp, as in the header comments for - # a typical postscript file), or for a non-trivial reason (e.g., a - # stochastic algorithm, as in abcm2ps). - # - # This caused me some actual trouble. In general, circular - # dependencies produce non-termination, and the the following - # situation is an example of a generic situation where certain - # rules must be obeyed in order to obtain proper results: - # 1. A/the latex source file contains specifications for - # certain postprocessing operations. Standard (pdf)latex - # already has this, for indexing and bibliography. - # 2. In the case in point that caused me trouble, the - # specification was for musical tunes that were contained - # in external source files not directly input to - # (pdf)latex. But in the original version, there was a - # style file (abc.sty) that caused latex itself to call - # abcm2ps to make .eps files for each tune that were to be - # read in on the next run of latex. - # 3. Thus the specification can cause a non-terminating loop - # for latexmk, because the output files of abcm2ps changed - # even with identical input. - # 4. The solution was to - # a. Use a style file abc_get.sty that simply wrote the - # specification on the tunes to the .aux file in a - # completely deterministic fashion. - # b. Instead of latex, use a script abclatex.pl that runs - # latex and then extracts the abc contents for each tune - # from the source abc file. This is also - # deterministic. - # c. Use a cusdep rule in latexmk to convert the tune abc - # files to eps. This is non-deterministic, but only - # gets called when the (deterministic) source file - # changes. - # This solves the problem. Latexmk works. Also, it is no - # longer necessary to enable write18 in latex, and multiple - # unnecessary runs of abcm2ps are no longer used. - # - # The order of testing and applying rules is chosen by the - # following heuristics: - # 1. Both latex and pdflatex may be used, but the resulting - # aux files etc may not be completely identical. Define - # latex and pdflatex as primary rules. Apply the general - # method of repeated circulating through all rules until - # the source files are stable for each primary rule - # separately. Naturally the rules are all accessible - # rules, but excluding primary rules except for the current - # primary. - # 2. Assume that the primary rules are relatively - # time-consuming, so that unnecessary passes through them - # to check stability of the source files should be avoided. - # 3. 
Assume that although circular dependencies exist, the - # rules can nevertheless be thought of as basically - # non-circular, and that many rules are strictly or - # normally non-circular. In particular cusdep rules are - # typically non-circular (e.g., fig2eps), as are normal - # output processing rules like dvi2ps. - # 4. The order for the non-circular approximation is - # determined by applying the assumption that an output file - # from one rule that is read in for an earlier stage is - # unchanged. - # HOWEVER, at a first attempt, the ordering is not needed. It - # only gives an optimization - # 5. (Note that these assumptions could be violated, e.g., if - # $dvips is arranged not only to do the basic dvips - # command, but also to extract information from the ps file - # and feed it back to an input file for (pdf)latex.) - # 6. Nevertheless, the overall algorithm should allow - # circularities. Then the general criterion of stability - # of source files covers the general case, and also - # robustly handles the case that the USER changes source - # files during a run. This is particularly important in - # -pvc mode, given that a full make on a large document can - # be quite lengthy in time, and moreover that a user - # naturally wishes to make corrections in response to - # errors, particularly latex errors, and have them apply - # right away. - # This leads to the following approach: - # 1. Classify accessible rules as: primary, pre-primary - # (typically cusdep, bibtex, makeindex, etc), post-primary - # (typically dvips, etc), and one-time - # 2. Then stratify the rules into an order of application that - # corresponds to the basic feedforward structure, with the - # exclusion of one-time rules. - # 3. Always require that one-time rules are among the - # explicitly requested rules, i.e., the last to be applied, - # were we to apply them. Anything else would not match the - # idea of a one-time rule. - # 4. Then work as follows: - # a. Loop over primaries - # b. For each primary, examine each pre-primary rule and - # apply if needed, then the primary rule and then each - # post-primary rule. The ordering of the pre-primary - # and post-primary rules was found in step 2. - # BUT applying the ordering is not essential - # c. Any time that a pre-primary or primary rule is - # applied, loop back to the beginning of step b. This - # ensures that bibtex etc are applied before rerunning - # (pdf)latex, and also covers changing source files, and - # gives priority to quick pre-primary rules for changing - # source files against slow reruns of latex. - # d. Then apply post-primary rules in order, but not - # looping back after each rule. This non-looping back - # is because the rules are normally feed-forward only. - # BUT applying the ordering is not essential - # e. But after completing post-primary rules do loop back - # to b if any rules were applied. This covers exotic - # circular dependence (and as a byproduct, changing - # source files). - # f. On each case of looping back to b, re-evaluate the - # dependence setup to allow for the effect of changing - # source files. 
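-    # Illustration (hypothetical document using BibTeX, makeindex and dvips):
-    #     pre-primary : "bibtex thesis", "makeindex thesis.idx", cusdep rules
-    #     primary     : "latex" (or "pdflatex")
-    #     post-primary: "dvips" and similar output-conversion rules
-    #     one-time    : "view", "print"
-    # A typical pass then runs bibtex/makeindex as needed, reruns (pdf)latex
-    # until its source files are stable, and only afterwards runs dvips.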
- # - - local @requested_targets = @_; - local %current_primaries = (); # Hash whose keys are primary rules - # needed, i.e., known latex-like rules which trigger - # circular dependencies - local @pre_primary = (); # Array of rules - local @post_primary = (); # Array of rules - local @one_time = (); # Array of rules - - - # For diagnostics on changed files, etc: - local @changed = (); - local @disappeared = (); - local @no_dest = (); # Non-existent destination files - local @rules_to_apply = (); - - &rdb_classify_rules( \%possible_primaries, @requested_targets ); - - local %pass = (); - local $failure = 0; # General accumulated error flag - local $runs = 0; - local $too_many_runs = 0; - local %rules_applied = (); - my $retry_msg = 0; # Did I earlier say I was going to attempt - # another pass after a failure? - PRIMARY: - foreach my $primary (keys %current_primaries ) { - foreach my $rule (keys %rule_db) { - $pass{$rule} = 0; - } - PASS: - while (1==1) { - $runs = 0; - my $previous_failure = $failure; - $failure = 0; - local $newrule_nofile = 0; # Flags whether rule created for - # making currently non-existent file, which - # could become a needed source file for a run - # and therefore undo an error condition - if ($diagnostics) { - print "MakeB: doing pre_primary and primary...\n"; - } - rdb_for_some( [@pre_primary, $primary], \&rdb_makeB1 ); - if ( ($runs > 0) && ! $too_many_runs ) { - $retry_msg = 0; - if ( $failure && $newrule_nofile ) { - $retry_msg = 1; - print "$My_name: Error on run, but found possibility to ", - "make new source files\n"; - next PASS; - } - elsif ( ! $failure ) { - next PASS; - } - } - elsif ($runs == 0) { - # $failure not set on this pass, so use value from previous pass: - $failure = $previous_failure; - if ($retry_msg) { - print "But in fact no new files made\n"; - } - } - if ($failure && !$force_mode ) { last PASS; } - if ($diagnostics) { - print "MakeB: doing post_primary...\n"; - } - rdb_for_some( [@post_primary], \&rdb_makeB1 ); - if ($failure) { last PASS; } - if ($runs > 0) { next PASS; } - # Get here if nothing was run. - last PASS; - } - continue { - # Re-evaluate rule classification and accessibility, - # but do not change primaries. - &rdb_classify_rules( \%current_primaries, @requested_targets ); - &rdb_make_links; - } - } - rdb_for_some( [@one_time], \&rdb_makeB1 ); - rdb_write( $fdb_file ); - - if (! $silent) { - # Diagnose of the runs - if ( $#{keys %rules_applied } > -1 ) { - print "$My_name: $runs runs. Rules applied:\n"; - foreach (sort keys %rules_applied) { - print " '$_'\n"; - } - } - elsif ($failure && $force_mode) { - print "$My_name: Errors, in force_mode: so I tried finishing targets\n"; - } - elsif ($failure) { - print "$My_name: Errors, so I did not complete making targets\n"; - } - else { - local @dests = (); - rdb_for_some( [@_], sub{ push @dests, $$Pdest if ($$Pdest); } ); - print "$My_name: All targets (@dests) are up-to-date\n"; - } - } - return $failure; -} #END rdb_makeB - -#------------------- - -sub rdb_makeB1 { - # Call: rdb_makeB1 - # Helper routine for rdb_makeB. - # Carries out make at level of given rule (all data available). - # Assumes contexts for recursion, make, and rule, and - # assumes that source files for the rule are to be considered - # up-to-date. - if ($diagnostics) { print " MakeB1 $rule\n"; } - if ($failure & ! 
$force_mode) {return;} - &rdb_clear_change_record; - &rdb_flag_changes_here; -# if ($diagnostics>-1) { print " MakeB1.1 $rule $$Pout_of_date\n"; } - - my $return = 0; # Return code from called routine -#?? print "makeB1: Trying '$rule' for '$$Pdest': "; - if (!$$Pout_of_date) { -#?? if ( ($$Pcmd_type eq 'primary') && (! $silent) ) { -# print "Rule '$rule' up to date\n"; -# } - return; - } - if ($diagnostics) { print " remake\n"; } - if (!$silent) { - print "$My_name: applying rule '$rule'...\n"; - &rdb_diagnose_changes( "Rule $rule: "); - } -##????????????????????????????????????: variable rules_applied not used - $rules_applied{$rule} = 1; - $runs++; -#?? print "$rule: $$Pcmd_type\n"; - - # We are applying the rule, so its source file state for when it - # was last made is as of now: - # ??IS IT CORRECT TO DO NOTHING IN CURRENT VERSION? - - # The actual run - $return = 0; - # Rule may have been created since last run: - if ( ! defined $pass{$rule} ) {$pass{$rule} = 0; } - if ( $pass{$rule} ge $max_repeat ) { - # Avoid infinite loop by having a maximum repeat count - # Getting here represents some kind of weird error. - warn "$My_name: Maximum runs of $rule reached ", - "without getting stable files\n"; - $too_many_runs = 1; - $failure = 1; - $failure_msg = "'$rule' needed too many passes"; - return; - } - $pass{$rule}++; - warn_running( "Run number $pass{$rule} of rule '$rule'" ); - if ($$Pcmd_type eq 'primary' ) { - $return = &rdb_primary_run; - } - else { $return = &rdb_run1; } - if ($$Pchanged) { - $newrule_nofile = 1; - $return = 0; - } - elsif ( $$Pdest && ( !-e $$Pdest ) && (! $failure) ){ - # If there is a destination to make, but for some reason - # it did not get made, then make sure a failure gets reported. - # But if the failure has already been reported, there's no need - # to report here, since that would give a generic error - # message instead of a specific one. - -## ??? 1 Sep. 2008, for cusdep no-file-exists issue - if ( ( $$Pcmd_type eq 'cusdep') && $$Psource && (! -e $$Psource) ) { - # However, if the rule is a custom dependency, this is not by - # itself an error, if also the source file does not exist. In - # that case, we may have the situation that (1) the dest file is no - # longer needed by the tex file, and (2) therefore the user - # has deleted the source and dest files. After the next - # latex run and the consequent analysis of the log file, the - # cusdep rule will no longer be needed, and will be removed. - - # So in this case, do NOT report an error - $$Pout_of_date = 0; - } - else { - $failure = 1; - $failure_msg = "'$rule' did not make '$$Pdest'"; - } - } - if ($return != 0) {$failure = 1;} -} #END rdb_makeB1 - -#************************************************************ - -sub rdb_submakeB { - # Call: rdb_submakeB - # Makes all the source files for a given rule. - # Assumes contexts for recursion, for make, and rule. - %visited = %visited_at_rule_start; - local $failure = 0; # Error flag - my @v = keys %visited; -#?? print "---submakeB $rule. 
@v \n"; - rdb_do_files( sub{ rdb_recurse_rule( $$Pfrom_rule, 0,0,0, \&rdb_makeB1 ) } ); - return $failure; -} #END rdb_submakeB - -#************************************************************ - - -sub rdb_classify_rules { - # Usage: rdb_classify_rules( \%allowed_primaries, requested targets ) - # Assume the following variables are available (global or local): - # Input: - # @requested_targets # Set to target rules - - # Output: - # %current_primaries # Keys are actual primaries - # @pre_primary # Array of rules - # @post_primary # Array of rules - # @one_time # Array of rules - # @pre_primary and @post_primary are in natural order of application. - - local $P_allowed_primaries = shift; - local @requested_targets = @_; - local $state = 0; # Post-primary - local @classify_stack = (); - - %current_primaries = (); - @pre_primary = (); - @post_primary = (); - @one_time = (); - - rdb_recurseA( \@requested_targets, \&rdb_classify1, 0,0, \&rdb_classify2 ); - - # Reverse, as tendency is to find last rules first. - @pre_primary = reverse @pre_primary; - @post_primary = reverse @post_primary; - - if ($diagnostics) { - print "Rule classification: \n"; - if ($#requested_targets < 0) { - print " No requested rules\n"; - } - else { - print " Requested rules:\n"; - foreach ( @requested_targets ) { print " $_\n"; } - } - if ($#pre_primary < 0) { - print " No pre-primaries\n"; - } - else { - print " Pre-primaries:\n"; - foreach (@pre_primary) { print " $_\n"; } - } - print " Primaries:\n"; - foreach (keys %current_primaries) { print " $_\n"; } - if ($#post_primary < 0) { - print " No post-primaries\n"; - } - else { - print " Post-primaries:\n"; - foreach (@post_primary) { print " $_\n"; } - } - if ($#one_time < 0) { - print " No one_time rules\n"; - } - else { - print " One_time rules:\n"; - foreach ( @one_time ) { print " $_\n"; } - } - } #end diagnostics -} #END rdb_classify_rules - -#------------------- - -sub rdb_classify1 { - # Helper routine for rdb_classify_rules - # Applied as rule_act1 in recursion over rules - # Assumes rule context, and local variables from rdb_classify_rules -# print "=========== '$rule' $depth ========== \n"; - push @classify_stack, [$state]; - if ( exists $possible_one_time{$rule} ) { - # Normally, we will have already extracted the one_time rules, - # and they will never be accessed here. But just in case of - # problems or generalizations, we will cover all possibilities: - if ($depth > 1) { - warn "ONE TIME rule not at outer level '$rule'\n"; - } - push @one_time, $rule; - } - elsif ($state == 0) { - if ( exists ${$P_allowed_primaries}{$rule} ) { - $state = 1; # In primary rule - $current_primaries{ $rule } = 1; - } - else { - push @post_primary, $rule; - } - } - else { - $state = 2; # in post-primary rule - push @pre_primary, $rule; - } -} #END rdb_classify1 - -#------------------- - -sub rdb_classify2 { - # Helper routine for rdb_classify_rules - # Applied as rule_act2 in recursion over rules - # Assumes rule context - ($state) = @{ pop @classify_stack }; -} #END rdb_classify2 - -#************************************************************ - - -sub rdb_run1 { - # Assumes contexts for: rule. - # Unconditionally apply the rule - # Returns return code from applying the rule. - # Otherwise: 0 on other kind of success, -1 on error. 
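The classification performed by rdb_classify1/rdb_classify2 is a depth-first walk over the rule graph with a small state (pre- versus post-primary side) pushed on entry to each rule and popped on exit. A self-contained toy version of that pattern follows; the ancestor graph and rule names are invented, and one-time rules are ignored here.

#!/usr/bin/perl
use strict; use warnings;

# Walk from a requested target back through each rule's ancestors (the rules
# its source files come from), carrying a small state:
#   0 = still above any primary (post-primary side)
#   1 = at or below a primary   (pre-primary side)
my %ancestors = ( view    => ['dvips'],
                  dvips   => ['latex'],
                  latex   => ['bibtex', 'fig2eps'],
                  bibtex  => [],
                  fig2eps => [] );
my %is_primary = ( latex => 1 );

my (%primaries, @pre, @post, @stack, %visited);
my $state = 0;

sub classify {
    my $r = shift;
    return if $visited{$r}++;           # guard against loops
    push @stack, $state;                # enter rule: save the caller's state
    if    ($is_primary{$r}) { $state = 1; $primaries{$r} = 1; }
    elsif ($state == 0)     { push @post, $r; }
    else                    { push @pre,  $r; }
    classify($_) for @{ $ancestors{$r} };
    $state = pop @stack;                # leave rule: restore the caller's state
}

classify('view');
# Rules are found targets-first, so reverse for the natural application order:
print "pre-primary:  @{[ reverse @pre ]}\n";
print "primary:      @{[ sort keys %primaries ]}\n";
print "post-primary: @{[ reverse @post ]}\n";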
- - # Source file data, by definition, correspond to the file state just before - # the latest run, and the run_time to the time just before the run: - &rdb_update_filesA; - $$Prun_time = time; - $$Pchanged = 0; # No special changes in files - - # Return values for external command: - my $return = 0; - - # Find any internal command - my @int_args = @$PAint_cmd; - my $int_cmd = shift @int_args; - my @int_args_for_printing = @int_args; - foreach (@int_args_for_printing) { - if ( ! defined $_ ) { $_ = 'undef'; } - } - if ($int_cmd) { - print "For rule '$rule', running '\&$int_cmd( @int_args_for_printing )' ...\n"; - $return = &$int_cmd( @int_args ); - } - elsif ($$Pext_cmd) { - $return = &rdb_ext_cmd; - } - else { - warn "$My_name: Either a bug OR a configuration error:\n", - " Need to implement the command for '$rule'\n"; - &traceback(); - $return = -1; - } - if ( $rule =~ /^bibtex/ ) { - my $retcode = &check_bibtex_log($$Pbase); - if ($retcode == 3) { - push @warnings, - "Could not open bibtex log file for '$$Pbase'"; - } - elsif ($retcode == 2) { - push @warnings, "Bibtex errors for '$$Pbase'"; - } - elsif ($retcode == 1) { - push @warnings, "Bibtex warnings for '$$Pbase'"; - } - } - - $updated = 1; - if ($$Ptest_kind == 3) { - # We are time-criterion first time only. Now switch to - # file-change criterion - $$Ptest_kind = 1; - } - $$Pout_of_date = $$Pout_of_date_user = 0; - return $return; -} # END rdb_run1 - -#----------------- - -sub rdb_ext_cmd { - # Call: rdb_ext_cmd - # Assumes rule context. Runs external command with substitutions. - # Uses defaults for the substitutions. See rdb_ext_cmd1. - return rdb_ext_cmd1(); -} #END rdb_ext_cmd - -#----------------- - -sub rdb_ext_cmd1 { - # Call: rdb_ext_cmd1( options, source, dest, base ) or rdb_ext_cmd1() or ... - # Assumes rule context. Returns command with substitutions. - # Null arguments or unprovided arguments => use defaults. - # for %S=source, %D=dest, %B=base, %R=root=base for latex, %O='', %T=texfile - my ($options, $source, $dest, $base ) = @_; - # Apply defaults - $options ||= ''; - $source ||= $$Psource; - $dest ||= $$Pdest; - $base ||= $$Pbase; - - my $ext_cmd = $$Pext_cmd; - - #Set character to surround filenames: - my $q = $quote_filenames ? '"' : ''; - foreach ($ext_cmd) { - s/%O/$options/g; - s/%R/$q$root_filename$q/g; - s/%B/$q$base$q/g; - s/%T/$q$texfile_name$q/g; - s/%S/$q$source$q/g; - s/%D/$q$dest$q/g; - } - # print "quote is '$q'; ext_cmd = '$ext_cmd'\n"; - my ($pid, $return) = &Run_msg($ext_cmd); - return $return; -} #END rdb_ext_cmd1 - -#----------------- - -sub rdb_primary_run { -#?? See multipass_run in previous version Aug 2007 for issues - # Call: rdb_primary_run - # Assumes contexts for: recursion, make, & rule. - # Assumes (a) the rule is a primary, - # (b) a run has to be made, - # (c) source files have been made. - # This routine carries out the run of the rule unconditionally, - # and then parses log file etc. - my $return = 0; - - my $return_latex = &rdb_run1; - - ######### Analyze results of run: - if ( ! -e "$root_filename.log" ) { - $failure = 1; - $failure_msg = "(Pdf)LaTeX failed to generate a log file"; - return -1; - } - ####### NOT ANY MORE! Capture any changes in source file status before we - # check for errors in the latex run - - # Find current set of source files: - &rdb_set_from_logB; - - # For each file of the kind made by epstopdf.sty during a run, - # if the file has changed during a run, then the new version of - # the file will have been read during the run. 
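rdb_ext_cmd1 expands the usual latexmk placeholders (%S source, %D destination, %B base, %R root, %T main tex file, %O options) before the command line is run. A stripped-down, stand-alone version of that substitution, using made-up file names and only a subset of the placeholders:

#!/usr/bin/perl
use strict; use warnings;

my $quote_filenames = 1;            # same role as $quote_filenames in latexmk
my $q = $quote_filenames ? '"' : '';

# Hypothetical rule data:
my %rule = ( ext_cmd => 'dvips %O -o %D %S',
             source  => 'my thesis.dvi',
             dest    => 'my thesis.ps',
             options => '-P pdf' );

my $cmd = $rule{ext_cmd};
for ($cmd) {
    s/%O/$rule{options}/g;
    s/%S/$q$rule{source}$q/g;
    s/%D/$q$rule{dest}$q/g;
}
print "$cmd\n";   # dvips -P pdf -o "my thesis.ps" "my thesis.dvi"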
Unlike the usual - # case, we will need to redo the primary run because of the - # change of this file during the run. Therefore set the file as - # up-to-date: - rdb_do_files( sub { if ($$Pcorrect_after_primary) {&rdb_update1;} } ); - - # There may be new source files, and the run may have caused - # circular-dependency files to be changed. And the regular - # source files may have been updated during a lengthy run of - # latex. So redo the makes for sources of the current rule: - my $submake_return = &rdb_submakeB; - &rdb_clear_change_record; - &rdb_flag_changes_here; - $updated = 1; # Flag that some dependent file has been remade - # Fix the state of the files as of now: this will solve the - # problem of latex and pdflatex interfering with each other, - # at the expense of some non-optimality - #?? Check this is correct: - &rdb_update_filesA; - if ( $diagnostics ) { - print "$My_name: Rules after run: \n"; - rdb_show(); - } - - $return = $return_latex; - if ($return_latex && $$Pout_of_date_user) { - print "Error in (pdf)LaTeX, but change of user file(s), ", - "so ignore error & provoke rerun\n" - if (! $silent); - $return = 0; - } - - # Summarize issues that may have escaped notice: - my @warnings = (); - if ($bad_reference) { - push @warnings, "Latex could not resolve all references"; - } - if ($bad_citation) { - push @warnings, "Latex could not resolve all citations"; - } - if ($#warnings > 0) { - show_array( "$My_name: Summary of warnings:", @warnings ); - } - return $return; -} #END rdb_primary_run - -#************************************************************ - -sub rdb_clear_change_record { - @changed = (); - @disappeared = (); - @no_dest = (); - @rules_to_apply = (); -#??????????????? $failure = 0; -##????????????????????????????????????: variable rules_applied not used - $rules_applied = 0; -} #END rdb_clear_change_record - -#************************************************************ - -sub rdb_flag_changes_here { - # Flag changes in current rule. - # Assumes rule context. - local $dest_mtime = 0; - $dest_mtime = get_mtime($$Pdest) if ($$Pdest); - rdb_do_files( \&rdb_file_change1); - if ( $$Pdest && (! -e $$Pdest) ) { -## ??? 1 Sep. 2008, for cusdep no-file-exists issue - if ( ( $$Pcmd_type eq 'cusdep') && $$Psource && (! -e $$Psource) ) { - # However, if the rule is a custom dependency, this is not by - # itself an error, if also the source file does not exist. In - # that case, we may have the situation that (1) the dest file is no - # longer needed by the tex file, and (2) therefore the user - # has deleted the source and dest files. After the next - # latex run and the consequent analysis of the log file, the - # cusdep rule will no longer be needed, and will be removed. - - # So in this case, do NOT report an error - } - else { - $$Pout_of_date = 1; - push @no_dest, $$Pdest; - } - } - if ($$Pout_of_date) { - push @rules_to_apply, $rule; - } -#?? print "======== flag: $rule $$Pout_of_date ==========\n"; -} #END rdb_flag_changes_here - -#************************************************************ - -sub rdb_file_change1 { - # Call: &rdb_file_change1 - # Assumes rule and file context. Assumes $dest_mtime set. - # Flag whether $file in $rule has changed or disappeared. - # Set rule's make flag if there's a change. - my ($new_time, $new_size, $new_md5) = fdb_get($file); -#?? print "FC1 '$rule':$file $$Pout_of_date TK=$$Ptest_kind\n"; -#?? print " OLD $$Ptime, $$Psize, $$Pmd5\n", -#?? 
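The change test in rdb_flag_changes_here/rdb_file_change1 boils down to comparing the recorded (time, size, md5) of each source file with its current state, and treating files with generated extensions specially. A minimal stand-alone rendering of that comparison, with invented recorded values and an invented extension set:

#!/usr/bin/perl
use strict; use warnings;
use Digest::MD5;

# Current state of a file: (mtime, size, md5); (0, -1, 0) if it does not
# exist, mirroring the convention used for missing files above.
sub file_state {
    my $file = shift;
    return (0, -1, 0) unless -e $file;
    my ($size, $mtime) = (stat $file)[7, 9];
    open my $fh, '<', $file or return (0, -1, 0);
    my $md5 = Digest::MD5->new->addfile($fh)->hexdigest;
    close $fh;
    return ($mtime, $size, $md5);
}

# Extensions whose changes do not count as "user" changes (invented subset):
my %generated_ext = map { $_ => 1 } qw(.aux .log .out);

# Recorded state from a previous run (hypothetical values):
my %recorded = ( 'paper.tex' => [0, 0, ''], 'paper.aux' => [0, 0, ''] );

my ($out_of_date, $out_of_date_user) = (0, 0);
foreach my $file (sort keys %recorded) {
    my ($old_time, $old_size, $old_md5) = @{ $recorded{$file} };
    my ($new_time, $new_size, $new_md5) = file_state($file);
    next if $new_size < 0 && $old_size < 0;     # was and still is missing
    if ($new_size < 0) {                        # disappeared since last run
        print "disappeared: $file\n";
        $out_of_date = 1;
        next;
    }
    if ($new_size != $old_size || $new_md5 ne $old_md5) {
        print "changed: $file\n";
        $out_of_date = 1;
        my ($ext) = $file =~ /(\.[^.]*)$/;
        $out_of_date_user = 1 unless $ext && $generated_ext{$ext};
    }
    # (A time-based test, comparing $new_time with the destination's mtime,
    # would go here for the mtime-based test kinds.)
}
print "out_of_date=$out_of_date out_of_date_user=$out_of_date_user\n";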
" New $new_time, $new_size, $new_md5\n"; - my $ext = ext( $file ); - if ( ($new_size < 0) && ($$Psize >= 0) ) { - print "Disappeared '$file' in '$rule'\n"; - push @disappeared, $file; - # No reaction is good. - #$$Pout_of_date = 1; - # ??? 1 Sep. 2008: I do NOT think so, for cusdep no-file-exists issue - $$Pout_of_date = 1; - return; - } - if ( ($new_size < 0) && ($$Psize < 0) ) { - return; - } - if ( ($new_size != $$Psize) || ($new_md5 ne $$Pmd5) ) { -#?? print "FC1: changed $file: ($new_size != $$Psize) $new_md5 ne $$Pmd5)\n"; - push @changed, $file; - $$Pout_of_date = 1; - if ( ! exists $generated_exts_all{$ext} ) { - $$Pout_of_date_user = 1; - } - } - if ( ( ($$Ptest_kind == 2) || ($$Ptest_kind == 3) ) - && (! exists $generated_exts_all{$ext} ) - && ( $new_time > $dest_mtime ) - ) { -#?? print "FC1: changed $file: ($new_time > $dest_mtime)\n"; - push @changed, $file; - $$Pout_of_date = $$Pout_of_date_user = 1; - } -} #END rdb_file_change1 - -#************************************************************ - -sub rdb_count_changes { - return $#changed + $#disappeared + $#no_dest + $#rules_to_apply + 4; -} #END rdb_count_changes - -#************************************************************ - -sub rdb_diagnose_changes { - # Call: rdb_diagnose_changes or rdb_diagnose_changes( heading ) - # List changes on STDERR - # Precede the message by the optional heading, else by "$My_name: " - my $heading = defined($_[0]) ? $_[0] : "$My_name: "; - - if ( &rdb_count_changes == 0 ) { - warn "${heading}No changes\n"; - return; - } - warn "${heading}Changes:\n"; - if ( $#changed >= 0 ) { - warn " Changed files, or newly in use since previous run(s):\n"; - foreach (uniqs(@changed)) { warn " '$_'\n"; } - } - if ( $#disappeared >= 0 ) { - warn " No-longer-existing files:\n"; - foreach (uniqs(@disappeared)) { warn " '$_'\n"; } - } - if ( $#no_dest >= 0 ) { - warn " Non-existent destination files:\n"; - foreach (uniqs(@no_dest)) { warn " '$_'\n"; } - } - if ( $#rules_to_apply >= 0 ) { - warn " Rules to apply:\n"; - foreach (uniqs(@rules_to_apply)) { warn " '$_'\n"; } - } -} #END rdb_diagnose_changes - - -#************************************************************ -#************************************************************ -#************************************************************ -#************************************************************ - -#************************************************************ -#************************************************************ -#************************************************************ -#************************************************************ - -# Routines for convenient looping and recursion through rule database -# ================= NEW VERSION ================ - -# There are several places where we need to loop through or recurse -# through rules and files. This tends to involve repeated, tedious -# and error-prone coding of much book-keeping detail. In particular, -# working on files and rules needs access to the variables involved, -# which either involves direct access to the elements of the database, -# and consequent fragility against changes and upgrades in the -# database structure, or involves lots of routines for reading and -# writing data in the database, then with lots of repetitious -# house-keeping code. -# -# The routines below provide a solution. Looping and recursion -# through the database are provided by a set of basic routines where -# each necessary kind of looping and iteration is coded once. 
The -# actual actions are provided as references to action subroutines. -# (These can be either actual references, as in \&routine, or -# anonymous subroutines, as in sub{...}, or aas a zero value 0 or an -# omitted argument, to indicate that no action is to be performed.) -# -# When the action subroutine(s) are actually called, a context for the -# rule and/or file (as appropriate) is given by setting named -## NEW ?? -# variables to REFERENCES to the relevant data values. These can be -# used to retrieve and set the data values. As a convention, -# references to scalars are given by variables named start with "$P", -# as in "$Pdest", while references to arrays start with "$PA", as in -# "$PAint_cmd", and references to hashes with "$PH", as in "$PHsource". -# After the action subroutine has finished, checks for data -# consistency may be made. -## ??? OLD -# variables to the relevant data values. After the action subroutine -# has finished, the database is updated with the values of these named -# variables, with any necessary consistency checks. Thus the action -# subroutines can act on sensibly named variables without needed to -# know the database structure. -# -# The only routines that actually use the database structure and need -# to be changed if that is changed are: (a) the routines rdb_one_rule -# and rdb_one_file that implement the calling of the action subroutines, -# (b) routines for creation of single rules and file items, and (c) to -# a lesser extent, the routine for destroying a file item. -# -# Note that no routine is provided for destroying a rule. During a -# run, a rule, with its source files, may become inaccessible or -# unused. This happens dynamically, depending on the dependencies -# caused by changes in the source file or by error conditions that -# cause the computation of dependencies, particular of latex files, to -# become wrong. In that situation the files certainly come and go in -# the database, but subsidiary rules, with their content information -# on their source files, need to be retained so that their use can be -# reinstated later depending on dynamic changes in other files. -# -# However, there is a potential memory leak unless some pruning is -# done in what is written to the fdb file. (Probably only accessible -# rules and those for which source files exist. Other cases have no -# relevant information that needs to be preserved between runs.) - -# -# - - -#************************************************************ - -# First the top level routines for recursion and iteration - -#************************************************************ - -sub rdb_recurseA { - # Call: rdb_recurseA( rule | [ rules], - # \&rule_act1, \&file_act1, \&file_act2, - # \&rule_act2 ) - # The actions are pointers to subroutines, and may be null (0, or - # undefined) to indicate no action to be applied. - # Recursively acts on the given rules and all ancestors: - # foreach rule found: - # apply rule_act1 - # loop through its files: - # apply file_act1 - # act on its ancestor rule, if any - # apply file_act2 - # apply rule_act2 - # Guards against loops. - # Access to the rule and file data by local variables, only - # for getting and setting. - - # This routine sets a context for anything recursive, with @heads, - # %visited and $depth being set as local variables. 
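The "context by reference" convention described above ($P... for scalars, $PA.../$PH... for arrays and hashes) can be shown in isolation. Parray below is the same idea as latexmk's Parray; the record layout and field names are invented for the example.

#!/usr/bin/perl
use strict; use warnings;

# Return references to the elements of an array, so that callers can read
# *and write* the fields through $$P... variables.
sub Parray {
    my $PA = shift;
    return map { \$PA->[$_] } 0 .. $#$PA;
}

# A made-up rule record: [ command, source, dest, out_of_date ]
my @rule_data = ( 'dvips %O -o %D %S', 'paper.dvi', 'paper.ps', 0 );
my ($Pcmd, $Psource, $Pdest, $Pout_of_date) = Parray( \@rule_data );

# An "action" subroutine sees only named references, not the layout:
my $action = sub { $$Pout_of_date = 1 if ! -e $$Pdest; };
$action->();

print "dest=$$Pdest out_of_date=$$Pout_of_date\n";
print "record now: @rule_data\n";     # the write went into the record itself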
- local @heads = (); - my $rules = shift; - - # Distinguish between single rule (a string) and a reference to an - # array of rules: - if ( ref $rules eq 'ARRAY' ) { @heads = @$rules; } - else { @heads = ( $rules ); } - - # Keep a list of visited rules, used to block loops in recursion: - local %visited = (); - local $depth = 0; - - foreach $rule ( @heads ) { rdb_recurse_rule( $rule, @_ ); } - -} #END rdb_recurseA - -#************************************************************ - -sub rdb_for_all { - # Call: rdb_for_all( \&rule_act1, \&file_act, \&rule_act2 ) - # Loops through all rules and their source files, using the - # specified set of actions, which are pointers to subroutines. - # Sorts rules alphabetically. - # See rdb_for_some for details. - rdb_for_some( [ sort keys %rule_db ], @_); -} #END rdb_for_all - -#************************************************************ - -sub rdb_for_some { - # Call: rdb_for_some( rule | [ rules], - # \&rule_act1, \&file_act, \&rule_act2) - # Actions can be zero, and rules at tail of argument list can be - # omitted. E.g. rdb_for_some( rule, 0, \&file_act ). - # Anonymous subroutines can be used, e.g., rdb_for_some( rule, sub{...} ). - # - # Loops through rules and their source files, using the - # specified set of rules: - # foreach rule: - # apply rule_act1 - # loop through its files: - # apply file_act - # apply rule_act2 - # - # Rule data and file data are made available in local variables - # for access by the subroutines. - - local @heads = (); - my $rules = shift; - # Distinguish between single rule (a string) and a reference to an - # array of rules: - if ( ref $rules eq 'ARRAY' ) { @heads = @$rules; } - else { @heads = ( $rules ); } - - foreach $rule ( @heads ) { - # $rule is implicitly local - &rdb_one_rule( $rule, @_ ); - } -} #END rdb_for_some - -#************************************************************ - -sub rdb_for_one_file { - my $rule = shift; - # Avoid name collisions with general recursion and iteraction routines: - local $file1 = shift; - local $action1 = shift; - rdb_for_some( $rule, sub{rdb_one_file($file1,$action1)} ); -} #END rdb_for_one_file - - -#************************************************************ - -# Routines for inner part of recursion and iterations - -#************************************************************ - -sub rdb_recurse_rule { - # Call: rdb_recurse_rule($rule, \&rule_act1, \&file_act1, \&file_act2, - # \&rule_act2 ) - # to do the work for one rule, recurisvely called from_rules for - # the sources of the rules. - # Assumes recursion context, i.e. that %visited, @heads, $depth. - # We are overriding actions: - my ($rule, $rule_act1, $new_file_act1, $new_file_act2, $rule_act2) - = @_; - # and must propagate the file actions: - local $file_act1 = $new_file_act1; - local $file_act2 = $new_file_act2; - # Prevent loops: - if ( (! $rule) || exists $visited{$rule} ) { return; } - $visited{$rule} = 1; - # Recursion depth - $depth++; - # We may need to repeat actions on dependent rules, without being - # blocked by the test on visited files. So save %visited: - local %visited_at_rule_start = %visited; - # At end, the last value set for %visited wins. - rdb_one_rule( $rule, $rule_act1, \&rdb_recurse_file, $rule_act2 ); - $depth--; - } #END rdb_recurse_rule - -#************************************************************ - -sub rdb_recurse_file { - # Call: rdb_recurse_file to do the work for one file. 
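A miniature, self-contained rendering of the rdb_for_some pattern may make the calling convention clearer. The rule database layout here is invented, but the shape of a typical call with an anonymous action subroutine (as used near the end of rdb_makeB to collect destinations) is the same.

#!/usr/bin/perl
use strict; use warnings;

# Invented database layout: rule => [ dest, out_of_date ].
my %rule_db = ( latex => [ 'paper.dvi', 0 ], dvips => [ 'paper.ps', 1 ] );

our ($rule, $Pdest, $Pout_of_date);   # context variables seen by the actions

sub rdb_for_some {
    my ($rules, $rule_act1, $rule_act2) = @_;
    my @heads = ref $rules eq 'ARRAY' ? @$rules : ($rules);
    foreach my $name (@heads) {
        next unless exists $rule_db{$name};
        # Set up the rule context as dynamically scoped references:
        local $rule = $name;
        local ($Pdest, $Pout_of_date) = map { \$_ } @{ $rule_db{$name} };
        &$rule_act1 if $rule_act1;
        &$rule_act2 if $rule_act2;
    }
}

# Typical call: collect the destinations of some rules with an anonymous sub.
my @dests;
rdb_for_some( [ 'latex', 'dvips' ], sub { push @dests, $$Pdest if $$Pdest; } );
print "targets: @dests\n";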
- # This has no arguments, since it is used as an action subroutine, - # passed as a reference in calls in higher-level subroutine. - # Assumes contexts set for: Recursion, rule, and file - &$file_act1 if $file_act1; - rdb_recurse_rule( $$Pfrom_rule, $rule_act1, $file_act1, $file_act2, - $rule_act2 ) - if $$Pfrom_rule; - &$file_act2 if $file_act2; -} #END rdb_recurse_file - -#************************************************************ - -sub rdb_do_files { - # Assumes rule context, including $PHsource. - # Applies an action to all the source files of the rule. - local $file_act = shift; - my @file_list = sort keys %$PHsource; - foreach my $file ( @file_list ){ - rdb_one_file( $file, $file_act ); - } -} #END rdb_do_files - -#************************************************************ - -# Routines for action on one rule and one file. These are the main -# places (in addition to creation and destruction routines for rules -# and files) where the database structure is accessed. - -#************************************************************ - -sub rdb_one_rule { - # Call: rdb_one_rule( $rule, $rule_act1, $file_act, $rule_act2 ) - # Sets context for rule and carries out the actions. -#===== Accesses rule part of database structure ======= - - local ( $rule, $rule_act1, $file_act, $rule_act2 ) = @_; -#?? &R1; - if ( (! $rule) || ! rdb_rule_exists($rule) ) { return; } - - local ( $PArule_data, $PHsource ) = @{$rule_db{$rule}}; - local ($Pcmd_type, $Pext_cmd, $PAint_cmd, $Ptest_kind, - $Psource, $Pdest, $Pbase, - $Pout_of_date, $Pout_of_date_user, $Prun_time, $Pchanged ) - = Parray( $PArule_data ); - # Correct array ref: - $PAint_cmd = $$PArule_data[2]; - - &$rule_act1 if $rule_act1; - &rdb_do_files( $file_act ) if $file_act; - &$rule_act2 if $rule_act2; - -#?? &R2; -} #END rdb_one_rule - -#************************************************************ - -sub rdb_one_file { - # Call: rdb_one_file($file, $file_act) - # Sets context for file and carries out the action. - # Assumes $rule context set. -#===== Accesses file part of database structure ======= - local ($file, $file_act) = @_; -#?? &F1; - if ( (!$file) ||(!exists ${$PHsource}{$file}) ) { return; } - local $PAfile_data = ${$PHsource}{$file}; - local ($Ptime, $Psize, $Pmd5, $Pfrom_rule, $Pcorrect_after_primary ) - = Parray( $PAfile_data ); - &$file_act if $file_act; - if ( ! rdb_rule_exists( $$Pfrom_rule ) ) { - $$Pfrom_rule = ''; - } -#?? &F2; -} #END rdb_one_file - -#************************************************************ - -# Routines for creation of rules and file items, and for removing file -# items. - -#************************************************************ - -sub rdb_create_rule { - # rdb_create_rule( rule, command_type, ext_cmd, int_cmd, test_kind, - # source, dest, base, - # needs_making, run_time ) - # int_cmd is either a string naming a perl subroutine or it is a - # reference to an array containing the subroutine name and its - # arguments. - # Makes rule. Error if it already exists. - # Omitted arguments: replaced by 0 or '' as needed. -# ==== Sets rule data ==== - my ( $rule, $cmd_type, $int_cmd, $PAext_cmd, $test_kind, - $source, $dest, $base, - $needs_making, $run_time ) = @_; - my $changed = 0; - # Set defaults, and normalize parameters: - foreach ( $cmd_type, $int_cmd, $PAext_cmd, $source, $dest, $base ) { - if (! defined $_) { $_ = ''; } - } - foreach ( $needs_making, $run_time, $test_kind ) { - if (! 
defined $_) { $_ = 0; } - } - if (!defined $test_kind) { - # Default to test on file change - $test_kind = 1; - } - if ( ref( $PAext_cmd ) eq '' ) { - # It is a single command. Convert to array reference: - $PAext_cmd = [ $PAext_cmd ]; - } - else { - # COPY the referenced array: - $PAext_cmd = [ @$PAext_cmd ]; - } - - $rule_db{$rule} = - [ [$cmd_type, $int_cmd, $PAext_cmd, $test_kind, - $source, $dest, $base, $needs_making, 0, $run_time, - $changed ], - {} - ]; - if ($source) { rdb_ensure_file( $rule, $source ); } -} #END rdb_create_rule - -#************************************************************ - -sub rdb_ensure_file { - # rdb_ensure_file( rule, file[, fromrule] ) - # Ensures the source file item exists in the given rule. - # Initialize to current file state if the item is created. - # Then if the fromrule is specified, set it for the file item. -#============ rule and file data set here ====================================== - my $rule = shift; - local ( $new_file, $new_from_rule ) = @_; - if ( ! rdb_rule_exists( $rule ) ) { - die_trace( "$My_name: BUG in rdb_ensure_file: non-existent rule '$rule'" ); - } - if ( ! defined $new_file ) { - die_trace( "$My_name: BUG in rdb_ensure_file: undefined file for '$rule'" ); - } - rdb_one_rule( $rule, - sub{ - if (! exists ${$PHsource}{$new_file} ) { - ${$PHsource}{$new_file} = [fdb_get($new_file), '', 0]; - } - } - ); - if (defined $new_from_rule ) { - rdb_for_one_file( $rule, $new_file, sub{ $$Pfrom_rule = $new_from_rule; }); - } -} #END rdb_ensure_file - -#************************************************************ - -sub rdb_remove_files { - # rdb_remove_file( rule, file,... ) - # Removes file(s) for the rule. - my $rule = shift; - if (!$rule) { return; } - local @files = @_; - rdb_one_rule( $rule, - sub{ foreach (@files) { delete ${$PHsource}{$_}; } } - ); -} #END rdb_remove_files - -#************************************************************ - -sub rdb_rule_exists { - # Call rdb_rule_exists($rule): Returns whether rule exists. - my $rule = shift; - if (! $rule ) { return 0; } - return exists $rule_db{$rule}; -} #END rdb_rule_exists - -#************************************************************ - -sub rdb_file_exists { - # Call rdb_file_exists($rule, $file): - # Returns whether source file item in rule exists. - local ( $rule, $file ) = @_; - local $exists = 0; - rdb_one_rule( $rule, - sub{ $exists = exists( ${$PHsource}{$file} ) ? 1:0; } - ); - return $exists; -} #END rdb_file_exists - -#************************************************************ - -sub rdb_update_gen_files { - # Call: fdb_updateA - # Assumes rule context. Update source files of rule to current state. - rdb_do_files( - sub{ - if ( exists $generated_exts_all{ ext($file) } ) {&rdb_update1;} - } - ); -} #END rdb_update_gen_files - -#************************************************************ - -sub rdb_update_filesA { - # Call: fdb_updateA - # Assumes rule context. Update source files of rule to current state. - rdb_do_files( \&rdb_update1 ); -} - -#************************************************************ - -sub rdb_update1 { - # Call: fdb_update1. - # Assumes file context. Updates file data to correspond to - # current file state on disk - ($$Ptime, $$Psize, $$Pmd5) = fdb_get($file); -} - -#************************************************************ - -sub rdb_set_file1 { - # Call: fdb_file1(rule, file, new_time, new_size, new_md5) - # Sets file time, size and md5. 
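The nested structure that rdb_create_rule and rdb_ensure_file build (a pair of rule data plus a hash of source-file items) can be pictured with a deliberately simplified record; the field set below is reduced and is not the exact latexmk layout.

#!/usr/bin/perl
use strict; use warnings;
use Data::Dumper;

my %rule_db;

sub create_rule {
    my ($rule, $ext_cmd, $source, $dest, $base) = @_;
    $rule_db{$rule} = [ { ext_cmd => $ext_cmd, source => $source,
                          dest => $dest, base => $base, out_of_date => 1 },
                        {} ];                        # no source files yet
    ensure_file( $rule, $source ) if $source;
}

sub ensure_file {
    my ($rule, $file, $from_rule) = @_;
    my $PHsource = $rule_db{$rule}[1];
    # Initialize to a "never seen" state when the item is created:
    $PHsource->{$file} //= { time => 0, size => -1, md5 => 0, from_rule => '' };
    $PHsource->{$file}{from_rule} = $from_rule if defined $from_rule;
}

create_rule( 'dvips', 'dvips %O -o %D %S', 'paper.dvi', 'paper.ps', 'paper' );
ensure_file( 'dvips', 'paper.dvi', 'latex' );   # the .dvi is made by the latex rule
print Dumper( $rule_db{'dvips'} );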
- my $rule = shift; - my $file = shift; - local @new_file_data = @_; - rdb_for_one_file( $rule, $file, sub{ ($$Ptime,$$Psize,$$Pmd5)=@new_file_data; } ); -} - -#************************************************************ - -sub rdb_dummy_file { - # Returns file data for non-existent file -# ==== Uses rule_db structure ==== - return (0, -1, 0, ''); -} - -#************************************************************ -#************************************************************ - -# Predefined subroutines for custom dependency - -sub cus_dep_delete_dest { - # This subroutine is used for situations like epstopdf.sty, when - # the destination (target) of the custom dependency invoking - # this subroutine will be made by the primary run provided the - # file (destination of the custom dependency, source of the - # primary run) doesn't exist. - # It is assumed that the resulting file will be read by the - # primary run. - - # Remove the destination file, to indicate it needs to be remade: - unlink $$Pdest; - # Arrange that the non-existent destination file is not treated as - # an error. The variable changed here is a bit misnamed. - $$Pchanged = 1; - # Ensure a primary run is done - &cus_dep_require_primary_run; - # Return success: - return 0; -} - -#************************************************************ - -sub cus_dep_require_primary_run { - # This subroutine is used for situations like epstopdf.sty, when - # the destination (target) of the custom dependency invoking - # this subroutine will be made by the primary run provided the - # file (destination of the custom dependency, source of the - # primary run) doesn't exist. - # It is assumed that the resulting file will be read by the - # primary run. - - local $cus_dep_target = $$Pdest; - # Loop over all rules and source files: - rdb_for_all( 0, - sub { if ($file eq $cus_dep_target) { - $$Pout_of_date = 1; - $$Pcorrect_after_primary = 1; - } - } - ); - # Return success: - return 0; -} - - -#************************************************************ -#************************************************************ -#************************************************************ -# -# UTILITIES: -# - -#************************************************************ -# Miscellaneous - -sub show_array { -# For use in diagnostics and debugging. -# On stderr, print line with $_[0] = label. -# Then print rest of @_, one item per line preceeded by some space - warn "$_[0]\n"; - shift; - foreach (@_){ warn " $_\n";} -} - -#************************************************************ - -sub Parray { - # Call: Parray( \@A ) - # Returns array of references to the elements of @A - my $PA = shift; - my @P = (undef) x (1+$#$PA); - foreach my $i (0..$#$PA) { $P[$i] = \$$PA[$i]; } - return @P; -} - -#************************************************************ - -sub glob_list { - # Glob a collection of filenames. Sort and eliminate duplicates - # Usage: e.g., @globbed = glob_list(string, ...); - my @globbed = (); - foreach (@_) { - push @globbed, glob; - } - return uniqs( @globbed ); -} - -#================================================== - -sub glob_list1 { - # Glob a collection of filenames. 
- # But no sorting or elimination of duplicates - # Usage: e.g., @globbed = glob_list1(string, ...); - # Since perl's glob appears to use space as separator, I'll do a special check - # for existence of non-globbed file (assumed to be tex like) - - my @globbed = (); - foreach my $file_spec (@_) { - # Problem, when the PATTERN contains spaces, the space(s) are - # treated as pattern separaters (in MSWin at least). - # MSWin: I can quote the pattern (is that MSWin native, or also - # cygwin?) - # Linux: Quotes in a pattern are treated as part of the filename! - # So quoting a pattern is definitively wrong. - # The following hack solves this partly, for the cases that there is no wildcarding - # and the specified file exists possibly space-containing, and that there is wildcarding, - # but spaces are prohibited. - if ( -e $file_spec || -e "$file_spec.tex" ) { - # Non-globbed file exists, return the file_spec. - # Return $file_spec only because this is not a file-finding subroutine, but - # only a globber - push @globbed, $file_spec; - } - else { - # This glob fails to work as desired, if the pattern contains spaces. - push @globbed, glob( "$file_spec" ); - } - } - return @globbed; -} - -#************************************************************ -# Miscellaneous - -sub prefix { - #Usage: prefix( string, prefix ); - #Return string with prefix inserted at the front of each line - my @line = split( /\n/, $_[0] ); - my $prefix = $_[1]; - for (my $i = 0; $i <= $#line; $i++ ) { - $line[$i] = $prefix.$line[$i]."\n"; - } - return join( "", @line ); -} - - -#************************************************************ -#************************************************************ -# File handling utilities: - - -#************************************************************ - -sub get_latest_mtime -# - arguments: each is a filename. -# - returns most recent modify time. -{ - my $return_mtime = 0; - foreach my $include (@_) - { - my $include_mtime = &get_mtime($include); - # The file $include may not exist. If so ignore it, otherwise - # we'll get an undefined variable warning. 
- if ( ($include_mtime) && ($include_mtime > $return_mtime) ) - { - $return_mtime = $include_mtime; - } - } - return $return_mtime; -} - -#************************************************************ - -sub get_mtime_raw -{ - my $mtime = (stat($_[0]))[9]; - return $mtime; -} - -#************************************************************ - -sub get_mtime { - return get_mtime0($_[0]); -} - -#************************************************************ - -sub get_mtime0 { - # Return time of file named in argument - # If file does not exist, return 0; - if ( -e $_[0] ) { - return get_mtime_raw($_[0]); - } - else { - return 0; - } -} - -#************************************************************ - -sub get_size { - # Return time of file named in argument - # If file does not exist, return 0; - if ( -e $_[0] ) { - return get_size_raw($_[0]); - } - else { - return 0; - } -} - -#************************************************************ - -sub get_size_raw -{ - my $size = (stat($_[0]))[7]; - return $size; -} - -#************************************************************ - -sub get_time_size { - # Return time and size of file named in argument - # If file does not exist, return (0,-1); - if ( -e $_[0] ) { - return get_time_size_raw($_[0]); - } - else { - return (0,-1); - } -} - -#************************************************************ - -sub get_time_size_raw -{ - my $mtime = (stat($_[0]))[9]; - my $size = (stat($_[0]))[7]; - return ($mtime, $size); -} - -#************************************************************ - -sub get_checksum_md5 { - my $source = shift; - my $input = new FileHandle; - my $md5 = Digest->MD5; - my $ignore_pattern = ''; - - if ( $source eq "" ) { - # STDIN: - open( $input, '-' ); - } - else { - open( $input, '<', $source ) - or return 0; - my ($base, $path, $ext) = fileparseA( $source ); - $ext =~ s/^\.//; - if ( exists $hash_calc_ignore_pattern{$ext} ) { - $ignore_pattern = $hash_calc_ignore_pattern{$ext}; - } - } - - if ( $ignore_pattern ) { - while (<$input>) { - if ( /$ignore_pattern/ ){ - $_= ''; - } - $md5->add($_); - } - } - else { - $md5->addfile($input); - } - close $input; - return $md5->hexdigest(); -} - -#************************************************************ - -#?? OBSOLETE -# Find file with default extension -# Usage: find_file_ext( name, default_ext, ref_to_array_search_path) -sub find_file_ext -#?? Need to use kpsewhich, if possible. Leave to find_file? -{ - my $full_filename = shift; - my $ext = shift; - my $ref_search_path = shift; - my $full_filename1 = &find_file($full_filename, $ref_search_path, '1'); -#print "Finding \"$full_filename\" with ext \"$ext\" ... "; - if (( $full_filename1 eq '' ) || ( ! -e $full_filename1 )) - { - my $full_filename2 = - &find_file("$full_filename.$ext",$ref_search_path,'1'); - if (( $full_filename2 ne '' ) && ( -e $full_filename2 )) - { - $full_filename = $full_filename2; - } - else - { - $full_filename = $full_filename1; - } - } - else - { - $full_filename = $full_filename1; - } -#print "Found \"$full_filename\".\n"; - return $full_filename; -} - -#************************************************************ -#?? OBSOLETE -# given filename and path, return full name of file, or die if none found. -# when force_include_mode=1, only warn if an include file was not -# found, and return 0 (PvdS). -# Usage: find_file(name, ref_to_array_search_path, warn_on_continue) -sub find_file -#?? 
Need to use kpsewhich, if possible -{ - my $name = $_[0]; - my $ref_path = $_[1]; - my $dir; - if ( $name =~ /^\// ) - { - #Aboslute pathname (by UNIX standards) - if ( (!-e $name) && ( $_[2] eq '' ) ) { - if ($force_include_mode) { - warn "$My_name: Could not find file [$name]\n"; - } - else { - die "$My_name: Could not find file [$name]\n"; - } - } - return $name; - } - # Relative pathname - foreach $dir ( @{$ref_path} ) - { -#warn "\"$dir\", \"$name\"\n"; - if (-e "$dir/$name") - { - return("$dir/$name"); - } - } - if ($force_include_mode) - { - if ( $_[2] eq '' ) - { - warn "$My_name: Could not find file [$name] in path [@{$ref_path}]\n"; - warn " assuming in current directory (./$name)\n"; - } - return("./$name"); - } - else - { - if ( $_[2] ne '' ) - { - return(''); - } -# warn "\"$name\", \"$ref_path\", \"$dir\"\n"; - die "$My_name: Could not find file [$name] in path [@{$ref_path}]\n"; - } -} - -#************************************************************ - -sub find_file1 { -#?? Need to use kpsewhich, if possible - - # Usage: find_file1(name, ref_to_array_search_path) - # Modified find_file, which doesn't die. - # Given filename and path, return array of: - # full name - # retcode - # On success: full_name = full name with path, retcode = 0 - # On failure: full_name = given name, retcode = 1 - - my $name = $_[0]; - # Make local copy of path, since we may rewrite it! - my @path = @{$_[1]}; - if ( $name =~ /^\// ) { - # Absolute path (if under UNIX) - # This needs fixing, in general - if (-e $name) { return( $name, 0 );} - else { return( $name, 1 );} - } - foreach my $dir ( @path ) { - #??print "-------------dir='$dir', "; - # Make $dir concatenatable, and empty for current dir: - if ( $dir eq '.' ) { - $dir = ''; - } - elsif ( $dir =~ /[\/\\:]$/ ) { - #OK if dir ends in / or \ or : - } - elsif ( $dir ne '' ) { - #Append directory separator only to non-empty dir - $dir = "$dir/"; - } - #?? print " newdir='$dir'\n"; - if (-e "$dir$name") { - return("$dir$name", 0); - } - } - my @kpse_result = kpsewhich( $name ); - if ($#kpse_result > -1) { - return( $kpse_result[0], 0); - } - return("$name" , 1); -} #END find_file1 - -#************************************************************ - -sub find_file_list1 { - # Modified version of find_file_list that doesn't die. - # Given output and input arrays of filenames, a file suffix, and a path, - # fill the output array with full filenames - # Return a status code: - # Retcode = 0 on success - # Retocde = 1 if at least one file was not found - # Usage: find_file_list1( ref_to_output_file_array, - # ref_to_input_file_array, - # suffix, - # ref_to_array_search_path - # ) - - my $ref_output = $_[0]; - my $ref_input = $_[1]; - my $suffix = $_[2]; - my $ref_search = $_[3]; - -#?? show_array( "=====find_file_list1. Suffix: '$suffix'\n Source:", @$ref_input ); -#?? show_array( " Bibinputs:", @$ref_search ); - - my @return_list = (); # Generate list in local array, since input - # and output arrays may be same - my $retcode = 0; - foreach my $file (@$ref_input) { - my ($tmp_file, $find_retcode) = &find_file1( "$file$suffix", $ref_search ); - if ($tmp_file) { - push @return_list, $tmp_file; - } - if ( $find_retcode != 0 ) { - $retcode = 1; - } - } - @$ref_output = @return_list; -#?? show_array( " Output", @$ref_output ); -#?? 
foreach (@$ref_output) { if ( /\/\// ) { print " ====== double slash in '$_'\n"; } } - return $retcode; -} #END find_file_list1 - -#************************************************************ - -sub kpsewhich { -# Usage: kpsewhich( filespec, ...) -# Returns array of files with paths as found by kpsewhich -# kpsewhich( 'try.sty', 'jcc.bib' ); -# Can also do, e.g., -# kpsewhich( '-format=bib', 'trial.bib', 'file with spaces'); - my $cmd = $kpsewhich; - my @args = @_; - foreach (@args) { - if ( ! /^-/ ) { - $_ = "\"$_\""; - } - } - foreach ($cmd) { - s/%[RBTDO]//g; - } - $cmd =~ s/%S/@args/g; - my @found = (); - local $fh; - open $fh, "$cmd|" - or die "Cannot open pipe for \"$cmd\"\n"; - while ( <$fh> ) { - s/^\s*//; - s/\s*$//; - push @found, $_; - } - close $fh; -# show_array( "Kpsewhich: '$cmd', '$file_list' ==>", @found ); - return @found; -} - -#################################################### - -sub add_cus_dep { - # Usage: add_cus_dep( from_ext, to_ext, flag, sub_name ) - # Add cus_dep after removing old versions - my ($from_ext, $to_ext, $must, $sub_name) = @_; - remove_cus_dep( $from_ext, $to_ext ); - push @cus_dep_list, "$from_ext $to_ext $must $sub_name"; -} - -#################################################### - -sub remove_cus_dep { - # Usage: remove_cus_dep( from_ext, to_ext ) - my ($from_ext, $to_ext) = @_; - my $i = 0; - while ($i <= $#cus_dep_list) { - if ( $cus_dep_list[$i] =~ /^$from_ext $to_ext / ) { - splice @cus_dep_list, $i, 1; - } - else { - $i++; - } - } -} - -#################################################### - -sub show_cus_dep { - show_array( "Custom dependency list:", @cus_dep_list ); -} - -#################################################### - -sub find_dirs1 { - # Same as find_dirs, but argument is single string with directories - # separated by $search_path_separator - find_dirs( &split_search_path( $search_path_separator, ".", $_[0] ) ); -} - - -#************************************************************ - -sub find_dirs { -# @_ is list of directories -# return: same list of directories, except that for each directory -# name ending in //, a list of all subdirectories (recursive) -# is added to the list. -# Non-existent directories and non-directories are removed from the list -# Trailing "/"s and "\"s are removed - local @result = (); - my $find_action - = sub - { ## Subroutine for use in File::find - ## Check to see if we have a directory - if (-d) { push @result, $File::Find::name; } - }; - foreach my $directory (@_) { - my $recurse = ( $directory =~ m[//$] ); - # Remove all trailing /s, since directory name with trailing / - # is not always allowed: - $directory =~ s[/+$][]; - # Similarly for MSWin reverse slash - $directory =~ s[\\+$][]; - if ( ! -e $directory ){ - next; - } - elsif ( $recurse ){ - # Recursively search directory - find( $find_action, $directory ); - } - else { - push @result, $directory; - } - } - return @result; -} - -#************************************************************ - -sub uniq -# Read arguments, delete neighboring items that are identical, -# return array of results -{ - my @sort = (); - my ($current, $prev); - my $first = 1; - while (@_) - { - $current = shift; - if ($first || ($current ne $prev) ) - { - push @sort, $current; - $prev = $current; - $first = 0; - } - } - return @sort; -} - -#================================================== - -sub uniq1 { - # Usage: uniq1( strings ) - # Returns array of strings with duplicates later in list than - # first occurence deleted. Otherwise preserves order. 
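add_cus_dep and remove_cus_dep maintain the custom-dependency list that users normally set up from an rc file. A typical use, in the style of the documented .fig-to-.eps example; the fig2dev command line is only one possible choice, and this snippet assumes the routines defined above:

# Typically placed in a latexmkrc file; 'fig2eps' is a user-written helper.
add_cus_dep( 'fig', 'eps', 0, 'fig2eps' );

sub fig2eps {
    my $base = shift;                    # base name without extension
    # Any converter will do; fig2dev is one common choice.
    return system( "fig2dev -Leps \"$base.fig\" \"$base.eps\"" );
}

show_cus_dep();                          # prints the current list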
- - my @strings = (); - my %string_hash = (); - - foreach my $string (@_) { - if (!exists( $string_hash{$string} )) { - $string_hash{$string} = 1; - push @strings, $string; - } - } - return @strings; -} - -#************************************************************ - -sub uniqs { - # Usage: uniq2( strings ) - # Returns array of strings sorted and with duplicates deleted - return uniq( sort @_ ); -} - -#************************************************************ - -sub ext { - # Return extension of filename. Extension includes the period - my $file_name = $_[0]; - my ($base_name, $path, $ext) = fileparseA( $file_name ); - return $ext; - } - -#************************************************************ - -sub fileparseA { - # Like fileparse but replace $path for current dir ('./' or '.\') by '' - # Also default second argument to get normal extension. - my $given = $_[0]; - my $pattern = '\.[^\.]*'; - if ($#_ > 0 ) { $pattern = $_[1]; } - my ($base_name, $path, $ext) = fileparse( $given, $pattern ); - if ( ($path eq './') || ($path eq '.\\') ) { - $path = ''; - } - return ($base_name, $path, $ext); - } - -#************************************************************ - -sub fileparseB { - # Like fileparse but with default second argument for normal extension - my $given = $_[0]; - my $pattern = '\.[^\.]*'; - if ($#_ > 0 ) { $pattern = $_[1]; } - my ($base_name, $path, $ext) = fileparse( $given, $pattern ); - return ($base_name, $path, $ext); - } - -#************************************************************ - -sub split_search_path -{ -# Usage: &split_search_path( separator, default, string ) -# Splits string by separator and returns array of the elements -# Allow empty last component. -# Replace empty terms by the default. - my $separator = $_[0]; - my $default = $_[1]; - my $search_path = $_[2]; - my @list = split( /$separator/, $search_path); - if ( $search_path =~ /$separator$/ ) { - # If search path ends in a blank item, the split subroutine - # won't have picked it up. - # So add it to the list by hand: - push @list, ""; - } - # Replace each blank argument (default) by current directory: - for ($i = 0; $i <= $#list ; $i++ ) { - if ($list[$i] eq "") {$list[$i] = $default;} - } - return @list; -} - -################################# - - -sub tempfile1 { - # Makes a temporary file of a unique name. I could use file::temp, - # but it is not present in all versions of perl - # Filename is of form $tmpdir/$_[0]nnn$suffix, where nnn is an integer - my $tmp_file_count = 0; - my $prefix = $_[0]; - my $suffix = $_[1]; - while (1==1) { - # Find a new temporary file, and make it. - $tmp_file_count++; - my $tmp_file = "${tmpdir}/${prefix}${tmp_file_count}${suffix}"; - if ( ! -e $tmp_file ) { - open( TMP, ">$tmp_file" ) - or next; - close(TMP); - return $tmp_file; - } - } - die "$My_name.tempfile1: BUG TO ARRIVE HERE\n"; -} - -################################# - -#************************************************************ -#************************************************************ -# Process/subprocess routines - -sub Run_msg { - # Same as Run, but give message about my running - warn_running( "Running '$_[0]'" ); - Run($_[0]); -} - -sub Run { -# Usage: Run ("program arguments "); -# or Run ("start program arguments"); -# or Run ("NONE program arguments"); -# First form is just a call to system, and the routine returns after the -# program has finished executing. 
-# Second form (with 'start') runs the program detached, as appropriate for -# the operating system: It runs "program arguments &" on UNIX, and -# "start program arguments" on WIN95 and WINNT. If multiple start -# words are at the beginning of the command, the extra ones are removed. -# Third form (with 'NONE') does not run anything, but prints an error -# message. This is provided to allow program names defined in the -# configuration to flag themselves as unimplemented. -# Return value is a list (pid, exitcode): -# If process is spawned sucessfully, and I know the PID, -# return (pid, 0), -# else if process is spawned sucessfully, but I do not know the PID, -# return (0, 0), -# else if process is run, -# return (0, exitcode of process) -# else (I fail to run the requested process) -# return (0, suitable return code) -# where return code is 1 if cmdline is null or begins with "NONE" (for -# an unimplemented command) -# or the return value of the system subroutine. - - -# Split command line into one word per element, separating words by -# one (OR MORE) spaces: -# The purpose of this is to identify latexmk-defined pseudocommands -# 'start' and 'NONE'. -# After dealing with them, the command line is reassembled - my $cmd_line = $_[0]; - if ( $cmd_line eq '' ) { - traceback( "$My_name: Bug OR configuration error\n". - " In run of'$rule', attempt to run a null program" ); - return (0, 1); - } - if ( $cmd_line =~ /^start +/ ) { - #warn "Before: '$cmd_line'\n"; - # Run detached. How to do this depends on the OS - # But first remove extra starts (which may have been inserted - # to force a command to be run detached, when the command - # already contained a "start"). - while ( $cmd_line =~ s/^start +// ) {} - #warn "After: '$cmd_line'\n"; - return &Run_Detached( $cmd_line ); - } - elsif ( $cmd_line =~ /^NONE/ ) { - warn "$My_name: ", - "Program not implemented for this version. Command line:\n"; - warn " '$cmd_line'\n"; - return (0, 1); - } - else { - # The command is given to system as a single argument, to force shell - # metacharacters to be interpreted: - return( 0, system( $cmd_line ) ); - } -} - -#************************************************************ - -sub Run_Detached { -# Usage: Run_Detached ("program arguments "); -# Runs program detached. Returns 0 on success, 1 on failure. -# Under UNIX use a trick to avoid the program being killed when the -# parent process, i.e., me, gets a ctrl/C, which is undesirable for pvc -# mode. (The simplest method, system ("program arguments &"), makes the -# child process respond to the ctrl/C.) -# Return value is a list (pid, exitcode): -# If process is spawned sucessfully, and I know the PID, -# return (pid, 0), -# else if process is spawned sucessfully, but I do not know the PID, -# return (0, 0), -# else if I fail to spawn a process -# return (0, 1) - - my $cmd_line = $_[0]; - -## warn "Running '$cmd_line' detached...\n"; - if ( $cmd_line =~ /^NONE / ) { - warn "$My_name: ", - "Program not implemented for this version. Command line:\n"; - warn " '$cmd_line'\n"; - return (0, 1); - } - - if ( "$^O" eq "MSWin32" ){ - # Win95, WinNT, etc: Use MS's start command: - return( 0, system( "start $cmd_line" ) ); - } else { - # Assume anything else is UNIX or clone - # For this purpose cygwin behaves like UNIX. - ## warn "Run_Detached.UNIX: A\n"; - my $pid = fork(); - ## warn "Run_Detached.UNIX: B pid=$pid\n"; - if ( ! 
defined $pid ) { - ## warn "Run_Detached.UNIX: C\n"; - warn "$My_name: Could not fork to run the following command:\n"; - warn " '$cmd_line'\n"; - return (0, 1); - } - elsif( $pid == 0 ){ - ## warn "Run_Detached.UNIX: D\n"; - # Forked child process arrives here - # Insulate child process from interruption by ctrl/C to kill parent: - # setpgrp(0,0); - # Perhaps this works if setpgrp doesn't exist - # (and therefore gives fatal error): - eval{ setpgrp(0,0);}; - exec( $cmd_line ); - # Exec never returns; it replaces current process by new process - die "$My_name forked process: could not run the command\n", - " '$cmd_line'\n"; - } - ##warn "Run_Detached.UNIX: E\n"; - # Original process arrives here - return ($pid, 0); - } - # NEVER GET HERE. - ##warn "Run_Detached.UNIX: F\n"; -} - -#************************************************************ - -sub find_process_id { -# find_process_id(string) finds id of process containing string and -# being run by the present user. Typically the string will be the -# name of the process or part of its command line. -# On success, this subroutine returns the process ID. -# On failure, it returns 0. -# This subroutine only works on UNIX systems at the moment. - - if ( $pid_position < 0 ) { - # I cannot do a ps on this system - return (0); - } - - my $looking_for = $_[0]; - my @ps_output = `$pscmd`; - -# There may be multiple processes. Find only latest, -# almost surely the one with the highest process number -# This will deal with cases like xdvi where a script is used to -# run the viewer and both the script and the actual viewer binary -# have running processes. - my @found = (); - - shift(@ps_output); # Discard the header line from ps - foreach (@ps_output) { - next unless ( /$looking_for/ ) ; - my @ps_line = split (' '); -# OLD return($ps_line[$pid_position]); - push @found, $ps_line[$pid_position]; - } - - if ($#found < 0) { - # No luck in finding the specified process. 
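Run_Detached encapsulates the platform difference for starting a program without blocking: "start" on Windows, and fork + setpgrp + exec on UNIX so that a Ctrl-C aimed at the parent does not kill the child. A minimal stand-alone version of the same idea, with a hypothetical command:

#!/usr/bin/perl
use strict; use warnings;

sub run_detached {
    my $cmd_line = shift;
    if ( $^O eq 'MSWin32' ) {
        # Windows: let the command interpreter detach it.
        return ( 0, system("start $cmd_line") );
    }
    my $pid = fork();
    return ( 0, 1 ) unless defined $pid;        # fork failed
    if ( $pid == 0 ) {
        # Child: detach from the parent's process group so a Ctrl-C aimed at
        # the parent does not reach us; then replace ourselves by the command.
        eval { setpgrp( 0, 0 ); };
        exec $cmd_line;
        die "could not exec '$cmd_line'\n";     # only reached if exec fails
    }
    return ( $pid, 0 );                          # parent: report the child's pid
}

my ($pid, $ret) = run_detached( 'sleep 2' );     # hypothetical long-running command
print "detached pid=$pid ret=$ret\n";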
- return(0); - } - @found = reverse sort @found; - if ($diagnostics) { - print "Found the following processes concerning '$looking_for'\n", - " @found\n", - " I will use $found[0]\n"; - } - return $found[0]; -} - -#************************************************************ -#************************************************************ -#************************************************************ - -# Directory stack routines - -sub pushd { - push @dir_stack, cwd(); - if ( $#_ > -1) { chdir $_[0]; } -} - -#************************************************************ - -sub popd { - if ($#dir_stack > -1 ) { chdir pop @dir_stack; } -} - -#************************************************************ - -sub ifcd_popd { - if ( $do_cd ) { - warn "$My_name: Undoing directory change\n"; - &popd; - } -} - -#************************************************************ - -sub finish_dir_stack { - while ($#dir_stack > -1 ) { &popd; } -} - -#************************************************************ -#************************************************************ -#************************************************************ -#************************************************************ -#************************************************************ -#************************************************************ -#************************************************************ -#************************************************************ diff --git a/Build/source/texk/texlive/linked_scripts/latexmk/latexmk.pl b/Build/source/texk/texlive/linked_scripts/latexmk/latexmk.pl new file mode 100755 index 00000000000..4d124a67893 --- /dev/null +++ b/Build/source/texk/texlive/linked_scripts/latexmk/latexmk.pl @@ -0,0 +1,6031 @@ +eval '(exit $?0)' && eval 'exec perl -x -S "$0" ${1+"$@"}' && +eval 'exec perl -x -S "$0" $argv:q' +if 0; +#!/usr/bin/perl -w +#!/opt/local/bin/perl -w +#!/usr/local/bin/perl -w +# The above code allows this script to be run under UNIX/LINUX without +# the need to adjust the path to the perl program in a "shebang" line. +# (The location of perl changes between different installations, and +# may even be different when several computers running different +# flavors of UNIX/LINUX share a copy of latex or other scripts.) The +# script is started under the default command interpreter sh, and the +# evals in the first two lines restart the script under perl, and work +# under various flavors of sh. The -x switch tells perl to start the +# script at the first #! line containing "perl". The "if 0;" on the +# 3rd line converts the first two lines into a valid perl statement +# that does nothing. +# +# Source of the above: manpage for perlrun + +# Delete #??!! when working + +# See ?? <=============================== + +# Results of 8 Sep 2007: + +# Some improvements relative to the issues below. + +# ????????: +# Why is bibtex not always running right? Or running when it shouldn't +# I've put in rdb_make_links in a few places. +# and rdb_write +# Problem is that aux file is always out of date, until after a +# primary run. Ensure fdb and c. is updated enough etc. +# I may have it correct now: fdb_write in makeB +# See also routine rdb_update_files_for_rule, and who calls it + +# Apparently excess runs of latex after change in .tex file that entails +# change in bibliography. + +# Now I am missing diagnostics + + +## ???!!!!!!!!!!!!! Should I remove bibtex rule? NO +## ?? Need to set dependence of extra bibtex rules on .bib file +## ?? Put $pass as variable in rule. 
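
[Editor's note] The self-restart header near the top of this new latexmk.pl (the two eval lines followed by "if 0;") is the standard trick from the perlrun manpage for scripts that must locate perl without a hard-coded shebang path. A minimal sketch of the same idea follows; the header lines are the ones quoted in the hunk above, and only the toy body and comments are added here.

    eval '(exit $?0)' && eval 'exec perl -x -S "$0" ${1+"$@"}' &&
    eval 'exec perl -x -S "$0" $argv:q'
      if 0;
    #!/usr/bin/perl -w
    # sh (or csh) executes one of the eval/exec lines above and replaces
    # itself with perl; the -x switch then makes perl skip ahead to this
    # "#!...perl" line and start the script proper.  When perl itself
    # reads the file, the first three lines form one statement that the
    # trailing "if 0;" turns into a no-op.
    use strict;
    print "Interpreted by perl $], started as $0\n";
    print "Arguments: @ARGV\n" if @ARGV;
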
+ +#======================================= + + +#?? Check all code for rdb stuff. +#?? Use of $update and $failure, etc +# Especially in pvc. Should I restore source file set up +# if there is a latex error?????????????????????? +#?? Force mode doesn't appear to do force (if error in latex file) +#??? Get banner back in. +#?? ==> Clean up of rdb. It accumulates files that aren't in use any more. +# Restrict to dependents (existent or not) discovered during +# parse of log file, and its consequences. +#?? CORRECT DIAGNOSTICS ON CHANGED FILES IF THEY DIDN'T EXIST BEFORE +#?? Further corrections to deal with disappeared source files for custom dependencies. +# Message repeatedly appears about remake when source file of cusdep doesn't exist. +#?? logfile w/o fdb file: don't set changed file, perhaps for generated exts. +# Reconsider +#?? Do proper run-stuff for bibtex, makeindex, cus-deps. OK I think +# Parse and correctly find bst and ist files +#?? Remove superfluous code when it's working. Mostly done. +#?? update_source_times in particular. I think it's done OK +#?? Add making of other files to rdb. Unify +#?? Ditto for printing and viewing? +#?? Update documentation + +# ATTEMPT TO ALLOW FILENAMES WITH SPACES: +# (as of 1 Apr 2006, and then 14 Sep. 2007) + +# Problems: +# A. Quoting filenames will not always work. +# a. Under UNIX, quotes are legal in filenames, so when PERL +# directly runs a binary, a quoted filename will be treated as +# as a filename containing a quote character. But when it calls +# a shell, the quotes are handled by the shell as quotes. +# b. Under MSWin32, quotes are illegal filename characters, and tend +# to be handled correctly. +# c. But under cygwin, results are not so clear (there are many +# combinations: native v. cygwin perl, native v cygwin programs +# NT v. unix scripts, which shell is called. +# B. TeX doesn't always handle filenames with spaces gracefully. +# a. UNIX/LINUX: The version on gluon2 Mar 31, 2006 to Sep. 2007) +# doesn't handle them at all. (TeX treats space as separator.) +# b. At least some later versions actually do (Brad Miller e-mail, +# Sep. 2007). +# c. fptex [[e-TeXk, Version 3.141592-2.1 (Web2c 7.5.2)] does, on +# my MSWin at home. In \input the filename must be in quotes. +# d. Bibtex [BibTeX (Web2c 7.5.2) 0.99c on my MSWin system at home, +# Sep. 2007] does not allow names of bibfiles to have spaces. +# C. =====> Using the shell for command lines is not safe, since special +# characters can cause lots of mayhem. +# It will therefore be a good idea to sanitize filenames. +# +# I've sanitized all calls out: +# a. system and exec use a single argument, which forces +# use of shell, under all circumstances +# Thus I can safely use quotes on filenames: They will be handled by +# the shell under UNIX, and simply passed on to the program under MSWin32. +# b. I reorganized Run, Run_Detached to use single command line +# c. All calls to Run and Run_Detached have quoted filenames. +# d. So if a space-free filename with wildcards is given on latexmk's +# command line, and it globs to space-containing filename(s), that +# works (fptex on home computer, native NT tex) +# e. ====> But globbing fails: the glob function takes space as filename +# separator. ==================== + +#================= TO DO ================ +# +# 1. See ?? ESPECIALLY $MSWin_fudge_break +# 2. Check fudged conditions in looping and make_files +# 3. 
Should not completely abort after a run that ends in failure from latex +# Missing input files (including via custom dependency) should be checked for +# a change in status +# If sources for missing files from custom dependency +# are available, then do a rerun +# If sources of any kind become available rerun (esp. for pvc) +# rerun +# Must parse log_file after unsuccessful run of latex: it may give +# information about missing files. +# 4. Check file of bug reports and requests +# 5. Rationalize bibtex warnings and errors. Two almost identical routines. +# Should 1. Use single routine +# 2. Convert errors to failure only in calling routine +# 3. Save first warning/error. + + +# To do: +# Rationalize again handling of include files. +# Now I use kpsewhich to do searches, if file not found +# (How do I avoid getting slowed down too much?) +# Better parsing of log file for includes. +# Document the assumptions at each stage of processing algorithm. +# Option to restart previewer automatically, if it dies under -pvc +# Test for already running previewer gets wrong answer if another +# process has the viewed file in its command line + +$my_name = 'latexmk'; +$My_name = 'Latexmk'; +$version_num = '4.01'; +$version_details = "$My_name, John Collins, 24 September 2008"; + + +use Config; +use File::Copy; +use File::Basename; +use FileHandle; +use File::Find; +use Cwd; # To be able to change cwd +use Cwd "chdir"; # Ensure $ENV{PWD} tracks cwd +use Digest; + +#use strict; + +# The following variables are assigned once and then used in symbolic +# references, so we need to avoid warnings 'name used only once': +use vars qw( $dvi_update_command $ps_update_command $pdf_update_command ); + +# Translation of signal names to numbers and vv: +%signo = (); +@signame = (); +if ( defined $Config{sig_name} ) { + $i = 0; + foreach $name (split(' ', $Config{sig_name})) { + $signo{$name} = $i; + $signame[$i] = $name; + $i++; + } +} +else { + warn "Something wrong with the perl configuration: No signals?\n"; +} + +## Copyright John Collins 1998-2008 +## (username collins at node phys.psu.edu) +## (and thanks to David Coppit (username david at node coppit.org) +## for suggestions) +## Copyright Evan McLean +## (modifications up to version 2) +## Copyright 1992 by David J. Musliner and The University of Michigan. +## (original version) +## +## This program is free software; you can redistribute it and/or modify +## it under the terms of the GNU General Public License as published by +## the Free Software Foundation; either version 2 of the License, or +## (at your option) any later version. +## +## This program is distributed in the hope that it will be useful, +## but WITHOUT ANY WARRANTY; without even the implied warranty of +## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +## GNU General Public License for more details. +## +## You should have received a copy of the GNU General Public License +## along with this program; if not, write to the Free Software +## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 +## +## +## +## NEW FEATURES, since v. 2.0: +## 1. Correct algorithm for deciding how many times to run latex: +## based on whether source file(s) change between runs +## 2. Continuous preview works, and can be of ps file or dvi file +## 3. pdf creation by pdflatex possible +## 4. Defaults for commands are OS dependent. +## 5. Parsing of log file instead of source file is used to +## obtain dependencies, by default. 
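
[Editor's note] The %signo/@signame tables built above from $Config{sig_name} are what later allow the UNIX defaults to send a reload signal to a previewer, for example SIGUSR1 to xdvi. A small illustrative sketch of that use is given below; the PID is taken from the command line here purely for the demonstration (latexmk itself discovers it by parsing ps output), and nothing in the sketch is latexmk's own code.

    #!/usr/bin/perl -w
    # Sketch: build the signal name-to-number table the same way as the
    # code above, then use it to ask a running xdvi to re-read its dvi
    # file.  The PID argument is a placeholder for this illustration.
    use strict;
    use Config;

    my %signo;
    my $i = 0;
    foreach my $name ( split ' ', $Config{sig_name} ) {
        $signo{$name} = $i++;
    }

    my $viewer_pid = shift @ARGV or die "Usage: $0 <pid-of-xdvi>\n";
    if ( defined $signo{USR1} ) {
        # xdvi reloads its dvi file on SIGUSR1, which is why the UNIX
        # defaults later in this file set $dvi_update_signal = $signo{USR1}.
        kill( $signo{USR1}, $viewer_pid )
            or warn "Could not signal process $viewer_pid: $!\n";
    }
    else {
        warn "This system does not define a USR1 signal\n";
    }
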
+## +## Modification log for 28 Mar 2007 onwards in detail +## +## 24 Sep 2008, John Collins Release version 4.01. +## +## 1998-2008, John Collins. Many improvements and fixes. +## +## Modified by Evan McLean (no longer available for support) +## Original script (RCS version 2.3) called "go" written by David J. Musliner +## +## 2.0 - Final release, no enhancements. LatexMk is no longer supported +## by the author. +## 1.9 - Fixed bug that was introduced in 1.8 with path name fix. +## - Fixed buglet in man page. +## 1.8 - Add not about announcement mailling list above. +## - Added texput.dvi and texput.aux to files deleted with -c and/or +## the -C options. +## - Added landscape mode (-l option and a bunch of RC variables). +## - Added sensing of "\epsfig{file=...}" forms in dependency generation. +## - Fixed path names when specified tex file is not in the current +## directory. +## - Fixed combined use of -pvc and -s options. +## - Fixed a bunch of speling errors in the source. :-) +## - Fixed bugs in xdvi patches in contrib directory. +## 1.7 - Fixed -pvc continuous viewing to reattach to pre-existing +## process correctly. +## - Added $pscmd to allow changing process grepping for different +## systems. +## 1.6 - Fixed buglet in help message +## - Fixed bugs in detection of input and include files. +## 1.5 - Removed test message I accidentally left in version 1.4 +## - Made dvips use -o option instead of stdout redirection as some +## people had problems with dvips not going to stdout by default. +## - Fixed bug in input and include file detection +## - Fixed dependency resolution process so it detects new .toc file +## and makeindex files properly. +## - Added dvi and postscript filtering options -dF and -pF. +## - Added -v version commmand. +## 1.4 - Fixed bug in -pvc option. +## - Made "-F" option include non-existant file in the dependency list. +## (RC variable: $force_include_mode) +## - Added .lot and .lof files to clean up list of extensions. +## - Added file "texput.log" to list of files to clean for -c. +## - LatexMk now handles file names in a similar fashion to latex. +## The ".tex" extension is no longer enforced. +## - Added $texfile_search RC variable to look for default files. +## - Fixed \input and \include so they add ".tex" extension if necessary. +## - Allow intermixing of file names and options. +## - Added "-d" and banner options (-bm, -bs, and -bi). +## (RC variables: $banner, $banner_message, $banner_scale, +## $banner_intensity, $tmpdir) +## - Fixed "-r" option to detect an command line syntax errors better. +## 1.3 - Added "-F" option, patch supplied by Patrick van der Smagt. +## 1.2 - Added "-C" option. +## - Added $clean_ext and $clean_full_ext variables for RC files. +## - Added custom dependency generation capabilities. +## - Added command line and variable to specify custom RC file. +## - Added reading of rc file in current directly. +## 1.1 - Fixed bug where Dependency file generation header is printed +## rependatively. +## - Fixed bug where TEXINPUTS path is searched for file that was +## specified with absolute an pathname. +## 1.0 - Ripped from script by David J. Musliner (RCS version 2.3) called "go" +## - Fixed a couple of file naming bugs +## e.g. when calling latex, left the ".tex" extension off the end +## of the file name which could do some interesting things +## with some file names. +## - Redirected output of dvips. My version of dvips was a filter. +## - Cleaned up the rc file mumbo jumbo and created a dependency file +## instead. 
Include dependencies are always searched for if a +## dependency file doesn't exist. The -i option regenerates the +## dependency file. +## Getting rid of the rc file stuff also gave the advantage of +## not being restricted to one tex file per directory. +## - Can specify multiple files on the command line or no files +## on the command line. +## - Removed lpr options stuff. I would guess that generally, +## you always use the same options in which case they can +## be set up from an rc file with the $lpr variable. +## - Removed the dviselect stuff. If I ever get time (or money :-) ) +## I might put it back in if I find myself needing it or people +## express interest in it. +## - Made it possible to view dvi or postscript file automatically +## depending on if -ps option selected. +## - Made specification of dvi file viewer seperate for -pv and -pvc +## options. +##----------------------------------------------------------------------- + + +## Explicit exit codes: +## 10 = bad command line arguments +## 11 = file specified on command line not found +## or other file not found +## 12 = failure in some part of making files +## 13 = error in initialization file +## 20 = probable bug +## or retcode from called program. + + +#Line length in log file that indicates wrapping. +# This number EXCLUDES line-end characters, and is one-based +$log_wrap = 79; + +######################################################################### +## Default parsing and file-handling settings + +## Array of reg-exps for patterns in log-file for file-not-found +## Each item is the string in a regexp, without the enclosing slashes. +## First parenthesized part is the filename. +## Note the need to quote slashes and single right quotes to make them +## appear in the regexp. +## Add items by push, e.g., +## push @file_not_found, '^No data file found `([^\\\']*)\\\''; +## will give match to line starting "No data file found `filename'" +@file_not_found = ( + '^No file\\s*(.*)\\.$', + '^\\! LaTeX Error: File `([^\\\']*)\\\' not found\\.', + '.*?:\\d*: LaTeX Error: File `([^\\\']*)\\\' not found\\.', + '^LaTeX Warning: File `([^\\\']*)\\\' not found', + '^Package .* file `([^\\\']*)\\\' not found', +); + +## Hash mapping file extension (w/o period, e.g., 'eps') to a single regexp, +# whose matching by a line in a file with that extension indicates that the +# line is to be ignored in the calculation of the hash number (md5 checksum) +# for the file. Typically used for ignoring datestamps in testing whether +# a file has changed. +# Add items e.g., by +# $hash_calc_ignore_pattern{'eps'} = '^%%CreationDate: '; +# This makes the hash calculation for an eps file ignore lines starting with +# '%%CreationDate: ' +# ?? Note that a file will be considered changed if +# (a) its size changes +# or (b) its hash changes +# So it is useful to ignore lines in the hash calculation only if they +# are of a fixed size (as with a date/time stamp). +%hash_calc_ignore_pattern =(); + +######################################################################### +## Default document processing programs, and related settings, +## These are mostly the same on all systems. +## Most of these variables represents the external command needed to +## perform a certain action. Some represent switches. 
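
[Editor's note] Both @file_not_found and %hash_calc_ignore_pattern are designed to be extended from an initialization file. The fragment below sketches what such a ~/.latexmkrc addition might look like; the extra "missing file" pattern is the hypothetical example already given in the comments above (for a package that reports missing data files in its own wording), and the eps line repeats the documented example verbatim.

    # Hypothetical ~/.latexmkrc fragment; it is evaluated by latexmk when
    # it reads its rc files, so the variables below are latexmk's globals.

    # Teach latexmk one more "file not found" log message; the first
    # capture group must be the missing filename:
    push @file_not_found, '^No data file found `([^\\\']*)\\\'';

    # Ignore the ever-changing creation-date line when deciding whether an
    # .eps figure has really changed:
    $hash_calc_ignore_pattern{'eps'} = '^%%CreationDate: ';
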
+ +## Commands to invoke latex, pdflatex +$latex = 'latex %O %S'; +$pdflatex = 'pdflatex %O %S'; +## Switch(es) to make them silent: +$latex_silent_switch = '-interaction=batchmode'; +$pdflatex_silent_switch = '-interaction=batchmode'; + +## Command to invoke bibtex +$bibtex = 'bibtex %O %B'; +# Switch(es) to make bibtex silent: +$bibtex_silent_switch = '-terse'; + +## Command to invoke makeindex +$makeindex = 'makeindex %O -o %D %S'; +# Switch(es) to make makeinex silent: +$makeindex_silent_switch = '-q'; + +## Command to convert dvi file to pdf file directly: +$dvipdf = 'dvipdf %O %S %D'; + +## Command to convert dvi file to ps file: +$dvips = 'dvips %O -o %D %S'; +## Command to convert dvi file to ps file in landscape format: +$dvips_landscape = 'dvips -tlandscape %O -o %D %S'; +# Switch(es) to get dvips to make ps file suitable for conversion to good pdf: +# (If this is not used, ps file and hence pdf file contains bitmap fonts +# (type 3), which look horrible under acroread. An appropriate switch +# ensures type 1 fonts are generated. You can put this switch in the +# dvips command if you prefer.) +$dvips_pdf_switch = '-P pdf'; +# Switch(es) to make dvips silent: +$dvips_silent_switch = '-q'; + +## Command to convert ps file to pdf file: +$ps2pdf = 'ps2pdf %O %S %D'; + +## Command to search for tex-related files +$kpsewhich = 'kpsewhich %S'; + + +##Printing: +$print_type = 'ps'; # When printing, print the postscript file. + # Possible values: 'dvi', 'ps', 'pdf', 'none' + +## Which treatment of default extensions and filenames with +## multiple extensions is used, for given filename on +## tex/latex's command line? See sub find_basename for the +## possibilities. +## Current tex's treat extensions like UNIX teTeX: +$extension_treatment = 'unix'; + +$dvi_update_signal = undef; +$ps_update_signal = undef; +$pdf_update_signal = undef; + +$dvi_update_command = undef; +$ps_update_command = undef; +$pdf_update_command = undef; + +$new_viewer_always = 0; # If 1, always open a new viewer in pvc mode. + # If 0, only open a new viewer if no previous + # viewer for the same file is detected. + +$quote_filenames = 1; # Quote filenames in external commands + +######################################################################### + +################################################################ +## Special variables for system-dependent fudges, etc. +$MSWin_fudge_break = 1; # Give special treatment to ctrl/C and ctrl/break + # in -pvc mode under MSWin + # Under MSWin32 (at least with perl 5.8 and WinXP) + # when latemk is running another program, and the + # user gives ctrl/C or ctrl/break, to stop the + # daughter program, not only does it reach + # the daughter, but also latexmk/perl, so + # latexmk is stopped also. In -pvc mode, + # this is not normally desired. So when the + # $MSWin_fudge_break variable is set, + # latexmk arranges to ignore ctrl/C and + # ctrl/break during processing of files; + # only the daughter programs receive them. + # This fudge is not applied in other + # situations, since then having latexmk also + # stopping because of the ctrl/C or + # ctrl/break signal is desirable. + # The fudge is not needed under UNIX (at least + # with Perl 5.005 on Solaris 8). Only the + # daughter programs receive the signal. In + # fact the inverse would be useful: In + # normal processing, as opposed to -pvc, if + # force mode (-f) is set, a ctrl/C is + # received by a daughter program does not + # also stop latexmk. 
Under tcsh, we get + # back to a command prompt, while latexmk + # keeps running in the background! + + +################################################################ + + +# System-dependent overrides: +if ( $^O eq "MSWin32" ) { +# Pure MSWindows configuration + ## Configuration parameters: + + ## Use first existing case for $tmpdir: + $tmpdir = $ENV{TMPDIR} || $ENV{TEMP} || '.'; + + ## List of possibilities for the system-wide initialization file. + ## The first one found (if any) is used. + @rc_system_files = ( 'C:/latexmk/LatexMk' ); + + $search_path_separator = ';'; # Separator of elements in search_path + + # For both fptex and miktex, the following makes error messages explicit: + $latex_silent_switch = '-interaction=batchmode -c-style-errors'; + $pdflatex_silent_switch = '-interaction=batchmode -c-style-errors'; + + # For a pdf-file, "start x.pdf" starts the pdf viewer associated with + # pdf files, so no program name is needed: + $pdf_previewer = 'start %O %S'; + $ps_previewer = 'start %O %S'; + $ps_previewer_landscape = $ps_previewer; + $dvi_previewer = 'start %O %S'; + $dvi_previewer_landscape = "$dvi_previewer"; + # Viewer update methods: + # 0 => auto update: viewer watches file (e.g., gv) + # 1 => manual update: user must do something: e.g., click on window. + # (e.g., ghostview, MSWIN previewers, acroread under UNIX) + # 2 => send signal. Number of signal in $dvi_update_signal, + # $ps_update_signal, $pdf_update_signal + # 3 => viewer can't update, because it locks the file and the file + # cannot be updated. (acroread under MSWIN) + # 4 => run a command to force the update. The commands are + # specified by the variables $dvi_update_command, + # $ps_update_command, $pdf_update_command + $dvi_update_method = 1; + $ps_update_method = 1; + $pdf_update_method = 3; # acroread locks the pdf file + # Use NONE as flag that I am not implementing some commands: + $lpr = + 'NONE $lpr variable is not configured to allow printing of ps files'; + $lpr_dvi = + 'NONE $lpr_dvi variable is not configured to allow printing of dvi files'; + $lpr_pdf = + 'NONE $lpr_pdf variable is not configured to allow printing of pdf files'; + # The $pscmd below holds a command to list running processes. It + # is used to find the process ID of the viewer looking at the + # current output file. The output of the command must include the + # process number and the command line of the processes, since the + # relevant process is identified by the name of file to be viewed. + # Its use is not essential. + $pscmd = + 'NONE $pscmd variable is not configured to detect running processes'; + $pid_position = -1; # offset of PID in output of pscmd. + # Negative means I cannot use ps +} +elsif ( $^O eq "cygwin" ) { + # The problem is a mixed MSWin32 and UNIX environment. + # Perl decides the OS is cygwin in two situations: + # 1. When latexmk is run from a cygwin shell under a cygwin + # environment. Perl behaves in a UNIX way. This is OK, since + # the user is presumably expecting UNIXy behavior. + # 2. When CYGWIN exectuables are in the path, but latexmk is run + # from a native NT shell. Presumably the user is expecting NT + # behavior. But perl behaves more UNIXy. This causes some + # clashes. + # The issues to handle are: + # 1. Perl sees both MSWin32 and cygwin filenames. This is + # normally only an advantage. + # 2. 
Perl uses a UNIX shell in the system command + # This is a nasty problem: under native NT, there is a + # start command that knows about NT file associations, so that + # we can do, e.g., (under native NT) system("start file.pdf"); + # But this won't work when perl has decided the OS is cygwin, + # even if it is invoked from a native NT command line. An + # NT command processor must be used to deal with this. + # 3. External executables can be native NT (which only know + # NT-style file names) or cygwin executables (which normally + # know both cygwin UNIX-style file names and NT file names, + # but not always; some do not know about drive names, for + # example). + # Cygwin executables for tex and latex may only know cygwin + # filenames. + # 4. The BIBINPUTS and TEXINPUTS environment variables may be + # UNIX-style or MSWin-style depending on whether native NT or + # cygwin executables are used. They are therefore parsed + # differently. Here is the clash: + # a. If a user is running under an NT shell, is using a + # native NT installation of tex (e.g., fptex or miktex), + # but has the cygwin executables in the path, then perl + # detects the OS as cygwin, but the user needs NT + # behavior from latexmk. + # b. If a user is running under an UNIX shell in a cygwin + # environment, and is using the cygwin installation of + # tex, then perl detects the OS as cygwin, and the user + # needs UNIX behavior from latexmk. + # Latexmk has no way of detecting the difference. The two + # situations may even arise for the same user on the same + # computer simply by changing the order of directories in the + # path environment variable + + + ## Configuration parameters: We'll assume native NT executables. + ## The user should override if they are not. + + # This may fail: perl converts MSWin temp directory name to cygwin + # format. Names containing this string cannot be handled by native + # NT executables. + $tmpdir = $ENV{TMPDIR} || $ENV{TEMP} || '.'; + + ## List of possibilities for the system-wide initialization file. + ## The first one found (if any) is used. + ## We can stay with MSWin files here, since perl understands them, + @rc_system_files = ( 'C:/latexmk/LatexMk' ); + + $search_path_separator = ';'; # Separator of elements in search_path + # This is tricky. The search_path_separator depends on the kind + # of executable: native NT v. cygwin. + # So the user will have to override this. + + # For both fptex and miktex, the following makes error messages explicit: + $latex_silent_switch = '-interaction=batchmode -c-style-errors'; + $pdflatex_silent_switch = '-interaction=batchmode -c-style-errors'; + + # We will assume that files can be viewed by native NT programs. + # Then we must fix the start command/directive, so that the + # NT-native start command of a cmd.exe is used. + # For a pdf-file, "start x.pdf" starts the pdf viewer associated with + # pdf files, so no program name is needed: + $start_NT = "cmd /c start"; + $pdf_previewer = "$start_NT %O %S"; + $ps_previewer = "$start_NT %O %S"; + $ps_previewer_landscape = $ps_previewer; + $dvi_previewer = "$start_NT %O %S"; + $dvi_previewer_landscape = $dvi_previewer; + # Viewer update methods: + # 0 => auto update: viewer watches file (e.g., gv) + # 1 => manual update: user must do something: e.g., click on window. + # (e.g., ghostview, MSWIN previewers, acroread under UNIX) + # 2 => send signal. 
Number of signal in $dvi_update_signal, + # $ps_update_signal, $pdf_update_signal + # 3 => viewer can't update, because it locks the file and the file + # cannot be updated. (acroread under MSWIN) + $dvi_update_method = 1; + $ps_update_method = 1; + $pdf_update_method = 3; # acroread locks the pdf file + # Use NONE as flag that I am not implementing some commands: + $lpr = + 'NONE $lpr variable is not configured to allow printing of ps files'; + $lpr_dvi = + 'NONE $lpr_dvi variable is not configured to allow printing of dvi files'; + $lpr_pdf = + 'NONE $lpr_pdf variable is not configured to allow printing of pdf files'; + # The $pscmd below holds a command to list running processes. It + # is used to find the process ID of the viewer looking at the + # current output file. The output of the command must include the + # process number and the command line of the processes, since the + # relevant process is identified by the name of file to be viewed. + # Its use is not essential. + # When the OS is detected as cygwin, there are two possibilities: + # a. Latexmk was run from an NT prompt, but cygwin is in the + # path. Then the cygwin ps command will not see commands + # started from latexmk. So we cannot use it. + # b. Latexmk was started within a cygwin environment. Then + # the ps command works as we need. + # Only the user, not latemk knows which, so we default to not + # using the ps command. The user can override this in a + # configuration file. + $pscmd = + 'NONE $pscmd variable is not configured to detect running processes'; + $pid_position = -1; # offset of PID in output of pscmd. + # Negative means I cannot use ps +} +else { + # Assume anything else is UNIX or clone + + ## Configuration parameters: + + + ## Use first existing case for $tmpdir: + $tmpdir = $ENV{TMPDIR} || '/tmp'; + + ## List of possibilities for the system-wide initialization file. + ## The first one found (if any) is used. + ## Normally on a UNIX it will be in a subdirectory of /opt/local/share or + ## /usr/local/share, depending on the local conventions. + ## /usr/local/lib/latexmk/LatexMk is put in the list for + ## compatibility with older versions of latexmk. + @rc_system_files = + ( '/opt/local/share/latexmk/LatexMk', + '/usr/local/share/latexmk/LatexMk', + '/usr/local/lib/latexmk/LatexMk' ); + + $search_path_separator = ':'; # Separator of elements in search_path + + $dvi_update_signal = $signo{USR1} + if ( defined $signo{USR1} ); # Suitable for xdvi + $ps_update_signal = $signo{HUP} + if ( defined $signo{HUP} ); # Suitable for gv + $pdf_update_signal = $signo{HUP} + if ( defined $signo{HUP} ); # Suitable for gv + ## default document processing programs. + # Viewer update methods: + # 0 => auto update: viewer watches file (e.g., gv) + # 1 => manual update: user must do something: e.g., click on window. + # (e.g., ghostview, MSWIN previewers, acroread under UNIX) + # 2 => send signal. Number of signal in $dvi_update_signal, + # $ps_update_signal, $pdf_update_signal + # 3 => viewer can't update, because it locks the file and the file + # cannot be updated. (acroread under MSWIN) + # 4 => Run command to update. Command in $dvi_update_command, + # $ps_update_command, $pdf_update_command. 
+ $dvi_previewer = 'start xdvi %O %S'; + $dvi_previewer_landscape = 'start xdvi -paper usr %O %S'; + if ( defined $dvi_update_signal ) { + $dvi_update_method = 2; # xdvi responds to signal to update + } else { + $dvi_update_method = 1; + } +# if ( defined $ps_update_signal ) { +# $ps_update_method = 2; # gv responds to signal to update +# $ps_previewer = 'start gv -nowatch'; +# $ps_previewer_landscape = 'start gv -swap -nowatch'; +# } else { +# $ps_update_method = 0; # gv -watch watches the ps file +# $ps_previewer = 'start gv -watch'; +# $ps_previewer_landscape = 'start gv -swap -watch'; +# } + # Turn off the fancy options for gv. Regular gv likes -watch etc + # GNU gv likes --watch etc. User must configure + $ps_update_method = 0; # gv -watch watches the ps file + $ps_previewer = 'start gv %O %S'; + $ps_previewer_landscape = 'start gv -swap %O %S'; + $pdf_previewer = 'start acroread %O %S'; + $pdf_update_method = 1; # acroread under unix needs manual update + $lpr = 'lpr %O %S'; # Assume lpr command prints postscript files correctly + $lpr_dvi = + 'NONE $lpr_dvi variable is not configured to allow printing of dvi files'; + $lpr_pdf = + 'NONE $lpr_pdf variable is not configured to allow printing of pdf files'; + # The $pscmd below holds a command to list running processes. It + # is used to find the process ID of the viewer looking at the + # current output file. The output of the command must include the + # process number and the command line of the processes, since the + # relevant process is identified by the name of file to be viewed. + # Uses: + # 1. In preview_continuous mode, to save running a previewer + # when one is already running on the relevant file. + # 2. With xdvi in preview_continuous mode, xdvi must be + # signalled to make it read a new dvi file. + # + # The following works on Solaris, LINUX, HP-UX, IRIX + # Use -f to get full listing, including command line arguments. + # Use -u $ENV{CMD} to get all processes started by current user (not just + # those associated with current terminal), but none of other users' + # processes. + $pscmd = "ps -f -u $ENV{USER}"; + $pid_position = 1; # offset of PID in output of pscmd; first item is 0. + if ( $^O eq "linux" ) { + # Ps on Redhat (at least v. 7.2) appears to truncate its output + # at 80 cols, so that a long command string is truncated. + # Fix this with the --width option. This option works under + # other versions of linux even if not necessary (at least + # for SUSE 7.2). + # However the option is not available under other UNIX-type + # systems, e.g., Solaris 8. + $pscmd = "ps --width 200 -f -u $ENV{USER}"; + } + elsif ( $^O eq "darwin" ) { + # OS-X on Macintosh + $lpr_pdf = 'lpr %O %S'; + $pscmd = "ps -ww -u $ENV{USER}"; + } +} + +## default parameters +$max_repeat = 5; # Maximum times I repeat latex. Normally + # 3 would be sufficient: 1st run generates aux file, + # 2nd run picks up aux file, and maybe toc, lof which + # contain out-of-date information, e.g., wrong page + # references in toc, lof and index, and unresolved + # references in the middle of lines. But the + # formatting is more-or-less correct. On the 3rd + # run, the page refs etc in toc, lof, etc are about + # correct, but some slight formatting changes may + # occur, which mess up page numbers in the toc and lof, + # Hence a 4th run is conceivably necessary. + # At least one document class (JHEP.cls) works + # in such a way that a 4th run is needed. + # We allow an extra run for safety for a + # maximum of 5. 
Needing further runs is + # usually an indication of a problem; further + # runs may not resolve the problem, and + # instead could cause an infinite loop. +$clean_ext = ""; # space separated extensions of files that are + # to be deleted when doing cleanup, beyond + # standard set +$clean_full_ext = ""; # space separated extensions of files that are + # to be deleted when doing cleanup_full, beyond + # standard set and those in $clean_ext +@cus_dep_list = (); # Custom dependency list +@default_files = ( '*.tex' ); # Array of LaTeX files to process when + # no files are specified on the command line. + # Wildcards allowed + # Best used for project specific files. +@default_excluded_files = ( ); + # Array of LaTeX files to exclude when using + # @default_files, i.e., when no files are specified + # on the command line. + # Wildcards allowed + # Best used for project specific files. +$texfile_search = ""; # Specification for extra files to search for + # when no files are specified on the command line + # and the @default_files variable is empty. + # Space separated, and wildcards allowed. + # These files are IN ADDITION to *.tex in current + # directory. + # This variable is obsolete, and only in here for + # backward compatibility. + +$fdb_ext = 'fdb_latexmk'; # Extension for the file for latexmk's + # file-database + # Make it long to avoid possible collisions. +$fdb_ver = 2; # Version number for kind of fdb_file. + +$jobname = ''; # Jobname: as with current tex, etc indicates + # basename of generated files. + # Defined so that --jobname=STRING on latexmk's + # command line has same effect as with current + # tex, etc. (If $jobname is non-empty, then + # the --jobname=... option is used on tex.) + + +## default flag settings. +$silent = 0; # silence latex's messages? +$landscape_mode = 0; # default to portrait mode + +# The following two arrays contain lists of extensions (without +# period) for files that are read in during a (pdf)LaTeX run but that +# are generated automatically from the previous run, as opposed to +# being user generated files (directly or indirectly from a custom +# dependency). These files get two kinds of special treatment: +# 1. In clean up, where depending on the kind of clean up, some +# or all of these generated files are deleted. +# (Note that special treatment is given to aux files.) +# 2. In analyzing the results of a run of (pdf)LaTeX, to +# determine if another run is needed. With an error free run, +# a rerun should be provoked by a change in any source file, +# whether a user file or a generated file. But with a run +# that ends in an error, only a change in a user file during +# the run (which might correct the error) should provoke a +# rerun, but a change in a generated file should not. +# These arrays can be user-configured. +@generated_exts = ( 'aux', 'bbl', 'idx', 'ind', 'lof', 'lot', 'out', 'toc' ); + # N.B. 'out' is generated by hyperref package + +# Which kinds of file do I have requests to make? +# If no requests at all are made, then I will make dvi file +# If particular requests are made then other files may also have to be +# made. 
E.g., ps file requires a dvi file +$dvi_mode = 0; # No dvi file requested +$postscript_mode = 0; # No postscript file requested +$pdf_mode = 0; # No pdf file requested to be made by pdflatex + # Possible values: + # 0 don't create pdf file + # 1 to create pdf file by pdflatex + # 2 to create pdf file by ps2pdf + # 3 to create pdf file by dvipdf +$view = 'default'; # Default preview is of highest of dvi, ps, pdf +$sleep_time = 2; # time to sleep b/w checks for file changes in -pvc mode +$banner = 0; # Non-zero if we have a banner to insert +$banner_scale = 220; # Original default scale +$banner_intensity = 0.95; # Darkness of the banner message +$banner_message = 'DRAFT'; # Original default message +$do_cd = 0; # Do not do cd to directory of source file. + # Thus behave like latex. +$dependents_list = 0; # Whether to display list(s) of dependencies +@dir_stack = (); # Stack of pushed directories. +$cleanup_mode = 0; # No cleanup of nonessential LaTex-related files. + # $cleanup_mode = 0: no cleanup + # $cleanup_mode = 1: full cleanup + # $cleanup_mode = 2: cleanup except for dvi, + # dviF, pdf, ps, & psF +$cleanup_fdb = 0; # No removal of file for latexmk's file-database +$cleanup_only = 0; # When doing cleanup, do not go-on to making files +$diagnostics = 0; +$dvi_filter = ''; # DVI filter command +$ps_filter = ''; # Postscript filter command + +$force_mode = 0; # =1 to force processing past errors +$force_include_mode = 0;# =1 to ignore non-existent files when testing + # for dependency. (I.e., don't treat them as error) +$go_mode = 0; # =1 to force processing regardless of time-stamps + # =2 full clean-up first +$preview_mode = 0; +$preview_continuous_mode = 0; +$printout_mode = 0; # Don't print the file + +# Do we make view file in temporary then move to final destination? +# (To avoid premature updating by viewer). +$always_view_file_via_temporary = 0; # Set to 1 if viewed file is always + # made through a temporary. +$pvc_view_file_via_temporary = 1; # Set to 1 if only in -pvc mode is viewed + # file made through a temporary. + +# State variables initialized here: + +$updated = 0; # Flags when something has been remade + # Used to allow convenient user message in -pvc mode +$waiting = 0; # Flags whether we are in loop waiting for an event + # Used to avoid unnecessary repeated o/p in wait loop + +# Used for some results of parsing log file: +$reference_changed = 0; +$bad_reference = 0; +$bad_citation = 0; + + +# Set search paths for includes. +# Set them early so that they can be overridden +$BIBINPUTS = $ENV{'BIBINPUTS'}; +if (!$BIBINPUTS) { $BIBINPUTS = '.'; } +#?? OBSOLETE +$TEXINPUTS = $ENV{'TEXINPUTS'}; +if (!$TEXINPUTS) { $TEXINPUTS = '.'; } + +# Convert search paths to arrays: +# If any of the paths end in '//' then recursively search the +# directory. After these operations, @BIBINPUTS should +# have all the directories that need to be searched + +@BIBINPUTS = find_dirs1 ($BIBINPUTS); + + +###################################################################### +###################################################################### +# +# ??? UPDATE THE FOLLOWING!! +# +# We will need to determine whether source files for runs of various +# programs are out of date. In a normal situation, this is done by +# asking whether the times of the source files are later than the +# destination files. But this won't work for us, since a common +# situation is that a file is written on one run of latex, for +# example, and read back in on the next run (e.g., an .aux file). 
+# Some situations of this kind are standard in latex generally; others +# occur with particular macro packages or with particular +# postprocessors. +# +# The correct criterion for whether a source is out-of-date is +# therefore NOT that its modification time is later than the +# destination file, but whether the contents of the source file have +# changed since the last successful run. This also handles the case +# that the user undoes some changes to a source file by replacing the +# source file by reverting to an earlier version, which may well have +# an older time stamp. Since a direct comparison of old and new files +# would involve storage and access of a large number of backup files, +# we instead use the md5 signature of the files. (Previous versions +# of latexmk used the backup file method, but restricted to the case +# of .aux and .idx files, sufficient for most, but not all, +# situations.) +# +# We will have a database of (time, size, md5) for the relevant +# files. If the time and size of a file haven't changed, then the file +# is assumed not to have changed; this saves us from having to +# determine its md5 signature, which would involve reading the whole +# file, which is naturally time-consuming, especially if network file +# access to a server is needed, and many files are involved, when most +# of them don't change. It is of course possible to change a file +# without changing its size, but then to adjust its timestamp +# to what it was previously; this requires a certain amount of +# perversity. We can safely assume that if the user edits a file or +# changes its contents, then the file's timestamp changes. The +# interesting case is that the timestamp does change, because the file +# has actually been written to, but that the contents do not change; +# it is for this that we use the md5 signature. However, since +# computing the md5 signature involves reading the whole file, which +# may be large, we should avoid computing it more than necessary. +# +# So we get the following structure: +# +# 1. For each relevant run (latex, pdflatex, each instance of a +# custom dependency) we have a database of the state of the +# source files that were last used by the run. +# 2. On an initial startup, the database for a primary tex file +# is read that was created by a previous run of latex or +# pdflatex, if this exists. +# 3. If the file doesn't exist, then the criterion for +# out-of-dateness for an initial run is that it goes by file +# timestamps, as in previous versions of latexmk, with due +# (dis)regard to those files that are known to be generated by +# latex and re-read on the next run. +# 4. Immediately before a run, the database is updated to +# represent the current conditions of the run's source files. +# 5. After the run, it is determined whether any of the source +# files have changed. This covers both files written by the +# run, which are therefore in a dependency loop, and files that +# the user may have updated during the run. (The last often +# happens when latex takes a long time, for a big document, +# and the user makes edits before latex has finished. This is +# particularly prevalent when latexmk is used with +# preview-continuous mode.) +# 6. In the case of latex or pdflatex, the custom dependencies +# must also be checked and redone if out-of-date. +# 7. If any source files have changed, the run is redone, +# starting at step 1. +# 8. 
There is naturally a limit on the number of reruns, to avoid +# infinite loops from bugs and from pathological or unforeseen +# conditions. +# 9. After the run is done, the run's file database is updated. +# (By hypothesis, the sizes and md5s are correct, if the run +# is successful.) +# 10. To allow reuse of data from previous runs, the file database +# is written to a file after every complete set of passes +# through latex or pdflatex. (Note that there is separate +# information for latex and pdflatex; the necessary +# information won't coincide: Out-of-dateness for the files +# for each program concerns the properties of the files when +# the other program was run, and the set of source files could +# be different, e.g., for graphics files.) +# +# We therefore maintain the following data structures.: +# +# a. For each run (latex, pdflatex, each custom dependency) a +# database is maintained. This is a hash from filenames to a +# reference to an array: [time, size, md5]. The semantics of +# the database is that it represents the state of the source +# files used in the run. During a run it represents the state +# immediately before the run; after a run, with all reruns, it +# represents the state of the files used, modified by having +# the latest timestamps for generated files. +# b. There is a global database for all files, which represents +# the current state. This saves having to recompute the md5 +# signatures of a changed file used in more than one run +# (e.g., latex and pdflatex). +# c. Each of latex and pdflatex has a list of the relevant custom +# dependencies. +# +# In all the following a fdb-hash is a hash of the form: +# filename -> [time, size, md5] +# If a file is found to disappear, its entry is removed from the hash. +# In returns from fdb access routines, a size entry of -1 indicates a +# non-existent file. + + +# List of known rules. Rule types: primary, +# external (calls program), internal (calls routine), cusdep. + +%known_rules = ( 'latex' => 'primary', 'pdflatex' => 'primary', + ); +%primaries = (); # Hash of rules for primary part of make. Keys are + # currently 'latex', 'pdflatex' or both. Value is + # currently irrelevant. Use hash for ease of lookup + # Make remove this later, if use makeB + +# Hashes, whose keys give names of particular kinds of rule. We use +# hashes for ease of lookup. +%possible_one_time = ( 'view' => 1, 'print' => 1, 'update_view' => 1, ); +%requested_filerules = (); # Hash for rules corresponding to requested files. + # The keys are the rulenames and the value is + # currently irrelevant. +%one_time = (); # Hash for requested one-time-only rules, currently + # possible values 'print' and 'view'. + + +%rule_db = (); # Database of all rules: + # Hash: rulename -> [array of rule data] + # Rule data: + # 0: [ cmd_type, ext_cmd, int_cmd, out_of_date-crit, + # source, dest, base, out_of_date, + # out_of_date_user, time_of_last_run ] + # where + # cmd_type is 'primary', 'external' or 'cusdep', + # ext_cmd is string for associated external command + # with substitutions (%D for destination, %S + # for source, %B for base of current rule, + # %R for base of primary tex file, %T for + # texfile name, and %O for options. + # int_cmd specifies any internal command to be + # used to implement the application of the + # rule. If this is present, it overrides + # the external command, and it is the + # responsibility of the perl subroutine + # specified in intcmd to execute the + # external command if this is appropriate. 
+ # This variable intcmd is a reference to an array, + # $$intcmd[0] = internal routine + # $$intcmd[1...] = its arguments (if any) + # out_of_date_crit specifies method of determining + # whether a file is out-of-date: + # 0 for never + # 1 for usual: whether there is a source + # file change + # 2 for dest earlier than source + # 3 for method 2 at first run, 1 thereafter + # (used when don't have file data from + # previous run). + # source = name of primary source file, if any + # dest = name of primary destination file, + # if any + # base = base name, if any, of files for + # this rule + # out_of_date = 1 if it has been detected that + # this rule needs to be run + # (typically because a source + # file has changed). + # 0 otherwise + # out_of_date_user is like out_of_date, except + # that the detection of out-of-dateness + # has been made from a change of a + # putative user file, i.e., one that is + # not a generated file (e.g., aux). This + # kind of out-of-dateness should provoke a + # rerun where or not there was an error + # during a run of (pdf)LaTeX. Normally, + # if there is an error, one should wait + # for the user to correct the error. But + # it is possible the error condition is + # already corrected during the run, e.g., + # by the user changing a source file in + # response to an error message. + # time_of_last_run = time that this rule was + # last applied. (In standard units + # from perl, to be directly compared + # with file modification times.) + # changed flags whether special changes have been made + # that require file-existence status to be ignored + # 1: {Hash sourcefile -> [source-file data] } + # Source-file data array: + # 0: time + # 1: size + # 2: md5 + # 3: name of rule to make this file + # 4: whether the file is of the kind made by epstopdf.sty + # during a primary run. It will have been read during + # the run, so that even though the file changes during + # a primary run, there is no need to trigger another + # run because of this. + +%fdb_current = (); # Fdb-hash for all files used. + + +#================================================== +## Read rc files: + +sub read_first_rc_file_in_list { + foreach my $rc_file ( @_ ) { + #print "===Testing for rc file \"$rc_file\" ...\n"; + if ( -e $rc_file ) { + #print "===Reading rc file \"$rc_file\" ...\n"; + process_rc_file( $rc_file ); + return; + } + } +} + +# Read system rc file: +read_first_rc_file_in_list( @rc_system_files ); +# Read user rc file. +read_first_rc_file_in_list( "$ENV{'HOME'}/.latexmkrc" ); +# Read rc file in current directory. +read_first_rc_file_in_list( "latexmkrc", ".latexmkrc" ); + +#================================================== + +#show_array ("BIBINPUTS", @BIBINPUTS); die; + +## Process command line args. 
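
[Editor's note] The fdb-hash described above maps each filename to [time, size, md5], uses size -1 to flag a non-existent file, and recomputes the md5 only when the cheap time/size checks suggest the file was rewritten. The following is a standalone sketch of that idea only, with made-up helper names and Digest::MD5 used directly; latexmk's own database routines elsewhere in this file differ in detail.

    #!/usr/bin/perl -w
    # Sketch of the [time, size, md5] bookkeeping; not latexmk's code.
    use strict;
    use Digest::MD5;

    sub get_time_size {
        my ($file) = @_;
        return ( 0, -1 ) unless -e $file;     # size -1 means "no such file"
        return ( stat $file )[ 9, 7 ];        # (mtime, size)
    }

    sub get_md5 {
        my ($file) = @_;
        open( my $fh, '<', $file ) or return 0;
        binmode $fh;
        my $md5 = Digest::MD5->new->addfile($fh)->hexdigest;
        close $fh;
        return $md5;
    }

    # Has $file changed relative to a stored [time, size, md5] triple?
    # The md5 is recomputed only when time or size say it may have been
    # rewritten, which is the whole point of keeping time and size.
    sub file_changed {
        my ( $file, $old ) = @_;
        my ( $time, $size ) = get_time_size($file);
        return 1 if $size != $$old[1];          # size differs: changed
        return 0 if $time == $$old[0];          # same time and size: unchanged
        return get_md5($file) ne $$old[2];      # rewritten: compare contents
    }

    # Record the current state of a (purely illustrative) file, then test it:
    my %fdb = ();                               # filename -> [time, size, md5]
    my $f = 'example.aux';
    $fdb{$f} = [ get_time_size($f), get_md5($f) ];
    print "$f has changed\n" if file_changed( $f, $fdb{$f} );
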
+@command_line_file_list = (); +$bad_options = 0; + +#print "Command line arguments:\n"; for ($i = 0; $i <= $#ARGV; $i++ ) { print "$i: '$ARGV[$i]'\n"; } + +while ($_ = $ARGV[0]) +{ + # Make -- and - equivalent at beginning of option: + s/^--/-/; + shift; + if (/^-c$/) { $cleanup_mode = 2; $cleanup_only = 1; } + elsif (/^-C$/) { $cleanup_mode = 1; $cleanup_only = 1; } + elsif (/^-CA$/) { $cleanup_mode = 1; $cleanup_fdb = 1; $cleanup_only = 1;} + elsif (/^-CF$/) { $cleanup_fdb = 1; } + elsif (/^-cd$/) { $do_cd = 1; } + elsif (/^-cd-$/) { $do_cd = 0; } + elsif (/^-commands$/) { &print_commands; exit; } + elsif (/^-d$/) { $banner = 1; } + elsif (/^-dependents$/) { $dependents_list = 1; } + elsif (/^-nodependents$/ || /^-dependents-$/) { $dependents_list = 0; } + elsif (/^-dvi$/) { $dvi_mode = 1; } + elsif (/^-dvi-$/) { $dvi_mode = 0; } + elsif (/^-F$/) { $force_include_mode = 1; } + elsif (/^-F-$/) { $force_include_mode = 0; } + elsif (/^-f$/) { $force_mode = 1; } + elsif (/^-f-$/) { $force_mode = 0; } + elsif (/^-g$/) { $go_mode = 1; } + elsif (/^-g-$/) { $go_mode = 0; } + elsif (/^-gg$/) { + $go_mode = 2; $cleanup_mode = 1; $cleanup_fdb = 1; $cleanup_only = 0; + } + elsif ( /^-h$/ || /^-help$/ ) { &print_help; exit;} + elsif (/^-diagnostics/) { $diagnostics = 1; } + elsif (/^-jobname=(.*)$/) { + $jobname = $1; + } + elsif (/^-l$/) { $landscape_mode = 1; } + elsif (/^-new-viewer$/) { + $new_viewer_always = 1; + } + elsif (/^-new-viewer-$/) { + $new_viewer_always = 0; + } + elsif (/^-l-$/) { $landscape_mode = 0; } + elsif (/^-p$/) { $printout_mode = 1; + $preview_continuous_mode = 0; # to avoid conflicts + $preview_mode = 0; + } + elsif (/^-p-$/) { $printout_mode = 0; } + elsif (/^-pdfdvi$/){ $pdf_mode = 3; } + elsif (/^-pdfps$/) { $pdf_mode = 2; } + elsif (/^-pdf$/) { $pdf_mode = 1; } + elsif (/^-pdf-$/) { $pdf_mode = 0; } + elsif (/^-print=(.*)$/) { + $value = $1; + if ( $value =~ /^dvi$|^ps$|^pdf$/ ) { + $print_type = $value; + $printout_mode = 1; + } + else { + &exit_help("$My_name: unknown print type '$value' in option '$_'"); + } + } + elsif (/^-ps$/) { $postscript_mode = 1; } + elsif (/^-ps-$/) { $postscript_mode = 0; } + elsif (/^-pv$/) { $preview_mode = 1; + $preview_continuous_mode = 0; # to avoid conflicts + $printout_mode = 0; + } + elsif (/^-pv-$/) { $preview_mode = 0; } + elsif (/^-pvc$/) { $preview_continuous_mode = 1; + $force_mode = 0; # So that errors do not cause loops + $preview_mode = 0; # to avoid conflicts + $printout_mode = 0; + } + elsif (/^-pvc-$/) { $preview_continuous_mode = 0; } + elsif (/^-silent$/ || /^-quiet$/ ){ $silent = 1; } + elsif (/^-v$/ || /^-version$/) { + print "\n$version_details. Version $version_num\n"; + exit; + } + elsif (/^-verbose$/) { $silent = 0; } + elsif (/^-view=default$/) { $view = "default";} + elsif (/^-view=dvi$/) { $view = "dvi";} + elsif (/^-view=none$/) { $view = "none";} + elsif (/^-view=ps$/) { $view = "ps";} + elsif (/^-view=pdf$/) { $view = "pdf"; } + elsif (/^-e$/) { + if ( $ARGV[0] eq '' ) { + &exit_help( "No code to execute specified after -e switch"); + } + else { + execute_code_string( $ARGV[0] ); + } + shift; + } + elsif (/^-r$/) { + if ( $ARGV[0] eq '' ) { + &exit_help( "No RC file specified after -r switch"); + } + if ( -e $ARGV[0] ) { + process_rc_file( $ARGV[0] ); + } + else { + $! 
= 11; + die "$My_name: RC file [$ARGV[0]] does not exist\n"; + } + shift; + } + elsif (/^-bm$/) { + if ( $ARGV[0] eq '' ) { + &exit_help( "No message specified after -bm switch"); + } + $banner = 1; $banner_message = $ARGV[0]; + shift; + } + elsif (/^-bi$/) { + if ( $ARGV[0] eq '' ) { + &exit_help( "No intensity specified after -bi switch"); + } + $banner_intensity = $ARGV[0]; + shift; + } + elsif (/^-bs$/) { + if ( $ARGV[0] eq '' ) { + &exit_help( "No scale specified after -bs switch"); + } + $banner_scale = $ARGV[0]; + shift; + } + elsif (/^-dF$/) { + if ( $ARGV[0] eq '' ) { + &exit_help( "No dvi filter specified after -dF switch"); + } + $dvi_filter = $ARGV[0]; + shift; + } + elsif (/^-pF$/) { + if ( $ARGV[0] eq '' ) { + &exit_help( "No ps filter specified after -pF switch"); + } + $ps_filter = $ARGV[0]; + shift; + } + elsif (/^-/) { + warn "$My_name: $_ bad option\n"; + $bad_options++; + } + else { + push @command_line_file_list, $_ ; + } +} + +if ( $bad_options > 0 ) { + &exit_help( "Bad options specified" ); +} + +warn "$My_name: This is $version_details, version: $version_num.\n", + "**** Report bugs etc to John Collins . ****\n" + unless $silent; + +# For backward compatibility, convert $texfile_search to @default_files +# Since $texfile_search is initialized to "", a nonzero value indicates +# that an initialization file has set it. +if ( $texfile_search ne "" ) { + @default_files = split / /, "*.tex $texfile_search"; +} + +#printA "A: Command line file list:\n"; +#for ($i = 0; $i <= $#command_line_file_list; $i++ ) { print "$i: '$command_line_file_list[$i]'\n"; } + +#Glob the filenames command line if the script was not invoked under a +# UNIX-like environment. +# Cases: (1) MS/MSwin native Glob +# (OS detected as MSWin32) +# (2) MS/MSwin cygwin Glob [because we do not know whether +# the cmd interpreter is UNIXy (and does glob) or is +# native MS-Win (and does not glob).] +# (OS detected as cygwin) +# (3) UNIX Don't glob (cmd interpreter does it) +# (Currently, I assume this is everything else) +if ( ($^O eq "MSWin32") || ($^O eq "cygwin") ) { + # Preserve ordering of files + @file_list = glob_list1(@command_line_file_list); +#print "A1:File list:\n"; +#for ($i = 0; $i <= $#file_list; $i++ ) { print "$i: '$file_list[$i]'\n"; } +} +else { + @file_list = @command_line_file_list; +#print "A2:File list:\n"; +#for ($i = 0; $i <= $#file_list; $i++ ) { print "$i: '$file_list[$i]'\n"; } +} +@file_list = uniq1( @file_list ); + + +# Check we haven't selected mutually exclusive modes. +# Note that -c overides all other options, but doesn't cause +# an error if they are selected. +if (($printout_mode && ( $preview_mode || $preview_continuous_mode )) + || ( $preview_mode && $preview_continuous_mode )) +{ + # Each of the options -p, -pv, -pvc turns the other off. + # So the only reason to arrive here is an incorrect inititalization + # file, or a bug. + &exit_help( "Conflicting options (print, preview, preview_continuous) selected"); +} + +if ( @command_line_file_list ) { + # At least one file specified on command line (before possible globbing). + if ( !@file_list ) { + &exit_help( "Wildcards in file names didn't match any files"); + } +} +else { + # No files specified on command line, try and find some + # Evaluate in order specified. The user may have some special + # for wanting processing in a particular order, especially + # if there are no wild cards. 
+ # Preserve ordering of files + my @file_list1 = uniq1( glob_list1(@default_files) ); + my @excluded_file_list = uniq1( glob_list1(@default_excluded_files) ); + # Make hash of excluded files, for easy checking: + my %excl = (); + foreach my $file (@excluded_file_list) { + $excl{$file} = ''; + } + foreach my $file (@file_list1) { + push( @file_list, $file) unless ( exists $excl{$file} ); + } + if ( !@file_list ) { + &exit_help( "No file name specified, and I couldn't find any"); + } +} + +$num_files = $#file_list + 1; +$num_specified = $#command_line_file_list + 1; + +#print "Command line file list:\n"; +#for ($i = 0; $i <= $#command_line_file_list; $i++ ) { print "$i: '$command_line_file_list[$i]'\n"; } +#print "File list:\n"; +#for ($i = 0; $i <= $#file_list; $i++ ) { print "$i: '$file_list[$i]'\n"; } + + +# If selected a preview-continuous mode, make sure exactly one filename was specified +if ($preview_continuous_mode && ($num_files != 1) ) { + if ($num_specified > 1) { + &exit_help( + "Need to specify exactly one filename for ". + "preview-continuous mode\n". + " but $num_specified were specified" + ); + } + elsif ($num_specified == 1) { + &exit_help( + "Need to specify exactly one filename for ". + "preview-continuous mode\n". + " but wildcarding produced $num_files files" + ); + } + else { + &exit_help( + "Need to specify exactly one filename for ". + "preview-continuous mode.\n". + " Since none were specified on the command line, I looked for \n". + " files in '@default_files'.\n". + " But I found $num_files files, not 1." + ); + } +} + +# If selected jobname, can only apply that to one file: +if ( ($jobname ne '') && ($num_files > 1) ) { + &exit_help( + "Need to specify at most one filename if ". + "jobname specified, \n". + " but $num_files were found (after defaults and wildcarding)." + ); +} + + +# Normalize the commands, to have place-holders for source, dest etc: +&fix_cmds; + +# If landscape mode, change dvips processor, and the previewers: +if ( $landscape_mode ) +{ + $dvips = $dvips_landscape; + $dvi_previewer = $dvi_previewer_landscape; + $ps_previewer = $ps_previewer_landscape; +} + +if ( $silent ) { + add_option( \$latex, " $latex_silent_switch" ); + add_option( \$pdflatex, " $pdflatex_silent_switch" ); + add_option( \$bibtex, " $bibtex_silent_switch" ); + add_option( \$makeindex, " $makeindex_silent_switch" ); + add_option( \$dvips, " $dvips_silent_switch" ); +} + +if ( $jobname ne '' ) { + $jobstring = "--jobname=$jobname"; + add_option( \$latex, " $jobstring" ); + add_option( \$pdflatex, " $jobstring" ); +} + +# Which kind of file do we preview? +if ( $view eq "default" ) { + # If default viewer requested, use "highest" of dvi, ps and pdf + # that was requested by user. + # No explicit request means view dvi. + $view = "dvi"; + if ( $postscript_mode ) { $view = "ps"; } + if ( $pdf_mode ) { $view = "pdf"; } +} + +if ( ! 
( $dvi_mode || $pdf_mode || $postscript_mode || $printout_mode) ) { + print "No specific requests made, so default to dvi by latex\n"; + $dvi_mode = 1; +} + +# Set new-style requested rules: +if ( $dvi_mode ) { $requested_filerules{'latex'} = 1; } +if ( $pdf_mode == 1 ) { $requested_filerules{'pdflatex'} = 1; } +elsif ( $pdf_mode == 2 ) { $requested_filerules{'ps2pdf'} = 1; } +elsif ( $pdf_mode == 3 ) { $requested_filerules{'dvipdf'} = 1; } +if ( $postscript_mode ) { $requested_filerules{'dvips'} = 1; } +if ( $printout_mode ) { $one_time{'print'} = 1; } +if ( $preview_continuous_mode || $preview_mode ) { $one_time{'view'} = 1; } +if ( length($dvi_filter) != 0 ) { $requested_filerules{'dvi_filter'} = 1; } +if ( length($ps_filter) != 0 ) { $requested_filerules{'ps_filter'} = 1; } +if ( $banner ) { $requested_filerules{'dvips'} = 1; } + + +%possible_primaries = (); +foreach (&rdb_possible_primaries) { + $possible_primaries{$_} = 1; +} + +#print "POSSIBLE PRIMARIES: "; +#foreach (keys %possible_primaries ) {print "$_, ";} +#print "\n"; + + +if ( $pdf_mode == 2 ) { + # We generate pdf from ps. Make sure we have the correct kind of ps. + add_option( \$dvips, " $dvips_pdf_switch" ); +} + + +# Make convenient forms for lookup. +# Extensions always have period. + +# Convert @generated_exts to a hash for ease of look up, with exts +# preceeded by a '.' +# %generated_exts_all is used in analyzing file changes, to +# distinguish changes in user files from changes in generated files. +%generated_exts_all = (); +foreach (@generated_exts ) { + $generated_exts_all{".$_"} = 1; +} + +$quell_uptodate_msgs = $silent; + # Whether to quell informational messages when files are uptodate + # Will turn off in -pvc mode + +# Process for each file. +# The value of $bibtex_mode set in an initialization file may get +# overridden, during file processing, so save it: +#?? Unneeded now: $save_bibtex_mode = $bibtex_mode; + +$failure_count = 0; +$last_failed = 0; # Flag whether failed on making last file + # This is used for showing suitable error diagnostics +FILE: +foreach $filename ( @file_list ) +{ + # Global variables for making of current file: + $updated = 0; + $failure = 0; # Set nonzero to indicate failure at some point of + # a make. Use value as exit code if I exit. + $failure_msg = ''; # Indicate reason for failure +#?? Unneeded now: $bibtex_mode = $save_bibtex_mode; + + if ( $do_cd ) { + ($filename, $path) = fileparse( $filename ); + warn "$My_name: Changing directory to '$path'\n"; + pushd( $path ); + } + else { + $path = ''; + } + + + ## remove extension from filename if was given. 
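+ # (Illustrative, with a hypothetical argument 'thesis': &find_basename
+ #  would normally set $root_filename to 'thesis' and $texfile_name to
+ #  'thesis.tex', and it returns nonzero only if no such tex file can be
+ #  found.)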
+ if ( &find_basename($filename, $root_filename, $texfile_name) ) + { + if ( $force_mode ) { + warn "$My_name: Could not find file [$texfile_name]\n"; + } + else { + &ifcd_popd; + &exit_msg1( "Could not find file [$texfile_name]", + 11); + } + } + if ($jobname ne '' ) { + $root_filename = $jobname; + } + + # Initialize basic dependency information: + + # For use under error conditions: + @default_includes = ($texfile_name, "$root_filename.aux"); + + $fdb_file = "$root_filename.$fdb_ext"; + + if ($cleanup_fdb) { unlink $fdb_file; } + if ( $cleanup_mode > 0 ) { + my @extra_generated = (); + my @aux_files = (); + rdb_read_generatedB( $fdb_file, \@extra_generated, \@aux_files ); + if ( ($go_mode == 2) && !$silent ) { + warn "$My_name: Removing all generated files\n" unless $silent; + } + if ($diagnostics) { + show_array( "For deletion:\n Extra_generated:", @extra_generated ); + show_array( " Aux files:", @aux_files ); + } + # Add to the generated files, some log file and some backup + # files used in previous versions of latexmk + &cleanup1( 'blg', 'ilg', 'log', 'aux.bak', 'idx.bak', + split(' ',$clean_ext), + @generated_exts + ); + unlink( 'texput.log', @extra_generated, "texput.aux", @aux_files ); + if ( $cleanup_mode == 1 ) { + &cleanup1( 'dvi', 'dviF', 'ps', 'psF', 'pdf', + split(' ', $clean_full_ext) + ); + } + } + if ($cleanup_only) { next FILE; } + + # Initialize file and rule databases. + %rule_list = (); + &rdb_make_rule_list; + &rdb_set_rules(\%rule_list); + + +#??? The following are not needed if use makeB. +# ?? They may be set too early? +# Arrays and hashes for picking out accessible rules. +# Distinguish rules for making files and others + @accessible_all = sort ( &rdb_accessible( keys %requested_filerules, keys %one_time )); + %accessible_filerules = (); + foreach (@accessible_all) { + unless ( /view/ || /print/ ) { $accessible_filerules{$_} = 1; } + } + @accessible_filerules = sort keys %accessible_filerules; + +# show_array ( "=======All rules used", @accessible_all ); +# show_array ( "=======Requested file rules", sort keys %requested_filerules ); +# show_array ( "=======Rules for files", @accessible_filerules ); + + if ( $diagnostics ) { + print "$My_name: Rules after start up for '$texfile_name'\n"; + rdb_show(); + } + + %primaries = (); + foreach (@accessible_all) { + if ( ($_ eq 'latex') || ($_ eq 'pdflatex') ) { $primaries{$_} = 1; } + } + + $have_fdb = 0; + if ( (! -e $fdb_file) && (! -e "$root_filename.aux") ) { + # No aux and no fdb file => set up trivial aux file + # and corresponding fdb_file. Arrange them to provoke one run + # as minimum, but no more if actual aux file is trivial. + # (Useful on big files without cross references.) + &set_trivial_aux_fdb; + } + + if ( -e $fdb_file ) { + $rdb_errors = rdb_read( $fdb_file ); + $have_fdb = ($rdb_errors == 0); + } + if (!$have_fdb) { + # We didn't get a valid set of data on files used in + # previous run. So use filetime criterion for make + # instead of change from previous run, until we have + # done our own make. + rdb_recurseA( [keys %possible_primaries], + sub{ if ( $$Ptest_kind == 1 ) { $$Ptest_kind = 3;} } + ); + if ( -e "$root_filename.log" ) { + rdb_for_some( [keys %possible_primaries], \&rdb_set_from_logB ); + } + } + if ($go_mode) { + # Force everything to be remade. 
+ rdb_recurseA( [keys %requested_filerules], sub{$$Pout_of_date=1;} ); + } + + + if ( $diagnostics ) { + print "$My_name: Rules after initialization\n"; + rdb_show(); + } + + #************************************************************ + + if ( $preview_continuous_mode ) { + &make_preview_continuousB; + # Will probably exit by ctrl/C and never arrive here. + next FILE; + } + + +## Handling of failures: +## Variable $failure is set to indicate a failure, with information +## put in $failure_msg. +## These variables should be set to 0 and '' at any point at which it +## should be assumed that no failures have occurred. +## When after a routine is called it is found that $failure is set, then +## processing should normally be aborted, e.g., by return. +## Then there is a cascade of returns back to the outermost level whose +## responsibility is to handle the error. +## Exception: An outer level routine may reset $failure and $failure_msg +## after initial processing, when the error condition may get +## ameliorated later. + #Initialize failure flags now. + $failure = 0; + $failure_msg = ''; + $failure = rdb_makeB( keys %requested_filerules ); + if ($failure > 0) { next FILE;} + rdb_for_some( [keys %one_time], \&rdb_run1 ); +} # end FILE +continue { + if ($dependents_list) { rdb_list(); } + # Handle any errors + if ( $failure > 0 ) { + if ( $failure_msg ) { + #Remove trailing space + $failure_msg =~ s/\s*$//; + warn "$My_name: Did not finish processing file: $failure_msg\n"; + $failure = 1; + } + $failure_count ++; + $last_failed = 1; + } + else { + $last_failed = 0; + } + &ifcd_popd; +} +# If we get here without going through the continue section: +if ( $do_cd && ($#dir_stack > -1) ) { + # Just in case we did an abnormal exit from the loop + warn "$My_name: Potential bug: dir_stack not yet unwound, undoing all directory changes now\n"; + &finish_dir_stack; +} + +if ($failure_count > 0) { + if ( $last_failed <= 0 ) { + # Error occured, but not on last file, so + # user may not have seen error messages + warn "\n------------\n"; + warn "$My_name: Some operations failed.\n"; + } + if ( !$force_mode ) { + warn "$My_name: Use the -f option to force complete processing.\n"; + } + exit 12; +} + + + +# end MAIN PROGRAM +############################################################# + +sub fix_cmds { + # If commands do not have placeholders for %S etc, put them in + foreach ($latex, $pdflatex, $lpr, $lpr_dvi, $lpr_pdf, + $pdf_previewer, $ps_previewer, $ps_previewer_landscape, + $dvi_previewer, $dvi_previewer_landscape, + $kpsewhich + ) { + # Source only + if ( $_ && ! /%/ ) { $_ .= " %O %S"; } + } + foreach ($bibtex) { + # Base only + if ( $_ && ! /%/ ) { $_ .= " %O %B"; } + } + foreach ($dvipdf, $ps2pdf) { + # Source and dest without flag for destination + if ( $_ && ! /%/ ) { $_ .= " %O %S %D"; } + } + foreach ($dvips, $makeindex) { + # Source and dest with -o dest before source + if ( $_ && ! /%/ ) { $_ .= " %O -o %D %S"; } + } + foreach ($dvi_filter, $ps_filter) { + # Source and dest, but as filters + if ( $_ && ! /%/ ) { $_ .= " %O <%S >%D"; } + } +} #END fix_cmds + +############################################################# + +sub add_option { + # Call add_option( \$cmd, $opt ) + # Add option to command + if ( ${$_[0]} !~ /%/ ) { &fix_cmds; } + ${$_[0]} =~ s/%O/$_[1] %O/; +} #END add_option + +############################################################# + +sub rdb_make_rule_list { +# Substitutions: %S = source, %D = dest, %B = this rule's base +# %T = texfile, %R = root = base for latex. 
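+# (Illustrative example with a hypothetical root file 'paper': a dvips
+#  command of the form "dvips %O -o %D %S" would, after substitution and
+#  with no extra options or filters configured, be run as
+#      dvips -o paper.ps paper.dvi
+#  since that rule's source is %R.dvi and its destination is %B.ps.)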
+ + # Defaults for dvi, ps, and pdf files + # Use local, not my, so these variables can be referenced + local $dvi_final = "%R.dvi"; + local $ps_final = "%R.ps"; + local $pdf_final = "%R.pdf"; + if ( length($dvi_filter) > 0) { + $dvi_final = "%R.dviF"; + } + if ( length($ps_filter) > 0) { + $ps_final = "%R.psF"; + } + + my $print_file = ''; + my $print_cmd = ''; + if ( $print_type eq 'dvi' ) { + $print_file = $dvi_final; + $print_cmd = $lpr_dvi; + } + elsif ( $print_type eq 'pdf' ) { + $print_file = $pdf_final; + $print_cmd = $lpr_pdf; + } + elsif ( $print_type eq 'ps' ) { + $print_file = $ps_final; + $print_cmd = $lpr; + } + + my $view_file = ''; + my $viewer = ''; + my $viewer_update_method = 0; + my $viewer_update_signal = undef; + my $viewer_update_command = undef; + + if ( ($view eq 'dvi') || ($view eq 'pdf') || ($view eq 'ps') ) { + $view_file = ${$view.'_final'}; + $viewer = ${$view.'_previewer'}; + $viewer_update_method = ${$view.'_update_method'}; + $viewer_update_signal = ${$view.'_update_signal'}; + if (defined ${$view.'_update_command'}) { + $viewer_update_command = ${$view.'_update_command'}; + } + } + # Specification of internal command for viewer update: + my $PA_update = ['do_update_view', $viewer_update_method, $viewer_update_signal, 0, 1]; + +# For test_kind: Use file contents for latex and friends, but file time for the others. +# This is because, especially for dvi file, the contents of the file may contain +# a pointer to a file to be included, not the contents of the file! + %rule_list = ( + 'latex' => [ 'primary', "$latex", '', "%T", "%B.dvi", "%R", 1 ], + 'pdflatex' => [ 'primary', "$pdflatex", '', "%T", "%B.pdf", "%R", 1 ], + 'dvipdf' => [ 'external', "$dvipdf", 'do_viewfile', $dvi_final, "%B.pdf", "%R", 2 ], + 'dvips' => [ 'external', "$dvips", 'do_viewfile', $dvi_final, "%B.ps", "%R", 2 ], + 'dvifilter'=> [ 'external', $dvi_filter, 'do_viewfile', "%B.dvi", "%B.dviF", "%R", 2 ], + 'ps2pdf' => [ 'external', "$ps2pdf", 'do_viewfile', $ps_final, "%B.pdf", "%R", 2 ], + 'psfilter' => [ 'external', $ps_filter, 'do_viewfile', "%B.ps", "%B.psF", "%R", 2 ], + 'print' => [ 'external', "$print_cmd", 'if_source', $print_file, "", "", 2 ], + 'update_view' => [ 'external', $viewer_update_command, $PA_update, + $view_file, "", "", 2 ], + 'view' => [ 'external', "$viewer", 'if_source', $view_file, "", "", 2 ], + ); + %source_list = (); + foreach my $rule (keys %rule_list) { + $source_list{$rule} = []; + my $PAsources = $source_list{$rule}; + my ( $cmd_type, $cmd, $source, $dest, $root ) = @{$rule_list{$rule}}; + if ($source) { + push @$PAsources, [ $rule, $source, '' ]; + } + } + +# Ensure we only have one way to make pdf file, and that it is appropriate: + if ($pdf_mode == 1) { delete $rule_list{'dvipdf'}; delete $rule_list{'ps2pdf'}; } + elsif ($pdf_mode == 2) { delete $rule_list{'dvipdf'}; delete $rule_list{'pdflatex'}; } + else { delete $rule_list{'pdflatex'}; delete $rule_list{'ps2pdf'}; } + +} # END rdb_make_rule_list + +#************************************************************ + +sub rdb_set_rules { + # Call rdb_set_rules( \%rule_list, ...) + # Set up rule database from definitions + + # Map of files to rules that MAKE them: + local %from_rules = (); + %rule_db = (); + + foreach my $Prule_list (@_) { + foreach my $rule ( sort keys %$Prule_list) { + my ( $cmd_type, $ext_cmd, $int_cmd, $source, $dest, $base, $test_kind ) = @{$$Prule_list{$rule}}; + my $needs_making = 0; + # Substitute in the filename variables, since we will use + # those for determining filenames. 
But delay expanding $cmd + # until run time, in case of changes. + foreach ($base, $source, $dest ) { + s/%R/$root_filename/; + } + foreach ($source, $dest ) { + s/%B/$base/; + s/%T/$texfile_name/; + } + # print "$rule: $cmd_type, EC='$ext_cmd', IC='$int_cmd', $test_kind,\n", + # " S='$source', D='$dest', B='$base' $needs_making\n"; + rdb_create_rule( $rule, $cmd_type, $ext_cmd, $int_cmd, $test_kind, + $source, $dest, $base, + $needs_making ); + if ($dest) { $from_rules{$dest} = $rule ; } + } + rdb_for_all( + 0, + sub{ + # my ($base, $path, $ext) = fileparse( $file, '\.[^\.]*' ); + # if ( exists $from_rules{$file} && ! exists $generated_exts_all{$ext} ) { + # # Show how to make this file. But don't worry about generated + # # files. + if ( exists $from_rules{$file} ) { + $$Pfrom_rule = $from_rules{$file}; + } + #?? print "$rule: $file, $$Pfrom_rule\n"; + } + ); + } # End arguments of subroutine + &rdb_make_links; +} # END rdb_set_rules + +#************************************************************ + +sub rdb_make_links { +# ?? Problem if there are multiple rules for getting a file. Notably pdf. +# Which one to choose? + # Create $from_rule if there's a suitable rule. + # Map files to rules: + local %from_rules = (); + rdb_for_all( sub{ if($$Pdest){$from_rules{$$Pdest} = $rule;} } ); +#?? foreach (sort keys %from_rules) {print "D='$_' F='$from_rules{$_}\n";} + rdb_for_all( + 0, + sub{ + if ( exists $from_rules{$file} ) { $$Pfrom_rule = $from_rules{$file}; } +#?? print "$rule: $file, $$Pfrom_rule\n"; + } + ); + rdb_for_all( + 0, + sub{ + if ( exists $from_rules{$file} ) { + $$Pfrom_rule = $from_rules{$file}; + } + if ( $$Pfrom_rule && (! rdb_rule_exists( $$Pfrom_rule ) ) ) { + $$Pfrom_rule = ''; + } +#?? print "$rule: $file, $$Pfrom_rule\n"; + } + ); +} # END rdb_make_links + +#************************************************************ + +sub set_trivial_aux_fdb { + # 1. Write aux file EXACTLY as would be written if the tex file + # had no cross references, etc. I.e., a minimal .aux file. + # 2. Write a corresponding fdb file + # 3. Provoke a run of (pdf)latex (actually of all primaries). + + local $aux_file = "$root_filename.aux"; + open( aux_file, '>', $aux_file ) + or die "Cannot write file '$aux_file'\n"; + print aux_file "\\relax \n"; + close(aux_file); + + foreach my $rule (keys %primaries ) { + rdb_ensure_file( $rule, $texfile_name ); + rdb_ensure_file( $rule, $aux_file ); + rdb_one_rule( $rule, + sub{ $$Pout_of_date = 1; } + ); + } + &rdb_write( $fdb_file ); +} #END set_trivial_aux_fdb + +#************************************************************ +#### Particular actions +#************************************************************ +#************************************************************ + +sub do_cusdep { + # Unconditional application of custom-dependency + # except that rule is not applied if the source file source + # does not exist, and an error is returned if the dest is not made. + # + # Assumes rule context for the custom-dependency, and that my first + # argument is the name of the subroutine to apply + my $func_name = $_[0]; + my $return = 0; + if ( !-e $$Psource ) { + # Source does not exist. Users of this rule will need to turn + # it off when custom dependencies are reset + if ( !$silent ) { +## ??? Was commented out. 1 Sep. 
2008 restored, for cusdep no-file-exists issue + warn "$My_name: In trying to apply custom-dependency rule\n", + " to make '$$Pdest' from '$$Psource'\n", + " the source file has disappeared since the last run\n"; + } + # Treat as successful + } + elsif ( !$func_name ) { + warn "$My_name: Possible misconfiguration or bug:\n", + " In trying to apply custom-dependency rule\n", + " to make '$$Pdest' from '$$Psource'\n", + " the function name is blank.\n"; + } + elsif ( ! defined &$func_name ) { + warn "$My_name: Misconfiguration or bug,", + " in trying to apply custom-dependency rule\n", + " to make '$$Pdest' from '$$Psource'\n", + " function name '$func_name' does not exists.\n"; + } + else { + my $cusdep_ret = &$func_name( $$Pbase ); + if ( defined $cusdep_ret && ($cusdep_ret != 0) ) { + $return = $cusdep_ret; + if ($return) { + warn "Rule '$rule', function '$func_name'\n", + " failed with return code = $return\n"; + } + } + elsif ( !-e $$Pdest ) { + # Destination non-existent, but routine failed to give an error + warn "$My_name: In running custom-dependency rule\n", + " to make '$$Pdest' from '$$Psource'\n", + " function '$func_name' did not make the destination.\n"; + $return = -1; + } + } + return $return; +} # END do_cusdep + +#************************************************************ + +sub do_viewfile { + # Unconditionally make file for viewing, going through temporary file if + # Assumes rule context + + my $return = 0; + my ($base, $path, $ext) = fileparseA( $$Pdest ); + if ( &view_file_via_temporary ) { + my $tmpfile = tempfile1( "${root_filename}_tmp", $ext ); + $return = &rdb_ext_cmd1( '', '', $tmpfile ); + move( $tmpfile, $$Pdest ); + } + else { + $return = &rdb_ext_cmd; + } + return $return; +} #END do_viewfile + +#************************************************************ + +sub do_update_view { + # Update viewer + # Assumes rule context + # Arguments: (method, signal, viewer_process) + + my $return = 0; + + # Although the process is passed as an argument, we'll need to update it. + # So (FUDGE??) bypass the standard interface for the process. + # We might as well do this for all the arguments. + my $viewer_update_method = ${$PAint_cmd}[1]; + my $viewer_update_signal = ${$PAint_cmd}[2]; + my $Pviewer_process = \${$PAint_cmd}[3]; + my $Pneed_to_get_viewer_process = \${$PAint_cmd}[4]; + + if ($viewer_update_method == 2) { + if ($$Pneed_to_get_viewer_process) { + $$Pviewer_process = &find_process_id( $$Psource ); + if ($$Pviewer_process != 0) { + $$Pneed_to_get_viewer_process = 0; + } + } + if ($$Pviewer_process == 0) { + print "$My_name: need to signal viewer for file '$$Psource', but didn't get \n", + " process ID for some reason, e.g., no viewer, bad configuration, bug\n" + if $diagnostics ; + } + elsif ( defined $viewer_update_signal) { + print "$My_name: signalling viewer, process ID $$Pviewer_process\n" + if $diagnostics ; + kill $viewer_update_signal, $$Pviewer_process; + } + else { + warn "$My_name: viewer is supposed to be sent a signal\n", + " but no signal is defined. Misconfiguration or bug?\n"; + $return = 1; + } + } + elsif ($viewer_update_method == 4) { + if (defined $$Pext_cmd) { + $return = &rdb_ext_cmd; + } + else { + warn "$My_name: viewer is supposed to be updated by running a command,\n", + " but no command is defined. Misconfiguration or bug?\n"; + } + } + return $return; +} #END do_update_view + +#************************************************************ + +sub if_source { + # Unconditionally apply rule if source file exists. 
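+ # (This is the internal command used by the 'print' and 'view' rules
+ #  defined in rdb_make_rule_list above.)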
+ # Assumes rule context + if ( -e $$Psource ) { + return &rdb_ext_cmd; + } + else { + return -1; + } +} #END if_source + +#************************************************************ +#### Subroutines +#************************************************************ +#************************************************************ + +# Finds the basename of the root file +# Arguments: +# 1 - Filename to breakdown +# 2 - Where to place base file +# 3 - Where to place tex file +# Returns non-zero if tex file does not exist +# +# The rules for determining this depend on the implementation of TeX. +# The variable $extension_treatment determines which rules are used. + +sub find_basename +#?? Need to use kpsewhich, if possible +{ + local($given_name, $base_name, $ext, $path, $tex_name); + $given_name = $_[0]; + if ( "$extension_treatment" eq "miktex_old" ) { + # Miktex v. 1.20d: + # 1. If the filename has an extension, then use it. + # 2. Else append ".tex". + # 3. The basename is obtained from the filename by + # removing the path component, and the extension, if it + # exists. If a filename has a multiple extension, then + # all parts of the extension are removed. + # 4. The names of generated files (log, aux) are obtained by + # appending .log, .aux, etc to the basename. Note that + # these are all in the CURRENT directory, and the drive/path + # part of the originally given filename is ignored. + # + # Thus when the given filename is "\tmp\a.b.c", the tex + # filename is the same, and the basename is "a". + + ($base_name, $path, $ext) = fileparse( $given_name, '\..*' ); + if ( "$ext" eq "") { $tex_name = "$given_name.tex"; } + else { $tex_name = $given_name; } + $_[1] = $base_name; + $_[2] = $tex_name; + } + elsif ( "$extension_treatment" eq "unix" ) { + # unix (at least web2c 7.3.1) => + # 1. If filename.tex exists, use it, + # 2. else if filename exists, use it. + # 3. The base filename is obtained by deleting the path + # component and, if an extension exists, the last + # component of the extension, even if the extension is + # null. (A name ending in "." has a null extension.) + # 4. The names of generated files (log, aux) are obtained by + # appending .log, .aux, etc to the basename. Note that + # these are all in the CURRENT directory, and the drive/path + # part of the originally given filename is ignored. + # + # Thus when the given filename is "/tmp/a.b.c", there are two + # cases: + # a. /tmp/a.b.c.tex exists. Then this is the tex file, + # and the basename is "a.b.c". + # b. /tmp/a.b.c.tex does not exist. Then the tex file is + # "/tmp/a.b.c", and the basename is "a.b". + + if ( -e "$given_name.tex" ) { + $tex_name = "$given_name.tex"; + } + else { + $tex_name = "$given_name"; + } + ($base_name, $path, $ext) = fileparse( $tex_name, '\.[^\.]*' ); + $_[1] = $base_name; + $_[2] = $tex_name; + } + else { + die "$My_name: Incorrect configuration gives \$extension_treatment=", + "'$extension_treatment'\n"; + } + if ($diagnostics) { + print "Given='$given_name', tex='$tex_name', base='$base_name'\n"; + } + return ! -e $tex_name; +} #END find_basename + +#************************************************************ + +sub make_preview_continuousB { + # Version for use with makeB + local @changed = (); + local @disappeared = (); + local @no_dest = (); # Non-existent destination files + local @rules_to_apply = (); + local $failure = 0; + local $runs = 0; + local %rules_applied = (); + local $updated = 0; + + # What to make? 
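+ # (%requested_filerules was filled from the command-line options earlier;
+ #  e.g. a hypothetical `latexmk -dvi -ps -pvc file` request would leave
+ #  the 'latex' and 'dvips' rules in it.)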
+ my @targets = keys %requested_filerules; + + $quell_uptodate_msgs = 1; + + local $view_file = ''; + rdb_one_rule( 'view', sub{ $view_file = $$Psource; } ); + + if ( ($view eq 'dvi') || ($view eq 'pdf') || ($view eq 'ps') ) { + warn "Viewing $view\n"; + } + elsif ( $view eq 'none' ) { + warn "Not using a previewer\n"; + $view_file = ''; + } + else { + warn "$My_name: BUG: Invalid preview method '$view'\n"; + exit 20; + } + + my $viewer_running = 0; # No viewer known to be running yet + # Get information from update_view rule + local $viewer_update_method = 0; + # Pointers so we can update the following: + local $Pviewer_process = undef; + local $Pneed_to_get_viewer_process = undef; + rdb_one_rule( 'update_view', + sub{ $viewer_update_method = $$PAint_cmd[1]; + $Pviewer_process = \$$PAint_cmd[3]; + $Pneed_to_get_viewer_process = \$$PAint_cmd[4]; + } + ); + # Note that we don't get the previewer process number from the program + # that starts it; that might only be a script to get things set up and the + # actual previewer could be (and sometimes IS) another process. + + if ( ($view_file ne '') && (-e $view_file) && !$new_viewer_always ) { + # Is a viewer already running? + # (We'll save starting up another viewer.) + $$Pviewer_process = &find_process_id( $view_file ); + if ( $$Pviewer_process ) { + warn "$My_name: Previewer is already running\n" + if !$silent; + $viewer_running = 1; + $$Pneed_to_get_viewer_process = 0; + } + } + + # Loop forever, rebuilding .dvi and .ps as necessary. + # Set $first_time to flag first run (to save unnecessary diagnostics) +CHANGE: + for (my $first_time = 1; 1; $first_time = 0 ) { + $updated = 0; + $failure = 0; + $failure_msg = ''; + if ( $MSWin_fudge_break && ($^O eq "MSWin32") ) { + # Fudge under MSWin32 ONLY, to stop perl/latexmk from + # catching ctrl/C and ctrl/break, and let it only reach + # downstream programs. See comments at first definition of + # $MSWin_fudge_break. + $SIG{BREAK} = $SIG{INT} = 'IGNORE'; + } + $failure = rdb_makeB( @targets ); + +## warn "=========Viewer PID = $$Pviewer_process; updated=$updated\n"; + + if ( $MSWin_fudge_break && ($^O eq "MSWin32") ) { + $SIG{BREAK} = $SIG{INT} = 'DEFAULT'; + } + if ( $failure > 0 ) { + if ( !$failure_msg ) { + $failure_msg = 'Failure to make the files correctly'; + } + # There will be files changed during the run that are irrelevant. + # We need to wait for the user to change the files. + # So set the GENERATED files as up-to-date + rdb_for_some( [keys %current_primaries], \&rdb_update_gen_files ); + + $failure_msg =~ s/\s*$//; #Remove trailing space + warn "$My_name: $failure_msg\n", + " ==> You will need to change a source file before I do another run <==\n"; + } + elsif ( ($view_file ne '') && (-e $view_file) && $updated && $viewer_running ) { + # A viewer is running. Explicitly get it to update screen if we have to do it: + rdb_one_rule( 'update_view', \&rdb_run1 ); + } + elsif ( ($view_file ne '') && (-e $view_file) && !$viewer_running ) { + # Start the viewer + if ( !$silent ) { + if ($new_viewer_always) { + warn "$My_name: starting previewer for '$view_file'\n", + "------------\n"; + } + else { + warn "$My_name: I have not found a previewer that ", + "is already running. 
\n", + " So I will start it for '$view_file'\n", + "------------\n"; + } + } + local $retcode = rdb_makeB ( 'view' ); + if ( $retcode != 0 ) { + if ($force_mode) { + warn "$My_name: I could not run previewer\n"; + } + else { + &exit_msg1( "I could not run previewer", $retcode); + } + } + else { + $viewer_running = 1; + $$Pneed_to_get_viewer_process = 1; + } # end analyze result of trying to run viewer + } # end start viewer + if ( $first_time || $updated || $failure ) { + print "\n=== Watching for updated files. Use ctrl/C to stop ...\n"; + } + $waiting = 1; if ($diagnostics) { warn "WAITING\n"; } + WAIT: while (1) { + sleep($sleep_time); + &rdb_clear_change_record; + rdb_recurseA( [@targets], \&rdb_flag_changes_here ); + if ( &rdb_count_changes > 0) { + &rdb_diagnose_changes + unless $silent; +#??? + warn "$My_name: File(s) changed or not used in previous run(s). Remake files.\n"; + last WAIT; + } + # Does this do this job???? + local $new_files = 0; + rdb_for_some( [keys %current_primaries], sub{ $new_files += &rdb_find_new_filesB } ); + if ($new_files > 0) { + warn "$My_name: New file(s) found.\n"; + last WAIT; + } + } # end WAIT: + $waiting = 0; if ($diagnostics) { warn "NOT WAITING\n"; } + } #end infinite_loop CHANGE: +} #END sub make_preview_continuousB + +#************************************************************ + +sub process_rc_file { + # Usage process_rc_file( filename ) + # Run rc_file whose name is given in first argument + # Exit with code 11 if file could not be read. + # (In general this is not QUITE the right error) + # Exit with code 13 if there is a syntax error or other problem. + # ???Should I leave the exiting to the caller (perhaps as an option)? + # But I can always catch it with an eval if necessary. + # That confuses ctrl/C and ctrl/break handling. + my $rc_file = $_[0]; + warn "$My_name: Executing PERL code in file '$rc_file'...\n" + if $diagnostics; + do( $rc_file ); + # The return value from the do is not useful, since it is the value of + # the last expression evaluated, which could be anything. + # The correct test of errors is on the values of $! and $@. + +# This is not entirely correct. On gluon2: +# rc_file does open of file, and $! has error, apparently innocuous +# See ~/proposal/06/latexmkrc-effect + + my $OK = 1; + if ( $! ) { + # Get both numeric error and its string, by forcing numeric and + # string contexts: + my $err_no = $!+0; + my $err_string = "$!"; + warn "$My_name: Initialization file '$rc_file' could not be read,\n", + " or it gave some other problem. Error code \$! = $err_no.\n", + " Error string = '$err_string'\n"; + $! = 256; + $OK = 0; + } + if ( $@ ) { + $! = 256; + # Indent the error message to make it easier to locate + my $indented = prefix( $@, " " ); + $@ = ""; + warn "$My_name: Initialization file '$rc_file' gave an error:\n", + "$indented"; + $OK = 0; + } + if ( ! $OK ) { + die "$My_name: Stopping because of problem with rc file\n"; + } +} #END process_rc_file + +#************************************************************ + +sub execute_code_string { + # Usage execute_code_string( string_of_code ) + # Run the PERL code contained in first argument + # Exit with code 13 if there is a syntax error or other problem. + # ???Should I leave the exiting to the caller (perhaps as an option)? + # But I can always catch it with an eval if necessary. + # That confuses ctrl/C and ctrl/break handling. 
+ my $code = $_[0]; + warn "$My_name: Executing initialization code specified by -e:\n", + " '$code'...\n" + if $diagnostics; + eval $code; + # The return value from the eval is not useful, since it is the value of + # the last expression evaluated, which could be anything. + # The correct test of errors is on the values of $! and $@. + + if ( $@ ) { + $! = 256; + my $message = $@; + $@ = ""; + $message =~ s/\s*$//; + die "$My_name: ", + "Stopping because executing following code from command line\n", + " $code\n", + "gave an error:\n", + " $message\n"; + } +} #END execute_code_string + +#************************************************************ + +sub cleanup1 { + # Usage: cleanup1( exts_without_period, ... ) + foreach (@_) { unlink("$root_filename.$_"); } +} #END cleanup1 + +#************************************************************ +#************************************************************ +#************************************************************ + +# Error handling routines, warning routines, help + +#************************************************************ + +sub die_trace { + # Call: die_trace( message ); + &traceback; # argument(s) passed unchanged + die "\n"; +} #END die_trace + +#************************************************************ + +sub traceback { + # Call: &traceback + # or traceback( message, ) + my $msg = shift; + if ($msg) { warn "$msg\n"; } + warn "Traceback:\n"; + my $i=0; # Start with immediate caller + while ( my ($pack, $file, $line, $func) = caller($i++) ) { + if ($func eq 'die_trace') { next; } + warn " $func called from line $line\n"; + } +} #END traceback + +#************************************************************ + +sub exit_msg1 +{ + # exit_msg1( error_message, retcode [, action]) + # 1. display error message + # 2. if action set, then restore aux file + # 3. exit with retcode + warn "\n------------\n"; + warn "$My_name: $_[0].\n"; + warn "-- Use the -f option to force complete processing.\n"; + + my $retcode = $_[1]; + if ($retcode >= 256) { + # Retcode is the kind returned by system from an external command + # which is 256 * command's_retcode + $retcode /= 256; + } + exit $retcode; +} #END exit_msg1 + +#************************************************************ + +sub warn_running { + # Message about running program: + if ( $silent ) { + warn "$My_name: @_\n"; + } + else { + warn "------------\n@_\n------------\n"; + } +} #END warn_running + +#************************************************************ + +sub exit_help +# Exit giving diagnostic from arguments and how to get help. 
+{ + warn "\n$My_name: @_\n", + "Use\n", + " $my_name -help\nto get usage information\n"; + exit 10; +} #END exit_help + + +#************************************************************ + +sub print_help +{ + print + "$My_name $version_num: Automatic LaTeX document generation routine\n\n", + "Usage: $my_name [latexmk_options] [filename ...]\n\n", + " Latexmk_options:\n", + " -bm - Print message across the page when converting to postscript\n", + " -bi - Set contrast or intensity of banner\n", + " -bs - Set scale for banner\n", + " -commands - list commands used by $my_name for processing files\n", + " -c - clean up (remove) all nonessential files, except\n", + " dvi, ps and pdf files.\n", + " This and the other clean-ups are instead of a regular make.\n", + " -C - clean up (remove) all nonessential files\n", + " including aux, dep, dvi, postscript and pdf files\n", + " But exclude file of database of file information\n", + " -CA - clean up (remove) absolutely ALL nonessential files\n", + " including aux, dep, dvi, postscript and pdf files,\n", + " and file of database of file information\n", + " -CF - Remove file of database of file information before doing \n", + " other actions\n", + " -cd - Change to directory of source file when processing it\n", + " -cd- - Do NOT change to directory of source file when processing it\n", + " -dependents - Show list of dependent files after processing\n", + " -dependents- - Do not show list of dependent files after processing\n", + " -dF - Filter to apply to dvi file\n", + " -dvi - generate dvi\n", + " -dvi- - turn off required dvi\n", + " -e - Execute specified PERL code\n", + " -f - force continued processing past errors\n", + " -f- - turn off forced continuing processing past errors\n", + " -F - Ignore non-existent files when testing for dependencies\n", + " -F- - Turn off -F\n", + " -gg - Super go mode: clean out generated files (-CA), and then\n", + " process files regardless of file timestamps\n", + " -g - process regardless of file timestamps\n", + " -g- - Turn off -g\n", + " -h - print help\n", + " -help - print help\n", + " -jobname=STRING - set basename of output file(s) to STRING.\n", + " (Like --jobname=STRING on command line for many current\n", + " implementations of latex/pdflatex.)\n", + " -l - force landscape mode\n", + " -l- - turn off -l\n", + " -new-viewer - in -pvc mode, always start a new viewer\n", + " -new-viewer- - in -pvc mode, start a new viewer only if needed\n", + " -nodependents - Do not show list of dependent files after processing\n", + " -pdf - generate pdf by pdflatex\n", + " -pdfdvi - generate pdf by dvipdf\n", + " -pdfps - generate pdf by ps2pdf\n", + " -pdf- - turn off pdf\n", + " -ps - generate postscript\n", + " -ps- - turn off postscript\n", + " -pF - Filter to apply to postscript file\n", + " -p - print document after generating postscript.\n", + " (Can also .dvi or .pdf files -- see documentation)\n", + " -print=dvi - when file is to be printed, print the dvi file\n", + " -print=ps - when file is to be printed, print the ps file (default)\n", + " -print=pdf - when file is to be printed, print the pdf file\n", + " -pv - preview document. (Side effect turn off continuous preview)\n", + " -pv- - turn off preview mode\n", + " -pvc - preview document and continuously update. 
(This also turns\n", + " on force mode, so errors do not cause $my_name to stop.)\n", + " (Side effect: turn off ordinary preview mode.)\n", + " -pvc- - turn off -pvc\n", + " -r - Read custom RC file\n", + " -silent - silence progress messages from called programs\n", + " -v - display program version\n", + " -verbose - display usual progress messages from called programs\n", + " -version - display program version\n", + " -view=default - viewer is default (dvi, ps, pdf)\n", + " -view=dvi - viewer is for dvi\n", + " -view=none - no viewer is used\n", + " -view=ps - viewer is for ps\n", + " -view=pdf - viewer is for pdf\n", + " filename = the root filename of LaTeX document\n", + "\n", + "-p, -pv and -pvc are mutually exclusive\n", + "-h, -c and -C overides all other options.\n", + "-pv and -pvc require one and only one filename specified\n", + "All options can be introduced by '-' or '--'. (E.g., --help or -help.)\n", + "Contents of RC file specified by -r overrides options specified\n", + " before the -r option on the command line\n"; + +} #END print_help + +#************************************************************ +sub print_commands +{ + warn "Commands used by $my_name:\n", + " To run latex, I use \"$latex\"\n", + " To run pdflatex, I use \"$pdflatex\"\n", + " To run bibtex, I use \"$bibtex\"\n", + " To run makeindex, I use \"$makeindex\"\n", + " To make a ps file from a dvi file, I use \"$dvips\"\n", + " To make a ps file from a dvi file with landscape format, ", + "I use \"$dvips_landscape\"\n", + " To make a pdf file from a dvi file, I use \"$dvipdf\"\n", + " To make a pdf file from a ps file, I use \"$ps2pdf\"\n", + " To view a pdf file, I use \"$pdf_previewer\"\n", + " To view a ps file, I use \"$ps_previewer\"\n", + " To view a ps file in landscape format, ", + "I use \"$ps_previewer_landscape\"\n", + " To view a dvi file, I use \"$dvi_previewer\"\n", + " To view a dvi file in landscape format, ", + "I use \"$dvi_previewer_landscape\"\n", + " To print a ps file, I use \"$lpr\"\n", + " To print a dvi file, I use \"$lpr_dvi\"\n", + " To print a pdf file, I use \"$lpr_pdf\"\n", + " To find running processes, I use \"$pscmd\", \n", + " and the process number is at position $pid_position\n"; + warn "Notes:\n", + " Command starting with \"start\" is run detached\n", + " Command that is just \"start\" without any other command, is\n", + " used under MS-Windows to run the command the operating system\n", + " has associated with the relevant file.\n", + " Command starting with \"NONE\" is not used at all\n"; +} #END print_commands + +#************************************************************ + +sub view_file_via_temporary { + return $always_view_file_via_temporary + || ($pvc_view_file_via_temporary && $preview_continuous_mode); +} #END view_file_via_temporary + +#************************************************************ +#### Tex-related utilities + + +sub check_bibtex_log { + # Check for bibtex warnings: + # Usage: check_bibtex_log( base_of_bibtex_run ) + # return 0: OK, 1: bibtex warnings, 2: bibtex errors, + # 3: could not open .blg file. 
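+ # (Example with a hypothetical basename: check_bibtex_log('paper') opens
+ #  'paper.blg' and scans it for "Warning--" and "error message" lines.)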
+ + my $base = $_[0]; + my $log_name = "$base.blg"; + my $log_file = new FileHandle; + open( $log_file, "<$log_name" ) + or return 3; + my $have_warning = 0; + my $have_error = 0; + while (<$log_file>) { + if (/Warning--/) { + #print "Bibtex warning: $_"; + $have_warning = 1; + } + if (/error message/) { + #print "Bibtex error: $_"; + $have_error = 1; + } + } + close $log_file; + if ($have_error) {return 2;} + if ($have_warning) {return 1;} + return 0; +} #END check_bibtex_log + +#************************************************** + +sub clean_file_name{ + # Convert filename found in log file to true filename. + # Used normally only by parse_logB, below + # 1. For names of form + # `"string".ext', which arises e.g., from \jobname.bbl: + # when the base filename contains spaces, \jobname has quotes. + # and from \includegraphics with basename specified. + # 2. Or "string.ext" from \includegraphcs with basename and ext specified. + my $filename = $_[0]; + $filename =~ s/^\"([^\"]*)\"(.*)$/$1$2/; + return $filename; +} +# ------------------------------ + +sub parse_logB { +# Scan log file for: dependent files +# reference_changed, bad_reference, bad_citation +# Return value: 1 if success, 0 if no log file. +# Set global variables: +# %dependents: maps definite dependents to code: +# 0 = from missing-file line +# May have no extension +# May be missing path +# 1 = from 'File: ... Graphic file (type ...)' line +# no path. Should exist, but may need a search, by kpsewhich. +# 2 = from regular '(...' coding for input file, +# Has NO path, which it would do if LaTeX file +# Highly likely to be mis-parsed line +# 3 = ditto, but has a path character ('/'). +# Should be LaTeX file that exists. +# If it doesn't exist, we have probably a mis-parsed line. +# There's no need to do a search. +# 4 = definitive, which in this subroutine is only +# done for default dependents +# Treat the following specially, since they have special rules +# @bbl_files to list of .bbl files. +# %idx_files to map from .idx files to .ind files. +# Also set +# $reference_changed, $bad_reference, $bad_citation +# Trivial or default values if log file does not exist/cannot be opened + +# Give a quick way of looking up custom-dependency extensions + my %cusdep_from = (); + my %cusdep_to = (); + foreach ( @cus_dep_list ) { + my ($fromext, $toext) = split; + $cusdep_from{$fromext} = $cusdep_from{".$fromext"} = $_; + $cusdep_to{$toext} = $cusdep_to{".$toext"} = $_; + } +# print "==== Cusdep from-exts:"; foreach (keys %cusdep_from) {print " '$_'";} print "\n"; +# print "==== Cusdep to-exts:"; foreach (keys %cusdep_to) {print " '$_'";} print "\n"; + + # Returned info: + %dependents = (); + foreach (@default_includes) { $dependents{$_} = 4; } + @bbl_files = (); + %idx_files = (); # Maps idx_file to (ind_file, base) + + $reference_changed = 0; + $bad_reference = 0; + $bad_citation = 0; + + my $log_name = "$root_filename.log"; + my $log_file = new FileHandle; + if ( ! open( $log_file, "<$log_name" ) ) { + return 0; + } + +LINE: + while(<$log_file>) { + # Could use chomp here, but that fails if there is a mismatch + # between the end-of-line sequence used by latex and that + # used by perl. (Notably a problem with MSWin latex and + # cygwin perl!) + s/[\n\r]*$//; + if ( $. == 1 ){ + if ( /^This is / ) { + # First line OK + next LINE; + } else { + warn "$My_name: Error on first line of '$log_name'. ". 
+ "This is apparently not a TeX log file.\n"; + close $log_file; + $failure = 1; + $failure_msg = "Log file '$log_name' appears to have wrong format."; + return 0; + } + } + # Handle wrapped lines: + # They are lines brutally broken at exactly $log_wrap chars + # excluding line-end. + my $len = length($_); + while ($len == $log_wrap) { + my $extra = <$log_file>; + $extra =~ s/[\n\r]*$//; + $len = length($extra); + $_ .= $extra; + } + # Check for changed references, bad references and bad citations: + if (/Rerun to get/) { + warn "$My_name: References changed.\n"; + $reference_changed = 1; + } + if (/LaTeX Warning: (Reference[^\001]*undefined)./) { + warn "$My_name: $1 \n"; + $bad_reference = 1; + } + if (/LaTeX Warning: (Citation[^\001]*undefined)./) { + warn "$My_name: $1 \n"; + $bad_citation = 1; + } + if ( /^Document Class: / ) { + # Class sign-on line + next LINE; + } + if ( /^\(Font\)/ ) { + # Font info line + next LINE; + } + if ( /^Output written on / ) { + # Latex message + next LINE; + } + if ( /^Overfull / + || /^Underfull / + || /^or enter new name\. \(Default extension: .*\)/ + || /^\*\*\* \(cannot \\read from terminal in nonstop modes\)/ + ) { + # Latex error/warning, etc. + next LINE; + } +# Test for writing of index file. The precise format of the message +# depends on which package (makeidx.sty , multind.sty or index.sty) and +# which version writes the message. + if ( /Writing index file (.*)$/ ) { + my $idx_file = ''; + if ( /^Writing index file (.*)$/ ) { + # From makeidx.sty or multind.sty + $idx_file = $1; + } + elsif ( /^index\.sty> Writing index file (.*)$/ ) { + # From old versions of index.sty + $idx_file = $1; + } + elsif ( /^Package \S* Info: Writing index file (.*) on input line/ ) { + # From new versions of index.sty + $idx_file = $1; + } + else { + warn "$My_name: Message indicates index file was written\n", + " ==> but I do not know how to understand it: <==\n", + " '$_'\n"; + next LINE; + } + # Typically, there is trailing space, not part of filename: + $idx_file =~ s/\s*$//; + $idx_file = clean_file_name($idx_file); + my ($idx_base, $idx_path, $idx_ext) = fileparseA( $idx_file ); + $idx_base = $idx_path.$idx_base; + $idx_file = $idx_base.$idx_ext; + if ( $idx_ext eq '.idx' ) { + warn "$My_name: Index file '$idx_file' was written\n" + unless $silent; + $idx_files{$idx_file} = [ "$idx_base.ind", $idx_base ]; + } + elsif ( exists $cusdep_from{$idx_ext} ) { + if ( !$silent ) { + warn "$My_name: Index file '$idx_file' was written\n"; + warn " Cusdep '$cusdep_from{$idx_ext}' should be used\n"; + } + # No action needed here + } + else { + warn "$My_name: Index file '$idx_file' written\n", + " ==> but it has an extension I do not know how to handle <==\n"; + } + + next LINE; + } + if ( /^No file (.*?\.bbl)./ ) { + # Notice that the + my $bbl_file = clean_file_name($1); + warn "$My_name: Non-existent bbl file '$bbl_file'\n $_\n"; + $dependents{$bbl_file} = 0; + push @bbl_files, $bbl_file; + next LINE; + } + foreach my $pattern (@file_not_found) { + if ( /$pattern/ ) { + my $file = clean_file_name($1); + warn "$My_name: Missing input file: '$file' from line\n '$_'\n" + unless $silent; + $dependents{$file} = 0; + next LINE; + } + } + if ( /^File: ([^\s\[]*) Graphic file \(type / ) { + # First line of message from includegraphics/x + $dependents{$1} = 1; + next LINE; + } + # Now test for generic lines to ignore, only after special cases! + if ( /^File: / ) { + # Package sign-on line. 
Includegraphics/x also produces a line + # with this signature, but I've already handled it. + next LINE; + } + if ( /^Package: / ) { + # Package sign-on line + next LINE; + } + if (/^\! LaTeX Error: / ) { + next LINE; + } + if (/^No pages of output\./) { + warn "$My_name: Log file says no output from latex\n" + unless $silent; + next LINE; + } + INCLUDE_CANDIDATE: + while ( /\((.*$)/ ) { + # Filename found by + # '(', then filename, then terminator. + # Terminators: obvious candidates: ')': end of reading file + # '(': beginning of next file + # ' ': space is an obvious separator + # ' [': start of page: latex + # and pdflatex put a + # space before the '[' + # '[': start of config file + # in pdflatex, after + # basefilename. + # '{': some kind of grouping + # Problem: + # All or almost all special characters are allowed in + # filenames under some OS, notably UNIX. Luckily most cases + # are rare, if only because the special characters need + # escaping. BUT 2 important cases are characters that are + # natural punctuation + # Under MSWin, spaces are common (e.g., "C:\Program Files") + # Under VAX/VMS, '[' delimits directory names. This is + # tricky to handle. But I think few users use this OS + # anymore. + # + # Solution: use ' [', but not '[' as first try at delimiter. + # Then if candidate filename is of form 'name1[name2]', then + # try splitting it. If 'name1' and/or 'name2' exists, put + # it/them in list, else just put 'name1[name2]' in list. + # So form of filename is now: + # '(', + # then any number of characters that are NOT ')', '(', or '{' + # (these form the filename); + # then ' [', or ' (', or ')', or end-of-string. + # That fails for pdflatex + # In log file: + # '(' => start of reading of file, followed by filename + # ')' => end of reading of file + # '[' => start of page (normally preceeded by space) + # Remember: + # filename (on VAX/VMS) may include '[' and ']' (directory + # separators) + # filenames (on MS-Win) commonly include space. + + # First step: replace $_ by whole of line after the '(' + # Thus $_ is putative filename followed by other stuff. + $_ = $1; + if ( /^([^\(^\)^\{]*?)\s\[/ ) { + # Terminator: space then '[' + # Use *? in condition: to pick up first ' [' as terminator + # 'file [' should give good filename. + } + elsif ( /^([^\(^\)^\{]*)\s(?=\()/ ) { + # Terminator is ' (', but '(' isn't in matched string, + # so we keep the '(' ready for the next match + } + elsif ( /^([^\(^\)^\{]*)(\))/ ) { + # Terminator is ')' + } + elsif ( /^([^\(^\)^\{]*?)\s*\{/ ) { + # Terminator: arbitrary space then '{' + # Use *? in condition: to pick up first ' [' as terminator + # 'file [' should give good filename. + } + else { + #Terminator is end-of-string + } + $_ = $'; # Put $_ equal to the unmatched tail of string ' + my $include_candidate = $1; + $include_candidate =~ s/\s*$//; # Remove trailing space. + if ( $include_candidate eq "[]" ) { + # Part of overfull hbox message + next INCLUDE_CANDIDATE; + } + if ( $include_candidate =~ /^\\/ ) { + # Part of font message + next INCLUDE_CANDIDATE; + } + # Make list of new include files; sometimes more than one. + my @new_includes = ($include_candidate); + if ( $include_candidate =~ /^(.+)\[([^\]]+)\]$/ ) { + # Construct of form 'file1[file2]', as produced by pdflatex + if ( -e $1 ) { + # If the first component exists, we probably have the + # pdflatex form + @new_includes = ($1, $2); + } + else { + # We have something else. 
+ # So leave the original candidate in the list + } + } + INCLUDE_NAME: + foreach my $include_name (@new_includes) { + my ($base, $path, $ext) = fileparseB( $include_name ); + if ( ($path eq './') || ($path eq '.\\') ) { + $include_name = $base.$ext; + } + if ( $include_name !~ m'[/|\\]' ) { + # Filename does not include a path character + # High potential for misparsed line + $dependents{$include_name} = 2; + } else { + $dependents{$include_name} = 3; + } + if ( $ext eq '.bbl' ) { + warn "$My_name: Found input bbl file '$include_name'\n" + unless $silent; + push @bbl_files, $include_name; + } + } # INCLUDE_NAME + } # INCLUDE_CANDIDATE + } # LINE + close($log_file); + + # Default includes are always definitive: + foreach (@default_includes) { $dependents{$_} = 4; } + + ###print "New parse: \n"; + ###foreach (sort keys %dependents) { print " '$_': $dependents{$_}\n"; } + + my @misparsed = (); + my @missing = (); + my @not_found = (); +CANDIDATE: + foreach my $candidate (keys %dependents) { + my $code = $dependents{$candidate}; + if ( -e $candidate ) { + $dependents{$candidate} = 4; + } + elsif ($code == 1) { + # Graphics file that is supposed to have been read. + # Candidate name is as given in source file, not as path + # to actual file. + # We have already tested that file doesn't exist, as given. + # so use kpsewhich. + # If the file still is not found, assume non-existent; + my @kpse_result = kpsewhich( $candidate ); + if ($#kpse_result > -1) { + $dependents{$kpse_result[0]} = 4; + delete $dependents{$candidate}; + next CANDIDATE; + } + else { + push @not_found, $candidate; + } + } + elsif ($code == 2) { + # Candidate is from '(...' construct in log file, for input file + # which should include pathname if valid input file. + # Name does not have pathname-characteristic character (hence + # $code==2. + # Candidate file does not exist with given name + # Almost surely result of a misparsed line in log file. 
+ delete $dependents{$candidate}; + push @misparse, $candidate; + } + elsif ($code == 0) { + my ($base, $path, $ext) = fileparseA($candidate); + $ext =~ s/^\.//; + if ( ($ext eq '') && (-e "$path$base.tex") ) { + $dependents{"$path$base.tex"} = 4; + delete $dependents{$candidate}; + } + push @missing, $candidate; + } + } + + + if ( $diagnostics ) { + @misparse = uniqs( @misparse ); + @missing = uniqs( @missing ); + @not_found = uniqs( @not_found ); + my @dependents = sort( keys %dependents ); + + my $dependents = $#dependents + 1; + my $misparse = $#misparse + 1; + my $missing = $#missing + 1; + my $not_found = $#not_found + 1; + my $exist = $dependents - $not_found - $missing; + my $bbl = $#bbl_files + 1; + + print "$dependents dependent files detected, of which ", + "$exist exist, $not_found were not found,\n", + " and $missing appear not to exist.\n"; + print "Dependents:\n"; + foreach (@dependents) { print " $_\n"; } + if ($not_found > 0) { + print "Not found:\n"; + foreach (@not_found) { print " $_\n"; } + } + if ($missing > 0) { + print "Not existent:\n"; + foreach (@missing) { print " $_\n"; } + } + if ( $bbl > 0 ) { + print "Input bbl files:\n"; + foreach (@bbl_files) { print " $_\n"; } + } + + if ( $misparse > 0 ) { + print "$misparse\n"; + print "Apparent input files appearently from misunderstood lines in .log file:\n"; + foreach ( @misparse ) { print " $_\n"; } + } + } + + return 1; +} #END parse_logB + +#************************************************************ + +sub parse_aux { + #Usage: parse_aux( $aux_file, \@new_bib_files, \@new_aux_files ) + # Parse aux_file (recursively) for bib files. + # If can't open aux file, then + # Return 0 and leave @new_bib_files empty + # Else set @new_bib_files from information in the aux files + # And: + # Return 1 if no problems + # Return 2 with @new_bib_files empty if there are no \bibdata + # lines. + # Return 3 if I couldn't locate all the bib_files + # Set @new_aux_files to aux files parsed + + my $aux_file = $_[0]; + local $Pbib_files = $_[1]; + local $Paux_files = $_[2]; + + @$Pbib_files = (); + @$Paux_files = (); + + parse_aux1( $aux_file ); + if ($#{$Paux_files} < 0) { + return 0; + } + @$Pbib_files = uniqs( @$Pbib_files ); + + if ( $#{$Pbib_files} == -1 ) { + warn "$My_name: No .bib files listed in .aux file '$aux_file' \n", + return 2; + } + my $bibret = &find_file_list1( $Pbib_files, $Pbib_files, + '.bib', \@BIBINPUTS ); + @$Pbib_files = uniqs( @$Pbib_files ); + if ($bibret == 0) { + warn "$My_name: Found bibliography file(s) [@$Pbib_files]\n" + unless $silent; + } + else { + warn "$My_name: Failed to find one or more bibliography files ", + "in [@$Pbib_files]\n"; + if ($force_mode) { + warn "==== Force_mode is on, so I will continue. ", + "But there may be problems ===\n"; + } + else { + #$failure = -1; + #$failure_msg = 'Failed to find one or more bib files'; + #warn "$My_name: Failed to find one or more bib files\n"; + } + return 3; + } + return 1; +} #END parse_aux + +#************************************************************ + +sub parse_aux1 +# Parse single aux file for bib files. +# Usage: &parse_aux1( aux_file_name ) +# Append newly found bib_filenames in @$Pbib_files, already +# initialized/in use. +# Append aux_file_name to @$Paux_files if aux file opened +# Recursively check \@input aux files +# Return 1 if success in opening $aux_file_name and parsing it +# Return 0 if fail to open it +{ + my $aux_file = $_[0]; + my $aux_fh = new FileHandle; + if (! 
open($aux_fh, $aux_file) ) { + warn "$My_name: Couldn't find aux file '$aux_file'\n"; + return 0; + } + push @$Paux_files, $aux_file; +AUX_LINE: + while (<$aux_fh>) { + if ( /^\\bibdata\{(.*)\}/ ) { + # \\bibdata{comma_separated_list_of_bib_file_names} + # (Without the '.bib' extension) + push( @$Pbib_files, split /,/, $1 ); + } + elsif ( /^\\\@input\{(.*)\}/ ) { + # \\@input{next_aux_file_name} + &parse_aux1( $1 ); + } + } + close($aux_fh); + return 1; +} #END parse_aux1 + +#************************************************************ + +#************************************************************ +#************************************************************ +#************************************************************ + +# Manipulations of main file database: + +#************************************************************ + +sub fdb_get { + # Call: fdb_get(filename) + # Returns an array (time, size, md5) for the current state of the + # named file. + # For non-existent file, deletes entry in fdb_current, and returns (0,-1,0) + my $file = shift; + my ($new_time, $new_size) = get_time_size($file); + my @nofile = (0,-1,0); # What we use for initializing + # a new entry in fdb or flagging + # non-existent file + if ( $new_size < 0 ) { + delete $fdb_current{$file}; + return @nofile; + } + my $recalculate_md5 = 0; + if ( ! exists $fdb_current{$file} ) { + # Ensure we have a record. + $fdb_current{$file} = [@nofile]; + $recalculate_md5 = 1; + } + my $file_data = $fdb_current{$file}; + my ( $time, $size, $md5 ) = @$file_data; + if ( ($new_time != $time) || ($new_size != $size) ) { + # Only force recalculation of md5 if time or size changed + # Else we assume file is really unchanged. + $recalculate_md5 = 1; + } + if ($recalculate_md5) { + @$file_data = ( $new_time, $new_size, get_checksum_md5( $file ) ); + } + return @$file_data;; +} #END fdb_get + +#************************************************************ + +sub fdb_show { + # Displays contents of fdb + foreach my $file ( sort keys %fdb_current ) { + print "'$file': @{$fdb_current{$file}}\n"; + } +} #END fdb_show + +#************************************************************ +#************************************************************ +#************************************************************ + +# Routines for manipulating rule database + +#************************************************************ + +sub rdb_read { + # Call: rdb_read( $in_name ) + # Sets rule database from saved file, in format written by rdb_write. + # Returns -1 if file could not be read else number of errors. + # Thus return value on success is 0 + my $in_name = $_[0]; + + my $in_handle = new FileHandle; + $in_handle->open( $in_name, '<' ) + or return (); + my $errors = 0; + my $state = 0; # Outside a section + my $rule = ''; + my $run_time = 0; + my $source = ''; + my $dest = ''; + my $base = ''; + local %new_sources = (); # Hash: rule => { file=>[ time, size, md5, fromrule ] } + my $new_source = undef; # Reference to hash of sources for current rule +LINE: + while ( <$in_handle> ) { + # Remove leading and trailing white space. + s/^\s*//; + s/\s*$//; + # Ignore blank lines and comments + if ( /^$/ || /^#/ || /^%/ ) { next LINE;} + if ( /^\[\"([^\"]+)\"\]/ ) { + # Start of section + $rule = $1; +#?? 
print "--- Starting rule '$rule'\n"; + my $tail = $'; #' Single quote in comment tricks the parser in + # emacs from misparsing an isolated single quote + $run_time = 0; + $source = $dest = $base = ''; + if ( $tail =~ /^\s*(\S+)\s*$/ ) { + $run_time = $1; + } + elsif ( $tail =~ /^\s*(\S+)\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s*$/ ) { + $run_time = $1; + $source = $2; + $dest = $3; + $base = $4; + } + if ( rdb_rule_exists( $rule ) ) { + rdb_one_rule( $rule, + sub{ $$Ptest_kind = 1; + $$Prun_time = $run_time; + #??if ($source) { $$Psource = $source; } + #??if ($dest) { $$Pdest = $dest; } + #??if ($base) { $$Pbase = $base; } + } + ); + } + elsif ($rule =~ /^cusdep\s+(\S+)\s+(\S+)\s+(.+)$/ ) { + # Create custom dependency + my $fromext = $1; + my $toext = $2; + my $base = $3; + $source = "$base.$fromext"; + $dest = "$base.$toext"; + my $PAnew_cmd = ['do_cusdep', '']; + foreach my $dep ( @cus_dep_list ) { + my ($tryfromext,$trytoext,$must,$func_name) = split(' ',$dep); + if ( ($tryfromext eq $fromext) && ($trytoext eq $toext) ) { + $$PAnew_cmd[1] = $func_name; + } + } + rdb_create_rule( $rule, 'cusdep', '', $PAnew_cmd, 1, + $source, $dest, $base, 0, $run_time ); + } + elsif ( $rule =~ /^(makeindex|bibtex)\s*(.*)$/ ) { + my $rule_generic = $1; + if ( ! $source ) { + # If fdb_file was old-style (v. 1) + $source = $2; + my $path = ''; + my $ext = ''; + ($base, $path, $ext) = fileparseA( $source ); + $base = $path.$base; + if ($rule_generic eq 'makeindex') { + $dest = "$base.ind"; + } + elsif ($rule_generic eq 'bibtex') { + $dest = "$base.bbl"; + $source = "$base.aux"; + } + } + warn "$My_name: File-database '$in_name': setting rule '$rule'\n" + if $diagnostics; + my $cmd_type = 'external'; + my $ext_cmd = ${$rule_generic}; + warn " Rule kind = '$rule_generic'; ext_cmd = '$ext_cmd';\n", + " source = '$source'; dest = '$dest'; base = '$base';\n" + if $diagnostics; + rdb_create_rule( $rule, $cmd_type, $ext_cmd, '', 1, + $source, $dest, $base, 0, $run_time); + } + else { + warn "$My_name: In file-database '$in_name' rule '$rule'\n", + " is not in use in this session\n" + if $diagnostics; + $new_source = undef; + $state = 3; + next LINE; + } + $new_source = $new_sources{$rule} = {}; + $state = 1; #Reading a section + } + elsif ( /^\"([^\"]*)\"\s+(\S+)\s+(\S+)\s+(\S+)\s+\"([^\"]*)\"/ ) { + # Source file line + if ($state == 3) { + # The rule is not being currently used. + next LINE; + } + my $file = $1; + my $time = $2; + my $size = $3; + my $md5 = $4; + my $from_rule = $5; +#?? print " --- File '$file'\n"; + if ($state != 1) { + warn "$My_name: In file-database '$in_name' ", + "line $. is outside a section:\n '$_'\n"; + $errors++; + next LINE; + } + rdb_ensure_file( $rule, $file ); + rdb_set_file1( $rule, $file, $time, $size, $md5 ); + # Save the rest of the data, especially the from_fule until we know all + # the rules, otherwise the from_rule may not exist. + # Also we'll have a better chance of looping through files. + ${$new_source}{$file} = [ $time, $size, $md5, $from_rule ]; + } + elsif ($state == 0) { + # Outside a section. Nothing to do. + } + else { + warn "$My_name: In file-database '$in_name' ", + "line $. is of wrong format:\n '$_'\n"; + $errors++; + next LINE; + } + } + undef $in_handle; + # Set cus dependencies. + &rdb_set_dependentsA( keys %rule_db ); + +#?? Check from_rules exist. 
+ + return $errors; +} # END rdb_read + +#************************************************************ + +sub rdb_read_generatedB { + # Call: rdb_read_generatedB( $in_name, \@extra_generated, \@aux_files ) + # From rule database in saved file, in format written by rdb_write, + # finds the non-basic generated files that are to be deleted by a cleanup. + # Returns an array of these files, or an empty array if the file + # does not exist or cannot be opened. + my ($in_name, $Pgenerated, $Paux_files) = @_; + @$Pgenerated = (); + @$Paux_files = (); + + my $in_handle = new FileHandle; + $in_handle->open( $in_name, '<' ) + or return (); + my $rule = ''; + my $run_time = 0; + my $source = ''; + my $dest = ''; + my $base = ''; + my $ext = ''; + my $path = ''; + my $state = 0; # Outside a section +LINE: + while ( <$in_handle> ) { + # Remove leading and trailing white space. + s/^\s*//; + s/\s*$//; + # Ignore blank lines and comments + if ( /^$/ || /^#/ || /^%/ ) { next LINE;} + if ( /^\[\"([^\"]+)\"\]/ ) { + # Start of section + $rule = $1; + my $tail = $'; #' Single quote in comment tricks the parser in + # emacs from misparsing an isolated single quote + $run_time = 0; + $source = $dest = $base = ''; + if ( $tail =~ /^\s*(\S+)\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s+\"([^\"]*)\"\s*$/ ) { + $source = $2; + $dest = $3; + $base = $4; + } + else { next LINE; } + if ( $rule =~ /^makeindex/ ) { + push @$Pgenerated, $source, $dest, "$base.ilg"; + } + elsif ( $rule =~ /^bibtex/ ) { + push @$Pgenerated, $dest, "$base.blg"; + push @$Paux_files, $source; + } + $state = 1; #Reading a section + } + elsif ( /^\"([^\"]*)\"\s+(\S+)\s+(\S+)\s+(\S+)\s+\"([^\"]*)\"/ ) { + # Source file line + if ($state == 3) { + # The rule is not being currently used. + next LINE; + } + my $file = $1; + ($base, $path, $ext) = fileparseA( $file ); + if ( $ext eq '.aux' ) { push @$Paux_files, $file; } + } + elsif ($state == 0) { + # Outside a section. Nothing to do. + } + else { + warn "$My_name: In file-database '$in_name' ", + "line $. is of wrong format:\n '$_'\n"; + next LINE; + } + } # LINE + undef $in_handle; + +} # END rdb_read_generatedB + +#************************************************************ + +sub rdb_write { + # Call: rdb_write( $out_name ) + # Writes to the given file name the database of file and rule data + # accessible from the primary rules. + # Returns 1 on success, 0 if file couldn't be opened. + local $out_name = $_[0]; + local $out_handle = new FileHandle; + if ( ($out_name eq "") || ($out_name eq "-") ) { + # Open STDOUT + $out_handle->open( '>-' ); + } + else { + $out_handle->open( $out_name, '>' ); + } + if (!$out_handle) { return 0; } + + local %current_primaries = (); # Hash whose keys are primary rules + # needed, i.e., known latex-like rules which trigger + # circular dependencies + local @pre_primary = (); # Array of rules + local @post_primary = (); # Array of rules + local @one_time = (); # Array of rules + &rdb_classify_rules( \%possible_primaries, keys %requested_filerules ); + + print $out_handle "# Fdb version $fdb_ver\n"; + my @rules = sort( + rdb_accessible( + uniq1( keys %known_rules, keys %current_primaries ))); + rdb_for_some( + \@rules, + sub { print $out_handle "[\"$rule\"] $$Prun_time \"$$Psource\" \"$$Pdest\" \"$$Pbase\" \n"; }, + sub { print $out_handle " \"$file\" $$Ptime $$Psize $$Pmd5 \"$$Pfrom_rule\"\n"; }, + ); + undef $out_handle; + return 1; +} #END rdb_write + +#************************************************************ + +sub rdb_set_from_logB { + # Assume rule context. 
+ # This is intended to be applied only for a primary (LaTeX-like) rule + # Starting from the log_file, set current details for the current rule. + + # Rules should only be primary + if ( $$Pcmd_type ne 'primary' ) { + warn "\n$My_name: ==========$My_name: Probable BUG======= \n ", + " rdb_set_from_logB called to set files ", + "for non-primary rule '$rule'\n\n"; + return; + } + + +#?? # We'll prune this by all files determined to be needed for source files. +#?? my %unneeded_source = %$PHsource; + + # Parse log file to find relevant filenames + # Result in the following variables: + local %dependents = (); # Maps files to status + local @bbl_files = (); + local %idx_files = (); # Maps idx_file to (ind_file, base) + + # The following are also returned, but are global, to be used by caller + # $reference_changed, $bad_reference $bad_citation + + &parse_logB; + + IDX_FILE: + foreach my $idx_file ( keys %idx_files ) { + my ($ind_file, $ind_base) = @{$idx_files{$idx_file}}; + my $from_rule = "makeindex $idx_file"; + if ( ! rdb_rule_exists( $from_rule ) ){ + print "!!!===Creating rule '$from_rule': '$ind_file' from '$idx_file'\n" + if ($diagnostics); + rdb_create_rule( $from_rule, 'external', $makeindex, '', 1, + $idx_file, $ind_file, $ind_base, 1, 0); + foreach my $primary ( keys %primaries ) { + print " ===Source file '$ind_file' for '$primary'\n" + if ($diagnostics > -1); + rdb_ensure_file( $primary, $ind_file, $from_rule ); + } + } + if ( ! -e $ind_file ) { + # Failure was non-existence of makable file + # Leave failure issue to other rules. + $failure = 0; + } + } + + BBL_FILE: + foreach my $bbl_file ( uniqs( @bbl_files ) ) { + my ($bbl_base, $bbl_path, $bbl_ext) = fileparseA( $bbl_file ); + $bbl_base = $bbl_path.$bbl_base; + my @new_bib_files; + my @new_aux_files; + &parse_aux( "$bbl_base.aux", \@new_bib_files, \@new_aux_files ); + my $from_rule = "bibtex $bbl_base"; + if ( ! rdb_rule_exists( $from_rule ) ){ + print "!!!===Creating rule '$from_rule'\n" + if ($diagnostics); + rdb_create_rule( $from_rule, 'external', $bibtex, '', 1, + "$bbl_base.aux", $bbl_file, $bbl_base, 1, 0); + foreach my $source ( @new_bib_files, @new_aux_files ) { + print " ===Source file '$source'\n" + if ($diagnostics); + rdb_ensure_file( $from_rule, $source ); + } + foreach my $primary ( keys %primaries ) { + print " ===Source file '$bbl_file' for '$primary'\n" + if ($diagnostics); + rdb_ensure_file( $primary, $bbl_file, $from_rule ); + if ( ! -e $bbl_file ) { + # Failure was non-existence of makable file + # Leave failure issue to other rules. + $failure = 0; + } + } + } + } + +NEW_SOURCE: + foreach my $new_source (keys %dependents) { + foreach my $primary ( keys %primaries ) { + rdb_ensure_file( $primary, $new_source ); + } + } + + my @more_sources = &rdb_set_dependentsA( $rule ); + my $num_new = $#more_sources + 1; + foreach (@more_sources) { + $dependents{$_} = 4; + if ( ! -e $_ ) { + # Failure was non-existence of makable file + # Leave failure issue to other rules. + $failure = 0; + $$Pchanged = 1; # New files can be made. Ignore error. + } + } + if ($diagnostics) { + if ($num_new > 0 ) { + print "$num_new new source files for rule '$rule':\n"; + foreach (@more_sources) { print " '$_'\n"; } + } + else { + print "No new source files for rule '$rule':\n"; + } + } + + my @files_not_needed = (); + foreach (keys %$PHsource) { + if ( ! 
exists $dependents{$_} ) { + print "Removing no-longer-needed dependent '$_' from rule '$rule'\n" + if $diagnostics>-1; + push @files_not_needed, $_; + } + } + rdb_remove_files( $rule, @files_not_needed ); + +} # END rdb_set_from_logB + +#************************************************************ + +sub rdb_find_new_filesB { + # Call: rdb_find_new_filesB + # Assumes rule context for primary rule. + # Deal with files which were missing and for which a method + # of finding them has become available: + # (a) A newly available source file for a custom dependency. + # (b) When there was no extension, a file with appropriate + # extension + # (c) When there was no extension, and a newly available source + # file for a custom dependency can make it. + + my %new_includes = (); + +MISSING_FILE: + foreach my $missing ( keys %$PHsource ) { + next if ( $$PHsource{$missing} != 0 ); + my ($base, $path, $ext) = fileparseA( $missing ); + $ext =~ s/^\.//; + if ( -e "$missing.tex" ) { + $new_includes{"$missing.tex"} = 1; + } + if ( -e $missing ) { + $new_includes{$missing} = 1; + } + if ( $ext ne "" ) { + foreach my $dep (@cus_dep_list){ + my ($fromext,$toext) = split(' ',$dep); + if ( ( "$ext" eq "$toext" ) + && ( -e "$path$base.$fromext" ) + ) { + # Source file for the missing file exists + # So we have a real include file, and it will be made + # next time by rdb_set_dependents + $new_includes{$missing} = 1; + } + else { + # no point testing the $toext if the file doesn't exist. + } + next MISSING_FILE; + } + } + else { + # $_ doesn't exist, $_.tex doesn't exist, + # and $_ doesn't have an extension + foreach my $dep (@cus_dep_list){ + my ($fromext,$toext) = split(' ',$dep); + if ( -e "$path$base.$fromext" ) { + # Source file for the missing file exists + # So we have a real include file, and it will be made + # next time by &rdb__dependents + $new_includes{"$path$base.$toext"} = 1; +# next MISSING_FILE; + } + if ( -e "$path$base.$toext" ) { + # We've found the extension for the missing file, + # and the file exists + $new_includes{"$path$base.$toext"} = 1; +# next MISSING_FILE; + } + } + } + } # end MISSING_FILES + + # Sometimes bad line-breaks in log file (etc) create the + # impression of a missing file e.g., ./file, but with an incorrect + # extension. The above tests find the file with an extension, + # e.g., ./file.tex, but it is already in the list. So now I will + # remove files in the new_include list that are already in the + # include list. Also handle aliasing of file.tex and ./file.tex. + # For example, I once found: +# (./qcdbook.aux (./to-do.aux) (./ideas.aux) (./intro.aux) (./why.aux) (./basics +#.aux) (./classics.aux) + + my $found = 0; + foreach my $file (keys %new_includes) { + my $stripped = $file; + $stripped =~ s{^\./}{}; + if ( exists $PHsource{$file} ) { + delete $new_includes{$file}; + } + else { + $found ++; + rdb_ensure_file( $rule, $file ); + } + } + +## ?? Is this correct? I used to use @includes +# rdb_update_files_for_rule( keys %PHsources ); + if ( $diagnostics && ( $found > 0 ) ) { + warn "$My_name: Detected previously missing files:\n"; + foreach ( sort keys %new_includes ) { + warn " '$_'\n"; + } + } + return $found; +} # END rdb_find_new_filesB + +#************************************************************ + +sub rdb_update_files_for_rule { +#=========== APPEARS NOT TO BE USED! ========================= +# Usage: rdb_update_files_for_rule( source_files ...) +# Assume rule context. 
+# Update list of source files for current rule, treating properly cases +# where file didn't exist before run, etc + foreach my $file ( @_ ) { + if ( ! rdb_file_exists( $rule, $file ) ) { + # File that didn't appear in the source files for the run + # before. Two cases: (a) it was created during the run; + # (b) it existed before the run. + # If case (a), then the file was non-existent before the + # run, so we must now label it as non-existent, and + # we trigger a new run +#?? print "?? Adding '$file' to '$rule'\n"; + rdb_ensure_file( $rule, $file ); + my $file_time = get_mtime0( $file ); + if ( ($$Ptest_kind == 2) || ($$Ptest_kind == 3) ) { + # Test wrt destination time, but exclude files + # which appear to be generated (according to extension) + # Assume generated files up-to-date after last run. + # I.e., last run was valid. + my $ext = ext( $file ); + + if ( (! exists $generated_exts_all{$ext} ) + && ($file_time >= $dest_mtime) + ) { + # Only changes since the mtime of the destination matter, + # and only non-generated files count. + # Non-existent destination etc gives $dest_mtime=0 + # so this will automatically give out-of-date condition + # Flag out-of-date for a file by treating it as non-existent + rdb_set_file1( $rule, $file, 0, -1, 0); + } + } + elsif ($file_time >= $$Prun_time ) { + # File generated during run. So treat as non-existent at beginning + rdb_set_file1( $rule, $file, 0, -1, 0); + $$Pout_of_date = 1; + } + # Else default of current state of file is correct. + } # END not previously existent file + } # END file +} # END rdb_update_files_for_rule + +#************************************************************ + +sub rdb_set_dependentsA { + # Call rdb_set_dependentsA( rules ...) + # Returns array (sorted), of new source files. + local @new_sources = (); + rdb_recurseA( [@_], 0, \&rdb_one_depA ); + &rdb_make_links; + return uniqs( @new_sources ); +} #END rdb_set_dependentsA + +#************************************************************ + +sub rdb_one_depA { + # Helper for finding dependencies. One case, $rule and $file given + # Assume file (and rule) context for DESTINATION file. + local $new_dest = $file; + my ($base_name, $path, $toext) = fileparseA( $new_dest ); + $base_name = $path.$base_name; + $toext =~ s/^\.//; +DEP: + foreach my $dep ( @cus_dep_list ) { + my ($fromext,$proptoext,$must,$func_name) = split(' ',$dep); + if ( $toext eq $proptoext ) { + my $source = "$base_name.$fromext"; + # Found match of rule + if ($diagnostics) { + print "Found cusdep: $source to make $rule:$new_dest ====\n"; + } + if ( -e $source ) { + $$Pfrom_rule = "cusdep $fromext $toext $base_name"; +#?? print "?? Ensuring rule for '$$Pfrom_rule'\n"; + local @PAnew_cmd = ( 'do_cusdep', $func_name ); + if ( !-e $new_dest ) { + push @new_sources, $new_dest; + } + if (! rdb_rule_exists( $$Pfrom_rule ) ) { + rdb_create_rule( $$Pfrom_rule, 'cusdep', '', \@PAnew_cmd, 3, + $source, $new_dest, $base_name, 0 ); + } + else { + rdb_one_rule( + $$Pfrom_rule, + sub{ @$PAint_cmd = @PAnew_cmd; $$Pdest = $new_dest;} + ); + } + return; + } + else { + # Source file does not exist + if ( !$force_mode && ( $must != 0 ) ) { + # But it is required that the source exist ($must !=0) + $failure = 1; + $failure_msg = "File '$base_name.$fromext' does not exist ". + "to build '$base_name.$toext'"; + return; + } + elsif ( $$Pfrom_rule =~ /^cusdep $fromext $toext / ) { + # Source file does not exist, destination has the rule set. 
+ # So turn the from_rule off + $$Pfrom_rule = ''; + } + else { + } + } + } + elsif ( ($toext eq '') && (! -e $file ) ) { + # Empty extension and non-existent destination + # This normally results from \includegraphics{A} + # without graphics extension for file, when file does + # not exist. So we will try to find something to make it. + my $source = "$base_name.$fromext"; + if ( -e $source ) { + $new_dest = "$base_name.$proptoext"; + my $from_rule = "cusdep $fromext $toext $base_name"; + push @new_sources, $new_dest; + print "Ensuring rule for '$from_rule', to make '$new_dest'\n" + if $diagnostics > -1; + local @PAnew_cmd = ( 'do_cusdep', $func_name ); + if (! rdb_rule_exists( $from_rule ) ) { + rdb_create_rule( $from_rule, 'cusdep', '', \@PAnew_cmd, 3, + $source, $new_dest, $base_name, 0); + } + else { + rdb_one_rule( + $$Pfrom_rule, + sub{ @$PAint_cmd = @PAnew_cmd; $$Pdest = $new_dest;} + ); + } + rdb_ensure_file( $rule, $new_dest, $from_rule ); + return; + } + } # End of Rule found + } # End DEP +} #END rdb_one_depA + +#************************************************************ + +sub rdb_list { + # Call: rdb_list() + # List rules and their source files + print "===Rules:\n"; + local $count_rules = 0; + my @accessible_all = rdb_accessible( keys %requested_filerules ); + rdb_for_some( + \@accessible_all, + sub{ $count_rules++; + print "Rule '$rule' depends on:\n"; + }, + sub{ print " '$file'\n"; } + ); + if ($count_rules <= 0) { + print " ---No rules defined\n"; + } +} #END rdb_list + +#************************************************************ + +sub rdb_show { + # Call: rdb_show() + # Displays contents of rule data base. + # Side effect: Exercises access routines! + print "===Rules:\n"; + local $count_rules = 0; + rdb_for_all( + sub{ $count_rules++; + my @int_cmd = @$PAint_cmd; + foreach (@int_cmd) { + if ( !defined($_) ) { $_='undef';} + } + print " [$rule]: '$$Pcmd_type' '$$Pext_cmd' '@int_cmd' $$Ptest_kind ", + "'$$Psource' '$$Pdest' '$$Pbase' $$Pout_of_date $$Pout_of_date_user\n"; }, + sub{ print " '$file': $$Ptime $$Psize $$Pmd5 '$$Pfrom_rule'\n"; } + ); + if ($count_rules <= 0) { + print " ---No rules defined\n"; + } +} #END rdb_show + +#************************************************************ + +sub rdb_accessible { + # Call: rdb_accessible( rule, ...) + # Returns array of rules accessible from the given rules + local @accessible = (); + rdb_recurseA( [@_], sub{ push @accessible, $rule; } ); + return @accessible; +} #END rdb_accessible + +#************************************************************ + +sub rdb_possible_primaries { + # Returns array of possible primaries + my @rules = (); + foreach my $rule ( keys %known_rules ) { + if ( $known_rules{$rule} eq 'primary') { + push @rules, $rule; + } + } + return @rules; +} #END rdb_possible_primaries + +#************************************************************ +#************************************************************ +#************************************************************ + +# Routines for makes. NEW VERSIONS ?? + +#????????Debugging routines: +sub R1 {print "===START $rule\n"} +sub R2 {print "===END $rule\n"} +sub F1 {print " ---START $file\n"} +sub F2 {print " ---END $file\n"} +#************************************************************ + +sub rdb_makeB { + # Call: rdb_makeB( target, ... ) + # Makes the targets and prerequisites. + # Leaves one-time rules to last. + # Does appropriate repeated makes to resolve dependency loops + + # Returns 0 on success, nonzero on failure. 
+
+    # General method: Find all accessible rules, then repeatedly make
+    # them until all accessible rules are up-to-date and the source
+    # files are unchanged between runs.  On termination, all
+    # accessible rules have stable source files.
+    #
+    # One-time rules are view and print rules that should not be
+    # repeated in an algorithm that repeats rules until the source
+    # files are stable.  It is the calling routine's responsibility to
+    # arrange to call them, or to use them here with caution.
+    #
+    # Note that an update-viewer rule need not be considered
+    # one-time.  It can be legitimately applied every time the viewed
+    # file changes.
+    #
+    # Note also that the criterion of stability is to be applied to
+    # source files, not to output files.  Repeated application of a
+    # rule to IDENTICALLY CONSTANT source files may produce different
+    # output files.  This may be for a trivial reason (e.g., the
+    # output file contains a time stamp, as in the header comments for
+    # a typical postscript file), or for a non-trivial reason (e.g., a
+    # stochastic algorithm, as in abcm2ps).
+    #
+    # This caused me some actual trouble.  In general, circular
+    # dependencies produce non-termination, and the following
+    # situation is an example of a generic situation where certain
+    # rules must be obeyed in order to obtain proper results:
+    #    1.  A/the latex source file contains specifications for
+    #        certain postprocessing operations.  Standard (pdf)latex
+    #        already has this, for indexing and bibliography.
+    #    2.  In the case in point that caused me trouble, the
+    #        specification was for musical tunes that were contained
+    #        in external source files not directly input to
+    #        (pdf)latex.  But in the original version, there was a
+    #        style file (abc.sty) that caused latex itself to call
+    #        abcm2ps to make .eps files for each tune that were to be
+    #        read in on the next run of latex.
+    #    3.  Thus the specification can cause a non-terminating loop
+    #        for latexmk, because the output files of abcm2ps changed
+    #        even with identical input.
+    #    4.  The solution was to
+    #        a. Use a style file abc_get.sty that simply wrote the
+    #           specification on the tunes to the .aux file in a
+    #           completely deterministic fashion.
+    #        b. Instead of latex, use a script abclatex.pl that runs
+    #           latex and then extracts the abc contents for each tune
+    #           from the source abc file.  This is also
+    #           deterministic.
+    #        c. Use a cusdep rule in latexmk to convert the tune abc
+    #           files to eps.  This is non-deterministic, but only
+    #           gets called when the (deterministic) source file
+    #           changes.
+    #    This solves the problem.  Latexmk works.  Also, it is no
+    #    longer necessary to enable write18 in latex, and multiple
+    #    unnecessary runs of abcm2ps are no longer used.
+    #
+    # The order of testing and applying rules is chosen by the
+    # following heuristics:
+    #    1.  Both latex and pdflatex may be used, but the resulting
+    #        aux files etc may not be completely identical.  Define
+    #        latex and pdflatex as primary rules.  Apply the general
+    #        method of repeated circulating through all rules until
+    #        the source files are stable for each primary rule
+    #        separately.  Naturally the rules are all accessible
+    #        rules, but excluding primary rules except for the current
+    #        primary.
+    #    2.  Assume that the primary rules are relatively
+    #        time-consuming, so that unnecessary passes through them
+    #        to check stability of the source files should be avoided.
+    #    3.
Assume that although circular dependencies exist, the + # rules can nevertheless be thought of as basically + # non-circular, and that many rules are strictly or + # normally non-circular. In particular cusdep rules are + # typically non-circular (e.g., fig2eps), as are normal + # output processing rules like dvi2ps. + # 4. The order for the non-circular approximation is + # determined by applying the assumption that an output file + # from one rule that is read in for an earlier stage is + # unchanged. + # HOWEVER, at a first attempt, the ordering is not needed. It + # only gives an optimization + # 5. (Note that these assumptions could be violated, e.g., if + # $dvips is arranged not only to do the basic dvips + # command, but also to extract information from the ps file + # and feed it back to an input file for (pdf)latex.) + # 6. Nevertheless, the overall algorithm should allow + # circularities. Then the general criterion of stability + # of source files covers the general case, and also + # robustly handles the case that the USER changes source + # files during a run. This is particularly important in + # -pvc mode, given that a full make on a large document can + # be quite lengthy in time, and moreover that a user + # naturally wishes to make corrections in response to + # errors, particularly latex errors, and have them apply + # right away. + # This leads to the following approach: + # 1. Classify accessible rules as: primary, pre-primary + # (typically cusdep, bibtex, makeindex, etc), post-primary + # (typically dvips, etc), and one-time + # 2. Then stratify the rules into an order of application that + # corresponds to the basic feedforward structure, with the + # exclusion of one-time rules. + # 3. Always require that one-time rules are among the + # explicitly requested rules, i.e., the last to be applied, + # were we to apply them. Anything else would not match the + # idea of a one-time rule. + # 4. Then work as follows: + # a. Loop over primaries + # b. For each primary, examine each pre-primary rule and + # apply if needed, then the primary rule and then each + # post-primary rule. The ordering of the pre-primary + # and post-primary rules was found in step 2. + # BUT applying the ordering is not essential + # c. Any time that a pre-primary or primary rule is + # applied, loop back to the beginning of step b. This + # ensures that bibtex etc are applied before rerunning + # (pdf)latex, and also covers changing source files, and + # gives priority to quick pre-primary rules for changing + # source files against slow reruns of latex. + # d. Then apply post-primary rules in order, but not + # looping back after each rule. This non-looping back + # is because the rules are normally feed-forward only. + # BUT applying the ordering is not essential + # e. But after completing post-primary rules do loop back + # to b if any rules were applied. This covers exotic + # circular dependence (and as a byproduct, changing + # source files). + # f. On each case of looping back to b, re-evaluate the + # dependence setup to allow for the effect of changing + # source files. 
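+    # As a concrete illustration of the classification in step 1 (the file
+    # and rule names here are hypothetical): for a document run through
+    # latex, bibtex, makeindex and dvips, the classification might be
+    #     pre-primary:  'bibtex thesis', 'makeindex thesis.idx', cusdep rules
+    #     primary:      'latex'
+    #     post-primary: 'dvips'
+    #     one-time:     'view'
+    # with the view rule left to the end, as described in step 3.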
+ # + + local @requested_targets = @_; + local %current_primaries = (); # Hash whose keys are primary rules + # needed, i.e., known latex-like rules which trigger + # circular dependencies + local @pre_primary = (); # Array of rules + local @post_primary = (); # Array of rules + local @one_time = (); # Array of rules + + + # For diagnostics on changed files, etc: + local @changed = (); + local @disappeared = (); + local @no_dest = (); # Non-existent destination files + local @rules_to_apply = (); + + &rdb_classify_rules( \%possible_primaries, @requested_targets ); + + local %pass = (); + local $failure = 0; # General accumulated error flag + local $runs = 0; + local $too_many_runs = 0; + local %rules_applied = (); + my $retry_msg = 0; # Did I earlier say I was going to attempt + # another pass after a failure? + PRIMARY: + foreach my $primary (keys %current_primaries ) { + foreach my $rule (keys %rule_db) { + $pass{$rule} = 0; + } + PASS: + while (1==1) { + $runs = 0; + my $previous_failure = $failure; + $failure = 0; + local $newrule_nofile = 0; # Flags whether rule created for + # making currently non-existent file, which + # could become a needed source file for a run + # and therefore undo an error condition + if ($diagnostics) { + print "MakeB: doing pre_primary and primary...\n"; + } + rdb_for_some( [@pre_primary, $primary], \&rdb_makeB1 ); + if ( ($runs > 0) && ! $too_many_runs ) { + $retry_msg = 0; + if ( $failure && $newrule_nofile ) { + $retry_msg = 1; + print "$My_name: Error on run, but found possibility to ", + "make new source files\n"; + next PASS; + } + elsif ( ! $failure ) { + next PASS; + } + } + elsif ($runs == 0) { + # $failure not set on this pass, so use value from previous pass: + $failure = $previous_failure; + if ($retry_msg) { + print "But in fact no new files made\n"; + } + } + if ($failure && !$force_mode ) { last PASS; } + if ($diagnostics) { + print "MakeB: doing post_primary...\n"; + } + rdb_for_some( [@post_primary], \&rdb_makeB1 ); + if ($failure) { last PASS; } + if ($runs > 0) { next PASS; } + # Get here if nothing was run. + last PASS; + } + continue { + # Re-evaluate rule classification and accessibility, + # but do not change primaries. + &rdb_classify_rules( \%current_primaries, @requested_targets ); + &rdb_make_links; + } + } + rdb_for_some( [@one_time], \&rdb_makeB1 ); + rdb_write( $fdb_file ); + + if (! $silent) { + # Diagnose of the runs + if ( $#{keys %rules_applied } > -1 ) { + print "$My_name: $runs runs. Rules applied:\n"; + foreach (sort keys %rules_applied) { + print " '$_'\n"; + } + } + elsif ($failure && $force_mode) { + print "$My_name: Errors, in force_mode: so I tried finishing targets\n"; + } + elsif ($failure) { + print "$My_name: Errors, so I did not complete making targets\n"; + } + else { + local @dests = (); + rdb_for_some( [@_], sub{ push @dests, $$Pdest if ($$Pdest); } ); + print "$My_name: All targets (@dests) are up-to-date\n"; + } + } + return $failure; +} #END rdb_makeB + +#------------------- + +sub rdb_makeB1 { + # Call: rdb_makeB1 + # Helper routine for rdb_makeB. + # Carries out make at level of given rule (all data available). + # Assumes contexts for recursion, make, and rule, and + # assumes that source files for the rule are to be considered + # up-to-date. + if ($diagnostics) { print " MakeB1 $rule\n"; } + if ($failure & ! 
$force_mode) {return;} + &rdb_clear_change_record; + &rdb_flag_changes_here; +# if ($diagnostics>-1) { print " MakeB1.1 $rule $$Pout_of_date\n"; } + + my $return = 0; # Return code from called routine +#?? print "makeB1: Trying '$rule' for '$$Pdest': "; + if (!$$Pout_of_date) { +#?? if ( ($$Pcmd_type eq 'primary') && (! $silent) ) { +# print "Rule '$rule' up to date\n"; +# } + return; + } + if ($diagnostics) { print " remake\n"; } + if (!$silent) { + print "$My_name: applying rule '$rule'...\n"; + &rdb_diagnose_changes( "Rule $rule: "); + } +##????????????????????????????????????: variable rules_applied not used + $rules_applied{$rule} = 1; + $runs++; +#?? print "$rule: $$Pcmd_type\n"; + + # We are applying the rule, so its source file state for when it + # was last made is as of now: + # ??IS IT CORRECT TO DO NOTHING IN CURRENT VERSION? + + # The actual run + $return = 0; + # Rule may have been created since last run: + if ( ! defined $pass{$rule} ) {$pass{$rule} = 0; } + if ( $pass{$rule} ge $max_repeat ) { + # Avoid infinite loop by having a maximum repeat count + # Getting here represents some kind of weird error. + warn "$My_name: Maximum runs of $rule reached ", + "without getting stable files\n"; + $too_many_runs = 1; + $failure = 1; + $failure_msg = "'$rule' needed too many passes"; + return; + } + $pass{$rule}++; + warn_running( "Run number $pass{$rule} of rule '$rule'" ); + if ($$Pcmd_type eq 'primary' ) { + $return = &rdb_primary_run; + } + else { $return = &rdb_run1; } + if ($$Pchanged) { + $newrule_nofile = 1; + $return = 0; + } + elsif ( $$Pdest && ( !-e $$Pdest ) && (! $failure) ){ + # If there is a destination to make, but for some reason + # it did not get made, then make sure a failure gets reported. + # But if the failure has already been reported, there's no need + # to report here, since that would give a generic error + # message instead of a specific one. + +## ??? 1 Sep. 2008, for cusdep no-file-exists issue + if ( ( $$Pcmd_type eq 'cusdep') && $$Psource && (! -e $$Psource) ) { + # However, if the rule is a custom dependency, this is not by + # itself an error, if also the source file does not exist. In + # that case, we may have the situation that (1) the dest file is no + # longer needed by the tex file, and (2) therefore the user + # has deleted the source and dest files. After the next + # latex run and the consequent analysis of the log file, the + # cusdep rule will no longer be needed, and will be removed. + + # So in this case, do NOT report an error + $$Pout_of_date = 0; + } + else { + $failure = 1; + $failure_msg = "'$rule' did not make '$$Pdest'"; + } + } + if ($return != 0) {$failure = 1;} +} #END rdb_makeB1 + +#************************************************************ + +sub rdb_submakeB { + # Call: rdb_submakeB + # Makes all the source files for a given rule. + # Assumes contexts for recursion, for make, and rule. + %visited = %visited_at_rule_start; + local $failure = 0; # Error flag + my @v = keys %visited; +#?? print "---submakeB $rule. 
@v \n"; + rdb_do_files( sub{ rdb_recurse_rule( $$Pfrom_rule, 0,0,0, \&rdb_makeB1 ) } ); + return $failure; +} #END rdb_submakeB + +#************************************************************ + + +sub rdb_classify_rules { + # Usage: rdb_classify_rules( \%allowed_primaries, requested targets ) + # Assume the following variables are available (global or local): + # Input: + # @requested_targets # Set to target rules + + # Output: + # %current_primaries # Keys are actual primaries + # @pre_primary # Array of rules + # @post_primary # Array of rules + # @one_time # Array of rules + # @pre_primary and @post_primary are in natural order of application. + + local $P_allowed_primaries = shift; + local @requested_targets = @_; + local $state = 0; # Post-primary + local @classify_stack = (); + + %current_primaries = (); + @pre_primary = (); + @post_primary = (); + @one_time = (); + + rdb_recurseA( \@requested_targets, \&rdb_classify1, 0,0, \&rdb_classify2 ); + + # Reverse, as tendency is to find last rules first. + @pre_primary = reverse @pre_primary; + @post_primary = reverse @post_primary; + + if ($diagnostics) { + print "Rule classification: \n"; + if ($#requested_targets < 0) { + print " No requested rules\n"; + } + else { + print " Requested rules:\n"; + foreach ( @requested_targets ) { print " $_\n"; } + } + if ($#pre_primary < 0) { + print " No pre-primaries\n"; + } + else { + print " Pre-primaries:\n"; + foreach (@pre_primary) { print " $_\n"; } + } + print " Primaries:\n"; + foreach (keys %current_primaries) { print " $_\n"; } + if ($#post_primary < 0) { + print " No post-primaries\n"; + } + else { + print " Post-primaries:\n"; + foreach (@post_primary) { print " $_\n"; } + } + if ($#one_time < 0) { + print " No one_time rules\n"; + } + else { + print " One_time rules:\n"; + foreach ( @one_time ) { print " $_\n"; } + } + } #end diagnostics +} #END rdb_classify_rules + +#------------------- + +sub rdb_classify1 { + # Helper routine for rdb_classify_rules + # Applied as rule_act1 in recursion over rules + # Assumes rule context, and local variables from rdb_classify_rules +# print "=========== '$rule' $depth ========== \n"; + push @classify_stack, [$state]; + if ( exists $possible_one_time{$rule} ) { + # Normally, we will have already extracted the one_time rules, + # and they will never be accessed here. But just in case of + # problems or generalizations, we will cover all possibilities: + if ($depth > 1) { + warn "ONE TIME rule not at outer level '$rule'\n"; + } + push @one_time, $rule; + } + elsif ($state == 0) { + if ( exists ${$P_allowed_primaries}{$rule} ) { + $state = 1; # In primary rule + $current_primaries{ $rule } = 1; + } + else { + push @post_primary, $rule; + } + } + else { + $state = 2; # in post-primary rule + push @pre_primary, $rule; + } +} #END rdb_classify1 + +#------------------- + +sub rdb_classify2 { + # Helper routine for rdb_classify_rules + # Applied as rule_act2 in recursion over rules + # Assumes rule context + ($state) = @{ pop @classify_stack }; +} #END rdb_classify2 + +#************************************************************ + + +sub rdb_run1 { + # Assumes contexts for: rule. + # Unconditionally apply the rule + # Returns return code from applying the rule. + # Otherwise: 0 on other kind of success, -1 on error. 
+ + # Source file data, by definition, correspond to the file state just before + # the latest run, and the run_time to the time just before the run: + &rdb_update_filesA; + $$Prun_time = time; + $$Pchanged = 0; # No special changes in files + + # Return values for external command: + my $return = 0; + + # Find any internal command + my @int_args = @$PAint_cmd; + my $int_cmd = shift @int_args; + my @int_args_for_printing = @int_args; + foreach (@int_args_for_printing) { + if ( ! defined $_ ) { $_ = 'undef'; } + } + if ($int_cmd) { + print "For rule '$rule', running '\&$int_cmd( @int_args_for_printing )' ...\n"; + $return = &$int_cmd( @int_args ); + } + elsif ($$Pext_cmd) { + $return = &rdb_ext_cmd; + } + else { + warn "$My_name: Either a bug OR a configuration error:\n", + " Need to implement the command for '$rule'\n"; + &traceback(); + $return = -1; + } + if ( $rule =~ /^bibtex/ ) { + my $retcode = &check_bibtex_log($$Pbase); + if ($retcode == 3) { + push @warnings, + "Could not open bibtex log file for '$$Pbase'"; + } + elsif ($retcode == 2) { + push @warnings, "Bibtex errors for '$$Pbase'"; + } + elsif ($retcode == 1) { + push @warnings, "Bibtex warnings for '$$Pbase'"; + } + } + + $updated = 1; + if ($$Ptest_kind == 3) { + # We are time-criterion first time only. Now switch to + # file-change criterion + $$Ptest_kind = 1; + } + $$Pout_of_date = $$Pout_of_date_user = 0; + return $return; +} # END rdb_run1 + +#----------------- + +sub rdb_ext_cmd { + # Call: rdb_ext_cmd + # Assumes rule context. Runs external command with substitutions. + # Uses defaults for the substitutions. See rdb_ext_cmd1. + return rdb_ext_cmd1(); +} #END rdb_ext_cmd + +#----------------- + +sub rdb_ext_cmd1 { + # Call: rdb_ext_cmd1( options, source, dest, base ) or rdb_ext_cmd1() or ... + # Assumes rule context. Returns command with substitutions. + # Null arguments or unprovided arguments => use defaults. + # for %S=source, %D=dest, %B=base, %R=root=base for latex, %O='', %T=texfile + my ($options, $source, $dest, $base ) = @_; + # Apply defaults + $options ||= ''; + $source ||= $$Psource; + $dest ||= $$Pdest; + $base ||= $$Pbase; + + my $ext_cmd = $$Pext_cmd; + + #Set character to surround filenames: + my $q = $quote_filenames ? '"' : ''; + foreach ($ext_cmd) { + s/%O/$options/g; + s/%R/$q$root_filename$q/g; + s/%B/$q$base$q/g; + s/%T/$q$texfile_name$q/g; + s/%S/$q$source$q/g; + s/%D/$q$dest$q/g; + } + # print "quote is '$q'; ext_cmd = '$ext_cmd'\n"; + my ($pid, $return) = &Run_msg($ext_cmd); + return $return; +} #END rdb_ext_cmd1 + +#----------------- + +sub rdb_primary_run { +#?? See multipass_run in previous version Aug 2007 for issues + # Call: rdb_primary_run + # Assumes contexts for: recursion, make, & rule. + # Assumes (a) the rule is a primary, + # (b) a run has to be made, + # (c) source files have been made. + # This routine carries out the run of the rule unconditionally, + # and then parses log file etc. + my $return = 0; + + my $return_latex = &rdb_run1; + + ######### Analyze results of run: + if ( ! -e "$root_filename.log" ) { + $failure = 1; + $failure_msg = "(Pdf)LaTeX failed to generate a log file"; + return -1; + } + ####### NOT ANY MORE! Capture any changes in source file status before we + # check for errors in the latex run + + # Find current set of source files: + &rdb_set_from_logB; + + # For each file of the kind made by epstopdf.sty during a run, + # if the file has changed during a run, then the new version of + # the file will have been read during the run. 
Unlike the usual + # case, we will need to redo the primary run because of the + # change of this file during the run. Therefore set the file as + # up-to-date: + rdb_do_files( sub { if ($$Pcorrect_after_primary) {&rdb_update1;} } ); + + # There may be new source files, and the run may have caused + # circular-dependency files to be changed. And the regular + # source files may have been updated during a lengthy run of + # latex. So redo the makes for sources of the current rule: + my $submake_return = &rdb_submakeB; + &rdb_clear_change_record; + &rdb_flag_changes_here; + $updated = 1; # Flag that some dependent file has been remade + # Fix the state of the files as of now: this will solve the + # problem of latex and pdflatex interfering with each other, + # at the expense of some non-optimality + #?? Check this is correct: + &rdb_update_filesA; + if ( $diagnostics ) { + print "$My_name: Rules after run: \n"; + rdb_show(); + } + + $return = $return_latex; + if ($return_latex && $$Pout_of_date_user) { + print "Error in (pdf)LaTeX, but change of user file(s), ", + "so ignore error & provoke rerun\n" + if (! $silent); + $return = 0; + } + + # Summarize issues that may have escaped notice: + my @warnings = (); + if ($bad_reference) { + push @warnings, "Latex could not resolve all references"; + } + if ($bad_citation) { + push @warnings, "Latex could not resolve all citations"; + } + if ($#warnings > 0) { + show_array( "$My_name: Summary of warnings:", @warnings ); + } + return $return; +} #END rdb_primary_run + +#************************************************************ + +sub rdb_clear_change_record { + @changed = (); + @disappeared = (); + @no_dest = (); + @rules_to_apply = (); +#??????????????? $failure = 0; +##????????????????????????????????????: variable rules_applied not used + $rules_applied = 0; +} #END rdb_clear_change_record + +#************************************************************ + +sub rdb_flag_changes_here { + # Flag changes in current rule. + # Assumes rule context. + local $dest_mtime = 0; + $dest_mtime = get_mtime($$Pdest) if ($$Pdest); + rdb_do_files( \&rdb_file_change1); + if ( $$Pdest && (! -e $$Pdest) ) { +## ??? 1 Sep. 2008, for cusdep no-file-exists issue + if ( ( $$Pcmd_type eq 'cusdep') && $$Psource && (! -e $$Psource) ) { + # However, if the rule is a custom dependency, this is not by + # itself an error, if also the source file does not exist. In + # that case, we may have the situation that (1) the dest file is no + # longer needed by the tex file, and (2) therefore the user + # has deleted the source and dest files. After the next + # latex run and the consequent analysis of the log file, the + # cusdep rule will no longer be needed, and will be removed. + + # So in this case, do NOT report an error + } + else { + $$Pout_of_date = 1; + push @no_dest, $$Pdest; + } + } + if ($$Pout_of_date) { + push @rules_to_apply, $rule; + } +#?? print "======== flag: $rule $$Pout_of_date ==========\n"; +} #END rdb_flag_changes_here + +#************************************************************ + +sub rdb_file_change1 { + # Call: &rdb_file_change1 + # Assumes rule and file context. Assumes $dest_mtime set. + # Flag whether $file in $rule has changed or disappeared. + # Set rule's make flag if there's a change. + my ($new_time, $new_size, $new_md5) = fdb_get($file); +#?? print "FC1 '$rule':$file $$Pout_of_date TK=$$Ptest_kind\n"; +#?? print " OLD $$Ptime, $$Psize, $$Pmd5\n", +#?? 
" New $new_time, $new_size, $new_md5\n"; + my $ext = ext( $file ); + if ( ($new_size < 0) && ($$Psize >= 0) ) { + print "Disappeared '$file' in '$rule'\n"; + push @disappeared, $file; + # No reaction is good. + #$$Pout_of_date = 1; + # ??? 1 Sep. 2008: I do NOT think so, for cusdep no-file-exists issue + $$Pout_of_date = 1; + return; + } + if ( ($new_size < 0) && ($$Psize < 0) ) { + return; + } + if ( ($new_size != $$Psize) || ($new_md5 ne $$Pmd5) ) { +#?? print "FC1: changed $file: ($new_size != $$Psize) $new_md5 ne $$Pmd5)\n"; + push @changed, $file; + $$Pout_of_date = 1; + if ( ! exists $generated_exts_all{$ext} ) { + $$Pout_of_date_user = 1; + } + } + if ( ( ($$Ptest_kind == 2) || ($$Ptest_kind == 3) ) + && (! exists $generated_exts_all{$ext} ) + && ( $new_time > $dest_mtime ) + ) { +#?? print "FC1: changed $file: ($new_time > $dest_mtime)\n"; + push @changed, $file; + $$Pout_of_date = $$Pout_of_date_user = 1; + } +} #END rdb_file_change1 + +#************************************************************ + +sub rdb_count_changes { + return $#changed + $#disappeared + $#no_dest + $#rules_to_apply + 4; +} #END rdb_count_changes + +#************************************************************ + +sub rdb_diagnose_changes { + # Call: rdb_diagnose_changes or rdb_diagnose_changes( heading ) + # List changes on STDERR + # Precede the message by the optional heading, else by "$My_name: " + my $heading = defined($_[0]) ? $_[0] : "$My_name: "; + + if ( &rdb_count_changes == 0 ) { + warn "${heading}No changes\n"; + return; + } + warn "${heading}Changes:\n"; + if ( $#changed >= 0 ) { + warn " Changed files, or newly in use since previous run(s):\n"; + foreach (uniqs(@changed)) { warn " '$_'\n"; } + } + if ( $#disappeared >= 0 ) { + warn " No-longer-existing files:\n"; + foreach (uniqs(@disappeared)) { warn " '$_'\n"; } + } + if ( $#no_dest >= 0 ) { + warn " Non-existent destination files:\n"; + foreach (uniqs(@no_dest)) { warn " '$_'\n"; } + } + if ( $#rules_to_apply >= 0 ) { + warn " Rules to apply:\n"; + foreach (uniqs(@rules_to_apply)) { warn " '$_'\n"; } + } +} #END rdb_diagnose_changes + + +#************************************************************ +#************************************************************ +#************************************************************ +#************************************************************ + +#************************************************************ +#************************************************************ +#************************************************************ +#************************************************************ + +# Routines for convenient looping and recursion through rule database +# ================= NEW VERSION ================ + +# There are several places where we need to loop through or recurse +# through rules and files. This tends to involve repeated, tedious +# and error-prone coding of much book-keeping detail. In particular, +# working on files and rules needs access to the variables involved, +# which either involves direct access to the elements of the database, +# and consequent fragility against changes and upgrades in the +# database structure, or involves lots of routines for reading and +# writing data in the database, then with lots of repetitious +# house-keeping code. +# +# The routines below provide a solution. Looping and recursion +# through the database are provided by a set of basic routines where +# each necessary kind of looping and iteration is coded once. 
+# The actual actions are provided as references to action subroutines.
+# (These can be either actual references, as in \&routine, or
+# anonymous subroutines, as in sub{...}, or as a zero value 0 or an
+# omitted argument, to indicate that no action is to be performed.)
+#
+# When the action subroutine(s) are actually called, a context for the
+# rule and/or file (as appropriate) is given by setting named
+## NEW ??
+# variables to REFERENCES to the relevant data values.  These can be
+# used to retrieve and set the data values.  As a convention,
+# references to scalars are given by variables whose names start with "$P",
+# as in "$Pdest", while references to arrays start with "$PA", as in
+# "$PAint_cmd", and references to hashes with "$PH", as in "$PHsource".
+# After the action subroutine has finished, checks for data
+# consistency may be made.
+## ??? OLD
+# variables to the relevant data values.  After the action subroutine
+# has finished, the database is updated with the values of these named
+# variables, with any necessary consistency checks.  Thus the action
+# subroutines can act on sensibly named variables without needing to
+# know the database structure.
+#
+# The only routines that actually use the database structure and need
+# to be changed if that is changed are: (a) the routines rdb_one_rule
+# and rdb_one_file that implement the calling of the action subroutines,
+# (b) routines for creation of single rules and file items, and (c) to
+# a lesser extent, the routine for destroying a file item.
+#
+# Note that no routine is provided for destroying a rule.  During a
+# run, a rule, with its source files, may become inaccessible or
+# unused.  This happens dynamically, depending on the dependencies
+# caused by changes in the source file or by error conditions that
+# cause the computation of dependencies, particularly of latex files, to
+# become wrong.  In that situation the files certainly come and go in
+# the database, but subsidiary rules, with their content information
+# on their source files, need to be retained so that their use can be
+# reinstated later depending on dynamic changes in other files.
+#
+# However, there is a potential memory leak unless some pruning is
+# done in what is written to the fdb file.  (Probably only accessible
+# rules and those for which source files exist.  Other cases have no
+# relevant information that needs to be preserved between runs.)
+
+#
+#
+
+
+#************************************************************
+
+# First the top level routines for recursion and iteration
+
+#************************************************************
+
+sub rdb_recurseA {
+    # Call: rdb_recurseA( rule | [ rules],
+    #                    \&rule_act1, \&file_act1, \&file_act2,
+    #                    \&rule_act2 )
+    # The actions are pointers to subroutines, and may be null (0, or
+    # undefined) to indicate no action to be applied.
+    # Recursively acts on the given rules and all ancestors:
+    #     foreach rule found:
+    #         apply rule_act1
+    #         loop through its files:
+    #             apply file_act1
+    #             act on its ancestor rule, if any
+    #             apply file_act2
+    #         apply rule_act2
+    # Guards against loops.
+    # Access to the rule and file data by local variables, only
+    # for getting and setting.
+
+    # This routine sets a context for anything recursive, with @heads,
+    # %visited and $depth being set as local variables.
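+    # A minimal usage sketch (the rule name 'latex' is hypothetical; the
+    # calling sequence is as documented above):
+    #     rdb_recurseA( 'latex',
+    #                   sub{ print "rule: $rule\n"; },
+    #                   sub{ print "    file: $file ($$Psize bytes)\n"; } );
+    # This would visit 'latex' and all rules it depends on, printing each
+    # rule and its source files via the $rule and $file context variables
+    # and the data references (e.g. $Psize) set by rdb_one_rule/rdb_one_file.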
+ local @heads = (); + my $rules = shift; + + # Distinguish between single rule (a string) and a reference to an + # array of rules: + if ( ref $rules eq 'ARRAY' ) { @heads = @$rules; } + else { @heads = ( $rules ); } + + # Keep a list of visited rules, used to block loops in recursion: + local %visited = (); + local $depth = 0; + + foreach $rule ( @heads ) { rdb_recurse_rule( $rule, @_ ); } + +} #END rdb_recurseA + +#************************************************************ + +sub rdb_for_all { + # Call: rdb_for_all( \&rule_act1, \&file_act, \&rule_act2 ) + # Loops through all rules and their source files, using the + # specified set of actions, which are pointers to subroutines. + # Sorts rules alphabetically. + # See rdb_for_some for details. + rdb_for_some( [ sort keys %rule_db ], @_); +} #END rdb_for_all + +#************************************************************ + +sub rdb_for_some { + # Call: rdb_for_some( rule | [ rules], + # \&rule_act1, \&file_act, \&rule_act2) + # Actions can be zero, and rules at tail of argument list can be + # omitted. E.g. rdb_for_some( rule, 0, \&file_act ). + # Anonymous subroutines can be used, e.g., rdb_for_some( rule, sub{...} ). + # + # Loops through rules and their source files, using the + # specified set of rules: + # foreach rule: + # apply rule_act1 + # loop through its files: + # apply file_act + # apply rule_act2 + # + # Rule data and file data are made available in local variables + # for access by the subroutines. + + local @heads = (); + my $rules = shift; + # Distinguish between single rule (a string) and a reference to an + # array of rules: + if ( ref $rules eq 'ARRAY' ) { @heads = @$rules; } + else { @heads = ( $rules ); } + + foreach $rule ( @heads ) { + # $rule is implicitly local + &rdb_one_rule( $rule, @_ ); + } +} #END rdb_for_some + +#************************************************************ + +sub rdb_for_one_file { + my $rule = shift; + # Avoid name collisions with general recursion and iteraction routines: + local $file1 = shift; + local $action1 = shift; + rdb_for_some( $rule, sub{rdb_one_file($file1,$action1)} ); +} #END rdb_for_one_file + + +#************************************************************ + +# Routines for inner part of recursion and iterations + +#************************************************************ + +sub rdb_recurse_rule { + # Call: rdb_recurse_rule($rule, \&rule_act1, \&file_act1, \&file_act2, + # \&rule_act2 ) + # to do the work for one rule, recurisvely called from_rules for + # the sources of the rules. + # Assumes recursion context, i.e. that %visited, @heads, $depth. + # We are overriding actions: + my ($rule, $rule_act1, $new_file_act1, $new_file_act2, $rule_act2) + = @_; + # and must propagate the file actions: + local $file_act1 = $new_file_act1; + local $file_act2 = $new_file_act2; + # Prevent loops: + if ( (! $rule) || exists $visited{$rule} ) { return; } + $visited{$rule} = 1; + # Recursion depth + $depth++; + # We may need to repeat actions on dependent rules, without being + # blocked by the test on visited files. So save %visited: + local %visited_at_rule_start = %visited; + # At end, the last value set for %visited wins. + rdb_one_rule( $rule, $rule_act1, \&rdb_recurse_file, $rule_act2 ); + $depth--; + } #END rdb_recurse_rule + +#************************************************************ + +sub rdb_recurse_file { + # Call: rdb_recurse_file to do the work for one file. 
+ # This has no arguments, since it is used as an action subroutine, + # passed as a reference in calls in higher-level subroutine. + # Assumes contexts set for: Recursion, rule, and file + &$file_act1 if $file_act1; + rdb_recurse_rule( $$Pfrom_rule, $rule_act1, $file_act1, $file_act2, + $rule_act2 ) + if $$Pfrom_rule; + &$file_act2 if $file_act2; +} #END rdb_recurse_file + +#************************************************************ + +sub rdb_do_files { + # Assumes rule context, including $PHsource. + # Applies an action to all the source files of the rule. + local $file_act = shift; + my @file_list = sort keys %$PHsource; + foreach my $file ( @file_list ){ + rdb_one_file( $file, $file_act ); + } +} #END rdb_do_files + +#************************************************************ + +# Routines for action on one rule and one file. These are the main +# places (in addition to creation and destruction routines for rules +# and files) where the database structure is accessed. + +#************************************************************ + +sub rdb_one_rule { + # Call: rdb_one_rule( $rule, $rule_act1, $file_act, $rule_act2 ) + # Sets context for rule and carries out the actions. +#===== Accesses rule part of database structure ======= + + local ( $rule, $rule_act1, $file_act, $rule_act2 ) = @_; +#?? &R1; + if ( (! $rule) || ! rdb_rule_exists($rule) ) { return; } + + local ( $PArule_data, $PHsource ) = @{$rule_db{$rule}}; + local ($Pcmd_type, $Pext_cmd, $PAint_cmd, $Ptest_kind, + $Psource, $Pdest, $Pbase, + $Pout_of_date, $Pout_of_date_user, $Prun_time, $Pchanged ) + = Parray( $PArule_data ); + # Correct array ref: + $PAint_cmd = $$PArule_data[2]; + + &$rule_act1 if $rule_act1; + &rdb_do_files( $file_act ) if $file_act; + &$rule_act2 if $rule_act2; + +#?? &R2; +} #END rdb_one_rule + +#************************************************************ + +sub rdb_one_file { + # Call: rdb_one_file($file, $file_act) + # Sets context for file and carries out the action. + # Assumes $rule context set. +#===== Accesses file part of database structure ======= + local ($file, $file_act) = @_; +#?? &F1; + if ( (!$file) ||(!exists ${$PHsource}{$file}) ) { return; } + local $PAfile_data = ${$PHsource}{$file}; + local ($Ptime, $Psize, $Pmd5, $Pfrom_rule, $Pcorrect_after_primary ) + = Parray( $PAfile_data ); + &$file_act if $file_act; + if ( ! rdb_rule_exists( $$Pfrom_rule ) ) { + $$Pfrom_rule = ''; + } +#?? &F2; +} #END rdb_one_file + +#************************************************************ + +# Routines for creation of rules and file items, and for removing file +# items. + +#************************************************************ + +sub rdb_create_rule { + # rdb_create_rule( rule, command_type, ext_cmd, int_cmd, test_kind, + # source, dest, base, + # needs_making, run_time ) + # int_cmd is either a string naming a perl subroutine or it is a + # reference to an array containing the subroutine name and its + # arguments. + # Makes rule. Error if it already exists. + # Omitted arguments: replaced by 0 or '' as needed. +# ==== Sets rule data ==== + my ( $rule, $cmd_type, $int_cmd, $PAext_cmd, $test_kind, + $source, $dest, $base, + $needs_making, $run_time ) = @_; + my $changed = 0; + # Set defaults, and normalize parameters: + foreach ( $cmd_type, $int_cmd, $PAext_cmd, $source, $dest, $base ) { + if (! defined $_) { $_ = ''; } + } + foreach ( $needs_making, $run_time, $test_kind ) { + if (! 
defined $_) { $_ = 0; } + } + if (!defined $test_kind) { + # Default to test on file change + $test_kind = 1; + } + if ( ref( $PAext_cmd ) eq '' ) { + # It is a single command. Convert to array reference: + $PAext_cmd = [ $PAext_cmd ]; + } + else { + # COPY the referenced array: + $PAext_cmd = [ @$PAext_cmd ]; + } + + $rule_db{$rule} = + [ [$cmd_type, $int_cmd, $PAext_cmd, $test_kind, + $source, $dest, $base, $needs_making, 0, $run_time, + $changed ], + {} + ]; + if ($source) { rdb_ensure_file( $rule, $source ); } +} #END rdb_create_rule + +#************************************************************ + +sub rdb_ensure_file { + # rdb_ensure_file( rule, file[, fromrule] ) + # Ensures the source file item exists in the given rule. + # Initialize to current file state if the item is created. + # Then if the fromrule is specified, set it for the file item. +#============ rule and file data set here ====================================== + my $rule = shift; + local ( $new_file, $new_from_rule ) = @_; + if ( ! rdb_rule_exists( $rule ) ) { + die_trace( "$My_name: BUG in rdb_ensure_file: non-existent rule '$rule'" ); + } + if ( ! defined $new_file ) { + die_trace( "$My_name: BUG in rdb_ensure_file: undefined file for '$rule'" ); + } + rdb_one_rule( $rule, + sub{ + if (! exists ${$PHsource}{$new_file} ) { + ${$PHsource}{$new_file} = [fdb_get($new_file), '', 0]; + } + } + ); + if (defined $new_from_rule ) { + rdb_for_one_file( $rule, $new_file, sub{ $$Pfrom_rule = $new_from_rule; }); + } +} #END rdb_ensure_file + +#************************************************************ + +sub rdb_remove_files { + # rdb_remove_file( rule, file,... ) + # Removes file(s) for the rule. + my $rule = shift; + if (!$rule) { return; } + local @files = @_; + rdb_one_rule( $rule, + sub{ foreach (@files) { delete ${$PHsource}{$_}; } } + ); +} #END rdb_remove_files + +#************************************************************ + +sub rdb_rule_exists { + # Call rdb_rule_exists($rule): Returns whether rule exists. + my $rule = shift; + if (! $rule ) { return 0; } + return exists $rule_db{$rule}; +} #END rdb_rule_exists + +#************************************************************ + +sub rdb_file_exists { + # Call rdb_file_exists($rule, $file): + # Returns whether source file item in rule exists. + local ( $rule, $file ) = @_; + local $exists = 0; + rdb_one_rule( $rule, + sub{ $exists = exists( ${$PHsource}{$file} ) ? 1:0; } + ); + return $exists; +} #END rdb_file_exists + +#************************************************************ + +sub rdb_update_gen_files { + # Call: fdb_updateA + # Assumes rule context. Update source files of rule to current state. + rdb_do_files( + sub{ + if ( exists $generated_exts_all{ ext($file) } ) {&rdb_update1;} + } + ); +} #END rdb_update_gen_files + +#************************************************************ + +sub rdb_update_filesA { + # Call: fdb_updateA + # Assumes rule context. Update source files of rule to current state. + rdb_do_files( \&rdb_update1 ); +} + +#************************************************************ + +sub rdb_update1 { + # Call: fdb_update1. + # Assumes file context. Updates file data to correspond to + # current file state on disk + ($$Ptime, $$Psize, $$Pmd5) = fdb_get($file); +} + +#************************************************************ + +sub rdb_set_file1 { + # Call: fdb_file1(rule, file, new_time, new_size, new_md5) + # Sets file time, size and md5. 
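+ # Illustration with hypothetical values:
+ #     rdb_set_file1( 'latex', 'foo.tex', $mtime, $size, $md5 );
+ # would record that time/size/md5 state for the source file 'foo.tex'
+ # of the 'latex' rule.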
+ my $rule = shift; + my $file = shift; + local @new_file_data = @_; + rdb_for_one_file( $rule, $file, sub{ ($$Ptime,$$Psize,$$Pmd5)=@new_file_data; } ); +} + +#************************************************************ + +sub rdb_dummy_file { + # Returns file data for non-existent file +# ==== Uses rule_db structure ==== + return (0, -1, 0, ''); +} + +#************************************************************ +#************************************************************ + +# Predefined subroutines for custom dependency + +sub cus_dep_delete_dest { + # This subroutine is used for situations like epstopdf.sty, when + # the destination (target) of the custom dependency invoking + # this subroutine will be made by the primary run provided the + # file (destination of the custom dependency, source of the + # primary run) doesn't exist. + # It is assumed that the resulting file will be read by the + # primary run. + + # Remove the destination file, to indicate it needs to be remade: + unlink $$Pdest; + # Arrange that the non-existent destination file is not treated as + # an error. The variable changed here is a bit misnamed. + $$Pchanged = 1; + # Ensure a primary run is done + &cus_dep_require_primary_run; + # Return success: + return 0; +} + +#************************************************************ + +sub cus_dep_require_primary_run { + # This subroutine is used for situations like epstopdf.sty, when + # the destination (target) of the custom dependency invoking + # this subroutine will be made by the primary run provided the + # file (destination of the custom dependency, source of the + # primary run) doesn't exist. + # It is assumed that the resulting file will be read by the + # primary run. + + local $cus_dep_target = $$Pdest; + # Loop over all rules and source files: + rdb_for_all( 0, + sub { if ($file eq $cus_dep_target) { + $$Pout_of_date = 1; + $$Pcorrect_after_primary = 1; + } + } + ); + # Return success: + return 0; +} + + +#************************************************************ +#************************************************************ +#************************************************************ +# +# UTILITIES: +# + +#************************************************************ +# Miscellaneous + +sub show_array { +# For use in diagnostics and debugging. +# On stderr, print line with $_[0] = label. +# Then print rest of @_, one item per line preceeded by some space + warn "$_[0]\n"; + shift; + foreach (@_){ warn " $_\n";} +} + +#************************************************************ + +sub Parray { + # Call: Parray( \@A ) + # Returns array of references to the elements of @A + my $PA = shift; + my @P = (undef) x (1+$#$PA); + foreach my $i (0..$#$PA) { $P[$i] = \$$PA[$i]; } + return @P; +} + +#************************************************************ + +sub glob_list { + # Glob a collection of filenames. Sort and eliminate duplicates + # Usage: e.g., @globbed = glob_list(string, ...); + my @globbed = (); + foreach (@_) { + push @globbed, glob; + } + return uniqs( @globbed ); +} + +#================================================== + +sub glob_list1 { + # Glob a collection of filenames. 
+ # But no sorting or elimination of duplicates + # Usage: e.g., @globbed = glob_list1(string, ...); + # Since perl's glob appears to use space as separator, I'll do a special check + # for existence of non-globbed file (assumed to be tex like) + + my @globbed = (); + foreach my $file_spec (@_) { + # Problem, when the PATTERN contains spaces, the space(s) are + # treated as pattern separaters (in MSWin at least). + # MSWin: I can quote the pattern (is that MSWin native, or also + # cygwin?) + # Linux: Quotes in a pattern are treated as part of the filename! + # So quoting a pattern is definitively wrong. + # The following hack solves this partly, for the cases that there is no wildcarding + # and the specified file exists possibly space-containing, and that there is wildcarding, + # but spaces are prohibited. + if ( -e $file_spec || -e "$file_spec.tex" ) { + # Non-globbed file exists, return the file_spec. + # Return $file_spec only because this is not a file-finding subroutine, but + # only a globber + push @globbed, $file_spec; + } + else { + # This glob fails to work as desired, if the pattern contains spaces. + push @globbed, glob( "$file_spec" ); + } + } + return @globbed; +} + +#************************************************************ +# Miscellaneous + +sub prefix { + #Usage: prefix( string, prefix ); + #Return string with prefix inserted at the front of each line + my @line = split( /\n/, $_[0] ); + my $prefix = $_[1]; + for (my $i = 0; $i <= $#line; $i++ ) { + $line[$i] = $prefix.$line[$i]."\n"; + } + return join( "", @line ); +} + + +#************************************************************ +#************************************************************ +# File handling utilities: + + +#************************************************************ + +sub get_latest_mtime +# - arguments: each is a filename. +# - returns most recent modify time. +{ + my $return_mtime = 0; + foreach my $include (@_) + { + my $include_mtime = &get_mtime($include); + # The file $include may not exist. If so ignore it, otherwise + # we'll get an undefined variable warning. 
+ if ( ($include_mtime) && ($include_mtime > $return_mtime) ) + { + $return_mtime = $include_mtime; + } + } + return $return_mtime; +} + +#************************************************************ + +sub get_mtime_raw +{ + my $mtime = (stat($_[0]))[9]; + return $mtime; +} + +#************************************************************ + +sub get_mtime { + return get_mtime0($_[0]); +} + +#************************************************************ + +sub get_mtime0 { + # Return time of file named in argument + # If file does not exist, return 0; + if ( -e $_[0] ) { + return get_mtime_raw($_[0]); + } + else { + return 0; + } +} + +#************************************************************ + +sub get_size { + # Return time of file named in argument + # If file does not exist, return 0; + if ( -e $_[0] ) { + return get_size_raw($_[0]); + } + else { + return 0; + } +} + +#************************************************************ + +sub get_size_raw +{ + my $size = (stat($_[0]))[7]; + return $size; +} + +#************************************************************ + +sub get_time_size { + # Return time and size of file named in argument + # If file does not exist, return (0,-1); + if ( -e $_[0] ) { + return get_time_size_raw($_[0]); + } + else { + return (0,-1); + } +} + +#************************************************************ + +sub get_time_size_raw +{ + my $mtime = (stat($_[0]))[9]; + my $size = (stat($_[0]))[7]; + return ($mtime, $size); +} + +#************************************************************ + +sub get_checksum_md5 { + my $source = shift; + my $input = new FileHandle; + my $md5 = Digest->MD5; + my $ignore_pattern = ''; + + if ( $source eq "" ) { + # STDIN: + open( $input, '-' ); + } + else { + open( $input, '<', $source ) + or return 0; + my ($base, $path, $ext) = fileparseA( $source ); + $ext =~ s/^\.//; + if ( exists $hash_calc_ignore_pattern{$ext} ) { + $ignore_pattern = $hash_calc_ignore_pattern{$ext}; + } + } + + if ( $ignore_pattern ) { + while (<$input>) { + if ( /$ignore_pattern/ ){ + $_= ''; + } + $md5->add($_); + } + } + else { + $md5->addfile($input); + } + close $input; + return $md5->hexdigest(); +} + +#************************************************************ + +#?? OBSOLETE +# Find file with default extension +# Usage: find_file_ext( name, default_ext, ref_to_array_search_path) +sub find_file_ext +#?? Need to use kpsewhich, if possible. Leave to find_file? +{ + my $full_filename = shift; + my $ext = shift; + my $ref_search_path = shift; + my $full_filename1 = &find_file($full_filename, $ref_search_path, '1'); +#print "Finding \"$full_filename\" with ext \"$ext\" ... "; + if (( $full_filename1 eq '' ) || ( ! -e $full_filename1 )) + { + my $full_filename2 = + &find_file("$full_filename.$ext",$ref_search_path,'1'); + if (( $full_filename2 ne '' ) && ( -e $full_filename2 )) + { + $full_filename = $full_filename2; + } + else + { + $full_filename = $full_filename1; + } + } + else + { + $full_filename = $full_filename1; + } +#print "Found \"$full_filename\".\n"; + return $full_filename; +} + +#************************************************************ +#?? OBSOLETE +# given filename and path, return full name of file, or die if none found. +# when force_include_mode=1, only warn if an include file was not +# found, and return 0 (PvdS). +# Usage: find_file(name, ref_to_array_search_path, warn_on_continue) +sub find_file +#?? 
Need to use kpsewhich, if possible +{ + my $name = $_[0]; + my $ref_path = $_[1]; + my $dir; + if ( $name =~ /^\// ) + { + #Aboslute pathname (by UNIX standards) + if ( (!-e $name) && ( $_[2] eq '' ) ) { + if ($force_include_mode) { + warn "$My_name: Could not find file [$name]\n"; + } + else { + die "$My_name: Could not find file [$name]\n"; + } + } + return $name; + } + # Relative pathname + foreach $dir ( @{$ref_path} ) + { +#warn "\"$dir\", \"$name\"\n"; + if (-e "$dir/$name") + { + return("$dir/$name"); + } + } + if ($force_include_mode) + { + if ( $_[2] eq '' ) + { + warn "$My_name: Could not find file [$name] in path [@{$ref_path}]\n"; + warn " assuming in current directory (./$name)\n"; + } + return("./$name"); + } + else + { + if ( $_[2] ne '' ) + { + return(''); + } +# warn "\"$name\", \"$ref_path\", \"$dir\"\n"; + die "$My_name: Could not find file [$name] in path [@{$ref_path}]\n"; + } +} + +#************************************************************ + +sub find_file1 { +#?? Need to use kpsewhich, if possible + + # Usage: find_file1(name, ref_to_array_search_path) + # Modified find_file, which doesn't die. + # Given filename and path, return array of: + # full name + # retcode + # On success: full_name = full name with path, retcode = 0 + # On failure: full_name = given name, retcode = 1 + + my $name = $_[0]; + # Make local copy of path, since we may rewrite it! + my @path = @{$_[1]}; + if ( $name =~ /^\// ) { + # Absolute path (if under UNIX) + # This needs fixing, in general + if (-e $name) { return( $name, 0 );} + else { return( $name, 1 );} + } + foreach my $dir ( @path ) { + #??print "-------------dir='$dir', "; + # Make $dir concatenatable, and empty for current dir: + if ( $dir eq '.' ) { + $dir = ''; + } + elsif ( $dir =~ /[\/\\:]$/ ) { + #OK if dir ends in / or \ or : + } + elsif ( $dir ne '' ) { + #Append directory separator only to non-empty dir + $dir = "$dir/"; + } + #?? print " newdir='$dir'\n"; + if (-e "$dir$name") { + return("$dir$name", 0); + } + } + my @kpse_result = kpsewhich( $name ); + if ($#kpse_result > -1) { + return( $kpse_result[0], 0); + } + return("$name" , 1); +} #END find_file1 + +#************************************************************ + +sub find_file_list1 { + # Modified version of find_file_list that doesn't die. + # Given output and input arrays of filenames, a file suffix, and a path, + # fill the output array with full filenames + # Return a status code: + # Retcode = 0 on success + # Retocde = 1 if at least one file was not found + # Usage: find_file_list1( ref_to_output_file_array, + # ref_to_input_file_array, + # suffix, + # ref_to_array_search_path + # ) + + my $ref_output = $_[0]; + my $ref_input = $_[1]; + my $suffix = $_[2]; + my $ref_search = $_[3]; + +#?? show_array( "=====find_file_list1. Suffix: '$suffix'\n Source:", @$ref_input ); +#?? show_array( " Bibinputs:", @$ref_search ); + + my @return_list = (); # Generate list in local array, since input + # and output arrays may be same + my $retcode = 0; + foreach my $file (@$ref_input) { + my ($tmp_file, $find_retcode) = &find_file1( "$file$suffix", $ref_search ); + if ($tmp_file) { + push @return_list, $tmp_file; + } + if ( $find_retcode != 0 ) { + $retcode = 1; + } + } + @$ref_output = @return_list; +#?? show_array( " Output", @$ref_output ); +#?? 
foreach (@$ref_output) { if ( /\/\// ) { print " ====== double slash in '$_'\n"; } } + return $retcode; +} #END find_file_list1 + +#************************************************************ + +sub kpsewhich { +# Usage: kpsewhich( filespec, ...) +# Returns array of files with paths as found by kpsewhich +# kpsewhich( 'try.sty', 'jcc.bib' ); +# Can also do, e.g., +# kpsewhich( '-format=bib', 'trial.bib', 'file with spaces'); + my $cmd = $kpsewhich; + my @args = @_; + foreach (@args) { + if ( ! /^-/ ) { + $_ = "\"$_\""; + } + } + foreach ($cmd) { + s/%[RBTDO]//g; + } + $cmd =~ s/%S/@args/g; + my @found = (); + local $fh; + open $fh, "$cmd|" + or die "Cannot open pipe for \"$cmd\"\n"; + while ( <$fh> ) { + s/^\s*//; + s/\s*$//; + push @found, $_; + } + close $fh; +# show_array( "Kpsewhich: '$cmd', '$file_list' ==>", @found ); + return @found; +} + +#################################################### + +sub add_cus_dep { + # Usage: add_cus_dep( from_ext, to_ext, flag, sub_name ) + # Add cus_dep after removing old versions + my ($from_ext, $to_ext, $must, $sub_name) = @_; + remove_cus_dep( $from_ext, $to_ext ); + push @cus_dep_list, "$from_ext $to_ext $must $sub_name"; +} + +#################################################### + +sub remove_cus_dep { + # Usage: remove_cus_dep( from_ext, to_ext ) + my ($from_ext, $to_ext) = @_; + my $i = 0; + while ($i <= $#cus_dep_list) { + if ( $cus_dep_list[$i] =~ /^$from_ext $to_ext / ) { + splice @cus_dep_list, $i, 1; + } + else { + $i++; + } + } +} + +#################################################### + +sub show_cus_dep { + show_array( "Custom dependency list:", @cus_dep_list ); +} + +#################################################### + +sub find_dirs1 { + # Same as find_dirs, but argument is single string with directories + # separated by $search_path_separator + find_dirs( &split_search_path( $search_path_separator, ".", $_[0] ) ); +} + + +#************************************************************ + +sub find_dirs { +# @_ is list of directories +# return: same list of directories, except that for each directory +# name ending in //, a list of all subdirectories (recursive) +# is added to the list. +# Non-existent directories and non-directories are removed from the list +# Trailing "/"s and "\"s are removed + local @result = (); + my $find_action + = sub + { ## Subroutine for use in File::find + ## Check to see if we have a directory + if (-d) { push @result, $File::Find::name; } + }; + foreach my $directory (@_) { + my $recurse = ( $directory =~ m[//$] ); + # Remove all trailing /s, since directory name with trailing / + # is not always allowed: + $directory =~ s[/+$][]; + # Similarly for MSWin reverse slash + $directory =~ s[\\+$][]; + if ( ! -e $directory ){ + next; + } + elsif ( $recurse ){ + # Recursively search directory + find( $find_action, $directory ); + } + else { + push @result, $directory; + } + } + return @result; +} + +#************************************************************ + +sub uniq +# Read arguments, delete neighboring items that are identical, +# return array of results +{ + my @sort = (); + my ($current, $prev); + my $first = 1; + while (@_) + { + $current = shift; + if ($first || ($current ne $prev) ) + { + push @sort, $current; + $prev = $current; + $first = 0; + } + } + return @sort; +} + +#================================================== + +sub uniq1 { + # Usage: uniq1( strings ) + # Returns array of strings with duplicates later in list than + # first occurence deleted. Otherwise preserves order. 
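+ # For example, uniq1( 'b', 'a', 'b', 'c' ) returns ( 'b', 'a', 'c' ).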
+ + my @strings = (); + my %string_hash = (); + + foreach my $string (@_) { + if (!exists( $string_hash{$string} )) { + $string_hash{$string} = 1; + push @strings, $string; + } + } + return @strings; +} + +#************************************************************ + +sub uniqs { + # Usage: uniq2( strings ) + # Returns array of strings sorted and with duplicates deleted + return uniq( sort @_ ); +} + +#************************************************************ + +sub ext { + # Return extension of filename. Extension includes the period + my $file_name = $_[0]; + my ($base_name, $path, $ext) = fileparseA( $file_name ); + return $ext; + } + +#************************************************************ + +sub fileparseA { + # Like fileparse but replace $path for current dir ('./' or '.\') by '' + # Also default second argument to get normal extension. + my $given = $_[0]; + my $pattern = '\.[^\.]*'; + if ($#_ > 0 ) { $pattern = $_[1]; } + my ($base_name, $path, $ext) = fileparse( $given, $pattern ); + if ( ($path eq './') || ($path eq '.\\') ) { + $path = ''; + } + return ($base_name, $path, $ext); + } + +#************************************************************ + +sub fileparseB { + # Like fileparse but with default second argument for normal extension + my $given = $_[0]; + my $pattern = '\.[^\.]*'; + if ($#_ > 0 ) { $pattern = $_[1]; } + my ($base_name, $path, $ext) = fileparse( $given, $pattern ); + return ($base_name, $path, $ext); + } + +#************************************************************ + +sub split_search_path +{ +# Usage: &split_search_path( separator, default, string ) +# Splits string by separator and returns array of the elements +# Allow empty last component. +# Replace empty terms by the default. + my $separator = $_[0]; + my $default = $_[1]; + my $search_path = $_[2]; + my @list = split( /$separator/, $search_path); + if ( $search_path =~ /$separator$/ ) { + # If search path ends in a blank item, the split subroutine + # won't have picked it up. + # So add it to the list by hand: + push @list, ""; + } + # Replace each blank argument (default) by current directory: + for ($i = 0; $i <= $#list ; $i++ ) { + if ($list[$i] eq "") {$list[$i] = $default;} + } + return @list; +} + +################################# + + +sub tempfile1 { + # Makes a temporary file of a unique name. I could use file::temp, + # but it is not present in all versions of perl + # Filename is of form $tmpdir/$_[0]nnn$suffix, where nnn is an integer + my $tmp_file_count = 0; + my $prefix = $_[0]; + my $suffix = $_[1]; + while (1==1) { + # Find a new temporary file, and make it. + $tmp_file_count++; + my $tmp_file = "${tmpdir}/${prefix}${tmp_file_count}${suffix}"; + if ( ! -e $tmp_file ) { + open( TMP, ">$tmp_file" ) + or next; + close(TMP); + return $tmp_file; + } + } + die "$My_name.tempfile1: BUG TO ARRIVE HERE\n"; +} + +################################# + +#************************************************************ +#************************************************************ +# Process/subprocess routines + +sub Run_msg { + # Same as Run, but give message about my running + warn_running( "Running '$_[0]'" ); + Run($_[0]); +} + +sub Run { +# Usage: Run ("program arguments "); +# or Run ("start program arguments"); +# or Run ("NONE program arguments"); +# First form is just a call to system, and the routine returns after the +# program has finished executing. 
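+#    For illustration (hypothetical command line): Run( "bibtex \"foo\"" )
+#    just passes the string to system and waits for bibtex to finish.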
+# Second form (with 'start') runs the program detached, as appropriate for
+# the operating system: It runs "program arguments &" on UNIX, and
+# "start program arguments" on WIN95 and WINNT. If multiple start
+# words are at the beginning of the command, the extra ones are removed.
+# Third form (with 'NONE') does not run anything, but prints an error
+# message. This is provided to allow program names defined in the
+# configuration to flag themselves as unimplemented.
+# Return value is a list (pid, exitcode):
+# If process is spawned successfully, and I know the PID,
+# return (pid, 0),
+# else if process is spawned successfully, but I do not know the PID,
+# return (0, 0),
+# else if process is run,
+# return (0, exitcode of process)
+# else (I fail to run the requested process)
+# return (0, suitable return code)
+# where return code is 1 if cmdline is null or begins with "NONE" (for
+# an unimplemented command)
+# or the return value of the system subroutine.
+
+
+# Split command line into one word per element, separating words by
+# one (OR MORE) spaces:
+# The purpose of this is to identify latexmk-defined pseudocommands
+# 'start' and 'NONE'.
+# After dealing with them, the command line is reassembled.
+ my $cmd_line = $_[0];
+ if ( $cmd_line eq '' ) {
+ traceback( "$My_name: Bug OR configuration error\n".
+ " In run of '$rule', attempt to run a null program" );
+ return (0, 1);
+ }
+ if ( $cmd_line =~ /^start +/ ) {
+ #warn "Before: '$cmd_line'\n";
+ # Run detached. How to do this depends on the OS
+ # But first remove extra starts (which may have been inserted
+ # to force a command to be run detached, when the command
+ # already contained a "start").
+ while ( $cmd_line =~ s/^start +// ) {}
+ #warn "After: '$cmd_line'\n";
+ return &Run_Detached( $cmd_line );
+ }
+ elsif ( $cmd_line =~ /^NONE/ ) {
+ warn "$My_name: ",
+ "Program not implemented for this version. Command line:\n";
+ warn " '$cmd_line'\n";
+ return (0, 1);
+ }
+ else {
+ # The command is given to system as a single argument, to force shell
+ # metacharacters to be interpreted:
+ return( 0, system( $cmd_line ) );
+ }
+}
+
+#************************************************************
+
+sub Run_Detached {
+# Usage: Run_Detached ("program arguments ");
+# Runs program detached. Returns 0 on success, 1 on failure.
+# Under UNIX use a trick to avoid the program being killed when the
+# parent process, i.e., me, gets a ctrl/C, which is undesirable for pvc
+# mode. (The simplest method, system ("program arguments &"), makes the
+# child process respond to the ctrl/C.)
+# Return value is a list (pid, exitcode):
+# If process is spawned successfully, and I know the PID,
+# return (pid, 0),
+# else if process is spawned successfully, but I do not know the PID,
+# return (0, 0),
+# else if I fail to spawn a process
+# return (0, 1)
+
+ my $cmd_line = $_[0];
+
+## warn "Running '$cmd_line' detached...\n";
+ if ( $cmd_line =~ /^NONE / ) {
+ warn "$My_name: ",
+ "Program not implemented for this version. Command line:\n";
+ warn " '$cmd_line'\n";
+ return (0, 1);
+ }
+
+ if ( "$^O" eq "MSWin32" ){
+ # Win95, WinNT, etc: Use MS's start command:
+ return( 0, system( "start $cmd_line" ) );
+ } else {
+ # Assume anything else is UNIX or clone
+ # For this purpose cygwin behaves like UNIX.
+ ## warn "Run_Detached.UNIX: A\n";
+ my $pid = fork();
+ ## warn "Run_Detached.UNIX: B pid=$pid\n";
+ if ( !
defined $pid ) { + ## warn "Run_Detached.UNIX: C\n"; + warn "$My_name: Could not fork to run the following command:\n"; + warn " '$cmd_line'\n"; + return (0, 1); + } + elsif( $pid == 0 ){ + ## warn "Run_Detached.UNIX: D\n"; + # Forked child process arrives here + # Insulate child process from interruption by ctrl/C to kill parent: + # setpgrp(0,0); + # Perhaps this works if setpgrp doesn't exist + # (and therefore gives fatal error): + eval{ setpgrp(0,0);}; + exec( $cmd_line ); + # Exec never returns; it replaces current process by new process + die "$My_name forked process: could not run the command\n", + " '$cmd_line'\n"; + } + ##warn "Run_Detached.UNIX: E\n"; + # Original process arrives here + return ($pid, 0); + } + # NEVER GET HERE. + ##warn "Run_Detached.UNIX: F\n"; +} + +#************************************************************ + +sub find_process_id { +# find_process_id(string) finds id of process containing string and +# being run by the present user. Typically the string will be the +# name of the process or part of its command line. +# On success, this subroutine returns the process ID. +# On failure, it returns 0. +# This subroutine only works on UNIX systems at the moment. + + if ( $pid_position < 0 ) { + # I cannot do a ps on this system + return (0); + } + + my $looking_for = $_[0]; + my @ps_output = `$pscmd`; + +# There may be multiple processes. Find only latest, +# almost surely the one with the highest process number +# This will deal with cases like xdvi where a script is used to +# run the viewer and both the script and the actual viewer binary +# have running processes. + my @found = (); + + shift(@ps_output); # Discard the header line from ps + foreach (@ps_output) { + next unless ( /$looking_for/ ) ; + my @ps_line = split (' '); +# OLD return($ps_line[$pid_position]); + push @found, $ps_line[$pid_position]; + } + + if ($#found < 0) { + # No luck in finding the specified process. 
+ return(0); + } + @found = reverse sort @found; + if ($diagnostics) { + print "Found the following processes concerning '$looking_for'\n", + " @found\n", + " I will use $found[0]\n"; + } + return $found[0]; +} + +#************************************************************ +#************************************************************ +#************************************************************ + +# Directory stack routines + +sub pushd { + push @dir_stack, cwd(); + if ( $#_ > -1) { chdir $_[0]; } +} + +#************************************************************ + +sub popd { + if ($#dir_stack > -1 ) { chdir pop @dir_stack; } +} + +#************************************************************ + +sub ifcd_popd { + if ( $do_cd ) { + warn "$My_name: Undoing directory change\n"; + &popd; + } +} + +#************************************************************ + +sub finish_dir_stack { + while ($#dir_stack > -1 ) { &popd; } +} + +#************************************************************ +#************************************************************ +#************************************************************ +#************************************************************ +#************************************************************ +#************************************************************ +#************************************************************ +#************************************************************ diff --git a/Build/source/texk/texlive/linked_scripts/luatools.lua b/Build/source/texk/texlive/linked_scripts/luatools.lua deleted file mode 100755 index 35986137950..00000000000 --- a/Build/source/texk/texlive/linked_scripts/luatools.lua +++ /dev/null @@ -1,6659 +0,0 @@ -#!/usr/bin/env texlua - --- one can make a stub: --- --- #!/bin/sh --- env LUATEXDIR=/....../texmf/scripts/context/lua luatex --luaonly=luatools.lua "$@" --- filename : luatools.lua --- comment : companion to context.tex --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files --- Although this script is part of the ConTeXt distribution it is --- relatively indepent of ConTeXt. The same is true for some of --- the luat files. We may may make them even less dependent in --- the future. As long as Luatex is under development the --- interfaces and names of functions may change. - -banner = "version 1.2.0 - 2006+ - PRAGMA ADE / CONTEXT" -texlua = true - --- For the sake of independence we optionally can merge the library --- code here. It's too much code, but that does not harm. Much of the --- library code is used elsewhere. We don't want dependencies on --- Lua library paths simply because these scripts are located in the --- texmf tree and not in some Lua path. Normally this merge is not --- needed when texmfstart is used, or when the proper stub is used or --- when (windows) suffix binding is active. - --- begin library merge --- filename : l-string.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-string'] = 1.001 - ---~ function string.split(str, pat) -- taken from the lua wiki ---~ local t = {n = 0} -- so this table has a length field, traverse with ipairs then! 
---~ local fpat = "(.-)"..pat ---~ local last_end = 1 ---~ local s, e, cap = string.find(str, fpat, 1) ---~ while s ~= nil do ---~ if s~=1 or cap~="" then ---~ table.insert(t,cap) ---~ end ---~ last_end = e+1 ---~ s, e, cap = string.find(str, fpat, last_end) ---~ end ---~ if last_end<=string.len(str) then ---~ table.insert(t,(string.sub(str,last_end))) ---~ end ---~ return t ---~ end - ---~ function string:split(pat) -- taken from the lua wiki but adapted ---~ local t = { } -- self and colon usage (faster) ---~ local fpat = "(.-)"..pat ---~ local last_end = 1 ---~ local s, e, cap = self:find(fpat, 1) ---~ while s ~= nil do ---~ if s~=1 or cap~="" then ---~ t[#t+1] = cap ---~ end ---~ last_end = e+1 ---~ s, e, cap = self:find(fpat, last_end) ---~ end ---~ if last_end <= #self then ---~ t[#t+1] = self:sub(last_end) ---~ end ---~ return t ---~ end - ---~ a piece of brilliant code by Rici Lake (posted on lua list) -- only names changed ---~ ---~ function string:splitter(pat) ---~ local st, g = 1, self:gmatch("()"..pat.."()") ---~ local function splitter(self) ---~ if st then ---~ local s, f = g() ---~ local rv = self:sub(st, (s or 0)-1) ---~ st = f ---~ return rv ---~ end ---~ end ---~ return splitter, self ---~ end - -function string:splitter(pat) - -- by Rici Lake (posted on lua list) -- only names changed - -- p 79 ref man: () returns position of match - local st, g = 1, self:gmatch("()("..pat..")") - local function strgetter(self, segs, seps, sep, cap1, ...) - st = sep and seps + #sep - return self:sub(segs, (seps or 0) - 1), cap1 or sep, ... - end - local function strsplitter(self) - if st then return strgetter(self, st, g()) end - end - return strsplitter, self -end - -function string:split(separator) - local t = {} - for k in self:splitter(separator) do t[#t+1] = k end - return t -end - --- faster than a string:split: - -function string:splitchr(chr) - if #self > 0 then - local t = { } - for s in string.gmatch(self..chr,"(.-)"..chr) do - t[#t+1] = s - end - return t - else - return { } - end -end - ---~ function string.piecewise(str, pat, fnc) -- variant of split ---~ local fpat = "(.-)"..pat ---~ local last_end = 1 ---~ local s, e, cap = string.find(str, fpat, 1) ---~ while s ~= nil do ---~ if s~=1 or cap~="" then ---~ fnc(cap) ---~ end ---~ last_end = e+1 ---~ s, e, cap = string.find(str, fpat, last_end) ---~ end ---~ if last_end <= #str then ---~ fnc((string.sub(str,last_end))) ---~ end ---~ end - -function string.piecewise(str, pat, fnc) -- variant of split - for k in string.splitter(str,pat) do fnc(k) end -end - ---~ function string.piecewise(str, pat, fnc) -- variant of split ---~ for k in str:splitter(pat) do fnc(k) end ---~ end - ---~ do if lpeg then - ---~ -- this alternative is 30% faster esp when we cache them ---~ -- problem: no expressions - ---~ splitters = { } - ---~ function string:split(separator) ---~ if #self > 0 then ---~ local split = splitters[separator] ---~ if not split then ---~ -- based on code by Roberto ---~ local p = lpeg.P(separator) ---~ local c = lpeg.C((1-p)^0) ---~ split = lpeg.Ct(c*(p*c)^0) ---~ splitters[separator] = split ---~ end ---~ return split:match(self) ---~ else ---~ return { } ---~ end ---~ end - ---~ string.splitchr = string.split - ---~ function string:piecewise(separator,fnc) ---~ for _,v in pairs(self:split(separator)) do ---~ fnc(v) ---~ end ---~ end - ---~ end end - -string.chr_to_esc = { - ["%"] = "%%", - ["."] = "%.", - ["+"] = "%+", ["-"] = "%-", ["*"] = "%*", - ["^"] = "%^", ["$"] = "%$", - ["["] = "%[", ["]"] = "%]", - ["("] = 
"%(", [")"] = "%)", - ["{"] = "%{", ["}"] = "%}" -} - -function string:esc() -- variant 2 - return (self:gsub("(.)",string.chr_to_esc)) -end - -function string.unquote(str) - return (str:gsub("^([\"\'])(.*)%1$","%2")) -end - -function string.quote(str) - return '"' .. str:unquote() .. '"' -end - -function string:count(pattern) -- variant 3 - local n = 0 - for _ in self:gmatch(pattern) do - n = n + 1 - end - return n -end - -function string:limit(n,sentinel) - if #self > n then - sentinel = sentinel or " ..." - return self:sub(1,(n-#sentinel)) .. sentinel - else - return self - end -end - -function string:strip() - return (self:gsub("^%s*(.-)%s*$", "%1")) -end - ---~ function string.strip(str) -- slightly different ---~ return (string.gsub(string.gsub(str,"^%s*(.-)%s*$","%1"),"%s+"," ")) ---~ end - -function string:is_empty() - return not self:find("%S") -end - -function string:enhance(pattern,action) - local ok, n = true, 0 - while ok do - ok = false - self = self:gsub(pattern, function(...) - ok, n = true, n + 1 - return action(...) - end) - end - return self, n -end - ---~ function string:enhance(pattern,action) ---~ local ok, n = 0, 0 ---~ repeat ---~ self, ok = self:gsub(pattern, function(...) ---~ n = n + 1 ---~ return action(...) ---~ end) ---~ until ok == 0 ---~ return self, n ---~ end - ---~ function string:to_hex() ---~ if self then ---~ return (self:gsub("(.)",function(c) ---~ return string.format("%02X",c:byte()) ---~ end)) ---~ else ---~ return "" ---~ end ---~ end - ---~ function string:from_hex() ---~ if self then ---~ return (self:gsub("(..)",function(c) ---~ return string.char(tonumber(c,16)) ---~ end)) ---~ else ---~ return "" ---~ end ---~ end - -string.chr_to_hex = { } -string.hex_to_chr = { } - -for i=0,255 do - local c, h = string.char(i), string.format("%02X",i) - string.chr_to_hex[c], string.hex_to_chr[h] = h, c -end - ---~ function string:to_hex() ---~ if self then return (self:gsub("(.)",string.chr_to_hex)) else return "" end ---~ end - ---~ function string:from_hex() ---~ if self then return (self:gsub("(..)",string.hex_to_chr)) else return "" end ---~ end - -function string:to_hex() - return ((self or ""):gsub("(.)",string.chr_to_hex)) -end - -function string:from_hex() - return ((self or ""):gsub("(..)",string.hex_to_chr)) -end - -if not string.characters then - - local function nextchar(str, index) - index = index + 1 - return (index <= #str) and index or nil, str:sub(index,index) - end - function string:characters() - return nextchar, self, 0 - end - local function nextbyte(str, index) - index = index + 1 - return (index <= #str) and index or nil, string.byte(str:sub(index,index)) - end - function string:bytes() - return nextbyte, self, 0 - end - -end - ---~ function string:padd(n,chr) ---~ return self .. self.rep(chr or " ",n-#self) ---~ end - -function string:rpadd(n,chr) - local m = n-#self - if m > 0 then - return self .. self.rep(chr or " ",m) - else - return self - end -end - -function string:lpadd(n,chr) - local m = n-#self - if m > 0 then - return self.rep(chr or " ",m) .. 
self - else - return self - end -end - -string.padd = string.rpadd - -function is_number(str) - return str:find("^[%-%+]?[%d]-%.?[%d+]$") == 1 -end - ---~ print(is_number("1")) ---~ print(is_number("1.1")) ---~ print(is_number(".1")) ---~ print(is_number("-0.1")) ---~ print(is_number("+0.1")) ---~ print(is_number("-.1")) ---~ print(is_number("+.1")) - -function string:split_settings() -- no {} handling, see l-aux for lpeg variant - if self:find("=") then - local t = { } - for k,v in self:gmatch("(%a+)=([^%,]*)") do - t[k] = v - end - return t - else - return nil - end -end - -local patterns_escapes = { - ["-"] = "%-", - ["."] = "%.", - ["+"] = "%+", - ["*"] = "%*", - ["%"] = "%%", - ["("] = "%)", - [")"] = "%)", - ["["] = "%[", - ["]"] = "%]", -} - -function string:pattesc() - return (self:gsub(".",patterns_escapes)) -end - -function string:tohash() - local t = { } - for s in self:gmatch("([^, ]+)") do -- lpeg - t[s] = true - end - return t -end - - --- filename : l-lpeg.lua --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-lpeg'] = 1.001 - ---~ l-lpeg.lua : - ---~ lpeg.digit = lpeg.R('09')^1 ---~ lpeg.sign = lpeg.S('+-')^1 ---~ lpeg.cardinal = lpeg.P(lpeg.sign^0 * lpeg.digit^1) ---~ lpeg.integer = lpeg.P(lpeg.sign^0 * lpeg.digit^1) ---~ lpeg.float = lpeg.P(lpeg.sign^0 * lpeg.digit^0 * lpeg.P('.') * lpeg.digit^1) ---~ lpeg.number = lpeg.float + lpeg.integer ---~ lpeg.oct = lpeg.P("0") * lpeg.R('07')^1 ---~ lpeg.hex = lpeg.P("0x") * (lpeg.R('09') + lpeg.R('AF'))^1 ---~ lpeg.uppercase = lpeg.P("AZ") ---~ lpeg.lowercase = lpeg.P("az") - ---~ lpeg.eol = lpeg.S('\r\n\f')^1 -- includes formfeed ---~ lpeg.space = lpeg.S(' ')^1 ---~ lpeg.nonspace = lpeg.P(1-lpeg.space)^1 ---~ lpeg.whitespace = lpeg.S(' \r\n\f\t')^1 ---~ lpeg.nonwhitespace = lpeg.P(1-lpeg.whitespace)^1 - -local hash = { } - -function lpeg.anywhere(pattern) --slightly adapted from website - return lpeg.P { lpeg.P(pattern) + 1 * lpeg.V(1) } -end - -function lpeg.startswith(pattern) --slightly adapted - return lpeg.P(pattern) -end - ---~ g = lpeg.splitter(" ",function(s) ... 
end) -- gmatch:lpeg = 3:2 - -function lpeg.splitter(pattern, action) - return (((1-lpeg.P(pattern))^1)/action+1)^0 -end - -local crlf = lpeg.P("\r\n") -local cr = lpeg.P("\r") -local lf = lpeg.P("\n") -local space = lpeg.S(" \t\f\v") -local newline = crlf + cr + lf -local spacing = space^0 * newline - -local empty = spacing * lpeg.Cc("") -local nonempty = lpeg.Cs((1-spacing)^1) * spacing^-1 -local content = (empty + nonempty)^1 - -local capture = lpeg.Ct(content^0) - -function string:splitlines() - return capture:match(self) -end - - --- filename : l-table.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-table'] = 1.001 - -table.join = table.concat - -function table.strip(tab) - local lst = { } - for k, v in ipairs(tab) do - -- s = string.gsub(v, "^%s*(.-)%s*$", "%1") - s = v:gsub("^%s*(.-)%s*$", "%1") - if s == "" then - -- skip this one - else - lst[#lst+1] = s - end - end - return lst -end - ---~ function table.sortedkeys(tab) ---~ local srt = { } ---~ for key,_ in pairs(tab) do ---~ srt[#srt+1] = key ---~ end ---~ table.sort(srt) ---~ return srt ---~ end - -function table.sortedkeys(tab) - local srt, kind = { }, 0 -- 0=unknown 1=string, 2=number 3=mixed - for key,_ in pairs(tab) do - srt[#srt+1] = key - if kind == 3 then - -- no further check - else - local tkey = type(key) - if tkey == "string" then - -- if kind == 2 then kind = 3 else kind = 1 end - kind = (kind == 2 and 3) or 1 - elseif tkey == "number" then - -- if kind == 1 then kind = 3 else kind = 2 end - kind = (kind == 1 and 3) or 2 - else - kind = 3 - end - end - end - if kind == 0 or kind == 3 then - table.sort(srt,function(a,b) return (tostring(a) < tostring(b)) end) - else - table.sort(srt) - end - return srt -end - -function table.append(t, list) - for _,v in pairs(list) do - table.insert(t,v) - end -end - -function table.prepend(t, list) - for k,v in pairs(list) do - table.insert(t,k,v) - end -end - -function table.merge(t, ...) -- first one is target - t = t or {} - local lst = {...} - for i=1,#lst do - for k, v in pairs(lst[i]) do - t[k] = v - end - end - return t -end - -function table.merged(...) - local tmp, lst = { }, {...} - for i=1,#lst do - for k, v in pairs(lst[i]) do - tmp[k] = v - end - end - return tmp -end - -function table.imerge(t, ...) - local lst = {...} - for i=1,#lst do - local nst = lst[i] - for j=1,#nst do - t[#t+1] = nst[j] - end - end - return t -end - -function table.imerged(...) 
- local tmp, lst = { }, {...} - for i=1,#lst do - local nst = lst[i] - for j=1,#nst do - tmp[#tmp+1] = nst[j] - end - end - return tmp -end - -if not table.fastcopy then do - - local type, pairs, getmetatable, setmetatable = type, pairs, getmetatable, setmetatable - - local function fastcopy(old) -- fast one - if old then - local new = { } - for k,v in pairs(old) do - if type(v) == "table" then - new[k] = fastcopy(v) -- was just table.copy - else - new[k] = v - end - end - local mt = getmetatable(old) - if mt then - setmetatable(new,mt) - end - return new - else - return { } - end - end - - table.fastcopy = fastcopy - -end end - -if not table.copy then do - - local type, pairs, getmetatable, setmetatable = type, pairs, getmetatable, setmetatable - - local function copy(t, tables) -- taken from lua wiki, slightly adapted - tables = tables or { } - local tcopy = {} - if not tables[t] then - tables[t] = tcopy - end - for i,v in pairs(t) do -- brrr, what happens with sparse indexed - if type(i) == "table" then - if tables[i] then - i = tables[i] - else - i = copy(i, tables) - end - end - if type(v) ~= "table" then - tcopy[i] = v - elseif tables[v] then - tcopy[i] = tables[v] - else - tcopy[i] = copy(v, tables) - end - end - local mt = getmetatable(t) - if mt then - setmetatable(tcopy,mt) - end - return tcopy - end - - table.copy = copy - -end end - --- rougly: copy-loop : unpack : sub == 0.9 : 0.4 : 0.45 (so in critical apps, use unpack) - -function table.sub(t,i,j) - return { unpack(t,i,j) } -end - -function table.replace(a,b) - for k,v in pairs(b) do - a[k] = v - end -end - --- slower than #t on indexed tables (#t only returns the size of the numerically indexed slice) - -function table.is_empty(t) - return not t or not next(t) -end - -function table.one_entry(t) - local n = next(t) - return n and not next(t,n) -end - -function table.starts_at(t) - return ipairs(t,1)(t,0) -end - -do - - -- one of my first exercises in lua ... 
- - -- 34.055.092 32.403.326 arabtype.tma - -- 1.620.614 1.513.863 lmroman10-italic.tma - -- 1.325.585 1.233.044 lmroman10-regular.tma - -- 1.248.157 1.158.903 lmsans10-regular.tma - -- 194.646 153.120 lmtypewriter10-regular.tma - -- 1.771.678 1.658.461 palatinosanscom-bold.tma - -- 1.695.251 1.584.491 palatinosanscom-regular.tma - -- 13.736.534 13.409.446 zapfinoextraltpro.tma - - -- 13.679.038 11.774.106 arabtype.tmc - -- 886.248 754.944 lmroman10-italic.tmc - -- 729.828 466.864 lmroman10-regular.tmc - -- 688.482 441.962 lmsans10-regular.tmc - -- 128.685 95.853 lmtypewriter10-regular.tmc - -- 715.929 582.985 palatinosanscom-bold.tmc - -- 669.942 540.126 palatinosanscom-regular.tmc - -- 1.560.588 1.317.000 zapfinoextraltpro.tmc - - table.serialize_functions = true - table.serialize_compact = true - table.serialize_inline = true - - local function key(k) - if type(k) == "number" then -- or k:find("^%d+$") then - return "["..k.."]" - elseif noquotes and k:find("^%a[%a%d%_]*$") then - return k - else - return '["'..k..'"]' - end - end - - local function simple_table(t) - if #t > 0 then - local n = 0 - for _,v in pairs(t) do - n = n + 1 - end - if n == #t then - local tt = { } - for i=1,#t do - local v = t[i] - local tv = type(v) - if tv == "number" or tv == "boolean" then - tt[#tt+1] = tostring(v) - elseif tv == "string" then - tt[#tt+1] = ("%q"):format(v) - else - tt = nil - break - end - end - return tt - end - end - return nil - end - - local function serialize(root,name,handle,depth,level,reduce,noquotes,indexed) - handle = handle or print - reduce = reduce or false - if depth then - depth = depth .. " " - if indexed then - handle(("%s{"):format(depth)) - else - handle(("%s%s={"):format(depth,key(name))) - end - else - depth = "" - local tname = type(name) - if tname == "string" then - if name == "return" then - handle("return {") - else - handle(name .. "={") - end - elseif tname == "number" then - handle("[" .. name .. 
"]={") - elseif tname == "boolean" then - if name then - handle("return {") - else - handle("{") - end - else - handle("t={") - end - end - if root and next(root) then - local compact = table.serialize_compact - local inline = compact and table.serialize_inline - local first, last = nil, 0 -- #root cannot be trusted here - if compact then - for k,v in ipairs(root) do -- NOT: for k=1,#root do (why) - if not first then first = k end - last = last + 1 - end - end - for _,k in pairs(table.sortedkeys(root)) do - local v = root[k] - local t = type(v) - if compact and first and type(k) == "number" and k >= first and k <= last then - if t == "number" then - handle(("%s %s,"):format(depth,v)) - elseif t == "string" then - if reduce and (v:find("^[%-%+]?[%d]-%.?[%d+]$") == 1) then - handle(("%s %s,"):format(depth,v)) - else - handle(("%s %q,"):format(depth,v)) - end - elseif t == "table" then - if not next(v) then - handle(("%s {},"):format(depth)) - elseif inline then - local st = simple_table(v) - if st then - handle(("%s { %s },"):format(depth,table.concat(st,", "))) - else - serialize(v,k,handle,depth,level+1,reduce,noquotes,true) - end - else - serialize(v,k,handle,depth,level+1,reduce,noquotes,true) - end - elseif t == "boolean" then - handle(("%s %s,"):format(depth,tostring(v))) - elseif t == "function" then - if table.serialize_functions then - handle(('%s loadstring(%q),'):format(depth,string.dump(v))) - else - handle(('%s "function",'):format(depth)) - end - else - handle(("%s %q,"):format(depth,tostring(v))) - end - elseif k == "__p__" then -- parent - if false then - handle(("%s __p__=nil,"):format(depth)) - end - elseif t == "number" then - handle(("%s %s=%s,"):format(depth,key(k),v)) - elseif t == "string" then - if reduce and (v:find("^[%-%+]?[%d]-%.?[%d+]$") == 1) then - handle(("%s %s=%s,"):format(depth,key(k),v)) - else - handle(("%s %s=%q,"):format(depth,key(k),v)) - end - elseif t == "table" then - if not next(v) then - handle(("%s %s={},"):format(depth,key(k))) - elseif inline then - local st = simple_table(v) - if st then - handle(("%s %s={ %s },"):format(depth,key(k),table.concat(st,", "))) - else - serialize(v,k,handle,depth,level+1,reduce,noquotes) - end - else - serialize(v,k,handle,depth,level+1,reduce,noquotes) - end - elseif t == "boolean" then - handle(("%s %s=%s,"):format(depth,key(k),tostring(v))) - elseif t == "function" then - if table.serialize_functions then - handle(('%s %s=loadstring(%q),'):format(depth,key(k),string.dump(v))) - else - handle(('%s %s="function",'):format(depth,key(k))) - end - else - handle(("%s %s=%q,"):format(depth,key(k),tostring(v))) - -- handle(('%s %s=loadstring(%q),'):format(depth,key(k),string.dump(function() return v end))) - end - end - if level > 0 then - handle(("%s},"):format(depth)) - else - handle(("%s}"):format(depth)) - end - else - handle(("%s}"):format(depth)) - end - end - - --~ name: - --~ - --~ true : return { } - --~ false : { } - --~ nil : t = { } - --~ string : string = { } - --~ 'return' : return { } - --~ number : [number] = { } - - function table.serialize(root,name,reduce,noquotes) - local t = { } - local function flush(s) - t[#t+1] = s - end - serialize(root, name, flush, nil, 0, reduce, noquotes) - return table.concat(t,"\n") - end - - function table.tohandle(handle,root,name,reduce,noquotes) - serialize(root, name, handle, nil, 0, reduce, noquotes) - end - - -- sometimes tables are real use (zapfino extra pro is some 85M) in which - -- case a stepwise serialization is nice; actually, we could consider: - -- - -- 
for line in table.serializer(root,name,reduce,noquotes) do - -- ...(line) - -- end - -- - -- so this is on the todo list - - table.tofile_maxtab = 2*1024 - - function table.tofile(filename,root,name,reduce,noquotes) - local f = io.open(filename,'w') - if f then - local concat = table.concat - local maxtab = table.tofile_maxtab - if maxtab > 1 then - local t = { } - local function flush(s) - t[#t+1] = s - if #t > maxtab then - f:write(concat(t,"\n"),"\n") -- hm, write(sometable) should be nice - t = { } - end - end - serialize(root, name, flush, nil, 0, reduce, noquotes) - f:write(concat(t,"\n"),"\n") - else - local function flush(s) - f:write(s,"\n") - end - serialize(root, name, flush, nil, 0, reduce, noquotes) - end - f:close() - end - end - -end - ---~ t = { ---~ b = "123", ---~ a = "x", ---~ c = 1.23, ---~ d = "1.23", ---~ e = true, ---~ f = { ---~ d = "1.23", ---~ a = "x", ---~ b = "123", ---~ c = 1.23, ---~ e = true, ---~ f = { ---~ e = true, ---~ f = { ---~ e = true ---~ }, ---~ }, ---~ }, ---~ g = function() end ---~ } - ---~ print(table.serialize(t), "\n") ---~ print(table.serialize(t,"name"), "\n") ---~ print(table.serialize(t,false), "\n") ---~ print(table.serialize(t,true), "\n") ---~ print(table.serialize(t,"name",true), "\n") ---~ print(table.serialize(t,"name",true,true), "\n") - -do - - local function flatten(t,f,complete) - for i=1,#t do - local v = t[i] - if type(v) == "table" then - if complete or type(v[1]) == "table" then - flatten(v,f,complete) - else - f[#f+1] = v - end - else - f[#f+1] = v - end - end - end - - function table.flatten(t) - local f = { } - flatten(t,f,true) - return f - end - - function table.unnest(t) -- bad name - local f = { } - flatten(t,f,false) - return f - end - - table.flatten_one_level = table.unnest - -end - -function table.insert_before_value(t,value,str) - for i=1,#t do - if t[i] == value then - table.insert(t,i,str) - return - end - end - table.insert(t,1,str) -end - -function table.insert_after_value(t,value,str) - for i=1,#t do - if t[i] == value then - table.insert(t,i+1,str) - return - end - end - t[#t+1] = str -end - -function table.are_equal(a,b,n,m) - if #a == #b then - n = n or 1 - m = m or #a - for i=n,m do - local ai, bi = a[i], b[i] - if (ai==bi) or (type(ai)=="table" and type(bi)=="table" and table.are_equal(ai,bi)) then - -- continue - else - return false - end - end - return true - else - return false - end -end - -function table.compact(t) - if t then - for k,v in pairs(t) do - if not next(v) then - t[k] = nil - end - end - end -end - -function table.tohash(t) - local h = { } - for _, v in pairs(t) do -- no ipairs here - h[v] = true - end - return h -end - -function table.fromhash(t) - local h = { } - for k, v in pairs(t) do -- no ipairs here - if v then h[#h+1] = k end - end - return h -end - -function table.contains(t, v) - if t then - for i=1, #t do - if t[i] == v then - return true - end - end - end - return false -end - -function table.count(t) - local n, e = 0, next(t) - while e do - n, e = n + 1, next(t,e) - end - return n -end - -function table.swapped(t) - local s = { } - for k, v in pairs(t) do - s[v] = k - end - return s -end - ---~ function table.are_equal(a,b) ---~ return table.serialize(a) == table.serialize(b) ---~ end - -function table.clone(t,p) -- t is optional or nil or table - if not p then - t, p = { }, t or { } - elseif not t then - t = { } - end - setmetatable(t, { __index = function(_,key) return p[key] end }) - return t -end - - -function table.hexed(t,seperator) - local tt = { } - for i=1,#t do 
tt[i] = string.format("0x%04X",t[i]) end - return table.concat(tt,seperator or " ") -end - -function table.reverse_hash(h) - local r = { } - for k,v in pairs(h) do - r[v] = (k:gsub(" ","")):lower() - end - return r -end - -function table.reverse(t) - local tt = { } - if #t > 0 then - for i=#t,1,-1 do - tt[#tt+1] = t[i] - end - end - return tt -end - - --- filename : l-io.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-io'] = 1.001 - -if string.find(os.getenv("PATH"),";") then - io.fileseparator, io.pathseparator = "\\", ";" -else - io.fileseparator, io.pathseparator = "/" , ":" -end - -function io.loaddata(filename) - local f = io.open(filename,'rb') - if f then - local data = f:read('*all') - f:close() - return data - else - return nil - end -end - -function io.savedata(filename,data,joiner) - local f = io.open(filename, "wb") - if f then - if type(data) == "table" then - f:write(table.join(data,joiner or "")) - elseif type(data) == "function" then - data(f) - else - f:write(data) - end - f:close() - end -end - -function io.exists(filename) - local f = io.open(filename) - if f == nil then - return false - else - assert(f:close()) - return true - end -end - -function io.size(filename) - local f = io.open(filename) - if f == nil then - return 0 - else - local s = f:seek("end") - assert(f:close()) - return s - end -end - -function io.noflines(f) - local n = 0 - for _ in f:lines() do - n = n + 1 - end - f:seek('set',0) - return n -end - -do - - local sb = string.byte - - local nextchar = { - [ 4] = function(f) - return f:read(1,1,1,1) - end, - [ 2] = function(f) - return f:read(1,1) - end, - [ 1] = function(f) - return f:read(1) - end, - [-2] = function(f) - local a, b = f:read(1,1) - return b, a - end, - [-4] = function(f) - local a, b, c, d = f:read(1,1,1,1) - return d, c, b, a - end - } - - function io.characters(f,n) - if f then - return nextchar[n or 1], f - else - return nil, nil - end - end - -end - -do - - local sb = string.byte - ---~ local nextbyte = { ---~ [4] = function(f) ---~ local a = f:read(1) ---~ local b = f:read(1) ---~ local c = f:read(1) ---~ local d = f:read(1) ---~ if d then ---~ return sb(a), sb(b), sb(c), sb(d) ---~ else ---~ return nil, nil, nil, nil ---~ end ---~ end, ---~ [2] = function(f) ---~ local a = f:read(1) ---~ local b = f:read(1) ---~ if b then ---~ return sb(a), sb(b) ---~ else ---~ return nil, nil ---~ end ---~ end, ---~ [1] = function (f) ---~ local a = f:read(1) ---~ if a then ---~ return sb(a) ---~ else ---~ return nil ---~ end ---~ end, ---~ [-2] = function (f) ---~ local a = f:read(1) ---~ local b = f:read(1) ---~ if b then ---~ return sb(b), sb(a) ---~ else ---~ return nil, nil ---~ end ---~ end, ---~ [-4] = function(f) ---~ local a = f:read(1) ---~ local b = f:read(1) ---~ local c = f:read(1) ---~ local d = f:read(1) ---~ if d then ---~ return sb(d), sb(c), sb(b), sb(a) ---~ else ---~ return nil, nil, nil, nil ---~ end ---~ end ---~ } - - local nextbyte = { - [4] = function(f) - local a, b, c, d = f:read(1,1,1,1) - if d then - return sb(a), sb(b), sb(c), sb(d) - else - return nil, nil, nil, nil - end - end, - [2] = function(f) - local a, b = f:read(1,1) - if b then - return sb(a), sb(b) - else - return nil, nil - end - end, - [1] = function (f) - local a = f:read(1) - if a then - return sb(a) - else - return nil - end - end, - [-2] = function (f) - local 
a, b = f:read(1,1) - if b then - return sb(b), sb(a) - else - return nil, nil - end - end, - [-4] = function(f) - local a, b, c, d = f:read(1,1,1,1) - if d then - return sb(d), sb(c), sb(b), sb(a) - else - return nil, nil, nil, nil - end - end - } - - function io.bytes(f,n) - if f then - return nextbyte[n or 1], f - else - return nil, nil - end - end - -end - -function io.ask(question,default,options) - while true do - io.write(question) - if options then - io.write(string.format(" [%s]",table.concat(options,"|"))) - end - if default then - io.write(string.format(" [%s]",default)) - end - io.write(string.format(" ")) - local answer = io.read() - answer = answer:gsub("^%s*(.*)%s*$","%1") - if answer == "" and default then - return default - elseif not options then - return answer - else - for _,v in pairs(options) do - if v == answer then - return answer - end - end - local pattern = "^" .. answer - for _,v in pairs(options) do - if v:find(pattern) then - return v - end - end - end - end -end - - --- filename : l-number.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-number'] = 1.001 - -if not number then number = { } end - --- a,b,c,d,e,f = number.toset(100101) - -function number.toset(n) - return (tostring(n)):match("(.?)(.?)(.?)(.?)(.?)(.?)(.?)(.?)") -end - -local format = string.format - -function number.toevenhex(n) - local s = format("%X",n) - if #s % 2 == 0 then - return s - else - return "0" .. s - end -end - --- the lpeg way is slower on 8 digits, but faster on 4 digits, some 7.5% --- on --- --- for i=1,1000000 do --- local a,b,c,d,e,f,g,h = number.toset(12345678) --- local a,b,c,d = number.toset(1234) --- local a,b,c = number.toset(123) --- end --- --- of course dedicated "(.)(.)(.)(.)" matches are even faster - -do - local one = lpeg.C(1-lpeg.S(''))^1 - - function number.toset(n) - return one:match(tostring(n)) - end -end - - - --- filename : l-set.lua --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-set'] = 1.001 - -if not set then set = { } end - -do - - local nums = { } - local tabs = { } - local concat = table.concat - - set.create = table.tohash - - function set.tonumber(t) - if next(t) then - local s = "" - -- we could save mem by sorting, but it slows down - for k, v in pairs(t) do - if v then - -- why bother about the leading space - s = s .. " " .. 
k - end - end - if not nums[s] then - tabs[#tabs+1] = t - nums[s] = #tabs - end - return nums[s] - else - return 0 - end - end - - function set.totable(n) - if n == 0 then - return { } - else - return tabs[n] or { } - end - end - - function set.contains(n,s) - if type(n) == "table" then - return n[s] - elseif n == 0 then - return false - else - local t = tabs[n] - return t and t[s] - end - end - -end - ---~ local c = set.create{'aap','noot','mies'} ---~ local s = set.tonumber(c) ---~ local t = set.totable(s) ---~ print(t['aap']) ---~ local c = set.create{'zus','wim','jet'} ---~ local s = set.tonumber(c) ---~ local t = set.totable(s) ---~ print(t['aap']) ---~ print(t['jet']) ---~ print(set.contains(t,'jet')) ---~ print(set.contains(t,'aap')) - - - --- filename : l-os.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-os'] = 1.001 - -function os.resultof(command) - return io.popen(command,"r"):read("*all") -end - -if not os.exec then os.exec = os.execute end -if not os.spawn then os.spawn = os.execute end - ---~ os.type : windows | unix (new, we already guessed os.platform) ---~ os.name : windows | msdos | linux | macosx | solaris | .. | generic (new) - -if not io.fileseparator then - if string.find(os.getenv("PATH"),";") then - io.fileseparator, io.pathseparator, os.platform = "\\", ";", os.type or "windows" - else - io.fileseparator, io.pathseparator, os.platform = "/" , ":", os.type or "unix" - end -end - -os.platform = os.platform or os.type or (io.pathseparator == ";" and "windows") or "unix" - -function os.launch(str) - if os.platform == "windows" then - os.execute("start " .. str) -- os.spawn ? - else - os.execute(str .. " &") -- os.spawn ? 
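A round-trip sketch for the l-io helpers defined above (io.savedata, io.loaddata, io.exists, io.size); the scratch filename is purely illustrative:

local name = "io-demo.tmp"                    -- hypothetical scratch file
io.savedata(name, "line 1\nline 2\n")         -- strings are written as-is (binary mode)
print(io.exists(name))                        -- true
print(io.size(name))                          -- 14
print(io.loaddata(name))                      -- both lines read back as one string
io.savedata(name, { "a", "b", "c" }, "-")     -- a table is joined with the given separator
os.remove(name)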
- end -end - -if not os.setenv then - function os.setenv() return false end -end - -if not os.times then - -- utime = user time - -- stime = system time - -- cutime = children user time - -- cstime = children system time - function os.times() - return { - utime = os.gettimeofday(), -- user - stime = 0, -- system - cutime = 0, -- children user - cstime = 0, -- children system - } - end -end - -os.gettimeofday = os.gettimeofday or os.clock - -do - local startuptime = os.gettimeofday() - function os.runtime() - return os.gettimeofday() - startuptime - end -end - ---~ print(os.gettimeofday()-os.time()) ---~ os.sleep(1.234) ---~ print (">>",os.runtime()) ---~ print(os.date("%H:%M:%S",os.gettimeofday())) ---~ print(os.date("%H:%M:%S",os.time())) - - --- filename : l-md5.lua --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-md5'] = 1.001 - -if md5 then do - - local function convert(str,fmt) - return (string.gsub(md5.sum(str),".",function(chr) return string.format(fmt,string.byte(chr)) end)) - end - - if not md5.HEX then function md5.HEX(str) return convert(str,"%02X") end end - if not md5.hex then function md5.hex(str) return convert(str,"%02x") end end - if not md5.dec then function md5.dec(str) return convert(str,"%03i") end end - -end end - - --- filename : l-file.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-file'] = 1.001 - -if not file then file = { } end - -function file.removesuffix(filename) - return filename:gsub("%.[%a%d]+$", "") -end - -function file.addsuffix(filename, suffix) - if not filename:find("%.[%a%d]+$") then - return filename .. "." .. suffix - else - return filename - end -end - -function file.replacesuffix(filename, suffix) - if not filename:find("%.[%a%d]+$") then - return filename .. "." .. suffix - else - return (filename:gsub("%.[%a%d]+$","."..suffix)) - end -end - -function file.dirname(name) - return name:match("^(.+)[/\\].-$") or "" -end - -function file.basename(name) - return name:match("^.+[/\\](.-)$") or name -end - -function file.nameonly(name) - return ((name:match("^.+[/\\](.-)$") or name):gsub("%..*$","")) -end - -function file.extname(name) - return name:match("^.+%.([^/\\]-)$") or "" -end - -file.suffix = file.extname - -function file.stripsuffix(name) - return (name:gsub("%.[%a%d]+$","")) -end - ---~ function file.join(...) ---~ local t = { ... } ---~ for i=1,#t do ---~ t[i] = (t[i]:gsub("\\","/")):gsub("/+$","") ---~ end ---~ return table.concat(t,"/") ---~ end - ---~ print(file.join("x/","/y")) ---~ print(file.join("http://","/y")) ---~ print(file.join("http://a","/y")) ---~ print(file.join("http:///a","/y")) ---~ print(file.join("//nas-1","/y")) - -function file.join(...) - local pth = table.concat({...},"/") - pth = pth:gsub("\\","/") - local a, b = pth:match("^(.*://)(.*)$") - if a and b then - return a .. b:gsub("//+","/") - end - a, b = pth:match("^(//)(.*)$") - if a and b then - return a .. 
b:gsub("//+","/") - end - return (pth:gsub("//+","/")) -end - -function file.is_writable(name) - local f = io.open(name, 'w') - if f then - f:close() - return true - else - return false - end -end - -function file.is_readable(name) - local f = io.open(name,'r') - if f then - f:close() - return true - else - return false - end -end - ---~ function file.split_path(str) ---~ if str:find(';') then ---~ return str:splitchr(";") ---~ else ---~ return str:splitchr(io.pathseparator) ---~ end ---~ end - --- todo: lpeg - -function file.split_path(str) - local t = { } - str = str:gsub("\\", "/") - str = str:gsub("(%a):([;/])", "%1\001%2") - for name in str:gmatch("([^;:]+)") do - if name ~= "" then - name = name:gsub("\001",":") - t[#t+1] = name - end - end - return t -end - -function file.join_path(tab) - return table.concat(tab,io.pathseparator) -- can have trailing // -end - ---~ print('test' .. " == " .. file.collapse_path("test")) ---~ print("test/test" .. " == " .. file.collapse_path("test/test")) ---~ print("test/test/test" .. " == " .. file.collapse_path("test/test/test")) ---~ print("test/test" .. " == " .. file.collapse_path("test/../test/test")) ---~ print("test" .. " == " .. file.collapse_path("test/../test")) ---~ print("../test" .. " == " .. file.collapse_path("../test")) ---~ print("../test/" .. " == " .. file.collapse_path("../test/")) ---~ print("a/a" .. " == " .. file.collapse_path("a/b/c/../../a")) - ---~ function file.collapse_path(str) ---~ local ok, n = false, 0 ---~ while not ok do ---~ ok = true ---~ str, n = str:gsub("[^%./]+/%.%./", function(s) ---~ ok = false ---~ return "" ---~ end) ---~ end ---~ return (str:gsub("/%./","/")) ---~ end - -function file.collapse_path(str) - local n = 1 - while n > 0 do - str, n = str:gsub("([^/%.]+/%.%./)","") - end - return (str:gsub("/%./","/")) -end - -function file.robustname(str) - return (str:gsub("[^%a%d%/%-%.\\]+","-")) -end - -file.readdata = io.loaddata -file.savedata = io.savedata - -function file.copy(oldname,newname) - file.savedata(newname,io.loaddata(oldname)) -end - - --- filename : l-url.lua --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-url'] = 1.001 -if not url then url = { } end - --- from the spec (on the web): --- --- foo://example.com:8042/over/there?name=ferret#nose --- \_/ \______________/\_________/ \_________/ \__/ --- | | | | | --- scheme authority path query fragment --- | _____________________|__ --- / \ / \ --- urn:example:animal:ferret:nose - -do - - local function tochar(s) - return string.char(tonumber(s,16)) - end - - local colon, qmark, hash, slash, percent, endofstring = lpeg.P(":"), lpeg.P("?"), lpeg.P("#"), lpeg.P("/"), lpeg.P("%"), lpeg.P(-1) - - local hexdigit = lpeg.R("09","AF","af") - local escaped = percent * lpeg.C(hexdigit * hexdigit) / tochar - - local scheme = lpeg.Cs((escaped+(1-colon-slash-qmark-hash))^0) * colon + lpeg.Cc("") - local authority = slash * slash * lpeg.Cs((escaped+(1- slash-qmark-hash))^0) + lpeg.Cc("") - local path = slash * lpeg.Cs((escaped+(1- qmark-hash))^0) + lpeg.Cc("") - local query = qmark * lpeg.Cs((escaped+(1- hash))^0) + lpeg.Cc("") - local fragment = hash * lpeg.Cs((escaped+(1- endofstring))^0) + lpeg.Cc("") - - local parser = lpeg.Ct(scheme * authority * path * query * fragment) - - function url.split(str) - return (type(str) == "string" and parser:match(str)) or str - end - -end - -function url.hashed(str) - 
local s = url.split(str) - return { - scheme = (s[1] ~= "" and s[1]) or "file", - authority = s[2], - path = s[3], - query = s[4], - fragment = s[5], - original = str - } -end - -function url.filename(filename) - local t = url.hashed(filename) - return (t.scheme == "file" and t.path:gsub("^/([a-zA-Z])([:|])/)","%1:")) or filename -end - -function url.query(str) - if type(str) == "string" then - local t = { } - for k, v in str:gmatch("([^&=]*)=([^&=]*)") do - t[k] = v - end - return t - else - return str - end -end - ---~ print(url.filename("file:///c:/oeps.txt")) ---~ print(url.filename("c:/oeps.txt")) ---~ print(url.filename("file:///oeps.txt")) ---~ print(url.filename("file:///etc/test.txt")) ---~ print(url.filename("/oeps.txt")) - --- from the spec on the web (sort of): ---~ ---~ function test(str) ---~ print(table.serialize(url.hashed(str))) ---~ end ----~ ---~ test("%56pass%20words") ---~ test("file:///c:/oeps.txt") ---~ test("file:///c|/oeps.txt") ---~ test("file:///etc/oeps.txt") ---~ test("file://./etc/oeps.txt") ---~ test("file:////etc/oeps.txt") ---~ test("ftp://ftp.is.co.za/rfc/rfc1808.txt") ---~ test("http://www.ietf.org/rfc/rfc2396.txt") ---~ test("ldap://[2001:db8::7]/c=GB?objectClass?one#what") ---~ test("mailto:John.Doe@example.com") ---~ test("news:comp.infosystems.www.servers.unix") ---~ test("tel:+1-816-555-1212") ---~ test("telnet://192.0.2.16:80/") ---~ test("urn:oasis:names:specification:docbook:dtd:xml:4.1.2") ---~ test("/etc/passwords") ---~ test("http://www.pragma-ade.com/spaced%20name") - ---~ test("zip:///oeps/oeps.zip#bla/bla.tex") ---~ test("zip:///oeps/oeps.zip?bla/bla.tex") - - --- filename : l-dir.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-dir'] = 1.001 - -dir = { } - --- optimizing for no string.find (*) does not save time - -if lfs then do - ---~ local attributes = lfs.attributes ---~ local walkdir = lfs.dir ---~ ---~ local function glob_pattern(path,patt,recurse,action) ---~ local ok, scanner = xpcall(function() return walkdir(path) end, function() end) -- kepler safe ---~ if ok and type(scanner) == "function" then ---~ if not path:find("/$") then path = path .. '/' end ---~ for name in scanner do ---~ local full = path .. name ---~ local mode = attributes(full,'mode') ---~ if mode == 'file' then ---~ if name:find(patt) then ---~ action(full) ---~ end ---~ elseif recurse and (mode == "directory") and (name ~= '.') and (name ~= "..") then ---~ glob_pattern(full,patt,recurse,action) ---~ end ---~ end ---~ end ---~ end ---~ ---~ dir.glob_pattern = glob_pattern ---~ ---~ local function glob(pattern, action) ---~ local t = { } ---~ local action = action or function(name) t[#t+1] = name end ---~ local path, patt = pattern:match("^(.*)/*%*%*/*(.-)$") ---~ local recurse = path and patt ---~ if not recurse then ---~ path, patt = pattern:match("^(.*)/(.-)$") ---~ if not (path and patt) then ---~ path, patt = '.', pattern ---~ end ---~ end ---~ patt = patt:gsub("([%.%-%+])", "%%%1") ---~ patt = patt:gsub("%*", ".*") ---~ patt = patt:gsub("%?", ".") ---~ patt = "^" .. patt .. "$" ---~ -- print('path: ' .. path .. ' | pattern: ' .. patt .. ' | recurse: ' .. 
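A few concrete calls against the l-file and l-url helpers defined above, in the spirit of the commented tests already present; the results noted in the comments follow directly from the patterns shown:

print(file.join("texmf", "web2c", "texmf.cnf"))   -- texmf/web2c/texmf.cnf
print(file.replacesuffix("oeps.tex", "pdf"))      -- oeps.pdf
print(file.nameonly("a/b/oeps.tex"))              -- oeps
print(file.collapse_path("a/b/c/../../a"))        -- a/a
local u = url.hashed("http://www.pragma-ade.com/general/manuals/mk.pdf?x=1#top")
print(u.scheme, u.authority, u.path)              -- http   www.pragma-ade.com   general/manuals/mk.pdf
print(url.query(u.query).x)                       -- 1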
tostring(recurse)) ---~ glob_pattern(path,patt,recurse,action) ---~ return t ---~ end ---~ ---~ dir.glob = glob - - local attributes = lfs.attributes - local walkdir = lfs.dir - - local function glob_pattern(path,patt,recurse,action) - local ok, scanner - if path == "/" then - ok, scanner = xpcall(function() return walkdir(path..".") end, function() end) -- kepler safe - else - ok, scanner = xpcall(function() return walkdir(path) end, function() end) -- kepler safe - end - if ok and type(scanner) == "function" then - if not path:find("/$") then path = path .. '/' end - for name in scanner do - local full = path .. name - local mode = attributes(full,'mode') - if mode == 'file' then - if full:find(patt) then - action(full) - end - elseif recurse and (mode == "directory") and (name ~= '.') and (name ~= "..") then - glob_pattern(full,patt,recurse,action) - end - end - end - end - - dir.glob_pattern = glob_pattern - - --~ local function glob(pattern, action) - --~ local t = { } - --~ local path, rest, patt, recurse - --~ local action = action or function(name) t[#t+1] = name end - --~ local pattern = pattern:gsub("^%*%*","./**") - --~ local pattern = pattern:gsub("/%*/","/**/") - --~ path, rest = pattern:match("^(/)(.-)$") - --~ if path then - --~ path = path - --~ else - --~ path, rest = pattern:match("^([^/]*)/(.-)$") - --~ end - --~ if rest then - --~ patt = rest:gsub("([%.%-%+])", "%%%1") - --~ end - --~ patt = patt:gsub("%*", "[^/]*") - --~ patt = patt:gsub("%?", "[^/]") - --~ patt = patt:gsub("%[%^/%]%*%[%^/%]%*", ".*") - --~ if path == "" then path = "." end - --~ recurse = patt:find("%.%*/") ~= nil - --~ glob_pattern(path,patt,recurse,action) - --~ return t - --~ end - - local P, S, R, C, Cc, Cs, Ct, Cv, V = lpeg.P, lpeg.S, lpeg.R, lpeg.C, lpeg.Cc, lpeg.Cs, lpeg.Ct, lpeg.Cv, lpeg.V - - local pattern = Ct { - [1] = (C(P(".") + P("/")^1) + C(R("az","AZ") * P(":") * P("/")^0) + Cc("./")) * V(2) * V(3), - [2] = C(((1-S("*?/"))^0 * P("/"))^0), - [3] = C(P(1)^0) - } - - local filter = Cs ( ( - P("**") / ".*" + - P("*") / "[^/]*" + - P("?") / "[^/]" + - P(".") / "%%." + - P("+") / "%%+" + - P("-") / "%%-" + - P(1) - )^0 ) - - local function glob(str,t) - if type(str) == "table" then - local t = t or { } - for _, s in ipairs(str) do - glob(s,t) - end - return t - else - local split = pattern:match(str) - if split then - local t = t or { } - local action = action or function(name) t[#t+1] = name end - local root, path, base = split[1], split[2], split[3] - local recurse = base:find("**") - local start = root .. path - local result = filter:match(start .. base) - glob_pattern(start,result,recurse,action) - return t - else - return { } - end - end - end - - dir.glob = glob - - --~ list = dir.glob("**/*.tif") - --~ list = dir.glob("/**/*.tif") - --~ list = dir.glob("./**/*.tif") - --~ list = dir.glob("oeps/**/*.tif") - --~ list = dir.glob("/oeps/**/*.tif") - - local function globfiles(path,recurse,func,files) -- func == pattern or function - if type(func) == "string" then - local s = func -- alas, we need this indirect way - func = function(name) return name:find(s) end - end - files = files or { } - for name in walkdir(path) do - if name:find("^%.") then - --- skip - elseif attributes(name,'mode') == "directory" then - if recurse then - globfiles(path .. "/" .. name,recurse,func,files) - end - elseif func then - if func(name) then - files[#files+1] = path .. "/" .. name - end - else - files[#files+1] = path .. "/" .. 
name - end - end - return files - end - - dir.globfiles = globfiles - - -- t = dir.glob("c:/data/develop/context/sources/**/????-*.tex") - -- t = dir.glob("c:/data/develop/tex/texmf/**/*.tex") - -- t = dir.glob("c:/data/develop/context/texmf/**/*.tex") - -- t = dir.glob("f:/minimal/tex/**/*") - -- print(dir.ls("f:/minimal/tex/**/*")) - -- print(dir.ls("*.tex")) - - function dir.ls(pattern) - return table.concat(glob(pattern),"\n") - end - - --~ mkdirs("temp") - --~ mkdirs("a/b/c") - --~ mkdirs(".","/a/b/c") - --~ mkdirs("a","b","c") - - local make_indeed = true -- false - - if string.find(os.getenv("PATH"),";") then - - function dir.mkdirs(...) - local str, pth = "", "" - for _, s in ipairs({...}) do - if s ~= "" then - if str ~= "" then - str = str .. "/" .. s - else - str = s - end - end - end - local first, middle, last - local drive = false - first, middle, last = str:match("^(//)(//*)(.*)$") - if first then - -- empty network path == local path - else - first, last = str:match("^(//)/*(.-)$") - if first then - middle, last = str:match("([^/]+)/+(.-)$") - if middle then - pth = "//" .. middle - else - pth = "//" .. last - last = "" - end - else - first, middle, last = str:match("^([a-zA-Z]:)(/*)(.-)$") - if first then - pth, drive = first .. middle, true - else - middle, last = str:match("^(/*)(.-)$") - if not middle then - last = str - end - end - end - end - for s in last:gmatch("[^/]+") do - if pth == "" then - pth = s - elseif drive then - pth, drive = pth .. s, false - else - pth = pth .. "/" .. s - end - if make_indeed and not lfs.isdir(pth) then - lfs.mkdir(pth) - end - end - return pth, (lfs.isdir(pth) == true) - end - ---~ print(dir.mkdirs("","","a","c")) ---~ print(dir.mkdirs("a")) ---~ print(dir.mkdirs("a:")) ---~ print(dir.mkdirs("a:/b/c")) ---~ print(dir.mkdirs("a:b/c")) ---~ print(dir.mkdirs("a:/bbb/c")) ---~ print(dir.mkdirs("/a/b/c")) ---~ print(dir.mkdirs("/aaa/b/c")) ---~ print(dir.mkdirs("//a/b/c")) ---~ print(dir.mkdirs("///a/b/c")) ---~ print(dir.mkdirs("a/bbb//ccc/")) - - function dir.expand_name(str) - local first, nothing, last = str:match("^(//)(//*)(.*)$") - if first then - first = lfs.currentdir() .. "/" - first = first:gsub("\\","/") - end - if not first then - first, last = str:match("^(//)/*(.*)$") - end - if not first then - first, last = str:match("^([a-zA-Z]:)(.*)$") - if first and not last:find("^/") then - local d = lfs.currentdir() - if lfs.chdir(first) then - first = lfs.currentdir() - first = first:gsub("\\","/") - end - lfs.chdir(d) - end - end - if not first then - first, last = lfs.currentdir(), str - first = first:gsub("\\","/") - end - last = last:gsub("//","/") - last = last:gsub("/%./","/") - last = last:gsub("^/*","") - first = first:gsub("/*$","") - if last == "" then - return first - else - return first .. "/" .. last - end - end - - else - - function dir.mkdirs(...) - local str, pth = "", "" - for _, s in ipairs({...}) do - if s ~= "" then - if str ~= "" then - str = str .. "/" .. s - else - str = s - end - end - end - str = str:gsub("/+","/") - if str:find("^/") then - pth = "/" - for s in str:gmatch("[^/]+") do - local first = (pth == "/") - if first then - pth = pth .. s - else - pth = pth .. "/" .. s - end - if make_indeed and not first and not lfs.isdir(pth) then - lfs.mkdir(pth) - end - end - else - pth = "." - for s in str:gmatch("[^/]+") do - pth = pth .. "/" .. 
s - if make_indeed and not lfs.isdir(pth) then - lfs.mkdir(pth) - end - end - end - return pth, (lfs.isdir(pth) == true) - end - ---~ print(dir.mkdirs("","","a","c")) ---~ print(dir.mkdirs("a")) ---~ print(dir.mkdirs("/a/b/c")) ---~ print(dir.mkdirs("/aaa/b/c")) ---~ print(dir.mkdirs("//a/b/c")) ---~ print(dir.mkdirs("///a/b/c")) ---~ print(dir.mkdirs("a/bbb//ccc/")) - - function dir.expand_name(str) - if not str:find("^/") then - str = lfs.currentdir() .. "/" .. str - end - str = str:gsub("//","/") - str = str:gsub("/%./","/") - return str - end - - end - - dir.makedirs = dir.mkdirs - -end end - - --- filename : l-boolean.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-boolean'] = 1.001 -if not boolean then boolean = { } end - -function boolean.tonumber(b) - if b then return 1 else return 0 end -end - -function toboolean(str,tolerant) - if tolerant then - local tstr = type(str) - if tstr == "string" then - return str == "true" or str == "yes" or str == "on" or str == "1" - elseif tstr == "number" then - return tonumber(str) ~= 0 - elseif tstr == "nil" then - return false - else - return str - end - elseif str == "true" then - return true - elseif str == "false" then - return false - else - return str - end -end - -function string.is_boolean(str) - if type(str) == "string" then - if str == "true" or str == "yes" or str == "on" then - return true - elseif str == "false" or str == "no" or str == "off" then - return false - end - end - return nil -end - -function boolean.alwaystrue() - return true -end - -function boolean.falsetrue() - return false -end - - --- filename : l-unicode.lua --- comment : split off from luat-inp --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-unicode'] = 1.001 -if not unicode then unicode = { } end - -if not garbagecollector then - garbagecollector = { - push = function() collectgarbage("stop") end, - pop = function() collectgarbage("restart") end, - } -end - --- 0 EF BB BF UTF-8 --- 1 FF FE UTF-16-little-endian --- 2 FE FF UTF-16-big-endian --- 3 FF FE 00 00 UTF-32-little-endian --- 4 00 00 FE FF UTF-32-big-endian - -unicode.utfname = { - [0] = 'utf-8', - [1] = 'utf-16-le', - [2] = 'utf-16-be', - [3] = 'utf-32-le', - [4] = 'utf-32-be' -} - -function unicode.utftype(f) -- \000 fails ! 
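A sketch of the dir interface defined above; it needs the lfs module (available in LuaTeX), and the paths are only illustrative:

-- ** crosses directory boundaries, * and ? stay within one path component
local texfiles = dir.glob("texmf/tex/**/*.tex")
print(#texfiles)
-- globfiles takes a Lua string pattern (or a filter function) instead of a glob
local styles = dir.globfiles("texmf/tex/context", true, "%.mkiv$")
print(#styles)
-- mkdirs creates intermediate directories; it returns the path and a success boolean
local path, ok = dir.mkdirs("build", "tmp", "cache")
print(path, ok)
print(dir.expand_name("./build"))                 -- absolute form of the path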
- local str = f:read(4) - if not str then - f:seek('set') - return 0 - elseif str:find("^%z%z\254\255") then - return 4 - elseif str:find("^\255\254%z%z") then - return 3 - elseif str:find("^\254\255") then - f:seek('set',2) - return 2 - elseif str:find("^\255\254") then - f:seek('set',2) - return 1 - elseif str:find("^\239\187\191") then - f:seek('set',3) - return 0 - else - f:seek('set') - return 0 - end -end - -function unicode.utf16_to_utf8(str, endian) - garbagecollector.push() - local result = { } - local tc, uc = table.concat, unicode.utf8.char - local tmp, n, m, p = { }, 0, 0, 0 - -- lf | cr | crlf / (cr:13, lf:10) - local function doit() - if n == 10 then - if p ~= 13 then - result[#result+1] = tc(tmp,"") - tmp = { } - p = 0 - end - elseif n == 13 then - result[#result+1] = tc(tmp,"") - tmp = { } - p = n - else - tmp[#tmp+1] = uc(n) - p = 0 - end - end - for l,r in str:bytepairs() do - if endian then - n = l*256 + r - else - n = r*256 + l - end - if m > 0 then - n = (m-0xD800)*0x400 + (n-0xDC00) + 0x10000 - m = 0 - doit() - elseif n >= 0xD800 and n <= 0xDBFF then - m = n - else - doit() - end - end - if #tmp > 0 then - result[#result+1] = tc(tmp,"") - end - garbagecollector.pop() - return result -end - -function unicode.utf32_to_utf8(str, endian) - garbagecollector.push() - local result = { } - local tc, uc = table.concat, unicode.utf8.char - local tmp, n, m, p = { }, 0, -1, 0 - -- lf | cr | crlf / (cr:13, lf:10) - local function doit() - if n == 10 then - if p ~= 13 then - result[#result+1] = tc(tmp,"") - tmp = { } - p = 0 - end - elseif n == 13 then - result[#result+1] = tc(tmp,"") - tmp = { } - p = n - else - tmp[#tmp+1] = uc(n) - p = 0 - end - end - for a,b in str:bytepairs() do - if a and b then - if m < 0 then - if endian then - m = a*256*256*256 + b*256*256 - else - m = b*256 + a - end - else - if endian then - n = m + a*256 + b - else - n = m + b*256*256*256 + a*256*256 - end - m = -1 - doit() - end - else - break - end - end - if #tmp > 0 then - result[#result+1] = tc(tmp,"") - end - garbagecollector.pop() - return result -end - - --- filename : l-utils.lua --- comment : split off from luat-lib --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['l-utils'] = 1.001 - -if not utils then utils = { } end -if not utils.merger then utils.merger = { } end -if not utils.lua then utils.lua = { } end - -utils.merger.m_begin = "begin library merge" -utils.merger.m_end = "end library merge" -utils.merger.pattern = - "%c+" .. - "%-%-%s+" .. utils.merger.m_begin .. - "%c+(.-)%c+" .. - "%-%-%s+" .. utils.merger.m_end .. - "%c+" - -function utils.merger._self_fake_() - return - "-- " .. "created merged file" .. "\n\n" .. - "-- " .. utils.merger.m_begin .. "\n\n" .. - "-- " .. utils.merger.m_end .. "\n\n" -end - -function utils.report(...) - print(...) -end - -function utils.merger._self_load_(name) - local f, data = io.open(name), "" - if f then - data = f:read("*all") - f:close() - end - return data or "" -end - -function utils.merger._self_save_(name, data) - if data ~= "" then - local f = io.open(name,'w') - if f then - f:write(data) - f:close() - end - end -end - -function utils.merger._self_swap_(data,code) - if data ~= "" then - return (data:gsub(utils.merger.pattern, function(s) - return "\n\n" .. "-- "..utils.merger.m_begin .. "\n" .. code .. "\n" .. "-- "..utils.merger.m_end .. 
"\n\n" - end, 1)) - else - return "" - end -end - -function utils.merger._self_libs_(libs,list) - local result, f = { }, nil - if type(libs) == 'string' then libs = { libs } end - if type(list) == 'string' then list = { list } end - for _, lib in ipairs(libs) do - for _, pth in ipairs(list) do - local name = string.gsub(pth .. "/" .. lib,"\\","/") - f = io.open(name) - if f then - -- utils.report("merging library",name) - result[#result+1] = f:read("*all") - f:close() - list = { pth } -- speed up the search - break - else - -- utils.report("no library",name) - end - end - end - return table.concat(result, "\n\n") -end - -function utils.merger.selfcreate(libs,list,target) - if target then - utils.merger._self_save_( - target, - utils.merger._self_swap_( - utils.merger._self_fake_(), - utils.merger._self_libs_(libs,list) - ) - ) - end -end - -function utils.merger.selfmerge(name,libs,list,target) - utils.merger._self_save_( - target or name, - utils.merger._self_swap_( - utils.merger._self_load_(name), - utils.merger._self_libs_(libs,list) - ) - ) -end - -function utils.merger.selfclean(name) - utils.merger._self_save_( - name, - utils.merger._self_swap_( - utils.merger._self_load_(name), - "" - ) - ) -end - -utils.lua.compile_strip = true - -function utils.lua.compile(luafile, lucfile) - -- utils.report("compiling",luafile,"into",lucfile) - os.remove(lucfile) - local command = "-o " .. string.quote(lucfile) .. " " .. string.quote(luafile) - if utils.lua.compile_strip then - command = "-s " .. command - end - if os.spawn("texluac " .. command) == 0 then - return true - elseif os.spawn("luac " .. command) == 0 then - return true - else - return false - end -end - - - --- filename : luat-lib.lua --- comment : companion to luat-lib.tex --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['luat-lib'] = 1.001 - --- mostcode moved to the l-*.lua and other luat-*.lua files - --- os / io - -os.setlocale(nil,nil) -- useless feature and even dangerous in luatex - --- os.platform - --- mswin|bccwin|mingw|cygwin windows --- darwin|rhapsody|nextstep macosx --- netbsd|unix unix --- linux linux - -if not io.fileseparator then - if string.find(os.getenv("PATH"),";") then - io.fileseparator, io.pathseparator, os.platform = "\\", ";", os.type or "windows" - else - io.fileseparator, io.pathseparator, os.platform = "/" , ":", os.type or "unix" - end -end - -os.platform = os.platform or os.type or (io.pathseparator == ";" and "windows") or "unix" - --- arg normalization --- --- for k,v in pairs(arg) do print(k,v) end - --- environment - -if not environment then environment = { } end - -environment.ownbin = environment.ownbin or arg[-2] or arg[-1] or arg[0] or "luatex" - -local ownpath = nil -- we could use a metatable here - -function environment.ownpath() - if not ownpath then - for p in string.gmatch(os.getenv("PATH"),"[^"..io.pathseparator.."]+") do - local b = file.join(p,environment.ownbin) - if lfs.isfile(b..".exe") or lfs.isfile(b) then - ownpath = p - break - end - end - if not ownpath then ownpath = '.' 
end - end - return ownpath -end - -if arg and (arg[0] == 'luatex' or arg[0] == 'luatex.exe') and arg[1] == "--luaonly" then - arg[-1]=arg[0] arg[0]=arg[2] for k=3,#arg do arg[k-2]=arg[k] end arg[#arg]=nil arg[#arg]=nil -end - -environment.arguments = { } -environment.files = { } -environment.sorted_argument_keys = nil - -environment.platform = os.platform - -function environment.initialize_arguments(arg) - environment.arguments = { } - environment.files = { } - environment.sorted_argument_keys = nil - for index, argument in pairs(arg) do - if index > 0 then - local flag, value = argument:match("^%-+(.+)=(.-)$") - if flag then - environment.arguments[flag] = string.unquote(value or "") - else - flag = argument:match("^%-+(.+)") - if flag then - environment.arguments[flag] = true - else - environment.files[#environment.files+1] = argument - end - end - end - end - environment.ownname = environment.ownname or arg[0] or 'unknown.lua' -end - -function environment.showarguments() - for k,v in pairs(environment.arguments) do - print(k .. " : " .. tostring(v)) - end - if #environment.files > 0 then - print("files : " .. table.concat(environment.files, " ")) - end -end - -function environment.setargument(name,value) - environment.arguments[name] = value -end - -function environment.argument(name) - if environment.arguments[name] then - return environment.arguments[name] - else - if not environment.sorted_argument_keys then - environment.sorted_argument_keys = { } - for _,v in pairs(table.sortedkeys(environment.arguments)) do - table.insert(environment.sorted_argument_keys, "^" .. v) - end - end - for _,v in pairs(environment.sorted_argument_keys) do - if name:find(v) then - return environment.arguments[v:sub(2,#v)] - end - end - end - return nil -end - -function environment.split_arguments(separator) -- rather special, cut-off before separator - local done, before, after = false, { }, { } - for _,v in ipairs(environment.original_arguments) do - if not done and v == separator then - done = true - elseif done then - after[#after+1] = v - else - before[#before+1] = v - end - end - return before, after -end - -function environment.reconstruct_commandline(arg) - if not arg then arg = environment.original_arguments end - local result = { } - for _,a in ipairs(arg) do -- ipairs 1 .. #n - local kk, vv = a:match("^(%-+.-)=(.+)$") - if kk and vv then - if vv:find(" ") then - result[#result+1] = kk .. "=" .. string.quote(vv) - else - result[#result+1] = a - end - elseif a:find(" ") then - result[#result+1] = string.quote(a) - else - result[#result+1] = a - end - end - return table.join(result," ") -end - -if arg then - environment.initialize_arguments(arg) - environment.original_arguments = arg - arg = { } -- prevent duplicate handling -end - - --- filename : luat-inp.lua --- comment : companion to luat-lib.tex --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - --- This lib is multi-purpose and can be loaded again later on so that --- additional functionality becomes available. We will split this --- module in components when we're done with prototyping. - --- TODO: os.getenv -> os.env[] --- TODO: instances.[hashes,cnffiles,configurations,522] -> ipairs (alles check, sneller) --- TODO: check escaping in find etc, too much, too slow - --- This is the first code I wrote for LuaTeX, so it needs some cleanup. 
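How the argument parser above behaves for a typical command line; the arg table is built by hand here for illustration:

-- simulate:  luatools --verbose --form=plain somefile.tex
local fakearg = { [0] = "luatools", "--verbose", "--form=plain", "somefile.tex" }
environment.initialize_arguments(fakearg)
print(environment.argument("verbose"))   -- true
print(environment.argument("format"))    -- plain, "--form" is accepted as an abbreviation
print(environment.files[1])              -- somefile.tex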
- --- To be considered: hash key lowercase, first entry in table filename --- (any case), rest paths (so no need for optimization). Or maybe a --- separate table that matches lowercase names to mixed case when --- present. In that case the lower() cases can go away. I will do that --- only when we run into problems with names ... well ... Iwona-Regular. - --- Beware, loading and saving is overloaded in luat-tmp! - -if not versions then versions = { } end versions['luat-inp'] = 1.001 -if not environment then environment = { } end -if not file then file = { } end - -if environment.aleph_mode == nil then environment.aleph_mode = true end -- temp hack - -if not input then input = { } end -if not input.suffixes then input.suffixes = { } end -if not input.formats then input.formats = { } end -if not input.aux then input.aux = { } end - -if not input.suffixmap then input.suffixmap = { } end - -if not input.locators then input.locators = { } end -- locate databases -if not input.hashers then input.hashers = { } end -- load databases -if not input.generators then input.generators = { } end -- generate databases -if not input.filters then input.filters = { } end -- conversion filters - -local format = string.format - -input.locators.notfound = { nil } -input.hashers.notfound = { nil } -input.generators.notfound = { nil } - -input.cacheversion = '1.0.1' -input.banner = nil -input.verbose = false -input.debug = false -input.cnfname = 'texmf.cnf' -input.luaname = 'texmfcnf.lua' -input.lsrname = 'ls-R' -input.luasuffix = '.tma' -input.lucsuffix = '.tmc' - --- we use a cleaned up list / format=any is a wildcard, as is *name - -input.formats['afm'] = 'AFMFONTS' input.suffixes['afm'] = { 'afm' } -input.formats['enc'] = 'ENCFONTS' input.suffixes['enc'] = { 'enc' } -input.formats['fmt'] = 'TEXFORMATS' input.suffixes['fmt'] = { 'fmt' } -input.formats['map'] = 'TEXFONTMAPS' input.suffixes['map'] = { 'map' } -input.formats['mp'] = 'MPINPUTS' input.suffixes['mp'] = { 'mp' } -input.formats['ocp'] = 'OCPINPUTS' input.suffixes['ocp'] = { 'ocp' } -input.formats['ofm'] = 'OFMFONTS' input.suffixes['ofm'] = { 'ofm', 'tfm' } -input.formats['otf'] = 'OPENTYPEFONTS' input.suffixes['otf'] = { 'otf' } -- 'ttf' -input.formats['opl'] = 'OPLFONTS' input.suffixes['opl'] = { 'opl' } -input.formats['otp'] = 'OTPINPUTS' input.suffixes['otp'] = { 'otp' } -input.formats['ovf'] = 'OVFFONTS' input.suffixes['ovf'] = { 'ovf', 'vf' } -input.formats['ovp'] = 'OVPFONTS' input.suffixes['ovp'] = { 'ovp' } -input.formats['tex'] = 'TEXINPUTS' input.suffixes['tex'] = { 'tex' } -input.formats['tfm'] = 'TFMFONTS' input.suffixes['tfm'] = { 'tfm' } -input.formats['ttf'] = 'TTFONTS' input.suffixes['ttf'] = { 'ttf', 'ttc' } -input.formats['pfb'] = 'T1FONTS' input.suffixes['pfb'] = { 'pfb', 'pfa' } -input.formats['vf'] = 'VFFONTS' input.suffixes['vf'] = { 'vf' } - -input.formats['fea'] = 'FONTFEATURES' input.suffixes['fea'] = { 'fea' } -input.formats['cid'] = 'FONTCIDMAPS' input.suffixes['cid'] = { 'cid', 'cidmap' } - -input.formats ['texmfscripts'] = 'TEXMFSCRIPTS' -- new -input.suffixes['texmfscripts'] = { 'rb', 'pl', 'py' } -- 'lua' - -input.formats ['lua'] = 'LUAINPUTS' -- new -input.suffixes['lua'] = { 'lua', 'luc', 'tma', 'tmc' } - --- here we catch a few new thingies (todo: add these paths to context.tmf) --- --- FONTFEATURES = .;$TEXMF/fonts/fea// --- FONTCIDMAPS = .;$TEXMF/fonts/cid// - -function input.checkconfigdata(instance) -- not yet ok, no time for debugging now - local function fix(varname,default) - local proname = varname .. "." .. 
instance.progname or "crap" - local p = instance.environment[proname] - local v = instance.environment[varname] - if not ((p and p ~= "") or (v and v ~= "")) then - instance.variables[varname] = default -- or environment? - end - end - fix("LUAINPUTS" , ".;$TEXINPUTS;$TEXMFSCRIPTS") - fix("FONTFEATURES", ".;$TEXMF/fonts/fea//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS") - fix("FONTCIDMAPS" , ".;$TEXMF/fonts/cid//;$OPENTYPEFONTS;$TTFONTS;$T1FONTS;$AFMFONTS") -end - --- backward compatible ones - -input.alternatives = { } - -input.alternatives['map files'] = 'map' -input.alternatives['enc files'] = 'enc' -input.alternatives['cid files'] = 'cid' -input.alternatives['fea files'] = 'fea' -input.alternatives['opentype fonts'] = 'otf' -input.alternatives['truetype fonts'] = 'ttf' -input.alternatives['truetype collections'] = 'ttc' -input.alternatives['type1 fonts'] = 'pfb' - --- obscure ones - -input.formats ['misc fonts'] = '' -input.suffixes['misc fonts'] = { } - -input.formats ['sfd'] = 'SFDFONTS' -input.suffixes ['sfd'] = { 'sfd' } -input.alternatives['subfont definition files'] = 'sfd' - -function input.reset() - - local instance = { } - - instance.rootpath = '' - instance.treepath = '' - instance.progname = environment.progname or 'context' - instance.engine = environment.engine or 'luatex' - instance.format = '' - instance.environment = { } - instance.variables = { } - instance.expansions = { } - instance.files = { } - instance.remap = { } - instance.configuration = { } - instance.setup = { } - instance.order = { } - instance.found = { } - instance.foundintrees = { } - instance.kpsevars = { } - instance.hashes = { } - instance.cnffiles = { } - instance.luafiles = { } - instance.lists = { } - instance.remember = true - instance.diskcache = true - instance.renewcache = false - instance.scandisk = true - instance.cachepath = nil - instance.loaderror = false - instance.smallcache = false - instance.savelists = true - instance.cleanuppaths = true - instance.allresults = false - instance.pattern = nil -- lists - instance.kpseonly = false -- lists - instance.cachefile = 'tmftools' - instance.loadtime = 0 - instance.starttime = 0 - instance.stoptime = 0 - instance.validfile = function(path,name) return true end - instance.data = { } -- only for loading - instance.force_suffixes = true - instance.dummy_path_expr = "^!*unset/*$" - instance.fakepaths = { } - instance.lsrmode = false - - if os.env then - -- store once, freeze and faster - for k,v in pairs(os.env) do - instance.environment[k] = input.bare_variable(v) - end - else - -- we will access os.env frequently - for k,v in pairs({'HOME','TEXMF','TEXMFCNF'}) do - local e = os.getenv(v) - if e then - -- input.report("setting",v,"to",input.bare_variable(e)) - instance.environment[v] = input.bare_variable(e) - end - end - end - - -- cross referencing - - for k, v in pairs(input.suffixes) do - for _, vv in pairs(v) do - if vv then - input.suffixmap[vv] = k - end - end - end - - return instance - -end - -function input.reset_hashes(instance) - instance.lists = { } - instance.found = { } -end - -function input.bare_variable(str) -- assumes str is a string - -- return string.gsub(string.gsub(string.gsub(str,"%s+$",""),'^"(.+)"$',"%1"),"^'(.+)'$","%1") - return (str:gsub("\s*([\"\']?)(.+)%1\s*", "%2")) -end - -if texio then - input.log = texio.write_nl -else - input.log = print -end - -function input.simple_logger(kind, name) - if name and name ~= "" then - if input.banner then - input.log(input.banner..kind..": "..name) - else - input.log("<<"..kind..": 
"..name..">>") - end - else - if input.banner then - input.log(input.banner..kind..": no name") - else - input.log("<<"..kind..": no name>>") - end - end -end - -function input.dummy_logger() -end - -function input.settrace(n) - input.trace = tonumber(n or 0) - if input.trace > 0 then - input.logger = input.simple_logger - input.verbose = true - else - input.logger = function() end - end -end - -function input.report(...) -- inefficient - if input.verbose then - if input.banner then - input.log(input.banner .. table.concat({...},' ')) - elseif input.logmode() == 'xml' then - input.log(""..table.concat({...},' ').."") - else - input.log("<<"..table.concat({...},' ')..">>") - end - end -end - -function input.reportlines(str) - if type(str) == "string" then - str = str:split("\n") - end - for _,v in pairs(str) do input.report(v) end -end - -input.settrace(tonumber(os.getenv("MTX.INPUT.TRACE") or os.getenv("MTX_INPUT_TRACE") or input.trace or 0)) - --- These functions can be used to test the performance, especially --- loading the database files. - -do - local clock = os.gettimeofday or os.clock - - function input.starttiming(instance) - if instance then - instance.starttime = clock() - if not instance.loadtime then - instance.loadtime = 0 - end - end - end - - function input.stoptiming(instance, report) - if instance then - local starttime = instance.starttime - if starttime then - local stoptime = clock() - local loadtime = stoptime - starttime - instance.stoptime = stoptime - instance.loadtime = instance.loadtime + loadtime - if report then - input.report('load time', format("%0.3f",loadtime)) - end - return loadtime - end - end - return 0 - end - -end - -function input.elapsedtime(instance) - return format("%0.3f",(instance and instance.loadtime) or 0) -end - -function input.report_loadtime(instance) - if instance then - input.report('total load time', input.elapsedtime(instance)) - end -end - -input.loadtime = input.elapsedtime - -function input.env(instance,key) - return instance.environment[key] or input.osenv(instance,key) -end - -function input.osenv(instance,key) - local ie = instance.environment - local value = ie[key] - if value == nil then - -- local e = os.getenv(key) - local e = os.env[key] - if e == nil then - -- value = "" -- false - else - value = input.bare_variable(e) - end - ie[key] = value - end - return value or "" -end - --- we follow a rather traditional approach: --- --- (1) texmf.cnf given in TEXMFCNF --- (2) texmf.cnf searched in TEXMF/web2c --- --- for the moment we don't expect a configuration file in a zip - -function input.identify_cnf(instance) - -- we no longer support treepath and rootpath (was handy for testing); - -- also we now follow the stupid route: if not set then just assume *one* - -- cnf file under texmf (i.e. distribution) - if #instance.cnffiles == 0 then - if input.env(instance,'TEXMFCNF') == "" then - local ownpath = environment.ownpath() or "." - if ownpath then - -- beware, this is tricky on my own system because at that location I do have - -- the raw tree that ends up in the zip; i.e. 
I cannot test this kind of mess - local function locate(filename,list) - local ownroot = input.normalize_name(file.join(ownpath,"../..")) - if not lfs.isdir(file.join(ownroot,"texmf")) then - ownroot = input.normalize_name(file.join(ownpath,"..")) - if not lfs.isdir(file.join(ownroot,"texmf")) then - input.verbose = true - input.report("error", "unable to identify cnf file") - return - end - end - local texmfcnf = file.join(ownroot,"texmf-local/web2c",filename) -- for minimals and myself - if not lfs.isfile(texmfcnf) then - texmfcnf = file.join(ownroot,"texmf/web2c",filename) - if not lfs.isfile(texmfcnf) then - input.verbose = true - input.report("error", "unable to locate",filename) - return - end - end - table.insert(list,texmfcnf) - local ie = instance.environment - if not ie['SELFAUTOPARENT'] then ie['SELFAUTOPARENT'] = ownroot end - if not ie['TEXMFCNF'] then ie['TEXMFCNF'] = file.dirname(texmfcnf) end - end - locate(input.luaname,instance.luafiles) - locate(input.cnfname,instance.cnffiles) - if #instance.luafiles == 0 and instance.cnffiles == 0 then - input.verbose = true - input.report("error", "unable to locate",filename) - os.exit() - end - -- here we also assume then TEXMF is set in the distribution, if this trickery is - -- used in the minimals, then users who don't use setuptex are on their own with - -- regards to extra trees - else - input.verbose = true - input.report("error", "unable to identify own path") - os.exit() - end - else - local t = input.split_path(input.env(instance,'TEXMFCNF')) - t = input.aux.expanded_path(instance,t) - input.aux.expand_vars(instance,t) - local function locate(filename,list) - for _,v in ipairs(t) do - local texmfcnf = input.normalize_name(file.join(v,filename)) - if lfs.isfile(texmfcnf) then - table.insert(list,texmfcnf) - end - end - end - locate(input.luaname,instance.luafiles) - locate(input.cnfname,instance.cnffiles) - end - end -end - -function input.load_cnf(instance) - local function loadoldconfigdata() - for _, fname in ipairs(instance.cnffiles) do - input.aux.load_cnf(instance,fname) - end - end - -- instance.cnffiles contain complete names now ! 
- if #instance.cnffiles == 0 then - input.report("no cnf files found (TEXMFCNF may not be set/known)") - else - instance.rootpath = instance.cnffiles[1] - for k,fname in ipairs(instance.cnffiles) do - instance.cnffiles[k] = input.normalize_name(fname:gsub("\\",'/')) - end - for i=1,3 do - instance.rootpath = file.dirname(instance.rootpath) - end - instance.rootpath = input.normalize_name(instance.rootpath) - instance.environment['SELFAUTOPARENT'] = instance.rootpath -- just to be sure - if instance.lsrmode then - loadoldconfigdata() - elseif instance.diskcache and not instance.renewcache then - input.loadoldconfig(instance,instance.cnffiles) - if instance.loaderror then - loadoldconfigdata() - input.saveoldconfig(instance) - end - else - loadoldconfigdata() - if instance.renewcache then - input.saveoldconfig(instance) - end - end - input.aux.collapse_cnf_data(instance) - end - input.checkconfigdata(instance) -end - -function input.load_lua(instance) - if #instance.luafiles == 0 then - -- yet harmless - else - instance.rootpath = instance.luafiles[1] - for k,fname in ipairs(instance.luafiles) do - instance.luafiles[k] = input.normalize_name(fname:gsub("\\",'/')) - end - for i=1,3 do - instance.rootpath = file.dirname(instance.rootpath) - end - instance.rootpath = input.normalize_name(instance.rootpath) - instance.environment['SELFAUTOPARENT'] = instance.rootpath -- just to be sure - input.loadnewconfig(instance) - input.aux.collapse_cnf_data(instance) - end - input.checkconfigdata(instance) -end - -function input.aux.collapse_cnf_data(instance) -- potential optmization: pass start index (setup and configuration are shared) - for _,c in ipairs(instance.order) do - for k,v in pairs(c) do - if not instance.variables[k] then - if instance.environment[k] then - instance.variables[k] = instance.environment[k] - else - instance.kpsevars[k] = true - instance.variables[k] = input.bare_variable(v) - end - end - end - end -end - -function input.aux.load_cnf(instance,fname) - fname = input.clean_path(fname) - local lname = fname:gsub("%.%a+$",input.luasuffix) - local f = io.open(lname) - if f then -- this will go - f:close() - local dname = file.dirname(fname) - if not instance.configuration[dname] then - input.aux.load_configuration(instance,dname,lname) - instance.order[#instance.order+1] = instance.configuration[dname] - end - else - f = io.open(fname) - if f then - input.report("loading", fname) - local line, data, n, k, v - local dname = file.dirname(fname) - if not instance.configuration[dname] then - instance.configuration[dname] = { } - instance.order[#instance.order+1] = instance.configuration[dname] - end - local data = instance.configuration[dname] - while true do - local line, n = f:read(), 0 - if line then - while true do -- join lines - line, n = line:gsub("\\%s*$", "") - if n > 0 then - line = line .. 
f:read() - else - break - end - end - if not line:find("^[%%#]") then - local k, v = (line:gsub("%s*%%.*$","")):match("%s*(.-)%s*=%s*(.-)%s*$") - if k and v and not data[k] then - data[k] = (v:gsub("[%%#].*",'')):gsub("~", "$HOME") - instance.kpsevars[k] = true - end - end - else - break - end - end - f:close() - else - input.report("skipping", fname) - end - end -end - --- database loading - -function input.load_hash(instance) - input.locatelists(instance) - if instance.lsrmode then - input.loadlists(instance) - elseif instance.diskcache and not instance.renewcache then - input.loadfiles(instance) - if instance.loaderror then - input.loadlists(instance) - input.savefiles(instance) - end - else - input.loadlists(instance) - if instance.renewcache then - input.savefiles(instance) - end - end -end - -function input.aux.append_hash(instance,type,tag,name) - input.logger("= hash append",tag) - table.insert(instance.hashes, { ['type']=type, ['tag']=tag, ['name']=name } ) -end - -function input.aux.prepend_hash(instance,type,tag,name) - input.logger("= hash prepend",tag) - table.insert(instance.hashes, 1, { ['type']=type, ['tag']=tag, ['name']=name } ) -end - -function input.aux.extend_texmf_var(instance,specification) -- crap - if instance.environment['TEXMF'] then - input.report("extending environment variable TEXMF with", specification) - instance.environment['TEXMF'] = instance.environment['TEXMF']:gsub("^%{", function() - return "{" .. specification .. "," - end) - elseif instance.variables['TEXMF'] then - input.report("extending configuration variable TEXMF with", specification) - instance.variables['TEXMF'] = instance.variables['TEXMF']:gsub("^%{", function() - return "{" .. specification .. "," - end) - else - input.report("setting configuration variable TEXMF to", specification) - instance.variables['TEXMF'] = "{" .. specification .. "}" - end - if instance.variables['TEXMF']:find("%,") and not instance.variables['TEXMF']:find("^%{") then - input.report("adding {} to complex TEXMF variable, best do that yourself") - instance.variables['TEXMF'] = "{" .. instance.variables['TEXMF'] .. "}" - end - input.expand_variables(instance) - input.reset_hashes(instance) -end - --- locators - -function input.locatelists(instance) - for _, path in pairs(input.simplified_list(input.expansion(instance,'TEXMF'))) do - path = file.collapse_path(path) - input.report("locating list of",path) - input.locatedatabase(instance,input.normalize_name(path)) - end -end - -function input.locatedatabase(instance,specification) - return input.methodhandler('locators', instance, specification) -end - -function input.locators.tex(instance,specification) - if specification and specification ~= '' and lfs.isdir(specification) then - input.logger('! tex locator', specification..' found') - input.aux.append_hash(instance,'file',specification,filename) - else - input.logger('? tex locator', specification..' 
not found') - end -end - --- hashers - -function input.hashdatabase(instance,tag,name) - return input.methodhandler('hashers',instance,tag,name) -end - -function input.loadfiles(instance) - instance.loaderror = false - instance.files = { } - if not instance.renewcache then - for _, hash in ipairs(instance.hashes) do - input.hashdatabase(instance,hash.tag,hash.name) - if instance.loaderror then break end - end - end -end - -function input.hashers.tex(instance,tag,name) - input.aux.load_files(instance,tag) -end - --- generators: - -function input.loadlists(instance) - for _, hash in ipairs(instance.hashes) do - input.generatedatabase(instance,hash.tag) - end -end - -function input.generatedatabase(instance,specification) - return input.methodhandler('generators', instance, specification) -end - -do - - local weird = lpeg.anywhere(lpeg.S("~`!#$%^&*()={}[]:;\"\'||<>,?\n\r\t")) - - function input.generators.tex(instance,specification) - local tag = specification - if not instance.lsrmode and lfs and lfs.dir then - input.report("scanning path",specification) - instance.files[tag] = { } - local files = instance.files[tag] - local n, m, r = 0, 0, 0 - local spec = specification .. '/' - local attributes = lfs.attributes - local directory = lfs.dir - local small = instance.smallcache - local function action(path) - local mode, full - if path then - full = spec .. path .. '/' - else - full = spec - end - for name in directory(full) do - if name:find("^%.") then - -- skip - -- elseif name:find("[%~%`%!%#%$%%%^%&%*%(%)%=%{%}%[%]%:%;\"\'%|%<%>%,%?\n\r\t]") then -- too much escaped - elseif weird:match(name) then - -- texio.write_nl("skipping " .. name) - -- skip - else - mode = attributes(full..name,'mode') - if mode == "directory" then - m = m + 1 - if path then - action(path..'/'..name) - else - action(name) - end - elseif path and mode == 'file' then - n = n + 1 - local f = files[name] - if f then - if not small then - if type(f) == 'string' then - files[name] = { f, path } - else - f[#f+1] = path - end - end - else - files[name] = path - local lower = name:lower() - if name ~= lower then - files["remap:"..lower] = name - r = r + 1 - end - end - end - end - end - end - action() - input.report(format("%s files found on %s directories with %s uppercase remappings",n,m,r)) - else - local fullname = file.join(specification,input.lsrname) - local path = '.' - local f = io.open(fullname) - if f then - instance.files[tag] = { } - local files = instance.files[tag] - local small = instance.smallcache - input.report("loading lsr file",fullname) - -- for line in f:lines() do -- much slower then the next one - for line in (f:read("*a")):gmatch("(.-)\n") do - if line:find("^[%a%d]") then - local fl = files[line] - if fl then - if not small then - if type(fl) == 'string' then - files[line] = { fl, path } -- table - else - fl[#fl+1] = path - end - end - else - files[line] = path -- string - local lower = line:lower() - if line ~= lower then - files["remap:"..lower] = line - end - end - else - path = line:match("%.%/(.-)%:$") or path -- match could be nil due to empty line - end - end - f:close() - end - end - end - -end - --- savers, todo - -function input.savefiles(instance) - input.aux.save_data(instance, 'files', function(k,v) - return instance.validfile(k,v) -- path, name - end) -end - --- A config (optionally) has the paths split in tables. Internally --- we join them and split them after the expansion has taken place. This --- is more convenient. 
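The locating, hashing and generating machinery above is driven through a resolver instance; the sketch below shows one plausible bootstrap order using only functions defined in this file, and is not the actual driver code, which wires these calls together elsewhere in luatools:

local instance = input.reset()        -- fresh instance, environment copied in
input.settrace(0)                     -- 0 is quiet, higher values enable the simple logger
input.starttiming(instance)
input.identify_cnf(instance)          -- locate texmfcnf.lua / texmf.cnf
input.load_lua(instance)              -- new style (Lua) configuration, if present
input.load_cnf(instance)              -- old style configuration, possibly from the cache
input.expand_variables(instance)      -- build the expansion table
input.load_hash(instance)             -- scan the trees or load ls-R / cached file lists
input.stoptiming(instance, true)      -- true: report the load time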
- -function input.splitconfig(instance) - for i,c in ipairs(instance) do - for k,v in pairs(c) do - if type(v) == 'string' then - local t = file.split_path(v) - if #t > 1 then - c[k] = t - end - end - end - end -end -function input.joinconfig(instance) - for i,c in ipairs(instance.order) do - for k,v in pairs(c) do - if type(v) == 'table' then - c[k] = file.join_path(v) - end - end - end -end -function input.split_path(str) - if type(str) == 'table' then - return str - else - return file.split_path(str) - end -end -function input.join_path(str) - if type(str) == 'table' then - return file.join_path(str) - else - return str - end -end - -function input.splitexpansions(instance) - for k,v in pairs(instance.expansions) do - local t, h = { }, { } - for _,vv in pairs(file.split_path(v)) do - if vv ~= "" and not h[vv] then - t[#t+1] = vv - h[vv] = true - end - end - if #t > 1 then - instance.expansions[k] = t - else - instance.expansions[k] = t[1] - end - end -end - --- end of split/join code - -function input.saveoldconfig(instance) - input.splitconfig(instance) - input.aux.save_data(instance, 'configuration', nil) - input.joinconfig(instance) -end - -input.configbanner = [[ --- This is a Luatex configuration file created by 'luatools.lua' or --- 'luatex.exe' directly. For comment, suggestions and questions you can --- contact the ConTeXt Development Team. This configuration file is --- not copyrighted. [HH & TH] -]] - -function input.serialize(files) - -- This version is somewhat optimized for the kind of - -- tables that we deal with, so it's much faster than - -- the generic serializer. This makes sense because - -- luatools and mtxtools are called frequently. Okay, - -- we pay a small price for properly tabbed tables. - local t = { } - local concat = table.concat - local sorted = table.sortedkeys - local function dump(k,v,m) - if type(v) == 'string' then - return m .. "['" .. k .. "']='" .. v .. "'," - elseif #v == 1 then - return m .. "['" .. k .. "']='" .. v[1] .. "'," - else - return m .. "['" .. k .. "']={'" .. concat(v,"','").. "'}," - end - end - t[#t+1] = "return {" - if instance.sortdata then - for _, k in pairs(sorted(files)) do - local fk = files[k] - if type(fk) == 'table' then - t[#t+1] = "\t['" .. k .. "']={" - for _, kk in pairs(sorted(fk)) do - t[#t+1] = dump(kk,fk[kk],"\t\t") - end - t[#t+1] = "\t}," - else - t[#t+1] = dump(k,fk,"\t") - end - end - else - for k, v in pairs(files) do - if type(v) == 'table' then - t[#t+1] = "\t['" .. k .. "']={" - for kk,vv in pairs(v) do - t[#t+1] = dump(kk,vv,"\t\t") - end - t[#t+1] = "\t}," - else - t[#t+1] = dump(k,v,"\t") - end - end - end - t[#t+1] = "}" - return concat(t,"\n") -end - -if not texmf then texmf = {} end -- no longer needed, at least not here - -function input.aux.save_data(instance, dataname, check, makename) -- untested without cache overload - for cachename, files in pairs(instance[dataname]) do - local name = (makename or file.join)(cachename,dataname) - local luaname, lucname = name .. input.luasuffix, name .. input.lucsuffix - input.report("preparing " .. dataname .. " for", luaname) - for k, v in pairs(files) do - if not check or check(v,k) then -- path, name - if type(v) == "table" and #v == 1 then - files[k] = v[1] - end - else - files[k] = nil -- false - end - end - local data = { - type = dataname, - root = cachename, - version = input.cacheversion, - date = os.date("%Y-%m-%d"), - time = os.date("%H:%M:%S"), - content = files, - } - local f = io.open(luaname,'w') - if f then - input.report("saving " .. 
dataname .. " in", luaname) - f:write(input.serialize(data)) - f:close() - input.report("compiling " .. dataname .. " to", lucname) - if not utils.lua.compile(luaname,lucname) then - input.report("compiling failed for " .. dataname .. ", deleting file " .. lucname) - os.remove(lucname) - end - else - input.report("unable to save " .. dataname .. " in " .. name..input.luasuffix) - end - end -end - -function input.aux.load_data(instance,pathname,dataname,filename,makename) -- untested without cache overload - filename = ((not filename or (filename == "")) and dataname) or filename - filename = (makename and makename(dataname,filename)) or file.join(pathname,filename) - local blob = loadfile(filename .. input.lucsuffix) or loadfile(filename .. input.luasuffix) - if blob then - local data = blob() - if data and data.content and data.type == dataname and data.version == input.cacheversion then - input.report("loading",dataname,"for",pathname,"from",filename) - instance[dataname][pathname] = data.content - else - input.report("skipping",dataname,"for",pathname,"from",filename) - instance[dataname][pathname] = { } - instance.loaderror = true - end - else - input.report("skipping",dataname,"for",pathname,"from",filename) - end -end - --- some day i'll use the nested approach, but not yet (actually we even drop --- engine/progname support since we have only luatex now) --- --- first texmfcnf.lua files are located, next the cached texmf.cnf files --- --- return { --- TEXMFBOGUS = 'effe checken of dit werkt', --- } - -function input.aux.load_texmfcnf(instance,dataname,pathname) - local filename = file.join(pathname,input.luaname) - local blob = loadfile(filename) - if blob then - local data = blob() - if data then - input.report("loading","configuration file",filename) - if true then - -- flatten to variable.progname - local t = { } - for k, v in pairs(data) do -- v = progname - if type(v) == "string" then - t[k] = v - else - for kk, vv in pairs(v) do -- vv = variable - if type(vv) == "string" then - t[vv.."."..v] = kk - end - end - end - end - instance[dataname][pathname] = t - else - instance[dataname][pathname] = data - end - else - input.report("skipping","configuration file",filename) - instance[dataname][pathname] = { } - instance.loaderror = true - end - else - input.report("skipping","configuration file",filename) - end -end - -function input.aux.load_configuration(instance,dname,lname) - input.aux.load_data(instance,dname,'configuration',lname and file.basename(lname)) -end -function input.aux.load_files(instance,tag) - input.aux.load_data(instance,tag,'files') -end - -function input.resetconfig(instance) - instance.configuration, instance.setup, instance.order, instance.loaderror = { }, { }, { }, false -end - -function input.loadnewconfig(instance) - for _, cnf in ipairs(instance.luafiles) do - local dname = file.dirname(cnf) - input.aux.load_texmfcnf(instance,'setup',dname) - instance.order[#instance.order+1] = instance.setup[dname] - if instance.loaderror then break end - end -end - -function input.loadoldconfig(instance) - if not instance.renewcache then - for _, cnf in ipairs(instance.cnffiles) do - local dname = file.dirname(cnf) - input.aux.load_configuration(instance,dname) - instance.order[#instance.order+1] = instance.configuration[dname] - if instance.loaderror then break end - end - end - input.joinconfig(instance) -end - -function input.expand_variables(instance) - instance.expansions = { } ---~ instance.environment['SELFAUTOPARENT'] = instance.environment['SELFAUTOPARENT'] or 
instance.rootpath - if instance.engine ~= "" then instance.environment['engine'] = instance.engine end - if instance.progname ~= "" then instance.environment['progname'] = instance.progname end - for k,v in pairs(instance.environment) do - local a, b = k:match("^(%a+)%_(.*)%s*$") - if a and b then - instance.expansions[a..'.'..b] = v - else - instance.expansions[k] = v - end - end - for k,v in pairs(instance.environment) do -- move environment to expansions - if not instance.expansions[k] then instance.expansions[k] = v end - end - for k,v in pairs(instance.variables) do -- move variables to expansions - if not instance.expansions[k] then instance.expansions[k] = v end - end - while true do - local busy = false - for k,v in pairs(instance.expansions) do - local s, n = v:gsub("%$([%a%d%_%-]+)", function(a) - busy = true - return instance.expansions[a] or input.env(instance,a) - end) - local s, m = s:gsub("%$%{([%a%d%_%-]+)%}", function(a) - busy = true - return instance.expansions[a] or input.env(instance,a) - end) - if n > 0 or m > 0 then - instance.expansions[k]= s - end - end - if not busy then break end - end - local homedir = - instance.environment[(os.type == "windows" and 'USERPROFILE') or 'HOME'] or '~' - for k,v in pairs(instance.expansions) do - v = v:gsub("^~", homedir) - instance.expansions[k] = v:gsub("\\", '/') - end -end - -function input.aux.expand_vars(instance,lst) -- simple vars - for k,v in pairs(lst) do - lst[k] = v:gsub("%$([%a%d%_%-]+)", function(a) - return instance.variables[a] or input.env(instance,a) - end) - end -end - -function input.aux.expanded_var(instance,var) -- simple vars - return var:gsub("%$([%a%d%_%-]+)", function(a) - return instance.variables[a] or input.env(instance,a) - end) -end - -function input.aux.entry(instance,entries,name) - if name and (name ~= "") then - name = name:gsub('%$','') - local result = entries[name..'.'..instance.progname] or entries[name] - if result then - return result - else - result = input.env(instance,name) - if result then - instance.variables[name] = result - input.expand_variables(instance) - return instance.expansions[name] or "" - end - end - end - return "" -end -function input.variable(instance,name) - return input.aux.entry(instance,instance.variables,name) -end -function input.expansion(instance,name) - return input.aux.entry(instance,instance.expansions,name) -end - -function input.aux.is_entry(instance,entries,name) - if name and name ~= "" then - name = name:gsub('%$','') - return (entries[name..'.'..instance.progname] or entries[name]) ~= nil - else - return false - end -end - -function input.is_variable(instance,name) - return input.aux.is_entry(instance,instance.variables,name) -end -function input.is_expansion(instance,name) - return input.aux.is_entry(instance,instance.expansions,name) -end - -function input.simplified_list(str) - if type(str) == 'table' then - return str -- troubles ; ipv , in texmf - elseif str == '' then - return { } - else - local t = { } - for _,v in ipairs(string.splitchr(str:gsub("^\{(.+)\}$","%1"),",")) do - t[#t+1] = (v:gsub("^[%!]*(.+)[%/\\]*$","%1")) - end - return t - end -end - -function input.unexpanded_path_list(instance,str) - local pth = input.variable(instance,str) - local lst = input.split_path(pth) - return input.aux.expanded_path(instance,lst) -end -function input.unexpanded_path(instance,str) - return file.join_path(input.unexpanded_path_list(instance,str)) -end - -do - local done = { } - - function input.reset_extra_path(instance) - local ep = instance.extra_paths - 
if not ep then - ep, done = { }, { } - instance.extra_paths = ep - elseif #ep > 0 then - instance.lists, done = { }, { } - end - end - - function input.register_extra_path(instance,paths,subpaths) - local ep = instance.extra_paths or { } - local n = #ep - if paths and paths ~= "" then - if subpaths and subpaths ~= "" then - for p in paths:gmatch("[^,]+") do - -- we gmatch each step again, not that fast, but used seldom - for s in subpaths:gmatch("[^,]+") do - local ps = p .. "/" .. s - if not done[ps] then - ep[#ep+1] = input.clean_path(ps) - done[ps] = true - end - end - end - else - for p in paths:gmatch("[^,]+") do - if not done[p] then - ep[#ep+1] = input.clean_path(p) - done[p] = true - end - end - end - elseif subpaths and subpaths ~= "" then - for i=1,n do - -- we gmatch each step again, not that fast, but used seldom - for s in subpaths:gmatch("[^,]+") do - local ps = ep[i] .. "/" .. s - if not done[ps] then - ep[#ep+1] = input.clean_path(ps) - done[ps] = true - end - end - end - end - if #ep > 0 then - instance.extra_paths = ep -- register paths - end - if #ep > n then - instance.lists = { } -- erase the cache - end - end - -end - -function input.expanded_path_list(instance,str) - local function made_list(list) - local ep = instance.extra_paths - if not ep or #ep == 0 then - return list - else - local done, new = { }, { } - -- honour . .. ../.. but only when at the start - for k, v in ipairs(list) do - if not done[v] then - if v:find("^[%.%/]$") then - done[v] = true - new[#new+1] = v - else - break - end - end - end - -- first the extra paths - for k, v in ipairs(ep) do - if not done[v] then - done[v] = true - new[#new+1] = v - end - end - -- next the formal paths - for k, v in ipairs(list) do - if not done[v] then - done[v] = true - new[#new+1] = v - end - end - return new - end - end - if not str then - return ep or { } - elseif instance.savelists then - -- engine+progname hash - str = str:gsub("%$","") - if not instance.lists[str] then -- cached - local lst = made_list(input.split_path(input.expansion(instance,str))) - instance.lists[str] = input.aux.expanded_path(instance,lst) - end - return instance.lists[str] - else - local lst = input.split_path(input.expansion(instance,str)) - return made_list(input.aux.expanded_path(instance,lst)) - end -end - -function input.expand_path(instance,str) - return file.join_path(input.expanded_path_list(instance,str)) -end - ---~ function input.first_writable_path(instance,name) ---~ for _,v in pairs(input.expanded_path_list(instance,name)) do ---~ if file.is_writable(file.join(v,'luatex-cache.tmp')) then ---~ return v ---~ end ---~ end ---~ return "." 
---~ end - -function input.expanded_path_list_from_var(instance,str) -- brrr - local tmp = input.var_of_format_or_suffix(str:gsub("%$","")) - if tmp ~= "" then - return input.expanded_path_list(instance,str) - else - return input.expanded_path_list(instance,tmp) - end -end -function input.expand_path_from_var(instance,str) - return file.join_path(input.expanded_path_list_from_var(instance,str)) -end - -function input.format_of_var(str) - return input.formats[str] or input.formats[input.alternatives[str]] or '' -end -function input.format_of_suffix(str) - return input.suffixmap[file.extname(str)] or 'tex' -end - -function input.variable_of_format(str) - return input.formats[str] or input.formats[input.alternatives[str]] or '' -end - -function input.var_of_format_or_suffix(str) - local v = input.formats[str] - if v then - return v - end - v = input.formats[input.alternatives[str]] - if v then - return v - end - v = input.suffixmap[file.extname(str)] - if v then - return input.formats[isf] - end - return '' -end - -function input.expand_braces(instance,str) -- output variable and brace expansion of STRING - local ori = input.variable(instance,str) - local pth = input.aux.expanded_path(instance,input.split_path(ori)) - return file.join_path(pth) -end - --- {a,b,c,d} --- a,b,c/{p,q,r},d --- a,b,c/{p,q,r}/d/{x,y,z}// --- a,b,c/{p,q/{x,y,z},r},d/{p,q,r} --- a,b,c/{p,q/{x,y,z},r},d/{p,q,r} --- a{b,c}{d,e}f --- {a,b,c,d} --- {a,b,c/{p,q,r},d} --- {a,b,c/{p,q,r}/d/{x,y,z}//} --- {a,b,c/{p,q/{x,y,z}},d/{p,q,r}} --- {a,b,c/{p,q/{x,y,z},w}v,d/{p,q,r}} - --- this one is better and faster, but it took me a while to realize --- that this kind of replacement is cleaner than messy parsing and --- fuzzy concatenating we can probably gain a bit with selectively --- applying lpeg, but experiments with lpeg parsing this proved not to --- work that well; the parsing is ok, but dealing with the resulting --- table is a pain because we need to work inside-out recursively - --- get rid of piecewise here, just a gmatch is ok - -function input.aux.splitpathexpr(str, t, validate) - -- no need for optimization, only called a few times, we can use lpeg for the sub - t = t or { } - local concat = table.concat - while true do - local done = false - while true do - local ok = false - str = str:gsub("([^{},]+){([^{}]-)}", function(a,b) - local t = { } - b:piecewise(",", function(s) t[#t+1] = a .. s end) - ok, done = true, true - return "{" .. concat(t,",") .. "}" - end) - if not ok then break end - end - while true do - local ok = false - str = str:gsub("{([^{}]-)}([^{},]+)", function(a,b) - local t = { } - a:piecewise(",", function(s) t[#t+1] = s .. b end) - ok, done = true, true - return "{" .. concat(t,",") .. "}" - end) - if not ok then break end - end - while true do - local ok = false - str = str:gsub("([,{]){([^{}]+)}([,}])", function(a,b,c) - ok, done = true, true - return a .. b .. c - end) - if not ok then break end - end - if not done then break end - end - while true do - local ok = false - str = str:gsub("{([^{}]-)}{([^{}]-)}", function(a,b) - local t = { } - a:piecewise(",", function(sa) - b:piecewise(",", function(sb) - t[#t+1] = sa .. sb - end) - end) - ok = true - return "{" .. concat(t,",") .. 
"}" - end) - if not ok then break end - end - while true do - local ok = false - str = str:gsub("{([^{}]-)}", function(a) - ok = true - return a - end) - if not ok then break end - end - if validate then - str:piecewise(",", function(s) - s = validate(s) - if s then t[#t+1] = s end - end) - else - str:piecewise(",", function(s) - t[#t+1] = s - end) - end - return t -end - -function input.aux.expanded_path(instance,pathlist) -- maybe not a list, just a path - -- a previous version fed back into pathlist - local newlist, ok = { }, false - for _,v in ipairs(pathlist) do - if v:find("[{}]") then - ok = true - break - end - end - if ok then - for _, v in ipairs(pathlist) do - input.aux.splitpathexpr(v, newlist, function(s) - s = file.collapse_path(s) - return s ~= "" and not s:find(instance.dummy_path_expr) and s - end) - end - else - for _,v in ipairs(pathlist) do - for vv in string.gmatch(v..',',"(.-),") do - vv = file.collapse_path(v) - if vv ~= "" then newlist[#newlist+1] = vv end - end - end - end - return newlist -end - -input.is_readable = { } - -function input.aux.is_readable(readable, name) - if input.trace > 2 then - if readable then - input.logger("+ readable", name) - else - input.logger("- readable", name) - end - end - return readable -end - -function input.is_readable.file(name) - -- return input.aux.is_readable(file.is_readable(name), name) - return input.aux.is_readable(input.aux.is_file(name), name) -end - -input.is_readable.tex = input.is_readable.file - --- name --- name/name - -function input.aux.collect_files(instance,names) - local filelist = { } - for _, fname in pairs(names) do - if fname then - if input.trace > 2 then - input.logger("? blobpath asked",fname) - end - local bname = file.basename(fname) - local dname = file.dirname(fname) - if dname == "" or dname:find("^%.") then - dname = false - else - dname = "/" .. dname .. "$" - end - for _, hash in ipairs(instance.hashes) do - local blobpath = hash.tag - local files = blobpath and instance.files[blobpath] - if files then - if input.trace > 2 then - input.logger('? blobpath do',blobpath .. " (" .. bname ..")") - end - local blobfile = files[bname] - if not blobfile then - local rname = "remap:"..bname - blobfile = files[rname] - if blobfile then - bname = files[rname] - blobfile = files[bname] - end - end - if blobfile then - if type(blobfile) == 'string' then - if not dname or blobfile:find(dname) then - filelist[#filelist+1] = { - hash.type, - file.join(blobpath,blobfile,bname), -- search - input.concatinators[hash.type](blobpath,blobfile,bname) -- result - } - end - else - for _, vv in pairs(blobfile) do - if not dname or vv:find(dname) then - filelist[#filelist+1] = { - hash.type, - file.join(blobpath,vv,bname), -- search - input.concatinators[hash.type](blobpath,vv,bname) -- result - } - end - end - end - end - elseif input.trace > 1 then - input.logger('! blobpath no',blobpath .. " (" .. 
bname ..")" ) - end - end - end - end - if #filelist > 0 then - return filelist - else - return nil - end -end - -function input.suffix_of_format(str) - if input.suffixes[str] then - return input.suffixes[str][1] - else - return "" - end -end - -function input.suffixes_of_format(str) - if input.suffixes[str] then - return input.suffixes[str] - else - return {} - end -end - -do - - -- called about 700 times for an empty doc (font initializations etc) - -- i need to weed the font files for redundant calls - - local letter = lpeg.R("az","AZ") - local separator = lpeg.P("://") - - local qualified = lpeg.P(".")^0 * lpeg.P("/") + letter*lpeg.P(":") + letter^1*separator - local rootbased = lpeg.P("/") + letter*lpeg.P(":") - - -- ./name ../name /name c: :// - function input.aux.qualified_path(filename) - return qualified:match(filename) - end - function input.aux.rootbased_path(filename) - return rootbased:match(filename) - end - - function input.normalize_name(original) - return original - end - - input.normalize_name = file.collapse_path - -end - -function input.aux.register_in_trees(instance,name) - if not name:find("^%.") then - instance.foundintrees[name] = (instance.foundintrees[name] or 0) + 1 -- maybe only one - end -end - --- split the next one up, better for jit - -function input.aux.find_file(instance,filename) -- todo : plugin (scanners, checkers etc) - local result = { } - local stamp = nil - filename = input.normalize_name(filename) -- elsewhere - filename = file.collapse_path(filename:gsub("\\","/")) -- elsewhere - -- speed up / beware: format problem - if instance.remember then - stamp = filename .. "--" .. instance.engine .. "--" .. instance.progname .. "--" .. instance.format - if instance.found[stamp] then - input.logger('! remembered', filename) - return instance.found[stamp] - end - end - if filename:find('%*') then - input.logger('! wildcard', filename) - result = input.find_wildcard_files(instance,filename) - elseif input.aux.qualified_path(filename) then - if input.is_readable.file(filename) then - input.logger('! qualified', filename) - result = { filename } - else - local forcedname, ok = "", false - if file.extname(filename) == "" then - if instance.format == "" then - forcedname = filename .. ".tex" - if input.is_readable.file(forcedname) then - input.logger('! no suffix, forcing standard filetype tex') - result, ok = { forcedname }, true - end - else - for _, s in pairs(input.suffixes_of_format(instance.format)) do - forcedname = filename .. "." .. s - if input.is_readable.file(forcedname) then - input.logger('! no suffix, forcing format filetype', s) - result, ok = { forcedname }, true - break - end - end - end - end - if not ok then - input.logger('? qualified', filename) - end - end - else - -- search spec - local filetype, extra, done, wantedfiles, ext = '', nil, false, { }, file.extname(filename) - if ext == "" then - if not instance.force_suffixes then - wantedfiles[#wantedfiles+1] = filename - end - else - wantedfiles[#wantedfiles+1] = filename - end - if instance.format == "" then - if ext == "" then - local forcedname = filename .. '.tex' - wantedfiles[#wantedfiles+1] = forcedname - filetype = input.format_of_suffix(forcedname) - input.logger('! forcing filetype',filetype) - else - filetype = input.format_of_suffix(filename) - input.logger('! using suffix based filetype',filetype) - end - else - if ext == "" then - for _, s in pairs(input.suffixes_of_format(instance.format)) do - wantedfiles[#wantedfiles+1] = filename .. "." .. 
s - end - end - filetype = instance.format - input.logger('! using given filetype',filetype) - end - local typespec = input.variable_of_format(filetype) - local pathlist = input.expanded_path_list(instance,typespec) - if not pathlist or #pathlist == 0 then - -- no pathlist, access check only / todo == wildcard - if input.trace > 2 then - input.logger('? filename',filename) - input.logger('? filetype',filetype or '?') - input.logger('? wanted files',table.concat(wantedfiles," | ")) - end - for _, fname in pairs(wantedfiles) do - if fname and input.is_readable.file(fname) then - filename, done = fname, true - result[#result+1] = file.join('.',fname) - break - end - end - -- this is actually 'other text files' or 'any' or 'whatever' - local filelist = input.aux.collect_files(instance,wantedfiles) - local fl = filelist and filelist[1] - if fl then - filename = fl[3] - result[#result+1] = filename - done = true - end - else - -- list search - local filelist = input.aux.collect_files(instance,wantedfiles) - local doscan, recurse - if input.trace > 2 then - input.logger('? filename',filename) - -- if pathlist then input.logger('? path list',table.concat(pathlist," | ")) end - -- if filelist then input.logger('? file list',table.concat(filelist," | ")) end - end - -- a bit messy ... esp the doscan setting here - for _, path in pairs(pathlist) do - if path:find("^!!") then doscan = false else doscan = true end - if path:find("//$") then recurse = true else recurse = false end - local pathname = path:gsub("^!+", '') - done = false - -- using file list - if filelist and not (done and not instance.allresults) and recurse then - -- compare list entries with permitted pattern - pathname = pathname:gsub("([%-%.])","%%%1") -- this also influences - pathname = pathname:gsub("/+$", '/.*') -- later usage of pathname - pathname = pathname:gsub("//", '/.-/') -- not ok for /// but harmless - local expr = "^" .. pathname - -- input.debug('?',expr) - for _, fl in ipairs(filelist) do - local f = fl[2] - if f:find(expr) then - -- input.debug('T',' '..f) - if input.trace > 2 then - input.logger('= found in hash',f) - end - --- todo, test for readable - result[#result+1] = fl[3] - input.aux.register_in_trees(instance,f) -- for tracing used files - done = true - if not instance.allresults then break end - else - -- input.debug('F',' '..f) - end - end - end - if not done and doscan then - -- check if on disk / unchecked / does not work at all / also zips - if input.method_is_file(pathname) then -- ? 
- local pname = pathname:gsub("%.%*$",'') - if not pname:find("%*") then - local ppname = pname:gsub("/+$","") - if input.aux.can_be_dir(instance,ppname) then - for _, w in pairs(wantedfiles) do - local fname = file.join(ppname,w) - if input.is_readable.file(fname) then - if input.trace > 2 then - input.logger('= found by scanning',fname) - end - result[#result+1] = fname - done = true - if not instance.allresults then break end - end - end - else - -- no access needed for non existing path, speedup (esp in large tree with lots of fake) - end - end - end - end - if not done and doscan then - -- todo: slow path scanning - end - if done and not instance.allresults then break end - end - end - end - for k,v in pairs(result) do - result[k] = file.collapse_path(v) - end - if instance.remember then - instance.found[stamp] = result - end - return result -end - -input.aux._find_file_ = input.aux.find_file - -function input.aux.find_file(instance,filename) -- maybe make a lowres cache too - local result = input.aux._find_file_(instance,filename) - if #result == 0 then - local lowered = filename:lower() - if filename ~= lowered then - return input.aux._find_file_(instance,lowered) - end - end - return result -end - -if lfs and lfs.isfile then - input.aux.is_file = lfs.isfile -- to be done: use this -else - input.aux.is_file = file.is_readable -end - -if lfs and lfs.isdir then - function input.aux.can_be_dir(instance,name) - if not instance.fakepaths[name] then - if lfs.isdir(name) then - instance.fakepaths[name] = 1 -- directory - else - instance.fakepaths[name] = 2 -- no directory - end - end - return (instance.fakepaths[name] == 1) - end -else - function input.aux.can_be_dir() - return true - end -end - -if not input.concatinators then input.concatinators = { } end - -input.concatinators.tex = file.join -input.concatinators.file = input.concatinators.tex - -function input.find_files(instance,filename,filetype,mustexist) - if type(mustexist) == boolean then - -- all set - elseif type(filetype) == 'boolean' then - filetype, mustexist = nil, false - elseif type(filetype) ~= 'string' then - filetype, mustexist = nil, false - end - instance.format = filetype or '' - local t = input.aux.find_file(instance,filename,true) - instance.format = '' - return t -end - -function input.find_file(instance,filename,filetype,mustexist) - return (input.find_files(instance,filename,filetype,mustexist)[1] or "") -end - -function input.find_given_files(instance,filename) - local bname, result = file.basename(filename), { } - for k, hash in ipairs(instance.hashes) do - local files = instance.files[hash.tag] - local blist = files[bname] - if not blist then - local rname = "remap:"..bname - blist = files[rname] - if blist then - bname = files[rname] - blist = files[bname] - end - end - if blist then - if type(blist) == 'string' then - result[#result+1] = input.concatinators[hash.type](hash.tag,blist,bname) or "" - if not instance.allresults then break end - else - for kk,vv in pairs(blist) do - result[#result+1] = input.concatinators[hash.type](hash.tag,vv,bname) or "" - if not instance.allresults then break end - end - end - end - end - return result -end - -function input.find_given_file(instance,filename) - return (input.find_given_files(instance,filename)[1] or "") -end - -function input.find_wildcard_files(instance,filename) -- todo: remap: - local result = { } - local bname, dname = file.basename(filename), file.dirname(filename) - local path = dname:gsub("^*/","") - path = path:gsub("*",".*") - path = 
path:gsub("-","%%-") - if dname == "" then - path = ".*" - end - local name = bname - name = name:gsub("*",".*") - name = name:gsub("-","%%-") - path = path:lower() - name = name:lower() - local function doit(blist,bname,hash,allresults) - local done = false - if blist then - if type(blist) == 'string' then - -- make function and share code - if (blist:lower()):find(path) then - result[#result+1] = input.concatinators[hash.type](hash.tag,blist,bname) or "" - done = true - end - else - for kk,vv in pairs(blist) do - if (vv:lower()):find(path) then - result[#result+1] = input.concatinators[hash.type](hash.tag,vv,bname) or "" - done = true - if not allresults then break end - end - end - end - end - return done - end - local files, allresults, done = instance.files, instance.allresults, false - if name:find("%*") then - for k, hash in ipairs(instance.hashes) do - for kk, hh in pairs(files[hash.tag]) do - if not kk:find("^remap:") then - if (kk:lower()):find(name) then - if doit(hh,kk,hash,allresults) then done = true end - if done and not allresults then break end - end - end - end - end - else - for k, hash in ipairs(instance.hashes) do - if doit(files[hash.tag][bname],bname,hash,allresults) then done = true end - if done and not allresults then break end - end - end - return result -end - -function input.find_wildcard_file(instance,filename) - return (input.find_wildcard_files(instance,filename)[1] or "") -end - --- main user functions - -function input.save_used_files_in_trees(instance, filename,jobname) - if not filename then filename = 'luatex.jlg' end - local f = io.open(filename,'w') - if f then - f:write("\n") - f:write("\n") - if jobname then - f:write("\t" .. jobname .. "\n") - end - f:write("\t\n") - for _,v in pairs(table.sortedkeys(instance.foundintrees)) do - f:write("\t\t" .. v .. "\n") - end - f:write("\t\n") - f:write("\n") - f:close() - end -end - -function input.automount(instance) - -- implemented later -end - -function input.load(instance) - input.starttiming(instance) - input.resetconfig(instance) - input.identify_cnf(instance) - input.load_lua(instance) - input.expand_variables(instance) - input.load_cnf(instance) - input.expand_variables(instance) - input.load_hash(instance) - input.automount(instance) - input.stoptiming(instance) -end - -function input.for_files(instance, command, files, filetype, mustexist) - if files and #files > 0 then - local function report(str) - if input.verbose then - input.report(str) -- has already verbose - else - print(str) - end - end - if input.verbose then - report('') - end - for _, file in pairs(files) do - local result = command(instance,file,filetype,mustexist) - if type(result) == 'string' then - report(result) - else - for _,v in pairs(result) do - report(v) - end - end - end - end -end - --- strtab - -function input.var_value(instance,str) -- output the value of variable $STRING. - return input.variable(instance,str) -end -function input.expand_var(instance,str) -- output variable expansion of STRING. 
- return input.expansion(instance,str) -end -function input.show_path(instance,str) -- output search path for file type NAME - return file.join_path(input.expanded_path_list(instance,input.format_of_var(str))) -end - --- input.find_file(filename) --- input.find_file(filename, filetype, mustexist) --- input.find_file(filename, mustexist) --- input.find_file(filename, filetype) - -function input.aux.register_file(files, name, path) - if files[name] then - if type(files[name]) == 'string' then - files[name] = { files[name], path } - else - files[name] = path - end - else - files[name] = path - end -end - -if not input.finders then input.finders = { } end -if not input.openers then input.openers = { } end -if not input.loaders then input.loaders = { } end - -input.finders.notfound = { nil } -input.openers.notfound = { nil } -input.loaders.notfound = { false, nil, 0 } - -function input.splitmethod(filename) - if not filename then - return { } -- safeguard - elseif type(filename) == "table" then - return filename -- already split - elseif not filename:find("://") then - return { scheme="file", path = filename, original=filename } -- quick hack - else - return url.hashed(filename) - end -end - -function input.method_is_file(filename) - return input.splitmethod(filename).scheme == 'file' -end - -function table.sequenced(t,sep) -- temp here - local s = { } - for k, v in pairs(t) do - s[#s+1] = k .. "=" .. v - end - return table.concat(s, sep or " | ") -end - -function input.methodhandler(what, instance, filename, filetype) -- ... - local specification = (type(filename) == "string" and input.splitmethod(filename)) or filename -- no or { }, let it bomb - local scheme = specification.scheme - if input[what][scheme] then - input.logger('= handler',specification.original .." -> " .. what .. " -> " .. table.sequenced(specification)) - return input[what][scheme](instance,filename,filetype) -- todo: specification - else - return input[what].tex(instance,filename,filetype) -- todo: specification - end -end - --- also inside next test? - -function input.findtexfile(instance, filename, filetype) - return input.methodhandler('finders',instance, input.normalize_name(filename), filetype) -end -function input.opentexfile(instance,filename) - return input.methodhandler('openers',instance, input.normalize_name(filename)) -end - -function input.findbinfile(instance, filename, filetype) - return input.methodhandler('finders',instance, input.normalize_name(filename), filetype) -end -function input.openbinfile(instance,filename) - return input.methodhandler('loaders',instance, input.normalize_name(filename)) -end - -function input.loadbinfile(instance, filename, filetype) - local fname = input.findbinfile(instance, input.normalize_name(filename), filetype) - if fname and fname ~= "" then - return input.openbinfile(instance,fname) - else - return unpack(input.loaders.notfound) - end -end - -function input.texdatablob(instance, filename, filetype) - local ok, data, size = input.loadbinfile(instance, filename, filetype) - return data or "" -end - -input.loadtexfile = input.texdatablob - -function input.openfile(filename) -- brrr texmf.instance here / todo ! ! ! ! ! 
- local fullname = input.findtexfile(texmf.instance, filename) - if fullname and (fullname ~= "") then - return input.opentexfile(texmf.instance, fullname) - else - return nil - end -end - -function input.logmode() - return (os.getenv("MTX.LOG.MODE") or os.getenv("MTX_LOG_MODE") or "tex"):lower() -end - --- this is a prelude to engine/progname specific configuration files --- in which case we can omit files meant for other programs and --- packages - ---- ctx - --- maybe texinputs + font paths --- maybe positive selection tex/context fonts/tfm|afm|vf|opentype|type1|map|enc - -input.validators = { } -input.validators.visibility = { } - -function input.validators.visibility.default(path, name) - return true -end - -function input.validators.visibility.context(path, name) - path = path[1] or path -- some day a loop - return not ( - path:find("latex") or --- path:find("doc") or - path:find("tex4ht") or - path:find("source") or --- path:find("config") or --- path:find("metafont") or - path:find("lists$") or - name:find("%.tpm$") or - name:find("%.bak$") - ) -end - --- todo: describe which functions are public (maybe input.private. ... ) - --- beware: i need to check where we still need a / on windows: - -function input.clean_path(str) ---~ return (((str:gsub("\\","/")):gsub("^!+","")):gsub("//+","//")) - if str then - return ((str:gsub("\\","/")):gsub("^!+","")) - else - return nil - end -end - -function input.do_with_path(name,func) - for _, v in pairs(input.expanded_path_list(instance,name)) do - func("^"..input.clean_path(v)) - end -end - -function input.do_with_var(name,func) - func(input.aux.expanded_var(name)) -end - -function input.with_files(instance,pattern,handle) - for _, hash in ipairs(instance.hashes) do - local blobpath = hash.tag - local blobtype = hash.type - if blobpath then - local files = instance.files[blobpath] - if files then - for k,v in pairs(files) do - if k:find("^remap:") then - k = files[k] - v = files[k] -- chained - end - if k:find(pattern) then - if type(v) == "string" then - handle(blobtype,blobpath,v,k) - else - for _,vv in pairs(v) do - handle(blobtype,blobpath,vv,k) - end - end - end - end - end - end - end -end - ---~ function input.update_script(oldname,newname) -- oldname -> own.name, not per se a suffix ---~ newname = file.addsuffix(newname,"lua") ---~ local newscript = input.clean_path(input.find_file(instance, newname)) ---~ local oldscript = input.clean_path(oldname) ---~ input.report("old script", oldscript) ---~ input.report("new script", newscript) ---~ if oldscript ~= newscript and (oldscript:find(file.removesuffix(newname).."$") or oldscript:find(newname.."$")) then ---~ local newdata = io.loaddata(newscript) ---~ if newdata then ---~ input.report("old script content replaced by new content") ---~ io.savedata(oldscript,newdata) ---~ end ---~ end ---~ end - -function input.update_script(instance,oldname,newname) -- oldname -> own.name, not per se a suffix - local scriptpath = "scripts/context/lua" - newname = file.addsuffix(newname,"lua") - local oldscript = input.clean_path(oldname) - input.report("to be replaced old script", oldscript) - local newscripts = input.find_files(instance, newname) or { } - if #newscripts == 0 then - input.report("unable to locate new script") - else - for _, newscript in ipairs(newscripts) do - newscript = input.clean_path(newscript) - input.report("checking new script", newscript) - if oldscript == newscript then - input.report("old and new script are the same") - elseif not newscript:find(scriptpath) then - 
input.report("new script should come from",scriptpath) - elseif not (oldscript:find(file.removesuffix(newname).."$") or oldscript:find(newname.."$")) then - input.report("invalid new script name") - else - local newdata = io.loaddata(newscript) - if newdata then - input.report("old script content replaced by new content") - io.savedata(oldscript,newdata) - break - else - input.report("unable to load new script") - end - end - end - end -end - - ---~ print(table.serialize(input.aux.splitpathexpr("/usr/share/texmf-{texlive,tetex}", {}))) - --- command line resolver: - ---~ print(input.resolve("abc env:tmp file:cont-en.tex path:cont-en.tex full:cont-en.tex rel:zapf/one/p-chars.tex")) - -do - - local resolvers = { } - - resolvers.environment = function(instance,str) - return input.clean_path(os.getenv(str) or os.getenv(str:upper()) or os.getenv(str:lower()) or "") - end - resolvers.relative = function(instance,str,n) - if io.exists(str) then - -- nothing - elseif io.exists("./" .. str) then - str = "./" .. str - else - local p = "../" - for i=1,n or 2 do - if io.exists(p .. str) then - str = p .. str - break - else - p = p .. "../" - end - end - end - return input.clean_path(str) - end - resolvers.locate = function(instance,str) - local fullname = input.find_given_file(instance,str) or "" - return input.clean_path((fullname ~= "" and fullname) or str) - end - resolvers.filename = function(instance,str) - local fullname = input.find_given_file(instance,str) or "" - return input.clean_path(file.basename((fullname ~= "" and fullname) or str)) - end - resolvers.pathname = function(instance,str) - local fullname = input.find_given_file(instance,str) or "" - return input.clean_path(file.dirname((fullname ~= "" and fullname) or str)) - end - - resolvers.env = resolvers.environment - resolvers.rel = resolvers.relative - resolvers.loc = resolvers.locate - resolvers.kpse = resolvers.locate - resolvers.full = resolvers.locate - resolvers.file = resolvers.filename - resolvers.path = resolvers.pathname - - local function resolve(instance,str) - if type(str) == "table" then - for k, v in pairs(str) do - str[k] = resolve(instance,v) or v - end - elseif str and str ~= "" then - str = str:gsub("([a-z]+):([^ ]+)", function(method,target) - if resolvers[method] then - return resolvers[method](instance,target) - else - return method .. ":" .. target - end - end) - end - return str - end - - input.resolve = resolve - -end - - -if not modules then modules = { } end modules ['luat-tmp'] = { - version = 1.001, - comment = "companion to luat-lib.tex", - author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", - copyright = "PRAGMA ADE / ConTeXt Development Team", - license = "see context related readme files" -} - ---[[ldx-- -

-This module deals with caching data. It sets up the paths and
-implements loaders and savers for tables. Best is to set the
-following variable. When not set, the usual paths will be
-checked. Personally I prefer the (users) temporary path.

-TEXMFCACHE=$TMP;$TEMP;$TMPDIR;$TEMPDIR;$HOME;$TEXMFVAR;$VARTEXMF;.

-Currently we do no locking when we write files. This is no real
-problem because most caching involves fonts and the chance of them
-being written at the same time is small. We also need to extend
-luatools with a recache feature.
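-- A minimal illustrative sketch (not part of the original file) of the cache
-- path selection just described: walk a TEXMFCACHE style list and take the
-- first existing directory. It assumes the luafilesystem ('lfs') module the
-- surrounding code already depends on; the real logic lives in caches.temp
-- further down, which also offers to create a missing directory.

local lfs = lfs or require("lfs")

local function first_existing_dir(list)
    for dir in list:gmatch("[^;]+") do
        -- expand a leading $VAR from the environment (illustration only)
        dir = dir:gsub("^%$(%w+)", function(var) return os.getenv(var) or "" end)
        if dir ~= "" and lfs.attributes(dir,"mode") == "directory" then
            return dir
        end
    end
    return nil -- caller then falls back to TMP, TEMP, HOME, ...
end

print(first_existing_dir("$TMP;$TEMP;$HOME;."))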

    ---ldx]]-- - -caches = caches or { } -dir = dir or { } -texmf = texmf or { } - -caches.path = caches.path or nil -caches.base = caches.base or "luatex-cache" -caches.more = caches.more or "context" -caches.direct = false -- true is faster but may need huge amounts of memory -caches.trace = false -caches.tree = false -caches.paths = caches.paths or nil -caches.force = false - -input.usecache = not toboolean(os.getenv("TEXMFSHARECACHE") or "false",true) -- true - -function caches.temp(instance) - local function checkpath(cachepath) - if not cachepath or cachepath == "" then - return nil - elseif lfs.attributes(cachepath,"mode") == "directory" then -- lfs.isdir(cachepath) then - return cachepath - elseif caches.force or io.ask(string.format("Should I create the cache path %s?",cachepath), "no", { "yes", "no" }) == "yes" then - dir.mkdirs(cachepath) - return (lfs.attributes(cachepath,"mode") == "directory") and cachepath - else - return nil - end - end - local cachepath = input.expanded_path_list(instance,"TEXMFCACHE") - cachepath = cachepath and #cachepath > 0 and checkpath(cachepath[1]) - if not cachepath then - cachepath = os.getenv("TEXMFCACHE") or os.getenv("HOME") or os.getenv("HOMEPATH") or os.getenv("TMP") or os.getenv("TEMP") or os.getenv("TMPDIR") or nil - cachepath = checkpath(cachepath) - end - if not cachepath then - print("\nfatal error: there is no valid cache path defined\n") - os.exit() - elseif lfs.attributes(cachepath,"mode") ~= "directory" then - print(string.format("\nfatal error: cache path %s is not a directory\n",cachepath)) - os.exit() - end - function caches.temp(instance) - return cachepath - end - return cachepath -end - -function caches.configpath(instance) - return table.concat(instance.cnffiles,";") -end - -function caches.hashed(tree) - return md5.hex((tree:lower()):gsub("[\\\/]+","/")) -end - -function caches.treehash(instance) - local tree = caches.configpath(instance) - if not tree or tree == "" then - return false - else - return caches.hashed(tree) - end -end - -function caches.setpath(instance,...) - if not caches.path then - if not caches.path then - caches.path = caches.temp(instance) - end - caches.path = input.clean_path(caches.path) -- to be sure - if lfs then - caches.tree = caches.tree or caches.treehash(instance) - if caches.tree then - caches.path = dir.mkdirs(caches.path,caches.base,caches.more,caches.tree) - else - caches.path = dir.mkdirs(caches.path,caches.base,caches.more) - end - end - end - if not caches.path then - caches.path = '.' - end - caches.path = input.clean_path(caches.path) - if lfs and not table.is_empty({...}) then - local pth = dir.mkdirs(caches.path,...) - return pth - end - caches.path = dir.expand_name(caches.path) - return caches.path -end - -function caches.definepath(instance,category,subcategory) - return function() - return caches.setpath(instance,category,subcategory) - end -end - -function caches.setluanames(path,name) - return path .. "/" .. name .. ".tma", path .. "/" .. name .. 
".tmc" -end - -function caches.loaddata(path,name) - local tmaname, tmcname = caches.setluanames(path,name) - local loader = loadfile(tmcname) or loadfile(tmaname) - if loader then - return loader() - else - return false - end -end - -function caches.is_writable(filepath,filename) - local tmaname, tmcname = caches.setluanames(filepath,filename) - return file.is_writable(tmaname) -end - -function caches.savedata(filepath,filename,data,raw) -- raw needed for file cache - local tmaname, tmcname = caches.setluanames(filepath,filename) - local reduce, simplify = true, true - if raw then - reduce, simplify = false, false - end - if caches.direct then - file.savedata(tmaname, table.serialize(data,'return',true,true)) - else - table.tofile (tmaname, data,'return',true,true) -- maybe not the last true - end - utils.lua.compile(tmaname, tmcname) -end - --- here we use the cache for format loading (texconfig.[formatname|jobname]) - ---~ if tex and texconfig and texconfig.formatname and texconfig.formatname == "" then -if tex and texconfig and (not texconfig.formatname or texconfig.formatname == "") and texmf.instance then - if not texconfig.luaname then texconfig.luaname = "cont-en.lua" end -- or luc - texconfig.formatname = caches.setpath(texmf.instance,"formats") .. "/" .. texconfig.luaname:gsub("%.lu.$",".fmt") -end - ---[[ldx-- -

-Once we found ourselves defining similar cache constructs
-several times, containers were introduced. Containers are used
-to collect tables in memory and reuse them when possible based
-on (unique) hashes (to be provided by the calling function).


-Caching to disk is disabled by default. Version numbers are
-stored in the saved table which makes it possible to change the
-table structures without bothering about the disk cache.


-Examples of usage can be found in the font related code.
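-- A minimal illustrative sketch (not part of the original file) of how the
-- containers api described above is meant to be used; 'fonts', 'afm' and
-- 'somefont' are made-up values and a fully initialized texmf.instance is
-- assumed, since containers.define (below) passes it to caches.setpath.

local get_cache = containers.define("fonts", "afm", 1.001, true)
local cache     = get_cache()                  -- allocated on first call
local data      = containers.read(cache, "somefont")
if not data then
    data = { designsize = 10, glyphs = { } }   -- expensive work would go here
    containers.write(cache, "somefont", data)  -- tags cache_version, saves to disk
end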

    ---ldx]]-- - -containers = { } -containers.trace = false - -do -- local report - - local function report(container,tag,name) - if caches.trace or containers.trace or container.trace then - logs.report(string.format("%s cache",container.subcategory),string.format("%s: %s",tag,name or 'invalid')) - end - end - - local allocated = { } - - -- tracing - - function containers.define(category, subcategory, version, enabled) - return function() - if category and subcategory then - local c = allocated[category] - if not c then - c = { } - allocated[category] = c - end - local s = c[subcategory] - if not s then - s = { - category = category, - subcategory = subcategory, - storage = { }, - enabled = enabled, - version = version or 1.000, - trace = false, - path = caches.setpath(texmf.instance,category,subcategory), - } - c[subcategory] = s - end - return s - else - return nil - end - end - end - - function containers.is_usable(container, name) - return container.enabled and caches.is_writable(container.path, name) - end - - function containers.is_valid(container, name) - if name and name ~= "" then - local storage = container.storage[name] - return storage and not table.is_empty(storage) and storage.cache_version == container.version - else - return false - end - end - - function containers.read(container,name) - if container.enabled and not container.storage[name] then - container.storage[name] = caches.loaddata(container.path,name) - if containers.is_valid(container,name) then - report(container,"loaded",name) - else - container.storage[name] = nil - end - end - if container.storage[name] then - report(container,"reusing",name) - end - return container.storage[name] - end - - function containers.write(container, name, data) - if data then - data.cache_version = container.version - if container.enabled then - local unique, shared = data.unique, data.shared - data.unique, data.shared = nil, nil - caches.savedata(container.path, name, data) - report(container,"saved",name) - data.unique, data.shared = unique, shared - end - report(container,"stored",name) - container.storage[name] = data - end - return data - end - - function containers.content(container,name) - return container.storage[name] - end - -end - --- since we want to use the cache instead of the tree, we will now --- reimplement the saver. 
- -local save_data = input.aux.save_data - -input.cachepath = nil - -function input.aux.save_data(instance, dataname, check) - input.cachepath = input.cachepath or caches.definepath(instance,"trees") - save_data(instance, dataname, check, function(cachename,dataname) - if input.usecache then - return file.join(input.cachepath(),caches.hashed(cachename)) - else - return file.join(cachename,dataname) - end - end) -end - -local load_data = input.aux.load_data - -function input.aux.load_data(instance,pathname,dataname,filename) - input.cachepath = input.cachepath or caches.definepath(instance,"trees") - load_data(instance,pathname,dataname,filename,function(dataname,filename) - if input.usecache then - return file.join(input.cachepath(),caches.hashed(pathname)) - else - if not filename or (filename == "") then - filename = dataname - end - return file.join(pathname,filename) - end - end) -end - --- we will make a better format, maybe something xml or just text or lua - -input.automounted = input.automounted or { } - -function input.automount(instance,usecache) - local mountpaths = input.simplified_list(input.expansion(instance,'TEXMFMOUNT')) - if table.is_empty(mountpaths) and usecache then - mountpaths = { caches.setpath(instance,"mount") } - end - if not table.is_empty(mountpaths) then - input.starttiming(instance) - for k, root in pairs(mountpaths) do - local f = io.open(root.."/url.tmi") - if f then - for line in f:lines() do - if line then - if line:find("^[%%#%-]") then -- or %W - -- skip - elseif line:find("^zip://") then - input.report("mounting",line) - table.insert(input.automounted,line) - input.usezipfile(instance,line) - end - end - end - f:close() - end - end - input.stoptiming(instance) - end -end - --- store info in format - -input.storage = { } -input.storage.data = { } -input.storage.min = 0 -- 500 -input.storage.max = input.storage.min - 1 -input.storage.trace = false -- true -input.storage.done = 0 -input.storage.evaluators = { } --- (evaluate,message,names) - -function input.storage.register(...) - input.storage.data[#input.storage.data+1] = { ... } -end - -function input.storage.evaluate(name) - input.storage.evaluators[#input.storage.evaluators+1] = name -end - -function input.storage.finalize() -- we can prepend the string with "evaluate:" - for _, t in ipairs(input.storage.evaluators) do - for i, v in pairs(t) do - if type(v) == "string" then - t[i] = loadstring(v)() - elseif type(v) == "table" then - for _, vv in pairs(v) do - if type(vv) == "string" then - t[i] = loadstring(vv)() - end - end - end - end - end -end - -function input.storage.dump() - for name, data in ipairs(input.storage.data) do - local evaluate, message, original, target = data[1], data[2], data[3] ,data[4] - local name, initialize, finalize, code = nil, "", "", "" - for str in target:gmatch("([^%.]+)") do - if name then - name = name .. "." .. str - else - name = str - end - initialize = string.format("%s %s = %s or {} ", initialize, name, name) - end - if evaluate then - finalize = "input.storage.evaluate(" .. name .. ")" - end - input.storage.max = input.storage.max + 1 - if input.storage.trace then - logs.report('storage',string.format('saving %s in slot %s',message,input.storage.max)) - code = - initialize .. - string.format("logs.report('storage','restoring %s from slot %s') ",message,input.storage.max) .. - table.serialize(original,name) .. - finalize - else - code = initialize .. table.serialize(original,name) .. 
finalize - end - lua.bytecode[input.storage.max] = loadstring(code) - end -end - -if lua.bytecode then -- from 0 upwards - local i = input.storage.min - while lua.bytecode[i] do - lua.bytecode[i]() - lua.bytecode[i] = nil - i = i + 1 - end - input.storage.done = i -end - - --- filename : luat-zip.lua --- comment : companion to luat-lib.tex --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['luat-zip'] = 1.001 - -if zip and input then - zip.supported = true -else - zip = { } - zip.supported = false -end - -if not zip.supported then - - if not input then input = { } end -- will go away - - function zip.openarchive (...) return nil end -- needed ? - function zip.closenarchive (...) end -- needed ? - function input.usezipfile (...) end -- needed ? - -else - - -- zip:///oeps.zip?name=bla/bla.tex - -- zip:///oeps.zip?tree=tex/texmf-local - - local function validzip(str) - if not str:find("^zip://") then - return "zip:///" .. str - else - return str - end - end - - zip.archives = { } - zip.registeredfiles = { } - - function zip.openarchive(instance,name) - if not name or name == "" then - return nil - else - local arch = zip.archives[name] - if arch then - return arch - else - local full = input.find_file(instance,name) or "" - local arch = (full ~= "" and zip.open(full)) or false - zip.archives[name] = arch - return arch - end - end - end - - function zip.closearchive(instance,name) - if not name or name == "" and zip.archives[name] then - zip.close(zip.archives[name]) - zip.archives[name] = nil - end - end - - -- zip:///texmf.zip?tree=/tex/texmf - -- zip:///texmf.zip?tree=/tex/texmf-local - -- zip:///texmf-mine.zip?tree=/tex/texmf-projects - - function input.locators.zip(instance,specification) -- where is this used? startup zips (untested) - specification = input.splitmethod(specification) - local zipfile = specification.path - local zfile = zip.openarchive(instance,name) -- tricky, could be in to be initialized tree - if zfile then - input.logger('! zip locator', specification.original ..' found') - else - input.logger('? zip locator', specification.original ..' not found') - end - end - - function input.hashers.zip(instance,tag,name) - input.report("loading zip file",name,"as",tag) - input.usezipfile(instance,tag .."?tree=" .. name) - end - - function input.concatinators.zip(tag,path,name) - if not path or path == "" then - return tag .. '?name=' .. name - else - return tag .. '?name=' .. path .. "/" .. name - end - end - - function input.is_readable.zip(name) - return true - end - - function input.finders.zip(instance,specification,filetype) - specification = input.splitmethod(specification) - if specification.path then - local q = url.query(specification.query) - if q.name then - local zfile = zip.openarchive(instance,specification.path) - if zfile then - input.logger('! zip finder',specification.path) - local dfile = zfile:open(q.name) - if dfile then - dfile = zfile:close() - input.logger('+ zip finder',q.name) - return specification.original - end - else - input.logger('? 
zip finder',specification.path) - end - end - end - input.logger('- zip finder',filename) - return unpack(input.finders.notfound) - end - - function input.openers.zip(instance,specification) - local zipspecification = input.splitmethod(specification) - if zipspecification.path then - local q = url.query(zipspecification.query) - if q.name then - local zfile = zip.openarchive(instance,zipspecification.path) - if zfile then - input.logger('+ zip starter',zipspecification.path) - local dfile = zfile:open(q.name) - if dfile then - input.show_open(specification) - return input.openers.text_opener(specification,dfile,'zip') - end - else - input.logger('- zip starter',zipspecification.path) - end - end - end - input.logger('- zip opener',filename) - return unpack(input.openers.notfound) - end - - function input.loaders.zip(instance,specification) - specification = input.splitmethod(specification) - if specification.path then - local q = url.query(specification.query) - if q.name then - local zfile = zip.openarchive(instance,specification.path) - if zfile then - input.logger('+ zip starter',specification.path) - local dfile = zfile:open(q.name) - if dfile then - input.show_load(filename) - input.logger('+ zip loader',filename) - local s = dfile:read("*all") - dfile:close() - return true, s, #s - end - else - input.logger('- zip starter',specification.path) - end - end - end - input.logger('- zip loader',filename) - return unpack(input.openers.notfound) - end - - -- zip:///somefile.zip - -- zip:///somefile.zip?tree=texmf-local -> mount - - function input.usezipfile(instance,zipname) - zipname = validzip(zipname) - input.logger('! zip use','file '..zipname) - local specification = input.splitmethod(zipname) - local zipfile = specification.path - if zipfile and not zip.registeredfiles[zipname] then - local tree = url.query(specification.query).tree or "" - input.logger('! zip register','file '..zipname) - local z = zip.openarchive(instance,zipfile) - if z then - input.logger("= zipfile","registering "..zipname) - input.starttiming(instance) - input.aux.prepend_hash(instance,'zip',zipname,zipfile) - input.aux.extend_texmf_var(instance,zipname) -- resets hashes too - zip.registeredfiles[zipname] = z - instance.files[zipname] = input.aux.register_zip_file(z,tree or "") - input.stoptiming(instance) - else - input.logger("? zipfile","unknown "..zipname) - end - else - input.logger('! 
zip register','no file '..zipname) - end - end - - function input.aux.register_zip_file(z,tree) - local files, filter = { }, "" - if tree == "" then - filter = "^(.+)/(.-)$" - else - filter = "^"..tree.."/(.+)/(.-)$" - end - input.logger('= zip filter',filter) - local register, n = input.aux.register_file, 0 - for i in z:files() do - local path, name = i.filename:match(filter) - if path then - if name and name ~= '' then - register(files, name, path) - n = n + 1 - else - -- directory - end - else - register(files, i.filename, '') - n = n + 1 - end - end - input.report('= zip entries',n) - return files - end - -end - - --- filename : luat-zip.lua --- comment : companion to luat-lib.tex --- author : Hans Hagen, PRAGMA-ADE, Hasselt NL --- copyright: PRAGMA ADE / ConTeXt Development Team --- license : see context related readme files - -if not versions then versions = { } end versions['luat-tex'] = 1.001 - --- special functions that deal with io - -if texconfig and not texlua then - - input.level = input.level or 0 - - if input.logmode() == 'xml' then - function input.show_open(name) - input.level = input.level + 1 - texio.write_nl("") - end - function input.show_close(name) - texio.write(" ") - input.level = input.level - 1 - end - function input.show_load(name) - texio.write_nl("") -- level? - end - else - function input.show_open () end - function input.show_close() end - function input.show_load () end - end - - function input.finders.generic(instance,tag,filename,filetype) - local foundname = input.find_file(instance,filename,filetype) - if foundname and foundname ~= "" then - input.logger('+ ' .. tag .. ' finder',filename,'filetype') - return foundname - else - input.logger('- ' .. tag .. ' finder',filename,'filetype') - return unpack(input.finders.notfound) - end - end - - input.filters.dynamic_translator = nil - input.filters.frozen_translator = nil - input.filters.utf_translator = nil - - function input.openers.text_opener(filename,file_handle,tag) - local u = unicode.utftype(file_handle) - local t = { } - if u > 0 then - input.logger('+ ' .. tag .. ' opener (' .. unicode.utfname[u] .. ')',filename) - local l - if u > 2 then - l = unicode.utf32_to_utf8(file_handle:read("*a"),u==4) - else - l = unicode.utf16_to_utf8(file_handle:read("*a"),u==2) - end - file_handle:close() - t = { - utftype = u, -- may go away - lines = l, - current = 0, -- line number, not really needed - handle = nil, - noflines = #l, - close = function() - input.logger('= ' .. tag .. ' closer (' .. unicode.utfname[u] .. ')',filename) - input.show_close(filename) - end, ---~ getline = function(n) ---~ local line = t.lines[n] ---~ if not line or line == "" then ---~ return "" ---~ else ---~ local translator = input.filters.utf_translator ---~ return (translator and translator(line)) or line ---~ end ---~ end, - reader = function(self) - self = self or t - local current, lines = self.current, self.lines - if current >= #lines then - return nil - else - current = current + 1 - self.current = current - local line = lines[current] - if line == "" then - return "" - else - local translator = input.filters.utf_translator - -- return (translator and translator(line)) or line - if translator then - return translator(line) - else - return line - end - end - end - end - } - else - input.logger('+ ' .. tag .. 
' opener',filename) - -- todo: file;name -> freeze / eerste regel scannen -> freeze - local filters = input.filters - t = { - reader = function(self) - local line = file_handle:read() - if line == "" then - return "" - end - local translator = filters.utf_translator - if translator then - return translator(line) - end - translator = filters.dynamic_translator - if translator then - return translator(line) - end - return line - end, - close = function() - input.logger('= ' .. tag .. ' closer',filename) - input.show_close(filename) - file_handle:close() - end, - handle = function() - return file_handle - end, - noflines = function() - t.noflines = io.noflines(file_handle) - return t.noflines - end - } - end - return t - end - - function input.openers.generic(instance,tag,filename) - if filename and filename ~= "" then - local f = io.open(filename,"r") - if f then - input.show_open(filename) - return input.openers.text_opener(filename,f,tag) - end - end - input.logger('- ' .. tag .. ' opener',filename) - return unpack(input.openers.notfound) - end - - function input.loaders.generic(instance,tag,filename) - if filename and filename ~= "" then - local f = io.open(filename,"rb") - if f then - input.show_load(filename) - input.logger('+ ' .. tag .. ' loader',filename) - local s = f:read("*a") - f:close() - if s then - return true, s, #s - end - end - end - input.logger('- ' .. tag .. ' loader',filename) - return unpack(input.loaders.notfound) - end - - function input.finders.tex(instance,filename,filetype) - return input.finders.generic(instance,'tex',filename,filetype) - end - function input.openers.tex(instance,filename) - return input.openers.generic(instance,'tex',filename) - end - function input.loaders.tex(instance,filename) - return input.loaders.generic(instance,'tex',filename) - end - -end - --- callback into the file io and related things; disabling kpse - - -if texconfig and not texlua then do - - -- this is not the right place, because we refer to quite some not yet defined tables, but who cares ... - - ctx = ctx or { } - - local ss = { } - - function ctx.writestatus(a,b) - local s = ss[a] - if not ss[a] then - s = a:rpadd(15) .. ": " - ss[a] = s - end - texio.write_nl(s .. b .. "\n") - end - - -- this will become: ctx.install_statistics(fnc() return ..,.. 
end) etc - - local statusinfo, n = { }, 0 - - function ctx.register_statistics(tag,pattern,fnc) - statusinfo[#statusinfo+1] = { tag, pattern, fnc } - if #tag > n then n = #tag end - end - - function ctx.show_statistics() -- todo: move calls - if caches then - ctx.register_statistics("used config path", "%s", function() return caches.configpath(texmf.instance) end) - ctx.register_statistics("used cache path", "%s", function() return caches.path end) - end - if status.luabytecodes > 0 and input.storage and input.storage.done then - ctx.register_statistics("modules/dumps/instances", "%s/%s/%s", function() return status.luabytecodes-500, input.storage.done, status.luastates end) - end - if texmf.instance then - ctx.register_statistics("input load time", "%s seconds", function() return input.loadtime(texmf.instance) end) - end - if fonts then - ctx.register_statistics("fonts load time","%s seconds", function() return input.loadtime(fonts) end) - end - if xml then - ctx.register_statistics("xml load time", "%s seconds, backreferences: %i, outer filtering time: %s", function() return input.loadtime(xml), #lxml.self, input.loadtime(lxml) end) - end - if mptopdf then - ctx.register_statistics("mps conversion time", "%s seconds", function() return input.loadtime(mptopdf) end) - end - if nodes then - ctx.register_statistics("node processing time", "%s seconds (including kernel)", function() return input.loadtime(nodes) end) - end - if kernel then - ctx.register_statistics("kernel processing time", "%s seconds", function() return input.loadtime(kernel) end) - end - if attributes then - ctx.register_statistics("attribute processing time", "%s seconds", function() return input.loadtime(attributes) end) - end - if languages then - ctx.register_statistics("language load time", "%s seconds, n=%s", function() return input.loadtime(languages), languages.hyphenation.n() end) - end - if figures then - ctx.register_statistics("graphics processing time", "%s seconds, n=%s (including tex)", function() return input.loadtime(figures), figures.n or "?" 
end) - end - if metapost then - ctx.register_statistics("metapost processing time", "%s seconds, loading: %s seconds, execution: %s seconds, n: %s", function() return input.loadtime(metapost), input.loadtime(mplib), input.loadtime(metapost.exectime), metapost.n end) - end - if status.luastate_bytes then - ctx.register_statistics("current memory usage", "%s bytes", function() return status.luastate_bytes end) - end - if nodes then - ctx.register_statistics("cleaned up reserved nodes", "%s nodes, %s lists of %s", function() return nodes.cleanup_reserved(tex.count[24]) end) -- \topofboxstack - end - if status.node_mem_usage then - ctx.register_statistics("node memory usage", "%s", function() return status.node_mem_usage end) - end - if languages then - ctx.register_statistics("loaded patterns", "%s", function() return languages.logger.report() end) - end - if fonts then - ctx.register_statistics("loaded fonts", "%s", function() return fonts.logger.report() end) - end - if xml then -- so we are in mkiv, we need a different check - ctx.register_statistics("runtime", "%s seconds, %i processed pages, %i shipped pages, %.3f pages/second", function() - input.stoptiming(texmf) - local runtime = input.loadtime(texmf) - local shipped = tex.count['nofshipouts'] - local pages = tex.count['realpageno'] - 1 - local persecond = shipped / runtime - return runtime, pages, shipped, persecond - end) - end - for _, t in ipairs(statusinfo) do - local tag, pattern, fnc = t[1], t[2], t[3] - ctx.writestatus("mkiv lua stats", string.format("%s - %s", tag:rpadd(n," "), pattern:format(fnc()))) - end - end - -end end - -if texconfig and not texlua then - - texconfig.kpse_init = false - texconfig.trace_file_names = input.logmode() == 'tex' - texconfig.max_print_line = 100000 - - -- if still present, we overload kpse (put it off-line so to say) - - if not texmf then texmf = { } end - - input.starttiming(texmf) - - if not texmf.instance then - - if not texmf.instance then -- prevent a second loading - - texmf.instance = input.reset() - texmf.instance.progname = environment.progname or 'context' - texmf.instance.engine = environment.engine or 'luatex' - texmf.instance.validfile = input.validctxfile - - input.load(texmf.instance) - - end - - if callback then - callback.register('find_read_file' , function(id,name) return input.findtexfile(texmf.instance,name) end) - callback.register('open_read_file' , function( name) return input.opentexfile(texmf.instance,name) end) - end - - if callback then - callback.register('find_data_file' , function(name) return input.findbinfile(texmf.instance,name,"tex") end) - callback.register('find_enc_file' , function(name) return input.findbinfile(texmf.instance,name,"enc") end) - callback.register('find_font_file' , function(name) return input.findbinfile(texmf.instance,name,"tfm") end) - callback.register('find_format_file' , function(name) return input.findbinfile(texmf.instance,name,"fmt") end) - callback.register('find_image_file' , function(name) return input.findbinfile(texmf.instance,name,"tex") end) - callback.register('find_map_file' , function(name) return input.findbinfile(texmf.instance,name,"map") end) - callback.register('find_ocp_file' , function(name) return input.findbinfile(texmf.instance,name,"ocp") end) - callback.register('find_opentype_file' , function(name) return input.findbinfile(texmf.instance,name,"otf") end) - callback.register('find_output_file' , function(name) return name end) - callback.register('find_pk_file' , function(name) return 
input.findbinfile(texmf.instance,name,"pk") end) - callback.register('find_sfd_file' , function(name) return input.findbinfile(texmf.instance,name,"sfd") end) - callback.register('find_truetype_file' , function(name) return input.findbinfile(texmf.instance,name,"ttf") end) - callback.register('find_type1_file' , function(name) return input.findbinfile(texmf.instance,name,"pfb") end) - callback.register('find_vf_file' , function(name) return input.findbinfile(texmf.instance,name,"vf") end) - - callback.register('read_data_file' , function(file) return input.loadbinfile(texmf.instance,file,"tex") end) - callback.register('read_enc_file' , function(file) return input.loadbinfile(texmf.instance,file,"enc") end) - callback.register('read_font_file' , function(file) return input.loadbinfile(texmf.instance,file,"tfm") end) - -- format - -- image - callback.register('read_map_file' , function(file) return input.loadbinfile(texmf.instance,file,"map") end) - callback.register('read_ocp_file' , function(file) return input.loadbinfile(texmf.instance,file,"ocp") end) - callback.register('read_opentype_file' , function(file) return input.loadbinfile(texmf.instance,file,"otf") end) - -- output - callback.register('read_pk_file' , function(file) return input.loadbinfile(texmf.instance,file,"pk") end) - callback.register('read_sfd_file' , function(file) return input.loadbinfile(texmf.instance,file,"sfd") end) - callback.register('read_truetype_file' , function(file) return input.loadbinfile(texmf.instance,file,"ttf") end) - callback.register('read_type1_file' , function(file) return input.loadbinfile(texmf.instance,file,"pfb") end) - callback.register('read_vf_file' , function(file) return input.loadbinfile(texmf.instance,file,"vf" ) end) - end - - if callback and environment.aleph_mode then - callback.register('find_font_file' , function(name) return input.findbinfile(texmf.instance,name,"ofm") end) - callback.register('read_font_file' , function(file) return input.loadbinfile(texmf.instance,file,"ofm") end) - callback.register('find_vf_file' , function(name) return input.findbinfile(texmf.instance,name,"ovf") end) - callback.register('read_vf_file' , function(file) return input.loadbinfile(texmf.instance,file,"ovf") end) - end - - if callback then - callback.register('find_write_file' , function(id,name) return name end) - end - - if callback and (not config or (#config == 0)) then - callback.register('find_format_file' , function(name) return name end) - end - - if callback and false then - for k, v in pairs(callback.list()) do - if not v then texio.write_nl("callback "..k.." is not set") end - end - end - - if callback then - - input.start_actions = { } - input.stop_actions = { } - - function input.register_start_actions(f) table.insert(input.start_actions, f) end - function input.register_stop_actions (f) table.insert(input.stop_actions, f) end - - --~ callback.register('start_run', function() for _, a in pairs(input.start_actions) do a() end end) - --~ callback.register('stop_run' , function() for _, a in pairs(input.stop_actions ) do a() end end) - - end - - if callback then - - if input.logmode() == 'xml' then - - function input.start_page_number() - texio.write_nl("

    ") - texio.write_nl("") - end - - callback.register('start_page_number' , input.start_page_number) - callback.register('stop_page_number' , input.stop_page_number ) - - function input.report_output_pages(p,b) - texio.write_nl(""..p.."") - texio.write_nl(""..b.."") - texio.write_nl("") - end - function input.report_output_log() - end - - callback.register('report_output_pages', input.report_output_pages) - callback.register('report_output_log' , input.report_output_log ) - - function input.start_run() - texio.write_nl("") - texio.write_nl("") - texio.write_nl("") - end - function input.stop_run() - texio.write_nl("") - end - function input.show_statistics() - for k,v in pairs(status.list()) do - texio.write_nl("log",""..tostring(v).."") - end - end - - table.insert(input.start_actions, input.start_run) - table.insert(input.stop_actions , input.show_statistics) - table.insert(input.stop_actions , input.stop_run) - - else - table.insert(input.stop_actions , input.show_statistics) - end - - callback.register('start_run', function() for _, a in pairs(input.start_actions) do a() end end) - callback.register('stop_run' , function() for _, a in pairs(input.stop_actions ) do a() end ctx.show_statistics() end) - - end - - end - - if kpse then - - function kpse.find_file(filename,filetype,mustexist) - return input.find_file(texmf.instance,filename,filetype,mustexist) - end - function kpse.expand_path(variable) - return input.expand_path(texmf.instance,variable) - end - function kpse.expand_var(variable) - return input.expand_var(texmf.instance,variable) - end - function kpse.expand_braces(variable) - return input.expand_braces(texmf.instance,variable) - end - - end - -end - --- program specific configuration (memory settings and alike) - -if texconfig and not texlua then - - luatex = luatex or { } - - luatex.variablenames = { - 'main_memory', 'extra_mem_bot', 'extra_mem_top', - 'buf_size','expand_depth', - 'font_max', 'font_mem_size', - 'hash_extra', 'max_strings', 'pool_free', 'pool_size', 'string_vacancies', - 'obj_tab_size', 'pdf_mem_size', 'dest_names_size', - 'nest_size', 'param_size', 'save_size', 'stack_size', - 'trie_size', 'hyph_size', 'max_in_open', - 'ocp_stack_size', 'ocp_list_size', 'ocp_buf_size' - } - - function luatex.variables() - local t, x = { }, nil - for _,v in pairs(luatex.variablenames) do - x = input.var_value(texmf.instance,v) - if x and x:find("^%d+$") then - t[v] = tonumber(x) - end - end - return t - end - - function luatex.setvariables(tab) - for k,v in pairs(luatex.variables()) do - tab[k] = v - end - end - - if not luatex.variables_set then - luatex.setvariables(texconfig) - luatex.variables_set = true - end - - texconfig.max_print_line = 100000 - texconfig.max_in_open = 127 - -end - --- some tex basics - -if not cs then cs = { } end - -function cs.def(k,v) - tex.sprint(tex.texcatcodes, "\\def\\" .. k .. "{" .. v .. "}") -end - -function cs.chardef(k,v) - tex.sprint(tex.texcatcodes, "\\chardef\\" .. k .. "=" .. v .. "\\relax") -end - -function cs.boolcase(b) - if b then tex.write(1) else tex.write(0) end -end - -function cs.testcase(b) - if b then - tex.sprint(tex.texcatcodes, "\\firstoftwoarguments") - else - tex.sprint(tex.texcatcodes, "\\secondoftwoarguments") - end -end - - -if not modules then modules = { } end modules ['luat-kps'] = { - version = 1.001, - comment = "companion to luatools.lua", - author = "Hans Hagen, PRAGMA-ADE, Hasselt NL", - copyright = "PRAGMA ADE / ConTeXt Development Team", - license = "see context related readme files" -} - ---[[ldx-- -

    This file is used when we want the input handlers to behave like -kpsewhich. What to do with the following:

    - - -{$SELFAUTOLOC,$SELFAUTODIR,$SELFAUTOPARENT}{,{/share,}/texmf{-local,}/web2c} -$SELFAUTOLOC : /usr/tex/bin/platform -$SELFAUTODIR : /usr/tex/bin -$SELFAUTOPARENT : /usr/tex - - -

    How about just forgetting about them?
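For reference, that brace pattern is how kpsewhich probes for its configuration relative to the binary location. A minimal Lua sketch, purely illustrative and not part of luatools, spelling out the cross product for the example values above:

    -- hypothetical illustration only: the three roots crossed with the
    -- web2c tails that the kpsewhich pattern above describes
    local roots = {
        "/usr/tex/bin/platform", -- $SELFAUTOLOC
        "/usr/tex/bin",          -- $SELFAUTODIR
        "/usr/tex",              -- $SELFAUTOPARENT
    }
    local tails = {
        "",                         -- the empty alternative
        "/share/texmf-local/web2c", "/share/texmf/web2c",
        "/texmf-local/web2c",       "/texmf/web2c",
    }
    for _, r in ipairs(roots) do
        for _, t in ipairs(tails) do
            print(r .. t)        -- candidate directory for texmf.cnf
        end
    end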

    ---ldx]]-- - -input = input or { } -input.suffixes = input.suffixes or { } -input.formats = input.formats or { } - -input.suffixes['gf'] = { 'gf' } -input.suffixes['pk'] = { 'pk' } -input.suffixes['base'] = { 'base' } -input.suffixes['bib'] = { 'bib' } -input.suffixes['bst'] = { 'bst' } -input.suffixes['cnf'] = { 'cnf' } -input.suffixes['mem'] = { 'mem' } -input.suffixes['mf'] = { 'mf' } -input.suffixes['mfpool'] = { 'pool' } -input.suffixes['mft'] = { 'mft' } -input.suffixes['mppool'] = { 'pool' } -input.suffixes['graphic/figure'] = { 'eps', 'epsi' } -input.suffixes['texpool'] = { 'pool' } -input.suffixes['PostScript header'] = { 'pro' } -input.suffixes['ist'] = { 'ist' } -input.suffixes['web'] = { 'web', 'ch' } -input.suffixes['cweb'] = { 'w', 'web', 'ch' } -input.suffixes['cmap files'] = { 'cmap' } -input.suffixes['lig files'] = { 'lig' } -input.suffixes['bitmap font'] = { } -input.suffixes['MetaPost support'] = { } -input.suffixes['TeX system documentation'] = { } -input.suffixes['TeX system sources'] = { } -input.suffixes['dvips config'] = { } -input.suffixes['type42 fonts'] = { } -input.suffixes['web2c files'] = { } -input.suffixes['other text files'] = { } -input.suffixes['other binary files'] = { } -input.suffixes['opentype fonts'] = { 'otf' } - -input.suffixes['fmt'] = { 'fmt' } -input.suffixes['texmfscripts'] = { 'rb','lua','py','pl' } - -input.suffixes['pdftex config'] = { } -input.suffixes['Troff fonts'] = { } - -input.suffixes['ls-R'] = { } - ---[[ldx-- -

    If you wondered about some of the previous mappings, how about -the next bunch:
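Together with the suffix table above, these variable names are enough to answer a simple kpsewhich-style query. A minimal sketch, assuming a hypothetical describe helper that is not part of luatools, of how the two tables could be consulted (the formats table it reads is listed right below):

    -- minimal sketch (describe is a hypothetical helper, not part of
    -- luatools): map a kpsewhich-style format name to its path variable
    -- and its accepted suffixes
    local function describe(fmt)
        local var      = input.formats [fmt] or ""
        local suffixes = input.suffixes[fmt] or { }
        return var, table.concat(suffixes, " ")
    end
    -- describe('mf')  --> "MFINPUTS", "mf"
    -- describe('web') --> "",         "web ch"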

    ---ldx]]-- - -input.formats['bib'] = '' -input.formats['bst'] = '' -input.formats['mft'] = '' -input.formats['ist'] = '' -input.formats['web'] = '' -input.formats['cweb'] = '' -input.formats['MetaPost support'] = '' -input.formats['TeX system documentation'] = '' -input.formats['TeX system sources'] = '' -input.formats['Troff fonts'] = '' -input.formats['dvips config'] = '' -input.formats['graphic/figure'] = '' -input.formats['ls-R'] = '' -input.formats['other text files'] = '' -input.formats['other binary files'] = '' - -input.formats['gf'] = '' -input.formats['pk'] = '' -input.formats['base'] = 'MFBASES' -input.formats['cnf'] = '' -input.formats['mem'] = 'MPMEMS' -input.formats['mf'] = 'MFINPUTS' -input.formats['mfpool'] = 'MFPOOL' -input.formats['mppool'] = 'MPPOOL' -input.formats['texpool'] = 'TEXPOOL' -input.formats['PostScript header'] = 'TEXPSHEADERS' -input.formats['cmap files'] = 'CMAPFONTS' -input.formats['type42 fonts'] = 'T42FONTS' -input.formats['web2c files'] = 'WEB2C' -input.formats['pdftex config'] = 'PDFTEXCONFIG' -input.formats['texmfscripts'] = 'TEXMFSCRIPTS' -input.formats['bitmap font'] = '' -input.formats['lig files'] = 'LIGFONTS' - --- end library merge - --- We initialize some characteristics of this program. We need to --- do this before we load the libraries, else own.name will not be --- properly set (handy for selfcleaning the file). It's an ugly --- looking piece of code. - -own = { } - -own.libs = { -- todo: check which ones are really needed - 'l-string.lua', - 'l-lpeg.lua', - 'l-table.lua', - 'l-io.lua', - 'l-number.lua', - 'l-set.lua', - 'l-os.lua', - 'l-md5.lua', - 'l-file.lua', - 'l-url.lua', - 'l-dir.lua', - 'l-boolean.lua', - 'l-unicode.lua', - 'l-utils.lua', - 'luat-lib.lua', - 'luat-inp.lua', - 'luat-tmp.lua', - 'luat-zip.lua', - 'luat-tex.lua', - 'luat-kps.lua', -} - --- We need this hack till luatex is fixed. - -if arg and arg[0] == 'luatex' and arg[1] == "--luaonly" then - arg[-1]=arg[0] arg[0]=arg[2] for k=3,#arg do arg[k-2]=arg[k] end arg[#arg]=nil arg[#arg]=nil -end - --- End of hack. - -own.name = (environment and environment.ownname) or arg[0] or 'luatools.lua' -own.path = string.match(own.name,"^(.+)[\\/].-$") or "." -own.list = { '.' } - -if own.path ~= '.' then - table.insert(own.list,own.path) -end - -table.insert(own.list,own.path.."/../../../tex/context/base") -table.insert(own.list,own.path.."/mtx") -table.insert(own.list,own.path.."/../sources") - -function locate_libs() - for _, lib in pairs(own.libs) do - for _, pth in pairs(own.list) do - local filename = string.gsub(pth .. "/" .. lib,"\\","/") - local codeblob = loadfile(filename) - if codeblob then - codeblob() - own.list = { pth } -- speed up te search - break - end - end - end -end - -if not input then - locate_libs() -end - -if not input then - print("") - print("Luatools is unable to start up due to lack of libraries. 
You may") - print("try to run 'lua luatools.lua --selfmerge' in the path where this") - print("script is located (normally under ..../scripts/context/lua) which") - print("will make luatools library independent.") - os.exit() -end - -instance = input.reset() -input.verbose = environment.arguments["verbose"] or false -input.banner = 'LuaTools | ' -utils.report = input.report - -input.defaultlibs = { -- not all are needed - 'l-string.lua', 'l-lpeg.lua', 'l-table.lua', 'l-boolean.lua', 'l-number.lua', 'l-set.lua', 'l-unicode.lua', - 'l-md5.lua', 'l-os.lua', 'l-io.lua', 'l-file.lua', 'l-url.lua', 'l-dir.lua', 'l-utils.lua', 'l-tex.lua', - 'luat-env.lua', 'luat-lib.lua', 'luat-inp.lua', 'luat-tmp.lua', 'luat-zip.lua', 'luat-tex.lua' -} - --- todo: use environment.argument() instead of environment.arguments[] - -instance.engine = environment.arguments["engine"] or 'luatex' -instance.progname = environment.arguments["progname"] or 'context' -instance.luaname = environment.arguments["luafile"] or "" -- environment.ownname or "" -instance.lualibs = environment.arguments["lualibs"] or table.concat(input.defaultlibs,",") -instance.allresults = environment.arguments["all"] or false -instance.pattern = environment.arguments["pattern"] or nil -instance.sortdata = environment.arguments["sort"] or false -instance.kpseonly = not environment.arguments["all"] or false -instance.my_format = environment.arguments["format"] or instance.format -instance.lsrmode = environment.arguments["lsr"] or false - -if type(instance.pattern) == 'boolean' then - input.report("invalid pattern specification") -- toto, force verbose for one message - instance.pattern = nil -end - -if environment.arguments["trace"] then input.settrace(environment.arguments["trace"]) end - -if environment.arguments["minimize"] then - if input.validators.visibility[instance.progname] then - instance.validfile = input.validators.visibility[instance.progname] - end -end - -function input.my_prepare_a(instance) - input.resetconfig(instance) - input.identify_cnf(instance) - input.load_lua(instance) - input.expand_variables(instance) - input.load_cnf(instance) - input.expand_variables(instance) -end - -function input.my_prepare_b(instance) - input.my_prepare_a(instance) - input.load_hash(instance) - input.automount(instance) -end - --- barename - -if not messages then messages = { } end - -messages.no_ini_file = [[ -There is no lua initialization file found. This file can be forced by the -"--progname" directive, or specified with "--luaname", or it is derived -automatically from the formatname (aka jobname). It may be that you have -to regenerate the file database using "luatools --generate". -]] - -messages.help = [[ ---generate generate file database ---variables show configuration variables ---expansions show expanded variables ---configurations show configuration order ---expand-braces expand complex variable ---expand-path expand variable (resolve paths) ---expand-var expand variable (resolve references) ---show-path show path expansion of ... ---var-value report value of variable ---find-file report file location ---find-path report path of file ---make or --ini make luatex format ---run or --fmt= run luatex format ---luafile=str lua inifile (default is .lua) ---lualibs=list libraries to assemble (optional when --compile) ---compile assemble and compile lua inifile ---mkii force context mkii mode (only for testing, not usable!) 
---verbose give a bit more info ---minimize optimize lists for format ---all show all found files ---sort sort cached data ---engine=str target engine ---progname=str format or backend ---pattern=str filter variables ---lsr use lsr and cnf directly -]] - -function input.my_make_format(instance,texname) - if texname and texname ~= "" then - if input.usecache then - local path = file.join(caches.setpath(instance,"formats")) -- maybe platform - if path and lfs then - lfs.chdir(path) - end - end - local barename = texname:gsub("%.%a+$","") - if barename == texname then - texname = texname .. ".tex" - end - local fullname = input.find_files(instance,texname)[1] or "" - if fullname == "" then - input.report("no tex file with name",texname) - else - local luaname, lucname, luapath, lualibs = "", "", "", { } - -- the following is optional, since context.lua can also - -- handle this collect and compile business - if environment.arguments["compile"] then - if luaname == "" then luaname = barename end - input.report("creating initialization file " .. luaname) - luapath = file.dirname(luaname) - if luapath == "" then - luapath = file.dirname(texname) - end - if luapath == "" then - luapath = file.dirname(input.find_files(instance,texname)[1] or "") - end - lualibs = string.split(instance.lualibs,",") - luaname = file.basename(barename .. ".lua") - lucname = file.basename(barename .. ".luc") - -- todo: when this fails, we can just copy the merged libraries from - -- luatools since they are normally the same, at least for context - if lualibs[1] then - local firstlib = file.join(luapath,lualibs[1]) - if not lfs.isfile(firstlib) then - local foundname = input.find_files(instance,lualibs[1])[1] - if foundname then - input.report("located library path : " .. luapath) - luapath = file.dirname(foundname) - end - end - end - input.report("using library path : " .. luapath) - input.report("using lua libraries: " .. table.join(lualibs," ")) - utils.merger.selfcreate(lualibs,luapath,luaname) - if utils.lua.compile(luaname, lucname) and io.exists(lucname) then - luaname = lucname - input.report("using compiled initialization file " .. lucname) - else - input.report("using uncompiled initialization file " .. luaname) - end - else - for _, v in pairs({instance.luaname, instance.progname, barename}) do - v = string.gsub(v..".lua","%.lua%.lua$",".lua") - if v and (v ~= "") then - luaname = input.find_files(instance,v)[1] or "" - if luaname ~= "" then - break - end - end - end - end - if luaname == "" then - input.reportlines(messages.no_ini_file) - input.report("texname : " .. texname) - input.report("luaname : " .. instance.luaname) - input.report("progname : " .. instance.progname) - input.report("barename : " .. barename) - else - input.report("using lua initialization file " .. luaname) - local flags = { "--ini" } - if environment.arguments["mkii"] then - flags[#flags+1] = "--progname=" .. instance.progname - else - flags[#flags+1] = "--lua=" .. string.quote(luaname) - end - local bs = (environment.platform == "unix" and "\\\\") or "\\" -- todo: make a function - local command = "luatex ".. table.concat(flags," ") .. " " .. string.quote(fullname) .. " " .. bs .. "dump" - input.report("running command: " .. command .. 
"\n") - os.spawn(command) - end - end - else - input.report("no tex file given") - end -end - -function input.my_run_format(instance,name,data,more) - -- hm, rather old code here; we can now use the file.whatever functions - if name and (name ~= "") then - local barename = name:gsub("%.%a+$","") - local fmtname = "" - if input.usecache then - local path = file.join(caches.setpath(instance,"formats")) -- maybe platform - fmtname = file.join(path,barename..".fmt") or "" - end - if fmtname == "" then - fmtname = input.find_files(instance,barename..".fmt")[1] or "" - end - fmtname = input.clean_path(fmtname) - barename = fmtname:gsub("%.%a+$","") - if fmtname == "" then - input.report("no format with name",name) - else - local luaname = barename .. ".luc" - local f = io.open(luaname) - if not f then - luaname = barename .. ".lua" - f = io.open(luaname) - end - if f then - f:close() - local command = "luatex --fmt=" .. string.quote(barename) .. " --lua=" .. string.quote(luaname) .. " " .. string.quote(data) .. " " .. string.quote(more) - input.report("running command: " .. command) - os.spawn(command) - else - input.report("using format name",fmtname) - input.report("no luc/lua with name",barename) - end - end - end -end - --- helpers for verbose lists - -input.listers = input.listers or { } - -local function tabstr(str) - if type(str) == 'table' then - return table.concat(str," | ") - else - return str - end -end - -local function list(instance,list) - local pat = string.upper(instance.pattern or "","") - for _,key in pairs(table.sortedkeys(list)) do - if instance.pattern == "" or string.find(key:upper(),pat) then - if instance.kpseonly then - if instance.kpsevars[key] then - print(format("%s=%s",key,tabstr(list[key]))) - end - else - print(format('%s %s=%s',(instance.kpsevars[key] and 'K') or 'E',key,tabstr(list[key]))) - end - end - end -end - -function input.listers.variables (instance) list(instance,instance.variables ) end -function input.listers.expansions(instance) list(instance,instance.expansions) end - -function input.listers.configurations(instance) - for _,key in pairs(table.sortedkeys(instance.kpsevars)) do - if not instance.pattern or (instance.pattern=="") or key:find(instance.pattern) then - print(key.."\n") - for i,c in ipairs(instance.order) do - local str = c[key] - if str then - print(format("\t%s\t\t%s",i,input.aux.tabstr(str))) - end - end - print() - end - end -end - -input.report(banner,"\n") - -local ok = true - -if environment.arguments["find-file"] then - input.my_prepare_b(instance) - instance.format = environment.arguments["format"] or instance.format - if instance.pattern then - instance.allresults = true - input.for_files(instance, input.find_files, { instance.pattern }, instance.my_format) - else - input.for_files(instance, input.find_files, environment.files, instance.my_format) - end -elseif environment.arguments["find-path"] then - input.my_prepare_b(instance) - local path = input.find_file(instance, environment.files[1], instance.my_format) - if input.verbose then - input.report(file.dirname(path)) - else - print(file.dirname(path)) - end ---~ elseif environment.arguments["first-writable-path"] then ---~ input.my_prepare_b(instance) ---~ input.report(input.first_writable_path(instance,environment.files[1] or ".")) -elseif environment.arguments["run"] then - input.my_prepare_a(instance) -- ! 
no need for loading databases - input.verbose = true - input.my_run_format(instance,environment.files[1] or "",environment.files[2] or "",environment.files[3] or "") -elseif environment.arguments["fmt"] then - input.my_prepare_a(instance) -- ! no need for loading databases - input.verbose = true - input.my_run_format(instance,environment.arguments["fmt"], environment.files[1] or "",environment.files[2] or "") -elseif environment.arguments["expand-braces"] then - input.my_prepare_a(instance) - input.for_files(instance, input.expand_braces, environment.files) -elseif environment.arguments["expand-path"] then - input.my_prepare_a(instance) - input.for_files(instance, input.expand_path, environment.files) -elseif environment.arguments["expand-var"] or environment.arguments["expand-variable"] then - input.my_prepare_a(instance) - input.for_files(instance, input.expand_var, environment.files) -elseif environment.arguments["show-path"] or environment.arguments["path-value"] then - input.my_prepare_a(instance) - input.for_files(instance, input.show_path, environment.files) -elseif environment.arguments["var-value"] or environment.arguments["show-value"] then - input.my_prepare_a(instance) - input.for_files(instance, input.var_value, environment.files) -elseif environment.arguments["format-path"] then - input.my_prepare_b(instance) - input.report(caches.setpath(instance,"format")) -elseif instance.pattern then -- brrr - input.my_prepare_b(instance) - instance.format = environment.arguments["format"] or instance.format - instance.allresults = true - input.for_files(instance, input.find_files, { instance.pattern }, instance.my_format) -elseif environment.arguments["generate"] then - instance.renewcache = true - input.verbose = true - input.my_prepare_b(instance) -elseif environment.arguments["make"] or environment.arguments["ini"] or environment.arguments["compile"] then - input.my_prepare_b(instance) - input.verbose = true - input.my_make_format(instance,environment.files[1] or "") -elseif environment.arguments["selfmerge"] then - utils.merger.selfmerge(own.name,own.libs,own.list) -elseif environment.arguments["selfclean"] then - utils.merger.selfclean(own.name) -elseif environment.arguments["selfupdate"] then - input.my_prepare_b(instance) - input.verbose = true - input.update_script(instance,own.name,"luatools") -elseif environment.arguments["variables"] or environment.arguments["show-variables"] then - input.my_prepare_a(instance) - input.listers.variables(instance) -elseif environment.arguments["expansions"] or environment.arguments["show-expansions"] then - input.my_prepare_a(instance) - input.listers.expansions(instance) -elseif environment.arguments["configurations"] or environment.arguments["show-configurations"] then - input.my_prepare_a(instance) - input.listers.configurations(instance) -elseif environment.arguments["help"] or (environment.files[1]=='help') or (#environment.files==0) then - if not input.verbose then - input.verbose = true - input.report(banner,"\n") - end - input.reportlines(messages.help) -else - input.my_prepare_b(instance) - input.for_files(instance, input.find_files, environment.files, instance.my_format) -end - -if input.verbose then - input.report("") - input.report(string.format("runtime: %0.3f seconds",os.runtime())) -end - ---~ if ok then ---~ input.report("exit code: 0") os.exit(0) ---~ else ---~ input.report("exit code: 1") os.exit(1) ---~ end - -if environment.platform == "unix" then - io.write("\n") -end diff --git 
a/Build/source/texk/texlive/linked_scripts/makeglossaries b/Build/source/texk/texlive/linked_scripts/makeglossaries deleted file mode 100755 index 3b9f1cb6b48..00000000000 --- a/Build/source/texk/texlive/linked_scripts/makeglossaries +++ /dev/null @@ -1,293 +0,0 @@ -#!/usr/bin/env perl - -# File : makeglossaries -# Author : Nicola Talbot -# Version : 1.4 (2008/05/10) -# Description: simple Perl script that calls makeindex. -# Intended for use with "glossaries.sty" (saves having to remember -# all the various switches) - -# This file is distributed as part of the glossaries LaTeX package. -# Copyright 2007 Nicola L.C. Talbot -# This work may be distributed and/or modified under the -# conditions of the LaTeX Project Public License, either version 1.3 -# of this license of (at your option) any later version. -# The latest version of this license is in -# http://www.latex-project.org/lppl.txt -# and version 1.3 or later is part of all distributions of LaTeX -# version 2005/12/01 or later. -# -# This work has the LPPL maintenance status `maintained'. -# -# The Current Maintainer of this work is Nicola Talbot. - -# This work consists of the files glossaries.dtx and glossaries.ins -# and the derived files glossaries.sty, mfirstuc.sty, -# glossary-hypernav.sty, glossary-list.sty, glossary-long.sty, -# glossary-super.sty, glossaries.perl. -# Also makeglossaries and makeglossaries. -# -# History: -# v1.4 (2008-05-10) : -# * added support for filenames with spaces. -# v1.3 (2008-03-08) : -# * changed first line from /usr/bin/perl -w to /usr/bin/env perl -# (Thanks to Karl Berry for suggesting this.) -# v1.2 (2008-03-02) : -# * added support for --help and --version -# * improved error handling -# v1.1 (2008-02-13) : -# * added -w and strict -# * added check to ensure .tex file not passed to makeglossaries -# -# v1.0 (2007-05-10) : Initial release. - -use Getopt::Std; -use strict; - -$Getopt::Std::STANDARD_HELP_VERSION = 1; - -my $version="1.3 (2008-03-08)"; - -my($opt_q, $opt_t, $opt_o, $opt_s, $opt_p, $opt_g, $opt_c, $opt_r, - $opt_l, $opt_i)=("","","","","","","","","",""); - -getopts('s:o:t:p:ilqrcg'); - -unless ($#ARGV == 0) -{ - die "makeglossaries: Need exactly one file argument.\nUse `makeglossaries --help' for help.\n"; -} - -# define known extensions - -my %exttype = ( - main => {in=>'glo', out=>'gls', 'log'=>'glg'}, - ); - -my $ext = ''; -my $name = $ARGV[0]; - -# modified this to make sure users don't try passing the -# tex file: -if (length($ARGV[0]) > 3 and substr($ARGV[0],-4,1) eq ".") -{ - $name = substr($ARGV[0],0,length($ARGV[0])-4); - - $ext = substr($ARGV[0],-3,3); - - if (lc($ext) eq 'tex') - { - die("Don't pass the tex file to makeglossaries:\n" - ."either omit the extension to make all the glossaries, " - ."or specify one of the glossary files, e.g. 
$name.glo, to " - ."make just that glossary.\n") - } -} - -my $istfile = "$name.ist"; - -# check aux file for other glossary types -# and for ist file name - -if (open AUXFILE, "$name.aux") -{ - while () - { - if (m/\\\@newglossary\s*\{(.*)\}{(.*)}{(.*)}{(.*)}/ - and ($1 ne 'main')) - { - $exttype{$1}{'log'} = $2; - $exttype{$1}{'out'} = $3; - $exttype{$1}{'in'} = $4; - - if (!$opt_q) - { - print "added glossary type '$1' ($2,$3,$4)\n"; - } - } - - if (m/\\\@istfilename\s*{([^}]*)}/) - { - $istfile = $1; - - # check if double quotes were added to \jobname - $istfile=~s/^"(.*)"\.ist$/$1.ist/; - } - } - - close AUXFILE; -} -else -{ - print STDERR "Unable to open $name.aux: $!\n"; -} - -# save all the general makeindex switches - -my $mkidxopts = ''; - -if ($opt_i) -{ - $mkidxopts .= " -i"; -} - -if ($opt_l) -{ - $mkidxopts .= " -l"; -} - -if ($opt_q) -{ - $mkidxopts .= " -q"; -} - -if ($opt_r) -{ - $mkidxopts .= " -r"; -} - -if ($opt_c) -{ - $mkidxopts .= " -c"; -} - -if ($opt_g) -{ - $mkidxopts .= " -g"; -} - -unless ($opt_p eq "") -{ - $mkidxopts .= " -p $opt_p"; -} - -unless ($opt_s eq "") -{ - $istfile = $opt_s; -} - -if ($ext ne '') -{ - my %thistype = %{$exttype{'main'}}; #default - - foreach my $type (keys %exttype) - { - if ($exttype{$type}{'in'} eq $ext) - { - %thistype = %{$exttype{$type}}; - - last; - } - } - - my $outfile; - - if ($opt_o eq "") - { - $outfile = "$name.$thistype{out}"; - } - else - { - $outfile = $opt_o; - } - - my $transcript; - - if ($opt_t eq "") - { - $transcript = "$name.$thistype{'log'}"; - } - else - { - $transcript = $opt_t; - } - - &makeindex("$name.$ext",$outfile,$transcript,$istfile, - $mkidxopts,$opt_q); -} -else -{ - foreach my $type (keys %exttype) - { - my %thistype = %{$exttype{$type}}; - - my $inputfile = "$name.$thistype{in}"; - - if (-r $inputfile) - { - my $outfile; - - if ($opt_o eq "") - { - $outfile = "$name.$thistype{out}"; - } - else - { - $outfile = $opt_o; - } - - my $transcript; - - if ($opt_t eq "") - { - $transcript = "$name.$thistype{'log'}"; - } - else - { - $transcript = $opt_t; - } - - &makeindex($inputfile,$outfile,$transcript, - $istfile,$mkidxopts,$opt_q); - } - else - { - print STDERR "No read access for '$inputfile': $!\n"; - } - } -} - -sub makeindex{ - my($in,$out,$trans,$ist,$rest,$quiet) = @_; - my($name,$cmdstr,$buffer,$n,$i,$j); - my(@stuff,@item); - - $cmdstr = "$rest -s \"$ist\" -t \"$trans\" -o \"$out\" \"$in\""; - - unless ($quiet) - { - print "makeindex $cmdstr\n"; - } - - `makeindex $cmdstr`; -} - -sub HELP_MESSAGE{ - print "\nSyntax : makeglossaries [options] \n\n"; - print "For use with the glossaries package to pass relevant\n"; - print "files to makeindex\n\n"; - print "\tBase name of glossary file(s). 
This should\n"; - print "\t\tbe the name of your main LaTeX document without any\n"; - print "\t\textension.\n"; - print "\nOptions:\n"; - print "-c\t\tCompress intermediate blanks\n"; - print "-g\t\tEmploy German word ordering\n"; - print "-l\t\tLetter ordering\n"; - print "-o \tUse as the output file.\n"; - print "-p \tSet the starting page number to be \n"; - print "-q\t\tQuiet mode\n"; - print "-r\t\tDisable implicit page range formation\n"; - print "-s \tEmploy as the style file\n"; - print "-t \tEmploy as the transcript file\n"; - print "\nSee makeindex documentation for further details on these "; - print "options\n"; -} - -sub VERSION_MESSAGE{ - print "Makeglossaries Version $version\n"; - print "Copyright (C) 2007 Nicola L C Talbot\n"; - print "This material is subject to the LaTeX Project Public License.\n"; -} - -1; diff --git a/Build/source/texk/texlive/linked_scripts/makempy b/Build/source/texk/texlive/linked_scripts/makempy deleted file mode 100755 index 4bf7a1af230..00000000000 --- a/Build/source/texk/texlive/linked_scripts/makempy +++ /dev/null @@ -1,2 +0,0 @@ -#!/bin/sh -texmfstart makempy.pl "$@" diff --git a/Build/source/texk/texlive/linked_scripts/mkjobtexmf.pl b/Build/source/texk/texlive/linked_scripts/mkjobtexmf.pl deleted file mode 100755 index d9ce104e158..00000000000 --- a/Build/source/texk/texlive/linked_scripts/mkjobtexmf.pl +++ /dev/null @@ -1,810 +0,0 @@ -#!/usr/bin/env perl -# -# ToDos/unsolved problems: -# * collision of symbol links -# * texmf.cnf (path settings, variables) -# * aliases -# * binaries, libraries -# -use strict; -$^W=1; - -my $prj = 'mkjobtexmf'; -my $version = '0.7'; -my $date = '2008/06/28'; -my $author = 'Heiko Oberdiek'; -my $copyright = "Copyright 2007, 2008 $author"; - -my $cmd_tex = 'pdflatex'; -my $cmd_kpsewhich = 'kpsewhich'; -my $cmd_texhash = 'texhash'; -my $cmd_strace = 'strace'; -my $ext_tex = '.tex'; -my $ext_recorder = '.fls'; -my $ext_strace = '.strace'; -my $ext_mkjobtexmf = '.mjt'; -my $jobname = ''; -my $texname = ''; -my $destdir = ''; -my @args = (); -my @texopt = (); -my $verbose = 0; -my $output = 0; -my $strace = 0; -my $copy = 0; -my $flat = 0; -my $needs_texhash = 0; -my @texmf; -my %files; -my %links; -my %flat_ignore = ( - 'ls-R' => '', - 'aliases' => '', -); - -my $title = "\U$prj\E $date v$version, $copyright\n"; - -print $title; - -sub die_error ($) { - my $msg = shift; - die "!!! Error: $msg!\n"; -} - -sub warning ($) { - my $msg = shift; - print "!!! Warning: $msg!\n"; -} - -sub verbose (@) { - my @msg = @_; - print "* @msg\n" if $verbose; -} - -sub value ($) { - my $value = $_[0]; - "[$value]"; -} - -sub die_usage { - my $msg = $_[0]; - pod2usage( - -exitstatus => 2, - -msg => "\n==> $msg!\n"); -} - -use Getopt::Long; -use Pod::Usage; - -my $man = 0; -my $help = 0; - -GetOptions( - 'jobname=s' => \$jobname, - 'texname=s' => \$texname, - 'texopt=s' => \@texopt, - 'destdir=s' => \$destdir, - 'cmd-tex=s' => \$cmd_tex, - 'cmd-kpsewhich=s' => \$cmd_kpsewhich, - 'cmd-texhash=s' => \$cmd_texhash, - 'cmd-strace=s' => \$cmd_strace, - 'strace' => \$strace, - 'copy' => \$copy, - 'flat' => \$flat, - 'verbose' => \$verbose, - 'output' => \$output, - 'help|?' 
=> \$help, - 'man' => \$man, -) or die_usage('Unknown option'); -pod2usage(1) if $help; -pod2usage(-exitstatus => 0, -verbose => 2) if $man; - -if (@ARGV > 0) { - $strace = 1; - $texname = ''; - my @args = @ARGV; -} -$jobname or die_usage('Missing jobname'); -$texname = "$jobname$ext_tex" unless $texname; -$destdir = "$jobname$ext_mkjobtexmf" unless $destdir; - -verbose "jobname: " . value $jobname; -verbose "texname: " . value $texname if $texname; -verbose "command: " . value "@args" if @args; -verbose "destdir: " . value $destdir; - -if (!$copy) { - my $symlink_exists = eval { symlink('', ''); 1 }; - if ($symlink_exists) { - verbose "symbolic linking: supported"; - } - else { - $copy = 1; - verbose "symbolic linking: unsupported"; - } -} -my $umask = umask; -if (defined($umask)) { - verbose "umask: " . sprintf("%04o", $umask); -} -else { - $umask = 0; - verbose "umask: unsupported"; -} - -if ($copy) { - use File::Copy; -} -if ($flat) { - use File::Basename; -} - -sub check_child_error () { - if ($? != 0) { - if ($? == -1) { - die_error "Failed to execute: $!"; - } - elsif ($? & 127) { - die_error sprintf "Child died with signal %d, %s coredump", - ($? & 127), ($? & 128) ? 'with' : 'without'; - } - else { - die_error sprintf "Child exited with value %d", $? >> 8; - } - } - verbose "child exit: ok"; -} - -sub run_generic (@) { - my @args = @_; - my $cmd = $_[0]; - verbose "exec: " . value "@args"; - print '>' x 79, "\n"; - system $cmd @args; - print '<' x 79, "\n"; - check_child_error; -} - -sub run_tex { - if ($strace) { - my @run_args; - if (@args) { - @run_args = @args; - } - else { - @run_args = ( - $cmd_tex, - '-interaction=nonstopmode', - @texopt, - $texname - ); - } - run_generic( - $cmd_strace, - '-f', - '-e', - 'trace=open,access', # trace=file - '-o', - "$jobname$ext_strace", - @run_args - ) - } - else { - run_generic( - $cmd_tex, - '-recorder', - "-jobname=$jobname", - '-interaction=nonstopmode', - @texopt, - $texname - ); - } -} - -sub run_texhash { - return if $flat; - if ($needs_texhash) { - run_generic( - $cmd_texhash, - "$destdir/texmf" - ); - } - else { - verbose("texhash run skipped, no files added"); - } -} - -use Cwd 'abs_path', 'getcwd'; - -sub get_texmf_trees () { - return if $flat; - my $cmdline = "$cmd_kpsewhich -expand-path='\$TEXMF'"; - verbose "exec: " . value($cmdline); - my $str = `$cmdline`; - check_child_error; - chomp $str; - @texmf = split ':', $str; - my %texmf; - foreach my $texmf (@texmf) { - $texmf{$texmf} = ''; - $texmf{abs_path($texmf)} = ''; - } - @texmf = sort keys %texmf; - if ($verbose) { - if (@texmf) { - map { verbose 'texmf: ' . value($_) } @texmf; - } - else { - verbose 'texmf: none'; - } - } -} - -sub analyze_recorder { - my $pwd = getcwd; - verbose "pwd: " . value($pwd); - - my $file_rec = $jobname . ($strace ? $ext_strace : $ext_recorder); - verbose 'File with recorded file names: ' . value($file_rec); - open(IN, '<', $file_rec) - or die_error "Cannot open `$file_rec'"; - if ($strace) { - while () { - chomp; - next if /\)\s+= -\d/; # -1 ENOENT, ... 
- next if /\WO_DIRECTORY\W/; # skip directories - my $type = 'INPUT'; - if ($output) { - $type = 'OUTPUT' if /\WO_WRONLY\W/; - } - else { - next if /\WO_WRONLY\W/; - } - /^\d+\s+\w+\(\"([^"]+)\",/ or warning "Unknown entry `$_'"; - my $file = $1; - $files{$file} = ''; - } - } - else { - while () { - chomp; - next if /^PWD /; - next if not $output and /^OUTPUT /; - /^(INPUT|OUTPUT) (.*)$/ or warning "Unknown entry `$_'"; - my $type = $1; - my $file = $2; - $files{$file} = ''; - } - } - close(IN); -} - -sub map_files { - if ($flat) { - map_files_flat(); - } - else { - map_files_texmf(); - } -} - -sub map_files_flat { - my %abs_files; - my %names; - my %clashes; - - foreach my $file (keys %files) { - $abs_files{abs_path($file)} = ''; - } - - foreach my $file (keys %abs_files) { - my $name = basename($file); - next if exists $flat_ignore{$name}; - if (defined($names{$name})) { - push @{$names{$name}}, $file; - $clashes{$name} = ''; - } - else { - my @a = ($file); - $names{$name} = \@a; - } - } - - foreach my $name (sort keys %clashes) { - print "* file name clash for " . value($name) . "\n"; - my @a = @{$names{$name}}; - foreach my $file (@a) { - print " " . value($file) . "\n"; - } - } - - foreach my $name (sort keys %names) { - my $file = @{$names{$name}}[0]; - my $clash = $clashes{$name} ? ' (clash)' : ''; - verbose value($name) . ' => ' . value($file) . $clash; - $links{$name} = $file; - } -} - -sub map_files_texmf { - my @failed; - - foreach my $file (sort keys %files) { - verbose "file: " . value($file); - - my $abs_file = abs_path($file); - - my $found = ''; - foreach (@texmf) { - my $texmf = "$_/"; - my $len = length($texmf); - my $str = substr $file, 0, $len; - if ($texmf eq $str) { - $found = 'texmf/' . substr $file, $len; - if ($found =~ /(^|\/)\.\.\//) { - $found = ''; - } - } - last if $found; - my $str = substr $abs_file, 0, $len; - if ($texmf eq $str) { - $found = 'texmf/' . substr $abs_file, $len; - last; - } - } - if (not($found)) { - if ($file =~ /(^|\/)\.\.\// or $file =~ /^\//) { - push @failed, $file; - } - else { - $found = $file; - } - } - if ($found) { - $links{$found} = abs_path($file); - } - } - - if ($verbose) { - foreach (sort keys %links) { - verbose value($_) . ' => ' . value($links{$_}); - } - } - - foreach (@failed) { - print "!!! Failed: " . value($_) . "\n"; - } -} - -sub make_dirs ($) { - my $path = shift; - my @elems = split /\/+/, $path; - if (@elems <= 1) { - return; - } - pop @elems; - my $dir = ''; - foreach my $elem (@elems) { - $dir .= '/' if $dir; - $dir .= $elem; - next if -d $dir; - verbose 'mkdir: ' . value($dir); - mkdir $dir or die_error "Cannot make directory `$dir'"; - } -} - -sub make_links { - - foreach my $key (sort keys %links) { - my $source = $links{$key}; - my $dest = "$destdir/$key"; - make_dirs $dest; - if (-e $dest) { - my $type = ''; - if (-l $dest) { - $type .= 'link'; - } - elsif (-f $dest) { - $type = 'file'; - } - elsif (-d $dest) { - $type = 'directory'; - } - elsif (-b $dest) { - $type = 'block device'; - } - elsif (-c $dest) { - $type = 'character device'; - } - elsif (-p $dest) { - $type = 'pipe'; - } - elsif (-S $dest) { - $type = 'socket'; - } - elsif (-t $dest) { - $type = 'tty'; - } - $type = " ($type)" if $type; - verbose "destination$type exists: " . 
value($dest); - next; - } - $needs_texhash = 1; - do_link_copy($source, $dest); - } -} - -sub do_link_copy { - my $source = shift; - my $dest = shift; - my $success = 0; - - if ($copy) { - if (copy($source, $dest) == 1) { - $success = 1; - my ($source_mode, $source_atime, $source_mtime) - = (stat($source))[2, 8, 9]; - my ($dest_mode, $dest_atime, $dest_mtime) - = (stat($dest))[2, 8, 9]; - # preserve executable permissions if necessary - my $new_dest_mode = $dest_mode - | (($source_mode & 0111) & ~$umask); - if ($new_dest_mode != $dest_mode) { - if (chmod($new_dest_mode, $dest) < 1) { - print "!!! Setting executive mode failed: " - . value($dest) . "\n"; - } - } - # preserve file times - if ($source_atime != $dest_atime - || $source_mtime != $dest_mtime) { - if (utime($source_atime, $source_mtime, $dest) < 1) { - print "!!! Setting file times failed: " - . value($dest) . "\n"; - } - } - } - } - else { - if (symlink($source, $dest) == 1) { - $success = 1; - } - } - if ($success == 0) { - my $method = $copy ? 'Copying' : 'Symbolic linking'; - print "!!! $method failed:\n " - . value($dest) . ' => ' . value($source) . "\n"; - } -} - -run_tex; -get_texmf_trees; -analyze_recorder; -map_files; -make_links; -run_texhash; - -1; - -__DATA__ - -=head1 NAME - -mkjobtexmf -- Generate a texmf tree for a particular job - -=head1 SYNOPSIS - -The progam B runs a program and tries to -find the used file names. Two methods are available, -option C<-recorder> of TeX (Web2C) or the program B. - -Then it generates a directory with a texmf tree. It checks -the found files and tries sort them in this texmf tree. - -It can be used for archiving purposes or to speed up -following TeX runs. - - mkjobtexmf [options] - -This runs TeX that can be configured by options. -Both methods for getting the used file names are available. - - mkjobtexmf [options] -- [args] - -The latter form runs program I with arguments I -instead of TeX. As method only program B is available. - -Options: - - --jobname Name of the job (mandatory). - Usually this is the TeX file - without extension - --texname Input file for TeX. Default is the - job name with extension '.tex' - --texopt