author | Akira Kakuto <kakuto@fuk.kindai.ac.jp> | 2016-02-24 00:09:37 +0000
---|---|---
committer | Akira Kakuto <kakuto@fuk.kindai.ac.jp> | 2016-02-24 00:09:37 +0000
commit | 50108019f84d1c2a7f83cc165ba09f8dc3bebcf3 (patch) |
tree | cea5c82717c8903c8c6295d2aa9d900f9dbefc48 | /Build
parent | 655a66b1cc5bbf0241709ba5a1f88f7d4b7ffd9f (diff) |
web2c/luatexdir: sync with the upstream
git-svn-id: svn://tug.org/texlive/trunk@39841 c570f23f-e606-0410-a88d-b1316a301751
Diffstat (limited to 'Build')
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lang/texlang.w | 209
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lua/lnewtokenlib.c | 99
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lua/lnodelib.c | 7
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lua/ltexlib.c | 17
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lua/luainit.w | 14
-rw-r--r-- | Build/source/texk/web2c/luatexdir/luatex.c | 4
-rw-r--r-- | Build/source/texk/web2c/luatexdir/pdf/pdfgen.w | 4
-rw-r--r-- | Build/source/texk/web2c/luatexdir/tex/commands.w | 1
-rw-r--r-- | Build/source/texk/web2c/luatexdir/tex/dumpdata.w | 2
-rw-r--r-- | Build/source/texk/web2c/luatexdir/tex/equivalents.h | 8
-rw-r--r-- | Build/source/texk/web2c/luatexdir/tex/packaging.h | 2
-rw-r--r-- | Build/source/texk/web2c/luatexdir/tex/packaging.w | 13
12 files changed, 256 insertions, 124 deletions
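Besides the hyphenation cleanup, this sync adds a Lua-visible `token.put_next`, a packing-mode argument for `tex.splitbox`/`vsplit`, and a new `\firstvalidlanguage` integer parameter. As a quick orientation before the hunks, here is the usage pattern from the experimental-code comment in the `lnewtokenlib.c` hunk below, rendered as a small Lua sketch; it assumes the input stream still holds four tokens to be read:

```lua
-- Sketch of the new token.put_next, following the comment in lnewtokenlib.c.
-- Assumes the upcoming input is four tokens, say "wxyz".
local t1 = token.get_next()
local t2 = token.get_next()
local t3 = token.get_next()
local t4 = token.get_next()

-- a single table argument is flushed back in sequence ...
token.put_next { t1, t2 }
-- ... but a later call is pushed in front of it,
-- so "wxyz" is re-read as "yzwx"
token.put_next ( t3, t4 )
```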
diff --git a/Build/source/texk/web2c/luatexdir/lang/texlang.w b/Build/source/texk/web2c/luatexdir/lang/texlang.w index 8e0deb3357d..07f6de1f469 100644 --- a/Build/source/texk/web2c/luatexdir/lang/texlang.w +++ b/Build/source/texk/web2c/luatexdir/lang/texlang.w @@ -193,8 +193,8 @@ void load_tex_patterns(int curlang, halfword head) uindex = uni2string(uindex, xx); \ } while (0) -@ Cleans one word which is returned in |cleaned|, returns the new offset -into |buffer| +@ Cleans one word which is returned in |cleaned|, returns the new offset into +|buffer| @c const char *clean_hyphenation(int id, const char *buff, char **cleaned) @@ -243,17 +243,17 @@ const char *clean_hyphenation(int id, const char *buff, char **cleaned) } if (u == '}') { items++; - u = uword[i++];; + u = uword[i++]; } if (u == '{') { - u = uword[i++];; + u = uword[i++]; } while (u && u != '}') { STORE_CHAR(id,u); u = uword[i++]; } if (u == '}') { - items++; + items++; } if (items != 3) { /* syntax error */ *cleaned = NULL; @@ -318,16 +318,15 @@ void clear_hyphenation(struct tex_language *lang) } } - void load_tex_hyphenation(int curlang, halfword head) { char *s = tokenlist_to_cstring(head, 1, NULL); load_hyphenation(get_language(curlang), (unsigned char *) s); } -@ TODO: clean this up. The |delete_attribute_ref()| statements are not very - nice, but needed. Also, in the post-break, it would be nicer to get the - attribute list from |vlink(n)|. No rush, as it is currently not used much. +@ TODO: clean this up. The |delete_attribute_ref()| statements are not very nice, +but needed. Also, in the post-break, it would be nicer to get the attribute list +from |vlink(n)|. No rush, as it is currently not used much. @c halfword insert_discretionary(halfword t, halfword pre, halfword post, @@ -512,9 +511,7 @@ char *exception_strings(struct tex_language *lang) while (lua_next(L, -2) != 0) { value = lua_tolstring(L, -1, &l); if (current + 2 + l > size) { - ret = - xrealloc(ret, - (unsigned) ((size + size / 5) + current + l + 1024)); + ret = xrealloc(ret, (unsigned) ((size + size / 5) + current + l + 1024)); size = (size + size / 5) + current + l + 1024; } *(ret + current) = ' '; @@ -526,8 +523,8 @@ char *exception_strings(struct tex_language *lang) return ret; } -@ the sequence from |wordstart| to |r| can contain only normal characters -it could be faster to modify a halfword pointer and return an integer +@ the sequence from |wordstart| to |r| can contain only normal characters it +could be faster to modify a halfword pointer and return an integer @c static halfword find_exception_part(unsigned int *j, unsigned int *uword, int len) @@ -593,8 +590,7 @@ static void do_exception(halfword wordstart, halfword r, char *replacement) for (i = 0; i < len; i++) { if (uword[i + 1] == '-') { /* a hyphen follows */ - while (vlink(t) != r - && (type(t) != glyph_node || !is_simple_character(t))) + while (vlink(t) != r && (type(t) != glyph_node || !is_simple_character(t))) t = vlink(t); if (vlink(t) == r) break; @@ -641,75 +637,69 @@ static void do_exception(halfword wordstart, halfword r, char *replacement) } } -@ This is a documentation section from the pascal web file. It is not -true any more, but I do not have time right now to rewrite it -- Taco +@ This is a documentation section from the pascal web file. 
It is not true any +more, but I do not have time right now to rewrite it -- Taco When the line-breaking routine is unable to find a feasible sequence of -breakpoints, it makes a second pass over the paragraph, attempting to -hyphenate the hyphenatable words. The goal of hyphenation is to insert -discretionary material into the paragraph so that there are more -potential places to break. - -The general rules for hyphenation are somewhat complex and technical, -because we want to be able to hyphenate words that are preceded or -followed by punctuation marks, and because we want the rules to work -for languages other than English. We also must contend with the fact -that hyphens might radically alter the ligature and kerning structure -of a word. - -A sequence of characters will be considered for hyphenation only if it -belongs to a ``potentially hyphenatable part'' of the current paragraph. -This is a sequence of nodes $p_0p_1\ldots p_m$ where $p_0$ is a glue node, -$p_1\ldots p_{m-1}$ are either character or ligature or whatsit or -implicit kern nodes, and $p_m$ is a glue or penalty or insertion or adjust -or mark or whatsit or explicit kern node. (Therefore hyphenation is -disabled by boxes, math formulas, and discretionary nodes already inserted -by the user.) The ligature nodes among $p_1\ldots p_{m-1}$ are effectively -expanded into the original non-ligature characters; the kern nodes and -whatsits are ignored. Each character |c| is now classified as either a -nonletter (if |lc_code(c)=0|), a lowercase letter (if -|lc_code(c)=c|), or an uppercase letter (otherwise); an uppercase letter -is treated as if it were |lc_code(c)| for purposes of hyphenation. The -characters generated by $p_1\ldots p_{m-1}$ may begin with nonletters; let -$c_1$ be the first letter that is not in the middle of a ligature. Whatsit -nodes preceding $c_1$ are ignored; a whatsit found after $c_1$ will be the -terminating node $p_m$. All characters that do not have the same font as -$c_1$ will be treated as nonletters. The |hyphen_char| for that font -must be between 0 and 255, otherwise hyphenation will not be attempted. -\TeX\ looks ahead for as many consecutive letters $c_1\ldots c_n$ as -possible; however, |n| must be less than 64, so a character that would -otherwise be $c_{64}$ is effectively not a letter. Furthermore $c_n$ must -not be in the middle of a ligature. In this way we obtain a string of -letters $c_1\ldots c_n$ that are generated by nodes $p_a\ldots p_b$, where -|1<=a<=b+1<=m|. If |n>=l_hyf+r_hyf|, this string qualifies for hyphenation; -however, |uc_hyph| must be positive, if $c_1$ is uppercase. +breakpoints, it makes a second pass over the paragraph, attempting to hyphenate +the hyphenatable words. The goal of hyphenation is to insert discretionary +material into the paragraph so that there are more potential places to break. + +The general rules for hyphenation are somewhat complex and technical, because we +want to be able to hyphenate words that are preceded or followed by punctuation +marks, and because we want the rules to work for languages other than English. We +also must contend with the fact that hyphens might radically alter the ligature +and kerning structure of a word. + +A sequence of characters will be considered for hyphenation only if it belongs to +a ``potentially hyphenatable part'' of the current paragraph. 
This is a sequence +of nodes $p_0p_1\ldots p_m$ where $p_0$ is a glue node, $p_1\ldots p_{m-1}$ are +either character or ligature or whatsit or implicit kern nodes, and $p_m$ is a +glue or penalty or insertion or adjust or mark or whatsit or explicit kern node. +(Therefore hyphenation is disabled by boxes, math formulas, and discretionary +nodes already inserted by the user.) The ligature nodes among $p_1\ldots p_{m-1}$ +are effectively expanded into the original non-ligature characters; the kern +nodes and whatsits are ignored. Each character |c| is now classified as either a +nonletter (if |lc_code(c)=0|), a lowercase letter (if |lc_code(c)=c|), or an +uppercase letter (otherwise); an uppercase letter is treated as if it were +|lc_code(c)| for purposes of hyphenation. The characters generated by $p_1\ldots +p_{m-1}$ may begin with nonletters; let $c_1$ be the first letter that is not in +the middle of a ligature. Whatsit nodes preceding $c_1$ are ignored; a whatsit +found after $c_1$ will be the terminating node $p_m$. All characters that do not +have the same font as $c_1$ will be treated as nonletters. The |hyphen_char| for +that font must be between 0 and 255, otherwise hyphenation will not be attempted. +\TeX\ looks ahead for as many consecutive letters $c_1\ldots c_n$ as possible; +however, |n| must be less than 64, so a character that would otherwise be +$c_{64}$ is effectively not a letter. Furthermore $c_n$ must not be in the middle +of a ligature. In this way we obtain a string of letters $c_1\ldots c_n$ that are +generated by nodes $p_a\ldots p_b$, where |1<=a<=b+1<=m|. If |n>=l_hyf+r_hyf|, +this string qualifies for hyphenation; however, |uc_hyph| must be positive, if +$c_1$ is uppercase. The hyphenation process takes place in three stages. First, the candidate -sequence $c_1\ldots c_n$ is found; then potential positions for hyphens -are determined by referring to hyphenation tables; and finally, the nodes -$p_a\ldots p_b$ are replaced by a new sequence of nodes that includes the -discretionary breaks found. - -Fortunately, we do not have to do all this calculation very often, because -of the way it has been taken out of \TeX's inner loop. For example, when -the second edition of the author's 700-page book {\sl Seminumerical -Algorithms} was typeset by \TeX, only about 1.2 hyphenations needed to be -@^Knuth, Donald Ervin@> -tried per paragraph, since the line breaking algorithm needed to use two -passes on only about 5 per cent of the paragraphs. - -When a word been set up to contain a candidate for hyphenation, -\TeX\ first looks to see if it is in the user's exception dictionary. If not, -hyphens are inserted based on patterns that appear within the given word, -using an algorithm due to Frank~M. Liang. -@^Liang, Franklin Mark@> - -@ This is incompatible with TEX because the first word of a paragraph -can be hyphenated, but most european users seem to agree that -prohibiting hyphenation there was not the best idea ever. +sequence $c_1\ldots c_n$ is found; then potential positions for hyphens are +determined by referring to hyphenation tables; and finally, the nodes $p_a\ldots +p_b$ are replaced by a new sequence of nodes that includes the discretionary +breaks found. + +Fortunately, we do not have to do all this calculation very often, because of the +way it has been taken out of \TeX's inner loop. 
For example, when the second +edition of the author's 700-page book {\sl Seminumerical Algorithms} was typeset +by \TeX, only about 1.2 hyphenations needed to be @^Knuth, Donald Ervin@> tried +per paragraph, since the line breaking algorithm needed to use two passes on only +about 5 per cent of the paragraphs. + +When a word been set up to contain a candidate for hyphenation, \TeX\ first looks +to see if it is in the user's exception dictionary. If not, hyphens are inserted +based on patterns that appear within the given word, using an algorithm due to +Frank~M. Liang. @^Liang, Franklin Mark@> + +@ This is incompatible with TEX because the first word of a paragraph can be +hyphenated, but most european users seem to agree that prohibiting hyphenation +there was not the best idea ever. @c -static halfword find_next_wordstart(halfword r) +static halfword find_next_wordstart(halfword r, halfword first_language) { register int l; register int start_ok = 1; @@ -758,7 +748,7 @@ static halfword find_next_wordstart(halfword r) } else { start_ok = 0; } - } else if (start_ok && (char_lang(r)>0) && ((l = get_hj_code(char_lang(r),chr)) > 0)) { + } else if (start_ok && (char_lang(r)>=first_language) && ((l = get_hj_code(char_lang(r),chr)) > 0)) { if (char_uchyph(r) || l == chr) { return r; } else { @@ -813,8 +803,8 @@ void hnj_hyphenation(halfword head, halfword tail) char *hy = utf8word; char *replacement = NULL; boolean explicit_hyphen = false; - halfword s, r = head, wordstart = null, save_tail1 = null, left = - null, right = null; + halfword first_language = int_par(first_valid_language_code); + halfword s, r = head, wordstart = null, save_tail1 = null, left = null, right = null; /* this first movement assures two things: \item{a)} that we won't waste lots of time on something that has been @@ -828,7 +818,7 @@ void hnj_hyphenation(halfword head, halfword tail) r = vlink(r); } /* this will make |r| a glyph node with subtype character */ - r = find_next_wordstart(r); + r = find_next_wordstart(r,first_language); if (r == null) return; @@ -852,8 +842,18 @@ void hnj_hyphenation(halfword head, halfword tail) hmin = get_hyphenation_min(clang); langdata.pre_hyphen_char = get_pre_hyphen_char(clang); langdata.post_hyphen_char = get_post_hyphen_char(clang); - while (r != null && type(r) == glyph_node && is_simple_character(r) && clang == char_lang(r) && - (((clang > 0) && (lchar = get_hj_code(clang,character(r))) > 0) || (character(r) == ex_hyphen_char && (lchar = ex_hyphen_char)))) { + while ( r != null + && type(r) == glyph_node + && is_simple_character(r) + && clang == char_lang(r) + && ( ( (clang >= first_language) + && (lchar = get_hj_code(clang,character(r))) > 0 + ) + || ( character(r) == ex_hyphen_char + && (lchar = ex_hyphen_char) + ) + ) + ) { if (character(r) == ex_hyphen_char) explicit_hyphen = true; wordlen++; @@ -863,12 +863,17 @@ void hnj_hyphenation(halfword head, halfword tail) end_word = r; r = vlink(r); } - if (valid_wordend(r) && wordlen >= lhmin + rhmin && (hmin <= 0 || wordlen >= hmin) - && (hyf_font != 0) && clang >=0 && (lang = tex_languages[clang]) != NULL) { + if ( valid_wordend(r) + && clang >= first_language + && wordlen >= lhmin + rhmin + && (hmin <= 0 || wordlen >= hmin) + && (hyf_font != 0) + && (lang = tex_languages[clang]) != NULL + ) { *hy = 0; - if (lang->exceptions != 0 && - (replacement = - hyphenation_exception(lang->exceptions, utf8word)) != NULL) { + if ( lang->exceptions != 0 + && (replacement = hyphenation_exception(lang->exceptions, utf8word)) != NULL + ) { #ifdef 
VERBOSE formatted_warning("hyphenation","replacing %s (c=%d) by %s", utf8word, clang, replacement); #endif @@ -887,17 +892,15 @@ void hnj_hyphenation(halfword head, halfword tail) if (character(rr) == ex_hyphen_char) { t = compound_word_break(rr, clang); subtype(t) = automatic_disc; - while(character(alink(rr)) == ex_hyphen_char) - rr = alink(rr); - if (rr == wordstart) - break; + while (character(alink(rr)) == ex_hyphen_char) + rr = alink(rr); + if (rr == wordstart) + break; } } rr = alink(rr); } - } else if (lang->patterns != NULL) { - left = wordstart; for (i = lhmin; i > 1; i--) { left = vlink(left); @@ -910,11 +913,9 @@ void hnj_hyphenation(halfword head, halfword tail) while (!is_simple_character(right)) right = alink(right); } - #ifdef VERBOSE formatted_warning("hyphenation","hyphenate %s (c=%d,l=%d,r=%d) from %c to %c", - utf8word, clang, lhmin, rhmin, character(left), - character(right)); + utf8word, clang, lhmin, rhmin, character(left), character(right)); #endif (void) hnj_hyphen_hyphenate(lang->patterns, wordstart, end_word, wordlen, left, right, &langdata); } @@ -924,7 +925,7 @@ void hnj_hyphenation(halfword head, halfword tail) hy = utf8word; if (r == null) break; - r = find_next_wordstart(r); + r = find_next_wordstart(r,first_language); } flush_node(vlink(tail)); vlink(tail) = save_tail1; @@ -1083,8 +1084,8 @@ void new_patterns(void) } @ `\.{\\prehyphenchar}', sets the |pre_break| character, and -`\.{\\posthyphenchar}' the |post_break| character. Their respective -defaults are ascii hyphen ("-") and zero (nul). +`\.{\\posthyphenchar}' the |post_break| character. Their respective defaults are +ascii hyphen ("-") and zero (nul). @c void new_pre_hyphen_char(void) @@ -1102,8 +1103,8 @@ void new_post_hyphen_char(void) } @ `\.{\\preexhyphenchar}', sets the |pre_break| character, and -`\.{\\postexhyphenchar}' the |post_break| character. Their -defaults are both zero (nul). +`\.{\\postexhyphenchar}' the |post_break| character. Their defaults are both zero +(nul). @c void new_pre_exhyphen_char(void) diff --git a/Build/source/texk/web2c/luatexdir/lua/lnewtokenlib.c b/Build/source/texk/web2c/luatexdir/lua/lnewtokenlib.c index 08ca9e9a1ba..6ac8489e9b1 100644 --- a/Build/source/texk/web2c/luatexdir/lua/lnewtokenlib.c +++ b/Build/source/texk/web2c/luatexdir/lua/lnewtokenlib.c @@ -186,6 +186,104 @@ static int run_get_next(lua_State * L) return 1; } +/* + This is experimental code: + + local t1 = token.get_next() + local t2 = token.get_next() + local t3 = token.get_next() + local t4 = token.get_next() + -- watch out, we flush in sequence + token.put_next { t1, t2 } + -- but this one gets pushed in front + token.put_next ( t3, t4 ) + -- so when we get wxyz we put yzwx! + + At some point we can consider a token.print that delays and goes via + the same rope mechanism as texio.prints and friends but then one can + as well serialize the tokens and do a normal print so there is no real + gain in it. After all, the tokenlib operates at the input level so we + might as well keep it there. 
+ +*/ + +inline static int run_put_next(lua_State * L) +{ + int n = lua_gettop(L); + int m = 0; + int i = 0; + halfword h = null; + halfword t = null; + halfword x = null; + lua_token *p ; + if (n == 0) { + /* we accept a single nil argument */ + return 0; + } + lua_rawgeti(L, LUA_REGISTRYINDEX, luaS_index(luatex_token)); /* n+1 */ + lua_gettable(L, LUA_REGISTRYINDEX); /* n+1 */ + m = lua_gettop(L); + if (lua_type(L,1) == LUA_TTABLE) { + if (n>1) { + normal_error("token lib","only one table permitted in put_next"); + } else { + for (i = 1;; i++) { + lua_rawgeti(L, 1, i); /* table mt token */ + if (lua_type(L,-1) == LUA_TNIL) { + break; + } else { + p = lua_touserdata(L, -1); + if (p == NULL) { + normal_error("token lib","lua <token> expected in put_next (1)"); + } else if (!lua_getmetatable(L, -1)) { /* table mt token mt */ + normal_error("token lib","lua <token> expected in put_next (2)"); + } else if (!lua_rawequal(L, m, -1)) { + normal_error("token lib","lua <token> expected in put_next (3)"); + } else { + fast_get_avail(x) ; + token_info(x) = token_info(p->token); + if (h == null) { + h = x; + } else { + token_link(t) = x; + } + t = x; + } + lua_pop(L, 1); + } + } + } + } else { + for (i = 1; i <= n; i++) { + p = lua_touserdata(L,i); + if (p == NULL) { + normal_error("token lib","lua <token> expected in put_next (4)"); + } else if (!lua_getmetatable(L, i)) { /* table mt token mt */ + normal_error("token lib","lua <token> expected in put_next (5)"); + } else if (!lua_rawequal(L, m, -1)) { + normal_error("token lib","lua <token> expected in put_next (6)"); + } else { + fast_get_avail(x) ; + token_info(x) = token_info(p->token); + if (h == null) { + h = x; + } else { + token_link(t) = x; + } + t = x; + } + lua_pop(L, 1); + } + } + if (h == null) { + /* can't happen */ + } else { + begin_token_list(h,0); + } + lua_settop(L,n); + return 0; +} + static int run_scan_keyword(lua_State * L) { saved_tex_scanner texstate; @@ -831,6 +929,7 @@ static const struct luaL_Reg tokenlib[] = { { "is_token", lua_tokenlib_is_token }, /* scanners */ { "get_next", run_get_next }, + { "put_next", run_put_next }, { "scan_keyword", run_scan_keyword }, { "scan_int", run_scan_int }, { "scan_dimen", run_scan_dimen }, diff --git a/Build/source/texk/web2c/luatexdir/lua/lnodelib.c b/Build/source/texk/web2c/luatexdir/lua/lnodelib.c index 2b44f6facad..a5d7d7439ee 100644 --- a/Build/source/texk/web2c/luatexdir/lua/lnodelib.c +++ b/Build/source/texk/web2c/luatexdir/lua/lnodelib.c @@ -1600,13 +1600,12 @@ static int lua_nodelib_hpack(lua_State * L) m = 2; } else if (lua_key_eq(s, subst_ex_font)) { m = 3; - } else { - luaL_error(L, "3rd argument should be either additional or exactly"); } } else if (lua_type(L, 3) == LUA_TNUMBER) { m = (int) lua_tointeger(L, 3); - } else { - lua_pushstring(L, "incorrect 3rd argument"); + } + if ((m<0) || (m>3)) { + luaL_error(L, "wrong mode in hpack"); } if (lua_gettop(L) > 3) { if (lua_type(L, 4) == LUA_TSTRING) { diff --git a/Build/source/texk/web2c/luatexdir/lua/ltexlib.c b/Build/source/texk/web2c/luatexdir/lua/ltexlib.c index 060a60708f0..872b2994e23 100644 --- a/Build/source/texk/web2c/luatexdir/lua/ltexlib.c +++ b/Build/source/texk/web2c/luatexdir/lua/ltexlib.c @@ -1078,10 +1078,25 @@ static int getbox(lua_State * L) static int splitbox(lua_State * L) { + const char *s; int k = get_box_id(L, 1, true); check_index_range(k, "splitbox"); if (lua_isnumber(L, 2)) { - nodelist_to_lua(L, vsplit(k,lua_tointeger(L,2))); + int m = 1; + if (lua_type(L, 3) == LUA_TSTRING) { + s = lua_tostring(L, 
3); + if (lua_key_eq(s, exactly)) { + m = 0; + } else if (lua_key_eq(s, additional)) { + m = 1; + } + } else if (lua_type(L, 3) == LUA_TNUMBER) { + m = (int) lua_tointeger(L, 3); + } + if ((m<0) || (m>1)) { + luaL_error(L, "wrong mode in splitbox"); + } + nodelist_to_lua(L, vsplit(k,lua_tointeger(L,2),m)); } else { /* maybe a warning */ lua_pushnil(L); diff --git a/Build/source/texk/web2c/luatexdir/lua/luainit.w b/Build/source/texk/web2c/luatexdir/lua/luainit.w index fb9d2c72909..48bbd310d57 100644 --- a/Build/source/texk/web2c/luatexdir/lua/luainit.w +++ b/Build/source/texk/web2c/luatexdir/lua/luainit.w @@ -221,7 +221,7 @@ static struct option long_options[] = { {"progname", 1, 0, 0}, {"version", 0, 0, 0}, {"credits", 0, 0, 0}, - {"recorder", 0, &recorder_enabled, 1}, + {"recorder", 0, 0, 0}, {"etex", 0, 0, 0}, {"output-comment", 1, 0, 0}, {"output-directory", 1, 0, 0}, @@ -282,6 +282,8 @@ unsigned int lua_unsigned_numeric_field_by_index(lua_State * L, int name_index, } @ @c +static int recorderoption = 0; + static void parse_options(int ac, char **av) { #ifdef WIN32 @@ -396,6 +398,8 @@ static void parse_options(int ac, char **av) } else if (ARGUMENT_IS("synctex")) { /* Synchronize TeXnology: catching the command line option as a long */ synctexoption = (int) strtol(optarg, NULL, 0); + } else if (ARGUMENT_IS("recorder")) { + recorderoption = 1 ; } else if (ARGUMENT_IS("help")) { usagehelp(LUATEX_IHELP, BUG_ADDRESS); } else if (ARGUMENT_IS("version")) { @@ -532,6 +536,7 @@ static char *find_filename(char *name, const char *envkey) } @ @c + static void init_kpse(void) { if (!user_progname) { @@ -568,7 +573,10 @@ static void init_kpse(void) kpse_set_program_name(argv[0], user_progname); init_shell_escape(); /* set up 'restrictedshell' */ - program_name_set = 1; + program_name_set = 1 ; + if (recorderoption) { + recorder_enabled = 1; + } } @ @c @@ -585,7 +593,7 @@ static void fix_dumpname(void) } else { /* For |dump_name| to be NULL is a bug. 
*/ if (!ini_version) - abort(); + normal_error("luatex","no format given"); } } diff --git a/Build/source/texk/web2c/luatexdir/luatex.c b/Build/source/texk/web2c/luatexdir/luatex.c index 6a1e20d2206..f36f226af92 100644 --- a/Build/source/texk/web2c/luatexdir/luatex.c +++ b/Build/source/texk/web2c/luatexdir/luatex.c @@ -29,9 +29,9 @@ #define TeX int luatex_version = 89; /* \.{\\luatexversion} */ -int luatex_revision = '1'; /* \.{\\luatexrevision} */ +int luatex_revision = '2'; /* \.{\\luatexrevision} */ int luatex_date_info = 2016020500; /* the compile date is now hardwired */ -const char *luatex_version_string = "beta-0.89.1"; +const char *luatex_version_string = "beta-0.89.2"; const char *engine_name = my_name; /* the name of this engine */ #include <kpathsea/c-ctype.h> diff --git a/Build/source/texk/web2c/luatexdir/pdf/pdfgen.w b/Build/source/texk/web2c/luatexdir/pdf/pdfgen.w index fe5f97b8411..7cd27fbc5ac 100644 --- a/Build/source/texk/web2c/luatexdir/pdf/pdfgen.w +++ b/Build/source/texk/web2c/luatexdir/pdf/pdfgen.w @@ -2432,6 +2432,10 @@ void finish_pdf_file(PDF pdf, int luatexversion, str_number luatexrevision) } else if (callback_id > 0) { run_callback(callback_id, "->"); } + } else { + if (callback_id > 0) { + run_callback(callback_id, "->"); + } } libpdffinish(pdf); if (pdf->draftmode == 0) diff --git a/Build/source/texk/web2c/luatexdir/tex/commands.w b/Build/source/texk/web2c/luatexdir/tex/commands.w index 53497af528c..4f796a7b1a7 100644 --- a/Build/source/texk/web2c/luatexdir/tex/commands.w +++ b/Build/source/texk/web2c/luatexdir/tex/commands.w @@ -115,6 +115,7 @@ void initialize_commands(void) primitive_tex("newlinechar", assign_int_cmd, int_base + new_line_char_code, int_base); primitive_tex("language", assign_int_cmd, int_base + language_code, int_base); primitive_tex("setlanguage", assign_int_cmd, int_base + cur_lang_code, int_base); + primitive_tex("firstvalidlanguage", assign_int_cmd, int_base + first_valid_language_code, int_base); primitive_tex("exhyphenchar", assign_int_cmd, int_base + ex_hyphen_char_code, int_base); primitive_tex("lefthyphenmin", assign_int_cmd, int_base + left_hyphen_min_code, int_base); primitive_tex("righthyphenmin", assign_int_cmd, int_base + right_hyphen_min_code, int_base); diff --git a/Build/source/texk/web2c/luatexdir/tex/dumpdata.w b/Build/source/texk/web2c/luatexdir/tex/dumpdata.w index 527612ba61c..c2fba1934c7 100644 --- a/Build/source/texk/web2c/luatexdir/tex/dumpdata.w +++ b/Build/source/texk/web2c/luatexdir/tex/dumpdata.w @@ -27,7 +27,7 @@ /* we start with 907: the sum of the values of the bytes of "don knuth" */ -#define FORMAT_ID (907+11) +#define FORMAT_ID (907+12) #if ((FORMAT_ID>=0) && (FORMAT_ID<=256)) #error Wrong value for FORMAT_ID. #endif diff --git a/Build/source/texk/web2c/luatexdir/tex/equivalents.h b/Build/source/texk/web2c/luatexdir/tex/equivalents.h index 7853462507b..09a63150cca 100644 --- a/Build/source/texk/web2c/luatexdir/tex/equivalents.h +++ b/Build/source/texk/web2c/luatexdir/tex/equivalents.h @@ -286,10 +286,12 @@ the |number_regs| \.{\\dimen} registers. 
# define math_old_code 95 /* this one is stable */ # define math_option_code 96 -# define backend_int_base (int_base+97) -# define backend_int_last (int_base+116) +# define first_valid_language_code 97 -# define tex_int_pars (117) /* total number of integer parameters */ +# define backend_int_base (int_base+98) +# define backend_int_last (int_base+117) + +# define tex_int_pars (118) /* total number of integer parameters */ # define page_direction_code (tex_int_pars) # define body_direction_code (tex_int_pars+1) diff --git a/Build/source/texk/web2c/luatexdir/tex/packaging.h b/Build/source/texk/web2c/luatexdir/tex/packaging.h index c3af41a123b..6c909d64680 100644 --- a/Build/source/texk/web2c/luatexdir/tex/packaging.h +++ b/Build/source/texk/web2c/luatexdir/tex/packaging.h @@ -87,7 +87,7 @@ extern scaled active_height[10]; /* distance from first active node to~|c extern scaled best_height_plus_depth; /* height of the best box, without stretching or shrinking */ extern halfword vert_break(halfword p, scaled h, scaled d); -extern halfword vsplit(halfword n, scaled h); /* extracts a page of height |h| from box |n| */ +extern halfword vsplit(halfword n, scaled h, int m); /* extracts a page of height |h| from box |n| */ # define box_code 0 /* |chr_code| for `\.{\\box}' */ # define copy_code 1 /* |chr_code| for `\.{\\copy}' */ diff --git a/Build/source/texk/web2c/luatexdir/tex/packaging.w b/Build/source/texk/web2c/luatexdir/tex/packaging.w index b1e8c44425e..7541339dc36 100644 --- a/Build/source/texk/web2c/luatexdir/tex/packaging.w +++ b/Build/source/texk/web2c/luatexdir/tex/packaging.w @@ -1824,7 +1824,7 @@ was, erroneously, an hlist box). @c /* extracts a page of height |h| from box |n| */ -halfword vsplit(halfword n, scaled h) +halfword vsplit(halfword n, scaled h, int m) { halfword v; /* the box to be split */ int vdir; /* the direction of the box to be split */ @@ -1889,10 +1889,13 @@ halfword vsplit(halfword n, scaled h) /* the |eq_level| of the box stays the same */ box(n) = null; } else { - box(n) = - filtered_vpackage(q, 0, additional, dimen_par(max_depth_code), split_keep_group, vdir, 0); + box(n) = filtered_vpackage(q, 0, additional, dimen_par(max_depth_code), split_keep_group, vdir, 0); + } + if (m == exactly) { + return filtered_vpackage(p, h, exactly, dimen_par(split_max_depth_code), split_off_group, vdir, 0); + } else { + return filtered_vpackage(p, 0, additional, dimen_par(max_depth_code), split_off_group, vdir, 0); } - return filtered_vpackage(p, h, exactly, dimen_par(split_max_depth_code), split_off_group, vdir, 0); } @ Now that we can see what eventually happens to boxes, we can consider the first @@ -1972,7 +1975,7 @@ void begin_box(int box_context) error(); } scan_normal_dimen(); - cur_box = vsplit(n, cur_val); + cur_box = vsplit(n, cur_val, additional); break; default: /* |
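The `vsplit` change at the end of the diff surfaces in Lua as an optional third argument to `tex.splitbox`: a packing mode that is either `additional` (the previous behaviour, natural packing of the split-off material) or `exactly` (pack to the requested height), matching the keys checked in the `ltexlib.c` hunk. A minimal sketch under that assumption; the box register number and the split height are made-up example values:

```lua
-- Hypothetical call of the extended tex.splitbox (see ltexlib.c/packaging.w above).
-- Box register 0 and the 4in height are example values only.
local split = tex.splitbox(0, tex.sp("4in"), "exactly")      -- pack split-off part to 4in exactly
-- local split = tex.splitbox(0, tex.sp("4in"), "additional") -- default: natural height
```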