Diffstat (limited to 'Build/source/texk/web2c/luatexdir/lang')
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lang/hnjalloc.c |  72
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lang/hnjalloc.h |  46
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lang/hyphen.c   | 830
-rw-r--r-- | Build/source/texk/web2c/luatexdir/lang/texlang.c  | 825
4 files changed, 1773 insertions, 0 deletions
diff --git a/Build/source/texk/web2c/luatexdir/lang/hnjalloc.c b/Build/source/texk/web2c/luatexdir/lang/hnjalloc.c new file mode 100644 index 00000000000..59d62e2aabb --- /dev/null +++ b/Build/source/texk/web2c/luatexdir/lang/hnjalloc.c @@ -0,0 +1,72 @@ +/* LibHnj is dual licensed under LGPL and MPL. Boilerplate for both + * licenses follows. + */ + +/* LibHnj - a library for high quality hyphenation and justification + * Copyright (C) 1998 Raph Levien, (C) 2001 ALTLinux, Moscow + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307 USA. +*/ + +/* + * The contents of this file are subject to the Mozilla Public License + * Version 1.0 (the "MPL"); you may not use this file except in + * compliance with the MPL. You may obtain a copy of the MPL at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the MPL is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the MPL + * for the specific language governing rights and limitations under the + * MPL. + * + */ +/* wrappers for malloc */ + +#include <stdlib.h> +#include <stdio.h> + +void * +hnj_malloc (int size) +{ + void *p; + + p = malloc (size); + if (p == NULL) + { + fprintf (stderr, "can't allocate %d bytes\n", size); + exit (1); + } + return p; +} + +void * +hnj_realloc (void *p, int size) +{ + p = realloc (p, size); + if (p == NULL) + { + fprintf (stderr, "can't allocate %d bytes\n", size); + exit (1); + } + return p; +} + +void +hnj_free (void *p) +{ + free (p); +} + diff --git a/Build/source/texk/web2c/luatexdir/lang/hnjalloc.h b/Build/source/texk/web2c/luatexdir/lang/hnjalloc.h new file mode 100644 index 00000000000..db82fb9c758 --- /dev/null +++ b/Build/source/texk/web2c/luatexdir/lang/hnjalloc.h @@ -0,0 +1,46 @@ +/* LibHnj is dual licensed under LGPL and MPL. Boilerplate for both + * licenses follows. + */ + +/* LibHnj - a library for high quality hyphenation and justification + * Copyright (C) 1998 Raph Levien + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307 USA. 
+*/ + +/* + * The contents of this file are subject to the Mozilla Public License + * Version 1.0 (the "MPL"); you may not use this file except in + * compliance with the MPL. You may obtain a copy of the MPL at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the MPL is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the MPL + * for the specific language governing rights and limitations under the + * MPL. + * + */ +/* wrappers for malloc */ + +void * +hnj_malloc (int size); + +void * +hnj_realloc (void *p, int size); + +void +hnj_free (void *p); + diff --git a/Build/source/texk/web2c/luatexdir/lang/hyphen.c b/Build/source/texk/web2c/luatexdir/lang/hyphen.c new file mode 100644 index 00000000000..140cde8d768 --- /dev/null +++ b/Build/source/texk/web2c/luatexdir/lang/hyphen.c @@ -0,0 +1,830 @@ +/* Libhnj is dual licensed under LGPL and MPL. Boilerplate for both + * licenses follows. + */ + +/* LibHnj - a library for high quality hyphenation and justification + * Copyright (C) 1998 Raph Levien, + * (C) 2001 ALTLinux, Moscow (http://www.alt-linux.org), + * (C) 2001 Peter Novodvorsky (nidd@cs.msu.su) + * + * This library is free software; you can redistribute it and/or + * modify it under the terms of the GNU Library General Public + * License as published by the Free Software Foundation; either + * version 2 of the License, or (at your option) any later version. + * + * This library is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Library General Public License for more details. + * + * You should have received a copy of the GNU Library General Public + * License along with this library; if not, write to the + * Free Software Foundation, Inc., 59 Temple Place - Suite 330, + * Boston, MA 02111-1307 USA. +*/ + +/* + * The contents of this file are subject to the Mozilla Public License + * Version 1.0 (the "MPL"); you may not use this file except in + * compliance with the MPL. You may obtain a copy of the MPL at + * http://www.mozilla.org/MPL/ + * + * Software distributed under the MPL is distributed on an "AS IS" basis, + * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the MPL + * for the specific language governing rights and limitations under the + * MPL. + * + */ +#include <stdlib.h> /* for NULL, malloc */ +#include <stdio.h> /* for fprintf */ +#include <string.h> /* for strdup */ +#include <stdlib.h> /* for malloc used by substring inclusion*/ + +#define MAXPATHS 40960 + +#ifdef UNX +#include <unistd.h> /* for exit */ +#endif + +#include <ctype.h> + +/*#define VERBOSE*/ + +#include "hnjalloc.h" +#include "hyphen.h" + +/* SHOULD BE MOVED TO SEPARATE LIBRARY */ +static unsigned char * hnj_strdup( + const unsigned char *s +) { + unsigned char *new; + int l; + + l = strlen ((char*)s); + new = hnj_malloc (l + 1); + memcpy (new, s, l); + new[l] = 0; + return new; +} + +static int is_utf8_follow( + unsigned char c +) { + if (c>=0x80 && c<0xC0) return 1; + return 0; +} + +/* -------------------------------------------------------------------- + * + * Type definitions + * + * -------------------------------------------------------------------- + */ + +/* a little bit of a hash table implementation. 
This simply maps strings + to state numbers */ + +typedef struct _HashTab HashTab; +typedef struct _HashEntry HashEntry; +typedef struct _HashIter HashIter; +typedef union _HashVal HashVal; + +/* A cheap, but effective, hack. */ +#define HASH_SIZE 31627 + +struct _HashTab { + HashEntry *entries[HASH_SIZE]; +}; + +union _HashVal { + int state; + char* hyppat; +}; + +struct _HashEntry { + HashEntry *next; + unsigned char *key; + HashVal u; +}; + +struct _HashIter { + HashEntry** e; + HashEntry* cur; + int ndx; +}; + +/* State machine */ +typedef struct _HyphenState HyphenState; +typedef struct _HyphenTrans HyphenTrans; +#define MAX_CHARS 256 +#define MAX_NAME 20 + +struct _HyphenDict { + int num_states; + int pat_length; + char cset[MAX_NAME]; + HyphenState *states; + HashTab *patterns; + HashTab *merged; + HashTab *state_num; +}; + +struct _HyphenState { + char *match; + /*char *repl;*/ + /*signed char replindex;*/ + /*signed char replcut;*/ + int fallback_state; + int num_trans; + HyphenTrans *trans; +}; + +struct _HyphenTrans { + int uni_ch; + int new_state; +}; + + +/* Combine two right-aligned number patterns, 04000 + 020 becomes 04020*/ +static char *combine( + char *expr, + const char *subexpr +) { + int l1 = strlen(expr); + int l2 = strlen(subexpr); + int off = l1-l2; + int j; + /* this works also for utf8 sequences because the substring is identical + * to the last substring-length bytes of expr except for the (single byte) + * hyphenation encoders + */ + for (j=0; j<l2; j++) { + if (expr[off+j]<subexpr[j]) expr[off+j] = subexpr[j]; + } + return expr; +} + + +/* -------------------------------------------------------------------- + * ORIGINAL CODE + * -------------------------------------------------------------------- + */ + +HashIter* new_HashIter( + HashTab* h +) { + HashIter* i = hnj_malloc(sizeof(HashIter)); + i->e = h->entries; + i->cur = NULL; + i->ndx = -1; + return i; +} + + +int nextHashStealPattern( + HashIter*i, + unsigned char**word, + char **pattern +) { + while (i->cur==NULL) { + if (i->ndx >= HASH_SIZE-1) return 0; + i->cur = i->e[++i->ndx]; + } + *word = i->cur->key; + *pattern = i->cur->u.hyppat; + i->cur->u.hyppat = NULL; + i->cur = i->cur->next; + return 1; +} + + +int nextHash( + HashIter*i, + unsigned char**word +) { + while (i->cur==NULL) { + if (i->ndx >= HASH_SIZE-1) return 0; + i->cur = i->e[++i->ndx]; + } + *word = i->cur->key; + i->cur = i->cur->next; + return 1; +} + + +int eachHash( + HashIter*i, + unsigned char**word, + char**pattern +) { + while (i->cur==NULL) { + if (i->ndx >= HASH_SIZE-1) return 0; + i->cur = i->e[++i->ndx]; + } + *word = i->cur->key; + *pattern = i->cur->u.hyppat; + i->cur = i->cur->next; + return 1; +} + + +void delete_HashIter( + HashIter*i +) { + hnj_free(i); +} + + +/* a char* hash function from ASU - adapted from Gtk+ */ +static unsigned int hnj_string_hash ( + const unsigned char *s +) { + const unsigned char *p; + unsigned int h=0, g; + + for (p = s; *p != '\0'; p += 1) { + h = ( h << 4 ) + *p; + if ( ( g = h & 0xf0000000 ) ) { + h = h ^ (g >> 24); + h = h ^ g; + } + } + return h /* % M */; +} + + +/* assumes that key is not already present! */ +static void state_insert( + HashTab *hashtab, + unsigned char *key, + int state +) { + int i; + HashEntry *e; + + i = hnj_string_hash (key) % HASH_SIZE; + e = hnj_malloc (sizeof(HashEntry)); + e->next = hashtab->entries[i]; + e->key = key; + e->u.state = state; + hashtab->entries[i] = e; +} + + +/* assumes that key is not already present! 
*/ +static void hyppat_insert( + HashTab *hashtab, + unsigned char *key, + char* hyppat +) { + int i; + HashEntry *e; + + i = hnj_string_hash (key) % HASH_SIZE; + for (e = hashtab->entries[i]; e; e=e->next) { + if (strcmp((char*)e->key,(char*)key)==0) { + if (e->u.hyppat) hnj_free(e->u.hyppat); + e->u.hyppat = hyppat; + hnj_free(key); + return; + } + } + e = hnj_malloc (sizeof(HashEntry)); + e->next = hashtab->entries[i]; + e->key = key; + e->u.hyppat = hyppat; + hashtab->entries[i] = e; +} + + +/* return state if found, otherwise -1 */ +static int state_lookup( + HashTab *hashtab, + const unsigned char *key +) { + int i; + HashEntry *e; + + i = hnj_string_hash (key) % HASH_SIZE; + for (e = hashtab->entries[i]; e; e = e->next) { + if (!strcmp ((char*)key, (char*)e->key)) { + return e->u.state; + } + } + return -1; +} + + +/* return state if found, otherwise -1 */ +static char* hyppat_lookup( + HashTab *hashtab, + const unsigned char *chars, + int l +) { + int i; + HashEntry *e; + unsigned char key[128]; /* should be ample*/ + strncpy((char*)key,(char*)chars,l); key[l]=0; + i = hnj_string_hash (key) % HASH_SIZE; + for (e = hashtab->entries[i]; e; e = e->next) { + if (!strcmp ((char*)key, (char*)e->key)) { + return e->u.hyppat; + } + } + return NULL; +} + + +/* Get the state number, allocating a new state if necessary. */ +static int hnj_get_state( + HyphenDict *dict, + const unsigned char *string, + int *state_num +) { + *state_num = state_lookup(dict->state_num, string); + + if (*state_num >= 0) + return *state_num; + + state_insert(dict->state_num, hnj_strdup(string), dict->num_states); + /* predicate is true if dict->num_states is a power of two */ + if (!(dict->num_states & (dict->num_states - 1))) { + dict->states = hnj_realloc( + dict->states, + (dict->num_states << 1) * sizeof(HyphenState)); + } + dict->states[dict->num_states].match = NULL; + dict->states[dict->num_states].fallback_state = -1; + dict->states[dict->num_states].num_trans = 0; + dict->states[dict->num_states].trans = NULL; + return dict->num_states++; +} + + +/* add a transition from state1 to state2 through ch - assumes that the + transition does not already exist */ +static void hnj_add_trans( + HyphenDict *dict, + int state1, + int state2, + int uni_ch +) { + int num_trans; + /* TH: this test was a bit too strict, it is quite normal for old + patterns to have chars in the range 0-31 or 127-159 (inclusive). + To ease the transition, let's only disallow NUL for now + (this is probably a requirement of the code anyway). + */ + if (uni_ch==0) { + fprintf(stderr,"Character out of bounds: u%04x \n",uni_ch); + exit(1); + } + num_trans = dict->states[state1].num_trans; + if (num_trans == 0) { + dict->states[state1].trans = hnj_malloc(sizeof(HyphenTrans)); + } else if (!(num_trans & (num_trans - 1))) { + dict->states[state1].trans = hnj_realloc( + dict->states[state1].trans, + (num_trans << 1) * sizeof(HyphenTrans)); + } + dict->states[state1].trans[num_trans].uni_ch = uni_ch; + dict->states[state1].trans[num_trans].new_state = state2; + dict->states[state1].num_trans++; +} + + +#ifdef VERBOSE + +static unsigned char *get_state_str( + int state +) { + int i; + HashEntry *e; + + for (i = 0; i < HASH_SIZE; i++) + for (e = global->entries[i]; e; e = e->next) + if (e->u.state == state) + return e->key; + return NULL; +} +#endif + + +/* I've changed the semantics a bit here: hnj_hyphen_load used to + operate on a file, but now the argument is a string buffer. 
+ */ + +static const unsigned char* next_pattern( + size_t* length, + const unsigned char **buf +) { + const unsigned char *rover = *buf; + while (*rover && isspace(*rover)) rover++; + const unsigned char *here = rover; + while (*rover) { + if (isspace(*rover)) { + *length = rover-here; + *buf = rover; + return here; + } + rover++; + } + *length = rover-here; + *buf = rover; + return *length ? here : NULL; /* zero sensed */ +} + +static void init_hash( + HashTab**h +) { + if (*h) return; + int i; + *h = hnj_malloc(sizeof(HashTab)); + for (i = 0; i < HASH_SIZE; i++) (*h)->entries[i] = NULL; +} + + +static void clear_state_hash( + HashTab**h +) { + if (*h==NULL) return; + int i; + for (i = 0; i < HASH_SIZE; i++) { + HashEntry *e, *next; + for (e = (*h)->entries[i]; e; e = next) { + next = e->next; + hnj_free (e->key); + hnj_free (e); + } + } + hnj_free(*h); + *h=NULL; +} + + +static void clear_hyppat_hash( + HashTab**h +) { + if (*h==NULL) return; + int i; + for (i = 0; i < HASH_SIZE; i++) { + HashEntry *e, *next; + for (e = (*h)->entries[i]; e; e = next) { + next = e->next; + hnj_free(e->key); + if (e->u.hyppat) hnj_free(e->u.hyppat); + hnj_free(e); + } + } + hnj_free(*h); + *h=NULL; +} + + +static void init_dict( + HyphenDict* dict +) { + dict->num_states = 1; + dict->pat_length = 0; + dict->states = hnj_malloc (sizeof(HyphenState)); + dict->states[0].match = NULL; + dict->states[0].fallback_state = -1; + dict->states[0].num_trans = 0; + dict->states[0].trans = NULL; + dict->patterns = NULL; + dict->merged = NULL; + dict->state_num = NULL; + init_hash(&dict->patterns); +} + + +static void clear_dict( + HyphenDict* dict +) { + int state_num; + for (state_num = 0; state_num < dict->num_states; state_num++) { + HyphenState *hstate = &dict->states[state_num]; + if (hstate->match) hnj_free (hstate->match); + if (hstate->trans) hnj_free (hstate->trans); + } + hnj_free (dict->states); + clear_hyppat_hash(&dict->patterns); + clear_hyppat_hash(&dict->merged); + clear_state_hash(&dict->state_num); +} + + + +HyphenDict* hnj_hyphen_new() { + HyphenDict* dict = hnj_malloc (sizeof(HyphenDict)); + init_dict(dict); + return dict; +} + + +void hnj_hyphen_clear( + HyphenDict* dict +) { + clear_dict(dict); + init_dict(dict); +} + + +void hnj_hyphen_free( + HyphenDict *dict +) { + clear_dict(dict); + hnj_free(dict); +} + +unsigned char* hnj_serialize( + HyphenDict* dict +) { + HashIter *v; + unsigned char* word; + char* pattern; + unsigned char* buf = hnj_malloc(dict->pat_length); + unsigned char* cur = buf; + v = new_HashIter(dict->patterns); + while (eachHash(v,&word,&pattern)) { + int i=0, e=0; + while(word[e+i]) { + if (pattern[i]!='0') *cur++ = (unsigned char) pattern[i]; + *cur++ = word[e+i++]; + while (is_utf8_follow(word[e+i])) *cur++ = word[i+e++]; + } + if (pattern[i]!='0') *cur++ = (unsigned char) pattern[i]; + *cur++ = ' '; + } + delete_HashIter(v); + *cur = 0; + return buf; +} + + +void hnj_free_serialize( + unsigned char* c +) { + hnj_free(c); +} + + +/* hyphenation pattern: + * signed bytes + * 0 indicates end (actually any negative number) + * : prio(1+),startpos,length,len1,[replace],len2,[replace] + * most basic example is: + * p n 0 0 0 + * for a hyphenation point between characters + */ + + +void hnj_hyphen_load( + HyphenDict* dict, + const unsigned char *f +) { + int state_num, last_state; + int i, j = 0; + int ch; + int found; + HashEntry *e; + HashIter *v; + unsigned char* word; + char* pattern; + size_t l = 0; + + + /***************************************/ + + const unsigned char* 
format; + const unsigned char* begin = f; + while((format = next_pattern(&l,&f))!=NULL) { + int i,j,e; + /* + printf("%s\n",format); + char* repl = strnchr(format, '/',l); + int replindex = 0; + int replcut = 0; + if (repl) { + int clen = l-(repl-format); + l = repl-format; + char * index = strnchr(repl + 1, ',',clen); + if (index) { + char * index2 = strnchr(index + 1, ',',clen-(index-repl)); + if (index2) { + replindex = (signed char) atoi(index + 1) - 1; + replcut = (signed char) atoi(index2 + 1); + } + } else { + hnj_strchomp(repl + 1); + replindex = 0; + replcut = strlen(buf); + } + repl = hnj_strdup(repl + 1); + } + */ + for (i=0,j=0,e=0; i<l; i++) { + if (format[i]>='0'&&format[i]<='9') j++; + if (is_utf8_follow(format[i])) e++; + } + /* l-e => number of _characters_ not _bytes_*/ + /* l-e-j => number of pattern characters*/ + unsigned char *pat = (unsigned char*) malloc(1+l-j); + char *org = ( char*) malloc(2+l-e-j); + /* remove hyphenation encoders (digits) from pat*/ + org[0] = '0'; + for (i=0,j=0,e=0; i<l; i++) { + unsigned char c = format[i]; + if (is_utf8_follow(c)) { + pat[j+e++] = c; + } else if (c<'0' || c>'9') { + pat[e+j++] = c; + org[j] = '0'; + } else { + org[j] = c; + } + } + pat[e+j] = 0; + org[j+1] = 0; + hyppat_insert(dict->patterns,pat,org); + } + dict->pat_length += (f-begin)+2; /* 2 for spurious spaces*/ + init_hash(&dict->merged); + v = new_HashIter(dict->patterns); + while (nextHash(v,&word)) { + int wordsize = strlen((char*)word); + int j,l; + for (l=1; l<=wordsize; l++) { + if (is_utf8_follow(word[l])) continue; /* Do not clip an utf8 sequence*/ + for (j=1; j<=l; j++) { + int i = l-j; + if (is_utf8_follow(word[i])) continue; /* Do not start halfway an utf8 sequence*/ + char *subpat_pat; + if ((subpat_pat = hyppat_lookup(dict->patterns,word+i,j))!=NULL) { + char* newpat_pat; + if ((newpat_pat = hyppat_lookup(dict->merged,word,l))==NULL) { + unsigned char *newword=(unsigned char*)malloc(l+1); + strncpy((char*)newword, (char*)word,l); newword[l]=0; + int e=0; + for (i=0; i<l; i++) if (is_utf8_follow(newword[i])) e++; + char *neworg = malloc(l+2-e); + sprintf(neworg,"%0*d",l+1-e,0); /* fill with right amount of '0'*/ + hyppat_insert(dict->merged,newword,combine(neworg,subpat_pat)); + } else { + combine(newpat_pat,subpat_pat); + } + } + } + } + } + delete_HashIter(v); + + init_hash(&dict->state_num); + state_insert(dict->state_num, hnj_strdup((unsigned char*)""), 0); + v = new_HashIter(dict->merged); + while (nextHashStealPattern(v,&word,&pattern)) { + static unsigned char mask[] = {0x3F,0x1F,0xF,0x7}; + int j = strlen((char*)word); +#ifdef VERBOSE + printf ("word %s pattern %s, j = %d\n", word, pattern, j); +#endif + state_num = hnj_get_state( dict, word, &found ); + dict->states[state_num].match = pattern; + + /* now, put in the prefix transitions */ + while (found < 0) { + j--; + last_state = state_num; + ch = word[j]; + if (ch>=0x80) { + int i=1; + while (is_utf8_follow(word[j-i])) i++; + ch = word[j-i] & mask[i]; + int m = j-i; + while (i--) { + ch = (ch<<6)+(0x3F & word[j-i]); + } + j = m; + } + word[j] = '\0'; + state_num = hnj_get_state (dict, word, &found); + hnj_add_trans (dict, state_num, last_state, ch); + } + } + delete_HashIter(v); + clear_hyppat_hash(&dict->merged); + + /***************************************/ + + /* put in the fallback states */ + for (i = 0; i < HASH_SIZE; i++) { + for (e = dict->state_num->entries[i]; e; e = e->next) { + /* do not do state==0 otherwise things get confused*/ + if (e->u.state) { + for (j = 1; 1; j++) { + state_num 
= state_lookup(dict->state_num, e->key + j); + if (state_num >= 0) break; + } + dict->states[e->u.state].fallback_state = state_num; + } + } + } +#ifdef VERBOSE + for (i = 0; i < HASH_SIZE; i++) { + for (e = dict->state_num->entries[i]; e; e = e->next) { + printf ("%d string %s state %d, fallback=%d\n", i, e->key, e->u.state, + dict->states[e->u.state].fallback_state); + for (j = 0; j < dict->states[e->u.state].num_trans; j++) { + printf (" u%4x->%d\n", (int)dict->states[e->u.state].trans[j].uni_ch, + dict->states[e->u.state].trans[j].new_state); + } + } + } +#endif + clear_state_hash(&dict->state_num); +} + + +void hnj_hyphen_hyphenate( + HyphenDict *dict, + halfword first, + halfword last, + int length, + halfword left, + halfword right, + lang_variables *lan +) { + /* +2 for dots at each end, +1 for points /outside/ characters*/ + int ext_word_len = length+2; + int hyphen_len = ext_word_len+1; + /*char *hyphens = hnj_malloc((hyphen_len*2)+1); */ /* LATER */ + char *hyphens = hnj_malloc(hyphen_len+1); + + /* Add a '.' to beginning and end to facilitate matching*/ + set_vlink(begin_point,first); + set_vlink(end_point,get_vlink(last)); + set_vlink(last,end_point); + + int char_num; + for (char_num = 0; char_num < hyphen_len; char_num++) { + /* hyphens[char_num*2] = '0'; */ /* LATER */ + /* hyphens[char_num*2+1] = '0'; */ /* LATER */ + hyphens[char_num] = '0'; + } + /*hyphens[hyphen_len*2] = 0; */ /* LATER */ + hyphens[hyphen_len] = 0; + + /* now, run the finite state machine */ + int state = 0; + halfword here; + for (char_num=0, here=begin_point; here!=end_point; here=get_vlink(here)) { + + int ch = get_character(here); + + while (state!=-1) { + /* printf("%*s%s%c",char_num-strlen(get_state_str(state)),"",get_state_str(state),(char)ch);*/ + HyphenState *hstate = &dict->states[state]; + int k; + for (k = 0; k < hstate->num_trans; k++) { + if (hstate->trans[k].uni_ch == ch) { + state = hstate->trans[k].new_state; + /* printf(" state %d\n",state);*/ + char *match = dict->states[state].match; + if (match) { + /* +2 because: + * 1 string length is one bigger than offset + * 1 hyphenation starts before first character + */ + int offset = char_num + 2 - strlen (match); + /* printf ("%*s%s\n", offset,"", match);*/ + int m; + for (m = 0; match[m]; m++) { + if (hyphens[offset+m] < match[m]) hyphens[offset+m] = match[m]; + } + } + goto try_next_letter; + } + } + state = hstate->fallback_state; + /* printf (" back to %d\n", state);*/ + } + /* nothing worked, let's go to the next character*/ + state = 0; +try_next_letter: ; + char_num++; + } + + /* restore the correct pointers*/ + set_vlink(last,get_vlink(end_point)); + + /* pattern is ^.^w^o^r^d^.^ word_len=4, ext_word_len=6, hyphens=7 + * check ^ ^ ^ so drop first two and stop after word_len-1 + */ + for (here=first,char_num=2; here!=left; here=get_vlink(here)) char_num++; + for (; here!=right; here=get_vlink(here)) { + if (hyphens[char_num] & 1) + here = insert_syllable_discretionary(here, lan); + char_num++; + } + hnj_free(hyphens); +} diff --git a/Build/source/texk/web2c/luatexdir/lang/texlang.c b/Build/source/texk/web2c/luatexdir/lang/texlang.c new file mode 100644 index 00000000000..12390ae2fcf --- /dev/null +++ b/Build/source/texk/web2c/luatexdir/lang/texlang.c @@ -0,0 +1,825 @@ +/* +Copyright (c) 2007 Taco Hoekwater <taco@latex.org> + +This file is part of luatex. 
+ +luatex is free software; you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation; either version 2 of the License, or +(at your option) any later version. + +luatex is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with luatex; if not, write to the Free Software +Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + +This is texlang.c +*/ + +#include "luatex-api.h" +#include <ptexlib.h> + +#include <string.h> + +#include "nodes.h" +#include "hyphen.h" + +/* functions from the fontforge unicode library */ + +extern unsigned int *utf82u_strcpy(unsigned int *ubuf,const char *utf8buf); +extern unsigned int u_strlen(unsigned int *ubuf); +extern char *utf8_idpb(char *w,unsigned int i); + +#define noVERBOSE + +#define MAX_TEX_LANGUAGES 32767 + +static struct tex_language *tex_languages[MAX_TEX_LANGUAGES] = {NULL}; +static int next_lang_id = 0; + +struct tex_language * +new_language (void) { + struct tex_language* lang; + if (next_lang_id<MAX_TEX_LANGUAGES) { + lang = xmalloc(sizeof(struct tex_language)); + tex_languages[next_lang_id] = lang; + lang->id = next_lang_id++; + lang->exceptions = 0; + lang->patterns = NULL; + lang->pre_hyphen_char = '-'; + lang->post_hyphen_char = 0; + return lang; + } else { + return NULL; + } +} + +struct tex_language * +get_language (int n) { + if (n>=0 && n<=MAX_TEX_LANGUAGES ) { + if (tex_languages[n]!=NULL) { + return tex_languages[n]; + } else { + return new_language(); + } + } else { + return NULL; + } +} + +void +set_pre_hyphen_char (integer n, integer v) { + struct tex_language *l = get_language((int)n); + l->pre_hyphen_char = (int)v; +} + +void +set_post_hyphen_char (integer n, integer v) { + struct tex_language *l = get_language((int)n); + l->post_hyphen_char = (int)v; +} + +integer +get_pre_hyphen_char (integer n) { + struct tex_language *l = get_language((int)n); + return (integer)l->pre_hyphen_char; +} + +integer +get_post_hyphen_char (integer n) { + struct tex_language *l = get_language((int)n); + return (integer)l->post_hyphen_char; +} + +void +load_patterns (struct tex_language *lang, unsigned char *buffer) { + if (lang==NULL) + return; + if (lang->patterns==NULL) { + lang->patterns = hnj_hyphen_new(); + } + hnj_hyphen_load (lang->patterns,buffer); +} + +void +clear_patterns (struct tex_language *lang) { + if (lang==NULL) + return; + if (lang->patterns!=NULL) { + hnj_hyphen_clear(lang->patterns); + } +} + +void +load_tex_patterns(int curlang, halfword head) { + char *s = tokenlist_to_cstring (head,1, NULL); + load_patterns(get_language(curlang),(unsigned char *)s); +} + + +#define STORE_CHAR(x) { word[w] = x ; if (w<MAX_WORD_LEN) w++; } + +/* todo change this! 
*/ + +char * +clean_hyphenation (char *buffer, char **cleaned) { + int items; + unsigned char word [MAX_WORD_LEN+1]; + int w = 0; + char *s = buffer; + while (*s && !isspace(*s)) { + if (*s == '-') { /* skip */ + } else if (*s == '=') { + STORE_CHAR('-'); + } else if (*s == '{') { + s++; + items=0; + while (*s && *s!='}') { s++; } + if (*s=='}') { items++; s++; } + while (*s && *s!='}') { s++; } + if (*s=='}') { items++; s++; } + if (*s=='{') { s++; } + while (*s && *s!='}') {STORE_CHAR(*s); s++; } + if (*s=='}') { items++; } else { s--; } + if (items!=3) { /* syntax error */ + *cleaned = NULL; + while (*s && !isspace(*s)) { s++; } + return s; + } + } else { + STORE_CHAR(*s); + } + s++; + } + word[w] = 0; + *cleaned = xstrdup((char *)word); + return s; +} + +void +load_hyphenation (struct tex_language *lang, unsigned char *buffer) { + char *s, *value, *cleaned; + lua_State *L = Luas[0]; + if (lang==NULL) + return; + if (lang->exceptions==0) { + lua_newtable(L); + lang->exceptions = luaL_ref(L,LUA_REGISTRYINDEX); + } + lua_rawgeti(L, LUA_REGISTRYINDEX, lang->exceptions); + s = (char *)buffer; + while (*s) { + while (isspace(*s)) s++; + if (*s) { + value = s; + s = clean_hyphenation(s, &cleaned); + if (cleaned!=NULL) { + if ((s-value)>0) { + lua_pushstring(L,cleaned); + lua_pushlstring(L,value,(s-value)); + lua_rawset(L,-3); + } + free(cleaned); + } else { +#ifdef VERBOSE + fprintf(stderr,"skipping invalid hyphenation exception: %s\n",value); +#endif + } + } + } +} + +void +clear_hyphenation (struct tex_language *lang) { + if (lang==NULL) + return; + if (lang->exceptions!=0) { + luaL_unref(Luas[0],LUA_REGISTRYINDEX,lang->exceptions); + lang->exceptions = 0; + } +} + + +void +load_tex_hyphenation(int curlang, halfword head) { + char *s = tokenlist_to_cstring (head,1, NULL); + load_hyphenation(get_language(curlang),(unsigned char *)s); +} + +/* TODO: clean this up. The delete_attribute_ref() statements are not very + nice, but needed. Also, in the post-break, it would be nicer to get the + attribute list from vlink(n). No rush, as it is currently not used much. 
*/ + +halfword insert_discretionary ( halfword t, halfword pre, halfword post, halfword replace) { + halfword g, n; + n = new_node(disc_node,syllable_disc); + try_couple_nodes(n,vlink(t)); + couple_nodes(t,n); + for (g=pre;g!=null;g=vlink(g)) { + font(g)=font(replace); + if (node_attr(t)!=null) { + delete_attribute_ref(node_attr(g)); + node_attr(g) = node_attr(t); + attr_list_ref(node_attr(t)) += 1; + } + } + for (g=post;g!=null;g =vlink(g)) { + font(g)=font(replace); + if (node_attr(t)!=null) { + delete_attribute_ref(node_attr(g)); + node_attr(g) = node_attr(t); + attr_list_ref(node_attr(t)) += 1; + } + } + for (g=replace;g!=null;g =vlink(g)) { + if (node_attr(t)!=null) { + delete_attribute_ref(node_attr(g)); + node_attr(g) = node_attr(t); + attr_list_ref(node_attr(t)) += 1; + } + } + if (node_attr(t)!=null) { + delete_attribute_ref(node_attr(vlink(t))); + node_attr(vlink(t)) = node_attr(t); + attr_list_ref(node_attr(t)) += 1; + } + t = vlink(t); + set_disc_field(pre_break(t),pre); + set_disc_field(post_break(t),post); + set_disc_field(no_break(t),replace); + return t; +} + +halfword +insert_syllable_discretionary ( halfword t, lang_variables *lan) { + halfword g, n; + n = new_node(disc_node,syllable_disc); + couple_nodes(n,vlink(t)); + couple_nodes(t,n); + delete_attribute_ref(node_attr(n)); + if (node_attr(t)!=null) { + node_attr(n) = node_attr(t); + attr_list_ref(node_attr(t))++ ; + } else { + node_attr(n) = null; + } + if (lan->pre_hyphen_char >0) { + g = raw_glyph_node(); + set_to_character(g); + character(g)=lan->pre_hyphen_char; + font(g)=font(t); + lang_data(g)=lang_data(t); + if (node_attr(t)!=null) { + node_attr(g) = node_attr(t); + attr_list_ref(node_attr(t)) ++; + } + set_disc_field(pre_break(n),g); + } + + if (lan->post_hyphen_char >0) { + t = vlink(n); + g = raw_glyph_node(); + set_to_character(g); + character(g)=lan->post_hyphen_char; + font(g)=font(t); + lang_data(g)=lang_data(t); + if (node_attr(t)!=null) { + node_attr(g) = node_attr(t); + attr_list_ref(node_attr(t)) += 1; + } + set_disc_field(post_break(n),g); + } + return n; +} + +halfword insert_word_discretionary ( halfword t, lang_variables *lan) { + halfword pre = null, pos = null; + if (lan->pre_hyphen_char >0) pre = insert_character ( null, lan->pre_hyphen_char); + if (lan->post_hyphen_char>0) pos = insert_character ( null, lan->post_hyphen_char); + return insert_discretionary ( t, pre, pos, null); +} + +halfword insert_complex_discretionary ( halfword t, lang_variables *lan, + halfword pre, halfword pos, halfword replace) { + return insert_discretionary ( t, pre, pos, replace); +} + + +halfword insert_character ( halfword t, int c) { + halfword p; + p=new_node(glyph_node,0); + set_to_character(p); + character(p)=c; + if (t!=null) { + couple_nodes(t,p); + } + return p; +} + + +void +set_disc_field (halfword f, halfword t) { + if (t!=null) { + couple_nodes(f,t); + tlink(f) = tail_of_list(t); + } +} + + + +char *hyphenation_exception(int exceptions, char *w) { + char *ret = NULL; + lua_State *L = Luas[0]; + lua_checkstack(L,2); + lua_rawgeti(L,LUA_REGISTRYINDEX,exceptions); + if (lua_istable(L,-1)) { /* ?? 
*/ + lua_pushstring(L,w); /* word table */ + lua_rawget(L,-2); + if (lua_isstring(L,-1)) { + ret = xstrdup((char *)lua_tostring(L,-1)); + } + lua_pop(L,2); + } else { + lua_pop(L,1); + } + return ret; +} + + +char *exception_strings(struct tex_language *lang) { + char *value; + int size = 0, current =0; + size_t l =0; + char *ret = NULL; + lua_State *L = Luas[0]; + if (lang->exceptions==0) + return NULL; + lua_checkstack(L,2); + lua_rawgeti(L,LUA_REGISTRYINDEX,lang->exceptions); + if (lua_istable(L,-1)) { + /* iterate and join */ + lua_pushnil(L); /* first key */ + while (lua_next(L,-2) != 0) { + value = (char *)lua_tolstring(L, -1, &l); + if (current + 2 + l > size ) { + ret = xrealloc(ret, (1.2*size)+current+l+1024); + size = (1.2*size)+current+l+1024; + } + *(ret+current) = ' '; + strcpy(ret+current+1,value); + current += l+1; + lua_pop(L, 1); + } + } + return ret; +} + + +/* the sequence from |wordstart| to |r| can contain only normal characters */ +/* it could be faster to modify a halfword pointer and return an integer */ + +halfword find_exception_part(int *j, int *uword, int len) { + halfword g = null, gg = null; + register int i = *j; + i++; /* this puts uword[i] on the '{' */ + while (i<len && uword[i+1] != '}') { + if (g==null) { + gg = new_char(0,uword[i+1]); + g = gg; + } else { + halfword s = new_char(0,uword[i+1]); + couple_nodes(g,s); + g = vlink(g); + } + i++; + } + *j = ++i; + return gg; +} + +int count_exception_part(int *j, int *uword, int len) { + int ret=0; + register int i = *j; + i++; /* this puts uword[i] on the '{' */ + while (i<len && uword[i+1] != '}') { + ret++; + i++; + } + *j = ++i; + return ret; +} + + +static char *PAT_ERROR[] = { + "Exception discretionaries should contain three pairs of braced items.", + "No intervening spaces are allowed.", + NULL }; + +void do_exception (halfword wordstart, halfword r, char *replacement) { + int i; + halfword t; + unsigned len; + int clang; + lang_variables langdata; + int uword[MAX_WORD_LEN+1] = {0}; + (void)utf82u_strcpy((unsigned int *)uword,replacement); + len = u_strlen((unsigned int *)uword); + i = 0; + t=wordstart; + clang = char_lang(wordstart); + langdata.pre_hyphen_char = get_pre_hyphen_char(clang); + langdata.post_hyphen_char = get_post_hyphen_char(clang); + + for (i=0;i<len;i++) { + if (uword[i+1] == '-') { /* a hyphen follows */ + while (vlink(t)!=r && (type(t)!=glyph_node || !is_simple_character(t))) + t = vlink(t); + if (vlink(t)==r) + break; + insert_syllable_discretionary(t, &langdata); + t = vlink(t); /* skip the new disc */ + } else if (uword[i+1] == '=') { + /* do nothing ? */ + t = vlink(t); + } else if (uword[i+1] == '{') { + halfword gg, hh, replace = null; + int repl; + gg = find_exception_part(&i,uword,len); + if (i==len || uword[i+1] != '{') { + tex_error ("broken pattern 1", PAT_ERROR); + } + hh = find_exception_part(&i,uword,len); + if (i==len || uword[i+1] != '{') { + tex_error ("broken pattern 2", PAT_ERROR); + } + repl = count_exception_part(&i,uword,len); + if (i==len) { + tex_error ("broken pattern 3", PAT_ERROR); + } + /*i++; */ /* jump over the last right brace */ + if (vlink(t)==r) + break; + if (repl>0) { + halfword q = t; + replace = vlink(q); + while(repl>0 && q!=null) { + q=vlink(q); + if (type(q)==glyph_node) { + repl--; + } + } + try_couple_nodes(t,vlink(q)); + vlink(q)=null; + } + t = insert_discretionary(t,gg,hh,replace); + } else { + t = vlink(t); + } + } +} + +/* This is a documentation section from the pascal web file. 
It is not +true any more, but I do not have time right now to rewrite it -- Taco + +When the line-breaking routine is unable to find a feasible sequence of +breakpoints, it makes a second pass over the paragraph, attempting to +hyphenate the hyphenatable words. The goal of hyphenation is to insert +discretionary material into the paragraph so that there are more +potential places to break. + +The general rules for hyphenation are somewhat complex and technical, +because we want to be able to hyphenate words that are preceded or +followed by punctuation marks, and because we want the rules to work +for languages other than English. We also must contend with the fact +that hyphens might radically alter the ligature and kerning structure +of a word. + +A sequence of characters will be considered for hyphenation only if it +belongs to a ``potentially hyphenatable part'' of the current paragraph. +This is a sequence of nodes $p_0p_1\ldots p_m$ where $p_0$ is a glue node, +$p_1\ldots p_{m-1}$ are either character or ligature or whatsit or +implicit kern nodes, and $p_m$ is a glue or penalty or insertion or adjust +or mark or whatsit or explicit kern node. (Therefore hyphenation is +disabled by boxes, math formulas, and discretionary nodes already inserted +by the user.) The ligature nodes among $p_1\ldots p_{m-1}$ are effectively +expanded into the original non-ligature characters; the kern nodes and +whatsits are ignored. Each character |c| is now classified as either a +nonletter (if |lc_code(c)=0|), a lowercase letter (if +|lc_code(c)=c|), or an uppercase letter (otherwise); an uppercase letter +is treated as if it were |lc_code(c)| for purposes of hyphenation. The +characters generated by $p_1\ldots p_{m-1}$ may begin with nonletters; let +$c_1$ be the first letter that is not in the middle of a ligature. Whatsit +nodes preceding $c_1$ are ignored; a whatsit found after $c_1$ will be the +terminating node $p_m$. All characters that do not have the same font as +$c_1$ will be treated as nonletters. The |hyphen_char| for that font +must be between 0 and 255, otherwise hyphenation will not be attempted. +\TeX\ looks ahead for as many consecutive letters $c_1\ldots c_n$ as +possible; however, |n| must be less than 64, so a character that would +otherwise be $c_{64}$ is effectively not a letter. Furthermore $c_n$ must +not be in the middle of a ligature. In this way we obtain a string of +letters $c_1\ldots c_n$ that are generated by nodes $p_a\ldots p_b$, where +|1<=a<=b+1<=m|. If |n>=l_hyf+r_hyf|, this string qualifies for hyphenation; +however, |uc_hyph| must be positive, if $c_1$ is uppercase. + +The hyphenation process takes place in three stages. First, the candidate +sequence $c_1\ldots c_n$ is found; then potential positions for hyphens +are determined by referring to hyphenation tables; and finally, the nodes +$p_a\ldots p_b$ are replaced by a new sequence of nodes that includes the +discretionary breaks found. + +Fortunately, we do not have to do all this calculation very often, because +of the way it has been taken out of \TeX's inner loop. For example, when +the second edition of the author's 700-page book {\sl Seminumerical +Algorithms} was typeset by \TeX, only about 1.2 hyphenations needed to be +@^Knuth, Donald Ervin@> +tried per paragraph, since the line breaking algorithm needed to use two +passes on only about 5 per cent of the paragraphs. + + +When a word been set up to contain a candidate for hyphenation, +\TeX\ first looks to see if it is in the user's exception dictionary. 
If not, +hyphens are inserted based on patterns that appear within the given word, +using an algorithm due to Frank~M. Liang. +@^Liang, Franklin Mark@> +*/ + +/* + * This is incompatible with TEX because the first word of a paragraph + * can be hyphenated, but most european users seem to agree that + * prohibiting hyphenation there was not a the best idea ever. + */ + +halfword find_next_wordstart(halfword r) { + register int l; + register int start_ok = 1; + int mathlevel = 1; + while (r!=null) { + switch (type(r)) { + case glue_node: + start_ok = 1; + break; + case math_node: + while (mathlevel>0 ){ + r = vlink(r); + if (r==null) + return r; + if (type(r)==math_node) { + if (subtype(r)==before) { + mathlevel++; + } else { + mathlevel--; + } + } + } + break; + case glyph_node: + if (start_ok && + is_simple_character(r) && + (l = get_lc_code(character(r)))>0 && + (char_uchyph(r) || l == character(r))) + return r; + /* fall through */ + default: + start_ok = 0; + break; + } + r = vlink(r); + } + return r; +} + +int valid_wordend( halfword s) { + register halfword r = s; + register int clang = char_lang(s); + if (r==null) + return 1; + while ((r!=null) && + ((type(r)==glyph_node && is_simple_character(r) && clang == char_lang(r)) || + (type(r)==kern_node && subtype(r)==normal))) { + r = vlink(r); + } + if (r==null || + (type(r)==glyph_node && is_simple_character(r) && clang != char_lang(r)) || + type(r)==glue_node || + type(r)==whatsit_node || + type(r)==ins_node || + type(r)==adjust_node || + type(r)==penalty_node || + (type(r)==kern_node && subtype(r)==explicit)) + return 1; + return 0; +} + +void +hnj_hyphenation (halfword head, halfword tail) { + int lchar, i; + struct tex_language* lang; + lang_variables langdata; + char utf8word[(4*MAX_WORD_LEN)+1] = {0}; + int wordlen = 0; + char *hy = utf8word; + char *replacement = NULL; + halfword s, r = head, wordstart = null, save_tail = null, left = null, right = null; + + /* this first movement assures two things: + * a) that we won't waste lots of time on something that has been + * handled already (in that case, none of the glyphs match |simple_character|). + * b) that the first word can be hyphenated. if the movement was + * not explicit, then the indentation at the start of a paragraph + * list would make find_next_wordstart() look too far ahead. 
+ */ + + while (r!=null && (type(r)!=glyph_node || !is_simple_character(r))) + r =vlink(r); + /* this will make |r| a glyph node with subtype_character */ + r = find_next_wordstart(r); + if (r==null) + return; + + assert (tail!=null); + save_tail = vlink(tail); + s = new_penalty(0); + couple_nodes(tail, s); + + while (r!=null) { /* could be while(1), but let's be paranoid */ + wordstart = r; + assert (is_simple_character(wordstart)); + int clang = char_lang(wordstart); + int lhmin = char_lhmin(wordstart); + int rhmin = char_rhmin(wordstart); + langdata.pre_hyphen_char = get_pre_hyphen_char(clang); + langdata.post_hyphen_char = get_post_hyphen_char(clang); + while (r!=null && + type(r)==glyph_node && + is_simple_character(r) && + clang == char_lang(r) && + (lchar = get_lc_code(character(r)))>0) { + wordlen++; + hy = utf8_idpb(hy,character(r)); + /* this should not be needed any more */ + /*if (vlink(r)!=null) alink(vlink(r))=r;*/ + r = vlink(r); + } + if (valid_wordend(r) && wordlen>=lhmin+rhmin && (lang=tex_languages[clang])!=NULL) { + *hy=0; + if (lang->exceptions!=0 && + (replacement = hyphenation_exception(lang->exceptions,utf8word))!=NULL) { +#ifdef VERBOSE + fprintf(stderr,"replacing %s (c=%d) by %s\n",utf8word,clang,replacement); +#endif + do_exception(wordstart,r,replacement); + free(replacement); + } else if (lang->patterns!=NULL) { + + left = wordstart; + for (i=lhmin;i>1;i--) { + left = vlink(left); + while (!is_simple_character(left)) + left = vlink(left); + } + right = r; + for (i=rhmin;i>0;i--) { + right = alink(right); + while (!is_simple_character(right)) + right = alink(right); + } + +#ifdef VERBOSE + fprintf(stderr,"hyphenate %s (c=%d,l=%d,r=%d) from %c to %c\n",utf8word, + clang,lhmin,rhmin, + character(left), character(right)); +#endif + (void)hnj_hyphen_hyphenate(lang->patterns,wordstart,r,wordlen,left,right, &langdata); + } + } + wordlen = 0; + hy = utf8word; + if (r==null) + break; + r = find_next_wordstart(r); + } + flush_node(vlink(tail)); + vlink(tail) = save_tail; +} + + +void +new_hyphenation (halfword head, halfword tail) { + register int callback_id = 0; + if (head==null || vlink(head)==null) + return; + fix_node_list (head); /* TODO: use couple_nodes() in append_tail()!*/ + callback_id = callback_defined(hyphenate_callback); + if (callback_id>0) { + lua_State *L = Luas[0]; + lua_rawgeti(L,LUA_REGISTRYINDEX,callback_callbacks_id); + lua_rawgeti(L,-1, callback_id); + if (!lua_isfunction(L,-1)) { + lua_pop(L,2); + return; + } + nodelist_to_lua(L,head); + nodelist_to_lua(L,tail); + if (lua_pcall(L,2,0,0) != 0) { + fprintf(stdout,"error: %s\n",lua_tostring(L,-1)); + lua_pop(L,2); + lua_error(L); + return; + } + lua_pop(L,1); + } else { + hnj_hyphenation(head,tail); + } +} + +/* dumping and undumping fonts */ + +#define dump_string(a) \ + if (a!=NULL) { \ + x = strlen(a)+1; \ + dump_int(x); dump_things(*a, x); \ + } else { \ + x = 0; dump_int(x); \ + } + + +void dump_one_language (int i) { + char *s = NULL; + unsigned x = 0; + struct tex_language *lang; + lang = tex_languages[i]; + dump_int(lang->id); + dump_int(lang->pre_hyphen_char); + dump_int(lang->post_hyphen_char); + if (lang->patterns!=NULL) { + s = (char *)hnj_serialize(lang->patterns); + } + dump_string(s); + if (s!=NULL) { + free(s); + s = NULL; + } + if (lang->exceptions!=0) + s = exception_strings(lang); + dump_string(s); + if (s!=NULL) { + free(s); + } + free (lang); +} + +void dump_language_data (void) { + int i; + dump_int(next_lang_id); + for (i=0;i<next_lang_id;i++) { + if (tex_languages[i]) { + 
dump_int(1); + dump_one_language(i); + } else { + dump_int(0); + } + } +} + + +void undump_one_language (int i) { + char *s = NULL; + unsigned x = 0; + struct tex_language *lang = get_language(i); + undump_int(x); lang->id = x; + undump_int(x); lang->pre_hyphen_char = x; + undump_int(x); lang->post_hyphen_char = x; + /* patterns */ + undump_int (x); + if (x>0) { + s = xmalloc(x); + undump_things(*s,x); + load_patterns(lang,(unsigned char *)s); + free(s); + } + /* exceptions */ + undump_int (x); + if (x>0) { + s = xmalloc(x); + undump_things(*s,x); + load_hyphenation(lang,(unsigned char *)s); + free(s); + } +} + +void undump_language_data (void) { + int i; + unsigned x, numlangs; + undump_int(numlangs); + for (i=0;i<numlangs;i++) { + undump_int(x); + if (x==1) { + undump_one_language(i); + } + } +} + |
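
Editor's illustration (not part of the patch): the core idea in hyphen.c above is Liang's pattern scheme. Inter-letter digit values from all matching patterns are merged by taking the maximum at each right-aligned position (see combine(), whose comment gives "04000" + "020" becomes "04020"), and odd digits in the merged result mark permissible break points (the hyphens[char_num] & 1 test in hnj_hyphen_hyphenate). The standalone sketch below shows that merge-then-test behaviour in isolation; the second value string is purely hypothetical and there is no UTF-8 handling here.

/* Standalone sketch of the merge-then-test idea used in hyphen.c.
 * Not part of the diff; the "merged" string below is hypothetical. */
#include <stdio.h>
#include <string.h>

/* Right-aligned maximum merge of two digit strings, as in combine(). */
static void merge_values (char *expr, const char *subexpr)
{
  size_t off = strlen (expr) - strlen (subexpr);
  size_t j;
  for (j = 0; subexpr[j]; j++)
    if (expr[off + j] < subexpr[j])
      expr[off + j] = subexpr[j];
}

int main (void)
{
  /* The example from the combine() comment: "04000" + "020" -> "04020". */
  char a[] = "04000";
  merge_values (a, "020");
  printf ("merged: %s\n", a);

  /* Hypothetical merged values for a short word: odd digits allow a break,
   * exactly as the hyphens[char_num] & 1 test does in the real code. */
  const char merged[] = "0030102";
  size_t i;
  for (i = 0; merged[i]; i++)
    if ((merged[i] - '0') & 1)
      printf ("odd value at position %zu allows a break\n", i);
  return 0;
}

In the code in the diff, the same maximum merge is applied at load time over every substring of every pattern word (the dict->merged pass in hnj_hyphen_load), after which hyphenation itself is a single left-to-right walk over the precomputed state machine with fallback states.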