Diffstat (limited to 'Build/source/libs/xpdf/xpdf-src/xpdf/Parser.cc')
-rw-r--r--  Build/source/libs/xpdf/xpdf-src/xpdf/Parser.cc  143
1 file changed, 95 insertions, 48 deletions
diff --git a/Build/source/libs/xpdf/xpdf-src/xpdf/Parser.cc b/Build/source/libs/xpdf/xpdf-src/xpdf/Parser.cc
index 87e3fc5e24b..4fad6afae13 100644
--- a/Build/source/libs/xpdf/xpdf-src/xpdf/Parser.cc
+++ b/Build/source/libs/xpdf/xpdf-src/xpdf/Parser.cc
@@ -152,36 +152,35 @@ Object *Parser::getObj(Object *obj, GBool simpleOnly,
 Stream *Parser::makeStream(Object *dict, Guchar *fileKey,
                            CryptAlgorithm encAlgorithm, int keyLength,
                            int objNum, int objGen, int recursion) {
-  Object obj;
-  BaseStream *baseStr;
-  Stream *str, *str2;
-  GFileOffset pos, endPos, length;
-  char endstreamBuf[8];
-  GBool foundEndstream;
-  int c, i;
-
   // get stream start position
   lexer->skipToNextLine();
-  if (!(str = lexer->getStream())) {
+  Stream *curStr = lexer->getStream();
+  if (!curStr) {
     return NULL;
   }
-  pos = str->getPos();
+  GFileOffset pos = curStr->getPos();
+
+  GBool haveLength = gFalse;
+  GFileOffset length = 0;
+  GFileOffset endPos;
 
   // check for length in damaged file
   if (xref && xref->getStreamEnd(pos, &endPos)) {
     length = endPos - pos;
+    haveLength = gTrue;
 
   // get length from the stream object
   } else {
+    Object obj;
     dict->dictLookup("Length", &obj, recursion);
     if (obj.isInt()) {
       length = (GFileOffset)(Guint)obj.getInt();
-      obj.free();
+      haveLength = gTrue;
     } else {
-      error(errSyntaxError, getPos(), "Bad 'Length' attribute in stream");
-      obj.free();
-      return NULL;
+      error(errSyntaxError, getPos(),
+            "Missing or invalid 'Length' attribute in stream");
     }
+    obj.free();
   }
 
   // in badly damaged PDF files, we can run off the end of the input
@@ -189,47 +188,78 @@ Stream *Parser::makeStream(Object *dict, Guchar *fileKey,
   if (!lexer->getStream()) {
     return NULL;
   }
+
   // copy the base stream (Lexer will free stream objects when it gets
   // to end of stream -- which can happen in the shift() calls below)
-  baseStr = (BaseStream *)lexer->getStream()->getBaseStream()->copy();
+  BaseStream *baseStr =
+      (BaseStream *)lexer->getStream()->getBaseStream()->copy();
+
+  // 'Length' attribute is missing -- search for 'endstream'
+  if (!haveLength) {
+    GBool foundEndstream = gFalse;
+    char endstreamBuf[8];
+    if ((curStr = lexer->getStream())) {
+      int c;
+      while ((c = curStr->getChar()) != EOF) {
+        if (c == 'e' &&
+            curStr->getBlock(endstreamBuf, 8) == 8 &&
+            !memcmp(endstreamBuf, "ndstream", 8)) {
+          length = curStr->getPos() - 9 - pos;
+          foundEndstream = gTrue;
+          break;
+        }
+      }
+    }
+    if (!foundEndstream) {
+      error(errSyntaxError, getPos(), "Couldn't find 'endstream' for stream");
+      delete baseStr;
+      return NULL;
+    }
+  }
 
   // make new base stream
-  str = baseStr->makeSubStream(pos, gTrue, length, dict);
+  Stream *str = baseStr->makeSubStream(pos, gTrue, length, dict);
 
-  // skip over stream data
-  lexer->setPos(pos + length);
+  // look for the 'endstream' marker
+  if (haveLength) {
+    // skip over stream data
+    lexer->setPos(pos + length);
 
-  // check for 'endstream'
-  // NB: we never reuse the Parser object to parse objects after a
-  // stream, and we could (if the PDF file is damaged) be in the
-  // middle of binary data at this point, so we check the stream data
-  // directly for 'endstream', rather than calling shift() to parse
-  // objects
-  foundEndstream = gFalse;
-  if ((str2 = lexer->getStream())) {
-    // skip up to 100 whitespace chars
-    for (i = 0; i < 100; ++i) {
-      c = str2->getChar();
-      if (!Lexer::isSpace(c)) {
-        break;
+    // check for 'endstream'
+    // NB: we never reuse the Parser object to parse objects after a
+    // stream, and we could (if the PDF file is damaged) be in the
+    // middle of binary data at this point, so we check the stream
+    // data directly for 'endstream', rather than calling shift() to
+    // parse objects
+    GBool foundEndstream = gFalse;
+    char endstreamBuf[8];
+    if ((curStr = lexer->getStream())) {
+      // skip up to 100 whitespace chars
+      int c;
+      for (int i = 0; i < 100; ++i) {
+        c = curStr->getChar();
+        if (!Lexer::isSpace(c)) {
+          break;
+        }
       }
-    }
-    if (c == 'e') {
-      if (str2->getBlock(endstreamBuf, 8) == 8 ||
-          !memcmp(endstreamBuf, "ndstream", 8)) {
-        foundEndstream = gTrue;
+      if (c == 'e') {
+        if (curStr->getBlock(endstreamBuf, 8) == 8 &&
+            !memcmp(endstreamBuf, "ndstream", 8)) {
+          foundEndstream = gTrue;
+        }
       }
     }
-  }
-  if (!foundEndstream) {
-    error(errSyntaxError, getPos(), "Missing 'endstream'");
-    // kludge for broken PDF files: just add 5k to the length, and
-    // hope it's enough
-    // (dict is now owned by str, so we need to copy it before deleting str)
-    dict->copy(&obj);
-    delete str;
-    length += 5000;
-    str = baseStr->makeSubStream(pos, gTrue, length, &obj);
+    if (!foundEndstream) {
+      error(errSyntaxError, getPos(), "Missing 'endstream'");
+      // kludge for broken PDF files: just add 5k to the length, and
+      // hope it's enough
+      // (dict is now owned by str, so we need to copy it before deleting str)
+      Object obj;
+      dict->copy(&obj);
+      delete str;
+      length += 5000;
+      str = baseStr->makeSubStream(pos, gTrue, length, &obj);
+    }
   }
 
   // free the copied base stream
@@ -237,8 +267,25 @@ Stream *Parser::makeStream(Object *dict, Guchar *fileKey,
 
   // handle decryption
   if (fileKey) {
-    str = new DecryptStream(str, fileKey, encAlgorithm, keyLength,
-                            objNum, objGen);
+    // the 'Crypt' filter is used to mark unencrypted metadata streams
+    //~ this should also check for an empty DecodeParams entry
+    GBool encrypted = gTrue;
+    Object obj;
+    dict->dictLookup("Filter", &obj, recursion);
+    if (obj.isName("Crypt")) {
+      encrypted = gFalse;
+    } else if (obj.isArray() && obj.arrayGetLength() >= 1) {
+      Object obj2;
+      if (obj.arrayGet(0, &obj2)->isName("Crypt")) {
+        encrypted = gFalse;
+      }
+      obj2.free();
+    }
+    obj.free();
+    if (encrypted) {
+      str = new DecryptStream(str, fileKey, encAlgorithm, keyLength,
+                              objNum, objGen);
+    }
   }
 
   // get filters
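
The core of the change above is the fallback added in the second hunk: when the stream dictionary carries no usable /Length entry, the parser now scans forward byte by byte until it finds the keyword "endstream" and derives the stream length from that position. The following is a minimal standalone sketch of that scan over a std::istream; it is illustrative only. The function name findStreamLength and the use of the standard iostream API are assumptions made for the sketch, not xpdf's Parser/Lexer API (the real code uses getChar, getBlock, and getPos on xpdf Stream objects, as shown in the diff), and, like the loop in the patch, it does not rewind after a mismatched 8-byte read.

// Sketch (not xpdf code): return the number of bytes between startPos and
// the 'e' of "endstream", or -1 if the keyword is never found.
// Mirrors the patch's loop: on each 'e', read the next 8 bytes and compare
// them against "ndstream"; on a mismatch, keep scanning after those bytes.
#include <cstring>
#include <istream>
#include <sstream>
#include <iostream>

static long findStreamLength(std::istream &in, long startPos) {
  in.seekg(startPos);
  char endstreamBuf[8];
  char c;
  while (in.get(c)) {
    if (c == 'e') {
      in.read(endstreamBuf, 8);
      if (in.gcount() == 8 && std::memcmp(endstreamBuf, "ndstream", 8) == 0) {
        // the read position is now just past "endstream"
        // (9 characters including the leading 'e')
        return static_cast<long>(in.tellg()) - 9 - startPos;
      }
    }
  }
  return -1;  // keyword never found
}

int main() {
  // 17 bytes of stream data followed by the "endstream" keyword
  std::istringstream s("binary blob data\nendstream");
  std::cout << findStreamLength(s, 0) << "\n";  // prints 17
  return 0;
}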