Diffstat (limited to 'gnu')
-rw-r--r--  gnu/local.mk                                                 |    9
-rw-r--r--  gnu/packages/gnuzilla.scm                                    |   11
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2805.patch              |   75
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2807-pt1.patch          |   35
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2807-pt2.patch          |   69
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2807-pt3.patch          |   33
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2807-pt4.patch          |   37
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2807-pt5.patch          |   35
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2808.patch              |  389
-rw-r--r--  gnu/packages/patches/icecat-CVE-2016-2814.patch              |   35
-rw-r--r--  gnu/packages/patches/icecat-update-bundled-graphite2.patch   | 2488
11 files changed, 3215 insertions, 1 deletion
diff --git a/gnu/local.mk b/gnu/local.mk
index 80017ea354..9e31ef9a4b 100644
--- a/gnu/local.mk
+++ b/gnu/local.mk
@@ -530,6 +530,15 @@ dist_patch_DATA = \
 gnu/packages/patches/hydra-disable-darcs-test.patch \
 gnu/packages/patches/icecat-avoid-bundled-includes.patch \
 gnu/packages/patches/icecat-re-enable-DHE-cipher-suites.patch \
+ gnu/packages/patches/icecat-update-bundled-graphite2.patch \
+ gnu/packages/patches/icecat-CVE-2016-2805.patch \
+ gnu/packages/patches/icecat-CVE-2016-2807-pt1.patch \
+ gnu/packages/patches/icecat-CVE-2016-2807-pt2.patch \
+ gnu/packages/patches/icecat-CVE-2016-2807-pt3.patch \
+ gnu/packages/patches/icecat-CVE-2016-2807-pt4.patch \
+ gnu/packages/patches/icecat-CVE-2016-2807-pt5.patch \
+ gnu/packages/patches/icecat-CVE-2016-2808.patch \
+ gnu/packages/patches/icecat-CVE-2016-2814.patch \
 gnu/packages/patches/icu4c-CVE-2014-6585.patch \
 gnu/packages/patches/icu4c-CVE-2015-1270.patch \
 gnu/packages/patches/icu4c-CVE-2015-4760.patch \
diff --git a/gnu/packages/gnuzilla.scm b/gnu/packages/gnuzilla.scm
index e774ed1ad2..abefd90304 100644
--- a/gnu/packages/gnuzilla.scm
+++ b/gnu/packages/gnuzilla.scm
@@ -299,7 +299,16 @@ standards.")
 "1wdmd6hasra36g86ha1dw8sl7a5mvr7c4jbjx4zyg9629y5gqr8g"))
 (patches (search-patches "icecat-avoid-bundled-includes.patch"
- "icecat-re-enable-DHE-cipher-suites.patch"))
+ "icecat-re-enable-DHE-cipher-suites.patch"
+ "icecat-update-bundled-graphite2.patch"
+ "icecat-CVE-2016-2805.patch"
+ "icecat-CVE-2016-2807-pt1.patch"
+ "icecat-CVE-2016-2807-pt2.patch"
+ "icecat-CVE-2016-2807-pt3.patch"
+ "icecat-CVE-2016-2807-pt4.patch"
+ "icecat-CVE-2016-2807-pt5.patch"
+ "icecat-CVE-2016-2808.patch"
+ "icecat-CVE-2016-2814.patch"))
 (modules '((guix build utils)))
 (snippet
 '(begin
diff --git a/gnu/packages/patches/icecat-CVE-2016-2805.patch b/gnu/packages/patches/icecat-CVE-2016-2805.patch
new file mode 100644
index 0000000000..5e4150f00c
--- /dev/null
+++ b/gnu/packages/patches/icecat-CVE-2016-2805.patch
@@ -0,0 +1,75 @@
+Copied from https://hg.mozilla.org/releases/mozilla-esr38/raw-rev/bf34b97757b3
+
+# HG changeset patch
+# User Jon Coppeard <jcoppeard@mozilla.com>
+# Date 1453890675 0
+# Node ID bf34b97757b334af1f9f53b9b59e0b6902e7ed6f
+# Parent 228ca3f46cabaf3f388f6c6640690772aa13c1a5
+Bug 1241731 - Handle incomplete buffer in DiscardTransferables r=sfink a=abillings a=sylvestre
+
+diff --git a/js/src/jit-test/tests/gc/bug-1241731.js b/js/src/jit-test/tests/gc/bug-1241731.js
+new file mode 100644
+--- /dev/null
++++ b/js/src/jit-test/tests/gc/bug-1241731.js
+@@ -0,0 +1,4 @@
++if (!('oomTest' in this))
++ quit();
++
++oomTest(() => serialize(0, [{}]));
+diff --git a/js/src/vm/StructuredClone.cpp b/js/src/vm/StructuredClone.cpp
+--- a/js/src/vm/StructuredClone.cpp
++++ b/js/src/vm/StructuredClone.cpp
+@@ -379,39 +379,50 @@ ReadStructuredClone(JSContext* cx, uint6
+
+ // If the given buffer contains Transferables, free them. Note that custom
+ // Transferables will use the JSStructuredCloneCallbacks::freeTransfer() to
+ // delete their transferables.
+ static void
+ Discard(uint64_t* buffer, size_t nbytes, const JSStructuredCloneCallbacks* cb, void* cbClosure)
+ {
+ MOZ_ASSERT(nbytes % sizeof(uint64_t) == 0);
+- if (nbytes < sizeof(uint64_t))
++ uint64_t* end = buffer + nbytes / sizeof(uint64_t);
++ uint64_t* point = buffer;
++ if (point == end)
+ return; // Empty buffer
+
+- uint64_t* point = buffer;
+ uint32_t tag, data;
+ SCInput::getPair(point++, &tag, &data);
+ if (tag != SCTAG_TRANSFER_MAP_HEADER)
+ return;
+
+ if (TransferableMapHeader(data) == SCTAG_TM_TRANSFERRED)
+ return;
+
+ // freeTransfer should not GC
+ JS::AutoSuppressGCAnalysis nogc;
+
++ if (point == end)
++ return;
++
+ uint64_t numTransferables = LittleEndian::readUint64(point++);
+ while (numTransferables--) {
++ if (point == end)
++ return;
++
+ uint32_t ownership;
+ SCInput::getPair(point++, &tag, &ownership);
+ MOZ_ASSERT(tag >= SCTAG_TRANSFER_MAP_PENDING_ENTRY);
++ if (point == end)
++ return;
+
+ void* content;
+ SCInput::getPtr(point++, &content);
++ if (point == end)
++ return;
+
+ uint64_t extraData = LittleEndian::readUint64(point++);
+
+ if (ownership < JS::SCTAG_TMO_FIRST_OWNED)
+ continue;
+
+ if (ownership == JS::SCTAG_TMO_ALLOC_DATA) {
+ js_free(content);
+
diff --git a/gnu/packages/patches/icecat-CVE-2016-2807-pt1.patch b/gnu/packages/patches/icecat-CVE-2016-2807-pt1.patch
new file mode 100644
index 0000000000..0a6bee378b
--- /dev/null
+++ b/gnu/packages/patches/icecat-CVE-2016-2807-pt1.patch
@@ -0,0 +1,35 @@
+Copied from https://hg.mozilla.org/releases/mozilla-esr38/raw-rev/e7c23c08bf84
+
+# HG changeset patch
+# User Randell Jesup <rjesup@jesup.org>
+# Date 1458543433 14400
+# Node ID e7c23c08bf84a02d9154f31e0c5d121a45884a69
+# Parent a6de1f453712edabff597879398606708c191098
+Bug 1254876: assert windows recording is shut down r=pkerr a=ritu
+
+MozReview-Commit-ID: JRqxBb5TgrE
+
+diff --git a/media/webrtc/trunk/webrtc/modules/audio_device/win/audio_device_core_win.cc b/media/webrtc/trunk/webrtc/modules/audio_device/win/audio_device_core_win.cc
+--- a/media/webrtc/trunk/webrtc/modules/audio_device/win/audio_device_core_win.cc
++++ b/media/webrtc/trunk/webrtc/modules/audio_device/win/audio_device_core_win.cc
+@@ -567,16 +567,19 @@ AudioDeviceWindowsCore::AudioDeviceWindo
+ // ----------------------------------------------------------------------------
+
+ AudioDeviceWindowsCore::~AudioDeviceWindowsCore()
+ {
+ WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__);
+
+ Terminate();
+
++ // Recording thread should be shut down before this!
++ assert(_hRecThread == NULL);
++
+ // The IMMDeviceEnumerator is created during construction. Must release
+ // it here and not in Terminate() since we don't recreate it in Init().
+ SAFE_RELEASE(_ptrEnumerator);
+
+ _ptrAudioBuffer = NULL;
+
+ if (NULL != _hRenderSamplesReadyEvent)
+ {
+
diff --git a/gnu/packages/patches/icecat-CVE-2016-2807-pt2.patch b/gnu/packages/patches/icecat-CVE-2016-2807-pt2.patch
new file mode 100644
index 0000000000..f4b4c0d4eb
--- /dev/null
+++ b/gnu/packages/patches/icecat-CVE-2016-2807-pt2.patch
@@ -0,0 +1,69 @@
+Copied from https://hg.mozilla.org/releases/mozilla-esr38/raw-rev/3d2b62083a6a
+
+# HG changeset patch
+# User Shu-yu Guo <shu@rfrn.org>
+# Date 1459741387 -7200
+# Node ID 3d2b62083a6a4fb43cb330d77142f9dce0959a23
+# Parent 9d4364f6b55c6ee65c13c491292c3abe1ee2c993
+Bug 1254164 - Make aliasedBodyLevelLexicalBegin a uint32.
r=Waldo, a=ritu + +diff --git a/js/src/jit-test/tests/parser/bug-1254164.js b/js/src/jit-test/tests/parser/bug-1254164.js +new file mode 100644 +--- /dev/null ++++ b/js/src/jit-test/tests/parser/bug-1254164.js +@@ -0,0 +1,6 @@ ++// |jit-test| slow; ++ ++var s = ''; ++for (var i = 0; i < 70000; i++) ++ s += 'function x' + i + '() { x' + i + '(); }\n'; ++eval("(function() { " + s + " })();"); +diff --git a/js/src/jsscript.cpp b/js/src/jsscript.cpp +--- a/js/src/jsscript.cpp ++++ b/js/src/jsscript.cpp +@@ -111,17 +111,20 @@ Bindings::initWithTemporaryStorage(Exclu + // JITs when interpreting/compiling aliasedvar ops.) + + // Since unaliased variables are, by definition, only accessed by local + // operations and never through the scope chain, only give shapes to + // aliased variables. While the debugger may observe any scope object at + // any time, such accesses are mediated by DebugScopeProxy (see + // DebugScopeProxy::handleUnaliasedAccess). + uint32_t nslots = CallObject::RESERVED_SLOTS; +- uint32_t aliasedBodyLevelLexicalBegin = UINT16_MAX; ++ ++ // Unless there are aliased body-level lexical bindings at all, set the ++ // begin index to an impossible slot number. ++ uint32_t aliasedBodyLevelLexicalBegin = LOCALNO_LIMIT; + for (BindingIter bi(self); bi; bi++) { + if (bi->aliased()) { + // Per ES6, lexical bindings cannot be accessed until + // initialized. Remember the first aliased slot that is a + // body-level lexical, so that they may be initialized to sentinel + // magic values. + if (numBodyLevelLexicals > 0 && + nslots < aliasedBodyLevelLexicalBegin && +diff --git a/js/src/jsscript.h b/js/src/jsscript.h +--- a/js/src/jsscript.h ++++ b/js/src/jsscript.h +@@ -201,18 +201,18 @@ class Bindings + friend class BindingIter; + friend class AliasedFormalIter; + + RelocatablePtrShape callObjShape_; + uintptr_t bindingArrayAndFlag_; + uint16_t numArgs_; + uint16_t numBlockScoped_; + uint16_t numBodyLevelLexicals_; +- uint16_t aliasedBodyLevelLexicalBegin_; + uint16_t numUnaliasedBodyLevelLexicals_; ++ uint32_t aliasedBodyLevelLexicalBegin_; + uint32_t numVars_; + uint32_t numUnaliasedVars_; + + #if JS_BITS_PER_WORD == 32 + // Bindings is allocated inline inside JSScript, which needs to be + // gc::Cell aligned. + uint32_t padding_; + #endif + diff --git a/gnu/packages/patches/icecat-CVE-2016-2807-pt3.patch b/gnu/packages/patches/icecat-CVE-2016-2807-pt3.patch new file mode 100644 index 0000000000..a5a4212c28 --- /dev/null +++ b/gnu/packages/patches/icecat-CVE-2016-2807-pt3.patch @@ -0,0 +1,33 @@ +Copied from https://hg.mozilla.org/releases/mozilla-esr38/raw-rev/88f1eb2c3f4b + +# HG changeset patch +# User Timothy Nikkel <tnikkel@gmail.com> +# Date 1457637807 21600 +# Node ID 88f1eb2c3f4b4b57365ed88223cf8adc2bec4610 +# Parent bf34b97757b334af1f9f53b9b59e0b6902e7ed6f +Bug 1187420. r=drc r=jmuizelaar a=sylvestre + +MozReview-Commit-ID: Hh0Khqfj8Bf + +diff --git a/media/libjpeg/jstdhuff.c b/media/libjpeg/jstdhuff.c +--- a/media/libjpeg/jstdhuff.c ++++ b/media/libjpeg/jstdhuff.c +@@ -36,16 +36,17 @@ add_huff_table (j_common_ptr cinfo, + */ + nsymbols = 0; + for (len = 1; len <= 16; len++) + nsymbols += bits[len]; + if (nsymbols < 1 || nsymbols > 256) + ERREXIT(cinfo, JERR_BAD_HUFF_TABLE); + + MEMCOPY((*htblptr)->huffval, val, nsymbols * sizeof(UINT8)); ++ MEMZERO(&((*htblptr)->huffval[nsymbols]), (256 - nsymbols) * sizeof(UINT8)); + + /* Initialize sent_table FALSE so table will be written to JPEG file. 
*/ + (*htblptr)->sent_table = FALSE; + } + + + LOCAL(void) + std_huff_tables (j_common_ptr cinfo) + diff --git a/gnu/packages/patches/icecat-CVE-2016-2807-pt4.patch b/gnu/packages/patches/icecat-CVE-2016-2807-pt4.patch new file mode 100644 index 0000000000..5eff4fe99c --- /dev/null +++ b/gnu/packages/patches/icecat-CVE-2016-2807-pt4.patch @@ -0,0 +1,37 @@ +Copied from https://hg.mozilla.org/releases/mozilla-esr38/raw-rev/5c312182da90 + +# HG changeset patch +# User Jan de Mooij <jdemooij@mozilla.com> +# Date 1458828581 -3600 +# Node ID 5c312182da9020504103aa329360abaffa7e232d +# Parent fa4efccde9b7efde8763a178a6cf422b6d37a0e9 +Bug 1254622 - Relookup group->newScript in CreateThisForFunctionWithGroup. r=bhackett a=sylvestre + +MozReview-Commit-ID: KXd7kB70f1Z + +diff --git a/js/src/jsobj.cpp b/js/src/jsobj.cpp +--- a/js/src/jsobj.cpp ++++ b/js/src/jsobj.cpp +@@ -1574,18 +1574,19 @@ CreateThisForFunctionWithGroup(JSContext + // Not enough objects with this group have been created yet, so make a + // plain object and register it with the group. Use the maximum number + // of fixed slots, as is also required by the TypeNewScript. + gc::AllocKind allocKind = GuessObjectGCKind(NativeObject::MAX_FIXED_SLOTS); + PlainObject* res = NewObjectWithGroup<PlainObject>(cx, group, parent, allocKind, newKind); + if (!res) + return nullptr; + +- if (newKind != SingletonObject) +- newScript->registerNewObject(res); ++ // Make sure group->newScript is still there. ++ if (newKind != SingletonObject && group->newScript()) ++ group->newScript()->registerNewObject(res); + + return res; + } + + gc::AllocKind allocKind = NewObjectGCKind(&PlainObject::class_); + + if (newKind == SingletonObject) { + Rooted<TaggedProto> protoRoot(cx, group->proto()); + diff --git a/gnu/packages/patches/icecat-CVE-2016-2807-pt5.patch b/gnu/packages/patches/icecat-CVE-2016-2807-pt5.patch new file mode 100644 index 0000000000..00718ebaac --- /dev/null +++ b/gnu/packages/patches/icecat-CVE-2016-2807-pt5.patch @@ -0,0 +1,35 @@ +Copied from https://hg.mozilla.org/releases/mozilla-esr38/raw-rev/3fdd280fa099 + +# HG changeset patch +# User Carsten "Tomcat" Book <cbook@mozilla.com> +# Date 1461123938 -7200 +# Node ID 3fdd280fa099b6453ce9fd9905af883bc2ebce24 +# Parent 52dfdd37150d62f708dc5bf61dd28f3967596788 +Bug 1252707 - a=sylvestre + +diff --git a/js/src/vm/Shape.cpp b/js/src/vm/Shape.cpp +--- a/js/src/vm/Shape.cpp ++++ b/js/src/vm/Shape.cpp +@@ -382,18 +382,20 @@ NativeObject::getChildPropertyOnDictiona + + if (obj->inDictionaryMode()) { + MOZ_ASSERT(parent == obj->lastProperty()); + RootedGeneric<StackShape*> childRoot(cx, &child); + shape = childRoot->isAccessorShape() ? 
NewGCAccessorShape(cx) : NewGCShape(cx); + if (!shape) + return nullptr; + if (childRoot->hasSlot() && childRoot->slot() >= obj->lastProperty()->base()->slotSpan()) { +- if (!obj->setSlotSpan(cx, childRoot->slot() + 1)) ++ if (!obj->setSlotSpan(cx, childRoot->slot() + 1)) { ++ new (shape) Shape(obj->lastProperty()->base()->unowned(), 0); + return nullptr; ++ } + } + shape->initDictionaryShape(*childRoot, obj->numFixedSlots(), &obj->shape_); + } + + return shape; + } + + /* static */ Shape* + diff --git a/gnu/packages/patches/icecat-CVE-2016-2808.patch b/gnu/packages/patches/icecat-CVE-2016-2808.patch new file mode 100644 index 0000000000..ae190b8b4c --- /dev/null +++ b/gnu/packages/patches/icecat-CVE-2016-2808.patch @@ -0,0 +1,389 @@ +Copied from https://hg.mozilla.org/releases/mozilla-esr38/raw-rev/71f611fd27c7 + +# HG changeset patch +# User Jeff Walden <jwalden@mit.edu> +# Date 1458941573 25200 +# Node ID 71f611fd27c7d6cb7d6dab9895c2922948042543 +# Parent 861f6b83ce1deade2a976cabe059776ad51ce370 +Bug 1246061. r=luke, r=froydnj, a=sylvestre + +diff --git a/js/public/HashTable.h b/js/public/HashTable.h +--- a/js/public/HashTable.h ++++ b/js/public/HashTable.h +@@ -8,16 +8,17 @@ + #define js_HashTable_h + + #include "mozilla/Alignment.h" + #include "mozilla/Assertions.h" + #include "mozilla/Attributes.h" + #include "mozilla/Casting.h" + #include "mozilla/MemoryReporting.h" + #include "mozilla/Move.h" ++#include "mozilla/Opaque.h" + #include "mozilla/PodOperations.h" + #include "mozilla/ReentrancyGuard.h" + #include "mozilla/TemplateLib.h" + #include "mozilla/TypeTraits.h" + + #include "js/Utility.h" + + namespace js { +@@ -27,16 +28,18 @@ template <class> struct DefaultHasher; + template <class, class> class HashMapEntry; + namespace detail { + template <class T> class HashTableEntry; + template <class T, class HashPolicy, class AllocPolicy> class HashTable; + } + + /*****************************************************************************/ + ++using Generation = mozilla::Opaque<uint64_t>; ++ + // A JS-friendly, STL-like container providing a hash-based map from keys to + // values. In particular, HashMap calls constructors and destructors of all + // objects added so non-PODs may be used safely. + // + // Key/Value requirements: + // - movable, destructible, assignable + // HashPolicy requirements: + // - see Hash Policy section below +@@ -200,17 +203,19 @@ class HashMap + return impl.sizeOfExcludingThis(mallocSizeOf); + } + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf); + } + + // If |generation()| is the same before and after a HashMap operation, + // pointers into the table remain valid. +- uint32_t generation() const { return impl.generation(); } ++ Generation generation() const { ++ return impl.generation(); ++ } + + /************************************************** Shorthand operations */ + + bool has(const Lookup& l) const { + return impl.lookup(l).found(); + } + + // Overwrite existing value with v. Return false on oom. +@@ -431,17 +436,19 @@ class HashSet + return impl.sizeOfExcludingThis(mallocSizeOf); + } + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf); + } + + // If |generation()| is the same before and after a HashSet operation, + // pointers into the table remain valid. 
+- uint32_t generation() const { return impl.generation(); } ++ Generation generation() const { ++ return impl.generation(); ++ } + + /************************************************** Shorthand operations */ + + bool has(const Lookup& l) const { + return impl.lookup(l).found(); + } + + // Add |u| if it is not present already. Return false on oom. +@@ -766,17 +773,17 @@ class HashTable : private AllocPolicy + // table operations unless |generation()| is tested. + class Ptr + { + friend class HashTable; + + Entry* entry_; + #ifdef JS_DEBUG + const HashTable* table_; +- uint32_t generation; ++ Generation generation; + #endif + + protected: + Ptr(Entry& entry, const HashTable& tableArg) + : entry_(&entry) + #ifdef JS_DEBUG + , table_(&tableArg) + , generation(tableArg.generation()) +@@ -873,17 +880,17 @@ class HashTable : private AllocPolicy + while (cur < end && !cur->isLive()) + ++cur; + } + + Entry* cur, *end; + #ifdef JS_DEBUG + const HashTable* table_; + uint64_t mutationCount; +- uint32_t generation; ++ Generation generation; + bool validEntry; + #endif + + public: + Range() + : cur(nullptr) + , end(nullptr) + #ifdef JS_DEBUG +@@ -1012,18 +1019,18 @@ class HashTable : private AllocPolicy + // HashTable is not copyable or assignable + HashTable(const HashTable&) = delete; + void operator=(const HashTable&) = delete; + + private: + static const size_t CAP_BITS = 24; + + public: +- Entry* table; // entry storage +- uint32_t gen; // entry storage generation number ++ uint64_t gen; // entry storage generation number ++ Entry* table; // entry storage + uint32_t entryCount; // number of entries in table + uint32_t removedCount:CAP_BITS; // removed entry sentinels in table + uint32_t hashShift:8; // multiplicative hash shift + + #ifdef JS_DEBUG + uint64_t mutationCount; + mutable bool mEntered; + mutable struct Stats +@@ -1097,18 +1104,18 @@ class HashTable : private AllocPolicy + for (Entry* e = oldTable, *end = e + capacity; e < end; ++e) + e->destroyIfLive(); + alloc.free_(oldTable); + } + + public: + explicit HashTable(AllocPolicy ap) + : AllocPolicy(ap) ++ , gen(0) + , table(nullptr) +- , gen(0) + , entryCount(0) + , removedCount(0) + , hashShift(sHashBits) + #ifdef JS_DEBUG + , mutationCount(0) + , mEntered(false) + #endif + {} +@@ -1524,20 +1531,20 @@ class HashTable : private AllocPolicy + } + + uint32_t capacity() const + { + MOZ_ASSERT(table); + return JS_BIT(sHashBits - hashShift); + } + +- uint32_t generation() const ++ Generation generation() const + { + MOZ_ASSERT(table); +- return gen; ++ return Generation(gen); + } + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const + { + return mallocSizeOf(table); + } + + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const +diff --git a/js/src/jsapi.h b/js/src/jsapi.h +--- a/js/src/jsapi.h ++++ b/js/src/jsapi.h +@@ -270,20 +270,16 @@ class AutoHashMapRooter : protected Auto + + size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return map.sizeOfExcludingThis(mallocSizeOf); + } + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return map.sizeOfIncludingThis(mallocSizeOf); + } + +- uint32_t generation() const { +- return map.generation(); +- } +- + /************************************************** Shorthand operations */ + + bool has(const Lookup& l) const { + return map.has(l); + } + + template<typename KeyInput, typename ValueInput> + bool put(const KeyInput& k, const ValueInput& v) { +@@ -385,20 +381,16 @@ class AutoHashSetRooter : protected Auto + + 
size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return set.sizeOfExcludingThis(mallocSizeOf); + } + size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const { + return set.sizeOfIncludingThis(mallocSizeOf); + } + +- uint32_t generation() const { +- return set.generation(); +- } +- + /************************************************** Shorthand operations */ + + bool has(const Lookup& l) const { + return set.has(l); + } + + bool put(const T& t) { + return set.put(t); +diff --git a/js/src/jscntxt.h b/js/src/jscntxt.h +--- a/js/src/jscntxt.h ++++ b/js/src/jscntxt.h +@@ -30,21 +30,21 @@ class DebugModeOSRVolatileJitFrameIterat + } + + typedef HashSet<JSObject*> ObjectSet; + typedef HashSet<Shape*> ShapeSet; + + /* Detects cycles when traversing an object graph. */ + class AutoCycleDetector + { ++ Generation hashsetGenerationAtInit; + JSContext* cx; + RootedObject obj; ++ ObjectSet::AddPtr hashsetAddPointer; + bool cyclic; +- uint32_t hashsetGenerationAtInit; +- ObjectSet::AddPtr hashsetAddPointer; + MOZ_DECL_USE_GUARD_OBJECT_NOTIFIER + + public: + AutoCycleDetector(JSContext* cx, HandleObject objArg + MOZ_GUARD_OBJECT_NOTIFIER_PARAM) + : cx(cx), obj(cx, objArg), cyclic(true) + { + MOZ_GUARD_OBJECT_NOTIFIER_INIT; +diff --git a/js/src/jswatchpoint.cpp b/js/src/jswatchpoint.cpp +--- a/js/src/jswatchpoint.cpp ++++ b/js/src/jswatchpoint.cpp +@@ -22,25 +22,25 @@ DefaultHasher<WatchKey>::hash(const Look + { + return DefaultHasher<JSObject*>::hash(key.object.get()) ^ HashId(key.id.get()); + } + + namespace { + + class AutoEntryHolder { + typedef WatchpointMap::Map Map; ++ Generation gen; + Map& map; + Map::Ptr p; +- uint32_t gen; + RootedObject obj; + RootedId id; + + public: + AutoEntryHolder(JSContext* cx, Map& map, Map::Ptr p) +- : map(map), p(p), gen(map.generation()), obj(cx, p->key().object), id(cx, p->key().id) ++ : gen(map.generation()), map(map), p(p), obj(cx, p->key().object), id(cx, p->key().id) + { + MOZ_ASSERT(!p->value().held); + p->value().held = true; + } + + ~AutoEntryHolder() { + if (gen != map.generation()) + p = map.lookup(WatchKey(obj, id)); +diff --git a/js/src/shell/jsheaptools.cpp b/js/src/shell/jsheaptools.cpp +--- a/js/src/shell/jsheaptools.cpp ++++ b/js/src/shell/jsheaptools.cpp +@@ -267,17 +267,17 @@ HeapReverser::traverseEdge(void* cell, J + Map::AddPtr a = map.lookupForAdd(cell); + if (!a) { + /* + * We've never visited this cell before. Add it to the map (thus + * marking it as visited), and put it on the work stack, to be + * visited from the main loop. + */ + Node n(kind); +- uint32_t generation = map.generation(); ++ Generation generation = map.generation(); + if (!map.add(a, cell, Move(n)) || + !work.append(Child(cell, kind))) + return false; + /* If the map has been resized, re-check the pointer. */ + if (map.generation() != generation) + a = map.lookupForAdd(cell); + } + +diff --git a/mfbt/Opaque.h b/mfbt/Opaque.h +new file mode 100644 +--- /dev/null ++++ b/mfbt/Opaque.h +@@ -0,0 +1,44 @@ ++/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ ++/* vim: set ts=8 sts=2 et sw=2 tw=80: */ ++/* This Source Code Form is subject to the terms of the Mozilla Public ++ * License, v. 2.0. If a copy of the MPL was not distributed with this ++ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ ++ ++/* An opaque integral type supporting only comparison operators. 
*/ ++ ++#ifndef mozilla_Opaque_h ++#define mozilla_Opaque_h ++ ++#include "mozilla/TypeTraits.h" ++ ++namespace mozilla { ++ ++/** ++ * Opaque<T> is a replacement for integral T in cases where only comparisons ++ * must be supported, and it's desirable to prevent accidental dependency on ++ * exact values. ++ */ ++template<typename T> ++class Opaque final ++{ ++ static_assert(mozilla::IsIntegral<T>::value, ++ "mozilla::Opaque only supports integral types"); ++ ++ T mValue; ++ ++public: ++ Opaque() {} ++ explicit Opaque(T aValue) : mValue(aValue) {} ++ ++ bool operator==(const Opaque& aOther) const { ++ return mValue == aOther.mValue; ++ } ++ ++ bool operator!=(const Opaque& aOther) const { ++ return !(*this == aOther); ++ } ++}; ++ ++} // namespace mozilla ++ ++#endif /* mozilla_Opaque_h */ +diff --git a/mfbt/moz.build b/mfbt/moz.build +--- a/mfbt/moz.build ++++ b/mfbt/moz.build +@@ -48,16 +48,17 @@ EXPORTS.mozilla = [ + 'MathAlgorithms.h', + 'Maybe.h', + 'MaybeOneOf.h', + 'MemoryChecking.h', + 'MemoryReporting.h', + 'Move.h', + 'NullPtr.h', + 'NumericLimits.h', ++ 'Opaque.h', + 'Pair.h', + 'PodOperations.h', + 'Poison.h', + 'Range.h', + 'RangedPtr.h', + 'RefCountType.h', + 'ReentrancyGuard.h', + 'RefPtr.h', + diff --git a/gnu/packages/patches/icecat-CVE-2016-2814.patch b/gnu/packages/patches/icecat-CVE-2016-2814.patch new file mode 100644 index 0000000000..5f197f25e6 --- /dev/null +++ b/gnu/packages/patches/icecat-CVE-2016-2814.patch @@ -0,0 +1,35 @@ + +# HG changeset patch +# User Jean-Yves Avenard <jyavenard@mozilla.com> +# Date 1460655260 25200 +# Node ID a13c0bc84d6eb132f4199f563fbe228d2d3b3a51 +# Parent 88f1eb2c3f4b4b57365ed88223cf8adc2bec4610 +Bug 1254721: Ensure consistency between Cenc offsets and sizes table. r=gerald a=sylvestre + +MozReview-Commit-ID: E1KbKIIBR87 + +diff --git a/media/libstagefright/frameworks/av/media/libstagefright/SampleTable.cpp b/media/libstagefright/frameworks/av/media/libstagefright/SampleTable.cpp +--- a/media/libstagefright/frameworks/av/media/libstagefright/SampleTable.cpp ++++ b/media/libstagefright/frameworks/av/media/libstagefright/SampleTable.cpp +@@ -612,18 +612,18 @@ status_t + SampleTable::parseSampleCencInfo() { + if ((!mCencDefaultSize && !mCencInfoCount) || mCencOffsets.isEmpty()) { + // We don't have all the cenc information we need yet. Quietly fail and + // hope we get the data we need later in the track header. + ALOGV("Got half of cenc saio/saiz pair. Deferring parse until we get the other half."); + return OK; + } + +- if (!mCencSizes.isEmpty() && mCencOffsets.size() > 1 && +- mCencSizes.size() != mCencOffsets.size()) { ++ if ((mCencOffsets.size() > 1 && mCencOffsets.size() < mCencInfoCount) || ++ (!mCencDefaultSize && mCencSizes.size() < mCencInfoCount)) { + return ERROR_MALFORMED; + } + + if (mCencInfoCount > kMAX_ALLOCATION / sizeof(SampleCencInfo)) { + // Avoid future OOM. + return ERROR_MALFORMED; + } + + diff --git a/gnu/packages/patches/icecat-update-bundled-graphite2.patch b/gnu/packages/patches/icecat-update-bundled-graphite2.patch new file mode 100644 index 0000000000..c3ab920335 --- /dev/null +++ b/gnu/packages/patches/icecat-update-bundled-graphite2.patch @@ -0,0 +1,2488 @@ + +# HG changeset patch +# User Jonathan Kew <jkew@mozilla.com> +# Date 1460660890 -3600 +# Node ID 7330633d20ffb33941e41ea0666c4099b6e6d317 +# Parent 5c312182da9020504103aa329360abaffa7e232d +Bug 1262846 (patch for ESR trees) - Update Graphite2 library to 1.3.8. 
r=jrmuizel a=sledru + +diff --git a/gfx/graphite2/README.mozilla b/gfx/graphite2/README.mozilla +--- a/gfx/graphite2/README.mozilla ++++ b/gfx/graphite2/README.mozilla +@@ -1,3 +1,3 @@ +-This directory contains the Graphite2 library release 1.3.6 from +-https://github.com/silnrsi/graphite/releases/download/1.3.6/graphite-minimal-1.3.6.tgz ++This directory contains the Graphite2 library release 1.3.8 from ++https://github.com/silnrsi/graphite/releases/download/1.3.8/graphite2-minimal-1.3.8.tgz + See gfx/graphite2/moz-gr-update.sh for update procedure. +diff --git a/gfx/graphite2/include/graphite2/Font.h b/gfx/graphite2/include/graphite2/Font.h +--- a/gfx/graphite2/include/graphite2/Font.h ++++ b/gfx/graphite2/include/graphite2/Font.h +@@ -25,17 +25,17 @@ + either version 2 of the License or (at your option) any later version. + */ + #pragma once + + #include "graphite2/Types.h" + + #define GR2_VERSION_MAJOR 1 + #define GR2_VERSION_MINOR 3 +-#define GR2_VERSION_BUGFIX 6 ++#define GR2_VERSION_BUGFIX 8 + + #ifdef __cplusplus + extern "C" + { + #endif + + typedef struct gr_face gr_face; + typedef struct gr_font gr_font; +diff --git a/gfx/graphite2/moz-gr-update.sh b/gfx/graphite2/moz-gr-update.sh +--- a/gfx/graphite2/moz-gr-update.sh ++++ b/gfx/graphite2/moz-gr-update.sh +@@ -14,17 +14,17 @@ + RELEASE=$1 + + if [ "x$RELEASE" == "x" ] + then + echo "Must provide the version number to be used." + exit 1 + fi + +-TARBALL="https://github.com/silnrsi/graphite/releases/download/$RELEASE/graphite-minimal-$RELEASE.tgz" ++TARBALL="https://github.com/silnrsi/graphite/releases/download/$RELEASE/graphite2-minimal-$RELEASE.tgz" + + foo=`basename $0` + TMPFILE=`mktemp -t ${foo}` || exit 1 + + curl -L "$TARBALL" -o "$TMPFILE" + tar -x -z -C gfx/graphite2/ --strip-components 1 -f "$TMPFILE" || exit 1 + rm "$TMPFILE" + +diff --git a/gfx/graphite2/src/CachedFace.cpp b/gfx/graphite2/src/CachedFace.cpp +--- a/gfx/graphite2/src/CachedFace.cpp ++++ b/gfx/graphite2/src/CachedFace.cpp +@@ -64,20 +64,20 @@ bool CachedFace::runGraphite(Segment *se + return false; + + assert(m_cacheStore); + // find where the segment can be broken + Slot * subSegStartSlot = seg->first(); + Slot * subSegEndSlot = subSegStartSlot; + uint16 cmapGlyphs[eMaxSpliceSize]; + int subSegStart = 0; +- for (unsigned int i = 0; i < seg->charInfoCount(); ++i) ++ for (unsigned int i = 0; i < seg->charInfoCount() && subSegEndSlot; ++i) + { + const unsigned int length = i - subSegStart + 1; +- if (length < eMaxSpliceSize) ++ if (length < eMaxSpliceSize && subSegEndSlot->gid() < m_cacheStore->maxCmapGid()) + cmapGlyphs[length-1] = subSegEndSlot->gid(); + else return false; + const bool spaceOnly = m_cacheStore->isSpaceGlyph(subSegEndSlot->gid()); + // at this stage the character to slot mapping is still 1 to 1 + const int breakWeight = seg->charinfo(i)->breakWeight(), + nextBreakWeight = (i + 1 < seg->charInfoCount())? 
+ seg->charinfo(i+1)->breakWeight() : 0; + const uint8 f = seg->charinfo(i)->flags(); +diff --git a/gfx/graphite2/src/Code.cpp b/gfx/graphite2/src/Code.cpp +--- a/gfx/graphite2/src/Code.cpp ++++ b/gfx/graphite2/src/Code.cpp +@@ -61,93 +61,88 @@ inline bool is_return(const instr i) { + const instr pop_ret = *opmap[POP_RET].impl, + ret_zero = *opmap[RET_ZERO].impl, + ret_true = *opmap[RET_TRUE].impl; + return i == pop_ret || i == ret_zero || i == ret_true; + } + + struct context + { +- context(uint8 ref=0) : codeRef(ref) {flags.changed=false; flags.referenced=false; flags.inserted=false;} ++ context(uint8 ref=0) : codeRef(ref) {flags.changed=false; flags.referenced=false;} + struct { + uint8 changed:1, +- referenced:1, +- inserted:1; ++ referenced:1; + } flags; + uint8 codeRef; + }; + + } // end namespace + + + class Machine::Code::decoder + { + public: + struct limits; +- struct analysis +- { +- static const int NUMCONTEXTS = 256; +- uint8 slotref; +- context contexts[NUMCONTEXTS]; +- byte max_ref; +- +- analysis() : slotref(0), max_ref(0) {}; +- void set_ref(int index, bool incinsert=false) throw(); +- void set_noref(int index) throw(); +- void set_changed(int index) throw(); +- +- }; ++ static const int NUMCONTEXTS = 256; + + decoder(limits & lims, Code &code, enum passtype pt) throw(); + + bool load(const byte * bc_begin, const byte * bc_end); + void apply_analysis(instr * const code, instr * code_end); +- byte max_ref() { return _analysis.max_ref; } +- int pre_context() const { return _pre_context; } ++ byte max_ref() { return _max_ref; } ++ int out_index() const { return _out_index; } + + private: ++ void set_ref(int index) throw(); ++ void set_noref(int index) throw(); ++ void set_changed(int index) throw(); + opcode fetch_opcode(const byte * bc); + void analyse_opcode(const opcode, const int8 * const dp) throw(); + bool emit_opcode(opcode opc, const byte * & bc); +- bool validate_opcode(const opcode opc, const byte * const bc); ++ bool validate_opcode(const byte opc, const byte * const bc); + bool valid_upto(const uint16 limit, const uint16 x) const throw(); + bool test_context() const throw(); ++ bool test_ref(int8 index) const throw(); + void failure(const status_t s) const throw() { _code.failure(s); } + + Code & _code; +- int _pre_context; +- uint16 _rule_length; ++ int _out_index; ++ uint16 _out_length; + instr * _instr; + byte * _data; + limits & _max; +- analysis _analysis; + enum passtype _passtype; + int _stack_depth; + bool _in_ctxt_item; ++ int16 _slotref; ++ context _contexts[NUMCONTEXTS]; ++ byte _max_ref; + }; + + + struct Machine::Code::decoder::limits + { + const byte * bytecode; + const uint8 pre_context; + const uint16 rule_length, + classes, + glyf_attrs, + features; + const byte attrid[gr_slatMax]; + }; + + inline Machine::Code::decoder::decoder(limits & lims, Code &code, enum passtype pt) throw() + : _code(code), +- _pre_context(code._constraint ? 0 : lims.pre_context), +- _rule_length(code._constraint ? 1 : lims.rule_length), ++ _out_index(code._constraint ? 0 : lims.pre_context), ++ _out_length(code._constraint ? 
1 : lims.rule_length), + _instr(code._code), _data(code._data), _max(lims), _passtype(pt), + _stack_depth(0), +- _in_ctxt_item(false) ++ _in_ctxt_item(false), ++ _slotref(0), ++ _max_ref(0) + { } + + + + Machine::Code::Code(bool is_constraint, const byte * bytecode_begin, const byte * const bytecode_end, + uint8 pre_context, uint16 rule_length, const Silf & silf, const Face & face, + enum passtype pt, byte * * const _out) + : _code(0), _data(0), _data_size(0), _instr_count(0), _max_ref(0), _status(loaded), +@@ -163,17 +158,17 @@ Machine::Code::Code(bool is_constraint, + return; + } + assert(bytecode_end > bytecode_begin); + const opcode_t * op_to_fn = Machine::getOpcodeTable(); + + // Allocate code and data target buffers, these sizes are a worst case + // estimate. Once we know their real sizes the we'll shrink them. + if (_out) _code = reinterpret_cast<instr *>(*_out); +- else _code = static_cast<instr *>(malloc(estimateCodeDataOut(bytecode_end-bytecode_begin))); ++ else _code = static_cast<instr *>(malloc(estimateCodeDataOut(bytecode_end-bytecode_begin, 1, is_constraint ? 0 : rule_length))); + _data = reinterpret_cast<byte *>(_code + (bytecode_end - bytecode_begin)); + + if (!_code || !_data) { + failure(alloc_failed); + return; + } + + decoder::limits lims = { +@@ -266,23 +261,23 @@ bool Machine::Code::decoder::load(const + return bool(_code); + } + + // Validation check and fixups. + // + + opcode Machine::Code::decoder::fetch_opcode(const byte * bc) + { +- const opcode opc = opcode(*bc++); ++ const byte opc = *bc++; + + // Do some basic sanity checks based on what we know about the opcode + if (!validate_opcode(opc, bc)) return MAX_OPCODE; + + // And check it's arguments as far as possible +- switch (opc) ++ switch (opcode(opc)) + { + case NOP : + break; + case PUSH_BYTE : + case PUSH_BYTEU : + case PUSH_SHORT : + case PUSH_SHORTU : + case PUSH_LONG : +@@ -319,47 +314,57 @@ opcode Machine::Code::decoder::fetch_opc + case COND : + _stack_depth -= 2; + if (_stack_depth <= 0) + failure(underfull_stack); + break; + case NEXT : + case NEXT_N : // runtime checked + case COPY_NEXT : +- test_context(); +- ++_pre_context; ++ ++_out_index; ++ if (_out_index < -1 || _out_index > _out_length || _slotref > _max.rule_length) ++ failure(out_of_range_data); + break; + case PUT_GLYPH_8BIT_OBS : + valid_upto(_max.classes, bc[0]); + test_context(); + break; + case PUT_SUBS_8BIT_OBS : +- valid_upto(_rule_length, _pre_context + int8(bc[0])); ++ test_ref(int8(bc[0])); + valid_upto(_max.classes, bc[1]); + valid_upto(_max.classes, bc[2]); + test_context(); + break; + case PUT_COPY : +- valid_upto(_rule_length, _pre_context + int8(bc[0])); ++ test_ref(int8(bc[0])); + test_context(); + break; + case INSERT : + if (_passtype >= PASS_TYPE_POSITIONING) + failure(invalid_opcode); +- else +- --_pre_context; ++ ++_out_length; ++ if (_out_index < 0) ++_out_index; ++ if (_out_index < -1 || _out_index >= _out_length) ++ failure(out_of_range_data); + break; + case DELETE : + if (_passtype >= PASS_TYPE_POSITIONING) + failure(invalid_opcode); +- test_context(); ++ if (_out_index < _max.pre_context) ++ failure(out_of_range_data); ++ --_out_index; ++ --_out_length; ++ if (_out_index < -1 || _out_index > _out_length) ++ failure(out_of_range_data); + break; + case ASSOC : ++ if (bc[0] == 0) ++ failure(out_of_range_data); + for (uint8 num = bc[0]; num; --num) +- valid_upto(_rule_length, _pre_context + int8(bc[num])); ++ test_ref(int8(bc[num])); + test_context(); + break; + case CNTXT_ITEM : + valid_upto(_max.rule_length, 
_max.pre_context + int8(bc[0])); + if (bc + 2 + bc[1] >= _max.bytecode) failure(jump_past_end); + if (_in_ctxt_item) failure(nested_context_item); + break; + case ATTR_SET : +@@ -378,52 +383,43 @@ opcode Machine::Code::decoder::fetch_opc + failure(underfull_stack); + if (valid_upto(gr_slatMax, bc[0])) + valid_upto(_max.attrid[bc[0]], bc[1]); + test_context(); + break; + case PUSH_SLOT_ATTR : + ++_stack_depth; + valid_upto(gr_slatMax, bc[0]); +- valid_upto(_rule_length, _pre_context + int8(bc[1])); ++ test_ref(int8(bc[1])); + if (attrCode(bc[0]) == gr_slatUserDefn) // use IATTR for user attributes + failure(out_of_range_data); + break; + case PUSH_GLYPH_ATTR_OBS : ++ case PUSH_ATT_TO_GATTR_OBS : + ++_stack_depth; + valid_upto(_max.glyf_attrs, bc[0]); +- valid_upto(_rule_length, _pre_context + int8(bc[1])); ++ test_ref(int8(bc[1])); + break; ++ case PUSH_ATT_TO_GLYPH_METRIC : + case PUSH_GLYPH_METRIC : + ++_stack_depth; + valid_upto(kgmetDescent, bc[0]); +- valid_upto(_rule_length, _pre_context + int8(bc[1])); ++ test_ref(int8(bc[1])); + // level: dp[2] no check necessary + break; + case PUSH_FEAT : + ++_stack_depth; + valid_upto(_max.features, bc[0]); +- valid_upto(_rule_length, _pre_context + int8(bc[1])); +- break; +- case PUSH_ATT_TO_GATTR_OBS : +- ++_stack_depth; +- valid_upto(_max.glyf_attrs, bc[0]); +- valid_upto(_rule_length, _pre_context + int8(bc[1])); +- break; +- case PUSH_ATT_TO_GLYPH_METRIC : +- ++_stack_depth; +- valid_upto(kgmetDescent, bc[0]); +- valid_upto(_rule_length, _pre_context + int8(bc[1])); +- // level: dp[2] no check necessary ++ test_ref(int8(bc[1])); + break; + case PUSH_ISLOT_ATTR : + ++_stack_depth; + if (valid_upto(gr_slatMax, bc[0])) + { +- valid_upto(_rule_length, _pre_context + int8(bc[1])); ++ test_ref(int8(bc[1])); + valid_upto(_max.attrid[bc[0]], bc[2]); + } + break; + case PUSH_IGLYPH_ATTR :// not implemented + ++_stack_depth; + break; + case POP_RET : + if (--_stack_depth < 0) +@@ -442,118 +438,107 @@ opcode Machine::Code::decoder::fetch_opc + valid_upto(_max.attrid[bc[0]], bc[1]); + test_context(); + break; + case PUSH_PROC_STATE : // dummy: dp[0] no check necessary + case PUSH_VERSION : + ++_stack_depth; + break; + case PUT_SUBS : +- valid_upto(_rule_length, _pre_context + int8(bc[0])); ++ test_ref(int8(bc[0])); + valid_upto(_max.classes, uint16(bc[1]<< 8) | bc[2]); + valid_upto(_max.classes, uint16(bc[3]<< 8) | bc[4]); + test_context(); + break; + case PUT_SUBS2 : // not implemented + case PUT_SUBS3 : // not implemented + break; + case PUT_GLYPH : + valid_upto(_max.classes, uint16(bc[0]<< 8) | bc[1]); + test_context(); + break; + case PUSH_GLYPH_ATTR : + case PUSH_ATT_TO_GLYPH_ATTR : + ++_stack_depth; + valid_upto(_max.glyf_attrs, uint16(bc[0]<< 8) | bc[1]); +- valid_upto(_rule_length, _pre_context + int8(bc[2])); ++ test_ref(int8(bc[2])); ++ break; ++ case SET_FEAT : ++ valid_upto(_max.features, bc[0]); ++ test_ref(int8(bc[1])); + break; + default: + failure(invalid_opcode); + break; + } + +- return bool(_code) ? opc : MAX_OPCODE; ++ return bool(_code) ? 
opcode(opc) : MAX_OPCODE; + } + + + void Machine::Code::decoder::analyse_opcode(const opcode opc, const int8 * arg) throw() + { +- if (_code._constraint) return; +- + switch (opc) + { + case DELETE : + _code._delete = true; + break; ++ case ASSOC : ++ set_changed(0); ++// for (uint8 num = arg[0]; num; --num) ++// _analysis.set_noref(num); ++ break; + case PUT_GLYPH_8BIT_OBS : + case PUT_GLYPH : + _code._modify = true; +- _analysis.set_changed(0); ++ set_changed(0); + break; + case ATTR_SET : + case ATTR_ADD : ++ case ATTR_SUB : + case ATTR_SET_SLOT : + case IATTR_SET_SLOT : + case IATTR_SET : + case IATTR_ADD : + case IATTR_SUB : +- _analysis.set_noref(0); ++ set_noref(0); + break; + case NEXT : + case COPY_NEXT : +- if (!_analysis.contexts[_analysis.slotref].flags.inserted) +- ++_analysis.slotref; +- _analysis.contexts[_analysis.slotref] = context(_code._instr_count+1); ++ ++_slotref; ++ _contexts[_slotref] = context(_code._instr_count+1); + // if (_analysis.slotref > _analysis.max_ref) _analysis.max_ref = _analysis.slotref; + break; + case INSERT : +- _analysis.contexts[_analysis.slotref].flags.inserted = true; ++ if (_slotref >= 0) --_slotref; + _code._modify = true; + break; + case PUT_SUBS_8BIT_OBS : // slotref on 1st parameter + case PUT_SUBS : + _code._modify = true; +- _analysis.set_changed(0); ++ set_changed(0); + GR_FALLTHROUGH; + // no break + case PUT_COPY : +- { +- if (arg[0] != 0) { _analysis.set_changed(0); _code._modify = true; } +- if (arg[0] <= 0 && -arg[0] <= _analysis.slotref - _analysis.contexts[_analysis.slotref].flags.inserted) +- _analysis.set_ref(arg[0], true); +- else if (arg[0] > 0) +- _analysis.set_ref(arg[0], true); ++ if (arg[0] != 0) { set_changed(0); _code._modify = true; } ++ set_ref(arg[0]); + break; +- } +- case PUSH_ATT_TO_GATTR_OBS : // slotref on 2nd parameter +- if (_code._constraint) return; +- GR_FALLTHROUGH; +- // no break + case PUSH_GLYPH_ATTR_OBS : + case PUSH_SLOT_ATTR : + case PUSH_GLYPH_METRIC : ++ case PUSH_ATT_TO_GATTR_OBS : + case PUSH_ATT_TO_GLYPH_METRIC : + case PUSH_ISLOT_ATTR : + case PUSH_FEAT : +- if (arg[1] <= 0 && -arg[1] <= _analysis.slotref - _analysis.contexts[_analysis.slotref].flags.inserted) +- _analysis.set_ref(arg[1], true); +- else if (arg[1] > 0) +- _analysis.set_ref(arg[1], true); ++ case SET_FEAT : ++ set_ref(arg[1]); + break; + case PUSH_ATT_TO_GLYPH_ATTR : +- if (_code._constraint) return; +- GR_FALLTHROUGH; +- // no break + case PUSH_GLYPH_ATTR : +- if (arg[2] <= 0 && -arg[2] <= _analysis.slotref - _analysis.contexts[_analysis.slotref].flags.inserted) +- _analysis.set_ref(arg[2], true); +- else if (arg[2] > 0) +- _analysis.set_ref(arg[2], true); +- break; +- case ASSOC : // slotrefs in varargs ++ set_ref(arg[2]); + break; + default: + break; + } + } + + + bool Machine::Code::decoder::emit_opcode(opcode opc, const byte * & bc) +@@ -579,81 +564,89 @@ bool Machine::Code::decoder::emit_opcode + _data += param_sz; + _code._data_size += param_sz; + } + + // recursively decode a context item so we can split the skip into + // instruction and data portions. 
+ if (opc == CNTXT_ITEM) + { +- assert(_pre_context == 0); ++ assert(_out_index == 0); + _in_ctxt_item = true; +- _pre_context = _max.pre_context + int8(_data[-2]); +- _rule_length = _max.rule_length; ++ _out_index = _max.pre_context + int8(_data[-2]); ++ _slotref = int8(_data[-2]); ++ _out_length = _max.rule_length; + + const size_t ctxt_start = _code._instr_count; + byte & instr_skip = _data[-1]; + byte & data_skip = *_data++; + ++_code._data_size; + const byte *curr_end = _max.bytecode; + + if (load(bc, bc + instr_skip)) + { + bc += instr_skip; + data_skip = instr_skip - (_code._instr_count - ctxt_start); + instr_skip = _code._instr_count - ctxt_start; + _max.bytecode = curr_end; + +- _rule_length = 1; +- _pre_context = 0; ++ _out_length = 1; ++ _out_index = 0; ++ _slotref = 0; + _in_ctxt_item = false; + } + else + { +- _pre_context = 0; ++ _out_index = 0; ++ _slotref = 0; + return false; + } + } + + return bool(_code); + } + + + void Machine::Code::decoder::apply_analysis(instr * const code, instr * code_end) + { + // insert TEMP_COPY commands for slots that need them (that change and are referenced later) + int tempcount = 0; + if (_code._constraint) return; + + const instr temp_copy = Machine::getOpcodeTable()[TEMP_COPY].impl[0]; +- for (const context * c = _analysis.contexts, * const ce = c + _analysis.slotref; c != ce; ++c) ++ for (const context * c = _contexts, * const ce = c + _slotref; c < ce; ++c) + { + if (!c->flags.referenced || !c->flags.changed) continue; + + instr * const tip = code + c->codeRef + tempcount; + memmove(tip+1, tip, (code_end - tip) * sizeof(instr)); + *tip = temp_copy; + ++code_end; + ++tempcount; + _code._delete = true; + } + + _code._instr_count = code_end - code; + } + + + inline +-bool Machine::Code::decoder::validate_opcode(const opcode opc, const byte * const bc) ++bool Machine::Code::decoder::validate_opcode(const byte opc, const byte * const bc) + { + if (opc >= MAX_OPCODE) + { + failure(invalid_opcode); + return false; + } + const opcode_t & op = Machine::getOpcodeTable()[opc]; ++ if (op.impl[_code._constraint] == 0) ++ { ++ failure(unimplemented_opcode_used); ++ return false; ++ } + if (op.param_sz == VARARGS && bc >= _max.bytecode) + { + failure(arguments_exhausted); + return false; + } + const size_t param_sz = op.param_sz == VARARGS ? 
bc[0] + 1 : op.param_sz; + if (bc - 1 + param_sz >= _max.bytecode) + { +@@ -666,56 +659,69 @@ bool Machine::Code::decoder::validate_op + + bool Machine::Code::decoder::valid_upto(const uint16 limit, const uint16 x) const throw() + { + const bool t = (limit != 0) && (x < limit); + if (!t) failure(out_of_range_data); + return t; + } + ++inline ++bool Machine::Code::decoder::test_ref(int8 index) const throw() ++{ ++ if (_code._constraint && !_in_ctxt_item) ++ { ++ if (index > 0 || -index > _max.pre_context) ++ { ++ failure(out_of_range_data); ++ return false; ++ } ++ } ++ else ++ return valid_upto(_max.rule_length, _slotref + _max.pre_context + index); ++ return true; ++} ++ + bool Machine::Code::decoder::test_context() const throw() + { +- if (_pre_context >= _rule_length || _analysis.slotref >= analysis::NUMCONTEXTS - 1) ++ if (_out_index >= _out_length || _out_index < 0 || _slotref >= NUMCONTEXTS - 1) + { + failure(out_of_range_data); + return false; + } + return true; + } + + inline + void Machine::Code::failure(const status_t s) throw() { + release_buffers(); + _status = s; + } + + + inline +-void Machine::Code::decoder::analysis::set_ref(int index, bool incinsert) throw() { +- if (incinsert && contexts[slotref].flags.inserted) --index; +- if (index + slotref < 0 || index + slotref >= NUMCONTEXTS) return; +- contexts[index + slotref].flags.referenced = true; +- if ((index > 0 || !contexts[index + slotref].flags.inserted) && index + slotref > max_ref) max_ref = index + slotref; ++void Machine::Code::decoder::set_ref(int index) throw() { ++ if (index + _slotref < 0 || index + _slotref >= NUMCONTEXTS) return; ++ _contexts[index + _slotref].flags.referenced = true; ++ if (index + _slotref > _max_ref) _max_ref = index + _slotref; + } + + + inline +-void Machine::Code::decoder::analysis::set_noref(int index) throw() { +- if (contexts[slotref].flags.inserted) --index; +- if (index + slotref < 0 || index + slotref >= NUMCONTEXTS) return; +- if ((index > 0 || !contexts[index + slotref].flags.inserted) && index + slotref > max_ref) max_ref = index + slotref; ++void Machine::Code::decoder::set_noref(int index) throw() { ++ if (index + _slotref < 0 || index + _slotref >= NUMCONTEXTS) return; ++ if (index + _slotref > _max_ref) _max_ref = index + _slotref; + } + + + inline +-void Machine::Code::decoder::analysis::set_changed(int index) throw() { +- if (contexts[slotref].flags.inserted) --index; +- if (index + slotref < 0 || index + slotref >= NUMCONTEXTS) return; +- contexts[index + slotref].flags.changed = true; +- if ((index > 0 || !contexts[index + slotref].flags.inserted) && index + slotref > max_ref) max_ref = index + slotref; ++void Machine::Code::decoder::set_changed(int index) throw() { ++ if (index + _slotref < 0 || index + _slotref >= NUMCONTEXTS) return; ++ _contexts[index + _slotref].flags.changed= true; ++ if (index + _slotref > _max_ref) _max_ref = index + _slotref; + } + + + void Machine::Code::release_buffers() throw() + { + if (_own) + free(_code); + _code = 0; +diff --git a/gfx/graphite2/src/Collider.cpp b/gfx/graphite2/src/Collider.cpp +--- a/gfx/graphite2/src/Collider.cpp ++++ b/gfx/graphite2/src/Collider.cpp +@@ -21,17 +21,17 @@ + + Alternatively, the contents of this file may be used under the terms of the + Mozilla Public License (http://mozilla.org/MPL) or the GNU General Public + License, as published by the Free Software Foundation, either version 2 + of the License or (at your option) any later version. 
+ */ + #include <algorithm> + #include <limits> +-#include <math.h> ++#include <cmath> + #include <string> + #include <functional> + #include "inc/Collider.h" + #include "inc/Segment.h" + #include "inc/Slot.h" + #include "inc/GlyphCache.h" + #include "inc/Sparse.h" + +@@ -824,43 +824,43 @@ bool KernCollider::initSlot(Segment *seg + if (margin < 10) margin = 10; + + _limit = limit; + _offsetPrev = offsetPrev; // kern from a previous pass + + // Calculate the height of the glyph and how many horizontal slices to use. + if (_maxy >= 1e37f) + { +- _maxy = ymax; +- _miny = ymin; + _sliceWidth = margin / 1.5f; ++ _maxy = ymax + margin; ++ _miny = ymin - margin; + numSlices = int((_maxy - _miny + 2) / (_sliceWidth / 1.5f) + 1.f); // +2 helps with rounding errors + _edges.clear(); + _edges.insert(_edges.begin(), numSlices, (dir & 1) ? 1e38f : -1e38f); + _xbound = (dir & 1) ? (float)1e38f : (float)-1e38f; + } + else if (_maxy != ymax || _miny != ymin) + { + if (_miny != ymin) + { +- numSlices = int((ymin - _miny) / _sliceWidth - 1); ++ numSlices = int((ymin - margin - _miny) / _sliceWidth - 1); + _miny += numSlices * _sliceWidth; + if (numSlices < 0) + _edges.insert(_edges.begin(), -numSlices, (dir & 1) ? 1e38f : -1e38f); + else if ((unsigned)numSlices < _edges.size()) // this shouldn't fire since we always grow the range + { + Vector<float>::iterator e = _edges.begin(); + while (numSlices--) + ++e; + _edges.erase(_edges.begin(), e); + } + } + if (_maxy != ymax) + { +- numSlices = int((ymax - _miny) / _sliceWidth + 1); ++ numSlices = int((ymax + margin - _miny) / _sliceWidth + 1); + _maxy = numSlices * _sliceWidth + _miny; + if (numSlices > (int)_edges.size()) + _edges.insert(_edges.end(), numSlices - _edges.size(), (dir & 1) ? 1e38f : -1e38f); + else if (numSlices < (int)_edges.size()) // this shouldn't fire since we always grow the range + { + while ((int)_edges.size() > numSlices) + _edges.pop_back(); + } +@@ -930,53 +930,60 @@ bool KernCollider::initSlot(Segment *seg + // Return false if we know there is no collision, true if we think there might be one. + bool KernCollider::mergeSlot(Segment *seg, Slot *slot, const Position &currShift, float currSpace, int dir, GR_MAYBE_UNUSED json * const dbgout) + { + int rtl = (dir & 1) * 2 - 1; + if (!seg->getFace()->glyphs().check(slot->gid())) + return false; + const Rect &bb = seg->theGlyphBBoxTemporary(slot->gid()); + const float sx = slot->origin().x + currShift.x; +- float x = sx + (rtl > 0 ? bb.tr.x : bb.bl.x); ++ float x = (sx + (rtl > 0 ? bb.tr.x : bb.bl.x)) * rtl; + // this isn't going to reduce _mingap so skip +- if ((rtl > 0 && x < _xbound - _mingap - currSpace) || (rtl <= 0 && x > _xbound + _mingap + currSpace)) ++ if (x < rtl * (_xbound - _mingap - currSpace)) + return false; + + const float sy = slot->origin().y + currShift.y; +- int smin = max(0, int((bb.bl.y + (1 - _miny + sy)) / _sliceWidth + 1)); +- int smax = min((int)_edges.size() - 1, int((bb.tr.y + (1 - _miny + sy)) / _sliceWidth + 1)); ++ int smin = max(1, int((bb.bl.y + (1 - _miny + sy)) / _sliceWidth + 1)) - 1; ++ int smax = min((int)_edges.size() - 2, int((bb.tr.y + (1 - _miny + sy)) / _sliceWidth + 1)) + 1; ++ if (smin > smax) ++ return false; + bool collides = false; ++ float below = smin > 0 ? _edges[smin-1] * rtl : 1e38f; ++ float here = _edges[smin] * rtl; ++ float above = smin < (int)_edges.size() - 1 ? 
_edges[smin+1] * rtl : 1e38f; + + for (int i = smin; i <= smax; ++i) + { + float t; + float y = (float)(_miny - 1 + (i + .5f) * _sliceWidth); // vertical center of slice +- if (x * rtl > _edges[i] * rtl - _mingap - currSpace) ++ if ( (x > here - _mingap - currSpace) ++ || (x > below - _mingap - currSpace) ++ || (x > above - _mingap - currSpace)) + { + // 2 * currSpace to account for the space that is already separating them and the space we want to add +- float m = get_edge(seg, slot, currShift, y, _sliceWidth, rtl > 0) + 2 * rtl * currSpace; +- t = rtl * (_edges[i] - m); ++ float m = get_edge(seg, slot, currShift, y, _sliceWidth, rtl > 0) * rtl + 2 * currSpace; + // Check slices above and below (if any). +- if (i < (int)_edges.size() - 1) t = min(t, rtl * (_edges[i+1] - m)); +- if (i > 0) t = min(t, rtl * (_edges[i-1] - m)); ++ t = min(min(here, below), above) - m; + // _mingap is positive to shrink + if (t < _mingap) + { + _mingap = t; + collides = true; + } + #if !defined GRAPHITE2_NTRACING + // Debugging - remember the closest neighboring edge for this slice. +- if (rtl * m > rtl * _nearEdges[i]) ++ if (m > rtl * _nearEdges[i]) + { + _slotNear[i] = slot; +- _nearEdges[i] = m; ++ _nearEdges[i] = m * rtl; + } + #endif + } ++ below = here; here = above; ++ above = i < (int)_edges.size() - 2 ? _edges[i+2] * rtl : 1e38f; + } + return collides; // note that true is not a necessarily reliable value + + } // end of KernCollider::mergeSlot + + + // Return the amount to kern by. + Position KernCollider::resolve(GR_MAYBE_UNUSED Segment *seg, GR_MAYBE_UNUSED Slot *slot, +diff --git a/gfx/graphite2/src/Face.cpp b/gfx/graphite2/src/Face.cpp +--- a/gfx/graphite2/src/Face.cpp ++++ b/gfx/graphite2/src/Face.cpp +@@ -178,17 +178,18 @@ bool Face::runGraphite(Segment *seg, con + if ((seg->dir() & 3) == 3 && aSilf->bidiPass() == 0xFF) + seg->doMirror(aSilf->aMirror()); + bool res = aSilf->runGraphite(seg, 0, aSilf->positionPass(), true); + if (res) + { + seg->associateChars(0, seg->charInfoCount()); + if (aSilf->flags() & 0x20) + res &= seg->initCollisions(); +- res &= aSilf->runGraphite(seg, aSilf->positionPass(), aSilf->numPasses(), false); ++ if (res) ++ res &= aSilf->runGraphite(seg, aSilf->positionPass(), aSilf->numPasses(), false); + } + + #if !defined GRAPHITE2_NTRACING + if (dbgout) + { + seg->positionSlots(0, 0, 0, aSilf->dir()); + *dbgout << json::item + << json::close // Close up the passes array +@@ -226,17 +227,17 @@ const Silf *Face::chooseSilf(uint32 scri + return m_silfs; + } + + uint16 Face::findPseudo(uint32 uid) const + { + return (m_numSilf) ? 
m_silfs[0].findPseudo(uid) : 0; + } + +-uint16 Face::getGlyphMetric(uint16 gid, uint8 metric) const ++int32 Face::getGlyphMetric(uint16 gid, uint8 metric) const + { + switch (metrics(metric)) + { + case kgmetAscent : return m_ascent; + case kgmetDescent : return m_descent; + default: + if (gid >= glyphs().numGlyphs()) return 0; + return glyphs().glyph(gid)->getMetric(metric); +@@ -277,17 +278,17 @@ Face::Table::Table(const Face & face, co + : _f(&face), _compressed(false) + { + size_t sz = 0; + _p = static_cast<const byte *>((*_f->m_ops.get_table)(_f->m_appFaceHandle, n, &sz)); + _sz = uint32(sz); + + if (!TtfUtil::CheckTable(n, _p, _sz)) + { +- this->~Table(); // Make sure we release the table buffer even if the table filed it's checks ++ releaseBuffers(); // Make sure we release the table buffer even if the table failed it's checks + return; + } + + if (be::peek<uint32>(_p) >= version) + decompress(); + } + + void Face::Table::releaseBuffers() +@@ -324,17 +325,18 @@ Error Face::Table::decompress() + switch(compression(hdr >> 27)) + { + case NONE: return e; + + case LZ4: + { + uncompressed_size = hdr & 0x07ffffff; + uncompressed_table = gralloc<byte>(uncompressed_size); +- if (!e.test(!uncompressed_table, E_OUTOFMEM)) ++ if (!e.test(!uncompressed_table || uncompressed_size < 4, E_OUTOFMEM)) ++ memset(uncompressed_table, 0, 4); // make sure version number is initialised + // coverity[forward_null : FALSE] - uncompressed_table has been checked so can't be null + // coverity[checked_return : FALSE] - we test e later + e.test(lz4::decompress(p, _sz - 2*sizeof(uint32), uncompressed_table, uncompressed_size) != signed(uncompressed_size), E_SHRINKERFAILED); + break; + } + + default: + e.error(E_BADSCHEME); +diff --git a/gfx/graphite2/src/GlyphCache.cpp b/gfx/graphite2/src/GlyphCache.cpp +--- a/gfx/graphite2/src/GlyphCache.cpp ++++ b/gfx/graphite2/src/GlyphCache.cpp +@@ -111,18 +111,20 @@ private: + _num_glyphs_attributes, + _num_attrs; // number of glyph attributes per glyph + }; + + + + GlyphCache::GlyphCache(const Face & face, const uint32 face_options) + : _glyph_loader(new Loader(face, bool(face_options & gr_face_dumbRendering))), +- _glyphs(_glyph_loader && *_glyph_loader ? grzeroalloc<const GlyphFace *>(_glyph_loader->num_glyphs()) : 0), +- _boxes(_glyph_loader && _glyph_loader->has_boxes() ? grzeroalloc<GlyphBox *>(_glyph_loader->num_glyphs()) : 0), ++ _glyphs(_glyph_loader && *_glyph_loader && _glyph_loader->num_glyphs() ++ ? grzeroalloc<const GlyphFace *>(_glyph_loader->num_glyphs()) : 0), ++ _boxes(_glyph_loader && _glyph_loader->has_boxes() && _glyph_loader->num_glyphs() ++ ? grzeroalloc<GlyphBox *>(_glyph_loader->num_glyphs()) : 0), + _num_glyphs(_glyphs ? _glyph_loader->num_glyphs() : 0), + _num_attrs(_glyphs ? _glyph_loader->num_attrs() : 0), + _upem(_glyphs ? 
_glyph_loader->units_per_em() : 0) + { + if ((face_options & gr_face_preloadGlyphs) && _glyph_loader && _glyphs) + { + int numsubs = 0; + GlyphFace * const glyphs = new GlyphFace [_num_glyphs]; +@@ -139,17 +141,17 @@ GlyphCache::GlyphCache(const Face & face + for (uint16 gid = 1; loaded && gid != _num_glyphs; ++gid) + _glyphs[gid] = loaded = _glyph_loader->read_glyph(gid, glyphs[gid], &numsubs); + + if (!loaded) + { + _glyphs[0] = 0; + delete [] glyphs; + } +- else if (numsubs > 0) ++ else if (numsubs > 0 && _boxes) + { + GlyphBox * boxes = (GlyphBox *)gralloc<char>(_num_glyphs * sizeof(GlyphBox) + numsubs * 8 * sizeof(float)); + GlyphBox * currbox = boxes; + + for (uint16 gid = 0; currbox && gid != _num_glyphs; ++gid) + { + _boxes[gid] = currbox; + currbox = _glyph_loader->read_box(gid, currbox, *_glyphs[gid]); +@@ -204,16 +206,18 @@ GlyphCache::~GlyphCache() + free(_boxes[0]); + free(_boxes); + } + delete _glyph_loader; + } + + const GlyphFace *GlyphCache::glyph(unsigned short glyphid) const //result may be changed by subsequent call with a different glyphid + { ++ if (glyphid >= numGlyphs()) ++ return _glyphs[0]; + const GlyphFace * & p = _glyphs[glyphid]; + if (p == 0 && _glyph_loader) + { + int numsubs = 0; + GlyphFace * g = new GlyphFace(); + if (g) p = _glyph_loader->read_glyph(glyphid, *g, &numsubs); + if (!p) + { +@@ -280,26 +284,27 @@ GlyphCache::Loader::Loader(const Face & + _long_fmt = flags & 1; + int tmpnumgattrs = (m_pGloc.size() + - (p - m_pGloc) + - sizeof(uint16)*(flags & 0x2 ? _num_attrs : 0)) + / (_long_fmt ? sizeof(uint32) : sizeof(uint16)) - 1; + + if (version >= 0x00020000 || tmpnumgattrs < 0 || tmpnumgattrs > 65535 + || _num_attrs == 0 || _num_attrs > 0x3000 // is this hard limit appropriate? +- || _num_glyphs_graphics > tmpnumgattrs) ++ || _num_glyphs_graphics > tmpnumgattrs ++ || m_pGlat.size() < 4) + { + _head = Face::Table(); + return; + } + + _num_glyphs_attributes = static_cast<unsigned short>(tmpnumgattrs); + p = m_pGlat; + version = be::read<uint32>(p); +- if (version >= 0x00040000) // reject Glat tables that are too new ++ if (version >= 0x00040000 || (version >= 0x00030000 && m_pGlat.size() < 8)) // reject Glat tables that are too new + { + _head = Face::Table(); + return; + } + else if (version >= 0x00030000) + { + unsigned int glatflags = be::read<uint32>(p); + _has_boxes = glatflags & 1; +@@ -381,22 +386,24 @@ const GlyphFace * GlyphCache::Loader::re + } + else + { + be::skip<uint16>(gloc, glyphid); + glocs = be::read<uint16>(gloc); + gloce = be::peek<uint16>(gloc); + } + +- if (glocs >= m_pGlat.size() || gloce > m_pGlat.size()) ++ if (glocs >= m_pGlat.size() - 1 || gloce > m_pGlat.size()) + return 0; + + const uint32 glat_version = be::peek<uint32>(m_pGlat); +- if (glat_version == 0x00030000) ++ if (glat_version >= 0x00030000) + { ++ if (glocs >= gloce) ++ return 0; + const byte * p = m_pGlat + glocs; + uint16 bmap = be::read<uint16>(p); + int num = bit_set_count((uint32)bmap); + if (numsubs) *numsubs += num; + glocs += 6 + 8 * num; + if (glocs > gloce) + return 0; + } +@@ -449,29 +456,31 @@ GlyphBox * GlyphCache::Loader::read_box( + } + else + { + be::skip<uint16>(gloc, gid); + glocs = be::read<uint16>(gloc); + gloce = be::peek<uint16>(gloc); + } + +- if (glocs >= m_pGlat.size() || gloce > m_pGlat.size()) ++ if (gloce > m_pGlat.size() || glocs + 6 >= gloce) + return 0; + + const byte * p = m_pGlat + glocs; + uint16 bmap = be::read<uint16>(p); + int num = bit_set_count((uint32)bmap); + + Rect bbox = glyph.theBBox(); + Rect diamax(Position(bbox.bl.x + 
bbox.bl.y, bbox.bl.x - bbox.tr.y), + Position(bbox.tr.x + bbox.tr.y, bbox.tr.x - bbox.bl.y)); + Rect diabound = readbox(diamax, p[0], p[2], p[1], p[3]); + ::new (curr) GlyphBox(num, bmap, &diabound); + be::skip<uint8>(p, 4); ++ if (glocs + 6 + num * 8 >= gloce) ++ return 0; + + for (int i = 0; i < num * 2; ++i) + { + Rect box = readbox((i & 1) ? diamax : bbox, p[0], p[2], p[1], p[3]); + curr->addSubBox(i >> 1, i & 1, &box); + be::skip<uint8>(p, 4); + } + return (GlyphBox *)((char *)(curr) + sizeof(GlyphBox) + 2 * num * sizeof(Rect)); +diff --git a/gfx/graphite2/src/GlyphFace.cpp b/gfx/graphite2/src/GlyphFace.cpp +--- a/gfx/graphite2/src/GlyphFace.cpp ++++ b/gfx/graphite2/src/GlyphFace.cpp +@@ -24,25 +24,25 @@ Mozilla Public License (http://mozilla.o + License, as published by the Free Software Foundation, either version 2 + of the License or (at your option) any later version. + */ + #include "inc/GlyphFace.h" + + + using namespace graphite2; + +-uint16 GlyphFace::getMetric(uint8 metric) const ++int32 GlyphFace::getMetric(uint8 metric) const + { + switch (metrics(metric)) + { +- case kgmetLsb : return static_cast<uint16>(m_bbox.bl.x); +- case kgmetRsb : return static_cast<uint16>(m_advance.x - m_bbox.tr.x); +- case kgmetBbTop : return static_cast<uint16>(m_bbox.tr.y); +- case kgmetBbBottom : return static_cast<uint16>(m_bbox.bl.y); +- case kgmetBbLeft : return static_cast<uint16>(m_bbox.bl.x); +- case kgmetBbRight : return static_cast<uint16>(m_bbox.tr.x); +- case kgmetBbHeight : return static_cast<uint16>(m_bbox.tr.y - m_bbox.bl.y); +- case kgmetBbWidth : return static_cast<uint16>(m_bbox.tr.x - m_bbox.bl.x); +- case kgmetAdvWidth : return static_cast<uint16>(m_advance.x); +- case kgmetAdvHeight : return static_cast<uint16>(m_advance.y); ++ case kgmetLsb : return m_bbox.bl.x; ++ case kgmetRsb : return m_advance.x - m_bbox.tr.x; ++ case kgmetBbTop : return m_bbox.tr.y; ++ case kgmetBbBottom : return m_bbox.bl.y; ++ case kgmetBbLeft : return m_bbox.bl.x; ++ case kgmetBbRight : return m_bbox.tr.x; ++ case kgmetBbHeight : return m_bbox.tr.y - m_bbox.bl.y; ++ case kgmetBbWidth : return m_bbox.tr.x - m_bbox.bl.x; ++ case kgmetAdvWidth : return m_advance.x; ++ case kgmetAdvHeight : return m_advance.y; + default : return 0; + } + } +diff --git a/gfx/graphite2/src/Justifier.cpp b/gfx/graphite2/src/Justifier.cpp +--- a/gfx/graphite2/src/Justifier.cpp ++++ b/gfx/graphite2/src/Justifier.cpp +@@ -95,62 +95,63 @@ float Segment::justify(Slot *pSlot, cons + + end = pLast->nextSibling(); + pFirst = pFirst->nextSibling(); + + int icount = 0; + int numLevels = silf()->numJustLevels(); + if (!numLevels) + { +- for (s = pSlot; s != end; s = s->next()) ++ for (s = pSlot; s && s != end; s = s->nextSibling()) + { + CharInfo *c = charinfo(s->before()); + if (isWhitespace(c->unicodeChar())) + { + s->setJustify(this, 0, 3, 1); + s->setJustify(this, 0, 2, 1); + s->setJustify(this, 0, 0, -1); + ++icount; + } + } + if (!icount) + { +- for (s = pSlot; s != end; s = s->nextSibling()) ++ for (s = pSlot; s && s != end; s = s->nextSibling()) + { + s->setJustify(this, 0, 3, 1); + s->setJustify(this, 0, 2, 1); + s->setJustify(this, 0, 0, -1); + } + } + ++numLevels; + } + + Vector<JustifyTotal> stats(numLevels); +- for (s = pFirst; s != end; s = s->nextSibling()) ++ for (s = pFirst; s && s != end; s = s->nextSibling()) + { + float w = s->origin().x / scale + s->advance() - base; + if (w > currWidth) currWidth = w; + for (int j = 0; j < numLevels; ++j) + stats[j].accumulate(s, this, j); + s->just(0); + } + + for (int i = 
(width < 0.0f) ? -1 : numLevels - 1; i >= 0; --i) + { + float diff; + float error = 0.; + float diffpw; + int tWeight = stats[i].weight(); ++ if (tWeight == 0) continue; + + do { + error = 0.; + diff = width - currWidth; + diffpw = diff / tWeight; + tWeight = 0; +- for (s = pFirst; s != end; s = s->nextSibling()) // don't include final glyph ++ for (s = pFirst; s && s != end; s = s->nextSibling()) // don't include final glyph + { + int w = s->getJustify(this, i, 3); + float pref = diffpw * w + error; + int step = s->getJustify(this, i, 2); + if (!step) step = 1; // handle lazy font developers + if (pref > 0) + { + float max = uint16(s->getJustify(this, i, 0)); +diff --git a/gfx/graphite2/src/NameTable.cpp b/gfx/graphite2/src/NameTable.cpp +--- a/gfx/graphite2/src/NameTable.cpp ++++ b/gfx/graphite2/src/NameTable.cpp +@@ -42,25 +42,26 @@ NameTable::NameTable(const void* data, s + memcpy(pdata, data, length); + m_table = reinterpret_cast<const TtfUtil::Sfnt::FontNames*>(pdata); + + if ((length > sizeof(TtfUtil::Sfnt::FontNames)) && + (length > sizeof(TtfUtil::Sfnt::FontNames) + + sizeof(TtfUtil::Sfnt::NameRecord) * ( be::swap<uint16>(m_table->count) - 1))) + { + uint16 offset = be::swap<uint16>(m_table->string_offset); +- m_nameData = reinterpret_cast<const uint8*>(pdata) + offset; +- setPlatformEncoding(platformId, encodingID); +- m_nameDataLength = length - offset; ++ if (offset < length) ++ { ++ m_nameData = reinterpret_cast<const uint8*>(pdata) + offset; ++ setPlatformEncoding(platformId, encodingID); ++ m_nameDataLength = length - offset; ++ return; ++ } + } +- else +- { +- free(const_cast<TtfUtil::Sfnt::FontNames*>(m_table)); +- m_table = NULL; +- } ++ free(const_cast<TtfUtil::Sfnt::FontNames*>(m_table)); ++ m_table = NULL; + } + + uint16 NameTable::setPlatformEncoding(uint16 platformId, uint16 encodingID) + { + if (!m_nameData) return 0; + uint16 i = 0; + uint16 count = be::swap<uint16>(m_table->count); + for (; i < count; i++) +@@ -139,28 +140,36 @@ void* NameTable::getName(uint16& languag + uint16 offset = be::swap<uint16>(nameRecord.offset); + if(offset + utf16Length > m_nameDataLength) + { + languageId = 0; + length = 0; + return NULL; + } + utf16Length >>= 1; // in utf16 units +- utf16::codeunit_t * utf16Name = gralloc<utf16::codeunit_t>(utf16Length); ++ utf16::codeunit_t * utf16Name = gralloc<utf16::codeunit_t>(utf16Length + 1); + if (!utf16Name) + { + languageId = 0; + length = 0; + return NULL; + } + const uint8* pName = m_nameData + offset; + for (size_t i = 0; i < utf16Length; i++) + { + utf16Name[i] = be::read<uint16>(pName); + } ++ utf16Name[utf16Length] = 0; ++ if (!utf16::validate(utf16Name, utf16Name + utf16Length)) ++ { ++ free(utf16Name); ++ languageId = 0; ++ length = 0; ++ return NULL; ++ } + switch (enc) + { + case gr_utf8: + { + utf8::codeunit_t* uniBuffer = gralloc<utf8::codeunit_t>(3 * utf16Length + 1); + if (!uniBuffer) + { + free(utf16Name); +diff --git a/gfx/graphite2/src/Pass.cpp b/gfx/graphite2/src/Pass.cpp +--- a/gfx/graphite2/src/Pass.cpp ++++ b/gfx/graphite2/src/Pass.cpp +@@ -96,17 +96,17 @@ bool Pass::readPass(const byte * const p + const byte * p = pass_start, + * const pass_end = p + pass_length; + size_t numRanges; + + if (e.test(pass_length < 40, E_BADPASSLENGTH)) return face.error(e); + // Read in basic values + const byte flags = be::read<byte>(p); + if (e.test((flags & 0x1f) && +- (pt < PASS_TYPE_POSITIONING || !m_silf->aCollision() || !face.glyphs().hasBoxes()), ++ (pt < PASS_TYPE_POSITIONING || !m_silf->aCollision() || !face.glyphs().hasBoxes() 
|| !(m_silf->flags() & 0x20)), + E_BADCOLLISIONPASS)) + return face.error(e); + m_numCollRuns = flags & 0x7; + m_kernColls = (flags >> 3) & 0x3; + m_isReverseDir = (flags >> 5) & 0x1; + m_iMaxLoop = be::read<byte>(p); + if (m_iMaxLoop < 1) m_iMaxLoop = 1; + be::skip<byte>(p,2); // skip maxContext & maxBackup +@@ -226,17 +226,21 @@ bool Pass::readRules(const byte * rule_m + // Load rules. + const byte * ac_begin = 0, * rc_begin = 0, + * ac_end = ac_data + be::peek<uint16>(o_action), + * rc_end = rc_data + be::peek<uint16>(o_constraint); + + // Allocate pools + m_rules = new Rule [m_numRules]; + m_codes = new Code [m_numRules*2]; +- const size_t prog_pool_sz = vm::Machine::Code::estimateCodeDataOut(ac_end - ac_data + rc_end - rc_data); ++ int totalSlots = 0; ++ const uint16 *tsort = sort_key; ++ for (int i = 0; i < m_numRules; ++i) ++ totalSlots += be::peek<uint16>(--tsort); ++ const size_t prog_pool_sz = vm::Machine::Code::estimateCodeDataOut(ac_end - ac_data + rc_end - rc_data, 2 * m_numRules, totalSlots); + m_progs = gralloc<byte>(prog_pool_sz); + byte * prog_pool_free = m_progs, + * prog_pool_end = m_progs + prog_pool_sz; + if (e.test(!(m_rules && m_codes && m_progs), E_OUTOFMEM)) return face.error(e); + + Rule * r = m_rules + m_numRules - 1; + for (size_t n = m_numRules; r >= m_rules; --n, --r, ac_end = ac_begin, rc_end = rc_begin) + { +@@ -249,17 +253,17 @@ bool Pass::readRules(const byte * rule_m + if (r->sort > 63 || r->preContext >= r->sort || r->preContext > m_maxPreCtxt || r->preContext < m_minPreCtxt) + return false; + ac_begin = ac_data + be::peek<uint16>(--o_action); + --o_constraint; + rc_begin = be::peek<uint16>(o_constraint) ? rc_data + be::peek<uint16>(o_constraint) : rc_end; + + if (ac_begin > ac_end || ac_begin > ac_data_end || ac_end > ac_data_end + || rc_begin > rc_end || rc_begin > rc_data_end || rc_end > rc_data_end +- || vm::Machine::Code::estimateCodeDataOut(ac_end - ac_begin + rc_end - rc_begin) > size_t(prog_pool_end - prog_pool_free)) ++ || vm::Machine::Code::estimateCodeDataOut(ac_end - ac_begin + rc_end - rc_begin, 2, r->sort) > size_t(prog_pool_end - prog_pool_free)) + return false; + r->action = new (m_codes+n*2-2) vm::Machine::Code(false, ac_begin, ac_end, r->preContext, r->sort, *m_silf, face, pt, &prog_pool_free); + r->constraint = new (m_codes+n*2-1) vm::Machine::Code(true, rc_begin, rc_end, r->preContext, r->sort, *m_silf, face, pt, &prog_pool_free); + + if (e.test(!r->action || !r->constraint, E_OUTOFMEM) + || e.test(r->action->status() != Code::loaded, r->action->status() + E_CODEFAILURE) + || e.test(r->constraint->status() != Code::loaded, r->constraint->status() + E_CODEFAILURE) + || e.test(!r->constraint->immutable(), E_MUTABLECCODE)) +@@ -330,17 +334,17 @@ bool Pass::readStates(const byte * start + + // load state transition table. 
+ for (uint16 * t = m_transitions, + * const t_end = t + m_numTransition*m_numColumns; t != t_end; ++t) + { + *t = be::read<uint16>(states); + if (e.test(*t >= m_numStates, E_BADSTATE)) + { +- face.error_context((face.error_context() & 0xFFFF00) + EC_ATRANS + (((t - m_transitions) / m_numColumns) << 24)); ++ face.error_context((face.error_context() & 0xFFFF00) + EC_ATRANS + (((t - m_transitions) / m_numColumns) << 8)); + return face.error(e); + } + } + + State * s = m_states, + * const success_begin = m_states + m_numStates - m_numSuccess; + const RuleEntry * rule_map_end = m_ruleMap + be::peek<uint16>(o_rule_map + m_numSuccess*sizeof(uint16)); + for (size_t n = m_numStates; n; --n, ++s) +@@ -351,17 +355,18 @@ bool Pass::readStates(const byte * start + if (e.test(begin >= rule_map_end || end > rule_map_end || begin > end, E_BADRULEMAPPING)) + { + face.error_context((face.error_context() & 0xFFFF00) + EC_ARULEMAP + (n << 24)); + return face.error(e); + } + s->rules = begin; + s->rules_end = (end - begin <= FiniteStateMachine::MAX_RULES)? end : + begin + FiniteStateMachine::MAX_RULES; +- qsort(begin, end - begin, sizeof(RuleEntry), &cmpRuleEntry); ++ if (begin) // keep UBSan happy can't call qsort with null begin ++ qsort(begin, end - begin, sizeof(RuleEntry), &cmpRuleEntry); + } + + return true; + } + + bool Pass::readRanges(const byte * ranges, size_t num_ranges, Error &e) + { + m_cols = gralloc<uint16>(m_numGlyphs); +@@ -449,19 +454,19 @@ bool Pass::runFSM(FiniteStateMachine& fs + if (fsm.slots.context() < m_minPreCtxt) + return false; + + uint16 state = m_startStates[m_maxPreCtxt - fsm.slots.context()]; + uint8 free_slots = SlotMap::MAX_SLOTS; + do + { + fsm.slots.pushSlot(slot); +- if (--free_slots == 0 +- || slot->gid() >= m_numGlyphs ++ if (slot->gid() >= m_numGlyphs + || m_cols[slot->gid()] == 0xffffU ++ || --free_slots == 0 + || state >= m_numTransition) + return free_slots != 0; + + const uint16 * transitions = m_transitions + state*m_numColumns; + state = transitions[m_cols[slot->gid()]]; + if (state >= m_successStart) + fsm.rules.accumulate_rules(m_states[state]); + +@@ -627,37 +632,40 @@ bool Pass::testPassConstraint(Machine & + } + + + bool Pass::testConstraint(const Rule & r, Machine & m) const + { + const uint16 curr_context = m.slotMap().context(); + if (unsigned(r.sort - r.preContext) > m.slotMap().size() - curr_context + || curr_context - r.preContext < 0) return false; ++ ++ vm::slotref * map = m.slotMap().begin() + curr_context - r.preContext; ++ if (map[r.sort - 1] == 0) ++ return false; ++ + if (!*r.constraint) return true; + assert(r.constraint->constraint()); +- +- vm::slotref * map = m.slotMap().begin() + curr_context - r.preContext; + for (int n = r.sort; n && map; --n, ++map) + { + if (!*map) continue; + const int32 ret = r.constraint->run(m, map); + if (!ret || m.status() != Machine::finished) + return false; + } + + return true; + } + + + void SlotMap::collectGarbage(Slot * &aSlot) + { + for(Slot **s = begin(), *const *const se = end() - 1; s != se; ++s) { + Slot *& slot = *s; +- if(slot->isDeleted() || slot->isCopied()) ++ if(slot && (slot->isDeleted() || slot->isCopied())) + { + if (slot == aSlot) + aSlot = slot->prev() ? 
slot->prev() : slot->next(); + segment.freeSlot(slot); + } + } + } + +@@ -848,17 +856,16 @@ bool Pass::collisionShift(Segment *seg, + } + } + } + return true; + } + + bool Pass::collisionKern(Segment *seg, int dir, json * const dbgout) const + { +- KernCollider kerncoll(dbgout); + Slot *start = seg->first(); + float ymin = 1e38f; + float ymax = -1e38f; + const GlyphCache &gc = seg->getFace()->glyphs(); + + // phase 3 : handle kerning of clusters + #if !defined GRAPHITE2_NTRACING + if (dbgout) +@@ -871,17 +878,17 @@ bool Pass::collisionKern(Segment *seg, i + return false; + const SlotCollision * c = seg->collisionInfo(s); + const Rect &bbox = seg->theGlyphBBoxTemporary(s->gid()); + float y = s->origin().y + c->shift().y; + ymax = max(y + bbox.tr.y, ymax); + ymin = min(y + bbox.bl.y, ymin); + if (start && (c->flags() & (SlotCollision::COLL_KERN | SlotCollision::COLL_FIX)) + == (SlotCollision::COLL_KERN | SlotCollision::COLL_FIX)) +- resolveKern(seg, s, start, kerncoll, dir, ymin, ymax, dbgout); ++ resolveKern(seg, s, start, dir, ymin, ymax, dbgout); + if (c->flags() & SlotCollision::COLL_END) + start = NULL; + if (c->flags() & SlotCollision::COLL_START) + start = s; + } + + #if !defined GRAPHITE2_NTRACING + if (dbgout) +@@ -1010,17 +1017,17 @@ bool Pass::resolveCollisions(Segment *se + if (isCol) + { cFix->setFlags(cFix->flags() | SlotCollision::COLL_ISCOL | SlotCollision::COLL_KNOWN); } + else + { cFix->setFlags((cFix->flags() & ~SlotCollision::COLL_ISCOL) | SlotCollision::COLL_KNOWN); } + hasCol |= isCol; + return true; + } + +-float Pass::resolveKern(Segment *seg, Slot *slotFix, GR_MAYBE_UNUSED Slot *start, KernCollider &coll, int dir, ++float Pass::resolveKern(Segment *seg, Slot *slotFix, GR_MAYBE_UNUSED Slot *start, int dir, + float &ymin, float &ymax, json *const dbgout) const + { + Slot *nbor; // neighboring slot + float currSpace = 0.; + bool collides = false; + unsigned int space_count = 0; + Slot *base = slotFix; + while (base->attachedTo()) +@@ -1030,16 +1037,17 @@ float Pass::resolveKern(Segment *seg, Sl + + if (base != slotFix) + { + cFix->setFlags(cFix->flags() | SlotCollision::COLL_KERN | SlotCollision::COLL_FIX); + return 0; + } + bool seenEnd = (cFix->flags() & SlotCollision::COLL_END) != 0; + bool isInit = false; ++ KernCollider coll(dbgout); + + for (nbor = slotFix->next(); nbor; nbor = nbor->next()) + { + if (nbor->isChildOf(base)) + continue; + if (!gc.check(nbor->gid())) + return 0.; + const Rect &bb = seg->theGlyphBBoxTemporary(nbor->gid()); +diff --git a/gfx/graphite2/src/Segment.cpp b/gfx/graphite2/src/Segment.cpp +--- a/gfx/graphite2/src/Segment.cpp ++++ b/gfx/graphite2/src/Segment.cpp +@@ -419,16 +419,19 @@ Position Segment::positionSlots(const Fo + reverseSlots(); + temp = iStart; + iStart = iEnd; + iEnd = temp; + } + if (!iStart) iStart = m_first; + if (!iEnd) iEnd = m_last; + ++ if (!iStart || !iEnd) // only true for empty segments ++ return currpos; ++ + if (isRtl) + { + for (Slot * s = iEnd, * const end = iStart->prev(); s && s != end; s = s->prev()) + { + if (s->isBase()) + currpos = s->finalise(this, font, currpos, bbox, 0, clusterMin = currpos.x, isRtl, isFinal); + } + } +@@ -526,11 +529,14 @@ void Segment::doMirror(uint16 aMirror) + } + + bool Segment::initCollisions() + { + m_collisions = grzeroalloc<SlotCollision>(slotCount()); + if (!m_collisions) return false; + + for (Slot *p = m_first; p; p = p->next()) +- ::new (collisionInfo(p)) SlotCollision(this, p); ++ if (p->index() < slotCount()) ++ ::new (collisionInfo(p)) SlotCollision(this, p); ++ else ++ 
return false; + return true; + } +diff --git a/gfx/graphite2/src/Silf.cpp b/gfx/graphite2/src/Silf.cpp +--- a/gfx/graphite2/src/Silf.cpp ++++ b/gfx/graphite2/src/Silf.cpp +@@ -350,20 +350,20 @@ uint16 Silf::getClassGlyph(uint16 cid, u + } + return 0; + } + + + bool Silf::runGraphite(Segment *seg, uint8 firstPass, uint8 lastPass, int dobidi) const + { + assert(seg != 0); +- SlotMap map(*seg, m_dir); ++ unsigned int maxSize = seg->slotCount() * MAX_SEG_GROWTH_FACTOR; ++ SlotMap map(*seg, m_dir, maxSize); + FiniteStateMachine fsm(map, seg->getFace()->logger()); + vm::Machine m(map); +- unsigned int initSize = seg->slotCount(); + uint8 lbidi = m_bPass; + #if !defined GRAPHITE2_NTRACING + json * const dbgout = seg->getFace()->logger(); + #endif + + if (lastPass == 0) + { + if (firstPass == lastPass && lbidi == 0xFF) +@@ -419,13 +419,13 @@ bool Silf::runGraphite(Segment *seg, uin + + // test whether to reorder, prepare for positioning + bool reverse = (lbidi == 0xFF) && (seg->currdir() != ((m_dir & 1) ^ m_passes[i].reverseDir())); + if ((i >= 32 || (seg->passBits() & (1 << i)) == 0 || m_passes[i].collisionLoops()) + && !m_passes[i].runGraphite(m, fsm, reverse)) + return false; + // only subsitution passes can change segment length, cached subsegments are short for their text + if (m.status() != vm::Machine::finished +- || (seg->slotCount() && seg->slotCount() * MAX_SEG_GROWTH_FACTOR < initSize)) ++ || (seg->slotCount() && seg->slotCount() > maxSize)) + return false; + } + return true; + } +diff --git a/gfx/graphite2/src/Slot.cpp b/gfx/graphite2/src/Slot.cpp +--- a/gfx/graphite2/src/Slot.cpp ++++ b/gfx/graphite2/src/Slot.cpp +@@ -80,20 +80,20 @@ void Slot::set(const Slot & orig, int ch + + void Slot::update(int /*numGrSlots*/, int numCharInfo, Position &relpos) + { + m_before += numCharInfo; + m_after += numCharInfo; + m_position = m_position + relpos; + } + +-Position Slot::finalise(const Segment *seg, const Font *font, Position & base, Rect & bbox, uint8 attrLevel, float & clusterMin, bool rtl, bool isFinal) ++Position Slot::finalise(const Segment *seg, const Font *font, Position & base, Rect & bbox, uint8 attrLevel, float & clusterMin, bool rtl, bool isFinal, int depth) + { + SlotCollision *coll = NULL; +- if (attrLevel && m_attLevel > attrLevel) return Position(0, 0); ++ if (depth > 100 || (attrLevel && m_attLevel > attrLevel)) return Position(0, 0); + float scale = font ? 
font->scale() : 1.0f; + Position shift(m_shift.x * (rtl * -2 + 1) + m_just, m_shift.y); + float tAdvance = m_advance.x + m_just; + if (isFinal && (coll = seg->collisionInfo(this))) + { + const Position &collshift = coll->offset(); + if (!(coll->flags() & SlotCollision::COLL_KERN) || rtl) + shift = shift + collshift; +@@ -128,23 +128,23 @@ Position Slot::finalise(const Segment *s + if (glyphFace) + { + Rect ourBbox = glyphFace->theBBox() * scale + m_position; + bbox = bbox.widen(ourBbox); + } + + if (m_child && m_child != this && m_child->attachedTo() == this) + { +- Position tRes = m_child->finalise(seg, font, m_position, bbox, attrLevel, clusterMin, rtl, isFinal); ++ Position tRes = m_child->finalise(seg, font, m_position, bbox, attrLevel, clusterMin, rtl, isFinal, depth + 1); + if ((!m_parent || m_advance.x >= 0.5f) && tRes.x > res.x) res = tRes; + } + + if (m_parent && m_sibling && m_sibling != this && m_sibling->attachedTo() == m_parent) + { +- Position tRes = m_sibling->finalise(seg, font, base, bbox, attrLevel, clusterMin, rtl, isFinal); ++ Position tRes = m_sibling->finalise(seg, font, base, bbox, attrLevel, clusterMin, rtl, isFinal, depth + 1); + if (tRes.x > res.x) res = tRes; + } + + if (!m_parent && clusterMin < base.x) + { + Position adj = Position(m_position.x - clusterMin, 0.); + res += adj; + m_position += adj; +@@ -160,35 +160,35 @@ int32 Slot::clusterMetric(const Segment + return 0; + Rect bbox = seg->theGlyphBBoxTemporary(glyph()); + float clusterMin = 0.; + Position res = finalise(seg, NULL, base, bbox, attrLevel, clusterMin, rtl, false); + + switch (metrics(metric)) + { + case kgmetLsb : +- return static_cast<uint32>(bbox.bl.x); ++ return bbox.bl.x; + case kgmetRsb : +- return static_cast<uint32>(res.x - bbox.tr.x); ++ return res.x - bbox.tr.x; + case kgmetBbTop : +- return static_cast<uint32>(bbox.tr.y); ++ return bbox.tr.y; + case kgmetBbBottom : +- return static_cast<uint32>(bbox.bl.y); ++ return bbox.bl.y; + case kgmetBbLeft : +- return static_cast<uint32>(bbox.bl.x); ++ return bbox.bl.x; + case kgmetBbRight : +- return static_cast<uint32>(bbox.tr.x); ++ return bbox.tr.x; + case kgmetBbWidth : +- return static_cast<uint32>(bbox.tr.x - bbox.bl.x); ++ return bbox.tr.x - bbox.bl.x; + case kgmetBbHeight : +- return static_cast<uint32>(bbox.tr.y - bbox.bl.y); ++ return bbox.tr.y - bbox.bl.y; + case kgmetAdvWidth : +- return static_cast<uint32>(res.x); ++ return res.x; + case kgmetAdvHeight : +- return static_cast<uint32>(res.y); ++ return res.y; + default : + return 0; + } + } + + #define SLOTGETCOLATTR(x) { SlotCollision *c = seg->collisionInfo(this); return c ? 
int(c-> x) : 0; } + + int Slot::getAttr(const Segment *seg, attrCode ind, uint8 subindex) const +@@ -290,19 +290,32 @@ void Slot::setAttr(Segment *seg, attrCod + case gr_slatAdvX : m_advance.x = value; break; + case gr_slatAdvY : m_advance.y = value; break; + case gr_slatAttTo : + { + const uint16 idx = uint16(value); + if (idx < map.size() && map[idx]) + { + Slot *other = map[idx]; +- if (other == this || other == m_parent) break; +- if (m_parent) m_parent->removeChild(this); +- if (!other->isChildOf(this) && other->child(this)) ++ if (other == this || other == m_parent || other->isCopied()) break; ++ if (m_parent) { m_parent->removeChild(this); attachTo(NULL); } ++ Slot *pOther = other; ++ int count = 0; ++ bool foundOther = false; ++ while (pOther) ++ { ++ ++count; ++ if (pOther == this) foundOther = true; ++ pOther = pOther->attachedTo(); ++ } ++ for (pOther = m_child; pOther; pOther = pOther->m_child) ++ ++count; ++ for (pOther = m_sibling; pOther; pOther = pOther->m_sibling) ++ ++count; ++ if (count < 100 && !foundOther && other->child(this)) + { + attachTo(other); + if ((map.dir() != 0) ^ (idx > subindex)) + m_with = Position(advance(), 0); + else // normal match to previous root + m_attach = Position(other->advance(), 0); + } + } +@@ -416,41 +429,34 @@ bool Slot::sibling(Slot *ap) + m_sibling = ap; + else + return m_sibling->sibling(ap); + return true; + } + + bool Slot::removeChild(Slot *ap) + { +- if (this == ap || !m_child) return false; ++ if (this == ap || !m_child || !ap) return false; + else if (ap == m_child) + { + Slot *nSibling = m_child->nextSibling(); +- m_child->removeSibling(nSibling); ++ m_child->nextSibling(NULL); + m_child = nSibling; + return true; + } +- else +- return m_child->removeSibling(ap); +- return true; +-} +- +-bool Slot::removeSibling(Slot *ap) +-{ +- if (this == ap || !m_sibling) return false; +- else if (ap == m_sibling) ++ for (Slot *p = m_child; p; p = p->m_sibling) + { +- m_sibling = m_sibling->nextSibling(); +- if (m_sibling) ap->removeSibling(m_sibling); +- return true; ++ if (p->m_sibling && p->m_sibling == ap) ++ { ++ p->m_sibling = p->m_sibling->m_sibling; ++ ap->nextSibling(NULL); ++ return true; ++ } + } +- else +- return m_sibling->removeSibling(ap); +- return true; ++ return false; + } + + void Slot::setGlyph(Segment *seg, uint16 glyphid, const GlyphFace * theGlyph) + { + m_glyphid = glyphid; + m_bidiCls = -1; + if (!theGlyph) + { +@@ -475,21 +481,23 @@ void Slot::setGlyph(Segment *seg, uint16 + if (seg->silf()->aPassBits()) + { + seg->mergePassBits(theGlyph->attrs()[seg->silf()->aPassBits()]); + if (seg->silf()->numPasses() > 16) + seg->mergePassBits(theGlyph->attrs()[seg->silf()->aPassBits()+1] << 16); + } + } + +-void Slot::floodShift(Position adj) ++void Slot::floodShift(Position adj, int depth) + { ++ if (depth > 100) ++ return; + m_position += adj; +- if (m_child) m_child->floodShift(adj); +- if (m_sibling) m_sibling->floodShift(adj); ++ if (m_child) m_child->floodShift(adj, depth + 1); ++ if (m_sibling) m_sibling->floodShift(adj, depth + 1); + } + + void SlotJustify::LoadSlot(const Slot *s, const Segment *seg) + { + for (int i = seg->silf()->numJustLevels() - 1; i >= 0; --i) + { + Justinfo *justs = seg->silf()->justAttrs() + i; + int16 *v = values + i * NUMJUSTPARAMS; +@@ -514,15 +522,14 @@ Slot * Slot::nextInCluster(const Slot *s + return base->nextSibling(); + s = base; + } + return NULL; + } + + bool Slot::isChildOf(const Slot *base) const + { +- if (m_parent == base) +- return true; +- else if (!m_parent) +- return false; +- 
else +- return m_parent->isChildOf(base); ++ for (Slot *p = m_parent; p; p = p->m_parent) ++ if (p == base) ++ return true; ++ return false; + } ++ +diff --git a/gfx/graphite2/src/TtfUtil.cpp b/gfx/graphite2/src/TtfUtil.cpp +--- a/gfx/graphite2/src/TtfUtil.cpp ++++ b/gfx/graphite2/src/TtfUtil.cpp +@@ -891,25 +891,27 @@ const void * FindCmapSubtable(const void + ----------------------------------------------------------------------------------------------*/ + bool CheckCmapSubtable4(const void * pCmapSubtable4, const void * pCmapEnd /*, unsigned int maxgid*/) + { + size_t table_len = (const byte *)pCmapEnd - (const byte *)pCmapSubtable4; + if (!pCmapSubtable4) return false; + const Sfnt::CmapSubTable * pTable = reinterpret_cast<const Sfnt::CmapSubTable *>(pCmapSubtable4); + // Bob H say some freeware TT fonts have version 1 (eg, CALIGULA.TTF) + // so don't check subtable version. 21 Mar 2002 spec changes version to language. +- if (be::swap(pTable->format) != 4) return false; ++ if (table_len < sizeof(*pTable) || be::swap(pTable->format) != 4) return false; + const Sfnt::CmapSubTableFormat4 * pTable4 = reinterpret_cast<const Sfnt::CmapSubTableFormat4 *>(pCmapSubtable4); ++ if (table_len < sizeof(*pTable4)) ++ return false; + uint16 length = be::swap(pTable4->length); + if (length > table_len) + return false; + if (length < sizeof(Sfnt::CmapSubTableFormat4)) + return false; + uint16 nRanges = be::swap(pTable4->seg_count_x2) >> 1; +- if (length < sizeof(Sfnt::CmapSubTableFormat4) + 4 * nRanges * sizeof(uint16)) ++ if (!nRanges || length < sizeof(Sfnt::CmapSubTableFormat4) + 4 * nRanges * sizeof(uint16)) + return false; + // check last range is properly terminated + uint16 chEnd = be::peek<uint16>(pTable4->end_code + nRanges - 1); + if (chEnd != 0xFFFF) + return false; + #if 0 + int lastend = -1; + for (int i = 0; i < nRanges; ++i) +@@ -999,17 +1001,17 @@ gid16 CmapSubtable4Lookup(const void * p + uint16 idRangeOffset = be::peek<uint16>(pMid += nSeg); + + if (idRangeOffset == 0) + return (uint16)(idDelta + nUnicodeId); // must use modulus 2^16 + + // Look up value in glyphIdArray + const ptrdiff_t offset = (nUnicodeId - chStart) + (idRangeOffset >> 1) + + (pMid - reinterpret_cast<const uint16 *>(pTable)); +- if (offset * 2 >= be::swap<uint16>(pTable->length)) ++ if (offset * 2 + 1 >= be::swap<uint16>(pTable->length)) + return 0; + gid16 nGlyphId = be::peek<uint16>(reinterpret_cast<const uint16 *>(pTable)+offset); + // If this value is 0, return 0. Else add the idDelta + return nGlyphId ? nGlyphId + idDelta : 0; + } + + return 0; + } +@@ -1081,19 +1083,21 @@ unsigned int CmapSubtable4NextCodepoint( + /*---------------------------------------------------------------------------------------------- + Check the Microsoft UCS-4 subtable for expected values. 
+ ----------------------------------------------------------------------------------------------*/ + bool CheckCmapSubtable12(const void *pCmapSubtable12, const void *pCmapEnd /*, unsigned int maxgid*/) + { + size_t table_len = (const byte *)pCmapEnd - (const byte *)pCmapSubtable12; + if (!pCmapSubtable12) return false; + const Sfnt::CmapSubTable * pTable = reinterpret_cast<const Sfnt::CmapSubTable *>(pCmapSubtable12); +- if (be::swap(pTable->format) != 12) ++ if (table_len < sizeof(*pTable) || be::swap(pTable->format) != 12) + return false; + const Sfnt::CmapSubTableFormat12 * pTable12 = reinterpret_cast<const Sfnt::CmapSubTableFormat12 *>(pCmapSubtable12); ++ if (table_len < sizeof(*pTable12)) ++ return false; + uint32 length = be::swap(pTable12->length); + if (length > table_len) + return false; + if (length < sizeof(Sfnt::CmapSubTableFormat12)) + return false; + uint32 num_groups = be::swap(pTable12->num_groups); + if (num_groups > 0x10000000 || length != (sizeof(Sfnt::CmapSubTableFormat12) + (num_groups - 1) * sizeof(uint32) * 3)) + return false; +diff --git a/gfx/graphite2/src/inc/Code.h b/gfx/graphite2/src/inc/Code.h +--- a/gfx/graphite2/src/inc/Code.h ++++ b/gfx/graphite2/src/inc/Code.h +@@ -81,17 +81,17 @@ private: + _modify, + _delete; + mutable bool _own; + + void release_buffers() throw (); + void failure(const status_t) throw(); + + public: +- static size_t estimateCodeDataOut(size_t num_bytecodes); ++ static size_t estimateCodeDataOut(size_t num_bytecodes, int nRules, int nSlots); + + Code() throw(); + Code(bool is_constraint, const byte * bytecode_begin, const byte * const bytecode_end, + uint8 pre_context, uint16 rule_length, const Silf &, const Face &, + enum passtype pt, byte * * const _out = 0); + Code(const Machine::Code &) throw(); + ~Code() throw(); + +@@ -107,19 +107,21 @@ public: + void externalProgramMoved(ptrdiff_t) throw(); + + int32 run(Machine &m, slotref * & map) const; + + CLASS_NEW_DELETE; + }; + + inline +-size_t Machine::Code::estimateCodeDataOut(size_t n_bc) ++size_t Machine::Code::estimateCodeDataOut(size_t n_bc, int nRules, int nSlots) + { +- return (n_bc + 1) * (sizeof(instr)+sizeof(byte)); ++ // max is: all codes are instructions + 1 for each rule + max tempcopies ++ // allocate space for separate maximal code and data then merge them later ++ return (n_bc + nRules + nSlots) * sizeof(instr) + n_bc * sizeof(byte); + } + + + inline Machine::Code::Code() throw() + : _code(0), _data(0), _data_size(0), _instr_count(0), _max_ref(0), + _status(loaded), _constraint(false), _modify(false), _delete(false), + _own(false) + { +diff --git a/gfx/graphite2/src/inc/Face.h b/gfx/graphite2/src/inc/Face.h +--- a/gfx/graphite2/src/inc/Face.h ++++ b/gfx/graphite2/src/inc/Face.h +@@ -82,17 +82,17 @@ public: + uint16 languageForLocale(const char * locale) const; + + // Features + uint16 numFeatures() const; + const FeatureRef * featureById(uint32 id) const; + const FeatureRef * feature(uint16 index) const; + + // Glyph related +- uint16 getGlyphMetric(uint16 gid, uint8 metric) const; ++ int32 getGlyphMetric(uint16 gid, uint8 metric) const; + uint16 findPseudo(uint32 uid) const; + + // Errors + unsigned int error() const { return m_error; } + bool error(Error e) { m_error = e.error(); return false; } + unsigned int error_context() const { return m_error; } + void error_context(unsigned int errcntxt) { m_errcntxt = errcntxt; } + +diff --git a/gfx/graphite2/src/inc/GlyphFace.h b/gfx/graphite2/src/inc/GlyphFace.h +--- a/gfx/graphite2/src/inc/GlyphFace.h ++++ 
b/gfx/graphite2/src/inc/GlyphFace.h +@@ -46,17 +46,17 @@ class GlyphFace + public: + GlyphFace(); + template<typename I> + GlyphFace(const Rect & bbox, const Position & adv, I first, const I last); + + const Position & theAdvance() const; + const Rect & theBBox() const { return m_bbox; } + const sparse & attrs() const { return m_attrs; } +- uint16 getMetric(uint8 metric) const; ++ int32 getMetric(uint8 metric) const; + + CLASS_NEW_DELETE; + private: + Rect m_bbox; // bounding box metrics in design units + Position m_advance; // Advance width and height in design units + sparse m_attrs; + }; + +diff --git a/gfx/graphite2/src/inc/Machine.h b/gfx/graphite2/src/inc/Machine.h +--- a/gfx/graphite2/src/inc/Machine.h ++++ b/gfx/graphite2/src/inc/Machine.h +@@ -179,17 +179,17 @@ inline SlotMap& Machine::slotMap() const + return _map; + } + + inline Machine::status_t Machine::status() const throw() + { + return _status; + } + +-inline void Machine::check_final_stack(const int32 * const sp) ++inline void Machine::check_final_stack(const stack_t * const sp) + { + stack_t const * const base = _stack + STACK_GUARD, + * const limit = base + STACK_MAX; + if (sp < base) _status = stack_underflow; // This should be impossible now. + else if (sp >= limit) _status = stack_overflow; // So should this. + else if (sp != base) _status = stack_not_empty; + } + +diff --git a/gfx/graphite2/src/inc/Pass.h b/gfx/graphite2/src/inc/Pass.h +--- a/gfx/graphite2/src/inc/Pass.h ++++ b/gfx/graphite2/src/inc/Pass.h +@@ -76,17 +76,17 @@ private: + void dumpRuleEventConsidered(const FiniteStateMachine & fsm, const RuleEntry & re) const; + void dumpRuleEventOutput(const FiniteStateMachine & fsm, vm::Machine & m, const Rule & r, Slot * os) const; + void adjustSlot(int delta, Slot * & slot_out, SlotMap &) const; + bool collisionShift(Segment *seg, int dir, json * const dbgout) const; + bool collisionKern(Segment *seg, int dir, json * const dbgout) const; + bool collisionFinish(Segment *seg, GR_MAYBE_UNUSED json * const dbgout) const; + bool resolveCollisions(Segment *seg, Slot *slot, Slot *start, ShiftCollider &coll, bool isRev, + int dir, bool &moved, bool &hasCol, json * const dbgout) const; +- float resolveKern(Segment *seg, Slot *slot, Slot *start, KernCollider &coll, int dir, ++ float resolveKern(Segment *seg, Slot *slot, Slot *start, int dir, + float &ymin, float &ymax, json *const dbgout) const; + + const Silf * m_silf; + uint16 * m_cols; + Rule * m_rules; // rules + RuleEntry * m_ruleMap; + uint16 * m_startStates; // prectxt length + uint16 * m_transitions; +diff --git a/gfx/graphite2/src/inc/Rule.h b/gfx/graphite2/src/inc/Rule.h +--- a/gfx/graphite2/src/inc/Rule.h ++++ b/gfx/graphite2/src/inc/Rule.h +@@ -97,17 +97,17 @@ bool State::empty() const + return rules_end == rules; + } + + + class SlotMap + { + public: + enum {MAX_SLOTS=64}; +- SlotMap(Segment & seg, uint8 direction); ++ SlotMap(Segment & seg, uint8 direction, int maxSize); + + Slot * * begin(); + Slot * * end(); + size_t size() const; + unsigned short context() const; + void reset(Slot &, unsigned short); + + Slot * const & operator[](int n) const; +@@ -116,23 +116,25 @@ public: + void collectGarbage(Slot *& aSlot); + + Slot * highwater() { return m_highwater; } + void highwater(Slot *s) { m_highwater = s; m_highpassed = false; } + bool highpassed() const { return m_highpassed; } + void highpassed(bool v) { m_highpassed = v; } + + uint8 dir() const { return m_dir; } ++ int decMax() { return --m_maxSize; } + + Segment & segment; + private: + Slot * 
m_slot_map[MAX_SLOTS+1]; + unsigned short m_size; + unsigned short m_precontext; + Slot * m_highwater; ++ int m_maxSize; + uint8 m_dir; + bool m_highpassed; + }; + + + class FiniteStateMachine + { + public: +@@ -237,18 +239,19 @@ void FiniteStateMachine::Rules::accumula + return; + } + } + while (rre != rrend && out != lrend) { *out++ = *rre++; } + m_end = out; + } + + inline +-SlotMap::SlotMap(Segment & seg, uint8 direction) +-: segment(seg), m_size(0), m_precontext(0), m_highwater(0), m_dir(direction), m_highpassed(false) ++SlotMap::SlotMap(Segment & seg, uint8 direction, int maxSize) ++: segment(seg), m_size(0), m_precontext(0), m_highwater(0), ++ m_maxSize(maxSize), m_dir(direction), m_highpassed(false) + { + m_slot_map[0] = 0; + } + + inline + Slot * * SlotMap::begin() + { + return &m_slot_map[1]; // allow map to go 1 before slot_map when inserting +diff --git a/gfx/graphite2/src/inc/Segment.h b/gfx/graphite2/src/inc/Segment.h +--- a/gfx/graphite2/src/inc/Segment.h ++++ b/gfx/graphite2/src/inc/Segment.h +@@ -35,17 +35,17 @@ of the License or (at your option) any l + #include "inc/FeatureVal.h" + #include "inc/GlyphCache.h" + #include "inc/GlyphFace.h" + #include "inc/Slot.h" + #include "inc/Position.h" + #include "inc/List.h" + #include "inc/Collider.h" + +-#define MAX_SEG_GROWTH_FACTOR 256 ++#define MAX_SEG_GROWTH_FACTOR 64 + + namespace graphite2 { + + typedef Vector<Features> FeatureList; + typedef Vector<Slot *> SlotRope; + typedef Vector<int16 *> AttributeRope; + typedef Vector<SlotJustify *> JustifyRope; + +@@ -154,17 +154,17 @@ public: + int8 getSlotBidiClass(Slot *s) const; + void doMirror(uint16 aMirror); + Slot *addLineEnd(Slot *nSlot); + void delLineEnd(Slot *s); + bool hasJustification() const { return m_justifies.size() != 0; } + void reverseSlots(); + + bool isWhitespace(const int cid) const; +- bool hasCollisionInfo() const { return (m_flags & SEG_HASCOLLISIONS); } ++ bool hasCollisionInfo() const { return (m_flags & SEG_HASCOLLISIONS) && m_collisions; } + SlotCollision *collisionInfo(const Slot *s) const { return m_collisions ? 
m_collisions + s->index() : 0; } + CLASS_NEW_DELETE + + public: //only used by: GrSegment* makeAndInitialize(const GrFont *font, const GrFace *face, uint32 script, const FeaturesHandle& pFeats/*must not be IsNull*/, encform enc, const void* pStart, size_t nChars, int dir); + bool read_text(const Face *face, const Features* pFeats/*must not be NULL*/, gr_encform enc, const void*pStart, size_t nChars); + void finalise(const Font *font, bool reverse=false); + float justify(Slot *pSlot, const Font *font, float width, enum justFlags flags, Slot *pFirst, Slot *pLast); + bool initCollisions(); +diff --git a/gfx/graphite2/src/inc/Slot.h b/gfx/graphite2/src/inc/Slot.h +--- a/gfx/graphite2/src/inc/Slot.h ++++ b/gfx/graphite2/src/inc/Slot.h +@@ -92,17 +92,17 @@ public: + void adjKern(const Position &pos) { m_shift = m_shift + pos; m_advance = m_advance + pos; } + void origin(const Position &pos) { m_position = pos + m_shift; } + void originate(int ind) { m_original = ind; } + int original() const { return m_original; } + void before(int ind) { m_before = ind; } + void after(int ind) { m_after = ind; } + bool isBase() const { return (!m_parent); } + void update(int numSlots, int numCharInfo, Position &relpos); +- Position finalise(const Segment* seg, const Font* font, Position & base, Rect & bbox, uint8 attrLevel, float & clusterMin, bool rtl, bool isFinal); ++ Position finalise(const Segment* seg, const Font* font, Position & base, Rect & bbox, uint8 attrLevel, float & clusterMin, bool rtl, bool isFinal, int depth = 0); + bool isDeleted() const { return (m_flags & DELETED) ? true : false; } + void markDeleted(bool state) { if (state) m_flags |= DELETED; else m_flags &= ~DELETED; } + bool isCopied() const { return (m_flags & COPIED) ? true : false; } + void markCopied(bool state) { if (state) m_flags |= COPIED; else m_flags &= ~COPIED; } + bool isPositioned() const { return (m_flags & POSITIONED) ? 
true : false; } + void markPositioned(bool state) { if (state) m_flags |= POSITIONED; else m_flags &= ~POSITIONED; } + bool isInsertBefore() const { return !(m_flags & INSERTED); } + uint8 getBidiLevel() const { return m_bidiLevel; } +@@ -123,20 +123,19 @@ public: + Position attachOffset() const { return m_attach - m_with; } + Slot* firstChild() const { return m_child; } + void firstChild(Slot *ap) { m_child = ap; } + bool child(Slot *ap); + Slot* nextSibling() const { return m_sibling; } + void nextSibling(Slot *ap) { m_sibling = ap; } + bool sibling(Slot *ap); + bool removeChild(Slot *ap); +- bool removeSibling(Slot *ap); + int32 clusterMetric(const Segment* seg, uint8 metric, uint8 attrLevel, bool rtl); + void positionShift(Position a) { m_position += a; } +- void floodShift(Position adj); ++ void floodShift(Position adj, int depth = 0); + float just() const { return m_just; } + void just(float j) { m_just = j; } + Slot *nextInCluster(const Slot *s) const; + bool isChildOf(const Slot *base) const; + + CLASS_NEW_DELETE + + private: +diff --git a/gfx/graphite2/src/inc/UtfCodec.h b/gfx/graphite2/src/inc/UtfCodec.h +--- a/gfx/graphite2/src/inc/UtfCodec.h ++++ b/gfx/graphite2/src/inc/UtfCodec.h +@@ -35,16 +35,17 @@ typedef uint32 uchar_t; + + template <int N> + struct _utf_codec + { + typedef uchar_t codeunit_t; + + static void put(codeunit_t * cp, const uchar_t , int8 & len) throw(); + static uchar_t get(const codeunit_t * cp, int8 & len) throw(); ++ static bool validate(const codeunit_t * s, const codeunit_t * e) throw(); + }; + + + template <> + struct _utf_codec<32> + { + private: + static const uchar_t limit = 0x110000; +@@ -58,16 +59,22 @@ public: + } + + inline + static uchar_t get(const codeunit_t * cp, int8 & l) throw() + { + if (cp[0] < limit) { l = 1; return cp[0]; } + else { l = -1; return 0xFFFD; } + } ++ ++ inline ++ static bool validate(codeunit_t * s, codeunit_t * e) throw() ++ { ++ return e > s; ++ } + }; + + + template <> + struct _utf_codec<16> + { + private: + static const int32 lead_offset = 0xD800 - (0x10000 >> 10); +@@ -88,22 +95,31 @@ public: + } + + inline + static uchar_t get(const codeunit_t * cp, int8 & l) throw() + { + const uint32 uh = cp[0]; + l = 1; + +- if (0xD800 > uh || uh > 0xDFFF) { return uh; } ++ if (uh < 0xD800|| uh > 0xDFFF) { return uh; } + const uint32 ul = cp[1]; +- if (uh > 0xDBFF || 0xDC00 > ul || ul > 0xDFFF) { l = -1; return 0xFFFD; } ++ if (uh > 0xDBFF || ul < 0xDC00 || ul > 0xDFFF) { l = -1; return 0xFFFD; } + ++l; + return (uh<<10) + ul + surrogate_offset; + } ++ ++ inline ++ static bool validate(codeunit_t * s, codeunit_t * e) throw() ++ { ++ const ptrdiff_t n = e-s; ++ if (n <= 0) return n == 0; ++ const uint32 u = *(s+(n-1)); // Get the last codepoint ++ return (u < 0xD800 || u > 0xDBFF); ++ } + }; + + + template <> + struct _utf_codec<8> + { + private: + static const int8 sz_lut[16]; +@@ -143,16 +159,34 @@ public: + + if (l != seq_sz || toolong) + { + l = -l; + return 0xFFFD; + } + return u; + } ++ ++ inline ++ static bool validate(codeunit_t * s, codeunit_t * e) throw() ++ { ++ const ptrdiff_t n = e-s; ++ if (n <= 0) return n == 0; ++ s += (n-1); ++ if (*s < 0x80) return true; ++ if (*s >= 0xC0) return false; ++ if (n == 1) return true; ++ if (*--s < 0x80) return true; ++ if (*s >= 0xe0) return false; ++ if (n == 2 || *s >= 0xC0) return true; ++ if (*--s < 0x80) return true; ++ if (*s >= 0xF0) return false; ++ return true; ++ } ++ + }; + + + template <typename C> + class _utf_iterator + { + typedef _utf_codec<sizeof(C)*8> codec; + +@@ 
-195,16 +229,21 @@ public: + + template <typename C> + struct utf + { + typedef typename _utf_codec<sizeof(C)*8>::codeunit_t codeunit_t; + + typedef _utf_iterator<C> iterator; + typedef _utf_iterator<const C> const_iterator; ++ ++ inline ++ static bool validate(codeunit_t * s, codeunit_t * e) throw() { ++ return _utf_codec<sizeof(C)*8>::validate(s,e); ++ } + }; + + + typedef utf<uint32> utf32; + typedef utf<uint16> utf16; + typedef utf<uint8> utf8; + + } // namespace graphite2 +diff --git a/gfx/graphite2/src/inc/opcode_table.h b/gfx/graphite2/src/inc/opcode_table.h +--- a/gfx/graphite2/src/inc/opcode_table.h ++++ b/gfx/graphite2/src/inc/opcode_table.h +@@ -113,13 +113,13 @@ static const opcode_t opcode_table[] = + {{NILOP,NILOP}, 0, "PUT_SUBS3"}, + {{do_(put_glyph), NILOP}, 2, "PUT_GLYPH"}, // output_class output_class + {{do2(push_glyph_attr)}, 3, "PUSH_GLYPH_ATTR"}, // gattrnum gattrnum slot + {{do2(push_att_to_glyph_attr)}, 3, "PUSH_ATT_TO_GLYPH_ATTR"}, // gattrnum gattrnum slot + {{do2(bor)}, 0, "BITOR"}, + {{do2(band)}, 0, "BITAND"}, + {{do2(bnot)}, 0, "BITNOT"}, // 0x40 + {{do2(setbits)}, 4, "BITSET"}, +- {{do2(set_feat)}, 2, "SET_FEAT"}, ++ {{do_(set_feat), NILOP}, 2, "SET_FEAT"}, // featidx slot + // private opcodes for internal use only, comes after all other on disk opcodes. + {{do_(temp_copy), NILOP}, 0, "TEMP_COPY"} + }; + +diff --git a/gfx/graphite2/src/inc/opcodes.h b/gfx/graphite2/src/inc/opcodes.h +--- a/gfx/graphite2/src/inc/opcodes.h ++++ b/gfx/graphite2/src/inc/opcodes.h +@@ -62,17 +62,18 @@ of the License or (at your option) any l + // ip = The current instruction pointer + // endPos = Position of advance of last cluster + // dir = writing system directionality of the font + + + // #define NOT_IMPLEMENTED assert(false) + #define NOT_IMPLEMENTED + +-#define binop(op) const int32 a = pop(); *sp = int32(*sp) op a ++#define binop(op) const uint32 a = pop(); *sp = uint32(*sp) op a ++#define sbinop(op) const int32 a = pop(); *sp = int32(*sp) op a + #define use_params(n) dp += n + + #define declare_params(n) const byte * param = dp; \ + use_params(n); + + #define push(n) { *++sp = n; } + #define pop() (*sp--) + #define slotat(x) (map[(x)]) +@@ -125,17 +126,17 @@ STARTOP(sub) + ENDOP + + STARTOP(mul) + binop(*); + ENDOP + + STARTOP(div_) + if (*sp == 0) DIE; +- binop(/); ++ sbinop(/); + ENDOP + + STARTOP(min_) + const int32 a = pop(), b = *sp; + if (a < b) *sp = a; + ENDOP + + STARTOP(max_) +@@ -176,29 +177,29 @@ STARTOP(equal) + binop(==); + ENDOP + + STARTOP(not_eq_) + binop(!=); + ENDOP + + STARTOP(less) +- binop(<); ++ sbinop(<); + ENDOP + + STARTOP(gtr) +- binop(>); ++ sbinop(>); + ENDOP + + STARTOP(less_eq) +- binop(<=); ++ sbinop(<=); + ENDOP + + STARTOP(gtr_eq) +- binop(>=); ++ sbinop(>=); + ENDOP + + STARTOP(next) + if (map - &smap[0] >= int(smap.size())) DIE + if (is) + { + if (is == smap.highwater()) + smap.highpassed(true); +@@ -237,17 +238,17 @@ STARTOP(put_subs_8bit_obs) + index = seg.findClassIndex(input_class, slot->gid()); + is->setGlyph(&seg, seg.getClassGlyph(output_class, index)); + } + ENDOP + + STARTOP(put_copy) + declare_params(1); + const int slot_ref = int8(*param); +- if (is) ++ if (is && !is->isDeleted()) + { + slotref ref = slotat(slot_ref); + if (ref && ref != is) + { + int16 *tempUserAttrs = is->userAttrs(); + if (is->attachedTo() || is->firstChild()) DIE + Slot *prev = is->prev(); + Slot *next = is->next(); +@@ -262,16 +263,17 @@ STARTOP(put_copy) + is->attachedTo()->child(is); + } + is->markCopied(false); + is->markDeleted(false); + } + ENDOP + + 
STARTOP(insert) ++ if (smap.decMax() <= 0) DIE; + Slot *newSlot = seg.newSlot(); + if (!newSlot) DIE; + Slot *iss = is; + while (iss && iss->isDeleted()) iss = iss->next(); + if (!iss) + { + if (seg.last()) + { +@@ -550,31 +552,31 @@ ENDOP + + STARTOP(iattr_add) + declare_params(2); + const attrCode slat = attrCode(uint8(param[0])); + const size_t idx = uint8(param[1]); + const int val = int(pop()); + if ((slat == gr_slatPosX || slat == gr_slatPosY) && (flags & POSITIONED) == 0) + { +- seg.positionSlots(0, *smap.begin(), *(smap.end()-1), dir); ++ seg.positionSlots(0, *smap.begin(), *(smap.end()-1), seg.currdir()); + flags |= POSITIONED; + } + int res = is->getAttr(&seg, slat, idx); + is->setAttr(&seg, slat, idx, val + res, smap); + ENDOP + + STARTOP(iattr_sub) + declare_params(2); + const attrCode slat = attrCode(uint8(param[0])); + const size_t idx = uint8(param[1]); + const int val = int(pop()); + if ((slat == gr_slatPosX || slat == gr_slatPosY) && (flags & POSITIONED) == 0) + { +- seg.positionSlots(0, *smap.begin(), *(smap.end()-1), dir); ++ seg.positionSlots(0, *smap.begin(), *(smap.end()-1), seg.currdir()); + flags |= POSITIONED; + } + int res = is->getAttr(&seg, slat, idx); + is->setAttr(&seg, slat, idx, res - val, smap); + ENDOP + + STARTOP(push_proc_state) + use_params(1); + |