flow like the river
This commit is contained in:
commit
013fe673f3
42435 changed files with 5764238 additions and 0 deletions
2
BACK_BACK/node_modules/unicode-trie/.npmignore
generated
vendored
Executable file
2
BACK_BACK/node_modules/unicode-trie/.npmignore
generated
vendored
Executable file
|
|
@@ -0,0 +1,2 @@
|
|||
node_modules/
|
||||
coverage.html
|
||||
13
BACK_BACK/node_modules/unicode-trie/Makefile
generated
vendored
Executable file
13
BACK_BACK/node_modules/unicode-trie/Makefile
generated
vendored
Executable file
|
|
@@ -0,0 +1,13 @@
|
|||
test:
|
||||
@./node_modules/.bin/mocha
|
||||
|
||||
coverage:
|
||||
@./node_modules/.bin/mocha --require coverage.js --reporter html-cov > coverage.html
|
||||
|
||||
build:
|
||||
coffee --bare -c *.coffee
|
||||
|
||||
clean:
|
||||
rm -rf index.js builder.js
|
||||
|
||||
.PHONY: test coverage
|
||||
76
BACK_BACK/node_modules/unicode-trie/README.md
generated
vendored
Executable file
76
BACK_BACK/node_modules/unicode-trie/README.md
generated
vendored
Executable file
|
|
@@ -0,0 +1,76 @@
|
|||
# unicode-trie
|
||||
A data structure for fast Unicode character metadata lookup, ported from ICU
|
||||
|
||||
## Background
|
||||
|
||||
When implementing many Unicode algorithms such as text segmentation,
|
||||
normalization, bidi processing, etc., fast access to character metadata
|
||||
is crucial to good performance. There over a million code points in the
|
||||
Unicode standard, many of which produce the same result when looked up,
|
||||
so an array or hash table is not appropriate - those data structures are
|
||||
fast but would require a lot of memory. The data is generally
|
||||
grouped in ranges, so you could do a binary search, but that is not
|
||||
fast enough for some applications.
|
||||
|
||||
The [International Components for Unicode](http://site.icu-project.org) (ICU) project
|
||||
came up with a data structure based on a [Trie](http://en.wikipedia.org/wiki/Trie) that provides fast access
|
||||
to Unicode metadata. The range data is precompiled to a serialized
|
||||
and flattened trie, which is then used at runtime to lookup the necessary
|
||||
data. According to my own tests, this is generally at least 50% faster
|
||||
than binary search, with not too much additional memory required.
|
||||
|
||||
## Installation
|
||||
|
||||
npm install unicode-trie
|
||||
|
||||
## Building a Trie
|
||||
|
||||
Unicode Tries are generally precompiled from data in the Unicode database
|
||||
for faster runtime performance. To build a Unicode Trie, use the
|
||||
`UnicodeTrieBuilder` class.
|
||||
|
||||
```coffeescript
|
||||
UnicodeTrieBuilder = require 'unicode-trie/builder'
|
||||
|
||||
# create a trie
|
||||
t = new UnicodeTrieBuilder
|
||||
|
||||
# optional parameters for default value, and error value
|
||||
# if not provided, both are set to 0
|
||||
t = new UnicodeTrieBuilder 10, 999
|
||||
|
||||
# set individual values and ranges
|
||||
t.set 0x4567, 99
|
||||
t.setRange 0x40, 0xe7, 0x1234
|
||||
|
||||
# you can lookup a value if you like
|
||||
t.get 0x4567 # => 99
|
||||
|
||||
# get a compiled trie (returns a UnicodeTrie object)
|
||||
trie = t.freeze()
|
||||
|
||||
# write compressed trie to a binary file
|
||||
fs.writeFile 'data.trie', t.toBuffer()
|
||||
```
|
||||
|
||||
## Using a precompiled Trie
|
||||
|
||||
Once you've built a precompiled trie, you can load it into the
|
||||
`UnicodeTrie` class, which is a readonly representation of the
|
||||
trie. From there, you can lookup values.
|
||||
|
||||
```coffeescript
|
||||
UnicodeTrie = require 'unicode-trie'
|
||||
fs = require 'fs'
|
||||
|
||||
# load serialized trie from binary file
|
||||
data = fs.readFileSync 'data.trie'
|
||||
trie = new UnicodeTrie data
|
||||
|
||||
# lookup a value
|
||||
trie.get 0x4567 # => 99
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
810
BACK_BACK/node_modules/unicode-trie/builder.coffee
generated
vendored
Executable file
810
BACK_BACK/node_modules/unicode-trie/builder.coffee
generated
vendored
Executable file
|
|
@@ -0,0 +1,810 @@
|
|||
UnicodeTrie = require './'
|
||||
pako = require 'pako'
|
||||
|
||||
class UnicodeTrieBuilder
|
||||
# Shift size for getting the index-1 table offset.
|
||||
SHIFT_1 = 6 + 5
|
||||
|
||||
# Shift size for getting the index-2 table offset.
|
||||
SHIFT_2 = 5
|
||||
|
||||
# Difference between the two shift sizes,
|
||||
# for getting an index-1 offset from an index-2 offset. 6=11-5
|
||||
SHIFT_1_2 = SHIFT_1 - SHIFT_2
|
||||
|
||||
# Number of index-1 entries for the BMP. 32=0x20
|
||||
# This part of the index-1 table is omitted from the serialized form.
|
||||
OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> SHIFT_1
|
||||
|
||||
# Number of code points per index-1 table entry. 2048=0x800
|
||||
CP_PER_INDEX_1_ENTRY = 1 << SHIFT_1
|
||||
|
||||
# Number of entries in an index-2 block. 64=0x40
|
||||
INDEX_2_BLOCK_LENGTH = 1 << SHIFT_1_2
|
||||
|
||||
# Mask for getting the lower bits for the in-index-2-block offset. */
|
||||
INDEX_2_MASK = INDEX_2_BLOCK_LENGTH - 1
|
||||
|
||||
# Number of entries in a data block. 32=0x20
|
||||
DATA_BLOCK_LENGTH = 1 << SHIFT_2
|
||||
|
||||
# Mask for getting the lower bits for the in-data-block offset.
|
||||
DATA_MASK = DATA_BLOCK_LENGTH - 1
|
||||
|
||||
# Shift size for shifting left the index array values.
|
||||
# Increases possible data size with 16-bit index values at the cost
|
||||
# of compactability.
|
||||
# This requires data blocks to be aligned by DATA_GRANULARITY.
|
||||
INDEX_SHIFT = 2
|
||||
|
||||
# The alignment size of a data block. Also the granularity for compaction.
|
||||
DATA_GRANULARITY = 1 << INDEX_SHIFT
|
||||
|
||||
# The BMP part of the index-2 table is fixed and linear and starts at offset 0.
|
||||
# Length=2048=0x800=0x10000>>SHIFT_2.
|
||||
INDEX_2_OFFSET = 0
|
||||
|
||||
# The part of the index-2 table for U+D800..U+DBFF stores values for
|
||||
# lead surrogate code _units_ not code _points_.
|
||||
# Values for lead surrogate code _points_ are indexed with this portion of the table.
|
||||
# Length=32=0x20=0x400>>SHIFT_2. (There are 1024=0x400 lead surrogates.)
|
||||
LSCP_INDEX_2_OFFSET = 0x10000 >> SHIFT_2
|
||||
LSCP_INDEX_2_LENGTH = 0x400 >> SHIFT_2
|
||||
|
||||
# Count the lengths of both BMP pieces. 2080=0x820
|
||||
INDEX_2_BMP_LENGTH = LSCP_INDEX_2_OFFSET + LSCP_INDEX_2_LENGTH
|
||||
|
||||
# The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
|
||||
# Length 32=0x20 for lead bytes C0..DF, regardless of SHIFT_2.
|
||||
UTF8_2B_INDEX_2_OFFSET = INDEX_2_BMP_LENGTH
|
||||
UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6 # U+0800 is the first code point after 2-byte UTF-8
|
||||
|
||||
# The index-1 table, only used for supplementary code points, at offset 2112=0x840.
|
||||
# Variable length, for code points up to highStart, where the last single-value range starts.
|
||||
# Maximum length 512=0x200=0x100000>>SHIFT_1.
|
||||
# (For 0x100000 supplementary code points U+10000..U+10ffff.)
|
||||
#
|
||||
# The part of the index-2 table for supplementary code points starts
|
||||
# after this index-1 table.
|
||||
#
|
||||
# Both the index-1 table and the following part of the index-2 table
|
||||
# are omitted completely if there is only BMP data.
|
||||
INDEX_1_OFFSET = UTF8_2B_INDEX_2_OFFSET + UTF8_2B_INDEX_2_LENGTH
|
||||
MAX_INDEX_1_LENGTH = 0x100000 >> SHIFT_1
|
||||
|
||||
# The illegal-UTF-8 data block follows the ASCII block, at offset 128=0x80.
|
||||
# Used with linear access for single bytes 0..0xbf for simple error handling.
|
||||
# Length 64=0x40, not DATA_BLOCK_LENGTH.
|
||||
BAD_UTF8_DATA_OFFSET = 0x80
|
||||
|
||||
# The start of non-linear-ASCII data blocks, at offset 192=0xc0.
|
||||
# !!!!
|
||||
DATA_START_OFFSET = 0xc0
|
||||
|
||||
# The null data block.
|
||||
# Length 64=0x40 even if DATA_BLOCK_LENGTH is smaller,
|
||||
# to work with 6-bit trail bytes from 2-byte UTF-8.
|
||||
DATA_NULL_OFFSET = DATA_START_OFFSET
|
||||
|
||||
# The start of allocated data blocks.
|
||||
NEW_DATA_START_OFFSET = DATA_NULL_OFFSET + 0x40
|
||||
|
||||
# The start of data blocks for U+0800 and above.
|
||||
# Below, compaction uses a block length of 64 for 2-byte UTF-8.
|
||||
# From here on, compaction uses DATA_BLOCK_LENGTH.
|
||||
# Data values for 0x780 code points beyond ASCII.
|
||||
DATA_0800_OFFSET = NEW_DATA_START_OFFSET + 0x780
|
||||
|
||||
# Start with allocation of 16k data entries. */
|
||||
INITIAL_DATA_LENGTH = 1 << 14
|
||||
|
||||
# Grow about 8x each time.
|
||||
MEDIUM_DATA_LENGTH = 1 << 17
|
||||
|
||||
# Maximum length of the runtime data array.
|
||||
# Limited by 16-bit index values that are left-shifted by INDEX_SHIFT,
|
||||
# and by uint16_t UTrie2Header.shiftedDataLength.
|
||||
MAX_DATA_LENGTH = 0xffff << INDEX_SHIFT
|
||||
|
||||
INDEX_1_LENGTH = 0x110000 >> SHIFT_1
|
||||
|
||||
# Maximum length of the build-time data array.
|
||||
# One entry per 0x110000 code points, plus the illegal-UTF-8 block and the null block,
|
||||
# plus values for the 0x400 surrogate code units.
|
||||
MAX_DATA_LENGTH = 0x110000 + 0x40 + 0x40 + 0x400
|
||||
|
||||
# At build time, leave a gap in the index-2 table,
|
||||
# at least as long as the maximum lengths of the 2-byte UTF-8 index-2 table
|
||||
# and the supplementary index-1 table.
|
||||
# Round up to INDEX_2_BLOCK_LENGTH for proper compacting.
|
||||
INDEX_GAP_OFFSET = INDEX_2_BMP_LENGTH
|
||||
INDEX_GAP_LENGTH = ((UTF8_2B_INDEX_2_LENGTH + MAX_INDEX_1_LENGTH) + INDEX_2_MASK) & ~INDEX_2_MASK
|
||||
|
||||
# Maximum length of the build-time index-2 array.
|
||||
# Maximum number of Unicode code points (0x110000) shifted right by SHIFT_2,
|
||||
# plus the part of the index-2 table for lead surrogate code points,
|
||||
# plus the build-time index gap,
|
||||
# plus the null index-2 block.)
|
||||
MAX_INDEX_2_LENGTH = (0x110000 >> SHIFT_2) + LSCP_INDEX_2_LENGTH + INDEX_GAP_LENGTH + INDEX_2_BLOCK_LENGTH
|
||||
|
||||
# The null index-2 block, following the gap in the index-2 table.
|
||||
INDEX_2_NULL_OFFSET = INDEX_GAP_OFFSET + INDEX_GAP_LENGTH
|
||||
|
||||
# The start of allocated index-2 blocks.
|
||||
INDEX_2_START_OFFSET = INDEX_2_NULL_OFFSET + INDEX_2_BLOCK_LENGTH
|
||||
|
||||
# Maximum length of the runtime index array.
|
||||
# Limited by its own 16-bit index values, and by uint16_t UTrie2Header.indexLength.
|
||||
# (The actual maximum length is lower,
|
||||
# (0x110000>>SHIFT_2)+UTF8_2B_INDEX_2_LENGTH+MAX_INDEX_1_LENGTH.)
|
||||
MAX_INDEX_LENGTH = 0xffff
|
||||
|
||||
constructor: (@initialValue = 0, @errorValue = 0) ->
|
||||
@index1 = new Int32Array INDEX_1_LENGTH
|
||||
@index2 = new Int32Array MAX_INDEX_2_LENGTH
|
||||
@highStart = 0x110000
|
||||
|
||||
@data = new Uint32Array INITIAL_DATA_LENGTH
|
||||
@dataCapacity = INITIAL_DATA_LENGTH
|
||||
|
||||
@firstFreeBlock = 0
|
||||
@isCompacted = false
|
||||
|
||||
# Multi-purpose per-data-block table.
|
||||
#
|
||||
# Before compacting:
|
||||
#
|
||||
# Per-data-block reference counters/free-block list.
|
||||
# 0: unused
|
||||
# >0: reference counter (number of index-2 entries pointing here)
|
||||
# <0: next free data block in free-block list
|
||||
#
|
||||
# While compacting:
|
||||
#
|
||||
# Map of adjusted indexes, used in compactData() and compactIndex2().
|
||||
# Maps from original indexes to new ones.
|
||||
@map = new Int32Array MAX_DATA_LENGTH >> SHIFT_2
|
||||
|
||||
for i in [0...0x80] by 1
|
||||
@data[i] = @initialValue
|
||||
|
||||
for i in [i...0xc0] by 1
|
||||
@data[i] = @errorValue
|
||||
|
||||
for i in [DATA_NULL_OFFSET...NEW_DATA_START_OFFSET] by 1
|
||||
@data[i] = @initialValue
|
||||
|
||||
@dataNullOffset = DATA_NULL_OFFSET
|
||||
@dataLength = NEW_DATA_START_OFFSET
|
||||
|
||||
# set the index-2 indexes for the 2=0x80>>SHIFT_2 ASCII data blocks
|
||||
i = 0
|
||||
for j in [0...0x80] by DATA_BLOCK_LENGTH
|
||||
@index2[i] = j
|
||||
@map[i++] = 1
|
||||
|
||||
# reference counts for the bad-UTF-8-data block
|
||||
for j in [j...0xc0] by DATA_BLOCK_LENGTH
|
||||
@map[i++] = 0
|
||||
|
||||
# Reference counts for the null data block: all blocks except for the ASCII blocks.
|
||||
# Plus 1 so that we don't drop this block during compaction.
|
||||
# Plus as many as needed for lead surrogate code points.
|
||||
# i==newTrie->dataNullOffset
|
||||
@map[i++] = (0x110000 >> SHIFT_2) - (0x80 >> SHIFT_2) + 1 + LSCP_INDEX_2_LENGTH
|
||||
j += DATA_BLOCK_LENGTH
|
||||
for j in [j...NEW_DATA_START_OFFSET] by DATA_BLOCK_LENGTH
|
||||
@map[i++] = 0
|
||||
|
||||
# set the remaining indexes in the BMP index-2 block
|
||||
# to the null data block
|
||||
for i in [0x80 >> SHIFT_2...INDEX_2_BMP_LENGTH] by 1
|
||||
@index2[i] = DATA_NULL_OFFSET
|
||||
|
||||
# Fill the index gap with impossible values so that compaction
|
||||
# does not overlap other index-2 blocks with the gap.
|
||||
for i in [0...INDEX_GAP_LENGTH] by 1
|
||||
@index2[INDEX_GAP_OFFSET + i] = -1
|
||||
|
||||
# set the indexes in the null index-2 block
|
||||
for i in [0...INDEX_2_BLOCK_LENGTH] by 1
|
||||
@index2[INDEX_2_NULL_OFFSET + i] = DATA_NULL_OFFSET
|
||||
|
||||
@index2NullOffset = INDEX_2_NULL_OFFSET
|
||||
@index2Length = INDEX_2_START_OFFSET
|
||||
|
||||
# set the index-1 indexes for the linear index-2 block
|
||||
j = 0
|
||||
for i in [0...OMITTED_BMP_INDEX_1_LENGTH] by 1
|
||||
@index1[i] = j
|
||||
j += INDEX_2_BLOCK_LENGTH
|
||||
|
||||
# set the remaining index-1 indexes to the null index-2 block
|
||||
for i in [i...INDEX_1_LENGTH] by 1
|
||||
@index1[i] = INDEX_2_NULL_OFFSET
|
||||
|
||||
# Preallocate and reset data for U+0080..U+07ff,
|
||||
# for 2-byte UTF-8 which will be compacted in 64-blocks
|
||||
# even if DATA_BLOCK_LENGTH is smaller.
|
||||
for i in [0x80...0x800] by DATA_BLOCK_LENGTH
|
||||
@set i, @initialValue
|
||||
|
||||
return
|
||||
|
||||
set: (codePoint, value) ->
|
||||
if codePoint < 0 or codePoint > 0x10ffff
|
||||
throw new Error 'Invalid code point'
|
||||
|
||||
if @isCompacted
|
||||
throw new Error 'Already compacted'
|
||||
|
||||
block = @_getDataBlock codePoint, true
|
||||
@data[block + (codePoint & DATA_MASK)] = value
|
||||
return this
|
||||
|
||||
setRange: (start, end, value, overwrite = true) ->
|
||||
if start > 0x10ffff or end > 0x10ffff or start > end
|
||||
throw new Error 'Invalid code point'
|
||||
|
||||
if @isCompacted
|
||||
throw new Error 'Already compacted'
|
||||
|
||||
if not overwrite and value is @initialValue
|
||||
return this # nothing to do
|
||||
|
||||
limit = end + 1
|
||||
if (start & DATA_MASK) isnt 0
|
||||
# set partial block at [start..following block boundary
|
||||
block = @_getDataBlock start, true
|
||||
|
||||
nextStart = (start + DATA_BLOCK_LENGTH) & ~DATA_MASK
|
||||
if nextStart <= limit
|
||||
@_fillBlock block, start & DATA_MASK, DATA_BLOCK_LENGTH, value, @initialValue, overwrite
|
||||
start = nextStart
|
||||
else
|
||||
@_fillBlock block, start & DATA_MASK, limit & DATA_MASK, value, @initialValue, overwrite
|
||||
return this
|
||||
|
||||
# number of positions in the last, partial block
|
||||
rest = limit & DATA_MASK
|
||||
|
||||
# round down limit to a block boundary
|
||||
limit &= ~DATA_MASK
|
||||
|
||||
# iterate over all-value blocks
|
||||
if value is @initialValue
|
||||
repeatBlock = @dataNullOffset
|
||||
else
|
||||
repeatBlock = -1
|
||||
|
||||
while start < limit
|
||||
setRepeatBlock = false
|
||||
|
||||
if value is @initialValue and @_isInNullBlock start, true
|
||||
start += DATA_BLOCK_LENGTH # nothing to do
|
||||
continue
|
||||
|
||||
# get index value
|
||||
i2 = @_getIndex2Block start, true
|
||||
i2 += (start >> SHIFT_2) & INDEX_2_MASK
|
||||
|
||||
block = @index2[i2]
|
||||
if @_isWritableBlock block
|
||||
# already allocated
|
||||
if overwrite and block >= DATA_0800_OFFSET
|
||||
# We overwrite all values, and it's not a
|
||||
# protected (ASCII-linear or 2-byte UTF-8) block:
|
||||
# replace with the repeatBlock.
|
||||
setRepeatBlock = true
|
||||
else
|
||||
# protected block: just write the values into this block
|
||||
@_fillBlock block, 0, DATA_BLOCK_LENGTH, value, @initialValue, overwrite
|
||||
|
||||
else if @data[block] isnt value and (overwrite or block is @dataNullOffset)
|
||||
# Set the repeatBlock instead of the null block or previous repeat block:
|
||||
#
|
||||
# If !isWritableBlock() then all entries in the block have the same value
|
||||
# because it's the null block or a range block (the repeatBlock from a previous
|
||||
# call to utrie2_setRange32()).
|
||||
# No other blocks are used multiple times before compacting.
|
||||
#
|
||||
# The null block is the only non-writable block with the initialValue because
|
||||
# of the repeatBlock initialization above. (If value==initialValue, then
|
||||
# the repeatBlock will be the null data block.)
|
||||
#
|
||||
# We set our repeatBlock if the desired value differs from the block's value,
|
||||
# and if we overwrite any data or if the data is all initial values
|
||||
# (which is the same as the block being the null block, see above).
|
||||
setRepeatBlock = true
|
||||
|
||||
if setRepeatBlock
|
||||
if repeatBlock >= 0
|
||||
@_setIndex2Entry i2, repeatBlock
|
||||
else
|
||||
# create and set and fill the repeatBlock
|
||||
repeatBlock = @_getDataBlock start, true
|
||||
@_writeBlock repeatBlock, value
|
||||
|
||||
start += DATA_BLOCK_LENGTH
|
||||
|
||||
if rest > 0
|
||||
# set partial block at [last block boundary..limit
|
||||
block = @_getDataBlock start, true
|
||||
@_fillBlock block, 0, rest, value, @initialValue, overwrite
|
||||
|
||||
return this
|
||||
|
||||
get: (c, fromLSCP = true) ->
|
||||
if c < 0 or c > 0x10ffff
|
||||
return @errorValue
|
||||
|
||||
if c >= @highStart and (!(c >= 0xd800 and c < 0xdc00) or fromLSCP)
|
||||
return @data[@dataLength - DATA_GRANULARITY];
|
||||
|
||||
if (c >= 0xd800 and c < 0xdc00) and fromLSCP
|
||||
i2 = (LSCP_INDEX_2_OFFSET - (0xd800 >> SHIFT_2)) + (c >> SHIFT_2)
|
||||
else
|
||||
i2 = @index1[c >> SHIFT_1] + ((c >> SHIFT_2) & INDEX_2_MASK)
|
||||
|
||||
block = @index2[i2]
|
||||
return @data[block + (c & DATA_MASK)]
|
||||
|
||||
_isInNullBlock: (c, forLSCP) ->
|
||||
if (c & 0xfffffc00) is 0xd800 and forLSCP
|
||||
i2 = LSCP_INDEX_2_OFFSET - (0xd800 >> SHIFT_2) + (c >> SHIFT_2)
|
||||
else
|
||||
i2 = @index1[c >> SHIFT_1] + ((c >> SHIFT_2) & INDEX_2_MASK)
|
||||
|
||||
block = @index2[i2]
|
||||
return block is @dataNullOffset
|
||||
|
||||
_allocIndex2Block: ->
|
||||
newBlock = @index2Length
|
||||
newTop = newBlock + INDEX_2_BLOCK_LENGTH
|
||||
if newTop > @index2.length
|
||||
# Should never occur.
|
||||
# Either MAX_BUILD_TIME_INDEX_LENGTH is incorrect,
|
||||
# or the code writes more values than should be possible.
|
||||
throw new Error("Internal error in Trie2 creation.");
|
||||
|
||||
@index2Length = newTop
|
||||
@index2.set(@index2.subarray(@index2NullOffset, @index2NullOffset + INDEX_2_BLOCK_LENGTH), newBlock)
|
||||
|
||||
return newBlock
|
||||
|
||||
_getIndex2Block: (c, forLSCP) ->
|
||||
if c >= 0xd800 and c < 0xdc00 and forLSCP
|
||||
return LSCP_INDEX_2_OFFSET
|
||||
|
||||
i1 = c >> SHIFT_1
|
||||
i2 = @index1[i1]
|
||||
if i2 is @index2NullOffset
|
||||
i2 = @_allocIndex2Block()
|
||||
@index1[i1] = i2
|
||||
|
||||
return i2
|
||||
|
||||
_isWritableBlock: (block) ->
|
||||
return block isnt @dataNullOffset and @map[block >> SHIFT_2] is 1
|
||||
|
||||
_allocDataBlock: (copyBlock) ->
|
||||
if @firstFreeBlock isnt 0
|
||||
# get the first free block
|
||||
newBlock = @firstFreeBlock
|
||||
@firstFreeBlock = -@map[newBlock >> SHIFT_2]
|
||||
else
|
||||
# get a new block from the high end
|
||||
newBlock = @dataLength
|
||||
newTop = newBlock + DATA_BLOCK_LENGTH
|
||||
if newTop > @dataCapacity
|
||||
# out of memory in the data array
|
||||
if @dataCapacity < MEDIUM_DATA_LENGTH
|
||||
capacity = MEDIUM_DATA_LENGTH
|
||||
else if @dataCapacity < MAX_DATA_LENGTH
|
||||
capacity = MAX_DATA_LENGTH
|
||||
else
|
||||
# Should never occur.
|
||||
# Either MAX_DATA_LENGTH is incorrect,
|
||||
# or the code writes more values than should be possible.
|
||||
throw new Error("Internal error in Trie2 creation.");
|
||||
|
||||
newData = new Uint32Array(capacity)
|
||||
newData.set(@data.subarray(0, @dataLength))
|
||||
@data = newData
|
||||
@dataCapacity = capacity
|
||||
|
||||
@dataLength = newTop
|
||||
|
||||
@data.set(@data.subarray(copyBlock, copyBlock + DATA_BLOCK_LENGTH), newBlock)
|
||||
@map[newBlock >> SHIFT_2] = 0
|
||||
return newBlock
|
||||
|
||||
_releaseDataBlock: (block) ->
|
||||
# put this block at the front of the free-block chain
|
||||
@map[block >> SHIFT_2] = -@firstFreeBlock
|
||||
@firstFreeBlock = block
|
||||
|
||||
_setIndex2Entry: (i2, block) ->
|
||||
++@map[block >> SHIFT_2] # increment first, in case block == oldBlock!
|
||||
oldBlock = @index2[i2]
|
||||
if --@map[oldBlock >> SHIFT_2] is 0
|
||||
@_releaseDataBlock oldBlock
|
||||
|
||||
@index2[i2] = block
|
||||
|
||||
_getDataBlock: (c, forLSCP) ->
|
||||
i2 = @_getIndex2Block c, forLSCP
|
||||
i2 += (c >> SHIFT_2) & INDEX_2_MASK
|
||||
|
||||
oldBlock = @index2[i2]
|
||||
if @_isWritableBlock oldBlock
|
||||
return oldBlock
|
||||
|
||||
# allocate a new data block
|
||||
newBlock = @_allocDataBlock oldBlock
|
||||
@_setIndex2Entry i2, newBlock
|
||||
return newBlock
|
||||
|
||||
_fillBlock: (block, start, limit, value, initialValue, overwrite) ->
|
||||
if overwrite
|
||||
for i in [block+start...block+limit] by 1
|
||||
@data[i] = value
|
||||
else
|
||||
for i in [block+start...block+limit] by 1
|
||||
if @data[i] is initialValue
|
||||
@data[i] = value
|
||||
|
||||
return
|
||||
|
||||
_writeBlock: (block, value) ->
|
||||
limit = block + DATA_BLOCK_LENGTH
|
||||
while block < limit
|
||||
@data[block++] = value
|
||||
|
||||
return
|
||||
|
||||
_findHighStart: (highValue) ->
|
||||
data32 = @data
|
||||
initialValue = @initialValue
|
||||
index2NullOffset = @index2NullOffset
|
||||
nullBlock = @dataNullOffset
|
||||
|
||||
# set variables for previous range
|
||||
if highValue is initialValue
|
||||
prevI2Block = index2NullOffset
|
||||
prevBlock = nullBlock
|
||||
else
|
||||
prevI2Block = -1
|
||||
prevBlock = -1
|
||||
|
||||
prev = 0x110000
|
||||
|
||||
# enumerate index-2 blocks
|
||||
i1 = INDEX_1_LENGTH
|
||||
c = prev
|
||||
while c > 0
|
||||
i2Block = @index1[--i1]
|
||||
if i2Block is prevI2Block
|
||||
# the index-2 block is the same as the previous one, and filled with highValue
|
||||
c -= CP_PER_INDEX_1_ENTRY
|
||||
continue
|
||||
|
||||
prevI2Block = i2Block
|
||||
if i2Block is index2NullOffset
|
||||
# this is the null index-2 block
|
||||
return c unless highValue is initialValue
|
||||
c -= CP_PER_INDEX_1_ENTRY
|
||||
else
|
||||
# enumerate data blocks for one index-2 block
|
||||
i2 = INDEX_2_BLOCK_LENGTH
|
||||
while i2 > 0
|
||||
block = @index2[i2Block + --i2]
|
||||
if block is prevBlock
|
||||
# the block is the same as the previous one, and filled with highValue
|
||||
c -= DATA_BLOCK_LENGTH
|
||||
continue
|
||||
|
||||
prevBlock = block
|
||||
if block is nullBlock
|
||||
# this is the null data block
|
||||
return c unless highValue is initialValue
|
||||
c -= DATA_BLOCK_LENGTH
|
||||
else
|
||||
j = DATA_BLOCK_LENGTH
|
||||
while j > 0
|
||||
value = data32[block + --j]
|
||||
return c unless value is highValue
|
||||
--c
|
||||
|
||||
# deliver last range
|
||||
return 0
|
||||
|
||||
equal_int = (a, s, t, length) ->
|
||||
for i in [0...length] by 1
|
||||
return false unless a[s + i] is a[t + i]
|
||||
|
||||
return true
|
||||
|
||||
_findSameDataBlock: (dataLength, otherBlock, blockLength) ->
|
||||
# ensure that we do not even partially get past dataLength
|
||||
dataLength -= blockLength
|
||||
block = 0
|
||||
while block <= dataLength
|
||||
return block if equal_int(@data, block, otherBlock, blockLength)
|
||||
block += DATA_GRANULARITY
|
||||
|
||||
return -1
|
||||
|
||||
_findSameIndex2Block: (index2Length, otherBlock) ->
|
||||
# ensure that we do not even partially get past index2Length
|
||||
index2Length -= INDEX_2_BLOCK_LENGTH
|
||||
for block in [0..index2Length] by 1
|
||||
return block if equal_int(@index2, block, otherBlock, INDEX_2_BLOCK_LENGTH)
|
||||
|
||||
return -1
|
||||
|
||||
_compactData: ->
|
||||
# do not compact linear-ASCII data
|
||||
newStart = DATA_START_OFFSET
|
||||
start = 0
|
||||
i = 0
|
||||
|
||||
while start < newStart
|
||||
@map[i++] = start
|
||||
start += DATA_BLOCK_LENGTH
|
||||
|
||||
# Start with a block length of 64 for 2-byte UTF-8,
|
||||
# then switch to DATA_BLOCK_LENGTH.
|
||||
blockLength = 64
|
||||
blockCount = blockLength >> SHIFT_2
|
||||
start = newStart
|
||||
while start < @dataLength
|
||||
# start: index of first entry of current block
|
||||
# newStart: index where the current block is to be moved
|
||||
# (right after current end of already-compacted data)
|
||||
if start is DATA_0800_OFFSET
|
||||
blockLength = DATA_BLOCK_LENGTH
|
||||
blockCount = 1
|
||||
|
||||
# skip blocks that are not used
|
||||
if @map[start >> SHIFT_2] <= 0
|
||||
# advance start to the next block
|
||||
start += blockLength
|
||||
|
||||
# leave newStart with the previous block!
|
||||
continue
|
||||
|
||||
# search for an identical block
|
||||
if (movedStart = @_findSameDataBlock(newStart, start, blockLength)) >= 0
|
||||
# found an identical block, set the other block's index value for the current block
|
||||
mapIndex = start >> SHIFT_2
|
||||
for i in [blockCount...0] by -1
|
||||
@map[mapIndex++] = movedStart
|
||||
movedStart += DATA_BLOCK_LENGTH
|
||||
|
||||
# advance start to the next block
|
||||
start += blockLength
|
||||
|
||||
# leave newStart with the previous block!
|
||||
continue
|
||||
|
||||
# see if the beginning of this block can be overlapped with the end of the previous block
|
||||
# look for maximum overlap (modulo granularity) with the previous, adjacent block
|
||||
overlap = blockLength - DATA_GRANULARITY
|
||||
while overlap > 0 and not equal_int(@data, (newStart - overlap), start, overlap)
|
||||
overlap -= DATA_GRANULARITY
|
||||
|
||||
if overlap > 0 or newStart < start
|
||||
# some overlap, or just move the whole block
|
||||
movedStart = newStart - overlap
|
||||
mapIndex = start >> SHIFT_2
|
||||
|
||||
for i in [blockCount...0] by -1
|
||||
@map[mapIndex++] = movedStart
|
||||
movedStart += DATA_BLOCK_LENGTH
|
||||
|
||||
# move the non-overlapping indexes to their new positions
|
||||
start += overlap
|
||||
for i in [blockLength - overlap...0] by -1
|
||||
@data[newStart++] = @data[start++]
|
||||
|
||||
else # no overlap && newStart==start
|
||||
mapIndex = start >> SHIFT_2
|
||||
for i in [blockCount...0] by -1
|
||||
@map[mapIndex++] = start
|
||||
start += DATA_BLOCK_LENGTH
|
||||
|
||||
newStart = start
|
||||
|
||||
# now adjust the index-2 table
|
||||
i = 0
|
||||
while i < @index2Length
|
||||
# Gap indexes are invalid (-1). Skip over the gap.
|
||||
i += INDEX_GAP_LENGTH if i is INDEX_GAP_OFFSET
|
||||
@index2[i] = @map[@index2[i] >> SHIFT_2]
|
||||
++i
|
||||
|
||||
@dataNullOffset = @map[@dataNullOffset >> SHIFT_2]
|
||||
|
||||
# ensure dataLength alignment
|
||||
@data[newStart++] = @initialValue until (newStart & (DATA_GRANULARITY - 1)) is 0
|
||||
@dataLength = newStart
|
||||
return
|
||||
|
||||
_compactIndex2: ->
|
||||
# do not compact linear-BMP index-2 blocks
|
||||
newStart = INDEX_2_BMP_LENGTH
|
||||
start = 0
|
||||
i = 0
|
||||
|
||||
while start < newStart
|
||||
@map[i++] = start
|
||||
start += INDEX_2_BLOCK_LENGTH
|
||||
|
||||
# Reduce the index table gap to what will be needed at runtime.
|
||||
newStart += UTF8_2B_INDEX_2_LENGTH + ((@highStart - 0x10000) >> SHIFT_1)
|
||||
start = INDEX_2_NULL_OFFSET
|
||||
while start < @index2Length
|
||||
# start: index of first entry of current block
|
||||
# newStart: index where the current block is to be moved
|
||||
# (right after current end of already-compacted data)
|
||||
|
||||
# search for an identical block
|
||||
if (movedStart = @_findSameIndex2Block(newStart, start)) >= 0
|
||||
# found an identical block, set the other block's index value for the current block
|
||||
@map[start >> SHIFT_1_2] = movedStart
|
||||
|
||||
# advance start to the next block
|
||||
start += INDEX_2_BLOCK_LENGTH
|
||||
|
||||
# leave newStart with the previous block!
|
||||
continue
|
||||
|
||||
# see if the beginning of this block can be overlapped with the end of the previous block
|
||||
# look for maximum overlap with the previous, adjacent block
|
||||
overlap = INDEX_2_BLOCK_LENGTH - 1
|
||||
while overlap > 0 and not equal_int(@index2, (newStart - overlap), start, overlap)
|
||||
--overlap
|
||||
|
||||
if overlap > 0 or newStart < start
|
||||
# some overlap, or just move the whole block
|
||||
@map[start >> SHIFT_1_2] = newStart - overlap
|
||||
|
||||
# move the non-overlapping indexes to their new positions
|
||||
start += overlap
|
||||
for i in [INDEX_2_BLOCK_LENGTH - overlap...0] by -1
|
||||
@index2[newStart++] = @index2[start++]
|
||||
|
||||
else # no overlap && newStart==start
|
||||
@map[start >> SHIFT_1_2] = start
|
||||
start += INDEX_2_BLOCK_LENGTH
|
||||
newStart = start
|
||||
|
||||
# now adjust the index-1 table
|
||||
for i in [0...INDEX_1_LENGTH] by 1
|
||||
@index1[i] = @map[@index1[i] >> SHIFT_1_2]
|
||||
|
||||
@index2NullOffset = @map[@index2NullOffset >> SHIFT_1_2]
|
||||
|
||||
# Ensure data table alignment:
|
||||
# Needs to be granularity-aligned for 16-bit trie
|
||||
# (so that dataMove will be down-shiftable),
|
||||
# and 2-aligned for uint32_t data.
|
||||
|
||||
# Arbitrary value: 0x3fffc not possible for real data.
|
||||
until (newStart & ((DATA_GRANULARITY - 1) | 1)) is 0
|
||||
@index2[newStart++] = 0x0000ffff << INDEX_SHIFT
|
||||
|
||||
@index2Length = newStart
|
||||
|
||||
_compact: ->
|
||||
# find highStart and round it up
|
||||
highValue = @get 0x10ffff
|
||||
highStart = @_findHighStart highValue
|
||||
highStart = (highStart + (CP_PER_INDEX_1_ENTRY - 1)) & ~(CP_PER_INDEX_1_ENTRY - 1)
|
||||
if highStart is 0x110000
|
||||
highValue = @errorValue
|
||||
|
||||
# Set trie->highStart only after utrie2_get32(trie, highStart).
|
||||
# Otherwise utrie2_get32(trie, highStart) would try to read the highValue.
|
||||
@highStart = highStart
|
||||
if @highStart < 0x110000
|
||||
# Blank out [highStart..10ffff] to release associated data blocks.
|
||||
suppHighStart = if @highStart <= 0x10000 then 0x10000 else @highStart
|
||||
@setRange suppHighStart, 0x10ffff, @initialValue, true
|
||||
|
||||
@_compactData()
|
||||
if @highStart > 0x10000
|
||||
@_compactIndex2()
|
||||
|
||||
# Store the highValue in the data array and round up the dataLength.
|
||||
# Must be done after compactData() because that assumes that dataLength
|
||||
# is a multiple of DATA_BLOCK_LENGTH.
|
||||
@data[@dataLength++] = highValue
|
||||
until (@dataLength & (DATA_GRANULARITY - 1)) is 0
|
||||
@data[@dataLength++] = @initialValue
|
||||
|
||||
@isCompacted = true
|
||||
|
||||
# Compact (if necessary) and serialize the builder into a read-only
# UnicodeTrie. The result holds one Int32Array: the shifted index tables
# followed by the data values.
freeze: ->
  @_compact() unless @isCompacted

  # BMP-only tries serialize just the fixed-size index prefix.
  allIndexesLength = if @highStart <= 0x10000 then INDEX_1_OFFSET else @index2Length

  # Data values are appended after the indexes, so every index entry must be
  # offset by this amount.
  dataMove = allIndexesLength

  # Limits of the 16-bit serialized form (for shiftedDataLength).
  if allIndexesLength > MAX_INDEX_LENGTH or
     (dataMove + @dataNullOffset) > 0xffff or
     (dataMove + DATA_0800_OFFSET) > 0xffff or
     (dataMove + @dataLength) > MAX_DATA_LENGTH
    throw new Error("Trie data is too large.")

  # One combined array: indexes followed by data values.
  data = new Int32Array(allIndexesLength + @dataLength)
  destIdx = 0

  # BMP index-2 values, shifted right by INDEX_SHIFT after adding dataMove.
  for i in [0...INDEX_2_BMP_LENGTH] by 1
    data[destIdx++] = ((@index2[i] + dataMove) >> INDEX_SHIFT)

  # UTF-8 2-byte index-2 values are stored unshifted. C0..C1 are always
  # bad lead bytes...
  for i in [0...(0xc2 - 0xc0)] by 1
    data[destIdx++] = (dataMove + BAD_UTF8_DATA_OFFSET)

  # ...while C2..DF map through the regular index-2 table.
  for i in [i...(0xe0 - 0xc0)] by 1
    data[destIdx++] = (dataMove + @index2[i << (6 - SHIFT_2)])

  if @highStart > 0x10000
    index1Length = (@highStart - 0x10000) >> SHIFT_1
    index2Offset = INDEX_2_BMP_LENGTH + UTF8_2B_INDEX_2_LENGTH + index1Length

    # 16-bit index-1 values for supplementary code points.
    for i in [0...index1Length] by 1
      data[destIdx++] = (INDEX_2_OFFSET + @index1[i + OMITTED_BMP_INDEX_1_LENGTH])

    # Supplementary index-2 values, shifted after adding dataMove.
    for i in [0...@index2Length - index2Offset] by 1
      data[destIdx++] = ((dataMove + @index2[index2Offset + i]) >> INDEX_SHIFT)

  # Finally the data values themselves.
  for i in [0...@dataLength] by 1
    data[destIdx++] = @data[i]

  new UnicodeTrie
    data: data
    highStart: @highStart
    errorValue: @errorValue
|
||||
|
||||
# Generates a Buffer containing the serialized and compressed trie.
# Trie data is compressed twice using the deflate algorithm to minimize file size.
# Format (big-endian):
#   uint32_t highStart;
#   uint32_t errorValue;
#   uint32_t uncompressedDataLength;
#   uint8_t trieData[compressedLength];
toBuffer: ->
  trie = @freeze()

  data = new Uint8Array(trie.data.buffer)
  # Double deflate matches the double inflate performed by the reader.
  compressed = pako.deflateRaw pako.deflateRaw data

  # Buffer.alloc zero-fills; replaces the deprecated, unsafe `new Buffer(size)`
  # which returns uninitialized memory.
  buf = Buffer.alloc compressed.length + 12
  buf.writeUInt32BE trie.highStart, 0
  buf.writeUInt32BE trie.errorValue, 4
  buf.writeUInt32BE data.length, 8
  # Copy the compressed payload after the 12-byte header.
  buf.set compressed, 12

  return buf
|
||||
|
||||
module.exports = UnicodeTrieBuilder
|
||||
659
BACK_BACK/node_modules/unicode-trie/builder.js
generated
vendored
Executable file
659
BACK_BACK/node_modules/unicode-trie/builder.js
generated
vendored
Executable file
|
|
@ -0,0 +1,659 @@
|
|||
// Generated by CoffeeScript 1.7.1
|
||||
var UnicodeTrie, UnicodeTrieBuilder, pako;
|
||||
|
||||
UnicodeTrie = require('./');
|
||||
|
||||
pako = require('pako');
|
||||
|
||||
UnicodeTrieBuilder = (function() {
  var BAD_UTF8_DATA_OFFSET, CP_PER_INDEX_1_ENTRY, DATA_0800_OFFSET, DATA_BLOCK_LENGTH, DATA_GRANULARITY, DATA_MASK, DATA_NULL_OFFSET, DATA_START_OFFSET, INDEX_1_LENGTH, INDEX_1_OFFSET, INDEX_2_BLOCK_LENGTH, INDEX_2_BMP_LENGTH, INDEX_2_MASK, INDEX_2_NULL_OFFSET, INDEX_2_OFFSET, INDEX_2_START_OFFSET, INDEX_GAP_LENGTH, INDEX_GAP_OFFSET, INDEX_SHIFT, INITIAL_DATA_LENGTH, LSCP_INDEX_2_LENGTH, LSCP_INDEX_2_OFFSET, MAX_DATA_LENGTH, MAX_INDEX_1_LENGTH, MAX_INDEX_2_LENGTH, MAX_INDEX_LENGTH, MEDIUM_DATA_LENGTH, NEW_DATA_START_OFFSET, OMITTED_BMP_INDEX_1_LENGTH, SHIFT_1, SHIFT_1_2, SHIFT_2, UTF8_2B_INDEX_2_LENGTH, UTF8_2B_INDEX_2_OFFSET, equal_int;

  // Shift size for getting the index-1 table offset
  // (each index-1 entry covers 2^11 = 2048 code points).
  SHIFT_1 = 6 + 5;

  // Shift size for getting the index-2 table offset
  // (each index-2 entry covers one 32-entry data block).
  SHIFT_2 = 5;

  // Difference between the two shift sizes, for getting an index-1 offset
  // from an index-2 offset.
  SHIFT_1_2 = SHIFT_1 - SHIFT_2;

  // Number of index-1 entries for the BMP; this part of the index-1 table
  // is omitted from the serialized form.
  OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> SHIFT_1;

  // Number of code points covered by a single index-1 entry.
  CP_PER_INDEX_1_ENTRY = 1 << SHIFT_1;

  // Entries in an index-2 block, and the in-block offset mask.
  INDEX_2_BLOCK_LENGTH = 1 << SHIFT_1_2;

  INDEX_2_MASK = INDEX_2_BLOCK_LENGTH - 1;

  // Entries in a data block, and the in-block offset mask.
  DATA_BLOCK_LENGTH = 1 << SHIFT_2;

  DATA_MASK = DATA_BLOCK_LENGTH - 1;

  // Index values are stored right-shifted by this amount; data blocks must
  // therefore be aligned on DATA_GRANULARITY boundaries.
  INDEX_SHIFT = 2;

  DATA_GRANULARITY = 1 << INDEX_SHIFT;

  INDEX_2_OFFSET = 0;

  // Index-2 region used for lead-surrogate code *points* (the region at
  // the start of the table serves code *units*).
  LSCP_INDEX_2_OFFSET = 0x10000 >> SHIFT_2;

  LSCP_INDEX_2_LENGTH = 0x400 >> SHIFT_2;

  // Both BMP pieces together.
  INDEX_2_BMP_LENGTH = LSCP_INDEX_2_OFFSET + LSCP_INDEX_2_LENGTH;

  // 2-byte UTF-8 version of the index-2 table (lead bytes C0..DF).
  UTF8_2B_INDEX_2_OFFSET = INDEX_2_BMP_LENGTH;

  UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6;

  // The index-1 table for supplementary code points follows the UTF-8 part.
  INDEX_1_OFFSET = UTF8_2B_INDEX_2_OFFSET + UTF8_2B_INDEX_2_LENGTH;

  MAX_INDEX_1_LENGTH = 0x100000 >> SHIFT_1;

  // Fixed build-time data-array layout offsets.
  BAD_UTF8_DATA_OFFSET = 0x80;

  DATA_START_OFFSET = 0xc0;

  DATA_NULL_OFFSET = DATA_START_OFFSET;

  NEW_DATA_START_OFFSET = DATA_NULL_OFFSET + 0x40;

  DATA_0800_OFFSET = NEW_DATA_START_OFFSET + 0x780;

  // Growth schedule for the build-time data array.
  INITIAL_DATA_LENGTH = 1 << 14;

  MEDIUM_DATA_LENGTH = 1 << 17;

  MAX_DATA_LENGTH = 0xffff << INDEX_SHIFT;

  INDEX_1_LENGTH = 0x110000 >> SHIFT_1;

  // NOTE(review): this overwrites the runtime limit assigned just above with
  // the (larger) build-time limit, so freeze() checks against this value.
  // Kept as-is to preserve the generated code's behavior.
  MAX_DATA_LENGTH = 0x110000 + 0x40 + 0x40 + 0x400;

  INDEX_GAP_OFFSET = INDEX_2_BMP_LENGTH;

  INDEX_GAP_LENGTH = ((UTF8_2B_INDEX_2_LENGTH + MAX_INDEX_1_LENGTH) + INDEX_2_MASK) & ~INDEX_2_MASK;

  MAX_INDEX_2_LENGTH = (0x110000 >> SHIFT_2) + LSCP_INDEX_2_LENGTH + INDEX_GAP_LENGTH + INDEX_2_BLOCK_LENGTH;

  INDEX_2_NULL_OFFSET = INDEX_GAP_OFFSET + INDEX_GAP_LENGTH;

  INDEX_2_START_OFFSET = INDEX_2_NULL_OFFSET + INDEX_2_BLOCK_LENGTH;

  // Maximum length of the serialized index array.
  MAX_INDEX_LENGTH = 0xffff;
|
||||
function UnicodeTrieBuilder(initialValue, errorValue) {
|
||||
var i, j, _i, _j, _k, _l, _m, _n, _o, _p, _q, _r, _ref, _s, _t;
|
||||
this.initialValue = initialValue != null ? initialValue : 0;
|
||||
this.errorValue = errorValue != null ? errorValue : 0;
|
||||
this.index1 = new Int32Array(INDEX_1_LENGTH);
|
||||
this.index2 = new Int32Array(MAX_INDEX_2_LENGTH);
|
||||
this.highStart = 0x110000;
|
||||
this.data = new Uint32Array(INITIAL_DATA_LENGTH);
|
||||
this.dataCapacity = INITIAL_DATA_LENGTH;
|
||||
this.firstFreeBlock = 0;
|
||||
this.isCompacted = false;
|
||||
this.map = new Int32Array(MAX_DATA_LENGTH >> SHIFT_2);
|
||||
for (i = _i = 0; _i < 0x80; i = _i += 1) {
|
||||
this.data[i] = this.initialValue;
|
||||
}
|
||||
for (i = _j = i; _j < 0xc0; i = _j += 1) {
|
||||
this.data[i] = this.errorValue;
|
||||
}
|
||||
for (i = _k = DATA_NULL_OFFSET; _k < NEW_DATA_START_OFFSET; i = _k += 1) {
|
||||
this.data[i] = this.initialValue;
|
||||
}
|
||||
this.dataNullOffset = DATA_NULL_OFFSET;
|
||||
this.dataLength = NEW_DATA_START_OFFSET;
|
||||
i = 0;
|
||||
for (j = _l = 0; _l < 128; j = _l += DATA_BLOCK_LENGTH) {
|
||||
this.index2[i] = j;
|
||||
this.map[i++] = 1;
|
||||
}
|
||||
for (j = _m = j; DATA_BLOCK_LENGTH > 0 ? _m < 0xc0 : _m > 0xc0; j = _m += DATA_BLOCK_LENGTH) {
|
||||
this.map[i++] = 0;
|
||||
}
|
||||
this.map[i++] = (0x110000 >> SHIFT_2) - (0x80 >> SHIFT_2) + 1 + LSCP_INDEX_2_LENGTH;
|
||||
j += DATA_BLOCK_LENGTH;
|
||||
for (j = _n = j; DATA_BLOCK_LENGTH > 0 ? _n < NEW_DATA_START_OFFSET : _n > NEW_DATA_START_OFFSET; j = _n += DATA_BLOCK_LENGTH) {
|
||||
this.map[i++] = 0;
|
||||
}
|
||||
for (i = _o = _ref = 0x80 >> SHIFT_2; _o < INDEX_2_BMP_LENGTH; i = _o += 1) {
|
||||
this.index2[i] = DATA_NULL_OFFSET;
|
||||
}
|
||||
for (i = _p = 0; _p < INDEX_GAP_LENGTH; i = _p += 1) {
|
||||
this.index2[INDEX_GAP_OFFSET + i] = -1;
|
||||
}
|
||||
for (i = _q = 0; _q < INDEX_2_BLOCK_LENGTH; i = _q += 1) {
|
||||
this.index2[INDEX_2_NULL_OFFSET + i] = DATA_NULL_OFFSET;
|
||||
}
|
||||
this.index2NullOffset = INDEX_2_NULL_OFFSET;
|
||||
this.index2Length = INDEX_2_START_OFFSET;
|
||||
j = 0;
|
||||
for (i = _r = 0; _r < OMITTED_BMP_INDEX_1_LENGTH; i = _r += 1) {
|
||||
this.index1[i] = j;
|
||||
j += INDEX_2_BLOCK_LENGTH;
|
||||
}
|
||||
for (i = _s = i; _s < INDEX_1_LENGTH; i = _s += 1) {
|
||||
this.index1[i] = INDEX_2_NULL_OFFSET;
|
||||
}
|
||||
for (i = _t = 0x80; _t < 2048; i = _t += DATA_BLOCK_LENGTH) {
|
||||
this.set(i, this.initialValue);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
  /**
   * Set the value for a single code point.
   * Throws for code points outside U+0000..U+10FFFF and once the trie has
   * been compacted. Returns `this` for chaining.
   */
  UnicodeTrieBuilder.prototype.set = function(codePoint, value) {
    var block;
    if (codePoint < 0 || codePoint > 0x10ffff) {
      throw new Error('Invalid code point');
    }
    if (this.isCompacted) {
      throw new Error('Already compacted');
    }
    // Copy-on-write: make sure the code point lives in a writable block.
    block = this._getDataBlock(codePoint, true);
    this.data[block + (codePoint & DATA_MASK)] = value;
    return this;
  };
|
||||
|
||||
UnicodeTrieBuilder.prototype.setRange = function(start, end, value, overwrite) {
|
||||
var block, i2, limit, nextStart, repeatBlock, rest, setRepeatBlock;
|
||||
if (overwrite == null) {
|
||||
overwrite = true;
|
||||
}
|
||||
if (start > 0x10ffff || end > 0x10ffff || start > end) {
|
||||
throw new Error('Invalid code point');
|
||||
}
|
||||
if (this.isCompacted) {
|
||||
throw new Error('Already compacted');
|
||||
}
|
||||
if (!overwrite && value === this.initialValue) {
|
||||
return this;
|
||||
}
|
||||
limit = end + 1;
|
||||
if ((start & DATA_MASK) !== 0) {
|
||||
block = this._getDataBlock(start, true);
|
||||
nextStart = (start + DATA_BLOCK_LENGTH) & ~DATA_MASK;
|
||||
if (nextStart <= limit) {
|
||||
this._fillBlock(block, start & DATA_MASK, DATA_BLOCK_LENGTH, value, this.initialValue, overwrite);
|
||||
start = nextStart;
|
||||
} else {
|
||||
this._fillBlock(block, start & DATA_MASK, limit & DATA_MASK, value, this.initialValue, overwrite);
|
||||
return this;
|
||||
}
|
||||
}
|
||||
rest = limit & DATA_MASK;
|
||||
limit &= ~DATA_MASK;
|
||||
if (value === this.initialValue) {
|
||||
repeatBlock = this.dataNullOffset;
|
||||
} else {
|
||||
repeatBlock = -1;
|
||||
}
|
||||
while (start < limit) {
|
||||
setRepeatBlock = false;
|
||||
if (value === this.initialValue && this._isInNullBlock(start, true)) {
|
||||
start += DATA_BLOCK_LENGTH;
|
||||
continue;
|
||||
}
|
||||
i2 = this._getIndex2Block(start, true);
|
||||
i2 += (start >> SHIFT_2) & INDEX_2_MASK;
|
||||
block = this.index2[i2];
|
||||
if (this._isWritableBlock(block)) {
|
||||
if (overwrite && block >= DATA_0800_OFFSET) {
|
||||
setRepeatBlock = true;
|
||||
} else {
|
||||
this._fillBlock(block, 0, DATA_BLOCK_LENGTH, value, this.initialValue, overwrite);
|
||||
}
|
||||
} else if (this.data[block] !== value && (overwrite || block === this.dataNullOffset)) {
|
||||
setRepeatBlock = true;
|
||||
}
|
||||
if (setRepeatBlock) {
|
||||
if (repeatBlock >= 0) {
|
||||
this._setIndex2Entry(i2, repeatBlock);
|
||||
} else {
|
||||
repeatBlock = this._getDataBlock(start, true);
|
||||
this._writeBlock(repeatBlock, value);
|
||||
}
|
||||
}
|
||||
start += DATA_BLOCK_LENGTH;
|
||||
}
|
||||
if (rest > 0) {
|
||||
block = this._getDataBlock(start, true);
|
||||
this._fillBlock(block, 0, rest, value, this.initialValue, overwrite);
|
||||
}
|
||||
return this;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype.get = function(c, fromLSCP) {
|
||||
var block, i2;
|
||||
if (fromLSCP == null) {
|
||||
fromLSCP = true;
|
||||
}
|
||||
if (c < 0 || c > 0x10ffff) {
|
||||
return this.errorValue;
|
||||
}
|
||||
if (c >= this.highStart && (!(c >= 0xd800 && c < 0xdc00) || fromLSCP)) {
|
||||
return this.data[this.dataLength - DATA_GRANULARITY];
|
||||
}
|
||||
if ((c >= 0xd800 && c < 0xdc00) && fromLSCP) {
|
||||
i2 = (LSCP_INDEX_2_OFFSET - (0xd800 >> SHIFT_2)) + (c >> SHIFT_2);
|
||||
} else {
|
||||
i2 = this.index1[c >> SHIFT_1] + ((c >> SHIFT_2) & INDEX_2_MASK);
|
||||
}
|
||||
block = this.index2[i2];
|
||||
return this.data[block + (c & DATA_MASK)];
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._isInNullBlock = function(c, forLSCP) {
|
||||
var block, i2;
|
||||
if ((c & 0xfffffc00) === 0xd800 && forLSCP) {
|
||||
i2 = LSCP_INDEX_2_OFFSET - (0xd800 >> SHIFT_2) + (c >> SHIFT_2);
|
||||
} else {
|
||||
i2 = this.index1[c >> SHIFT_1] + ((c >> SHIFT_2) & INDEX_2_MASK);
|
||||
}
|
||||
block = this.index2[i2];
|
||||
return block === this.dataNullOffset;
|
||||
};
|
||||
|
||||
  // Append a new index-2 block, initialized as a copy of the shared null
  // index-2 block, and return its offset within index2.
  UnicodeTrieBuilder.prototype._allocIndex2Block = function() {
    var newBlock, newTop;
    newBlock = this.index2Length;
    newTop = newBlock + INDEX_2_BLOCK_LENGTH;
    if (newTop > this.index2.length) {
      // index2 is pre-sized to MAX_INDEX_2_LENGTH; overflowing it indicates a bug.
      throw new Error("Internal error in Trie2 creation.");
    }
    this.index2Length = newTop;
    this.index2.set(this.index2.subarray(this.index2NullOffset, this.index2NullOffset + INDEX_2_BLOCK_LENGTH), newBlock);
    return newBlock;
  };
|
||||
|
||||
  // Return the offset of the index-2 block covering code point c, allocating
  // a private copy of the null block on first use. Lead surrogates (with
  // forLSCP) use the dedicated LSCP region instead.
  UnicodeTrieBuilder.prototype._getIndex2Block = function(c, forLSCP) {
    var i1, i2;
    if (c >= 0xd800 && c < 0xdc00 && forLSCP) {
      return LSCP_INDEX_2_OFFSET;
    }
    i1 = c >> SHIFT_1;
    i2 = this.index1[i1];
    if (i2 === this.index2NullOffset) {
      i2 = this._allocIndex2Block();
      this.index1[i1] = i2;
    }
    return i2;
  };
|
||||
|
||||
  // A data block may be written in place only when it is not the shared null
  // block and exactly one index-2 entry references it (`map` holds per-block
  // reference counts).
  UnicodeTrieBuilder.prototype._isWritableBlock = function(block) {
    return block !== this.dataNullOffset && this.map[block >> SHIFT_2] === 1;
  };
|
||||
|
||||
UnicodeTrieBuilder.prototype._allocDataBlock = function(copyBlock) {
|
||||
var capacity, newBlock, newData, newTop;
|
||||
if (this.firstFreeBlock !== 0) {
|
||||
newBlock = this.firstFreeBlock;
|
||||
this.firstFreeBlock = -this.map[newBlock >> SHIFT_2];
|
||||
} else {
|
||||
newBlock = this.dataLength;
|
||||
newTop = newBlock + DATA_BLOCK_LENGTH;
|
||||
if (newTop > this.dataCapacity) {
|
||||
if (this.dataCapacity < MEDIUM_DATA_LENGTH) {
|
||||
capacity = MEDIUM_DATA_LENGTH;
|
||||
} else if (this.dataCapacity < MAX_DATA_LENGTH) {
|
||||
capacity = MAX_DATA_LENGTH;
|
||||
} else {
|
||||
throw new Error("Internal error in Trie2 creation.");
|
||||
}
|
||||
newData = new Uint32Array(capacity);
|
||||
newData.set(this.data.subarray(0, this.dataLength));
|
||||
this.data = newData;
|
||||
this.dataCapacity = capacity;
|
||||
}
|
||||
this.dataLength = newTop;
|
||||
}
|
||||
this.data.set(this.data.subarray(copyBlock, copyBlock + DATA_BLOCK_LENGTH), newBlock);
|
||||
this.map[newBlock >> SHIFT_2] = 0;
|
||||
return newBlock;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._releaseDataBlock = function(block) {
|
||||
this.map[block >> SHIFT_2] = -this.firstFreeBlock;
|
||||
return this.firstFreeBlock = block;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._setIndex2Entry = function(i2, block) {
|
||||
var oldBlock;
|
||||
++this.map[block >> SHIFT_2];
|
||||
oldBlock = this.index2[i2];
|
||||
if (--this.map[oldBlock >> SHIFT_2] === 0) {
|
||||
this._releaseDataBlock(oldBlock);
|
||||
}
|
||||
return this.index2[i2] = block;
|
||||
};
|
||||
|
||||
  // Return a writable data block for code point c, performing copy-on-write:
  // a shared block is copied into a newly allocated one and the index-2
  // entry is repointed at the copy.
  UnicodeTrieBuilder.prototype._getDataBlock = function(c, forLSCP) {
    var i2, newBlock, oldBlock;
    i2 = this._getIndex2Block(c, forLSCP);
    i2 += (c >> SHIFT_2) & INDEX_2_MASK;
    oldBlock = this.index2[i2];
    if (this._isWritableBlock(oldBlock)) {
      return oldBlock;
    }
    newBlock = this._allocDataBlock(oldBlock);
    this._setIndex2Entry(i2, newBlock);
    return newBlock;
  };
|
||||
|
||||
UnicodeTrieBuilder.prototype._fillBlock = function(block, start, limit, value, initialValue, overwrite) {
|
||||
var i, _i, _j, _ref, _ref1, _ref2, _ref3;
|
||||
if (overwrite) {
|
||||
for (i = _i = _ref = block + start, _ref1 = block + limit; _i < _ref1; i = _i += 1) {
|
||||
this.data[i] = value;
|
||||
}
|
||||
} else {
|
||||
for (i = _j = _ref2 = block + start, _ref3 = block + limit; _j < _ref3; i = _j += 1) {
|
||||
if (this.data[i] === initialValue) {
|
||||
this.data[i] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._writeBlock = function(block, value) {
|
||||
var limit;
|
||||
limit = block + DATA_BLOCK_LENGTH;
|
||||
while (block < limit) {
|
||||
this.data[block++] = value;
|
||||
}
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._findHighStart = function(highValue) {
|
||||
var block, c, data32, i1, i2, i2Block, index2NullOffset, initialValue, j, nullBlock, prev, prevBlock, prevI2Block, value;
|
||||
data32 = this.data;
|
||||
initialValue = this.initialValue;
|
||||
index2NullOffset = this.index2NullOffset;
|
||||
nullBlock = this.dataNullOffset;
|
||||
if (highValue === initialValue) {
|
||||
prevI2Block = index2NullOffset;
|
||||
prevBlock = nullBlock;
|
||||
} else {
|
||||
prevI2Block = -1;
|
||||
prevBlock = -1;
|
||||
}
|
||||
prev = 0x110000;
|
||||
i1 = INDEX_1_LENGTH;
|
||||
c = prev;
|
||||
while (c > 0) {
|
||||
i2Block = this.index1[--i1];
|
||||
if (i2Block === prevI2Block) {
|
||||
c -= CP_PER_INDEX_1_ENTRY;
|
||||
continue;
|
||||
}
|
||||
prevI2Block = i2Block;
|
||||
if (i2Block === index2NullOffset) {
|
||||
if (highValue !== initialValue) {
|
||||
return c;
|
||||
}
|
||||
c -= CP_PER_INDEX_1_ENTRY;
|
||||
} else {
|
||||
i2 = INDEX_2_BLOCK_LENGTH;
|
||||
while (i2 > 0) {
|
||||
block = this.index2[i2Block + --i2];
|
||||
if (block === prevBlock) {
|
||||
c -= DATA_BLOCK_LENGTH;
|
||||
continue;
|
||||
}
|
||||
prevBlock = block;
|
||||
if (block === nullBlock) {
|
||||
if (highValue !== initialValue) {
|
||||
return c;
|
||||
}
|
||||
c -= DATA_BLOCK_LENGTH;
|
||||
} else {
|
||||
j = DATA_BLOCK_LENGTH;
|
||||
while (j > 0) {
|
||||
value = data32[block + --j];
|
||||
if (value !== highValue) {
|
||||
return c;
|
||||
}
|
||||
--c;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return 0;
|
||||
};
|
||||
|
||||
equal_int = function(a, s, t, length) {
|
||||
var i, _i;
|
||||
for (i = _i = 0; _i < length; i = _i += 1) {
|
||||
if (a[s + i] !== a[t + i]) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._findSameDataBlock = function(dataLength, otherBlock, blockLength) {
|
||||
var block;
|
||||
dataLength -= blockLength;
|
||||
block = 0;
|
||||
while (block <= dataLength) {
|
||||
if (equal_int(this.data, block, otherBlock, blockLength)) {
|
||||
return block;
|
||||
}
|
||||
block += DATA_GRANULARITY;
|
||||
}
|
||||
return -1;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._findSameIndex2Block = function(index2Length, otherBlock) {
|
||||
var block, _i;
|
||||
index2Length -= INDEX_2_BLOCK_LENGTH;
|
||||
for (block = _i = 0; _i <= index2Length; block = _i += 1) {
|
||||
if (equal_int(this.index2, block, otherBlock, INDEX_2_BLOCK_LENGTH)) {
|
||||
return block;
|
||||
}
|
||||
}
|
||||
return -1;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._compactData = function() {
|
||||
var blockCount, blockLength, i, mapIndex, movedStart, newStart, overlap, start, _i, _j, _k, _l, _ref;
|
||||
newStart = DATA_START_OFFSET;
|
||||
start = 0;
|
||||
i = 0;
|
||||
while (start < newStart) {
|
||||
this.map[i++] = start;
|
||||
start += DATA_BLOCK_LENGTH;
|
||||
}
|
||||
blockLength = 64;
|
||||
blockCount = blockLength >> SHIFT_2;
|
||||
start = newStart;
|
||||
while (start < this.dataLength) {
|
||||
if (start === DATA_0800_OFFSET) {
|
||||
blockLength = DATA_BLOCK_LENGTH;
|
||||
blockCount = 1;
|
||||
}
|
||||
if (this.map[start >> SHIFT_2] <= 0) {
|
||||
start += blockLength;
|
||||
continue;
|
||||
}
|
||||
if ((movedStart = this._findSameDataBlock(newStart, start, blockLength)) >= 0) {
|
||||
mapIndex = start >> SHIFT_2;
|
||||
for (i = _i = blockCount; _i > 0; i = _i += -1) {
|
||||
this.map[mapIndex++] = movedStart;
|
||||
movedStart += DATA_BLOCK_LENGTH;
|
||||
}
|
||||
start += blockLength;
|
||||
continue;
|
||||
}
|
||||
overlap = blockLength - DATA_GRANULARITY;
|
||||
while (overlap > 0 && !equal_int(this.data, newStart - overlap, start, overlap)) {
|
||||
overlap -= DATA_GRANULARITY;
|
||||
}
|
||||
if (overlap > 0 || newStart < start) {
|
||||
movedStart = newStart - overlap;
|
||||
mapIndex = start >> SHIFT_2;
|
||||
for (i = _j = blockCount; _j > 0; i = _j += -1) {
|
||||
this.map[mapIndex++] = movedStart;
|
||||
movedStart += DATA_BLOCK_LENGTH;
|
||||
}
|
||||
start += overlap;
|
||||
for (i = _k = _ref = blockLength - overlap; _k > 0; i = _k += -1) {
|
||||
this.data[newStart++] = this.data[start++];
|
||||
}
|
||||
} else {
|
||||
mapIndex = start >> SHIFT_2;
|
||||
for (i = _l = blockCount; _l > 0; i = _l += -1) {
|
||||
this.map[mapIndex++] = start;
|
||||
start += DATA_BLOCK_LENGTH;
|
||||
}
|
||||
newStart = start;
|
||||
}
|
||||
}
|
||||
i = 0;
|
||||
while (i < this.index2Length) {
|
||||
if (i === INDEX_GAP_OFFSET) {
|
||||
i += INDEX_GAP_LENGTH;
|
||||
}
|
||||
this.index2[i] = this.map[this.index2[i] >> SHIFT_2];
|
||||
++i;
|
||||
}
|
||||
this.dataNullOffset = this.map[this.dataNullOffset >> SHIFT_2];
|
||||
while ((newStart & (DATA_GRANULARITY - 1)) !== 0) {
|
||||
this.data[newStart++] = this.initialValue;
|
||||
}
|
||||
this.dataLength = newStart;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._compactIndex2 = function() {
|
||||
var i, movedStart, newStart, overlap, start, _i, _j, _ref;
|
||||
newStart = INDEX_2_BMP_LENGTH;
|
||||
start = 0;
|
||||
i = 0;
|
||||
while (start < newStart) {
|
||||
this.map[i++] = start;
|
||||
start += INDEX_2_BLOCK_LENGTH;
|
||||
}
|
||||
newStart += UTF8_2B_INDEX_2_LENGTH + ((this.highStart - 0x10000) >> SHIFT_1);
|
||||
start = INDEX_2_NULL_OFFSET;
|
||||
while (start < this.index2Length) {
|
||||
if ((movedStart = this._findSameIndex2Block(newStart, start)) >= 0) {
|
||||
this.map[start >> SHIFT_1_2] = movedStart;
|
||||
start += INDEX_2_BLOCK_LENGTH;
|
||||
continue;
|
||||
}
|
||||
overlap = INDEX_2_BLOCK_LENGTH - 1;
|
||||
while (overlap > 0 && !equal_int(this.index2, newStart - overlap, start, overlap)) {
|
||||
--overlap;
|
||||
}
|
||||
if (overlap > 0 || newStart < start) {
|
||||
this.map[start >> SHIFT_1_2] = newStart - overlap;
|
||||
start += overlap;
|
||||
for (i = _i = _ref = INDEX_2_BLOCK_LENGTH - overlap; _i > 0; i = _i += -1) {
|
||||
this.index2[newStart++] = this.index2[start++];
|
||||
}
|
||||
} else {
|
||||
this.map[start >> SHIFT_1_2] = start;
|
||||
start += INDEX_2_BLOCK_LENGTH;
|
||||
newStart = start;
|
||||
}
|
||||
}
|
||||
for (i = _j = 0; _j < INDEX_1_LENGTH; i = _j += 1) {
|
||||
this.index1[i] = this.map[this.index1[i] >> SHIFT_1_2];
|
||||
}
|
||||
this.index2NullOffset = this.map[this.index2NullOffset >> SHIFT_1_2];
|
||||
while ((newStart & ((DATA_GRANULARITY - 1) | 1)) !== 0) {
|
||||
this.index2[newStart++] = 0x0000ffff << INDEX_SHIFT;
|
||||
}
|
||||
return this.index2Length = newStart;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype._compact = function() {
|
||||
var highStart, highValue, suppHighStart;
|
||||
highValue = this.get(0x10ffff);
|
||||
highStart = this._findHighStart(highValue);
|
||||
highStart = (highStart + (CP_PER_INDEX_1_ENTRY - 1)) & ~(CP_PER_INDEX_1_ENTRY - 1);
|
||||
if (highStart === 0x110000) {
|
||||
highValue = this.errorValue;
|
||||
}
|
||||
this.highStart = highStart;
|
||||
if (this.highStart < 0x110000) {
|
||||
suppHighStart = this.highStart <= 0x10000 ? 0x10000 : this.highStart;
|
||||
this.setRange(suppHighStart, 0x10ffff, this.initialValue, true);
|
||||
}
|
||||
this._compactData();
|
||||
if (this.highStart > 0x10000) {
|
||||
this._compactIndex2();
|
||||
}
|
||||
this.data[this.dataLength++] = highValue;
|
||||
while ((this.dataLength & (DATA_GRANULARITY - 1)) !== 0) {
|
||||
this.data[this.dataLength++] = this.initialValue;
|
||||
}
|
||||
return this.isCompacted = true;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype.freeze = function() {
|
||||
var allIndexesLength, data, dataMove, dest, destIdx, i, index1Length, index2Offset, indexLength, _i, _j, _k, _l, _m, _n, _ref, _ref1, _ref2, _ref3;
|
||||
if (!this.isCompacted) {
|
||||
this._compact();
|
||||
}
|
||||
if (this.highStart <= 0x10000) {
|
||||
allIndexesLength = INDEX_1_OFFSET;
|
||||
} else {
|
||||
allIndexesLength = this.index2Length;
|
||||
}
|
||||
dataMove = allIndexesLength;
|
||||
if (allIndexesLength > MAX_INDEX_LENGTH || (dataMove + this.dataNullOffset) > 0xffff || (dataMove + DATA_0800_OFFSET) > 0xffff || (dataMove + this.dataLength) > MAX_DATA_LENGTH) {
|
||||
throw new Error("Trie data is too large.");
|
||||
}
|
||||
indexLength = allIndexesLength + this.dataLength;
|
||||
data = new Int32Array(indexLength);
|
||||
destIdx = 0;
|
||||
for (i = _i = 0; _i < INDEX_2_BMP_LENGTH; i = _i += 1) {
|
||||
data[destIdx++] = (this.index2[i] + dataMove) >> INDEX_SHIFT;
|
||||
}
|
||||
for (i = _j = 0, _ref = 0xc2 - 0xc0; _j < _ref; i = _j += 1) {
|
||||
data[destIdx++] = dataMove + BAD_UTF8_DATA_OFFSET;
|
||||
}
|
||||
for (i = _k = i, _ref1 = 0xe0 - 0xc0; _k < _ref1; i = _k += 1) {
|
||||
data[destIdx++] = dataMove + this.index2[i << (6 - SHIFT_2)];
|
||||
}
|
||||
if (this.highStart > 0x10000) {
|
||||
index1Length = (this.highStart - 0x10000) >> SHIFT_1;
|
||||
index2Offset = INDEX_2_BMP_LENGTH + UTF8_2B_INDEX_2_LENGTH + index1Length;
|
||||
for (i = _l = 0; _l < index1Length; i = _l += 1) {
|
||||
data[destIdx++] = INDEX_2_OFFSET + this.index1[i + OMITTED_BMP_INDEX_1_LENGTH];
|
||||
}
|
||||
for (i = _m = 0, _ref2 = this.index2Length - index2Offset; _m < _ref2; i = _m += 1) {
|
||||
data[destIdx++] = (dataMove + this.index2[index2Offset + i]) >> INDEX_SHIFT;
|
||||
}
|
||||
}
|
||||
for (i = _n = 0, _ref3 = this.dataLength; _n < _ref3; i = _n += 1) {
|
||||
data[destIdx++] = this.data[i];
|
||||
}
|
||||
dest = new UnicodeTrie({
|
||||
data: data,
|
||||
highStart: this.highStart,
|
||||
errorValue: this.errorValue
|
||||
});
|
||||
return dest;
|
||||
};
|
||||
|
||||
UnicodeTrieBuilder.prototype.toBuffer = function() {
|
||||
var b, buf, compressed, data, i, trie, _i, _len;
|
||||
trie = this.freeze();
|
||||
data = new Uint8Array(trie.data.buffer);
|
||||
compressed = pako.deflateRaw(data);
|
||||
compressed = pako.deflateRaw(compressed);
|
||||
buf = new Buffer(compressed.length + 12);
|
||||
buf.writeUInt32BE(trie.highStart, 0);
|
||||
buf.writeUInt32BE(trie.errorValue, 4);
|
||||
buf.writeUInt32BE(data.length, 8);
|
||||
for (i = _i = 0, _len = compressed.length; _i < _len; i = ++_i) {
|
||||
b = compressed[i];
|
||||
buf[i + 12] = b;
|
||||
}
|
||||
return buf;
|
||||
};
|
||||
|
||||
return UnicodeTrieBuilder;
|
||||
|
||||
})();
|
||||
|
||||
module.exports = UnicodeTrieBuilder;
|
||||
5
BACK_BACK/node_modules/unicode-trie/coverage.js
generated
vendored
Executable file
5
BACK_BACK/node_modules/unicode-trie/coverage.js
generated
vendored
Executable file
|
|
@ -0,0 +1,5 @@
|
|||
// Mocha --require hook (see the Makefile's `coverage` target): registers
// coffee-coverage so the package's CoffeeScript sources are instrumented
// for the html-cov reporter; tests and vendored code are excluded.
require('coffee-coverage').register({
  basePath: __dirname,
  path: 'relative',
  exclude: ['/test', '/node_modules', '/.git'],
});
|
||||
120
BACK_BACK/node_modules/unicode-trie/index.coffee
generated
vendored
Executable file
120
BACK_BACK/node_modules/unicode-trie/index.coffee
generated
vendored
Executable file
|
|
@ -0,0 +1,120 @@
|
|||
inflate = require 'tiny-inflate'
|
||||
|
||||
# Read-only trie for fast Unicode character metadata lookup, ported from
# ICU's UTrie2. Construct either from a serialized Buffer/Uint8Array (as
# produced by the builder's toBuffer) or from pre-parsed
# {data, highStart, errorValue}.
class UnicodeTrie
  # Shift size for getting the index-1 table offset.
  SHIFT_1 = 6 + 5

  # Shift size for getting the index-2 table offset.
  SHIFT_2 = 5

  # Difference between the two shift sizes,
  # for getting an index-1 offset from an index-2 offset. 6=11-5
  SHIFT_1_2 = SHIFT_1 - SHIFT_2

  # Number of index-1 entries for the BMP. 32=0x20
  # This part of the index-1 table is omitted from the serialized form.
  OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> SHIFT_1

  # Number of entries in an index-2 block. 64=0x40
  INDEX_2_BLOCK_LENGTH = 1 << SHIFT_1_2

  # Mask for getting the lower bits for the in-index-2-block offset.
  INDEX_2_MASK = INDEX_2_BLOCK_LENGTH - 1

  # Shift size for shifting left the index array values.
  # Increases possible data size with 16-bit index values at the cost
  # of compactability.
  # This requires data blocks to be aligned by DATA_GRANULARITY.
  INDEX_SHIFT = 2

  # Number of entries in a data block. 32=0x20
  DATA_BLOCK_LENGTH = 1 << SHIFT_2

  # Mask for getting the lower bits for the in-data-block offset.
  DATA_MASK = DATA_BLOCK_LENGTH - 1

  # The part of the index-2 table for U+D800..U+DBFF stores values for
  # lead surrogate code _units_ not code _points_.
  # Values for lead surrogate code _points_ are indexed with this portion of the table.
  # Length=32=0x20=0x400>>SHIFT_2. (There are 1024=0x400 lead surrogates.)
  LSCP_INDEX_2_OFFSET = 0x10000 >> SHIFT_2
  LSCP_INDEX_2_LENGTH = 0x400 >> SHIFT_2

  # Count the lengths of both BMP pieces. 2080=0x820
  INDEX_2_BMP_LENGTH = LSCP_INDEX_2_OFFSET + LSCP_INDEX_2_LENGTH

  # The 2-byte UTF-8 version of the index-2 table follows at offset 2080=0x820.
  # Length 32=0x20 for lead bytes C0..DF, regardless of SHIFT_2.
  UTF8_2B_INDEX_2_OFFSET = INDEX_2_BMP_LENGTH
  UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6 # U+0800 is the first code point after 2-byte UTF-8

  # The index-1 table, only used for supplementary code points, at offset 2112=0x840.
  # Variable length, for code points up to highStart, where the last single-value range starts.
  # Maximum length 512=0x200=0x100000>>SHIFT_1.
  # (For 0x100000 supplementary code points U+10000..U+10ffff.)
  #
  # The part of the index-2 table for supplementary code points starts
  # after this index-1 table.
  #
  # Both the index-1 table and the following part of the index-2 table
  # are omitted completely if there is only BMP data.
  INDEX_1_OFFSET = UTF8_2B_INDEX_2_OFFSET + UTF8_2B_INDEX_2_LENGTH

  # The alignment size of a data block. Also the granularity for compaction.
  DATA_GRANULARITY = 1 << INDEX_SHIFT

  # `data` is either a serialized trie (a 12-byte big-endian header followed
  # by the double-deflated trie data) or a pre-parsed plain object.
  constructor: (data) ->
    # Duck-type Buffers rather than using instanceof, so environments
    # without a Buffer shim still work with Uint8Array input.
    isBuffer = typeof data.readUInt32BE is 'function' and typeof data.slice is 'function'

    if isBuffer or data instanceof Uint8Array
      # read binary format
      if isBuffer
        @highStart = data.readUInt32BE 0
        @errorValue = data.readUInt32BE 4
        uncompressedLength = data.readUInt32BE 8
        data = data.slice 12
      else
        # header fields are read big-endian (DataView default)
        view = new DataView data.buffer
        @highStart = view.getUint32 0
        @errorValue = view.getUint32 4
        uncompressedLength = view.getUint32 8
        data = data.subarray 12

      # double inflate the actual trie data (the writer deflated it twice)
      data = inflate data, new Uint8Array uncompressedLength
      data = inflate data, new Uint8Array uncompressedLength
      @data = new Uint32Array data.buffer

    else
      # pre-parsed data
      {@data, @highStart, @errorValue} = data

  # Look up the value for a single code point. Out-of-range code points
  # return @errorValue.
  get: (codePoint) ->
    if codePoint < 0 or codePoint > 0x10ffff
      return @errorValue

    if (codePoint < 0xd800 or (codePoint > 0xdbff and codePoint <= 0xffff))
      # Ordinary BMP code point, excluding leading surrogates.
      # BMP uses a single level lookup. BMP index starts at offset 0 in the index.
      # data is stored in the index array itself.
      index = (@data[codePoint >> SHIFT_2] << INDEX_SHIFT) + (codePoint & DATA_MASK)
      return @data[index]

    if codePoint <= 0xffff
      # Lead Surrogate Code Point. A Separate index section is stored for
      # lead surrogate code units and code points.
      # The main index has the code unit data.
      # For this function, we need the code point data.
      index = (@data[LSCP_INDEX_2_OFFSET + ((codePoint - 0xd800) >> SHIFT_2)] << INDEX_SHIFT) + (codePoint & DATA_MASK)
      return @data[index]

    if codePoint < @highStart
      # Supplemental code point, use two-level lookup.
      index = @data[(INDEX_1_OFFSET - OMITTED_BMP_INDEX_1_LENGTH) + (codePoint >> SHIFT_1)]
      index = @data[index + ((codePoint >> SHIFT_2) & INDEX_2_MASK)]
      index = (index << INDEX_SHIFT) + (codePoint & DATA_MASK)
      return @data[index]

    # Everything at or above highStart shares one value, stored near the
    # end of the data array.
    return @data[@data.length - DATA_GRANULARITY]

module.exports = UnicodeTrie
|
||||
91
BACK_BACK/node_modules/unicode-trie/index.js
generated
vendored
Executable file
91
BACK_BACK/node_modules/unicode-trie/index.js
generated
vendored
Executable file
|
|
@ -0,0 +1,91 @@
|
|||
// Generated by CoffeeScript 1.7.1
|
||||
var UnicodeTrie, inflate;
|
||||
|
||||
inflate = require('tiny-inflate');
|
||||
|
||||
var UnicodeTrie = (function() {
  // --- UTrie2 layout constants (mirrors ICU's utrie2.h) ---

  // Shift sizes for the two lookup levels.
  var SHIFT_1 = 6 + 5;
  var SHIFT_2 = 5;
  var SHIFT_1_2 = SHIFT_1 - SHIFT_2;

  // Number of index-1 entries for the BMP (omitted from the serialized index).
  var OMITTED_BMP_INDEX_1_LENGTH = 0x10000 >> SHIFT_1;

  // Entries per index-2 block, and the mask for the low bits.
  var INDEX_2_BLOCK_LENGTH = 1 << SHIFT_1_2;
  var INDEX_2_MASK = INDEX_2_BLOCK_LENGTH - 1;

  // Shift applied to an index value to obtain a data offset.
  var INDEX_SHIFT = 2;

  // Entries per data block, and the mask for the low bits.
  var DATA_BLOCK_LENGTH = 1 << SHIFT_2;
  var DATA_MASK = DATA_BLOCK_LENGTH - 1;

  // Dedicated index-2 section for lead-surrogate code points.
  var LSCP_INDEX_2_OFFSET = 0x10000 >> SHIFT_2;
  var LSCP_INDEX_2_LENGTH = 0x400 >> SHIFT_2;
  var INDEX_2_BMP_LENGTH = LSCP_INDEX_2_OFFSET + LSCP_INDEX_2_LENGTH;

  // UTF-8 two-byte section, kept for layout compatibility with ICU.
  var UTF8_2B_INDEX_2_OFFSET = INDEX_2_BMP_LENGTH;
  var UTF8_2B_INDEX_2_LENGTH = 0x800 >> 6;

  // Start of the index-1 table for supplementary code points.
  var INDEX_1_OFFSET = UTF8_2B_INDEX_2_OFFSET + UTF8_2B_INDEX_2_LENGTH;

  // Data values are granularity-aligned; the value shared by all code
  // points >= highStart sits in the last granularity-sized slot.
  var DATA_GRANULARITY = 1 << INDEX_SHIFT;

  /**
   * Construct a trie either from serialized bytes (a Node Buffer or a
   * Uint8Array: 12-byte big-endian header followed by a doubly-deflated
   * payload) or from a pre-parsed `{data, highStart, errorValue}` object.
   */
  function UnicodeTrie(data) {
    var looksLikeBuffer = typeof data.readUInt32BE === 'function' &&
      typeof data.slice === 'function';

    if (!looksLikeBuffer && !(data instanceof Uint8Array)) {
      // Pre-parsed form: copy the fields straight across.
      this.data = data.data;
      this.highStart = data.highStart;
      this.errorValue = data.errorValue;
      return;
    }

    var inflatedSize;
    if (looksLikeBuffer) {
      // Node Buffer: read the big-endian header fields directly.
      this.highStart = data.readUInt32BE(0);
      this.errorValue = data.readUInt32BE(4);
      inflatedSize = data.readUInt32BE(8);
      data = data.slice(12);
    } else {
      // Plain Uint8Array: parse the header through a DataView.
      var header = new DataView(data.buffer);
      this.highStart = header.getUint32(0);
      this.errorValue = header.getUint32(4);
      inflatedSize = header.getUint32(8);
      data = data.subarray(12);
    }

    // The serialized trie is deflated twice; undo both layers.
    data = inflate(data, new Uint8Array(inflatedSize));
    data = inflate(data, new Uint8Array(inflatedSize));
    this.data = new Uint32Array(data.buffer);
  }

  /**
   * Look up the value stored for a code point.
   * Out-of-range inputs yield `errorValue`.
   */
  UnicodeTrie.prototype.get = function(codePoint) {
    if (codePoint < 0 || codePoint > 0x10ffff) {
      return this.errorValue;
    }

    var offset;
    if (codePoint < 0xd800 || (codePoint > 0xdbff && codePoint <= 0xffff)) {
      // Ordinary BMP code point (not a lead surrogate): index and data
      // live in the same array, so a single-level lookup suffices.
      offset = (this.data[codePoint >> SHIFT_2] << INDEX_SHIFT) + (codePoint & DATA_MASK);
      return this.data[offset];
    }

    if (codePoint <= 0xffff) {
      // Lead surrogate code point: use the dedicated index section
      // (the main index holds the code-unit data instead).
      offset = (this.data[LSCP_INDEX_2_OFFSET + ((codePoint - 0xd800) >> SHIFT_2)] << INDEX_SHIFT) +
        (codePoint & DATA_MASK);
      return this.data[offset];
    }

    if (codePoint < this.highStart) {
      // Supplementary code point: two-level lookup via index-1 then index-2.
      offset = this.data[(INDEX_1_OFFSET - OMITTED_BMP_INDEX_1_LENGTH) + (codePoint >> SHIFT_1)];
      offset = this.data[offset + ((codePoint >> SHIFT_2) & INDEX_2_MASK)];
      return this.data[(offset << INDEX_SHIFT) + (codePoint & DATA_MASK)];
    }

    // Everything at or above highStart shares a single value.
    return this.data[this.data.length - DATA_GRANULARITY];
  };

  return UnicodeTrie;
})();
|
||||
|
||||
module.exports = UnicodeTrie;
|
||||
90
BACK_BACK/node_modules/unicode-trie/node_modules/pako/CHANGELOG.md
generated
vendored
Executable file
90
BACK_BACK/node_modules/unicode-trie/node_modules/pako/CHANGELOG.md
generated
vendored
Executable file
|
|
@ -0,0 +1,90 @@
|
|||
1.0.2 / 2016-07-21
|
||||
------------------
|
||||
|
||||
- Fixed nasty bug in deflate (wrong `d_buf` offset), which could cause
|
||||
broken data in some rare cases.
|
||||
- Also released as 0.2.9 to give chance to old dependents, not updated to 1.x
|
||||
version.
|
||||
|
||||
|
||||
1.0.1 / 2016-04-01
|
||||
------------------
|
||||
|
||||
- Added dictionary support. Thanks to @dignifiedquire.
|
||||
|
||||
|
||||
1.0.0 / 2016-02-17
|
||||
------------------
|
||||
|
||||
- Maintenance release (semver, coding style).
|
||||
|
||||
|
||||
0.2.8 / 2015-09-14
|
||||
------------------
|
||||
|
||||
- Fixed regression after 0.2.4 for edge conditions in inflate wrapper (#65).
|
||||
Added more tests to cover possible cases.
|
||||
|
||||
|
||||
0.2.7 / 2015-06-09
|
||||
------------------
|
||||
|
||||
- Added Z_SYNC_FLUSH support. Thanks to @TinoLange.
|
||||
|
||||
|
||||
0.2.6 / 2015-03-24
|
||||
------------------
|
||||
|
||||
- Allow ArrayBuffer input.
|
||||
|
||||
|
||||
0.2.5 / 2014-07-19
|
||||
------------------
|
||||
|
||||
- Workaround for Chrome 38.0.2096.0 script parser bug, #30.
|
||||
|
||||
|
||||
0.2.4 / 2014-07-07
|
||||
------------------
|
||||
|
||||
- Fixed bug in inflate wrapper, #29
|
||||
|
||||
|
||||
0.2.3 / 2014-06-09
|
||||
------------------
|
||||
|
||||
- Maintenance release, dependencies update.
|
||||
|
||||
|
||||
0.2.2 / 2014-06-04
|
||||
------------------
|
||||
|
||||
- Fixed iOS 5.1 Safary issue with `apply(typed_array)`, #26.
|
||||
|
||||
|
||||
0.2.1 / 2014-05-01
|
||||
------------------
|
||||
|
||||
- Fixed collision on switch dynamic/fixed tables.
|
||||
|
||||
|
||||
0.2.0 / 2014-04-18
|
||||
------------------
|
||||
|
||||
- Added custom gzip headers support.
|
||||
- Added strings support.
|
||||
- Improved memory allocations for small chunks.
|
||||
- ZStream properties rename/cleanup.
|
||||
- More coverage tests.
|
||||
|
||||
|
||||
0.1.1 / 2014-03-20
|
||||
------------------
|
||||
|
||||
- Bugfixes for inflate/deflate.
|
||||
|
||||
|
||||
0.1.0 / 2014-03-15
|
||||
------------------
|
||||
|
||||
- First release.
|
||||
21
BACK_BACK/node_modules/unicode-trie/node_modules/pako/LICENSE
generated
vendored
Executable file
21
BACK_BACK/node_modules/unicode-trie/node_modules/pako/LICENSE
generated
vendored
Executable file
|
|
@ -0,0 +1,21 @@
|
|||
(The MIT License)
|
||||
|
||||
Copyright (C) 2014-2016 by Vitaly Puzrin
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in
|
||||
all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
THE SOFTWARE.
|
||||
176
BACK_BACK/node_modules/unicode-trie/node_modules/pako/README.md
generated
vendored
Executable file
176
BACK_BACK/node_modules/unicode-trie/node_modules/pako/README.md
generated
vendored
Executable file
|
|
@ -0,0 +1,176 @@
|
|||
pako - zlib port to javascript, very fast!
|
||||
==========================================
|
||||
|
||||
[](https://travis-ci.org/nodeca/pako)
|
||||
[](https://www.npmjs.org/package/pako)
|
||||
|
||||
__Why pako is cool:__
|
||||
|
||||
- Almost as fast in modern JS engines as C implementation (see benchmarks).
|
||||
- Works in browsers, you can browserify any separate component.
|
||||
- Chunking support for big blobs.
|
||||
- Results are binary equal to well known [zlib](http://www.zlib.net/) (now v1.2.8 ported).
|
||||
|
||||
This project was done to understand how fast JS can be and is it necessary to
|
||||
develop native C modules for CPU-intensive tasks. Enjoy the result!
|
||||
|
||||
|
||||
__Famous projects, using pako:__
|
||||
|
||||
- [browserify](http://browserify.org/) (via [browserify-zlib](https://github.com/devongovett/browserify-zlib))
|
||||
- [JSZip](http://stuk.github.io/jszip/)
|
||||
- [mincer](https://github.com/nodeca/mincer)
|
||||
- [JS-Git](https://github.com/creationix/js-git) and
|
||||
[Tedit](https://chrome.google.com/webstore/detail/tedit-development-environ/ooekdijbnbbjdfjocaiflnjgoohnblgf)
|
||||
by [@creationix](https://github.com/creationix)
|
||||
|
||||
|
||||
__Benchmarks:__
|
||||
|
||||
```
|
||||
node v0.10.26, 1mb sample:
|
||||
|
||||
deflate-dankogai x 4.73 ops/sec ±0.82% (15 runs sampled)
|
||||
deflate-gildas x 4.58 ops/sec ±2.33% (15 runs sampled)
|
||||
deflate-imaya x 3.22 ops/sec ±3.95% (12 runs sampled)
|
||||
! deflate-pako x 6.99 ops/sec ±0.51% (21 runs sampled)
|
||||
deflate-pako-string x 5.89 ops/sec ±0.77% (18 runs sampled)
|
||||
deflate-pako-untyped x 4.39 ops/sec ±1.58% (14 runs sampled)
|
||||
* deflate-zlib x 14.71 ops/sec ±4.23% (59 runs sampled)
|
||||
inflate-dankogai x 32.16 ops/sec ±0.13% (56 runs sampled)
|
||||
inflate-imaya x 30.35 ops/sec ±0.92% (53 runs sampled)
|
||||
! inflate-pako x 69.89 ops/sec ±1.46% (71 runs sampled)
|
||||
inflate-pako-string x 19.22 ops/sec ±1.86% (49 runs sampled)
|
||||
inflate-pako-untyped x 17.19 ops/sec ±0.85% (32 runs sampled)
|
||||
* inflate-zlib x 70.03 ops/sec ±1.64% (81 runs sampled)
|
||||
|
||||
node v0.11.12, 1mb sample:
|
||||
|
||||
deflate-dankogai x 5.60 ops/sec ±0.49% (17 runs sampled)
|
||||
deflate-gildas x 5.06 ops/sec ±6.00% (16 runs sampled)
|
||||
deflate-imaya x 3.52 ops/sec ±3.71% (13 runs sampled)
|
||||
! deflate-pako x 11.52 ops/sec ±0.22% (32 runs sampled)
|
||||
deflate-pako-string x 9.53 ops/sec ±1.12% (27 runs sampled)
|
||||
deflate-pako-untyped x 5.44 ops/sec ±0.72% (17 runs sampled)
|
||||
* deflate-zlib x 14.05 ops/sec ±3.34% (63 runs sampled)
|
||||
inflate-dankogai x 42.19 ops/sec ±0.09% (56 runs sampled)
|
||||
inflate-imaya x 79.68 ops/sec ±1.07% (68 runs sampled)
|
||||
! inflate-pako x 97.52 ops/sec ±0.83% (80 runs sampled)
|
||||
inflate-pako-string x 45.19 ops/sec ±1.69% (57 runs sampled)
|
||||
inflate-pako-untyped x 24.35 ops/sec ±2.59% (40 runs sampled)
|
||||
* inflate-zlib x 60.32 ops/sec ±1.36% (69 runs sampled)
|
||||
```
|
||||
|
||||
zlib's test is partially affected by marshalling (that makes sense for inflate only).
|
||||
You can change deflate level to 0 in benchmark source, to investigate details.
|
||||
For deflate level 6 results can be considered as correct.
|
||||
|
||||
__Install:__
|
||||
|
||||
node.js:
|
||||
|
||||
```
|
||||
npm install pako
|
||||
```
|
||||
|
||||
browser:
|
||||
|
||||
```
|
||||
bower install pako
|
||||
```
|
||||
|
||||
|
||||
Example & API
|
||||
-------------
|
||||
|
||||
Full docs - http://nodeca.github.io/pako/
|
||||
|
||||
```javascript
|
||||
var pako = require('pako');
|
||||
|
||||
// Deflate
|
||||
//
|
||||
var input = new Uint8Array();
|
||||
//... fill input data here
|
||||
var output = pako.deflate(input);
|
||||
|
||||
// Inflate (simple wrapper can throw exception on broken stream)
|
||||
//
|
||||
var compressed = new Uint8Array();
|
||||
//... fill data to uncompress here
|
||||
try {
|
||||
var result = pako.inflate(compressed);
|
||||
} catch (err) {
|
||||
console.log(err);
|
||||
}
|
||||
|
||||
//
|
||||
// Alternate interface for chunking & without exceptions
|
||||
//
|
||||
|
||||
var inflator = new pako.Inflate();
|
||||
|
||||
inflator.push(chunk1, false);
|
||||
inflator.push(chunk2, false);
|
||||
...
|
||||
inflator.push(chunkN, true); // true -> last chunk
|
||||
|
||||
if (inflator.err) {
|
||||
console.log(inflator.msg);
|
||||
}
|
||||
|
||||
var output = inflator.result;
|
||||
|
||||
```
|
||||
|
||||
Sometimes you may wish to work with strings. For example, to send
|
||||
big objects as json to server. Pako detects input data type. You can
|
||||
force output to be string with option `{ to: 'string' }`.
|
||||
|
||||
```javascript
|
||||
var pako = require('pako');
|
||||
|
||||
var test = { my: 'super', puper: [456, 567], awesome: 'pako' };
|
||||
|
||||
var binaryString = pako.deflate(JSON.stringify(test), { to: 'string' });
|
||||
|
||||
//
|
||||
// Here you can do base64 encode, make xhr requests and so on.
|
||||
//
|
||||
|
||||
var restored = JSON.parse(pako.inflate(binaryString, { to: 'string' }));
|
||||
```
|
||||
|
||||
|
||||
Notes
|
||||
-----
|
||||
|
||||
Pako does not contain some specific zlib functions:
|
||||
|
||||
- __deflate__ - methods `deflateCopy`, `deflateBound`, `deflateParams`,
|
||||
`deflatePending`, `deflatePrime`, `deflateTune`.
|
||||
- __inflate__ - methods `inflateCopy`, `inflateMark`,
|
||||
`inflatePrime`, `inflateGetDictionary`, `inflateSync`, `inflateSyncPoint`, `inflateUndermine`.
|
||||
- High level inflate/deflate wrappers (classes) may not support some flush
|
||||
modes. Those should work: Z_NO_FLUSH, Z_FINISH, Z_SYNC_FLUSH.
|
||||
|
||||
|
||||
Authors
|
||||
-------
|
||||
|
||||
- Andrey Tupitsin [@anrd83](https://github.com/andr83)
|
||||
- Vitaly Puzrin [@puzrin](https://github.com/puzrin)
|
||||
|
||||
Personal thanks to:
|
||||
|
||||
- Vyacheslav Egorov ([@mraleph](https://github.com/mraleph)) for his awesome
|
||||
tutorials about optimising JS code for v8, [IRHydra](http://mrale.ph/irhydra/)
|
||||
tool and his advices.
|
||||
- David Duponchel ([@dduponchel](https://github.com/dduponchel)) for help with
|
||||
testing.
|
||||
|
||||
|
||||
License
|
||||
-------
|
||||
|
||||
MIT
|
||||
6606
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako.js
generated
vendored
Executable file
6606
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
3
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako.min.js
generated
vendored
Executable file
3
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako.min.js
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
3879
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_deflate.js
generated
vendored
Executable file
3879
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_deflate.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
2
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_deflate.min.js
generated
vendored
Executable file
2
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_deflate.min.js
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
3127
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_inflate.js
generated
vendored
Executable file
3127
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_inflate.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
2
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_inflate.min.js
generated
vendored
Executable file
2
BACK_BACK/node_modules/unicode-trie/node_modules/pako/dist/pako_inflate.min.js
generated
vendored
Executable file
File diff suppressed because one or more lines are too long
14
BACK_BACK/node_modules/unicode-trie/node_modules/pako/index.js
generated
vendored
Executable file
14
BACK_BACK/node_modules/unicode-trie/node_modules/pako/index.js
generated
vendored
Executable file
|
|
@ -0,0 +1,14 @@
|
|||
// Public entry point: pako is simply the deflate and inflate submodules
// plus the zlib constants merged into a single namespace object.
'use strict';

var common = require('./lib/utils/common');
var deflateApi = require('./lib/deflate');
var inflateApi = require('./lib/inflate');
var zlibConstants = require('./lib/zlib/constants');

var api = {};
common.assign(api, deflateApi, inflateApi, zlibConstants);

module.exports = api;
|
||||
400
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/deflate.js
generated
vendored
Executable file
400
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/deflate.js
generated
vendored
Executable file
|
|
@ -0,0 +1,400 @@
|
|||
'use strict';
|
||||
|
||||
|
||||
var zlib_deflate = require('./zlib/deflate');
|
||||
var utils = require('./utils/common');
|
||||
var strings = require('./utils/strings');
|
||||
var msg = require('./zlib/messages');
|
||||
var ZStream = require('./zlib/zstream');
|
||||
|
||||
var toString = Object.prototype.toString;
|
||||
|
||||
/* Public constants ==========================================================*/
|
||||
/* ===========================================================================*/
|
||||
|
||||
/* Flush modes used by this wrapper. */
var Z_NO_FLUSH = 0;
var Z_FINISH = 4;

/* Return codes (Z_SYNC_FLUSH doubles as a flush mode in push()). */
var Z_OK = 0;
var Z_STREAM_END = 1;
var Z_SYNC_FLUSH = 2;

/* -1 lets zlib pick its default compression level. */
var Z_DEFAULT_COMPRESSION = -1;

var Z_DEFAULT_STRATEGY = 0;

/* The only compression method supported by zlib. */
var Z_DEFLATED = 8;
|
||||
|
||||
/* ===========================================================================*/
|
||||
|
||||
|
||||
/**
|
||||
* class Deflate
|
||||
*
|
||||
* Generic JS-style wrapper for zlib calls. If you don't need
|
||||
* streaming behaviour - use more simple functions: [[deflate]],
|
||||
* [[deflateRaw]] and [[gzip]].
|
||||
**/
|
||||
|
||||
/* internal
|
||||
* Deflate.chunks -> Array
|
||||
*
|
||||
 * Chunks of output data, if [[Deflate#onData]] not overridden.
|
||||
**/
|
||||
|
||||
/**
|
||||
* Deflate.result -> Uint8Array|Array
|
||||
*
|
||||
* Compressed result, generated by default [[Deflate#onData]]
|
||||
* and [[Deflate#onEnd]] handlers. Filled after you push last chunk
|
||||
* (call [[Deflate#push]] with `Z_FINISH` / `true` param) or if you
|
||||
* push a chunk with explicit flush (call [[Deflate#push]] with
|
||||
* `Z_SYNC_FLUSH` param).
|
||||
**/
|
||||
|
||||
/**
|
||||
* Deflate.err -> Number
|
||||
*
|
||||
* Error code after deflate finished. 0 (Z_OK) on success.
|
||||
* You will not need it in real life, because deflate errors
|
||||
* are possible only on wrong options or bad `onData` / `onEnd`
|
||||
* custom handlers.
|
||||
**/
|
||||
|
||||
/**
|
||||
* Deflate.msg -> String
|
||||
*
|
||||
* Error message, if [[Deflate.err]] != 0
|
||||
**/
|
||||
|
||||
|
||||
/**
|
||||
* new Deflate(options)
|
||||
* - options (Object): zlib deflate options.
|
||||
*
|
||||
* Creates new deflator instance with specified params. Throws exception
|
||||
* on bad params. Supported options:
|
||||
*
|
||||
* - `level`
|
||||
* - `windowBits`
|
||||
* - `memLevel`
|
||||
* - `strategy`
|
||||
* - `dictionary`
|
||||
*
|
||||
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
|
||||
* for more information on these.
|
||||
*
|
||||
* Additional options, for internal needs:
|
||||
*
|
||||
* - `chunkSize` - size of generated data chunks (16K by default)
|
||||
* - `raw` (Boolean) - do raw deflate
|
||||
* - `gzip` (Boolean) - create gzip wrapper
|
||||
* - `to` (String) - if equal to 'string', then result will be "binary string"
|
||||
* (each char code [0..255])
|
||||
* - `header` (Object) - custom header for gzip
|
||||
* - `text` (Boolean) - true if compressed data believed to be text
|
||||
* - `time` (Number) - modification time, unix timestamp
|
||||
* - `os` (Number) - operation system code
|
||||
* - `extra` (Array) - array of bytes with extra data (max 65536)
|
||||
* - `name` (String) - file name (binary string)
|
||||
* - `comment` (String) - comment (binary string)
|
||||
* - `hcrc` (Boolean) - true if header crc should be added
|
||||
*
|
||||
* ##### Example:
|
||||
*
|
||||
* ```javascript
|
||||
* var pako = require('pako')
|
||||
* , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])
|
||||
* , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);
|
||||
*
|
||||
* var deflate = new pako.Deflate({ level: 3});
|
||||
*
|
||||
* deflate.push(chunk1, false);
|
||||
* deflate.push(chunk2, true); // true -> last chunk
|
||||
*
|
||||
* if (deflate.err) { throw new Error(deflate.err); }
|
||||
*
|
||||
* console.log(deflate.result);
|
||||
* ```
|
||||
**/
|
||||
/* Set up a deflate stream: merge options, translate the raw/gzip sugar
 * flags into zlib windowBits encoding, init the zlib state, and install
 * the optional gzip header and preset dictionary. */
function Deflate(options) {
  // Support calling without `new`.
  if (!(this instanceof Deflate)) return new Deflate(options);

  // Caller options merged over zlib defaults (see zlib manual for meanings).
  this.options = utils.assign({
    level: Z_DEFAULT_COMPRESSION,
    method: Z_DEFLATED,
    chunkSize: 16384,
    windowBits: 15,
    memLevel: 8,
    strategy: Z_DEFAULT_STRATEGY,
    to: ''
  }, options || {});

  var opt = this.options;

  // zlib convention: negative windowBits requests a raw deflate stream
  // (no zlib wrapper)...
  if (opt.raw && (opt.windowBits > 0)) {
    opt.windowBits = -opt.windowBits;
  }

  // ...and windowBits + 16 requests a gzip wrapper.
  else if (opt.gzip && (opt.windowBits > 0) && (opt.windowBits < 16)) {
    opt.windowBits += 16;
  }

  this.err = 0; // error code, if happens (0 = Z_OK)
  this.msg = ''; // error message
  this.ended = false; // used to avoid multiple onEnd() calls
  this.chunks = []; // chunks of compressed data

  this.strm = new ZStream();
  this.strm.avail_out = 0; // forces push() to allocate an output buffer first

  var status = zlib_deflate.deflateInit2(
    this.strm,
    opt.level,
    opt.method,
    opt.windowBits,
    opt.memLevel,
    opt.strategy
  );

  // Bail out if zlib rejected the parameters.
  if (status !== Z_OK) {
    throw new Error(msg[status]);
  }

  // Optional custom gzip header fields (name, comment, mtime, ...).
  if (opt.header) {
    zlib_deflate.deflateSetHeader(this.strm, opt.header);
  }

  if (opt.dictionary) {
    var dict;
    // Convert data if needed
    if (typeof opt.dictionary === 'string') {
      // If we need to compress text, change encoding to utf8.
      dict = strings.string2buf(opt.dictionary);
    } else if (toString.call(opt.dictionary) === '[object ArrayBuffer]') {
      dict = new Uint8Array(opt.dictionary);
    } else {
      dict = opt.dictionary;
    }

    status = zlib_deflate.deflateSetDictionary(this.strm, dict);

    if (status !== Z_OK) {
      throw new Error(msg[status]);
    }

    // Remember that a preset dictionary has been installed.
    this._dict_set = true;
  }
}
|
||||
|
||||
/**
|
||||
* Deflate#push(data[, mode]) -> Boolean
|
||||
* - data (Uint8Array|Array|ArrayBuffer|String): input data. Strings will be
|
||||
* converted to utf8 byte sequence.
|
||||
* - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
|
||||
 * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
|
||||
*
|
||||
* Sends input data to deflate pipe, generating [[Deflate#onData]] calls with
|
||||
* new compressed chunks. Returns `true` on success. The last data block must have
|
||||
* mode Z_FINISH (or `true`). That will flush internal pending buffers and call
|
||||
* [[Deflate#onEnd]]. For interim explicit flushes (without ending the stream) you
|
||||
* can use mode Z_SYNC_FLUSH, keeping the compression context.
|
||||
*
|
||||
* On fail call [[Deflate#onEnd]] with error code and return false.
|
||||
*
|
||||
* We strongly recommend to use `Uint8Array` on input for best speed (output
|
||||
* array format is detected automatically). Also, don't skip last param and always
|
||||
* use the same type in your code (boolean or number). That will improve JS speed.
|
||||
*
|
||||
* For regular `Array`-s make sure all elements are [0..255].
|
||||
*
|
||||
* ##### Example
|
||||
*
|
||||
* ```javascript
|
||||
* push(chunk, false); // push one of data chunks
|
||||
* ...
|
||||
* push(chunk, true); // push last chunk
|
||||
* ```
|
||||
**/
|
||||
/* Feed one chunk of input into the deflate stream; see the JSDoc above
 * for the full contract. Emits compressed output via onData(). */
Deflate.prototype.push = function (data, mode) {
  var strm = this.strm;
  var chunkSize = this.options.chunkSize;
  var status, _mode;

  // Pushing after the stream finished (or errored) is a no-op failure.
  if (this.ended) { return false; }

  // Normalize mode: integers pass through; `true` -> Z_FINISH,
  // anything else (including undefined/false) -> Z_NO_FLUSH.
  _mode = (mode === ~~mode) ? mode : ((mode === true) ? Z_FINISH : Z_NO_FLUSH);

  // Convert data if needed
  if (typeof data === 'string') {
    // If we need to compress text, change encoding to utf8.
    strm.input = strings.string2buf(data);
  } else if (toString.call(data) === '[object ArrayBuffer]') {
    strm.input = new Uint8Array(data);
  } else {
    strm.input = data;
  }

  strm.next_in = 0;
  strm.avail_in = strm.input.length;

  do {
    // Allocate a fresh output buffer when the previous one filled up
    // (or on the first iteration — the constructor sets avail_out = 0).
    if (strm.avail_out === 0) {
      strm.output = new utils.Buf8(chunkSize);
      strm.next_out = 0;
      strm.avail_out = chunkSize;
    }
    status = zlib_deflate.deflate(strm, _mode); /* no bad return value */

    if (status !== Z_STREAM_END && status !== Z_OK) {
      this.onEnd(status);
      this.ended = true;
      return false;
    }
    // Emit the buffer when it is full, or when input is exhausted on a
    // finishing/flushing call (partial buffers are trimmed to next_out).
    if (strm.avail_out === 0 || (strm.avail_in === 0 && (_mode === Z_FINISH || _mode === Z_SYNC_FLUSH))) {
      if (this.options.to === 'string') {
        this.onData(strings.buf2binstring(utils.shrinkBuf(strm.output, strm.next_out)));
      } else {
        this.onData(utils.shrinkBuf(strm.output, strm.next_out));
      }
    }
  } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== Z_STREAM_END);

  // Finalize on the last chunk.
  if (_mode === Z_FINISH) {
    status = zlib_deflate.deflateEnd(this.strm);
    this.onEnd(status);
    this.ended = true;
    return status === Z_OK;
  }

  // callback interim results if Z_SYNC_FLUSH.
  if (_mode === Z_SYNC_FLUSH) {
    this.onEnd(Z_OK);
    strm.avail_out = 0; // so the next push() starts with a fresh buffer
    return true;
  }

  return true;
};
|
||||
|
||||
|
||||
/**
|
||||
* Deflate#onData(chunk) -> Void
|
||||
 * - chunk (Uint8Array|Array|String): output data. Type of array depends
|
||||
* on js engine support. When string output requested, each chunk
|
||||
* will be string.
|
||||
*
|
||||
* By default, stores data blocks in `chunks[]` property and glue
|
||||
* those in `onEnd`. Override this handler, if you need another behaviour.
|
||||
**/
|
||||
/* Default sink: buffer every produced chunk; onEnd() glues them together.
 * Override this handler for streaming consumption. */
Deflate.prototype.onData = function (chunk) {
  var collected = this.chunks;
  collected.push(chunk);
};
|
||||
|
||||
|
||||
/**
|
||||
* Deflate#onEnd(status) -> Void
|
||||
* - status (Number): deflate status. 0 (Z_OK) on success,
|
||||
* other if not.
|
||||
*
|
||||
* Called once after you tell deflate that the input stream is
|
||||
* complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)
|
||||
* or if an error happened. By default - join collected chunks,
|
||||
* free memory and fill `results` / `err` properties.
|
||||
**/
|
||||
/* Default end-of-stream handler: on success, join the collected chunks
 * into `result` (string or flat buffer, depending on options.to), then
 * release the chunk list and record the final status/message. */
Deflate.prototype.onEnd = function (status) {
  if (status === Z_OK) {
    this.result = (this.options.to === 'string')
      ? this.chunks.join('')
      : utils.flattenChunks(this.chunks);
  }
  this.chunks = [];
  this.err = status;
  this.msg = this.strm.msg;
};
|
||||
|
||||
|
||||
/**
|
||||
* deflate(data[, options]) -> Uint8Array|Array|String
|
||||
* - data (Uint8Array|Array|String): input data to compress.
|
||||
* - options (Object): zlib deflate options.
|
||||
*
|
||||
* Compress `data` with deflate algorithm and `options`.
|
||||
*
|
||||
* Supported options are:
|
||||
*
|
||||
* - level
|
||||
* - windowBits
|
||||
* - memLevel
|
||||
* - strategy
|
||||
* - dictionary
|
||||
*
|
||||
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
|
||||
* for more information on these.
|
||||
*
|
||||
* Sugar (options):
|
||||
*
|
||||
* - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify
|
||||
* negative windowBits implicitly.
|
||||
* - `to` (String) - if equal to 'string', then result will be "binary string"
|
||||
* (each char code [0..255])
|
||||
*
|
||||
* ##### Example:
|
||||
*
|
||||
* ```javascript
|
||||
* var pako = require('pako')
|
||||
* , data = Uint8Array([1,2,3,4,5,6,7,8,9]);
|
||||
*
|
||||
* console.log(pako.deflate(data));
|
||||
* ```
|
||||
**/
|
||||
/**
 * One-shot helper: compress `input` in a single push and return the result.
 * Throws the deflator's error message on failure (only possible with bad
 * options or misbehaving custom handlers).
 */
function deflate(input, options) {
  var deflator = new Deflate(options);
  deflator.push(input, true);

  // Unreachable with sane options, since the whole input was pushed.
  if (deflator.err) {
    throw deflator.msg;
  }

  return deflator.result;
}
|
||||
|
||||
|
||||
/**
|
||||
* deflateRaw(data[, options]) -> Uint8Array|Array|String
|
||||
* - data (Uint8Array|Array|String): input data to compress.
|
||||
* - options (Object): zlib deflate options.
|
||||
*
|
||||
* The same as [[deflate]], but creates raw data, without wrapper
|
||||
* (header and adler32 crc).
|
||||
**/
|
||||
/**
 * Same as deflate(), but produces a raw stream — no zlib header or
 * adler32 trailer. Note: sets `raw` on the caller's options object.
 */
function deflateRaw(input, options) {
  var opts = options || {};
  opts.raw = true;
  return deflate(input, opts);
}
|
||||
|
||||
|
||||
/**
|
||||
* gzip(data[, options]) -> Uint8Array|Array|String
|
||||
* - data (Uint8Array|Array|String): input data to compress.
|
||||
* - options (Object): zlib deflate options.
|
||||
*
|
||||
* The same as [[deflate]], but create gzip wrapper instead of
|
||||
* deflate one.
|
||||
**/
|
||||
/**
 * Same as deflate(), but wraps the output in a gzip container instead of
 * a zlib one. Note: sets `gzip` on the caller's options object.
 */
function gzip(input, options) {
  var opts = options || {};
  opts.gzip = true;
  return deflate(input, opts);
}
|
||||
|
||||
|
||||
exports.Deflate = Deflate;
|
||||
exports.deflate = deflate;
|
||||
exports.deflateRaw = deflateRaw;
|
||||
exports.gzip = gzip;
|
||||
418
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/inflate.js
generated
vendored
Executable file
418
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/inflate.js
generated
vendored
Executable file
|
|
@ -0,0 +1,418 @@
|
|||
'use strict';
|
||||
|
||||
|
||||
var zlib_inflate = require('./zlib/inflate');
|
||||
var utils = require('./utils/common');
|
||||
var strings = require('./utils/strings');
|
||||
var c = require('./zlib/constants');
|
||||
var msg = require('./zlib/messages');
|
||||
var ZStream = require('./zlib/zstream');
|
||||
var GZheader = require('./zlib/gzheader');
|
||||
|
||||
var toString = Object.prototype.toString;
|
||||
|
||||
/**
|
||||
* class Inflate
|
||||
*
|
||||
* Generic JS-style wrapper for zlib calls. If you don't need
|
||||
* streaming behaviour - use more simple functions: [[inflate]]
|
||||
* and [[inflateRaw]].
|
||||
**/
|
||||
|
||||
/* internal
|
||||
* inflate.chunks -> Array
|
||||
*
|
||||
 * Chunks of output data, if [[Inflate#onData]] not overridden.
|
||||
**/
|
||||
|
||||
/**
|
||||
* Inflate.result -> Uint8Array|Array|String
|
||||
*
|
||||
* Uncompressed result, generated by default [[Inflate#onData]]
|
||||
* and [[Inflate#onEnd]] handlers. Filled after you push last chunk
|
||||
* (call [[Inflate#push]] with `Z_FINISH` / `true` param) or if you
|
||||
* push a chunk with explicit flush (call [[Inflate#push]] with
|
||||
* `Z_SYNC_FLUSH` param).
|
||||
**/
|
||||
|
||||
/**
|
||||
* Inflate.err -> Number
|
||||
*
|
||||
* Error code after inflate finished. 0 (Z_OK) on success.
|
||||
* Should be checked if broken data possible.
|
||||
**/
|
||||
|
||||
/**
|
||||
* Inflate.msg -> String
|
||||
*
|
||||
* Error message, if [[Inflate.err]] != 0
|
||||
**/
|
||||
|
||||
|
||||
/**
|
||||
* new Inflate(options)
|
||||
* - options (Object): zlib inflate options.
|
||||
*
|
||||
* Creates new inflator instance with specified params. Throws exception
|
||||
* on bad params. Supported options:
|
||||
*
|
||||
* - `windowBits`
|
||||
* - `dictionary`
|
||||
*
|
||||
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
|
||||
* for more information on these.
|
||||
*
|
||||
* Additional options, for internal needs:
|
||||
*
|
||||
* - `chunkSize` - size of generated data chunks (16K by default)
|
||||
* - `raw` (Boolean) - do raw inflate
|
||||
* - `to` (String) - if equal to 'string', then result will be converted
|
||||
* from utf8 to utf16 (javascript) string. When string output requested,
|
||||
* chunk length can differ from `chunkSize`, depending on content.
|
||||
*
|
||||
* By default, when no options set, autodetect deflate/gzip data format via
|
||||
* wrapper header.
|
||||
*
|
||||
* ##### Example:
|
||||
*
|
||||
* ```javascript
|
||||
* var pako = require('pako')
|
||||
* , chunk1 = Uint8Array([1,2,3,4,5,6,7,8,9])
|
||||
* , chunk2 = Uint8Array([10,11,12,13,14,15,16,17,18,19]);
|
||||
*
|
||||
* var inflate = new pako.Inflate({ level: 3});
|
||||
*
|
||||
* inflate.push(chunk1, false);
|
||||
* inflate.push(chunk2, true); // true -> last chunk
|
||||
*
|
||||
* if (inflate.err) { throw new Error(inflate.err); }
|
||||
*
|
||||
* console.log(inflate.result);
|
||||
* ```
|
||||
**/
|
||||
function Inflate(options) {
  // Allow calling without `new`.
  if (!(this instanceof Inflate)) return new Inflate(options);

  // Merge user options over defaults (16K chunks, autodetect windowBits).
  this.options = utils.assign({
    chunkSize: 16384,
    windowBits: 0,
    to: ''
  }, options || {});

  var opt = this.options;

  // Force window size for `raw` data, if not set directly,
  // because we have no header for autodetect.
  // (negative windowBits is zlib's convention for raw/headerless streams)
  if (opt.raw && (opt.windowBits >= 0) && (opt.windowBits < 16)) {
    opt.windowBits = -opt.windowBits;
    if (opt.windowBits === 0) { opt.windowBits = -15; }
  }

  // If `windowBits` not defined (and mode not raw) - set autodetect flag for gzip/deflate
  if ((opt.windowBits >= 0) && (opt.windowBits < 16) &&
      !(options && options.windowBits)) {
    opt.windowBits += 32;
  }

  // Gzip header has no info about windows size, we can do autodetect only
  // for deflate. So, if window size not set, force it to max when gzip possible
  if ((opt.windowBits > 15) && (opt.windowBits < 48)) {
    // bit 3 (16) -> gzipped data
    // bit 4 (32) -> autodetect gzip/deflate
    if ((opt.windowBits & 15) === 0) {
      opt.windowBits |= 15;
    }
  }

  this.err = 0;      // error code, if happens (0 = Z_OK)
  this.msg = '';     // error message
  this.ended = false;  // used to avoid multiple onEnd() calls
  this.chunks = [];  // chunks of compressed data

  this.strm = new ZStream();
  // No output buffer allocated yet; push() allocates one per chunk.
  this.strm.avail_out = 0;

  var status = zlib_inflate.inflateInit2(
    this.strm,
    opt.windowBits
  );

  // Bad params are reported immediately via exception.
  if (status !== c.Z_OK) {
    throw new Error(msg[status]);
  }

  // Gzip header fields are collected here during decompression.
  this.header = new GZheader();

  zlib_inflate.inflateGetHeader(this.strm, this.header);
}
|
||||
|
||||
/**
|
||||
* Inflate#push(data[, mode]) -> Boolean
|
||||
* - data (Uint8Array|Array|ArrayBuffer|String): input data
|
||||
* - mode (Number|Boolean): 0..6 for corresponding Z_NO_FLUSH..Z_TREE modes.
|
||||
 * See constants. Skipped or `false` means Z_NO_FLUSH, `true` means Z_FINISH.
|
||||
*
|
||||
* Sends input data to inflate pipe, generating [[Inflate#onData]] calls with
|
||||
* new output chunks. Returns `true` on success. The last data block must have
|
||||
* mode Z_FINISH (or `true`). That will flush internal pending buffers and call
|
||||
* [[Inflate#onEnd]]. For interim explicit flushes (without ending the stream) you
|
||||
* can use mode Z_SYNC_FLUSH, keeping the decompression context.
|
||||
*
|
||||
* On fail call [[Inflate#onEnd]] with error code and return false.
|
||||
*
|
||||
* We strongly recommend to use `Uint8Array` on input for best speed (output
|
||||
* format is detected automatically). Also, don't skip last param and always
|
||||
* use the same type in your code (boolean or number). That will improve JS speed.
|
||||
*
|
||||
* For regular `Array`-s make sure all elements are [0..255].
|
||||
*
|
||||
* ##### Example
|
||||
*
|
||||
* ```javascript
|
||||
* push(chunk, false); // push one of data chunks
|
||||
* ...
|
||||
* push(chunk, true); // push last chunk
|
||||
* ```
|
||||
**/
|
||||
Inflate.prototype.push = function (data, mode) {
  var strm = this.strm;
  var chunkSize = this.options.chunkSize;
  var dictionary = this.options.dictionary;
  var status, _mode;
  var next_out_utf8, tail, utf8str;
  var dict;

  // Flag to properly process Z_BUF_ERROR on testing inflate call
  // when we check that all output data was flushed.
  var allowBufError = false;

  if (this.ended) { return false; }
  // Numeric mode is used as-is; booleans map to Z_FINISH / Z_NO_FLUSH.
  _mode = (mode === ~~mode) ? mode : ((mode === true) ? c.Z_FINISH : c.Z_NO_FLUSH);

  // Convert data if needed
  if (typeof data === 'string') {
    // Only binary strings can be decompressed on practice
    strm.input = strings.binstring2buf(data);
  } else if (toString.call(data) === '[object ArrayBuffer]') {
    strm.input = new Uint8Array(data);
  } else {
    strm.input = data;
  }

  strm.next_in = 0;
  strm.avail_in = strm.input.length;

  do {
    // (Re)allocate an output chunk when the previous one is full.
    if (strm.avail_out === 0) {
      strm.output = new utils.Buf8(chunkSize);
      strm.next_out = 0;
      strm.avail_out = chunkSize;
    }

    status = zlib_inflate.inflate(strm, c.Z_NO_FLUSH);    /* no bad return value */

    // Stream requires a preset dictionary — install it and continue.
    if (status === c.Z_NEED_DICT && dictionary) {
      // Convert data if needed
      if (typeof dictionary === 'string') {
        dict = strings.string2buf(dictionary);
      } else if (toString.call(dictionary) === '[object ArrayBuffer]') {
        dict = new Uint8Array(dictionary);
      } else {
        dict = dictionary;
      }

      status = zlib_inflate.inflateSetDictionary(this.strm, dict);

    }

    // Expected Z_BUF_ERROR from the extra "flush check" pass — treat as OK.
    if (status === c.Z_BUF_ERROR && allowBufError === true) {
      status = c.Z_OK;
      allowBufError = false;
    }

    if (status !== c.Z_STREAM_END && status !== c.Z_OK) {
      this.onEnd(status);
      this.ended = true;
      return false;
    }

    if (strm.next_out) {
      if (strm.avail_out === 0 || status === c.Z_STREAM_END || (strm.avail_in === 0 && (_mode === c.Z_FINISH || _mode === c.Z_SYNC_FLUSH))) {

        if (this.options.to === 'string') {

          // Emit only up to the last complete UTF-8 sequence; the partial
          // tail is carried over to the front of the next chunk.
          next_out_utf8 = strings.utf8border(strm.output, strm.next_out);

          tail = strm.next_out - next_out_utf8;
          utf8str = strings.buf2string(strm.output, next_out_utf8);

          // move tail
          strm.next_out = tail;
          strm.avail_out = chunkSize - tail;
          if (tail) { utils.arraySet(strm.output, strm.output, next_out_utf8, tail, 0); }

          this.onData(utf8str);

        } else {
          this.onData(utils.shrinkBuf(strm.output, strm.next_out));
        }
      }
    }

    // When no more input data, we should check that internal inflate buffers
    // are flushed. The only way to do it when avail_out = 0 - run one more
    // inflate pass. But if output data not exists, inflate return Z_BUF_ERROR.
    // Here we set flag to process this error properly.
    //
    // NOTE. Deflate does not return error in this case and does not needs such
    // logic.
    if (strm.avail_in === 0 && strm.avail_out === 0) {
      allowBufError = true;
    }

  } while ((strm.avail_in > 0 || strm.avail_out === 0) && status !== c.Z_STREAM_END);

  // Stream ended on its own — finalize regardless of requested mode.
  if (status === c.Z_STREAM_END) {
    _mode = c.Z_FINISH;
  }

  // Finalize on the last chunk.
  if (_mode === c.Z_FINISH) {
    status = zlib_inflate.inflateEnd(this.strm);
    this.onEnd(status);
    this.ended = true;
    return status === c.Z_OK;
  }

  // callback interim results if Z_SYNC_FLUSH.
  if (_mode === c.Z_SYNC_FLUSH) {
    this.onEnd(c.Z_OK);
    // Force a fresh output chunk on the next push.
    strm.avail_out = 0;
    return true;
  }

  return true;
};
|
||||
|
||||
|
||||
/**
|
||||
* Inflate#onData(chunk) -> Void
|
||||
 * - chunk (Uint8Array|Array|String): output data. Type of array depends
|
||||
* on js engine support. When string output requested, each chunk
|
||||
* will be string.
|
||||
*
|
||||
* By default, stores data blocks in `chunks[]` property and glue
|
||||
* those in `onEnd`. Override this handler, if you need another behaviour.
|
||||
**/
|
||||
// Default data handler: collect output chunks; they are joined into the
// final `result` by onEnd(). Override for streaming consumption.
Inflate.prototype.onData = function (chunk) {
  this.chunks[this.chunks.length] = chunk;
};
|
||||
|
||||
|
||||
/**
|
||||
* Inflate#onEnd(status) -> Void
|
||||
* - status (Number): inflate status. 0 (Z_OK) on success,
|
||||
* other if not.
|
||||
*
|
||||
* Called either after you tell inflate that the input stream is
|
||||
* complete (Z_FINISH) or should be flushed (Z_SYNC_FLUSH)
|
||||
* or if an error happened. By default - join collected chunks,
|
||||
* free memory and fill `results` / `err` properties.
|
||||
**/
|
||||
// Default end handler: on success, glue the collected chunks into the
// final result (string or flat byte array depending on options.to), then
// release the chunk list and record the final status and message.
Inflate.prototype.onEnd = function (status) {
  if (status === c.Z_OK) {
    this.result = (this.options.to === 'string')
      // Glue & convert here, until pako can emit utf8-aligned strings.
      ? this.chunks.join('')
      : utils.flattenChunks(this.chunks);
  }
  this.chunks = [];
  this.err = status;
  this.msg = this.strm.msg;
};
|
||||
|
||||
|
||||
/**
|
||||
* inflate(data[, options]) -> Uint8Array|Array|String
|
||||
* - data (Uint8Array|Array|String): input data to decompress.
|
||||
* - options (Object): zlib inflate options.
|
||||
*
|
||||
* Decompress `data` with inflate/ungzip and `options`. Autodetect
|
||||
* format via wrapper header by default. That's why we don't provide
|
||||
* separate `ungzip` method.
|
||||
*
|
||||
* Supported options are:
|
||||
*
|
||||
* - windowBits
|
||||
*
|
||||
* [http://zlib.net/manual.html#Advanced](http://zlib.net/manual.html#Advanced)
|
||||
* for more information.
|
||||
*
|
||||
* Sugar (options):
|
||||
*
|
||||
* - `raw` (Boolean) - say that we work with raw stream, if you don't wish to specify
|
||||
* negative windowBits implicitly.
|
||||
* - `to` (String) - if equal to 'string', then result will be converted
|
||||
* from utf8 to utf16 (javascript) string. When string output requested,
|
||||
* chunk length can differ from `chunkSize`, depending on content.
|
||||
*
|
||||
*
|
||||
* ##### Example:
|
||||
*
|
||||
* ```javascript
|
||||
* var pako = require('pako')
|
||||
* , input = pako.deflate([1,2,3,4,5,6,7,8,9])
|
||||
* , output;
|
||||
*
|
||||
* try {
|
||||
* output = pako.inflate(input);
|
||||
* } catch (err)
|
||||
* console.log(err);
|
||||
* }
|
||||
* ```
|
||||
**/
|
||||
// One-shot decompression: feed the whole input to a fresh Inflate
// instance and return the collected result.
function inflate(input, options) {
  var inflator = new Inflate(options);

  inflator.push(input, true);

  // An error here can only happen if options were tampered with;
  // pako's convention is to throw the bare message string.
  if (inflator.err) { throw inflator.msg; }

  return inflator.result;
}
|
||||
|
||||
|
||||
/**
|
||||
* inflateRaw(data[, options]) -> Uint8Array|Array|String
|
||||
* - data (Uint8Array|Array|String): input data to decompress.
|
||||
* - options (Object): zlib inflate options.
|
||||
*
|
||||
* The same as [[inflate]], but creates raw data, without wrapper
|
||||
* (header and adler32 crc).
|
||||
**/
|
||||
/**
 * The same as inflate(), but forces raw (headerless) mode — no wrapper
 * header or adler32 checksum is expected.
 *
 * Fixed: works on a shallow copy of `options`, so the caller's object is
 * no longer mutated (the original wrote `raw: true` into the passed-in
 * object as a side effect).
 */
function inflateRaw(input, options) {
  var opts = {};
  if (options) {
    for (var k in options) {
      if (options.hasOwnProperty(k)) { opts[k] = options[k]; }
    }
  }
  opts.raw = true;
  return inflate(input, opts);
}
|
||||
|
||||
|
||||
/**
|
||||
* ungzip(data[, options]) -> Uint8Array|Array|String
|
||||
* - data (Uint8Array|Array|String): input data to decompress.
|
||||
* - options (Object): zlib inflate options.
|
||||
*
|
||||
* Just shortcut to [[inflate]], because it autodetects format
|
||||
 * by header content. Done for convenience.
|
||||
**/
|
||||
|
||||
|
||||
// Public API of this module.
exports.Inflate = Inflate;
exports.inflate = inflate;
exports.inflateRaw = inflateRaw;
// `ungzip` is an alias of `inflate`: the wrapper format (gzip/deflate)
// is autodetected from the header, so one entry point covers both.
exports.ungzip = inflate;
|
||||
102
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/utils/common.js
generated
vendored
Executable file
102
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/utils/common.js
generated
vendored
Executable file
|
|
@ -0,0 +1,102 @@
|
|||
'use strict';
|
||||
|
||||
|
||||
// True when the engine provides the typed arrays used by the fast paths;
// setTyped() below switches helpers accordingly.
var TYPED_OK = (typeof Uint8Array !== 'undefined') &&
               (typeof Uint16Array !== 'undefined') &&
               (typeof Int32Array !== 'undefined');
|
||||
|
||||
|
||||
exports.assign = function (obj /*from1, from2, from3, ...*/) {
|
||||
var sources = Array.prototype.slice.call(arguments, 1);
|
||||
while (sources.length) {
|
||||
var source = sources.shift();
|
||||
if (!source) { continue; }
|
||||
|
||||
if (typeof source !== 'object') {
|
||||
throw new TypeError(source + 'must be non-object');
|
||||
}
|
||||
|
||||
for (var p in source) {
|
||||
if (source.hasOwnProperty(p)) {
|
||||
obj[p] = source[p];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return obj;
|
||||
};
|
||||
|
||||
|
||||
// reduce buffer size, avoiding mem copy
|
||||
exports.shrinkBuf = function (buf, size) {
|
||||
if (buf.length === size) { return buf; }
|
||||
if (buf.subarray) { return buf.subarray(0, size); }
|
||||
buf.length = size;
|
||||
return buf;
|
||||
};
|
||||
|
||||
|
||||
// Typed-array implementations of the array helpers (installed onto the
// module exports by setTyped when typed arrays are available).
var fnTyped = {
  // Copy `len` elements from src[src_offs..] into dest[dest_offs..].
  arraySet: function (dest, src, src_offs, len, dest_offs) {
    if (src.subarray && dest.subarray) {
      // Fast path: bulk typed-array copy.
      dest.set(src.subarray(src_offs, src_offs + len), dest_offs);
    } else {
      // Fallback to ordinary array copy, element by element.
      for (var i = 0; i < len; i++) {
        dest[dest_offs + i] = src[src_offs + i];
      }
    }
  },
  // Join an array of chunks into a single Uint8Array.
  flattenChunks: function (chunks) {
    var i, l;

    // First pass: total output length.
    var total = 0;
    for (i = 0, l = chunks.length; i < l; i++) {
      total += chunks[i].length;
    }

    // Second pass: copy each chunk into place.
    var result = new Uint8Array(total);
    var pos = 0;
    for (i = 0, l = chunks.length; i < l; i++) {
      var chunk = chunks[i];
      result.set(chunk, pos);
      pos += chunk.length;
    }

    return result;
  }
};
|
||||
|
||||
// Plain-array fallbacks for the same helpers (used when typed arrays are
// unavailable, or disabled via setTyped(false) for testing).
var fnUntyped = {
  // Element-by-element copy.
  arraySet: function (dest, src, src_offs, len, dest_offs) {
    var i = 0;
    while (i < len) {
      dest[dest_offs + i] = src[src_offs + i];
      i++;
    }
  },
  // Join array of chunks into a single flat (plain) array.
  flattenChunks: function (chunks) {
    return Array.prototype.concat.apply([], chunks);
  }
};
|
||||
|
||||
|
||||
// Enable/Disable typed arrays use, for testing
//
// Swaps the exported buffer constructors (Buf8/Buf16/Buf32) and array
// helpers (arraySet/flattenChunks) between the typed-array implementations
// and the plain-array fallbacks.
exports.setTyped = function (on) {
  if (on) {
    exports.Buf8 = Uint8Array;
    exports.Buf16 = Uint16Array;
    exports.Buf32 = Int32Array;
    exports.assign(exports, fnTyped);
  } else {
    exports.Buf8 = Array;
    exports.Buf16 = Array;
    exports.Buf32 = Array;
    exports.assign(exports, fnUntyped);
  }
};

// Pick the implementation once at load time based on engine support.
exports.setTyped(TYPED_OK);
|
||||
185
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/utils/strings.js
generated
vendored
Executable file
185
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/utils/strings.js
generated
vendored
Executable file
|
|
@ -0,0 +1,185 @@
|
|||
// String encode/decode helpers
'use strict';


var utils = require('./common');


// Quick check if we can use fast array to bin string conversion
//
// - apply(Array) can fail on Android 2.2
// - apply(Uint8Array) can fail on iOS 5.1 Safari
//
var STR_APPLY_OK = true;
var STR_APPLY_UIA_OK = true;

// Probe once at load time; a failure flips the flag so buf2binstring
// falls back to a per-character loop.
try { String.fromCharCode.apply(null, [ 0 ]); } catch (__) { STR_APPLY_OK = false; }
try { String.fromCharCode.apply(null, new Uint8Array(1)); } catch (__) { STR_APPLY_UIA_OK = false; }


// Table with utf8 lengths (calculated by first byte of sequence)
// Note, that 5 & 6-byte values and some 4-byte values can not be represented in JS,
// because max possible codepoint is 0x10ffff
var _utf8len = new utils.Buf8(256);
for (var q = 0; q < 256; q++) {
  _utf8len[q] = (q >= 252 ? 6 : q >= 248 ? 5 : q >= 240 ? 4 : q >= 224 ? 3 : q >= 192 ? 2 : 1);
}
// NOTE(review): the same slot is assigned twice here; presumably
// `_utf8len[253] = _utf8len[254] = 1` was intended to mark both invalid
// lead bytes — confirm against upstream before changing.
_utf8len[254] = _utf8len[254] = 1; // Invalid sequence start
|
||||
|
||||
|
||||
// convert string to array (typed, when possible)
//
// Encodes a JS (UTF-16) string into a UTF-8 byte buffer. Valid surrogate
// pairs are combined into single code points (4-byte sequences); lone
// surrogates are encoded as-is (3-byte sequences).
exports.string2buf = function (str) {
  var buf, c, c2, m_pos, i, str_len = str.length, buf_len = 0;

  // count binary size
  for (m_pos = 0; m_pos < str_len; m_pos++) {
    c = str.charCodeAt(m_pos);
    // High surrogate followed by a low surrogate -> one supplementary
    // code point; consume both UTF-16 units.
    if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
      c2 = str.charCodeAt(m_pos + 1);
      if ((c2 & 0xfc00) === 0xdc00) {
        c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
        m_pos++;
      }
    }
    buf_len += c < 0x80 ? 1 : c < 0x800 ? 2 : c < 0x10000 ? 3 : 4;
  }

  // allocate buffer
  buf = new utils.Buf8(buf_len);

  // convert (same surrogate logic as the counting pass above)
  for (i = 0, m_pos = 0; i < buf_len; m_pos++) {
    c = str.charCodeAt(m_pos);
    if ((c & 0xfc00) === 0xd800 && (m_pos + 1 < str_len)) {
      c2 = str.charCodeAt(m_pos + 1);
      if ((c2 & 0xfc00) === 0xdc00) {
        c = 0x10000 + ((c - 0xd800) << 10) + (c2 - 0xdc00);
        m_pos++;
      }
    }
    if (c < 0x80) {
      /* one byte */
      buf[i++] = c;
    } else if (c < 0x800) {
      /* two bytes */
      buf[i++] = 0xC0 | (c >>> 6);
      buf[i++] = 0x80 | (c & 0x3f);
    } else if (c < 0x10000) {
      /* three bytes */
      buf[i++] = 0xE0 | (c >>> 12);
      buf[i++] = 0x80 | (c >>> 6 & 0x3f);
      buf[i++] = 0x80 | (c & 0x3f);
    } else {
      /* four bytes */
      buf[i++] = 0xf0 | (c >>> 18);
      buf[i++] = 0x80 | (c >>> 12 & 0x3f);
      buf[i++] = 0x80 | (c >>> 6 & 0x3f);
      buf[i++] = 0x80 | (c & 0x3f);
    }
  }

  return buf;
};
|
||||
|
||||
// Helper (used in 2 places): turn the first `len` char codes of `buf`
// into a string.
function buf2binstring(buf, len) {
  // String.fromCharCode.apply is the fast path, but it can overflow the
  // call stack on big arrays and is unreliable on some old engines —
  // guard against both before taking it.
  if (len < 65537 &&
      ((buf.subarray && STR_APPLY_UIA_OK) || (!buf.subarray && STR_APPLY_OK))) {
    return String.fromCharCode.apply(null, utils.shrinkBuf(buf, len));
  }

  // Fallback: build the string one character at a time.
  var result = '';
  for (var i = 0; i < len; i++) {
    result += String.fromCharCode(buf[i]);
  }
  return result;
}
|
||||
|
||||
|
||||
// Convert byte array to binary string
|
||||
exports.buf2binstring = function (buf) {
|
||||
return buf2binstring(buf, buf.length);
|
||||
};
|
||||
|
||||
|
||||
// Convert binary string (typed, when possible)
|
||||
exports.binstring2buf = function (str) {
|
||||
var buf = new utils.Buf8(str.length);
|
||||
for (var i = 0, len = buf.length; i < len; i++) {
|
||||
buf[i] = str.charCodeAt(i);
|
||||
}
|
||||
return buf;
|
||||
};
|
||||
|
||||
|
||||
// convert array to string
//
// Decodes up to `max` UTF-8 bytes from `buf` into a JS (UTF-16) string.
// Overlong/unsupported (5-6 byte) and truncated sequences are replaced
// with U+FFFD.
exports.buf2string = function (buf, max) {
  var i, out, c, c_len;
  var len = max || buf.length;

  // Reserve max possible length (2 words per char)
  // NB: by unknown reasons, Array is significantly faster for
  // String.fromCharCode.apply than Uint16Array.
  var utf16buf = new Array(len * 2);

  for (out = 0, i = 0; i < len;) {
    c = buf[i++];
    // quick process ascii
    if (c < 0x80) { utf16buf[out++] = c; continue; }

    // Sequence length from the lead byte (see _utf8len table above).
    c_len = _utf8len[c];
    // skip 5 & 6 byte codes
    if (c_len > 4) { utf16buf[out++] = 0xfffd; i += c_len - 1; continue; }

    // apply mask on first byte
    c &= c_len === 2 ? 0x1f : c_len === 3 ? 0x0f : 0x07;
    // join the rest
    while (c_len > 1 && i < len) {
      c = (c << 6) | (buf[i++] & 0x3f);
      c_len--;
    }

    // terminated by end of string?
    if (c_len > 1) { utf16buf[out++] = 0xfffd; continue; }

    if (c < 0x10000) {
      utf16buf[out++] = c;
    } else {
      // Above the BMP: emit a UTF-16 surrogate pair.
      c -= 0x10000;
      utf16buf[out++] = 0xd800 | ((c >> 10) & 0x3ff);
      utf16buf[out++] = 0xdc00 | (c & 0x3ff);
    }
  }

  return buf2binstring(utf16buf, out);
};
|
||||
|
||||
|
||||
// Calculate max possible position in utf8 buffer,
// that will not break sequence. If that's not possible
// - (very small limits) return max size as is.
//
// buf[] - utf8 bytes array
// max - length limit (mandatory);
exports.utf8border = function (buf, max) {
  var pos;

  max = max || buf.length;
  if (max > buf.length) { max = buf.length; }

  // go back from last position, until start of sequence found
  // (continuation bytes have the form 10xxxxxx)
  pos = max - 1;
  while (pos >= 0 && (buf[pos] & 0xC0) === 0x80) { pos--; }

  // Degenerate case - very small or broken sequence:
  // return max, because we should return something anyway.
  if (pos < 0) { return max; }

  // If we came to start of buffer - that means buffer is too small,
  // return max too.
  if (pos === 0) { return max; }

  // If the sequence starting at `pos` would run past `max`, cut before
  // it; otherwise everything up to `max` is safe to emit.
  return (pos + _utf8len[buf[pos]] > max) ? pos : max;
};
|
||||
32
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/adler32.js
generated
vendored
Executable file
32
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/adler32.js
generated
vendored
Executable file
|
|
@ -0,0 +1,32 @@
|
|||
'use strict';
|
||||
|
||||
// Note: adler32 takes 12% for level 0 and 2% for level 6.
|
||||
// It isn't worth making additional optimizations as in the original.
|
||||
// Small size is preferable.
|
||||
|
||||
// Adler-32 checksum over buf[pos .. pos+len-1], continuing from the
// running checksum `adler` (seed with 1 for a fresh stream, per RFC 1950).
// Returns the updated checksum as a signed 32-bit integer.
function adler32(adler, buf, len, pos) {
  // Split the running checksum into its two 16-bit halves.
  var lo = (adler & 0xffff) | 0;
  var hi = ((adler >>> 16) & 0xffff) | 0;

  while (len !== 0) {
    // Process at most 2000 bytes per round (about half of zlib's 5552)
    // so `hi` stays within 31 bits — we force signed ints, and %= would
    // misbehave past that.
    var run = len > 2000 ? 2000 : len;
    len -= run;

    do {
      lo = (lo + buf[pos++]) | 0;
      hi = (hi + lo) | 0;
    } while (--run);

    lo %= 65521;
    hi %= 65521;
  }

  return (lo | (hi << 16)) | 0;
}
|
||||
|
||||
|
||||
// CommonJS export of the checksum function.
module.exports = adler32;
|
||||
50
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/constants.js
generated
vendored
Executable file
50
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/constants.js
generated
vendored
Executable file
|
|
@ -0,0 +1,50 @@
|
|||
'use strict';
|
||||
|
||||
|
||||
// Subset of zlib's public constants used by this JS port (see zlib.h).
module.exports = {

  /* Allowed flush values; see deflate() and inflate() below for details */
  Z_NO_FLUSH: 0,
  Z_PARTIAL_FLUSH: 1,
  Z_SYNC_FLUSH: 2,
  Z_FULL_FLUSH: 3,
  Z_FINISH: 4,
  Z_BLOCK: 5,
  Z_TREES: 6,

  /* Return codes for the compression/decompression functions. Negative values
  * are errors, positive values are used for special but normal events.
  */
  Z_OK: 0,
  Z_STREAM_END: 1,
  Z_NEED_DICT: 2,
  Z_ERRNO: -1,
  Z_STREAM_ERROR: -2,
  Z_DATA_ERROR: -3,
  //Z_MEM_ERROR: -4,
  Z_BUF_ERROR: -5,
  //Z_VERSION_ERROR: -6,

  /* compression levels */
  Z_NO_COMPRESSION: 0,
  Z_BEST_SPEED: 1,
  Z_BEST_COMPRESSION: 9,
  Z_DEFAULT_COMPRESSION: -1,

  /* compression strategies */
  Z_FILTERED: 1,
  Z_HUFFMAN_ONLY: 2,
  Z_RLE: 3,
  Z_FIXED: 4,
  Z_DEFAULT_STRATEGY: 0,

  /* Possible values of the data_type field (though see inflate()) */
  Z_BINARY: 0,
  Z_TEXT: 1,
  //Z_ASCII: 1, // = Z_TEXT (deprecated)
  Z_UNKNOWN: 2,

  /* The deflate compression method */
  Z_DEFLATED: 8
  //Z_NULL: null // Use -1 or null inline, depending on var type
};
|
||||
41
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/crc32.js
generated
vendored
Executable file
41
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/crc32.js
generated
vendored
Executable file
|
|
@ -0,0 +1,41 @@
|
|||
'use strict';
|
||||
|
||||
// Note: we can't get significant speed boost here.
|
||||
// So write code to minimize size - no pregenerated tables
|
||||
// and array tools dependencies.
|
||||
|
||||
|
||||
// Build the 256-entry CRC-32 lookup table (reflected polynomial
// 0xEDB88320). An ordinary array is used, since a typed one gives no
// speed boost here.
function makeTable() {
  var table = new Array(256);

  for (var n = 0; n < 256; n++) {
    var c = n;
    for (var k = 0; k < 8; k++) {
      c = (c & 1) ? (0xEDB88320 ^ (c >>> 1)) : (c >>> 1);
    }
    table[n] = c;
  }

  return table;
}

// Generated once at module load. Just 256 signed longs — not a problem.
var crcTable = makeTable();


// CRC-32 over buf[pos .. pos+len-1], continuing from the running value
// `crc` (seed with 0 for a fresh stream). Result is a signed 32-bit int.
function crc32(crc, buf, len, pos) {
  var table = crcTable;
  var end = pos + len;

  crc = crc ^ (-1);

  for (var i = pos; i < end; i++) {
    crc = (crc >>> 8) ^ table[(crc ^ buf[i]) & 0xFF];
  }

  return crc ^ (-1); // intentionally not forced unsigned with >>> 0
}
|
||||
|
||||
|
||||
// CommonJS export of the checksum function.
module.exports = crc32;
|
||||
1855
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/deflate.js
generated
vendored
Executable file
1855
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/deflate.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
40
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/gzheader.js
generated
vendored
Executable file
40
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/gzheader.js
generated
vendored
Executable file
|
|
@ -0,0 +1,40 @@
|
|||
'use strict';
|
||||
|
||||
|
||||
// Container for gzip member header fields, mirroring zlib's gz_header
// struct. An instance is filled in by inflateGetHeader() while the
// stream's gzip header is parsed.
function GZheader() {
  this.text = 0;        // true if compressed data believed to be text
  this.time = 0;        // modification time
  this.xflags = 0;      // extra flags (not used when writing a gzip file)
  this.os = 0;          // operating system
  this.extra = null;    // extra field, or null if none
  this.extra_len = 0;   // extra field length — not strictly needed in JS,
                        // kept for parity with the C code

  // zlib's extra_max / name_max / comm_max size limits are omitted:
  // in JS we don't preallocate memory, and inflate uses a constant
  // 65536-byte limit instead.

  this.name = '';       // file name ('' if none)
  this.comment = '';    // comment ('' if none)
  this.hcrc = 0;        // true if there was or will be a header crc
  this.done = false;    // true once the gzip header is fully read
                        // (not used when writing a gzip file)
}
|
||||
|
||||
// CommonJS export of the header container.
module.exports = GZheader;
|
||||
326
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/inffast.js
generated
vendored
Executable file
326
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/inffast.js
generated
vendored
Executable file
|
|
@ -0,0 +1,326 @@
|
|||
'use strict';
|
||||
|
||||
// See state defs from inflate.js
var BAD = 30;       /* got a data error -- remain here until reset */
var TYPE = 12;      /* i: waiting for type bits, including last-flag bit */
|
||||
|
||||
/*
|
||||
Decode literal, length, and distance codes and write out the resulting
|
||||
literal and match bytes until either not enough input or output is
|
||||
available, an end-of-block is encountered, or a data error is encountered.
|
||||
When large enough input and output buffers are supplied to inflate(), for
|
||||
example, a 16K input buffer and a 64K output buffer, more than 95% of the
|
||||
inflate execution time is spent in this routine.
|
||||
|
||||
Entry assumptions:
|
||||
|
||||
state.mode === LEN
|
||||
strm.avail_in >= 6
|
||||
strm.avail_out >= 258
|
||||
start >= strm.avail_out
|
||||
state.bits < 8
|
||||
|
||||
On return, state.mode is one of:
|
||||
|
||||
LEN -- ran out of enough output space or enough available input
|
||||
TYPE -- reached end of block code, inflate() to interpret next block
|
||||
BAD -- error in block data
|
||||
|
||||
Notes:
|
||||
|
||||
- The maximum input bits used by a length/distance pair is 15 bits for the
|
||||
length code, 5 bits for the length extra, 15 bits for the distance code,
|
||||
and 13 bits for the distance extra. This totals 48 bits, or six bytes.
|
||||
Therefore if strm.avail_in >= 6, then there is enough input to avoid
|
||||
checking for available input while decoding.
|
||||
|
||||
- The maximum bytes that a single length/distance pair can output is 258
|
||||
bytes, which is the maximum length that can be coded. inflate_fast()
|
||||
requires strm.avail_out >= 258 for each loop to avoid checking for
|
||||
output space.
|
||||
*/
|
||||
module.exports = function inflate_fast(strm, start) {
|
||||
var state;
|
||||
var _in; /* local strm.input */
|
||||
var last; /* have enough input while in < last */
|
||||
var _out; /* local strm.output */
|
||||
var beg; /* inflate()'s initial strm.output */
|
||||
var end; /* while out < end, enough space available */
|
||||
//#ifdef INFLATE_STRICT
|
||||
var dmax; /* maximum distance from zlib header */
|
||||
//#endif
|
||||
var wsize; /* window size or zero if not using window */
|
||||
var whave; /* valid bytes in the window */
|
||||
var wnext; /* window write index */
|
||||
// Use `s_window` instead `window`, avoid conflict with instrumentation tools
|
||||
var s_window; /* allocated sliding window, if wsize != 0 */
|
||||
var hold; /* local strm.hold */
|
||||
var bits; /* local strm.bits */
|
||||
var lcode; /* local strm.lencode */
|
||||
var dcode; /* local strm.distcode */
|
||||
var lmask; /* mask for first level of length codes */
|
||||
var dmask; /* mask for first level of distance codes */
|
||||
var here; /* retrieved table entry */
|
||||
var op; /* code bits, operation, extra bits, or */
|
||||
/* window position, window bytes to copy */
|
||||
var len; /* match length, unused bytes */
|
||||
var dist; /* match distance */
|
||||
var from; /* where to copy match from */
|
||||
var from_source;
|
||||
|
||||
|
||||
var input, output; // JS specific, because we have no pointers
|
||||
|
||||
/* copy state to local variables */
|
||||
state = strm.state;
|
||||
//here = state.here;
|
||||
_in = strm.next_in;
|
||||
input = strm.input;
|
||||
last = _in + (strm.avail_in - 5);
|
||||
_out = strm.next_out;
|
||||
output = strm.output;
|
||||
beg = _out - (start - strm.avail_out);
|
||||
end = _out + (strm.avail_out - 257);
|
||||
//#ifdef INFLATE_STRICT
|
||||
dmax = state.dmax;
|
||||
//#endif
|
||||
wsize = state.wsize;
|
||||
whave = state.whave;
|
||||
wnext = state.wnext;
|
||||
s_window = state.window;
|
||||
hold = state.hold;
|
||||
bits = state.bits;
|
||||
lcode = state.lencode;
|
||||
dcode = state.distcode;
|
||||
lmask = (1 << state.lenbits) - 1;
|
||||
dmask = (1 << state.distbits) - 1;
|
||||
|
||||
|
||||
/* decode literals and length/distances until end-of-block or not enough
|
||||
input data or output space */
|
||||
|
||||
top:
|
||||
do {
|
||||
if (bits < 15) {
|
||||
hold += input[_in++] << bits;
|
||||
bits += 8;
|
||||
hold += input[_in++] << bits;
|
||||
bits += 8;
|
||||
}
|
||||
|
||||
here = lcode[hold & lmask];
|
||||
|
||||
dolen:
|
||||
for (;;) { // Goto emulation
|
||||
op = here >>> 24/*here.bits*/;
|
||||
hold >>>= op;
|
||||
bits -= op;
|
||||
op = (here >>> 16) & 0xff/*here.op*/;
|
||||
if (op === 0) { /* literal */
|
||||
//Tracevv((stderr, here.val >= 0x20 && here.val < 0x7f ?
|
||||
// "inflate: literal '%c'\n" :
|
||||
// "inflate: literal 0x%02x\n", here.val));
|
||||
output[_out++] = here & 0xffff/*here.val*/;
|
||||
}
|
||||
else if (op & 16) { /* length base */
|
||||
len = here & 0xffff/*here.val*/;
|
||||
op &= 15; /* number of extra bits */
|
||||
if (op) {
|
||||
if (bits < op) {
|
||||
hold += input[_in++] << bits;
|
||||
bits += 8;
|
||||
}
|
||||
len += hold & ((1 << op) - 1);
|
||||
hold >>>= op;
|
||||
bits -= op;
|
||||
}
|
||||
//Tracevv((stderr, "inflate: length %u\n", len));
|
||||
if (bits < 15) {
|
||||
hold += input[_in++] << bits;
|
||||
bits += 8;
|
||||
hold += input[_in++] << bits;
|
||||
bits += 8;
|
||||
}
|
||||
here = dcode[hold & dmask];
|
||||
|
||||
dodist:
|
||||
for (;;) { // goto emulation
|
||||
op = here >>> 24/*here.bits*/;
|
||||
hold >>>= op;
|
||||
bits -= op;
|
||||
op = (here >>> 16) & 0xff/*here.op*/;
|
||||
|
||||
if (op & 16) { /* distance base */
|
||||
dist = here & 0xffff/*here.val*/;
|
||||
op &= 15; /* number of extra bits */
|
||||
if (bits < op) {
|
||||
hold += input[_in++] << bits;
|
||||
bits += 8;
|
||||
if (bits < op) {
|
||||
hold += input[_in++] << bits;
|
||||
bits += 8;
|
||||
}
|
||||
}
|
||||
dist += hold & ((1 << op) - 1);
|
||||
//#ifdef INFLATE_STRICT
|
||||
if (dist > dmax) {
|
||||
strm.msg = 'invalid distance too far back';
|
||||
state.mode = BAD;
|
||||
break top;
|
||||
}
|
||||
//#endif
|
||||
hold >>>= op;
|
||||
bits -= op;
|
||||
//Tracevv((stderr, "inflate: distance %u\n", dist));
|
||||
op = _out - beg; /* max distance in output */
|
||||
if (dist > op) { /* see if copy from window */
|
||||
op = dist - op; /* distance back in window */
|
||||
if (op > whave) {
|
||||
if (state.sane) {
|
||||
strm.msg = 'invalid distance too far back';
|
||||
state.mode = BAD;
|
||||
break top;
|
||||
}
|
||||
|
||||
// (!) This block is disabled in zlib defaults,
|
||||
// don't enable it for binary compatibility
|
||||
//#ifdef INFLATE_ALLOW_INVALID_DISTANCE_TOOFAR_ARRR
|
||||
// if (len <= op - whave) {
|
||||
// do {
|
||||
// output[_out++] = 0;
|
||||
// } while (--len);
|
||||
// continue top;
|
||||
// }
|
||||
// len -= op - whave;
|
||||
// do {
|
||||
// output[_out++] = 0;
|
||||
// } while (--op > whave);
|
||||
// if (op === 0) {
|
||||
// from = _out - dist;
|
||||
// do {
|
||||
// output[_out++] = output[from++];
|
||||
// } while (--len);
|
||||
// continue top;
|
||||
// }
|
||||
//#endif
|
||||
}
|
||||
from = 0; // window index
|
||||
from_source = s_window;
|
||||
if (wnext === 0) { /* very common case */
|
||||
from += wsize - op;
|
||||
if (op < len) { /* some from window */
|
||||
len -= op;
|
||||
do {
|
||||
output[_out++] = s_window[from++];
|
||||
} while (--op);
|
||||
from = _out - dist; /* rest from output */
|
||||
from_source = output;
|
||||
}
|
||||
}
|
||||
else if (wnext < op) { /* wrap around window */
|
||||
from += wsize + wnext - op;
|
||||
op -= wnext;
|
||||
if (op < len) { /* some from end of window */
|
||||
len -= op;
|
||||
do {
|
||||
output[_out++] = s_window[from++];
|
||||
} while (--op);
|
||||
from = 0;
|
||||
if (wnext < len) { /* some from start of window */
|
||||
op = wnext;
|
||||
len -= op;
|
||||
do {
|
||||
output[_out++] = s_window[from++];
|
||||
} while (--op);
|
||||
from = _out - dist; /* rest from output */
|
||||
from_source = output;
|
||||
}
|
||||
}
|
||||
}
|
||||
else { /* contiguous in window */
|
||||
from += wnext - op;
|
||||
if (op < len) { /* some from window */
|
||||
len -= op;
|
||||
do {
|
||||
output[_out++] = s_window[from++];
|
||||
} while (--op);
|
||||
from = _out - dist; /* rest from output */
|
||||
from_source = output;
|
||||
}
|
||||
}
|
||||
while (len > 2) {
|
||||
output[_out++] = from_source[from++];
|
||||
output[_out++] = from_source[from++];
|
||||
output[_out++] = from_source[from++];
|
||||
len -= 3;
|
||||
}
|
||||
if (len) {
|
||||
output[_out++] = from_source[from++];
|
||||
if (len > 1) {
|
||||
output[_out++] = from_source[from++];
|
||||
}
|
||||
}
|
||||
}
|
||||
else {
|
||||
from = _out - dist; /* copy direct from output */
|
||||
do { /* minimum length is three */
|
||||
output[_out++] = output[from++];
|
||||
output[_out++] = output[from++];
|
||||
output[_out++] = output[from++];
|
||||
len -= 3;
|
||||
} while (len > 2);
|
||||
if (len) {
|
||||
output[_out++] = output[from++];
|
||||
if (len > 1) {
|
||||
output[_out++] = output[from++];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
else if ((op & 64) === 0) { /* 2nd level distance code */
|
||||
here = dcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];
|
||||
continue dodist;
|
||||
}
|
||||
else {
|
||||
strm.msg = 'invalid distance code';
|
||||
state.mode = BAD;
|
||||
break top;
|
||||
}
|
||||
|
||||
break; // need to emulate goto via "continue"
|
||||
}
|
||||
}
|
||||
else if ((op & 64) === 0) { /* 2nd level length code */
|
||||
here = lcode[(here & 0xffff)/*here.val*/ + (hold & ((1 << op) - 1))];
|
||||
continue dolen;
|
||||
}
|
||||
else if (op & 32) { /* end-of-block */
|
||||
//Tracevv((stderr, "inflate: end of block\n"));
|
||||
state.mode = TYPE;
|
||||
break top;
|
||||
}
|
||||
else {
|
||||
strm.msg = 'invalid literal/length code';
|
||||
state.mode = BAD;
|
||||
break top;
|
||||
}
|
||||
|
||||
break; // need to emulate goto via "continue"
|
||||
}
|
||||
} while (_in < last && _out < end);
|
||||
|
||||
/* return unused bytes (on entry, bits < 8, so in won't go too far back) */
|
||||
len = bits >> 3;
|
||||
_in -= len;
|
||||
bits -= len << 3;
|
||||
hold &= (1 << bits) - 1;
|
||||
|
||||
/* update state and return */
|
||||
strm.next_in = _in;
|
||||
strm.next_out = _out;
|
||||
strm.avail_in = (_in < last ? 5 + (last - _in) : 5 - (_in - last));
|
||||
strm.avail_out = (_out < end ? 257 + (end - _out) : 257 - (_out - end));
|
||||
state.hold = hold;
|
||||
state.bits = bits;
|
||||
return;
|
||||
};
|
||||
1538
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/inflate.js
generated
vendored
Executable file
1538
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/inflate.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
327
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/inftrees.js
generated
vendored
Executable file
327
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/inftrees.js
generated
vendored
Executable file
|
|
@ -0,0 +1,327 @@
|
|||
'use strict';
|
||||
|
||||
|
||||
var utils = require('../utils/common');
|
||||
|
||||
var MAXBITS = 15;
|
||||
var ENOUGH_LENS = 852;
|
||||
var ENOUGH_DISTS = 592;
|
||||
//var ENOUGH = (ENOUGH_LENS+ENOUGH_DISTS);
|
||||
|
||||
var CODES = 0;
|
||||
var LENS = 1;
|
||||
var DISTS = 2;
|
||||
|
||||
var lbase = [ /* Length codes 257..285 base */
|
||||
3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 15, 17, 19, 23, 27, 31,
|
||||
35, 43, 51, 59, 67, 83, 99, 115, 131, 163, 195, 227, 258, 0, 0
|
||||
];
|
||||
|
||||
var lext = [ /* Length codes 257..285 extra */
|
||||
16, 16, 16, 16, 16, 16, 16, 16, 17, 17, 17, 17, 18, 18, 18, 18,
|
||||
19, 19, 19, 19, 20, 20, 20, 20, 21, 21, 21, 21, 16, 72, 78
|
||||
];
|
||||
|
||||
var dbase = [ /* Distance codes 0..29 base */
|
||||
1, 2, 3, 4, 5, 7, 9, 13, 17, 25, 33, 49, 65, 97, 129, 193,
|
||||
257, 385, 513, 769, 1025, 1537, 2049, 3073, 4097, 6145,
|
||||
8193, 12289, 16385, 24577, 0, 0
|
||||
];
|
||||
|
||||
var dext = [ /* Distance codes 0..29 extra */
|
||||
16, 16, 16, 16, 17, 17, 18, 18, 19, 19, 20, 20, 21, 21, 22, 22,
|
||||
23, 23, 24, 24, 25, 25, 26, 26, 27, 27,
|
||||
28, 28, 29, 29, 64, 64
|
||||
];
|
||||
|
||||
module.exports = function inflate_table(type, lens, lens_index, codes, table, table_index, work, opts)
|
||||
{
|
||||
var bits = opts.bits;
|
||||
//here = opts.here; /* table entry for duplication */
|
||||
|
||||
var len = 0; /* a code's length in bits */
|
||||
var sym = 0; /* index of code symbols */
|
||||
var min = 0, max = 0; /* minimum and maximum code lengths */
|
||||
var root = 0; /* number of index bits for root table */
|
||||
var curr = 0; /* number of index bits for current table */
|
||||
var drop = 0; /* code bits to drop for sub-table */
|
||||
var left = 0; /* number of prefix codes available */
|
||||
var used = 0; /* code entries in table used */
|
||||
var huff = 0; /* Huffman code */
|
||||
var incr; /* for incrementing code, index */
|
||||
var fill; /* index for replicating entries */
|
||||
var low; /* low bits for current root entry */
|
||||
var mask; /* mask for low root bits */
|
||||
var next; /* next available space in table */
|
||||
var base = null; /* base value table to use */
|
||||
var base_index = 0;
|
||||
// var shoextra; /* extra bits table to use */
|
||||
var end; /* use base and extra for symbol > end */
|
||||
var count = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* number of codes of each length */
|
||||
var offs = new utils.Buf16(MAXBITS + 1); //[MAXBITS+1]; /* offsets in table for each length */
|
||||
var extra = null;
|
||||
var extra_index = 0;
|
||||
|
||||
var here_bits, here_op, here_val;
|
||||
|
||||
/*
|
||||
Process a set of code lengths to create a canonical Huffman code. The
|
||||
code lengths are lens[0..codes-1]. Each length corresponds to the
|
||||
symbols 0..codes-1. The Huffman code is generated by first sorting the
|
||||
symbols by length from short to long, and retaining the symbol order
|
||||
for codes with equal lengths. Then the code starts with all zero bits
|
||||
for the first code of the shortest length, and the codes are integer
|
||||
increments for the same length, and zeros are appended as the length
|
||||
increases. For the deflate format, these bits are stored backwards
|
||||
from their more natural integer increment ordering, and so when the
|
||||
decoding tables are built in the large loop below, the integer codes
|
||||
are incremented backwards.
|
||||
|
||||
This routine assumes, but does not check, that all of the entries in
|
||||
lens[] are in the range 0..MAXBITS. The caller must assure this.
|
||||
1..MAXBITS is interpreted as that code length. zero means that that
|
||||
symbol does not occur in this code.
|
||||
|
||||
The codes are sorted by computing a count of codes for each length,
|
||||
creating from that a table of starting indices for each length in the
|
||||
sorted table, and then entering the symbols in order in the sorted
|
||||
table. The sorted table is work[], with that space being provided by
|
||||
the caller.
|
||||
|
||||
The length counts are used for other purposes as well, i.e. finding
|
||||
the minimum and maximum length codes, determining if there are any
|
||||
codes at all, checking for a valid set of lengths, and looking ahead
|
||||
at length counts to determine sub-table sizes when building the
|
||||
decoding tables.
|
||||
*/
|
||||
|
||||
/* accumulate lengths for codes (assumes lens[] all in 0..MAXBITS) */
|
||||
for (len = 0; len <= MAXBITS; len++) {
|
||||
count[len] = 0;
|
||||
}
|
||||
for (sym = 0; sym < codes; sym++) {
|
||||
count[lens[lens_index + sym]]++;
|
||||
}
|
||||
|
||||
/* bound code lengths, force root to be within code lengths */
|
||||
root = bits;
|
||||
for (max = MAXBITS; max >= 1; max--) {
|
||||
if (count[max] !== 0) { break; }
|
||||
}
|
||||
if (root > max) {
|
||||
root = max;
|
||||
}
|
||||
if (max === 0) { /* no symbols to code at all */
|
||||
//table.op[opts.table_index] = 64; //here.op = (var char)64; /* invalid code marker */
|
||||
//table.bits[opts.table_index] = 1; //here.bits = (var char)1;
|
||||
//table.val[opts.table_index++] = 0; //here.val = (var short)0;
|
||||
table[table_index++] = (1 << 24) | (64 << 16) | 0;
|
||||
|
||||
|
||||
//table.op[opts.table_index] = 64;
|
||||
//table.bits[opts.table_index] = 1;
|
||||
//table.val[opts.table_index++] = 0;
|
||||
table[table_index++] = (1 << 24) | (64 << 16) | 0;
|
||||
|
||||
opts.bits = 1;
|
||||
return 0; /* no symbols, but wait for decoding to report error */
|
||||
}
|
||||
for (min = 1; min < max; min++) {
|
||||
if (count[min] !== 0) { break; }
|
||||
}
|
||||
if (root < min) {
|
||||
root = min;
|
||||
}
|
||||
|
||||
/* check for an over-subscribed or incomplete set of lengths */
|
||||
left = 1;
|
||||
for (len = 1; len <= MAXBITS; len++) {
|
||||
left <<= 1;
|
||||
left -= count[len];
|
||||
if (left < 0) {
|
||||
return -1;
|
||||
} /* over-subscribed */
|
||||
}
|
||||
if (left > 0 && (type === CODES || max !== 1)) {
|
||||
return -1; /* incomplete set */
|
||||
}
|
||||
|
||||
/* generate offsets into symbol table for each length for sorting */
|
||||
offs[1] = 0;
|
||||
for (len = 1; len < MAXBITS; len++) {
|
||||
offs[len + 1] = offs[len] + count[len];
|
||||
}
|
||||
|
||||
/* sort symbols by length, by symbol order within each length */
|
||||
for (sym = 0; sym < codes; sym++) {
|
||||
if (lens[lens_index + sym] !== 0) {
|
||||
work[offs[lens[lens_index + sym]]++] = sym;
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
Create and fill in decoding tables. In this loop, the table being
|
||||
filled is at next and has curr index bits. The code being used is huff
|
||||
with length len. That code is converted to an index by dropping drop
|
||||
bits off of the bottom. For codes where len is less than drop + curr,
|
||||
those top drop + curr - len bits are incremented through all values to
|
||||
fill the table with replicated entries.
|
||||
|
||||
root is the number of index bits for the root table. When len exceeds
|
||||
root, sub-tables are created pointed to by the root entry with an index
|
||||
of the low root bits of huff. This is saved in low to check for when a
|
||||
new sub-table should be started. drop is zero when the root table is
|
||||
being filled, and drop is root when sub-tables are being filled.
|
||||
|
||||
When a new sub-table is needed, it is necessary to look ahead in the
|
||||
code lengths to determine what size sub-table is needed. The length
|
||||
counts are used for this, and so count[] is decremented as codes are
|
||||
entered in the tables.
|
||||
|
||||
used keeps track of how many table entries have been allocated from the
|
||||
provided *table space. It is checked for LENS and DIST tables against
|
||||
the constants ENOUGH_LENS and ENOUGH_DISTS to guard against changes in
|
||||
the initial root table size constants. See the comments in inftrees.h
|
||||
for more information.
|
||||
|
||||
sym increments through all symbols, and the loop terminates when
|
||||
all codes of length max, i.e. all codes, have been processed. This
|
||||
routine permits incomplete codes, so another loop after this one fills
|
||||
in the rest of the decoding tables with invalid code markers.
|
||||
*/
|
||||
|
||||
/* set up for code type */
|
||||
// poor man optimization - use if-else instead of switch,
|
||||
// to avoid deopts in old v8
|
||||
if (type === CODES) {
|
||||
base = extra = work; /* dummy value--not used */
|
||||
end = 19;
|
||||
|
||||
} else if (type === LENS) {
|
||||
base = lbase;
|
||||
base_index -= 257;
|
||||
extra = lext;
|
||||
extra_index -= 257;
|
||||
end = 256;
|
||||
|
||||
} else { /* DISTS */
|
||||
base = dbase;
|
||||
extra = dext;
|
||||
end = -1;
|
||||
}
|
||||
|
||||
/* initialize opts for loop */
|
||||
huff = 0; /* starting code */
|
||||
sym = 0; /* starting code symbol */
|
||||
len = min; /* starting code length */
|
||||
next = table_index; /* current table to fill in */
|
||||
curr = root; /* current table index bits */
|
||||
drop = 0; /* current bits to drop from code for index */
|
||||
low = -1; /* trigger new sub-table when len > root */
|
||||
used = 1 << root; /* use root table entries */
|
||||
mask = used - 1; /* mask for comparing low */
|
||||
|
||||
/* check available table space */
|
||||
if ((type === LENS && used > ENOUGH_LENS) ||
|
||||
(type === DISTS && used > ENOUGH_DISTS)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
var i = 0;
|
||||
/* process all codes and make table entries */
|
||||
for (;;) {
|
||||
i++;
|
||||
/* create table entry */
|
||||
here_bits = len - drop;
|
||||
if (work[sym] < end) {
|
||||
here_op = 0;
|
||||
here_val = work[sym];
|
||||
}
|
||||
else if (work[sym] > end) {
|
||||
here_op = extra[extra_index + work[sym]];
|
||||
here_val = base[base_index + work[sym]];
|
||||
}
|
||||
else {
|
||||
here_op = 32 + 64; /* end of block */
|
||||
here_val = 0;
|
||||
}
|
||||
|
||||
/* replicate for those indices with low len bits equal to huff */
|
||||
incr = 1 << (len - drop);
|
||||
fill = 1 << curr;
|
||||
min = fill; /* save offset to next table */
|
||||
do {
|
||||
fill -= incr;
|
||||
table[next + (huff >> drop) + fill] = (here_bits << 24) | (here_op << 16) | here_val |0;
|
||||
} while (fill !== 0);
|
||||
|
||||
/* backwards increment the len-bit code huff */
|
||||
incr = 1 << (len - 1);
|
||||
while (huff & incr) {
|
||||
incr >>= 1;
|
||||
}
|
||||
if (incr !== 0) {
|
||||
huff &= incr - 1;
|
||||
huff += incr;
|
||||
} else {
|
||||
huff = 0;
|
||||
}
|
||||
|
||||
/* go to next symbol, update count, len */
|
||||
sym++;
|
||||
if (--count[len] === 0) {
|
||||
if (len === max) { break; }
|
||||
len = lens[lens_index + work[sym]];
|
||||
}
|
||||
|
||||
/* create new sub-table if needed */
|
||||
if (len > root && (huff & mask) !== low) {
|
||||
/* if first time, transition to sub-tables */
|
||||
if (drop === 0) {
|
||||
drop = root;
|
||||
}
|
||||
|
||||
/* increment past last table */
|
||||
next += min; /* here min is 1 << curr */
|
||||
|
||||
/* determine length of next table */
|
||||
curr = len - drop;
|
||||
left = 1 << curr;
|
||||
while (curr + drop < max) {
|
||||
left -= count[curr + drop];
|
||||
if (left <= 0) { break; }
|
||||
curr++;
|
||||
left <<= 1;
|
||||
}
|
||||
|
||||
/* check for enough space */
|
||||
used += 1 << curr;
|
||||
if ((type === LENS && used > ENOUGH_LENS) ||
|
||||
(type === DISTS && used > ENOUGH_DISTS)) {
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* point entry in root table to sub-table */
|
||||
low = huff & mask;
|
||||
/*table.op[low] = curr;
|
||||
table.bits[low] = root;
|
||||
table.val[low] = next - opts.table_index;*/
|
||||
table[low] = (root << 24) | (curr << 16) | (next - table_index) |0;
|
||||
}
|
||||
}
|
||||
|
||||
/* fill in remaining table entry if code is incomplete (guaranteed to have
|
||||
at most one remaining entry, since if the code is incomplete, the
|
||||
maximum code length that was allowed to get this far is one bit) */
|
||||
if (huff !== 0) {
|
||||
//table.op[next + huff] = 64; /* invalid code marker */
|
||||
//table.bits[next + huff] = len - drop;
|
||||
//table.val[next + huff] = 0;
|
||||
table[next + huff] = ((len - drop) << 24) | (64 << 16) |0;
|
||||
}
|
||||
|
||||
/* set return parameters */
|
||||
//opts.table_index += used;
|
||||
opts.bits = root;
|
||||
return 0;
|
||||
};
|
||||
13
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/messages.js
generated
vendored
Executable file
13
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/messages.js
generated
vendored
Executable file
|
|
@ -0,0 +1,13 @@
|
|||
'use strict';

// Human-readable messages for zlib's Z_* return codes.
// Keys are the numeric codes; the negative ones must be quoted
// so they form valid object-literal keys.
module.exports = {
  2:      'need dictionary',     /* Z_NEED_DICT       2  */
  1:      'stream end',          /* Z_STREAM_END      1  */
  0:      '',                    /* Z_OK              0  */
  '-1':   'file error',          /* Z_ERRNO         (-1) */
  '-2':   'stream error',        /* Z_STREAM_ERROR  (-2) */
  '-3':   'data error',          /* Z_DATA_ERROR    (-3) */
  '-4':   'insufficient memory', /* Z_MEM_ERROR     (-4) */
  '-5':   'buffer error',        /* Z_BUF_ERROR     (-5) */
  '-6':   'incompatible version' /* Z_VERSION_ERROR (-6) */
};
|
||||
1202
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/trees.js
generated
vendored
Executable file
1202
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/trees.js
generated
vendored
Executable file
File diff suppressed because it is too large
Load diff
29
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/zstream.js
generated
vendored
Executable file
29
BACK_BACK/node_modules/unicode-trie/node_modules/pako/lib/zlib/zstream.js
generated
vendored
Executable file
|
|
@ -0,0 +1,29 @@
|
|||
'use strict';
|
||||
|
||||
|
||||
/**
 * JS counterpart of zlib's z_stream struct: holds the input and output
 * buffers with their cursors and running byte counters, plus the status
 * fields (message, internal state, data-type guess, adler32 checksum).
 * Buffers are arrays with integer indices, since JS has no pointers.
 */
function ZStream() {
  /* ---- input side ---- */
  this.input = null;     /* input byte array (pointer replacement) */
  this.next_in = 0;      /* index of the next input byte */
  this.avail_in = 0;     /* number of bytes available at input */
  this.total_in = 0;     /* total input bytes read so far */
  /* ---- output side ---- */
  this.output = null;    /* output byte array (pointer replacement) */
  this.next_out = 0;     /* index where the next output byte goes */
  this.avail_out = 0;    /* remaining free space at output */
  this.total_out = 0;    /* total bytes output so far */
  /* ---- status ---- */
  this.msg = ''/*Z_NULL*/;          /* last error message, empty if none */
  this.state = null;                /* internal state, not for applications */
  this.data_type = 2/*Z_UNKNOWN*/;  /* best guess: binary or text */
  this.adler = 0;                   /* adler32 of the uncompressed data */
}
|
||||
|
||||
module.exports = ZStream;
|
||||
40
BACK_BACK/node_modules/unicode-trie/node_modules/pako/package.json
generated
vendored
Executable file
40
BACK_BACK/node_modules/unicode-trie/node_modules/pako/package.json
generated
vendored
Executable file
|
|
@ -0,0 +1,40 @@
|
|||
{
|
||||
"name": "pako",
|
||||
"description": "zlib port to javascript - fast, modularized, with browser support",
|
||||
"version": "0.2.9",
|
||||
"keywords": [
|
||||
"zlib",
|
||||
"deflate",
|
||||
"inflate",
|
||||
"gzip"
|
||||
],
|
||||
"homepage": "https://github.com/nodeca/pako",
|
||||
"contributors": [
|
||||
"Andrei Tuputcyn (https://github.com/andr83)",
|
||||
"Vitaly Puzrin (https://github.com/puzrin)"
|
||||
],
|
||||
"files": [
|
||||
"index.js",
|
||||
"dist/",
|
||||
"lib/"
|
||||
],
|
||||
"license": "MIT",
|
||||
"repository": "nodeca/pako",
|
||||
"devDependencies": {
|
||||
"mocha": "1.21.5",
|
||||
"benchmark": "*",
|
||||
"ansi": "*",
|
||||
"browserify": "*",
|
||||
"eslint": "^2.1.0",
|
||||
"eslint-plugin-nodeca": "~1.0.3",
|
||||
"uglify-js": "*",
|
||||
"istanbul": "*",
|
||||
"ndoc": "*",
|
||||
"lodash": "*",
|
||||
"async": "*",
|
||||
"grunt": "~0.4.4",
|
||||
"grunt-cli": "~0.1.13",
|
||||
"grunt-saucelabs": "~8.6.0",
|
||||
"grunt-contrib-connect": "~0.9.0"
|
||||
}
|
||||
}
|
||||
29
BACK_BACK/node_modules/unicode-trie/package.json
generated
vendored
Executable file
29
BACK_BACK/node_modules/unicode-trie/package.json
generated
vendored
Executable file
|
|
@ -0,0 +1,29 @@
|
|||
{
|
||||
"name": "unicode-trie",
|
||||
"version": "0.3.1",
|
||||
"description": "Unicode Trie data structure for fast character metadata lookup, ported from ICU",
|
||||
"devDependencies": {
|
||||
"mocha": "^1.20.1",
|
||||
"coffee-script": "^1.7.1",
|
||||
"coffee-coverage": "^0.4.2"
|
||||
},
|
||||
"scripts": {
|
||||
"prepublish": "make build",
|
||||
"postpublish": "make clean",
|
||||
"test": "mocha"
|
||||
},
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git://github.com/devongovett/unicode-trie.git"
|
||||
},
|
||||
"author": "Devon Govett <devongovett@gmail.com>",
|
||||
"license": "MIT",
|
||||
"bugs": {
|
||||
"url": "https://github.com/devongovett/unicode-trie/issues"
|
||||
},
|
||||
"homepage": "https://github.com/devongovett/unicode-trie",
|
||||
"dependencies": {
|
||||
"pako": "^0.2.5",
|
||||
"tiny-inflate": "^1.0.0"
|
||||
}
|
||||
}
|
||||
2
BACK_BACK/node_modules/unicode-trie/test/mocha.opts
generated
vendored
Executable file
2
BACK_BACK/node_modules/unicode-trie/test/mocha.opts
generated
vendored
Executable file
|
|
@ -0,0 +1,2 @@
|
|||
--compilers coffee:coffee-script/register
|
||||
--reporter spec
|
||||
219
BACK_BACK/node_modules/unicode-trie/test/test.coffee
generated
vendored
Executable file
219
BACK_BACK/node_modules/unicode-trie/test/test.coffee
generated
vendored
Executable file
|
|
@ -0,0 +1,219 @@
|
|||
assert = require 'assert'
|
||||
UnicodeTrieBuilder = require '../builder'
|
||||
UnicodeTrie = require '../'
|
||||
|
||||
describe 'unicode trie', ->
|
||||
it 'set', ->
|
||||
trie = new UnicodeTrieBuilder 10, 666
|
||||
trie.set 0x4567, 99
|
||||
assert.equal trie.get(0x4566), 10
|
||||
assert.equal trie.get(0x4567), 99
|
||||
assert.equal trie.get(-1), 666
|
||||
assert.equal trie.get(0x110000), 666
|
||||
|
||||
it 'set -> compacted trie', ->
|
||||
t = new UnicodeTrieBuilder 10, 666
|
||||
t.set 0x4567, 99
|
||||
|
||||
trie = t.freeze()
|
||||
assert.equal trie.get(0x4566), 10
|
||||
assert.equal trie.get(0x4567), 99
|
||||
assert.equal trie.get(-1), 666
|
||||
assert.equal trie.get(0x110000), 666
|
||||
|
||||
it 'setRange', ->
|
||||
trie = new UnicodeTrieBuilder 10, 666
|
||||
trie.setRange 13, 6666, 7788, false
|
||||
trie.setRange 6000, 7000, 9900, true
|
||||
|
||||
assert.equal trie.get(12), 10
|
||||
assert.equal trie.get(13), 7788
|
||||
assert.equal trie.get(5999), 7788
|
||||
assert.equal trie.get(6000), 9900
|
||||
assert.equal trie.get(7000), 9900
|
||||
assert.equal trie.get(7001), 10
|
||||
assert.equal trie.get(0x110000), 666
|
||||
|
||||
it 'setRange -> compacted trie', ->
|
||||
t = new UnicodeTrieBuilder 10, 666
|
||||
t.setRange 13, 6666, 7788, false
|
||||
t.setRange 6000, 7000, 9900, true
|
||||
|
||||
trie = t.freeze()
|
||||
assert.equal trie.get(12), 10
|
||||
assert.equal trie.get(13), 7788
|
||||
assert.equal trie.get(5999), 7788
|
||||
assert.equal trie.get(6000), 9900
|
||||
assert.equal trie.get(7000), 9900
|
||||
assert.equal trie.get(7001), 10
|
||||
assert.equal trie.get(0x110000), 666
|
||||
|
||||
it 'should work with compressed serialization format', ->
|
||||
t = new UnicodeTrieBuilder 10, 666
|
||||
t.setRange 13, 6666, 7788, false
|
||||
t.setRange 6000, 7000, 9900, true
|
||||
|
||||
buf = t.toBuffer()
|
||||
trie = new UnicodeTrie buf
|
||||
assert.equal trie.get(12), 10
|
||||
assert.equal trie.get(13), 7788
|
||||
assert.equal trie.get(5999), 7788
|
||||
assert.equal trie.get(6000), 9900
|
||||
assert.equal trie.get(7000), 9900
|
||||
assert.equal trie.get(7001), 10
|
||||
assert.equal trie.get(0x110000), 666
|
||||
|
||||
rangeTests = [
|
||||
{
|
||||
ranges: [
|
||||
[ 0, 0, 0, 0 ],
|
||||
[ 0, 0x40, 0, 0 ],
|
||||
[ 0x40, 0xe7, 0x1234, 0 ],
|
||||
[ 0xe7, 0x3400, 0, 0 ],
|
||||
[ 0x3400, 0x9fa6, 0x6162, 0 ],
|
||||
[ 0x9fa6, 0xda9e, 0x3132, 0 ],
|
||||
[ 0xdada, 0xeeee, 0x87ff, 0 ],
|
||||
[ 0xeeee, 0x11111, 1, 0 ],
|
||||
[ 0x11111, 0x44444, 0x6162, 0 ],
|
||||
[ 0x44444, 0x60003, 0, 0 ],
|
||||
[ 0xf0003, 0xf0004, 0xf, 0 ],
|
||||
[ 0xf0004, 0xf0006, 0x10, 0 ],
|
||||
[ 0xf0006, 0xf0007, 0x11, 0 ],
|
||||
[ 0xf0007, 0xf0040, 0x12, 0 ],
|
||||
[ 0xf0040, 0x110000, 0, 0 ]
|
||||
]
|
||||
|
||||
check: [
|
||||
[ 0, 0 ],
|
||||
[ 0x40, 0 ],
|
||||
[ 0xe7, 0x1234 ],
|
||||
[ 0x3400, 0 ],
|
||||
[ 0x9fa6, 0x6162 ],
|
||||
[ 0xda9e, 0x3132 ],
|
||||
[ 0xdada, 0 ],
|
||||
[ 0xeeee, 0x87ff ],
|
||||
[ 0x11111, 1 ],
|
||||
[ 0x44444, 0x6162 ],
|
||||
[ 0xf0003, 0 ],
|
||||
[ 0xf0004, 0xf ],
|
||||
[ 0xf0006, 0x10 ],
|
||||
[ 0xf0007, 0x11 ],
|
||||
[ 0xf0040, 0x12 ],
|
||||
[ 0x110000, 0 ]
|
||||
]
|
||||
},
|
||||
{
|
||||
# set some interesting overlapping ranges
|
||||
ranges: [
|
||||
[ 0, 0, 0, 0 ],
|
||||
[ 0x21, 0x7f, 0x5555, 1 ],
|
||||
[ 0x2f800, 0x2fedc, 0x7a, 1 ],
|
||||
[ 0x72, 0xdd, 3, 1 ],
|
||||
[ 0xdd, 0xde, 4, 0 ],
|
||||
[ 0x201, 0x240, 6, 1 ], # 3 consecutive blocks with the same pattern but
|
||||
[ 0x241, 0x280, 6, 1 ], # discontiguous value ranges, testing utrie2_enum()
|
||||
[ 0x281, 0x2c0, 6, 1 ],
|
||||
[ 0x2f987, 0x2fa98, 5, 1 ],
|
||||
[ 0x2f777, 0x2f883, 0, 1 ],
|
||||
[ 0x2f900, 0x2ffaa, 1, 0 ],
|
||||
[ 0x2ffaa, 0x2ffab, 2, 1 ],
|
||||
[ 0x2ffbb, 0x2ffc0, 7, 1 ]
|
||||
]
|
||||
|
||||
check: [
|
||||
[ 0, 0 ],
|
||||
[ 0x21, 0 ],
|
||||
[ 0x72, 0x5555 ],
|
||||
[ 0xdd, 3 ],
|
||||
[ 0xde, 4 ],
|
||||
[ 0x201, 0 ],
|
||||
[ 0x240, 6 ],
|
||||
[ 0x241, 0 ],
|
||||
[ 0x280, 6 ],
|
||||
[ 0x281, 0 ],
|
||||
[ 0x2c0, 6 ],
|
||||
[ 0x2f883, 0 ],
|
||||
[ 0x2f987, 0x7a ],
|
||||
[ 0x2fa98, 5 ],
|
||||
[ 0x2fedc, 0x7a ],
|
||||
[ 0x2ffaa, 1 ],
|
||||
[ 0x2ffab, 2 ],
|
||||
[ 0x2ffbb, 0 ],
|
||||
[ 0x2ffc0, 7 ],
|
||||
[ 0x110000, 0 ]
|
||||
]
|
||||
},
|
||||
{
|
||||
# use a non-zero initial value
|
||||
ranges: [
|
||||
[ 0, 0, 9, 0 ], # non-zero initial value.
|
||||
[ 0x31, 0xa4, 1, 0 ],
|
||||
[ 0x3400, 0x6789, 2, 0 ],
|
||||
[ 0x8000, 0x89ab, 9, 1 ],
|
||||
[ 0x9000, 0xa000, 4, 1 ],
|
||||
[ 0xabcd, 0xbcde, 3, 1 ],
|
||||
[ 0x55555, 0x110000, 6, 1 ], # highStart<U+ffff with non-initialValue
|
||||
[ 0xcccc, 0x55555, 6, 1 ]
|
||||
],
|
||||
|
||||
check: [
|
||||
[ 0, 9 ], # non-zero initialValue
|
||||
[ 0x31, 9 ],
|
||||
[ 0xa4, 1 ],
|
||||
[ 0x3400, 9 ],
|
||||
[ 0x6789, 2 ],
|
||||
[ 0x9000, 9 ],
|
||||
[ 0xa000, 4 ],
|
||||
[ 0xabcd, 9 ],
|
||||
[ 0xbcde, 3 ],
|
||||
[ 0xcccc, 9 ],
|
||||
[ 0x110000, 6 ]
|
||||
]
|
||||
},
|
||||
{
|
||||
# empty or single-value tries, testing highStart==0
|
||||
ranges: [
|
||||
[ 0, 0, 3, 0 ] # Only the element with the initial value.
|
||||
]
|
||||
|
||||
check: [
|
||||
[ 0, 3 ],
|
||||
[ 0x110000, 3 ]
|
||||
]
|
||||
},
|
||||
{
|
||||
ranges: [
|
||||
[ 0, 0, 3, 0 ] # Initial value = 3
|
||||
[ 0, 0x110000, 5, 1 ]
|
||||
],
|
||||
|
||||
check: [
|
||||
[ 0, 3 ]
|
||||
[ 0x110000, 5 ]
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
it 'should pass range tests', ->
|
||||
for test in rangeTests
|
||||
initialValue = 0
|
||||
errorValue = 0x0bad
|
||||
i = 0
|
||||
if test.ranges[i][1] < 0
|
||||
errorValue = test.ranges[i][2]
|
||||
i++
|
||||
|
||||
initialValue = test.ranges[i++][2]
|
||||
trie = new UnicodeTrieBuilder initialValue, errorValue
|
||||
|
||||
for range in test.ranges[i...]
|
||||
trie.setRange range[0], range[1] - 1, range[2], range[3] isnt 0
|
||||
|
||||
frozen = trie.freeze()
|
||||
|
||||
start = 0
|
||||
for check in test.check
|
||||
for start in [start...check[0]] by 1
|
||||
assert.equal trie.get(start), check[1]
|
||||
assert.equal frozen.get(start), check[1]
|
||||
|
||||
Loading…
Add table
Add a link
Reference in a new issue