@@ -1,5 +1,10 @@
 using System;
+using System.Collections.Concurrent;
+using System.Collections.Generic;
 using System.IO;
+using System.Linq;
+using System.Threading;
+using System.Threading.Tasks;
 
 namespace LibBundle
 {
@@ -42,6 +47,7 @@ public enum COMPRESSTION_LEVEL
 
         public string path;
         public long offset;
+        public COMPRESSTION_LEVEL Compression_Level = COMPRESSTION_LEVEL.Normal;
 
         public int uncompressed_size;
         public int compressed_size;
@@ -117,48 +123,138 @@ public virtual MemoryStream Read(BinaryReader br)
             var chunks = new int[entry_count];
             for (int i = 0; i < entry_count; i++)
                 chunks[i] = br.ReadInt32();
-
+
+            var compressed = new byte[entry_count][];
+            for (int i = 0; i < entry_count; i++)
+                compressed[i] = br.ReadBytes(chunks[i]);
+
+            Parallel.For(0, entry_count, i => {
+                var size = (i + 1 == entry_count) ? uncompressed_size - (chunk_size * (entry_count - 1)) : chunk_size; // isLast ?
+                var toSave = new byte[size + 64];
+                OodleLZ_Decompress(compressed[i], compressed[i].Length, toSave, size, 0, 0, 0, IntPtr.Zero, 0, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero, 0, 3);
+                compressed[i] = toSave.Take(size).ToArray();
+            });
+
             var data = new MemoryStream(uncompressed_size);
             for (int i = 0; i < entry_count; i++)
+                data.Write(compressed[i]);
+
+            return data;
+        }
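(Aside: both the old and the new Read path rely on the same last-chunk arithmetic. A minimal worked sketch of that math, with a purely assumed chunk_size since the real value comes from the bundle header:)

// Hypothetical numbers, for illustration only.
int chunk_size = 262144;            // assumed; the bundle header supplies the real value
int uncompressed_size = 1_000_000;
int entry_count = uncompressed_size / chunk_size;                  // 3
if (uncompressed_size % chunk_size != 0) entry_count++;            // -> 4 chunks
int lastSize = uncompressed_size - chunk_size * (entry_count - 1); // 213568-byte final chunk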
+
+        public virtual byte[] AppendAndSave(Stream newData, string path = null)
+        {
+            if (path == null)
+                path = this.path;
+            offset = 0;
+            return AppendAndSave(newData, File.Open(path, FileMode.Open, FileAccess.Read, FileShare.ReadWrite));
+        }
+
+        public virtual byte[] AppendAndSave(Stream newData, Stream originalData)
+        {
+            // Skip this record's 60-byte header, then read the old per-chunk compressed sizes.
+            originalData.Seek(offset + 60, SeekOrigin.Begin);
+            var OldChunkCompressedSizes = new byte[(entry_count - 1) * 4];
+            originalData.Read(OldChunkCompressedSizes);
+
+            var lastCunkCompressedSize = originalData.ReadByte() | originalData.ReadByte() << 8 | originalData.ReadByte() << 16 | originalData.ReadByte() << 24; // ReadInt32
+
+            var lastCunkDecompressedSize = uncompressed_size - (chunk_size * (entry_count - 1));
+
+            uncompressed_size = (int)(size_decompressed += newData.Length);
+            entry_count = uncompressed_size / chunk_size;
+            if (uncompressed_size % chunk_size != 0) entry_count++;
+            head_size = entry_count * 4 + 48;
+
+            var msToSave = new MemoryStream();
+            var bw = new BinaryWriter(msToSave);
+
+            // Copy every old compressed chunk except the last one through unchanged.
+            msToSave.Seek(60 + (entry_count * 4), SeekOrigin.Begin);
+            var o = new byte[compressed_size - lastCunkCompressedSize];
+            originalData.Read(o);
+            bw.Write(o);
+
+            // The old last chunk is usually partial: decompress it so it can be merged with the new data.
+            var lastChunkCompressedData = new byte[lastCunkCompressedSize];
+            originalData.Read(lastChunkCompressedData);
+            var lastCunkDecompressedData = new byte[lastCunkDecompressedSize + 64];
+            OodleLZ_Decompress(lastChunkCompressedData, lastCunkCompressedSize, lastCunkDecompressedData, lastCunkDecompressedSize, 0, 0, 0, IntPtr.Zero, 0, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero, 0, 3);
+
+            newData.Seek(0, SeekOrigin.Begin);
+            compressed_size -= lastCunkCompressedSize;
+            var NewChunkCompressedSizes = new int[entry_count - (OldChunkCompressedSizes.Length / 4)];
+
+            var FirstNewDataChunk = new byte[Math.Min(chunk_size - lastCunkDecompressedSize, newData.Length)];
+            newData.Read(FirstNewDataChunk);
+            FirstNewDataChunk = lastCunkDecompressedData.Take(lastCunkDecompressedSize).Concat(FirstNewDataChunk).ToArray(); // Decompressed
+            var CompressedChunk = new byte[FirstNewDataChunk.Length + 548];
+            var CompressedLength = OodleLZ_Compress(encoder, FirstNewDataChunk, FirstNewDataChunk.Length, CompressedChunk, Compression_Level, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
+            compressed_size += NewChunkCompressedSizes[0] = CompressedLength;
+            bw.Write(CompressedChunk, 0, CompressedLength); // Compressed
+            var byteArrays = new byte[NewChunkCompressedSizes.Length][];
+            for (int i = 1; i < NewChunkCompressedSizes.Length; i++)
             {
-                var b = br.ReadBytes(chunks[i]);
-                int size = (i + 1 == entry_count) ? uncompressed_size - (chunk_size * (entry_count - 1)) : chunk_size; // isLast ?
-                var toSave = new byte[size + 64];
-                OodleLZ_Decompress(b, b.Length, toSave, size, 0, 0, 0, IntPtr.Zero, 0, IntPtr.Zero, IntPtr.Zero, IntPtr.Zero, 0, 3);
-                data.Write(toSave, 0, size);
+                var size = (i + 1 == NewChunkCompressedSizes.Length) ? uncompressed_size - (chunk_size * (entry_count - 1)) : chunk_size;
+                newData.Read(byteArrays[i] = new byte[size]);
             }
-            return data;
+            Parallel.For(1, NewChunkCompressedSizes.Length, i => {
+                var by = new byte[byteArrays[i].Length + 548];
+                var l = OodleLZ_Compress(encoder, byteArrays[i], byteArrays[i].Length, by, Compression_Level, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
+                byteArrays[i] = by;
+                Interlocked.Add(ref compressed_size, NewChunkCompressedSizes[i] = l);
+            });
+            // Chunk 0 was already written above, so only the remaining compressed chunks are flushed here.
+            for (int i = 1; i < NewChunkCompressedSizes.Length; i++)
+                bw.Write(byteArrays[i], 0, NewChunkCompressedSizes[i]);
+
+            size_compressed = compressed_size;
+
+            // Go back and fill in the chunk-size table, then the 60-byte header.
+            msToSave.Seek(60, SeekOrigin.Begin);
+            bw.Write(OldChunkCompressedSizes);
+            for (int i = 0; i < NewChunkCompressedSizes.Length; i++)
+                bw.Write(NewChunkCompressedSizes[i]);
+
+            msToSave.Seek(0, SeekOrigin.Begin);
+            bw.Write(uncompressed_size);
+            bw.Write(compressed_size);
+            bw.Write(head_size);
+            bw.Write((uint)encoder);
+            bw.Write(unknown);
+            bw.Write(size_decompressed);
+            bw.Write(size_compressed);
+            bw.Write(entry_count);
+            bw.Write(chunk_size);
+            bw.Write(unknown3);
+            bw.Write(unknown4);
+            bw.Write(unknown5);
+            bw.Write(unknown6);
+
+            bw.Flush();
+            var result = msToSave.ToArray();
+            bw.Close();
+            return result;
         }
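(Aside: a hedged usage sketch for the new AppendAndSave API; the record instance, the input file name, and the write-back step are assumptions, not code from this commit:)

// Hypothetical caller: append raw bytes to an existing bundle record,
// then persist the rebuilt blob that AppendAndSave returns.
using (var extra = new MemoryStream(File.ReadAllBytes("new_part.bin"))) // hypothetical input
{
    byte[] rebuilt = bundleRecord.AppendAndSave(extra); // bundleRecord: an instance of this class
    File.WriteAllBytes(bundleRecord.path, rebuilt);
}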
 
         //Packing
-        public virtual void Save(Stream ms, string path)
+        public virtual void Save(Stream newData, string path)
         {
             var bw = new BinaryWriter(File.Open(path, FileMode.Open, FileAccess.Write, FileShare.ReadWrite));
 
-            uncompressed_size = (int)(size_decompressed = ms.Length);
+            uncompressed_size = (int)(size_decompressed = newData.Length);
             entry_count = uncompressed_size / chunk_size;
             if (uncompressed_size % chunk_size != 0) entry_count++;
             head_size = entry_count * 4 + 48;
 
             bw.BaseStream.Seek(60 + (entry_count * 4), SeekOrigin.Begin);
-            ms.Position = 0;
+            newData.Seek(0, SeekOrigin.Begin);
             compressed_size = 0;
             var chunks = new int[entry_count];
-            for (int i = 0; i < entry_count - 1; i++)
+            for (int i = 0; i < entry_count; i++)
             {
-                var b = new byte[chunk_size];
-                ms.Read(b, 0, chunk_size);
+                var b = new byte[i + 1 == entry_count ? uncompressed_size - (entry_count - 1) * chunk_size : chunk_size];
+                newData.Read(b, 0, b.Length);
                 var by = new byte[b.Length + 548];
-                var l = OodleLZ_Compress(ENCODE_TYPES.LEVIATHAN, b, b.Length, by, COMPRESSTION_LEVEL.Normal, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
+                var l = OodleLZ_Compress(encoder, b, b.Length, by, Compression_Level, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
                 compressed_size += chunks[i] = l;
                 bw.Write(by, 0, l);
             }
-            var b2 = new byte[ms.Length - (entry_count - 1) * chunk_size];
-            ms.Read(b2, 0, b2.Length);
-            var by2 = new byte[b2.Length + 548];
-            var l2 = OodleLZ_Compress(ENCODE_TYPES.LEVIATHAN, b2, b2.Length, by2, COMPRESSTION_LEVEL.Normal, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
-            compressed_size += chunks[entry_count - 1] = l2;
-            bw.Write(by2, 0, l2);
             size_compressed = compressed_size;
 
             bw.BaseStream.Seek(60, SeekOrigin.Begin);
@@ -184,42 +280,41 @@ public virtual void Save(Stream ms, string path)
             bw.Close();
         }
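(Aside: the byte[] overload of Save below compresses chunks with Parallel.For while every worker adds its chunk length to the shared compressed_size, which is why it uses Interlocked.Add. A standalone sketch of that pattern, all names hypothetical:)

using System.Threading;
using System.Threading.Tasks;

int total = 0;
Parallel.For(0, 8, i =>
{
    int produced = (i + 1) * 100;         // stand-in for one chunk's compressed length
    Interlocked.Add(ref total, produced); // a plain "total += produced" would race
});
// total is always 3600, regardless of thread scheduling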
         //Packing
-        public virtual byte[] Save(Stream ms)
+        public virtual byte[] Save(Stream newData)
         {
             var msToSave = new MemoryStream();
             var bw = new BinaryWriter(msToSave);
 
-            uncompressed_size = (int)(size_decompressed = ms.Length);
+            uncompressed_size = (int)(size_decompressed = newData.Length);
             entry_count = uncompressed_size / chunk_size;
             if (uncompressed_size % chunk_size != 0) entry_count++;
             head_size = entry_count * 4 + 48;
 
-            bw.BaseStream.Seek(60 + (entry_count * 4), SeekOrigin.Begin);
-            ms.Position = 0;
+            msToSave.Seek(60 + (entry_count * 4), SeekOrigin.Begin);
+            newData.Seek(0, SeekOrigin.Begin);
             compressed_size = 0;
             var chunks = new int[entry_count];
-            for (int i = 0; i < entry_count - 1; i++)
+            var byteArrays = new byte[entry_count][];
+            for (int i = 0; i < entry_count; i++)
             {
-                var b = new byte[chunk_size];
-                ms.Read(b, 0, chunk_size);
-                var by = new byte[b.Length + 548];
-                var l = OodleLZ_Compress(ENCODE_TYPES.LEVIATHAN, b, b.Length, by, COMPRESSTION_LEVEL.Normal, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
-                compressed_size += chunks[i] = l;
-                bw.Write(by, 0, l);
+                var b = new byte[i + 1 == entry_count ? uncompressed_size - (entry_count - 1) * chunk_size : chunk_size];
+                newData.Read(byteArrays[i] = b);
             }
-            var b2 = new byte[ms.Length - (entry_count - 1) * chunk_size];
-            ms.Read(b2, 0, b2.Length);
-            var by2 = new byte[b2.Length + 548];
-            var l2 = OodleLZ_Compress(ENCODE_TYPES.LEVIATHAN, b2, b2.Length, by2, COMPRESSTION_LEVEL.Normal, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
-            compressed_size += chunks[entry_count - 1] = l2;
-            bw.Write(by2, 0, l2);
+            // Compress all chunks in parallel; the shared running total goes through Interlocked.Add.
+            Parallel.For(0, entry_count, i => {
+                var by = new byte[byteArrays[i].Length + 548];
+                var l = OodleLZ_Compress(encoder, byteArrays[i], byteArrays[i].Length, by, Compression_Level, IntPtr.Zero, 0, 0, IntPtr.Zero, 0);
+                byteArrays[i] = by;
+                Interlocked.Add(ref compressed_size, chunks[i] = l);
+            });
             size_compressed = compressed_size;
+            for (int i = 0; i < entry_count; i++)
+                bw.Write(byteArrays[i], 0, chunks[i]);
 
-            bw.BaseStream.Seek(60, SeekOrigin.Begin);
+            msToSave.Seek(60, SeekOrigin.Begin);
             for (int i = 0; i < entry_count; i++)
                 bw.Write(chunks[i]);
 
-            bw.BaseStream.Seek(0, SeekOrigin.Begin);
+            msToSave.Seek(0, SeekOrigin.Begin);
             bw.Write(uncompressed_size);
             bw.Write(compressed_size);
             bw.Write(head_size);